Dataset schema (one record per source file):

  filename           string, length 4-198
  content            string, length 25-939k
  environment        list
  variablearg        list
  constarg           list
  variableargjson    string, 1 distinct value
  constargjson       string, length 2-3.9k
  lang               string, 3 distinct values
  constargcount      float64, range 0-129
  variableargcount   float64, range 0-0
  sentence           string, 1 distinct value
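Each record below pairs a source file with the environment-variable accesses extracted from it. A minimal sketch of how the columns appear to relate, assuming the count columns are list lengths and the *json columns are JSON serializations of the lists (inferred from the rows, not documented):

    import json

    # Hypothetical record mirroring the first row below; field names are taken
    # from the schema above, their exact semantics are inferred, not documented.
    record = {
        "filename": "provision/docker/ssh_mock_test.go",
        "environment": ['"USER"'],  # raw argument tokens, quotes included
        "variablearg": [],          # env accesses whose argument is a variable
        "constarg": ["USER"],       # env accesses with a string-literal argument
        "lang": "go",
    }

    # The *count columns appear to be plain lengths of the corresponding lists,
    # stored as float64.
    record["constargcount"] = float(len(record["constarg"]))        # 1.0
    record["variableargcount"] = float(len(record["variablearg"]))  # 0.0

    # The *json columns look like JSON serializations of the same lists.
    record["constargjson"] = json.dumps(record["constarg"])  # '["USER"]'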
provision/docker/ssh_mock_test.go
// Copyright 2014 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the http://golang.org/LICENSE file. // This code is inspired by integration tests present in the package // code.google.com/p/go.crypto/ssh, with some changes by the tsuru authors. package docker import ( "bytes" "io/ioutil" "log" "net" "os" "os/exec" "os/user" "path/filepath" "text/template" "time" "launchpad.net/gocheck" ) const sshd_config = ` Protocol 2 HostKey {{.Dir}}/id_rsa Pidfile {{.Dir}}/sshd.pid Port {{.Port}} KeyRegenerationInterval 3600 ServerKeyBits 768 SyslogFacility AUTH LogLevel DEBUG2 LoginGraceTime 120 PermitRootLogin no StrictModes no RSAAuthentication yes PubkeyAuthentication yes AuthorizedKeysFile {{.Dir}}/id_rsa.pub IgnoreRhosts yes RhostsRSAAuthentication no HostbasedAuthentication no ` var configTmpl = template.Must(template.New("").Parse(sshd_config)) type sshServer struct { c *gocheck.C cleanup func() // executed during Shutdown configfile string cmd *exec.Cmd output bytes.Buffer // holds stderr from sshd process port string } func sshUsername() string { var username string if user, err := user.Current(); err == nil { username = user.Username } else { log.Printf("user.Current: %v; falling back on $USER", err) username = os.Getenv("USER") } if username == "" { panic("Unable to get username") } return username } func (s *sshServer) start() { sshd, err := exec.LookPath("sshd") if err != nil { s.c.Skip("skipping test: " + err.Error()) } s.cmd = exec.Command(sshd, "-f", s.configfile, "-e", "-D") s.cmd.Stdout = &s.output s.cmd.Stderr = &s.output if err := s.cmd.Start(); err != nil { s.c.Fail() s.Shutdown() s.c.Fatalf("s.cmd.Start: %v", err) } } func (s *sshServer) Shutdown() { if s.cmd != nil && s.cmd.Process != nil { // Don't check for errors; if it fails it's most // likely "os: process already finished", and we don't // care about that. Use os.Interrupt, so child // processes are killed too. s.cmd.Process.Signal(os.Interrupt) s.cmd.Wait() } if s.c.Failed() { // log any output from sshd process s.c.Log("sshd: " + s.output.String()) } s.cleanup() } func writeFile(path string, contents []byte) { f, err := os.OpenFile(path, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0600) if err != nil { panic(err) } defer f.Close() if _, err := f.Write(contents); err != nil { panic(err) } } func getAvailablePort() string { listener, err := net.Listen("tcp", ":0") if err != nil { panic(err) } defer listener.Close() _, port, _ := net.SplitHostPort(listener.Addr().String()) return port } // newMockSSHServer returns a new mock ssh server. 
func newMockSSHServer(c *gocheck.C, timeout time.Duration) *sshServer { dir, err := ioutil.TempDir("", "sshtest") c.Assert(err, gocheck.IsNil) f, err := os.Create(filepath.Join(dir, "sshd_config")) c.Assert(err, gocheck.IsNil) port := getAvailablePort() err = configTmpl.Execute(f, map[string]string{ "Dir": dir, "Port": port, }) c.Assert(err, gocheck.IsNil) f.Close() ioutil.WriteFile(filepath.Join(dir, "id_rsa"), fakeServerPrivateKey, 0600) ioutil.WriteFile(filepath.Join(dir, "id_rsa.pub"), fakeServerPublicKey, 0644) server := sshServer{ c: c, configfile: f.Name(), cleanup: func() { err := os.RemoveAll(dir) c.Assert(err, gocheck.IsNil) }, port: port, } server.start() timedout := make(chan bool) quit := make(chan bool) go func() { addr := "localhost:" + port for { select { case <-timedout: return default: if conn, err := net.Dial("tcp", addr); err == nil { conn.Close() close(quit) return } } } }() select { case <-quit: case <-time.After(timeout): close(timedout) c.Fatalf("The SSH server didn't come up after %s.", timeout) } return &server } var fakeServerPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY----- MIICXQIBAAKBgQCj9Zd3Vhrq4GbZ3Ed8HcBJBcW7GVdVUDRmu7vTbIJ9B435QKG7 CpLAL8SHULHETDsKZliuaL+JZgTxArGKycEOCBW30NsURnTBgOuURFkkR+4++Hhx +VCR5+Z9Gu5BPZTNdGRU8z1C4+GCgIU7FdVJK+Qj00WKBMcTbb89/6z15wIDAQAB AoGATQhLFKdg2C98QylqcJbty6EpqGEclhmrtQTJF2lo2WNeQdgq5FzwW9lVhZnV G3wRVS6GxdKzAtPqyG1SivmFeNh2uj+tohxhNRQsDKSt1K4it3UctfGOeZU8pIp2 iheYFej0boKhf1Llk5OwTXGlMfD0nkpdD0kMUSjIpO/q4BECQQDXLOFw9uqcr+Ek BT8ge1lGGlCyChKT1pMkU9xivSL7s36AzG93pus0jNAYd3gOftP+IKrIUypfH/XF whhpmGhvAkEAwxEjaOm+RyTzV0L+tXNxsnmAYWrY1IZVCJx/nsAAAEv/0ULyfEs9 0P7bpo1Ov3LUNTd4Jz7AAb5G+f4dE4RWCQJAWDR0oasGF37dird/3h/SQ7Nr2t/Y J7QxExYxZGRl38n/lGq5UtIg3qTOdQkcNMz2t9jKSV4WI3JlfFCJU1f/jwJBALRY cQN7L7d4+x3PS8wYqqKWYNIwRb3fYFiwz/DGlHmxyhb/rU6rBcDnD86hUJACKx30 Zbrq8fvqnpZckSdNL3kCQQDNbVWjqR7d472Ble/dWvohFTfZQ2pEWym8Ars29Sg1 vznme2VX7myWVmDiMK4Dy4VuJWgcqZpZm0dGNAf0f2kk -----END RSA PRIVATE KEY----- `) var fakeServerPublicKey = []byte("ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQCj9Zd3Vhrq4GbZ3Ed8HcBJBcW7GVdVUDRmu7vTbIJ9B435QKG7CpLAL8SHULHETDsKZliuaL+JZgTxArGKycEOCBW30NsURnTBgOuURFkkR+4++Hhx+VCR5+Z9Gu5BPZTNdGRU8z1C4+GCgIU7FdVJK+Qj00WKBMcTbb89/6z15w== [email protected]\n")
[ "\"USER\"" ]
[]
[ "USER" ]
[]
["USER"]
go
1
0
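The single const argument in this row comes from sshUsername(), which falls back to os.Getenv("USER") when user.Current() fails. A rough sketch of pulling such literal arguments out of Go source with a regex; this is only an illustration, not the extractor that produced the dataset:

    import re

    # One-line stand-in for the relevant call in sshUsername() above.
    go_source = 'username = os.Getenv("USER")'

    # Capture string literals passed to os.Getenv; a production extractor would
    # walk the Go AST instead of pattern-matching text.
    const_args = re.findall(r'os\.Getenv\("([^"]+)"\)', go_source)
    print(const_args)  # ['USER']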
client/configuration.py
# Copyright (c) 2016-present, Facebook, Inc. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. # pyre-unsafe import hashlib import json import logging import os import shutil import site import sys from logging import Logger from typing import Dict, List, Optional, Union from . import ( BINARY_NAME, CONFIGURATION_FILE, LOCAL_CONFIGURATION_FILE, LOG_DIRECTORY, assert_readable_directory, find_typeshed, number_of_workers, ) from .exceptions import EnvironmentException LOG: Logger = logging.getLogger(__name__) class InvalidConfiguration(Exception): pass class SearchPathElement: def __init__(self, root: str, subdirectory: Optional[str] = None) -> None: self.root = os.path.expanduser(root) self.subdirectory = subdirectory def path(self) -> str: subdirectory = self.subdirectory if subdirectory is not None: return os.path.join(self.root, subdirectory) else: return self.root def command_line_argument(self) -> str: subdirectory = self.subdirectory if subdirectory is not None: return self.root + "$" + subdirectory else: return self.root def __eq__(self, other: str) -> bool: # We support this for testing. if isinstance(other, str): return self.path() == other else: return self.root == other.root and self.subdirectory == other.subdirectory def expand_search_path(path: Union[Dict[str, str], str]) -> SearchPathElement: if isinstance(path, str): return SearchPathElement(path) else: if "root" in path and "subdirectory" in path: root = path["root"] subdirectory = path["subdirectory"] return SearchPathElement(root, subdirectory) elif "site-package" in path: site_root = site.getsitepackages() subdirectory = path["site-package"] found_element = None for root in site_root: site_package_element = SearchPathElement(root, subdirectory) if os.path.isdir(site_package_element.path()): found_element = site_package_element if found_element is None: raise InvalidConfiguration( "Cannot find site package '{}'".format(subdirectory) ) return found_element else: raise InvalidConfiguration( "Search path elements must have `root` and `subdirectory` specified." ) class _ConfigurationFile: def __init__(self, file) -> None: self._deprecated = {"do_not_check": "ignore_all_errors"} contents = file.read() self.file_hash: str = hashlib.sha1(contents.encode("utf-8")).hexdigest() self._configuration = json.loads(contents) def consume( self, key, default=None, current=None, print_on_success=False, raise_on_override=False, ): """ Consume a key from the configuration. When a key is consumed, it is removed from the configuration. If not found, the default is returned. If the current value is not None, it will be returned instead, but the key will still be considered consumed. """ value = self._configuration.pop(key, default) if raise_on_override and current and value: raise EnvironmentException( "Configuration file may not override `{}` field.".format(key) ) if current: return current if value and print_on_success: LOG.debug("Found %s: `%s`", key, ", ".join(value)) if value and key in self._deprecated: LOG.warning( "Configuration file uses deprecated item `%s`: " "please migrate to its replacement `%s`", key, self._deprecated[key], ) return value def unused_keys(self): """ Return all keys not consumed yet. Some keys are explicitly whitelisted. 
""" return self._configuration.keys() - { "buck_builder_binary", "continuous", "coverage", "differential", "push_blocking", "pyre_client", "saved_state", "taint_models_path", } def expand_relative_path(root: str, path: str) -> str: path = os.path.expanduser(path) if os.path.isabs(path): return path else: return os.path.join(root, path) class Configuration: disabled: bool = False def __init__( self, local_configuration: Optional[str] = None, search_path: Optional[List[str]] = None, binary: Optional[str] = None, typeshed: Optional[str] = None, preserve_pythonpath: bool = False, excludes: Optional[List[str]] = None, formatter: Optional[str] = None, logger: Optional[str] = None, log_directory: Optional[str] = None, ) -> None: self.source_directories = [] self.targets = [] self.logger = logger self.formatter = formatter self.ignore_all_errors = [] self.number_of_workers: int = 0 self.local_configuration: Optional[str] = None self.taint_models_path: List[str] = [] self.file_hash: Optional[str] = None self.extensions: List[str] = [] self._log_directory: Optional[str] = log_directory self._version_hash: Optional[str] = None self._binary: Optional[str] = None self._typeshed: Optional[str] = None self.strict: bool = False self._use_buck_builder: Optional[bool] = None self._use_json_sockets: Optional[bool] = None self.ignore_infer: List[str] = [] # Handle search path from multiple sources self._search_path = [] if preserve_pythonpath: for path in os.getenv("PYTHONPATH", default="").split(":"): if path != "": if os.path.isdir(path): self._search_path.append(SearchPathElement(path)) else: LOG.warning( "`{}` is not a valid directory, dropping it " "from PYTHONPATH".format(path) ) # sys.path often includes '' and a zipped python version, so # we don't log warnings for non-dir entries sys_path = [ SearchPathElement(path) for path in sys.path if os.path.isdir(path) ] self._search_path.extend(sys_path) if search_path: search_path_elements = [expand_search_path(path) for path in search_path] self._search_path.extend(search_path_elements) # We will extend the search path further, with the config file # items, inside _read(). if binary: self._binary = binary if typeshed: self._typeshed = typeshed self.excludes: List[str] = [] if excludes: self.excludes.extend(excludes) if local_configuration: # Handle local configuration explicitly configured on the # commandline. self._check_read_local_configuration( local_configuration, fail_on_error=True ) if log_directory: self.ignore_all_errors.append(log_directory) self.autocomplete = False # Order matters. The values will only be updated if a field is None. self._read(CONFIGURATION_FILE) self._override_version_hash() self._resolve_versioned_paths() self._apply_defaults() self._validate() def _validate(self) -> None: try: def is_list_of_strings(list): if len(list) == 0: return True return not isinstance(list, str) and all( isinstance(element, str) for element in list ) if not is_list_of_strings( self.source_directories ) or not is_list_of_strings(self.targets): raise InvalidConfiguration( "`target` and `source_directories` fields must be lists of " "strings." ) if not is_list_of_strings(self.ignore_all_errors): raise InvalidConfiguration( "`ignore_all_errors` field must be a list of strings." ) if not is_list_of_strings(self.ignore_infer): raise InvalidConfiguration( "`ignore_infer` field must be a list of strings." ) if not is_list_of_strings(self.extensions): raise InvalidConfiguration( "`extensions` field must be a list of strings." 
) if not all( extension.startswith(".") or not extension for extension in self.extensions ): raise InvalidConfiguration( "`extensions` must only contain strings formatted as `.EXT`" ) if not os.path.exists(self.binary): raise InvalidConfiguration( "Binary at `{}` does not exist.".format(self.binary) ) if self.number_of_workers < 1: raise InvalidConfiguration("Number of workers must be greater than 0.") # Validate typeshed path and sub-elements. assert_readable_directory(self.typeshed) # A courtesy warning since we have changed default behaviour. if self._typeshed_has_obsolete_value(): LOG.warning( "It appears that `{}` points at a `stdlib` directory. " "Please note that the `typeshed` configuration must point at " "the root of the `typeshed` directory.".format(self.typeshed) ) non_existent_ignore_paths = [ path for path in self.ignore_all_errors if not os.path.exists(path) ] if non_existent_ignore_paths: LOG.warning( "Nonexistent paths passed in to `ignore_all_errors`: `{}`".format( non_existent_ignore_paths ) ) self.ignore_all_errors = [ path for path in self.ignore_all_errors if path not in non_existent_ignore_paths ] non_existent_infer_paths = [ path for path in self.ignore_infer if not os.path.exists(path) ] if non_existent_infer_paths: LOG.warning( "Nonexistent paths passed in to `ignore_infer`: `{}`".format( non_existent_infer_paths ) ) self.ignore_infer = [ path for path in self.ignore_infer if path not in non_existent_infer_paths ] typeshed_subdirectories = os.listdir(self.typeshed) if "stdlib" not in typeshed_subdirectories: raise InvalidConfiguration( "`typeshed` location must contain a `stdlib` directory." ) for typeshed_subdirectory_name in typeshed_subdirectories: typeshed_subdirectory = os.path.join( self.typeshed, typeshed_subdirectory_name ) if ( not os.path.isdir(typeshed_subdirectory) or typeshed_subdirectory_name == "tests" or typeshed_subdirectory_name[0] == "." or typeshed_subdirectory_name == "__pycache__" ): # Ignore some well-known directories we do not care about. continue assert_readable_directory(typeshed_subdirectory) for typeshed_version_directory_name in os.listdir( typeshed_subdirectory ): if not typeshed_version_directory_name[0].isdigit(): raise InvalidConfiguration( "Directories inside `typeshed` must only contain " "second-level subdirectories starting with " "a version number." ) typeshed_version_directory = os.path.join( typeshed_subdirectory, typeshed_version_directory_name ) assert_readable_directory(typeshed_version_directory) # Validate elements of the search path. 
for element in self._search_path: assert_readable_directory(element.path()) except InvalidConfiguration as error: raise EnvironmentException("Invalid configuration: {}".format(str(error))) @property def version_hash(self) -> str: return self._version_hash or "unversioned" @property def binary(self) -> str: binary = self._binary if not binary: raise InvalidConfiguration("Configuration was not validated") return binary @property def typeshed(self) -> str: typeshed = self._typeshed if not typeshed: raise InvalidConfiguration("Configuration invalid: no typeshed specified") return typeshed @property def use_buck_builder(self) -> bool: return self._use_buck_builder or False @property def search_path(self) -> List[str]: if not self._search_path: return [] return [element.command_line_argument() for element in self._search_path] @property def local_configuration_root(self) -> Optional[str]: local_configuration = self.local_configuration if local_configuration: if os.path.isdir(local_configuration): return local_configuration else: return os.path.dirname(local_configuration) @property def log_directory(self) -> str: log_directory = self._log_directory if not log_directory: raise InvalidConfiguration("Configuration was not validated") return log_directory def _check_read_local_configuration(self, path: str, fail_on_error: bool) -> None: if fail_on_error and not os.path.exists(path): raise EnvironmentException( "Local configuration path `{}` does not exist.".format(path) ) if os.path.isdir(path): local_configuration = os.path.join(path, CONFIGURATION_FILE + ".local") if not os.path.exists(local_configuration): if fail_on_error: raise EnvironmentException( "Local configuration directory `{}` does not contain " "a `{}` file.".format(path, CONFIGURATION_FILE + ".local") ) else: LOG.debug( "Configuration will be read from the project root: " "`{}`".format(os.getcwd()) ) else: self.local_configuration = local_configuration else: local_configuration = path self.local_configuration = local_configuration self._read(local_configuration) def _read(self, path: str) -> None: try: with open(path) as file: LOG.debug("Reading configuration `%s`...", path) configuration = _ConfigurationFile(file) source_directories = configuration.consume( "source_directories", default=[], current=self.source_directories, print_on_success=True, raise_on_override=True, ) configuration_directory = os.path.dirname(path) if configuration_directory: self.source_directories = [ os.path.join(configuration_directory, directory) for directory in source_directories ] else: self.source_directories = [ os.path.expanduser(directory) for directory in source_directories ] self.targets = configuration.consume( "targets", default=[], current=self.targets, print_on_success=True, raise_on_override=True, ) if configuration.consume("disabled", default=False): self.disabled = True self.logger = configuration.consume("logger", current=self.logger) self.formatter = configuration.consume( "formatter", current=self.formatter ) self.strict = configuration.consume("strict", default=self.strict) ignore_all_errors = configuration.consume( "ignore_all_errors", default=[] ) # Deprecated. 
ignore_all_errors += configuration.consume("do_not_check", default=[]) configuration_path = os.path.dirname(os.path.realpath(path)) self.ignore_all_errors.extend( [ expand_relative_path(root=configuration_path, path=path) for path in ignore_all_errors ] ) ignore_infer = configuration.consume("ignore_infer", default=[]) self.ignore_infer.extend( [ expand_relative_path(root=configuration_path, path=path) for path in ignore_infer ] ) self.number_of_workers = int( configuration.consume( "workers", default=0, current=self.number_of_workers ) ) binary = configuration.consume("binary", current=self._binary) assert binary is None or isinstance(binary, str) if binary is not None: binary = expand_relative_path(configuration_path, binary) self._binary = binary additional_search_path = configuration.consume( "search_path", default=[] ) if isinstance(additional_search_path, list): self._search_path.extend( [expand_search_path(path) for path in additional_search_path] ) else: self._search_path.append(SearchPathElement(additional_search_path)) version_hash = configuration.consume( "version", current=self._version_hash ) assert version_hash is None or isinstance(version_hash, str) self._version_hash = version_hash typeshed = configuration.consume("typeshed", current=self._typeshed) assert typeshed is None or isinstance(typeshed, str) if typeshed is not None: typeshed = expand_relative_path(configuration_path, typeshed) self._typeshed = typeshed taint_models_path = configuration.consume("taint_models_path") assert ( taint_models_path is None or isinstance(taint_models_path, str) or isinstance(taint_models_path, list) ) configuration_directory = os.path.dirname(os.path.realpath(path)) if isinstance(taint_models_path, str): self.taint_models_path.append( os.path.join(configuration_directory, taint_models_path) ) elif isinstance(taint_models_path, list): self.taint_models_path.extend( [ os.path.join(configuration_directory, path) for path in taint_models_path ] ) excludes = configuration.consume("exclude", default=[]) if isinstance(excludes, list): self.excludes.extend(excludes) else: self.excludes.append(excludes) extensions = configuration.consume("extensions", default=[]) self.extensions.extend(extensions) # We rely on the configuration SHA1 to make if configuration.consume("saved_state"): self.file_hash = configuration.file_hash use_buck_builder = configuration.consume("use_buck_builder") if self._use_buck_builder is None: self._use_buck_builder = use_buck_builder use_json_sockets = configuration.consume("use_json_sockets") if self._use_json_sockets is None: self._use_json_sockets = use_json_sockets self.autocomplete = configuration.consume("autocomplete", default=False) # This block should be at the bottom to be effective. unused_keys = configuration.unused_keys() if unused_keys: LOG.warning( "Some configuration items were not recognized in " "`{}`: {}".format(path, ", ".join(unused_keys)) ) except IOError: # To avoid TOCTTOU bugs, handle IOErrors here silently. # We error elsewhere if there weren't enough parameters passed into pyre. 
pass except json.JSONDecodeError as error: raise EnvironmentException( "Configuration file at `{}` is invalid: {}.".format(path, str(error)) ) def _resolve_versioned_paths(self) -> None: version_hash = self.version_hash if not version_hash: return binary = self._binary if binary: self._binary = binary.replace("%V", version_hash) typeshed = self._typeshed if typeshed: self._typeshed = typeshed.replace("%V", version_hash) def _override_version_hash(self) -> None: overriding_version_hash = os.getenv("PYRE_VERSION_HASH") if overriding_version_hash: self._version_hash = overriding_version_hash LOG.warning("Version hash overridden with `%s`", self._version_hash) def _apply_defaults(self) -> None: overriding_binary = os.getenv("PYRE_BINARY") if overriding_binary: self._binary = overriding_binary LOG.warning("Binary overridden with `%s`", self._binary) if not self._binary: LOG.info( "No binary specified, looking for `{}` in PATH".format(BINARY_NAME) ) self._binary = shutil.which(BINARY_NAME) if not self._binary: binary_candidate = os.path.join( os.path.dirname(sys.argv[0]), BINARY_NAME ) self._binary = shutil.which(binary_candidate) if not self._binary: LOG.warning("Could not find `{}` in PATH".format(BINARY_NAME)) else: LOG.info("Found: `%s`", self._binary) if self.number_of_workers == 0: self.number_of_workers = number_of_workers() if not self._typeshed: LOG.info("No typeshed specified, looking for it") self._typeshed = find_typeshed() if not self._typeshed: LOG.warning("Could not find a suitable typeshed") else: LOG.info("Found: `%s`", self._typeshed) if not self._log_directory: # TODO(T56191177): We should not start up a server at all if no configurations # exist. Currently, we treat the cwd as the project root if no configurations # exist. Instead, we should default logging to `tmp/.pyre` in the initial # find_log_directory as well. self._log_directory = "/tmp/.pyre" def _typeshed_has_obsolete_value(self) -> bool: (head, tail) = os.path.split(self.typeshed) if tail == "stdlib": return True if tail != "": return False # If `path` ends in a slash, tail will be empty. (head, tail) = os.path.split(head) return tail == "stdlib"
[]
[]
[ "PYRE_BINARY", "PYRE_VERSION_HASH", "PYTHONPATH" ]
[]
["PYRE_BINARY", "PYRE_VERSION_HASH", "PYTHONPATH"]
python
3
0
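All three names in this row reach the environment through os.getenv, and the call that passes an explicit default is evidently counted the same as a bare read. The three access patterns, simplified from the file above:

    import os

    # Plain read, as in _apply_defaults(): returns None when unset.
    overriding_binary = os.getenv("PYRE_BINARY")

    # Read with an explicit default, as in Configuration.__init__().
    python_path = os.getenv("PYTHONPATH", default="")

    # Read whose truthiness gates an override, as in _override_version_hash().
    overriding_version_hash = os.getenv("PYRE_VERSION_HASH")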
tests/token_classification_tests/callback_test.py
import os import unittest from transformers_keras.token_classification.callback import ( SavedModelForCRFTokenClassification, SeqEvalForCRFTokenClassification, SeqEvalForTokenClassification, ) from transformers_keras.token_classification.crf_models import BertCRFForTokenClassification from transformers_keras.token_classification.models import BertForTokenClassification BERT_PATH = os.path.join(os.environ["CHINESE_BERT_PATH"], "chinese_roberta_wwm_ext_L-12_H-768_A-12") VOCAB_PATH = os.path.join(BERT_PATH, "vocab.txt") class CallbackTest(unittest.TestCase): """Callback test""" def test_seqeval_for_token_classification(self): callback = SeqEvalForTokenClassification.from_conll_files( "testdata/conll.txt", feature_vocab_file="testdata/vocab.bert.txt", label_vocab_file="testdata/labels.txt", sep="\\s+", ) model = BertForTokenClassification.from_pretrained(BERT_PATH, override_params={"num_labels": 3}) callback.model = model callback.on_epoch_end(epoch=0, logs=None) def test_seqeval_for_crf_token_classification(self): callback = SeqEvalForCRFTokenClassification.from_conll_files( "testdata/conll.txt", feature_vocab_file="testdata/vocab.bert.txt", label_vocab_file="testdata/labels.txt", sep="\\s+", ) model = BertCRFForTokenClassification.from_pretrained(BERT_PATH, override_params={"num_labels": 3}) callback.model = model callback.on_epoch_end(epoch=0, logs=None) def test_savedmodel_for_crf_token_classification(self): callback = SavedModelForCRFTokenClassification("models/bert-crf-export") model = BertCRFForTokenClassification.from_pretrained(BERT_PATH, override_params={"num_labels": 3}) callback.model = model callback.on_epoch_end(epoch=0, logs=None) if __name__ == "__main__": unittest.main()
[]
[]
[ "CHINESE_BERT_PATH" ]
[]
["CHINESE_BERT_PATH"]
python
1
0
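Here the read is a subscript on os.environ rather than a getenv call, yet CHINESE_BERT_PATH still lands in constarg, so the extractor apparently treats both spellings alike. For reference, the two differ only in how they fail when the variable is unset:

    import os

    # Subscript access, as in the test module above; raises KeyError when unset.
    try:
        bert_path = os.environ["CHINESE_BERT_PATH"]
    except KeyError:
        bert_path = None

    # The getenv spelling of the same read returns None instead of raising.
    assert bert_path == os.getenv("CHINESE_BERT_PATH")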
vote/vote/asgi.py
""" ASGI config for vote project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/4.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'vote.settings') application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
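This row reports no const arguments even though the file touches os.environ. The setdefault call writes a fallback rather than reading a value, which would explain the empty columns; that read-versus-write distinction is an inference from this row, not documented behaviour:

    import os

    # Write-style access, as in vote/vote/asgi.py: only sets the variable
    # if it is not already present; nothing is read out of the environment.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "vote.settings")

    # A read-style access of the same name is what would surface in constarg.
    settings_module = os.getenv("DJANGO_SETTINGS_MODULE")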
avalon/maya/compat.py
"""Compatibility This module is to ensure the compatibility between Maya, Avalon and Pyblish is maintained. """ import maya.cmds as cmds import os import logging import avalon.pipeline log = logging.getLogger(__name__) create = avalon.pipeline.create def remove_googleapiclient(): """Check if the compatibility must be maintained The Maya 2018 version tries to import the `http` module from Maya2018/plug-ins/MASH/scripts/googleapiclient/http.py in stead of the module from six.py. This import conflict causes a crash Avalon's publisher. This is due to Autodesk adding paths to the PYTHONPATH environment variable which contain modules instead of only packages. """ keyword = "googleapiclient" # reconstruct python paths python_paths = os.environ["PYTHONPATH"].split(os.pathsep) paths = [path for path in python_paths if keyword not in path] os.environ["PYTHONPATH"] = os.pathsep.join(paths) def install(): """Run all compatibility functions""" if cmds.about(version=True) == "2018": remove_googleapiclient() def load(Loader, representation, name=None, namespace=None, data=None): """Load asset via database Deprecated; this functionality is replaced by `api.load()` Arguments: Loader (api.Loader): The loader to process in host Maya. representation (dict, io.ObjectId or str): Address to representation name (str, optional): Use pre-defined name namespace (str, optional): Use pre-defined namespace data (dict, optional): Additional settings dictionary """ from avalon.vendor import six from avalon import io from avalon.maya import lib from avalon.maya.pipeline import containerise assert representation, "This is a bug" if isinstance(representation, (six.string_types, io.ObjectId)): representation = io.find_one({"_id": io.ObjectId(str(representation))}) version, subset, asset, project = io.parenthood(representation) assert all([representation, version, subset, asset, project]), ( "This is a bug" ) context = { "project": project, "asset": asset, "subset": subset, "version": version, "representation": representation, } # Ensure data is a dictionary when no explicit data provided if data: assert isinstance(data, dict), "Data must be a dictionary" else: data = dict() name = name or subset["name"] namespace = namespace or lib.unique_namespace( asset["name"] + "_", prefix="_" if asset["name"][0].isdigit() else "", suffix="_", ) # TODO(roy): add compatibility check, see `tools.loader.lib` Loader.log.info( "Running '%s' on '%s'" % (Loader.__name__, asset["name"]) ) try: loader = Loader(context) with lib.maintained_selection(): loader.process(name, namespace, context, data) except OSError as e: log.info("WARNING: %s" % e) return list() # Only containerize if any nodes were loaded by the Loader nodes = loader[:] if not nodes: return return containerise( name=name, namespace=namespace, nodes=loader[:], context=context, loader=Loader.__name__) def update(container, version=-1): """Update `container` to `version` Deprecated; this functionality is replaced by `api.update()` This function relies on a container being referenced. At the time of this writing, all assets - models, rigs, animations, shaders - are referenced and should pose no problem. But should there be an asset that isn't referenced then this function will need to see an update. Arguments: container (avalon-core:container-1.0): Container to update, from `host.ls()`. version (int, optional): Update the container to this version. If no version is passed, the latest is assumed. 
""" from avalon import io from avalon import api node = container["objectName"] # Assume asset has been referenced reference_node = next((node for node in cmds.sets(node, query=True) if cmds.nodeType(node) == "reference"), None) assert reference_node, ("Imported container not supported; " "container must be referenced.") current_representation = io.find_one({ "_id": io.ObjectId(container["representation"]) }) assert current_representation is not None, "This is a bug" version_, subset, asset, project = io.parenthood(current_representation) if version == -1: new_version = io.find_one({ "type": "version", "parent": subset["_id"] }, sort=[("name", -1)]) else: new_version = io.find_one({ "type": "version", "parent": subset["_id"], "name": version, }) new_representation = io.find_one({ "type": "representation", "parent": new_version["_id"], "name": current_representation["name"] }) assert new_version is not None, "This is a bug" template_publish = project["config"]["template"]["publish"] fname = template_publish.format(**{ "root": api.registered_root(), "project": project["name"], "asset": asset["name"], "silo": asset["silo"], "subset": subset["name"], "version": new_version["name"], "representation": current_representation["name"], }) file_type = { "ma": "mayaAscii", "mb": "mayaBinary", "abc": "Alembic" }.get(new_representation["name"]) assert file_type, ("Unsupported representation: %s" % new_representation) assert os.path.exists(fname), "%s does not exist." % fname cmds.file(fname, loadReference=reference_node, type=file_type) # Update metadata cmds.setAttr(container["objectName"] + ".representation", str(new_representation["_id"]), type="string") def remove(container): """Remove an existing `container` from Maya scene Deprecated; this functionality is replaced by `api.remove()` Arguments: container (avalon-core:container-1.0): Which container to remove from scene. """ node = container["objectName"] # Assume asset has been referenced reference_node = next((node for node in cmds.sets(node, query=True) if cmds.nodeType(node) == "reference"), None) assert reference_node, ("Imported container not supported; " "container must be referenced.") log.info("Removing '%s' from Maya.." % container["name"]) namespace = cmds.referenceQuery(reference_node, namespace=True) fname = cmds.referenceQuery(reference_node, filename=True) cmds.file(fname, removeReference=True) try: cmds.delete(node) except ValueError: # Already implicitly deleted by Maya upon removing reference pass try: # If container is not automatically cleaned up by May (issue #118) cmds.namespace(removeNamespace=namespace, deleteNamespaceContent=True) except RuntimeError: pass class BackwardsCompatibleLoader(avalon.pipeline.Loader): """A backwards compatible loader. This triggers the old-style `process` through the old Maya's host `load`, `update` and `remove` methods and exposes it through the new-style Loader api. Note: This inherits from `avalon.pipeline.Loader` and *not* from `avalon.maya.pipeline.Loader` """ def load(self, context, name=None, namespace=None, data=None): return load(Loader=self.__class__, representation=context['representation'], name=name, namespace=namespace, data=data) def remove(self, container): return remove(container) def update(self, container, representation): version = representation['context']['version'] return update(container, version=version)
[]
[]
[ "PYTHONPATH" ]
[]
["PYTHONPATH"]
python
1
0
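The row's single const argument comes from remove_googleapiclient(), which reads PYTHONPATH, filters it, and writes it back; only the read appears to be recorded. A standalone, simplified version of that round trip (demo value assumed):

    import os

    # Simplified from remove_googleapiclient() above: read PYTHONPATH, drop any
    # googleapiclient entries, and write the filtered value back.
    os.environ["PYTHONPATH"] = os.pathsep.join(["/a/googleapiclient/x", "/b/ok"])  # demo value
    paths = os.environ["PYTHONPATH"].split(os.pathsep)
    paths = [p for p in paths if "googleapiclient" not in p]
    os.environ["PYTHONPATH"] = os.pathsep.join(paths)
    print(os.environ["PYTHONPATH"])  # /b/ok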
scripts/train.py
from runners.experiment_utils import load_experiment, save_experiment from src import dataset, train, model from src.utils import loaders, seed import logging from runners.utils import load_yaml from . import cmd, document_parser from argparse import ArgumentParser import os def train_experiment(path_to_yml_file, **kwargs): """ Starts a training job for the experiment defined at the path specified. Fits the model accordingly. You can also pass in things into keyword arguments that will get tossed into the "options" dictionary that is passed to the Trainer class. Args: path_to_yml_file (str): Path to the configuration for the experiment that is getting trained. The script will take the configuration and launch a training job for the experiment. """ config, exp, path_to_yml_file = load_experiment(path_to_yml_file) config['train_config'].update(kwargs) if 'seed' in config['info']: seed(config['info']['seed']) train_class = config['train_config'].pop('class') TrainerClass = getattr(train, train_class) if 'train' not in config['datasets']: logging.error('Train dataset must be specified!') _datasets = {} for key in ['train', 'val']: if key in config['datasets']: _datasets[key] = loaders.load_dataset( config['datasets'][key]['class'], config['datasets'][key]['folder'], config['dataset_config'] ) else: _datasets[key] = None _model = loaders.load_model(config['model_config']) _trainer = TrainerClass( config['info']['output_folder'], _datasets['train'], _model, config['train_config'], validation_data=_datasets['val'], use_tensorboard=config['train_config'].pop('use_tensorboard', False), experiment=exp ) _trainer.fit() @document_parser('train_experiment', 'scripts.train.train') def build_parser(): parser = ArgumentParser() parser.add_argument( "-p", "--path_to_yml_file", type=str, required=True, help="""Path to the configuration for the experiment that is getting trained. The script will take the configuration and launch a training job for the experiment. """ ) return parser if __name__ == '__main__': cmd(train_experiment, build_parser)
[]
[]
[]
[]
[]
python
null
null
null
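This row is the degenerate case: scripts/train.py imports os but never queries the environment, and its count columns come through as null rather than 0. A hypothetical guard for consumers aggregating over these columns:

    # Hypothetical guard when iterating rows of this dataset: rows like this
    # one carry null instead of 0.0, so normalize before doing arithmetic.
    def const_count(row) -> int:
        value = row.get("constargcount")
        return int(value) if value is not None else 0

    print(const_count({"constargcount": None}))  # 0
    print(const_count({"constargcount": 3.0}))   # 3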
pkg/oc/bootstrap/docker/up.go
package docker import ( "bytes" "fmt" "io" "io/ioutil" "net" "os" "path" "path/filepath" "runtime" "strings" "time" "github.com/docker/docker/api/types/versions" cliconfig "github.com/docker/docker/cli/config" dockerclient "github.com/docker/docker/client" "github.com/golang/glog" "github.com/spf13/cobra" "github.com/spf13/pflag" "golang.org/x/net/context" kerrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/sets" "k8s.io/client-go/rest" kclientcmd "k8s.io/client-go/tools/clientcmd" clientcmdapi "k8s.io/client-go/tools/clientcmd/api" aggregatorinstall "k8s.io/kube-aggregator/pkg/apis/apiregistration/install" "k8s.io/kubernetes/pkg/api/legacyscheme" kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset" "k8s.io/kubernetes/pkg/kubectl/cmd/templates" kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util" osclientcmd "github.com/openshift/origin/pkg/client/cmd" configapi "github.com/openshift/origin/pkg/cmd/server/apis/config" cmdutil "github.com/openshift/origin/pkg/cmd/util" "github.com/openshift/origin/pkg/cmd/util/variable" oauthclientinternal "github.com/openshift/origin/pkg/oauth/generated/internalclientset" "github.com/openshift/origin/pkg/oc/bootstrap" "github.com/openshift/origin/pkg/oc/bootstrap/clusteradd/componentinstall" "github.com/openshift/origin/pkg/oc/bootstrap/clusteradd/components/registry" "github.com/openshift/origin/pkg/oc/bootstrap/clusteradd/components/service-catalog" "github.com/openshift/origin/pkg/oc/bootstrap/clusterup/kubeapiserver" "github.com/openshift/origin/pkg/oc/bootstrap/docker/dockerhelper" "github.com/openshift/origin/pkg/oc/bootstrap/docker/errors" "github.com/openshift/origin/pkg/oc/bootstrap/docker/host" "github.com/openshift/origin/pkg/oc/bootstrap/docker/localcmd" "github.com/openshift/origin/pkg/oc/bootstrap/docker/openshift" "github.com/openshift/origin/pkg/version" ) const ( // CmdUpRecommendedName is the recommended command name CmdUpRecommendedName = "up" initialUser = "developer" initialPassword = "developer" initialProjectName = "myproject" initialProjectDisplay = "My Project" initialProjectDesc = "Initial developer project" defaultRedirectClient = "openshift-web-console" developmentRedirectURI = "https://localhost:9000" dockerAPIVersion122 = "1.22" ) var ( cmdUpLong = templates.LongDesc(` Starts an OpenShift cluster using Docker containers, provisioning a registry, router, initial templates, and a default project. This command will attempt to use an existing connection to a Docker daemon. Before running the command, ensure that you can execute docker commands successfully (i.e. 'docker ps'). By default, the OpenShift cluster will be setup to use a routing suffix that ends in nip.io. This is to allow dynamic host names to be created for routes. An alternate routing suffix can be specified using the --routing-suffix flag. A public hostname can also be specified for the server with the --public-hostname flag.`) cmdUpExample = templates.Examples(` # Start OpenShift using a specific public host name %[1]s --public-hostname=my.address.example.com # Specify which set of image streams to use %[1]s --image-streams=centos7`) imageStreams = map[string]string{ "centos7": "examples/image-streams/image-streams-centos7.json", "rhel7": "examples/image-streams/image-streams-rhel7.json", } // defaultImageStreams is the default key for the above imageStreams mapping. // It should be set during build via -ldflags. 
defaultImageStreams string templateLocations = map[string]string{ "mongodb": "examples/db-templates/mongodb-persistent-template.json", "mariadb": "examples/db-templates/mariadb-persistent-template.json", "mysql": "examples/db-templates/mysql-persistent-template.json", "postgresql": "examples/db-templates/postgresql-persistent-template.json", "cakephp quickstart": "examples/quickstarts/cakephp-mysql-persistent.json", "dancer quickstart": "examples/quickstarts/dancer-mysql-persistent.json", "django quickstart": "examples/quickstarts/django-postgresql-persistent.json", "nodejs quickstart": "examples/quickstarts/nodejs-mongodb-persistent.json", "rails quickstart": "examples/quickstarts/rails-postgresql-persistent.json", "jenkins pipeline persistent": "examples/jenkins/jenkins-persistent-template.json", "sample pipeline": "examples/jenkins/pipeline/samplepipeline.yaml", } adminTemplateLocations = map[string]string{ "prometheus": "examples/prometheus/prometheus.yaml", "heapster standalone": "examples/heapster/heapster-standalone.yaml", } ) // NewCmdUp creates a command that starts OpenShift on Docker with reasonable defaults func NewCmdUp(name, fullName string, out, errout io.Writer, clusterAdd *cobra.Command) *cobra.Command { config := &ClusterUpConfig{ UserEnabledComponents: []string{"*"}, Out: out, UsePorts: openshift.BasePorts, PortForwarding: defaultPortForwarding(), DNSPort: openshift.DefaultDNSPort, checkAlternatePorts: true, // We pass cluster add as a command to prevent anyone from ever cheating with their wiring. You either work from flags or // or you don't work. You cannot add glue of any sort. ClusterAdd: clusterAdd, } cmd := &cobra.Command{ Use: name, Short: "Start OpenShift on Docker with reasonable defaults", Long: cmdUpLong, Example: fmt.Sprintf(cmdUpExample, fullName), Run: func(c *cobra.Command, args []string) { kcmdutil.CheckErr(config.Complete(c, out)) kcmdutil.CheckErr(config.Validate(errout)) kcmdutil.CheckErr(config.Check(out)) if err := config.Start(out); err != nil { PrintError(err, errout) os.Exit(1) } }, } config.Bind(cmd.Flags()) return cmd } type ClusterUpConfig struct { Image string ImageTag string ImageStreams string DockerMachine string SkipRegistryCheck bool PortForwarding bool ClusterAdd *cobra.Command UserEnabledComponents []string Out io.Writer // BaseTempDir is the directory to use as the root for temp directories // This allows us to bundle all of the cluster-up directories in one spot for easier cleanup and ensures we aren't // doing crazy thing like dirtying /var on the host (that does weird stuff) BaseDir string SpecifiedBaseDir bool HostName string UseExistingConfig bool Environment []string ServerLogLevel int ComponentsToEnable []string HostVolumesDir string HostConfigDir string WriteConfig bool HostDataDir string UsePorts []int DNSPort int ServerIP string AdditionalIPs []string UseNsenterMount bool PublicHostname string RoutingSuffix string HostPersistentVolumesDir string HTTPProxy string HTTPSProxy string NoProxy []string CACert string PVCount int dockerClient dockerhelper.Interface dockerHelper *dockerhelper.Helper hostHelper *host.HostHelper openshiftHelper *openshift.Helper command *cobra.Command defaultClientConfig clientcmdapi.Config usingDefaultImages bool usingDefaultOpenShiftImage bool checkAlternatePorts bool shouldInitializeData *bool shouldCreateUser *bool containerNetworkErr chan error } func (c *ClusterUpConfig) Bind(flags *pflag.FlagSet) { flags.StringVar(&c.ImageTag, "tag", "", "Specify the tag for OpenShift images") 
flags.MarkHidden("tag") flags.StringVar(&c.Image, "image", variable.DefaultImagePrefix, "Specify the images to use for OpenShift") flags.StringVar(&c.ImageStreams, "image-streams", defaultImageStreams, "Specify which image streams to use, centos7|rhel7") flags.BoolVar(&c.SkipRegistryCheck, "skip-registry-check", false, "Skip Docker daemon registry check") flags.StringVar(&c.PublicHostname, "public-hostname", "", "Public hostname for OpenShift cluster") flags.StringVar(&c.RoutingSuffix, "routing-suffix", "", "Default suffix for server routes") flags.BoolVar(&c.UseExistingConfig, "use-existing-config", false, "Use existing configuration if present") flags.StringVar(&c.BaseDir, "base-dir", c.BaseDir, "Directory on Docker host for cluster up configuration") flags.BoolVar(&c.WriteConfig, "write-config", false, "Write the configuration files into host config dir") flags.BoolVar(&c.PortForwarding, "forward-ports", c.PortForwarding, "Use Docker port-forwarding to communicate with origin container. Requires 'socat' locally.") flags.IntVar(&c.ServerLogLevel, "server-loglevel", 0, "Log level for OpenShift server") flags.StringArrayVarP(&c.Environment, "env", "e", c.Environment, "Specify a key-value pair for an environment variable to set on OpenShift container") flags.StringSliceVar(&c.UserEnabledComponents, "enable", c.UserEnabledComponents, fmt.Sprintf(""+ "A list of components to enable. '*' enables all on-by-default components, 'foo' enables the component "+ "named 'foo', '-foo' disables the component named 'foo'.\nAll components: %s\nDisabled-by-default components: %s", strings.Join(knownComponents.List(), ", "), strings.Join(componentsDisabledByDefault.List(), ", "))) flags.StringVar(&c.HTTPProxy, "http-proxy", "", "HTTP proxy to use for master and builds") flags.StringVar(&c.HTTPSProxy, "https-proxy", "", "HTTPS proxy to use for master and builds") flags.StringArrayVar(&c.NoProxy, "no-proxy", c.NoProxy, "List of hosts or subnets for which a proxy should not be used") } var ( knownComponents = sets.NewString("web-console", "registry", "router", "service-catalog", "template-service-broker") componentsDisabledByDefault = sets.NewString("service-catalog", "template-service-broker") ) func (c *ClusterUpConfig) Complete(cmd *cobra.Command, out io.Writer) error { // TODO: remove this when we move to container/apply based component installation aggregatorinstall.Install(legacyscheme.GroupFactoryRegistry, legacyscheme.Registry, legacyscheme.Scheme) // Get the default client config for login var err error flags := pflag.NewFlagSet("", pflag.ContinueOnError) c.defaultClientConfig, err = osclientcmd.DefaultClientConfig(flags).RawConfig() if err != nil { if !os.IsNotExist(err) { return err } c.defaultClientConfig = (*clientcmdapi.NewConfig()) } c.command = cmd // do some defaulting if len(c.ImageTag) == 0 { c.ImageTag = strings.TrimRight("v"+version.Get().Major+"."+version.Get().Minor, "+") } if len(c.BaseDir) == 0 { c.SpecifiedBaseDir = false c.BaseDir = "openshift.local.clusterup" } if !path.IsAbs(c.BaseDir) { cwd, err := os.Getwd() if err != nil { return err } absHostDir, err := cmdutil.MakeAbs(c.BaseDir, cwd) if err != nil { return err } c.BaseDir = absHostDir } for _, currComponent := range knownComponents.UnsortedList() { if isComponentEnabled(currComponent, componentsDisabledByDefault, c.UserEnabledComponents...) 
{ c.ComponentsToEnable = append(c.ComponentsToEnable, currComponent) } } // do some struct initialization next // used for some pretty printing taskPrinter := NewTaskPrinter(GetDetailedOut(out)) // Get a Docker client. // If a Docker machine was specified, make sure that the machine is running. // Otherwise, use environment variables. taskPrinter.StartTask("Getting a Docker client") client, err := GetDockerClient() if err != nil { return taskPrinter.ToError(err) } c.dockerClient = client taskPrinter.Success() // Ensure that the OpenShift Docker image is available. // If not present, pull it. // We do this here because the image is used in the next step if running Red Hat docker. taskPrinter.StartTask(fmt.Sprintf("Checking if image %s is available", c.openshiftImage())) if err := c.checkOpenShiftImage(out); err != nil { return taskPrinter.ToError(err) } taskPrinter.Success() // Check whether the Docker host has the right binaries to use Kubernetes' nsenter mounter // If not, use a shared volume to mount volumes on OpenShift if isRedHatDocker, err := c.DockerHelper().IsRedHat(); err == nil && isRedHatDocker { taskPrinter.StartTask("Checking type of volume mount") c.UseNsenterMount, err = c.HostHelper().CanUseNsenterMounter() if err != nil { return taskPrinter.ToError(err) } taskPrinter.Success() } if err := os.MkdirAll(c.BaseDir, 0755); err != nil { return err } if c.UseNsenterMount { c.HostVolumesDir = path.Join(c.BaseDir, "openshift.local.volumes") if err := os.MkdirAll(c.HostVolumesDir, 0755); err != nil { return err } } else { c.HostVolumesDir = path.Join(NonLinuxHostVolumeDirPrefix, c.BaseDir, "openshift.local.volumes") } c.HostPersistentVolumesDir = path.Join(c.BaseDir, "openshift.local.pv") if err := os.MkdirAll(c.HostPersistentVolumesDir, 0755); err != nil { return err } c.HostDataDir = path.Join(c.BaseDir, "etcd") if err := os.MkdirAll(c.HostDataDir, 0755); err != nil { return err } // Ensure that host directories exist. // If not using the nsenter mounter, create a volume share on the host machine to // mount OpenShift volumes. taskPrinter.StartTask("Creating host directories") if !c.UseNsenterMount { if err := c.HostHelper().EnsureVolumeUseShareMount(); err != nil { return taskPrinter.ToError(err) } } taskPrinter.Success() // Determine an IP to use for OpenShift. // The result is that c.ServerIP will be populated with // the IP that will be used on the client configuration file. // The c.ServerIP will be set to a specific IP when: // 1 - DOCKER_HOST is populated with a particular tcp:// type of address // 2 - a docker-machine has been specified // 3 - 127.0.0.1 is not working and an alternate IP has been found // Otherwise, the default c.ServerIP will be 127.0.0.1 which is what // will get stored in the client's config file. The reason for this is that // the client config will not depend on the machine's current IP address which // could change over time. // // c.AdditionalIPs will be populated with additional IPs that should be // included in the server's certificate. 
These include any IPs that are currently // assigned to the Docker host (hostname -I) // Each IP is tested to ensure that it can be accessed from the current client taskPrinter.StartTask("Determining server IP") c.ServerIP, c.AdditionalIPs, err = c.determineServerIP(out) if err != nil { return taskPrinter.ToError(err) } glog.V(3).Infof("Using %q as primary server IP and %q as additional IPs", c.ServerIP, strings.Join(c.AdditionalIPs, ",")) taskPrinter.Success() if len(c.RoutingSuffix) == 0 { c.RoutingSuffix = c.ServerIP + ".nip.io" } // this used to be done in the openshift start method, but its mutating state. if len(c.HTTPProxy) > 0 || len(c.HTTPSProxy) > 0 { c.updateNoProxy() } return nil } // Validate validates that required fields in StartConfig have been populated func (c *ClusterUpConfig) Validate(errout io.Writer) error { if c.dockerClient == nil { return fmt.Errorf("missing dockerClient") } cmdutil.WarnAboutCommaSeparation(errout, c.Environment, "--env") return nil } // Check is a spot to do NON-MUTATING, preflight checks. Over time, we should try to move our non-mutating checks out of // Complete and into Check. func (c *ClusterUpConfig) Check(out io.Writer) error { // used for some pretty printing taskPrinter := NewTaskPrinter(GetDetailedOut(out)) // Check for an OpenShift container. If one exists and is running, exit. // If one exists but not running, delete it. taskPrinter.StartTask("Checking if OpenShift is already running") if err := checkExistingOpenShiftContainer(c.DockerHelper(), out); err != nil { return taskPrinter.ToError(err) } taskPrinter.Success() // Docker checks taskPrinter.StartTask(fmt.Sprintf("Checking for supported Docker version (=>%s)", dockerAPIVersion122)) ver, err := c.DockerHelper().APIVersion() if err != nil { return taskPrinter.ToError(err) } if versions.LessThan(ver.APIVersion, dockerAPIVersion122) { return taskPrinter.ToError(fmt.Errorf("unsupported Docker version %s, need at least %s", ver.APIVersion, dockerAPIVersion122)) } if !c.SkipRegistryCheck { taskPrinter.StartTask("Checking if insecured registry is configured properly in Docker") if err := c.checkDockerInsecureRegistry(out); err != nil { return taskPrinter.ToError(err) } taskPrinter.Success() } // Networking checks if c.PortForwarding { taskPrinter.StartTask("Checking prerequisites for port forwarding") if err := checkPortForwardingPrerequisites(out); err != nil { return taskPrinter.ToError(err) } taskPrinter.Success() err := openshift.CheckSocat() if err != nil { return err } } taskPrinter.StartTask("Checking if required ports are available") if err := c.checkAvailablePorts(out); err != nil { return taskPrinter.ToError(err) } taskPrinter.Success() // OpenShift checks taskPrinter.StartTask("Checking if OpenShift client is configured properly") if err := c.checkOpenShiftClient(); err != nil { return taskPrinter.ToError(err) } taskPrinter.Success() // Ensure that the OpenShift Docker image is available. // If not present, pull it. 
taskPrinter.StartTask(fmt.Sprintf("Checking if image %s is available", c.openshiftImage())) if err := c.checkOpenShiftImage(out); err != nil { return taskPrinter.ToError(err) } taskPrinter.Success() return nil } func GetDetailedOut(out io.Writer) io.Writer { // When loglevel > 0, just use stdout to write all messages if glog.V(1) { return out } else { return &bytes.Buffer{} } } // Start runs the start tasks ensuring that they are executed in sequence func (c *ClusterUpConfig) Start(out io.Writer) error { fmt.Fprintf(out, "Starting OpenShift using %s ...\n", c.openshiftImage()) if c.PortForwarding { if err := c.OpenShiftHelper().StartSocatTunnel(c.ServerIP); err != nil { return err } } if err := c.StartSelfHosted(out); err != nil { return err } if c.WriteConfig { return nil } if err := c.PostClusterStartupMutations(out); err != nil { return err } detailedOut := GetDetailedOut(out) taskPrinter := NewTaskPrinter(detailedOut) if !c.ShouldInitializeData() { taskPrinter.StartTask("Server Information") c.ServerInfo(out) taskPrinter.Success() return nil } // Add default redirect URIs to an OAuthClient to enable local web-console development. taskPrinter.StartTask("Adding default OAuthClient redirect URIs") if err := c.ensureDefaultRedirectURIs(out); err != nil { return taskPrinter.ToError(err) } taskPrinter.Success() if len(c.ComponentsToEnable) > 0 { args := append([]string{}, "--image="+c.Image) args = append(args, "--tag="+c.ImageTag) args = append(args, "--base-dir="+c.BaseDir) args = append(args, c.ComponentsToEnable...) if err := c.ClusterAdd.ParseFlags(args); err != nil { return err } glog.V(2).Infof("oc cluster add %v", args) if err := c.ClusterAdd.RunE(c.ClusterAdd, args); err != nil { return err } } // TODO, now we build up a set of things to install here. We build the list so that we can install everything in // TODO parallel to avoid anyone accidentally introducing dependencies. componentsToInstall := []componentinstall.Component{} componentsToInstall = append(componentsToInstall, c.ImportInitialObjectsComponents(c.Out)...) 
if err := componentinstall.InstallComponents(componentsToInstall, c.GetDockerClient(), c.GetLogDir()); err != nil { return err } if c.ShouldCreateUser() { // Login with an initial default user taskPrinter.StartTask("Login to server") if err := c.Login(out); err != nil { return taskPrinter.ToError(err) } taskPrinter.Success() // Create an initial project taskPrinter.StartTask(fmt.Sprintf("Creating initial project %q", initialProjectName)) if err := c.CreateProject(out); err != nil { return taskPrinter.ToError(err) } taskPrinter.Success() } taskPrinter.StartTask("Server Information") c.ServerInfo(out) taskPrinter.Success() return nil } func defaultPortForwarding() bool { // Defaults to true if running on Mac, with no DOCKER_HOST defined return runtime.GOOS == "darwin" && len(os.Getenv("DOCKER_HOST")) == 0 } // checkOpenShiftClient ensures that the client can be configured // for the new server func (c *ClusterUpConfig) checkOpenShiftClient() error { kubeConfig := os.Getenv("KUBECONFIG") if len(kubeConfig) == 0 { return nil } // if you're trying to use the kubeconfig into a subdirectory of the basedir, you're probably using a KUBECONFIG // location that is going to overwrite a "real" kubeconfig, usually admin.kubeconfig which will break every other component // relying on it being a full power kubeconfig kubeConfigDir := filepath.Dir(kubeConfig) cwd, err := os.Getwd() if err != nil { return err } absKubeConfigDir, err := cmdutil.MakeAbs(kubeConfigDir, cwd) if err != nil { return err } if strings.HasPrefix(absKubeConfigDir, c.BaseDir+"/") { return fmt.Errorf("cannot choose kubeconfig in subdirectory of the --base-dir: %q", kubeConfig) } var ( kubeConfigError error f *os.File ) _, err = os.Stat(kubeConfig) switch { case os.IsNotExist(err): err = os.MkdirAll(filepath.Dir(kubeConfig), 0755) if err != nil { kubeConfigError = fmt.Errorf("cannot make directory: %v", err) break } f, err = os.Create(kubeConfig) if err != nil { kubeConfigError = fmt.Errorf("cannot create file: %v", err) break } f.Close() case err == nil: f, err = os.OpenFile(kubeConfig, os.O_RDWR, 0644) if err != nil { kubeConfigError = fmt.Errorf("cannot open %s for write: %v", kubeConfig, err) break } f.Close() default: kubeConfigError = fmt.Errorf("cannot access %s: %v", kubeConfig, err) } if kubeConfigError != nil { return errors.ErrKubeConfigNotWriteable(kubeConfig, kubeConfigError) } return nil } // GetDockerClient obtains a new Docker client from the environment or // from a Docker machine, starting it if necessary func (c *ClusterUpConfig) GetDockerClient() dockerhelper.Interface { return c.dockerClient } // GetDockerClient obtains a new Docker client from the environment or // from a Docker machine, starting it if necessary and permitted func GetDockerClient() (dockerhelper.Interface, error) { dockerTLSVerify := os.Getenv("DOCKER_TLS_VERIFY") dockerCertPath := os.Getenv("DOCKER_CERT_PATH") if len(dockerTLSVerify) > 0 && len(dockerCertPath) == 0 { dockerCertPath = cliconfig.Dir() os.Setenv("DOCKER_CERT_PATH", dockerCertPath) } if glog.V(4) { dockerHost := os.Getenv("DOCKER_HOST") if len(dockerHost) == 0 && len(dockerTLSVerify) == 0 && len(dockerCertPath) == 0 { glog.Infof("No Docker environment variables found. Will attempt default socket.") } if len(dockerHost) > 0 { glog.Infof("Will try Docker connection with host (DOCKER_HOST) %q", dockerHost) } else { glog.Infof("No Docker host (DOCKER_HOST) configured. 
Will attempt default socket.") } if len(dockerTLSVerify) > 0 { glog.Infof("DOCKER_TLS_VERIFY=%s", dockerTLSVerify) } if len(dockerCertPath) > 0 { glog.Infof("DOCKER_CERT_PATH=%s", dockerCertPath) } } // FIXME: Workaround for docker engine API client on OS X - sets the default to // the wrong DOCKER_HOST string if runtime.GOOS == "darwin" { dockerHost := os.Getenv("DOCKER_HOST") if len(dockerHost) == 0 { os.Setenv("DOCKER_HOST", "unix:///var/run/docker.sock") } } dockerHost := os.Getenv("DOCKER_HOST") if len(dockerHost) == 0 { dockerHost = dockerclient.DefaultDockerHost } engineAPIClient, err := dockerclient.NewEnvClient() if err != nil { return nil, errors.ErrNoDockerClient(err) } // negotiate the correct API version with the server ctx, fn := context.WithTimeout(context.Background(), 10*time.Second) defer fn() engineAPIClient.NegotiateAPIVersion(ctx) return dockerhelper.NewClient(dockerHost, engineAPIClient), nil } // checkExistingOpenShiftContainer checks the state of an OpenShift container. // If one is already running, it throws an error. // If one exists, it removes it so a new one can be created. func checkExistingOpenShiftContainer(dockerHelper *dockerhelper.Helper, out io.Writer) error { container, running, err := dockerHelper.GetContainerState(openshift.ContainerName) if err != nil { return errors.NewError("unexpected error while checking OpenShift container state").WithCause(err) } if running { return errors.NewError("OpenShift is already running").WithSolution("To start OpenShift again, stop the current cluster:\n$ %s\n", "oc cluster down") } if container != nil { err = dockerHelper.RemoveContainer(openshift.ContainerName) if err != nil { return errors.NewError("cannot delete existing OpenShift container").WithCause(err) } fmt.Fprintln(out, "Deleted existing OpenShift container") } return nil } // checkOpenShiftImage checks whether the OpenShift image exists. // If not it tells the Docker daemon to pull it. func (c *ClusterUpConfig) checkOpenShiftImage(out io.Writer) error { return c.DockerHelper().CheckAndPull(c.openshiftImage(), out) } // checkDockerInsecureRegistry checks to see if the Docker daemon has an appropriate insecure registry argument set so that our services can access the registry func (c *ClusterUpConfig) checkDockerInsecureRegistry(out io.Writer) error { configured, hasEntries, err := c.DockerHelper().InsecureRegistryIsConfigured(openshift.DefaultSvcCIDR) if err != nil { return err } if !configured { if hasEntries { return errors.ErrInvalidInsecureRegistryArgument() } return errors.ErrNoInsecureRegistryArgument() } return nil } // checkPortForwardingPrerequisites checks that socat is installed when port forwarding is enabled // Socat needs to be installed manually on MacOS func checkPortForwardingPrerequisites(out io.Writer) error { err := localcmd.New("socat").Args("-V").Run() if err != nil { glog.V(2).Infof("Error from socat command execution: %v", err) fmt.Fprintln(out, "WARNING: Port forwarding requires socat command line utility."+ "Cluster public ip may not be reachable. 
Please make sure socat installed in your operating system.") } return nil } // ensureDefaultRedirectURIs merges a default URL to an auth client's RedirectURIs array func (c *ClusterUpConfig) ensureDefaultRedirectURIs(out io.Writer) error { restConfig, err := c.RESTConfig() if err != nil { return err } oauthClient, err := oauthclientinternal.NewForConfig(restConfig) if err != nil { return err } webConsoleOAuth, err := oauthClient.Oauth().OAuthClients().Get(defaultRedirectClient, metav1.GetOptions{}) if err != nil { if kerrors.IsNotFound(err) { fmt.Fprintf(out, "Unable to find OAuthClient %q\n", defaultRedirectClient) return nil } // announce fetch error without interrupting remaining tasks suggestedCmd := fmt.Sprintf("oc patch %s/%s -p '{%q:[%q]}'", "oauthclient", defaultRedirectClient, "redirectURIs", developmentRedirectURI) errMsg := fmt.Sprintf("Unable to fetch OAuthClient %q.\nTo manually add a development redirect URI, run %q\n", defaultRedirectClient, suggestedCmd) fmt.Fprintf(out, "%s\n", errMsg) return nil } // ensure the default redirect URI is not already present redirects := sets.NewString(webConsoleOAuth.RedirectURIs...) if redirects.Has(developmentRedirectURI) { return nil } webConsoleOAuth.RedirectURIs = append(webConsoleOAuth.RedirectURIs, developmentRedirectURI) _, err = oauthClient.Oauth().OAuthClients().Update(webConsoleOAuth) if err != nil { // announce error without interrupting remaining tasks suggestedCmd := fmt.Sprintf("oc patch %s/%s -p '{%q:[%q]}'", "oauthclient", defaultRedirectClient, "redirectURIs", developmentRedirectURI) errMsg := fmt.Sprintf("Unable to add development redirect URI to the %q OAuthClient.\nTo manually add it, run %q\n", defaultRedirectClient, suggestedCmd) fmt.Fprintf(out, "%s\n", errMsg) return nil } return nil } // checkAvailablePorts ensures that ports used by OpenShift are available on the Docker host func (c *ClusterUpConfig) checkAvailablePorts(out io.Writer) error { err := c.OpenShiftHelper().TestPorts(openshift.AllPorts) if err == nil { return nil } if !openshift.IsPortsNotAvailableErr(err) { return err } unavailable := sets.NewInt(openshift.UnavailablePorts(err)...) if unavailable.HasAny(openshift.BasePorts...) 
{ return errors.NewError("a port needed by OpenShift is not available").WithCause(err) } if unavailable.Has(openshift.DefaultDNSPort) { return errors.NewError(fmt.Sprintf("DNS port %d is not available", openshift.DefaultDNSPort)) } for _, port := range openshift.RouterPorts { if unavailable.Has(port) { fmt.Fprintf(out, "WARNING: Port %d is already in use and may cause routing issues for applications.\n", port) } } return nil } // determineServerIP gets an appropriate IP address to communicate with the OpenShift server func (c *ClusterUpConfig) determineServerIP(out io.Writer) (string, []string, error) { ip, err := c.determineIP(out) if err != nil { return "", nil, errors.NewError("cannot determine a server IP to use").WithCause(err) } serverIP := ip additionalIPs, err := c.determineAdditionalIPs(c.ServerIP) if err != nil { return "", nil, errors.NewError("cannot determine additional IPs").WithCause(err) } return serverIP, additionalIPs, nil } // updateNoProxy will add some default values to the NO_PROXY setting if they are not present func (c *ClusterUpConfig) updateNoProxy() { values := []string{"127.0.0.1", c.ServerIP, "localhost", service_catalog.ServiceCatalogServiceIP, registry.RegistryServiceClusterIP} ipFromServer, err := c.OpenShiftHelper().ServerIP() if err == nil { values = append(values, ipFromServer) } noProxySet := sets.NewString(c.NoProxy...) for _, v := range values { if !noProxySet.Has(v) { noProxySet.Insert(v) c.NoProxy = append(c.NoProxy, v) } } } func (c *ClusterUpConfig) PostClusterStartupMutations(out io.Writer) error { restConfig, err := c.RESTConfig() if err != nil { return err } kClient, err := kclientset.NewForConfig(restConfig) if err != nil { return err } // Remove any duplicate nodes if err := c.OpenShiftHelper().CheckNodes(kClient); err != nil { return err } err = c.OpenShiftHelper().SetupPersistentStorage(restConfig, c.HostPersistentVolumesDir) if err != nil { return err } return nil } func (c *ClusterUpConfig) imageFormat() string { return fmt.Sprintf("%s-${component}:%s", c.Image, c.ImageTag) } // TODO this should become a separate thing we can install, like registry func (c *ClusterUpConfig) ImportInitialObjectsComponents(out io.Writer) []componentinstall.Component { componentsToInstall := []componentinstall.Component{} componentsToInstall = append(componentsToInstall, c.makeObjectImportInstallationComponentsOrDie(out, openshift.Namespace, map[string]string{ c.ImageStreams: imageStreams[c.ImageStreams], })...) componentsToInstall = append(componentsToInstall, c.makeObjectImportInstallationComponentsOrDie(out, openshift.Namespace, templateLocations)...) componentsToInstall = append(componentsToInstall, c.makeObjectImportInstallationComponentsOrDie(out, "kube-system", adminTemplateLocations)...) 
return componentsToInstall } // RegisterTemplateServiceBroker will register the tsb with the service catalog func (c *ClusterUpConfig) RegisterTemplateServiceBroker(out io.Writer) error { return c.OpenShiftHelper().RegisterTemplateServiceBroker(c.BaseDir, c.GetKubeAPIServerConfigDir(), c.GetLogDir()) } // Login logs into the new server and sets up a default user and project func (c *ClusterUpConfig) Login(out io.Writer) error { server := c.OpenShiftHelper().Master(c.ServerIP) return openshift.Login(initialUser, initialPassword, server, c.GetKubeAPIServerConfigDir(), c.defaultClientConfig, c.command, out, out) } // CreateProject creates a new project for the current user func (c *ClusterUpConfig) CreateProject(out io.Writer) error { f, err := openshift.LoggedInUserFactory() if err != nil { return errors.NewError("cannot get logged in user client").WithCause(err) } return openshift.CreateProject(f, initialProjectName, initialProjectDisplay, initialProjectDesc, "oc", out) } // ServerInfo displays server information after a successful start func (c *ClusterUpConfig) ServerInfo(out io.Writer) { masterURL := fmt.Sprintf("https://%s:8443", c.GetPublicHostName()) msg := fmt.Sprintf("OpenShift server started.\n\n"+ "The server is accessible via web console at:\n"+ " %s\n\n", masterURL) if c.ShouldCreateUser() { msg += fmt.Sprintf("You are logged in as:\n"+ " User: %s\n"+ " Password: <any value>\n\n", initialUser) msg += "To login as administrator:\n" + " oc login -u system:admin\n\n" } msg += c.checkProxySettings() fmt.Fprintf(out, msg) } // checkProxySettings compares proxy settings specified for cluster up // and those on the Docker daemon and generates appropriate warnings. func (c *ClusterUpConfig) checkProxySettings() string { warnings := []string{} dockerHTTPProxy, dockerHTTPSProxy, dockerNoProxy, err := c.DockerHelper().GetDockerProxySettings() if err != nil { return "Unexpected error: " + err.Error() } // Check HTTP proxy if len(c.HTTPProxy) > 0 && len(dockerHTTPProxy) == 0 { warnings = append(warnings, "You specified an HTTP proxy for cluster up, but one is not configured for the Docker daemon") } else if len(c.HTTPProxy) == 0 && len(dockerHTTPProxy) > 0 { warnings = append(warnings, fmt.Sprintf("An HTTP proxy (%s) is configured for the Docker daemon, but you did not specify one for cluster up", dockerHTTPProxy)) } else if c.HTTPProxy != dockerHTTPProxy { warnings = append(warnings, fmt.Sprintf("The HTTP proxy configured for the Docker daemon (%s) is not the same one you specified for cluster up", dockerHTTPProxy)) } // Check HTTPS proxy if len(c.HTTPSProxy) > 0 && len(dockerHTTPSProxy) == 0 { warnings = append(warnings, "You specified an HTTPS proxy for cluster up, but one is not configured for the Docker daemon") } else if len(c.HTTPSProxy) == 0 && len(dockerHTTPSProxy) > 0 { warnings = append(warnings, fmt.Sprintf("An HTTPS proxy (%s) is configured for the Docker daemon, but you did not specify one for cluster up", dockerHTTPSProxy)) } else if c.HTTPSProxy != dockerHTTPSProxy { warnings = append(warnings, fmt.Sprintf("The HTTPS proxy configured for the Docker daemon (%s) is not the same one you specified for cluster up", dockerHTTPSProxy)) } if len(dockerHTTPProxy) > 0 || len(dockerHTTPSProxy) > 0 { dockerNoProxyList := strings.Split(dockerNoProxy, ",") dockerNoProxySet := sets.NewString(dockerNoProxyList...) 
if !dockerNoProxySet.Has(registry.RegistryServiceClusterIP) { warnings = append(warnings, fmt.Sprintf("A proxy is configured for Docker, however %[1]s is not included in its NO_PROXY list.\n"+ " %[1]s needs to be included in the Docker daemon's NO_PROXY environment variable so pushes to the local OpenShift registry can succeed.", registry.RegistryServiceClusterIP)) } } if len(warnings) > 0 { buf := &bytes.Buffer{} for _, w := range warnings { fmt.Fprintf(buf, "WARNING: %s\n", w) } return buf.String() } return "" } // OpenShiftHelper returns a helper object to work with OpenShift on the server func (c *ClusterUpConfig) OpenShiftHelper() *openshift.Helper { if c.openshiftHelper == nil { c.openshiftHelper = openshift.NewHelper(c.DockerHelper(), c.openshiftImage(), openshift.ContainerName) } return c.openshiftHelper } // HostHelper returns a helper object to check Host configuration func (c *ClusterUpConfig) HostHelper() *host.HostHelper { if c.hostHelper == nil { c.hostHelper = host.NewHostHelper(c.DockerHelper(), c.openshiftImage(), c.HostVolumesDir) } return c.hostHelper } // DockerHelper returns a helper object to work with the Docker client func (c *ClusterUpConfig) DockerHelper() *dockerhelper.Helper { if c.dockerHelper == nil { c.dockerHelper = dockerhelper.NewHelper(c.dockerClient) } return c.dockerHelper } func (c *ClusterUpConfig) makeObjectImportInstallationComponents(out io.Writer, namespace string, locations map[string]string) ([]componentinstall.Component, error) { clusterAdminKubeConfig, err := c.ClusterAdminKubeConfigBytes() if err != nil { return nil, err } componentsToInstall := []componentinstall.Component{} for name, location := range locations { componentsToInstall = append(componentsToInstall, componentinstall.List{ ComponentName: namespace + "/" + name, Image: c.openshiftImage(), Namespace: namespace, KubeConfig: clusterAdminKubeConfig, List: bootstrap.MustAsset(location), }) } return componentsToInstall, nil } func (c *ClusterUpConfig) makeObjectImportInstallationComponentsOrDie(out io.Writer, namespace string, locations map[string]string) []componentinstall.Component { componentsToInstall, err := c.makeObjectImportInstallationComponents(out, namespace, locations) if err != nil { panic(err) } return componentsToInstall } func (c *ClusterUpConfig) openshiftImage() string { return fmt.Sprintf("%s:%s", c.Image, c.ImageTag) } func (c *ClusterUpConfig) determineAdditionalIPs(ip string) ([]string, error) { additionalIPs := sets.NewString() serverIPs, err := c.OpenShiftHelper().OtherIPs(ip) if err != nil { return nil, errors.NewError("could not determine additional IPs").WithCause(err) } additionalIPs.Insert(serverIPs...) if c.PortForwarding { localIPs, err := c.localIPs() if err != nil { return nil, errors.NewError("could not determine additional local IPs").WithCause(err) } additionalIPs.Insert(localIPs...) 
} return additionalIPs.List(), nil } func (c *ClusterUpConfig) localIPs() ([]string, error) { ips := []string{} devices, err := net.Interfaces() if err != nil { return nil, err } for _, dev := range devices { if (dev.Flags&net.FlagUp != 0) && (dev.Flags&net.FlagLoopback == 0) { addrs, err := dev.Addrs() if err != nil { continue } for i := range addrs { if ip, ok := addrs[i].(*net.IPNet); ok { if ip.IP.To4() != nil { ips = append(ips, ip.IP.String()) } } } } } return ips, nil } func (c *ClusterUpConfig) determineIP(out io.Writer) (string, error) { if ip := net.ParseIP(c.PublicHostname); ip != nil && !ip.IsUnspecified() { fmt.Fprintf(out, "Using public hostname IP %s as the host IP\n", ip) return ip.String(), nil } // If using port-forwarding, use the default loopback address if c.PortForwarding { return "127.0.0.1", nil } // Try to get the host from the DOCKER_HOST if communicating via tcp var err error ip := c.DockerHelper().HostIP() if ip != "" { glog.V(2).Infof("Testing Docker host IP (%s)", ip) if err = c.OpenShiftHelper().TestIP(ip); err == nil { return ip, nil } } glog.V(2).Infof("Cannot use the Docker host IP(%s): %v", ip, err) // If IP is not specified, try to use the loopback IP // This is to default to an ip-agnostic client setup // where the real IP of the host will not affect client operations if err = c.OpenShiftHelper().TestIP("127.0.0.1"); err == nil { return "127.0.0.1", nil } // Next, use the the --print-ip output from openshift ip, err = c.OpenShiftHelper().ServerIP() if err == nil { glog.V(2).Infof("Testing openshift --print-ip (%s)", ip) if err = c.OpenShiftHelper().TestIP(ip); err == nil { return ip, nil } glog.V(2).Infof("OpenShift server ip test failed: %v", err) } glog.V(2).Infof("Cannot use OpenShift IP: %v", err) // Next, try other IPs on Docker host ips, err := c.OpenShiftHelper().OtherIPs(ip) if err != nil { return "", err } for i := range ips { glog.V(2).Infof("Testing additional IP (%s)", ip) if err = c.OpenShiftHelper().TestIP(ips[i]); err == nil { return ip, nil } glog.V(2).Infof("OpenShift additional ip test failed: %v", err) } return "", errors.NewError("cannot determine an IP to use for your server.") } // ShouldInitializeData tries to determine whether we're dealing with // an existing OpenShift data and config. It determines that data exists by checking // for the existence of a docker-registry service. func (c *ClusterUpConfig) ShouldInitializeData() bool { if c.shouldInitializeData != nil { return *c.shouldInitializeData } result := func() bool { if !c.UseExistingConfig { return true } // For now, we determine if using existing etcd data by looking // for the registry service restConfig, err := c.RESTConfig() if err != nil { glog.V(2).Info(err) return true } kclient, err := kclientset.NewForConfig(restConfig) if err != nil { glog.V(2).Info(err) return true } if _, err = kclient.Core().Services(openshift.DefaultNamespace).Get(registry.SvcDockerRegistry, metav1.GetOptions{}); err != nil { return true } // If a registry exists, then don't initialize data return false }() c.shouldInitializeData = &result return result } // ShouldCreateUser determines whether a user and project should // be created. If the user provider has been modified in the config, then it should // not attempt to create a user. Also, even if the user provider has not been // modified, but data has been initialized, then we should also not create user. 
func (c *ClusterUpConfig) ShouldCreateUser() bool { if c.shouldCreateUser != nil { return *c.shouldCreateUser } result := func() bool { if !c.UseExistingConfig { return true } cfg, _, err := c.OpenShiftHelper().GetConfigFromLocalDir(c.GetKubeAPIServerConfigDir()) if err != nil { glog.V(2).Infof("error reading config: %v", err) return true } if cfg.OAuthConfig == nil || len(cfg.OAuthConfig.IdentityProviders) != 1 { return false } if _, ok := cfg.OAuthConfig.IdentityProviders[0].Provider.(*configapi.AllowAllPasswordIdentityProvider); !ok { return false } return c.ShouldInitializeData() }() c.shouldCreateUser = &result return result } func (c *ClusterUpConfig) GetKubeAPIServerConfigDir() string { return path.Join(c.BaseDir, kubeapiserver.KubeAPIServerDirName) } func (c *ClusterUpConfig) GetLogDir() string { return path.Join(c.BaseDir, "logs") } func (c *ClusterUpConfig) RESTConfig() (*rest.Config, error) { clusterAdminKubeConfigBytes, err := c.ClusterAdminKubeConfigBytes() if err != nil { return nil, err } clusterAdminKubeConfig, err := kclientcmd.RESTConfigFromKubeConfig(clusterAdminKubeConfigBytes) if err != nil { return nil, err } return clusterAdminKubeConfig, nil } func (c *ClusterUpConfig) ClusterAdminKubeConfigBytes() ([]byte, error) { return ioutil.ReadFile(path.Join(c.GetKubeAPIServerConfigDir(), "admin.kubeconfig")) } func (c *ClusterUpConfig) GetPublicHostName() string { if len(c.PublicHostname) > 0 { return c.PublicHostname } return c.ServerIP } func isComponentEnabled(name string, disabledByDefaultComponents sets.String, components ...string) bool { hasStar := false for _, ctrl := range components { if ctrl == name { return true } if ctrl == "-"+name { return false } if ctrl == "*" { hasStar = true } } // if we get here, there was no explicit choice if !hasStar { // nothing on by default return false } if disabledByDefaultComponents.Has(name) { return false } return true }
[ "\"DOCKER_HOST\"", "\"KUBECONFIG\"", "\"DOCKER_TLS_VERIFY\"", "\"DOCKER_CERT_PATH\"", "\"DOCKER_HOST\"", "\"DOCKER_HOST\"", "\"DOCKER_HOST\"" ]
[]
[ "DOCKER_HOST", "DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY", "KUBECONFIG" ]
[]
["DOCKER_HOST", "DOCKER_CERT_PATH", "DOCKER_TLS_VERIFY", "KUBECONFIG"]
go
4
0
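The record above centers on the DOCKER_HOST / DOCKER_CERT_PATH environment negotiation in GetDockerClient. A minimal, self-contained sketch of that same pattern follows, using only the upstream github.com/docker/docker/client package; the darwin fallback socket is the one hard-coded in the record, and everything else here is an illustrative assumption rather than code from the dataset.

package main

import (
	"context"
	"fmt"
	"os"
	"runtime"
	"time"

	dockerclient "github.com/docker/docker/client"
)

func main() {
	// Mirror the macOS workaround from the record: an unset DOCKER_HOST is
	// pinned to the default unix socket before the client is constructed.
	if runtime.GOOS == "darwin" && os.Getenv("DOCKER_HOST") == "" {
		os.Setenv("DOCKER_HOST", "unix:///var/run/docker.sock")
	}
	// NewEnvClient reads DOCKER_HOST, DOCKER_TLS_VERIFY and DOCKER_CERT_PATH.
	cli, err := dockerclient.NewEnvClient()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Negotiate the API version with the daemon under a bounded timeout,
	// as the record does.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()
	cli.NegotiateAPIVersion(ctx)
	fmt.Println("negotiated API version:", cli.ClientVersion())
}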
infrastructure/db/db.go
package db

import (
	"log"
	"os"
	"path/filepath"
	"runtime"

	"github.com/codeedu/imersao/codepix-go/domain/model"
	"github.com/jinzhu/gorm"
	"github.com/joho/godotenv"

	// gorm v1 dialect imports: these register the "postgres" and "sqlite3"
	// drivers that gorm.Open is called with below. The original blank imports
	// (lib/pq and the gorm v2 sqlite driver) only registered the database/sql
	// drivers transitively and did not match the jinzhu/gorm v1 API used here.
	_ "github.com/jinzhu/gorm/dialects/postgres"
	_ "github.com/jinzhu/gorm/dialects/sqlite"
)

func init() {
	// Locate the .env file relative to this source file, not the working dir.
	_, b, _, _ := runtime.Caller(0)
	basepath := filepath.Dir(b)
	if err := godotenv.Load(basepath + "/../../.env"); err != nil {
		log.Fatalf("Error loading .env file: %v", err)
	}
}

func ConnectDB(env string) *gorm.DB {
	var (
		dsn string
		db  *gorm.DB
		err error
	)
	if env != "test" {
		dsn = os.Getenv("dsn")
		db, err = gorm.Open(os.Getenv("dbType"), dsn)
	} else {
		dsn = os.Getenv("dsnTest")
		db, err = gorm.Open(os.Getenv("dbTypeTest"), dsn)
	}
	if err != nil {
		// log.Fatalf exits the process, so the panic(err) that followed it
		// in the original could never run and has been dropped.
		log.Fatalf("Error connecting to database: %v", err)
	}
	if os.Getenv("debug") == "true" {
		db.LogMode(true)
	}
	if os.Getenv("AutoMigrateDb") == "true" {
		db.AutoMigrate(&model.Bank{}, &model.Account{}, &model.PixKey{}, &model.Transaction{})
	}
	return db
}
[ "\"dsn\"", "\"dbType\"", "\"dsnTest\"", "\"dbTypeTest\"", "\"debug\"", "\"AutoMigrateDb\"" ]
[]
[ "dbTypeTest", "debug", "dsn", "dsnTest", "AutoMigrateDb", "dbType" ]
[]
["dbTypeTest", "debug", "dsn", "dsnTest", "AutoMigrateDb", "dbType"]
go
6
0
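A minimal usage sketch for the ConnectDB helper in the record above. The import path and the .env keys are assumptions drawn from the record's own source, not from the dataset; the real module path depends on the repository's go.mod.

package main

import (
	"log"

	"github.com/codeedu/imersao/codepix-go/infrastructure/db" // assumed module path
)

func main() {
	// ConnectDB("prod") reads dsn/dbType from the .env loaded by the
	// package's init(); any value other than "test" selects the non-test
	// credentials (dsnTest/dbTypeTest are used only for env == "test").
	conn := db.ConnectDB("prod")
	defer conn.Close()
	log.Println("database connection established")
}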
v8/examples/longRunningClient.go
// +build examples

package main

import (
	"encoding/hex"
	"log"
	"os"
	"time"

	"github.com/wangzhengzh/gokrb5/v8/client"
	"github.com/wangzhengzh/gokrb5/v8/config"
	"github.com/wangzhengzh/gokrb5/v8/keytab"
	"github.com/wangzhengzh/gokrb5/v8/test/testdata"
)

const (
	kRB5CONF = `[libdefaults]
  default_realm = TEST.GOKRB5
  dns_lookup_realm = false
  dns_lookup_kdc = false
  ticket_lifetime = 24h
  forwardable = yes
  default_tkt_enctypes = aes256-cts-hmac-sha1-96
  default_tgs_enctypes = aes256-cts-hmac-sha1-96

[realms]
 TEST.GOKRB5 = {
  kdc = 10.80.88.88:88
  admin_server = 10.80.88.88:749
  default_domain = test.gokrb5
 }

[domain_realm]
 .test.gokrb5 = TEST.GOKRB5
 test.gokrb5 = TEST.GOKRB5
`
)

func main() {
	l := log.New(os.Stderr, "GOKRB5 Client: ", log.LstdFlags)
	//defer profile.Start(profile.TraceProfile).Stop()

	// Load the keytab
	kb, _ := hex.DecodeString(testdata.KEYTAB_TESTUSER2_TEST_GOKRB5)
	kt := keytab.New()
	err := kt.Unmarshal(kb)
	if err != nil {
		l.Fatalf("could not load client keytab: %v", err)
	}

	// Load the client krb5 config
	conf, err := config.NewFromString(kRB5CONF)
	if err != nil {
		l.Fatalf("could not load krb5.conf: %v", err)
	}
	addr := os.Getenv("TEST_KDC_ADDR")
	if addr != "" {
		conf.Realms[0].KDC = []string{addr + ":88"}
	}

	// Create the client with the keytab
	cl := client.NewWithKeytab("testuser2", "TEST.GOKRB5", kt, conf, client.Logger(l), client.DisablePAFXFAST(true))

	// Log in the client
	err = cl.Login()
	if err != nil {
		l.Fatalf("could not login client: %v", err)
	}

	for {
		_, _, err := cl.GetServiceTicket("HTTP/host.test.gokrb5")
		if err != nil {
			l.Printf("failed to get service ticket: %v\n", err)
		}
		time.Sleep(time.Minute * 5)
	}
}
[ "\"TEST_KDC_ADDR\"" ]
[]
[ "TEST_KDC_ADDR" ]
[]
["TEST_KDC_ADDR"]
go
1
0
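The only environment lookup in the record above is the TEST_KDC_ADDR override of the configured KDC. A sketch isolating that pattern into a runnable unit follows; the krb5.conf snippet is trimmed for brevity, and the import path is taken directly from the record.

package main

import (
	"fmt"
	"os"

	"github.com/wangzhengzh/gokrb5/v8/config" // import path taken from the record
)

func main() {
	conf, err := config.NewFromString(`[libdefaults]
default_realm = TEST.GOKRB5
[realms]
TEST.GOKRB5 = {
  kdc = 10.80.88.88:88
}
`)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Same override the example client performs before constructing the
	// client: TEST_KDC_ADDR, when set, replaces the krb5.conf KDC address.
	if addr := os.Getenv("TEST_KDC_ADDR"); addr != "" {
		conf.Realms[0].KDC = []string{addr + ":88"}
	}
	fmt.Println("KDC endpoints:", conf.Realms[0].KDC)
}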
nflgame/game.py
"""NFL Games Games""" from collections import namedtuple import os import os.path as path import gzip import json import socket import sys import datetime import time import logging import urllib.request, urllib.error, urllib.parse from collections import OrderedDict import nflgame.player import nflgame.sched import nflgame.seq import nflgame.statmap import nflgame.live import pytz log_level = os.getenv("NFLGAME_LOG_LEVEL", "") logging.basicConfig() logger = logging.getLogger("nflgame") if log_level == "INFO": logger.root.setLevel(logging.INFO) _MAX_INT = sys.maxsize _jsonf = path.join(path.split(__file__)[0], "gamecenter-json", "%s.json.gz") _json_base_url = "http://www.nfl.com/liveupdate/game-center/%s/%s_gtd.json" GameDiff = namedtuple("GameDiff", ["before", "after", "plays", "players"]) """ Represents the difference between two points in time of the same game in terms of plays and player statistics. """ TeamStats = namedtuple( "TeamStats", [ "first_downs", "total_yds", "passing_yds", "rushing_yds", "penalty_cnt", "penalty_yds", "turnovers", "punt_cnt", "punt_yds", "punt_avg", "pos_time", ], ) """A collection of team statistics for an entire game.""" class FieldPosition(object): """ Represents field position. The representation here is an integer offset where the 50 yard line corresponds to '0'. Being in the own territory corresponds to a negative offset while being in the opponent's territory corresponds to a positive offset. e.g., NE has the ball on the NE 45, the offset is -5. e.g., NE has the ball on the NYG 2, the offset is 48. This representation allows for gains in any particular play to be added to the field offset to get the new field position as the result of the play. """ def __new__(cls, pos_team=None, yardline=None, offset=None): if not yardline and offset is None: return None return object.__new__(cls) def __init__(self, pos_team=None, yardline=None, offset=None): """ pos_team is the team on offense, and yardline is a string formatted like 'team-territory yard-line'. e.g., "NE 32". An offset can be given directly by specifying an integer for offset. """ if isinstance(offset, int): self.offset = offset return if yardline == "50": self.offset = 0 return territory, yd_str = yardline.split() yd = int(yd_str) if territory == pos_team: self.offset = -(50 - yd) else: self.offset = 50 - yd def __cmp__(self, other): if isinstance(other, int): return cmp(self.offset, other) return cmp(self.offset, other.offset) def __str__(self): if self.offset > 0: return "OPP %d" % (50 - self.offset) elif self.offset < 0: return "OWN %d" % (50 + self.offset) else: return "MIDFIELD" def add_yards(self, yards): """ Returns a new field position with the yards added to self. Yards may be negative. """ newoffset = max(-50, min(50, self.offset + yards)) return FieldPosition(offset=newoffset) class PossessionTime(object): """ Represents the amount of time a drive lasted in (minutes, seconds). """ def __init__(self, clock): self.clock = clock try: self.minutes, self.seconds = list(map(int, self.clock.split(":"))) except ValueError: self.minutes, self.seconds = 0, 0 def total_seconds(self): """ Returns the total number of seconds that this possession lasted for. 
""" return self.seconds + self.minutes * 60 def __lt__(self, other): return self.total_seconds() < other.total_seconds() def __gt__(self, other): return self.total_seconds() > other.total_seconds() def __eq__(self, other): return self.total_seconds() == other.total_seconds() def __ge__(self, other): return self.total_seconds() >= other.total_seconds() def __le__(self, other): return self.total_seconds() <= other.total_seconds() def __eq__(self, other): return self.total_seconds() == other.total_seconds() def __add__(self, other): new_time = PossessionTime("0:00") total_seconds = self.total_seconds() + other.total_seconds() new_time.minutes = total_seconds / 60 new_time.seconds = total_seconds % 60 new_time.clock = "%.2d:%.2d" % (new_time.minutes, new_time.seconds) return new_time def __sub__(self, other): assert self >= other new_time = PossessionTime("0:00") total_seconds = self.total_seconds() - other.total_seconds() new_time.minutes = total_seconds / 60 new_time.seconds = total_seconds % 60 new_time.clock = "%.2d:%.2d" % (new_time.minutes, new_time.seconds) return new_time def __str__(self): return self.clock class GameClock(object): """ Represents the current time in a game. Namely, it keeps track of the quarter and clock time. Also, GameClock can represent whether the game hasn't started yet, is half time or if it's over. """ def __init__(self, qtr, clock): self.qtr = qtr self.clock = clock try: self._minutes, self._seconds = list(map(int, self.clock.split(":"))) except ValueError: self._minutes, self._seconds = 0, 0 except AttributeError: self._minutes, self._seconds = 0, 0 try: self.__qtr = int(self.qtr) if self.__qtr >= 3: self.__qtr += 1 # Let halftime be quarter 3 except ValueError: if self.is_pregame(): self.__qtr = 0 elif self.is_halftime(): self.__qtr = 3 elif self.is_final(): self.__qtr = sys.maxsize else: self.qtr = "Pregame" @property def quarter(self): return self.__qtr @quarter.setter def quarter(self, value): if isinstance(value, int): assert value >= 0 and value <= 4 self.qtr = str(value) self.__qtr = value else: self.qtr = value self.__qtr = 0 def is_pregame(self): return self.qtr == "Pregame" def is_halftime(self): return self.qtr == "Halftime" def is_final(self): return "final" in self.qtr.lower() def elapsed_time(self): return self.__qtr - 1 * 15 * 60 + (self._minutes * 60) + self._seconds def __lt__(self, other): return self.elapsed_time() < other.elapsed_time() def __gt__(self, other): return self.elapsed_time() > other.elapsed_time() def __eq__(self, other): return self.elapsed_time() == other.elapsed_time() def __ge__(self, other): return self.elapsed_time() >= other.elapsed_time() def __le__(self, other): return self.elapsed_time() <= other.elapsed_time() def __eq__(self, other): return self.elapsed_time() == other.elapsed_time() def __str__(self): """ Returns a nicely formatted string indicating the current time of the game. Examples include "Q1 10:52", "Q4 1:25", "Pregame", "Halftime" and "Final". """ try: q = int(self.qtr) return "Q%d %s" % (q, self.clock) except ValueError: return self.qtr class Game(object): """ Game represents a single pre- or regular-season game. It provides a window into the statistics of every player that played into the game, along with the winner of the game, the score and a list of all the scoring plays. 
""" def __new__(cls, eid=None, fpath=None, **kwargs): logger.info("EID: {}".format(eid)) if eid is not None: game_starting_soon, schedule_info = _infer_gc_json_available(eid) if game_starting_soon: try: rawData = _get_json_data(eid, fpath) except urllib.error.URLError: # @TODO - find when this happens, likely never as ln# 868 catches # 404 errors and returns None return None else: if len(schedule_info) == 0: # No game was found in the schedule return None gameData = { "home": {"abbr": schedule_info["home"], "score": {"T": 0}}, "away": {"abbr": schedule_info["away"], "score": {"T": 0}}, "gamekey": schedule_info["gamekey"], "qtr": "Pregame", "gcJsonAvailable": False, "clock": 0, } game = object.__new__(cls) # IF the game isn't starting dump what schedule data we have if not game_starting_soon: game.data = gameData game.eid = eid else: game.rawData = rawData try: if eid is not None: game.eid = eid game.data = json.loads(game.rawData.decode("utf-8"))[game.eid] else: # For when we have rawData (fpath) and no eid. game.eid = None game.data = json.loads(game.rawData.decode("utf-8")) for k, v in game.data.items(): if isinstance(v, dict): game.eid = k game.data = v break assert game.eid is not None game.data["gcJsonAvailable"] = True except ValueError: return None return game def __init__(self, eid=None, fpath=None, **kwargs): """ Creates a new Game instance given a game identifier. The game identifier is used by NFL.com's GameCenter live update web pages. It is used to construct a URL to download JSON data for the game. If the game has been completed, the JSON data will be cached to disk so that subsequent accesses will not re-download the data but instead read it from disk. When the JSON data is written to disk, it is compressed using gzip. """ # Make the schedule info more accessible. self.schedule = nflgame.sched.games.get(self.eid, None) # Home and team cumulative statistics. self.home = self.data["home"]["abbr"] self.away = self.data["away"]["abbr"] self.gamekey = nflgame.sched.games[self.eid]["gamekey"] self.time = GameClock(self.data["qtr"], self.data["clock"]) self.score_home = int(self.data["home"]["score"]["T"]) self.score_away = int(self.data["away"]["score"]["T"]) self.gcJsonAvailable = self.data["gcJsonAvailable"] if self.data["gcJsonAvailable"]: self.stats_home = _json_team_stats(self.data["home"]["stats"]["team"]) self.stats_away = _json_team_stats(self.data["away"]["stats"]["team"]) # Load up some simple static values. self.down = _tryint(self.data["down"]) self.togo = _tryint(self.data["togo"]) for q in (1, 2, 3, 4, 5): for team in ("home", "away"): score = self.data[team]["score"][str(q)] self.__dict__["score_%s_q%d" % (team, q)] = int(score) if not self.game_over(): self.winner = None else: if self.score_home > self.score_away: self.winner = self.home self.loser = self.away elif self.score_away > self.score_home: self.winner = self.away self.loser = self.home else: self.winner = "%s/%s" % (self.home, self.away) self.loser = "%s/%s" % (self.home, self.away) # Load the scoring summary into a simple list of strings. self.scores = [] for k in sorted(map(int, self.data["scrsummary"])): play = self.data["scrsummary"][str(k)] s = "%s - Q%d - %s - %s" % ( play["team"], play["qtr"], play["type"], play["desc"], ) self.scores.append(s) # Check to see if the game is over, and if so, cache the data. 
if self.game_over() and not os.access(_jsonf % eid, os.R_OK): self.save() def is_home(self, team): """Returns true if team (i.e., 'NE') is the home team.""" return team == self.home def season(self): """Returns the year of the season this game belongs to.""" year = int(self.eid[0:4]) month = int(self.eid[4:6]) if month <= 3: year -= 1 return year def game_over(self): """game_over returns true if the game is no longer being played.""" return self.time.is_final() def playing(self): """playing returns true if the game is currently being played.""" return not self.time.is_pregame() and not self.time.is_final() def save(self, fpath=None): """ Save the JSON data to fpath. This is done automatically if the game is over. """ if fpath is None: fpath = _jsonf % self.eid try: with gzip.open(fpath, "w+") as outfile: outfile.write(self.rawData) except IOError: logger.info( "Could not cache JSON data. Please " "make '%s' writable." % os.path.dirname(fpath), file=sys.stderr, ) def nice_score(self): """ Returns a string of the score of the game. e.g., "NE (32) vs. NYG (0)". """ return "%s (%d) at %s (%d)" % ( self.away, self.score_away, self.home, self.score_home, ) def max_player_stats(self): """ Returns a GenPlayers sequence of player statistics that combines game statistics and play statistics by taking the max value of each corresponding statistic. This is useful when accuracy is desirable. Namely, using only play-by-play data or using only game statistics can be unreliable. That is, both are inconsistently correct. Taking the max values of each statistic reduces the chance of being wrong (particularly for stats that are in both play-by-play data and game statistics), but does not eliminate them. """ if not self.gcJsonAvailable: return {} game_players = list(self.players) play_players = list(self.drives.plays().players()) max_players = OrderedDict() # So this is a little tricky. It's possible for a player to have # only statistics at the play level, and therefore not be represented # in the game level statistics. Therefore, we initialize our # max_players with play-by-play stats first. Then go back through # and combine them with available game statistics. for pplay in play_players: newp = nflgame.player.GamePlayerStats( pplay.playerid, pplay.name, pplay.home, pplay.team ) maxstats = {} for stat, val in pplay._stats.items(): maxstats[stat] = val newp._overwrite_stats(maxstats) max_players[pplay.playerid] = newp for newp in max_players.values(): for pgame in game_players: if pgame.playerid != newp.playerid: continue maxstats = {} for stat, val in pgame._stats.items(): maxstats[stat] = max([val, newp._stats.get(stat, -_MAX_INT)]) newp._overwrite_stats(maxstats) break return nflgame.seq.GenPlayerStats(max_players) def __getattr__(self, name): if name == "players": self.__players = _json_game_player_stats(self, self.data) self.players = nflgame.seq.GenPlayerStats(self.__players) return self.players if name == "drives": self.__drives = _json_drives(self, self.home, self.data["drives"]) self.drives = nflgame.seq.GenDrives(self.__drives) return self.drives raise AttributeError def __sub__(self, other): return diff(other, self) def __str__(self): return self.nice_score() def diff(before, after): """ Returns the difference between two points of time in a game in terms of plays and player statistics. The return value is a GameDiff namedtuple with two attributes: plays and players. Each contains *only* the data that is in the after game but not in the before game. 
This is useful for sending alerts where you're guaranteed to see each play statistic only once (assuming NFL.com behaves itself). """ assert after.eid == before.eid plays = [] after_plays = list(after.drives.plays()) before_plays = list(before.drives.plays()) for play in after_plays: if play not in before_plays: plays.append(play) # You might think that updated play data is enough. You could scan # it for statistics you're looking for (like touchdowns). # But sometimes a play can sneak in twice if its description gets # updated (late call? play review? etc.) # Thus, we do a diff on the play statistics for player data too. _players = OrderedDict() after_players = list(after.max_player_stats()) before_players = list(before.max_player_stats()) for aplayer in after_players: has_before = False for bplayer in before_players: if aplayer.playerid == bplayer.playerid: has_before = True pdiff = aplayer - bplayer if pdiff is not None: _players[aplayer.playerid] = pdiff if not has_before: _players[aplayer.playerid] = aplayer players = nflgame.seq.GenPlayerStats(_players) return GameDiff(before=before, after=after, plays=plays, players=players) class Drive(object): """ Drive represents a single drive in an NFL game. It contains a list of all plays that happened in the drive, in chronological order. It also contains meta information about the drive such as the start and stop times and field position, length of possession, the number of first downs and a short descriptive string of the result of the drive. """ def __init__(self, game, drive_num, home_team, data): if data is None or "plays" not in data or len(data["plays"]) == 0: return self.game = game self.drive_num = drive_num self.team = data["posteam"] self.home = self.team == home_team self.first_downs = int(data["fds"]) self.result = data["result"] self.penalty_yds = int(data["penyds"]) self.total_yds = int(data["ydsgained"]) self.pos_time = PossessionTime(data["postime"]) self.play_cnt = int(data["numplays"]) self.field_start = FieldPosition(self.team, data["start"]["yrdln"]) self.time_start = GameClock(data["start"]["qtr"], data["start"]["time"]) # When the game is over, the yardline isn't reported. So find the # last play that does report a yardline. if data["end"]["yrdln"].strip(): self.field_end = FieldPosition(self.team, data["end"]["yrdln"]) else: self.field_end = None playids = sorted(map(int, list(data["plays"].keys())), reverse=True) for pid in playids: yrdln = data["plays"][str(pid)]["yrdln"].strip() if yrdln: self.field_end = FieldPosition(self.team, yrdln) break if self.field_end is None: self.field_end = FieldPosition(self.team, "50") # When a drive lasts from Q1 to Q2 or Q3 to Q4, the 'end' doesn't # seem to change to the proper quarter. So scan all of the plays # and use the maximal quarter listed. (Just taking the last doesn't # seem to always work.) # lastplayid = str(max(map(int, data['plays'].keys()))) # endqtr = data['plays'][lastplayid]['qtr'] qtrs = [p["qtr"] for p in list(data["plays"].values())] maxq = str(max(list(map(int, qtrs)))) self.time_end = GameClock(maxq, data["end"]["time"]) # One last sanity check. If the end time is less than the start time, # then bump the quarter if it seems reasonable. # This technique will blow up if a drive lasts more than fifteen # minutes and the quarter numbering is messed up. 
if self.time_end <= self.time_start and self.time_end.quarter in (1, 3): self.time_end.quarter += 1 self.__plays = _json_plays(self, data["plays"]) self.plays = nflgame.seq.GenPlays(self.__plays) def __add__(self, other): """ Adds the statistics of two drives together. Note that once two drives are added, the following fields automatically get None values: result, field_start, field_end, time_start and time_end. """ assert self.team == other.team, ( 'Cannot add drives from different teams "%s" and "%s".' % (self.team, other.team) ) new_drive = Drive(None, 0, "", None) new_drive.team = self.team new_drive.home = self.home new_drive.first_downs = self.first_downs + other.first_downs new_drive.penalty_yds = self.penalty_yds + other.penalty_yds new_drive.total_yds = self.total_yds + other.total_yds new_drive.pos_time = self.pos_time + other.pos_time new_drive.play_cnt = self.play_cnt + other.play_cnt new_drive.__plays = self.__plays + other.__plays new_drive.result = None new_drive.field_start = None new_drive.field_end = None new_drive.time_start = None new_drive.time_end = None return new_drive def __str__(self): return "%s (Start: %s, End: %s) %s" % ( self.team, self.time_start, self.time_end, self.result, ) class Play(object): """ Play represents a single play. It contains a list of all players that participated in the play (including offense, defense and special teams). The play also includes meta information about what down it is, field position, clock time, etc. Play objects also contain team-level statistics, such as whether the play was a first down, a fourth down failure, etc. """ def __init__(self, drive, playid, data): self.data = data self.drive = drive self.playid = playid self.team = data["posteam"] self.home = self.drive.home self.desc = data["desc"] self.note = data["note"] self.down = int(data["down"]) self.yards_togo = int(data["ydstogo"]) self.touchdown = "touchdown" in self.desc.lower() self._stats = {} if not self.team: self.time, self.yardline = None, None else: self.time = GameClock(data["qtr"], data["time"]) self.yardline = FieldPosition(self.team, data["yrdln"]) # Load team statistics directly into the Play instance. # Things like third down attempts, first downs, etc. if "0" in data["players"]: for info in data["players"]["0"]: if info["statId"] not in nflgame.statmap.idmap: continue statvals = nflgame.statmap.values(info["statId"], info["yards"]) for k, v in statvals.items(): v = self.__dict__.get(k, 0) + v self.__dict__[k] = v self._stats[k] = v # Load the sequence of "events" in a play into a list of dictionaries. self.events = _json_play_events(data["players"]) # Now load cumulative player data for this play into # a GenPlayerStats generator. We then flatten this data # and add it to the play itself so that plays can be # filter by these statistics. self.__players = _json_play_players(self, data["players"]) self.players = nflgame.seq.GenPlayerStats(self.__players) for p in self.players: for k, v in p.stats.items(): # Sometimes we may see duplicate statistics (like tackle # assists). Let's just overwrite in this case, since this # data is from the perspective of the play. i.e., there # is one assisted tackle rather than two. 
self.__dict__[k] = v self._stats[k] = v def has_player(self, playerid): """Whether a player with id playerid participated in this play.""" return playerid in self.__players def __str__(self): if self.team: if self.down != 0: return "(%s, %s, Q%d, %d and %d) %s" % ( self.team, self.data["yrdln"], self.time.qtr, self.down, self.yards_togo, self.desc, ) else: return "(%s, %s, Q%d) %s" % ( self.team, self.data["yrdln"], self.time.qtr, self.desc, ) return self.desc def __eq__(self, other): """ We use the play description to determine equality because the play description can be changed. (Like when a play is reversed.) """ return self.playid == other.playid and self.desc == other.desc def __getattr__(self, name): if name.startswith("__"): raise AttributeError return 0 def _json_team_stats(data): """ Takes a team stats JSON entry and converts it to a TeamStats namedtuple. """ return TeamStats( first_downs=int(data["totfd"]), total_yds=int(data["totyds"]), passing_yds=int(data["pyds"]), rushing_yds=int(data["ryds"]), penalty_cnt=int(data["pen"]), penalty_yds=int(data["penyds"]), turnovers=int(data["trnovr"]), punt_cnt=int(data["pt"]), punt_yds=int(data["ptyds"]), punt_avg=int(data["ptavg"]), pos_time=PossessionTime(data["top"]), ) def _json_drives(game, home_team, data): """ Takes a home or away JSON entry and converts it to a list of Drive objects. """ drive_nums = [] for drive_num in data: try: drive_nums.append(int(drive_num)) except: pass drives = [] for i, drive_num in enumerate(sorted(drive_nums), 1): d = Drive(game, i, home_team, data[str(drive_num)]) if not hasattr(d, "game"): # not a valid drive continue drives.append(d) return drives def _json_plays(drive, data): """ Takes a single JSON drive entry (data) and converts it to a list of Play objects. This includes trying to resolve duplicate play conflicts by only taking the first instance of a play. """ plays = [] seen_ids = set() seen_desc = set() # Sometimes duplicates have different play ids... for playid in map(str, sorted(map(int, data))): p = data[playid] desc = (p["desc"], p["time"], p["yrdln"], p["qtr"]) if playid in seen_ids or desc in seen_desc: continue seen_ids.add(playid) seen_desc.add(desc) plays.append(Play(drive, playid, data[playid])) return plays def _json_play_players(play, data): """ Takes a single JSON play entry (data) and converts it to an OrderedDict of player statistics. play is the instance of Play that this data is part of. It is used to determine whether the player belong to the home team or not. """ players = OrderedDict() for playerid, statcats in data.items(): if playerid == "0": continue for info in statcats: if info["statId"] not in nflgame.statmap.idmap: continue if playerid not in players: home = play.drive.game.is_home(info["clubcode"]) if home: team_name = play.drive.game.home else: team_name = play.drive.game.away stats = nflgame.player.PlayPlayerStats( playerid, info["playerName"], home, team_name ) players[playerid] = stats statvals = nflgame.statmap.values(info["statId"], info["yards"]) players[playerid]._add_stats(statvals) return players def _json_play_events(data): """ Takes a single JSON play entry (data) and converts it to a list of events. 
""" temp = list() for playerid, statcats in data.items(): for info in statcats: if info["statId"] not in nflgame.statmap.idmap: continue statvals = nflgame.statmap.values(info["statId"], info["yards"]) statvals["playerid"] = None if playerid == "0" else playerid statvals["playername"] = info["playerName"] or None statvals["team"] = info["clubcode"] temp.append((int(info["sequence"]), statvals)) return [t[1] for t in sorted(temp, key=lambda t: t[0])] def _json_game_player_stats(game, data): """ Parses the 'home' and 'away' team stats and returns an OrderedDict mapping player id to their total game statistics as instances of nflgame.player.GamePlayerStats. """ players = OrderedDict() for team in ("home", "away"): for category in nflgame.statmap.categories: if category not in data[team]["stats"]: continue for pid, raw in data[team]["stats"][category].items(): stats = {} for k, v in raw.items(): if k == "name": continue stats["%s_%s" % (category, k)] = v if pid not in players: home = team == "home" if home: team_name = game.home else: team_name = game.away players[pid] = nflgame.player.GamePlayerStats( pid, raw["name"], home, team_name ) players[pid]._add_stats(stats) return players def _get_json_data(eid=None, fpath=None): """ Returns the JSON data corresponding to the game represented by eid. If the JSON data is already on disk, it is read, decompressed and returned. Otherwise, the JSON data is downloaded from the NFL web site. If the data doesn't exist yet or there was an error, _get_json_data returns None. If eid is None, then the JSON data is read from the file at fpath. """ assert eid is not None or fpath is not None if fpath is not None: return gzip.open(fpath).read() fpath = _jsonf % eid if os.access(fpath, os.R_OK): logger.info("_get_json_data: json cache found, returning cached data ") return gzip.open(fpath).read() try: logger.info("_get_json_data: firing request") return urllib.request.urlopen(_json_base_url % (eid, eid), timeout=5).read() except urllib.error.HTTPError: pass except socket.timeout: pass logger.info("_get_json_data: Failed request, returning None") return None def _infer_gc_json_available(eid): """ Check to see if the game-center json even has a chance to be available, i.e. the game starts in <= 10 minutes. This is used to prevent superfluous calls to the nfl api. returns a tuple - True/False if the game is about to start and the schedule data """ logger.info("Checking to see if the game exists in the schedule") schedule_info = nflgame._search_schedule(eid=eid) if len(schedule_info) == 0: logger.info("No game found") # No game found return False, [] gametime = nflgame.live._game_datetime(schedule_info) now = nflgame.live._now() game_starting_soon = (gametime - now).total_seconds() <= 600 logger.info("Game Starting Soon Check: {}".format(game_starting_soon)) return game_starting_soon, schedule_info def _tryint(v): """ Tries to convert v to an integer. If it fails, return 0. """ try: return int(v) except: return 0
[]
[]
[ "NFLGAME_LOG_LEVEL" ]
[]
["NFLGAME_LOG_LEVEL"]
python
1
0
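The FieldPosition class in the record above encodes every yard line as a signed offset from midfield: 0 is the 50-yard line, negative offsets are the possessing team's own territory, positive ones the opponent's. A small Go re-expression of just that arithmetic, written for illustration only (the dataset record itself is Python):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// offset mirrors FieldPosition's convention: "NE 45" with NE in possession
// is -5 (own territory); "NYG 2" with NE in possession is 48.
func offset(posTeam, yardline string) int {
	if yardline == "50" {
		return 0
	}
	parts := strings.Fields(yardline) // e.g. "NE 32" -> ["NE", "32"]
	yd, _ := strconv.Atoi(parts[1])
	if parts[0] == posTeam {
		return -(50 - yd) // own territory
	}
	return 50 - yd // opponent's territory
}

func main() {
	fmt.Println(offset("NE", "NE 45")) // -5, matching the docstring example
	fmt.Println(offset("NE", "NYG 2")) // 48, matching the docstring example
}

Because gains and losses are plain integer additions to this offset, the add_yards method in the record reduces to clamping offset+yards into [-50, 50].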
Tests/test_Emboss.py
# Copyright 2009 by Peter Cock. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. """Runs a few EMBOSS tools to check our wrappers and parsers.""" import os import sys import unittest import subprocess from StringIO import StringIO from Bio.Emboss.Applications import WaterCommandline, NeedleCommandline from Bio.Emboss.Applications import SeqretCommandline, SeqmatchallCommandline from Bio import SeqIO from Bio import AlignIO from Bio import MissingExternalDependencyError from Bio.Alphabet import generic_protein, generic_dna, generic_nucleotide from Bio.Seq import Seq, translate from Bio.SeqRecord import SeqRecord #from Bio.Data.IUPACData import ambiguous_dna_letters ################################################################# #Try to avoid problems when the OS is in another language os.environ['LANG'] = 'C' exes_wanted = ["water", "needle", "seqret", "transeq", "seqmatchall", "embossversion"] exes = dict() #Dictionary mapping from names to exe locations if "EMBOSS_ROOT" in os.environ: #Windows default installation path is C:\mEMBOSS which contains the exes. #EMBOSS also sets an environment variable which we will check for. path = os.environ["EMBOSS_ROOT"] if os.path.isdir(path): for name in exes_wanted: if os.path.isfile(os.path.join(path, name+".exe")): exes[name] = os.path.join(path, name+".exe") del path, name if sys.platform!="win32": import commands for name in exes_wanted: #This will "just work" if installed on the path as normal on Unix output = commands.getoutput("%s -help" % name) if "not found" not in output and "not recognized" not in output: exes[name] = name del output del name if len(exes) < len(exes_wanted): raise MissingExternalDependencyError(\ "Install EMBOSS if you want to use Bio.Emboss.") def get_emboss_version(): """Returns a tuple of three ints, e.g. (6,1,0)""" #Windows and Unix versions of EMBOSS seem to differ in #which lines go to stdout and stderr - so merge them. child = subprocess.Popen(exes["embossversion"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, shell=(sys.platform!="win32")) stdout, stderr = child.communicate() child.stdout.close() #This is both stdout and stderr del child assert stderr is None #Send to stdout instead for line in stdout.split("\n"): if line.strip()=="Reports the current EMBOSS version number": pass elif line.startswith("Writes the current EMBOSS version number"): pass elif line.count(".")==2: return tuple(int(v) for v in line.strip().split(".")) elif line.count(".")==3: #e.g. I installed mEMBOSS-6.2.0.1-setup.exe #which reports 6.2.0.1 - for this return (6,2,0) return tuple(int(v) for v in line.strip().split("."))[:3] else: #Either we can't understand the output, or this is really #an error message not caught earlier (e.g. not in English) raise MissingExternalDependencyError(\ "Install EMBOSS if you want to use Bio.Emboss (%s)." \ % line) #To avoid confusing known errors from old versions of EMBOSS ... emboss_version = get_emboss_version() if emboss_version < (6,1,0): raise MissingExternalDependencyError(\ "Test requires EMBOSS 6.1.0 patch 3 or later.") ################################################################# #Top level function as this makes it easier to use for debugging: def emboss_convert(filename, old_format, new_format): """Run seqret, returns handle.""" #Setup, this assumes for all the format names used #Biopython and EMBOSS names are consistent! 
cline = SeqretCommandline(exes["seqret"], sequence = filename, sformat = old_format, osformat = new_format, auto = True, #no prompting stdout = True) #Run the tool, child = subprocess.Popen(str(cline), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=(sys.platform!="win32")) child.stdin.close() child.stderr.close() return child.stdout #Top level function as this makes it easier to use for debugging: def emboss_piped_SeqIO_convert(records, old_format, new_format): """Run seqret, returns records (as a generator).""" #Setup, this assumes for all the format names used #Biopython and EMBOSS names are consistent! cline = SeqretCommandline(exes["seqret"], sformat = old_format, osformat = new_format, auto = True, #no prompting filter = True) #Run the tool, child = subprocess.Popen(str(cline), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=(sys.platform!="win32")) SeqIO.write(records, child.stdin, old_format) child.stdin.close() child.stderr.close() #TODO - Is there a nice way to return an interator AND #automatically close the handle? records = list(SeqIO.parse(child.stdout, new_format)) child.stdout.close() return records #Top level function as this makes it easier to use for debugging: def emboss_piped_AlignIO_convert(alignments, old_format, new_format): """Run seqret, returns alignments (as a generator).""" #Setup, this assumes for all the format names used #Biopython and EMBOSS names are consistent! cline = SeqretCommandline(exes["seqret"], sformat = old_format, osformat = new_format, auto = True, #no prompting filter = True) #Run the tool, child = subprocess.Popen(str(cline), stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True, shell=(sys.platform!="win32")) try: AlignIO.write(alignments, child.stdin, old_format) except Exception, err: child.stdin.close() child.stderr.close() child.stdout.close() raise child.stdin.close() child.stderr.close() #TODO - Is there a nice way to return an interator AND #automatically close the handle? try: aligns = list(AlignIO.parse(child.stdout, new_format)) except Exception, err: child.stdout.close() raise child.stdout.close() return aligns #Top level function as this makes it easier to use for debugging: def compare_records(old_list, new_list): """Check two lists of SeqRecords agree, raises a ValueError if mismatch.""" if len(old_list) != len(new_list): raise ValueError("%i vs %i records" % (len(old_list), len(new_list))) for old, new in zip(old_list, new_list): #Note the name matching is a bit fuzzy, e.g. truncation and #no spaces in PHYLIP files. 
    if old.id != new.id and old.name != new.name \
    and (old.id not in new.id) and (new.id not in old.id) \
    and (old.id.replace(" ","_") != new.id.replace(" ","_")):
        raise ValueError("'%s' or '%s' vs '%s' or '%s' records" \
                         % (old.id, old.name, new.id, new.name))
    if len(old.seq) != len(new.seq):
        raise ValueError("%i vs %i" % (len(old.seq), len(new.seq)))
    if str(old.seq).upper() != str(new.seq).upper():
        if str(old.seq).replace("X","N") == str(new.seq):
            raise ValueError("X -> N (protein forced into nucleotide?)")
        if len(old.seq) < 200:
            raise ValueError("'%s' vs '%s'" % (old.seq, new.seq))
        else:
            raise ValueError("'%s...%s' vs '%s...%s'" \
                             % (old.seq[:60], old.seq[-10:],
                                new.seq[:60], new.seq[-10:]))
    if old.features and new.features \
    and len(old.features) != len(new.features):
        raise ValueError("%i vs %i features" \
                         % (len(old.features), len(new.features)))
    #TODO - check annotation
    return True

#Top level function as this makes it easier to use for debugging:
def compare_alignments(old_list, new_list):
    """Check two lists of Alignments agree, raises a ValueError if mismatch."""
    if len(old_list) != len(new_list):
        raise ValueError("%i vs %i alignments" % (len(old_list), len(new_list)))
    for old, new in zip(old_list, new_list):
        if len(old) != len(new):
            raise ValueError("Alignment with %i vs %i records" \
                             % (len(old), len(new)))
        compare_records(old, new)
    return True


class SeqRetSeqIOTests(unittest.TestCase):
    """Check EMBOSS seqret against Bio.SeqIO for converting files."""

    def tearDown(self):
        clean_up()

    def check_SeqIO_to_EMBOSS(self, in_filename, in_format, skip_formats=[],
                              alphabet=None):
        """Can Bio.SeqIO write files seqret can read back?"""
        if alphabet:
            records = list(SeqIO.parse(in_filename, in_format, alphabet))
        else:
            records = list(SeqIO.parse(in_filename, in_format))
        for temp_format in ["genbank", "embl", "fasta"]:
            if temp_format in skip_formats:
                continue
            new_records = list(emboss_piped_SeqIO_convert(records,
                                                          temp_format,
                                                          "fasta"))
            try:
                self.assertTrue(compare_records(records, new_records))
            except ValueError as err:
                raise ValueError("Disagree on file %s %s in %s format: %s" \
                                 % (in_format, in_filename, temp_format, err))

    def check_EMBOSS_to_SeqIO(self, filename, old_format, skip_formats=[]):
        """Can Bio.SeqIO read seqret's conversion of the file?"""
        #TODO: Why can't we read EMBOSS's swiss output?
        self.assertTrue(os.path.isfile(filename))
        old_records = list(SeqIO.parse(filename, old_format))
        for new_format in ["genbank", "fasta", "pir", "embl", "ig"]:
            if new_format in skip_formats:
                continue
            handle = emboss_convert(filename, old_format, new_format)
            new_records = list(SeqIO.parse(handle, new_format))
            handle.close()
            try:
                self.assertTrue(compare_records(old_records, new_records))
            except ValueError as err:
                raise ValueError("Disagree on %s file %s in %s format: %s" \
                                 % (old_format, filename, new_format, err))

    def check_SeqIO_with_EMBOSS(self, filename, old_format, skip_formats=[],
                                alphabet=None):
        #Check EMBOSS can read Bio.SeqIO output...
        self.check_SeqIO_to_EMBOSS(filename, old_format, skip_formats,
                                   alphabet)
        #Check Bio.SeqIO can read EMBOSS seqret output...
        self.check_EMBOSS_to_SeqIO(filename, old_format, skip_formats)

    def test_abi(self):
        """SeqIO agrees with EMBOSS' Abi to FASTQ conversion."""
        #This lets us check the id, sequence, and quality scores
        for filename in ["Abi/3730.ab1", "Abi/empty.ab1"]:
            old = SeqIO.read(filename, "abi")
            handle = emboss_convert(filename, "abi", "fastq-sanger")
            new = SeqIO.read(handle, "fastq-sanger")
            handle.close()
            if emboss_version == (6, 4, 0) and new.id == "EMBOSS_001":
                #Avoid bug in EMBOSS 6.4.0 (patch forthcoming)
                pass
            else:
                self.assertEqual(old.id, new.id)
            self.assertEqual(str(old.seq), str(new.seq))
            if emboss_version < (6, 3, 0) \
            and new.letter_annotations["phred_quality"] == [1] * len(old):
                #Apparent bug in EMBOSS 6.2.0.1 on Windows
                pass
            else:
                self.assertEqual(old.letter_annotations,
                                 new.letter_annotations)

    def test_genbank(self):
        """SeqIO & EMBOSS reading each other's conversions of a GenBank file."""
        self.check_SeqIO_with_EMBOSS("GenBank/cor6_6.gb", "genbank")

    def test_genbank2(self):
        """SeqIO & EMBOSS reading each other's conversions of another GenBank file."""
        self.check_SeqIO_with_EMBOSS("GenBank/NC_000932.gb", "genbank")

    def test_embl(self):
        """SeqIO & EMBOSS reading each other's conversions of an EMBL file."""
        self.check_SeqIO_with_EMBOSS("EMBL/U87107.embl", "embl")

    def test_ig(self):
        """SeqIO & EMBOSS reading each other's conversions of an ig file."""
        #NOTE - EMBOSS considers "genbank" to be for nucleotides only,
        #and will turn "X" into "N" for GenBank output.
        self.check_SeqIO_to_EMBOSS("IntelliGenetics/VIF_mase-pro.txt", "ig",
                                   alphabet=generic_protein,
                                   skip_formats=["genbank", "embl"])
        #TODO - What does a % in an ig sequence mean?
        #e.g. "IntelliGenetics/vpu_nucaligned.txt"
        #and "IntelliGenetics/TAT_mase_nuc.txt"
        #EMBOSS seems to ignore them.

    def test_pir(self):
        """SeqIO & EMBOSS reading each other's conversions of a PIR file."""
        #Skip genbank here, EMBOSS mangles the LOCUS line:
        self.check_SeqIO_with_EMBOSS("NBRF/clustalw.pir", "pir",
                                     skip_formats=["genbank"])
        #Skip EMBL here, EMBOSS mangles the ID line
        #Skip GenBank, EMBOSS 6.0.1 on Windows won't output proteins as GenBank
        self.check_SeqIO_with_EMBOSS("NBRF/DMB_prot.pir", "pir",
                                     skip_formats=["embl", "genbank"])

    def test_clustalw(self):
        """SeqIO & EMBOSS reading each other's conversions of a Clustalw file."""
        self.check_SeqIO_with_EMBOSS("Clustalw/hedgehog.aln", "clustal",
                                     skip_formats=["embl", "genbank"])
        self.check_SeqIO_with_EMBOSS("Clustalw/opuntia.aln", "clustal",
                                     skip_formats=["embl", "genbank"])


class SeqRetAlignIOTests(unittest.TestCase):
    """Check EMBOSS seqret against Bio.AlignIO for converting files."""

    def tearDown(self):
        clean_up()

    def check_EMBOSS_to_AlignIO(self, filename, old_format, skip_formats=[]):
        """Can AlignIO read seqret's conversion of the file?"""
        self.assertTrue(os.path.isfile(filename), filename)
        old_aligns = list(AlignIO.parse(filename, old_format))
        formats = ["clustal", "phylip", "ig"]
        if len(old_aligns) == 1:
            formats.extend(["fasta", "nexus"])
        for new_format in formats:
            if new_format in skip_formats:
                continue
            handle = emboss_convert(filename, old_format, new_format)
            try:
                new_aligns = list(AlignIO.parse(handle, new_format))
            except:
                handle.close()
                raise ValueError("Can't parse %s file %s in %s format." \
                                 % (old_format, filename, new_format))
            handle.close()
            try:
                self.assertTrue(compare_alignments(old_aligns, new_aligns))
            except ValueError as err:
                raise ValueError("Disagree on %s file %s in %s format: %s" \
                                 % (old_format, filename, new_format, err))

    def check_AlignIO_to_EMBOSS(self, in_filename, in_format, skip_formats=[],
                                alphabet=None):
        """Can Bio.AlignIO write files seqret can read back?"""
        if alphabet:
            old_aligns = list(AlignIO.parse(in_filename, in_format, alphabet))
        else:
            old_aligns = list(AlignIO.parse(in_filename, in_format))
        formats = ["clustal", "phylip"]
        if len(old_aligns) == 1:
            formats.extend(["fasta", "nexus"])
        for temp_format in formats:
            if temp_format in skip_formats:
                continue
            #PHYLIP is a simple format which explicitly supports
            #multiple alignments (unlike FASTA).
            try:
                new_aligns = list(emboss_piped_AlignIO_convert(old_aligns,
                                                               temp_format,
                                                               "phylip"))
            except ValueError:
                #e.g. ValueError: Need a DNA, RNA or Protein alphabet
                #from writing Nexus files...
                continue
            try:
                self.assertTrue(compare_alignments(old_aligns, new_aligns))
            except ValueError as err:
                raise ValueError("Disagree on file %s %s in %s format: %s" \
                                 % (in_format, in_filename, temp_format, err))

    def check_AlignIO_with_EMBOSS(self, filename, old_format, skip_formats=[],
                                  alphabet=None):
        #Check EMBOSS can read Bio.AlignIO output...
        self.check_AlignIO_to_EMBOSS(filename, old_format, skip_formats,
                                     alphabet)
        #Check Bio.AlignIO can read EMBOSS seqret output...
        self.check_EMBOSS_to_AlignIO(filename, old_format, skip_formats)

    def test_align_clustalw(self):
        """AlignIO & EMBOSS reading each other's conversions of a ClustalW file."""
        self.check_AlignIO_with_EMBOSS("Clustalw/hedgehog.aln", "clustal")
        self.check_AlignIO_with_EMBOSS("Clustalw/opuntia.aln", "clustal")
        self.check_AlignIO_with_EMBOSS("Clustalw/odd_consensus.aln", "clustal",
                                       skip_formats=["nexus"]) #TODO - why not nexus?
        self.check_AlignIO_with_EMBOSS("Clustalw/protein.aln", "clustal")
        self.check_AlignIO_with_EMBOSS("Clustalw/promals3d.aln", "clustal")

    def test_phylip(self):
        """AlignIO & EMBOSS reading each other's conversions of a PHYLIP file."""
        self.check_AlignIO_with_EMBOSS("Phylip/horses.phy", "phylip")
        self.check_AlignIO_with_EMBOSS("Phylip/hennigian.phy", "phylip")
        self.check_AlignIO_with_EMBOSS("Phylip/reference_dna.phy", "phylip")
        self.check_AlignIO_with_EMBOSS("Phylip/reference_dna2.phy", "phylip")
        self.check_AlignIO_with_EMBOSS("Phylip/interlaced.phy", "phylip")
        self.check_AlignIO_with_EMBOSS("Phylip/interlaced2.phy", "phylip")
        self.check_AlignIO_with_EMBOSS("Phylip/random.phy", "phylip")


class PairwiseAlignmentTests(unittest.TestCase):
    """Run pairwise alignments with water and needle, and parse them."""

    def tearDown(self):
        clean_up()

    def pairwise_alignment_check(self, query_seq, targets, alignments,
                                 local=True):
        """Check pairwise alignment data is sane."""
        #The datasets should be small, so making iterators into lists is OK
        targets = list(targets)
        alignments = list(alignments)
        self.assertEqual(len(targets), len(alignments))
        for target, alignment in zip(targets, alignments):
            self.assertEqual(len(alignment), 2)
            #self.assertEqual(target.id, alignment[1].id) #too strict
            if alignment[1].id not in target.id \
            and alignment[1].id not in target.name:
                raise AssertionError("%s vs %s or %s" \
                                     % (alignment[1].id, target.id,
                                        target.name))
            if local:
                #Local alignment
                self.assertTrue(str(alignment[0].seq).replace("-", "") \
                                in query_seq)
                self.assertTrue(str(alignment[1].seq).replace("-", "").upper() \
                                in str(target.seq).upper())
            else:
                #Global alignment
                self.assertEqual(str(query_seq),
                                 str(alignment[0].seq).replace("-", ""))
                self.assertEqual(str(target.seq).upper(),
                                 str(alignment[1].seq).replace("-", "").upper())
        return True

    def run_water(self, cline):
        #Run the tool,
        stdout, stderr = cline()
        self.assertTrue(stderr.strip().startswith("Smith-Waterman local alignment"),
                        stderr)
        if cline.outfile:
            self.assertEqual(stdout.strip(), "")
            self.assertTrue(os.path.isfile(cline.outfile),
                            "Missing output file %r from:\n%s"
                            % (cline.outfile, cline))
        else:
            #Don't use this yet... could return stdout handle instead?
            return stdout

    def test_water_file(self):
        """water with the asis trick, output to a file."""
        #Setup, try a mixture of keyword arguments and later additions:
        cline = WaterCommandline(cmd=exes["water"],
                                 gapopen="10", gapextend="0.5")
        #Try using both human readable names, and the literal ones:
        cline.set_parameter("asequence", "asis:ACCCGGGCGCGGT")
        cline.set_parameter("-bsequence", "asis:ACCCGAGCGCGGT")
        #Try using a property set here:
        cline.outfile = "Emboss/temp with space.water"
        self.assertEqual(str(eval(repr(cline))), str(cline))
        #Run the tool,
        self.run_water(cline)
        #Check we can parse the output...
        align = AlignIO.read(cline.outfile, "emboss")
        self.assertEqual(len(align), 2)
        self.assertEqual(str(align[0].seq), "ACCCGGGCGCGGT")
        self.assertEqual(str(align[1].seq), "ACCCGAGCGCGGT")
        #Clean up,
        os.remove(cline.outfile)

    def test_water_piped(self):
        """water with asis trick, output piped to stdout."""
        cline = WaterCommandline(cmd=exes["water"],
                                 asequence="asis:ACCCGGGCGCGGT",
                                 bsequence="asis:ACCCGAGCGCGGT",
                                 gapopen=10, gapextend=0.5,
                                 auto=True, filter=True)
        self.assertEqual(str(cline), exes["water"] + " -auto -filter" \
                         + " -asequence=asis:ACCCGGGCGCGGT" \
                         + " -bsequence=asis:ACCCGAGCGCGGT" \
                         + " -gapopen=10 -gapextend=0.5")
        #Run the tool,
        child = subprocess.Popen(str(cline),
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True,
                                 shell=(sys.platform != "win32"))
        child.stdin.close()
        #Check we can read its output
        align = AlignIO.read(child.stdout, "emboss")
        self.assertEqual(len(align), 2)
        self.assertEqual(str(align[0].seq), "ACCCGGGCGCGGT")
        self.assertEqual(str(align[1].seq), "ACCCGAGCGCGGT")
        #Check no error output:
        self.assertEqual(child.stderr.read(), "")
        self.assertEqual(0, child.wait())
        child.stdout.close()
        child.stderr.close()

    def test_needle_file(self):
        """needle with the asis trick, output to a file."""
        #Setup,
        cline = NeedleCommandline(cmd=exes["needle"])
        cline.set_parameter("-asequence", "asis:ACCCGGGCGCGGT")
        cline.set_parameter("-bsequence", "asis:ACCCGAGCGCGGT")
        cline.set_parameter("-gapopen", "10")
        cline.set_parameter("-gapextend", "0.5")
        #EMBOSS would guess this, but let's be explicit:
        cline.set_parameter("-snucleotide", "True")
        cline.set_parameter("-outfile", "Emboss/temp with space.needle")
        self.assertEqual(str(eval(repr(cline))), str(cline))
        #Run the tool,
        stdout, stderr = cline()
        #Check it worked,
        self.assertTrue(stderr.strip().startswith("Needleman-Wunsch global alignment"),
                        stderr)
        self.assertEqual(stdout.strip(), "")
        filename = cline.outfile
        self.assertTrue(os.path.isfile(filename),
                        "Missing output file %r from:\n%s" % (filename, cline))
        #Check we can parse the output...
        align = AlignIO.read(filename, "emboss")
        self.assertEqual(len(align), 2)
        self.assertEqual(str(align[0].seq), "ACCCGGGCGCGGT")
        self.assertEqual(str(align[1].seq), "ACCCGAGCGCGGT")
        #Clean up,
        os.remove(filename)

    def test_needle_piped(self):
        """needle with asis trick, output piped to stdout."""
        cline = NeedleCommandline(cmd=exes["needle"],
                                  asequence="asis:ACCCGGGCGCGGT",
                                  bsequence="asis:ACCCGAGCGCGGT",
                                  gapopen=10, gapextend=0.5,
                                  auto=True, filter=True)
        self.assertEqual(str(cline), exes["needle"] + " -auto -filter" \
                         + " -asequence=asis:ACCCGGGCGCGGT" \
                         + " -bsequence=asis:ACCCGAGCGCGGT" \
                         + " -gapopen=10 -gapextend=0.5")
        #Run the tool,
        child = subprocess.Popen(str(cline),
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True,
                                 shell=(sys.platform != "win32"))
        child.stdin.close()
        #Check we can read its output
        align = AlignIO.read(child.stdout, "emboss")
        self.assertEqual(len(align), 2)
        self.assertEqual(str(align[0].seq), "ACCCGGGCGCGGT")
        self.assertEqual(str(align[1].seq), "ACCCGAGCGCGGT")
        #Check no error output:
        self.assertEqual(child.stderr.read(), "")
        self.assertEqual(0, child.wait())
        child.stdout.close()
        child.stderr.close()

    def test_water_file2(self):
        """water with the asis trick and nucleotide FASTA file, output to a file."""
        #Setup,
        query = "ACACACTCACACACACTTGGTCAGAGATGCTGTGCTTCTTGGAAGCAAGGNCTCAAAGGCAAGGTGCACGCAGAGGGACGTTTGAGTCTGGGATGAAGCATGTNCGTATTATTTATATGATGGAATTTCACGTTTTTATG"
        out_file = "Emboss/temp_test2.water"
        in_file = "Fasta/f002"
        self.assertTrue(os.path.isfile(in_file))
        if os.path.isfile(out_file):
            os.remove(out_file)
        cline = WaterCommandline(cmd=exes["water"])
        cline.set_parameter("-asequence", "asis:%s" % query)
        cline.set_parameter("-bsequence", in_file)
        cline.set_parameter("-gapopen", "10")
        cline.set_parameter("-gapextend", "0.5")
        cline.set_parameter("-outfile", out_file)
        self.assertEqual(str(eval(repr(cline))), str(cline))
        #Run the tool,
        self.run_water(cline)
        #Check we can parse the output and it is sensible...
        self.pairwise_alignment_check(query,
                                      SeqIO.parse(in_file, "fasta"),
                                      AlignIO.parse(out_file, "emboss"),
                                      local=True)
        #Clean up,
        os.remove(out_file)

    def test_water_file3(self):
        """water with the asis trick and GenBank file, output to a file."""
        #Setup,
        query = "TGTTGTAATGTTTTAATGTTTCTTCTCCCTTTAGATGTACTACGTTTGGA"
        out_file = "Emboss/temp_test3.water"
        in_file = "GenBank/cor6_6.gb"
        self.assertTrue(os.path.isfile(in_file))
        if os.path.isfile(out_file):
            os.remove(out_file)
        cline = WaterCommandline(cmd=exes["water"])
        cline.set_parameter("asequence", "asis:%s" % query)
        cline.set_parameter("bsequence", in_file)
        #TODO - Tell water this is a GenBank file!
        cline.set_parameter("gapopen", "1")
        cline.set_parameter("gapextend", "0.5")
        cline.set_parameter("outfile", out_file)
        self.assertEqual(str(eval(repr(cline))), str(cline))
        #Run the tool,
        self.run_water(cline)
        #Check we can parse the output and it is sensible...
        self.pairwise_alignment_check(query,
                                      SeqIO.parse(in_file, "genbank"),
                                      AlignIO.parse(out_file, "emboss"),
                                      local=True)
        #Clean up,
        os.remove(out_file)

    def test_water_file4(self):
        """water with the asis trick and SwissProt file, output to a file."""
        #Setup,
        query = "DVCTGKALCDPVTQNIKTYPVKIENLRVMI"
        out_file = "Emboss/temp_test4.water"
        in_file = "SwissProt/sp004"
        self.assertTrue(os.path.isfile(in_file))
        if os.path.isfile(out_file):
            os.remove(out_file)
        cline = WaterCommandline(cmd=exes["water"])
        cline.set_parameter("-asequence", "asis:%s" % query)
        cline.set_parameter("-bsequence", in_file)
        #EMBOSS should work this out, but let's be explicit:
        cline.set_parameter("-sprotein", True)
        #TODO - Tell water this is a SwissProt file!
        cline.set_parameter("-gapopen", "20")
        cline.set_parameter("-gapextend", "5")
        cline.set_parameter("-outfile", out_file)
        self.assertEqual(str(eval(repr(cline))), str(cline))
        #Run the tool,
        self.run_water(cline)
        #Check we can parse the output and it is sensible...
        self.pairwise_alignment_check(query,
                                      SeqIO.parse(in_file, "swiss"),
                                      AlignIO.parse(out_file, "emboss"),
                                      local=True)
        #Clean up,
        os.remove(out_file)

    def test_needle_piped2(self):
        """needle with asis trick, and nucleotide FASTA file, output piped to stdout."""
        #TODO - Support needle in Bio.Emboss.Applications
        #(ideally with the -auto and -filter arguments)
        #Setup,
        query = "ACACACTCACACACACTTGGTCAGAGATGCTGTGCTTCTTGGAA"
        cline = exes["needle"]
        cline += " -asequence asis:" + query
        cline += " -bsequence Fasta/f002"
        cline += " -auto" #no prompting
        cline += " -filter" #use stdout
        #Run the tool,
        child = subprocess.Popen(str(cline),
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True,
                                 shell=(sys.platform != "win32"))
        child.stdin.close()
        #Check we can parse the output and it is sensible...
        self.pairwise_alignment_check(query,
                                      SeqIO.parse("Fasta/f002", "fasta"),
                                      AlignIO.parse(child.stdout, "emboss"),
                                      local=False)
        #Check no error output:
        self.assertEqual(child.stderr.read(), "")
        self.assertEqual(0, child.wait())
        child.stdout.close()
        child.stderr.close()

    def test_water_needs_output(self):
        """water without output file or stdout/filter should give error."""
        cline = WaterCommandline(cmd=exes["water"],
                                 asequence="asis:ACCCGGGCGCGGT",
                                 bsequence="asis:ACCCGAGCGCGGT",
                                 gapopen=10, gapextend=0.5,
                                 auto=True)
        self.assertTrue(cline.auto)
        self.assertTrue(not cline.stdout)
        self.assertTrue(not cline.filter)
        self.assertEqual(cline.outfile, None)
        self.assertRaises(ValueError, str, cline)

    def test_needle_needs_output(self):
        """needle without output file or stdout/filter should give error."""
        cline = NeedleCommandline(cmd=exes["needle"],
                                  asequence="asis:ACCCGGGCGCGGT",
                                  bsequence="asis:ACCCGAGCGCGGT",
                                  gapopen=10, gapextend=0.5,
                                  auto=True)
        self.assertTrue(cline.auto)
        self.assertTrue(not cline.stdout)
        self.assertTrue(not cline.filter)
        self.assertEqual(cline.outfile, None)
        self.assertRaises(ValueError, str, cline)

    def test_seqtmatchall_piped(self):
        """seqmatchall with pair output piped to stdout."""
        cline = SeqmatchallCommandline(cmd=exes["seqmatchall"],
                                       sequence="Fasta/f002",
                                       aformat="pair", wordsize=9,
                                       auto=True, stdout=True)
        self.assertEqual(str(cline), exes["seqmatchall"] + " -auto -stdout" \
                         + " -sequence=Fasta/f002" \
                         + " -wordsize=9 -aformat=pair")
        #Run the tool,
        child = subprocess.Popen(str(cline),
                                 stdin=subprocess.PIPE,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE,
                                 universal_newlines=True,
                                 shell=(sys.platform != "win32"))
        child.stdin.close()
        #Check we can read its output
        for align in AlignIO.parse(child.stdout, "emboss"):
            self.assertEqual(len(align), 2)
            self.assertEqual(align.get_alignment_length(), 9)
        #Check no error output:
        self.assertEqual(child.stderr.read(), "")
        self.assertEqual(0, child.wait())
        child.stdout.close()
        child.stderr.close()


#Top level function as this makes it easier to use for debugging:
def emboss_translate(sequence, table=None, frame=None):
    """Call transeq, returns protein sequence as string."""
    #TODO - Support transeq in Bio.Emboss.Applications?
    #(doesn't seem worthwhile as Biopython can do translations)
    if not sequence:
        raise ValueError(sequence)
    #Setup,
    cline = exes["transeq"]
    if len(sequence) < 100:
        filename = None
        cline += " -sequence asis:%s" % sequence
    else:
        #There are limits on command line string lengths...
        #use a temp file instead.
        filename = "Emboss/temp_transeq.txt"
        SeqIO.write(SeqRecord(sequence, id="Test"), filename, "fasta")
        cline += " -sequence %s" % filename
    cline += " -auto" #no prompting
    cline += " -filter" #use stdout
    if table is not None:
        cline += " -table %s" % str(table)
    if frame is not None:
        cline += " -frame %s" % str(frame)
    #Run the tool,
    child = subprocess.Popen(str(cline),
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             universal_newlines=True,
                             shell=(sys.platform != "win32"))
    out, err = child.communicate()
    #Check no error output:
    if err != "":
        raise ValueError(str(cline) + "\n" + err)
    #Check we can read its output
    record = SeqIO.read(StringIO(out), "fasta")
    if 0 != child.wait():
        raise ValueError(str(cline))
    if filename:
        os.remove(filename)
        if not record.id.startswith("Test"):
            raise ValueError(str(cline))
    else:
        if not record.id.startswith("asis"):
            raise ValueError(str(cline))
    return str(record.seq)


#Top level function as this makes it easier to use for debugging:
def check_translation(sequence, translation, table=None):
    if table is None:
        t = 1
    else:
        t = table
    if translation != str(sequence.translate(t)) \
    or translation != str(translate(sequence, t)) \
    or translation != translate(str(sequence), t):
        #More details...
        for i, amino in enumerate(translation):
            codon = sequence[i * 3:i * 3 + 3]
            if amino != str(codon.translate(t)):
                raise ValueError("%s -> %s not %s (table %s)" \
                                 % (codon, amino, codon.translate(t), t))
        #Shouldn't reach this line:
        raise ValueError("%s -> %s (table %s)" \
                         % (sequence, translation, t))
    return True


class TranslationTests(unittest.TestCase):
    """Check Bio.Seq translations against EMBOSS transeq."""

    def tearDown(self):
        clean_up()

    def test_simple(self):
        """transeq vs Bio.Seq for simple translations (including alt tables)."""
        examples = [Seq("ACGTGACTGACGTAGCATGCCACTAGG"),
                    #Unambiguous TA? codons:
                    Seq("TAATACTATTAG", generic_dna),
                    #Most of the ambiguous TA? codons:
                    Seq("TANTARTAYTAMTAKTAHTABTADTAV", generic_dna),
                    #Problem cases,
                    #
                    #Seq("TAW", generic_dna),
                    #W = A or T, but EMBOSS does TAW -> X
                    #TAA -> Y, TAT ->Y, so in Biopython TAW -> Y
                    #
                    #Seq("TAS", generic_dna),
                    #S = C or G, but EMBOSS does TAS -> Y
                    #TAG -> *, TAC ->Y, so in Biopython TAS -> X (Y or *)
                    #
                    #Seq("AAS", generic_dna),
                    #On table 9, EMBOSS gives N, we give X.
                    #S = C or G, so according to my reading of
                    #table 9 on the NCBI page, AAC=N, AAG=K
                    #suggesting this is a bug in EMBOSS.
                    #
                    Seq("ACGGGGGGGGTAAGTGGTGTGTGTGTAGT", generic_dna),
                    ]
        for sequence in examples:
            #EMBOSS treats spare residues differently... avoid this issue
            if len(sequence) % 3 != 0:
                sequence = sequence[:-(len(sequence) % 3)]
            self.assertEqual(len(sequence) % 3, 0)
            self.assertTrue(len(sequence) > 0)
            self.check(sequence)

    def check(self, sequence):
        """Compare our translation to EMBOSS's using all tables.

        Takes a Seq object (and a filename containing it)."""
        translation = emboss_translate(sequence)
        self.assertTrue(check_translation(sequence, translation))
        for table in [1, 2, 3, 4, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 21, 22, 23]:
            translation = emboss_translate(sequence, table)
            self.assertTrue(check_translation(sequence, translation, table))
        return True

    def translate_all_codons(self, letters):
        sequence = Seq("".join([c1 + c2 + c3 \
                                for c1 in letters \
                                for c2 in letters \
                                for c3 in letters]),
                       generic_nucleotide)
        self.check(sequence)

    #def test_all_ambig_dna_codons(self):
    #    """transeq vs Bio.Seq on ambiguous DNA codons (inc. alt tables)."""
    #    self.translate_all_codons(ambiguous_dna_letters)

    def test_all_unambig_dna_codons(self):
        """transeq vs Bio.Seq on unambiguous DNA codons (inc. alt tables)."""
        self.translate_all_codons("ATCGatcg")

    def test_all_unambig_rna_codons(self):
        """transeq vs Bio.Seq on unambiguous RNA codons (inc. alt tables)."""
        self.translate_all_codons("AUCGaucg")

    def test_mixed_unambig_rna_codons(self):
        """transeq vs Bio.Seq on unambiguous DNA/RNA codons (inc. alt tables)."""
        self.translate_all_codons("ATUCGatucg")


def clean_up():
    """Fallback clean up method to remove temp files."""
    for filename in os.listdir("Emboss"):
        if filename.startswith("temp_"):
            try:
                os.remove(os.path.join("Emboss", filename))
            except:
                pass

if __name__ == "__main__":
    runner = unittest.TextTestRunner(verbosity=2)
    unittest.main(testRunner=runner)
    clean_up()
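# A minimal standalone sketch of the "asis trick" the tests above lean on:
# EMBOSS tools accept literal sequences as "asis:SEQUENCE", so no input
# files are needed. This is illustrative only, not part of the test suite;
# it assumes the EMBOSS water binary is on the PATH, and the two short
# sequences are invented for the example.
import subprocess
import sys

from Bio import AlignIO
from Bio.Emboss.Applications import WaterCommandline

cline = WaterCommandline(cmd="water",
                         asequence="asis:ACCCGGGCGCGGT",
                         bsequence="asis:ACCCGAGCGCGGT",
                         gapopen=10, gapextend=0.5,
                         auto=True,    # no interactive prompting
                         filter=True)  # write the alignment to stdout
child = subprocess.Popen(str(cline),
                         stdout=subprocess.PIPE,
                         universal_newlines=True,
                         shell=(sys.platform != "win32"))
align = AlignIO.read(child.stdout, "emboss")  # parse straight off the pipe
child.wait()
print(align)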
[]
[]
[ "EMBOSS_ROOT", "LANG" ]
[]
["EMBOSS_ROOT", "LANG"]
python
2
0
packages/jpl/src/java/org/jpl7/test/TestJUnit.java
package org.jpl7.test; import java.math.BigInteger; import java.util.Map; import java.util.NoSuchElementException; import junit.framework.TestCase; import junit.framework.TestSuite; import org.jpl7.Atom; import org.jpl7.Compound; import org.jpl7.Integer; import org.jpl7.JPL; import org.jpl7.JPLException; import org.jpl7.PrologException; import org.jpl7.Query; import org.jpl7.Term; import org.jpl7.Util; import org.jpl7.Variable; import org.jpl7.fli.Prolog; // This class defines all the tests which are run from Java. // It needs junit.framework.TestCase and junit.framework.TestSuite, which are not supplied with JPL. public class TestJUnit extends TestCase { public static final String startup = (System.getenv("SWIPL_BOOT_FILE") == null ? "../../src/swipl.prc" : System.getenv("SWIPL_BOOT_FILE")); public static final String test_jpl = (System.getenv("TEST_JPL") == null ? "test_jpl.pl" : System.getenv("TEST_JPL")); public static final String syntax = (System.getenv("SWIPL_SYNTAX") == null ? "modern" : System.getenv("SWIPL_SYNTAX")); public static final String home = (System.getenv("SWI_HOME_DIR") == null ? "../.." : System.getenv("SWI_HOME_DIR")); public TestJUnit(String name) { // called for each public void test*() // method super(name); } public static junit.framework.Test suite() { if (syntax.equals("traditional")) { JPL.setTraditional(); Prolog.set_default_init_args(new String[] { // "libswipl.dll", "-x", startup, "-f", "none", "libswipl.dll", "-f", "none", "-g", "true", "--traditional", "-q", "--home="+home, "--nosignals" }); } else { Prolog.set_default_init_args(new String[] { // "libswipl.dll", "-x", startup, "-f", "none", "libswipl.dll", "-f", "none", "-g", "true", "-q", "--home="+home, "--nosignals" }); } assertTrue((new Query("consult", new Term[] { new Atom(test_jpl) })).hasSolution()); assertTrue((new Query("use_module(library(jpl))")).hasSolution()); return new TestSuite(TestJUnit.class); } public static void main(String args[]) { junit.textui.TestRunner.run(suite()); } protected void setUp() { } protected void tearDown() { // cleanup code } // supporting code: public static long fac(long n) { // complements // jpl:jpl_test_fac(+integer,-integer); // indirectly supports // testMutualRecursion if (n == 1) { return 1; } else if (n > 1) { return n * ((org.jpl7.Integer) Query .oneSolution("jpl_test_fac(?,F)", new Term[] { new org.jpl7.Integer(n - 1) }).get("F")).longValue(); } else { return 0; } } // the tests; all public void test*() // public void testInfo() { // Term swi = Query.oneSolution("current_prolog_flag(version_data,Swi)").get("Swi"); // System.out.println("version = " + swi.arg(1) + "." + swi.arg(2) + "." 
+ swi.arg(3)); // System.out.println("syntax = " + Query.oneSolution("jpl:jpl_pl_syntax(Syntax)").get("Syntax")); // System.out.println("jpl.jar = " + JPL.version_string() + " " + JPL.jarPath()); // System.out.println("jpl.dll = " + Prolog.get_c_lib_version()); // System.out.println("jpl.pl = " + Query.oneSolution("jpl:jpl_pl_lib_version(V)").get("V").name() + " " // + Query.oneSolution("module_property(jpl, file(F))").get("F").name()); // System.out.println("home = " + Query.oneSolution("current_prolog_flag(home,Home)").get("Home").name()); // } public void testEmptyParentheses() { Term t = Query.oneSolution("T = a()").get("T"); // valid in both // traditional and // modern syntax in SWI // Prolog 7.x assertTrue("T is not bound to an atom", t.isAtom()); assertTrue("the atom's name is not \"a\"", t.name().equals("a")); } public void testIntegerFromByte1() { byte b = (byte) 127; // -128..127 Integer i = new Integer(b); assertTrue(i.intValue() == b); } public void testIntegerFromChar1() { char c = (char) 64; // 0..65535 // System.out.println("c = " + c); Integer i = new Integer(c); assertTrue(i.intValue() == c); } public void testInteger1() { try { Term i = Query.oneSolution("I is 2**40").get("I"); // long but not // int i.intValue(); fail("intValue() of bigger-than-int value failed to throw an exception"); } catch (JPLException e) { if (e.getMessage().endsWith("cannot represent value as an int")) { // OK: an appropriate exception was thrown } else { fail("intValue() of bigger-than-int value threw incorrect JPLException: " + e); } } catch (Exception e) { fail("intValue() of bigger-than-int value threw unexpected class of exception: " + e); } } public void testIterable1() { // System.out.println("iterating over array of solutions"); // for (Map<String, Term> m : Query.allSolutions("current_module(M)")) { // // iterating over array of solutions // System.out.println(m.get("M")); // } // System.out.println(); } public void testIterable2() { // System.out.println("iterating over successively fetched solutions"); // for (Map<String, Term> m : new Query("current_module(M)")) { // // iterating over successively fetched solutions // System.out.println(m.get("M")); // } // System.out.println(); } public void testBigInteger1() { BigInteger a = new BigInteger(Long.toString(51L)); BigInteger b = a.pow(51); // 51**51, too big for a long Term x = Query.oneSolution("X is 51**51").get("X"); assertTrue("X is an org.jpl7.Integer", x.isInteger()); // System.out.println("X.bigValue() = " + x.bigValue().toString()); // System.out.println("b.bigValue() = " + b.toString()); assertTrue("X is a big integer", x.isBigInteger()); assertTrue("X's big value is 51**51", x.bigValue().equals(b)); } public void testBigInteger2() { BigInteger b = new BigInteger("12345678901234567890123456789"); Term i = new Integer(b); // too big for a long Term g = new Compound("is", new Term[] { new Variable("X"), i }); Term x = Query.oneSolution(g).get("X"); assertTrue("X is an org.jpl7.Integer", x.isInteger()); assertTrue("X is a big org.jpl7.Integer", x.isBigInteger()); assertTrue("X's value is as expected", x.bigValue().equals(b)); } public void testCompoundZeroArity1() { Term t = new Compound("foo", new Term[] {}); assertTrue(t.isCompound()); assertFalse(t.isAtom()); assertTrue(t.name().equals("foo")); assertTrue(t.arity() == 0); } public void testCompoundZeroArity2() { Term t = Query.oneSolution("T = foo()").get("T"); // System.out.println("type = " + t.typeName()); assertTrue(t.name().equals("foo")); assertTrue(t.arity() == 0); } // 
public void testCompoundZeroArity3() { // Term t = Query.oneSolution("T = foo()").get("T"); // assertTrue("term is a compound", t.isCompound()); // assertFalse("term is an atom", t.isAtom()); // } public void testMap1() { Map<String, Term> h = Query.oneSolution("p(a,b) = p(X,Y)"); assertTrue(h.get("X").name().equals("a")); assertTrue(h.get("Y").name().equals("b")); } public void testMap2() { Map<String, Term>[] hs = Query.allSolutions("p(a,b) = p(X,Y)"); assertTrue(hs.length == 1); assertTrue(hs[0].get("X").name().equals("a")); assertTrue(hs[0].get("Y").name().equals("b")); } public void testSyntaxSet1() { if (syntax.equals("traditional")) { try { JPL.setTraditional(); // should succeed silently } catch (Exception e) { fail("setTraditional() under traditional syntax threw exception: " + e); } } else { try { JPL.setTraditional(); } catch (JPLException e) { // expected exception class, but is it // correct in detail? if (e.getMessage().endsWith("traditional syntax after Prolog is initialised")) { // OK: an appropriate exception was thrown } else { fail("setTraditional() under modern syntax threw incorrect JPLException: " + e); } } catch (Exception e) { fail("setTraditional() under modern syntax threw unexpected class of exception: " + e); } } } public void testMasstest() { assertTrue((new Query("assert(diagnose_declaration(_,_,_,[not,a,real,error]))")).hasSolution()); } public void testSameLibVersions1() { String java_lib_version = JPL.version_string(); String c_lib_version = Prolog.get_c_lib_version(); assertTrue("java_lib_version(" + java_lib_version + ") is same as c_lib_version(" + c_lib_version + ")", java_lib_version.equals(c_lib_version)); } public void testSameLibVersions2() { String java_lib_version = JPL.version_string(); String pl_lib_version = Query.oneSolution("jpl_pl_lib_version(V)").get("V").name(); assertTrue("java_lib_version(" + java_lib_version + ") is same as pl_lib_version(" + pl_lib_version + ")", java_lib_version.equals(pl_lib_version)); } public void testAtomName1() { String name = "fred"; Atom a = new Atom(name); assertEquals("an Atom's name is that with which it was created", a.name(), name); } public void testAtomName2() { String name = "ha ha"; Atom a = new Atom(name); assertEquals("an Atom's name is that with which it was created", a.name(), name); } public void testAtomName3() { String name = "3"; Atom a = new Atom(name); assertEquals("an Atom's name is that with which it was created", a.name(), name); } public void testAtomToString1() { String name = "fred"; String toString = "fred"; Atom a = new Atom(name); assertEquals("an Atom's .toString() value is quoted iff appropriate", a.toString(), toString); } public void testAtomToString2() { String name = "ha ha"; String toString = "'ha ha'"; Atom a = new Atom(name); assertEquals("an Atom's .toString() value is quoted iff appropriate", a.toString(), toString); } public void testAtomToString3() { String name = "3"; String toString = "'3'"; Atom a = new Atom(name); assertEquals("an Atom's .toString() value is quoted iff appropriate", a.toString(), toString); } public void testAtomArity() { Atom a = new Atom("willy"); assertEquals("an Atom has arity zero", a.arity(), 0); } public void testAtomEquality1() { String name = "fred"; Atom a1 = new Atom(name); Atom a2 = new Atom(name); assertEquals("two Atoms created with the same name are equal", a1, a2); } public void testAtomIdentity() { // how could this fail?! 
String name = "fred"; Atom a1 = new Atom(name); Atom a2 = new Atom(name); assertNotSame("two Atoms created with the same name are not identical", a1, a2); } public void testAtomHasFunctorNameZero() { String name = "sam"; Atom a = new Atom(name); assertTrue("a text atom has a functor whose name is the name of the atom, and whose arity is zero", a.hasFunctor(name, 0)); } public void testAtomHasFunctorWrongName() { assertFalse("an Atom does not have a functor whose name is other than that with which the Atom was created", new Atom("wally").hasFunctor("poo", 0)); } public void testAtomHasFunctorWrongArity() { String name = "ted"; assertFalse("an Atom does not have a functor whose arity is other than zero", new Atom(name).hasFunctor(name, 1)); } public void testVariableBinding1() { Term lhs = new Compound("p", new Term[] { new Variable("X"), new Variable("Y") }); Term rhs = new Compound("p", new Term[] { new Atom("a"), new Atom("b") }); Term goal = new Compound("=", new Term[] { lhs, rhs }); Map<String, Term> soln = new Query(goal).oneSolution(); assertTrue("two Variables with different names can bind to distinct atoms", soln != null && (soln.get("X")).name().equals("a") && (soln.get("Y")).name().equals("b")); } public void testVariableBinding2() { Term lhs = new Compound("p", new Term[] { new Variable("X"), new Variable("X") }); Term rhs = new Compound("p", new Term[] { new Atom("a"), new Atom("b") }); Term goal = new Compound("=", new Term[] { lhs, rhs }); assertFalse("two distinct Variables with same name cannot unify with distinct atoms", new Query(goal).hasSolution()); } public void testVariableBinding3() { Variable X = new Variable("X"); Term lhs = new Compound("p", new Term[] { X, X }); Term rhs = new Compound("p", new Term[] { new Atom("a"), new Atom("b") }); Term goal = new Compound("=", new Term[] { lhs, rhs }); assertFalse("two references to the same (named) Variable cannot unify with differing atoms", new Query(goal).hasSolution()); } public void testVariableBinding4() { Term lhs = new Compound("p", new Term[] { new Variable("_"), new Variable("_") }); Term rhs = new Compound("p", new Term[] { new Atom("a"), new Atom("b") }); Term goal = new Compound("=", new Term[] { lhs, rhs }); assertTrue("two distinct anonymous Variables can unify with distinct atoms", new Query(goal).hasSolution()); } public void testVariableBinding5() { Variable Anon = new Variable("_"); Term lhs = new Compound("p", new Term[] { Anon, Anon }); Term rhs = new Compound("p", new Term[] { new Atom("a"), new Atom("b") }); Term goal = new Compound("=", new Term[] { lhs, rhs }); assertTrue("two references to an anonymous Variable can unify with differing atoms", new Query(goal).hasSolution()); } public void testAtomEquality2() { Atom a = new Atom("a"); assertTrue("two references to an Atom are equal by .equals()", a.equals(a)); } public void testAtomEquality3() { assertTrue("two distinct, same-named Atoms are equal by .equals()", (new Atom("a")).equals(new Atom("a"))); } public void testTextToTerm1() { String text = "fred(B,p(A))"; Term t = Util.textToTerm(text); assertTrue("Util.textToTerm() converts \"fred(B,p(A))\" to a corresponding Term", t.hasFunctor("fred", 2) && t.arg(1).isVariable() && t.arg(1).name().equals("B") && t.arg(2).hasFunctor("p", 1) && t.arg(2).arg(1).isVariable() && t.arg(2).arg(1).name().equals("A")); } public void testArrayToList1() { Term l2 = Util.termArrayToList( new Term[] { new Atom("a"), new Atom("b"), new Atom("c"), new Atom("d"), new Atom("e") }); Query q9 = new Query(new 
Compound("append", new Term[] { new Variable("Xs"), new Variable("Ys"), l2 })); assertTrue("append(Xs,Ys,[a,b,c,d,e]) has 6 solutions", q9.allSolutions().length == 6); } public void testArrayToList2() { String goal = "append(Xs,Ys,[a,b,c,d,e])"; assertTrue(goal + " has 6 solutions", Query.allSolutions(goal).length == 6); } public void testLength1() { Query q5 = new Query(new Compound("length", new Term[] { new Variable("Zs"), new org.jpl7.Integer(2) })); Term zs = q5.oneSolution().get("Zs"); assertTrue("length(Zs,2) binds Zs to a list of two distinct variables " + zs.toString(), zs.isListPair() && zs.arg(1).isVariable() && zs.arg(2).isListPair() && zs.arg(2).arg(1).isVariable() && zs.arg(2).arg(2).isListNil() && !zs.arg(1).name().equals(zs.arg(2).arg(1).name())); } public void testListNil1() { Term x = Query.oneSolution("X = []").get("X"); if (syntax.equals("traditional")) { assertTrue("empty list is text atom []", x.isAtom() && x.atomType().equals("text") && x.name().equals("[]")); } else { assertTrue("empty list is reserved atom []", x.isAtom() && x.atomType().equals("reserved_symbol") && x.name().equals("[]")); } } public void testListCons1() { Term x = Query.oneSolution("X = [a]").get("X"); if (syntax.equals("traditional")) { assertTrue("list constructor is ./2", x.isCompound() && x.name().equals(".")); } else { assertTrue("list constructor is [|]/2", x.isCompound() && x.name().equals("[|]")); } } public void testGenerate1() { // we chickened out of verifying each solution // :-) String goal = "append(Xs,Ys,[_,_,_,_,_])"; assertTrue(goal + " has 6 solutions", Query.allSolutions(goal).length == 6); } public void testPrologException1() { try { new Query("p(]"); // writes junk to stderr and enters debugger // unless flag debug_on_error = false } catch (PrologException e) { assertTrue("new Query(\"p(]\") throws a PrologException " + e.toString(), true); return; } fail("new Query(\"p(]\") oughta throw a PrologException"); } public void testAtom1() { assertTrue("new Atom(\"3 3\")" + (new Atom("3 3")).toString(), true); } public void testTextToTerm2() { String text1 = "fred(?,2,?)"; String text2 = "[first(x,y),A]"; Term plist = Util.textToTerm(text2); Term[] ps = plist.toTermArray(); Term t = Util.textToTerm(text1).putParams(ps); assertTrue("fred(?,2,?) 
.putParams( [first(x,y),A] )", t.hasFunctor("fred", 3) && t.arg(1).hasFunctor("first", 2) && t.arg(1).arg(1).hasFunctor("x", 0) && t.arg(1).arg(2).hasFunctor("y", 0) && t.arg(2).hasFunctor(2, 0) && t.arg(3).isVariable() && t.arg(3).name().equals("A")); } public void testDontTellMeMode1() { final Query q = new Query("setof(_M,current_module(_M),_Ms),length(_Ms,N)"); JPL.setDTMMode(true); assertTrue( "in dont-tell-me mode, setof(_M,current_module(_M),_Ms),length(_Ms,N) returns binding for just one variable", q.oneSolution().keySet().size() == 1); } public void testDontTellMeMode2() { final Query q = new Query("setof(_M,current_module(_M),_Ms),length(_Ms,N)"); JPL.setDTMMode(false); assertTrue( "not in dont-tell-me mode, setof(_M,current_module(_M),_Ms),length(_Ms,N) returns binding for three variables", q.oneSolution().keySet().size() == 3); } public void testModulePrefix1() { assertTrue(Query.hasSolution("call(user:true)")); } private void testMutualRecursion(int n, long f) { // f is the expected // result for fac(n) try { assertEquals("mutual recursive Java<->Prolog factorial: fac(" + n + ") = " + f, fac(n), f); } catch (Exception e) { fail("fac(" + n + ") threw " + e); } } public void testMutualRecursion1() { testMutualRecursion(1, 1); } public void testMutualRecursion2() { testMutualRecursion(2, 2); } public void testMutualRecursion3() { testMutualRecursion(3, 6); } public void testMutualRecursion10() { testMutualRecursion(10, 3628800); } public void testIsJNull1() { Term atNull = new Compound("@", new Term[] { new Atom("null") }); Query q = new Query("=", new Term[] { new Variable("X"), atNull }); assertTrue(q.oneSolution().get("X").isJNull()); } public void testIsJNull2() { Term t = Query.oneSolution("X = @(3)").get("X"); assertFalse("@(3) . isJNull() fails", t.isJNull()); } public void testIsJNull3() { Term t = Query.oneSolution("X = _").get("X"); assertFalse("_ . isJNull() fails", t.isJNull()); } public void testIsJNull4() { Term t = Query.oneSolution("X = @(true)").get("X"); assertFalse("@(true) . isJNull() fails", t.isJNull()); } public void testIsJNull5() { Term t = Query.oneSolution("X = @(false)").get("X"); assertFalse("@(false) . isJNull() fails", t.isJNull()); } public void testIsJTrue1() { Term t = Query.oneSolution("X = @(true)").get("X"); assertTrue("@(true) . isJTrue() succeeds", t.isJTrue()); } public void testIsJTrue2() { Term t = Query.oneSolution("X = @(3)").get("X"); assertFalse("@(3) . isJTrue() fails", t.isJTrue()); } public void testIsJTrue3() { Term t = Query.oneSolution("X = _").get("X"); assertFalse("_ . isJTrue() fails", t.isJTrue()); } public void testIsJTrue4() { Term t = Query.oneSolution("X = @(false)").get("X"); assertFalse("@(false) . isJTrue() fails", t.isJTrue()); } public void testIsJVoid1() { Term t = Query.oneSolution("X = @(void)").get("X"); assertTrue("@(void) . isJVoid() succeeds", t.isJVoid()); } public void testIsJVoid2() { Term t = Query.oneSolution("X = @(3)").get("X"); assertFalse("@(3) . isJVoid() fails", t.isJVoid()); } public void testIsJVoid3() { Term t = Query.oneSolution("X = _").get("X"); assertFalse("_ . 
isJVoid() fails", t.isJVoid()); } public void testTypeName1() { assertEquals("Y = foo binds Y to an Atom", Query.oneSolution("Y = foo").get("Y").typeName(), "Atom"); } public void testTypeName2() { assertEquals("Y = 3.14159 binds Y to a Float", Query.oneSolution("Y = 3.14159").get("Y").typeName(), "Float"); } public void testTypeName4() { assertEquals("Y = 6 binds Y to an Integer", Query.oneSolution("Y = 6").get("Y").typeName(), "Integer"); } public void testTypeName5() { assertEquals("Y = _ binds Y to a Variable", Query.oneSolution("Y = _").get("Y").typeName(), "Variable"); } public void testTypeName3() { assertEquals("Y = f(x) binds Y to a Compound", Query.oneSolution("Y = f(x)").get("Y").typeName(), "Compound"); } public void testGoalWithModulePrefix1() { String goal = "jpl:jpl_modifier_bit(volatile,I)"; assertTrue(goal + " binds I to an integer", Query.oneSolution(goal).get("I").isInteger()); } public void testGoalWithModulePrefix2() { String goal = "user:length([],0)"; assertTrue(goal + " succeeds", Query.hasSolution(goal)); } public void testGoalWithModulePrefix3() { String goal = "3:length([],0)"; try { Query.hasSolution(goal); // should throw exception fail(goal + " (numeric module prefix) didn't throw exception"); // shouldn't // get // to // here } catch (PrologException e) { // expected exception class if (e.term().hasFunctor("error", 2) && e.term().arg(1).hasFunctor("type_error", 2) && e.term().arg(1).arg(1).hasFunctor("atom", 0)) { // OK: an appropriate exception was thrown } else { fail(goal + " (numeric module prefix) threw incorrect PrologException: " + e); } } catch (Exception e) { fail(goal + " (numeric module prefix) threw wrong class of exception: " + e); } } public void testGoalWithModulePrefix4() { String goal = "_:length([],0)"; try { Query.hasSolution(goal); // should throw exception fail(goal + " (unbound module prefix) wrongly succeeded"); // shouldn't // get // to // here } catch (PrologException e) { // expected exception class if (e.term().hasFunctor("error", 2) && e.term().arg(1).hasFunctor("instantiation_error", 0)) { // OK: an appropriate exception was thrown } else { fail(goal + " (unbound module prefix) threw wrong PrologException: " + e); } } catch (Exception e) { fail(goal + " (unbound module prefix) threw wrong exception class: " + e); } } public void testGoalWithModulePrefix5() { String goal = "f(x):length([],0)"; try { Query.hasSolution(goal); // should throw exception fail(goal + " (compound module prefix) wrongly succeeded"); // shouldn't // get // to // here } catch (PrologException e) { // correct exception class if (e.term().hasFunctor("error", 2) && e.term().arg(1).hasFunctor("type_error", 2) && e.term().arg(1).arg(1).hasFunctor("atom", 0)) { // OK: an appropriate exception was thrown } else { fail(goal + " (compound module prefix) threw wrong PrologException: " + e); } } catch (Exception e) { fail(goal + " (compound module prefix) threw wrong exception class: " + e); } } public void testGoalWithModulePrefix6() { String goal = "no_such_module:no_such_predicate(0)"; try { Query.hasSolution(goal); // should throw exception fail(goal + " (nonexistent module prefix) wrongly succeeded"); // shouldn't // get // to // here } catch (PrologException e) { // expected exception class if (e.term().hasFunctor("error", 2) && e.term().arg(1).hasFunctor("existence_error", 2) && e.term().arg(1).arg(1).hasFunctor("procedure", 0)) { // OK: an appropriate exception was thrown } else { fail(goal + " (nonexistent module prefix) threw wrong PrologException: " + e); } 
} catch (Exception e) { fail(goal + " (nonexistent module prefix) threw wrong exception class: " + e); } } // public void testFetchCyclicTerm(){ // assertTrue((new Query("X=f(X)")).hasSolution()); // } public void testFetchLongList0() { assertTrue((new Query("findall(foo(N),between(0,10,N),L)")).hasSolution()); } public void testFetchLongList1() { assertTrue((new Query("findall(foo(N),between(0,100,N),L)")).hasSolution()); } public void testFetchLongList2() { assertTrue((new Query("findall(foo(N),between(0,1000,N),L)")).hasSolution()); } public void testFetchLongList2c() { assertTrue((new Query("findall(foo(N),between(0,1023,N),L)")).hasSolution()); } // public void testFetchLongList2a() { /* leads to stack overflow */ // assertTrue((new // Query("findall(foo(N),between(0,2000,N),L)")).hasSolution()); // } // public void testFetchLongList2b() { // assertTrue((new // Query("findall(foo(N),between(0,3000,N),L)")).hasSolution()); // } // public void testFetchLongList3() { // assertTrue((new // Query("findall(foo(N),between(0,10000,N),L)")).hasSolution()); // } public void testUnicode0() { assertTrue(Query.hasSolution("atom_codes(?,[32])", new Term[] { new Atom(" ") })); } public void testUnicode0a() { assertTrue(Query.hasSolution("atom_codes(?,[32])", new Term[] { new Atom("\u0020") })); } public void testUnicode0b() { assertTrue(Query.hasSolution("atom_codes(?,[0])", new Term[] { new Atom("\u0000") })); } public void testUnicode0c() { assertTrue(Query.hasSolution("atom_codes(?,[1])", new Term[] { new Atom("\u0001") })); } public void testUnicode0d() { assertTrue(Query.hasSolution("atom_codes(?,[127])", new Term[] { new Atom("\u007F") })); } public void testUnicode0e() { assertTrue(Query.hasSolution("atom_codes(?,[128])", new Term[] { new Atom("\u0080") })); } public void testUnicode0f() { assertTrue(Query.hasSolution("atom_codes(?,[255])", new Term[] { new Atom("\u00FF") })); } public void testUnicode0g() { assertTrue(Query.hasSolution("atom_codes(?,[256])", new Term[] { new Atom("\u0100") })); } public void testUnicode1() { assertTrue(Query.hasSolution("atom_codes(?,[0,127,128,255])", new Term[] { new Atom("\u0000\u007F\u0080\u00FF") })); } public void testUnicode2() { assertTrue(Query.hasSolution("atom_codes(?,[256,32767,32768,65535])", new Term[] { new Atom("\u0100\u7FFF\u8000\uFFFF") })); } public void testStringXput1() { Term a = Query.oneSolution("string_concat(foo,bar,S)").get("S"); assertEquals("foobar", a.name()); assertEquals("string", a.atomType()); } public void testStringXput2() { String s1 = "\u0000\u007F\u0080\u00FF"; String s2 = "\u0100\u7FFF\u8000\uFFFF"; String s = s1 + s2; // concatenate in Java Term a = Query.oneSolution("string_concat(?,?,S)", new Term[] { new Atom(s1), new Atom(s2) }).get("S"); // concatenate // in // Prolog assertEquals(s, a.name()); assertEquals("string", a.atomType()); } // public void testMaxInteger1() { // assertEquals(Query.oneSolution("current_prolog_flag(max_integer,I)").get("I").longValue(), // java.lang.Long.MAX_VALUE); // i.e. 
9223372036854775807L // } // public void testSingleton1() { // assertTrue(Query.hasSolution("style_check(-singleton),consult('test_singleton.pl')")); // } public void testStaticQueryInvalidSourceText2() { String goal = "p(]"; try { Query.hasSolution(goal); // should throw exception fail(goal + " (bad syntax) succeeded"); // shouldn't get to here } catch (PrologException e) { // expected exception if (e.term().hasFunctor("error", 2) && e.term().arg(1).hasFunctor("syntax_error", 1) && e.term().arg(1).arg(1).hasFunctor("cannot_start_term", 0)) { // OK: an appropriate exception was thrown } else { fail(goal + " (bad syntax) threw wrong PrologException: " + e); } } catch (Exception e) { fail(goal + " (bad syntax) threw wrong exception class: " + e); } } public void testStaticQueryInvalidSourceText1() { String goal = "bad goal"; try { Query.hasSolution(goal); // should throw exception fail(goal + " (bad syntax) succeeded"); // shouldn't get to here } catch (PrologException e) { // expected exception if (e.term().hasFunctor("error", 2) && e.term().arg(1).hasFunctor("syntax_error", 1) && e.term().arg(1).arg(1).hasFunctor("operator_expected", 0)) { // OK: an appropriate exception was thrown } else { fail(goal + " (bad syntax) threw wrong PrologException: " + e); } } catch (Exception e) { fail(goal + " (bad syntax) threw wrong exception class: " + e); } } public void testStaticQueryNSolutions1() { String goal = "member(X, [0,1,2,3,4,5,6,7,8,9])"; int n = 5; assertTrue("Query.nSolutions(" + goal + ", " + n + ") returns " + n + " solutions", Query.nSolutions(goal, n).length == n); } public void testStaticQueryNSolutions2() { String goal = "member(X, [0,1,2,3,4,5,6,7,8,9])"; int n = 0; assertTrue("Query.nSolutions(" + goal + ", " + n + ") returns " + n + " solutions", Query.nSolutions(goal, n).length == n); } public void testStaticQueryNSolutions3() { String goal = "member(X, [0,1,2,3,4,5,6,7,8,9])"; int n = 20; assertTrue("Query.nSolutions(" + goal + ", " + n + ") returns 10 solutions", Query.nSolutions(goal, n).length == 10); } public void testStaticQueryAllSolutions1() { String goal = "member(X, [0,1,2,3,4,5,6,7,8,9])"; assertTrue("Query.allSolutions(" + goal + ") returns 10 solutions", Query.allSolutions(goal).length == 10); } public void testStaticQueryHasSolution1() { String goal = "memberchk(13, [?,?,?])"; Term[] params = new Term[] { new Integer(12), new Integer(13), new Integer(14) }; assertTrue(Query.hasSolution(goal, params)); } public void testStaticQueryHasSolution2() { String goal = "memberchk(23, [?,?,?])"; Term[] params = new Term[] { new Integer(12), new Integer(13), new Integer(14) }; assertFalse(Query.hasSolution(goal, params)); } public void testUtilListToTermArray1() { String goal = "T = [a,b,c]"; Term list = Query.oneSolution(goal).get("T"); Term[] array = Util.listToTermArray(list); assertTrue(array[2].isAtom() && array[2].name().equals("c")); } public void testTermToTermArray1() { String goal = "T = [a,b,c]"; Term list = Query.oneSolution(goal).get("T"); Term[] array = list.toTermArray(); assertTrue(array[2].isAtom() && array[2].name().equals("c")); } public void testJRef1() { int i = 76543; Integer I = new Integer(i); Query q = new Query("jpl_call(?,intValue,[],I2)", new Term[] { JPL.newJRef(I) }); Term I2 = q.oneSolution().get("I2"); assertTrue(I2.isInteger() && I2.intValue() == i); } public void testBerhhard1() { assertTrue(Query.allSolutions("consult(library('lists'))").length == 1); } public void testWouter1() { // Wouter says this fails under OS X Mavericks // 10.9 x86-64 
		long n = 7381783232223L; // too big for an int
		Compound term = new Compound("is", new Term[] { new Variable("X"), new org.jpl7.Integer(n) });
		Map<String, Term>[] solutions = new Query(term).allSolutions();
		assertEquals(1, solutions.length);
		Map<String, Term> solution = solutions[0];
		assertTrue(solution.containsKey("X"));
		Object result = solution.get("X");
		assertTrue(result instanceof org.jpl7.Integer);
		assertEquals(n, ((org.jpl7.Integer) result).longValue());
	}

	public void testJRef2() {
		int i = 76543;
		Integer I = new Integer(i);
		Query q = new Query("jpl_call(?,intValue,[],I2)", JPL.newJRef(I));
		Term I2 = q.oneSolution().get("I2");
		assertTrue(I2.isInteger() && I2.intValue() == i);
	}

	public void testJRef3() {
		StringBuffer sb = new StringBuffer();
		Query.oneSolution("jpl_call(?,append,['xyz'],_)", new Term[] { JPL.newJRef(sb) });
		assertTrue(sb.toString().equals("xyz"));
	}

	public void testJRef4() {
		Term jrefSB = Query.oneSolution("jpl_new('java.lang.StringBuffer',['abc'],SB)").get("SB");
		assertTrue(jrefSB.isJRef() && ((StringBuffer) jrefSB.object()).toString().equals("abc"));
	}

	public void testJRef5() {
		String token = "foobar345";
		Term a = Query.oneSolution("jpl_new('java.lang.StringBuffer',[?],A)", new Term[] { new Atom(token) }).get("A");
		assertTrue(((java.lang.StringBuffer) (a.object())).toString().equals(token));
	}

	public void testRef6() {
		Term nullJRef = JPL.newJRef(null);
		Object nullObject = nullJRef.object();
		assertNull("JPL null Term yields a null object", nullObject);
	}

	public void testRef7() {
		Term badJRef = new Compound("hello", new Term[] { new Atom("foobar") }); // term hello(foobar)
		try {
			badJRef.object(); // should throw exception
			fail("hello(foobar).object() should have thrown JPLException"); // shouldn't get to here
		} catch (JPLException e) { // expected exception class
			if (e.getMessage().endsWith("term is not a JRef")) {
				// OK: an appropriate exception was thrown
			} else {
				fail("hello(foobar).object() threw wrong JPLException: " + e);
			}
		} catch (Exception e) {
			fail("hello(foobar).object() threw wrong exception class: " + e);
		}
	}

	public void testForeignFrame1() {
		int ls1 = Query.oneSolution("statistics(localused,LS)").get("LS").intValue();
		int ls2 = Query.oneSolution("statistics(localused,LS)").get("LS").intValue();
		assertTrue("local stack size unchanged after query", ls1 == ls2);
	}

	public void testOpenGetClose1() {
		StringBuffer sb = new StringBuffer();
		Query q = new Query("atom_chars(prolog, Cs), member(C, Cs)");
		q.open();
		while (q.hasMoreSolutions()) {
			sb.append(((Atom) q.nextSolution().get("C")).name());
		}
		q.close();
		assertEquals("prolog", sb.toString());
	}

	public void testOpenGetClose2() {
		Query q = new Query("dummy"); // we're not going to open this...
		try {
			q.nextSolution(); // should throw exception (query not open)
			fail("nextSolution() succeeds on unopened Query"); // shouldn't get to here
		} catch (JPLException e) { // expected exception class
			if (e.getMessage().contains("existence_error")) {
				// OK: an appropriate exception was thrown
			} else {
				fail("jpl.Query#nextSolution() threw wrong JPLException: " + e);
			}
		} catch (Exception e) {
			fail("jpl.Query#nextSolution() threw wrong exception class: " + e);
		}
	}

	public void testOpen1() {
		Query q = new Query("dummy");
		assertTrue("a newly created query is not open", !q.isOpen());
	}

	public void testOpen2() {
		Query q = new Query("fail");
		q.open();
		assertTrue("a newly opened query which has no solutions is open", q.isOpen());
	}

	public void testGetSolution1() {
		Query q = new Query("fail");
		q.open();
		if (q.hasMoreSolutions())
			q.nextSolution();
		assertTrue("A query has exhausted all solutions but it is still open", !q.isOpen());
	}

	public void testGetSolution2() {
		Query q = new Query("fail"); // this query has no solutions
		q.open(); // this opens the query
		try {
			q.nextSolution(); // this call is invalid, as the query has no solutions
			fail("jpl.Query#nextSolution() should have thrown NoSuchElementException"); // shouldn't get to here
		} catch (NoSuchElementException e) {
			// all good, the right exception was thrown
		} catch (Exception e) {
			fail("jpl.Query#nextSolution() threw wrong class of exception: " + e);
		}
	}

	public void testHasMoreSolutions1() {
		StringBuffer sb = new StringBuffer();
		Query q = new Query("atom_chars(prolog, Cs), member(C, Cs)");
		Map<String, Term> soln;
		q.open();
		while (q.hasMoreSolutions()) {
			soln = q.nextSolution();
			sb.append(((Atom) soln.get("C")).name());
		}
		q.close();
		assertEquals("Query#hasMoreSolutions() + Query#nextSolution() work as intended", "prolog", sb.toString());
	}

	@SuppressWarnings("unchecked")
	public void testHasMoreElements1() {
		StringBuffer sb = new StringBuffer();
		Query q = new Query("atom_chars(prolog, Cs), member(C, Cs)");
		Map<String, Term> soln;
		q.open();
		while (q.hasMoreElements()) {
			soln = (Map<String, Term>) q.nextElement();
			sb.append(((Atom) soln.get("C")).name());
		}
		q.close();
		assertEquals("Query#hasMoreElements() + Query#nextElement() work as intended", "prolog", sb.toString());
	}

	public void testStackedQueries1() {
		StringBuffer sb = new StringBuffer();
		Query q = new Query("atom_chars(prolog, Cs), member(C, Cs)");
		Map<String, Term> soln;
		q.open();
		while (q.hasMoreSolutions()) {
			soln = q.nextSolution();
			Atom a = (Atom) soln.get("C");
			if (Query.hasSolution("memberchk(?, [l,o,r])", new Term[] { a })) {
				// this query opens and closes while an earlier query is still open
				sb.append(((Atom) soln.get("C")).name());
			}
		}
		assertTrue(!q.isOpen()); // q will have been closed by solution exhaustion
		assertEquals("rolo", sb.toString());
	}
}
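// A minimal standalone sketch of the two Query usage styles the suite above
// exercises: one-shot retrieval of a bindings Map, and iterating solutions.
// Illustrative only, not part of the test class; it assumes SWI-Prolog and
// JPL are initialised as in suite(), and the class name is invented.
import java.util.Map;

import org.jpl7.Query;
import org.jpl7.Term;

public class QueryPatternsSketch {
	public static void main(String[] args) {
		// one-shot: get a single solution's bindings as a Map
		Map<String, Term> soln = Query.oneSolution("X is 6*7");
		System.out.println("X = " + soln.get("X"));

		// iterated: open the query, then fetch solutions one at a time
		Query q = new Query("member(C, [p,r,o,l,o,g])");
		q.open();
		while (q.hasMoreSolutions()) {
			System.out.println("C = " + q.nextSolution().get("C"));
		}
	}
}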
[ "\"SWIPL_BOOT_FILE\"", "\"SWIPL_BOOT_FILE\"", "\"TEST_JPL\"", "\"TEST_JPL\"", "\"SWIPL_SYNTAX\"", "\"SWIPL_SYNTAX\"", "\"SWI_HOME_DIR\"", "\"SWI_HOME_DIR\"" ]
[]
[ "SWIPL_SYNTAX", "TEST_JPL", "SWIPL_BOOT_FILE", "SWI_HOME_DIR" ]
[]
["SWIPL_SYNTAX", "TEST_JPL", "SWIPL_BOOT_FILE", "SWI_HOME_DIR"]
java
4
0
tests/ci/split_build_smoke_check.py
#!/usr/bin/env python3

import os
import logging
import json
import subprocess

from github import Github

from s3_helper import S3Helper
from get_robot_token import get_best_robot_token
from pr_info import PRInfo
from build_download_helper import download_shared_build
from upload_result_helper import upload_results
from docker_pull_helper import get_image_with_version
from commit_status_helper import post_commit_status

DOCKER_IMAGE = "clickhouse/split-build-smoke-test"
DOWNLOAD_RETRIES_COUNT = 5
RESULT_LOG_NAME = "run.log"
CHECK_NAME = 'Split build smoke test (actions)'


def process_result(result_folder, server_log_folder):
    status = "success"
    description = 'Server started and responded'
    summary = [("Smoke test", "OK")]
    with open(os.path.join(result_folder, RESULT_LOG_NAME), 'r') as run_log:
        lines = run_log.read().split('\n')
        if not lines or lines[0].strip() != 'OK':
            status = "failure"
            logging.info("Log lines are not OK: %s", '\n'.join(lines))
            summary = [("Smoke test", "FAIL")]
            description = 'Server failed to respond, see result in logs'

    result_logs = []
    server_log_path = os.path.join(server_log_folder, "clickhouse-server.log")
    stderr_log_path = os.path.join(result_folder, "stderr.log")
    client_stderr_log_path = os.path.join(result_folder, "clientstderr.log")
    run_log_path = os.path.join(result_folder, RESULT_LOG_NAME)

    for path in [server_log_path, stderr_log_path, client_stderr_log_path, run_log_path]:
        if os.path.exists(path):
            result_logs.append(path)

    return status, description, summary, result_logs


def get_run_command(build_path, result_folder, server_log_folder, docker_image):
    return f"docker run --network=host --volume={build_path}:/package_folder" \
           f" --volume={server_log_folder}:/var/log/clickhouse-server" \
           f" --volume={result_folder}:/test_output" \
           f" {docker_image} >{result_folder}/{RESULT_LOG_NAME}"


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    temp_path = os.getenv("TEMP_PATH", os.path.abspath("."))
    repo_path = os.getenv("REPO_COPY", os.path.abspath("../../"))
    reports_path = os.getenv("REPORTS_PATH", "./reports")

    with open(os.getenv('GITHUB_EVENT_PATH'), 'r', encoding='utf-8') as event_file:
        event = json.load(event_file)

    pr_info = PRInfo(event)

    gh = Github(get_best_robot_token())

    for root, _, files in os.walk(reports_path):
        for f in files:
            if f == 'changed_images.json':
                images_path = os.path.join(root, 'changed_images.json')
                break

    docker_image = get_image_with_version(reports_path, DOCKER_IMAGE)

    packages_path = os.path.join(temp_path, "packages")
    if not os.path.exists(packages_path):
        os.makedirs(packages_path)

    download_shared_build(CHECK_NAME, reports_path, packages_path)

    server_log_path = os.path.join(temp_path, "server_log")
    if not os.path.exists(server_log_path):
        os.makedirs(server_log_path)

    result_path = os.path.join(temp_path, "result_path")
    if not os.path.exists(result_path):
        os.makedirs(result_path)

    run_command = get_run_command(packages_path, result_path, server_log_path, docker_image)

    logging.info("Going to run command %s", run_command)
    with subprocess.Popen(run_command, shell=True) as process:
        retcode = process.wait()
        if retcode == 0:
            logging.info("Run successfully")
        else:
            logging.info("Run failed")

    subprocess.check_call(f"sudo chown -R ubuntu:ubuntu {temp_path}", shell=True)

    print("Result path", os.listdir(result_path))
    print("Server log path", os.listdir(server_log_path))

    state, description, test_results, additional_logs = process_result(result_path, server_log_path)
    s3_helper = S3Helper('https://s3.amazonaws.com')
    report_url = upload_results(s3_helper, pr_info.number, pr_info.sha, test_results, additional_logs, CHECK_NAME)
    print(f"::notice ::Report url: {report_url}")
    post_commit_status(gh, pr_info.sha, CHECK_NAME, description, state, report_url)
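# Illustrative only: what get_run_command() above assembles, shown with
# made-up paths and tag (this block is a sketch, not part of the CI job):
#
#   >>> get_run_command("/tmp/packages", "/tmp/result", "/tmp/server_log",
#   ...                 "clickhouse/split-build-smoke-test:latest")
#   'docker run --network=host --volume=/tmp/packages:/package_folder
#    --volume=/tmp/server_log:/var/log/clickhouse-server
#    --volume=/tmp/result:/test_output
#    clickhouse/split-build-smoke-test:latest >/tmp/result/run.log'
#
# The command is returned as a single line; it is wrapped here for
# readability. Note the trailing shell redirection, which is why the
# script runs it with subprocess.Popen(..., shell=True).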
[]
[]
[ "REPO_COPY", "TEMP_PATH", "REPORTS_PATH", "GITHUB_EVENT_PATH" ]
[]
["REPO_COPY", "TEMP_PATH", "REPORTS_PATH", "GITHUB_EVENT_PATH"]
python
4
0
oneflow/python/test/ops/test_dim_gather_dynamic.py
""" Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import unittest import numpy as np import oneflow as flow import oneflow.typing as oft import os def gen_gather_test_sample(input_shape, index_shape, dim, is_float=True): def _np_dim_scatter_add(src, dim, index, outshape): output = np.zeros(outshape) for srcidx in range(0, src.size): outcoord = np.unravel_index(srcidx, src.shape) outcoord = [*outcoord] outcoord[dim] = index[np.unravel_index(srcidx, index.shape)] output_offset = np.ravel_multi_index(outcoord, outshape) output[np.unravel_index(output_offset, outshape)] += src[ np.unravel_index(srcidx, src.shape) ] return output if is_float: input = np.random.random(input_shape) else: input = np.random.randint(0, 100, input_shape) index = np.random.randint(0, input_shape[dim], index_shape) output = np.take_along_axis(input, index, dim) grad = _np_dim_scatter_add(np.ones_like(output), dim, index, input_shape) ret = {"input": input, "index": index, "dim": dim, "output": output, "grad": grad} return ret def _make_dim_gather_fn(test_case, sample, datashape): flow.clear_default_session() func_config = flow.FunctionConfig() func_config.default_data_type(flow.float32) func_config.default_logical_view(flow.scope.mirrored_view()) func_config.default_placement_scope(flow.scope.placement("gpu", "0:0")) def _compare_diff(blob: oft.ListNumpy): test_case.assertTrue(np.allclose(sample["grad"], blob[0])) @flow.global_function(type="train", function_config=func_config) def DynamicDimGatherJob( params_def: oft.ListNumpy.Placeholder(datashape, dtype=flow.float32), index_def: oft.ListNumpy.Placeholder(datashape, dtype=flow.int32), ) -> oft.ListNumpy: x_var = flow.get_variable( "input", shape=(1,), dtype=flow.float32, initializer=flow.constant_initializer(0), ) x_var = flow.cast_to_current_logical_view(x_var) x = x_var + params_def y = flow.dim_gather(x, sample["dim"], index_def) flow.optimizer.SGD( flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0 ).minimize(y) flow.watch_diff(x, _compare_diff) return y return DynamicDimGatherJob def _compare_dim_gather_with_samples(test_case, inputshape, indexshape, dim, maxshape): sample = gen_gather_test_sample((inputshape), indexshape, dim) dynamic_dim_gather = _make_dim_gather_fn(test_case, sample, maxshape) out = dynamic_dim_gather([sample["input"]], [sample["index"]])[0] test_case.assertTrue( np.allclose(out, sample["output"].astype(np.float32), 1e-3, 1e-3) ) @flow.unittest.skip_unless_1n1d() class TestDynamicDimGather(flow.unittest.TestCase): @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases") def test_dynamic_dim_gather(test_case): if flow.eager_execution_enabled(): print("\nSkip under erger mode!") return _compare_dim_gather_with_samples( test_case, inputshape=(2, 2), indexshape=(2, 2), dim=1, maxshape=(10, 10) ) _compare_dim_gather_with_samples( test_case, inputshape=(2, 2), indexshape=(2, 2), dim=0, maxshape=(10, 10) ) _compare_dim_gather_with_samples( test_case, inputshape=(4, 4, 3), indexshape=(4, 1, 3), 
dim=1, maxshape=(10, 10, 10), ) if __name__ == "__main__": unittest.main()
[]
[]
[ "ONEFLOW_TEST_CPU_ONLY" ]
[]
["ONEFLOW_TEST_CPU_ONLY"]
python
1
0
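The test above derives both its expected forward output and its expected gradient purely in numpy: `np.take_along_axis` models `dim_gather`, and scattering ones back through the same indices models the backward pass. A self-contained restatement of that reference computation (shapes chosen arbitrarily for illustration):

import numpy as np

x = np.arange(12, dtype=np.float64).reshape(3, 4)
idx = np.random.randint(0, 4, size=(3, 2))

out = np.take_along_axis(x, idx, axis=1)  # forward: dim_gather along dim=1

# Backward: each gathered element sends a gradient of 1 to its source slot;
# np.add.at accumulates duplicates, matching _np_dim_scatter_add() above.
grad = np.zeros_like(x)
np.add.at(grad, (np.arange(3)[:, None], idx), 1.0)

assert grad.sum() == idx.size  # every index scattered exactly one unit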
src/com/xilinx/rapidwright/edif/EDIFNetlist.java
/* * * Copyright (c) 2017 Xilinx, Inc. * All rights reserved. * * Author: Chris Lavin, Xilinx Research Labs. * * This file is part of RapidWright. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ /** * */ package com.xilinx.rapidwright.edif; import java.io.BufferedWriter; import java.io.FileNotFoundException; import java.io.FileWriter; import java.io.IOException; import java.text.SimpleDateFormat; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.LinkedList; import java.util.List; import java.util.Map; import java.util.Map.Entry; import java.util.regex.Pattern; import java.util.Queue; import java.util.Set; import com.xilinx.rapidwright.design.Design; import com.xilinx.rapidwright.design.Net; import com.xilinx.rapidwright.design.Unisim; import com.xilinx.rapidwright.device.Device; import com.xilinx.rapidwright.device.Series; import com.xilinx.rapidwright.tests.CodePerfTracker; import com.xilinx.rapidwright.util.FileTools; import com.xilinx.rapidwright.util.MessageGenerator; /** * Top level object for a (logical) EDIF netlist. * * Created on: May 11, 2017 */ public class EDIFNetlist extends EDIFName { private Map<String, EDIFLibrary> libraries; private EDIFDesign design; private EDIFCellInst topCellInstance = null; private List<String> comments; private Map<String,EDIFPropertyValue> metax; private Map<String,String> parentNetMap; private Map<String, ArrayList<EDIFHierPortInst>> physicalNetPinMap; protected int nameSpaceUniqueCount = 0; private transient Device device; private Set<String> primsToRemoveOnCollapse = new HashSet<String>(); private boolean DEBUG = false; public EDIFNetlist(String name){ super(name); init(); } protected EDIFNetlist(){ init(); } private void init(){ libraries = getNewMap(); comments = new ArrayList<>(); metax = getNewMap(); } /** * Adds date and username build comments such as: * (comment "Built on 'Mon May 1 15:17:36 PDT 2017'") * (comment "Built by 'clavin'") */ public void generateBuildComments(){ addComment("Built on '"+FileTools.getTimeString()+"'"); addComment("Built by '"+System.getenv().get("USER")+"'"); } /** * Adds the library to this netlist. Checks for naming collisions * and throws a RuntimeException if it occurs. * @param library The library to add. * @return The library that was added. 
*/ public EDIFLibrary addLibrary(EDIFLibrary library){ library.setNetlist(this); EDIFLibrary collision = libraries.put(library.getName(), library); if(collision != null){ throw new RuntimeException("ERROR: EDIFNetlist already has " + "library named " + library.getName() ); } return library; } public EDIFLibrary getLibrary(String name){ return libraries.get(name); } public EDIFLibrary getHDIPrimitivesLibrary(){ EDIFLibrary primLib = libraries.get(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME); if(primLib == null){ primLib = addLibrary(new EDIFLibrary(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME)); } return primLib; } /** * Will create or get the specified unisim cell and ensure it is added to the HDI * primitives library. If the cell is already in the library, it will simply get it * and return it. * @param unisim The desired Unisim cell type. * @return The current unisim cell in the HDI primitive library for this netlist. */ public EDIFCell getHDIPrimitive(Unisim unisim){ EDIFLibrary lib = getHDIPrimitivesLibrary(); EDIFCell cell = lib.getCell(unisim.name()); if(cell == null){ cell = Design.getUnisimCell(unisim); } return lib.addCell(cell); } public EDIFLibrary getWorkLibrary(){ EDIFLibrary primLib = libraries.get(EDIFTools.EDIF_LIBRARY_WORK_NAME); if(primLib == null){ primLib = addLibrary(new EDIFLibrary(EDIFTools.EDIF_LIBRARY_WORK_NAME)); } return primLib; } public EDIFLibrary removeLibrary(String name){ return libraries.remove(name); } public void renameNetlistAndTopCell(String newName){ this.setName(newName); this.updateEDIFRename(); design.setName(newName); design.updateEDIFRename(); design.getTopCell().setName(newName); design.getTopCell().updateEDIFRename(); if(topCellInstance != null){ topCellInstance.setName(newName); topCellInstance.updateEDIFRename(); } } public void removeUnusedCellsFromWorkLibrary(){ HashMap<String,EDIFCell> cellsToRemove = new HashMap<>(getWorkLibrary().getCellMap()); cellsToRemove.remove(getTopCell().getLegalEDIFName()); for(EDIFHierCellInst i : getAllDescendants("", null, false)){ if(i.getCellType().getLibrary().getName().equals(EDIFTools.EDIF_LIBRARY_WORK_NAME)){ cellsToRemove.remove(i.getCellType().getLegalEDIFName()); } } for(String name : cellsToRemove.keySet()){ getWorkLibrary().removeCell(name); } } /** * Iterates through libraries to find first cell with matching name and * returns it. * @param legalEdifName The legal EDIF name of the cell to find. * @return The first occurring cell with the provided name. */ public EDIFCell getCell(String legalEdifName){ for(EDIFLibrary lib : getLibraries()){ EDIFCell c = lib.getCell(legalEdifName); if(c != null) return c; } return null; } /** * @return the design */ public EDIFDesign getDesign() { return design; } /** * @param design the design to set */ public void setDesign(EDIFDesign design) { this.design = design; } public Device getDevice() { return device; } public void setDevice(Device device) { this.device = device; } public EDIFCell getTopCell(){ return design.getTopCell(); } public EDIFCellInst getTopCellInst(){ if(topCellInstance == null){ topCellInstance = getTopCell().createCellInst("top", null); } return topCellInstance; } public boolean addComment(String comment){ return comments.add(comment); } public EDIFPropertyValue addMetax(String key, EDIFPropertyValue value){ return metax.put(key, value); } /** * @return the comments */ public List<String> getComments() { return comments; } /** * Migrates all cells in the provided library * into the standard work library. 
* @param library The library with cells to be migrated to work. */ public void migrateToWorkLibrary(String library) { EDIFLibrary work = getWorkLibrary(); EDIFLibrary oldWork = getLibrary(library); List<EDIFCell> toRemove = new ArrayList<>(oldWork.getCells()); for (EDIFCell c : toRemove) { work.addCell(c); oldWork.removeCell(c); } removeLibrary(library); } /** * Migrates all libraries except HDI primitives and work to * the work library. */ public void consolidateAllToWorkLibrary() { List<EDIFLibrary> librariesToMigrate = new ArrayList<>(); for (EDIFLibrary l : getLibraries()) { if (!l.isHDIPrimitivesLibrary() && !l.isWorkLibrary()) { librariesToMigrate.add(l); } } for (EDIFLibrary l : librariesToMigrate) { migrateToWorkLibrary(l.getName()); } } public void migrateCellAndSubCells(EDIFCell cell){ Queue<EDIFCell> cells = new LinkedList<>(); cells.add(cell); while(!cells.isEmpty()){ EDIFCell curr = cells.poll(); EDIFLibrary destLib = getLibrary(curr.getLibrary().getName()); if(destLib == null){ if(curr.getLibrary().getName().equals(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME)){ destLib = getHDIPrimitivesLibrary(); }else{ destLib = getWorkLibrary(); } } if(!destLib.containsCell(curr)){ destLib.addCell(curr); } for(EDIFCellInst inst : curr.getCellInsts()){ cells.add(inst.getCellType()); } } } public void migrateCellAndSubCells(EDIFCell cell, boolean uniqueifyCollisions){ if (!uniqueifyCollisions){ migrateCellAndSubCells(cell); return; } Queue<EDIFCell> cells = new LinkedList<>(); // which contains cells that have been added to libraries but whose subcells haven't. //Step 1: add the top cell to the library. //If the top cell belongs to HDIPrimitivesLibrary && the top cell exists in HDIPrimitivesLibrary, return and do nothing. //Otherwise, the code would add the top cell to the library; if repeat happens, using "parameterized" suffix to distinguish EDIFLibrary destLibTop = getLibrary(cell.getLibrary().getName()); if(destLibTop == null){ if(cell.getLibrary().getName().equals(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME)){ destLibTop = getHDIPrimitivesLibrary(); }else{ destLibTop = getWorkLibrary(); } } if (destLibTop.containsCell(cell) && destLibTop.getName().equals(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME)) return; int i=0; String currentCellName = cell.getName(); while (destLibTop.containsCell(cell)) { cell.setName(currentCellName + "_parameterized" + i); cell.setView(currentCellName + "_parameterized" + i); cell.updateEDIFRename(); i++; } destLibTop.addCell(cell); cells.add(cell); //Step 2: add the subcells, subsubcells... to the library. //Do it like before, but updating the celltype of each cellInst should be noticed. 
while(!cells.isEmpty()){ EDIFCell pollFromCells = cells.poll(); for(EDIFCellInst inst : pollFromCells.getCellInsts()) { EDIFCell instCellType = inst.getCellType(); EDIFLibrary destLibSub = getLibrary(instCellType.getLibrary().getName()); if (destLibSub == null) { if (instCellType.getLibrary().getName().equals(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME)) { destLibSub = getHDIPrimitivesLibrary(); } else { destLibSub = getWorkLibrary(); } } if (destLibSub.containsCell(instCellType) && destLibSub.getName().equals(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME)) continue; i=0; currentCellName = instCellType.getName(); while (destLibSub.containsCell(instCellType)) { instCellType.setName(currentCellName + "_parameterized" + i); instCellType.setView(currentCellName + "_parameterized" + i); instCellType.updateEDIFRename(); i++; } inst.setCellType(instCellType); // updating the celltype, which could be changed due to adding suffix destLibSub.addCell(instCellType); cells.add(instCellType); } } } /** * Will change the netlist name and top cell and instance name. * @param newName New name for the netlist */ public void changeTopName(String newName){ this.setName(newName); this.design.setName(newName); EDIFCell top = this.design.getTopCell(); EDIFLibrary lib = top.getLibrary(); top.getLibrary().removeCell(top); top.setName(newName); lib.addCell(top); } /** * @return the libraries */ public Map<String, EDIFLibrary> getLibrariesMap() { return libraries; } public Collection<EDIFLibrary> getLibraries(){ return libraries.values(); } public void exportEDIF(String fileName){ BufferedWriter bw = null; //for(EDIFLibrary lib : getLibraries()){ // lib.ensureValidEDIFCellNames(); //} try { bw = new BufferedWriter(new FileWriter(fileName)); bw.write("(edif "); exportEDIFName(bw); bw.write("\n"); bw.write(" (edifversion 2 0 0)\n"); bw.write(" (edifLevel 0)\n"); bw.write(" (keywordmap (keywordlevel 0))\n"); bw.write("(status\n"); bw.write(" (written\n"); bw.write(" (timeStamp "); SimpleDateFormat formatter = new SimpleDateFormat("yyyy MM dd HH mm ss"); bw.write(formatter.format(new java.util.Date())); bw.write(")\n"); bw.write(" (program \""+Device.FRAMEWORK_NAME+"\" (version \"" + Device.RAPIDWRIGHT_VERSION + "\"))\n"); for(String comment : getComments()){ bw.write(" (comment \""); bw.write(comment); bw.write("\")\n"); } for(Entry<String,EDIFPropertyValue> e : metax.entrySet()){ bw.write("(metax "); bw.write(e.getKey()); bw.write(" "); e.getValue().writeEDIFString(bw); bw.write(")\n"); } bw.write(" )\n"); bw.write(")\n"); getHDIPrimitivesLibrary().exportEDIF(bw); for(EDIFLibrary lib : getLibrariesMap().values()){ if(lib.getName().equals(EDIFTools.EDIF_LIBRARY_HDI_PRIMITIVES_NAME)) continue; lib.exportEDIF(bw); } bw.write("(comment \"Reference To The Cell Of Highest Level\")\n\n"); bw.write(" (design "); EDIFDesign design = getDesign(); design.exportEDIFName(bw); bw.write("\n (cellref " + design.getTopCell().getLegalEDIFName() + " (libraryref "); bw.write(design.getTopCell().getLibrary().getLegalEDIFName() +"))\n"); design.exportEDIFProperties(bw, " "); bw.write(" )\n"); bw.write(")\n"); bw.flush(); bw.close(); } catch (IOException e) { MessageGenerator.briefError("ERROR: Failed to export EDIF file " + fileName); e.printStackTrace(); } } /** * Based on a hierarchical string, this method will get the instance corresponding * to the name provided. * @param name Hierarchical name of the instance, for example: 'clk_wiz/inst/bufg0' * @return The instance corresponding to the provided name. 
If the name string is empty, * it returns the top cell instance. */ public EDIFCellInst getCellInstFromHierName(String name){ EDIFCellInst currInst = getTopCellInst(); if(name.equals("")) return currInst; String[] parts = name.split(EDIFTools.EDIF_HIER_SEP); for(int i=0; i < parts.length; i++){ EDIFCellInst checkInst = currInst.getCellType().getCellInst(parts[i]); // Someone named their instance with hierarchy separators, joy! if(checkInst == null){ StringBuilder sb = new StringBuilder(parts[i]); i++; while(checkInst == null && i < parts.length){ sb.append(EDIFTools.EDIF_HIER_SEP); sb.append(parts[i]); checkInst = currInst.getCellType().getCellInst(sb.toString()); if(checkInst == null) i++; } } currInst = checkInst; } return currInst; } /** * Based on a hierarchical string name, this method gets and returns the net inside * the instance. * @param netName The hierarchical name of the net to get, for example: 'inst0/inst1/inst2/net0' * @return The hierarchical net, or null if none could be found. */ public EDIFNet getNetFromHierName(String netName){ EDIFHierNet net = getHierNetFromName(netName); return net == null ? null : net.getNet(); } /** * Gets the hierarchical port instance object from the full name. * @param hierPortInstName Full hierarchical name of the port instance. * @return The port instance of interest or null if none could be found. */ public EDIFHierPortInst getHierPortInstFromName(String hierPortInstName){ String instName = ""; String localPortName = hierPortInstName; int lastSep = hierPortInstName.lastIndexOf(EDIFTools.EDIF_HIER_SEP); if(lastSep != -1){ instName = hierPortInstName.substring(0,lastSep); localPortName = hierPortInstName.substring(lastSep+1); } EDIFCellInst inst = getCellInstFromHierName(instName); if(inst == null) return null; EDIFPortInst port = inst.getPortInst(localPortName); if(port == null) return null; String parentInstName = getHierParentName(instName); EDIFHierPortInst hierPortInst = new EDIFHierPortInst(parentInstName,port); return hierPortInst; } /** * Looks at the hierarchical name and returns the parent or instance above. For example: * "block0/operator0" -> "block0"; "block0" -> ""; "" -> "" * @param hierReferenceName Hierarchical reference name * @return */ private String getHierParentName(String hierReferenceName){ if(hierReferenceName == null) return null; if(hierReferenceName.length() == 0) return hierReferenceName; int lastSep = hierReferenceName.lastIndexOf(EDIFTools.EDIF_HIER_SEP); if(lastSep != -1){ return hierReferenceName.substring(0,lastSep); } return ""; } /** * Gets the hierarchical net from the netname provided. Returns the wrapped EDIFNet, with the hierarchical * String in {@link EDIFHierNet}. * @param netName Full hierarchical name of the net to retrieve. * @return The absolute net with hierarchical name, or null if none could be found. */ public EDIFHierNet getHierNetFromName(String netName){ String instName = ""; String localNetName = netName; int lastSep = netName.lastIndexOf(EDIFTools.EDIF_HIER_SEP); if(lastSep != -1){ instName = netName.substring(0,lastSep); localNetName = netName.substring(lastSep+1); } EDIFCellInst i = getCellInstFromHierName(instName); EDIFNet net = i == null ? 
null : i.getCellType().getNet(localNetName); if(i == null || net == null){ // Maybe instance or net name contains '/', try a few different alternatives while(net == null && instName.contains(EDIFTools.EDIF_HIER_SEP)){ lastSep = instName.lastIndexOf(EDIFTools.EDIF_HIER_SEP); instName = netName.substring(0,lastSep); localNetName = netName.substring(lastSep+1); i = getCellInstFromHierName(instName); net = i == null ? null : i.getCellType().getNet(localNetName); } if(net == null){ return null; } } EDIFHierNet an = new EDIFHierNet(instName, net); return an; } public Net getPhysicalNetFromPin(String parentHierInstName, EDIFPortInst p, Design d){ String hierarchicalNetName = null; if(parentHierInstName.equals("")){ hierarchicalNetName = p.getNet().getName(); }else{ hierarchicalNetName = parentHierInstName + EDIFTools.EDIF_HIER_SEP + p.getNet().getName(); } if(hierarchicalNetName.equals(EDIFTools.LOGICAL_GND_NET_NAME)) return d.getGndNet(); if(hierarchicalNetName.equals(EDIFTools.LOGICAL_VCC_NET_NAME)) return d.getVccNet(); Map<String,String> parentNetMap = getParentNetMap(); String parentNetName = parentNetMap.get(hierarchicalNetName); Net n = d.getNet(parentNetName); if(n == null){ if(parentNetName == null){ // Maybe it is GND/VCC List<EDIFPortInst> src = p.getNet().getSourcePortInsts(false); if(src.size() > 0 && src.get(0).getCellInst() != null){ String cellType = src.get(0).getCellInst().getCellType().getName(); if(cellType.equals("GND")) return d.getGndNet(); if(cellType.equals("VCC")) return d.getVccNet(); } } EDIFNet logicalNet = getNetFromHierName(parentNetName); List<EDIFPortInst> eprList = logicalNet.getSourcePortInsts(false); if(eprList.size() > 1) throw new RuntimeException("ERROR: Bad assumption on net, has two sources."); if(eprList.size() == 1){ String cellTypeName = eprList.get(0).getCellInst().getCellType().getName(); if(cellTypeName.equals("GND")){ return d.getGndNet(); }else if(cellTypeName.equals("VCC")){ return d.getVccNet(); } } // If size is 0, assume top level port in an OOC design n = d.createNet(parentNetName); n.setLogicalNet(logicalNet); } return n; } /** * Searches all EDIFCellInst objects to find those with matching names * against the wildcard pattern. * @param wildcardPattern Search pattern that includes alphanumeric and wildcards (*). * @return The list of all matching EDIFHierCellInst */ public List<EDIFHierCellInst> findCellInsts(String wildcardPattern){ return getAllDescendants("", wildcardPattern, false); } /** * Searches all lower levels of hierarchy to find all leaf descendants. It returns a * list of all leaf cells that fall under the hierarchy of the provided instance name. * @param instanceName Name of the instance to start searching from. * @return A list of all leaf cell instances or null if the instanceName was not found. 
*/ public List<EDIFHierCellInst> getAllLeafDescendants(String instanceName){ List<EDIFHierCellInst> leafCells = new ArrayList<>(); EDIFCellInst currTop = getCellInstFromHierName(instanceName); Queue<EDIFHierCellInst> toProcess = new LinkedList<EDIFHierCellInst>(); EDIFHierCellInst eci = new EDIFHierCellInst(EDIFTools.getHierarchicalRootFromPinName(instanceName), currTop); toProcess.add(eci); while(!toProcess.isEmpty()){ EDIFHierCellInst curr = toProcess.poll(); if(curr.getCellType().isPrimitive()){ leafCells.add(curr); }else{ for(EDIFCellInst i : curr.getInst().getCellType().getCellInsts()){ toProcess.add(new EDIFHierCellInst(curr.getFullHierarchicalInstName(), i)); } } } return leafCells; } private String convertWildcardToRegex(String wildcardPattern){ if(wildcardPattern == null) return null; StringBuilder sb = new StringBuilder(); for(int i=0; i < wildcardPattern.length(); i++){ char c = wildcardPattern.charAt(i); switch (c) { case '*': sb.append(".*"); break; case '?': case '\\': case '{': case '}': case '|': case '^': case '$': case '(': case ')': case '[': case ']': sb.append("\\"); sb.append(c); break; default: sb.append(c); } } sb.append("$"); return sb.toString(); } public List<EDIFHierCellInst> getAllLeafDescendants(String instanceName, String wildcardPattern){ return getAllDescendants(instanceName, wildcardPattern, true); } /** * Searches all lower levels of hierarchy to find descendants. It returns the * set of all cells that fall under the hierarchy of the provided instance name. * @param instanceName Name of the instance to start searching from. * @param wildcardPattern if non-null, filters results by matching wildcard pattern * @param leavesOnly Flag indicating if only leaf cells should be included * @return A set of all leaf cell instances or null if the instanceName was not found. */ public List<EDIFHierCellInst> getAllDescendants(String instanceName, String wildcardPattern, boolean leavesOnly){ List<EDIFHierCellInst> children = new ArrayList<>(); EDIFCellInst eci = getCellInstFromHierName(instanceName); if(eci == null) return null; Queue<EDIFHierCellInst> q = new LinkedList<>(); q.add(new EDIFHierCellInst(instanceName, eci)); String pattern = convertWildcardToRegex(wildcardPattern); Pattern pat = wildcardPattern != null ? Pattern.compile(pattern) : null; while(!q.isEmpty()){ EDIFHierCellInst i = q.poll(); for(EDIFCellInst child : i.getInst().getCellType().getCellInsts()){ String fullName = ""; if(!i.isTopLevelInst()){ fullName = i.getFullHierarchicalInstName(); } EDIFHierCellInst newCell = new EDIFHierCellInst(fullName, child); if(newCell.getInst().getCellType().isPrimitive()){ if(pat != null && !pat.matcher(newCell.getFullHierarchicalInstName()).matches()){ continue; } children.add(newCell); } else{ q.add(newCell); if(!leavesOnly) { if(pat != null && !pat.matcher(newCell.getFullHierarchicalInstName()).matches()){ continue; } children.add(newCell); } } } } return children; } private static boolean isDeviceNullPrinted = false; private boolean isTransformPrim(EDIFHierPortInst p){ EDIFCellInst cellInst = p.getPortInst().getCellInst(); if(!cellInst.getCellType().isPrimitive()) return false; Unisim u = Unisim.valueOf(p.getPortInst().getCellInst().getCellType().getName()); if(device == null && !isDeviceNullPrinted){ System.err.println("WARNING: EDIFNetlist.device==null when calling isTransformPrim(), results may be incorrect"); isDeviceNullPrinted = true; } return u.hasTransform(device == null ? 
Series.UltraScale : device.getSeries()); } /** * TODO - Revisit this code, simplify, remove duplication * Get's all equivalent nets in the netlist from the provided net name. * The returned list also includes the provided netName. * @param netName Full hierarchical netname to use as a starting point in the search. * @return A list of all electrically connected nets in the netlist that are equivalent. * The list is composed of all full hierarchical net names or an empty list if netName is invalid. */ public List<String> getNetAliases(String netName){ if(physicalNetPinMap == null){ physicalNetPinMap = new HashMap<String,ArrayList<EDIFHierPortInst>>(); } String parentNetName = null; ArrayList<EDIFHierPortInst> leafCellPins = new ArrayList<>(); List<String> aliases = new ArrayList<>(); aliases.add(netName); EDIFHierNet an = getHierNetFromName(netName); if(an == null) return Collections.emptyList(); Queue<EDIFHierPortInst> queue = new LinkedList<>(); EDIFPortInst source = null; for(EDIFPortInst p : an.getNet().getPortInsts()){ EDIFHierPortInst absPortInst = new EDIFHierPortInst(an.getHierarchicalInstName(), p); // Checks if cell is primitive or black box boolean isCellPin = p.getCellInst() != null && p.getCellInst().getCellType().isLeafCellOrBlackBox(); if(isCellPin){ leafCellPins.add(absPortInst); } if((p.getCellInst() == null && p.isInput()) || (isCellPin && p.isOutput())){ source = p; parentNetName = netName; } queue.add(absPortInst); } while(!queue.isEmpty()){ EDIFHierPortInst p = queue.poll(); EDIFNet otherNet = null; if(p.getPortInst().getCellInst() == null){ // Moving up in hierarchy EDIFCellInst inst = getCellInstFromHierName(p.getHierarchicalInstName()); EDIFPortInst epr = inst.getPortInst(p.getPortInst().getPortInstNameFromPort()); if(epr == null){ if(parentNetName == null && getTopCellInst().equals(inst) && p.getPortInst().isOutput()){ source = p.getPortInst(); parentNetName = p.getPortInst().getNet().getName(); } continue; } otherNet = epr.getNet(); int lastIndex = p.getHierarchicalInstName().lastIndexOf(EDIFTools.EDIF_HIER_SEP); String instName = lastIndex > 0 ? p.getHierarchicalInstName().substring(0, lastIndex) : ""; EDIFCellInst checkInst = getCellInstFromHierName(instName); while(checkInst == null && lastIndex > 0){ // Check for cells with hierarchy separator in their name lastIndex = p.getHierarchicalInstName().lastIndexOf(EDIFTools.EDIF_HIER_SEP, lastIndex-1); instName = p.getHierarchicalInstName().substring(0, lastIndex); checkInst = getCellInstFromHierName(instName); } StringBuilder sb = new StringBuilder(instName); if(!instName.equals("")) sb.append(EDIFTools.EDIF_HIER_SEP); sb.append(otherNet); aliases.add(sb.toString()); for(EDIFPortInst opr : otherNet.getPortInsts()){ if(epr.getPort() != opr.getPort()){ // Here we really want to compare object references! 
EDIFHierPortInst absPortInst = new EDIFHierPortInst(instName, opr); if(opr.getCellInst() != null && opr.getCellInst().getCellType().isLeafCellOrBlackBox()){ leafCellPins.add(absPortInst); if(parentNetName == null && opr.isOutput()) { source = opr; parentNetName = netName; } } queue.add(absPortInst); } } }else if(p.isOutput() && isTransformPrim(p)){ if(p.getPortInst().getPort().getWidth() > 1){ aliases.add(p.getTransformedNetName()); }else{ aliases.add(p.toString()); } }else{ // Moving down in hierarchy EDIFPort port = p.getPortInst().getPort(); if(port != null && port.getParentCell().hasContents()){ otherNet = port.getParentCell().getInternalNet(p.getPortInst()); if(otherNet == null){ // Looks unconnected continue; } StringBuilder sb = new StringBuilder(p.getHierarchicalInstName()); if(!p.getHierarchicalInstName().equals("")) sb.append(EDIFTools.EDIF_HIER_SEP); sb.append(p.getPortInst().getCellInst().getName()); String instName = sb.toString(); sb.append(EDIFTools.EDIF_HIER_SEP); sb.append(otherNet.getName()); aliases.add(sb.toString()); for(EDIFPortInst ipr : otherNet.getPortInsts()){ if(port != ipr.getPort()){ // Here we really want to compare object references! EDIFHierPortInst absPortInst = new EDIFHierPortInst(instName, ipr); boolean isCellPin = ipr.getCellInst() != null && ipr.getCellInst().getCellType().isLeafCellOrBlackBox(); if(isCellPin){ leafCellPins.add(absPortInst); } if((ipr.getCellInst() == null && ipr.isInput()) || (isCellPin && ipr.isOutput())){ source = ipr; parentNetName = netName; } queue.add(absPortInst); } } } } } if(parentNetName != null){ String cellType = source.getCellInst() == null ? "" : source.getCellInst().getCellType().getName(); String staticNetName = cellType.equals("GND") ? Net.GND_NET : (cellType.equals("VCC") ? Net.VCC_NET : null); if(staticNetName != null){ ArrayList<EDIFHierPortInst> existing = physicalNetPinMap.get(staticNetName); if(existing == null) physicalNetPinMap.put(staticNetName, leafCellPins); else existing.addAll(leafCellPins); }else{ physicalNetPinMap.put(parentNetName, leafCellPins); } } else if(an.getNet().getPortInsts().size() == 0){ return aliases; } else{ throw new RuntimeException("ERROR: Couldn't identify parent net, no output pins (or top level output port) found."); } return aliases; } /** * Gets the canonical net for this net name. This corresponds to the driving net * in the netlist and/or the physical net name. * @param netAlias An absolute net name alias (from logical netlist) * @return The physical/parent net name or null if none could be found. 
*/ public String getParentNetName(String netAlias){ return getParentNetMap().get(netAlias); } public Map<String,String> getParentNetMap(){ if(parentNetMap == null){ generateParentNetMap(); } return parentNetMap; } public void resetParentNetMap(){ parentNetMap = null; physicalNetPinMap = null; } private void generateParentNetMap(){ long start = 0; if(DEBUG){ start = System.currentTimeMillis(); } if(parentNetMap == null){ parentNetMap = new HashMap<>(); } if(physicalNetPinMap == null){ physicalNetPinMap = new HashMap<String,ArrayList<EDIFHierPortInst>>(); } EDIFCell c = getTopCell(); Queue<EDIFHierPortInst> queue = new LinkedList<>(); // All parent nets are either top-level inputs or outputs of leaf cells // Here we gather all top-level inputs for(EDIFNet n : c.getNets()){ for(EDIFPortInst p : n.getPortInsts()){ if(p.isTopLevelPort() && p.isInput()){ queue.add(new EDIFHierPortInst("", p)); } } } // Here we search for all leaf cell insts Queue<EDIFHierCellInst> instQueue = new LinkedList<>(); instQueue.add(new EDIFHierCellInst("", getTopCellInst())); while(!instQueue.isEmpty()){ EDIFHierCellInst currInst = instQueue.poll(); for(EDIFCellInst eci : currInst.getInst().getCellType().getCellInsts()){ // Checks if cell is primitive or black box if(eci.getCellType().getCellInsts().size() == 0 && eci.getCellType().getNets().size() == 0){ for(EDIFPortInst portInst : eci.getPortInsts()){ if(portInst.isOutput()){ queue.add(new EDIFHierPortInst(currInst.getFullHierarchicalInstName(), portInst)); } } }else{ String hName = currInst.getFullHierarchicalInstName(); instQueue.add(new EDIFHierCellInst(hName,eci)); } } } for(EDIFHierPortInst pr : queue){ String parentNetName = pr.getHierarchicalNetName(); for(String alias : getNetAliases(parentNetName)){ parentNetMap.put(alias, parentNetName); } } if(DEBUG){ long stop = System.currentTimeMillis(); System.out.println("generateParentNetMap() runtime: " + (stop-start)/1000.0f +" seconds "); } } /** * Traverses the netlist and produces a list of all primitive leaf cell instances. * @return A list of all primitive leaf cell instances. */ public List<EDIFCellInst> getAllLeafCellInstances(){ List<EDIFCellInst> insts = new ArrayList<>(); Queue<EDIFCellInst> q = new LinkedList<>(); q.add(getTopCellInst()); while(!q.isEmpty()){ EDIFCellInst curr = q.poll(); for(EDIFCellInst eci : curr.getCellType().getCellInsts()){ if(eci.getCellType().isPrimitive()) insts.add(eci); else q.add(eci); } } return insts; } /** * @return the physicalNetPinMap */ public Map<String, ArrayList<EDIFHierPortInst>> getPhysicalNetPinMap() { if(physicalNetPinMap == null){ generateParentNetMap(); } return physicalNetPinMap; } public List<EDIFHierPortInst> getPhysicalPins(String parentNetName) { return getPhysicalNetPinMap().get(parentNetName); } /** * Gets all the primitive pin sinks that are strict descendants of * this provided net. * @param net The net to trace to its sinks. 
* @return The list of all sink pins on primitive cells that are descendants * of the provided net */ public List<EDIFHierPortInst> getSinksFromNet(EDIFHierNet net){ Queue<EDIFHierNet> q = new LinkedList<>(); q.add(net); ArrayList<EDIFHierPortInst> sinks = new ArrayList<>(); HashSet<String> visited = new HashSet<>(); while(!q.isEmpty()){ EDIFHierNet curr = q.poll(); if(visited.contains(curr.getHierarchicalNetName())) continue; visited.add(curr.getHierarchicalNetName()); for(EDIFPortInst portInst : curr.getNet().getPortInsts()){ if(portInst.isOutput()) continue; if(portInst.isTopLevelPort()){ // Going up in hierarchy EDIFCellInst cellInst = getCellInstFromHierName(curr.getHierarchicalInstName()); if(cellInst == null) continue; EDIFPortInst epr = cellInst.getPortInst(portInst.getPortInstNameFromPort()); if(epr == null || epr.getNet() == null) continue; String hierName = EDIFTools.getHierarchicalRootFromPinName(curr.getHierarchicalInstName()); q.add(new EDIFHierNet(hierName, epr.getNet())); }else if(portInst.getCellInst().getCellType().isPrimitive()){ // We found a sink sinks.add(new EDIFHierPortInst(curr.getHierarchicalInstName(),portInst)); continue; }else{ // Going down in hierarchy EDIFNet internalNet = portInst.getInternalNet(); if(internalNet == null) continue; String hierName = curr.getHierarchicalInstName() + EDIFTools.EDIF_HIER_SEP + portInst.getCellInst().getName(); q.add(new EDIFHierNet(hierName,internalNet)); } } } return sinks; } /** * @param netlist * @param cellInstMap * @return */ public HashMap<String, EDIFNet> generateEDIFNetMap(HashMap<String, EDIFCellInst> cellInstMap) { HashMap<String,EDIFNet> map = new HashMap<String, EDIFNet>(); Queue<EDIFHierCellInst> toProcess = new LinkedList<EDIFHierCellInst>(); // Add nets at the very top level to start for(EDIFNet net : getTopCell().getNets()){ map.put(net.getName(), net); } Collection<EDIFCellInst> topInstances = getTopCellInst().getCellType().getCellInsts(); if(topInstances != null){ for(EDIFCellInst i : topInstances){ toProcess.add(new EDIFHierCellInst("",i)); } } while(!toProcess.isEmpty()){ EDIFHierCellInst curr = toProcess.poll(); String name = curr.getHierarchicalInstName() + curr.getInst().getName(); if(curr.getInst().getCellType().getNets() == null) continue; for(EDIFNet net : curr.getInst().getCellType().getNets()){ map.put(name + "/" + net.getName(), net); //System.out.println("NET: " + name + "/" + net.getOldName()); } String parentName = curr.getHierarchicalInstName() + curr.getInst().getName() + "/"; if(curr.getInst().getCellType().getCellInsts()==null) continue; for(EDIFCellInst i : curr.getInst().getCellType().getCellInsts()){ toProcess.add(new EDIFHierCellInst(parentName, i)); } } return map; } /** * This will be removed in the next release. 
* Consider using {@link EDIFCell#getPortMap()} instead * @deprecated * @return */ public HashMap<String,EDIFPort> generateEDIFPortMap(){ HashMap<String,EDIFPort> map = new HashMap<String, EDIFPort>(); for(EDIFPort port : getTopCellInst().getCellType().getPorts()){ if(port.isBus()){ for(int idx=0; idx < port.getWidth(); idx++){ map.put(port.getName() + "["+idx+"]",port); } }else{ map.put(port.getName(),port); } } return map; } /** * Identify primitive cell instances in EDIF netlist * @param edif The environment to look through * @return A map of hierarchical names (not including top-level name) * to EdifCellInstances that use primitives in the library */ public HashMap<String,EDIFCellInst> generateCellInstMap(){ HashMap<String,EDIFCellInst> primitiveInstances = new HashMap<String, EDIFCellInst>(); Queue<EDIFHierCellInst> toProcess = new LinkedList<EDIFHierCellInst>(); Collection<EDIFCellInst> topInstances = getTopCellInst().getCellType().getCellInsts(); if(topInstances != null){ for(EDIFCellInst i : topInstances){ toProcess.add(new EDIFHierCellInst("",i)); } } while(!toProcess.isEmpty()){ EDIFHierCellInst curr = toProcess.poll(); if(curr.getInst().getCellType().isPrimitive()){ String name = curr.getHierarchicalInstName() + curr.getInst().getName(); primitiveInstances.put(name, curr.getInst()); }else{ String parentName = curr.getHierarchicalInstName() + curr.getInst().getName()+ "/"; if(curr.getInst().getCellType().getCellInsts() == null) { //System.out.println("No instances for cell type: " + curr.inst.getCellType()); continue; } for(EDIFCellInst i : curr.getInst().getCellType().getCellInsts()){ toProcess.add(new EDIFHierCellInst(parentName, i)); } } } return primitiveInstances; } private static Set<String> getAllDecendantCellTypes(EDIFCell c) { Set<String> types = new HashSet<>(); Queue<EDIFCell> q = new LinkedList<>(); q.add(c); while(!q.isEmpty()) { EDIFCell curr = q.poll(); types.add(curr.getName()); for(EDIFCellInst i : curr.getCellInsts()) { q.add(i.getCellType()); } } return types; } /** * Expands macro primitives into a native-compatible implementation. * In Vivado, some non-native unisims are expanded or transformed * into one or more native unisims to target the architecture while * supporting the functionality of the macro unisim. When writing out * EDIF in Vivado, these primitives are collapsed back down to their * primitive state. This method compensates for this behavior by expanding * the macro primitives. As an example, IBUF => IBUF (IBUFCTRL, IBUF) for * UltraScale devices. * @param series The architecture series targeted by this netlist. 
*/ public void expandMacroUnisims(Series series) { EDIFLibrary macros = Design.getMacroPrimitives(series); EDIFLibrary netlistPrims = getHDIPrimitivesLibrary(); // Find the macro primitives to replace Set<String> toReplace = new HashSet<String>(); for(EDIFCell c : netlistPrims.getCells()) { if(macros.containsCell(c.getName())) { toReplace.addAll(getAllDecendantCellTypes(macros.getCell(c.getName()))); } } // Replace macro primitives in library and import pre-requisite cells if needed for(String cellName : toReplace) { EDIFCell removed = netlistPrims.removeCell(cellName); if(removed == null) { primsToRemoveOnCollapse.add(cellName); } EDIFCell toAdd = macros.getCell(cellName); if(toAdd == null) { toAdd = Design.getUnisimCell(Unisim.valueOf(cellName)); } netlistPrims.addCell(toAdd); } // Update all cell references to macro versions for(EDIFLibrary lib : getLibraries()) { for(EDIFCell cell : lib.getCells()) { for(EDIFCellInst inst : cell.getCellInsts()) { if(toReplace.contains(inst.getCellType().getName())) { EDIFCell newCell = netlistPrims.getCell(inst.getCellType().getName()); inst.setCellType(newCell); for(EDIFPortInst portInst : inst.getPortInsts()) { String portName = portInst.getPort().getBusName(); portInst.setPort(newCell.getPort(portName)); } } } } } } /** * Collapses any macro primitives back into their primitive state. * Performs the opposite of {@link EDIFNetlist#expandMacroUnisims(Series)}. * @param series The architecture series targeted by this netlist. */ public void collapseMacroUnisims(Series series) { EDIFLibrary macros = Design.getMacroPrimitives(series); for(EDIFCell cell : getHDIPrimitivesLibrary().getCells()) { if(macros.containsCell(cell.getName())) { cell.makePrimitive(); } } for(String name : primsToRemoveOnCollapse) { getHDIPrimitivesLibrary().removeCell(name); } } public static void main(String[] args) throws FileNotFoundException { CodePerfTracker t = new CodePerfTracker("EDIF Import/Export", true); t.start("Read EDIF"); EDIFParser p = new EDIFParser(args[0]); EDIFNetlist n = p.parseEDIFNetlist(); t.stop().start("Export EDIF"); n.exportEDIF(args[1]); t.stop().printSummary(); } }
[]
[]
[]
[]
[]
java
0
0
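In EDIFNetlist.java above, `findCellInsts` filters hierarchical instance names by converting a '*' wildcard into an end-anchored regex, with every other metacharacter ('?' included) escaped as a literal. A Python restatement of `convertWildcardToRegex` for illustration only, mirroring the Java behavior, including its choice to leave '.' unescaped:

import re

def wildcard_to_regex(pattern):
    out = []
    for c in pattern:
        if c == "*":
            out.append(".*")       # only '*' acts as a wildcard
        elif c in "?\\{}|^$()[]":
            out.append("\\" + c)   # these are matched literally ('.' is not escaped, as in the Java)
        else:
            out.append(c)
    return "".join(out) + "$"      # anchored at the end, as in the Java code

assert re.match(wildcard_to_regex("clk_wiz/*"), "clk_wiz/inst/bufg0")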
built-in/PyTorch/Official/cv/image_object_detection/Faster_Mask_RCNN_for_PyTorch/detectron2/utils/env.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import importlib
import importlib.util
import logging
import numpy as np
import os
import random
import sys
from datetime import datetime
import torch

__all__ = ["seed_all_rng"]


TORCH_VERSION = tuple(int(x) for x in torch.__version__.split(".")[:2])
"""
PyTorch version as a tuple of 2 ints. Useful for comparison.
"""


def seed_all_rng(seed=None):
    """
    Set the random seed for the RNG in torch, numpy and python.

    Args:
        seed (int): if None, will use a strong random seed.
    """
    if seed is None:
        seed = (
            os.getpid()
            + int(datetime.now().strftime("%S%f"))
            + int.from_bytes(os.urandom(2), "big")
        )
        logger = logging.getLogger(__name__)
        logger.info("Using a generated random seed {}".format(seed))
    np.random.seed(seed)
    torch.set_rng_state(torch.manual_seed(seed).get_state())
    random.seed(seed)


# from https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
def _import_file(module_name, file_path, make_importable=False):
    spec = importlib.util.spec_from_file_location(module_name, file_path)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    if make_importable:
        sys.modules[module_name] = module
    return module


def _configure_libraries():
    """
    Configurations for some libraries.
    """
    # An environment option to disable `import cv2` globally,
    # in case it leads to negative performance impact
    disable_cv2 = int(os.environ.get("DETECTRON2_DISABLE_CV2", False))
    if disable_cv2:
        sys.modules["cv2"] = None
    else:
        # Disable opencl in opencv since its interaction with cuda often has negative effects
        # This envvar is supported after OpenCV 3.4.0
        os.environ["OPENCV_OPENCL_RUNTIME"] = "disabled"
        try:
            import cv2

            if int(cv2.__version__.split(".")[0]) >= 3:
                cv2.ocl.setUseOpenCL(False)
        except ImportError:
            pass

    def get_version(module, digit=2):
        return tuple(map(int, module.__version__.split(".")[:digit]))

    # fmt: off
    assert get_version(torch) >= (1, 4), "Requires torch>=1.4"
    import fvcore
    assert get_version(fvcore, 3) >= (0, 1, 1), "Requires fvcore>=0.1.1"
    import yaml
    assert get_version(yaml) >= (5, 1), "Requires pyyaml>=5.1"
    # fmt: on


_ENV_SETUP_DONE = False


def setup_environment():
    """Perform environment setup work. The default setup is a no-op, but this
    function allows the user to specify a Python source file or a module in
    the $DETECTRON2_ENV_MODULE environment variable, that performs
    custom setup work that may be necessary to their computing environment.
    """
    global _ENV_SETUP_DONE
    if _ENV_SETUP_DONE:
        return
    _ENV_SETUP_DONE = True

    _configure_libraries()

    custom_module_path = os.environ.get("DETECTRON2_ENV_MODULE")

    if custom_module_path:
        setup_custom_environment(custom_module_path)
    else:
        # The default setup is a no-op
        pass


def setup_custom_environment(custom_module):
    """
    Load custom environment setup by importing a Python source file or a
    module, and run the setup function.
    """
    if custom_module.endswith(".py"):
        module = _import_file("detectron2.utils.env.custom_module", custom_module)
    else:
        module = importlib.import_module(custom_module)
    assert hasattr(module, "setup_environment") and callable(module.setup_environment), (
        "Custom environment module defined in {} does not have the "
        "required callable attribute 'setup_environment'."
    ).format(custom_module)
    module.setup_environment()
[]
[]
[ "OPENCV_OPENCL_RUNTIME", "DETECTRON2_DISABLE_CV2", "DETECTRON2_ENV_MODULE" ]
[]
["OPENCV_OPENCL_RUNTIME", "DETECTRON2_DISABLE_CV2", "DETECTRON2_ENV_MODULE"]
python
3
0
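`seed_all_rng` above mixes the PID, the sub-second wall-clock digits, and two bytes of OS entropy when no seed is supplied. A torch-free sketch of the same recipe; `gen_strong_seed` is a hypothetical helper name, not part of detectron2:

import os
import random
from datetime import datetime

import numpy as np

def gen_strong_seed():
    # Same entropy mix as seed_all_rng() above; on typical systems the sum
    # stays well below 2**32, which keeps it a valid numpy seed.
    return (
        os.getpid()
        + int(datetime.now().strftime("%S%f"))
        + int.from_bytes(os.urandom(2), "big")
    )

seed = gen_strong_seed()
np.random.seed(seed)
random.seed(seed)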
openstackclient/tests/utils.py
#   Copyright 2012-2013 OpenStack Foundation
#   Copyright 2013 Nebula Inc.
#
#   Licensed under the Apache License, Version 2.0 (the "License"); you may
#   not use this file except in compliance with the License. You may obtain
#   a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#   License for the specific language governing permissions and limitations
#   under the License.
#

import os

import fixtures
import sys
import testtools

from openstackclient.tests import fakes


class TestCase(testtools.TestCase):
    def setUp(self):
        testtools.TestCase.setUp(self)

        if (os.environ.get("OS_STDOUT_CAPTURE") == "True" or
                os.environ.get("OS_STDOUT_CAPTURE") == "1"):
            stdout = self.useFixture(fixtures.StringStream("stdout")).stream
            self.useFixture(fixtures.MonkeyPatch("sys.stdout", stdout))

        if (os.environ.get("OS_STDERR_CAPTURE") == "True" or
                os.environ.get("OS_STDERR_CAPTURE") == "1"):
            stderr = self.useFixture(fixtures.StringStream("stderr")).stream
            self.useFixture(fixtures.MonkeyPatch("sys.stderr", stderr))

    # 2.6 doesn't have the assert dict equals so make sure that it exists
    if tuple(sys.version_info)[0:2] < (2, 7):
        def assertIsInstance(self, obj, cls, msg=None):
            """Same as self.assertTrue(isinstance(obj, cls)), with a nicer
            default message
            """
            if not isinstance(obj, cls):
                standardMsg = '%s is not an instance of %r' % (obj, cls)
                self.fail(self._formatMessage(msg, standardMsg))

        def assertDictEqual(self, d1, d2, msg=None):
            # Simple version taken from 2.7
            self.assertIsInstance(d1, dict,
                                  'First argument is not a dictionary')
            self.assertIsInstance(d2, dict,
                                  'Second argument is not a dictionary')
            if d1 != d2:
                if msg:
                    self.fail(msg)
                else:
                    standardMsg = '%r != %r' % (d1, d2)
                    self.fail(standardMsg)


class TestCommand(TestCase):
    """Test cliff command classes"""
    def setUp(self):
        super(TestCommand, self).setUp()

        # Build up a fake app
        self.fake_stdout = fakes.FakeStdout()
        self.app = fakes.FakeApp(self.fake_stdout)
        self.app.client_manager = fakes.FakeClientManager()

    def check_parser(self, cmd, args, verify_args):
        cmd_parser = cmd.get_parser('check_parser')
        parsed_args = cmd_parser.parse_args(args)
        for av in verify_args:
            attr, value = av
            if attr:
                self.assertIn(attr, parsed_args)
                self.assertEqual(getattr(parsed_args, attr), value)
        return parsed_args
[]
[]
[ "OS_STDOUT_CAPTURE", "OS_STDERR_CAPTURE" ]
[]
["OS_STDOUT_CAPTURE", "OS_STDERR_CAPTURE"]
python
2
0
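Note that the fixture above only captures a stream when the corresponding variable is exactly "True" or "1"; values such as "true" or "yes" leave output untouched. A tiny restatement of that check (`env_flag` is an illustrative name, not part of the test suite):

import os

def env_flag(name):
    # Mirrors TestCase.setUp() above: only the exact strings "True" and "1"
    # enable capture of the corresponding stream.
    return os.environ.get(name) in ("True", "1")

os.environ["OS_STDOUT_CAPTURE"] = "true"
assert not env_flag("OS_STDOUT_CAPTURE")  # lower-case "true" does not count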
backend/db.py
from config import get_config
from logging import getLogger, StreamHandler, DEBUG, Formatter
from typing import Dict, Union, List
from pathlib import Path
import sys
import os
import random
import string
import pandas as pd
import numpy as np
from datetime import datetime as dt
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import scoped_session, sessionmaker, relationship
from sqlalchemy.sql.functions import current_timestamp
from sqlalchemy import Column, String, Integer, create_engine, TIMESTAMP, text, ForeignKey, Boolean
from flask_login import UserMixin
from flask_bcrypt import Bcrypt

parent_dir = str(Path(__file__).parent.parent.resolve())
sys.path.append(parent_dir)

# Logger setup
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False
handler.setFormatter(Formatter('[shisho] %(message)s'))

DATABASE = 'mysql://{0}:{1}@{2}/{3}?charset=utf8mb4'.format(
    os.environ['MYSQL_USER'],
    os.environ['MYSQL_PASSWORD'],
    'mysql:3306',
    os.environ['MYSQL_DATABASE']
)
ENGINE = create_engine(DATABASE, convert_unicode=True, echo=True)  # Create the DB engine
session = scoped_session(sessionmaker(autocommit=False, autoflush=False, bind=ENGINE))  # Create a session via scoped_session
Base = declarative_base()  # Base class for models
# With scoped_session, a query property is attached to the base class of the table definitions
# ref.) https://qiita.com/tosizo/items/86d3c60a4bb70eb1656e#scoped_session%E3%81%AB%E3%82%88%E3%82%8B%E7%94%9F%E6%88%90orm%E7%B7%A8
Base.query = session.query_property()


class User(Base):
    """User table"""
    __tablename__ = 'users'  # Table name
    uId = Column('uId', String(100), primary_key=True, nullable=False)  # User ID
    sId = Column('sId', String(200), nullable=False)  # Session ID
    name = Column('name', String(200), nullable=False)  # User name
    password = Column('password', String(100), nullable=False)  # Password
    # Last-active time (when the user last viewed book details)
    active_at = Column('active_at', TIMESTAMP, server_default=current_timestamp())
    changed_sId = Column('changed_sId', Boolean, default=True)  # Flag: session ID already rotated
    created_at = Column('created_at', TIMESTAMP, server_default=current_timestamp())  # Creation time
    updated_at = Column('updated_at', TIMESTAMP, nullable=False,  # Update time
                        server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'))
    histories = relationship('History', backref='users')  # Relationship to History


class History(Base):
    """Browsing-history table"""
    __tablename__ = 'histories'  # Table name
    id = Column(Integer, primary_key=True)  # Log ID
    uId = Column('uId', String(100), ForeignKey('users.uId'))  # User ID (foreign key to the users table)
    sId = Column('sId', String(200), nullable=False)  # Session ID
    bId = Column('bId', String(200), nullable=False)  # Book ID
    ts = Column('ts', TIMESTAMP, nullable=False)  # Timestamp
    isLast = Column('isLast', Boolean, default=False)  # Flag: last book of a session


class LoginUser(UserMixin, User):
    """User model for the login feature"""

    def get_id(self) -> str:
        """Get the user ID used for authentication

        Returns:
            str: user ID
        """
        return self.uId


def get_sId(n=12) -> str:
    """Generate a session ID

    Args:
        n (int, optional): string length. Defaults to 12.

    Returns:
        str: session ID
    """
    # ref.) https://qiita.com/Scstechr/items/c3b2eb291f7c5b81902a
    return ''.join(random.choices(string.ascii_letters + string.digits, k=n))


def record_history(user: LoginUser, bId: str) -> History:
    """Record a book-view history entry

    Args:
        user (LoginUser): user
        bId (str): ISBN-10 of the viewed book
    """
    # Record the history entry
    cts = dt.now()  # Current time
    log = History(uId=user.uId, sId=user.sId, bId=bId, ts=cts)
    session.add(log)

    # Update the last-active time
    target_user = User.query.get(user.uId)
    target_user.active_at = cts
    target_user.changed_sId = False  # AFK state -> rotate the session next time

    session.commit()

    # DEBUG:
    # for user in session.query(User).join(History, User.uId == History.uId).all():
    #     for log in user.histories:
    #         logger.debug('{0}:{1} -> {2} ({3}) ({4})'.format(log.uId, log.sId, log.bId, log.ts, log.isLast))

    return log  # The log recorded in the histories table


def get_user_history(user: LoginUser) -> Dict[int, Dict[int, Union[int, str, dt]]]:
    """Get a user's book-view history

    Args:
        user (LoginUser): user

    Returns:
        Dict[int, Dict[int, Union[int, str, dt]]]: browsing history
    """
    raw_user_history = History.query.filter(History.uId == user.uId).all()

    # Convert: History -> Dict (key: lId)
    user_history = dict()
    for log in raw_user_history:
        user_history[log.id] = dict()
        user_history[log.id]['uId'] = log.uId
        user_history[log.id]['sId'] = log.sId
        user_history[log.id]['bId'] = log.bId
        user_history[log.id]['ts'] = log.ts

    return user_history


def get_history_df() -> pd.core.frame.DataFrame:
    """Get the book-view history converted to a DataFrame

    Returns:
        pd.core.frame.DataFrame: book-view history in DataFrame form
    """
    sql = "SELECT * FROM histories"  # Full book-view history
    history_df = pd.read_sql_query(sql=sql, con=ENGINE, index_col='id')  # Load a DataFrame from SQL
    return history_df


def change_session(user: LoginUser) -> History:
    """Rotate the session

    Args:
        user (LoginUser): logged-in user
    """
    target_user = User.query.get(user.uId)
    target_user.sId = get_sId()  # Change the session ID
    target_user.changed_sId = True  # Session rotated

    # Flag the last log of the session
    last_log = History.query.filter(History.uId == user.uId).order_by(History.ts.desc()).first()
    last_log.isLast = True

    session.commit()
    return last_log


def update_session(change_limit_minutes: int) -> None:
    """Update sessions (check whether rotation is needed)

    Args:
        change_limit_minutes (int): rotation threshold in minutes (difference between the current time and the last-active time)
    """
    cts = dt.now()  # Current time
    users = User.query.all()  # All users
    for user in users:
        if user.changed_sId:  # Session already rotated -> do not rotate again
            continue
        time_diff_minutes = (cts - user.active_at).seconds // 60  # Time difference in minutes
        if time_diff_minutes >= change_limit_minutes:  # Threshold exceeded -> rotate the session
            change_session(user=user)


def get_guest_uIds() -> List[str]:
    guest_uIds = list(get_config()['user']['guest'].keys())
    return guest_uIds


def insert_user_and_history_for_debug() -> None:
    """Insert debug users/browsing history into each DB table"""
    from elasticsearch import Elasticsearch

    def get_random_bId() -> str:
        """Get a random book ID (ISBN-10)

        Returns:
            str: book ID (ISBN-10)
        """
        es = Elasticsearch('elasticsearch')
        es_params = {'size': 1}
        body = {'query': {'function_score': {"query": {"match_all": {}}, "random_score": {}}}}
        bId = es.search(index='book', body=body, params=es_params)['hits']['hits'][0]['_source']['isbn10']
        es.close()
        return bId

    config = get_config()  # Librarian ("shisho") settings
    guest_config = config['user']['guest']  # Guest user settings
    guest_uIds = get_guest_uIds()  # Guest user IDs
    for uIx, uId in enumerate(guest_uIds):
        # Add a guest user account
        user = LoginUser(uId=uId,
                         sId=get_sId(),
                         name=guest_config[uId]['name'],
                         password=Bcrypt().generate_password_hash(guest_config[uId]['password']).decode('utf-8'))
        session.add(user)
        session.commit()

        # Generate browsing history (the number of sessions and each session size are random)
        for _ in range(np.random.randint(2, 6)):
            sSize = np.random.randint(1, 5)  # Session size
            for i in range(sSize):
                session.add(History(uId=uId,
                                    sId=User.query.get(user.uId).sId,
                                    bId=get_random_bId(),
                                    ts=dt.now(),
                                    isLast=(True if (i == sSize - 1) else False)))  # Add a history entry
            session.commit()
            change_session(user=user)  # Rotate the session
            session.commit()


def main():
    """Initialize all tables"""
    config = get_config()  # Librarian ("shisho") settings
    Base.metadata.drop_all(bind=ENGINE)  # Drop all tables
    Base.metadata.create_all(bind=ENGINE)  # Create all tables

    # Create the admin account
    admin_user = LoginUser(uId=config['user']['admin']['id'],
                           sId=get_sId(),
                           name=config['user']['admin']['name'],
                           password=Bcrypt().generate_password_hash(config['user']['admin']['password']).decode('utf-8'))
    session.add(admin_user)  # INSERT: admin account
    session.commit()  # Commit the table update

    insert_user_and_history_for_debug()  # Insert debug users/browsing history into each DB table


if __name__ == "__main__":
    main()
[]
[]
[ "MYSQL_USER", "MYSQL_DATABASE", "MYSQL_PASSWORD" ]
[]
["MYSQL_USER", "MYSQL_DATABASE", "MYSQL_PASSWORD"]
python
3
0
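`update_session` above rotates a session once the gap between now and `active_at` reaches the threshold. One subtlety worth flagging: `timedelta.seconds` only holds the sub-day remainder, so for gaps longer than 24 hours `total_seconds()` is the safer choice. A standalone restatement of the rule under that assumption (`needs_new_session` is an illustrative name):

from datetime import datetime, timedelta

def needs_new_session(active_at, change_limit_minutes, now=None):
    # total_seconds() also counts whole days, unlike the .seconds field
    # used in update_session() above.
    now = now or datetime.now()
    return (now - active_at).total_seconds() // 60 >= change_limit_minutes

assert needs_new_session(datetime.now() - timedelta(minutes=45), 30)
assert needs_new_session(datetime.now() - timedelta(days=2), 30)  # .seconds alone would miss this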
cmd/main.go
/*
Copyright © 2019 The controller101 Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package main

import (
	"context"
	goflag "flag"
	"fmt"
	"os"
	"os/signal"
	"syscall"
	"time"

	"github.com/cloud-native-taiwan/controller101/pkg/controller"
	"github.com/cloud-native-taiwan/controller101/pkg/driver"
	cloudnative "github.com/cloud-native-taiwan/controller101/pkg/generated/clientset/versioned"
	cloudnativeinformer "github.com/cloud-native-taiwan/controller101/pkg/generated/informers/externalversions"
	"github.com/cloud-native-taiwan/controller101/pkg/version"
	flag "github.com/spf13/pflag"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/klog"
)

const defaultSyncTime = time.Second * 30

var (
	kubeconfig         string
	showVersion        bool
	threads            int
	leaderElect        bool
	id                 string
	leaseLockName      string
	leaseLockNamespace string
	driverName         string
)

func parseFlags() {
	flag.StringVarP(&kubeconfig, "kubeconfig", "", "", "Absolute path to the kubeconfig file.")
	flag.IntVarP(&threads, "threads", "", 2, "Number of worker threads used by the controller.")
	flag.StringVarP(&id, "holder-identity", "", os.Getenv("POD_NAME"), "the holder identity name")
	flag.StringVarP(&driverName, "vm-driver", "", "", "Driver is one of: [fake docker].")
	flag.BoolVarP(&leaderElect, "leader-elect", "", true, "Start a leader election client and gain leadership before executing the main loop. ")
	flag.StringVar(&leaseLockName, "lease-lock-name", "controller101", "the lease lock resource name")
	flag.StringVar(&leaseLockNamespace, "lease-lock-namespace", os.Getenv("POD_NAMESPACE"), "the lease lock resource namespace")
	flag.BoolVarP(&showVersion, "version", "", false, "Display the version.")
	flag.CommandLine.AddGoFlagSet(goflag.CommandLine)
	flag.Parse()
}

func restConfig(kubeconfig string) (*rest.Config, error) {
	if kubeconfig != "" {
		cfg, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
		if err != nil {
			return nil, err
		}
		return cfg, nil
	}

	cfg, err := rest.InClusterConfig()
	if err != nil {
		return nil, err
	}
	return cfg, nil
}

func main() {
	parseFlags()

	if showVersion {
		fmt.Fprintf(os.Stdout, "%s\n", version.GetVersion())
		os.Exit(0)
	}

	k8scfg, err := restConfig(kubeconfig)
	if err != nil {
		klog.Fatalf("Error to build rest config: %s", err.Error())
	}

	k8sclientset := clientset.NewForConfigOrDie(k8scfg)
	clientset, err := cloudnative.NewForConfig(k8scfg)
	if err != nil {
		klog.Fatalf("Error to build cloudnative clientset: %s", err.Error())
	}

	var vmDriver driver.Interface
	switch driverName {
	case "docker":
		docker, err := driver.NewDockerDriver()
		if err != nil {
			klog.Fatalf("Error to new docker driver: %s", err.Error())
		}
		vmDriver = docker
	default:
		klog.Fatalf("The driver '%s' is not supported.", driverName)
	}

	informer := cloudnativeinformer.NewSharedInformerFactory(clientset, defaultSyncTime)
	controller := controller.New(clientset, informer, vmDriver)

	ctx, cancel := context.WithCancel(context.Background())
	signalChan := make(chan os.Signal, 1)
	signal.Notify(signalChan, syscall.SIGINT, syscall.SIGTERM)

	if leaderElect {
		lock := &resourcelock.LeaseLock{
			LeaseMeta: metav1.ObjectMeta{
				Name:      leaseLockName,
				Namespace: leaseLockNamespace,
			},
			Client: k8sclientset.CoordinationV1(),
			LockConfig: resourcelock.ResourceLockConfig{
				Identity: id,
			},
		}

		go leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
			Lock:            lock,
			ReleaseOnCancel: true,
			LeaseDuration:   60 * time.Second,
			RenewDeadline:   15 * time.Second,
			RetryPeriod:     5 * time.Second,
			Callbacks: leaderelection.LeaderCallbacks{
				OnStartedLeading: func(ctx context.Context) {
					if err := controller.Run(ctx, threads); err != nil {
						klog.Fatalf("Error to run the controller instance: %s.", err)
					}
					klog.Infof("%s: leading", id)
				},
				OnStoppedLeading: func() {
					controller.Stop()
					klog.Infof("%s: lost", id)
				},
			},
		})
	} else {
		if err := controller.Run(ctx, threads); err != nil {
			klog.Fatalf("Error to run the controller instance: %s.", err)
		}
	}

	<-signalChan
	cancel()
	controller.Stop()
}
[ "\"POD_NAME\"", "\"POD_NAMESPACE\"" ]
[]
[ "POD_NAMESPACE", "POD_NAME" ]
[]
["POD_NAMESPACE", "POD_NAME"]
go
2
0
pkg/meta/redis.go
//go:build !noredis
// +build !noredis

/*
 * JuiceFS, Copyright 2020 Juicedata, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package meta

import (
	"bufio"
	"context"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"hash/fnv"
	"io"
	"math/rand"
	"net"
	"net/url"
	"os"
	"runtime"
	"runtime/debug"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"syscall"
	"time"

	"github.com/pkg/errors"

	"github.com/go-redis/redis/v8"
	"github.com/juicedata/juicefs/pkg/utils"
)

/*
	Node: i$inode -> Attribute{type,mode,uid,gid,atime,mtime,ctime,nlink,length,rdev}
	Dir: d$inode -> {name -> {inode,type}}
	Parent: p$inode -> {parent -> count} // for hard links
	File: c$inode_$indx -> [Slice{pos,id,length,off,len}]
	Symlink: s$inode -> target
	Xattr: x$inode -> {name -> value}
	Flock: lockf$inode -> { $sid_$owner -> ltype }
	POSIX lock: lockp$inode -> { $sid_$owner -> Plock(pid,ltype,start,end) }
	Sessions: sessions -> [ $sid -> heartbeat ]
	sustained: session$sid -> [$inode]
	locked: locked$sid -> { lockf$inode or lockp$inode }

	Removed files: delfiles -> [$inode:$length -> seconds]
	Slices refs: k$chunkid_$size -> refcount

	Redis features:
	  Sorted Set: 1.2+
	  Hash Set: 4.0+
	  Transaction: 2.2+
	  Scripting: 2.6+
	  Scan: 2.8+
*/

type redisMeta struct {
	*baseMeta
	rdb        redis.UniversalClient
	prefix     string
	shaLookup  string // The SHA returned by Redis for the loaded `scriptLookup`
	shaResolve string // The SHA returned by Redis for the loaded `scriptResolve`
}

var _ Meta = &redisMeta{}

func init() {
	Register("redis", newRedisMeta)
	Register("rediss", newRedisMeta)
}

// newRedisMeta returns a meta store using Redis.
func newRedisMeta(driver, addr string, conf *Config) (Meta, error) { uri := driver + "://" + addr var query queryMap if p := strings.Index(uri, "?"); p > 0 && p+1 < len(uri) { if q, err := url.ParseQuery(uri[p+1:]); err == nil { logger.Debugf("parsed query parameters: %v", q) query = queryMap{q} uri = uri[:p] } else { return nil, fmt.Errorf("parse query %s: %s", uri[p+1:], err) } } u, err := url.Parse(uri) if err != nil { return nil, fmt.Errorf("url parse %s: %s", uri, err) } hosts := u.Host opt, err := redis.ParseURL(uri) if err != nil { return nil, fmt.Errorf("redis parse %s: %s", uri, err) } if opt.Password == "" { opt.Password = os.Getenv("REDIS_PASSWORD") } if opt.Password == "" { opt.Password = os.Getenv("META_PASSWORD") } opt.MaxRetries = conf.Retries if opt.MaxRetries == 0 { opt.MaxRetries = -1 // Redis use -1 to disable retries } opt.MinRetryBackoff = query.duration("min-retry-backoff", time.Millisecond*20) opt.MaxRetryBackoff = query.duration("max-retry-backoff", time.Second*10) opt.ReadTimeout = query.duration("read-timeout", time.Second*30) opt.WriteTimeout = query.duration("write-timeout", time.Second*5) var rdb redis.UniversalClient var prefix string if strings.Contains(hosts, ",") && strings.Index(hosts, ",") < strings.Index(hosts, ":") { var fopt redis.FailoverOptions ps := strings.Split(hosts, ",") fopt.MasterName = ps[0] fopt.SentinelAddrs = ps[1:] _, port, _ := net.SplitHostPort(fopt.SentinelAddrs[len(fopt.SentinelAddrs)-1]) if port == "" { port = "26379" } for i, addr := range fopt.SentinelAddrs { h, p, e := net.SplitHostPort(addr) if e != nil { fopt.SentinelAddrs[i] = net.JoinHostPort(addr, port) } else if p == "" { fopt.SentinelAddrs[i] = net.JoinHostPort(h, port) } } fopt.SentinelPassword = os.Getenv("SENTINEL_PASSWORD") fopt.DB = opt.DB fopt.Username = opt.Username fopt.Password = opt.Password fopt.TLSConfig = opt.TLSConfig fopt.MaxRetries = opt.MaxRetries fopt.MinRetryBackoff = opt.MinRetryBackoff fopt.MaxRetryBackoff = opt.MaxRetryBackoff fopt.ReadTimeout = opt.ReadTimeout fopt.WriteTimeout = opt.WriteTimeout if conf.ReadOnly { // NOTE: RouteByLatency and RouteRandomly are not supported since they require cluster client fopt.SlaveOnly = query.Get("route-read") == "replica" } rdb = redis.NewFailoverClient(&fopt) } else { if !strings.Contains(hosts, ",") { c := redis.NewClient(opt) info, err := c.ClusterInfo(Background).Result() if err != nil && strings.Contains(err.Error(), "cluster mode") || err == nil && strings.Contains(info, "cluster_state:") { logger.Infof("redis %s is in cluster mode", hosts) } else { rdb = c } } if rdb == nil { var copt redis.ClusterOptions copt.Addrs = strings.Split(hosts, ",") copt.MaxRedirects = 1 copt.Username = opt.Username copt.Password = opt.Password copt.TLSConfig = opt.TLSConfig copt.MaxRetries = opt.MaxRetries copt.MinRetryBackoff = opt.MinRetryBackoff copt.MaxRetryBackoff = opt.MaxRetryBackoff copt.ReadTimeout = opt.ReadTimeout copt.WriteTimeout = opt.WriteTimeout if conf.ReadOnly { switch query.Get("route-read") { case "random": copt.RouteRandomly = true case "latency": copt.RouteByLatency = true case "replica": copt.ReadOnly = true default: // route to primary } } rdb = redis.NewClusterClient(&copt) prefix = fmt.Sprintf("{%d}", opt.DB) } } m := &redisMeta{ baseMeta: newBaseMeta(addr, conf), rdb: rdb, prefix: prefix, } m.en = m m.checkServerConfig() m.root, err = lookupSubdir(m, conf.Subdir) return m, err } func (m *redisMeta) Shutdown() error { return m.rdb.Close() } func (m *redisMeta) doDeleteSlice(chunkid uint64, size uint32) 
error { return m.rdb.HDel(Background, m.sliceRefs(), m.sliceKey(chunkid, size)).Err() } func (m *redisMeta) Name() string { return "redis" } func (m *redisMeta) Init(format Format, force bool) error { ctx := Background body, err := m.rdb.Get(ctx, m.setting()).Bytes() if err != nil && err != redis.Nil { return err } if err == nil { var old Format err = json.Unmarshal(body, &old) if err != nil { return fmt.Errorf("existing format is broken: %s", err) } if err = format.update(&old, force); err != nil { return err } } data, err := json.MarshalIndent(format, "", "") if err != nil { return fmt.Errorf("json: %s", err) } ts := time.Now().Unix() attr := &Attr{ Typ: TypeDirectory, Atime: ts, Mtime: ts, Ctime: ts, Nlink: 2, Length: 4 << 10, Parent: 1, } if format.TrashDays > 0 { attr.Mode = 0555 if err = m.rdb.SetNX(ctx, m.inodeKey(TrashInode), m.marshal(attr), 0).Err(); err != nil { return err } } if err = m.rdb.Set(ctx, m.setting(), data, 0).Err(); err != nil { return err } m.fmt = format if body != nil { return nil } // root inode attr.Mode = 0777 return m.rdb.Set(ctx, m.inodeKey(1), m.marshal(attr), 0).Err() } func (m *redisMeta) Reset() error { if m.prefix != "" { return m.scan(Background, "*", func(keys []string) error { return m.rdb.Del(Background, keys...).Err() }) } return m.rdb.FlushDB(Background).Err() } func (m *redisMeta) doLoad() ([]byte, error) { body, err := m.rdb.Get(Background, m.setting()).Bytes() if err == redis.Nil { return nil, nil } return body, err } func (m *redisMeta) doNewSession(sinfo []byte) error { err := m.rdb.ZAdd(Background, m.allSessions(), &redis.Z{ Score: float64(m.expireTime()), Member: strconv.FormatUint(m.sid, 10)}).Err() if err != nil { return fmt.Errorf("set session ID %d: %s", m.sid, err) } if err = m.rdb.HSet(Background, m.sessionInfos(), m.sid, sinfo).Err(); err != nil { return fmt.Errorf("set session info: %s", err) } if m.shaLookup, err = m.rdb.ScriptLoad(Background, scriptLookup).Result(); err != nil { logger.Warnf("load scriptLookup: %v", err) m.shaLookup = "" } if m.shaResolve, err = m.rdb.ScriptLoad(Background, scriptResolve).Result(); err != nil { logger.Warnf("load scriptResolve: %v", err) m.shaResolve = "" } if !m.conf.NoBGJob { go m.cleanupLegacies() } return nil } func (m *redisMeta) getCounter(name string) (int64, error) { v, err := m.rdb.Get(Background, m.prefix+name).Int64() if err == redis.Nil { err = nil } return v, err } func (m *redisMeta) incrCounter(name string, value int64) (int64, error) { if m.conf.ReadOnly { return 0, syscall.EROFS } if name == "nextInode" || name == "nextChunk" { // for nextinode, nextchunk // the current one is already used v, err := m.rdb.IncrBy(Background, m.prefix+strings.ToLower(name), value).Result() return v + 1, err } else if name == "nextSession" { name = "nextsession" } return m.rdb.IncrBy(Background, m.prefix+name, value).Result() } func (m *redisMeta) setIfSmall(name string, value, diff int64) (bool, error) { var changed bool name = m.prefix + name err := m.txn(Background, func(tx *redis.Tx) error { changed = false old, err := tx.Get(Background, name).Int64() if err != nil && err != redis.Nil { return err } if old > value-diff { return nil } else { changed = true return tx.Set(Background, name, value, 0).Err() } }, name) return changed, err } func (m *redisMeta) getSession(sid string, detail bool) (*Session, error) { ctx := Background info, err := m.rdb.HGet(ctx, m.sessionInfos(), sid).Bytes() if err == redis.Nil { // legacy client has no info info = []byte("{}") } else if err != nil { return nil, 
fmt.Errorf("HGet sessionInfos %s: %s", sid, err) } var s Session if err := json.Unmarshal(info, &s); err != nil { return nil, fmt.Errorf("corrupted session info; json error: %s", err) } s.Sid, _ = strconv.ParseUint(sid, 10, 64) if detail { inodes, err := m.rdb.SMembers(ctx, m.sustained(s.Sid)).Result() if err != nil { return nil, fmt.Errorf("SMembers %s: %s", sid, err) } s.Sustained = make([]Ino, 0, len(inodes)) for _, sinode := range inodes { inode, _ := strconv.ParseUint(sinode, 10, 64) s.Sustained = append(s.Sustained, Ino(inode)) } locks, err := m.rdb.SMembers(ctx, m.lockedKey(s.Sid)).Result() if err != nil { return nil, fmt.Errorf("SMembers %s: %s", sid, err) } s.Flocks = make([]Flock, 0, len(locks)) // greedy s.Plocks = make([]Plock, 0, len(locks)) for _, lock := range locks { owners, err := m.rdb.HGetAll(ctx, lock).Result() if err != nil { return nil, fmt.Errorf("HGetAll %s: %s", lock, err) } isFlock := strings.HasPrefix(lock, m.prefix+"lockf") inode, _ := strconv.ParseUint(lock[len(m.prefix)+5:], 10, 64) for k, v := range owners { parts := strings.Split(k, "_") if parts[0] != sid { continue } owner, _ := strconv.ParseUint(parts[1], 16, 64) if isFlock { s.Flocks = append(s.Flocks, Flock{Ino(inode), owner, v}) } else { s.Plocks = append(s.Plocks, Plock{Ino(inode), owner, []byte(v)}) } } } } return &s, nil } func (m *redisMeta) GetSession(sid uint64, detail bool) (*Session, error) { var legacy bool key := strconv.FormatUint(sid, 10) score, err := m.rdb.ZScore(Background, m.allSessions(), key).Result() if err == redis.Nil { legacy = true score, err = m.rdb.ZScore(Background, legacySessions, key).Result() } if err == redis.Nil { err = fmt.Errorf("session not found: %d", sid) } if err != nil { return nil, err } s, err := m.getSession(key, detail) if err != nil { return nil, err } s.Expire = time.Unix(int64(score), 0) if legacy { s.Expire = s.Expire.Add(time.Minute * 5) } return s, nil } func (m *redisMeta) ListSessions() ([]*Session, error) { keys, err := m.rdb.ZRangeWithScores(Background, m.allSessions(), 0, -1).Result() if err != nil { return nil, err } sessions := make([]*Session, 0, len(keys)) for _, k := range keys { s, err := m.getSession(k.Member.(string), false) if err != nil { logger.Errorf("get session: %s", err) continue } s.Expire = time.Unix(int64(k.Score), 0) sessions = append(sessions, s) } // add clients with version before 1.0-beta3 as well keys, err = m.rdb.ZRangeWithScores(Background, legacySessions, 0, -1).Result() if err != nil { logger.Errorf("Scan legacy sessions: %s", err) return sessions, nil } for _, k := range keys { s, err := m.getSession(k.Member.(string), false) if err != nil { logger.Errorf("Get legacy session: %s", err) continue } s.Expire = time.Unix(int64(k.Score), 0).Add(time.Minute * 5) sessions = append(sessions, s) } return sessions, nil } func (m *redisMeta) sustained(sid uint64) string { return m.prefix + "session" + strconv.FormatUint(sid, 10) } func (m *redisMeta) lockedKey(sid uint64) string { return m.prefix + "locked" + strconv.FormatUint(sid, 10) } func (m *redisMeta) symKey(inode Ino) string { return m.prefix + "s" + inode.String() } func (m *redisMeta) inodeKey(inode Ino) string { return m.prefix + "i" + inode.String() } func (m *redisMeta) entryKey(parent Ino) string { return m.prefix + "d" + parent.String() } func (m *redisMeta) parentKey(inode Ino) string { return m.prefix + "p" + inode.String() } func (m *redisMeta) chunkKey(inode Ino, indx uint32) string { return m.prefix + "c" + inode.String() + "_" + strconv.FormatInt(int64(indx), 
10) } func (m *redisMeta) sliceKey(chunkid uint64, size uint32) string { // inside hashset return "k" + strconv.FormatUint(chunkid, 10) + "_" + strconv.FormatUint(uint64(size), 10) } func (m *redisMeta) xattrKey(inode Ino) string { return m.prefix + "x" + inode.String() } func (m *redisMeta) flockKey(inode Ino) string { return m.prefix + "lockf" + inode.String() } func (m *redisMeta) ownerKey(owner uint64) string { return fmt.Sprintf("%d_%016X", m.sid, owner) } func (m *redisMeta) plockKey(inode Ino) string { return m.prefix + "lockp" + inode.String() } func (m *redisMeta) setting() string { return m.prefix + "setting" } func (m *redisMeta) usedSpaceKey() string { return m.prefix + usedSpace } func (m *redisMeta) totalInodesKey() string { return m.prefix + totalInodes } func (m *redisMeta) delfiles() string { return m.prefix + "delfiles" } func (r *redisMeta) delSlices() string { return r.prefix + "delSlices" } func (r *redisMeta) allSessions() string { return r.prefix + "allSessions" } func (m *redisMeta) sessionInfos() string { return m.prefix + "sessionInfos" } func (m *redisMeta) sliceRefs() string { return m.prefix + "sliceRef" } func (m *redisMeta) packEntry(_type uint8, inode Ino) []byte { wb := utils.NewBuffer(9) wb.Put8(_type) wb.Put64(uint64(inode)) return wb.Bytes() } func (m *redisMeta) parseEntry(buf []byte) (uint8, Ino) { if len(buf) != 9 { panic("invalid entry") } return buf[0], Ino(binary.BigEndian.Uint64(buf[1:])) } func (m *redisMeta) updateStats(space int64, inodes int64) { atomic.AddInt64(&m.usedSpace, space) atomic.AddInt64(&m.usedInodes, inodes) } func (m *redisMeta) handleLuaResult(op string, res interface{}, err error, returnedIno *int64, returnedAttr *string) syscall.Errno { if err != nil { msg := err.Error() if strings.Contains(msg, "NOSCRIPT") { var err2 error switch op { case "lookup": m.shaLookup, err2 = m.rdb.ScriptLoad(Background, scriptLookup).Result() case "resolve": m.shaResolve, err2 = m.rdb.ScriptLoad(Background, scriptResolve).Result() default: return syscall.ENOTSUP } if err2 == nil { logger.Infof("loaded script succeed for %s", op) return syscall.EAGAIN } else { logger.Warnf("load script %s: %s", op, err2) return syscall.ENOTSUP } } else if strings.Contains(msg, "ENOENT") { return syscall.ENOENT } else if strings.Contains(msg, "EACCESS") { return syscall.EACCES } else if strings.Contains(msg, "ENOTDIR") { return syscall.ENOTDIR } else if strings.Contains(msg, "ENOTSUP") { return syscall.ENOTSUP } else { logger.Warnf("unexpected error for %s: %s", op, msg) switch op { case "lookup": m.shaLookup = "" case "resolve": m.shaResolve = "" } return syscall.ENOTSUP } } vals, ok := res.([]interface{}) if !ok { logger.Errorf("invalid script result: %v", res) return syscall.ENOTSUP } *returnedIno, ok = vals[0].(int64) if !ok { logger.Errorf("invalid script result: %v", res) return syscall.ENOTSUP } if vals[1] == nil { return syscall.ENOTSUP } *returnedAttr, ok = vals[1].(string) if !ok { logger.Errorf("invalid script result: %v", res) return syscall.ENOTSUP } return 0 } func (m *redisMeta) doLookup(ctx Context, parent Ino, name string, inode *Ino, attr *Attr) syscall.Errno { var foundIno Ino var foundType uint8 var encodedAttr []byte var err error entryKey := m.entryKey(parent) if len(m.shaLookup) > 0 && attr != nil && !m.conf.CaseInsensi && m.prefix == "" { var res interface{} var returnedIno int64 var returnedAttr string res, err = m.rdb.EvalSha(ctx, m.shaLookup, []string{entryKey, name}).Result() if st := m.handleLuaResult("lookup", res, err, &returnedIno, 
&returnedAttr); st == 0 { foundIno = Ino(returnedIno) encodedAttr = []byte(returnedAttr) } else if st == syscall.EAGAIN { return m.doLookup(ctx, parent, name, inode, attr) } else if st != syscall.ENOTSUP { return st } } if foundIno == 0 || len(encodedAttr) == 0 { var buf []byte buf, err = m.rdb.HGet(ctx, entryKey, name).Bytes() if err != nil { return errno(err) } foundType, foundIno = m.parseEntry(buf) encodedAttr, err = m.rdb.Get(ctx, m.inodeKey(foundIno)).Bytes() } if err == nil { m.parseAttr(encodedAttr, attr) } else if err == redis.Nil { // corrupt entry logger.Warnf("no attribute for inode %d (%d, %s)", foundIno, parent, name) *attr = Attr{Typ: foundType} err = nil } *inode = foundIno return errno(err) } func (m *redisMeta) Resolve(ctx Context, parent Ino, path string, inode *Ino, attr *Attr) syscall.Errno { if len(m.shaResolve) == 0 || m.conf.CaseInsensi || m.prefix != "" { return syscall.ENOTSUP } defer m.timeit(time.Now()) parent = m.checkRoot(parent) args := []string{parent.String(), path, strconv.FormatUint(uint64(ctx.Uid()), 10), strconv.FormatUint(uint64(ctx.Gid()), 10)} res, err := m.rdb.EvalSha(ctx, m.shaResolve, args).Result() var returnedIno int64 var returnedAttr string st := m.handleLuaResult("resolve", res, err, &returnedIno, &returnedAttr) if st == 0 { if inode != nil { *inode = Ino(returnedIno) } m.parseAttr([]byte(returnedAttr), attr) } else if st == syscall.EAGAIN { return m.Resolve(ctx, parent, path, inode, attr) } return st } func (m *redisMeta) doGetAttr(ctx Context, inode Ino, attr *Attr) syscall.Errno { a, err := m.rdb.Get(ctx, m.inodeKey(inode)).Bytes() if err == nil { m.parseAttr(a, attr) } return errno(err) } type timeoutError interface { Timeout() bool } func (m *redisMeta) shouldRetry(err error, retryOnFailure bool) bool { switch err { case redis.TxFailedErr: return true case io.EOF, io.ErrUnexpectedEOF: return retryOnFailure case nil, context.Canceled, context.DeadlineExceeded: return false } if v, ok := err.(timeoutError); ok && v.Timeout() { return retryOnFailure } s := err.Error() if s == "ERR max number of clients reached" || strings.Contains(s, "Conn is in a bad state") || strings.Contains(s, "EXECABORT") { return true } ps := strings.SplitN(s, " ", 3) switch ps[0] { case "LOADING": case "READONLY": case "CLUSTERDOWN": case "TRYAGAIN": case "MOVED": case "ASK": case "ERR": if len(ps) > 1 { switch ps[1] { case "DISABLE": fallthrough case "NOWRITE": fallthrough case "NOREAD": return true } } return false default: return false } return true } func (m *redisMeta) txn(ctx Context, txf func(tx *redis.Tx) error, keys ...string) error { if m.conf.ReadOnly { return syscall.EROFS } for _, k := range keys { if !strings.HasPrefix(k, m.prefix) { panic(fmt.Sprintf("Invalid key %s not starts with prefix %s", k, m.prefix)) } } var khash = fnv.New32() _, _ = khash.Write([]byte(keys[0])) h := uint(khash.Sum32()) start := time.Now() defer func() { m.txDist.Observe(time.Since(start).Seconds()) }() m.txLock(h) defer m.txUnlock(h) // TODO: enable retry for some of idempodent transactions var retryOnFailture = false var lastErr error for i := 0; i < 50; i++ { if ctx.Canceled() { return syscall.EINTR } err := m.rdb.Watch(ctx, txf, keys...) 
if eno, ok := err.(syscall.Errno); ok && eno == 0 { err = nil } if err != nil && m.shouldRetry(err, retryOnFailture) { m.txRestart.Add(1) logger.Debugf("Transaction failed, restart it (tried %d): %s", i+1, err) lastErr = err time.Sleep(time.Millisecond * time.Duration(rand.Int()%((i+1)*(i+1)))) continue } else if err == nil && i > 1 { logger.Warnf("Transaction succeeded after %d tries (%s), keys: %v, last error: %s", i+1, time.Since(start), keys, lastErr) } return err } logger.Warnf("Already tried 50 times, returning: %s", lastErr) return lastErr } func (m *redisMeta) Truncate(ctx Context, inode Ino, flags uint8, length uint64, attr *Attr) syscall.Errno { defer m.timeit(time.Now()) f := m.of.find(inode) if f != nil { f.Lock() defer f.Unlock() } defer func() { m.of.InvalidateChunk(inode, 0xFFFFFFFF) }() var newSpace int64 err := m.txn(ctx, func(tx *redis.Tx) error { var t Attr a, err := tx.Get(ctx, m.inodeKey(inode)).Bytes() if err != nil { return err } m.parseAttr(a, &t) if t.Typ != TypeFile { return syscall.EPERM } if length == t.Length { if attr != nil { *attr = t } return nil } newSpace = align4K(length) - align4K(t.Length) if newSpace > 0 && m.checkQuota(newSpace, 0) { return syscall.ENOSPC } var zeroChunks []uint32 var left, right = t.Length, length if left > right { right, left = left, right } if (right-left)/ChunkSize >= 100 { // super large var cursor uint64 var keys []string for { keys, cursor, err = tx.Scan(ctx, cursor, m.prefix+fmt.Sprintf("c%d_*", inode), 10000).Result() if err != nil { return err } for _, key := range keys { indx, err := strconv.Atoi(strings.Split(key[len(m.prefix):], "_")[1]) if err != nil { logger.Errorf("parse %s: %s", key, err) continue } if uint64(indx) > left/ChunkSize && uint64(indx) < right/ChunkSize { zeroChunks = append(zeroChunks, uint32(indx)) } } if cursor <= 0 { break } } } else { for i := left/ChunkSize + 1; i < right/ChunkSize; i++ { zeroChunks = append(zeroChunks, uint32(i)) } } t.Length = length now := time.Now() t.Mtime = now.Unix() t.Mtimensec = uint32(now.Nanosecond()) t.Ctime = now.Unix() t.Ctimensec = uint32(now.Nanosecond()) _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { pipe.Set(ctx, m.inodeKey(inode), m.marshal(&t), 0) // zero out from left to right var l = uint32(right - left) if right > (left/ChunkSize+1)*ChunkSize { l = ChunkSize - uint32(left%ChunkSize) } pipe.RPush(ctx, m.chunkKey(inode, uint32(left/ChunkSize)), marshalSlice(uint32(left%ChunkSize), 0, 0, 0, l)) buf := marshalSlice(0, 0, 0, 0, ChunkSize) for _, indx := range zeroChunks { pipe.RPushX(ctx, m.chunkKey(inode, indx), buf) } if right > (left/ChunkSize+1)*ChunkSize && right%ChunkSize > 0 { pipe.RPush(ctx, m.chunkKey(inode, uint32(right/ChunkSize)), marshalSlice(0, 0, 0, 0, uint32(right%ChunkSize))) } pipe.IncrBy(ctx, m.usedSpaceKey(), newSpace) return nil }) if err == nil { if attr != nil { *attr = t } } return err }, m.inodeKey(inode)) if err == nil { m.updateStats(newSpace, 0) } return errno(err) } func (m *redisMeta) Fallocate(ctx Context, inode Ino, mode uint8, off uint64, size uint64) syscall.Errno { if mode&fallocCollapesRange != 0 && mode != fallocCollapesRange { return syscall.EINVAL } if mode&fallocInsertRange != 0 && mode != fallocInsertRange { return syscall.EINVAL } if mode == fallocInsertRange || mode == fallocCollapesRange { return syscall.ENOTSUP } if mode&fallocPunchHole != 0 && mode&fallocKeepSize == 0 { return syscall.EINVAL } if size == 0 { return syscall.EINVAL } defer m.timeit(time.Now()) f := m.of.find(inode) if f != nil { f.Lock() 
defer f.Unlock() } defer func() { m.of.InvalidateChunk(inode, 0xFFFFFFFF) }() var newSpace int64 err := m.txn(ctx, func(tx *redis.Tx) error { var t Attr a, err := tx.Get(ctx, m.inodeKey(inode)).Bytes() if err != nil { return err } m.parseAttr(a, &t) if t.Typ == TypeFIFO { return syscall.EPIPE } if t.Typ != TypeFile { return syscall.EPERM } length := t.Length if off+size > t.Length { if mode&fallocKeepSize == 0 { length = off + size } } old := t.Length newSpace = align4K(length) - align4K(old) if newSpace > 0 && m.checkQuota(newSpace, 0) { return syscall.ENOSPC } t.Length = length now := time.Now() t.Mtime = now.Unix() t.Mtimensec = uint32(now.Nanosecond()) t.Ctime = now.Unix() t.Ctimensec = uint32(now.Nanosecond()) _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { pipe.Set(ctx, m.inodeKey(inode), m.marshal(&t), 0) if mode&(fallocZeroRange|fallocPunchHole) != 0 { if off+size > old { size = old - off } for size > 0 { indx := uint32(off / ChunkSize) coff := off % ChunkSize l := size if coff+size > ChunkSize { l = ChunkSize - coff } pipe.RPush(ctx, m.chunkKey(inode, indx), marshalSlice(uint32(coff), 0, 0, 0, uint32(l))) off += l size -= l } } pipe.IncrBy(ctx, m.usedSpaceKey(), align4K(length)-align4K(old)) return nil }) return err }, m.inodeKey(inode)) if err == nil { m.updateStats(newSpace, 0) } return errno(err) } func (m *redisMeta) SetAttr(ctx Context, inode Ino, set uint16, sugidclearmode uint8, attr *Attr) syscall.Errno { defer m.timeit(time.Now()) inode = m.checkRoot(inode) defer func() { m.of.InvalidateChunk(inode, 0xFFFFFFFE) }() return errno(m.txn(ctx, func(tx *redis.Tx) error { var cur Attr a, err := tx.Get(ctx, m.inodeKey(inode)).Bytes() if err != nil { return err } m.parseAttr(a, &cur) if (set&(SetAttrUID|SetAttrGID)) != 0 && (set&SetAttrMode) != 0 { attr.Mode |= (cur.Mode & 06000) } var changed bool if (cur.Mode&06000) != 0 && (set&(SetAttrUID|SetAttrGID)) != 0 { clearSUGID(ctx, &cur, attr) changed = true } if set&SetAttrUID != 0 && cur.Uid != attr.Uid { cur.Uid = attr.Uid changed = true } if set&SetAttrGID != 0 && cur.Gid != attr.Gid { cur.Gid = attr.Gid changed = true } if set&SetAttrMode != 0 { if ctx.Uid() != 0 && (attr.Mode&02000) != 0 { if ctx.Gid() != cur.Gid { attr.Mode &= 05777 } } if attr.Mode != cur.Mode { cur.Mode = attr.Mode changed = true } } now := time.Now() if set&SetAttrAtime != 0 && (cur.Atime != attr.Atime || cur.Atimensec != attr.Atimensec) { cur.Atime = attr.Atime cur.Atimensec = attr.Atimensec changed = true } if set&SetAttrAtimeNow != 0 { cur.Atime = now.Unix() cur.Atimensec = uint32(now.Nanosecond()) changed = true } if set&SetAttrMtime != 0 && (cur.Mtime != attr.Mtime || cur.Mtimensec != attr.Mtimensec) { cur.Mtime = attr.Mtime cur.Mtimensec = attr.Mtimensec changed = true } if set&SetAttrMtimeNow != 0 { cur.Mtime = now.Unix() cur.Mtimensec = uint32(now.Nanosecond()) changed = true } if !changed { *attr = cur return nil } cur.Ctime = now.Unix() cur.Ctimensec = uint32(now.Nanosecond()) _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { pipe.Set(ctx, m.inodeKey(inode), m.marshal(&cur), 0) return nil }) if err == nil { *attr = cur } return err }, m.inodeKey(inode))) } func (m *redisMeta) doReadlink(ctx Context, inode Ino) ([]byte, error) { return m.rdb.Get(ctx, m.symKey(inode)).Bytes() } func (m *redisMeta) doMknod(ctx Context, parent Ino, name string, _type uint8, mode, cumask uint16, rdev uint32, path string, inode *Ino, attr *Attr) syscall.Errno { var ino Ino var err error if parent == TrashInode { var next int64 next, err = 
m.incrCounter("nextTrash", 1) ino = TrashInode + Ino(next) } else { ino, err = m.nextInode() } if err != nil { return errno(err) } if attr == nil { attr = &Attr{} } attr.Typ = _type attr.Mode = mode & ^cumask attr.Uid = ctx.Uid() attr.Gid = ctx.Gid() if _type == TypeDirectory { attr.Nlink = 2 attr.Length = 4 << 10 } else { attr.Nlink = 1 if _type == TypeSymlink { attr.Length = uint64(len(path)) } else { attr.Length = 0 attr.Rdev = rdev } } attr.Parent = parent attr.Full = true if inode != nil { *inode = ino } err = m.txn(ctx, func(tx *redis.Tx) error { var pattr Attr a, err := tx.Get(ctx, m.inodeKey(parent)).Bytes() if err != nil { return err } m.parseAttr(a, &pattr) if pattr.Typ != TypeDirectory { return syscall.ENOTDIR } buf, err := tx.HGet(ctx, m.entryKey(parent), name).Bytes() if err != nil && err != redis.Nil { return err } var foundIno Ino var foundType uint8 if err == nil { foundType, foundIno = m.parseEntry(buf) } else if m.conf.CaseInsensi { // err == redis.Nil if entry := m.resolveCase(ctx, parent, name); entry != nil { foundType, foundIno = entry.Attr.Typ, entry.Inode } } if foundIno != 0 { if _type == TypeFile || _type == TypeDirectory { // file for create, directory for subTrash a, err = tx.Get(ctx, m.inodeKey(foundIno)).Bytes() if err == nil { m.parseAttr(a, attr) } else if err == redis.Nil { *attr = Attr{Typ: foundType, Parent: parent} // corrupt entry } else { return err } if inode != nil { *inode = foundIno } } return syscall.EEXIST } var updateParent bool now := time.Now() if parent != TrashInode { if _type == TypeDirectory { pattr.Nlink++ updateParent = true } if updateParent || now.Sub(time.Unix(pattr.Mtime, int64(pattr.Mtimensec))) >= minUpdateTime { pattr.Mtime = now.Unix() pattr.Mtimensec = uint32(now.Nanosecond()) pattr.Ctime = now.Unix() pattr.Ctimensec = uint32(now.Nanosecond()) updateParent = true } } attr.Atime = now.Unix() attr.Atimensec = uint32(now.Nanosecond()) attr.Mtime = now.Unix() attr.Mtimensec = uint32(now.Nanosecond()) attr.Ctime = now.Unix() attr.Ctimensec = uint32(now.Nanosecond()) if pattr.Mode&02000 != 0 || ctx.Value(CtxKey("behavior")) == "Hadoop" || runtime.GOOS == "darwin" { attr.Gid = pattr.Gid if _type == TypeDirectory && runtime.GOOS == "linux" { attr.Mode |= pattr.Mode & 02000 } } _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { pipe.HSet(ctx, m.entryKey(parent), name, m.packEntry(_type, ino)) if updateParent { pipe.Set(ctx, m.inodeKey(parent), m.marshal(&pattr), 0) } pipe.Set(ctx, m.inodeKey(ino), m.marshal(attr), 0) if _type == TypeSymlink { pipe.Set(ctx, m.symKey(ino), path, 0) } pipe.IncrBy(ctx, m.usedSpaceKey(), align4K(0)) pipe.Incr(ctx, m.totalInodesKey()) return nil }) return err }, m.inodeKey(parent), m.entryKey(parent)) if err == nil { m.updateStats(align4K(0), 1) } return errno(err) } func (m *redisMeta) doUnlink(ctx Context, parent Ino, name string) syscall.Errno { var _type uint8 var trash, inode Ino keys := []string{m.entryKey(parent), m.inodeKey(parent)} if st := m.checkTrash(parent, &trash); st != 0 { return st } if trash == 0 { defer func() { m.of.InvalidateChunk(inode, 0xFFFFFFFE) }() } var opened bool var attr Attr var newSpace, newInode int64 err := m.txn(ctx, func(tx *redis.Tx) error { buf, err := tx.HGet(ctx, m.entryKey(parent), name).Bytes() if err == redis.Nil && m.conf.CaseInsensi { if e := m.resolveCase(ctx, parent, name); e != nil { name = string(e.Name) buf = m.packEntry(e.Attr.Typ, e.Inode) err = nil } } if err != nil { return err } _type, inode = m.parseEntry(buf) if _type == TypeDirectory { 
return syscall.EPERM } if err := tx.Watch(ctx, m.inodeKey(inode)).Err(); err != nil { return err } rs, _ := tx.MGet(ctx, m.inodeKey(parent), m.inodeKey(inode)).Result() if rs[0] == nil { return redis.Nil } var pattr Attr m.parseAttr([]byte(rs[0].(string)), &pattr) if pattr.Typ != TypeDirectory { return syscall.ENOTDIR } var updateParent bool now := time.Now() if !isTrash(parent) && now.Sub(time.Unix(pattr.Mtime, int64(pattr.Mtimensec))) >= minUpdateTime { pattr.Mtime = now.Unix() pattr.Mtimensec = uint32(now.Nanosecond()) pattr.Ctime = now.Unix() pattr.Ctimensec = uint32(now.Nanosecond()) updateParent = true } attr = Attr{} opened = false if rs[1] != nil { m.parseAttr([]byte(rs[1].(string)), &attr) if ctx.Uid() != 0 && pattr.Mode&01000 != 0 && ctx.Uid() != pattr.Uid && ctx.Uid() != attr.Uid { return syscall.EACCES } attr.Ctime = now.Unix() attr.Ctimensec = uint32(now.Nanosecond()) if trash == 0 { attr.Nlink-- if _type == TypeFile && attr.Nlink == 0 { opened = m.of.IsOpen(inode) } } else if attr.Parent > 0 { attr.Parent = trash } } else { logger.Warnf("no attribute for inode %d (%d, %s)", inode, parent, name) trash = 0 } _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { pipe.HDel(ctx, m.entryKey(parent), name) if updateParent { pipe.Set(ctx, m.inodeKey(parent), m.marshal(&pattr), 0) } if attr.Nlink > 0 { pipe.Set(ctx, m.inodeKey(inode), m.marshal(&attr), 0) if trash > 0 { pipe.HSet(ctx, m.entryKey(trash), m.trashEntry(parent, inode, name), buf) if attr.Parent == 0 { pipe.HIncrBy(ctx, m.parentKey(inode), trash.String(), 1) } } if attr.Parent == 0 { pipe.HIncrBy(ctx, m.parentKey(inode), parent.String(), -1) } } else { switch _type { case TypeFile: if opened { pipe.Set(ctx, m.inodeKey(inode), m.marshal(&attr), 0) pipe.SAdd(ctx, m.sustained(m.sid), strconv.Itoa(int(inode))) } else { pipe.ZAdd(ctx, m.delfiles(), &redis.Z{Score: float64(now.Unix()), Member: m.toDelete(inode, attr.Length)}) pipe.Del(ctx, m.inodeKey(inode)) newSpace, newInode = -align4K(attr.Length), -1 pipe.IncrBy(ctx, m.usedSpaceKey(), newSpace) pipe.Decr(ctx, m.totalInodesKey()) } case TypeSymlink: pipe.Del(ctx, m.symKey(inode)) fallthrough default: pipe.Del(ctx, m.inodeKey(inode)) newSpace, newInode = -align4K(0), -1 pipe.IncrBy(ctx, m.usedSpaceKey(), newSpace) pipe.Decr(ctx, m.totalInodesKey()) } pipe.Del(ctx, m.xattrKey(inode)) if attr.Parent == 0 { pipe.Del(ctx, m.parentKey(inode)) } } return nil }) return err }, keys...) 
if err == nil && trash == 0 { if _type == TypeFile && attr.Nlink == 0 { m.fileDeleted(opened, inode, attr.Length) } m.updateStats(newSpace, newInode) } return errno(err) } func (m *redisMeta) doRmdir(ctx Context, parent Ino, name string) syscall.Errno { var typ uint8 var trash, inode Ino keys := []string{m.inodeKey(parent), m.entryKey(parent)} if st := m.checkTrash(parent, &trash); st != 0 { return st } err := m.txn(ctx, func(tx *redis.Tx) error { buf, err := tx.HGet(ctx, m.entryKey(parent), name).Bytes() if err == redis.Nil && m.conf.CaseInsensi { if e := m.resolveCase(ctx, parent, name); e != nil { name = string(e.Name) buf = m.packEntry(e.Attr.Typ, e.Inode) err = nil } } if err != nil { return err } typ, inode = m.parseEntry(buf) if typ != TypeDirectory { return syscall.ENOTDIR } if err = tx.Watch(ctx, m.inodeKey(inode), m.entryKey(inode)).Err(); err != nil { return err } rs, _ := tx.MGet(ctx, m.inodeKey(parent), m.inodeKey(inode)).Result() if rs[0] == nil { return redis.Nil } var pattr, attr Attr m.parseAttr([]byte(rs[0].(string)), &pattr) if pattr.Typ != TypeDirectory { return syscall.ENOTDIR } now := time.Now() pattr.Nlink-- pattr.Mtime = now.Unix() pattr.Mtimensec = uint32(now.Nanosecond()) pattr.Ctime = now.Unix() pattr.Ctimensec = uint32(now.Nanosecond()) cnt, err := tx.HLen(ctx, m.entryKey(inode)).Result() if err != nil { return err } if cnt > 0 { return syscall.ENOTEMPTY } if rs[1] != nil { m.parseAttr([]byte(rs[1].(string)), &attr) if ctx.Uid() != 0 && pattr.Mode&01000 != 0 && ctx.Uid() != pattr.Uid && ctx.Uid() != attr.Uid { return syscall.EACCES } if trash > 0 { attr.Ctime = now.Unix() attr.Ctimensec = uint32(now.Nanosecond()) attr.Parent = trash } } else { logger.Warnf("no attribute for inode %d (%d, %s)", inode, parent, name) trash = 0 } _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { pipe.HDel(ctx, m.entryKey(parent), name) if !isTrash(parent) { pipe.Set(ctx, m.inodeKey(parent), m.marshal(&pattr), 0) } if trash > 0 { pipe.Set(ctx, m.inodeKey(inode), m.marshal(&attr), 0) pipe.HSet(ctx, m.entryKey(trash), m.trashEntry(parent, inode, name), buf) } else { pipe.Del(ctx, m.inodeKey(inode)) pipe.Del(ctx, m.xattrKey(inode)) pipe.IncrBy(ctx, m.usedSpaceKey(), -align4K(0)) pipe.Decr(ctx, m.totalInodesKey()) } return nil }) return err }, keys...) 
if err == nil && trash == 0 { m.updateStats(-align4K(0), -1) } return errno(err) } func (m *redisMeta) doRename(ctx Context, parentSrc Ino, nameSrc string, parentDst Ino, nameDst string, flags uint32, inode *Ino, attr *Attr) syscall.Errno { exchange := flags == RenameExchange keys := []string{m.entryKey(parentSrc), m.inodeKey(parentSrc), m.entryKey(parentDst), m.inodeKey(parentDst)} var opened bool var trash, dino Ino var dtyp uint8 var tattr Attr var newSpace, newInode int64 err := m.txn(ctx, func(tx *redis.Tx) error { buf, err := tx.HGet(ctx, m.entryKey(parentSrc), nameSrc).Bytes() if err == redis.Nil && m.conf.CaseInsensi { if e := m.resolveCase(ctx, parentSrc, nameSrc); e != nil { nameSrc = string(e.Name) buf = m.packEntry(e.Attr.Typ, e.Inode) err = nil } } if err != nil { return err } typ, ino := m.parseEntry(buf) if parentSrc == parentDst && nameSrc == nameDst { if inode != nil { *inode = ino } return nil } keys = []string{m.inodeKey(ino)} dbuf, err := tx.HGet(ctx, m.entryKey(parentDst), nameDst).Bytes() if err == redis.Nil && m.conf.CaseInsensi { if e := m.resolveCase(ctx, parentDst, nameDst); e != nil { nameDst = string(e.Name) buf = m.packEntry(e.Attr.Typ, e.Inode) err = nil } } if err != nil && err != redis.Nil { return err } if err == nil { if flags == RenameNoReplace { return syscall.EEXIST } dtyp, dino = m.parseEntry(dbuf) keys = append(keys, m.inodeKey(dino)) if dtyp == TypeDirectory { keys = append(keys, m.entryKey(dino)) } if !exchange { if st := m.checkTrash(parentDst, &trash); st != 0 { return st } } } if err := tx.Watch(ctx, keys...).Err(); err != nil { return err } keys := []string{m.inodeKey(parentSrc), m.inodeKey(parentDst), m.inodeKey(ino)} if dino > 0 { keys = append(keys, m.inodeKey(dino)) } rs, _ := tx.MGet(ctx, keys...).Result() if rs[0] == nil || rs[1] == nil || rs[2] == nil { return redis.Nil } var sattr, dattr, iattr Attr m.parseAttr([]byte(rs[0].(string)), &sattr) if sattr.Typ != TypeDirectory { return syscall.ENOTDIR } m.parseAttr([]byte(rs[1].(string)), &dattr) if dattr.Typ != TypeDirectory { return syscall.ENOTDIR } m.parseAttr([]byte(rs[2].(string)), &iattr) var supdate, dupdate bool now := time.Now() tattr = Attr{} opened = false if dino > 0 { if rs[3] == nil { logger.Warnf("no attribute for inode %d (%d, %s)", dino, parentDst, nameDst) trash = 0 } m.parseAttr([]byte(rs[3].(string)), &tattr) tattr.Ctime = now.Unix() tattr.Ctimensec = uint32(now.Nanosecond()) if exchange { if parentSrc != parentDst { if dtyp == TypeDirectory { tattr.Parent = parentSrc dattr.Nlink-- sattr.Nlink++ supdate, dupdate = true, true } else if tattr.Parent > 0 { tattr.Parent = parentSrc } } } else { if dtyp == TypeDirectory { cnt, err := tx.HLen(ctx, m.entryKey(dino)).Result() if err != nil { return err } if cnt != 0 { return syscall.ENOTEMPTY } dattr.Nlink-- dupdate = true if trash > 0 { tattr.Parent = trash } } else { if trash == 0 { tattr.Nlink-- if dtyp == TypeFile && tattr.Nlink == 0 { opened = m.of.IsOpen(dino) } defer func() { m.of.InvalidateChunk(dino, 0xFFFFFFFE) }() } else if tattr.Parent > 0 { tattr.Parent = trash } } } if ctx.Uid() != 0 && dattr.Mode&01000 != 0 && ctx.Uid() != dattr.Uid && ctx.Uid() != tattr.Uid { return syscall.EACCES } } else { if exchange { return syscall.ENOENT } dino, dtyp = 0, 0 } if ctx.Uid() != 0 && sattr.Mode&01000 != 0 && ctx.Uid() != sattr.Uid && ctx.Uid() != iattr.Uid { return syscall.EACCES } if parentSrc != parentDst { if typ == TypeDirectory { iattr.Parent = parentDst sattr.Nlink-- dattr.Nlink++ supdate, dupdate = true, true } else if 
iattr.Parent > 0 { iattr.Parent = parentDst } } if supdate || now.Sub(time.Unix(sattr.Mtime, int64(sattr.Mtimensec))) >= minUpdateTime { sattr.Mtime = now.Unix() sattr.Mtimensec = uint32(now.Nanosecond()) sattr.Ctime = now.Unix() sattr.Ctimensec = uint32(now.Nanosecond()) supdate = true } if dupdate || now.Sub(time.Unix(dattr.Mtime, int64(dattr.Mtimensec))) >= minUpdateTime { dattr.Mtime = now.Unix() dattr.Mtimensec = uint32(now.Nanosecond()) dattr.Ctime = now.Unix() dattr.Ctimensec = uint32(now.Nanosecond()) dupdate = true } iattr.Ctime = now.Unix() iattr.Ctimensec = uint32(now.Nanosecond()) if inode != nil { *inode = ino } if attr != nil { *attr = iattr } _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { if exchange { // dbuf, tattr are valid pipe.HSet(ctx, m.entryKey(parentSrc), nameSrc, dbuf) pipe.Set(ctx, m.inodeKey(dino), m.marshal(&tattr), 0) if parentSrc != parentDst && tattr.Parent == 0 { pipe.HIncrBy(ctx, m.parentKey(dino), parentSrc.String(), 1) pipe.HIncrBy(ctx, m.parentKey(dino), parentDst.String(), -1) } } else { pipe.HDel(ctx, m.entryKey(parentSrc), nameSrc) if dino > 0 { if trash > 0 { pipe.Set(ctx, m.inodeKey(dino), m.marshal(&tattr), 0) pipe.HSet(ctx, m.entryKey(trash), m.trashEntry(parentDst, dino, nameDst), dbuf) if tattr.Parent == 0 { pipe.HIncrBy(ctx, m.parentKey(dino), trash.String(), 1) pipe.HIncrBy(ctx, m.parentKey(dino), parentDst.String(), -1) } } else if dtyp != TypeDirectory && tattr.Nlink > 0 { pipe.Set(ctx, m.inodeKey(dino), m.marshal(&tattr), 0) if tattr.Parent == 0 { pipe.HIncrBy(ctx, m.parentKey(dino), parentDst.String(), -1) } } else { if dtyp == TypeFile { if opened { pipe.Set(ctx, m.inodeKey(dino), m.marshal(&tattr), 0) pipe.SAdd(ctx, m.sustained(m.sid), strconv.Itoa(int(dino))) } else { pipe.ZAdd(ctx, m.delfiles(), &redis.Z{Score: float64(now.Unix()), Member: m.toDelete(dino, tattr.Length)}) pipe.Del(ctx, m.inodeKey(dino)) newSpace, newInode = -align4K(tattr.Length), -1 pipe.IncrBy(ctx, m.usedSpaceKey(), newSpace) pipe.Decr(ctx, m.totalInodesKey()) } } else { if dtyp == TypeSymlink { pipe.Del(ctx, m.symKey(dino)) } pipe.Del(ctx, m.inodeKey(dino)) newSpace, newInode = -align4K(0), -1 pipe.IncrBy(ctx, m.usedSpaceKey(), newSpace) pipe.Decr(ctx, m.totalInodesKey()) } pipe.Del(ctx, m.xattrKey(dino)) if tattr.Parent == 0 { pipe.Del(ctx, m.parentKey(dino)) } } } } if parentDst != parentSrc { if !isTrash(parentSrc) && supdate { pipe.Set(ctx, m.inodeKey(parentSrc), m.marshal(&sattr), 0) } if iattr.Parent == 0 { pipe.HIncrBy(ctx, m.parentKey(ino), parentDst.String(), 1) pipe.HIncrBy(ctx, m.parentKey(ino), parentSrc.String(), -1) } } pipe.Set(ctx, m.inodeKey(ino), m.marshal(&iattr), 0) pipe.HSet(ctx, m.entryKey(parentDst), nameDst, buf) if dupdate { pipe.Set(ctx, m.inodeKey(parentDst), m.marshal(&dattr), 0) } return nil }) return err }, keys...) 
if err == nil && !exchange && trash == 0 { if dino > 0 && dtyp == TypeFile && tattr.Nlink == 0 { m.fileDeleted(opened, dino, tattr.Length) } m.updateStats(newSpace, newInode) } return errno(err) } func (m *redisMeta) doLink(ctx Context, inode, parent Ino, name string, attr *Attr) syscall.Errno { return errno(m.txn(ctx, func(tx *redis.Tx) error { rs, err := tx.MGet(ctx, m.inodeKey(parent), m.inodeKey(inode)).Result() if err != nil { return err } if rs[0] == nil || rs[1] == nil { return redis.Nil } var pattr, iattr Attr m.parseAttr([]byte(rs[0].(string)), &pattr) if pattr.Typ != TypeDirectory { return syscall.ENOTDIR } var updateParent bool now := time.Now() if now.Sub(time.Unix(pattr.Mtime, int64(pattr.Mtimensec))) >= minUpdateTime { pattr.Mtime = now.Unix() pattr.Mtimensec = uint32(now.Nanosecond()) pattr.Ctime = now.Unix() pattr.Ctimensec = uint32(now.Nanosecond()) updateParent = true } m.parseAttr([]byte(rs[1].(string)), &iattr) if iattr.Typ == TypeDirectory { return syscall.EPERM } oldParent := iattr.Parent iattr.Parent = 0 iattr.Ctime = now.Unix() iattr.Ctimensec = uint32(now.Nanosecond()) iattr.Nlink++ err = tx.HGet(ctx, m.entryKey(parent), name).Err() if err != nil && err != redis.Nil { return err } else if err == nil { return syscall.EEXIST } else if err == redis.Nil && m.conf.CaseInsensi && m.resolveCase(ctx, parent, name) != nil { return syscall.EEXIST } _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { pipe.HSet(ctx, m.entryKey(parent), name, m.packEntry(iattr.Typ, inode)) if updateParent { pipe.Set(ctx, m.inodeKey(parent), m.marshal(&pattr), 0) } pipe.Set(ctx, m.inodeKey(inode), m.marshal(&iattr), 0) if oldParent > 0 { pipe.HIncrBy(ctx, m.parentKey(inode), oldParent.String(), 1) } pipe.HIncrBy(ctx, m.parentKey(inode), parent.String(), 1) return nil }) if err == nil && attr != nil { *attr = iattr } return err }, m.inodeKey(inode), m.entryKey(parent), m.inodeKey(parent))) } func (m *redisMeta) doReaddir(ctx Context, inode Ino, plus uint8, entries *[]*Entry, limit int) syscall.Errno { var stop = errors.New("stop") err := m.hscan(ctx, m.entryKey(inode), func(keys []string) error { newEntries := make([]Entry, len(keys)/2) newAttrs := make([]Attr, len(keys)/2) for i := 0; i < len(keys); i += 2 { typ, ino := m.parseEntry([]byte(keys[i+1])) if keys[i] == "" { logger.Errorf("Corrupt entry with empty name: inode %d parent %d", ino, inode) continue } ent := &newEntries[i/2] ent.Inode = ino ent.Name = []byte(keys[i]) ent.Attr = &newAttrs[i/2] ent.Attr.Typ = typ *entries = append(*entries, ent) if limit > 0 && len(*entries) >= limit { return stop } } return nil }) if errors.Is(err, stop) { err = nil } if err != nil { return errno(err) } if plus != 0 { fillAttr := func(es []*Entry) error { var keys = make([]string, len(es)) for i, e := range es { keys[i] = m.inodeKey(e.Inode) } rs, err := m.rdb.MGet(ctx, keys...).Result() if err != nil { return err } for j, re := range rs { if re != nil { if a, ok := re.(string); ok { m.parseAttr([]byte(a), es[j].Attr) } } } return nil } batchSize := 4096 nEntries := len(*entries) if nEntries <= batchSize { err = fillAttr(*entries) } else { indexCh := make(chan []*Entry, 10) var wg sync.WaitGroup for i := 0; i < 2; i++ { wg.Add(1) go func() { defer wg.Done() for es := range indexCh { e := fillAttr(es) if e != nil { err = e break } } }() } for i := 0; i < nEntries; i += batchSize { if i+batchSize > nEntries { indexCh <- (*entries)[i:] } else { indexCh <- (*entries)[i : i+batchSize] } } close(indexCh) wg.Wait() } if err != nil { return errno(err) 
} } return 0 } func (m *redisMeta) doCleanStaleSession(sid uint64) error { var fail bool // release locks var ctx = Background ssid := strconv.FormatInt(int64(sid), 10) key := m.lockedKey(sid) if inodes, err := m.rdb.SMembers(ctx, key).Result(); err == nil { for _, k := range inodes { owners, err := m.rdb.HKeys(ctx, k).Result() if err != nil { logger.Warnf("HKeys %s: %s", k, err) fail = true continue } var fields []string for _, o := range owners { if strings.Split(o, "_")[0] == ssid { fields = append(fields, o) } } if len(fields) > 0 { if err = m.rdb.HDel(ctx, k, fields...).Err(); err != nil { logger.Warnf("HDel %s %s: %s", k, fields, err) fail = true continue } } if err = m.rdb.SRem(ctx, key, k).Err(); err != nil { logger.Warnf("SRem %s %s: %s", key, k, err) fail = true } } } else { logger.Warnf("SMembers %s: %s", key, err) fail = true } key = m.sustained(sid) if inodes, err := m.rdb.SMembers(ctx, key).Result(); err == nil { for _, sinode := range inodes { inode, _ := strconv.ParseInt(sinode, 10, 0) if err = m.doDeleteSustainedInode(sid, Ino(inode)); err != nil { logger.Warnf("Delete sustained inode %d of sid %d: %s", inode, sid, err) fail = true } } } else { logger.Warnf("SMembers %s: %s", key, err) fail = true } if !fail { if err := m.rdb.HDel(ctx, m.sessionInfos(), ssid).Err(); err != nil { logger.Warnf("HDel sessionInfos %s: %s", ssid, err) fail = true } } if fail { return fmt.Errorf("failed to clean up sid %d", sid) } else { if n, err := m.rdb.ZRem(ctx, m.allSessions(), ssid).Result(); err != nil { return err } else if n == 1 { return nil } return m.rdb.ZRem(ctx, legacySessions, ssid).Err() } } func (m *redisMeta) doFindStaleSessions(limit int) ([]uint64, error) { vals, err := m.rdb.ZRangeByScore(Background, m.allSessions(), &redis.ZRangeBy{ Max: strconv.FormatInt(time.Now().Unix(), 10), Count: int64(limit)}).Result() if err != nil { return nil, err } sids := make([]uint64, len(vals)) for i, v := range vals { sids[i], _ = strconv.ParseUint(v, 10, 64) } limit -= len(sids) if limit <= 0 { return sids, nil } // check clients with version before 1.0-beta3 as well vals, err = m.rdb.ZRangeByScore(Background, legacySessions, &redis.ZRangeBy{ Max: strconv.FormatInt(time.Now().Add(time.Minute*-5).Unix(), 10), Count: int64(limit)}).Result() if err != nil { logger.Errorf("Scan stale legacy sessions: %s", err) return sids, nil } for _, v := range vals { sid, _ := strconv.ParseUint(v, 10, 64) sids = append(sids, sid) } return sids, nil } func (m *redisMeta) doRefreshSession() { m.rdb.ZAdd(Background, m.allSessions(), &redis.Z{ Score: float64(m.expireTime()), Member: strconv.FormatUint(m.sid, 10)}) } func (m *redisMeta) doDeleteSustainedInode(sid uint64, inode Ino) error { var attr Attr var ctx = Background a, err := m.rdb.Get(ctx, m.inodeKey(inode)).Bytes() if err == redis.Nil { return nil } if err != nil { return err } m.parseAttr(a, &attr) var newSpace int64 _, err = m.rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error { pipe.ZAdd(ctx, m.delfiles(), &redis.Z{Score: float64(time.Now().Unix()), Member: m.toDelete(inode, attr.Length)}) pipe.Del(ctx, m.inodeKey(inode)) newSpace = -align4K(attr.Length) pipe.IncrBy(ctx, m.usedSpaceKey(), newSpace) pipe.Decr(ctx, m.totalInodesKey()) pipe.SRem(ctx, m.sustained(sid), strconv.Itoa(int(inode))) return nil }) if err == nil { m.updateStats(newSpace, -1) m.tryDeleteFileData(inode, attr.Length) } return err } func (m *redisMeta) Read(ctx Context, inode Ino, indx uint32, chunks *[]Slice) syscall.Errno { f := m.of.find(inode) if f != nil { f.RLock() defer 
f.RUnlock() } if cs, ok := m.of.ReadChunk(inode, indx); ok { *chunks = cs return 0 } defer m.timeit(time.Now()) vals, err := m.rdb.LRange(ctx, m.chunkKey(inode, indx), 0, 1000000).Result() if err != nil { return errno(err) } ss := readSlices(vals) *chunks = buildSlice(ss) m.of.CacheChunk(inode, indx, *chunks) if !m.conf.ReadOnly && (len(vals) >= 5 || len(*chunks) >= 5) { go m.compactChunk(inode, indx, false) } return 0 } func (m *redisMeta) Write(ctx Context, inode Ino, indx uint32, off uint32, slice Slice) syscall.Errno { defer m.timeit(time.Now()) f := m.of.find(inode) if f != nil { f.Lock() defer f.Unlock() } defer func() { m.of.InvalidateChunk(inode, indx) }() var newSpace int64 var needCompact bool err := m.txn(ctx, func(tx *redis.Tx) error { var attr Attr a, err := tx.Get(ctx, m.inodeKey(inode)).Bytes() if err != nil { return err } m.parseAttr(a, &attr) if attr.Typ != TypeFile { return syscall.EPERM } newleng := uint64(indx)*ChunkSize + uint64(off) + uint64(slice.Len) if newleng > attr.Length { newSpace = align4K(newleng) - align4K(attr.Length) attr.Length = newleng } if m.checkQuota(newSpace, 0) { return syscall.ENOSPC } now := time.Now() attr.Mtime = now.Unix() attr.Mtimensec = uint32(now.Nanosecond()) attr.Ctime = now.Unix() attr.Ctimensec = uint32(now.Nanosecond()) var rpush *redis.IntCmd _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { rpush = pipe.RPush(ctx, m.chunkKey(inode, indx), marshalSlice(off, slice.Chunkid, slice.Size, slice.Off, slice.Len)) // most of chunk are used by single inode, so use that as the default (1 == not exists) // pipe.Incr(ctx, r.sliceKey(slice.Chunkid, slice.Size)) pipe.Set(ctx, m.inodeKey(inode), m.marshal(&attr), 0) if newSpace > 0 { pipe.IncrBy(ctx, m.usedSpaceKey(), newSpace) } return nil }) if err == nil { needCompact = rpush.Val()%100 == 99 } return err }, m.inodeKey(inode)) if err == nil { if needCompact { go m.compactChunk(inode, indx, false) } m.updateStats(newSpace, 0) } return errno(err) } func (m *redisMeta) CopyFileRange(ctx Context, fin Ino, offIn uint64, fout Ino, offOut uint64, size uint64, flags uint32, copied *uint64) syscall.Errno { defer m.timeit(time.Now()) f := m.of.find(fout) if f != nil { f.Lock() defer f.Unlock() } var newSpace int64 defer func() { m.of.InvalidateChunk(fout, 0xFFFFFFFF) }() err := m.txn(ctx, func(tx *redis.Tx) error { rs, err := tx.MGet(ctx, m.inodeKey(fin), m.inodeKey(fout)).Result() if err != nil { return err } if rs[0] == nil || rs[1] == nil { return redis.Nil } var sattr Attr m.parseAttr([]byte(rs[0].(string)), &sattr) if sattr.Typ != TypeFile { return syscall.EINVAL } if offIn >= sattr.Length { *copied = 0 return nil } if offIn+size > sattr.Length { size = sattr.Length - offIn } var attr Attr m.parseAttr([]byte(rs[1].(string)), &attr) if attr.Typ != TypeFile { return syscall.EINVAL } newleng := offOut + size if newleng > attr.Length { newSpace = align4K(newleng) - align4K(attr.Length) attr.Length = newleng } if m.checkQuota(newSpace, 0) { return syscall.ENOSPC } now := time.Now() attr.Mtime = now.Unix() attr.Mtimensec = uint32(now.Nanosecond()) attr.Ctime = now.Unix() attr.Ctimensec = uint32(now.Nanosecond()) p := tx.Pipeline() for i := offIn / ChunkSize; i <= (offIn+size)/ChunkSize; i++ { p.LRange(ctx, m.chunkKey(fin, uint32(i)), 0, 1000000) } vals, err := p.Exec(ctx) if err != nil { return err } _, err = tx.Pipelined(ctx, func(pipe redis.Pipeliner) error { coff := offIn / ChunkSize * ChunkSize for _, v := range vals { sv := v.(*redis.StringSliceCmd).Val() // Add a zero chunk for hole ss 
:= append([]*slice{{len: ChunkSize}}, readSlices(sv)...) cs := buildSlice(ss) tpos := coff for _, s := range cs { pos := tpos tpos += uint64(s.Len) if pos < offIn+size && pos+uint64(s.Len) > offIn { if pos < offIn { dec := offIn - pos s.Off += uint32(dec) pos += dec s.Len -= uint32(dec) } if pos+uint64(s.Len) > offIn+size { dec := pos + uint64(s.Len) - (offIn + size) s.Len -= uint32(dec) } doff := pos - offIn + offOut indx := uint32(doff / ChunkSize) dpos := uint32(doff % ChunkSize) if dpos+s.Len > ChunkSize { pipe.RPush(ctx, m.chunkKey(fout, indx), marshalSlice(dpos, s.Chunkid, s.Size, s.Off, ChunkSize-dpos)) if s.Chunkid > 0 { pipe.HIncrBy(ctx, m.sliceRefs(), m.sliceKey(s.Chunkid, s.Size), 1) } skip := ChunkSize - dpos pipe.RPush(ctx, m.chunkKey(fout, indx+1), marshalSlice(0, s.Chunkid, s.Size, s.Off+skip, s.Len-skip)) if s.Chunkid > 0 { pipe.HIncrBy(ctx, m.sliceRefs(), m.sliceKey(s.Chunkid, s.Size), 1) } } else { pipe.RPush(ctx, m.chunkKey(fout, indx), marshalSlice(dpos, s.Chunkid, s.Size, s.Off, s.Len)) if s.Chunkid > 0 { pipe.HIncrBy(ctx, m.sliceRefs(), m.sliceKey(s.Chunkid, s.Size), 1) } } } } coff += ChunkSize } pipe.Set(ctx, m.inodeKey(fout), m.marshal(&attr), 0) if newSpace > 0 { pipe.IncrBy(ctx, m.usedSpaceKey(), newSpace) } return nil }) if err == nil { *copied = size } return err }, m.inodeKey(fout), m.inodeKey(fin)) if err == nil { m.updateStats(newSpace, 0) } return errno(err) } func (m *redisMeta) doGetParents(ctx Context, inode Ino) map[Ino]int { vals, err := m.rdb.HGetAll(ctx, m.parentKey(inode)).Result() if err != nil { logger.Warnf("Scan parent key of inode %d: %s", inode, err) return nil } ps := make(map[Ino]int) for k, v := range vals { if n, _ := strconv.Atoi(v); n > 0 { ino, _ := strconv.ParseUint(k, 10, 64) ps[Ino(ino)] = n } } return ps } // For now only deleted files func (m *redisMeta) cleanupLegacies() { for { utils.SleepWithJitter(time.Minute) rng := &redis.ZRangeBy{Max: strconv.FormatInt(time.Now().Add(-time.Hour).Unix(), 10), Count: 1000} vals, err := m.rdb.ZRangeByScore(Background, m.delfiles(), rng).Result() if err != nil { continue } var count int for _, v := range vals { ps := strings.Split(v, ":") if len(ps) != 2 { inode, _ := strconv.ParseUint(ps[0], 10, 64) var length uint64 = 1 << 30 if len(ps) > 2 { length, _ = strconv.ParseUint(ps[2], 10, 64) } logger.Infof("cleanup legacy delfile inode %d with %d bytes (%s)", inode, length, v) m.doDeleteFileData_(Ino(inode), length, v) count++ } } if count == 0 { return } } } func (m *redisMeta) doFindDeletedFiles(ts int64, limit int) (map[Ino]uint64, error) { rng := &redis.ZRangeBy{Max: strconv.FormatInt(ts, 10), Count: int64(limit)} vals, err := m.rdb.ZRangeByScore(Background, m.delfiles(), rng).Result() if err != nil { return nil, err } files := make(map[Ino]uint64, len(vals)) for _, v := range vals { ps := strings.Split(v, ":") if len(ps) != 2 { // will be cleaned up as legacy continue } inode, _ := strconv.ParseUint(ps[0], 10, 64) files[Ino(inode)], _ = strconv.ParseUint(ps[1], 10, 64) } return files, nil } func (m *redisMeta) doCleanupSlices() { _ = m.hscan(Background, m.sliceRefs(), func(keys []string) error { for i := 0; i < len(keys); i += 2 { key, val := keys[i], keys[i+1] if strings.HasPrefix(val, "-") { // < 0 ps := strings.Split(key, "_") if len(ps) == 2 { chunkid, _ := strconv.ParseUint(ps[0][1:], 10, 64) size, _ := strconv.ParseUint(ps[1], 10, 32) if chunkid > 0 && size > 0 { m.deleteSlice(chunkid, uint32(size)) } } } else if val == "0" { m.cleanupZeroRef(key) } } return nil }) } func (m *redisMeta) 
cleanupZeroRef(key string) { var ctx = Background _ = m.txn(ctx, func(tx *redis.Tx) error { v, err := tx.HGet(ctx, m.sliceRefs(), key).Int() if err != nil { return err } if v != 0 { return syscall.EINVAL } _, err = tx.Pipelined(ctx, func(p redis.Pipeliner) error { p.HDel(ctx, m.sliceRefs(), key) return nil }) return err }, m.sliceRefs()) } func (m *redisMeta) cleanupLeakedChunks() { var ctx = Background prefix := len(m.prefix) _ = m.scan(ctx, "c*", func(ckeys []string) error { var ikeys []string var rs []*redis.IntCmd p := m.rdb.Pipeline() for _, k := range ckeys { ps := strings.Split(k, "_") if len(ps) != 2 { continue } ino, _ := strconv.ParseInt(ps[0][prefix+1:], 10, 0) ikeys = append(ikeys, k) rs = append(rs, p.Exists(ctx, m.inodeKey(Ino(ino)))) } if len(rs) > 0 { _, err := p.Exec(ctx) if err != nil { logger.Errorf("check inodes: %s", err) return err } for i, rr := range rs { if rr.Val() == 0 { key := ikeys[i] logger.Infof("found leaked chunk %s", key) ps := strings.Split(key, "_") ino, _ := strconv.ParseInt(ps[0][prefix+1:], 10, 0) indx, _ := strconv.Atoi(ps[1]) _ = m.deleteChunk(Ino(ino), uint32(indx)) } } } return nil }) } func (m *redisMeta) cleanupOldSliceRefs() { var ctx = Background _ = m.scan(ctx, "k*", func(ckeys []string) error { values, err := m.rdb.MGet(ctx, ckeys...).Result() if err != nil { logger.Warnf("mget slices: %s", err) return err } var todel []string for i, v := range values { if v == nil { continue } if strings.HasPrefix(v.(string), m.prefix+"-") || v == "0" { // < 0 // the objects will be deleted by gc todel = append(todel, ckeys[i]) } else { vv, _ := strconv.Atoi(v.(string)) m.rdb.HIncrBy(ctx, m.sliceRefs(), ckeys[i], int64(vv)) m.rdb.DecrBy(ctx, ckeys[i], int64(vv)) logger.Infof("move refs %d for slice %s", vv, ckeys[i]) } } m.rdb.Del(ctx, todel...) 
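// drop the counters that are zero or pending gc in one batch; refs for live slices were migrated into sliceRefs above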
return nil }) } func (m *redisMeta) toDelete(inode Ino, length uint64) string { return inode.String() + ":" + strconv.Itoa(int(length)) } func (m *redisMeta) deleteChunk(inode Ino, indx uint32) error { var ctx = Background key := m.chunkKey(inode, indx) for { var slices []*slice var rs []*redis.IntCmd err := m.txn(ctx, func(tx *redis.Tx) error { vals, err := tx.LRange(ctx, key, 0, 100).Result() if err == redis.Nil { return nil } slices = nil rs = nil _, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error { for _, v := range vals { pipe.LPop(ctx, key) rb := utils.ReadBuffer([]byte(v)) _ = rb.Get32() // pos chunkid := rb.Get64() if chunkid == 0 { continue } size := rb.Get32() slices = append(slices, &slice{chunkid: chunkid, size: size}) rs = append(rs, pipe.HIncrBy(ctx, m.sliceRefs(), m.sliceKey(chunkid, size), -1)) } return nil }) return err }, key) if err != nil { return fmt.Errorf("delete slice from chunk %s fail: %s, retry later", key, err) } for i, s := range slices { if rs[i].Val() < 0 { m.deleteSlice(s.chunkid, s.size) } } if len(slices) < 100 { break } } return nil } func (m *redisMeta) doDeleteFileData(inode Ino, length uint64) { m.doDeleteFileData_(inode, length, "") } func (m *redisMeta) doDeleteFileData_(inode Ino, length uint64, tracking string) { var ctx = Background var indx uint32 p := m.rdb.Pipeline() for uint64(indx)*ChunkSize < length { var keys []string for i := 0; uint64(indx)*ChunkSize < length && i < 1000; i++ { key := m.chunkKey(inode, indx) keys = append(keys, key) _ = p.LLen(ctx, key) indx++ } cmds, err := p.Exec(ctx) if err != nil { logger.Warnf("delete chunks of inode %d: %s", inode, err) return } for i, cmd := range cmds { val, err := cmd.(*redis.IntCmd).Result() if err == redis.Nil || val == 0 { continue } idx, _ := strconv.Atoi(strings.Split(keys[i][len(m.prefix):], "_")[1]) err = m.deleteChunk(inode, uint32(idx)) if err != nil { logger.Warnf("delete chunk %s: %s", keys[i], err) return } } } if tracking == "" { tracking = inode.String() + ":" + strconv.FormatInt(int64(length), 10) } _ = m.rdb.ZRem(ctx, m.delfiles(), tracking) } func (r *redisMeta) doCleanupDelayedSlices(edge int64, limit int) (int, error) { ctx := Background stop := fmt.Errorf("reach limit") var count int var ss []Slice var rs []*redis.IntCmd err := r.hscan(ctx, r.delSlices(), func(keys []string) error { for i := 0; i < len(keys); i += 2 { key := keys[i] ps := strings.Split(key, "_") if len(ps) != 2 { logger.Warnf("Invalid key %s", key) continue } if ts, e := strconv.ParseInt(ps[1], 10, 64); e != nil { logger.Warnf("Invalid key %s", key) continue } else if ts >= edge { continue } if err := r.txn(ctx, func(tx *redis.Tx) error { val, e := tx.HGet(ctx, r.delSlices(), key).Result() if e == redis.Nil { return nil } else if e != nil { return e } ss, rs = ss[:0], rs[:0] buf := []byte(val) r.decodeDelayedSlices(buf, &ss) if len(ss) == 0 { return fmt.Errorf("invalid value for delSlices %s: %v", key, buf) } _, e = tx.Pipelined(ctx, func(pipe redis.Pipeliner) error { for _, s := range ss { rs = append(rs, pipe.HIncrBy(ctx, r.sliceRefs(), r.sliceKey(s.Chunkid, s.Size), -1)) } pipe.HDel(ctx, r.delSlices(), key) return nil }) return e }, r.delSlices()); err != nil { logger.Warnf("Cleanup delSlices %s: %s", key, err) continue } for i, s := range ss { if rs[i].Err() == nil && rs[i].Val() < 0 { r.deleteSlice(s.Chunkid, s.Size) count++ } } if count >= limit { return stop } } return nil }) if err == stop { err = nil } return count, err } func (m *redisMeta) compactChunk(inode Ino, indx uint32, force bool) 
{ // avoid too many or duplicated compaction if !force { m.Lock() k := uint64(inode) + (uint64(indx) << 32) if len(m.compacting) > 10 || m.compacting[k] { m.Unlock() return } m.compacting[k] = true m.Unlock() defer func() { m.Lock() delete(m.compacting, k) m.Unlock() }() } var ctx = Background vals, err := m.rdb.LRange(ctx, m.chunkKey(inode, indx), 0, 1000).Result() if err != nil { return } ss := readSlices(vals) skipped := skipSome(ss) ss = ss[skipped:] pos, size, chunks := compactChunk(ss) if len(ss) < 2 || size == 0 { return } var chunkid uint64 st := m.NewChunk(ctx, &chunkid) if st != 0 { return } logger.Debugf("compact %d:%d: skipped %d slices (%d bytes) %d slices (%d bytes)", inode, indx, skipped, pos, len(ss), size) err = m.newMsg(CompactChunk, chunks, chunkid) if err != nil { if !strings.Contains(err.Error(), "not exist") && !strings.Contains(err.Error(), "not found") { logger.Warnf("compact %d %d with %d slices: %s", inode, indx, len(ss), err) } return } var buf []byte // trash enabled: track delayed slices var rs []*redis.IntCmd // trash disabled: check reference of slices trash := m.toTrash(0) if trash { for _, s := range ss { if s.chunkid > 0 { buf = append(buf, m.encodeDelayedSlice(s.chunkid, s.size)...) } } } else { rs = make([]*redis.IntCmd, len(ss)) } key := m.chunkKey(inode, indx) errno := errno(m.txn(ctx, func(tx *redis.Tx) error { vals2, err := tx.LRange(ctx, key, 0, int64(len(vals)-1)).Result() if err != nil { return err } if len(vals2) != len(vals) { return syscall.EINVAL } for i, val := range vals2 { if val != vals[i] { return syscall.EINVAL } } _, err = tx.Pipelined(ctx, func(pipe redis.Pipeliner) error { pipe.LTrim(ctx, key, int64(len(vals)), -1) pipe.LPush(ctx, key, marshalSlice(pos, chunkid, size, 0, size)) for i := skipped; i > 0; i-- { pipe.LPush(ctx, key, vals[i-1]) } pipe.HSet(ctx, m.sliceRefs(), m.sliceKey(chunkid, size), "0") // create the key to tracking it if trash { if len(buf) > 0 { pipe.HSet(ctx, m.delSlices(), fmt.Sprintf("%d_%d", chunkid, time.Now().Unix()), buf) } } else { for i, s := range ss { if s.chunkid > 0 { rs[i] = pipe.HIncrBy(ctx, m.sliceRefs(), m.sliceKey(s.chunkid, s.size), -1) } } } return nil }) return err }, key)) // there could be false-negative that the compaction is successful, double-check if errno != 0 && errno != syscall.EINVAL { if e := m.rdb.HGet(ctx, m.sliceRefs(), m.sliceKey(chunkid, size)).Err(); e == redis.Nil { errno = syscall.EINVAL // failed } else if e == nil { errno = 0 // successful } } if errno == syscall.EINVAL { m.rdb.HIncrBy(ctx, m.sliceRefs(), m.sliceKey(chunkid, size), -1) logger.Infof("compaction for %d:%d is wasted, delete slice %d (%d bytes)", inode, indx, chunkid, size) m.deleteSlice(chunkid, size) } else if errno == 0 { m.of.InvalidateChunk(inode, indx) m.cleanupZeroRef(m.sliceKey(chunkid, size)) if !trash { for i, s := range ss { if s.chunkid > 0 && rs[i].Err() == nil && rs[i].Val() < 0 { m.deleteSlice(s.chunkid, s.size) } } } } else { logger.Warnf("compact %s: %s", key, errno) } if force { m.compactChunk(inode, indx, force) } else { go func() { // wait for the current compaction to finish time.Sleep(time.Millisecond * 10) m.compactChunk(inode, indx, force) }() } } func (m *redisMeta) CompactAll(ctx Context, bar *utils.Bar) syscall.Errno { p := m.rdb.Pipeline() return errno(m.scan(ctx, "c*_*", func(keys []string) error { bar.IncrTotal(int64(len(keys))) for _, key := range keys { _ = p.LLen(ctx, key) } cmds, err := p.Exec(ctx) if err != nil { logger.Warnf("list slices: %s", err) return errno(err) } for i, 
cmd := range cmds { cnt := cmd.(*redis.IntCmd).Val() if cnt > 1 { var inode uint64 var indx uint32 n, err := fmt.Sscanf(keys[i], m.prefix+"c%d_%d", &inode, &indx) if err == nil && n == 2 { logger.Debugf("compact chunk %d:%d (%d slices)", inode, indx, cnt) m.compactChunk(Ino(inode), indx, true) } } bar.Increment() } return nil })) } func (m *redisMeta) cleanupLeakedInodes(delete bool) { var ctx = Background var foundInodes = make(map[Ino]struct{}) foundInodes[RootInode] = struct{}{} foundInodes[TrashInode] = struct{}{} cutoff := time.Now().Add(time.Hour * -1) prefix := len(m.prefix) _ = m.scan(ctx, "d*", func(keys []string) error { for _, key := range keys { ino, _ := strconv.Atoi(key[prefix+1:]) var entries []*Entry eno := m.doReaddir(ctx, Ino(ino), 0, &entries, 0) if eno != syscall.ENOENT && eno != 0 { logger.Errorf("readdir %d: %s", ino, eno) return eno } for _, e := range entries { foundInodes[e.Inode] = struct{}{} } } return nil }) _ = m.scan(ctx, "i*", func(keys []string) error { values, err := m.rdb.MGet(ctx, keys...).Result() if err != nil { logger.Warnf("mget inodes: %s", err) return nil } for i, v := range values { if v == nil { continue } var attr Attr m.parseAttr([]byte(v.(string)), &attr) ino, _ := strconv.Atoi(keys[i][prefix+1:]) if _, ok := foundInodes[Ino(ino)]; !ok && time.Unix(attr.Ctime, 0).Before(cutoff) { logger.Infof("found dangling inode: %s %+v", keys[i], attr) if delete { err = m.doDeleteSustainedInode(0, Ino(ino)) if err != nil { logger.Errorf("delete leaked inode %d : %s", ino, err) } } } } return nil }) } func (m *redisMeta) scan(ctx context.Context, pattern string, f func([]string) error) error { var rdb *redis.Client if c, ok := m.rdb.(*redis.ClusterClient); ok { var err error rdb, err = c.MasterForKey(ctx, m.prefix) if err != nil { return err } } else { rdb = m.rdb.(*redis.Client) } var cursor uint64 for { keys, c, err := rdb.Scan(ctx, cursor, m.prefix+pattern, 10000).Result() if err != nil { logger.Warnf("scan %s: %s", pattern, err) return err } if len(keys) > 0 { err = f(keys) if err != nil { return err } } if c == 0 { break } cursor = c } return nil } func (m *redisMeta) hscan(ctx context.Context, key string, f func([]string) error) error { var cursor uint64 for { keys, c, err := m.rdb.HScan(ctx, key, cursor, "*", 10000).Result() if err != nil { logger.Warnf("HSCAN %s: %s", key, err) return err } if len(keys) > 0 { if err = f(keys); err != nil { return err } } if c == 0 { break } cursor = c } return nil } func (m *redisMeta) ListSlices(ctx Context, slices map[Ino][]Slice, delete bool, showProgress func()) syscall.Errno { m.cleanupLeakedInodes(delete) m.cleanupLeakedChunks() m.cleanupOldSliceRefs() if delete { m.doCleanupSlices() } p := m.rdb.Pipeline() err := m.scan(ctx, "c*_*", func(keys []string) error { for _, key := range keys { _ = p.LRange(ctx, key, 0, 100000000) } cmds, err := p.Exec(ctx) if err != nil { logger.Warnf("list slices: %s", err) return err } for _, cmd := range cmds { key := cmd.(*redis.StringSliceCmd).Args()[1].(string) inode, _ := strconv.Atoi(strings.Split(key[len(m.prefix)+1:], "_")[0]) vals := cmd.(*redis.StringSliceCmd).Val() ss := readSlices(vals) for _, s := range ss { if s.chunkid > 0 { slices[Ino(inode)] = append(slices[Ino(inode)], Slice{Chunkid: s.chunkid, Size: s.size}) if showProgress != nil { showProgress() } } } } return nil }) if err != nil || m.fmt.TrashDays == 0 { return errno(err) } var ss []Slice err = m.hscan(ctx, m.delSlices(), func(keys []string) error { for i := 0; i < len(keys); i += 2 { ss = ss[:0] 
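// reuse ss across iterations so decoding does not reallocate the buffer for every key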
m.decodeDelayedSlices([]byte(keys[i+1]), &ss) if showProgress != nil { for range ss { showProgress() } } for _, s := range ss { if s.Chunkid > 0 { slices[1] = append(slices[1], s) } } } return nil }) return errno(err) } func (m *redisMeta) GetXattr(ctx Context, inode Ino, name string, vbuff *[]byte) syscall.Errno { defer m.timeit(time.Now()) inode = m.checkRoot(inode) var err error *vbuff, err = m.rdb.HGet(ctx, m.xattrKey(inode), name).Bytes() if err == redis.Nil { err = ENOATTR } return errno(err) } func (m *redisMeta) ListXattr(ctx Context, inode Ino, names *[]byte) syscall.Errno { defer m.timeit(time.Now()) inode = m.checkRoot(inode) vals, err := m.rdb.HKeys(ctx, m.xattrKey(inode)).Result() if err != nil { return errno(err) } *names = nil for _, name := range vals { *names = append(*names, []byte(name)...) *names = append(*names, 0) } return 0 } func (m *redisMeta) doSetXattr(ctx Context, inode Ino, name string, value []byte, flags uint32) syscall.Errno { c := Background key := m.xattrKey(inode) return errno(m.txn(ctx, func(tx *redis.Tx) error { switch flags { case XattrCreate: ok, err := tx.HSetNX(c, key, name, value).Result() if err != nil { return err } if !ok { return syscall.EEXIST } return nil case XattrReplace: if ok, err := tx.HExists(c, key, name).Result(); err != nil { return err } else if !ok { return ENOATTR } _, err := m.rdb.HSet(ctx, key, name, value).Result() return err default: // XattrCreateOrReplace _, err := m.rdb.HSet(ctx, key, name, value).Result() return err } }, key)) } func (m *redisMeta) doRemoveXattr(ctx Context, inode Ino, name string) syscall.Errno { n, err := m.rdb.HDel(ctx, m.xattrKey(inode), name).Result() if err != nil { return errno(err) } else if n == 0 { return ENOATTR } else { return 0 } } func (m *redisMeta) checkServerConfig() { rawInfo, err := m.rdb.Info(Background).Result() if err != nil { logger.Warnf("parse info: %s", err) return } rInfo, err := checkRedisInfo(rawInfo) if err != nil { logger.Warnf("parse info: %s", err) } if rInfo.maxMemoryPolicy != "noeviction" { if _, err := m.rdb.ConfigSet(Background, "maxmemory-policy", "noeviction").Result(); err != nil { logger.Errorf("try to reconfigure maxmemory-policy to 'noeviction' failed: %s", err) } else if result, err := m.rdb.ConfigGet(Background, "maxmemory-policy").Result(); err != nil { logger.Warnf("get config maxmemory-policy failed: %s", err) } else if len(result) == 2 && result[1] != "noeviction" { logger.Warnf("reconfigured maxmemory-policy to 'noeviction', but it's still %s", result[1]) } else { logger.Infof("set maxmemory-policy to 'noeviction' successfully") } } start := time.Now() _ = m.rdb.Ping(Background) logger.Infof("Ping redis: %s", time.Since(start)) } var entryPool = sync.Pool{ New: func() interface{} { return &DumpedEntry{ Attr: &DumpedAttr{}, } }, } func (m *redisMeta) dumpEntries(es ...*DumpedEntry) error { ctx := Background var keys []string for _, e := range es { keys = append(keys, m.inodeKey(e.Attr.Inode)) } return m.txn(ctx, func(tx *redis.Tx) error { p := tx.Pipeline() var ar = make([]*redis.StringCmd, len(es)) var xr = make([]*redis.StringStringMapCmd, len(es)) var sr = make([]*redis.StringCmd, len(es)) var cr = make([]*redis.StringSliceCmd, len(es)) var dr = make([]*redis.StringStringMapCmd, len(es)) for i, e := range es { inode := e.Attr.Inode ar[i] = p.Get(ctx, m.inodeKey(inode)) xr[i] = p.HGetAll(ctx, m.xattrKey(inode)) switch e.Attr.Type { case "regular": cr[i] = p.LRange(ctx, m.chunkKey(inode, 0), 0, 1000000) case "directory": dr[i] = p.HGetAll(ctx, 
m.entryKey(inode)) case "symlink": sr[i] = p.Get(ctx, m.symKey(inode)) } } if _, err := p.Exec(ctx); err != nil && err != redis.Nil { return err } type lchunk struct { inode Ino indx uint32 i uint32 } var lcs []*lchunk for i, e := range es { inode := e.Attr.Inode typ := typeFromString(e.Attr.Type) a, err := ar[i].Bytes() if err != nil { if err != redis.Nil { return err } logger.Warnf("The entry of the inode was not found. inode: %v", inode) } var attr Attr attr.Typ = typ attr.Nlink = 1 m.parseAttr(a, &attr) if attr.Typ != typ { e.Attr.Type = typeToString(attr.Typ) return redis.TxFailedErr // retry } dumpAttr(&attr, e.Attr) keys, err := xr[i].Result() if err != nil { return err } if len(keys) > 0 { xattrs := make([]*DumpedXattr, 0, len(keys)) for k, v := range keys { xattrs = append(xattrs, &DumpedXattr{k, v}) } sort.Slice(xattrs, func(i, j int) bool { return xattrs[i].Name < xattrs[j].Name }) e.Xattrs = xattrs } switch typ { case TypeFile: if attr.Length > 0 { vals, err := cr[i].Result() if err != nil { return err } if len(vals) > 0 { ss := readSlices(vals) slices := make([]*DumpedSlice, 0, len(ss)) for _, s := range ss { slices = append(slices, &DumpedSlice{Chunkid: s.chunkid, Pos: s.pos, Size: s.size, Off: s.off, Len: s.len}) } e.Chunks = append(e.Chunks, &DumpedChunk{0, slices}) } } if attr.Length > ChunkSize { for indx := uint32(1); uint64(indx)*ChunkSize < attr.Length; indx++ { lcs = append(lcs, &lchunk{inode, indx, uint32(i)}) } } case TypeDirectory: dirs, err := dr[i].Result() if err != nil { return err } e.Entries = make(map[string]*DumpedEntry) for name := range dirs { t, inode := m.parseEntry([]byte(dirs[name])) ce := entryPool.Get().(*DumpedEntry) ce.Attr.Inode = inode ce.Attr.Type = typeToString(t) e.Entries[name] = ce } case TypeSymlink: if e.Symlink, err = sr[i].Result(); err != nil { if err != redis.Nil { return err } logger.Warnf("The symlink of inode %d is not found", inode) } } } cr = make([]*redis.StringSliceCmd, len(es)*3) for len(lcs) > 0 { if len(cr) > len(lcs) { cr = cr[:len(lcs)] } for i := range cr { c := lcs[i] cr[i] = p.LRange(ctx, m.chunkKey(c.inode, c.indx), 0, 1000000) } if _, err := p.Exec(ctx); err != nil { return err } for i := range cr { vals, err := cr[i].Result() if err != nil { return err } if len(vals) > 0 { ss := readSlices(vals) slices := make([]*DumpedSlice, 0, len(ss)) for _, s := range ss { slices = append(slices, &DumpedSlice{Chunkid: s.chunkid, Pos: s.pos, Size: s.size, Off: s.off, Len: s.len}) } e := es[lcs[i].i] e.Chunks = append(e.Chunks, &DumpedChunk{lcs[i].indx, slices}) } } lcs = lcs[len(cr):] } return nil }, keys...) } func (m *redisMeta) dumpDir(inode Ino, tree *DumpedEntry, bw *bufio.Writer, depth int, bar *utils.Bar) error { bwWrite := func(s string) { if _, err := bw.WriteString(s); err != nil { panic(err) } } var err error entries := make([]*DumpedEntry, 0, len(tree.Entries)) for name, e := range tree.Entries { e.Name = name entries = append(entries, e) } if err = tree.writeJsonWithOutEntry(bw, depth); err != nil { return err } sort.Slice(entries, func(i, j int) bool { return entries[i].Name < entries[j].Name }) var concurrent = 2 var batch = 100 ms := make([]sync.Mutex, concurrent) conds := make([]*sync.Cond, concurrent) ready := make([]int, concurrent) for c := 0; c < concurrent; c++ { conds[c] = sync.NewCond(&ms[c]) if c*batch < len(entries) { go func(c int) { for i := c * batch; i < len(entries); i += concurrent * batch { es := entries[i:] if len(es) > batch { es = es[:batch] } e := m.dumpEntries(es...) 
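// hand this batch to the writer under the per-worker lock, then block until the consumer drains it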
ms[c].Lock() ready[c] = len(es) if e != nil { err = e } conds[c].Signal() ms[c].Unlock() ms[c].Lock() for ready[c] > 0 { conds[c].Wait() } ms[c].Unlock() } }(c) } } for i, e := range entries { b := i / batch c := b % concurrent ms[c].Lock() for ready[c] == 0 { conds[c].Wait() } ready[c]-- if ready[c] == 0 { conds[c].Signal() } ms[c].Unlock() if err != nil { return err } if e.Attr.Type == "directory" { err = m.dumpDir(inode, e, bw, depth+2, bar) } else { err = e.writeJSON(bw, depth+2) } entries[i] = nil delete(tree.Entries, e.Name) e.Xattrs = nil e.Chunks = nil e.Entries = nil e.Symlink = "" entryPool.Put(e) if err != nil { return err } if i != len(entries)-1 { bwWrite(",") } if bar != nil { bar.IncrInt64(1) } } bwWrite(fmt.Sprintf("\n%s}\n%s}", strings.Repeat(jsonIndent, depth+1), strings.Repeat(jsonIndent, depth))) return nil } func (m *redisMeta) DumpMeta(w io.Writer, root Ino) (err error) { defer func() { if p := recover(); p != nil { if e, ok := p.(error); ok { debug.PrintStack() err = e } else { err = errors.Errorf("DumpMeta error: %v", p) } } }() ctx := Background zs, err := m.rdb.ZRangeWithScores(ctx, m.delfiles(), 0, -1).Result() if err != nil { return err } dels := make([]*DumpedDelFile, 0, len(zs)) for _, z := range zs { parts := strings.Split(z.Member.(string), ":") if len(parts) != 2 { logger.Warnf("invalid delfile string: %s", z.Member.(string)) continue } inode, _ := strconv.ParseUint(parts[0], 10, 64) length, _ := strconv.ParseUint(parts[1], 10, 64) dels = append(dels, &DumpedDelFile{Ino(inode), length, int64(z.Score)}) } names := []string{usedSpace, totalInodes, "nextinode", "nextchunk", "nextsession", "nextTrash"} for i := range names { names[i] = m.prefix + names[i] } rs, _ := m.rdb.MGet(ctx, names...).Result() cs := make([]int64, len(rs)) for i, r := range rs { if r != nil { cs[i], _ = strconv.ParseInt(r.(string), 10, 64) } } keys, err := m.rdb.ZRange(ctx, m.allSessions(), 0, -1).Result() if err != nil { return err } sessions := make([]*DumpedSustained, 0, len(keys)) for _, k := range keys { sid, _ := strconv.ParseUint(k, 10, 64) var ss []string ss, err = m.rdb.SMembers(ctx, m.sustained(sid)).Result() if err != nil { return err } if len(ss) > 0 { inodes := make([]Ino, 0, len(ss)) for _, s := range ss { inode, _ := strconv.ParseUint(s, 10, 64) inodes = append(inodes, Ino(inode)) } sessions = append(sessions, &DumpedSustained{sid, inodes}) } } dm := &DumpedMeta{ Setting: m.fmt, Counters: &DumpedCounters{ UsedSpace: cs[0], UsedInodes: cs[1], NextInode: cs[2] + 1, // Redis nextInode/nextChunk is 1 smaller than sql/tkv NextChunk: cs[3] + 1, NextSession: cs[4], NextTrash: cs[5], }, Sustained: sessions, DelFiles: dels, } if dm.Setting.SecretKey != "" { dm.Setting.SecretKey = "removed" logger.Warnf("Secret key is removed for the sake of safety") } if dm.Setting.SessionToken != "" { dm.Setting.SessionToken = "removed" logger.Warnf("Session token is removed for the sake of safety") } bw, err := dm.writeJsonWithOutTree(w) if err != nil { return err } progress := utils.NewProgress(false, false) bar := progress.AddCountBar("Dumped entries", dm.Counters.UsedInodes) // with root root = m.checkRoot(root) var tree = &DumpedEntry{ Name: "FSTree", Attr: &DumpedAttr{ Inode: root, Type: typeToString(TypeDirectory), }, } if err = m.dumpEntries(tree); err != nil { return err } if err = m.dumpDir(root, tree, bw, 1, bar); err != nil { return err } if root == RootInode { trash := &DumpedEntry{ Name: "Trash", Attr: &DumpedAttr{ Inode: TrashInode, Type: typeToString(TypeDirectory), }, } if err = 
m.dumpEntries(trash); err != nil { return err } if _, err = bw.WriteString(","); err != nil { return err } if err = m.dumpDir(TrashInode, trash, bw, 1, bar); err != nil { return err } } if _, err = bw.WriteString("\n}\n"); err != nil { return err } progress.Done() return bw.Flush() } func (m *redisMeta) loadEntry(e *DumpedEntry, p redis.Pipeliner, tryExec func()) { ctx := Background inode := e.Attr.Inode attr := loadAttr(e.Attr) attr.Parent = e.Parents[0] batch := 100 if attr.Typ == TypeFile { attr.Length = e.Attr.Length for _, c := range e.Chunks { if len(c.Slices) == 0 { continue } slices := make([]string, 0, len(c.Slices)) for _, s := range c.Slices { slices = append(slices, string(marshalSlice(s.Pos, s.Chunkid, s.Size, s.Off, s.Len))) if len(slices) > batch { p.RPush(ctx, m.chunkKey(inode, c.Index), slices) tryExec() slices = slices[:0] } } if len(slices) > 0 { p.RPush(ctx, m.chunkKey(inode, c.Index), slices) } } } else if attr.Typ == TypeDirectory { attr.Length = 4 << 10 dentries := make(map[string]interface{}, batch) for name, c := range e.Entries { dentries[string(unescape(name))] = m.packEntry(typeFromString(c.Attr.Type), c.Attr.Inode) if len(dentries) >= batch { p.HSet(ctx, m.entryKey(inode), dentries) tryExec() dentries = make(map[string]interface{}, batch) } } if len(dentries) > 0 { p.HSet(ctx, m.entryKey(inode), dentries) } } else if attr.Typ == TypeSymlink { symL := unescape(e.Symlink) attr.Length = uint64(len(symL)) p.Set(ctx, m.symKey(inode), symL, 0) } if len(e.Xattrs) > 0 { xattrs := make(map[string]interface{}) for _, x := range e.Xattrs { xattrs[x.Name] = unescape(x.Value) } p.HSet(ctx, m.xattrKey(inode), xattrs) } p.Set(ctx, m.inodeKey(inode), m.marshal(attr), 0) tryExec() } func (m *redisMeta) LoadMeta(r io.Reader) (err error) { ctx := Background if _, ok := m.rdb.(*redis.ClusterClient); ok { err = m.scan(ctx, "*", func(keys []string) error { return fmt.Errorf("found key with same prefix: %s", keys[0]) }) if err != nil { return err } } else { dbsize, err := m.rdb.DBSize(ctx).Result() if err != nil { return err } if dbsize > 0 { return fmt.Errorf("Database redis://%s is not empty", m.addr) } } p := m.rdb.TxPipeline() tryExec := func() { if p.Len() > 1000 { if rs, err := p.Exec(ctx); err != nil { for i, r := range rs { if r.Err() != nil { logger.Errorf("failed command %d %+v: %s", i, r, r.Err()) break } } panic(err) } } } defer func() { if e := recover(); e != nil { if ee, ok := e.(error); ok { err = ee } else { panic(e) } } }() dm, counters, parents, refs, err := loadEntries(r, func(e *DumpedEntry) { m.loadEntry(e, p, tryExec) }, nil) if err != nil { return err } format, _ := json.MarshalIndent(dm.Setting, "", "") p.Set(ctx, m.setting(), format, 0) cs := make(map[string]interface{}) cs[m.prefix+usedSpace] = counters.UsedSpace cs[m.prefix+totalInodes] = counters.UsedInodes cs[m.prefix+"nextinode"] = counters.NextInode - 1 cs[m.prefix+"nextchunk"] = counters.NextChunk - 1 cs[m.prefix+"nextsession"] = counters.NextSession cs[m.prefix+"nextTrash"] = counters.NextTrash p.MSet(ctx, cs) if l := len(dm.DelFiles); l > 0 { if l > 100 { l = 100 } zs := make([]*redis.Z, 0, l) for _, d := range dm.DelFiles { if len(zs) >= 100 { p.ZAdd(ctx, m.delfiles(), zs...) tryExec() zs = zs[:0] } zs = append(zs, &redis.Z{ Score: float64(d.Expire), Member: m.toDelete(d.Inode, d.Length), }) } p.ZAdd(ctx, m.delfiles(), zs...) 
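// flush whatever is left over from the batched ZADD calls above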
} slices := make(map[string]interface{}) for k, v := range refs { if v > 1 { if len(slices) > 100 { p.HSet(ctx, m.sliceRefs(), slices) tryExec() slices = make(map[string]interface{}) } slices[m.sliceKey(k.id, k.size)] = v - 1 } } if len(slices) > 0 { p.HSet(ctx, m.sliceRefs(), slices) } if _, err = p.Exec(ctx); err != nil { return err } // update nlinks and parents for hardlinks st := make(map[Ino]int64) for i, ps := range parents { if len(ps) > 1 { a, _ := m.rdb.Get(ctx, m.inodeKey(i)).Bytes() // reset nlink and parent binary.BigEndian.PutUint32(a[47:51], uint32(len(ps))) // nlink binary.BigEndian.PutUint64(a[63:71], 0) p.Set(ctx, m.inodeKey(i), a, 0) for k := range st { delete(st, k) } for _, p := range ps { st[p] = st[p] + 1 } for parent, c := range st { p.HIncrBy(ctx, m.parentKey(i), parent.String(), c) } } } _, err = p.Exec(ctx) return err }
[ "\"REDIS_PASSWORD\"", "\"META_PASSWORD\"", "\"SENTINEL_PASSWORD\"" ]
[]
[ "SENTINEL_PASSWORD", "REDIS_PASSWORD", "META_PASSWORD" ]
[]
["SENTINEL_PASSWORD", "REDIS_PASSWORD", "META_PASSWORD"]
go
3
0
pkg/ingress/controller/controller.go
// Licensed to the Apache Software Foundation (ASF) under one or more // contributor license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright ownership. // The ASF licenses this file to You under the Apache License, Version 2.0 // (the "License"); you may not use this file except in compliance with // the License. You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package controller import ( "context" "os" "sync" "time" clientSet "github.com/gxthrj/apisix-ingress-types/pkg/client/clientset/versioned" crdclientset "github.com/gxthrj/apisix-ingress-types/pkg/client/clientset/versioned" "github.com/gxthrj/apisix-ingress-types/pkg/client/informers/externalversions" "go.uber.org/zap" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/informers" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" "github.com/api7/ingress-controller/pkg/api" "github.com/api7/ingress-controller/pkg/apisix" "github.com/api7/ingress-controller/pkg/config" "github.com/api7/ingress-controller/pkg/kube" "github.com/api7/ingress-controller/pkg/log" "github.com/api7/ingress-controller/pkg/metrics" "github.com/api7/ingress-controller/pkg/seven/conf" ) // recover any exception func recoverException() { if err := recover(); err != nil { log.Error(err) } } // Controller is the ingress apisix controller object. type Controller struct { name string namespace string cfg *config.Config wg sync.WaitGroup watchingNamespace map[string]struct{} apisix apisix.APISIX apiServer *api.Server clientset kubernetes.Interface crdClientset crdclientset.Interface metricsCollector metrics.Collector crdController *Api6Controller crdInformerFactory externalversions.SharedInformerFactory } // NewController creates an ingress apisix controller object. 
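// It reads its identity from the POD_NAME and POD_NAMESPACE environment variables, defaulting the namespace to "default" when unset.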
func NewController(cfg *config.Config) (*Controller, error) { podName := os.Getenv("POD_NAME") podNamespace := os.Getenv("POD_NAMESPACE") if podNamespace == "" { podNamespace = "default" } client, err := apisix.NewClient() if err != nil { return nil, err } conf.SetAPISIXClient(client) if err := kube.InitInformer(cfg); err != nil { return nil, err } apiSrv, err := api.NewServer(cfg) if err != nil { return nil, err } crdClientset := kube.GetApisixClient() sharedInformerFactory := externalversions.NewSharedInformerFactory(crdClientset, cfg.Kubernetes.ResyncInterval.Duration) var watchingNamespace map[string]struct{} if len(cfg.Kubernetes.AppNamespaces) > 1 || cfg.Kubernetes.AppNamespaces[0] != v1.NamespaceAll { watchingNamespace = make(map[string]struct{}, len(cfg.Kubernetes.AppNamespaces)) for _, ns := range cfg.Kubernetes.AppNamespaces { watchingNamespace[ns] = struct{}{} } } c := &Controller{ name: podName, namespace: podNamespace, cfg: cfg, apiServer: apiSrv, apisix: client, metricsCollector: metrics.NewPrometheusCollector(podName, podNamespace), clientset: kube.GetKubeClient(), crdClientset: crdClientset, crdInformerFactory: sharedInformerFactory, watchingNamespace: watchingNamespace, } return c, nil } func (c *Controller) goAttach(handler func()) { c.wg.Add(1) go func() { defer c.wg.Done() handler() }() } // Eventf implements the resourcelock.EventRecorder interface. func (c *Controller) Eventf(_ runtime.Object, eventType string, reason string, message string, _ ...interface{}) { log.Infow(reason, zap.String("message", message), zap.String("event_type", eventType)) } // Run launches the controller. func (c *Controller) Run(stop chan struct{}) error { rootCtx, rootCancel := context.WithCancel(context.Background()) defer rootCancel() go func() { <-stop rootCancel() }() c.metricsCollector.ResetLeader(false) go func() { if err := c.apiServer.Run(rootCtx.Done()); err != nil { log.Errorf("failed to launch API Server: %s", err) } }() lock := &resourcelock.LeaseLock{ LeaseMeta: metav1.ObjectMeta{ Namespace: c.namespace, Name: c.cfg.Kubernetes.ElectionID, }, Client: c.clientset.CoordinationV1(), LockConfig: resourcelock.ResourceLockConfig{ Identity: c.name, EventRecorder: c, }, } cfg := leaderelection.LeaderElectionConfig{ Lock: lock, LeaseDuration: 15 * time.Second, RenewDeadline: 5 * time.Second, RetryPeriod: 2 * time.Second, Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: c.run, OnNewLeader: func(identity string) { log.Warnf("found a new leader %s", identity) if identity != c.name { log.Infow("controller now is running as a candidate", zap.String("namespace", c.namespace), zap.String("pod", c.name), ) } }, OnStoppedLeading: func() { log.Infow("controller now is running as a candidate", zap.String("namespace", c.namespace), zap.String("pod", c.name), ) c.metricsCollector.ResetLeader(false) }, }, ReleaseOnCancel: true, Name: "ingress-apisix", } elector, err := leaderelection.NewLeaderElector(cfg) if err != nil { log.Errorf("failed to create leader elector: %s", err.Error()) return err } election: elector.Run(rootCtx) select { case <-rootCtx.Done(): return nil default: goto election } } func (c *Controller) run(ctx context.Context) { log.Infow("controller now is running as leader", zap.String("namespace", c.namespace), zap.String("pod", c.name), ) c.metricsCollector.ResetLeader(true) err := c.apisix.AddCluster(&apisix.ClusterOptions{ Name: "", AdminKey: c.cfg.APISIX.AdminKey, BaseURL: c.cfg.APISIX.BaseURL, }) if err != nil { // TODO give up the leader role. 
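// For now the controller only logs the error and stops reconciling while still holding the lease.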
log.Errorf("failed to add default cluster: %s", err) return } if err := c.apisix.Cluster("").HasSynced(ctx); err != nil { // TODO give up the leader role. log.Errorf("failed to wait the default cluster to be ready: %s", err) return } ac := &Api6Controller{ KubeClientSet: c.clientset, Api6ClientSet: c.crdClientset, SharedInformerFactory: c.crdInformerFactory, CoreSharedInformerFactory: kube.CoreSharedInformerFactory, Stop: ctx.Done(), } epInformer := ac.CoreSharedInformerFactory.Core().V1().Endpoints() kube.EndpointsInformer = epInformer // endpoint ac.Endpoint(c) c.goAttach(func() { ac.CoreSharedInformerFactory.Start(ctx.Done()) }) // ApisixRoute ac.ApisixRoute(c) // ApisixUpstream ac.ApisixUpstream(c) // ApisixService ac.ApisixService(c) // ApisixTLS ac.ApisixTLS(c) c.goAttach(func() { ac.SharedInformerFactory.Start(ctx.Done()) }) <-ctx.Done() c.wg.Wait() } // namespaceWatching accepts a resource key, getting the namespace part // and checking whether the namespace is being watched. func (c *Controller) namespaceWatching(key string) (ok bool) { if c.watchingNamespace == nil { ok = true return } ns, _, err := cache.SplitMetaNamespaceKey(key) if err != nil { // Ignore resource with invalid key. ok = false log.Warnf("resource %s was ignored since: %s", key, err) return } _, ok = c.watchingNamespace[ns] return } type Api6Controller struct { KubeClientSet kubernetes.Interface Api6ClientSet clientSet.Interface SharedInformerFactory externalversions.SharedInformerFactory CoreSharedInformerFactory informers.SharedInformerFactory Stop <-chan struct{} } func (api6 *Api6Controller) ApisixRoute(controller *Controller) { arc := BuildApisixRouteController( api6.KubeClientSet, api6.Api6ClientSet, api6.SharedInformerFactory.Apisix().V1().ApisixRoutes(), controller) arc.Run(api6.Stop) } func (api6 *Api6Controller) ApisixUpstream(controller *Controller) { auc := BuildApisixUpstreamController( api6.KubeClientSet, api6.Api6ClientSet, api6.SharedInformerFactory.Apisix().V1().ApisixUpstreams(), controller) auc.Run(api6.Stop) } func (api6 *Api6Controller) ApisixService(controller *Controller) { auc := BuildApisixServiceController( api6.KubeClientSet, api6.Api6ClientSet, api6.SharedInformerFactory.Apisix().V1().ApisixServices(), controller) auc.Run(api6.Stop) } func (api6 *Api6Controller) ApisixTLS(controller *Controller) { auc := BuildApisixTlsController( api6.KubeClientSet, api6.Api6ClientSet, api6.SharedInformerFactory.Apisix().V1().ApisixTlses(), controller) auc.Run(api6.Stop) } func (api6 *Api6Controller) Endpoint(controller *Controller) { auc := BuildEndpointController(api6.KubeClientSet, controller) //conf.EndpointsInformer) auc.Run(api6.Stop) }
[ "\"POD_NAME\"", "\"POD_NAMESPACE\"" ]
[]
[ "POD_NAMESPACE", "POD_NAME" ]
[]
["POD_NAMESPACE", "POD_NAME"]
go
2
0
build.py
#!/usr/bin/env python3

import ctypes.util
import distutils.ccompiler
import os
import platform
import sys
import tempfile


# adapted from https://github.com/tree-sitter/py-tree-sitter
def build(repo_paths, output_path="libjava-tree-sitter"):
    if not repo_paths:
        raise ValueError("Must provide at least one language folder")

    output_path = f"{output_path}.{'dylib' if platform.system() == 'Darwin' else 'so'}"
    here = os.path.dirname(os.path.realpath(__file__))
    os.system(f"make -C {os.path.join(here, 'tree-sitter')} > /dev/null")

    cpp = False
    source_paths = [
        os.path.join(here, "lib", "ai_serenade_treesitter_TreeSitter.cc"),
        os.path.join(here, "lib", "ai_serenade_treesitter_Languages.cc"),
    ]

    compiler = distutils.ccompiler.new_compiler()
    for repo_path in repo_paths:
        src_path = os.path.join(repo_path, "src")
        source_paths.append(os.path.join(src_path, "parser.c"))

        scanner_c = os.path.join(src_path, "scanner.c")
        scanner_cc = os.path.join(src_path, "scanner.cc")
        if os.path.exists(scanner_cc):
            cpp = True
            source_paths.append(scanner_cc)
        elif os.path.exists(scanner_c):
            source_paths.append(scanner_c)

        compiler.define_macro(
            f"TS_LANGUAGE_{os.path.split(repo_path.rstrip('/'))[1].split('tree-sitter-')[-1].replace('-', '_').upper()}",
            "1",
        )

    source_mtimes = [os.path.getmtime(__file__)] + [
        os.path.getmtime(path) for path in source_paths
    ]

    if cpp:
        if ctypes.util.find_library("stdc++"):
            compiler.add_library("stdc++")
        elif ctypes.util.find_library("c++"):
            compiler.add_library("c++")

    output_mtime = os.path.getmtime(output_path) if os.path.exists(output_path) else 0
    if max(source_mtimes) <= output_mtime:
        return False

    with tempfile.TemporaryDirectory(suffix="tree_sitter_language") as out_dir:
        object_paths = []
        for source_path in source_paths:
            flags = ["-O3"]

            if platform.system() == "Linux":
                flags.append("-fPIC")

            if source_path.endswith(".c"):
                flags.append("-std=c99")

            include_dirs = [
                os.path.dirname(source_path),
                os.path.join(here, "tree-sitter", "lib", "include"),
                os.path.join(os.environ["JAVA_HOME"], "include"),
            ]

            if platform.system() == "Linux":
                include_dirs.append(
                    os.path.join(os.environ["JAVA_HOME"], "include", "linux")
                )
            elif platform.system() == "Darwin":
                include_dirs.append(
                    os.path.join(os.environ["JAVA_HOME"], "include", "darwin")
                )

            object_paths.append(
                compiler.compile(
                    [source_path],
                    output_dir=out_dir,
                    include_dirs=include_dirs,
                    extra_preargs=flags,
                )[0]
            )

        extra_preargs = []
        if platform.system() == "Darwin":
            extra_preargs = ["-dynamiclib"]

        compiler.link_shared_object(
            object_paths,
            output_path,
            extra_preargs=extra_preargs,
            extra_postargs=[os.path.join(here, "tree-sitter", "libtree-sitter.a")],
            library_dirs=[os.path.join(here, "tree-sitter")],
        )

    return True


if __name__ == "__main__":
    if len(sys.argv) < 3:
        print(
            "Usage: build.py libjava-tree-sitter ./tree-sitter-python ./tree-sitter-javascript"
        )
        sys.exit(1)

    distutils.log.set_verbosity(0)
    build(sys.argv[2:], sys.argv[1])
[]
[]
[ "JAVA_HOME" ]
[]
["JAVA_HOME"]
python
1
0
pm.py
#!/usr/bin/env python3
"""
------------------------------------------------------------------------
A re-write of my original pm shell script in python

pm is a script that is meant to act as a status tracker for my projects.
It will use VCS integration to provide in-depth information on all
projects. Cheers.

The quality of code is extremely bad. I'm not a python programmer and
this script is solely meant to be used by me but is extensible for other
users as well at your own risk obviously.

Author  : canopeerus
License : MIT
------------------------------------------------------------------------
"""
import os,sys,json,getopt,configparser

# some global variable declarations for directory and file locations
# will need to clean this up to not make these options hardcoded
homedir = os.getenv("HOME")
config_dir = homedir + "/.config/pm"
config_fil = config_dir + "/config"

# This is run every time to read configuration values like project locations
# Maybe this doesn't need to run every time. We'll see later
config = configparser.ConfigParser()
config.read(config_fil)

if config['OPTIONS']['DatabaseFileLocation'] == 'Default':
    db_fil = homedir + "/.cache/pm/db.json"
else:
    db_fil = config['OPTIONS']['DatabaseFileLocation']

# directory containing the database file
dbdir = os.path.dirname(db_fil)
db_fil_old = db_fil + ".old"
proj_dir = config['OPTIONS']['ProjectDirectory']


class color:
    FG_BLUE = "\033[1;34m"
    FG_CYAN = "\033[1;36m"
    FG_GREEN = "\033[0;32m"
    FG_RESET = "\033[0;0m"
    FG_BOLD = "\033[;1m"
    FG_GREY = '\033[90m'
    FG_BLACK = '\033[30m'
    REVERSE = "\033[;7m"
    END = '\033[0m'
    FG_RED = '\033[31m'
    BG_RED = '\033[41m'
    BG_GREEN = '\033[42m'
    BG_BLUE = '\033[46m'
    BG_GREY = '\033[47m'
    ULINE = '\033[4m'


class pmpy_info_class:
    version = '0.0.1'
    name = 'pmpy'
    license = 'MIT'
    author = 'canopeerus'


class misc_text_func:
    def query_yes_no(self, question, default="yes"):
        valid = {"yes": True, "y": True, "ye": True,
                 "no": False, "n": False}
        if default is None:
            prompt = " [y/n] "
        elif default == "yes":
            prompt = " [Y/n] "
        elif default == "no":
            prompt = " [y/N] "
        else:
            raise ValueError("invalid default answer: '%s'" % default)
        while True:
            sys.stdout.write(question + prompt)
            choice = input().lower()
            if default is not None and choice == '':
                return valid[default]
            elif choice in valid:
                return valid[choice]
            else:
                sys.stdout.write("Please respond with 'yes' or 'no' "
                                 "(or 'y' or 'n').\n")

    def print_help(self):
        sys.stdout.write("usage : pm [-ildhv] [ -m active,inactive,abandoned,complete]\n"+
                "Supported options:\n"+
                "\t-i : initialization process to populate project database\n"+
                "\t-d : Delete the central database json file\n"+
                "\t-h : Print usage help\n"+
                "\t-v : Print pmpy version info\n"+
                "\t-m : Set project status\n"+
                "\t-s <name> : Show detailed project information for one or all projects\n"+
                "\t-l : List the names of all projects\n"+
                "Status options : active,inactive,abandoned,complete\n"+
                "\nThis project is hosted at https://github.com/canopeerus/pmpy\n")
        sys.exit(1)

    def print_version(self):
        sys.stdout.write("pmpy version: "+pmpy_info_class.version+"\n"+
                "License: "+pmpy_info_class.license+"\n"+
                "Author: "+pmpy_info_class.author+"\n")


class pm_write_database:
    def delete_db_arg_func(self):
        local_screen = misc_text_func()
        if os.path.isfile(db_fil):
            if local_screen.query_yes_no("Are you sure you want to delete the database?"):
                os.remove(db_fil)
                sys.stdout.write(color.FG_GREEN+"Project database successfully deleted\n"+color.END)
            else:
                sys.stdout.write(color.FG_RED+"Operation aborted\n"+color.END)
                sys.exit(1)
        else:
            sys.stdout.write("Database not found. Run pm -i to populate database.\n")

    def backup(self, db_option="current"):
        if db_option == "old":
            os.remove(db_fil_old)
            # the rename already moves db_fil out of the way; removing it
            # again afterwards would raise FileNotFoundError
            os.rename(db_fil, db_fil_old)
        elif db_option == "current":
            os.rename(db_fil, db_fil_old)

    def pm_init(self):
        if os.path.isfile(db_fil) and os.path.isfile(db_fil_old):
            local_screen = misc_text_func()
            sys.stdout.write("There is a database file and a backup file already available!!\n")
            user_choice_init = local_screen.query_yes_no("Delete old db and backup current db file?")
            if user_choice_init:
                self.backup("old")
            else:
                sys.stdout.write(color.FG_RED+"Operation aborted!\n"+color.END)
                sys.exit(2)
        elif os.path.isfile(db_fil):
            sys.stdout.write("Found existing database file. Backing it up to db.json.old\n")
            self.backup("current")
        if not os.path.isdir(dbdir):
            os.mkdir(dbdir)
        sys.stdout.write("Beginning pm init process...\n")
        sys.stdout.write("Using projects location "+proj_dir+"\n")
        all_p_files = os.listdir(proj_dir)
        if len(all_p_files) == 0:
            sys.stdout.write(color.FG_RED+"No project directories found in central code directory!!\n"+color.END)
            sys.exit(3)
        else:
            db_file_out = open(db_fil, 'w+')
            proj_json_obj = {}
            proj_json_obj['project'] = []
            count = 0
            for i in all_p_files:
                if os.path.isdir(proj_dir+"/"+i):
                    count += 1
                    sys.stdout.write("\nShort description for "+i+" : ")
                    s_desc = input()
                    sys.stdout.write("Project status for "+i+" [active,inactive,complete,abandoned]: ")
                    p_status = input()
                    proj_json_obj['project'].append({
                        'name': i,
                        'status': p_status,
                        'short_desc': s_desc,
                        'author': 'canopeerus',
                        'location': proj_dir+"/"+i
                    })
            sys.stdout.write(color.FG_GREEN+"\nFound "+str(count)+" projects\n")
            json.dump(proj_json_obj, db_file_out)
            db_file_out.close()
            sys.stdout.write("Init process complete. Database created at "+db_fil+"\n"+color.END)


class pm_read_database:
    def list_projects(self):
        if not os.path.isfile(db_fil):
            sys.stdout.write("Project database not found. Run pmpy -i to populate the database\n")
        else:
            p_file_in = open(db_fil, 'r')
            data_dict = json.load(p_file_in)
            for pname in data_dict['project']:
                sys.stdout.write(pname['name']+"\n")
            p_file_in.close()

    def set_p_status_colour(self, pstatus) -> str:
        if pstatus == "active":
            return color.BG_GREEN + color.FG_BLACK + pstatus + color.END
        elif pstatus == "abandoned":
            return color.BG_RED + color.FG_BLACK + pstatus + color.END
        elif pstatus == "inactive":
            return color.BG_GREY + color.FG_BLACK + pstatus + color.END
        elif pstatus == "complete":
            return color.BG_GREEN + color.FG_BLACK + pstatus + color.END
        else:
            # unknown status: print it uncoloured rather than returning None
            return pstatus

    def show_single_project(self, name):
        """
        despite the misleading name this function will print out
        all projects too if you pass the all argument
        """
        if not os.path.isfile(db_fil):
            sys.stdout.write("Project database not found. Run pmpy -i to populate the database\n")
        else:
            p_file_in = open(db_fil, 'r')
            data_dict = json.load(p_file_in)
            if name == "all":
                for pname in data_dict['project']:
                    sys.stdout.write(
                        "Name : "+pname['name']+"\n"+
                        "Author : "+pname['author']+"\n"+
                        "Short description : "+pname['short_desc']+"\n"+
                        "Status : "+self.set_p_status_colour(pname['status'])+"\n"+
                        "Location : "+color.ULINE+pname['location']+color.END+"\n\n")
                sys.exit(3)
            else:
                for pname in data_dict['project']:
                    if name == pname['name']:
                        sys.stdout.write(
                            "Name : "+pname['name']+"\n"+
                            "Author : "+pname['author']+"\n"+
                            "Short description : "+pname['short_desc']+"\n"+
                            "Status : "+self.set_p_status_colour(pname['status'])+"\n"+
                            "Location : "+color.ULINE+pname['location']+color.END+"\n")
                        sys.exit(3)
                sys.stdout.write("No matching project found for "+name+"\n")


def main_func(argv):
    screen = misc_text_func()
    write_db = pm_write_database()
    read_db = pm_read_database()
    try:
        options, args = getopt.getopt(argv, "hldivms:",
                                      ["help","list","delete","init","version","show="])
    except getopt.GetoptError as err:
        sys.stdout.write(color.FG_RED + "pmpy : " + str(err) + color.END + "\n")
        screen.print_help()
    if len(argv) == 0:
        sys.stdout.write(color.FG_RED + "pmpy : No options specified\n\n" + color.END)
        screen.print_help()
    for opt, arg in options:
        if opt in ("-h","--help"):
            screen.print_help()
        elif opt in ("-d","--delete"):
            write_db.delete_db_arg_func()
            sys.exit(2)
        elif opt in ("-i","--init"):
            write_db.pm_init()
        elif opt in ("-v","--version"):
            screen.print_version()
        elif opt in ("-l","--list"):
            read_db.list_projects()
        elif opt in ("-s","--show"):
            proj_arg = arg
            read_db.show_single_project(proj_arg)
        elif opt == "-m":
            sys.stdout.write("Updating is not supported at the moment.\nRun pmpy -di to reinitiate with changes.\n")
        else:
            assert False


if __name__ == "__main__":
    main_func(sys.argv[1:])
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
statistics/selectivity_test.go
// Copyright 2017 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package statistics_test import ( "context" "fmt" "math" "os" "runtime/pprof" "strings" "testing" "time" . "github.com/pingcap/check" "github.com/pingcap/errors" "github.com/pingcap/failpoint" "github.com/pingcap/log" "github.com/pingcap/parser/model" "github.com/pingcap/parser/mysql" "github.com/pingcap/tidb/domain" "github.com/pingcap/tidb/kv" plannercore "github.com/pingcap/tidb/planner/core" "github.com/pingcap/tidb/session" "github.com/pingcap/tidb/sessionctx" "github.com/pingcap/tidb/sessionctx/stmtctx" "github.com/pingcap/tidb/statistics" "github.com/pingcap/tidb/statistics/handle" "github.com/pingcap/tidb/store/mockstore" "github.com/pingcap/tidb/types" "github.com/pingcap/tidb/util/codec" "github.com/pingcap/tidb/util/collate" "github.com/pingcap/tidb/util/ranger" "github.com/pingcap/tidb/util/testkit" "github.com/pingcap/tidb/util/testleak" "github.com/pingcap/tidb/util/testutil" "go.uber.org/zap" "go.uber.org/zap/zapcore" ) const eps = 1e-9 var _ = SerialSuites(&testStatsSuite{}) type testStatsSuite struct { store kv.Storage do *domain.Domain hook *logHook testData testutil.TestData } func (s *testStatsSuite) SetUpSuite(c *C) { testleak.BeforeTest() // Add the hook here to avoid data race. s.registerHook() var err error s.store, s.do, err = newStoreWithBootstrap() c.Assert(err, IsNil) s.testData, err = testutil.LoadTestSuiteData("testdata", "stats_suite") c.Assert(err, IsNil) } func (s *testStatsSuite) TearDownSuite(c *C) { s.do.Close() c.Assert(s.store.Close(), IsNil) testleak.AfterTest(c)() c.Assert(s.testData.GenerateOutputIfNeeded(), IsNil) } func (s *testStatsSuite) registerHook() { conf := &log.Config{Level: os.Getenv("log_level"), File: log.FileLogConfig{}} _, r, _ := log.InitLogger(conf) s.hook = &logHook{r.Core, ""} lg := zap.New(s.hook) log.ReplaceGlobals(lg, r) } type logHook struct { zapcore.Core results string } func (h *logHook) Write(entry zapcore.Entry, fields []zapcore.Field) error { message := entry.Message if idx := strings.Index(message, "[stats"); idx != -1 { h.results = h.results + message for _, f := range fields { h.results = h.results + ", " + f.Key + "=" + h.field2String(f) } } return nil } func (h *logHook) field2String(field zapcore.Field) string { switch field.Type { case zapcore.StringType: return field.String case zapcore.Int64Type, zapcore.Int32Type, zapcore.Uint32Type: return fmt.Sprintf("%v", field.Integer) case zapcore.Float64Type: return fmt.Sprintf("%v", math.Float64frombits(uint64(field.Integer))) case zapcore.StringerType: return field.Interface.(fmt.Stringer).String() } return "not support" } func (h *logHook) Check(e zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore.CheckedEntry { if h.Enabled(e.Level) { return ce.AddCore(e, h) } return ce } func newStoreWithBootstrap() (kv.Storage, *domain.Domain, error) { store, err := mockstore.NewMockStore() if err != nil { return nil, nil, errors.Trace(err) } session.SetSchemaLease(0) session.DisableStats4Test() domain.RunAutoAnalyze = false do, err := session.BootstrapSession(store) 
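// the bootstrap error is returned to the caller below; SetStatsUpdating assumes bootstrap succeeded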
do.SetStatsUpdating(true) return store, do, errors.Trace(err) } func cleanEnv(c *C, store kv.Storage, do *domain.Domain) { tk := testkit.NewTestKit(c, store) tk.MustExec("use test") r := tk.MustQuery("show tables") for _, tb := range r.Rows() { tableName := tb[0] tk.MustExec(fmt.Sprintf("drop table %v", tableName)) } tk.MustExec("delete from mysql.stats_meta") tk.MustExec("delete from mysql.stats_histograms") tk.MustExec("delete from mysql.stats_buckets") do.StatsHandle().Clear() } // generateIntDatum will generate a datum slice, every dimension is begin from 0, end with num - 1. // If dimension is x, num is y, the total number of datum is y^x. And This slice is sorted. func (s *testStatsSuite) generateIntDatum(dimension, num int) ([]types.Datum, error) { length := int(math.Pow(float64(num), float64(dimension))) ret := make([]types.Datum, length) if dimension == 1 { for i := 0; i < num; i++ { ret[i] = types.NewIntDatum(int64(i)) } } else { sc := &stmtctx.StatementContext{TimeZone: time.Local} // In this way, we can guarantee the datum is in order. for i := 0; i < length; i++ { data := make([]types.Datum, dimension) j := i for k := 0; k < dimension; k++ { data[dimension-k-1].SetInt64(int64(j % num)) j = j / num } bytes, err := codec.EncodeKey(sc, nil, data...) if err != nil { return nil, err } ret[i].SetBytes(bytes) } } return ret, nil } // mockStatsHistogram will create a statistics.Histogram, of which the data is uniform distribution. func mockStatsHistogram(id int64, values []types.Datum, repeat int64, tp *types.FieldType) *statistics.Histogram { ndv := len(values) histogram := statistics.NewHistogram(id, int64(ndv), 0, 0, tp, ndv, 0) for i := 0; i < ndv; i++ { histogram.AppendBucket(&values[i], &values[i], repeat*int64(i+1), repeat) } return histogram } func mockStatsTable(tbl *model.TableInfo, rowCount int64) *statistics.Table { histColl := statistics.HistColl{ PhysicalID: tbl.ID, HavePhysicalID: true, Count: rowCount, Columns: make(map[int64]*statistics.Column, len(tbl.Columns)), Indices: make(map[int64]*statistics.Index, len(tbl.Indices)), } statsTbl := &statistics.Table{ HistColl: histColl, } return statsTbl } func (s *testStatsSuite) prepareSelectivity(testKit *testkit.TestKit, c *C) *statistics.Table { testKit.MustExec("use test") testKit.MustExec("drop table if exists t") testKit.MustExec("create table t(a int primary key, b int, c int, d int, e int, index idx_cd(c, d), index idx_de(d, e))") is := s.do.InfoSchema() tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) tbl := tb.Meta() // mock the statistic table statsTbl := mockStatsTable(tbl, 540) // Set the value of columns' histogram. colValues, err := s.generateIntDatum(1, 54) c.Assert(err, IsNil) for i := 1; i <= 5; i++ { statsTbl.Columns[int64(i)] = &statistics.Column{Histogram: *mockStatsHistogram(int64(i), colValues, 10, types.NewFieldType(mysql.TypeLonglong)), Info: tbl.Columns[i-1]} } // Set the value of two indices' histograms. 
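// Both indices cover two columns, so the mocked datums are two-dimensional (3 values per column, 9 keys).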
idxValues, err := s.generateIntDatum(2, 3) c.Assert(err, IsNil) tp := types.NewFieldType(mysql.TypeBlob) statsTbl.Indices[1] = &statistics.Index{Histogram: *mockStatsHistogram(1, idxValues, 60, tp), Info: tbl.Indices[0]} statsTbl.Indices[2] = &statistics.Index{Histogram: *mockStatsHistogram(2, idxValues, 60, tp), Info: tbl.Indices[1]} return statsTbl } func (s *testStatsSuite) TestSelectivity(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) statsTbl := s.prepareSelectivity(testKit, c) is := s.do.InfoSchema() longExpr := "0 < a and a = 1 " for i := 1; i < 64; i++ { longExpr += fmt.Sprintf(" and a > %d ", i) } tests := []struct { exprs string selectivity float64 }{ { exprs: "a > 0 and a < 2", selectivity: 0.01851851851, }, { exprs: "a >= 1 and a < 2", selectivity: 0.01851851851, }, { exprs: "a >= 1 and b > 1 and a < 2", selectivity: 0.01783264746, }, { exprs: "a >= 1 and c > 1 and a < 2", selectivity: 0.00617283950, }, { exprs: "a >= 1 and c >= 1 and a < 2", selectivity: 0.01234567901, }, { exprs: "d = 0 and e = 1", selectivity: 0.11111111111, }, { exprs: "b > 1", selectivity: 0.96296296296, }, { exprs: "a > 1 and b < 2 and c > 3 and d < 4 and e > 5", selectivity: 0, }, { exprs: longExpr, selectivity: 0.001, }, } ctx := context.Background() for _, tt := range tests { sql := "select * from t where " + tt.exprs comment := Commentf("for %s", tt.exprs) sctx := testKit.Se.(sessionctx.Context) stmts, err := session.Parse(sctx, sql) c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, tt.exprs)) c.Assert(stmts, HasLen, 1) err = plannercore.Preprocess(sctx, stmts[0], is) c.Assert(err, IsNil, comment) p, _, err := plannercore.BuildLogicalPlan(ctx, sctx, stmts[0], is) c.Assert(err, IsNil, Commentf("error %v, for building plan, expr %s", err, tt.exprs)) sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) ds := sel.Children()[0].(*plannercore.DataSource) histColl := statsTbl.GenerateHistCollFromColumnInfo(ds.Columns, ds.Schema().Columns) ratio, _, err := histColl.Selectivity(sctx, sel.Conditions, nil) c.Assert(err, IsNil, comment) c.Assert(math.Abs(ratio-tt.selectivity) < eps, IsTrue, Commentf("for %s, needed: %v, got: %v", tt.exprs, tt.selectivity, ratio)) histColl.Count *= 10 ratio, _, err = histColl.Selectivity(sctx, sel.Conditions, nil) c.Assert(err, IsNil, comment) c.Assert(math.Abs(ratio-tt.selectivity) < eps, IsTrue, Commentf("for %s, needed: %v, got: %v", tt.exprs, tt.selectivity, ratio)) } } // TestDiscreteDistribution tests the estimation for discrete data distribution. This is more common when the index // consists several columns, and the first column has small NDV. 
func (s *testStatsSuite) TestDiscreteDistribution(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) testKit.MustExec("use test") testKit.MustExec("drop table if exists t") testKit.MustExec("create table t(a char(10), b int, key idx(a, b))") for i := 0; i < 499; i++ { testKit.MustExec(fmt.Sprintf("insert into t values ('cn', %d)", i)) } for i := 0; i < 10; i++ { testKit.MustExec("insert into t values ('tw', 0)") } testKit.MustExec("analyze table t") var ( input []string output [][]string ) s.testData.GetTestCases(c, &input, &output) for i, tt := range input { s.testData.OnRecord(func() { output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(tt).Rows()) }) testKit.MustQuery(tt).Check(testkit.Rows(output[i]...)) } } func (s *testStatsSuite) TestSelectCombinedLowBound(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) testKit.MustExec("use test") testKit.MustExec("drop table if exists t") testKit.MustExec("create table t(id int auto_increment, kid int, pid int, primary key(id), key(kid, pid))") testKit.MustExec("insert into t (kid, pid) values (1,2), (1,3), (1,4),(1, 11), (1, 12), (1, 13), (1, 14), (2, 2), (2, 3), (2, 4)") testKit.MustExec("analyze table t") var ( input []string output [][]string ) s.testData.GetTestCases(c, &input, &output) for i, tt := range input { s.testData.OnRecord(func() { output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(tt).Rows()) }) testKit.MustQuery(tt).Check(testkit.Rows(output[i]...)) } } func getRange(start, end int64) []*ranger.Range { ran := &ranger.Range{ LowVal: []types.Datum{types.NewIntDatum(start)}, HighVal: []types.Datum{types.NewIntDatum(end)}, } return []*ranger.Range{ran} } func (s *testStatsSuite) TestOutOfRangeEQEstimation(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) testKit.MustExec("use test") testKit.MustExec("drop table if exists t") testKit.MustExec("create table t(a int)") for i := 0; i < 1000; i++ { testKit.MustExec(fmt.Sprintf("insert into t values (%v)", i/4)) // 0 ~ 249 } testKit.MustExec("analyze table t") h := s.do.StatsHandle() table, err := s.do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) statsTbl := h.GetTableStats(table.Meta()) sc := &stmtctx.StatementContext{} col := statsTbl.Columns[table.Meta().Columns[0].ID] count, err := col.GetColumnRowCount(sc, getRange(250, 250), 0, false) c.Assert(err, IsNil) c.Assert(count, Equals, float64(0)) for i := 0; i < 8; i++ { count, err := col.GetColumnRowCount(sc, getRange(250, 250), int64(i+1), false) c.Assert(err, IsNil) c.Assert(count, Equals, math.Min(float64(i+1), 4)) // estRows must be less than modifyCnt } } func (s *testStatsSuite) TestEstimationForUnknownValues(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) testKit.MustExec("use test") testKit.MustExec("drop table if exists t") testKit.MustExec("create table t(a int, b int, key idx(a, b))") testKit.MustExec("analyze table t") for i := 0; i < 10; i++ { testKit.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i, i)) } h := s.do.StatsHandle() c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) testKit.MustExec("analyze table t") for i := 0; i < 10; i++ { testKit.MustExec(fmt.Sprintf("insert into t values (%d, %d)", i+10, i+10)) } c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) c.Assert(h.Update(s.do.InfoSchema()), IsNil) table, err := s.do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, 
IsNil) statsTbl := h.GetTableStats(table.Meta()) sc := &stmtctx.StatementContext{} colID := table.Meta().Columns[0].ID count, err := statsTbl.GetRowCountByColumnRanges(sc, colID, getRange(30, 30)) c.Assert(err, IsNil) c.Assert(count, Equals, 0.2) count, err = statsTbl.GetRowCountByColumnRanges(sc, colID, getRange(9, 30)) c.Assert(err, IsNil) c.Assert(count, Equals, 2.4000000000000004) count, err = statsTbl.GetRowCountByColumnRanges(sc, colID, getRange(9, math.MaxInt64)) c.Assert(err, IsNil) c.Assert(count, Equals, 2.4000000000000004) idxID := table.Meta().Indices[0].ID count, err = statsTbl.GetRowCountByIndexRanges(sc, idxID, getRange(30, 30)) c.Assert(err, IsNil) c.Assert(count, Equals, 0.2) count, err = statsTbl.GetRowCountByIndexRanges(sc, idxID, getRange(9, 30)) c.Assert(err, IsNil) c.Assert(count, Equals, 2.2) testKit.MustExec("truncate table t") testKit.MustExec("insert into t values (null, null)") testKit.MustExec("analyze table t") table, err = s.do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) statsTbl = h.GetTableStats(table.Meta()) colID = table.Meta().Columns[0].ID count, err = statsTbl.GetRowCountByColumnRanges(sc, colID, getRange(1, 30)) c.Assert(err, IsNil) c.Assert(count, Equals, 0.0) testKit.MustExec("drop table t") testKit.MustExec("create table t(a int, b int, index idx(b))") testKit.MustExec("insert into t values (1,1)") testKit.MustExec("analyze table t") table, err = s.do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) statsTbl = h.GetTableStats(table.Meta()) colID = table.Meta().Columns[0].ID count, err = statsTbl.GetRowCountByColumnRanges(sc, colID, getRange(2, 2)) c.Assert(err, IsNil) c.Assert(count, Equals, 0.0) idxID = table.Meta().Indices[0].ID count, err = statsTbl.GetRowCountByIndexRanges(sc, idxID, getRange(2, 2)) c.Assert(err, IsNil) c.Assert(count, Equals, 0.0) } func (s *testStatsSuite) TestEstimationUniqueKeyEqualConds(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) testKit.MustExec("use test") testKit.MustExec("drop table if exists t") testKit.MustExec("create table t(a int, b int, c int, unique key(b))") testKit.MustExec("insert into t values (1,1,1),(2,2,2),(3,3,3),(4,4,4),(5,5,5),(6,6,6),(7,7,7)") testKit.MustExec("analyze table t with 4 cmsketch width, 1 cmsketch depth;") table, err := s.do.InfoSchema().TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) statsTbl := s.do.StatsHandle().GetTableStats(table.Meta()) sc := &stmtctx.StatementContext{} idxID := table.Meta().Indices[0].ID count, err := statsTbl.GetRowCountByIndexRanges(sc, idxID, getRange(7, 7)) c.Assert(err, IsNil) c.Assert(count, Equals, 1.0) count, err = statsTbl.GetRowCountByIndexRanges(sc, idxID, getRange(6, 6)) c.Assert(err, IsNil) c.Assert(count, Equals, 1.0) colID := table.Meta().Columns[0].ID count, err = statsTbl.GetRowCountByIntColumnRanges(sc, colID, getRange(7, 7)) c.Assert(err, IsNil) c.Assert(count, Equals, 1.0) count, err = statsTbl.GetRowCountByIntColumnRanges(sc, colID, getRange(6, 6)) c.Assert(err, IsNil) c.Assert(count, Equals, 1.0) } func (s *testStatsSuite) TestPrimaryKeySelectivity(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) testKit.MustExec("use test") testKit.MustExec("drop table if exists t") testKit.Se.GetSessionVars().EnableClusteredIndex = false testKit.MustExec("create table t(a char(10) primary key, b int)") var input, output [][]string s.testData.GetTestCases(c, &input, 
&output) for i, ts := range input { for j, tt := range ts { if j != len(ts)-1 { testKit.MustExec(tt) } s.testData.OnRecord(func() { if j == len(ts)-1 { output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(tt).Rows()) } }) if j == len(ts)-1 { testKit.MustQuery(tt).Check(testkit.Rows(output[i]...)) } } } } func BenchmarkSelectivity(b *testing.B) { c := &C{} s := &testStatsSuite{} s.SetUpSuite(c) defer s.TearDownSuite(c) testKit := testkit.NewTestKit(c, s.store) statsTbl := s.prepareSelectivity(testKit, c) is := s.do.InfoSchema() exprs := "a > 1 and b < 2 and c > 3 and d < 4 and e > 5" sql := "select * from t where " + exprs comment := Commentf("for %s", exprs) sctx := testKit.Se.(sessionctx.Context) stmts, err := session.Parse(sctx, sql) c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, exprs)) c.Assert(stmts, HasLen, 1) err = plannercore.Preprocess(sctx, stmts[0], is) c.Assert(err, IsNil, comment) p, _, err := plannercore.BuildLogicalPlan(context.Background(), sctx, stmts[0], is) c.Assert(err, IsNil, Commentf("error %v, for building plan, expr %s", err, exprs)) file, err := os.Create("cpu.profile") c.Assert(err, IsNil) defer file.Close() pprof.StartCPUProfile(file) b.Run("Selectivity", func(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { _, _, err := statsTbl.Selectivity(sctx, p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection).Conditions, nil) c.Assert(err, IsNil) } b.ReportAllocs() }) pprof.StopCPUProfile() } func (s *testStatsSuite) TestStatsVer2(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) testKit.MustExec("use test") testKit.MustExec("set tidb_analyze_version=2") testKit.MustExec("drop table if exists tint") testKit.MustExec("create table tint(a int, b int, c int, index singular(a), index multi(b, c))") testKit.MustExec("insert into tint values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6), (7, 7, 7), (8, 8, 8)") testKit.MustExec("analyze table tint with 2 topn, 3 buckets") testKit.MustExec("drop table if exists tdouble") testKit.MustExec("create table tdouble(a double, b double, c double, index singular(a), index multi(b, c))") testKit.MustExec("insert into tdouble values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6), (7, 7, 7), (8, 8, 8)") testKit.MustExec("analyze table tdouble with 2 topn, 3 buckets") testKit.MustExec("drop table if exists tdecimal") testKit.MustExec("create table tdecimal(a decimal(40, 20), b decimal(40, 20), c decimal(40, 20), index singular(a), index multi(b, c))") testKit.MustExec("insert into tdecimal values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6), (7, 7, 7), (8, 8, 8)") testKit.MustExec("analyze table tdecimal with 2 topn, 3 buckets") testKit.MustExec("drop table if exists tstring") testKit.MustExec("create table tstring(a varchar(64), b varchar(64), c varchar(64), index singular(a), index multi(b, c))") testKit.MustExec("insert into tstring values ('1', '1', '1'), ('2', '2', '2'), ('3', '3', '3'), ('4', '4', '4'), ('5', '5', '5'), ('6', '6', '6'), ('7', '7', '7'), ('8', '8', '8')") testKit.MustExec("analyze table tstring with 2 topn, 3 buckets") testKit.MustExec("drop table if exists tdatetime") testKit.MustExec("create table tdatetime(a datetime, b datetime, c datetime, index singular(a), index multi(b, c))") testKit.MustExec("insert into tdatetime values ('2001-01-01', '2001-01-01', '2001-01-01'), ('2001-01-02', '2001-01-02', '2001-01-02'), ('2001-01-03', '2001-01-03', '2001-01-03'), ('2001-01-04', 
'2001-01-04', '2001-01-04')") testKit.MustExec("analyze table tdatetime with 2 topn, 3 buckets") testKit.MustExec("drop table if exists tprefix") testKit.MustExec("create table tprefix(a varchar(64), b varchar(64), index prefixa(a(2)))") testKit.MustExec("insert into tprefix values ('111', '111'), ('222', '222'), ('333', '333'), ('444', '444'), ('555', '555'), ('666', '666')") testKit.MustExec("analyze table tprefix with 2 topn, 3 buckets") // test with clustered index testKit.MustExec("set @@tidb_enable_clustered_index = 1") testKit.MustExec("drop table if exists ct1") testKit.MustExec("create table ct1 (a int, pk varchar(10), primary key(pk))") testKit.MustExec("insert into ct1 values (1, '1'), (2, '2'), (3, '3'), (4, '4'), (5, '5'), (6, '6'), (7, '7'), (8, '8')") testKit.MustExec("analyze table ct1 with 2 topn, 3 buckets") testKit.MustExec("drop table if exists ct2") testKit.MustExec("create table ct2 (a int, b int, c int, primary key(a, b))") testKit.MustExec("insert into ct2 values (1, 1, 1), (2, 2, 2), (3, 3, 3), (4, 4, 4), (5, 5, 5), (6, 6, 6), (7, 7, 7), (8, 8, 8)") testKit.MustExec("analyze table ct2 with 2 topn, 3 buckets") rows := testKit.MustQuery("select stats_ver from mysql.stats_histograms").Rows() for _, r := range rows { // ensure statsVer = 2 c.Assert(fmt.Sprintf("%v", r[0]), Equals, "2") } var ( input []string output [][]string ) s.testData.GetTestCases(c, &input, &output) for i := range input { s.testData.OnRecord(func() { output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows()) }) testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...)) } } func (s *testStatsSuite) TestColumnIndexNullEstimation(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) testKit.MustExec("use test") testKit.MustExec("drop table if exists t") testKit.MustExec("create table t(a int, b int, c int, index idx_b(b), index idx_c_a(c, a))") testKit.MustExec("insert into t values(1,null,1),(2,null,2),(3,3,3),(4,null,4),(null,null,null);") h := s.do.StatsHandle() c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) testKit.MustExec("analyze table t") var ( input []string output [][]string ) s.testData.GetTestCases(c, &input, &output) for i := 0; i < 5; i++ { s.testData.OnRecord(func() { output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows()) }) testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...)) } // Make sure column stats has been loaded. 
testKit.MustExec(`explain select * from t where a is null`) c.Assert(h.LoadNeededHistograms(), IsNil) for i := 5; i < len(input); i++ { s.testData.OnRecord(func() { output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows()) }) testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...)) } } func (s *testStatsSuite) TestUniqCompEqualEst(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) testKit.MustExec("use test") testKit.Se.GetSessionVars().EnableClusteredIndex = true testKit.MustExec("drop table if exists t") testKit.MustExec("create table t(a int, b int, primary key(a, b))") testKit.MustExec("insert into t values(1,1),(1,2),(1,3),(1,4),(1,5),(1,6),(1,7),(1,8),(1,9),(1,10)") h := s.do.StatsHandle() c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) testKit.MustExec("analyze table t") var ( input []string output [][]string ) s.testData.GetTestCases(c, &input, &output) for i := 0; i < 1; i++ { s.testData.OnRecord(func() { output[i] = s.testData.ConvertRowsToStrings(testKit.MustQuery(input[i]).Rows()) }) testKit.MustQuery(input[i]).Check(testkit.Rows(output[i]...)) } } func (s *testStatsSuite) TestSelectivityGreedyAlgo(c *C) { nodes := make([]*statistics.StatsNode, 3) nodes[0] = statistics.MockStatsNode(1, 3, 2) nodes[1] = statistics.MockStatsNode(2, 5, 2) nodes[2] = statistics.MockStatsNode(3, 9, 2) // Sets should not overlap on mask, so only nodes[0] is chosen. usedSets := statistics.GetUsableSetsByGreedy(nodes) c.Assert(len(usedSets), Equals, 1) c.Assert(usedSets[0].ID, Equals, int64(1)) nodes[0], nodes[1] = nodes[1], nodes[0] // Sets chosen should be stable, so the returned node is still the one with ID 1. usedSets = statistics.GetUsableSetsByGreedy(nodes) c.Assert(len(usedSets), Equals, 1) c.Assert(usedSets[0].ID, Equals, int64(1)) } func (s *testStatsSuite) TestCollationColumnEstimate(c *C) { defer cleanEnv(c, s.store, s.do) tk := testkit.NewTestKit(c, s.store) collate.SetNewCollationEnabledForTest(true) defer collate.SetNewCollationEnabledForTest(false) tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t(a varchar(20) collate utf8mb4_general_ci)") tk.MustExec("insert into t values('aaa'), ('bbb'), ('AAA'), ('BBB')") h := s.do.StatsHandle() c.Assert(h.DumpStatsDeltaToKV(handle.DumpAll), IsNil) tk.MustExec("analyze table t") tk.MustExec("explain select * from t where a = 'aaa'") c.Assert(h.LoadNeededHistograms(), IsNil) var ( input []string output [][]string ) s.testData.GetTestCases(c, &input, &output) for i := 0; i < len(input); i++ { s.testData.OnRecord(func() { output[i] = s.testData.ConvertRowsToStrings(tk.MustQuery(input[i]).Rows()) }) tk.MustQuery(input[i]).Check(testkit.Rows(output[i]...)) } } // TestDNFCondSelectivity tests selectivity calculation with DNF conditions, which are covered using the independence assumption.
func (s *testStatsSuite) TestDNFCondSelectivity(c *C) { defer cleanEnv(c, s.store, s.do) testKit := testkit.NewTestKit(c, s.store) testKit.MustExec("use test") testKit.MustExec("drop table if exists t") testKit.MustExec("create table t(a int, b int, c int, d int)") testKit.MustExec("insert into t value(1,5,4,4),(3,4,1,8),(4,2,6,10),(6,7,2,5),(7,1,4,9),(8,9,8,3),(9,1,9,1),(10,6,6,2)") testKit.MustExec("alter table t add index (b)") testKit.MustExec("alter table t add index (d)") testKit.MustExec(`analyze table t`) ctx := context.Background() is := s.do.InfoSchema() h := s.do.StatsHandle() tb, err := is.TableByName(model.NewCIStr("test"), model.NewCIStr("t")) c.Assert(err, IsNil) tblInfo := tb.Meta() statsTbl := h.GetTableStats(tblInfo) var ( input []string output []struct { SQL string Selectivity float64 } ) s.testData.GetTestCases(c, &input, &output) for i, tt := range input { sctx := testKit.Se.(sessionctx.Context) stmts, err := session.Parse(sctx, tt) c.Assert(err, IsNil, Commentf("error %v, for sql %s", err, tt)) c.Assert(stmts, HasLen, 1) err = plannercore.Preprocess(sctx, stmts[0], is) c.Assert(err, IsNil, Commentf("error %v, for sql %s", err, tt)) p, _, err := plannercore.BuildLogicalPlan(ctx, sctx, stmts[0], is) c.Assert(err, IsNil, Commentf("error %v, for building plan, sql %s", err, tt)) sel := p.(plannercore.LogicalPlan).Children()[0].(*plannercore.LogicalSelection) ds := sel.Children()[0].(*plannercore.DataSource) histColl := statsTbl.GenerateHistCollFromColumnInfo(ds.Columns, ds.Schema().Columns) ratio, _, err := histColl.Selectivity(sctx, sel.Conditions, nil) c.Assert(err, IsNil, Commentf("error %v, for expr %s", err, tt)) s.testData.OnRecord(func() { output[i].SQL = tt output[i].Selectivity = ratio }) c.Assert(math.Abs(ratio-output[i].Selectivity) < eps, IsTrue, Commentf("for %s, needed: %v, got: %v", tt, output[i].Selectivity, ratio)) } // Test issue 19981 testKit.MustExec("select * from t where _tidb_rowid is null or _tidb_rowid > 7") // Test issue 22134 // Information about column n will not be in stats immediately after this SQL executed. // If we don't have a check against this, DNF condition could lead to infinite recursion in Selectivity(). testKit.MustExec("alter table t add column n timestamp;") testKit.MustExec("select * from t where n = '2000-01-01' or n = '2000-01-02';") } func (s *testStatsSuite) TestIndexEstimationCrossValidate(c *C) { defer cleanEnv(c, s.store, s.do) tk := testkit.NewTestKit(c, s.store) tk.MustExec("use test") tk.MustExec("drop table if exists t") tk.MustExec("create table t(a int, b int, key(a,b))") tk.MustExec("insert into t values(1, 1), (1, 2), (1, 3), (2, 2)") tk.MustExec("analyze table t") c.Assert(failpoint.Enable("github.com/pingcap/tidb/statistics/table/mockQueryBytesMaxUint64", `return(100000)`), IsNil) tk.MustQuery("explain select * from t where a = 1 and b = 2").Check(testkit.Rows( "IndexReader_6 1.00 root index:IndexRangeScan_5", "└─IndexRangeScan_5 1.00 cop[tikv] table:t, index:a(a, b) range:[1 2,1 2], keep order:false")) c.Assert(failpoint.Disable("github.com/pingcap/tidb/statistics/table/mockQueryBytesMaxUint64"), IsNil) // Test issue 22466 tk.MustExec("drop table if exists t2") tk.MustExec("create table t2(a int, b int, key b(b))") tk.MustExec("insert into t2 values(1, 1), (2, 2), (3, 3), (4, 4), (5,5)") // This line of select will mark column b stats as needed, and an invalid(empty) stats for column b // will be loaded at the next analyze line, this will trigger the bug. 
tk.MustQuery("select * from t2 where b=2") tk.MustExec("analyze table t2 index b") tk.MustQuery("explain select * from t2 where b=2").Check(testkit.Rows( "TableReader_7 1.00 root data:Selection_6", "└─Selection_6 1.00 cop[tikv] eq(test.t2.b, 2)", " └─TableFullScan_5 5.00 cop[tikv] table:t2 keep order:false")) }
[ "\"log_level\"" ]
[]
[ "log_level" ]
[]
["log_level"]
go
1
0
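The record above asserts estimated selectivities against expected ratios within an eps tolerance, the standard pattern for comparing floating-point results in table-driven Go tests. Below is a minimal self-contained sketch of that pattern; the eps value and the estimate helper are illustrative assumptions, not taken from the record.

package main

import (
	"fmt"
	"math"
)

// eps mirrors the tolerance idea used by the record's assertions; the
// concrete value here is an assumption for illustration.
const eps = 1e-9

// estimate stands in for a selectivity computation; it is hypothetical.
func estimate(rows, matched float64) float64 { return matched / rows }

func main() {
	tests := []struct {
		rows, matched, want float64
	}{
		{54, 1, 0.01851851851851852},
		{54, 52, 0.9629629629629629},
	}
	for _, tt := range tests {
		got := estimate(tt.rows, tt.matched)
		// Never compare floats with ==; allow a small tolerance instead.
		if math.Abs(got-tt.want) > eps {
			fmt.Printf("mismatch: want %v, got %v\n", tt.want, got)
			continue
		}
		fmt.Printf("ok: %v\n", got)
	}
}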
config.py
import os class Config: ''' General configuration parent class ''' SECRET_KEY = os.environ.get('SECRET_KEY') UPLOADED_PHOTOS_DEST ='app/static/photos' QUOTE_BASE_URL = 'http://quotes.stormconsultancy.co.uk/random.json' SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://kennedy:new@localhost/blogblossom' MAIL_SERVER = 'smtp.googlemail.com' MAIL_PORT = 587 MAIL_USE_TLS = True MAIL_USERNAME = os.environ.get("MAIL_USERNAME") MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD") class ProdConfig(Config): ''' Production configuration child class Args: Config: The parent configuration class with General configuration settings ''' SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL","") if SQLALCHEMY_DATABASE_URI.startswith("postgres://"): SQLALCHEMY_DATABASE_URI =SQLALCHEMY_DATABASE_URI.replace("postgres://","postgresql://",1) class TestConfig(Config): SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://kennedy:new@localhost/blogblossom' class DevConfig(Config): ''' Development configuration child class Args: Config: The parent configuration class with General configuration settings ''' SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://kennedy:new@localhost/blogblossom' DEBUG = True config_options = { 'development': DevConfig, 'production': ProdConfig, 'test': TestConfig }
[]
[]
[ "MAIL_PASSWORD", "SECRET_KEY", "DATABASE_URL", "MAIL_USERNAME" ]
[]
["MAIL_PASSWORD", "SECRET_KEY", "DATABASE_URL", "MAIL_USERNAME"]
python
4
0
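ProdConfig above rewrites a legacy postgres:// connection string taken from the DATABASE_URL environment variable into the postgresql:// scheme that newer drivers expect. A hedged sketch of the same normalization in Go (the empty-string fallback when the variable is unset is an assumption):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// Read the connection string from the environment, as the record does.
	dbURL := os.Getenv("DATABASE_URL")
	// Some platforms hand out the legacy "postgres://" scheme; rewrite
	// only the first occurrence, matching the record's replace(..., 1).
	if strings.HasPrefix(dbURL, "postgres://") {
		dbURL = strings.Replace(dbURL, "postgres://", "postgresql://", 1)
	}
	fmt.Println(dbURL)
}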
client/query.go
package client import ( "context" "fmt" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/types/query" authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" bankTypes "github.com/cosmos/cosmos-sdk/x/bank/types" distTypes "github.com/cosmos/cosmos-sdk/x/distribution/types" transfertypes "github.com/cosmos/ibc-go/v3/modules/apps/transfer/types" ) // queryBalanceWithAddress returns the amount of coins in the relayer account with address as input // TODO add pagination support func (cc *ChainClient) queryBalanceWithAddress(ctx context.Context, address string) (sdk.Coins, error) { p := &bankTypes.QueryAllBalancesRequest{Address: address, Pagination: DefaultPageRequest()} queryClient := bankTypes.NewQueryClient(cc) res, err := queryClient.AllBalances(ctx, p) if err != nil { return nil, err } return res.Balances, nil } func (cc *ChainClient) queryLatestHeight(ctx context.Context) (int64, error) { stat, err := cc.RPCClient.Status(ctx) if err != nil { return -1, err } else if stat.SyncInfo.CatchingUp { return -1, fmt.Errorf("node at %s running chain %s not caught up", cc.Config.RPCAddr, cc.Config.ChainID) } return stat.SyncInfo.LatestBlockHeight, nil } // queryDenomTraces returns all the denom traces from a given chain // TODO add pagination support func (cc *ChainClient) queryDenomTraces(ctx context.Context, offset, limit uint64, height int64) ([]transfertypes.DenomTrace, error) { transfers, err := transfertypes.NewQueryClient(cc).DenomTraces( ctx, &transfertypes.QueryDenomTracesRequest{ Pagination: DefaultPageRequest(), }, ) if err != nil { return nil, err } return transfers.DenomTraces, nil } func (cc *ChainClient) QueryAccount(ctx context.Context, address sdk.AccAddress) (authtypes.AccountI, error) { addr, err := cc.EncodeBech32AccAddr(address) if err != nil { return nil, err } res, err := authtypes.NewQueryClient(cc).Account(ctx, &authtypes.QueryAccountRequest{Address: addr}) if err != nil { return nil, err } var acc authtypes.AccountI if err := cc.Codec.InterfaceRegistry.UnpackAny(res.Account, &acc); err != nil { return nil, err } return acc, nil } // QueryBalanceWithDenomTraces is a helper function for query balance func (cc *ChainClient) QueryBalanceWithDenomTraces(ctx context.Context, address sdk.AccAddress, pageReq *query.PageRequest) (sdk.Coins, error) { coins, err := cc.queryBalanceWithAddress(ctx, cc.MustEncodeAccAddr(address)) if err != nil { return nil, err } h, err := cc.queryLatestHeight(ctx) if err != nil { return nil, err } // TODO: figure out how to handle this // we don't want to expose user to this // so maybe we need a QueryAllDenomTraces function // that will paginate the responses automatically // TODO fix pagination here later dts, err := cc.queryDenomTraces(ctx, 0, 1000, h) if err != nil { return nil, err } if len(dts) == 0 { return coins, nil } var out sdk.Coins for _, c := range coins { if c.Amount.Equal(sdk.NewInt(0)) { continue } for i, d := range dts { if c.Denom == d.IBCDenom() { out = append(out, sdk.Coin{Denom: d.GetFullDenomPath(), Amount: c.Amount}) break } if i == len(dts)-1 { out = append(out, c) } } } return out, nil } func (cc *ChainClient) QueryDelegatorValidators(ctx context.Context, address sdk.AccAddress) ([]string, error) { res, err := distTypes.NewQueryClient(cc).DelegatorValidators(ctx, &distTypes.QueryDelegatorValidatorsRequest{ DelegatorAddress: cc.MustEncodeAccAddr(address), }) if err != nil { return nil, err } return res.Validators, nil } func (cc *ChainClient) QueryDistributionCommission(ctx context.Context, address 
sdk.ValAddress) (sdk.DecCoins, error) { valAddr, err := cc.EncodeBech32ValAddr(address) if err != nil { return nil, err } request := distTypes.QueryValidatorCommissionRequest{ ValidatorAddress: valAddr, } res, err := distTypes.NewQueryClient(cc).ValidatorCommission(ctx, &request) if err != nil { return nil, err } return res.Commission.Commission, nil } func (cc *ChainClient) QueryDistributionCommunityPool(ctx context.Context) (sdk.DecCoins, error) { res, err := distTypes.NewQueryClient(cc).CommunityPool(ctx, &distTypes.QueryCommunityPoolRequest{}) if err != nil { return nil, err } return res.Pool, err } func (cc *ChainClient) QueryDistributionParams(ctx context.Context) (*distTypes.Params, error) { res, err := distTypes.NewQueryClient(cc).Params(ctx, &distTypes.QueryParamsRequest{}) if err != nil { return nil, err } return &res.Params, nil } func (cc *ChainClient) QueryDistributionRewards(ctx context.Context, delegatorAddress sdk.AccAddress, validatorAddress sdk.ValAddress) (sdk.DecCoins, error) { delAddr, err := cc.EncodeBech32AccAddr(delegatorAddress) if err != nil { return nil, err } valAddr, err := cc.EncodeBech32ValAddr(validatorAddress) if err != nil { return nil, err } request := distTypes.QueryDelegationRewardsRequest{ DelegatorAddress: delAddr, ValidatorAddress: valAddr, } res, err := distTypes.NewQueryClient(cc).DelegationRewards(ctx, &request) if err != nil { return nil, err } return res.Rewards, nil } // QueryDistributionSlashes returns all slashes of a validator, optionally pass the start and end height func (cc *ChainClient) QueryDistributionSlashes(ctx context.Context, validatorAddress sdk.ValAddress, startHeight, endHeight uint64, pageReq *query.PageRequest) (*distTypes.QueryValidatorSlashesResponse, error) { valAddr, err := cc.EncodeBech32ValAddr(validatorAddress) if err != nil { return nil, err } request := distTypes.QueryValidatorSlashesRequest{ ValidatorAddress: valAddr, StartingHeight: startHeight, EndingHeight: endHeight, Pagination: pageReq, } return distTypes.NewQueryClient(cc).ValidatorSlashes(ctx, &request) } // QueryDistributionValidatorRewards returns all the validator distribution rewards from a given height func (cc *ChainClient) QueryDistributionValidatorRewards(ctx context.Context, validatorAddress sdk.ValAddress) (sdk.DecCoins, error) { valAddr, err := cc.EncodeBech32ValAddr(validatorAddress) if err != nil { return nil, err } request := distTypes.QueryValidatorOutstandingRewardsRequest{ ValidatorAddress: valAddr, } res, err := distTypes.NewQueryClient(cc).ValidatorOutstandingRewards(ctx, &request) if err != nil { return nil, err } return res.Rewards.Rewards, nil } // QueryTotalSupply returns the total supply of coins on a chain func (cc *ChainClient) QueryTotalSupply(ctx context.Context, pageReq *query.PageRequest) (*bankTypes.QueryTotalSupplyResponse, error) { return bankTypes.NewQueryClient(cc).TotalSupply(ctx, &bankTypes.QueryTotalSupplyRequest{Pagination: pageReq}) } func (cc *ChainClient) QueryDenomsMetadata(ctx context.Context, pageReq *query.PageRequest) (*bankTypes.QueryDenomsMetadataResponse, error) { return bankTypes.NewQueryClient(cc).DenomsMetadata(ctx, &bankTypes.QueryDenomsMetadataRequest{Pagination: pageReq}) } func DefaultPageRequest() *query.PageRequest { return &query.PageRequest{ Key: []byte(""), Offset: 0, Limit: 1000, CountTotal: true, } }
[]
[]
[]
[]
[]
go
null
null
null
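Several TODOs in client/query.go above ask for automatic pagination instead of a single fixed-size page. One way that could look, sketched with the cosmos-sdk query types the record already imports, is to keep requesting pages until the server returns an empty NextKey cursor; the queryAllDenomTraces name is hypothetical, not part of the record.

package client

import (
	"context"

	"github.com/cosmos/cosmos-sdk/types/query"
	transfertypes "github.com/cosmos/ibc-go/v3/modules/apps/transfer/types"
)

// queryAllDenomTraces is a hypothetical helper sketching the record's
// "paginate the responses automatically" TODO.
func queryAllDenomTraces(ctx context.Context, qc transfertypes.QueryClient) ([]transfertypes.DenomTrace, error) {
	var all []transfertypes.DenomTrace
	pageReq := &query.PageRequest{Limit: 1000}
	for {
		res, err := qc.DenomTraces(ctx, &transfertypes.QueryDenomTracesRequest{Pagination: pageReq})
		if err != nil {
			return nil, err
		}
		all = append(all, res.DenomTraces...)
		if res.Pagination == nil || len(res.Pagination.NextKey) == 0 {
			return all, nil
		}
		// Resume from the cursor the previous page handed back.
		pageReq = &query.PageRequest{Key: res.Pagination.NextKey, Limit: 1000}
	}
}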
pkg/host_cleaning/local_docker_server.go
package host_cleaning import ( "context" "encoding/json" "fmt" "os" "path/filepath" "runtime" "sort" "strings" "time" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" "github.com/dustin/go-humanize" "github.com/werf/lockgate" "github.com/werf/logboek" "github.com/werf/werf/pkg/container_runtime" "github.com/werf/werf/pkg/docker" "github.com/werf/werf/pkg/image" "github.com/werf/werf/pkg/storage/lrumeta" "github.com/werf/werf/pkg/volumeutils" "github.com/werf/werf/pkg/werf" "github.com/werf/kubedog/pkg/utils" ) const ( MinImagesToDelete = 10 ) func GetLocalDockerServerStoragePath(ctx context.Context) (string, error) { dockerInfo, err := docker.Info(ctx) if err != nil { return "", fmt.Errorf("unable to get docker info: %s", err) } var storagePath string if dockerInfo.OperatingSystem == "Docker Desktop" { switch runtime.GOOS { case "windows": storagePath = filepath.Join(os.Getenv("HOMEDRIVE"), `\\ProgramData\DockerDesktop\vm-data\`) case "darwin": storagePath = filepath.Join(os.Getenv("HOME"), "Library/Containers/com.docker.docker/Data") } } else { storagePath = dockerInfo.DockerRootDir } if _, err := os.Stat(storagePath); os.IsNotExist(err) { return "", nil } else if err != nil { return "", fmt.Errorf("error accessing %q: %s", storagePath, err) } return storagePath, nil } func getDockerServerStoragePath(ctx context.Context, dockerServerStoragePathOption string) (string, error) { var dockerServerStoragePath string if dockerServerStoragePathOption != "" { dockerServerStoragePath = dockerServerStoragePathOption } else { path, err := GetLocalDockerServerStoragePath(ctx) if err != nil { return "", err } dockerServerStoragePath = path } return dockerServerStoragePath, nil } func ShouldRunAutoGCForLocalDockerServer(ctx context.Context, allowedVolumeUsagePercentage float64, dockerServerStoragePath string) (bool, error) { if dockerServerStoragePath == "" { return false, nil } vu, err := volumeutils.GetVolumeUsageByPath(ctx, dockerServerStoragePath) if err != nil { return false, fmt.Errorf("error getting volume usage by path %q: %s", dockerServerStoragePath, err) } return vu.Percentage > allowedVolumeUsagePercentage, nil } type LocalDockerServerStorageCheckResult struct { VolumeUsage volumeutils.VolumeUsage TotalImagesBytes uint64 ImagesDescs []*LocalImageDesc } func (checkResult *LocalDockerServerStorageCheckResult) GetBytesToFree(targetVolumeUsage float64) uint64 { allowedVolumeUsageToFree := checkResult.VolumeUsage.Percentage - targetVolumeUsage bytesToFree := uint64((float64(checkResult.VolumeUsage.TotalBytes) / 100.0) * allowedVolumeUsageToFree) return bytesToFree } func GetLocalDockerServerStorageCheck(ctx context.Context, dockerServerStoragePath string) (*LocalDockerServerStorageCheckResult, error) { res := &LocalDockerServerStorageCheckResult{} vu, err := volumeutils.GetVolumeUsageByPath(ctx, dockerServerStoragePath) if err != nil { return nil, fmt.Errorf("error getting volume usage by path %q: %s", dockerServerStoragePath, err) } res.VolumeUsage = vu var images []types.ImageSummary { filterSet := filters.NewArgs() filterSet.Add("label", image.WerfLabel) filterSet.Add("label", image.WerfStageDigestLabel) imgs, err := docker.Images(ctx, types.ImageListOptions{Filters: filterSet}) if err != nil { return nil, fmt.Errorf("unable to get werf docker images: %s", err) } images = append(images, imgs...) 
} { filterSet := filters.NewArgs() filterSet.Add("label", image.WerfLabel) filterSet.Add("label", "werf-stage-signature") // v1.1 legacy images imgs, err := docker.Images(ctx, types.ImageListOptions{Filters: filterSet}) if err != nil { return nil, fmt.Errorf("unable to get werf v1.1 legacy docker images: %s", err) } ExcludeLocalV1_1StagesStorage: for _, img := range imgs { for _, ref := range img.RepoTags { // Do not remove stages-storage=:local images, because this is primary stages storage data, and it can only be cleaned by the werf-cleanup command if strings.HasPrefix(ref, "werf-stages-storage/") { continue ExcludeLocalV1_1StagesStorage } } images = append(images, img) } } { // **NOTICE** Remove v1.1 last-run-at timestamp check when v1.1 reaches its end of life t, err := werf.GetWerfLastRunAtV1_1(ctx) if err != nil { return nil, fmt.Errorf("error getting v1.1 last run timestamp: %s", err) } // No werf v1.1 runs on this host. // This is a crude check, but the only available safe option at the moment. if t.IsZero() { filterSet := filters.NewArgs() filterSet.Add("reference", "*client-id-*") filterSet.Add("reference", "*managed-image-*") filterSet.Add("reference", "*meta-*") filterSet.Add("reference", "*import-metadata-*") filterSet.Add("reference", "*-rejected") filterSet.Add("reference", "werf-client-id/*") filterSet.Add("reference", "werf-managed-images/*") filterSet.Add("reference", "werf-images-metadata-by-commit/*") filterSet.Add("reference", "werf-import-metadata/*") imgs, err := docker.Images(ctx, types.ImageListOptions{Filters: filterSet}) if err != nil { return nil, fmt.Errorf("unable to get werf service images: %s", err) } for _, img := range imgs { // **NOTICE.** Cannot remove by werf label, because currently there is no such label for service-images for historical reasons. // So check by size at least for now.
if img.Size != 0 { continue } images = append(images, img) } } } CreateImagesDescs: for _, imageSummary := range images { data, _ := json.Marshal(imageSummary) logboek.Context(ctx).Debug().LogF("Image summary:\n%s\n---\n", data) res.TotalImagesBytes += uint64(imageSummary.VirtualSize - imageSummary.SharedSize) lastUsedAt := time.Unix(imageSummary.Created, 0) CheckEachRef: for _, ref := range imageSummary.RepoTags { // IMPORTANT: ignore none images, these may be either orphans or just built fresh images and we shall not delete these if ref == "<none>:<none>" { continue CreateImagesDescs } lastRecentlyUsedAt, err := lrumeta.CommonLRUImagesCache.GetImageLastAccessTime(ctx, ref) if err != nil { return nil, fmt.Errorf("error accessing last recently used images cache: %s", err) } if lastRecentlyUsedAt.IsZero() { continue CheckEachRef } lastUsedAt = lastRecentlyUsedAt break } desc := &LocalImageDesc{ ImageSummary: imageSummary, LastUsedAt: lastUsedAt, } res.ImagesDescs = append(res.ImagesDescs, desc) } sort.Sort(ImagesLruSort(res.ImagesDescs)) return res, nil } func RunGCForLocalDockerServer(ctx context.Context, allowedVolumeUsagePercentage, allowedVolumeUsageMarginPercentage float64, dockerServerStoragePath string, force, dryRun bool) error { if dockerServerStoragePath == "" { return nil } targetVolumeUsage := allowedVolumeUsagePercentage - allowedVolumeUsageMarginPercentage if targetVolumeUsage < 0 { targetVolumeUsage = 0 } checkResult, err := GetLocalDockerServerStorageCheck(ctx, dockerServerStoragePath) if err != nil { return fmt.Errorf("error getting local docker server storage check: %s", err) } bytesToFree := checkResult.GetBytesToFree(targetVolumeUsage) if checkResult.VolumeUsage.Percentage <= allowedVolumeUsagePercentage { logboek.Context(ctx).Default().LogBlock("Local docker server storage check").Do(func() { logboek.Context(ctx).Default().LogF("Docker server storage path: %s\n", dockerServerStoragePath) logboek.Context(ctx).Default().LogF("Volume usage: %s / %s\n", humanize.Bytes(checkResult.VolumeUsage.UsedBytes), humanize.Bytes(checkResult.VolumeUsage.TotalBytes)) logboek.Context(ctx).Default().LogF("Allowed volume usage percentage: %s <= %s — %s\n", utils.GreenF("%0.2f%%", checkResult.VolumeUsage.Percentage), utils.BlueF("%0.2f%%", allowedVolumeUsagePercentage), utils.GreenF("OK")) }) return nil } logboek.Context(ctx).Default().LogBlock("Local docker server storage check").Do(func() { logboek.Context(ctx).Default().LogF("Docker server storage path: %s\n", dockerServerStoragePath) logboek.Context(ctx).Default().LogF("Volume usage: %s / %s\n", humanize.Bytes(checkResult.VolumeUsage.UsedBytes), humanize.Bytes(checkResult.VolumeUsage.TotalBytes)) logboek.Context(ctx).Default().LogF("Allowed percentage level exceeded: %s > %s — %s\n", utils.RedF("%0.2f%%", checkResult.VolumeUsage.Percentage), utils.YellowF("%0.2f%%", allowedVolumeUsagePercentage), utils.RedF("HIGH VOLUME USAGE")) logboek.Context(ctx).Default().LogF("Target percentage level after cleanup: %0.2f%% - %0.2f%% (margin) = %s\n", allowedVolumeUsagePercentage, allowedVolumeUsageMarginPercentage, utils.BlueF("%0.2f%%", targetVolumeUsage)) logboek.Context(ctx).Default().LogF("Needed to free: %s\n", utils.RedF("%s", humanize.Bytes(bytesToFree))) logboek.Context(ctx).Default().LogF("Available images to free: %s\n", utils.YellowF("%d (~ %s)", len(checkResult.ImagesDescs), humanize.Bytes(checkResult.TotalImagesBytes))) }) for { var freedBytes uint64 var freedImagesCount uint64 var acquiredHostLocks []lockgate.LockHandle if 
len(checkResult.ImagesDescs) > 0 { if err := logboek.Context(ctx).Default().LogProcess("Running cleanup for least recently used docker images created by werf").DoError(func() error { DeleteImages: for _, desc := range checkResult.ImagesDescs { imageRemovalFailed := false for _, ref := range desc.ImageSummary.RepoTags { var args []string if ref == "<none>:<none>" { args = append(args, desc.ImageSummary.ID) } else { lockName := container_runtime.ImageLockName(ref) isLocked, lock, err := werf.AcquireHostLock(ctx, lockName, lockgate.AcquireOptions{NonBlocking: true}) if err != nil { return fmt.Errorf("error locking image %q: %s", lockName, err) } if !isLocked { logboek.Context(ctx).Default().LogFDetails("Image %q is locked at the moment: skip removal\n", ref) continue DeleteImages } acquiredHostLocks = append(acquiredHostLocks, lock) args = append(args, ref) } if force { args = append(args, "--force") } logboek.Context(ctx).Default().LogF("Removing %s\n", ref) if dryRun { continue } if err := docker.CliRmi(ctx, args...); err != nil { logboek.Context(ctx).Warn().LogF("failed to remove local docker image %q: %s\n", ref, err) imageRemovalFailed = true } } if !imageRemovalFailed { freedBytes += uint64(desc.ImageSummary.VirtualSize - desc.ImageSummary.SharedSize) freedImagesCount++ } if freedImagesCount < MinImagesToDelete { continue } if freedBytes > bytesToFree { break } } logboek.Context(ctx).Default().LogF("Freed images: %s\n", utils.GreenF("%d (~ %s)", freedImagesCount, humanize.Bytes(freedBytes))) return nil }); err != nil { return err } } if freedImagesCount == 0 { logboek.Context(ctx).Warn().LogF("WARNING: Detected high docker storage volume usage, while no werf images available to cleanup!\n") logboek.Context(ctx).Warn().LogF("WARNING:\n") logboek.Context(ctx).Warn().LogF("WARNING: Werf tries to maintain host clean by deleting:\n") logboek.Context(ctx).Warn().LogF("WARNING: - old unused files from werf caches (which are stored in the ~/.werf/local_cache);\n") logboek.Context(ctx).Warn().LogF("WARNING: - old temporary service files /tmp/werf-project-data-* and /tmp/werf-config-render-*;\n") logboek.Context(ctx).Warn().LogF("WARNING: - least recently used werf images.\n") logboek.Context(ctx).Warn().LogF("WARNING:\n") logboek.Context(ctx).Warn().LogF("WARNING: Werf-host-cleanup procedure of v1.2 werf version will not cleanup --stages-storage=:local stages of v1.1 werf version, because this is primary stages storage data, and it can only be cleaned by the regular per-project werf-cleanup command with git-history based algorithm.\n") logboek.Context(ctx).Warn().LogOptionalLn() } for _, lock := range acquiredHostLocks { if err := werf.ReleaseHostLock(lock); err != nil { return fmt.Errorf("unable to release lock %q: %s", lock.LockName, err) } } commonOptions := CommonOptions{ RmContainersThatUseWerfImages: force, SkipUsedImages: !force, RmiForce: force, RmForce: true, DryRun: dryRun, } if err := logboek.Context(ctx).Default().LogProcess("Running cleanup for docker containers created by werf").DoError(func() error { return safeContainersCleanup(ctx, commonOptions) }); err != nil { return err } if err := logboek.Context(ctx).Default().LogProcess("Running cleanup for dangling docker images created by werf").DoError(func() error { return safeDanglingImagesCleanup(ctx, commonOptions) }); err != nil { return err } if freedImagesCount == 0 { break } if dryRun { break } logboek.Context(ctx).Default().LogOptionalLn() checkResult, err = GetLocalDockerServerStorageCheck(ctx, dockerServerStoragePath) if err 
!= nil { return fmt.Errorf("error getting local docker server storage check: %s", err) } if checkResult.VolumeUsage.Percentage <= targetVolumeUsage { logboek.Context(ctx).Default().LogBlock("Local docker server storage check").Do(func() { logboek.Context(ctx).Default().LogF("Docker server storage path: %s\n", dockerServerStoragePath) logboek.Context(ctx).Default().LogF("Volume usage: %s / %s\n", humanize.Bytes(checkResult.VolumeUsage.UsedBytes), humanize.Bytes(checkResult.VolumeUsage.TotalBytes)) logboek.Context(ctx).Default().LogF("Target volume usage percentage: %s <= %s — %s\n", utils.GreenF("%0.2f%%", checkResult.VolumeUsage.Percentage), utils.BlueF("%0.2f%%", targetVolumeUsage), utils.GreenF("OK")) }) break } bytesToFree = checkResult.GetBytesToFree(targetVolumeUsage) logboek.Context(ctx).Default().LogBlock("Local docker server storage check").Do(func() { logboek.Context(ctx).Default().LogF("Docker server storage path: %s\n", dockerServerStoragePath) logboek.Context(ctx).Default().LogF("Volume usage: %s / %s\n", humanize.Bytes(checkResult.VolumeUsage.UsedBytes), humanize.Bytes(checkResult.VolumeUsage.TotalBytes)) logboek.Context(ctx).Default().LogF("Target volume usage percentage: %s > %s — %s\n", utils.RedF("%0.2f%%", checkResult.VolumeUsage.Percentage), utils.BlueF("%0.2f%%", targetVolumeUsage), utils.RedF("HIGH VOLUME USAGE")) logboek.Context(ctx).Default().LogF("Needed to free: %s\n", utils.RedF("%s", humanize.Bytes(bytesToFree))) logboek.Context(ctx).Default().LogF("Available images to free: %s\n", utils.YellowF("%d (~ %s)", len(checkResult.ImagesDescs), humanize.Bytes(checkResult.TotalImagesBytes))) }) } return nil } type LocalImageDesc struct { ImageSummary types.ImageSummary LastUsedAt time.Time } type ImagesLruSort []*LocalImageDesc func (a ImagesLruSort) Len() int { return len(a) } func (a ImagesLruSort) Less(i, j int) bool { return a[i].LastUsedAt.Before(a[j].LastUsedAt) } func (a ImagesLruSort) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func safeDanglingImagesCleanup(ctx context.Context, options CommonOptions) error { images, err := werfImagesByFilterSet(ctx, danglingFilterSet()) if err != nil { return err } var imagesToRemove []types.ImageSummary for _, img := range images { imagesToRemove = append(imagesToRemove, img) } imagesToRemove, err = processUsedImages(ctx, imagesToRemove, options) if err != nil { return err } if err := imagesRemove(ctx, imagesToRemove, options); err != nil { return err } return nil } func safeContainersCleanup(ctx context.Context, options CommonOptions) error { containers, err := werfContainersByFilterSet(ctx, filters.NewArgs()) if err != nil { return fmt.Errorf("cannot get stages build containers: %s", err) } for _, container := range containers { var containerName string for _, name := range container.Names { if strings.HasPrefix(name, fmt.Sprintf("/%s", image.StageContainerNamePrefix)) { containerName = strings.TrimPrefix(name, "/") break } } if containerName == "" { logboek.Context(ctx).Warn().LogF("Ignore bad container %s\n", container.ID) continue } if err := func() error { containerLockName := container_runtime.ContainerLockName(containerName) isLocked, lock, err := werf.AcquireHostLock(ctx, containerLockName, lockgate.AcquireOptions{NonBlocking: true}) if err != nil { return fmt.Errorf("failed to lock %s for container %s: %s", containerLockName, logContainerName(container), err) } if !isLocked { logboek.Context(ctx).Default().LogFDetails("Ignore container %s used by another process\n", logContainerName(container)) return nil } defer 
werf.ReleaseHostLock(lock) if err := containersRemove(ctx, []types.Container{container}, options); err != nil { return fmt.Errorf("failed to remove container %s: %s", logContainerName(container), err) } return nil }(); err != nil { return err } } return nil }
[ "\"HOMEDRIVE\"", "\"HOME\"" ]
[]
[ "HOME", "HOMEDRIVE" ]
[]
["HOME", "HOMEDRIVE"]
go
2
0
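GetBytesToFree in the record computes how many bytes must be reclaimed to bring volume usage down to the target percentage: the total volume size divided by 100, multiplied by the percentage points above target. A small standalone reproduction of that arithmetic (the non-negative guard is an addition for illustration; the record relies on the caller checking usage first):

package main

import "fmt"

// bytesToFree mirrors GetBytesToFree from the record: the share of the
// volume above the target usage percentage, converted to bytes.
func bytesToFree(totalBytes uint64, usagePct, targetPct float64) uint64 {
	over := usagePct - targetPct
	if over < 0 {
		return 0 // guard added for illustration; nothing to free
	}
	return uint64(float64(totalBytes) / 100.0 * over)
}

func main() {
	// A 100 GiB volume at 85% usage with a 70% target: free 15 GiB.
	fmt.Println(bytesToFree(100<<30, 85.0, 70.0))
}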
manage.py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webpy.settings") try: from django.core.management import execute_from_command_line except ImportError: # The above import may fail for some other reason. Ensure that the # issue is really that Django is missing to avoid masking other # exceptions on Python 2. try: import django except ImportError: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) raise execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
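manage.py above relies on os.environ.setdefault, which writes DJANGO_SETTINGS_MODULE only when the variable is not already set, so an exported value always wins. A Go sketch of the same only-if-unset semantics, using os.LookupEnv to distinguish unset from empty; the setdefault helper name is illustrative.

package main

import (
	"fmt"
	"os"
)

// setdefault mimics Python's os.environ.setdefault from the record:
// the variable is only written when it is not already present.
func setdefault(key, value string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v // keep the caller's exported value
	}
	_ = os.Setenv(key, value)
	return value
}

func main() {
	// The variable name and default value both come from the record.
	fmt.Println(setdefault("DJANGO_SETTINGS_MODULE", "webpy.settings"))
}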
var/spack/repos/builtin/packages/xfs/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Xfs(AutotoolsPackage): """X Font Server.""" homepage = "http://cgit.freedesktop.org/xorg/app/xfs" url = "https://www.x.org/archive/individual/app/xfs-1.1.4.tar.gz" version('1.1.4', sha256='28f89b854d1ff14fa1efa5b408e5e1c4f6a145420310073c4e44705feeb6d23b') depends_on('[email protected]:') depends_on('font-util') depends_on('[email protected]:', type='build') depends_on('fontsproto', type='build') depends_on('xtrans', type='build') depends_on('pkgconfig', type='build') depends_on('util-macros', type='build')
[]
[]
[]
[]
[]
python
null
null
null
cmd/abapAddonAssemblyKitCheckPV_generated.go
// Code generated by piper's step-generator. DO NOT EDIT. package cmd import ( "fmt" "os" "path/filepath" "time" "github.com/SAP/jenkins-library/pkg/config" "github.com/SAP/jenkins-library/pkg/log" "github.com/SAP/jenkins-library/pkg/piperenv" "github.com/SAP/jenkins-library/pkg/splunk" "github.com/SAP/jenkins-library/pkg/telemetry" "github.com/spf13/cobra" ) type abapAddonAssemblyKitCheckPVOptions struct { AbapAddonAssemblyKitEndpoint string `json:"abapAddonAssemblyKitEndpoint,omitempty"` Username string `json:"username,omitempty"` Password string `json:"password,omitempty"` AddonDescriptorFileName string `json:"addonDescriptorFileName,omitempty"` AddonDescriptor string `json:"addonDescriptor,omitempty"` } type abapAddonAssemblyKitCheckPVCommonPipelineEnvironment struct { abap struct { addonDescriptor string } } func (p *abapAddonAssemblyKitCheckPVCommonPipelineEnvironment) persist(path, resourceName string) { content := []struct { category string name string value interface{} }{ {category: "abap", name: "addonDescriptor", value: p.abap.addonDescriptor}, } errCount := 0 for _, param := range content { err := piperenv.SetResourceParameter(path, resourceName, filepath.Join(param.category, param.name), param.value) if err != nil { log.Entry().WithError(err).Error("Error persisting piper environment.") errCount++ } } if errCount > 0 { log.Entry().Fatal("failed to persist Piper environment") } } // AbapAddonAssemblyKitCheckPVCommand This step checks the validity of an Addon Product Version. func AbapAddonAssemblyKitCheckPVCommand() *cobra.Command { const STEP_NAME = "abapAddonAssemblyKitCheckPV" metadata := abapAddonAssemblyKitCheckPVMetadata() var stepConfig abapAddonAssemblyKitCheckPVOptions var startTime time.Time var commonPipelineEnvironment abapAddonAssemblyKitCheckPVCommonPipelineEnvironment var logCollector *log.CollectorHook var createAbapAddonAssemblyKitCheckPVCmd = &cobra.Command{ Use: STEP_NAME, Short: "This step checks the validity of an Addon Product Version.", Long: `This step checks whether the Addon Product Version in the addonDescriptorFileName does exist or is a valid successor of an existing Product Version.
It resolves the dotted version string into version, support package stack level and patch level and writes it to the commonPipelineEnvironment.`, PreRunE: func(cmd *cobra.Command, _ []string) error { startTime = time.Now() log.SetStepName(STEP_NAME) log.SetVerbose(GeneralConfig.Verbose) path, _ := os.Getwd() fatalHook := &log.FatalHook{CorrelationID: GeneralConfig.CorrelationID, Path: path} log.RegisterHook(fatalHook) err := PrepareConfig(cmd, &metadata, STEP_NAME, &stepConfig, config.OpenPiperFile) if err != nil { log.SetErrorCategory(log.ErrorConfiguration) return err } log.RegisterSecret(stepConfig.Username) log.RegisterSecret(stepConfig.Password) if len(GeneralConfig.HookConfig.SentryConfig.Dsn) > 0 { sentryHook := log.NewSentryHook(GeneralConfig.HookConfig.SentryConfig.Dsn, GeneralConfig.CorrelationID) log.RegisterHook(&sentryHook) } if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 { logCollector = &log.CollectorHook{CorrelationID: GeneralConfig.CorrelationID} log.RegisterHook(logCollector) } return nil }, Run: func(_ *cobra.Command, _ []string) { telemetryData := telemetry.CustomData{} telemetryData.ErrorCode = "1" handler := func() { config.RemoveVaultSecretFiles() commonPipelineEnvironment.persist(GeneralConfig.EnvRootPath, "commonPipelineEnvironment") telemetryData.Duration = fmt.Sprintf("%v", time.Since(startTime).Milliseconds()) telemetryData.ErrorCategory = log.GetErrorCategory().String() telemetry.Send(&telemetryData) if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 { splunk.Send(&telemetryData, logCollector) } } log.DeferExitHandler(handler) defer handler() telemetry.Initialize(GeneralConfig.NoTelemetry, STEP_NAME) if len(GeneralConfig.HookConfig.SplunkConfig.Dsn) > 0 { splunk.Initialize(GeneralConfig.CorrelationID, GeneralConfig.HookConfig.SplunkConfig.Dsn, GeneralConfig.HookConfig.SplunkConfig.Token, GeneralConfig.HookConfig.SplunkConfig.Index, GeneralConfig.HookConfig.SplunkConfig.SendLogs) } abapAddonAssemblyKitCheckPV(stepConfig, &telemetryData, &commonPipelineEnvironment) telemetryData.ErrorCode = "0" log.Entry().Info("SUCCESS") }, } addAbapAddonAssemblyKitCheckPVFlags(createAbapAddonAssemblyKitCheckPVCmd, &stepConfig) return createAbapAddonAssemblyKitCheckPVCmd } func addAbapAddonAssemblyKitCheckPVFlags(cmd *cobra.Command, stepConfig *abapAddonAssemblyKitCheckPVOptions) { cmd.Flags().StringVar(&stepConfig.AbapAddonAssemblyKitEndpoint, "abapAddonAssemblyKitEndpoint", `https://apps.support.sap.com`, "Base URL to the Addon Assembly Kit as a Service (AAKaaS) system") cmd.Flags().StringVar(&stepConfig.Username, "username", os.Getenv("PIPER_username"), "User for the Addon Assembly Kit as a Service (AAKaaS) system") cmd.Flags().StringVar(&stepConfig.Password, "password", os.Getenv("PIPER_password"), "Password for the Addon Assembly Kit as a Service (AAKaaS) system") cmd.Flags().StringVar(&stepConfig.AddonDescriptorFileName, "addonDescriptorFileName", `addon.yml`, "File name of the YAML file which describes the Product Version and corresponding Software Component Versions") cmd.Flags().StringVar(&stepConfig.AddonDescriptor, "addonDescriptor", os.Getenv("PIPER_addonDescriptor"), "Structure in the commonPipelineEnvironment containing information about the Product Version and corresponding Software Component Versions") cmd.MarkFlagRequired("abapAddonAssemblyKitEndpoint") cmd.MarkFlagRequired("username") cmd.MarkFlagRequired("password") cmd.MarkFlagRequired("addonDescriptorFileName") } // retrieve step metadata func abapAddonAssemblyKitCheckPVMetadata() config.StepData { 
var theMetaData = config.StepData{ Metadata: config.StepMetadata{ Name: "abapAddonAssemblyKitCheckPV", Aliases: []config.Alias{}, Description: "This step checks the validity of an Addon Product Version.", }, Spec: config.StepSpec{ Inputs: config.StepInputs{ Parameters: []config.StepParameters{ { Name: "abapAddonAssemblyKitEndpoint", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS", "GENERAL"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, }, { Name: "username", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, }, { Name: "password", ResourceRef: []config.ResourceReference{}, Scope: []string{"PARAMETERS"}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, }, { Name: "addonDescriptorFileName", ResourceRef: []config.ResourceReference{}, Scope: []string{}, Type: "string", Mandatory: true, Aliases: []config.Alias{}, }, { Name: "addonDescriptor", ResourceRef: []config.ResourceReference{ { Name: "commonPipelineEnvironment", Param: "abap/addonDescriptor", }, }, Scope: []string{"PARAMETERS", "STAGES", "STEPS"}, Type: "string", Mandatory: false, Aliases: []config.Alias{}, }, }, }, Outputs: config.StepOutputs{ Resources: []config.StepResources{ { Name: "commonPipelineEnvironment", Type: "piperEnvironment", Parameters: []map[string]interface{}{ {"Name": "abap/addonDescriptor"}, }, }, }, }, }, } return theMetaData }
[ "\"PIPER_username\"", "\"PIPER_password\"", "\"PIPER_addonDescriptor\"" ]
[]
[ "PIPER_addonDescriptor", "PIPER_password", "PIPER_username" ]
[]
["PIPER_addonDescriptor", "PIPER_password", "PIPER_username"]
go
3
0
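The generated step above wires every credential flag so that its default value comes from a PIPER_-prefixed environment variable, letting CI inject secrets without repeating them on the command line. A minimal cobra sketch of that env-backed default follows; the record additionally marks flags required and registers secrets for log masking, both omitted here for brevity.

package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

func main() {
	var username string
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(_ *cobra.Command, _ []string) error {
			fmt.Println("user:", username)
			return nil
		},
	}
	// As in the record, the flag's default falls back to an environment
	// variable, so --username only needs to be passed to override it.
	cmd.Flags().StringVar(&username, "username", os.Getenv("PIPER_username"), "User for the service")
	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}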
main_test.go
package main import ( "os" "testing" "github.com/jetstack/cert-manager/test/acme/dns" ) var ( zone = os.Getenv("TEST_ZONE_NAME") kubeBuilderBinPath = "./_out/kubebuilder/bin" ) func TestRunsSuite(t *testing.T) { // The manifest path should contain a file named config.json that is a // snippet of valid configuration that should be included on the // ChallengeRequest passed as part of the test cases. fixture := dns.NewFixture(&cisProviderSolver{}, dns.SetBinariesPath(kubeBuilderBinPath), dns.SetResolvedZone(zone), dns.SetAllowAmbientCredentials(false), dns.SetManifestPath("testdata/cis"), dns.SetStrict(true), ) fixture.RunConformance(t) }
[ "\"TEST_ZONE_NAME\"" ]
[]
[ "TEST_ZONE_NAME" ]
[]
["TEST_ZONE_NAME"]
go
1
0
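main_test.go above reads TEST_ZONE_NAME to point the conformance fixture at a real DNS zone. A common companion pattern, sketched below as an assumption rather than something the record does itself, is to skip the suite cleanly when the variable is not exported:

package main_test

import (
	"os"
	"testing"
)

// TestZoneConfigured shows the env-gated test pattern: suites that need
// external infrastructure skip instead of failing when the variable is unset.
func TestZoneConfigured(t *testing.T) {
	zone := os.Getenv("TEST_ZONE_NAME")
	if zone == "" {
		t.Skip("TEST_ZONE_NAME not set; skipping conformance suite")
	}
	t.Logf("running against zone %s", zone)
}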
DinoRun.py
# Dino Run # 2D game inspired by Google Chrome's dinosaur game # Original Author: Robin Rezwan # GitHub: http://github.com/robinrezwan import pygame from pygame.locals import * import os import sys import random # set the game window at the center of the display os.environ['SDL_VIDEO_CENTERED'] = '1' # defining some global variables SCREEN_WIDTH = 1280 SCREEN_HEIGHT = 720 FPS = 30 GROUND_HEIGHT = SCREEN_HEIGHT - 70 PLAY_GAME = True # initialize pygame and create window pygame.init() window = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT)) pygame.display.set_caption("Dino Run") clock = pygame.time.Clock() # load audio files jump_sound = pygame.mixer.Sound("sound/jump.ogg") score_sound = pygame.mixer.Sound("sound/score.ogg") game_over_sound = pygame.mixer.Sound("sound/game_over.ogg") # function for drawing text on the screen def draw_text(text, font_name, size, text_color, position_x, position_y, position): font = pygame.font.Font(font_name, size) # loads font text_plane = font.render(text, True, text_color) # renders given text in the selected font text_rect = text_plane.get_rect() # setting text position if position == "midtop": text_rect.midtop = (int(position_x), int(position_y)) elif position == "topright": text_rect.topright = (int(position_x), int(position_y)) window.blit(text_plane, text_rect) # draws the rendered text on the screen # function for loading single image file def load_image(path, size_x=0, size_y=0): image = pygame.image.load(path).convert_alpha() # loads image file and converts it into pixels if size_x > 0 and size_y > 0: image = pygame.transform.scale(image, (size_x, size_y)) # resizing the image to the given size return image, image.get_rect() # function for loading multiple image files in a list def load_sprites(image_path, image_name_prefix, number_of_image, size_x=0, size_y=0): images = [] # declaring list to store the images for i in range(0, number_of_image): path = image_path + image_name_prefix + str(i) + ".png" # creating the path string image = pygame.image.load(path).convert_alpha() # loads image file and converts it into pixels if size_x > 0 and size_y > 0: image = pygame.transform.scale(image, (size_x, size_y)) # resizing the image to the given size images.append(image) return images # class for creating and moving single background class Background: def __init__(self, image_path, speed=10): self.image0, self.rect0 = load_image(image_path, 1280, 720) self.image1, self.rect1 = load_image(image_path, 1280, 720) self.rect0.bottom = SCREEN_HEIGHT self.rect1.bottom = SCREEN_HEIGHT self.rect1.left = self.rect0.right self.speed = speed def draw(self): window.blit(self.image0, self.rect0) window.blit(self.image1, self.rect1) def update(self): self.rect0.left -= int(self.speed) self.rect1.left -= int(self.speed) if self.rect0.right < 0: self.rect0.left = self.rect1.right if self.rect1.right < 0: self.rect1.left = self.rect0.right # class for creating and moving single multiple backgrounds using the Background class class AllBackgrounds: def __init__(self, game_speed): self.background_0 = Background("image/background/bg_0.png", game_speed) self.background_1 = Background("image/background/bg_1.png", game_speed - 12) self.background_2 = Background("image/background/bg_2.png", game_speed - 13) self.background_3 = Background("image/background/bg_3.png", game_speed - 14) def update_speed(self, speed): self.background_0.speed = speed self.background_1.speed = speed - 12 self.background_2.speed = speed - 13 self.background_3.speed = speed - 14 def draw(self): 
self.background_3.draw() self.background_2.draw() self.background_1.draw() self.background_0.draw() def update(self): self.background_3.update() self.background_2.update() self.background_1.update() self.background_0.update() # class for creating and moving obstacle cactus class Cactus: def __init__(self, speed=10): self.cactus_images = load_sprites("image/cactus/", "cactus_", 5, 160, 160) self.cactus_image_0, self.rect_0 = self.cactus_images[0], self.cactus_images[0].get_rect() self.cactus_image_1, self.rect_1 = self.cactus_images[1], self.cactus_images[1].get_rect() self.rect_0.bottom = GROUND_HEIGHT - 11 self.rect_0.left = SCREEN_WIDTH self.rect_1.bottom = GROUND_HEIGHT - 11 self.rect_1.left = self.rect_0.right + int(SCREEN_WIDTH/2) self.speed = speed self.range_0 = 240 self.range_1 = 720 def get_cactus(self): current_cactus = [self.cactus_image_0, self.cactus_image_1] cactus_rect = [self.rect_0, self.rect_1] return current_cactus, cactus_rect def update_speed(self, speed): self.speed = speed self.range_0 += 1 self.range_1 += 1 def draw(self): window.blit(self.cactus_image_0, self.rect_0) window.blit(self.cactus_image_1, self.rect_1) def update(self): self.rect_0.left -= int(self.speed) self.rect_1.left -= int(self.speed) if self.rect_0.right < 0: temp_position = self.rect_1.right + random.randrange(self.range_0, self.range_1) if temp_position > SCREEN_WIDTH: self.rect_0.left = temp_position else: self.rect_0.left = SCREEN_WIDTH temp_index = random.randrange(0, 5) self.cactus_image_0 = self.cactus_images[temp_index] if self.rect_1.right < 0: temp_position = self.rect_0.right + random.randrange(self.range_0, self.range_1) if temp_position > SCREEN_WIDTH: self.rect_1.left = temp_position else: self.rect_1.left = SCREEN_WIDTH temp_index = random.randrange(0, 5) self.cactus_image_1 = self.cactus_images[temp_index] # class for creating and moving our dino buddy class Dino: def __init__(self): self.idle_images = load_sprites("image/dino/", "idle_", 10, 220, 153) self.running_images = load_sprites("image/dino/", "run_", 8, 220, 153) self.jumping_images = load_sprites("image/dino/", "jump_", 16, 220, 153) self.rect = self.idle_images[0].get_rect() self.rect.bottom = GROUND_HEIGHT self.rect.left = 70 self.jump_limit = GROUND_HEIGHT - 290 self.jump_speed = 50 # starting speed of the jump self.gravity_up = 4 # change rate when jumping up self.gravity_down = 2 # change rate when falling down # these indexes cycle through the images of the sprites, make the dino look moving self.idle_index = 0 self.running_index = 0 self.jumping_index = 0 # these booleans determine which images should be shown self.idle = True self.running = False self.jumping = False self.falling = False self.call_count = 0 # this variable is used to determine how often a task in a function should be done def check_collision(self, all_cactus): if self.running: dino_mask = pygame.mask.from_surface(self.running_images[self.running_index]) elif self.jumping: dino_mask = pygame.mask.from_surface(self.jumping_images[self.jumping_index]) else: dino_mask = pygame.mask.from_surface(self.idle_images[self.idle_index]) current_cactus, cactus_rect = all_cactus offset_0 = (cactus_rect[0].left - self.rect.left, cactus_rect[0].top - self.rect.top) offset_1 = (cactus_rect[1].left - self.rect.left, cactus_rect[1].top - self.rect.top) collide = dino_mask.overlap(pygame.mask.from_surface(current_cactus[0]), offset_0) or \ dino_mask.overlap(pygame.mask.from_surface(current_cactus[1]), offset_1) return collide def draw(self): if self.running: 
window.blit(self.running_images[self.running_index], self.rect) elif self.jumping: window.blit(self.jumping_images[self.jumping_index], self.rect) elif self.idle: window.blit(self.idle_images[self.idle_index], self.rect) def update(self): if self.running and self.call_count % 3 == 0: self.running_index = (self.running_index + 1) % 8 elif self.jumping: if not self.falling: self.rect.bottom -= self.jump_speed if self.jump_speed >= self.gravity_up: self.jump_speed -= self.gravity_up if self.rect.bottom < self.jump_limit: self.jump_speed = 0 self.falling = True else: self.rect.bottom += self.jump_speed self.jump_speed += self.gravity_down if self.rect.bottom > GROUND_HEIGHT: self.rect.bottom = GROUND_HEIGHT self.jump_speed = 50 self.jumping_index = 0 self.running_index = 0 self.jumping = False self.falling = False self.running = True if self.call_count % 2 == 0 or self.call_count % 3 == 0: self.jumping_index = (self.jumping_index + 1) % 16 elif self.idle and self.call_count % 3 == 0: self.idle_index = (self.idle_index + 1) % 10 self.call_count = self.call_count + 1 # class for counting and drawing score on the screen and saving high score on a file class Score: def __init__(self): self.high_score_image, self.rect_high = load_image("image/score/high_score.png", 35, 35) self.current_score_image, self.rect_current = load_image("image/score/current_score.png", 35, 35) self.rect_high.topright = (SCREEN_WIDTH - 15, 20) self.rect_current.topright = (SCREEN_WIDTH - 15, 65) self.high_score = 0 self.score = 0 self.load() self.high_score_achieved = False self.call_count = 0 def count(self): if self.call_count % 2 == 0: self.score += 1 if self.high_score_achieved: self.high_score = self.score elif self.score > self.high_score: self.high_score = self.score self.high_score_achieved = True score_sound.play() self.call_count = self.call_count + 1 def draw(self): window.blit(self.high_score_image, self.rect_high) window.blit(self.current_score_image, self.rect_current) draw_text(str(self.high_score), "font/monofonto.ttf", 28, (19, 130, 98), SCREEN_WIDTH - 60, 20, "topright") draw_text(str(self.score), "font/monofonto.ttf", 28, (19, 130, 98), SCREEN_WIDTH - 60, 65, "topright") def load(self): # load high score try: with open("high_score.txt", "r") as file: self.high_score = int(file.read()) except (IOError, ValueError): self.high_score = 0 def save(self): # save high score if self.high_score_achieved: with open("high_score.txt", "w") as file: file.write(str(self.high_score)) # class for showing game over screen class GameOver: def __init__(self): self.replay_image, self.rect = load_image("image/game_over/replay_0.png", 200, 60) self.rect.center = (int(SCREEN_WIDTH/2), int(SCREEN_HEIGHT/2)) def draw(self): draw_text("GAME OVER", "font/northcliff_stencil.otf", 80, (255, 0, 0), SCREEN_WIDTH/2, SCREEN_HEIGHT/3, "midtop") window.blit(self.replay_image, self.rect) # main game function def start_game(): # declaring necessary variables and creating objects of the classes run = True play_again = False game_over = False game_speed = 15 # the speed the number of pixels the game moves backgrounds = AllBackgrounds(game_speed) cactus = Cactus(game_speed) dino = Dino() score = Score() game_over_screen = GameOver() # main game loop, this will run continuously and draw everything on the screen while run: clock.tick(FPS) # limiting frames per second to run loop at the right speed # handling input events for event in pygame.event.get(): if event.type == QUIT: pygame.quit() # quits pygame sys.exit() # exits the program if 
event.type == pygame.MOUSEBUTTONDOWN: mx, my = pygame.mouse.get_pos() # gets mouse click co-ordinates if game_over: # checks if the play again button is clicked if game_over_screen.rect.left < mx < game_over_screen.rect.right and \ game_over_screen.rect.top < my < game_over_screen.rect.bottom: play_again = True run = False key = pygame.key.get_pressed() # gets pressed key if key[K_SPACE] or key[K_UP]: if game_over: play_again = True run = False elif not dino.jumping: jump_sound.play() dino.jumping = True dino.running = False if dino.idle: dino.idle = False # calling draw functions to draw all the elements on the screen backgrounds.draw() cactus.draw() dino.draw() score.draw() if game_over: game_over_screen.draw() else: if not dino.idle: score.count() backgrounds.update() cactus.update() # increasing game speed over time if score.score % 120 == 0: game_speed += 0.5 backgrounds.update_speed(game_speed) cactus.update_speed(game_speed) dino.jump_speed += 5 dino.update() # calling function to check collision if dino.check_collision(cactus.get_cactus()): game_over = True game_over_screen.draw() game_over_sound.play() score.save() pygame.display.flip() # clears the display before running the loop again return play_again # returns true after game over, if the player wants to play again # this loop keeps calling the main game function as long as the player wants to continue while PLAY_GAME: PLAY_GAME = start_game()
[]
[]
[ "SDL_VIDEO_CENTERED" ]
[]
["SDL_VIDEO_CENTERED"]
python
1
0
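Reading across the records, the bracketed lists and the two trailing counts that follow each file appear to catalogue its environment-variable accesses, distinguishing keys written as string literals (like 'SDL_VIDEO_CENTERED' in the game above) from keys computed at run time. A minimal Go sketch of that distinction — the names here are illustrative only, not from the dataset:

package main

import (
	"fmt"
	"os"
)

func main() {
	// Constant-key access: the variable name is a string literal, so a
	// static scan of the source can record "SDL_VIDEO_CENTERED" directly.
	os.Setenv("SDL_VIDEO_CENTERED", "1")
	fmt.Println(os.Getenv("SDL_VIDEO_CENTERED"))

	// Variable-key access: the name is assembled at run time, so a static
	// scan can only note that *some* environment variable is read here.
	prefix := "SDL_VIDEO"
	fmt.Println(os.Getenv(prefix + "_CENTERED"))
}

Only the literal key can be recovered by a static scan; a computed key leaves the constant-key list empty even though the program still touches the environment.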
src/parse/asp/exec.go
package asp import ( "bytes" "context" "fmt" "os" "os/exec" "strings" "sync" "github.com/thought-machine/please/src/core" ) type execKey struct { args string wantStdout bool wantStderr bool } type execPromise struct { wg *sync.WaitGroup lock sync.Mutex } type execOut struct { out string success bool } var ( // The output from doExec() is memoized by default execCachedOuts sync.Map // The absolute path of commands execCmdPath sync.Map execPromisesLock sync.Mutex execPromises map[execKey]*execPromise ) func init() { execPromisesLock.Lock() defer execPromisesLock.Unlock() const initCacheSize = 8 execPromises = make(map[execKey]*execPromise, initCacheSize) } // doExec fork/exec's a command and returns the output as a string. exec // accepts either a string or a list of commands and arguments. The output from // exec() is memoized by default to prevent side effects and aid in performance // of duplicate calls to the same command with the same arguments (e.g. `git // rev-parse --short HEAD`). The output from exec()'ed commands must be // reproducible. If storeNegative is true, it is possible for success to return // successfully and return an error (i.e. we're expecing a command to fail and // want to cache the failure). // // NOTE: Commands that rely on the current working directory must not be cached. func doExec(s *scope, cmdIn pyObject, wantStdout bool, wantStderr bool, cacheOutput bool, storeNegative bool) (pyObj pyObject, success bool, err error) { if !wantStdout && !wantStderr { return s.Error("exec() must have at least stdout or stderr set to true, both can not be false"), false, nil } var argv []string if isType(cmdIn, "str") { argv = strings.Fields(string(cmdIn.(pyString))) } else if isType(cmdIn, "list") { pl := cmdIn.(pyList) argv = make([]string, 0, len(pl)) for i := 0; i < len(pl); i++ { argv = append(argv, pl[i].String()) } } // The cache key is tightly coupled to the operating parameters key := execMakeKey(argv, wantStdout, wantStderr) if cacheOutput { out, found := execGetCachedOutput(key, argv) if found { return pyString(out.out), out.success, nil } } ctx, cancel := context.WithTimeout(context.TODO(), core.TargetTimeoutOrDefault(nil, s.state)) defer cancel() cmdPath, err := execFindCmd(argv[0]) if err != nil { return s.Error("exec() unable to find %q in PATH %q", argv[0], os.Getenv("PATH")), false, err } cmdArgs := argv[1:] var out []byte cmd := exec.CommandContext(ctx, cmdPath, cmdArgs...) if wantStdout && wantStderr { out, err = cmd.CombinedOutput() } else { buf := &bytes.Buffer{} switch { case wantStdout: cmd.Stderr = nil cmd.Stdout = buf case wantStderr: cmd.Stderr = buf cmd.Stdout = nil } err = cmd.Run() out = buf.Bytes() } out = bytes.TrimSpace(out) outStr := string(out) if err != nil { if cacheOutput && storeNegative { // Completed successfully and returned an error. Store the negative value // since we're also returning an error, which tells the caller to // fallthrough their logic if a command returns with a non-zero exit code. outStr = execSetCachedOutput(key, argv, &execOut{out: outStr, success: false}) return pyString(outStr), true, err } return pyString(fmt.Sprintf("exec() unable to run command %q: %v", argv, err)), false, err } if cacheOutput { outStr = execSetCachedOutput(key, argv, &execOut{out: outStr, success: true}) } return pyString(outStr), true, nil } // execFindCmd looks for a command using PATH and returns a cached abspath. 
func execFindCmd(cmdName string) (path string, err error) { pathRaw, found := execCmdPath.Load(cmdName) if !found { // Perform a racy LookPath assuming the path is stable between concurrent // lookups for the same cmdName. path, err := exec.LookPath(cmdName) if err != nil { return "", err } // First write wins pathRaw, _ = execCmdPath.LoadOrStore(cmdName, path) } return pathRaw.(string), nil } // execGetCachedOutput returns the output if found, sets found to true if found, // and returns a held promise that must be completed. func execGetCachedOutput(key execKey, args []string) (output *execOut, found bool) { outputRaw, found := execCachedOuts.Load(key) if found { return outputRaw.(*execOut), true } // Re-check with promises exclusive lock held execPromisesLock.Lock() outputRaw, found = execCachedOuts.Load(key) if found { execPromisesLock.Unlock() return outputRaw.(*execOut), true } // Create a new promise. Increment the WaitGroup while the lock is held. promise, found := execPromises[key] if !found { promise = &execPromise{ wg: &sync.WaitGroup{}, } promise.wg.Add(1) execPromises[key] = promise execPromisesLock.Unlock() return nil, false // Let the caller fulfill the promise } execPromisesLock.Unlock() promise.wg.Wait() // Block until the promise is completed execPromisesLock.Lock() defer execPromisesLock.Unlock() outputRaw, found = execCachedOuts.Load(key) if found { return outputRaw.(*execOut), true } if !found { panic(fmt.Sprintf("blocked on promise %v, didn't find value", key)) } return outputRaw.(*execOut), true } // execGitBranch returns the output of a git_branch() command. // // git_branch() returns the output of `git symbolic-ref -q --short HEAD` func execGitBranch(s *scope, args []pyObject) pyObject { short := args[0].IsTruthy() cmdIn := make([]pyObject, 3, 5) cmdIn[0] = pyString("git") cmdIn[1] = pyString("symbolic-ref") cmdIn[2] = pyString("-q") if short { cmdIn = append(cmdIn, pyString("--short")) } cmdIn = append(cmdIn, pyString("HEAD")) wantStdout := true wantStderr := false cacheOutput := true storeNegative := true gitSymRefResult, success, err := doExec(s, pyList(cmdIn), wantStdout, wantStderr, cacheOutput, storeNegative) switch { case success && err == nil: return gitSymRefResult case success && err != nil: // ran a thing that failed, handle case below case !success && err == nil: // previous invocation cached a negative value default: return s.Error("exec() %q failed: %v", pyList(cmdIn).String(), err) } // We're in a detached head cmdIn = make([]pyObject, 4) cmdIn[0] = pyString("git") cmdIn[1] = pyString("show") cmdIn[2] = pyString("-q") cmdIn[3] = pyString("--format=%D") storeNegative = false gitShowResult, success, err := doExec(s, pyList(cmdIn), wantStdout, wantStderr, cacheOutput, storeNegative) if !success { // doExec returns a formatted error string return s.Error("exec() %q failed: %v", pyList(cmdIn).String(), err) } results := strings.Fields(gitShowResult.String()) if len(results) == 0 { // We're seeing something unknown and unexpected, go back to the original error message return gitSymRefResult } return pyString(results[len(results)-1]) } // execGitCommit returns the output of a git_commit() command. 
// // git_commit() returns the output of `git rev-parse HEAD` func execGitCommit(s *scope, args []pyObject) pyObject { cmdIn := []pyObject{ pyString("git"), pyString("rev-parse"), pyString("HEAD"), } wantStdout := true wantStderr := false cacheOutput := true storeNegative := false // No error handling required since we don't want to retry pyResult, success, err := doExec(s, pyList(cmdIn), wantStdout, wantStderr, cacheOutput, storeNegative) if !success { return s.Error("git_commit() failed: %v", err) } return pyResult } // execGitShow returns the output of a git_show() command with a strict format. // // git_show() returns the output of `git show -s --format=%{fmt}` // // %ci == commit-date: // `git show -s --format=%ci` = 2018-12-10 00:53:35 -0800 func execGitShow(s *scope, args []pyObject) pyObject { formatVerb := args[0].(pyString) switch formatVerb { case "%H": // commit hash case "%T": // tree hash case "%P": // parent hashes case "%an": // author name case "%ae": // author email case "%at": // author date, UNIX timestamp case "%cn": // committer name case "%ce": // committer email case "%ct": // committer date, UNIX timestamp case "%D": // ref names without the " (", ")" wrapping. case "%e": // encoding case "%s": // subject case "%f": // sanitized subject line, suitable for a filename case "%b": // body case "%B": // raw body (unwrapped subject and body) case "%N": // commit notes case "%GG": // raw verification message from GPG for a signed commit case "%G?": // show "G" for a good (valid) signature, "B" for a bad signature, "U" for a good signature with unknown validity, "X" for a good signature that has expired, "Y" for a good signature made by an expired key, "R" for a good signature made by a revoked key, "E" if the signature cannot be checked (e.g. missing key) and "N" for no signature case "%GS": // show the name of the signer for a signed commit case "%GK": // show the key used to sign a signed commit case "%n": // newline case "%%": // a raw % default: return s.Error("git_show() unsupported format code: %q", formatVerb) } cmdIn := []pyObject{ pyString("git"), pyString("show"), pyString("-s"), pyString(fmt.Sprintf("--format=%s", formatVerb)), } wantStdout := true wantStderr := false cacheOutput := true storeNegative := false pyResult, success, err := doExec(s, pyList(cmdIn), wantStdout, wantStderr, cacheOutput, storeNegative) if !success { return s.Error("git_show() failed: %v", err) } return pyResult } // execGitState returns the output of a git_state() command. // // git_state() returns the output of `git status --porcelain`. func execGitState(s *scope, args []pyObject) pyObject { cleanLabel := args[0].(pyString) dirtyLabel := args[1].(pyString) cmdIn := []pyObject{ pyString("git"), pyString("status"), pyString("--porcelain"), } wantStdout := true wantStderr := false cacheOutput := true storeNegative := false pyResult, success, err := doExec(s, pyList(cmdIn), wantStdout, wantStderr, cacheOutput, storeNegative) if !success { return s.Error("git_state() failed: %v", err) } if !isType(pyResult, "str") { return pyResult } result := pyResult.String() if len(result) != 0 { return dirtyLabel } return cleanLabel } // execMakeKey returns an execKey. 
func execMakeKey(args []string, wantStdout bool, wantStderr bool) execKey { return execKey{ args: strings.Join(args, "\x00"), wantStdout: wantStdout, wantStderr: wantStderr, } } // execSetCachedOutput stores the output for key and fulfills any promise outstanding for it func execSetCachedOutput(key execKey, args []string, output *execOut) string { outputRaw, alreadyLoaded := execCachedOuts.LoadOrStore(key, output) if alreadyLoaded { panic(fmt.Sprintf("race detected for key %v", key)) } execPromisesLock.Lock() defer execPromisesLock.Unlock() if promise, found := execPromises[key]; found { delete(execPromises, key) promise.lock.Lock() defer promise.lock.Unlock() promise.wg.Done() } out := outputRaw.(*execOut).out return out }
[ "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
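doExec in the record above memoizes command output in a sync.Map and parks concurrent callers on a per-key WaitGroup "promise" until the first caller stores a result. A stripped-down sketch of that single-flight pattern, with hypothetical names (memoize, cache) and none of exec.go's error handling:

package main

import (
	"fmt"
	"sync"
)

var (
	cache    sync.Map // key -> string
	mu       sync.Mutex
	promises = map[string]*sync.WaitGroup{}
)

// memoize runs fn at most once per key; concurrent callers for the same
// key block until the first caller has stored its result.
func memoize(key string, fn func() string) string {
	if v, ok := cache.Load(key); ok { // fast path, no lock
		return v.(string)
	}
	mu.Lock()
	if v, ok := cache.Load(key); ok { // re-check under the lock
		mu.Unlock()
		return v.(string)
	}
	if wg, ok := promises[key]; ok { // someone else is computing
		mu.Unlock()
		wg.Wait()
		v, _ := cache.Load(key)
		return v.(string)
	}
	wg := &sync.WaitGroup{} // we are the first caller: publish a promise
	wg.Add(1)
	promises[key] = wg
	mu.Unlock()

	out := fn() // compute outside the lock
	cache.Store(out, out)[0:0] // (see note) store result before releasing waiters
	cache.Store(key, out)

	mu.Lock()
	delete(promises, key)
	mu.Unlock()
	wg.Done() // release waiters only after the result is in the cache
	return out
}

func main() {
	fmt.Println(memoize("k", func() string { return "computed once" }))
	fmt.Println(memoize("k", func() string { return "never runs" }))
}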
pkg/v1/cli/pluginmanager/manager_test.go
// Copyright 2021 VMware, Inc. All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 package pluginmanager import ( "fmt" "os" "os/exec" "path/filepath" "testing" "github.com/otiai10/copy" "github.com/stretchr/testify/assert" cliv1alpha1 "github.com/vmware-tanzu/tanzu-framework/apis/cli/v1alpha1" "github.com/vmware-tanzu/tanzu-framework/pkg/v1/cli/common" "github.com/vmware-tanzu/tanzu-framework/pkg/v1/tkg/log" ) var testCase string const ( testcaseInstallLogin = "install-login" testcaseInstallCluster = "install-cluster" testcaseInstallNotexists = "install-notexists" ) func Test_DiscoverPlugins(t *testing.T) { assert := assert.New(t) defer setupLocalDistoForTesting()() serverPlugins, standalonePlugins, err := DiscoverPlugins("") assert.Nil(err) assert.Equal(0, len(serverPlugins)) assert.Equal(1, len(standalonePlugins)) serverPlugins, standalonePlugins, err = DiscoverPlugins("mgmt-does-not-exists") assert.Nil(err) assert.Equal(0, len(serverPlugins)) assert.Equal(1, len(standalonePlugins)) serverPlugins, standalonePlugins, err = DiscoverPlugins("mgmt") assert.Nil(err) assert.Equal(1, len(serverPlugins)) assert.Equal(1, len(standalonePlugins)) assert.Equal("cluster", serverPlugins[0].Name) assert.Equal("login", standalonePlugins[0].Name) } func Test_InstallPlugin_InstalledPlugins(t *testing.T) { assert := assert.New(t) defer setupLocalDistoForTesting()() execCommand = fakeExecCommand defer func() { execCommand = exec.Command }() // Try installing not-existing package testCase = testcaseInstallNotexists err := InstallPlugin("", "notexists", "v0.2.0") assert.NotNil(err) assert.Contains(err.Error(), "unable to find plugin 'notexists'") // Install login (standalone) package testCase = testcaseInstallLogin err = InstallPlugin("", "login", "v0.2.0") assert.Nil(err) // Verify installed plugin installedServerPlugins, installedStandalonePlugins, err := InstalledPlugins("") assert.Nil(err) assert.Equal(0, len(installedServerPlugins)) assert.Equal(1, len(installedStandalonePlugins)) assert.Equal("login", installedStandalonePlugins[0].Name) // Try installing cluster plugin through standalone discovery testCase = testcaseInstallCluster err = InstallPlugin("", "cluster", "v0.2.0") assert.NotNil(err) assert.Contains(err.Error(), "unable to find plugin 'cluster'") // Try installing cluster plugin through context discovery err = InstallPlugin("mgmt", "cluster", "v0.2.0") assert.Nil(err) // Verify installed plugins installedServerPlugins, installedStandalonePlugins, err = InstalledPlugins("mgmt") assert.Nil(err) assert.Equal(1, len(installedStandalonePlugins)) assert.Equal("login", installedStandalonePlugins[0].Name) assert.Equal(1, len(installedServerPlugins)) assert.Equal("cluster", installedServerPlugins[0].Name) } func Test_AvailablePlugins(t *testing.T) { assert := assert.New(t) defer setupLocalDistoForTesting()() discovered, err := AvailablePlugins("") assert.Nil(err) assert.Equal(1, len(discovered)) assert.Equal("login", discovered[0].Name) assert.Equal(PluginScopeStandalone, discovered[0].Scope) assert.Equal(PluginStatusNotInstalled, discovered[0].Status) discovered, err = AvailablePlugins("mgmt") assert.Nil(err) assert.Equal(2, len(discovered)) assert.Equal("cluster", discovered[0].Name) assert.Equal(PluginScopeContext, discovered[0].Scope) assert.Equal(PluginStatusNotInstalled, discovered[0].Status) assert.Equal("login", discovered[1].Name) assert.Equal(PluginScopeStandalone, discovered[1].Scope) assert.Equal(PluginStatusNotInstalled, discovered[1].Status) // Install login, cluster package 
mockInstallPlugin(assert, "", "login", "v0.2.0") mockInstallPlugin(assert, "mgmt", "cluster", "v0.2.0") // Get available plugin after install and verify installation status discovered, err = AvailablePlugins("mgmt") assert.Nil(err) assert.Equal(2, len(discovered)) assert.Equal("cluster", discovered[0].Name) assert.Equal(PluginScopeContext, discovered[0].Scope) assert.Equal(PluginStatusInstalled, discovered[0].Status) assert.Equal("login", discovered[1].Name) assert.Equal(PluginScopeStandalone, discovered[1].Scope) assert.Equal(PluginStatusInstalled, discovered[1].Status) } func Test_DescribePlugin(t *testing.T) { assert := assert.New(t) defer setupLocalDistoForTesting()() // Try describe plugin when plugin is not installed _, err := DescribePlugin("", "login") assert.NotNil(err) assert.Contains(err.Error(), "could not get plugin path for plugin \"login\"") // Install login (standalone) package mockInstallPlugin(assert, "", "login", "v0.2.0") // Try describe plugin when plugin after installing plugin pd, err := DescribePlugin("", "login") assert.Nil(err) assert.Equal("login", pd.Name) assert.Equal("v0.2.0", pd.Version) // Try describe plugin when plugin is not installed _, err = DescribePlugin("mgmt", "cluster") assert.NotNil(err) assert.Contains(err.Error(), "could not get plugin path for plugin \"cluster\"") // Install cluster (context) package // Install login (standalone) package mockInstallPlugin(assert, "mgmt", "cluster", "v0.2.0") // Try describe plugin when plugin after installing plugin pd, err = DescribePlugin("mgmt", "cluster") assert.Nil(err) assert.Equal("cluster", pd.Name) assert.Equal("v0.2.0", pd.Version) } func Test_DeletePlugin(t *testing.T) { assert := assert.New(t) defer setupLocalDistoForTesting()() // Try delete plugin when plugin is not installed err := DeletePlugin("", "login") assert.NotNil(err) assert.Contains(err.Error(), "could not get plugin path for plugin \"login\"") // Install login (standalone) package mockInstallPlugin(assert, "", "login", "v0.2.0") // Try delete plugin when plugin is installed err = DeletePlugin("mgmt", "cluster") assert.NotNil(err) assert.Contains(err.Error(), "could not get plugin path for plugin \"cluster\"") // Install cluster (context) package mockInstallPlugin(assert, "mgmt", "cluster", "v0.2.0") // Try describe plugin when plugin after installing plugin err = DeletePlugin("mgmt", "cluster") assert.Nil(err) } func Test_ValidatePlugin(t *testing.T) { assert := assert.New(t) pd := cliv1alpha1.PluginDescriptor{} err := ValidatePlugin(&pd) assert.Contains(err.Error(), "plugin name cannot be empty") pd.Name = "fakeplugin" err = ValidatePlugin(&pd) assert.NotContains(err.Error(), "plugin name cannot be empty") assert.Contains(err.Error(), "plugin \"fakeplugin\" version cannot be empty") assert.Contains(err.Error(), "plugin \"fakeplugin\" group cannot be empty") } func mockInstallPlugin(assert *assert.Assertions, server, name, version string) { //nolint:unparam execCommand = fakeExecCommand defer func() { execCommand = exec.Command }() testCase = "install-" + name err := InstallPlugin(server, name, version) assert.Nil(err) } func fakeExecCommand(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcess", "--", command} cs = append(cs, args...) cmd := exec.Command(os.Args[0], cs...) 
//nolint:gosec tc := "TEST_CASE=" + testCase cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1", tc} return cmd } func TestHelperProcess(t *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } defer os.Exit(0) args := os.Args for len(args) > 0 { if args[0] == "--" { args = args[1:] break } args = args[1:] } if len(args) == 0 { fmt.Fprintf(os.Stderr, "No command\n") os.Exit(2) } switch os.Getenv("TEST_CASE") { case testcaseInstallCluster: out := `{"name":"cluster","description":"Kubernetes cluster operations","version":"v0.2.0","buildSHA":"c2dbd15","digest":"","group":"Run","docURL":"","completionType":0,"aliases":["cl","clusters"],"installationPath":"","discovery":"","scope":"","status":""}` fmt.Fprint(os.Stdout, out) case testcaseInstallLogin: out := `{"name":"login","description":"Login to the platform","version":"v0.2.0","buildSHA":"c2dbd15","digest":"","group":"System","docURL":"","completionType":0,"aliases":["lo","logins"],"installationPath":"","discovery":"","scope":"","status":""}` fmt.Fprint(os.Stdout, out) case testcaseInstallNotexists: out := `` fmt.Fprint(os.Stdout, out) } } func setupLocalDistoForTesting() func() { tmpDir, err := os.MkdirTemp(os.TempDir(), "") if err != nil { log.Fatal(err, "unable to create temporary directory") } common.DefaultPluginRoot = filepath.Join(tmpDir, "plugin-root") common.DefaultLocalPluginDistroDir = filepath.Join(tmpDir, "distro") common.DefaultCacheDir = filepath.Join(tmpDir, "cache") tkgConfigFile := filepath.Join(tmpDir, "tanzu_config.yaml") os.Setenv("TANZU_CONFIG", tkgConfigFile) err = copy.Copy(filepath.Join("test", "local"), common.DefaultLocalPluginDistroDir) if err != nil { log.Fatal(err, "Error while setting local distro for testing") } err = copy.Copy(filepath.Join("test", "config.yaml"), tkgConfigFile) if err != nil { log.Fatal(err, "Error while copying tanzu config file for testing") } return func() { os.RemoveAll(tmpDir) } }
[ "\"GO_WANT_HELPER_PROCESS\"", "\"TEST_CASE\"" ]
[]
[ "GO_WANT_HELPER_PROCESS", "TEST_CASE" ]
[]
["GO_WANT_HELPER_PROCESS", "TEST_CASE"]
go
2
0
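The test file above swaps execCommand for fakeExecCommand, which re-runs the test binary itself and routes it into TestHelperProcess via the GO_WANT_HELPER_PROCESS and TEST_CASE variables. A self-contained sketch of that well-known pattern, for a *_test.go file; the names and fake output are illustrative, not from the Tanzu code:

package fakeexec

import (
	"os"
	"os/exec"
	"testing"
)

// fakeCommand re-executes the current test binary so that TestHelper acts
// as the subprocess, selected via -test.run and a marker variable.
func fakeCommand(name string, arg ...string) *exec.Cmd {
	cs := append([]string{"-test.run=TestHelper$", "--", name}, arg...)
	cmd := exec.Command(os.Args[0], cs...)
	cmd.Env = append(os.Environ(), "GO_WANT_HELPER_PROCESS=1")
	return cmd
}

// TestHelper is not a real test: it only does work when invoked as the
// faked subprocess, and is a no-op during a normal test run.
func TestHelper(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}
	os.Stdout.WriteString("fake output\n")
	os.Exit(0)
}

func TestUsesFake(t *testing.T) {
	out, err := fakeCommand("git", "status").Output()
	if err != nil {
		t.Fatal(err)
	}
	if string(out) != "fake output\n" {
		t.Fatalf("unexpected output: %q", out)
	}
}

The appeal of the pattern is that no real external binary is ever run, so the test controls every byte the "subprocess" emits.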
rw/httpbase.py
# Copyright 2015 Florian Ludwig # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from __future__ import absolute_import, division, print_function, with_statement import os import inspect import tornado.web import tornado.httpserver import tornado.httputil import tornado.ioloop from tornado import gen from tornado import iostream from tornado.web import HTTPError from tornado.concurrent import is_future from tornado.web import _has_stream_request_body import tornado.routing import rw.cfg import rw.scope import rw.routing import rw.template import rw.server import rw.event PRE_REQUEST = rw.event.Event('httpbase.pre_request') POST_REQUEST = rw.event.Event('httpbase.post_request') class Application(tornado.routing.ReversibleRouter): def __init__(self, handler=None, root=None, extra_configs=None): """rueckenwind Application to plug into tornado's httpserver. Either `root` or `handler` must be specified. :param rw.http.Module root: The root module to serve :param handler: The request handler (should subclass `tornado.web.RequestHandler`) :param extra_configs: path to alternative config file for rueckenwind """ self.settings = {} self.rw_settings = {} self.root = root self.scope = rw.scope.Scope() self.scope['app'] = self self.extra_configs = extra_configs if self.root: self.handler = handler if handler is not None else RequestHandler self.scope['settings'] = rw.cfg.read_configs(self.root.name, self.extra_configs) pkgs = self.scope['settings'].get('rw.templates', {}).get('pkgs', None) if not pkgs: pkgs = [root.name] self.scope['template_env'] = rw.template.create_template_env(pkgs) self.scope['template_env'].globals['app'] = self else: self.handler = handler self.scope['settings'] = {} assert handler is not None self._wsgi = False # wsgi is not supported # compatibility so we can mount tornado RequestHandlers self.ui_modules = {} self.ui_methods = {} rw.server.PHASE_CONFIGURATION.add(self.configure) def configure(self): with self.scope(): return self._scoped_configure() @gen.coroutine def _scoped_configure(self): yield rw.scope.setup_app_scope(self.root.name, self.scope) self.rw_settings = self.scope['settings'] cfg_rw_http = self.rw_settings.setdefault('rw.http', {}) cfg_rw_http['live_settings'] = self.settings self._configure_cookie_secret() yield self.scope.activate(self.root) def _configure_cookie_secret(self): cfg = self.rw_settings['rw.http'] if 'cookie_secret' in cfg: if 'file' in cfg['cookie_secret']: cs_path = cfg['cookie_secret']['file'] cs_path = cs_path.format(**os.environ) if os.path.exists(cs_path): cookie_secret = open(cs_path, 'rb').read().strip() else: cs_dir = os.path.dirname(cs_path) if not os.path.exists(cs_dir): os.makedirs(cs_dir) cookie_secret = os.urandom(32) open(cs_path, 'wb').write(cookie_secret) elif 'random' in cfg['cookie_secret'] and cfg['cookie_secret']['random']: cookie_secret = os.urandom(32) cfg['live_settings']['cookie_secret'] = cookie_secret def start_request(self, server_conn, request_conn): """Called by `tornado.httpserver.HTTPServer` to handle a request.""" return 
RequestDispatcher(self, request_conn) @gen.coroutine def _handle_request(self, request_scope, request): handler = self.handler(self, request) request_scope['handler'] = handler try: yield PRE_REQUEST() yield handler._execute([]) yield POST_REQUEST() except Exception as e: # Ensure exceptions in PRE and POST_REQUEST are # also forwarded to the exception handler. # It is not just important for logging but making # `raise HTTPError()` in event handlers work. handler._transforms = [] handler._handle_request_exception(e) def _request_finished(self, request_future): # access result to throw exceptions that might have occurred during # request handling request_future.result() def log_request(self, request): # TODO print(request) pass class RequestDispatcher(tornado.httputil.HTTPMessageDelegate): def __init__(self, application, connection): self.application = application self.connection = connection self.request = None self.chunks = [] self.handler_class = None self.handler_kwargs = None self.path_args = [] self.path_kwargs = {} self.stream_request_body = False def headers_received(self, start_line, headers): self.request = tornado.httputil.HTTPServerRequest( connection=self.connection, start_line=start_line, headers=headers) if self.stream_request_body: self.request.body = Future() return self.execute() def data_received(self, data): if self.stream_request_body: return self.handler.data_received(data) else: self.chunks.append(data) def finish(self): if self.stream_request_body: self.request.body.set_result(None) else: self.request.body = b''.join(self.chunks) self.request._parse_body() self.execute() def on_connection_close(self): if self.stream_request_body: self.handler.on_connection_close() else: self.chunks = None def execute(self): app = self.application with app.scope(): request_scope = rw.scope.Scope() with request_scope(): request_handling = app._handle_request(request_scope, self.request) io_loop = tornado.ioloop.IOLoop.current() io_loop.add_future(request_handling, app._request_finished) class RequestHandler(tornado.web.RequestHandler, dict): def __init__(self, application, request, **kwargs): # The super class is not called since it creates # some structures we do not care about. Since # the "not caring" leads to memory leaks they # are not created in the first place. 
self.application = application self.request = request self._headers_written = False self._finished = False self._auto_finish = False # vanilla tornado defaults to True self._transforms = None # will be set in _execute self._prepared_future = None # variables from vanilla tornado, not avaiable in rw # self.path_args # self.path_kwargs # self.ui self.clear() self.request.connection.set_close_callback(self.on_connection_close) self.initialize(**kwargs) def head(self, *args, **kwargs): return self.handle_request() def get(self, *args, **kwargs): return self.handle_request() def post(self, *args, **kwargs): return self.handle_request() def delete(self, *args, **kwargs): return self.handle_request() def patch(self, *args, **kwargs): return self.handle_request() def put(self, *args, **kwargs): return self.handle_request() def options(self, *args, **kwargs): return self.handle_request() def send_error(self, status_code=500, **kwargs): if status_code == 500: handle = rw.scope.get('rw.httpbase:handle_exception', None) if handle: handle(self, kwargs) return tornado.web.RequestHandler.send_error(self, status_code, **kwargs) def handle_request(self): routing_table = rw.scope.get('rw.http')['routing_table'] prefix, module, fn, args = routing_table.find_route(self.request.method, self.request.path) current_scope = rw.scope.get_current_scope() current_scope['rw.routing.prefix'] = prefix current_scope['url_variables'] = args current_scope['module'] = module if fn is None: raise tornado.web.HTTPError(404) # only supply arguments if those are "welcome" if hasattr(fn, '_rw_wrapped_function'): arg_spec = inspect.getargspec(fn._rw_wrapped_function) else: arg_spec = inspect.getargspec(fn) if arg_spec.keywords is not None: # fn accepts **keywords arguments so we pass all variables return fn(**args) call_args = {} for arg, value in args.items(): if arg in arg_spec.args: call_args[arg] = value return fn(**call_args) # overwrite methodes that are not supported to make sure # they get not used by accident. def render(self, template_name, **kwargs): """tornado API, not available in rw""" raise NotImplementedError() def render_string(self, template_name, **kwargs): """tornado API, not available in rw""" raise NotImplementedError() def get_template_namespace(self): """tornado API, not available in rw""" raise NotImplementedError() def create_template_loader(self, template_path): """tornado API, not available in rw""" raise NotImplementedError() def get_template_path(self): """tornado API, not available in rw""" raise NotImplementedError() def static_url(self, path, include_host=None, **kwargs): """tornado API, not available in rw""" raise NotImplementedError() def reverse_url(self, name, *args): """tornado API, not available in rw""" raise NotImplementedError() def _ui_module(self, name, module): """tornado internal method, not used in rw""" raise NotImplementedError() def _ui_method(self, method): """tornado internal method, not used in rw""" raise NotImplementedError()
[]
[]
[]
[]
[]
python
0
0
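The _configure_cookie_secret method above reads the cookie secret from a configured file when it exists, and otherwise generates 32 random bytes and persists them (creating the parent directory first) so the secret survives restarts. A hedged Go sketch of the same load-or-create flow; the path is illustrative:

package main

import (
	"crypto/rand"
	"fmt"
	"os"
	"path/filepath"
)

// loadOrCreateSecret returns the secret stored at path, creating it with
// 32 random bytes (and any missing parent directories) on first use.
func loadOrCreateSecret(path string) ([]byte, error) {
	if b, err := os.ReadFile(path); err == nil {
		return b, nil // reuse the previously persisted secret
	}
	secret := make([]byte, 32)
	if _, err := rand.Read(secret); err != nil {
		return nil, err
	}
	if err := os.MkdirAll(filepath.Dir(path), 0o700); err != nil {
		return nil, err
	}
	if err := os.WriteFile(path, secret, 0o600); err != nil {
		return nil, err
	}
	return secret, nil
}

func main() {
	s, err := loadOrCreateSecret("/tmp/app/cookie_secret")
	fmt.Println(len(s), err)
}

Note that the Python version also supports a purely ephemeral secret when cookie_secret.random is set, which the sketch deliberately omits.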
manage.py
#!/usr/bin/env python import os import sys if __name__ == "__main__": os.environ.setdefault("DJANGO_SETTINGS_MODULE", "sonet.settings") from django.core.management import execute_from_command_line execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
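os.environ.setdefault in manage.py assigns DJANGO_SETTINGS_MODULE only when the variable is entirely absent, so a value exported by the caller — even an empty one — wins. Go's standard library has no direct equivalent; a small sketch of one, under that same "present even if empty counts as set" rule:

package main

import (
	"fmt"
	"os"
)

// setenvDefault mimics Python's os.environ.setdefault: it assigns value
// only when key is completely unset; an empty existing value is kept.
func setenvDefault(key, value string) {
	if _, ok := os.LookupEnv(key); !ok {
		os.Setenv(key, value)
	}
}

func main() {
	setenvDefault("DJANGO_SETTINGS_MODULE", "sonet.settings")
	fmt.Println(os.Getenv("DJANGO_SETTINGS_MODULE"))
}

Using os.LookupEnv rather than os.Getenv is what preserves the distinction between "unset" and "set to empty".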
lxd/container.go
package main import ( "fmt" "io" "os" "os/exec" "path/filepath" "strings" "time" "gopkg.in/lxc/go-lxc.v2" "github.com/lxc/lxd/lxd/cluster" "github.com/lxc/lxd/lxd/db" "github.com/lxc/lxd/lxd/state" "github.com/lxc/lxd/lxd/sys" "github.com/lxc/lxd/lxd/types" "github.com/lxc/lxd/lxd/util" "github.com/lxc/lxd/shared" "github.com/lxc/lxd/shared/api" "github.com/lxc/lxd/shared/idmap" "github.com/lxc/lxd/shared/logger" "github.com/lxc/lxd/shared/osarch" ) // Helper functions // Returns the parent container name, snapshot name, and whether it actually was // a snapshot name. func containerGetParentAndSnapshotName(name string) (string, string, bool) { fields := strings.SplitN(name, shared.SnapshotDelimiter, 2) if len(fields) == 1 { return name, "", false } return fields[0], fields[1], true } func containerPath(name string, isSnapshot bool) string { if isSnapshot { return shared.VarPath("snapshots", name) } return shared.VarPath("containers", name) } func containerValidName(name string) error { if strings.Contains(name, shared.SnapshotDelimiter) { return fmt.Errorf( "The character '%s' is reserved for snapshots.", shared.SnapshotDelimiter) } if !shared.ValidHostname(name) { return fmt.Errorf("Container name isn't a valid hostname.") } return nil } func containerValidConfigKey(os *sys.OS, key string, value string) error { f, err := shared.ConfigKeyChecker(key) if err != nil { return err } if err = f(value); err != nil { return err } if key == "raw.lxc" { return lxcValidConfig(value) } if key == "security.syscalls.blacklist_compat" { for _, arch := range os.Architectures { if arch == osarch.ARCH_64BIT_INTEL_X86 || arch == osarch.ARCH_64BIT_ARMV8_LITTLE_ENDIAN || arch == osarch.ARCH_64BIT_POWERPC_BIG_ENDIAN { return nil } } return fmt.Errorf("security.syscalls.blacklist_compat isn't supported on this architecture") } return nil } var containerNetworkLimitKeys = []string{"limits.max", "limits.ingress", "limits.egress"} func containerValidDeviceConfigKey(t, k string) bool { if k == "type" { return true } switch t { case "unix-char", "unix-block": switch k { case "gid": return true case "major": return true case "minor": return true case "mode": return true case "source": return true case "path": return true case "required": return true case "uid": return true default: return false } case "nic": switch k { case "limits.max": return true case "limits.ingress": return true case "limits.egress": return true case "host_name": return true case "hwaddr": return true case "mtu": return true case "name": return true case "nictype": return true case "parent": return true case "vlan": return true case "ipv4.address": return true case "ipv6.address": return true case "security.mac_filtering": return true case "maas.subnet.ipv4": return true case "maas.subnet.ipv6": return true default: return false } case "disk": switch k { case "limits.max": return true case "limits.read": return true case "limits.write": return true case "optional": return true case "path": return true case "readonly": return true case "size": return true case "source": return true case "recursive": return true case "pool": return true case "propagation": return true default: return false } case "usb": switch k { case "vendorid": return true case "productid": return true case "mode": return true case "gid": return true case "uid": return true case "required": return true default: return false } case "gpu": switch k { case "vendorid": return true case "productid": return true case "id": return true case "pci": return true case "mode": return true 
case "gid": return true case "uid": return true default: return false } case "infiniband": switch k { case "hwaddr": return true case "mtu": return true case "name": return true case "nictype": return true case "parent": return true default: return false } case "proxy": switch k { case "listen": return true case "connect": return true case "bind": return true default: return false } case "none": return false default: return false } } func containerValidConfig(sysOS *sys.OS, config map[string]string, profile bool, expanded bool) error { if config == nil { return nil } for k, v := range config { if profile && strings.HasPrefix(k, "volatile.") { return fmt.Errorf("Volatile keys can only be set on containers.") } if profile && strings.HasPrefix(k, "image.") { return fmt.Errorf("Image keys can only be set on containers.") } err := containerValidConfigKey(sysOS, k, v) if err != nil { return err } } _, rawSeccomp := config["raw.seccomp"] _, whitelist := config["security.syscalls.whitelist"] _, blacklist := config["security.syscalls.blacklist"] blacklistDefault := shared.IsTrue(config["security.syscalls.blacklist_default"]) blacklistCompat := shared.IsTrue(config["security.syscalls.blacklist_compat"]) if rawSeccomp && (whitelist || blacklist || blacklistDefault || blacklistCompat) { return fmt.Errorf("raw.seccomp is mutually exclusive with security.syscalls*") } if whitelist && (blacklist || blacklistDefault || blacklistCompat) { return fmt.Errorf("security.syscalls.whitelist is mutually exclusive with security.syscalls.blacklist*") } if expanded && (config["security.privileged"] == "" || !shared.IsTrue(config["security.privileged"])) && sysOS.IdmapSet == nil { return fmt.Errorf("LXD doesn't have a uid/gid allocation. In this mode, only privileged containers are supported.") } unprivOnly := os.Getenv("LXD_UNPRIVILEGED_ONLY") if shared.IsTrue(unprivOnly) { if config["raw.idmap"] != "" { return fmt.Errorf("raw.idmap can't be set as LXD was configured to only allow unprivileged containers") } if shared.IsTrue(config["security.privileged"]) { return fmt.Errorf("LXD was configured to only allow unprivileged containers") } } return nil } func containerValidDevices(db *db.Cluster, devices types.Devices, profile bool, expanded bool) error { // Empty device list if devices == nil { return nil } var diskDevicePaths []string // Check each device individually for name, m := range devices { if m["type"] == "" { return fmt.Errorf("Missing device type for device '%s'", name) } if !shared.StringInSlice(m["type"], []string{"disk", "gpu", "infiniband", "nic", "none", "proxy", "unix-block", "unix-char", "usb"}) { return fmt.Errorf("Invalid device type for device '%s'", name) } for k := range m { if !containerValidDeviceConfigKey(m["type"], k) { return fmt.Errorf("Invalid device configuration key for %s: %s", m["type"], k) } } if m["type"] == "nic" { if m["nictype"] == "" { return fmt.Errorf("Missing nic type") } if !shared.StringInSlice(m["nictype"], []string{"bridged", "macvlan", "p2p", "physical", "sriov"}) { return fmt.Errorf("Bad nic type: %s", m["nictype"]) } if shared.StringInSlice(m["nictype"], []string{"bridged", "macvlan", "physical", "sriov"}) && m["parent"] == "" { return fmt.Errorf("Missing parent for %s type nic", m["nictype"]) } } else if m["type"] == "infiniband" { if m["nictype"] == "" { return fmt.Errorf("Missing nic type") } if !shared.StringInSlice(m["nictype"], []string{"physical", "sriov"}) { return fmt.Errorf("Bad nic type: %s", m["nictype"]) } if m["parent"] == "" { return fmt.Errorf("Missing 
parent for %s type nic", m["nictype"]) } } else if m["type"] == "disk" { if !expanded && !shared.StringInSlice(m["path"], diskDevicePaths) { diskDevicePaths = append(diskDevicePaths, m["path"]) } else if !expanded { return fmt.Errorf("More than one disk device uses the same path: %s.", m["path"]) } if m["path"] == "" { return fmt.Errorf("Disk entry is missing the required \"path\" property.") } if m["source"] == "" && m["path"] != "/" { return fmt.Errorf("Disk entry is missing the required \"source\" property.") } if m["path"] == "/" && m["source"] != "" { return fmt.Errorf("Root disk entry may not have a \"source\" property set.") } if m["size"] != "" && m["path"] != "/" { return fmt.Errorf("Only the root disk may have a size quota.") } if (m["path"] == "/" || !shared.IsDir(m["source"])) && m["recursive"] != "" { return fmt.Errorf("The recursive option is only supported for additional bind-mounted paths.") } if m["pool"] != "" { if filepath.IsAbs(m["source"]) { return fmt.Errorf("Storage volumes cannot be specified as absolute paths.") } _, err := db.StoragePoolGetID(m["pool"]) if err != nil { return fmt.Errorf("The \"%s\" storage pool doesn't exist.", m["pool"]) } } if m["propagation"] != "" { if !util.RuntimeLiblxcVersionAtLeast(3, 0, 0) { return fmt.Errorf("liblxc 3.0 is required for mount propagation configuration") } if !shared.StringInSlice(m["propagation"], []string{"private", "shared", "slave", "unbindable", "rprivate", "rshared", "rslave", "runbindable"}) { return fmt.Errorf("Invalid propagation mode '%s'", m["propagation"]) } } } else if shared.StringInSlice(m["type"], []string{"unix-char", "unix-block"}) { if m["source"] == "" && m["path"] == "" { return fmt.Errorf("Unix device entry is missing the required \"source\" or \"path\" property.") } if (m["required"] == "" || shared.IsTrue(m["required"])) && (m["major"] == "" || m["minor"] == "") { srcPath, exist := m["source"] if !exist { srcPath = m["path"] } if !shared.PathExists(srcPath) { return fmt.Errorf("The device path doesn't exist on the host and major/minor wasn't specified.") } dType, _, _, err := deviceGetAttributes(srcPath) if err != nil { return err } if m["type"] == "unix-char" && dType != "c" { return fmt.Errorf("Path specified for unix-char device is a block device.") } if m["type"] == "unix-block" && dType != "b" { return fmt.Errorf("Path specified for unix-block device is a character device.") } } } else if m["type"] == "usb" { if m["vendorid"] == "" { return fmt.Errorf("Missing vendorid for USB device.") } } else if m["type"] == "gpu" { // Probably no checks needed, since we allow users to // pass in all GPUs. 
} else if m["type"] == "proxy" { if m["listen"] == "" { return fmt.Errorf("Proxy device entry is missing the required \"listen\" property.") } if m["connect"] == "" { return fmt.Errorf("Proxy device entry is missing the required \"connect\" property.") } } else if m["type"] == "none" { continue } else { return fmt.Errorf("Invalid device type: %s", m["type"]) } } // Checks on the expanded config if expanded { _, _, err := shared.GetRootDiskDevice(devices) if err != nil { return err } } return nil } // The container interface type container interface { // Container actions Freeze() error Shutdown(timeout time.Duration) error Start(stateful bool) error Stop(stateful bool) error Unfreeze() error // Snapshots & migration & backups Restore(sourceContainer container, stateful bool) error /* actionScript here is a script called action.sh in the stateDir, to * be passed to CRIU as --action-script */ Migrate(args *CriuMigrationArgs) error Snapshots() ([]container, error) Backups() ([]backup, error) // Config handling Rename(newName string) error Update(newConfig db.ContainerArgs, userRequested bool) error Delete() error Export(w io.Writer, properties map[string]string) error // Live configuration CGroupGet(key string) (string, error) CGroupSet(key string, value string) error ConfigKeySet(key string, value string) error // File handling FileExists(path string) error FilePull(srcpath string, dstpath string) (int64, int64, os.FileMode, string, []string, error) FilePush(type_ string, srcpath string, dstpath string, uid int64, gid int64, mode int, write string) error FileRemove(path string) error // Console - Allocate and run a console tty. // // terminal - Bidirectional file descriptor. // // This function will not return until the console has been exited by // the user. Console(terminal *os.File) *exec.Cmd ConsoleLog(opts lxc.ConsoleLogOptions) (string, error) /* Command execution: * 1. passing in false for wait * - equivalent to calling cmd.Run() * 2. passing in true for wait * - start the command and return its PID in the first return * argument and the PID of the attached process in the second * argument. It's the callers responsibility to wait on the * command. (Note. The returned PID of the attached process can not * be waited upon since it's a child of the lxd forkexec command * (the PID returned in the first return argument). It can however * be used to e.g. forward signals.) */ Exec(command []string, env map[string]string, stdin *os.File, stdout *os.File, stderr *os.File, wait bool) (*exec.Cmd, int, int, error) // Status Render() (interface{}, interface{}, error) RenderState() (*api.ContainerState, error) IsPrivileged() bool IsRunning() bool IsFrozen() bool IsEphemeral() bool IsSnapshot() bool IsStateful() bool IsNesting() bool // Hooks OnStart() error OnStop(target string) error // Properties Id() int Name() string Description() string Architecture() int CreationDate() time.Time LastUsedDate() time.Time ExpandedConfig() map[string]string ExpandedDevices() types.Devices LocalConfig() map[string]string LocalDevices() types.Devices Profiles() []string InitPID() int State() string // Paths Path() string RootfsPath() string TemplatesPath() string StatePath() string LogFilePath() string ConsoleBufferLogPath() string LogPath() string // Storage StoragePool() (string, error) // Progress reporting SetOperation(op *operation) // FIXME: Those should be internal functions // Needed for migration for now. 
StorageStart() (bool, error) StorageStop() (bool, error) Storage() storage IdmapSet() (*idmap.IdmapSet, error) LastIdmapSet() (*idmap.IdmapSet, error) TemplateApply(trigger string) error DaemonState() *state.State } // Loader functions func containerCreateAsEmpty(d *Daemon, args db.ContainerArgs) (container, error) { // Create the container c, err := containerCreateInternal(d.State(), args) if err != nil { return nil, err } // Now create the empty storage err = c.Storage().ContainerCreate(c) if err != nil { d.cluster.ContainerRemove(args.Name) return nil, err } // Apply any post-storage configuration err = containerConfigureInternal(c) if err != nil { c.Delete() return nil, err } return c, nil } func containerCreateFromBackup(s *state.State, info backupInfo, data io.ReadSeeker) error { var pool storage var fixBackupFile = false // Get storage pool from index.yaml pool, storageErr := storagePoolInit(s, info.Pool) if storageErr != nil && storageErr != db.ErrNoSuchObject { // Unexpected error return storageErr } if storageErr == db.ErrNoSuchObject { // The pool doesn't exist, and the backup is in binary format so we // cannot alter the backup.yaml. if info.HasBinaryFormat { return storageErr } // Get the default profile _, profile, err := s.Cluster.ProfileGet("default") if err != nil { return err } _, v, err := shared.GetRootDiskDevice(profile.Devices) if err != nil { return err } // Use the default-profile's root pool pool, err = storagePoolInit(s, v["pool"]) if err != nil { return err } fixBackupFile = true } // Unpack tarball err := pool.ContainerBackupLoad(info, data) if err != nil { return err } if fixBackupFile { // Use the default pool since the pool provided in the backup.yaml // doesn't exist. err = fixBackupStoragePool(s.Cluster, info) if err != nil { return err } } return nil } func containerCreateEmptySnapshot(s *state.State, args db.ContainerArgs) (container, error) { // Create the snapshot c, err := containerCreateInternal(s, args) if err != nil { return nil, err } // Now create the empty snapshot err = c.Storage().ContainerSnapshotCreateEmpty(c) if err != nil { s.Cluster.ContainerRemove(args.Name) return nil, err } return c, nil } func containerCreateFromImage(d *Daemon, args db.ContainerArgs, hash string) (container, error) { s := d.State() // Get the image properties _, img, err := s.Cluster.ImageGet(hash, false, false) if err != nil { return nil, err } // Check if the image is available locally or it's on another node. nodeAddress, err := s.Cluster.ImageLocate(hash) if err != nil { return nil, err } if nodeAddress != "" { // The image is available from another node, let's try to // import it. 
logger.Debugf("Transferring image %s from node %s", hash, nodeAddress) client, err := cluster.Connect(nodeAddress, d.endpoints.NetworkCert(), false) if err != nil { return nil, err } err = imageImportFromNode(filepath.Join(d.os.VarDir, "images"), client, hash) if err != nil { return nil, err } err = d.cluster.ImageAssociateNode(hash) if err != nil { return nil, err } } // Set the "image.*" keys if img.Properties != nil { for k, v := range img.Properties { args.Config[fmt.Sprintf("image.%s", k)] = v } } // Set the BaseImage field (regardless of previous value) args.BaseImage = hash // Create the container c, err := containerCreateInternal(s, args) if err != nil { return nil, err } err = s.Cluster.ImageLastAccessUpdate(hash, time.Now().UTC()) if err != nil { s.Cluster.ContainerRemove(args.Name) return nil, fmt.Errorf("Error updating image last use date: %s", err) } // Now create the storage from an image err = c.Storage().ContainerCreateFromImage(c, hash) if err != nil { s.Cluster.ContainerRemove(args.Name) return nil, err } // Apply any post-storage configuration err = containerConfigureInternal(c) if err != nil { c.Delete() return nil, err } return c, nil } func containerCreateAsCopy(s *state.State, args db.ContainerArgs, sourceContainer container, containerOnly bool) (container, error) { // Create the container. ct, err := containerCreateInternal(s, args) if err != nil { return nil, err } csList := []*container{} if !containerOnly { snapshots, err := sourceContainer.Snapshots() if err != nil { s.Cluster.ContainerRemove(args.Name) return nil, err } csList = make([]*container, len(snapshots)) for i, snap := range snapshots { fields := strings.SplitN(snap.Name(), shared.SnapshotDelimiter, 2) newSnapName := fmt.Sprintf("%s/%s", ct.Name(), fields[1]) csArgs := db.ContainerArgs{ Architecture: snap.Architecture(), Config: snap.LocalConfig(), Ctype: db.CTypeSnapshot, Devices: snap.LocalDevices(), Ephemeral: snap.IsEphemeral(), Name: newSnapName, Profiles: snap.Profiles(), } // Create the snapshots. cs, err := containerCreateInternal(s, csArgs) if err != nil { return nil, err } csList[i] = &cs } } // Now clone the storage. err = ct.Storage().ContainerCopy(ct, sourceContainer, containerOnly) if err != nil { for _, v := range csList { s.Cluster.ContainerRemove((*v).Name()) } s.Cluster.ContainerRemove(args.Name) return nil, err } // Apply any post-storage configuration. err = containerConfigureInternal(ct) if err != nil { ct.Delete() return nil, err } if !containerOnly { for _, cs := range csList { // Apply any post-storage configuration. err = containerConfigureInternal(*cs) if err != nil { (*cs).Delete() return nil, err } } } return ct, nil } func containerCreateAsSnapshot(s *state.State, args db.ContainerArgs, sourceContainer container) (container, error) { // Deal with state if args.Stateful { if !sourceContainer.IsRunning() { return nil, fmt.Errorf("Unable to create a stateful snapshot. The container isn't running.") } _, err := exec.LookPath("criu") if err != nil { return nil, fmt.Errorf("Unable to create a stateful snapshot. CRIU isn't installed.") } stateDir := sourceContainer.StatePath() err = os.MkdirAll(stateDir, 0700) if err != nil { return nil, err } /* TODO: ideally we would freeze here and unfreeze below after * we've copied the filesystem, to make sure there are no * changes by the container while snapshotting. Unfortunately * there is abug in CRIU where it doesn't leave the container * in the same state it found it w.r.t. freezing, i.e. 
CRIU * freezes too, and then /always/ thaws, even if the container * was frozen. Until that's fixed, all calls to Unfreeze() * after snapshotting will fail. */ criuMigrationArgs := CriuMigrationArgs{ cmd: lxc.MIGRATE_DUMP, stateDir: stateDir, function: "snapshot", stop: false, actionScript: false, dumpDir: "", preDumpDir: "", } err = sourceContainer.Migrate(&criuMigrationArgs) if err != nil { os.RemoveAll(sourceContainer.StatePath()) return nil, err } } // Create the snapshot c, err := containerCreateInternal(s, args) if err != nil { return nil, err } // Clone the container err = sourceContainer.Storage().ContainerSnapshotCreate(c, sourceContainer) if err != nil { s.Cluster.ContainerRemove(args.Name) return nil, err } ourStart, err := c.StorageStart() if err != nil { return nil, err } if ourStart { defer c.StorageStop() } err = writeBackupFile(sourceContainer) if err != nil { c.Delete() return nil, err } // Once we're done, remove the state directory if args.Stateful { os.RemoveAll(sourceContainer.StatePath()) } eventSendLifecycle("container-snapshot-created", fmt.Sprintf("/1.0/containers/%s", sourceContainer.Name()), map[string]interface{}{ "snapshot_name": args.Name, }) return c, nil } func containerCreateInternal(s *state.State, args db.ContainerArgs) (container, error) { // Set default values if args.Profiles == nil { args.Profiles = []string{"default"} } if args.Config == nil { args.Config = map[string]string{} } if args.BaseImage != "" { args.Config["volatile.base_image"] = args.BaseImage } if args.Devices == nil { args.Devices = types.Devices{} } if args.Architecture == 0 { args.Architecture = s.OS.Architectures[0] } // Validate container name if args.Ctype == db.CTypeRegular { err := containerValidName(args.Name) if err != nil { return nil, err } } // Validate container config err := containerValidConfig(s.OS, args.Config, false, false) if err != nil { return nil, err } // Validate container devices err = containerValidDevices(s.Cluster, args.Devices, false, false) if err != nil { return nil, err } // Validate architecture _, err = osarch.ArchitectureName(args.Architecture) if err != nil { return nil, err } if !shared.IntInSlice(args.Architecture, s.OS.Architectures) { return nil, fmt.Errorf("Requested architecture isn't supported by this host") } // Validate profiles profiles, err := s.Cluster.Profiles() if err != nil { return nil, err } checkedProfiles := []string{} for _, profile := range args.Profiles { if !shared.StringInSlice(profile, profiles) { return nil, fmt.Errorf("Requested profile '%s' doesn't exist", profile) } if shared.StringInSlice(profile, checkedProfiles) { return nil, fmt.Errorf("Duplicate profile found in request") } checkedProfiles = append(checkedProfiles, profile) } // Create the container entry id, err := s.Cluster.ContainerCreate(args) if err != nil { if err == db.ErrAlreadyDefined { thing := "Container" if shared.IsSnapshot(args.Name) { thing = "Snapshot" } return nil, fmt.Errorf("%s '%s' already exists", thing, args.Name) } return nil, err } // Wipe any existing log for this container name os.RemoveAll(shared.LogPath(args.Name)) args.ID = id // Read the timestamp from the database dbArgs, err := s.Cluster.ContainerGet(args.Name) if err != nil { s.Cluster.ContainerRemove(args.Name) return nil, err } args.CreationDate = dbArgs.CreationDate args.LastUsedDate = dbArgs.LastUsedDate // Setup the container struct and finish creation (storage and idmap) c, err := containerLXCCreate(s, args) if err != nil { s.Cluster.ContainerRemove(args.Name) return nil, err } 
return c, nil } func containerConfigureInternal(c container) error { // Find the root device _, rootDiskDevice, err := shared.GetRootDiskDevice(c.ExpandedDevices()) if err != nil { return err } ourStart, err := c.StorageStart() if err != nil { return err } // handle quota: at this point, storage is guaranteed to be ready storage := c.Storage() if rootDiskDevice["size"] != "" { storageTypeName := storage.GetStorageTypeName() if storageTypeName == "lvm" && c.IsRunning() { err = c.ConfigKeySet("volatile.apply_quota", rootDiskDevice["size"]) if err != nil { return err } } else { size, err := shared.ParseByteSizeString(rootDiskDevice["size"]) if err != nil { return err } err = storage.StorageEntitySetQuota(storagePoolVolumeTypeContainer, size, c) if err != nil { return err } } } if ourStart { defer c.StorageStop() } err = writeBackupFile(c) if err != nil { return err } return nil } func containerLoadById(s *state.State, id int) (container, error) { // Get the DB record name, err := s.Cluster.ContainerName(id) if err != nil { return nil, err } return containerLoadByName(s, name) } func containerLoadByName(s *state.State, name string) (container, error) { // Get the DB record args, err := s.Cluster.ContainerGet(name) if err != nil { return nil, err } return containerLXCLoad(s, args) } func containerBackupLoadByName(s *state.State, name string) (*backup, error) { // Get the DB record args, err := s.Cluster.ContainerGetBackup(name) if err != nil { return nil, err } c, err := containerLoadById(s, args.ContainerID) if err != nil { return nil, err } return &backup{ state: s, container: c, id: args.ID, name: name, creationDate: args.CreationDate, expiryDate: args.ExpiryDate, containerOnly: args.ContainerOnly, optimizedStorage: args.OptimizedStorage, }, nil } func containerBackupCreate(s *state.State, args db.ContainerBackupArgs, sourceContainer container) error { err := s.Cluster.ContainerBackupCreate(args) if err != nil { if err == db.ErrAlreadyDefined { return fmt.Errorf("backup '%s' already exists", args.Name) } return err } b, err := containerBackupLoadByName(s, args.Name) if err != nil { return err } // Now create the empty snapshot err = sourceContainer.Storage().ContainerBackupCreate(*b, sourceContainer) if err != nil { s.Cluster.ContainerBackupRemove(args.Name) return err } // Create index.yaml containing information regarding the backup err = createBackupIndexFile(sourceContainer, *b) if err != nil { s.Cluster.ContainerBackupRemove(args.Name) return err } return nil }
[ "\"LXD_UNPRIVILEGED_ONLY\"" ]
[]
[ "LXD_UNPRIVILEGED_ONLY" ]
[]
["LXD_UNPRIVILEGED_ONLY"]
go
1
0
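The excerpt above does not include the place where LXD_UNPRIVILEGED_ONLY (the variable recorded for this file) is read; elsewhere in LXD it gates whether privileged containers may be configured. A minimal sketch of such a gate, assuming that behaviour; the helper name and error text are invented for illustration, not LXD's actual code:

package main

import (
	"fmt"
	"os"
)

// validatePrivileged rejects privileged container configs when the
// LXD_UNPRIVILEGED_ONLY switch is set. Both the function name and the
// error message are invented for this sketch.
func validatePrivileged(config map[string]string) error {
	if os.Getenv("LXD_UNPRIVILEGED_ONLY") == "true" && config["security.privileged"] == "true" {
		return fmt.Errorf("privileged containers are forbidden on this host")
	}
	return nil
}

func main() {
	os.Setenv("LXD_UNPRIVILEGED_ONLY", "true")
	fmt.Println(validatePrivileged(map[string]string{"security.privileged": "true"}))
}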
components/function-controller/pkg/webhook/default_server/server.go
/* Copyright 2019 The Kyma Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package defaultserver import ( "fmt" "os" "k8s.io/apimachinery/pkg/types" "sigs.k8s.io/controller-runtime/pkg/manager" logf "sigs.k8s.io/controller-runtime/pkg/runtime/log" "sigs.k8s.io/controller-runtime/pkg/webhook" "sigs.k8s.io/controller-runtime/pkg/webhook/admission" "sigs.k8s.io/controller-runtime/pkg/webhook/admission/builder" ) var ( log = logf.Log.WithName("default_server") builderMap = map[string]*builder.WebhookBuilder{} // HandlerMap contains all admission webhook handlers. HandlerMap = map[string][]admission.Handler{} ) // Add registers the webhook server with the manager func Add(mgr manager.Manager) error { ns := os.Getenv("POD_NAMESPACE") if len(ns) == 0 { ns = "default" } secretName := os.Getenv("SECRET_NAME") if len(secretName) == 0 { secretName = "webhook-server-secret" } svr, err := webhook.NewServer("function-admission-server", mgr, webhook.ServerOptions{ // TODO(user): change the configuration of ServerOptions based on your need. Port: 9876, CertDir: "/tmp/cert", BootstrapOptions: &webhook.BootstrapOptions{ MutatingWebhookConfigName: "function-webhook.serverless.kyma-project.io", Secret: &types.NamespacedName{ Namespace: ns, Name: secretName, }, Service: &webhook.Service{ Namespace: ns, Name: "function-controller-webhook-server-service", // Selectors should select the pods that run this webhook server. Selectors: map[string]string{ "app": "function-controller", "control-plane": "controller-manager", "controller-tools.k8s.io": "1.0", }, }, }, }) if err != nil { return err } var webhooks []webhook.Webhook for k, builder := range builderMap { handlers, ok := HandlerMap[k] if !ok { log.V(1).Info(fmt.Sprintf("can't find handlers for builder: %v", k)) handlers = []admission.Handler{} } wh, err := builder. Handlers(handlers...). WithManager(mgr). Build() if err != nil { return err } webhooks = append(webhooks, wh) } return svr.Register(webhooks...) }
[ "\"POD_NAMESPACE\"", "\"SECRET_NAME\"" ]
[]
[ "POD_NAMESPACE", "SECRET_NAME" ]
[]
["POD_NAMESPACE", "SECRET_NAME"]
go
2
0
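Both POD_NAMESPACE and SECRET_NAME in Add() above follow the same read-then-fallback shape. A standalone sketch of that pattern; getEnvOrDefault is an illustrative helper, not part of the Kyma file:

package main

import (
	"fmt"
	"os"
)

// getEnvOrDefault returns the value of key, or fallback when the
// variable is unset or empty, mirroring the length checks in Add().
func getEnvOrDefault(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

func main() {
	ns := getEnvOrDefault("POD_NAMESPACE", "default")
	secretName := getEnvOrDefault("SECRET_NAME", "webhook-server-secret")
	fmt.Println(ns, secretName)
}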
manager/controllers/app/fybrikapplication_controller_test.go
// Copyright 2020 IBM Corp. // SPDX-License-Identifier: Apache-2.0 package app import ( "context" "fmt" "os" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "sigs.k8s.io/controller-runtime/pkg/client" apiv1alpha1 "fybrik.io/fybrik/manager/apis/app/v1alpha1" "fybrik.io/fybrik/manager/controllers/utils" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" corev1 "k8s.io/api/core/v1" ) const timeout = time.Second * 30 const interval = time.Millisecond * 100 var _ = Describe("FybrikApplication Controller", func() { Context("FybrikApplication", func() { controllerNamespace := utils.GetControllerNamespace() fmt.Printf("FybrikApplication: controller namespace: %s\n", controllerNamespace) BeforeEach(func() { // Add any setup steps that need to be executed before each test module := &apiv1alpha1.FybrikModule{} Expect(readObjectFromFile("../../testdata/e2e/module-read.yaml", module)).ToNot(HaveOccurred()) module.Namespace = controllerNamespace application := &apiv1alpha1.FybrikApplication{} Expect(readObjectFromFile("../../testdata/e2e/fybrikapplication.yaml", application)).ToNot(HaveOccurred()) _ = k8sClient.Delete(context.Background(), application) _ = k8sClient.Delete(context.Background(), module) }) AfterEach(func() { // Add any teardown steps that need to be executed after each test }) It("Test restricted access to secrets", func() { if os.Getenv("USE_EXISTING_CONTROLLER") != "true" { // test access restriction: only secrets from blueprints namespace can be accessed // Create secrets in default and fybrik-blueprints namespaces // A secret from the default namespace should not be listed secret1 := &corev1.Secret{Type: corev1.SecretTypeOpaque, StringData: map[string]string{"password": "123"}} secret1.Name = "test-secret" secret1.Namespace = "default" Expect(k8sClient.Create(context.TODO(), secret1)).NotTo(HaveOccurred(), "a secret could not be created") secret2 := &corev1.Secret{Type: corev1.SecretTypeOpaque, StringData: map[string]string{"password": "123"}} secret2.Name = "test-secret" blueprintNamespace := utils.GetBlueprintNamespace() fmt.Printf("Application test using blueprint namespace: %s\n", blueprintNamespace) secret2.Namespace = blueprintNamespace Expect(k8sClient.Create(context.TODO(), secret2)).NotTo(HaveOccurred(), "a secret could not be created") secretList := &corev1.SecretList{} Expect(k8sClient.List(context.Background(), secretList)).NotTo(HaveOccurred()) Expect(len(secretList.Items)).To(Equal(1), "Secrets from other namespaces should not be listed") } }) It("Test restricted access to modules", func() { if os.Getenv("USE_EXISTING_CONTROLLER") != "true" { // test access restriction: only modules from the control plane can be accessed // Create a module in default namespace // An attempt to fetch it will fail module := &apiv1alpha1.FybrikModule{} Expect(readObjectFromFile("../../testdata/e2e/module-read.yaml", module)).ToNot(HaveOccurred()) module.Namespace = "default" Expect(k8sClient.Create(context.Background(), module)).Should(Succeed()) fetchedModule := &apiv1alpha1.FybrikModule{} moduleKey := client.ObjectKeyFromObject(module) Expect(k8sClient.Get(context.Background(), moduleKey, fetchedModule)).To(HaveOccurred(), "Should deny access") } }) It("Test end-to-end for FybrikApplication", func() { connector := os.Getenv("USE_MOCKUP_CONNECTOR") fmt.Printf("Connector: %s\n", connector) if len(connector) > 0 && connector != "true" { Skip("Skipping test when not running with mockup connector!") } module := &apiv1alpha1.FybrikModule{} 
Expect(readObjectFromFile("../../testdata/e2e/module-read.yaml", module)).ToNot(HaveOccurred()) moduleKey := client.ObjectKeyFromObject(module) module.Namespace = controllerNamespace application := &apiv1alpha1.FybrikApplication{} Expect(readObjectFromFile("../../testdata/e2e/fybrikapplication.yaml", application)).ToNot(HaveOccurred()) application.Labels = map[string]string{"label1": "foo", "label2": "bar"} applicationKey := client.ObjectKeyFromObject(application) fmt.Printf("Module: %v\n", module.Namespace) fmt.Printf("Application: %v\n", application.Namespace) // Create FybrikApplication and FybrikModule Expect(k8sClient.Create(context.Background(), module)).Should(Succeed()) Expect(k8sClient.Create(context.Background(), application)).Should(Succeed()) // Ensure getting cleaned up after tests finish defer func() { application := &apiv1alpha1.FybrikApplication{ObjectMeta: metav1.ObjectMeta{Namespace: applicationKey.Namespace, Name: applicationKey.Name}} _ = k8sClient.Get(context.Background(), applicationKey, application) // _ = k8sClient.Delete(context.Background(), application) module := &apiv1alpha1.FybrikApplication{ObjectMeta: metav1.ObjectMeta{Namespace: moduleKey.Namespace, Name: moduleKey.Name}} _ = k8sClient.Get(context.Background(), moduleKey, module) // _ = k8sClient.Delete(context.Background(), module) }() By("Expecting application to be created") Eventually(func() error { return k8sClient.Get(context.Background(), applicationKey, application) }, timeout, interval).Should(Succeed()) By("Expecting plotter to be constructed") Eventually(func() *apiv1alpha1.ResourceReference { _ = k8sClient.Get(context.Background(), applicationKey, application) return application.Status.Generated }, timeout, interval).ShouldNot(BeNil()) // The plotter has to be created plotter := &apiv1alpha1.Plotter{} plotterObjectKey := client.ObjectKey{Namespace: application.Status.Generated.Namespace, Name: application.Status.Generated.Name} By("Expecting plotter to be fetchable") Eventually(func() error { return k8sClient.Get(context.Background(), plotterObjectKey, plotter) }, timeout, interval).Should(Succeed()) By("Expect plotter to be ready at some point") Eventually(func() bool { Expect(k8sClient.Get(context.Background(), plotterObjectKey, plotter)).To(Succeed()) return plotter.Status.ObservedState.Ready }, timeout*10, interval).Should(BeTrue(), "plotter is not ready") blueprintObjectKey := client.ObjectKey{Namespace: utils.GetBlueprintNamespace(), Name: plotter.Name} By("Expecting Blueprint to contain application labels") blueprint := &apiv1alpha1.Blueprint{} Eventually(func() error { return k8sClient.Get(context.Background(), blueprintObjectKey, blueprint) }, timeout, interval).Should(Succeed(), "Blueprint has not been created") for _, module := range blueprint.Spec.Modules { Expect(module.Arguments.Labels["label1"]).To(Equal("foo")) Expect(module.Arguments.Labels["label2"]).To(Equal("bar")) Expect(module.Arguments.Labels[apiv1alpha1.ApplicationNameLabel]).To(Equal(applicationKey.Name)) Expect(module.Arguments.Labels[apiv1alpha1.ApplicationNamespaceLabel]).To(Equal(applicationKey.Namespace)) Expect(module.Arguments.AppSelector.MatchLabels["app"]).To(Equal("notebook")) } By("Expecting FybrikApplication to eventually be ready") Eventually(func() bool { Expect(k8sClient.Get(context.Background(), applicationKey, application)).To(Succeed()) return application.Status.Ready }, timeout, interval).Should(BeTrue(), "FybrikApplication is not ready after timeout!") By("Status should contain the details of the 
endpoint") Expect(len(application.Status.AssetStates)).To(Equal(1)) // TODO endpoint details are not set yet fqdn := "test-app-e2e-default-read-module-test-e2e." + blueprintObjectKey.Namespace Expect(application.Status.AssetStates["s3/redact-dataset"].Endpoint).To(Equal(apiv1alpha1.EndpointSpec{ Hostname: fqdn, Port: 80, Scheme: "grpc", })) }) }) })
[ "\"USE_EXISTING_CONTROLLER\"", "\"USE_EXISTING_CONTROLLER\"", "\"USE_MOCKUP_CONNECTOR\"" ]
[]
[ "USE_MOCKUP_CONNECTOR", "USE_EXISTING_CONTROLLER" ]
[]
["USE_MOCKUP_CONNECTOR", "USE_EXISTING_CONTROLLER"]
go
2
0
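The tests above gate themselves on environment variables: the access-restriction tests run only when USE_EXISTING_CONTROLLER is not "true", and the end-to-end test skips unless USE_MOCKUP_CONNECTOR is unset or "true". A hedged sketch of that decision logic, with invented helper names:

package main

import (
	"fmt"
	"os"
)

// shouldRunIsolationTests mirrors the guard on the two access-restriction
// tests: they only run against the test-local controller.
func shouldRunIsolationTests() bool {
	return os.Getenv("USE_EXISTING_CONTROLLER") != "true"
}

// mockupConnectorEnabled mirrors the end-to-end test's skip condition:
// an unset variable is treated the same as "true".
func mockupConnectorEnabled() bool {
	v := os.Getenv("USE_MOCKUP_CONNECTOR")
	return v == "" || v == "true"
}

func main() {
	fmt.Println(shouldRunIsolationTests(), mockupConnectorEnabled())
}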
freshservice/client.go
package freshservice import ( "bytes" "encoding/json" "fmt" "io/ioutil" "log" "net/http" "os" ) //Client is the freshservice client type Client struct { domain string apiKey string hc *http.Client } //NewClient returns a new client func NewClient(domain string, apiKey string) *Client { return &Client{ domain: domain, apiKey: apiKey, hc: &http.Client{}, } } //ReadObject reads an object with the GET method func (c *Client) ReadObject(path string, obj interface{}) error { log.Printf("freshservice: reading object from %v", path) req, err := c.newRequest("GET", path, nil) if err != nil { return err } res, err := c.hc.Do(req) if err != nil { return err } defer res.Body.Close() err = responseOK(res) if err != nil { return err } return json.NewDecoder(res.Body).Decode(obj) } //WriteObject writes an object with the provided method func (c *Client) WriteObject(path string, method string, in interface{}, out interface{}) error { log.Printf("freshservice: writing (%v) object %v to %v", method, in, path) data, err := json.Marshal(in) if err != nil { return err } req, err := c.newRequest(method, path, data) if err != nil { return err } res, err := c.hc.Do(req) if err != nil { return err } defer res.Body.Close() err = responseOK(res) if err != nil { return err } if out != nil { return json.NewDecoder(res.Body).Decode(out) } return nil } func (c *Client) newRequest(method string, path string, data []byte) (*http.Request, error) { url := fmt.Sprintf("https://%s.freshdesk.com%s", c.domain, path) req, err := http.NewRequest(method, url, bytes.NewBuffer(data)) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/json") req.SetBasicAuth(c.apiKey, "X") return req, nil } //responseOK returns nil if the response was ok, or an error otherwise func responseOK(res *http.Response) error { switch res.StatusCode { case http.StatusOK: return nil case http.StatusCreated: return nil case http.StatusNoContent: return nil } data, err := ioutil.ReadAll(res.Body) return fmt.Errorf("bad status: %v. %v (read error: %v)", res.Status, string(data), err) } //GetCredentials reads the freshservice client credentials from environment variables func GetCredentials() (string, string) { domain := os.Getenv("FS_DOMAIN") apiKey := os.Getenv("FS_KEY") return domain, apiKey }
[ "\"FS_DOMAIN\"", "\"FS_KEY\"" ]
[]
[ "FS_KEY", "FS_DOMAIN" ]
[]
["FS_KEY", "FS_DOMAIN"]
go
2
0
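A plausible caller would wire GetCredentials into NewClient roughly as below; the import path and the ticket endpoint are assumptions for illustration, not taken from the package:

package main

import (
	"fmt"
	"os"

	"example.com/freshservice" // hypothetical import path
)

func main() {
	// GetCredentials reads FS_DOMAIN and FS_KEY from the environment.
	domain, apiKey := freshservice.GetCredentials()
	if domain == "" || apiKey == "" {
		fmt.Fprintln(os.Stderr, "FS_DOMAIN and FS_KEY must be set")
		os.Exit(1)
	}
	c := freshservice.NewClient(domain, apiKey)
	// The endpoint path below is illustrative, not defined by the package.
	var ticket map[string]interface{}
	if err := c.ReadObject("/api/v2/tickets/1", &ticket); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(ticket)
}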
Lib/platform.py
#!/usr/bin/env python3 """ This module tries to retrieve as much platform-identifying data as possible. It makes this information available via function APIs. If called from the command line, it prints the platform information concatenated as single string to stdout. The output format is useable as part of a filename. """ # This module is maintained by Marc-Andre Lemburg <[email protected]>. # If you find problems, please submit bug reports/patches via the # Python bug tracker (http://bugs.python.org) and assign them to "lemburg". # # Still needed: # * support for MS-DOS (PythonDX ?) # * support for Amiga and other still unsupported platforms running Python # * support for additional Linux distributions # # Many thanks to all those who helped adding platform-specific # checks (in no particular order): # # Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell, # Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef # Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg # Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark # Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support), # Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter, Steve # Dower # # History: # # <see CVS and SVN checkin messages for history> # # 1.0.8 - changed Windows support to read version from kernel32.dll # 1.0.7 - added DEV_NULL # 1.0.6 - added linux_distribution() # 1.0.5 - fixed Java support to allow running the module on Jython # 1.0.4 - added IronPython support # 1.0.3 - added normalization of Windows system name # 1.0.2 - added more Windows support # 1.0.1 - reformatted to make doc.py happy # 1.0.0 - reformatted a bit and checked into Python CVS # 0.8.0 - added sys.version parser and various new access # APIs (python_version(), python_compiler(), etc.) # 0.7.2 - fixed architecture() to use sizeof(pointer) where available # 0.7.1 - added support for Caldera OpenLinux # 0.7.0 - some fixes for WinCE; untabified the source file # 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and # vms_lib.getsyi() configured # 0.6.1 - added code to prevent 'uname -p' on platforms which are # known not to support it # 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k; # did some cleanup of the interfaces - some APIs have changed # 0.5.5 - fixed another type in the MacOS code... should have # used more coffee today ;-) # 0.5.4 - fixed a few typos in the MacOS code # 0.5.3 - added experimental MacOS support; added better popen() # workarounds in _syscmd_ver() -- still not 100% elegant # though # 0.5.2 - fixed uname() to return '' instead of 'unknown' in all # return values (the system uname command tends to return # 'unknown' instead of just leaving the field empty) # 0.5.1 - included code for slackware dist; added exception handlers # to cover up situations where platforms don't have os.popen # (e.g. 
Mac) or fail on socket.gethostname(); fixed libc # detection RE # 0.5.0 - changed the API names referring to system commands to *syscmd*; # added java_ver(); made syscmd_ver() a private # API (was system_ver() in previous versions) -- use uname() # instead; extended the win32_ver() to also return processor # type information # 0.4.0 - added win32_ver() and modified the platform() output for WinXX # 0.3.4 - fixed a bug in _follow_symlinks() # 0.3.3 - fixed popen() and "file" command invokation bugs # 0.3.2 - added architecture() API and support for it in platform() # 0.3.1 - fixed syscmd_ver() RE to support Windows NT # 0.3.0 - added system alias support # 0.2.3 - removed 'wince' again... oh well. # 0.2.2 - added 'wince' to syscmd_ver() supported platforms # 0.2.1 - added cache logic and changed the platform string format # 0.2.0 - changed the API to use functions instead of module globals # since some action take too long to be run on module import # 0.1.0 - first release # # You can always get the latest version of this module at: # # http://www.egenix.com/files/python/platform.py # # If that URL should fail, try contacting the author. __copyright__ = """ Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:[email protected] Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:[email protected] Permission to use, copy, modify, and distribute this software and its documentation for any purpose and without fee or royalty is hereby granted, provided that the above copyright notice appear in all copies and that both that copyright notice and this permission notice appear in supporting documentation or portions thereof, including modifications, that you make. EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE ! """ __version__ = '1.0.8' import collections import sys, os, re, subprocess import warnings ### Globals & Constants # Determine the platform's /dev/null device try: DEV_NULL = os.devnull except AttributeError: # os.devnull was added in Python 2.4, so emulate it for earlier # Python versions if sys.platform in ('dos', 'win32', 'win16'): # Use the old CP/M NUL as device name DEV_NULL = 'NUL' else: # Standard Unix uses /dev/null DEV_NULL = '/dev/null' # Helper for comparing two version number strings. 
# Based on the description of the PHP's version_compare(): # http://php.net/manual/en/function.version-compare.php _ver_stages = { # any string not found in this dict, will get 0 assigned 'dev': 10, 'alpha': 20, 'a': 20, 'beta': 30, 'b': 30, 'c': 40, 'RC': 50, 'rc': 50, # number, will get 100 assigned 'pl': 200, 'p': 200, } _component_re = re.compile(r'([0-9]+|[._+-])') def _comparable_version(version): result = [] for v in _component_re.split(version): if v not in '._+-': try: v = int(v, 10) t = 100 except ValueError: t = _ver_stages.get(v, 0) result.extend((t, v)) return result ### Platform specific APIs _libc_search = re.compile(b'(__libc_init)' b'|' b'(GLIBC_([0-9.]+))' b'|' br'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)', re.ASCII) def libc_ver(executable=sys.executable, lib='', version='', chunksize=16384): """ Tries to determine the libc version that the file executable (which defaults to the Python interpreter) is linked against. Returns a tuple of strings (lib,version) which default to the given parameters in case the lookup fails. Note that the function has intimate knowledge of how different libc versions add symbols to the executable and thus is probably only useable for executables compiled using gcc. The file is read and scanned in chunks of chunksize bytes. """ V = _comparable_version if hasattr(os.path, 'realpath'): # Python 2.2 introduced os.path.realpath(); it is used # here to work around problems with Cygwin not being # able to open symlinks for reading executable = os.path.realpath(executable) with open(executable, 'rb') as f: binary = f.read(chunksize) pos = 0 while pos < len(binary): if b'libc' in binary or b'GLIBC' in binary: m = _libc_search.search(binary, pos) else: m = None if not m or m.end() == len(binary): chunk = f.read(chunksize) if chunk: binary = binary[max(pos, len(binary) - 1000):] + chunk pos = 0 continue if not m: break libcinit, glibc, glibcversion, so, threads, soversion = [ s.decode('latin1') if s is not None else s for s in m.groups()] if libcinit and not lib: lib = 'libc' elif glibc: if lib != 'glibc': lib = 'glibc' version = glibcversion elif V(glibcversion) > V(version): version = glibcversion elif so: if lib != 'glibc': lib = 'libc' if soversion and (not version or V(soversion) > V(version)): version = soversion if threads and version[-len(threads):] != threads: version = version + threads pos = m.end() return lib, version def _dist_try_harder(distname, version, id): """ Tries some special tricks to get the distribution information in case the default method fails. Currently supports older SuSE Linux, Caldera OpenLinux and Slackware Linux distributions. """ if os.path.exists('/var/adm/inst-log/info'): # SuSE Linux stores distribution information in that file distname = 'SuSE' for line in open('/var/adm/inst-log/info'): tv = line.split() if len(tv) == 2: tag, value = tv else: continue if tag == 'MIN_DIST_VERSION': version = value.strip() elif tag == 'DIST_IDENT': values = value.split('-') id = values[2] return distname, version, id if os.path.exists('/etc/.installed'): # Caldera OpenLinux has some infos in that file (thanks to Colin Kong) for line in open('/etc/.installed'): pkg = line.split('-') if len(pkg) >= 2 and pkg[0] == 'OpenLinux': # XXX does Caldera support non Intel platforms ? If yes, # where can we find the needed id ? 
return 'OpenLinux', pkg[1], id if os.path.isdir('/usr/lib/setup'): # Check for slackware version tag file (thanks to Greg Andruk) verfiles = os.listdir('/usr/lib/setup') for n in range(len(verfiles)-1, -1, -1): if verfiles[n][:14] != 'slack-version-': del verfiles[n] if verfiles: verfiles.sort() distname = 'slackware' version = verfiles[-1][14:] return distname, version, id return distname, version, id def popen(cmd, mode='r', bufsize=-1): """ Portable popen() interface. """ import warnings warnings.warn('use os.popen instead', DeprecationWarning, stacklevel=2) return os.popen(cmd, mode, bufsize) def _norm_version(version, build=''): """ Normalize the version and build strings and return a single version string using the format major.minor.build (or patchlevel). """ l = version.split('.') if build: l.append(build) try: ints = map(int, l) except ValueError: strings = l else: strings = list(map(str, ints)) version = '.'.join(strings[:3]) return version _ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) ' r'.*' r'\[.* ([\d.]+)\])') # Examples of VER command output: # # Windows 2000: Microsoft Windows 2000 [Version 5.00.2195] # Windows XP: Microsoft Windows XP [Version 5.1.2600] # Windows Vista: Microsoft Windows [Version 6.0.6002] # # Note that the "Version" string gets localized on different # Windows versions. def _syscmd_ver(system='', release='', version='', supported_platforms=('win32', 'win16', 'dos')): """ Tries to figure out the OS version used and returns a tuple (system, release, version). It uses the "ver" shell command for this which is known to exists on Windows, DOS. XXX Others too ? In case this fails, the given parameters are used as defaults. """ if sys.platform not in supported_platforms: return system, release, version # Try some common cmd strings for cmd in ('ver', 'command /c ver', 'cmd /c ver'): try: pipe = os.popen(cmd) info = pipe.read() if pipe.close(): raise OSError('command failed') # XXX How can I suppress shell errors from being written # to stderr ? 
except OSError as why: #print 'Command %s failed: %s' % (cmd, why) continue else: break else: return system, release, version # Parse the output info = info.strip() m = _ver_output.match(info) if m is not None: system, release, version = m.groups() # Strip trailing dots from version and release if release[-1] == '.': release = release[:-1] if version[-1] == '.': version = version[:-1] # Normalize the version and build strings (eliminating additional # zeros) version = _norm_version(version) return system, release, version _WIN32_CLIENT_RELEASES = { (5, 0): "2000", (5, 1): "XP", # Strictly, 5.2 client is XP 64-bit, but platform.py historically # has always called it 2003 Server (5, 2): "2003Server", (5, None): "post2003", (6, 0): "Vista", (6, 1): "7", (6, 2): "8", (6, 3): "8.1", (6, None): "post8.1", (10, 0): "10", (10, None): "post10", } # Server release name lookup will default to client names if necessary _WIN32_SERVER_RELEASES = { (5, 2): "2003Server", (6, 0): "2008Server", (6, 1): "2008ServerR2", (6, 2): "2012Server", (6, 3): "2012ServerR2", (6, None): "post2012ServerR2", } def win32_ver(release='', version='', csd='', ptype=''): try: from sys import getwindowsversion except ImportError: return release, version, csd, ptype try: from winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE except ImportError: from _winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE winver = getwindowsversion() maj, min, build = winver.platform_version or winver[:3] version = '{0}.{1}.{2}'.format(maj, min, build) release = (_WIN32_CLIENT_RELEASES.get((maj, min)) or _WIN32_CLIENT_RELEASES.get((maj, None)) or release) # getwindowsversion() reflect the compatibility mode Python is # running under, and so the service pack value is only going to be # valid if the versions match. if winver[:2] == (maj, min): try: csd = 'SP{}'.format(winver.service_pack_major) except AttributeError: if csd[:13] == 'Service Pack ': csd = 'SP' + csd[13:] # VER_NT_SERVER = 3 if getattr(winver, 'product_type', None) == 3: release = (_WIN32_SERVER_RELEASES.get((maj, min)) or _WIN32_SERVER_RELEASES.get((maj, None)) or release) key = None try: key = OpenKeyEx(HKEY_LOCAL_MACHINE, r'SOFTWARE\Microsoft\Windows NT\CurrentVersion') ptype = QueryValueEx(key, 'CurrentType')[0] except: pass finally: if key: CloseKey(key) return release, version, csd, ptype def _mac_ver_xml(): fn = '/System/Library/CoreServices/SystemVersion.plist' if not os.path.exists(fn): return None try: import plistlib except ImportError: return None with open(fn, 'rb') as f: pl = plistlib.load(f) release = pl['ProductVersion'] versioninfo = ('', '', '') machine = os.uname().machine if machine in ('ppc', 'Power Macintosh'): # Canonical name machine = 'PowerPC' return release, versioninfo, machine def mac_ver(release='', versioninfo=('', '', ''), machine=''): """ Get MacOS version information and return it as tuple (release, versioninfo, machine) with versioninfo being a tuple (version, dev_stage, non_release_version). Entries which cannot be determined are set to the parameter values which default to ''. All tuple entries are strings. 
""" # First try reading the information from an XML file which should # always be present info = _mac_ver_xml() if info is not None: return info # If that also doesn't work return the default values return release, versioninfo, machine def _java_getprop(name, default): from java.lang import System try: value = System.getProperty(name) if value is None: return default return value except AttributeError: return default def java_ver(release='', vendor='', vminfo=('', '', ''), osinfo=('', '', '')): """ Version interface for Jython. Returns a tuple (release, vendor, vminfo, osinfo) with vminfo being a tuple (vm_name, vm_release, vm_vendor) and osinfo being a tuple (os_name, os_version, os_arch). Values which cannot be determined are set to the defaults given as parameters (which all default to ''). """ # Import the needed APIs try: import java.lang except ImportError: return release, vendor, vminfo, osinfo vendor = _java_getprop('java.vendor', vendor) release = _java_getprop('java.version', release) vm_name, vm_release, vm_vendor = vminfo vm_name = _java_getprop('java.vm.name', vm_name) vm_vendor = _java_getprop('java.vm.vendor', vm_vendor) vm_release = _java_getprop('java.vm.version', vm_release) vminfo = vm_name, vm_release, vm_vendor os_name, os_version, os_arch = osinfo os_arch = _java_getprop('java.os.arch', os_arch) os_name = _java_getprop('java.os.name', os_name) os_version = _java_getprop('java.os.version', os_version) osinfo = os_name, os_version, os_arch return release, vendor, vminfo, osinfo ### System name aliasing def system_alias(system, release, version): """ Returns (system, release, version) aliased to common marketing names used for some systems. It also does some reordering of the information in some cases where it would otherwise cause confusion. """ if system == 'Rhapsody': # Apple's BSD derivative # XXX How can we determine the marketing release number ? return 'MacOS X Server', system+release, version elif system == 'SunOS': # Sun's OS if release < '5': # These releases use the old name SunOS return system, release, version # Modify release (marketing release = SunOS release - 3) l = release.split('.') if l: try: major = int(l[0]) except ValueError: pass else: major = major - 3 l[0] = str(major) release = '.'.join(l) if release < '6': system = 'Solaris' else: # XXX Whatever the new SunOS marketing name is... system = 'Solaris' elif system == 'IRIX64': # IRIX reports IRIX64 on platforms with 64-bit support; yet it # is really a version and not a different platform, since 32-bit # apps are also supported.. system = 'IRIX' if version: version = version + ' (64bit)' else: version = '64bit' elif system in ('win32', 'win16'): # In case one of the other tricks system = 'Windows' return system, release, version ### Various internal helpers def _platform(*args): """ Helper to format the platform string in a filename compatible format e.g. "system-version-machine". """ # Format the platform string platform = '-'.join(x.strip() for x in filter(len, args)) # Cleanup some possible filename obstacles... platform = platform.replace(' ', '_') platform = platform.replace('/', '-') platform = platform.replace('\\', '-') platform = platform.replace(':', '-') platform = platform.replace(';', '-') platform = platform.replace('"', '-') platform = platform.replace('(', '-') platform = platform.replace(')', '-') # No need to report 'unknown' information... 
platform = platform.replace('unknown', '') # Fold '--'s and remove trailing '-' while 1: cleaned = platform.replace('--', '-') if cleaned == platform: break platform = cleaned while platform[-1] == '-': platform = platform[:-1] return platform def _node(default=''): """ Helper to determine the node name of this machine. """ try: import socket except ImportError: # No sockets... return default try: return socket.gethostname() except OSError: # Still not working... return default def _follow_symlinks(filepath): """ In case filepath is a symlink, follow it until a real file is reached. """ filepath = os.path.abspath(filepath) while os.path.islink(filepath): filepath = os.path.normpath( os.path.join(os.path.dirname(filepath), os.readlink(filepath))) return filepath def _syscmd_uname(option, default=''): """ Interface to the system's uname command. """ if sys.platform in ('dos', 'win32', 'win16'): # XXX Others too ? return default try: f = os.popen('uname %s 2> %s' % (option, DEV_NULL)) except (AttributeError, OSError): return default output = f.read().strip() rc = f.close() if not output or rc: return default else: return output def _syscmd_file(target, default=''): """ Interface to the system's file command. The function uses the -b option of the file command to have it omit the filename in its output. Follow the symlinks. It returns default in case the command should fail. """ if sys.platform in ('dos', 'win32', 'win16'): # XXX Others too ? return default target = _follow_symlinks(target) try: proc = subprocess.Popen(['file', target], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) except (AttributeError, OSError): return default output = proc.communicate()[0].decode('latin-1') rc = proc.wait() if not output or rc: return default else: return output ### Information about the used architecture # Default values for architecture; non-empty strings override the # defaults given as parameters _default_architecture = { 'win32': ('', 'WindowsPE'), 'win16': ('', 'Windows'), 'dos': ('', 'MSDOS'), } def architecture(executable=sys.executable, bits='', linkage=''): """ Queries the given executable (defaults to the Python interpreter binary) for various architecture information. Returns a tuple (bits, linkage) which contains information about the bit architecture and the linkage format used for the executable. Both values are returned as strings. Values that cannot be determined are returned as given by the parameter presets. If bits is given as '', the sizeof(pointer) (or sizeof(long) on Python version < 1.5.2) is used as indicator for the supported pointer size. The function relies on the system's "file" command to do the actual work. This is available on most if not all Unix platforms. On some non-Unix platforms where the "file" command does not exist and the executable is set to the Python interpreter binary defaults from _default_architecture are used. """ # Use the sizeof(pointer) as default number of bits if nothing # else is given as default. if not bits: import struct try: size = struct.calcsize('P') except struct.error: # Older installations can only query longs size = struct.calcsize('l') bits = str(size*8) + 'bit' # Get data from the 'file' system command if executable: fileout = _syscmd_file(executable, '') else: fileout = '' if not fileout and \ executable == sys.executable: # "file" command did not return anything; we'll try to provide # some sensible defaults then... 
if sys.platform in _default_architecture: b, l = _default_architecture[sys.platform] if b: bits = b if l: linkage = l return bits, linkage if 'executable' not in fileout: # Format not supported return bits, linkage # Bits if '32-bit' in fileout: bits = '32bit' elif 'N32' in fileout: # On Irix only bits = 'n32bit' elif '64-bit' in fileout: bits = '64bit' # Linkage if 'ELF' in fileout: linkage = 'ELF' elif 'PE' in fileout: # E.g. Windows uses this format if 'Windows' in fileout: linkage = 'WindowsPE' else: linkage = 'PE' elif 'COFF' in fileout: linkage = 'COFF' elif 'MS-DOS' in fileout: linkage = 'MSDOS' else: # XXX the A.OUT format also falls under this class... pass return bits, linkage ### Portable uname() interface uname_result = collections.namedtuple("uname_result", "system node release version machine processor") _uname_cache = None def uname(): """ Fairly portable uname interface. Returns a tuple of strings (system, node, release, version, machine, processor) identifying the underlying platform. Note that unlike the os.uname function this also returns possible processor information as an additional tuple entry. Entries which cannot be determined are set to ''. """ global _uname_cache no_os_uname = 0 if _uname_cache is not None: return _uname_cache processor = '' # Get some infos from the builtin os.uname API... try: system, node, release, version, machine = os.uname() except AttributeError: no_os_uname = 1 if no_os_uname or not list(filter(None, (system, node, release, version, machine))): # Hmm, no there is either no uname or uname has returned #'unknowns'... we'll have to poke around the system then. if no_os_uname: system = sys.platform release = '' version = '' node = _node() machine = '' use_syscmd_ver = 1 # Try win32_ver() on win32 platforms if system == 'win32': release, version, csd, ptype = win32_ver() if release and version: use_syscmd_ver = 0 # Try to use the PROCESSOR_* environment variables # available on Win XP and later; see # http://support.microsoft.com/kb/888731 and # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM if not machine: # WOW64 processes mask the native architecture if "PROCESSOR_ARCHITEW6432" in os.environ: machine = os.environ.get("PROCESSOR_ARCHITEW6432", '') else: machine = os.environ.get('PROCESSOR_ARCHITECTURE', '') if not processor: processor = os.environ.get('PROCESSOR_IDENTIFIER', machine) # Try the 'ver' system command available on some # platforms if use_syscmd_ver: system, release, version = _syscmd_ver(system) # Normalize system to what win32_ver() normally returns # (_syscmd_ver() tends to return the vendor name as well) if system == 'Microsoft Windows': system = 'Windows' elif system == 'Microsoft' and release == 'Windows': # Under Windows Vista and Windows Server 2008, # Microsoft changed the output of the ver command. The # release is no longer printed. This causes the # system and release to be misidentified. 
system = 'Windows' if '6.0' == version[:3]: release = 'Vista' else: release = '' # In case we still don't know anything useful, we'll try to # help ourselves if system in ('win32', 'win16'): if not version: if system == 'win32': version = '32bit' else: version = '16bit' system = 'Windows' elif system[:4] == 'java': release, vendor, vminfo, osinfo = java_ver() system = 'Java' version = ', '.join(vminfo) if not version: version = vendor # System specific extensions if system == 'OpenVMS': # OpenVMS seems to have release and version mixed up if not release or release == '0': release = version version = '' # Get processor information try: import vms_lib except ImportError: pass else: csid, cpu_number = vms_lib.getsyi('SYI$_CPU', 0) if (cpu_number >= 128): processor = 'Alpha' else: processor = 'VAX' if not processor: # Get processor information from the uname system command processor = _syscmd_uname('-p', '') #If any unknowns still exist, replace them with ''s, which are more portable if system == 'unknown': system = '' if node == 'unknown': node = '' if release == 'unknown': release = '' if version == 'unknown': version = '' if machine == 'unknown': machine = '' if processor == 'unknown': processor = '' # normalize name if system == 'Microsoft' and release == 'Windows': system = 'Windows' release = 'Vista' _uname_cache = uname_result(system, node, release, version, machine, processor) return _uname_cache ### Direct interfaces to some of the uname() return values def system(): """ Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'. An empty string is returned if the value cannot be determined. """ return uname().system def node(): """ Returns the computer's network name (which may not be fully qualified) An empty string is returned if the value cannot be determined. """ return uname().node def release(): """ Returns the system's release, e.g. '2.2.0' or 'NT' An empty string is returned if the value cannot be determined. """ return uname().release def version(): """ Returns the system's release version, e.g. '#3 on degas' An empty string is returned if the value cannot be determined. """ return uname().version def machine(): """ Returns the machine type, e.g. 'i386' An empty string is returned if the value cannot be determined. """ return uname().machine def processor(): """ Returns the (true) processor name, e.g. 'amdk6' An empty string is returned if the value cannot be determined. Note that many platforms do not provide this information or simply return the same value as for machine(), e.g. NetBSD does this. """ return uname().processor ### Various APIs for extracting information from sys.version _sys_version_parser = re.compile( r'([\w.+]+)\s*' # "version<space>" r'\(#?([^,]+)' # "(#buildno" r'(?:,\s*([\w ]*)' # ", builddate" r'(?:,\s*([\w :]*))?)?\)\s*' # ", buildtime)<space>" r'\[([^\]]+)\]?', re.ASCII) # "[compiler]" _ironpython_sys_version_parser = re.compile( r'IronPython\s*' r'([\d\.]+)' r'(?: \(([\d\.]+)\))?' 
r' on (.NET [\d\.]+)', re.ASCII) # IronPython covering 2.6 and 2.7 _ironpython26_sys_version_parser = re.compile( r'([\d.]+)\s*' r'\(IronPython\s*' r'[\d.]+\s*' r'\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)' ) _pypy_sys_version_parser = re.compile( r'([\w.+]+)\s*' r'\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*' r'\[PyPy [^\]]+\]?') _sys_version_cache = {} def _sys_version(sys_version=None): """ Returns a parsed version of Python's sys.version as tuple (name, version, branch, revision, buildno, builddate, compiler) referring to the Python implementation name, version, branch, revision, build number, build date/time as string and the compiler identification string. Note that unlike the Python sys.version, the returned value for the Python version will always include the patchlevel (it defaults to '.0'). The function returns empty strings for tuple entries that cannot be determined. sys_version may be given to parse an alternative version string, e.g. if the version was read from a different Python interpreter. """ # Get the Python version if sys_version is None: sys_version = sys.version # Try the cache first result = _sys_version_cache.get(sys_version, None) if result is not None: return result # Parse it if 'IronPython' in sys_version: # IronPython name = 'IronPython' if sys_version.startswith('IronPython'): match = _ironpython_sys_version_parser.match(sys_version) else: match = _ironpython26_sys_version_parser.match(sys_version) if match is None: raise ValueError( 'failed to parse IronPython sys.version: %s' % repr(sys_version)) version, alt_version, compiler = match.groups() buildno = '' builddate = '' elif sys.platform.startswith('java'): # Jython name = 'Jython' match = _sys_version_parser.match(sys_version) if match is None: raise ValueError( 'failed to parse Jython sys.version: %s' % repr(sys_version)) version, buildno, builddate, buildtime, _ = match.groups() if builddate is None: builddate = '' compiler = sys.platform elif "PyPy" in sys_version: # PyPy name = "PyPy" match = _pypy_sys_version_parser.match(sys_version) if match is None: raise ValueError("failed to parse PyPy sys.version: %s" % repr(sys_version)) version, buildno, builddate, buildtime = match.groups() compiler = "" else: # CPython match = _sys_version_parser.match(sys_version) if match is None: raise ValueError( 'failed to parse CPython sys.version: %s' % repr(sys_version)) version, buildno, builddate, buildtime, compiler = \ match.groups() name = 'CPython' if builddate is None: builddate = '' elif buildtime: builddate = builddate + ' ' + buildtime if hasattr(sys, '_git'): _, branch, revision = sys._git elif hasattr(sys, '_mercurial'): _, branch, revision = sys._mercurial else: branch = '' revision = '' # Add the patchlevel version if missing l = version.split('.') if len(l) == 2: l.append('0') version = '.'.join(l) # Build and cache the result result = (name, version, branch, revision, buildno, builddate, compiler) _sys_version_cache[sys_version] = result return result def python_implementation(): """ Returns a string identifying the Python implementation. Currently, the following implementations are identified: 'CPython' (C implementation of Python), 'IronPython' (.NET implementation of Python), 'Jython' (Java implementation of Python), 'PyPy' (Python implementation of Python). """ return _sys_version()[0] def python_version(): """ Returns the Python version as string 'major.minor.patchlevel' Note that unlike the Python sys.version, the returned value will always include the patchlevel (it defaults to 0). 
""" return _sys_version()[1] def python_version_tuple(): """ Returns the Python version as tuple (major, minor, patchlevel) of strings. Note that unlike the Python sys.version, the returned value will always include the patchlevel (it defaults to 0). """ return tuple(_sys_version()[1].split('.')) def python_branch(): """ Returns a string identifying the Python implementation branch. For CPython this is the SCM branch from which the Python binary was built. If not available, an empty string is returned. """ return _sys_version()[2] def python_revision(): """ Returns a string identifying the Python implementation revision. For CPython this is the SCM revision from which the Python binary was built. If not available, an empty string is returned. """ return _sys_version()[3] def python_build(): """ Returns a tuple (buildno, builddate) stating the Python build number and date as strings. """ return _sys_version()[4:6] def python_compiler(): """ Returns a string identifying the compiler used for compiling Python. """ return _sys_version()[6] ### The Opus Magnum of platform strings :-) _platform_cache = {} def platform(aliased=0, terse=0): """ Returns a single string identifying the underlying platform with as much useful information as possible (but no more :). The output is intended to be human readable rather than machine parseable. It may look different on different platforms and this is intended. If "aliased" is true, the function will use aliases for various platforms that report system names which differ from their common names, e.g. SunOS will be reported as Solaris. The system_alias() function is used to implement this. Setting terse to true causes the function to return only the absolute minimum information needed to identify the platform. """ result = _platform_cache.get((aliased, terse), None) if result is not None: return result # Get uname information and then apply platform specific cosmetics # to it... system, node, release, version, machine, processor = uname() if machine == processor: processor = '' if aliased: system, release, version = system_alias(system, release, version) if system == 'Windows': # MS platforms rel, vers, csd, ptype = win32_ver(version) if terse: platform = _platform(system, release) else: platform = _platform(system, release, version, csd) elif system in ('Linux',): # check for libc vs. glibc libcname, libcversion = libc_ver(sys.executable) platform = _platform(system, release, machine, processor, 'with', libcname+libcversion) elif system == 'Java': # Java platforms r, v, vminfo, (os_name, os_version, os_arch) = java_ver() if terse or not os_name: platform = _platform(system, release, version) else: platform = _platform(system, release, version, 'on', os_name, os_version, os_arch) elif system == 'MacOS': # MacOS platforms if terse: platform = _platform(system, release) else: platform = _platform(system, release, machine) else: # Generic handler if terse: platform = _platform(system, release) else: bits, linkage = architecture(sys.executable) platform = _platform(system, release, machine, processor, bits, linkage) _platform_cache[(aliased, terse)] = platform return platform ### Command line interface if __name__ == '__main__': # Default is to print the aliased verbose platform string terse = ('terse' in sys.argv or '--terse' in sys.argv) aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv) print(platform(aliased, terse)) sys.exit(0)
[]
[]
[ "PROCESSOR_ARCHITEW6432", "PROCESSOR_IDENTIFIER", "PROCESSOR_ARCHITECTURE" ]
[]
["PROCESSOR_ARCHITEW6432", "PROCESSOR_IDENTIFIER", "PROCESSOR_ARCHITECTURE"]
python
3
0
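uname() above consults PROCESSOR_ARCHITEW6432 before PROCESSOR_ARCHITECTURE because a 32-bit process under WOW64 sees the emulated architecture in the latter, while the former carries the native one. The same precedence, re-expressed as a small Go sketch with an illustrative function name:

package main

import (
	"fmt"
	"os"
)

// windowsMachine reproduces the precedence in uname(): prefer the native
// architecture reported by PROCESSOR_ARCHITEW6432 (set only under WOW64),
// then fall back to PROCESSOR_ARCHITECTURE; PROCESSOR_IDENTIFIER defaults
// to the machine value, as in the Python code.
func windowsMachine() (machine, processor string) {
	if v, ok := os.LookupEnv("PROCESSOR_ARCHITEW6432"); ok {
		machine = v
	} else {
		machine = os.Getenv("PROCESSOR_ARCHITECTURE")
	}
	processor = os.Getenv("PROCESSOR_IDENTIFIER")
	if processor == "" {
		processor = machine
	}
	return machine, processor
}

func main() {
	m, p := windowsMachine()
	fmt.Printf("machine=%q processor=%q\n", m, p)
}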
src/_04RepeatedString/Solution.java
package _04RepeatedString; import java.io.BufferedWriter; import java.io.FileWriter; import java.io.IOException; import java.util.Scanner; public class Solution { // Complete the repeatedString function below. static long repeatedString(String s, long n) { String temp = s; long count = temp.replaceAll("[^a]", "").length(); long repeatTimes = n / s.length(); count *= repeatTimes; long remaining = n - repeatTimes * s.length(); temp = s.substring(0, Math.toIntExact(remaining)); count += temp.replaceAll("[^a]", "").length(); return count; } private static final Scanner scanner = new Scanner(System.in); public static void main(String[] args) throws IOException { BufferedWriter bufferedWriter = new BufferedWriter(new FileWriter(System.getenv("OUTPUT_PATH"))); String s = scanner.nextLine(); long n = scanner.nextLong(); scanner.skip("(\r\n|[\n\r\u2028\u2029\u0085])?"); long result = repeatedString(s, n); bufferedWriter.write(String.valueOf(result)); bufferedWriter.newLine(); bufferedWriter.close(); scanner.close(); } }
[ "\"OUTPUT_PATH\"" ]
[]
[ "OUTPUT_PATH" ]
[]
["OUTPUT_PATH"]
java
1
0
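repeatedString above avoids materializing the repeated string: it counts 'a' in one copy, scales by the n / s.length() whole repetitions, and adds the count in the leftover prefix of length n % s.length(). The same logic as a Go sketch:

package main

import (
	"fmt"
	"strings"
)

// repeatedString returns the number of 'a' characters in the first n
// characters of s repeated infinitely: per-copy count times the whole
// repetitions, plus the count in the leftover prefix.
func repeatedString(s string, n int64) int64 {
	perCopy := int64(strings.Count(s, "a"))
	length := int64(len(s))
	full := n / length
	rest := n % length
	return perCopy*full + int64(strings.Count(s[:rest], "a"))
}

func main() {
	fmt.Println(repeatedString("aba", 10)) // prints 7
}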
vendor/pkg/mod/github.com/rs/[email protected]/log_test.go
package zerolog import ( "bytes" "errors" "fmt" "net" "reflect" "runtime" "strconv" "strings" "testing" "time" ) func TestLog(t *testing.T) { t.Run("empty", func(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log().Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), "{}\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) t.Run("one-field", func(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log().Str("foo", "bar").Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"foo":"bar"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) t.Run("two-field", func(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log(). Str("foo", "bar"). Int("n", 123). Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"foo":"bar","n":123}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) } func TestInfo(t *testing.T) { t.Run("empty", func(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Info().Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"info"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) t.Run("one-field", func(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Info().Str("foo", "bar").Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"info","foo":"bar"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) t.Run("two-field", func(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Info(). Str("foo", "bar"). Int("n", 123). Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"info","foo":"bar","n":123}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) } func TestWith(t *testing.T) { out := &bytes.Buffer{} ctx := New(out).With(). Str("string", "foo"). Bytes("bytes", []byte("bar")). Hex("hex", []byte{0x12, 0xef}). RawJSON("json", []byte(`{"some":"json"}`)). AnErr("some_err", nil). Err(errors.New("some error")). Bool("bool", true). Int("int", 1). Int8("int8", 2). Int16("int16", 3). Int32("int32", 4). Int64("int64", 5). Uint("uint", 6). Uint8("uint8", 7). Uint16("uint16", 8). Uint32("uint32", 9). Uint64("uint64", 10). Float32("float32", 11.101). Float64("float64", 12.30303). Time("time", time.Time{}) _, file, line, _ := runtime.Caller(0) caller := fmt.Sprintf("%s:%d", file, line+3) log := ctx.Caller().Logger() log.Log().Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"string":"foo","bytes":"bar","hex":"12ef","json":{"some":"json"},"error":"some error","bool":true,"int":1,"int8":2,"int16":3,"int32":4,"int64":5,"uint":6,"uint8":7,"uint16":8,"uint32":9,"uint64":10,"float32":11.101,"float64":12.30303,"time":"0001-01-01T00:00:00Z","caller":"`+caller+`"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } // Validate CallerWithSkipFrameCount. out.Reset() _, file, line, _ = runtime.Caller(0) caller = fmt.Sprintf("%s:%d", file, line+5) log = ctx.CallerWithSkipFrameCount(3).Logger() func() { log.Log().Msg("") }() // The above line is a little contrived, but the line above should be the line due // to the extra frame skip. 
if got, want := decodeIfBinaryToString(out.Bytes()), `{"string":"foo","bytes":"bar","hex":"12ef","json":{"some":"json"},"error":"some error","bool":true,"int":1,"int8":2,"int16":3,"int32":4,"int64":5,"uint":6,"uint8":7,"uint16":8,"uint32":9,"uint64":10,"float32":11.101,"float64":12.30303,"time":"0001-01-01T00:00:00Z","caller":"`+caller+`"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestFieldsMap(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log().Fields(map[string]interface{}{ "nil": nil, "string": "foo", "bytes": []byte("bar"), "error": errors.New("some error"), "bool": true, "int": int(1), "int8": int8(2), "int16": int16(3), "int32": int32(4), "int64": int64(5), "uint": uint(6), "uint8": uint8(7), "uint16": uint16(8), "uint32": uint32(9), "uint64": uint64(10), "float32": float32(11), "float64": float64(12), "ipv6": net.IP{0x20, 0x01, 0x0d, 0xb8, 0x85, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x34}, "dur": 1 * time.Second, "time": time.Time{}, "obj": obj{"a", "b", 1}, }).Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"bool":true,"bytes":"bar","dur":1000,"error":"some error","float32":11,"float64":12,"int":1,"int16":3,"int32":4,"int64":5,"int8":2,"ipv6":"2001:db8:85a3::8a2e:370:7334","nil":null,"obj":{"Pub":"a","Tag":"b","priv":1},"string":"foo","time":"0001-01-01T00:00:00Z","uint":6,"uint16":8,"uint32":9,"uint64":10,"uint8":7}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestFieldsMapPnt(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log().Fields(map[string]interface{}{ "string": new(string), "bool": new(bool), "int": new(int), "int8": new(int8), "int16": new(int16), "int32": new(int32), "int64": new(int64), "uint": new(uint), "uint8": new(uint8), "uint16": new(uint16), "uint32": new(uint32), "uint64": new(uint64), "float32": new(float32), "float64": new(float64), "dur": new(time.Duration), "time": new(time.Time), }).Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"bool":false,"dur":0,"float32":0,"float64":0,"int":0,"int16":0,"int32":0,"int64":0,"int8":0,"string":"","time":"0001-01-01T00:00:00Z","uint":0,"uint16":0,"uint32":0,"uint64":0,"uint8":0}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestFieldsMapNilPnt(t *testing.T) { var ( stringPnt *string boolPnt *bool intPnt *int int8Pnt *int8 int16Pnt *int16 int32Pnt *int32 int64Pnt *int64 uintPnt *uint uint8Pnt *uint8 uint16Pnt *uint16 uint32Pnt *uint32 uint64Pnt *uint64 float32Pnt *float32 float64Pnt *float64 durPnt *time.Duration timePnt *time.Time ) out := &bytes.Buffer{} log := New(out) fields := map[string]interface{}{ "string": stringPnt, "bool": boolPnt, "int": intPnt, "int8": int8Pnt, "int16": int16Pnt, "int32": int32Pnt, "int64": int64Pnt, "uint": uintPnt, "uint8": uint8Pnt, "uint16": uint16Pnt, "uint32": uint32Pnt, "uint64": uint64Pnt, "float32": float32Pnt, "float64": float64Pnt, "dur": durPnt, "time": timePnt, } log.Log().Fields(fields).Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"bool":null,"dur":null,"float32":null,"float64":null,"int":null,"int16":null,"int32":null,"int64":null,"int8":null,"string":null,"time":null,"uint":null,"uint16":null,"uint32":null,"uint64":null,"uint8":null}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestFields(t *testing.T) { out := &bytes.Buffer{} log := New(out) now := time.Now() _, file, line, _ := runtime.Caller(0) 
caller := fmt.Sprintf("%s:%d", file, line+3) log.Log(). Caller(). Str("string", "foo"). Stringer("stringer", net.IP{127, 0, 0, 1}). Stringer("stringer_nil", nil). Bytes("bytes", []byte("bar")). Hex("hex", []byte{0x12, 0xef}). RawJSON("json", []byte(`{"some":"json"}`)). AnErr("some_err", nil). Err(errors.New("some error")). Bool("bool", true). Int("int", 1). Int8("int8", 2). Int16("int16", 3). Int32("int32", 4). Int64("int64", 5). Uint("uint", 6). Uint8("uint8", 7). Uint16("uint16", 8). Uint32("uint32", 9). Uint64("uint64", 10). IPAddr("IPv4", net.IP{192, 168, 0, 100}). IPAddr("IPv6", net.IP{0x20, 0x01, 0x0d, 0xb8, 0x85, 0xa3, 0x00, 0x00, 0x00, 0x00, 0x8a, 0x2e, 0x03, 0x70, 0x73, 0x34}). MACAddr("Mac", net.HardwareAddr{0x00, 0x14, 0x22, 0x01, 0x23, 0x45}). IPPrefix("Prefix", net.IPNet{IP: net.IP{192, 168, 0, 100}, Mask: net.CIDRMask(24, 32)}). Float32("float32", 11.1234). Float64("float64", 12.321321321). Dur("dur", 1*time.Second). Time("time", time.Time{}). TimeDiff("diff", now, now.Add(-10*time.Second)). Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"caller":"`+caller+`","string":"foo","stringer":"127.0.0.1","stringer_nil":null,"bytes":"bar","hex":"12ef","json":{"some":"json"},"error":"some error","bool":true,"int":1,"int8":2,"int16":3,"int32":4,"int64":5,"uint":6,"uint8":7,"uint16":8,"uint32":9,"uint64":10,"IPv4":"192.168.0.100","IPv6":"2001:db8:85a3::8a2e:370:7334","Mac":"00:14:22:01:23:45","Prefix":"192.168.0.100/24","float32":11.1234,"float64":12.321321321,"dur":1000,"time":"0001-01-01T00:00:00Z","diff":10000}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestFieldsArrayEmpty(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log(). Strs("string", []string{}). Errs("err", []error{}). Bools("bool", []bool{}). Ints("int", []int{}). Ints8("int8", []int8{}). Ints16("int16", []int16{}). Ints32("int32", []int32{}). Ints64("int64", []int64{}). Uints("uint", []uint{}). Uints8("uint8", []uint8{}). Uints16("uint16", []uint16{}). Uints32("uint32", []uint32{}). Uints64("uint64", []uint64{}). Floats32("float32", []float32{}). Floats64("float64", []float64{}). Durs("dur", []time.Duration{}). Times("time", []time.Time{}). Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"string":[],"err":[],"bool":[],"int":[],"int8":[],"int16":[],"int32":[],"int64":[],"uint":[],"uint8":[],"uint16":[],"uint32":[],"uint64":[],"float32":[],"float64":[],"dur":[],"time":[]}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestFieldsArraySingleElement(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log(). Strs("string", []string{"foo"}). Errs("err", []error{errors.New("some error")}). Bools("bool", []bool{true}). Ints("int", []int{1}). Ints8("int8", []int8{2}). Ints16("int16", []int16{3}). Ints32("int32", []int32{4}). Ints64("int64", []int64{5}). Uints("uint", []uint{6}). Uints8("uint8", []uint8{7}). Uints16("uint16", []uint16{8}). Uints32("uint32", []uint32{9}). Uints64("uint64", []uint64{10}). Floats32("float32", []float32{11}). Floats64("float64", []float64{12}). Durs("dur", []time.Duration{1 * time.Second}). Times("time", []time.Time{time.Time{}}). 
Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"string":["foo"],"err":["some error"],"bool":[true],"int":[1],"int8":[2],"int16":[3],"int32":[4],"int64":[5],"uint":[6],"uint8":[7],"uint16":[8],"uint32":[9],"uint64":[10],"float32":[11],"float64":[12],"dur":[1000],"time":["0001-01-01T00:00:00Z"]}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestFieldsArrayMultipleElement(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log(). Strs("string", []string{"foo", "bar"}). Errs("err", []error{errors.New("some error"), nil}). Bools("bool", []bool{true, false}). Ints("int", []int{1, 0}). Ints8("int8", []int8{2, 0}). Ints16("int16", []int16{3, 0}). Ints32("int32", []int32{4, 0}). Ints64("int64", []int64{5, 0}). Uints("uint", []uint{6, 0}). Uints8("uint8", []uint8{7, 0}). Uints16("uint16", []uint16{8, 0}). Uints32("uint32", []uint32{9, 0}). Uints64("uint64", []uint64{10, 0}). Floats32("float32", []float32{11, 0}). Floats64("float64", []float64{12, 0}). Durs("dur", []time.Duration{1 * time.Second, 0}). Times("time", []time.Time{time.Time{}, time.Time{}}). Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"string":["foo","bar"],"err":["some error",null],"bool":[true,false],"int":[1,0],"int8":[2,0],"int16":[3,0],"int32":[4,0],"int64":[5,0],"uint":[6,0],"uint8":[7,0],"uint16":[8,0],"uint32":[9,0],"uint64":[10,0],"float32":[11,0],"float64":[12,0],"dur":[1000,0],"time":["0001-01-01T00:00:00Z","0001-01-01T00:00:00Z"]}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestFieldsDisabled(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(InfoLevel) now := time.Now() log.Debug(). Str("string", "foo"). Bytes("bytes", []byte("bar")). Hex("hex", []byte{0x12, 0xef}). AnErr("some_err", nil). Err(errors.New("some error")). Bool("bool", true). Int("int", 1). Int8("int8", 2). Int16("int16", 3). Int32("int32", 4). Int64("int64", 5). Uint("uint", 6). Uint8("uint8", 7). Uint16("uint16", 8). Uint32("uint32", 9). Uint64("uint64", 10). Float32("float32", 11). Float64("float64", 12). Dur("dur", 1*time.Second). Time("time", time.Time{}). TimeDiff("diff", now, now.Add(-10*time.Second)). 
Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), ""; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestMsgf(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log().Msgf("one %s %.1f %d %v", "two", 3.4, 5, errors.New("six")) if got, want := decodeIfBinaryToString(out.Bytes()), `{"message":"one two 3.4 5 six"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestWithAndFieldsCombined(t *testing.T) { out := &bytes.Buffer{} log := New(out).With().Str("f1", "val").Str("f2", "val").Logger() log.Log().Str("f3", "val").Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), `{"f1":"val","f2":"val","f3":"val"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestLevel(t *testing.T) { t.Run("Disabled", func(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(Disabled) log.Info().Msg("test") if got, want := decodeIfBinaryToString(out.Bytes()), ""; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) t.Run("NoLevel/Disabled", func(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(Disabled) log.Log().Msg("test") if got, want := decodeIfBinaryToString(out.Bytes()), ""; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) t.Run("NoLevel/Info", func(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(InfoLevel) log.Log().Msg("test") if got, want := decodeIfBinaryToString(out.Bytes()), `{"message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) t.Run("NoLevel/Panic", func(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(PanicLevel) log.Log().Msg("test") if got, want := decodeIfBinaryToString(out.Bytes()), `{"message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) t.Run("NoLevel/WithLevel", func(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(InfoLevel) log.WithLevel(NoLevel).Msg("test") if got, want := decodeIfBinaryToString(out.Bytes()), `{"message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) t.Run("Info", func(t *testing.T) { out := &bytes.Buffer{} log := New(out).Level(InfoLevel) log.Info().Msg("test") if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"info","message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } }) } func TestGetLevel(t *testing.T) { levels := []Level{ DebugLevel, InfoLevel, WarnLevel, ErrorLevel, FatalLevel, PanicLevel, NoLevel, Disabled, } for _, level := range levels { if got, want := New(nil).Level(level).GetLevel(), level; got != want { t.Errorf("GetLevel() = %v, want: %v", got, want) } } } func TestSampling(t *testing.T) { out := &bytes.Buffer{} log := New(out).Sample(&BasicSampler{N: 2}) log.Log().Int("i", 1).Msg("") log.Log().Int("i", 2).Msg("") log.Log().Int("i", 3).Msg("") log.Log().Int("i", 4).Msg("") if got, want := decodeIfBinaryToString(out.Bytes()), "{\"i\":1}\n{\"i\":3}\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestDiscard(t *testing.T) { out := &bytes.Buffer{} log := New(out) log.Log().Discard().Str("a", "b").Msgf("one %s %.1f %d %v", "two", 3.4, 5, errors.New("six")) if got, want := decodeIfBinaryToString(out.Bytes()), ""; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } // Double call log.Log().Discard().Discard().Str("a", 
"b").Msgf("one %s %.1f %d %v", "two", 3.4, 5, errors.New("six")) if got, want := decodeIfBinaryToString(out.Bytes()), ""; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } type levelWriter struct { ops []struct { l Level p string } } func (lw *levelWriter) Write(p []byte) (int, error) { return len(p), nil } func (lw *levelWriter) WriteLevel(lvl Level, p []byte) (int, error) { p = decodeIfBinaryToBytes(p) lw.ops = append(lw.ops, struct { l Level p string }{lvl, string(p)}) return len(p), nil } func TestLevelWriter(t *testing.T) { lw := &levelWriter{ ops: []struct { l Level p string }{}, } log := New(lw) log.Trace().Msg("0") log.Debug().Msg("1") log.Info().Msg("2") log.Warn().Msg("3") log.Error().Msg("4") log.Log().Msg("nolevel-1") log.WithLevel(TraceLevel).Msg("5") log.WithLevel(DebugLevel).Msg("6") log.WithLevel(InfoLevel).Msg("7") log.WithLevel(WarnLevel).Msg("8") log.WithLevel(ErrorLevel).Msg("9") log.WithLevel(NoLevel).Msg("nolevel-2") want := []struct { l Level p string }{ {TraceLevel, `{"level":"trace","message":"0"}` + "\n"}, {DebugLevel, `{"level":"debug","message":"1"}` + "\n"}, {InfoLevel, `{"level":"info","message":"2"}` + "\n"}, {WarnLevel, `{"level":"warn","message":"3"}` + "\n"}, {ErrorLevel, `{"level":"error","message":"4"}` + "\n"}, {NoLevel, `{"message":"nolevel-1"}` + "\n"}, {TraceLevel, `{"level":"trace","message":"5"}` + "\n"}, {DebugLevel, `{"level":"debug","message":"6"}` + "\n"}, {InfoLevel, `{"level":"info","message":"7"}` + "\n"}, {WarnLevel, `{"level":"warn","message":"8"}` + "\n"}, {ErrorLevel, `{"level":"error","message":"9"}` + "\n"}, {NoLevel, `{"message":"nolevel-2"}` + "\n"}, } if got := lw.ops; !reflect.DeepEqual(got, want) { t.Errorf("invalid ops:\ngot:\n%v\nwant:\n%v", got, want) } } func TestContextTimestamp(t *testing.T) { TimestampFunc = func() time.Time { return time.Date(2001, time.February, 3, 4, 5, 6, 7, time.UTC) } defer func() { TimestampFunc = time.Now }() out := &bytes.Buffer{} log := New(out).With().Timestamp().Str("foo", "bar").Logger() log.Log().Msg("hello world") if got, want := decodeIfBinaryToString(out.Bytes()), `{"foo":"bar","time":"2001-02-03T04:05:06Z","message":"hello world"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestEventTimestamp(t *testing.T) { TimestampFunc = func() time.Time { return time.Date(2001, time.February, 3, 4, 5, 6, 7, time.UTC) } defer func() { TimestampFunc = time.Now }() out := &bytes.Buffer{} log := New(out).With().Str("foo", "bar").Logger() log.Log().Timestamp().Msg("hello world") if got, want := decodeIfBinaryToString(out.Bytes()), `{"foo":"bar","time":"2001-02-03T04:05:06Z","message":"hello world"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestOutputWithoutTimestamp(t *testing.T) { ignoredOut := &bytes.Buffer{} out := &bytes.Buffer{} log := New(ignoredOut).Output(out).With().Str("foo", "bar").Logger() log.Log().Msg("hello world") if got, want := decodeIfBinaryToString(out.Bytes()), `{"foo":"bar","message":"hello world"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestOutputWithTimestamp(t *testing.T) { TimestampFunc = func() time.Time { return time.Date(2001, time.February, 3, 4, 5, 6, 7, time.UTC) } defer func() { TimestampFunc = time.Now }() ignoredOut := &bytes.Buffer{} out := &bytes.Buffer{} log := New(ignoredOut).Output(out).With().Timestamp().Str("foo", "bar").Logger() log.Log().Msg("hello world") if got, want := 
decodeIfBinaryToString(out.Bytes()), `{"foo":"bar","time":"2001-02-03T04:05:06Z","message":"hello world"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } type loggableError struct { error } func (l loggableError) MarshalZerologObject(e *Event) { e.Str("message", l.error.Error()+": loggableError") } func TestErrorMarshalFunc(t *testing.T) { out := &bytes.Buffer{} log := New(out) // test default behaviour log.Log().Err(errors.New("err")).Msg("msg") if got, want := decodeIfBinaryToString(out.Bytes()), `{"error":"err","message":"msg"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } out.Reset() log.Log().Err(loggableError{errors.New("err")}).Msg("msg") if got, want := decodeIfBinaryToString(out.Bytes()), `{"error":{"message":"err: loggableError"},"message":"msg"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } out.Reset() // test overriding the ErrorMarshalFunc originalErrorMarshalFunc := ErrorMarshalFunc defer func() { ErrorMarshalFunc = originalErrorMarshalFunc }() ErrorMarshalFunc = func(err error) interface{} { return err.Error() + ": marshaled string" } log.Log().Err(errors.New("err")).Msg("msg") if got, want := decodeIfBinaryToString(out.Bytes()), `{"error":"err: marshaled string","message":"msg"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } out.Reset() ErrorMarshalFunc = func(err error) interface{} { return errors.New(err.Error() + ": new error") } log.Log().Err(errors.New("err")).Msg("msg") if got, want := decodeIfBinaryToString(out.Bytes()), `{"error":"err: new error","message":"msg"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } out.Reset() ErrorMarshalFunc = func(err error) interface{} { return loggableError{err} } log.Log().Err(errors.New("err")).Msg("msg") if got, want := decodeIfBinaryToString(out.Bytes()), `{"error":{"message":"err: loggableError"},"message":"msg"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestCallerMarshalFunc(t *testing.T) { out := &bytes.Buffer{} log := New(out) // test default behaviour this is really brittle due to the line numbers // actually mattering for validation _, file, line, _ := runtime.Caller(0) caller := fmt.Sprintf("%s:%d", file, line+2) log.Log().Caller().Msg("msg") if got, want := decodeIfBinaryToString(out.Bytes()), `{"caller":"`+caller+`","message":"msg"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } out.Reset() // test custom behavior. 
In this case we'll take just the last directory origCallerMarshalFunc := CallerMarshalFunc defer func() { CallerMarshalFunc = origCallerMarshalFunc }() CallerMarshalFunc = func(file string, line int) string { parts := strings.Split(file, "/") if len(parts) > 1 { return strings.Join(parts[len(parts)-2:], "/") + ":" + strconv.Itoa(line) } return file + ":" + strconv.Itoa(line) } _, file, line, _ = runtime.Caller(0) caller = CallerMarshalFunc(file, line+2) log.Log().Caller().Msg("msg") if got, want := decodeIfBinaryToString(out.Bytes()), `{"caller":"`+caller+`","message":"msg"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } } func TestLevelFieldMarshalFunc(t *testing.T) { origLevelFieldMarshalFunc := LevelFieldMarshalFunc LevelFieldMarshalFunc = func(l Level) string { return strings.ToUpper(l.String()) } defer func() { LevelFieldMarshalFunc = origLevelFieldMarshalFunc }() out := &bytes.Buffer{} log := New(out) log.Debug().Msg("test") if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"DEBUG","message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } out.Reset() log.Info().Msg("test") if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"INFO","message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } out.Reset() log.Warn().Msg("test") if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"WARN","message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } out.Reset() log.Error().Msg("test") if got, want := decodeIfBinaryToString(out.Bytes()), `{"level":"ERROR","message":"test"}`+"\n"; got != want { t.Errorf("invalid log output:\ngot: %v\nwant: %v", got, want) } out.Reset() } type errWriter struct { error } func (w errWriter) Write(p []byte) (n int, err error) { return 0, w.error } func TestErrorHandler(t *testing.T) { var got error want := errors.New("write error") ErrorHandler = func(err error) { got = err } log := New(errWriter{want}) log.Log().Msg("test") if got != want { t.Errorf("ErrorHandler err = %#v, want %#v", got, want) } }
[]
[]
[]
[]
[]
go
0
0
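The record above exercises zerolog's fluent, typed field API from inside the package. For orientation, a minimal sketch of the same API used from outside the package, assuming the upstream import path github.com/rs/zerolog (the field values here are illustrative):

```go
package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

func main() {
	// Build a logger that writes JSON to stdout, with a static field
	// attached via the With() context, as in TestWithAndFieldsCombined.
	logger := zerolog.New(os.Stdout).With().Str("f1", "val").Logger()

	// Typed field setters append keys in call order; Msg() terminates
	// the event and writes one JSON line.
	logger.Info().
		Str("string", "foo").
		Int("int", 1).
		Dur("dur", time.Second). // durations serialize in ms by default, hence 1000
		Msg("hello")
	// => {"level":"info","f1":"val","string":"foo","int":1,"dur":1000,"message":"hello"}
}
```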
FUU.go
package BLC import ( "fmt" "flag" "os" "log" ) type CLI struct { } // print usage for all supported commands func printUsage() { fmt.Println("Usage:") fmt.Println("\tcreateBlockchain -address --genesis block address ") fmt.Println("\tsend -from FROM -to TO -amount AMOUNT --transaction details") fmt.Println("\tprintchain --print all block info") fmt.Println("\tgetbalance -address -- print the balance of an address.") fmt.Println("\tcreateWallet -- create a wallet.") fmt.Println("\tgetAddressList -- print all wallet addresses.") fmt.Println("\tresetUTXOset -- reset the UTXO set.") fmt.Println("\tstartnode -miner ADDRESS -- start the node server and set the address that receives mining rewards.") } func isValidArgs() { // check the number of command-line arguments if len(os.Args) < 2 { printUsage() os.Exit(1) } } func (cli *CLI) Run() { isValidArgs() // get the node ID; it can be set from the shell via export NODE_ID=8888 nodeID := os.Getenv("NODE_ID") if nodeID == "" { fmt.Printf("NODE_ID env var is not set!\n") os.Exit(1) } fmt.Printf("NODE_ID:%s\n", nodeID) // define the CLI subcommands sendBlockCmd := flag.NewFlagSet("send", flag.ExitOnError) printchainCmd := flag.NewFlagSet("printchain", flag.ExitOnError) createBlockchainCmd := flag.NewFlagSet("createBlockchain", flag.ExitOnError) blanceBlockCmd := flag.NewFlagSet("getBalance", flag.ExitOnError) createWalletCmd := flag.NewFlagSet("createWallet", flag.ExitOnError) getAddressListCmd := flag.NewFlagSet("getAddressList", flag.ExitOnError) resetUTXOsetCmd := flag.NewFlagSet("resetUTXOset", flag.ExitOnError) startNodeCmd := flag.NewFlagSet("startnode", flag.ExitOnError) // default flags for the send subcommand flagSendBlockMine := sendBlockCmd.Bool("mine", false, "verify the transaction immediately on the current node") flagSendBlockFrom := sendBlockCmd.String("from", "", "source address") flagSendBlockTo := sendBlockCmd.String("to", "", "destination address") flagSendBlockAmount := sendBlockCmd.String("amount", "", "transfer amount") flagCreateBlockchainAddress := createBlockchainCmd.String("address", "", "genesis block address") flagBlanceBlockAddress := blanceBlockCmd.String("address", "", "address whose balance to print") flagMiner := startNodeCmd.String("miner", "", "address that receives mining rewards") // os.Args[1] selects the subcommand; os.Args[0] is the binary itself switch os.Args[1] { case "send": // os.Args[1] is the subcommand, so everything from os.Args[2] on is parsed as its flags err := sendBlockCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "printchain": err := printchainCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "createBlockchain": err := createBlockchainCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "getBalance": err := blanceBlockCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "createWallet": err := createWalletCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "getAddressList": err := getAddressListCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "resetUTXOset": err := resetUTXOsetCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } case "startnode": err := startNodeCmd.Parse(os.Args[2:]) if err != nil { log.Panic(err) } default: printUsage() os.Exit(1) } // handle the send subcommand if sendBlockCmd.Parsed() { if *flagSendBlockFrom == "" { printUsage() os.Exit(1) } if *flagSendBlockTo == "" { printUsage() os.Exit(1) } if *flagSendBlockAmount == "" { printUsage() os.Exit(1) } //cli.addBlock(*flagAddBlockData) // this is where the transfer is actually performed from := Json2Array(*flagSendBlockFrom) to := Json2Array(*flagSendBlockTo) // validate the input addresses for index, fromAddress := range from { if IsValidForAddress([]byte(fromAddress)) == false || IsValidForAddress([]byte(to[index])) == false { fmt.Printf("Address:%s invalid\n", fromAddress) os.Exit(1) } } amount := Json2Array(*flagSendBlockAmount) cli.send(from, to, amount, nodeID, *flagSendBlockMine) } // handle the printchain subcommand if printchainCmd.Parsed() { cli.printchain(nodeID) } // create the blockchain if createBlockchainCmd.Parsed() { if
*flagCreateBlockchainAddress == "" { printUsage() os.Exit(1) } cli.creatBlockchain(*flagCreateBlockchainAddress, nodeID) } // query a balance if blanceBlockCmd.Parsed() { if *flagBlanceBlockAddress == "" { printUsage() os.Exit(1) } cli.getBlance(*flagBlanceBlockAddress, nodeID) } // create a wallet if createWalletCmd.Parsed() { cli.createWallet(nodeID) } // list all wallet addresses if getAddressListCmd.Parsed() { cli.getAddressList(nodeID) } // reset the UTXO set if resetUTXOsetCmd.Parsed() { cli.ResetUTXOSet(nodeID) } // start the node, optionally with a mining reward address if startNodeCmd.Parsed() { cli.startNode(nodeID, *flagMiner) } }
[ "\"NODE_ID\"" ]
[]
[ "NODE_ID" ]
[]
["NODE_ID"]
go
1
0
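The CLI above gates everything on the NODE_ID environment variable and wires one flag.FlagSet per subcommand. A stripped-down sketch of that pattern using only the standard library (subcommand and flag names are illustrative, not from the record):

```go
package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	// Refuse to run without a node identity, as the CLI above does.
	nodeID := os.Getenv("NODE_ID")
	if nodeID == "" {
		fmt.Fprintln(os.Stderr, "NODE_ID env var is not set!")
		os.Exit(1)
	}

	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: app <send|printchain> [flags]")
		os.Exit(1)
	}

	// One FlagSet per subcommand; only the arguments after the
	// subcommand name are parsed by that set.
	sendCmd := flag.NewFlagSet("send", flag.ExitOnError)
	from := sendCmd.String("from", "", "source address")

	switch os.Args[1] {
	case "send":
		sendCmd.Parse(os.Args[2:]) // ExitOnError exits on a bad flag
		fmt.Printf("node %s sending from %q\n", nodeID, *from)
	default:
		fmt.Fprintln(os.Stderr, "unknown command:", os.Args[1])
		os.Exit(1)
	}
}
```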
executor/main.go
package executor import ( "os" "github.com/Sirupsen/logrus" "github.com/rancher/event-subscriber/events" "github.com/rancher/go-rancher/v3" "github.com/rancher/rancher-compose-executor/composinator" "github.com/rancher/rancher-compose-executor/executor/handlers" "github.com/rancher/rancher-compose-executor/version" ) func Main() { logger := logrus.WithFields(logrus.Fields{ "version": version.VERSION, }) go func() { err := composinator.StartServer() if err != nil { logger.Error(err) os.Exit(1) } }() logger.Info("Starting rancher-compose-executor") eventHandlers := map[string]events.EventHandler{ "stack.create": handlers.WithTimeout(handlers.CreateStack), "stack.update": handlers.WithTimeout(handlers.UpdateStack), "stack.remove": handlers.WithTimeout(handlers.DeleteStack), "ping": func(event *events.Event, apiClient *client.RancherClient) error { return nil }, } url := os.Getenv("CATTLE_URL") if url == "" { url = "http://localhost:8080/v3" } router, err := events.NewEventRouter("rancher-compose-executor", 2000, url, os.Getenv("CATTLE_ACCESS_KEY"), os.Getenv("CATTLE_SECRET_KEY"), nil, eventHandlers, "stack", 250, events.DefaultPingConfig) if err != nil { logrus.WithField("error", err).Fatal("Unable to create event router") } if err := router.Start(nil); err != nil { logrus.WithField("error", err).Fatal("Unable to start event router") } logger.Info("Exiting rancher-compose-executor") }
[ "\"CATTLE_URL\"", "\"CATTLE_ACCESS_KEY\"", "\"CATTLE_SECRET_KEY\"" ]
[]
[ "CATTLE_SECRET_KEY", "CATTLE_URL", "CATTLE_ACCESS_KEY" ]
[]
["CATTLE_SECRET_KEY", "CATTLE_URL", "CATTLE_ACCESS_KEY"]
go
3
0
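The executor falls back to a local default when CATTLE_URL is unset. A tiny helper capturing that env-with-default idiom (the helper name is mine, not from the Rancher code):

```go
package main

import (
	"fmt"
	"os"
)

// getenvDefault returns the value of key, or def when the variable is
// unset or empty — the same fallback the executor applies to CATTLE_URL.
func getenvDefault(key, def string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return def
}

func main() {
	url := getenvDefault("CATTLE_URL", "http://localhost:8080/v3")
	fmt.Println("event router URL:", url)
}
```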
cmd/alpacascloud-signal/main.go
package main import ( "github.com/signal-golang/textsecure/config" "github.com/signal-golang/textsecure/contacts" "log" "net/http" "os" "github.com/LeSuisse/alpacas.cloud/pkg/signal" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/signal-golang/textsecure" ) func getConfig() (*config.Config, error) { telNumber := os.Getenv("TEL_NUMBER") if telNumber == "" { log.Fatal("TEL_NUMBER environment variable must be set") } storageDirectory := os.Getenv("STORAGE_DIRECTORY") if storageDirectory == "" { log.Fatal("STORAGE_DIRECTORY environment variable must be set") } return &config.Config{ Tel: telNumber, StorageDir: storageDirectory, UnencryptedStorage: true, Name: "Alpacas.cloud", AlwaysTrustPeerID: true, AccountCapabilities: config.AccountCapabilities{ Gv2: true, Gv1Migration: true, }, }, nil } func getLocalContacts() ([]contacts.Contact, error) { return []contacts.Contact{}, nil } func main() { client := &textsecure.Client{ GetConfig: getConfig, GetLocalContacts: getLocalContacts, GetVerificationCode: func() string { log.Fatal("Phone number is expected to be already verified, please register it first if needed") return "" }, MessageHandler: signal.MessageHandler, RegistrationDone: func() { }, } err := textsecure.Setup(client) if err != nil { log.Fatal(err) } stop := make(chan bool) http.Handle("/metrics", promhttp.Handler()) go func() { err = http.ListenAndServe(":8080", nil) if err != nil { log.Fatal(err) } stop <- true }() go func() { err = textsecure.StartListening() if err != nil { log.Fatal(err) } stop<-true }() <-stop }
[ "\"TEL_NUMBER\"", "\"STORAGE_DIRECTORY\"" ]
[]
[ "TEL_NUMBER", "STORAGE_DIRECTORY" ]
[]
["TEL_NUMBER", "STORAGE_DIRECTORY"]
go
2
0
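Note that getConfig above calls log.Fatal on a missing TEL_NUMBER or STORAGE_DIRECTORY even though its signature already returns an error. A sketch of the same checks that propagates the error instead (requireEnv is my helper, not from the repo):

```go
package main

import (
	"fmt"
	"log"
	"os"
)

// requireEnv returns the variable's value, or an error when it is
// unset, so the caller decides how to fail.
func requireEnv(key string) (string, error) {
	v := os.Getenv(key)
	if v == "" {
		return "", fmt.Errorf("%s environment variable must be set", key)
	}
	return v, nil
}

func main() {
	tel, err := requireEnv("TEL_NUMBER")
	if err != nil {
		log.Fatal(err)
	}
	dir, err := requireEnv("STORAGE_DIRECTORY")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("registering", tel, "with storage in", dir)
}
```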
microservice/src/vendor/github.com/hashicorp/vault/vendor/github.com/gocql/gocql/control.go
package gocql import ( "context" crand "crypto/rand" "errors" "fmt" "math/rand" "net" "os" "regexp" "strconv" "sync" "sync/atomic" "time" ) var ( randr *rand.Rand mutRandr sync.Mutex ) func init() { b := make([]byte, 4) if _, err := crand.Read(b); err != nil { panic(fmt.Sprintf("unable to seed random number generator: %v", err)) } randr = rand.New(rand.NewSource(int64(readInt(b)))) } // Ensure that the atomic variable is aligned to a 64bit boundary // so that atomic operations can be applied on 32bit architectures. type controlConn struct { started int32 reconnecting int32 session *Session conn atomic.Value retry RetryPolicy quit chan struct{} } func createControlConn(session *Session) *controlConn { control := &controlConn{ session: session, quit: make(chan struct{}), retry: &SimpleRetryPolicy{NumRetries: 3}, } control.conn.Store((*connHost)(nil)) return control } func (c *controlConn) heartBeat() { if !atomic.CompareAndSwapInt32(&c.started, 0, 1) { return } sleepTime := 1 * time.Second timer := time.NewTimer(sleepTime) defer timer.Stop() for { timer.Reset(sleepTime) select { case <-c.quit: return case <-timer.C: } resp, err := c.writeFrame(&writeOptionsFrame{}) if err != nil { goto reconn } switch resp.(type) { case *supportedFrame: // Everything ok sleepTime = 5 * time.Second continue case error: goto reconn default: panic(fmt.Sprintf("gocql: unknown frame in response to options: %T", resp)) } reconn: // try to connect a bit faster sleepTime = 1 * time.Second c.reconnect(true) continue } } var hostLookupPreferV4 = os.Getenv("GOCQL_HOST_LOOKUP_PREFER_V4") == "true" func hostInfo(addr string, defaultPort int) (*HostInfo, error) { var port int host, portStr, err := net.SplitHostPort(addr) if err != nil { host = addr port = defaultPort } else { port, err = strconv.Atoi(portStr) if err != nil { return nil, err } } ip := net.ParseIP(host) if ip == nil { ips, err := net.LookupIP(host) if err != nil { return nil, err } else if len(ips) == 0 { return nil, fmt.Errorf("No IP's returned from DNS lookup for %q", addr) } if hostLookupPreferV4 { for _, v := range ips { if v4 := v.To4(); v4 != nil { ip = v4 break } } if ip == nil { ip = ips[0] } } else { // TODO(zariel): should we check that we can connect to any of the ips? ip = ips[0] } } return &HostInfo{connectAddress: ip, port: port}, nil } func shuffleHosts(hosts []*HostInfo) []*HostInfo { mutRandr.Lock() perm := randr.Perm(len(hosts)) mutRandr.Unlock() shuffled := make([]*HostInfo, len(hosts)) for i, host := range hosts { shuffled[perm[i]] = host } return shuffled } func (c *controlConn) shuffleDial(endpoints []*HostInfo) (*Conn, error) { // shuffle endpoints so not all drivers will connect to the same initial // node. shuffled := shuffleHosts(endpoints) var err error for _, host := range shuffled { var conn *Conn conn, err = c.session.connect(host, c) if err == nil { return conn, nil } Logger.Printf("gocql: unable to dial control conn %v: %v\n", host.ConnectAddress(), err) } return nil, err } // this is going to be version dependant and a nightmare to maintain :( var protocolSupportRe = regexp.MustCompile(`the lowest supported version is \d+ and the greatest is (\d+)$`) func parseProtocolFromError(err error) int { // I really wish this had the actual info in the error frame... 
matches := protocolSupportRe.FindAllStringSubmatch(err.Error(), -1) if len(matches) != 1 || len(matches[0]) != 2 { if verr, ok := err.(*protocolError); ok { return int(verr.frame.Header().version.version()) } return 0 } max, err := strconv.Atoi(matches[0][1]) if err != nil { return 0 } return max } func (c *controlConn) discoverProtocol(hosts []*HostInfo) (int, error) { hosts = shuffleHosts(hosts) connCfg := *c.session.connCfg connCfg.ProtoVersion = 4 // TODO: define maxProtocol handler := connErrorHandlerFn(func(c *Conn, err error, closed bool) { // we should never get here, but if we do it means we connected to a // host successfully which means our attempted protocol version worked if !closed { c.Close() } }) var err error for _, host := range hosts { var conn *Conn conn, err = c.session.dial(host.ConnectAddress(), host.Port(), &connCfg, handler) if conn != nil { conn.Close() } if err == nil { return connCfg.ProtoVersion, nil } if proto := parseProtocolFromError(err); proto > 0 { return proto, nil } } return 0, err } func (c *controlConn) connect(hosts []*HostInfo) error { if len(hosts) == 0 { return errors.New("control: no endpoints specified") } conn, err := c.shuffleDial(hosts) if err != nil { return fmt.Errorf("control: unable to connect to initial hosts: %v", err) } if err := c.setupConn(conn); err != nil { conn.Close() return fmt.Errorf("control: unable to setup connection: %v", err) } // we could fetch the initial ring here and update initial host data. So that // when we return from here we have a ring topology ready to go. go c.heartBeat() return nil } type connHost struct { conn *Conn host *HostInfo } func (c *controlConn) setupConn(conn *Conn) error { if err := c.registerEvents(conn); err != nil { conn.Close() return err } // TODO(zariel): do we need to fetch host info everytime // the control conn connects? Surely we have it cached? 
host, err := conn.localHostInfo() if err != nil { return err } ch := &connHost{ conn: conn, host: host, } c.conn.Store(ch) c.session.handleNodeUp(host.ConnectAddress(), host.Port(), false) return nil } func (c *controlConn) registerEvents(conn *Conn) error { var events []string if !c.session.cfg.Events.DisableTopologyEvents { events = append(events, "TOPOLOGY_CHANGE") } if !c.session.cfg.Events.DisableNodeStatusEvents { events = append(events, "STATUS_CHANGE") } if !c.session.cfg.Events.DisableSchemaEvents { events = append(events, "SCHEMA_CHANGE") } if len(events) == 0 { return nil } framer, err := conn.exec(context.Background(), &writeRegisterFrame{ events: events, }, nil) if err != nil { return err } frame, err := framer.parseFrame() if err != nil { return err } else if _, ok := frame.(*readyFrame); !ok { return fmt.Errorf("unexpected frame in response to register: got %T: %v\n", frame, frame) } return nil } func (c *controlConn) reconnect(refreshring bool) { if !atomic.CompareAndSwapInt32(&c.reconnecting, 0, 1) { return } defer atomic.StoreInt32(&c.reconnecting, 0) // TODO: simplify this function, use session.ring to get hosts instead of the // connection pool var host *HostInfo ch := c.getConn() if ch != nil { host = ch.host ch.conn.Close() } var newConn *Conn if host != nil { // try to connect to the old host conn, err := c.session.connect(host, c) if err != nil { // host is dead // TODO: this is replicated in a few places c.session.handleNodeDown(host.ConnectAddress(), host.Port()) } else { newConn = conn } } // TODO: should have our own round-robin for hosts so that we can try each // in succession and guarantee that we get a different host each time. if newConn == nil { host := c.session.ring.rrHost() if host == nil { c.connect(c.session.ring.endpoints) return } var err error newConn, err = c.session.connect(host, c) if err != nil { // TODO: add log handler for things like this return } } if err := c.setupConn(newConn); err != nil { newConn.Close() Logger.Printf("gocql: control unable to register events: %v\n", err) return } if refreshring { c.session.hostSource.refreshRing() } } func (c *controlConn) HandleError(conn *Conn, err error, closed bool) { if !closed { return } oldConn := c.getConn() if oldConn.conn != conn { return } c.reconnect(false) } func (c *controlConn) getConn() *connHost { return c.conn.Load().(*connHost) } func (c *controlConn) writeFrame(w frameWriter) (frame, error) { ch := c.getConn() if ch == nil { return nil, errNoControl } framer, err := ch.conn.exec(context.Background(), w, nil) if err != nil { return nil, err } return framer.parseFrame() } func (c *controlConn) withConnHost(fn func(*connHost) *Iter) *Iter { const maxConnectAttempts = 5 connectAttempts := 0 for i := 0; i < maxConnectAttempts; i++ { ch := c.getConn() if ch == nil { if connectAttempts > maxConnectAttempts { break } connectAttempts++ c.reconnect(false) continue } return fn(ch) } return &Iter{err: errNoControl} } func (c *controlConn) withConn(fn func(*Conn) *Iter) *Iter { return c.withConnHost(func(ch *connHost) *Iter { return fn(ch.conn) }) } // query will return nil if the connection is closed or nil func (c *controlConn) query(statement string, values ...interface{}) (iter *Iter) { q := c.session.Query(statement, values...).Consistency(One).RoutingKey([]byte{}).Trace(nil) for { iter = c.withConn(func(conn *Conn) *Iter { return conn.executeQuery(q) }) if gocqlDebug && iter.err != nil { Logger.Printf("control: error executing %q: %v\n", statement, iter.err) } q.attempts++ if iter.err == 
nil || !c.retry.Attempt(q) { break } } return } func (c *controlConn) awaitSchemaAgreement() error { return c.withConn(func(conn *Conn) *Iter { return &Iter{err: conn.awaitSchemaAgreement()} }).err } func (c *controlConn) close() { if atomic.CompareAndSwapInt32(&c.started, 1, -1) { c.quit <- struct{}{} } ch := c.getConn() if ch != nil { ch.conn.Close() } } var errNoControl = errors.New("gocql: no control connection available")
[ "\"GOCQL_HOST_LOOKUP_PREFER_V4\"" ]
[]
[ "GOCQL_HOST_LOOKUP_PREFER_V4" ]
[]
["GOCQL_HOST_LOOKUP_PREFER_V4"]
go
1
0
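gocql's hostInfo above tolerates both "host" and "host:port" forms, defaulting the port when none is present (and GOCQL_HOST_LOOKUP_PREFER_V4 later biases DNS results toward IPv4). A standalone sketch of just that address-parsing fallback:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

// splitAddr mirrors the fallback in gocql's hostInfo: when addr carries
// no port, keep the whole string as the host and use defaultPort.
func splitAddr(addr string, defaultPort int) (string, int, error) {
	host, portStr, err := net.SplitHostPort(addr)
	if err != nil {
		return addr, defaultPort, nil // "missing port" case
	}
	port, err := strconv.Atoi(portStr)
	if err != nil {
		return "", 0, err
	}
	return host, port, nil
}

func main() {
	for _, addr := range []string{"10.0.0.1", "10.0.0.1:9043", "cassandra.local"} {
		host, port, err := splitAddr(addr, 9042)
		fmt.Println(host, port, err)
	}
}
```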
service/confirm_pay_service.go
//Package service ... /* * @Description: * @Author: congz * @Date: 2020-06-10 10:58:11 * @LastEditors: congz * @LastEditTime: 2020-10-28 13:14:20 */ package service import ( "cmall/cache" "cmall/model" "cmall/pkg/logging" "os" ) // ConfirmPayService receives the FM payment callback payload type ConfirmPayService struct { MerchantNum string `form:"merchantNum" json:"merchantNum" ` OrderNo string `form:"orderNo" json:"orderNo" ` PlatformOrderNo string `form:"platformOrderNo" json:"platformOrderNo"` Amount string `form:"amount" json:"amount" ` ActualPayAmount string `form:"actualPayAmount" json:"actualPayAmount" ` State int `form:"state" json:"state" ` Attch string `form:"attch" json:"attch" ` PayTime string `form:"payTime" json:"payTime" ` Sign string `form:"sign" json:"sign" ` } // Confirm handles the FM payment callback; see the FM payment docs for details func (service *ConfirmPayService) Confirm() { if service.Attch == os.Getenv("FM_Pay_attch") { if service.State == 1 { if err := model.DB.Model(model.Order{}).Where("order_num=?", service.OrderNo).Update("type", 2).Error; err != nil { logging.Info(err) } if err := cache.RedisClient.ZRem(os.Getenv("REDIS_ZSET_KEY"), service.OrderNo).Err(); err != nil { logging.Info(err) } } } }
[ "\"FM_Pay_attch\"", "\"REDIS_ZSET_KEY\"" ]
[]
[ "REDIS_ZSET_KEY", "FM_Pay_attch" ]
[]
["REDIS_ZSET_KEY", "FM_Pay_attch"]
go
2
0
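Confirm authenticates the callback by comparing Attch to a secret from the environment with a plain ==. For shared-secret checks like this, the usual recommendation is a constant-time comparison; a sketch of that variation using crypto/subtle (my change, not the project's code):

```go
package main

import (
	"crypto/subtle"
	"fmt"
	"os"
)

// secretMatches compares a caller-supplied token against an
// environment-held secret in constant time, avoiding the timing
// side channel of == on secret material.
func secretMatches(got, envKey string) bool {
	want := os.Getenv(envKey)
	if want == "" {
		return false // refuse when the secret is unconfigured
	}
	return subtle.ConstantTimeCompare([]byte(got), []byte(want)) == 1
}

func main() {
	fmt.Println(secretMatches("token-from-callback", "FM_Pay_attch"))
}
```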
gaia/config.py
# -*- coding: utf-8 -*- import os, sys import json, yaml import ConfigParser import gaia.base as base class DictCfgParser(ConfigParser.ConfigParser): def as_dict(self): d = dict(self._sections) for k in d: d[k] = dict(self._defaults, **d[k]) d[k].pop('__name__', None) return d if "PRJ_ENV" in os.environ: prj_env = os.environ["PRJ_ENV"] else: prj_env = "dev" cfg_path = os.path.join(base.find_path(), 'conf', prj_env) def on_production(): return prj_env == "prod" def on_stagging(): return prj_env == "stagging" def on_dev(): return prj_env == "dev" def load_config(fname): result = None try: filename, extname = os.path.splitext(fname) basename = os.path.basename(filename) fpath = os.path.join(cfg_path, fname) result = {} if extname == '.cfg' or extname == '.ini': config = DictCfgParser() config.read(fpath) result = config.as_dict() if extname == '.json': with open(fpath, 'r') as fcfg: if basename != 'app': result[basename] = json.load(fcfg) else: result = json.load(fcfg) if extname == '.yaml': with open(fpath, 'r') as fcfg: if basename != 'app': result[basename] = yaml.load(fcfg) else: result = yaml.load(fcfg) except Exception as e: pass return result def main(): try: fname = sys.argv[1] item = sys.argv[2] prop = sys.argv[3] cfg = load_config(fname) sys.stdout.write(str(cfg.get(item, prop))) sys.exit(0) except BaseException: sys.exit(-1) if __name__ == "__main__": main()
[]
[]
[ "PRJ_ENV" ]
[]
["PRJ_ENV"]
python
1
0
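gaia/config.py selects a config directory from PRJ_ENV, defaulting to "dev" when the variable is absent. Expressed in Go, to stay consistent with the other examples in this section (the conf/&lt;env&gt; layout is taken from the Python above):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// PRJ_ENV picks which conf/<env> directory is read; "dev" is the
	// default when the variable is not set, as in gaia/config.py.
	prjEnv, ok := os.LookupEnv("PRJ_ENV")
	if !ok {
		prjEnv = "dev"
	}
	cfgPath := filepath.Join("conf", prjEnv)
	fmt.Println("on production:", prjEnv == "prod")
	fmt.Println("config path:", cfgPath)
}
```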
test/functional/test_runner.py
#!/usr/bin/env python3 # Copyright (c) 2014-2017 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Run regression test suite. This module calls down into individual test cases via subprocess. It will forward all unrecognized arguments onto the individual test scripts. Functional tests are disabled on Windows by default. Use --force to run them anyway. For a description of arguments recognized by test scripts, see `test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`. """ import argparse from collections import deque import configparser import datetime import os import time import shutil import signal import sys import subprocess import tempfile import re import logging # Formatting. Default colors to empty strings. BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "") try: # Make sure python thinks it can write unicode to its stdout "\u2713".encode("utf_8").decode(sys.stdout.encoding) TICK = "✓ " CROSS = "✖ " CIRCLE = "○ " except UnicodeDecodeError: TICK = "P " CROSS = "x " CIRCLE = "o " if os.name == 'posix': # primitive formatting on supported # terminal via ANSI escape sequences: BOLD = ('\033[0m', '\033[1m') BLUE = ('\033[0m', '\033[0;34m') RED = ('\033[0m', '\033[0;31m') GREY = ('\033[0m', '\033[1;30m') TEST_EXIT_PASSED = 0 TEST_EXIT_SKIPPED = 77 BASE_SCRIPTS= [ # Scripts that are run by the travis build process. # Longest test should go first, to favor running tests in parallel 'wallet_hd.py', 'wallet_backup.py', # vv Tests less than 5m vv 'feature_block.py', 'rpc_fundrawtransaction.py', 'p2p_compactblocks.py', 'feature_segwit.py', # vv Tests less than 2m vv 'wallet_basic.py', 'wallet_accounts.py', 'p2p_segwit.py', 'wallet_dump.py', 'rpc_listtransactions.py', # vv Tests less than 60s vv 'p2p_sendheaders.py', 'wallet_zapwallettxes.py', 'wallet_importmulti.py', 'mempool_limit.py', 'rpc_txoutproof.py', 'wallet_listreceivedby.py', 'wallet_abandonconflict.py', 'feature_csv_activation.py', 'rpc_rawtransaction.py', 'wallet_address_types.py', 'feature_reindex.py', # vv Tests less than 30s vv 'wallet_keypool_topup.py', 'interface_zmq.py', 'interface_bitcoin_cli.py', 'mempool_resurrect.py', 'wallet_txn_doublespend.py --mineblock', 'wallet_txn_clone.py', 'wallet_txn_clone.py --segwit', 'rpc_getchaintips.py', 'interface_rest.py', 'mempool_spend_coinbase.py', 'mempool_reorg.py', 'mempool_persist.py', 'wallet_multiwallet.py', 'wallet_multiwallet.py --usecli', 'interface_http.py', 'rpc_users.py', 'feature_proxy.py', 'rpc_signrawtransaction.py', 'p2p_disconnect_ban.py', 'rpc_decodescript.py', 'rpc_blockchain.py', 'rpc_deprecated.py', 'wallet_disable.py', 'rpc_net.py', 'wallet_keypool.py', 'p2p_mempool.py', 'mining_prioritisetransaction.py', 'p2p_invalid_block.py', 'p2p_invalid_tx.py', 'feature_versionbits_warning.py', 'rpc_preciousblock.py', 'wallet_importprunedfunds.py', 'rpc_signmessage.py', 'feature_nulldummy.py', 'wallet_import_rescan.py', 'mining_basic.py', 'wallet_bumpfee.py', 'rpc_named_arguments.py', 'wallet_listsinceblock.py', 'p2p_leak.py', 'wallet_encryption.py', 'wallet_scriptaddress2.py', 'feature_dersig.py', 'feature_cltv.py', 'rpc_uptime.py', 'wallet_resendwallettransactions.py', 'feature_minchainwork.py', 'p2p_fingerprint.py', 'feature_uacomment.py', 'p2p_unrequested_blocks.py', 'feature_logging.py', 'p2p_node_network_limited.py', 'feature_config_args.py', 'feature_help.py', # Don't append tests at the end to avoid merge conflicts # Put 
them in a random line within the section that fits their approximate run-time ] EXTENDED_SCRIPTS = [ # These tests are not run by the travis build process. # Longest test should go first, to favor running tests in parallel 'feature_pruning.py', # vv Tests less than 20m vv 'feature_fee_estimation.py', # vv Tests less than 5m vv 'feature_maxuploadtarget.py', 'mempool_packages.py', 'feature_dbcrash.py', # vv Tests less than 2m vv 'feature_bip68_sequence.py', 'mining_getblocktemplate_longpoll.py', 'p2p_timeouts.py', # vv Tests less than 60s vv 'feature_bip9_softforks.py', 'p2p_feefilter.py', 'rpc_bind.py', # vv Tests less than 30s vv 'feature_assumevalid.py', 'example_test.py', 'wallet_txn_doublespend.py', 'wallet_txn_clone.py --mineblock', 'feature_notifications.py', 'rpc_invalidateblock.py', 'feature_rbf.py', ] # Place EXTENDED_SCRIPTS first since it has the 3 longest running tests ALL_SCRIPTS = EXTENDED_SCRIPTS + BASE_SCRIPTS NON_SCRIPTS = [ # These are python files that live in the functional tests directory, but are not test scripts. "combine_logs.py", "create_cache.py", "test_runner.py", ] def main(): # Parse arguments and pass through unrecognised args parser = argparse.ArgumentParser(add_help=False, usage='%(prog)s [test_runner.py options] [script options] [scripts]', description=__doc__, epilog=''' Help text and arguments for individual test script:''', formatter_class=argparse.RawTextHelpFormatter) parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.') parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface') parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.') parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests') parser.add_argument('--force', '-f', action='store_true', help='run tests even on platforms where they are disabled by default (e.g. windows).') parser.add_argument('--help', '-h', '-?', action='store_true', help='print help text and exit') parser.add_argument('--jobs', '-j', type=int, default=4, help='how many test scripts to run in parallel. Default=4.') parser.add_argument('--keepcache', '-k', action='store_true', help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.') parser.add_argument('--quiet', '-q', action='store_true', help='only print results summary and failure logs') parser.add_argument('--tmpdirprefix', '-t', default=tempfile.gettempdir(), help="Root directory for datadirs") args, unknown_args = parser.parse_known_args() # args to be passed on always start with two dashes; tests are the remaining unknown args tests = [arg for arg in unknown_args if arg[:2] != "--"] passon_args = [arg for arg in unknown_args if arg[:2] == "--"] # Read config generated by configure. 
config = configparser.ConfigParser() configfile = os.path.abspath(os.path.dirname(__file__)) + "/../config.ini" config.read_file(open(configfile)) passon_args.append("--configfile=%s" % configfile) # Set up logging logging_level = logging.INFO if args.quiet else logging.DEBUG logging.basicConfig(format='%(message)s', level=logging_level) # Create base test directory tmpdir = "%s/jahcoin_test_runner_%s" % (args.tmpdirprefix, datetime.datetime.now().strftime("%Y%m%d_%H%M%S")) os.makedirs(tmpdir) logging.debug("Temporary test directory at %s" % tmpdir) enable_wallet = config["components"].getboolean("ENABLE_WALLET") enable_utils = config["components"].getboolean("ENABLE_UTILS") enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND") if config["environment"]["EXEEXT"] == ".exe" and not args.force: # https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9 # https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964 print("Tests currently disabled on Windows by default. Use --force option to enable") sys.exit(0) if not (enable_wallet and enable_utils and enable_bitcoind): print("No functional tests to run. Wallet, utils, and jahcoind must all be enabled") print("Rerun `configure` with -enable-wallet, -with-utils and -with-daemon and rerun make") sys.exit(0) # Build list of tests if tests: # Individual tests have been specified. Run specified tests that exist # in the ALL_SCRIPTS list. Accept the name with or without .py extension. tests = [re.sub("\.py$", "", t) + ".py" for t in tests] test_list = [] for t in tests: if t in ALL_SCRIPTS: test_list.append(t) else: print("{}WARNING!{} Test '{}' not found in full test list.".format(BOLD[1], BOLD[0], t)) else: # No individual tests have been specified. # Run all base tests, and optionally run extended tests. test_list = BASE_SCRIPTS if args.extended: # place the EXTENDED_SCRIPTS first since the three longest ones # are there and the list is shorter test_list = EXTENDED_SCRIPTS + test_list # Remove the test cases that the user has explicitly asked to exclude. if args.exclude: tests_excl = [re.sub("\.py$", "", t) + ".py" for t in args.exclude.split(',')] for exclude_test in tests_excl: if exclude_test in test_list: test_list.remove(exclude_test) else: print("{}WARNING!{} Test '{}' not found in current test list.".format(BOLD[1], BOLD[0], exclude_test)) if not test_list: print("No valid test scripts specified. Check that your test is in one " "of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests") sys.exit(0) if args.help: # Print help for test_runner.py, then print help of the first script (with args removed) and exit. parser.print_help() subprocess.check_call([(config["environment"]["SRCDIR"] + '/test/functional/' + test_list[0].split()[0])] + ['-h']) sys.exit(0) check_script_list(config["environment"]["SRCDIR"]) check_script_prefixes() if not args.keepcache: shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True) run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen) def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], combined_logs_len=0): # Warn if bitcoind is already running (unix only) try: if subprocess.check_output(["pidof", "jahcoind"]) is not None: print("%sWARNING!%s There is already a jahcoind process running on this system. 
Tests may fail unexpectedly due to resource contention!" % (BOLD[1], BOLD[0])) except (OSError, subprocess.SubprocessError): pass # Warn if there is a cache directory cache_dir = "%s/test/cache" % build_dir if os.path.isdir(cache_dir): print("%sWARNING!%s There is a cache directory here: %s. If tests fail unexpectedly, try deleting the cache directory." % (BOLD[1], BOLD[0], cache_dir)) #Set env vars if "LITECOIND" not in os.environ: os.environ["LITECOIND"] = build_dir + '/src/jahcoind' + exeext os.environ["LITECOINCLI"] = build_dir + '/src/jahcoin-cli' + exeext tests_dir = src_dir + '/test/functional/' flags = ["--srcdir={}/src".format(build_dir)] + args flags.append("--cachedir=%s" % cache_dir) if enable_coverage: coverage = RPCCoverage() flags.append(coverage.flag) logging.debug("Initializing coverage directory at %s" % coverage.dir) else: coverage = None if len(test_list) > 1 and jobs > 1: # Populate cache try: subprocess.check_output([tests_dir + 'create_cache.py'] + flags + ["--tmpdir=%s/cache" % tmpdir]) except subprocess.CalledProcessError as e: sys.stdout.buffer.write(e.output) raise #Run Tests job_queue = TestHandler(jobs, tests_dir, tmpdir, test_list, flags) time0 = time.time() test_results = [] max_len_name = len(max(test_list, key=len)) for _ in range(len(test_list)): test_result, testdir, stdout, stderr = job_queue.get_next() test_results.append(test_result) if test_result.status == "Passed": logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time)) elif test_result.status == "Skipped": logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0])) else: print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time)) print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n') print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n') if combined_logs_len and os.path.isdir(testdir): # Print the final `combinedlogslen` lines of the combined logs print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0])) print('\n============') print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0])) print('============\n') combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate() print("\n".join(deque(combined_logs.splitlines(), combined_logs_len))) print_results(test_results, max_len_name, (int(time.time() - time0))) if coverage: coverage.report_rpc_coverage() logging.debug("Cleaning up coverage data") coverage.cleanup() # Clear up the temp directory if all subdirectories are gone if not os.listdir(tmpdir): os.rmdir(tmpdir) all_passed = all(map(lambda test_result: test_result.was_successful, test_results)) sys.exit(not all_passed) def print_results(test_results, max_len_name, runtime): results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0] test_results.sort(key=lambda result: result.name.lower()) all_passed = True time_sum = 0 for test_result in test_results: all_passed = all_passed and test_result.was_successful time_sum += test_result.time test_result.padding = max_len_name results += str(test_result) status = TICK + "Passed" if all_passed else CROSS + "Failed" results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0] results += "Runtime: %s s\n" % (runtime) print(results) class TestHandler: """ Trigger the test scripts passed in via 
the list. """ def __init__(self, num_tests_parallel, tests_dir, tmpdir, test_list=None, flags=None): assert(num_tests_parallel >= 1) self.num_jobs = num_tests_parallel self.tests_dir = tests_dir self.tmpdir = tmpdir self.test_list = test_list self.flags = flags self.num_running = 0 # In case there is a graveyard of zombie bitcoinds, we can apply a # pseudorandom offset to hopefully jump over them. # (625 is PORT_RANGE/MAX_NODES) self.portseed_offset = int(time.time() * 1000) % 625 self.jobs = [] def get_next(self): while self.num_running < self.num_jobs and self.test_list: # Add tests self.num_running += 1 t = self.test_list.pop(0) portseed = len(self.test_list) + self.portseed_offset portseed_arg = ["--portseed={}".format(portseed)] log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16) log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16) test_argv = t.split() testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed) tmpdir_arg = ["--tmpdir={}".format(testdir)] self.jobs.append((t, time.time(), subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg, universal_newlines=True, stdout=log_stdout, stderr=log_stderr), testdir, log_stdout, log_stderr)) if not self.jobs: raise IndexError('pop from empty list') while True: # Return first proc that finishes time.sleep(.5) for j in self.jobs: (name, time0, proc, testdir, log_out, log_err) = j if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60: # In travis, timeout individual tests after 20 minutes (to stop tests hanging and not # providing useful output. proc.send_signal(signal.SIGINT) if proc.poll() is not None: log_out.seek(0), log_err.seek(0) [stdout, stderr] = [l.read().decode('utf-8') for l in (log_out, log_err)] log_out.close(), log_err.close() if proc.returncode == TEST_EXIT_PASSED and stderr == "": status = "Passed" elif proc.returncode == TEST_EXIT_SKIPPED: status = "Skipped" else: status = "Failed" self.num_running -= 1 self.jobs.remove(j) return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr print('.', end='', flush=True) class TestResult(): def __init__(self, name, status, time): self.name = name self.status = status self.time = time self.padding = 0 def __repr__(self): if self.status == "Passed": color = BLUE glyph = TICK elif self.status == "Failed": color = RED glyph = CROSS elif self.status == "Skipped": color = GREY glyph = CIRCLE return color[1] + "%s | %s%s | %s s\n" % (self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0] @property def was_successful(self): return self.status != "Failed" def check_script_prefixes(): """Check that at most a handful of the test scripts don't start with one of the allowed name prefixes.""" # LEEWAY is provided as a transition measure, so that pull-requests # that introduce new tests that don't conform with the naming # convention don't immediately cause the tests to fail. LEEWAY = 10 good_prefixes_re = re.compile("(example|feature|interface|mempool|mining|p2p|rpc|wallet)_") bad_script_names = [script for script in ALL_SCRIPTS if good_prefixes_re.match(script) is None] if len(bad_script_names) > 0: print("INFO: %d tests not meeting naming conventions:" % (len(bad_script_names))) print(" %s" % ("\n ".join(sorted(bad_script_names)))) assert len(bad_script_names) <= LEEWAY, "Too many tests not following naming convention! (%d found, maximum: %d)" % (len(bad_script_names), LEEWAY) def check_script_list(src_dir): """Check scripts directory. 
Check that there are no scripts in the functional tests directory which are not being run by pull-tester.py.""" script_dir = src_dir + '/test/functional/' python_files = set([t for t in os.listdir(script_dir) if t[-3:] == ".py"]) missed_tests = list(python_files - set(map(lambda x: x.split()[0], ALL_SCRIPTS + NON_SCRIPTS))) if len(missed_tests) != 0: print("%sWARNING!%s The following scripts are not being run: %s. Check the test lists in test_runner.py." % (BOLD[1], BOLD[0], str(missed_tests))) if os.getenv('TRAVIS') == 'true': # On travis this warning is an error to prevent merging incomplete commits into master sys.exit(1) class RPCCoverage(): """ Coverage reporting utilities for test_runner. Coverage calculation works by having each test script subprocess write coverage files into a particular directory. These files contain the RPC commands invoked during testing, as well as a complete listing of RPC commands per `jahcoin-cli help` (`rpc_interface.txt`). After all tests complete, the commands run are combined and diff'd against the complete list to calculate uncovered RPC commands. See also: test/functional/test_framework/coverage.py """ def __init__(self): self.dir = tempfile.mkdtemp(prefix="coverage") self.flag = '--coveragedir=%s' % self.dir def report_rpc_coverage(self): """ Print out RPC commands that were unexercised by tests. """ uncovered = self._get_uncovered_rpc_commands() if uncovered: print("Uncovered RPC commands:") print("".join((" - %s\n" % i) for i in sorted(uncovered))) else: print("All RPC commands covered.") def cleanup(self): return shutil.rmtree(self.dir) def _get_uncovered_rpc_commands(self): """ Return a set of currently untested RPC commands. """ # This is shared from `test/functional/test-framework/coverage.py` reference_filename = 'rpc_interface.txt' coverage_file_prefix = 'coverage.' coverage_ref_filename = os.path.join(self.dir, reference_filename) coverage_filenames = set() all_cmds = set() covered_cmds = set() if not os.path.isfile(coverage_ref_filename): raise RuntimeError("No coverage reference found") with open(coverage_ref_filename, 'r') as f: all_cmds.update([i.strip() for i in f.readlines()]) for root, dirs, files in os.walk(self.dir): for filename in files: if filename.startswith(coverage_file_prefix): coverage_filenames.add(os.path.join(root, filename)) for filename in coverage_filenames: with open(filename, 'r') as f: covered_cmds.update([i.strip() for i in f.readlines()]) return all_cmds - covered_cmds if __name__ == '__main__': main()
[]
[]
[ "LITECOIND", "LITECOINCLI", "TRAVIS" ]
[]
["LITECOIND", "LITECOINCLI", "TRAVIS"]
python
3
0
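run_tests above exports LITECOIND/LITECOINCLI only when LITECOIND is not already in the environment, so a caller's override wins. The same set-if-absent idiom, sketched in Go for consistency with the rest of this section (the path is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

// setenvDefault exports key=value only when the variable is not already
// set, mirroring the `if "LITECOIND" not in os.environ` guard above.
func setenvDefault(key, value string) error {
	if _, ok := os.LookupEnv(key); ok {
		return nil // respect the caller's override
	}
	return os.Setenv(key, value)
}

func main() {
	setenvDefault("LITECOIND", "/build/src/jahcoind")
	fmt.Println(os.Getenv("LITECOIND"))
}
```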
example/main.go
package main import ( "fmt" "net/http" "os" "text/template" "github.com/codegangsta/negroni" "github.com/gophergala2016/thunderbird" "github.com/gorilla/mux" ) var homeTempl = template.Must(template.ParseFiles("home.html")) func serveHome(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/html; charset=utf-8") scheme := "ws" if os.Getenv("GO_ENV") == "production" { scheme = "wss" } url := fmt.Sprintf("%s://%s/ws", scheme, r.Host) homeTempl.Execute(w, url) } type RoomChannel struct { tb *thunderbird.Thunderbird } func (rc *RoomChannel) Received(event thunderbird.Event) { switch event.Type { case "message": rc.tb.Broadcast(event.Channel, event.Body) } } func main() { tb := thunderbird.New() ch := &RoomChannel{tb} tb.HandleChannel("room", ch) router := mux.NewRouter() router.HandleFunc("/", serveHome).Methods("GET") router.Handle("/ws", tb.HTTPHandler()) n := negroni.New( negroni.NewRecovery(), negroni.NewLogger(), negroni.NewStatic(http.Dir("../client/lib")), // serve thunderbird.js negroni.NewStatic(http.Dir("public")), // serve other assets ) n.UseHandler(router) n.Run(":" + os.Getenv("PORT")) }
[ "\"GO_ENV\"", "\"PORT\"" ]
[]
[ "PORT", "GO_ENV" ]
[]
["PORT", "GO_ENV"]
go
2
0
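serveHome switches the WebSocket scheme to wss when GO_ENV is "production", and the server binds to PORT. A condensed sketch of that URL construction (host value is illustrative):

```go
package main

import (
	"fmt"
	"os"
)

// wsURL builds the client-facing WebSocket URL: plain ws in
// development, wss when GO_ENV marks a TLS-fronted production deploy.
func wsURL(host string) string {
	scheme := "ws"
	if os.Getenv("GO_ENV") == "production" {
		scheme = "wss"
	}
	return fmt.Sprintf("%s://%s/ws", scheme, host)
}

func main() {
	fmt.Println(wsURL("example.com:" + os.Getenv("PORT")))
}
```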
web_page_replay_go/src/httparchive.go
// Copyright 2017 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Program httparchive prints information about archives saved by record. package main import ( "bufio" "bytes" "fmt" "io" "io/ioutil" "net/http" "os" "os/exec" "path/filepath" "strings" "github.com/catapult-project/catapult/web_page_replay_go/src/webpagereplay" "github.com/urfave/cli/v2" ) const usage = "%s [ls|cat|edit|merge|add|addAll] [options] archive_file [output_file] [url]" type Config struct { method, host, fullPath string decodeResponseBody, skipExisting, overwriteExisting bool } func (cfg *Config) DefaultFlags() []cli.Flag { return []cli.Flag{ &cli.StringFlag{ Name: "command", Value: "", Usage: "Only show URLs matching this HTTP method.", Destination: &cfg.method, }, &cli.StringFlag{ Name: "host", Value: "", Usage: "Only show URLs matching this host.", Destination: &cfg.host, }, &cli.StringFlag{ Name: "full_path", Value: "", Usage: "Only show URLs matching this full path.", Destination: &cfg.fullPath, }, &cli.BoolFlag{ Name: "decode_response_body", Usage: "Decode/encode response body according to Content-Encoding header.", Destination: &cfg.decodeResponseBody, }, } } func (cfg *Config) AddFlags() []cli.Flag { return []cli.Flag{ &cli.BoolFlag{ Name: "skip-existing", Usage: "Skip over existing urls in the archive", Destination: &cfg.skipExisting, }, &cli.BoolFlag{ Name: "overwrite-existing", Usage: "Overwrite existing urls in the archive", Destination: &cfg.overwriteExisting, }, } } func (cfg *Config) requestEnabled(req *http.Request) bool { if cfg.method != "" && strings.ToUpper(cfg.method) != req.Method { return false } if cfg.host != "" && cfg.host != req.Host { return false } if cfg.fullPath != "" && cfg.fullPath != req.URL.Path { return false } return true } func list(cfg *Config, a *webpagereplay.Archive, printFull bool) error { return a.ForEach(func(req *http.Request, resp *http.Response) error { if !cfg.requestEnabled(req) { return nil } if printFull { fmt.Fprint(os.Stdout, "----------------------------------------\n") req.Write(os.Stdout) fmt.Fprint(os.Stdout, "\n") err := webpagereplay.DecompressResponse(resp) if err != nil { return fmt.Errorf("Unable to decompress body:\n%v", err) } resp.Write(os.Stdout) fmt.Fprint(os.Stdout, "\n") } else { fmt.Fprintf(os.Stdout, "%s %s %s\n", req.Method, req.Host, req.URL) } return nil }) } func trim(cfg *Config, a *webpagereplay.Archive, outfile string) error { newA, err := a.Trim(func(req *http.Request) (bool, error) { if !cfg.requestEnabled(req) { fmt.Printf("Keeping request: host=%s uri=%s\n", req.Host, req.URL.String()) return false, nil } else { fmt.Printf("Trimming request: host=%s uri=%s\n", req.Host, req.URL.String()) return true, nil } }) if err != nil { return fmt.Errorf("error editing archive:\n%v", err) } return writeArchive(newA, outfile) } func edit(cfg *Config, a *webpagereplay.Archive, outfile string) error { editor := os.Getenv("EDITOR") if editor == "" { fmt.Printf("Warning: EDITOR not specified, using default.\n") editor = "vi" } marshalForEdit := func(w io.Writer, req *http.Request, resp *http.Response) error { if err := req.Write(w); err != nil { return err } if cfg.decodeResponseBody { if err := webpagereplay.DecompressResponse(resp); err != nil { return fmt.Errorf("couldn't decompress body: %v", err) } } return resp.Write(w) } unmarshalAfterEdit := func(r io.Reader) (*http.Request, *http.Response, error) { br := bufio.NewReader(r) req, err := 
http.ReadRequest(br) if err != nil { return nil, nil, fmt.Errorf("couldn't unmarshal request: %v", err) } resp, err := http.ReadResponse(br, req) if err != nil { if req.Body != nil { req.Body.Close() } return nil, nil, fmt.Errorf("couldn't unmarshal response: %v", err) } if cfg.decodeResponseBody { // Compress body back according to Content-Encoding if err := compressResponse(resp); err != nil { return nil, nil, fmt.Errorf("couldn't compress response: %v", err) } } // Read resp.Body into a buffer since the tmpfile is about to be deleted. body, err := ioutil.ReadAll(resp.Body) resp.Body.Close() if err != nil { return nil, nil, fmt.Errorf("couldn't unmarshal response body: %v", err) } resp.Body = ioutil.NopCloser(bytes.NewReader(body)) return req, resp, nil } newA, err := a.Edit(func(req *http.Request, resp *http.Response) (*http.Request, *http.Response, error) { if !cfg.requestEnabled(req) { return req, resp, nil } fmt.Printf("Editing request: host=%s uri=%s\n", req.Host, req.URL.String()) // Serialize the req/resp to a temporary file, let the user edit that file, then // de-serialize and return the result. Repeat until de-serialization succeeds. for { tmpf, err := ioutil.TempFile("", "httparchive_edit_request") if err != nil { return nil, nil, err } tmpname := tmpf.Name() defer os.Remove(tmpname) if err := marshalForEdit(tmpf, req, resp); err != nil { tmpf.Close() return nil, nil, err } if err := tmpf.Close(); err != nil { return nil, nil, err } // Edit this file. cmd := exec.Command(editor, tmpname) cmd.Stdin = os.Stdin cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr if err := cmd.Run(); err != nil { return nil, nil, fmt.Errorf("Error running %s %s: %v", editor, tmpname, err) } // Reload. tmpf, err = os.Open(tmpname) if err != nil { return nil, nil, err } defer tmpf.Close() newReq, newResp, err := unmarshalAfterEdit(tmpf) if err != nil { fmt.Printf("Error in editing request. 
Try again: %v\n", err) continue } return newReq, newResp, nil } }) if err != nil { return fmt.Errorf("error editing archive:\n%v", err) } return writeArchive(newA, outfile) } func writeArchive(archive *webpagereplay.Archive, outfile string) error { outf, err := os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, os.FileMode(0660)) if err != nil { return fmt.Errorf("error opening output file %s:\n%v", outfile, err) } err0 := archive.Serialize(outf) err1 := outf.Close() if err0 != nil || err1 != nil { if err0 == nil { err0 = err1 } return fmt.Errorf("error writing edited archive to %s:\n%v", outfile, err0) } fmt.Printf("Wrote edited archive to %s\n", outfile) return nil } func merge(cfg *Config, archive *webpagereplay.Archive, input *webpagereplay.Archive, outfile string) error { if err := archive.Merge(input); err != nil { return fmt.Errorf("Merge archives failed: %v", err) } return writeArchive(archive, outfile) } func addUrl(cfg *Config, archive *webpagereplay.Archive, urlString string) error { addMode := webpagereplay.AddModeAppend if cfg.skipExisting { addMode = webpagereplay.AddModeSkipExisting } else if cfg.overwriteExisting { addMode = webpagereplay.AddModeOverwriteExisting } if err := archive.Add("GET", urlString, addMode); err != nil { return fmt.Errorf("Error adding request: %v", err) } return nil } func add(cfg *Config, archive *webpagereplay.Archive, outfile string, urls []string) error { for _, urlString := range urls { if err := addUrl(cfg, archive, urlString); err != nil { return err } } return writeArchive(archive, outfile) } func addAll(cfg *Config, archive *webpagereplay.Archive, outfile string, inputFilePath string) error { f, err := os.OpenFile(inputFilePath, os.O_RDONLY, os.ModePerm) if err != nil { return fmt.Errorf("open file error: %v", err) } defer f.Close() sc := bufio.NewScanner(f) for sc.Scan() { urlString := sc.Text() // GET the line string if err := addUrl(cfg, archive, urlString); err != nil { return err } } if err := sc.Err(); err != nil { return fmt.Errorf("scan file error: %v", err) } return writeArchive(archive, outfile) } // compressResponse compresses resp.Body in place according to resp's Content-Encoding header. 
func compressResponse(resp *http.Response) error { ce := strings.ToLower(resp.Header.Get("Content-Encoding")) if ce == "" { return nil } body, err := ioutil.ReadAll(resp.Body) if err != nil { return err } resp.Body.Close() body, newCE, err := webpagereplay.CompressBody(ce, body) if err != nil { return err } if ce != newCE { return fmt.Errorf("can't compress body to '%s' received Content-Encoding: '%s'", ce, newCE) } resp.Body = ioutil.NopCloser(bytes.NewReader(body)) return nil } func main() { progName := filepath.Base(os.Args[0]) cfg := &Config{} fail := func(c *cli.Context, err error) { fmt.Fprintf(os.Stderr, "Error:\n%v.\n\n", err) cli.ShowSubcommandHelp(c) os.Exit(1) } checkArgs := func(cmdName string, wantArgs int) func(*cli.Context) error { return func(c *cli.Context) error { if c.Args().Len() != wantArgs { return fmt.Errorf("Expected %d arguments but got %d", wantArgs, c.Args().Len()) } return nil } } loadArchiveOrDie := func(c *cli.Context, arg int) *webpagereplay.Archive { archive, err := webpagereplay.OpenArchive(c.Args().Get(arg)) if err != nil { fail(c, err) } return archive } app := cli.NewApp() app.Commands = []*cli.Command{ &cli.Command{ Name: "ls", Usage: "List the requests in an archive", ArgsUsage: "archive", Flags: cfg.DefaultFlags(), Before: checkArgs("ls", 1), Action: func(c *cli.Context) error { return list(cfg, loadArchiveOrDie(c, 0), false) }, }, &cli.Command{ Name: "cat", Usage: "Dump the requests/responses in an archive", ArgsUsage: "archive", Flags: cfg.DefaultFlags(), Before: checkArgs("cat", 1), Action: func(c *cli.Context) error { return list(cfg, loadArchiveOrDie(c, 0), true) }, }, &cli.Command{ Name: "edit", Usage: "Edit the requests/responses in an archive", ArgsUsage: "input_archive output_archive", Flags: cfg.DefaultFlags(), Before: checkArgs("edit", 2), Action: func(c *cli.Context) error { return edit(cfg, loadArchiveOrDie(c, 0), c.Args().Get(1)) }, }, &cli.Command{ Name: "merge", Usage: "Merge the requests/responses of two archives", ArgsUsage: "base_archive input_archive output_archive", Before: checkArgs("merge", 3), Action: func(c *cli.Context) error { return merge(cfg, loadArchiveOrDie(c, 0), loadArchiveOrDie(c, 1), c.Args().Get(2)) }, }, &cli.Command{ Name: "add", Usage: "Add a simple GET request from the network to the archive", ArgsUsage: "input_archive output_archive [urls...]", Flags: cfg.AddFlags(), Before: func(c *cli.Context) error { if c.Args().Len() < 3 { return fmt.Errorf("Expected at least 3 arguments but got %d", c.Args().Len()) } return nil }, Action: func(c *cli.Context) error { return add(cfg, loadArchiveOrDie(c, 0), c.Args().Get(1), c.Args().Tail()) }, }, &cli.Command{ Name: "addAll", Usage: "Add a simple GET request from the network to the archive for each URL in a file", ArgsUsage: "input_archive output_archive urls_file", Flags: cfg.AddFlags(), Before: checkArgs("addAll", 3), Action: func(c *cli.Context) error { return addAll(cfg, loadArchiveOrDie(c, 0), c.Args().Get(1), c.Args().Get(2)) }, }, &cli.Command{ Name: "trim", Usage: "Trim the requests/responses in an archive", ArgsUsage: "input_archive output_archive", Flags: cfg.DefaultFlags(), Before: checkArgs("trim", 2), Action: func(c *cli.Context) error { return trim(cfg, loadArchiveOrDie(c, 0), c.Args().Get(1)) }, }, } app.Usage = "HTTP Archive Utils" app.UsageText = fmt.Sprintf(usage, progName) app.HideVersion = true app.Version = "" app.Writer = os.Stderr err := app.Run(os.Args) if err != nil { fmt.Printf("%v\n", err) os.Exit(1) } }
[ "\"EDITOR\"" ]
[]
[ "EDITOR" ]
[]
["EDITOR"]
go
1
0
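The edit subcommand above resolves its editor from the EDITOR environment variable and falls back to vi when it is unset. A minimal standalone sketch of that lookup-with-fallback pattern (the helper name editorOrDefault is ours, not part of httparchive.go):

package main

import (
	"fmt"
	"os"
)

// editorOrDefault returns $EDITOR, or the given fallback when the
// variable is unset or empty.
func editorOrDefault(fallback string) string {
	if editor := os.Getenv("EDITOR"); editor != "" {
		return editor
	}
	return fallback
}

func main() {
	fmt.Println(editorOrDefault("vi")) // prints "vi" unless EDITOR is set
}

Using os.Getenv keeps the happy path short; os.LookupEnv would additionally distinguish an empty value from an unset one.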
awesomeProject/myawesomems.go
package main import ( "log" "os" ) var configurations RegistrationVariables func init() { registryType := os.Getenv("REGISTRY_TYPE") if registryType == "" { log.Fatal("REGISTRY_TYPE not set. Exiting application") } serviceRegistryURL := os.Getenv("REGISTRY_URL") if serviceRegistryURL == "" { log.Fatal("REGISTRY_URL not set. Exiting application") } userName := os.Getenv("REGISTRY_USER") if userName == "" { log.Print("REGISTRY_USER not set. Proceeding without user name") } password := os.Getenv("REGISTRY_PASSWORD") if password == "" { log.Print("REGISTRY_PASSWORD not set. Proceeding without password") } configurations = RegistrationVariables{registryType, serviceRegistryURL, userName, password} } func main() { log.Print("registryType : " + configurations.RegistryType()) log.Print("serviceRegistryURL : " + configurations.ServiceRegistryURL()) log.Print("userName : " + configurations.UserName()) log.Print("password : " + configurations.Password()) ManageDiscovery(configurations) }
[ "\"REGISTRY_TYPE\"", "\"REGISTRY_URL\"", "\"REGISTRY_USER\"", "\"REGISTRY_PASSWORD\"" ]
[]
[ "REGISTRY_URL", "REGISTRY_USER", "REGISTRY_PASSWORD", "REGISTRY_TYPE" ]
[]
["REGISTRY_URL", "REGISTRY_USER", "REGISTRY_PASSWORD", "REGISTRY_TYPE"]
go
4
0
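The init function above treats REGISTRY_TYPE and REGISTRY_URL as mandatory (log.Fatal) and the credentials as optional (log.Print); note that main then logs the password on startup. A sketch that factors the two lookup modes into helpers and avoids echoing secrets (requireEnv and optionalEnv are our names, not part of the service):

package main

import (
	"log"
	"os"
)

// requireEnv exits the process when a mandatory variable is missing.
func requireEnv(key string) string {
	v := os.Getenv(key)
	if v == "" {
		log.Fatalf("%s not set. Exiting application", key)
	}
	return v
}

// optionalEnv warns but continues when an optional variable is missing.
func optionalEnv(key string) string {
	v := os.Getenv(key)
	if v == "" {
		log.Printf("%s not set. Proceeding without it", key)
	}
	return v
}

func main() {
	registryType := requireEnv("REGISTRY_TYPE")
	registryURL := requireEnv("REGISTRY_URL")
	user := optionalEnv("REGISTRY_USER")
	_ = optionalEnv("REGISTRY_PASSWORD") // read, but never logged
	log.Printf("type=%s url=%s user=%s", registryType, registryURL, user)
}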
test/echo_test_server_test.py
# -*- coding: utf-8 -*- import unittest import os # noqa: F401 import json # noqa: F401 import time from os import environ from ConfigParser import ConfigParser # py2 from pprint import pprint # noqa: F401 from biokbase.workspace.client import Workspace as workspaceService from echo_test.echo_testImpl import echo_test from echo_test.echo_testServer import MethodContext from echo_test.authclient import KBaseAuth as _KBaseAuth class echo_testTest(unittest.TestCase): @classmethod def setUpClass(cls): token = environ.get('KB_AUTH_TOKEN', None) config_file = environ.get('KB_DEPLOYMENT_CONFIG', None) cls.cfg = {} config = ConfigParser() config.read(config_file) for nameval in config.items('echo_test'): cls.cfg[nameval[0]] = nameval[1] # Getting username from Auth profile for token authServiceUrl = cls.cfg['auth-service-url'] auth_client = _KBaseAuth(authServiceUrl) user_id = auth_client.get_user(token) # WARNING: don't call any logging methods on the context object, # it'll result in a NoneType error cls.ctx = MethodContext(None) cls.ctx.update({'token': token, 'user_id': user_id, 'provenance': [ {'service': 'echo_test', 'method': 'please_never_use_it_in_production', 'method_params': [] }], 'authenticated': 1}) cls.wsURL = cls.cfg['workspace-url'] cls.wsClient = workspaceService(cls.wsURL) cls.serviceImpl = echo_test(cls.cfg) cls.scratch = cls.cfg['scratch'] cls.callback_url = os.environ['SDK_CALLBACK_URL'] @classmethod def tearDownClass(cls): if hasattr(cls, 'wsName'): cls.wsClient.delete_workspace({'workspace': cls.wsName}) print('Test workspace was deleted') def getWsClient(self): return self.__class__.wsClient def getWsName(self): if hasattr(self.__class__, 'wsName'): return self.__class__.wsName suffix = int(time.time() * 1000) wsName = "test_echo_test_" + str(suffix) ret = self.getWsClient().create_workspace({'workspace': wsName}) # noqa self.__class__.wsName = wsName return wsName def getImpl(self): return self.__class__.serviceImpl def getContext(self): return self.__class__.ctx def test_echo(self): result = self.getImpl().echo(self.getContext(), { 'message': 'xyzxyz', 'workspace_name': self.getWsName() }) self.assertEqual(result[0]['report_name'], 'echo_response') self.assertEqual(result[0]['message'], 'xyzxyz') def test_echo_fail(self): with self.assertRaises(ValueError): self.getImpl().echo_fail(self.getContext(), { 'message': 'xyz', 'workspace_name': self.getWsName() })
[]
[]
[ "SDK_CALLBACK_URL" ]
[]
["SDK_CALLBACK_URL"]
python
1
0
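The Python setup above mixes two lookup styles: environ.get('KB_AUTH_TOKEN', None) tolerates a missing variable, while os.environ['SDK_CALLBACK_URL'] raises KeyError when unset. The same distinction rendered as a Go sketch, since Go is the language of the other entries here (os.LookupEnv reports presence explicitly):

package main

import (
	"fmt"
	"os"
)

func main() {
	// Optional lookup: analogous to environ.get('KB_AUTH_TOKEN', None).
	if token, ok := os.LookupEnv("KB_AUTH_TOKEN"); ok {
		fmt.Println("token present, length:", len(token))
	} else {
		fmt.Println("KB_AUTH_TOKEN not set; continuing without a token")
	}

	// Required lookup: analogous to os.environ['SDK_CALLBACK_URL'],
	// which raises KeyError when the variable is absent.
	callback, ok := os.LookupEnv("SDK_CALLBACK_URL")
	if !ok {
		panic("SDK_CALLBACK_URL not set")
	}
	fmt.Println("callback URL:", callback)
}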
tests/api/v2/datadog/api_dashboard_lists_test.go
/* * Unless explicitly stated otherwise all files in this repository are licensed under the Apache-2.0 License. * This product includes software developed at Datadog (https://www.datadoghq.com/). * Copyright 2019-Present Datadog, Inc. */ package test import ( "context" "fmt" "os" "testing" datadogV1 "github.com/DataDog/datadog-api-client-go/api/v1/datadog" "github.com/DataDog/datadog-api-client-go/api/v2/datadog" "github.com/DataDog/datadog-api-client-go/tests" ) const ( integrationTimeboardID string = "1" customTimeboardID string = "q5j-nti-fv6" customScreenboardID string = "4n7-s4g-dqv" ) var ( dashboardListID int64 testAuthV1 context.Context testAPIClientV1 *datadogV1.APIClient ) func initializeClientV1(ctx context.Context) { testAuthV1 = context.WithValue( context.Background(), datadogV1.ContextAPIKeys, map[string]datadogV1.APIKey{ "apiKeyAuth": { Key: os.Getenv("DD_TEST_CLIENT_API_KEY"), }, "appKeyAuth": { Key: os.Getenv("DD_TEST_CLIENT_APP_KEY"), }, }, ) config := datadogV1.NewConfiguration() config.Debug = os.Getenv("DEBUG") == "true" config.HTTPClient = Client(ctx).GetConfig().HTTPClient testAPIClientV1 = datadogV1.NewAPIClient(config) } func createDashboardList(ctx context.Context, t *testing.T) error { initializeClientV1(ctx) res, httpresp, err := testAPIClientV1.DashboardListsApi.CreateDashboardList(testAuthV1). Body(datadogV1.DashboardList{Name: *tests.UniqueEntityName(ctx, t)}). Execute() if err != nil || httpresp.StatusCode != 200 { return fmt.Errorf("error creating dashboard list: %v", err.(datadogV1.GenericOpenAPIError).Body()) } dashboardListID = res.GetId() return nil } func deleteDashboardList() { testAPIClientV1.DashboardListsApi.DeleteDashboardList(testAuthV1, dashboardListID).Execute() } func TestDashboardListItemCRUD(t *testing.T) { ctx, finish := tests.WithTestSpan(context.Background(), t) defer finish() ctx, finish = WithRecorder(WithTestAuth(ctx), t) defer finish() assert := tests.Assert(ctx, t) err := createDashboardList(ctx, t) defer deleteDashboardList() if err != nil { t.Fatal(err) } integrationTimeboard := datadog.NewDashboardListItemRequest(integrationTimeboardID, datadog.DASHBOARDTYPE_INTEGRATION_TIMEBOARD) customTimeboard := datadog.NewDashboardListItemRequest(customTimeboardID, datadog.DASHBOARDTYPE_CUSTOM_TIMEBOARD) customScreenboard := datadog.NewDashboardListItemRequest(customScreenboardID, datadog.DASHBOARDTYPE_CUSTOM_SCREENBOARD) dashboards := []datadog.DashboardListItemRequest{ *integrationTimeboard, *customTimeboard, *customScreenboard, } integrationTimeboardResponse := datadog.NewDashboardListItemResponse(integrationTimeboardID, datadog.DASHBOARDTYPE_INTEGRATION_TIMEBOARD) customTimeboardResponse := datadog.NewDashboardListItemResponse(customTimeboardID, datadog.DASHBOARDTYPE_CUSTOM_TIMEBOARD) customScreenboardResponse := datadog.NewDashboardListItemResponse(customScreenboardID, datadog.DASHBOARDTYPE_CUSTOM_SCREENBOARD) addRequest := datadog.NewDashboardListAddItemsRequest() addRequest.SetDashboards(dashboards) addResponse, httpresp, err := Client(ctx).DashboardListsApi.CreateDashboardListItems(ctx, dashboardListID).Body(*addRequest).Execute() if err != nil { t.Fatalf("error adding items to dashboard list %d: Response %s: %v", dashboardListID, err.(datadog.GenericOpenAPIError).Body(), err) } assert.Equal(200, httpresp.StatusCode) assert.Equal(3, len(addResponse.GetAddedDashboardsToList())) assert.Contains(addResponse.GetAddedDashboardsToList(), *integrationTimeboardResponse) assert.Contains(addResponse.GetAddedDashboardsToList(), 
*customTimeboardResponse) assert.Contains(addResponse.GetAddedDashboardsToList(), *customScreenboardResponse) deleteRequest := datadog.NewDashboardListDeleteItemsRequest() deleteRequest.SetDashboards(dashboards) deleteResponse, httpresp, err := Client(ctx).DashboardListsApi.DeleteDashboardListItems(ctx, dashboardListID).Body(*deleteRequest).Execute() if err != nil { t.Fatalf("error deleting items from dashboard list %d: Response %s: %v", dashboardListID, err.(datadog.GenericOpenAPIError).Body(), err) } assert.Equal(200, httpresp.StatusCode) assert.Equal(3, len(deleteResponse.GetDeletedDashboardsFromList())) assert.Contains(deleteResponse.GetDeletedDashboardsFromList(), *integrationTimeboardResponse) assert.Contains(deleteResponse.GetDeletedDashboardsFromList(), *customTimeboardResponse) assert.Contains(deleteResponse.GetDeletedDashboardsFromList(), *customScreenboardResponse) getResponse, httpresp, err := Client(ctx).DashboardListsApi.GetDashboardListItems(ctx, dashboardListID).Execute() if err != nil { t.Fatalf("error getting items from dashboard list %d: Response %s: %v", dashboardListID, err.(datadog.GenericOpenAPIError).Body(), err) } assert.Equal(200, httpresp.StatusCode) assert.Equal(int64(0), getResponse.GetTotal()) assert.Equal(0, len(getResponse.GetDashboards())) updateRequest := datadog.NewDashboardListUpdateItemsRequest() updateRequest.SetDashboards(dashboards) updateResponse, httpresp, err := Client(ctx).DashboardListsApi.UpdateDashboardListItems(ctx, dashboardListID).Body(*updateRequest).Execute() if err != nil { t.Fatalf("error updating items from dashboard list %d: Response %s: %v", dashboardListID, err.(datadog.GenericOpenAPIError).Body(), err) } assert.Equal(200, httpresp.StatusCode) assert.Equal(3, len(updateResponse.GetDashboards())) assert.Contains(updateResponse.GetDashboards(), *integrationTimeboardResponse) assert.Contains(updateResponse.GetDashboards(), *customTimeboardResponse) assert.Contains(updateResponse.GetDashboards(), *customScreenboardResponse) // Leave only one dash in the list for easier assertion dashboards = []datadog.DashboardListItemRequest{ *integrationTimeboard, *customTimeboard, } deleteRequest.SetDashboards(dashboards) deleteResponse, httpresp, err = Client(ctx).DashboardListsApi.DeleteDashboardListItems(ctx, dashboardListID).Body(*deleteRequest).Execute() if err != nil { t.Fatalf("error deleting items from dashboard list %d: Response %s: %v", dashboardListID, err.(datadog.GenericOpenAPIError).Body(), err) } assert.Equal(200, httpresp.StatusCode) assert.Equal(2, len(deleteResponse.GetDeletedDashboardsFromList())) assert.Equal(200, httpresp.StatusCode) getResponse, httpresp, err = Client(ctx).DashboardListsApi.GetDashboardListItems(ctx, dashboardListID).Execute() if err != nil { t.Fatalf("error getting items from dashboard list %d: Response %s: %v", dashboardListID, err.(datadog.GenericOpenAPIError).Body(), err) } assert.Equal(1, len(getResponse.GetDashboards())) assert.Equal(int64(1), getResponse.GetTotal()) assert.True(getResponse.GetDashboards()[0].GetIsReadOnly()) assert.True(getResponse.GetDashboards()[0].GetIsShared()) assert.Equal(customScreenboardID, getResponse.GetDashboards()[0].GetId()) assert.Equal(datadog.DASHBOARDTYPE_CUSTOM_SCREENBOARD, getResponse.GetDashboards()[0].GetType()) assert.Equal("For dashboard list tests - DO NOT DELETE", getResponse.GetDashboards()[0].GetTitle()) assert.Equal("/dashboard/4n7-s4g-dqv/for-dashboard-list-tests---do-not-delete", getResponse.GetDashboards()[0].GetUrl()) 
assert.True(getResponse.GetDashboards()[0].GetPopularity() >= 0) assert.NotNil(getResponse.GetDashboards()[0].Author) assert.NotNil(getResponse.GetDashboards()[0].Modified) assert.NotNil(getResponse.GetDashboards()[0].Created) assert.Nil(getResponse.GetDashboards()[0].Icon) } func TestDashboardListGetItemsErrors(t *testing.T) { ctx, finish := tests.WithTestSpan(context.Background(), t) defer finish() testCases := map[string]struct { Ctx func(context.Context) context.Context ExpectedStatusCode int }{ "403 Forbidden": {WithFakeAuth, 403}, "404 Not Found": {WithTestAuth, 404}, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { ctx, stop := WithRecorder(tc.Ctx(ctx), t) defer stop() assert := tests.Assert(ctx, t) _, httpresp, err := Client(ctx).DashboardListsApi.GetDashboardListItems(ctx, 1234).Execute() assert.Equal(tc.ExpectedStatusCode, httpresp.StatusCode) apiError, ok := err.(datadog.GenericOpenAPIError).Model().(datadog.APIErrorResponse) assert.True(ok) assert.NotEmpty(apiError.GetErrors()) }) } } func TestDashboardListCreateItemsErrors(t *testing.T) { // Setup the Client we'll use to interact with the Test account ctx, finish := tests.WithTestSpan(context.Background(), t) defer finish() ctx, finish = WithRecorder(WithTestAuth(ctx), t) defer finish() err := createDashboardList(ctx, t) if err != nil { t.Fatal(err) } defer deleteDashboardList() testCases := map[string]struct { Ctx func(context.Context) context.Context ID int64 ExpectedStatusCode int }{ "400 Bad Request": {WithTestAuth, dashboardListID, 400}, "403 Forbidden": {WithFakeAuth, 0, 403}, "404 Not Found": {WithTestAuth, 0, 404}, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { ctx, stop := WithRecorder(tc.Ctx(ctx), t) defer stop() assert := tests.Assert(ctx, t) _, httpresp, err := Client(ctx).DashboardListsApi.CreateDashboardListItems(ctx, tc.ID).Body(*datadog.NewDashboardListAddItemsRequest()).Execute() assert.IsType(datadog.GenericOpenAPIError{}, err, "%v", err) assert.Equal(tc.ExpectedStatusCode, httpresp.StatusCode) apiError, ok := err.(datadog.GenericOpenAPIError).Model().(datadog.APIErrorResponse) assert.True(ok) assert.NotEmpty(apiError.GetErrors()) }) } } func TestDashboardListUpdateItemsErrors(t *testing.T) { // Setup the Client we'll use to interact with the Test account ctx, finish := tests.WithTestSpan(context.Background(), t) defer finish() ctx, finish = WithRecorder(WithTestAuth(ctx), t) defer finish() err := createDashboardList(ctx, t) if err != nil { t.Fatal(err) } defer deleteDashboardList() testCases := map[string]struct { Ctx func(context.Context) context.Context ID int64 ExpectedStatusCode int }{ "400 Bad Request": {WithTestAuth, dashboardListID, 400}, "403 Forbidden": {WithFakeAuth, 0, 403}, "404 Not Found": {WithTestAuth, 0, 404}, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { ctx, stop := WithRecorder(tc.Ctx(ctx), t) defer stop() assert := tests.Assert(ctx, t) _, httpresp, err := Client(ctx).DashboardListsApi.UpdateDashboardListItems(ctx, tc.ID).Body(*datadog.NewDashboardListUpdateItemsRequest()).Execute() assert.IsType(datadog.GenericOpenAPIError{}, err, "%v", err) assert.Equal(tc.ExpectedStatusCode, httpresp.StatusCode) apiError, ok := err.(datadog.GenericOpenAPIError).Model().(datadog.APIErrorResponse) assert.True(ok) assert.NotEmpty(apiError.GetErrors()) }) } } func TestDashboardListDeleteItemsErrors(t *testing.T) { // Setup the Client we'll use to interact with the Test account ctx, finish := tests.WithTestSpan(context.Background(), t) defer 
finish() ctx, finish = WithRecorder(WithTestAuth(ctx), t) defer finish() err := createDashboardList(ctx, t) if err != nil { t.Fatal(err) } defer deleteDashboardList() testCases := map[string]struct { Ctx func(context.Context) context.Context ID int64 ExpectedStatusCode int }{ "400 Bad Request": {WithTestAuth, dashboardListID, 400}, "403 Forbidden": {WithFakeAuth, 0, 403}, "404 Not Found": {WithTestAuth, 0, 404}, } for name, tc := range testCases { t.Run(name, func(t *testing.T) { ctx, stop := WithRecorder(tc.Ctx(ctx), t) defer stop() assert := tests.Assert(ctx, t) _, httpresp, err := Client(ctx).DashboardListsApi.DeleteDashboardListItems(ctx, tc.ID).Body(*datadog.NewDashboardListDeleteItemsRequest()).Execute() assert.IsType(datadog.GenericOpenAPIError{}, err, "%v", err) assert.Equal(tc.ExpectedStatusCode, httpresp.StatusCode) apiError, ok := err.(datadog.GenericOpenAPIError).Model().(datadog.APIErrorResponse) assert.True(ok) assert.NotEmpty(apiError.GetErrors()) }) } }
[ "\"DD_TEST_CLIENT_API_KEY\"", "\"DD_TEST_CLIENT_APP_KEY\"", "\"DEBUG\"" ]
[]
[ "DEBUG", "DD_TEST_CLIENT_APP_KEY", "DD_TEST_CLIENT_API_KEY" ]
[]
["DEBUG", "DD_TEST_CLIENT_APP_KEY", "DD_TEST_CLIENT_API_KEY"]
go
3
0
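initializeClientV1 above pulls DD_TEST_CLIENT_API_KEY and DD_TEST_CLIENT_APP_KEY straight into the client configuration and coerces DEBUG with a string comparison, so a missing key only surfaces later as a 403. A small stdlib-only sketch that validates the same three variables up front (clientCreds and credsFromEnv are our names, not part of the Datadog client):

package main

import (
	"fmt"
	"os"
)

// clientCreds gathers the credentials the test above reads from the
// environment; missing keys are reported before any request is made.
type clientCreds struct {
	apiKey, appKey string
	debug          bool
}

func credsFromEnv() (clientCreds, error) {
	c := clientCreds{
		apiKey: os.Getenv("DD_TEST_CLIENT_API_KEY"),
		appKey: os.Getenv("DD_TEST_CLIENT_APP_KEY"),
		debug:  os.Getenv("DEBUG") == "true",
	}
	if c.apiKey == "" || c.appKey == "" {
		return c, fmt.Errorf("DD_TEST_CLIENT_API_KEY and DD_TEST_CLIENT_APP_KEY must be set")
	}
	return c, nil
}

func main() {
	creds, err := credsFromEnv()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("debug:", creds.debug)
}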
ziti/cmd/ziti/cmd/create_config.go
/* Copyright NetFoundry, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "github.com/openziti/channel" edge "github.com/openziti/edge/controller/config" fabCtrl "github.com/openziti/fabric/controller" fabForwarder "github.com/openziti/fabric/router/forwarder" fabXweb "github.com/openziti/fabric/xweb" foundation "github.com/openziti/transport/v2" "github.com/openziti/ziti/ziti/cmd/ziti/cmd/common" cmdhelper "github.com/openziti/ziti/ziti/cmd/ziti/cmd/helpers" "github.com/openziti/ziti/ziti/cmd/ziti/constants" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "os" "time" ) const ( optionVerbose = "verbose" defaultVerbose = false verboseDescription = "Enable verbose logging. Logging will be sent to stdout if the config output is sent to a file. If output is sent to stdout, logging will be sent to stderr" optionOutput = "output" defaultOutput = "stdout" outputDescription = "designated output destination for config, use \"stdout\" or a filepath." ) // CreateConfigOptions the options for the create config command type CreateConfigOptions struct { common.CommonOptions Output string DatabaseFile string } type ConfigTemplateValues struct { ZitiHome string Hostname string Controller ControllerTemplateValues Router RouterTemplateValues } type ControllerTemplateValues struct { Name string Port string AdvertisedAddress string ListenerAddress string IdentityCert string IdentityServerCert string IdentityKey string IdentityCA string MinQueuedConnects int MaxQueuedConnects int DefaultQueuedConnects int MinOutstandingConnects int MaxOutstandingConnects int DefaultOutstandingConnects int MinConnectTimeout time.Duration MaxConnectTimeout time.Duration DefaultConnectTimeout time.Duration EdgeIdentityDuration time.Duration EdgeRouterDuration time.Duration Edge EdgeControllerValues WebListener ControllerWebListenerValues HealthCheck ControllerHealthCheckValues } type EdgeControllerValues struct { AdvertisedPort string ZitiSigningCert string ZitiSigningKey string APIActivityUpdateBatchSize int APIActivityUpdateInterval time.Duration APISessionTimeout time.Duration ListenerHostPort string AdvertisedHostPort string IdentityCert string IdentityServerCert string IdentityKey string IdentityCA string } type ControllerWebListenerValues struct { IdleTimeout time.Duration ReadTimeout time.Duration WriteTimeout time.Duration MinTLSVersion string MaxTLSVersion string } type ControllerHealthCheckValues struct { Interval time.Duration Timeout time.Duration InitialDelay time.Duration } type RouterTemplateValues struct { Name string IsPrivate bool IsFabric bool IsWss bool IdentityCert string IdentityServerCert string IdentityKey string IdentityCA string Edge EdgeRouterTemplateValues Wss WSSRouterTemplateValues Forwarder RouterForwarderTemplateValues Listener RouterListenerTemplateValues } type EdgeRouterTemplateValues struct { Hostname string Port string } type WSSRouterTemplateValues struct { WriteTimeout time.Duration ReadTimeout time.Duration IdleTimeout time.Duration PongTimeout time.Duration PingInterval time.Duration 
HandshakeTimeout time.Duration ReadBufferSize int WriteBufferSize int EnableCompression bool } type RouterForwarderTemplateValues struct { LatencyProbeInterval time.Duration XgressDialQueueLength int XgressDialWorkerCount int LinkDialQueueLength int LinkDialWorkerCount int } type RouterListenerTemplateValues struct { ConnectTimeout time.Duration GetSessionTimeout time.Duration BindPort int OutQueueSize int } var workingDir string var data = &ConfigTemplateValues{} func init() { zh := os.Getenv("ZITI_HOME") if zh == "" { wd, err := os.Getwd() if wd == "" || err != nil { //on error just use "." workingDir = "." } } workingDir = cmdhelper.NormalizePath(zh) } // NewCmdCreateConfig creates a command object for the "config" command func NewCmdCreateConfig() *cobra.Command { cmd := &cobra.Command{ Use: "config", Short: "Creates a config file for specified Ziti component using environment variables", Aliases: []string{"cfg"}, Run: func(cmd *cobra.Command, args []string) { cmdhelper.CheckErr(cmd.Help()) }, } cmd.AddCommand(NewCmdCreateConfigController()) cmd.AddCommand(NewCmdCreateConfigRouter()) cmd.AddCommand(NewCmdCreateConfigEnvironment()) return cmd } // Add flags that are global to all "create config" commands func (options *CreateConfigOptions) addCreateFlags(cmd *cobra.Command) { cmd.PersistentFlags().BoolVarP(&options.Verbose, optionVerbose, "v", defaultVerbose, verboseDescription) cmd.PersistentFlags().StringVarP(&options.Output, optionOutput, "o", defaultOutput, outputDescription) } func (data *ConfigTemplateValues) populateEnvVars() { // Get and add hostname to the params hostname, err := os.Hostname() handleVariableError(err, "hostname") // Get and add ziti home to the params zitiHome, err := cmdhelper.GetZitiHome() handleVariableError(err, constants.ZitiHomeVarName) // Get Ziti Controller Name zitiCtrlHostname, err := cmdhelper.GetZitiCtrlName() handleVariableError(err, constants.ZitiCtrlNameVarName) // Get Ziti Edge Router Port zitiEdgeRouterPort, err := cmdhelper.GetZitiEdgeRouterPort() handleVariableError(err, constants.ZitiEdgeRouterPortVarName) // Get Ziti Controller Listener Address zitiCtrlListenerAddress, err := cmdhelper.GetZitiCtrlListenerAddress() handleVariableError(err, constants.ZitiCtrlListenerAddressVarName) // Get Ziti Controller Advertised Address zitiCtrlAdvertisedAddress, err := cmdhelper.GetZitiCtrlAdvertisedAddress() handleVariableError(err, constants.ZitiCtrlAdvertisedAddressVarName) // Get Ziti Controller Port zitiCtrlPort, err := cmdhelper.GetZitiCtrlPort() handleVariableError(err, constants.ZitiCtrlPortVarName) // Get Ziti Edge Controller Listener Host and Port zitiEdgeCtrlListenerHostPort, err := cmdhelper.GetZitiEdgeCtrlListenerHostPort() handleVariableError(err, constants.ZitiEdgeCtrlListenerHostPortVarName) // Get Ziti Edge Controller Advertised Host and Port zitiEdgeCtrlAdvertisedHostPort, err := cmdhelper.GetZitiEdgeCtrlAdvertisedHostPort() handleVariableError(err, constants.ZitiEdgeCtrlAdvertisedHostPortVarName) // Get Ziti Edge Controller Advertised Port zitiEdgeCtrlAdvertisedPort, err := cmdhelper.GetZitiEdgeCtrlAdvertisedPort() handleVariableError(err, constants.ZitiEdgeCtrlAdvertisedPortVarName) data.ZitiHome = zitiHome data.Hostname = hostname data.Controller.Name = zitiCtrlHostname data.Controller.ListenerAddress = zitiCtrlListenerAddress data.Controller.AdvertisedAddress = zitiCtrlAdvertisedAddress data.Controller.Port = zitiCtrlPort data.Controller.Edge.ListenerHostPort = zitiEdgeCtrlListenerHostPort data.Controller.Edge.AdvertisedHostPort = 
zitiEdgeCtrlAdvertisedHostPort data.Router.Edge.Port = zitiEdgeRouterPort data.Controller.Edge.AdvertisedPort = zitiEdgeCtrlAdvertisedPort } func (data *ConfigTemplateValues) populateDefaults() { data.Router.Listener.BindPort = constants.DefaultListenerBindPort data.Router.Listener.GetSessionTimeout = constants.DefaultGetSessionTimeout data.Controller.MinQueuedConnects = channel.MinQueuedConnects data.Controller.MaxQueuedConnects = channel.MaxQueuedConnects data.Controller.DefaultQueuedConnects = channel.DefaultQueuedConnects data.Controller.MinOutstandingConnects = channel.MinOutstandingConnects data.Controller.MaxOutstandingConnects = channel.MaxOutstandingConnects data.Controller.DefaultOutstandingConnects = channel.DefaultOutstandingConnects data.Controller.MinConnectTimeout = channel.MinConnectTimeout data.Controller.MaxConnectTimeout = channel.MaxConnectTimeout data.Controller.DefaultConnectTimeout = channel.DefaultConnectTimeout data.Controller.HealthCheck.Timeout = fabCtrl.DefaultHealthChecksBoltCheckTimeout data.Controller.HealthCheck.Interval = fabCtrl.DefaultHealthChecksBoltCheckInterval data.Controller.HealthCheck.InitialDelay = fabCtrl.DefaultHealthChecksBoltCheckInitialDelay data.Controller.Edge.APIActivityUpdateBatchSize = edge.DefaultEdgeApiActivityUpdateBatchSize data.Controller.Edge.APIActivityUpdateInterval = edge.DefaultEdgeAPIActivityUpdateInterval data.Controller.Edge.APISessionTimeout = edge.DefaultEdgeSessionTimeout data.Controller.EdgeIdentityDuration = edge.DefaultEdgeEnrollmentDuration data.Controller.EdgeRouterDuration = edge.DefaultEdgeEnrollmentDuration data.Controller.WebListener.IdleTimeout = edge.DefaultHttpIdleTimeout data.Controller.WebListener.ReadTimeout = edge.DefaultHttpReadTimeout data.Controller.WebListener.WriteTimeout = edge.DefaultHttpWriteTimeout data.Controller.WebListener.MinTLSVersion = fabXweb.ReverseTlsVersionMap[fabXweb.MinTLSVersion] data.Controller.WebListener.MaxTLSVersion = fabXweb.ReverseTlsVersionMap[fabXweb.MaxTLSVersion] data.Router.Wss.WriteTimeout = foundation.DefaultWsWriteTimeout data.Router.Wss.ReadTimeout = foundation.DefaultWsReadTimeout data.Router.Wss.IdleTimeout = foundation.DefaultWsIdleTimeout data.Router.Wss.PongTimeout = foundation.DefaultWsPongTimeout data.Router.Wss.PingInterval = foundation.DefaultWsPingInterval data.Router.Wss.HandshakeTimeout = foundation.DefaultWsHandshakeTimeout data.Router.Wss.ReadBufferSize = foundation.DefaultWsReadBufferSize data.Router.Wss.WriteBufferSize = foundation.DefaultWsWriteBufferSize data.Router.Wss.EnableCompression = foundation.DefaultWsEnableCompression data.Router.Forwarder.LatencyProbeInterval = fabForwarder.DefaultLatencyProbeInterval data.Router.Forwarder.XgressDialQueueLength = fabForwarder.DefaultXgressDialWorkerQueueLength data.Router.Forwarder.XgressDialWorkerCount = fabForwarder.DefaultXgressDialWorkerCount data.Router.Forwarder.LinkDialQueueLength = fabForwarder.DefaultLinkDialQueueLength data.Router.Forwarder.LinkDialWorkerCount = fabForwarder.DefaultLinkDialWorkerCount data.Router.Listener.OutQueueSize = channel.DefaultOutQueueSize data.Router.Listener.ConnectTimeout = channel.DefaultConnectTimeout } func handleVariableError(err error, varName string) { if err != nil { logrus.Errorf("Unable to get %s: %v", varName, err) } }
[ "\"ZITI_HOME\"" ]
[]
[ "ZITI_HOME" ]
[]
["ZITI_HOME"]
go
1
0
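One thing worth flagging in create_config.go: its init appears to compute os.Getwd() when ZITI_HOME is unset but never assigns the result on success, then unconditionally normalizes the possibly empty zh. A corrected sketch of what the inline comment suggests was intended, with filepath.Clean standing in for cmdhelper.NormalizePath:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// workingDirFromEnv falls back to the current directory (or ".") when
// ZITI_HOME is unset, instead of normalizing the empty string.
func workingDirFromEnv() string {
	zh := os.Getenv("ZITI_HOME")
	if zh == "" {
		wd, err := os.Getwd()
		if err != nil || wd == "" {
			return "." // on error just use "."
		}
		zh = wd
	}
	return filepath.Clean(zh) // stand-in for cmdhelper.NormalizePath
}

func main() {
	fmt.Println(workingDirFromEnv())
}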
feder/virus_scan/tests/test_management.py
import os import time import struct import random from unittest import skipIf from io import StringIO from django.test import TestCase from django.core.management import call_command from feder.virus_scan.engine import get_engine, is_available from feder.virus_scan.models import Request from feder.letters.factories import AttachmentFactory from feder.virus_scan.factories import AttachmentRequestFactory EICAR_TEST = r"X5O!P%@AP[4\PZX54(P^)7CC)7}$EICAR-STANDARD-ANTIVIRUS-TEST-FILE!$H+H*" def random_binary(): return struct.pack("=I", random.randint(0, ((2**32) - 1))) def skipIfNoEngine(x): return skipIf( not is_available() and "CI" not in os.environ, "Missing engine configuration" )(x) class VirusScanCommandTestCase(TestCase): @skipIfNoEngine def test_virus_scan_for_eicar(self): current_engine = get_engine() request = AttachmentRequestFactory(content_object__attachment__data=EICAR_TEST) stdout = StringIO() call_command( "virus_scan", stdout=stdout, ) request.refresh_from_db() if request.status == Request.STATUS.queued: self._receive_until_transition(request) self.assertEqual(request.status, Request.STATUS.infected) self.assertEqual(request.engine_name, current_engine.name) self.assertNotEqual(request.engine_id, "") @skipIfNoEngine def test_virus_scan_for_safe_file(self): current_engine = get_engine() request = AttachmentRequestFactory(content_object__attachment__data="zółć.docx") stdout = StringIO() call_command( "virus_scan", stdout=stdout, ) request.refresh_from_db() if request.status == Request.STATUS.queued: self._receive_until_transition(request) self.assertEqual(request.status, Request.STATUS.not_detected) self.assertEqual(request.engine_name, current_engine.name) self.assertNotEqual(request.engine_id, "") def _receive_until_transition(self, obj, delay=10, timeout=180): initial_state = obj.status stdout = StringIO() for _ in range(round(timeout / delay)): call_command( "virus_scan", "--skip-send", stdout=stdout, ) obj.refresh_from_db() if obj.status != initial_state: return # print("Waiting to transition state: {}".format(obj)) time.sleep(delay) raise Exception("Timeout for transition of state") @skipIfNoEngine def test_virus_scan_file_for_random(self): request = AttachmentRequestFactory( content_object__attachment__data=random_binary() ) stdout = StringIO() call_command( "virus_scan", "--skip-receive", stdout=stdout, ) request.refresh_from_db() self.assertEqual(request.status, Request.STATUS.queued) self._receive_until_transition(request) self.assertEqual(request.status, Request.STATUS.not_detected) def test_queue_skip_scanned(self): request = AttachmentRequestFactory() stdout = StringIO() self.assertEqual(Request.objects.for_object(request.content_object).count(), 1) call_command( "queue_virus_scan", stdout=stdout, ) self.assertEqual(Request.objects.for_object(request.content_object).count(), 1) def test_queue_request_new(self): attachment = AttachmentFactory() stdout = StringIO() self.assertEqual(Request.objects.for_object(attachment).count(), 0) call_command( "queue_virus_scan", stdout=stdout, ) self.assertEqual(Request.objects.for_object(attachment).count(), 1)
[]
[]
[]
[]
[]
python
0
0
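skipIfNoEngine above skips the scan tests unless the engine is configured or CI is present in os.environ, so a misconfigured CI run fails loudly instead of silently skipping. An equivalent test guard sketched in Go (the package and helper names are ours):

package scan_test

import (
	"os"
	"testing"
)

// skipIfNoEngine skips a test when no scan engine is configured, except
// on CI, where a missing engine should fail the run rather than be skipped.
func skipIfNoEngine(t *testing.T, engineAvailable bool) {
	t.Helper()
	if _, onCI := os.LookupEnv("CI"); !engineAvailable && !onCI {
		t.Skip("Missing engine configuration")
	}
}

func TestVirusScanGuard(t *testing.T) {
	skipIfNoEngine(t, false) // engineAvailable would come from real engine config
}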
vendor/github.com/docker/docker/pkg/archive/archive_test.go
package archive import ( "archive/tar" "bytes" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "runtime" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var tmp string func init() { tmp = "/tmp/" if runtime.GOOS == "windows" { tmp = os.Getenv("TEMP") + `\` } } var defaultArchiver = NewDefaultArchiver() func defaultTarUntar(src, dst string) error { return defaultArchiver.TarUntar(src, dst) } func defaultUntarPath(src, dst string) error { return defaultArchiver.UntarPath(src, dst) } func defaultCopyFileWithTar(src, dst string) (err error) { return defaultArchiver.CopyFileWithTar(src, dst) } func defaultCopyWithTar(src, dst string) error { return defaultArchiver.CopyWithTar(src, dst) } func TestIsArchivePathDir(t *testing.T) { cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archivedir") { t.Fatalf("Incorrectly recognised directory as an archive") } } func TestIsArchivePathInvalidFile(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1024 count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if IsArchivePath(tmp + "archive") { t.Fatalf("Incorrectly recognised invalid tar path as archive") } if IsArchivePath(tmp + "archive.gz") { t.Fatalf("Incorrectly recognised invalid compressed tar path as archive") } } func TestIsArchivePathTar(t *testing.T) { var whichTar string if runtime.GOOS == "solaris" { whichTar = "gtar" } else { whichTar = "tar" } cmdStr := fmt.Sprintf("touch /tmp/archivedata && %s -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz", whichTar) cmd := exec.Command("sh", "-c", cmdStr) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Fail to create an archive file for test : %s.", output) } if !IsArchivePath(tmp + "/archive") { t.Fatalf("Did not recognise valid tar path as archive") } if !IsArchivePath(tmp + "archive.gz") { t.Fatalf("Did not recognise valid compressed tar path as archive") } } func testDecompressStream(t *testing.T, ext, compressCommand string) { cmd := exec.Command("sh", "-c", fmt.Sprintf("touch /tmp/archive && %s /tmp/archive", compressCommand)) output, err := cmd.CombinedOutput() if err != nil { t.Fatalf("Failed to create an archive file for test : %s.", output) } filename := "archive." 
+ ext archive, err := os.Open(tmp + filename) if err != nil { t.Fatalf("Failed to open file %s: %v", filename, err) } defer archive.Close() r, err := DecompressStream(archive) if err != nil { t.Fatalf("Failed to decompress %s: %v", filename, err) } if _, err = ioutil.ReadAll(r); err != nil { t.Fatalf("Failed to read the decompressed stream: %v ", err) } if err = r.Close(); err != nil { t.Fatalf("Failed to close the decompressed stream: %v ", err) } } func TestDecompressStreamGzip(t *testing.T) { testDecompressStream(t, "gz", "gzip -f") } func TestDecompressStreamBzip2(t *testing.T) { testDecompressStream(t, "bz2", "bzip2 -f") } func TestDecompressStreamXz(t *testing.T) { if runtime.GOOS == "windows" { t.Skip("Xz not present in msys2") } testDecompressStream(t, "xz", "xz -f") } func TestCompressStreamXzUnsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Xz) if err == nil { t.Fatalf("Should fail as xz is unsupported for compression format.") } } func TestCompressStreamBzip2Unsupported(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, Bzip2) if err == nil { t.Fatalf("Should fail as bzip2 is unsupported for compression format.") } } func TestCompressStreamInvalid(t *testing.T) { dest, err := os.Create(tmp + "dest") if err != nil { t.Fatalf("Fail to create the destination file") } defer dest.Close() _, err = CompressStream(dest, -1) if err == nil { t.Fatalf("Should fail as the compression format is invalid.") } } func TestExtensionInvalid(t *testing.T) { compression := Compression(-1) output := compression.Extension() if output != "" { t.Fatalf("The extension of an invalid compression should be an empty string.") } } func TestExtensionUncompressed(t *testing.T) { compression := Uncompressed output := compression.Extension() if output != "tar" { t.Fatalf("The extension of an uncompressed archive should be 'tar'.") } } func TestExtensionBzip2(t *testing.T) { compression := Bzip2 output := compression.Extension() if output != "tar.bz2" { t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'") } } func TestExtensionGzip(t *testing.T) { compression := Gzip output := compression.Extension() if output != "tar.gz" { t.Fatalf("The extension of a gzip archive should be 'tar.gz'") } } func TestExtensionXz(t *testing.T) { compression := Xz output := compression.Extension() if output != "tar.xz" { t.Fatalf("The extension of an xz archive should be 'tar.xz'") } } func TestCmdStreamLargeStderr(t *testing.T) { cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello") out, _, err := cmdStream(cmd, nil) if err != nil { t.Fatalf("Failed to start command: %s", err) } errCh := make(chan error) go func() { _, err := io.Copy(ioutil.Discard, out) errCh <- err }() select { case err := <-errCh: if err != nil { t.Fatalf("Command should not have failed (err=%.100s...)", err) } case <-time.After(5 * time.Second): t.Fatalf("Command did not complete in 5 seconds; probable deadlock") } } func TestCmdStreamBad(t *testing.T) { // TODO Windows: Figure out why this is failing in CI but not locally if runtime.GOOS == "windows" { t.Skip("Failing on Windows CI machines") } badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1") out, _, err := cmdStream(badCmd, nil) if err != nil {
t.Fatalf("Failed to start command: %s", err) } if output, err := ioutil.ReadAll(out); err == nil { t.Fatalf("Command should have failed") } else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" { t.Fatalf("Wrong error value (%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestCmdStreamGood(t *testing.T) { cmd := exec.Command("sh", "-c", "echo hello; exit 0") out, _, err := cmdStream(cmd, nil) if err != nil { t.Fatal(err) } if output, err := ioutil.ReadAll(out); err != nil { t.Fatalf("Command should not have failed (err=%s)", err) } else if s := string(output); s != "hello\n" { t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output) } } func TestUntarPathWithInvalidDest(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) invalidDestFolder := filepath.Join(tempFolder, "invalidDest") // Create a src file srcFile := filepath.Join(tempFolder, "src") tarFile := filepath.Join(tempFolder, "src.tar") os.Create(srcFile) os.Create(invalidDestFolder) // being a file (not dir) should cause an error // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, invalidDestFolder) if err == nil { t.Fatalf("UntarPath with invalid destination path should throw an error.") } } func TestUntarPathWithInvalidSrc(t *testing.T) { dest, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatalf("Fail to create the destination file") } defer os.RemoveAll(dest) err = defaultUntarPath("/invalid/path", dest) if err == nil { t.Fatalf("UntarPath with invalid src path should throw an error.") } } func TestUntarPath(t *testing.T) { tmpFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination file") } // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath shouldn't throw an error, %s.", err) } expectedFile := filepath.Join(destFolder, srcFileU) _, err = os.Stat(expectedFile) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } // Do the same test as above but with the destination as file, it should fail func TestUntarPathWithDestinationFile(t *testing.T) { tmpFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") 
tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(filepath.Join(tmpFolder, "src")) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFile := filepath.Join(tmpFolder, "dest") _, err = os.Create(destFile) if err != nil { t.Fatalf("Fail to create the destination file") } err = defaultUntarPath(tarFile, destFile) if err == nil { t.Fatalf("UntarPath should throw an error if the destination is a file") } } // Do the same test as above but with a destination folder that already exists // and the destination file is a directory // It's working, see https://github.com/docker/docker/issues/10040 func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) { tmpFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpFolder) srcFile := filepath.Join(tmpFolder, "src") tarFile := filepath.Join(tmpFolder, "src.tar") os.Create(srcFile) // Translate back to Unix semantics as next exec.Command is run under sh srcFileU := srcFile tarFileU := tarFile if runtime.GOOS == "windows" { tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar" srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src" } cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU) _, err = cmd.CombinedOutput() if err != nil { t.Fatal(err) } destFolder := filepath.Join(tmpFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatalf("Fail to create the destination folder") } // Let's create a folder that will have the same path as the extracted file (from tar) destSrcFileAsFolder := filepath.Join(destFolder, srcFileU) err = os.MkdirAll(destSrcFileAsFolder, 0740) if err != nil { t.Fatal(err) } err = defaultUntarPath(tarFile, destFolder) if err != nil { t.Fatalf("UntarPath should not throw an error if the extracted file already exists and is a folder") } } func TestCopyWithTarInvalidSrc(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } destFolder := filepath.Join(tempFolder, "dest") invalidSrc := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(invalidSrc, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } srcFolder := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = defaultCopyWithTar(srcFolder, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } } // Test CopyWithTar with a file as src func TestCopyWithTarSrcFile(t *testing.T) { folder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src :=
filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content if err != nil { t.Fatalf("Destination file should be the same as the source.") } } // Test CopyWithTar with a folder as src func TestCopyWithTarSrcFolder(t *testing.T) { folder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, filepath.Join("src", "folder")) err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777) err = defaultCopyWithTar(src, dest) if err != nil { t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err) } _, err = os.Stat(dest) // FIXME Check the content (the file inside) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestCopyFileWithTarInvalidSrc(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tempFolder) destFolder := filepath.Join(tempFolder, "dest") err = os.MkdirAll(destFolder, 0740) if err != nil { t.Fatal(err) } invalidFile := filepath.Join(tempFolder, "doesnotexists") err = defaultCopyFileWithTar(invalidFile, destFolder) if err == nil { t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.") } } func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) { tempFolder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(nil) } defer os.RemoveAll(tempFolder) srcFile := filepath.Join(tempFolder, "src") inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists") _, err = os.Create(srcFile) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(srcFile, inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.") } _, err = os.Stat(inexistentDestFolder) if err != nil { t.Fatalf("CopyWithTar with an inexistent folder should create it.") } // FIXME Test the src file and content } func TestCopyFileWithTarSrcFolder(t *testing.T) { folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") src := filepath.Join(folder, "srcfolder") err = os.MkdirAll(src, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } err = defaultCopyFileWithTar(src, dest) if err == nil { t.Fatalf("CopyFileWithTar should throw an error with a folder.") } } func TestCopyFileWithTarSrcFile(t *testing.T) { folder, err := ioutil.TempDir("", "docker-archive-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(folder) dest := filepath.Join(folder, "dest") srcFolder := filepath.Join(folder, "src") src := filepath.Join(folder, filepath.Join("src", "src")) err = os.MkdirAll(srcFolder, 0740) if err != nil { t.Fatal(err) } err = os.MkdirAll(dest, 0740) if err != nil { t.Fatal(err) } ioutil.WriteFile(src, []byte("content"), 0777) err = defaultCopyWithTar(src, dest+"/") if err != nil { t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err) } _, err = 
os.Stat(dest) if err != nil { t.Fatalf("Destination folder should contain the source file but did not.") } } func TestTarFiles(t *testing.T) { // TODO Windows: Figure out how to port this test. if runtime.GOOS == "windows" { t.Skip("Failing on Windows") } // try without hardlinks if err := checkNoChanges(1000, false); err != nil { t.Fatal(err) } // try with hardlinks if err := checkNoChanges(1000, true); err != nil { t.Fatal(err) } } func checkNoChanges(fileNum int, hardlinks bool) error { srcDir, err := ioutil.TempDir("", "docker-test-srcDir") if err != nil { return err } defer os.RemoveAll(srcDir) destDir, err := ioutil.TempDir("", "docker-test-destDir") if err != nil { return err } defer os.RemoveAll(destDir) _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) if err != nil { return err } err = defaultTarUntar(srcDir, destDir) if err != nil { return err } changes, err := ChangesDirs(destDir, srcDir) if err != nil { return err } if len(changes) > 0 { return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) } return nil } func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { archive, err := TarWithOptions(origin, options) if err != nil { t.Fatal(err) } defer archive.Close() buf := make([]byte, 10) if _, err := archive.Read(buf); err != nil { return nil, err } wrap := io.MultiReader(bytes.NewReader(buf), archive) detectedCompression := DetectCompression(buf) compression := options.Compression if detectedCompression.Extension() != compression.Extension() { return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) } tmp, err := ioutil.TempDir("", "docker-test-untar") if err != nil { return nil, err } defer os.RemoveAll(tmp) if err := Untar(wrap, tmp, nil); err != nil { return nil, err } if _, err := os.Stat(tmp); err != nil { return nil, err } return ChangesDirs(origin, tmp) } func TestTarUntar(t *testing.T) { // TODO Windows: Figure out how to fix this test. if runtime.GOOS == "windows" { t.Skip("Failing on Windows") } origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { t.Fatal(err) } for _, c := range []Compression{ Uncompressed, Gzip, } { changes, err := tarUntar(t, origin, &TarOptions{ Compression: c, ExcludePatterns: []string{"3"}, }) if err != nil { t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) } if len(changes) != 1 || changes[0].Path != "/3" { t.Fatalf("Unexpected differences after tarUntar: %v", changes) } } } func TestTarWithOptions(t *testing.T) { // TODO Windows: Figure out how to fix this test. 
if runtime.GOOS == "windows" { t.Skip("Failing on Windows") } origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { t.Fatal(err) } if _, err := ioutil.TempDir(origin, "folder"); err != nil { t.Fatal(err) } defer os.RemoveAll(origin) if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { t.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { t.Fatal(err) } cases := []struct { opts *TarOptions numChanges int }{ {&TarOptions{IncludeFiles: []string{"1"}}, 2}, {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, } for _, testCase := range cases { changes, err := tarUntar(t, origin, testCase.opts) if err != nil { t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) } if len(changes) != testCase.numChanges { t.Errorf("Expected %d changes, got %d for %+v:", testCase.numChanges, len(changes), testCase.opts) } } } // Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz // use PAX Global Extended Headers. // Failing prevents the archives from being uncompressed during ADD func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") if err != nil { t.Fatal(err) } defer os.RemoveAll(tmpDir) err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil, false) if err != nil { t.Fatal(err) } } // Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. // Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. func TestUntarUstarGnuConflict(t *testing.T) { f, err := os.Open("testdata/broken.tar") if err != nil { t.Fatal(err) } defer f.Close() found := false tr := tar.NewReader(f) // Iterate through the files in the archive. 
for { hdr, err := tr.Next() if err == io.EOF { // end of tar archive break } if err != nil { t.Fatal(err) } if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { found = true break } } if !found { t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") } } func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { fileData := []byte("fooo") for n := 0; n < numberOfFiles; n++ { fileName := fmt.Sprintf("file-%d", n) if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { return 0, err } if makeLinks { if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { return 0, err } } } totalSize := numberOfFiles * len(fileData) return totalSize, nil } func BenchmarkTarUntar(b *testing.B) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, false) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func BenchmarkTarUntarWithLinks(b *testing.B) { origin, err := ioutil.TempDir("", "docker-test-untar-origin") if err != nil { b.Fatal(err) } tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") if err != nil { b.Fatal(err) } target := filepath.Join(tempDir, "dest") n, err := prepareUntarSourceDirectory(100, origin, true) if err != nil { b.Fatal(err) } defer os.RemoveAll(origin) defer os.RemoveAll(tempDir) b.ResetTimer() b.SetBytes(int64(n)) for n := 0; n < b.N; n++ { err := defaultTarUntar(origin, target) if err != nil { b.Fatal(err) } os.RemoveAll(target) } } func TestUntarInvalidFilenames(t *testing.T) { // TODO Windows: Figure out how to fix this test. if runtime.GOOS == "windows" { t.Skip("Passes but hits breakoutError: platform and architecture is not supported") } for i, headers := range [][]*tar.Header{ { { Name: "../victim/dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, { { // Note the leading slash Name: "/../victim/slash-dotdot", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarHardlinkToSymlink(t *testing.T) { // TODO Windows. There may be a way of running this, but turning off for now if runtime.GOOS == "windows" { t.Skip("hardlinks on Windows") } for i, headers := range [][]*tar.Header{ { { Name: "symlink1", Typeflag: tar.TypeSymlink, Linkname: "regfile", Mode: 0644, }, { Name: "symlink2", Typeflag: tar.TypeLink, Linkname: "symlink1", Mode: 0644, }, { Name: "regfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidHardlink(t *testing.T) { // TODO Windows. 
There may be a way of running this, but turning off for now if runtime.GOOS == "windows" { t.Skip("hardlinks on Windows") } for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeLink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeLink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (hardlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try reading victim/hello (hardlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // Try removing victim directory (hardlink) { Name: "loophole-victim", Typeflag: tar.TypeLink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestUntarInvalidSymlink(t *testing.T) { // TODO Windows. There may be a way of running this, but turning off for now if runtime.GOOS == "windows" { t.Skip("hardlinks on Windows") } for i, headers := range [][]*tar.Header{ { // try reading victim/hello (../) { Name: "dotdot", Typeflag: tar.TypeSymlink, Linkname: "../victim/hello", Mode: 0644, }, }, { // try reading victim/hello (/../) { Name: "slash-dotdot", Typeflag: tar.TypeSymlink, // Note the leading slash Linkname: "/../victim/hello", Mode: 0644, }, }, { // try writing victim/file { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim/file", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try reading victim/hello (symlink, symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try reading victim/hello (symlink, hardlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "hardlink", Typeflag: tar.TypeLink, Linkname: "loophole-victim/hello", Mode: 0644, }, }, { // try removing victim directory (symlink) { Name: "loophole-victim", Typeflag: tar.TypeSymlink, Linkname: "../victim", Mode: 0755, }, { Name: "loophole-victim", Typeflag: tar.TypeReg, Mode: 0644, }, }, { // try writing to victim/newdir/newfile with a symlink in the path { // this header needs to be before the next one, or else there is an error Name: "dir/loophole", Typeflag: tar.TypeSymlink, Linkname: "../../victim", Mode: 0755, }, { Name: "dir/loophole/newdir/newfile", Typeflag: tar.TypeReg, Mode: 0644, }, }, } { if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { t.Fatalf("i=%d. %v", i, err) } } } func TestTempArchiveCloseMultipleTimes(t *testing.T) { reader := ioutil.NopCloser(strings.NewReader("hello")) tempArchive, err := NewTempArchive(reader, "") buf := make([]byte, 10) n, err := tempArchive.Read(buf) if n != 5 { t.Fatalf("Expected to read 5 bytes. 
Read %d instead", n) } for i := 0; i < 3; i++ { if err = tempArchive.Close(); err != nil { t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) } } } func TestReplaceFileTarWrapper(t *testing.T) { filesInArchive := 20 testcases := []struct { doc string filename string modifier TarModifierFunc expected string fileCount int }{ { doc: "Modifier creates a new file", filename: "newfile", modifier: createModifier(t), expected: "the new content", fileCount: filesInArchive + 1, }, { doc: "Modifier replaces a file", filename: "file-2", modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier replaces the last file", filename: fmt.Sprintf("file-%d", filesInArchive-1), modifier: createOrReplaceModifier, expected: "the new content", fileCount: filesInArchive, }, { doc: "Modifier appends to a file", filename: "file-3", modifier: appendModifier, expected: "fooo\nnext line", fileCount: filesInArchive, }, } for _, testcase := range testcases { sourceArchive, cleanup := buildSourceArchive(t, filesInArchive) defer cleanup() resultArchive := ReplaceFileTarWrapper( sourceArchive, map[string]TarModifierFunc{testcase.filename: testcase.modifier}) actual := readFileFromArchive(t, resultArchive, testcase.filename, testcase.fileCount, testcase.doc) assert.Equal(t, testcase.expected, actual, testcase.doc) } } func buildSourceArchive(t *testing.T, numberOfFiles int) (io.ReadCloser, func()) { srcDir, err := ioutil.TempDir("", "docker-test-srcDir") require.NoError(t, err) _, err = prepareUntarSourceDirectory(numberOfFiles, srcDir, false) require.NoError(t, err) sourceArchive, err := TarWithOptions(srcDir, &TarOptions{}) require.NoError(t, err) return sourceArchive, func() { os.RemoveAll(srcDir) sourceArchive.Close() } } func createOrReplaceModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { return &tar.Header{ Mode: 0600, Typeflag: tar.TypeReg, }, []byte("the new content"), nil } func createModifier(t *testing.T) TarModifierFunc { return func(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { assert.Nil(t, content) return createOrReplaceModifier(path, header, content) } } func appendModifier(path string, header *tar.Header, content io.Reader) (*tar.Header, []byte, error) { buffer := bytes.Buffer{} if content != nil { if _, err := buffer.ReadFrom(content); err != nil { return nil, nil, err } } buffer.WriteString("\nnext line") return &tar.Header{Mode: 0600, Typeflag: tar.TypeReg}, buffer.Bytes(), nil } func readFileFromArchive(t *testing.T, archive io.ReadCloser, name string, expectedCount int, doc string) string { destDir, err := ioutil.TempDir("", "docker-test-destDir") require.NoError(t, err) defer os.RemoveAll(destDir) err = Untar(archive, destDir, nil) require.NoError(t, err) files, _ := ioutil.ReadDir(destDir) assert.Len(t, files, expectedCount, doc) content, err := ioutil.ReadFile(filepath.Join(destDir, name)) assert.NoError(t, err) return string(content) }
[ "\"TEMP\"" ]
[]
[ "TEMP" ]
[]
["TEMP"]
go
1
0
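The tarUntar helper in the archive tests above reads a 10-byte prefix, detects the compression from it, and then stitches the consumed bytes back in front of the stream with io.MultiReader. The sketch below illustrates that same magic-byte sniffing technique under stated assumptions: the prefixes are the standard gzip/bzip2/xz signatures, and the helper names (sniffCompression, sniffAndRestore) are illustrative, not Docker's pkg/archive API.

package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"
)

// sniffCompression inspects a magic-byte prefix, mirroring how the tests
// above detect compression before untarring.
func sniffCompression(prefix []byte) string {
	switch {
	case bytes.HasPrefix(prefix, []byte{0x1f, 0x8b}):
		return "gzip"
	case bytes.HasPrefix(prefix, []byte{0x42, 0x5a, 0x68}): // "BZh"
		return "bzip2"
	case bytes.HasPrefix(prefix, []byte{0xfd, '7', 'z', 'X', 'Z', 0x00}):
		return "xz"
	default:
		return "uncompressed"
	}
}

// sniffAndRestore consumes up to 10 bytes to classify the stream, then
// re-attaches them with io.MultiReader so callers still see the full input.
func sniffAndRestore(r io.Reader) (string, io.Reader, error) {
	buf := make([]byte, 10)
	n, err := io.ReadFull(r, buf)
	if err != nil && err != io.ErrUnexpectedEOF {
		return "", nil, err
	}
	return sniffCompression(buf[:n]), io.MultiReader(bytes.NewReader(buf[:n]), r), nil
}

func main() {
	kind, restored, _ := sniffAndRestore(strings.NewReader("plain text payload"))
	rest, _ := io.ReadAll(restored)
	fmt.Println(kind, string(rest)) // uncompressed plain text payload
}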
fifty/commands/train.py
# fifty/commands/train.py
# from .base import Base

import numpy as np
import os
import random
import pandas as pd
import tensorflow as tf

tf.logging.set_verbosity(tf.logging.ERROR)
# os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

from keras.models import Sequential, load_model
from keras.layers import Dense, Embedding, Dropout, MaxPool1D, GlobalAveragePooling1D, Conv1D, LeakyReLU
from keras import callbacks, backend
from keras.utils.np_utils import to_categorical
from keras.utils import multi_gpu_model
# `partial` is functools.partial; importing it from functools is the portable choice.
from functools import partial
from hyperopt import Trials, fmin, hp, tpe, rand

from fifty.utilities.framework import read_files, make_output_folder, load_labels_tags, get_utilities_dir


class Train:
    def __init__(self, options, *args):
        random.seed(random.randint(0, 1000))
        self.input = os.path.abspath(options['<input>'])
        if options['--model-name'] is not None:
            self.model_name = os.path.abspath(options['--model-name'])
        else:
            self.model_name = None
        self.data_dir = options['--data-dir']
        self.percent = float(options['--percent'])
        self.block_size = int(options['--block_size'])
        self.gpu = int(options['--gpus'])
        self.output = options['--output']
        self.verbose = int(options['-v'])
        self.algo = options['--algo']
        self.scale_down = options['--down']
        self.scale_up = options['--up']
        self.max_evals = int(options['--max-evals'])
        # The original referenced `self.scenario`, `self.force` and
        # `self.recursive` without ever defining them; the option keys
        # below are assumptions based on how the values are used.
        self.scenario = int(options.get('--scenario', 1))
        self.force = options.get('--force', False)
        self.recursive = options.get('--recursive', False)
        self.args = options
        self.dataset = ()
        self.last_dense_layer = [75, 11, 25, 5, 2, 2]
        self.no_of_classes = self.last_dense_layer[self.scenario - 1]
        self.df = pd.DataFrame(columns=['dense', 'embed_size', 'filter', 'kernel', 'layers', 'pool', 'accuracy'])

    def run(self):
        self.output = make_output_folder(self.input, self.output, self.force)
        self.train_model()
        if self.input is not None:
            model = self.get_model()
            from fifty.commands.whatis import WhatIs
            classifier = WhatIs(self.args)
            gen_files = read_files(self.input, self.block_size, self.recursive)
            try:
                while True:
                    file, file_name = next(gen_files)
                    pred_probability = classifier.infer(model, file)
                    classifier.output_predictions(pred_probability, file_name)
                    del file, file_name
            except StopIteration:
                # generator exhausted
                pass
        else:
            print('No input file given for inference on trained model.')
        return

    def get_model(self):
        """Finds and returns a relevant pre-trained model"""
        model = None
        if self.model_name is not None:
            try:
                if os.path.isfile(self.model_name):
                    model = load_model(self.model_name)
                else:
                    raise FileNotFoundError('Could not find the specified model! {}'.format(self.model_name))
            except RuntimeError:
                raise RuntimeError('Could not load the specified model! {}'.format(self.model_name))
            if self.verbose == 2:
                print('Loaded model: {}.\nSummary of model:'.format(self.model_name))
                model.summary()
        return model

    def make_new_dataset(self):
        labels, tags = load_labels_tags(1)
        out_data_dir = os.path.join(self.output, 'data')
        os.mkdir(out_data_dir)
        input_data_dir = os.path.join(self.data_dir, '{}_1'.format(self.block_size))
        with open(self.scale_down, 'r') as req_types:
            file_types = []
            for line in req_types:
                file_types.append(line[:-1])
        x, y = np.empty((0, self.block_size), dtype=np.uint8), np.empty(0, dtype=np.uint8)
        for file in ['train.npz', 'val.npz', 'test.npz']:
            data = np.load(os.path.join(input_data_dir, file))
            x, y = np.concatenate((x, data['x'])), np.concatenate((y, data['y']))
        scale_down_x, scale_down_y = np.empty((0, self.block_size), dtype=np.uint8), np.empty(0, dtype=np.uint8)
        for file_type in file_types:
            index_type = labels.index(file_type.lower())
            indices = np.array([i for i in range(len(y)) if y[i] == index_type])
            scale_down_x = np.concatenate((scale_down_x, x[indices]))
            scale_down_y = np.concatenate((scale_down_y, y[indices]))
        del x, y
        indices = np.arange(len(scale_down_y))
        for _ in range(10):
            random.shuffle(indices)
        scale_down_x = scale_down_x[indices]
        scale_down_y = scale_down_y[indices]
        split_train = int(len(scale_down_y) * 0.8)
        split_val = int(len(scale_down_y) * 0.9)
        np.savez_compressed(os.path.join(out_data_dir, 'train.npz'),
                            x=scale_down_x[:split_train], y=scale_down_y[:split_train])
        np.savez_compressed(os.path.join(out_data_dir, 'val.npz'),
                            x=scale_down_x[split_train: split_val], y=scale_down_y[split_train: split_val])
        np.savez_compressed(os.path.join(out_data_dir, 'test.npz'),
                            x=scale_down_x[split_val:], y=scale_down_y[split_val:])
        self.load_dataset(out_data_dir)

    def load_dataset(self, data_dir=None):
        """Loads relevant already prepared FFT-75 dataset"""
        if data_dir is None:
            if self.block_size == 4096:
                model_name = '4k_{}'.format(self.scenario)
            else:
                model_name = '512_{}'.format(self.scenario)
            data_dir = os.path.join(self.data_dir, model_name)
        else:
            self.model_name = 'new_model'
        train_data = np.load(os.path.join(data_dir, 'train.npz'))
        x_train, y_train = train_data['x'], train_data['y']
        one_hot_y_train = to_categorical(y_train)
        print("Training Data loaded with shape: {} and labels with shape - {}".format(x_train.shape, one_hot_y_train.shape))
        val_data = np.load(os.path.join(data_dir, 'val.npz'))
        x_val, y_val = val_data['x'], val_data['y']
        one_hot_y_val = to_categorical(y_val)
        print("Validation Data loaded with shape: {} and labels with shape - {}".format(x_val.shape, one_hot_y_val.shape))
        self.dataset = x_train, one_hot_y_train, x_val, one_hot_y_val

    def get_best(self):
        best_idx = self.df['accuracy'].idxmax()
        best = dict()
        best['dense'] = int(self.df['dense'].loc[best_idx])
        best['embed_size'] = int(self.df['embed_size'].loc[best_idx])
        best['filter'] = int(self.df['filter'].loc[best_idx])
        best['kernel'] = int(self.df['kernel'].loc[best_idx])
        best['layers'] = int(self.df['layers'].loc[best_idx])
        best['pool'] = int(self.df['pool'].loc[best_idx])
        return best

    def train_network(self, parameters):
        print("\nParameters:")
        print(parameters)
        x_train, one_hot_y_train, x_val, one_hot_y_val = self.dataset
        try:
            model = Sequential()
            model.add(Embedding(256, parameters['embed_size'], input_length=block_size))
            for _ in range(parameters['layers']):
                model.add(Conv1D(filters=int(parameters['filter']), kernel_size=parameters['kernel']))
                model.add(LeakyReLU(alpha=0.3))
                model.add(MaxPool1D(parameters['pool']))
            model.add(GlobalAveragePooling1D())
            model.add(Dropout(0.1))
            model.add(Dense(parameters['dense']))
            model.add(LeakyReLU(alpha=0.3))
            model.add(Dense(self.no_of_classes, activation='softmax'))
            callbacks_list = [
                callbacks.EarlyStopping(monitor='val_acc', patience=3, restore_best_weights=True, min_delta=0.01),
                callbacks.ModelCheckpoint(os.path.join(output, '{}.h5'.format(new_model)), monitor='val_acc'),
                callbacks.CSVLogger(filename=os.path.join(output, '{}.log'.format(new_model)), append=True)
            ]
            # transform the model to a parallel one if multiple gpus are available.
            if gpu != 1:
                model = multi_gpu_model(model, gpus=gpu)
            model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])
            model.summary()
            history = model.fit(
                x=x_train[:int(len(x_train) * percent)],
                y=one_hot_y_train[:int(len(x_train) * percent)],
                epochs=1, batch_size=128,
                validation_data=(
                    x_val[:int(len(x_val) * percent)],
                    one_hot_y_val[:int(len(x_val) * percent)]),
                verbose=verbose, callbacks=callbacks_list)
            loss = min(history.history['val_loss'])
            accuracy = max(history.history['val_acc'])
            backend.clear_session()
            parameters['accuracy'] = accuracy
            self.df.loc[len(self.df)] = list(parameters.values())
        except Exception:
            accuracy = 0
            loss = np.inf
        print("Loss: {}".format(loss))
        print("Accuracy: {:.2%}".format(accuracy))
        return loss

    def train_model(self):
        if self.data_dir:
            self.load_dataset(self.data_dir)
        elif self.scale_down:
            self.make_new_dataset()
        elif self.scale_up:
            raise SystemExit(
                'Please refer documentation. Requires you to prepare the dataset on your own and then use -d option.')
        else:
            self.load_dataset()

        # updating global variables. train_network only takes one and only one argument.
        global percent, block_size, scenario, gpu, output, verbose, new_model
        percent = self.percent
        block_size = self.block_size
        scenario = self.scenario
        gpu = self.gpu
        output = self.output
        # The original referenced an undefined `self.new_model`; fall back to
        # the user-supplied model name, or a generic checkpoint name.
        new_model = self.model_name or 'new_model'
        if self.scale_down:
            # train_network reads the output size from self.no_of_classes.
            self.no_of_classes = len(list(open(self.scale_down, 'r')))
        # map the -v/-vv/-vvv count stored in self.verbose to Keras verbosity 0/1/2
        verbose = max(0, min(self.verbose, 3) - 1)

        parameter_space = {
            'layers': hp.choice('layers', [1, 2, 3]),
            'embed_size': hp.choice('embed_size', [16, 32, 48, 64]),
            'filter': hp.choice('filter', [16, 32, 64, 128]),
            'kernel': hp.choice('kernel', [3, 11, 19, 27, 35]),
            'pool': hp.choice('pool', [2, 4, 6, 8]),
            'dense': hp.choice('dense', [16, 32, 64, 128, 256])
        }
        trials = Trials()
        if self.algo.lower() == 'tpe':
            algo = partial(
                tpe.suggest,
                n_EI_candidates=1000,
                gamma=0.2,
                n_startup_jobs=int(0.1 * self.max_evals),
            )
        elif self.algo.lower() == 'rand':
            algo = rand.suggest
        else:
            print('Warning! The requested hyper-parameter algorithm is not supported. Using TPE.')
            algo = partial(
                tpe.suggest,
                n_EI_candidates=1000,
                gamma=0.2,
                n_startup_jobs=int(0.1 * self.max_evals),
            )
        fmin(
            self.train_network,
            trials=trials,
            space=parameter_space,
            algo=algo,
            max_evals=self.max_evals,
            show_progressbar=False
        )
        self.df.to_csv(os.path.join(self.output, 'parameters.csv'))
        best = self.get_best()
        print('\n-------------------------------------\n')
        print('Hyper-parameter space exploration ended.\nRetraining the best again on the full dataset.')
        percent = 1
        self.train_network(best)
        print('The best model has been retrained and saved as {}.'.format(new_model))
[]
[]
[ "TF_CPP_MIN_LOG_LEVEL" ]
[]
["TF_CPP_MIN_LOG_LEVEL"]
python
1
0
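make_new_dataset in the file above prepares its splits by shuffling an index array and slicing the data at the 80% and 90% marks into train/val/test. The Go sketch below shows the same shuffle-then-slice scheme; the label type and fixed seed are illustrative choices, not part of the original tool. A single Fisher-Yates pass via rand.Shuffle is sufficient, whereas the original shuffles the index array ten times for no extra benefit.

package main

import (
	"fmt"
	"math/rand"
)

// splitDataset shuffles a copy of the input once, then cuts it at the
// 80% and 90% marks, mirroring the train/val/test split used above.
func splitDataset(labels []uint8, seed int64) (train, val, test []uint8) {
	rng := rand.New(rand.NewSource(seed))
	shuffled := append([]uint8(nil), labels...)
	rng.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})
	splitTrain := len(shuffled) * 8 / 10
	splitVal := len(shuffled) * 9 / 10
	return shuffled[:splitTrain], shuffled[splitTrain:splitVal], shuffled[splitVal:]
}

func main() {
	data := make([]uint8, 100)
	for i := range data {
		data[i] = uint8(i % 5) // five toy classes
	}
	train, val, test := splitDataset(data, 42)
	fmt.Println(len(train), len(val), len(test)) // 80 10 10
}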
regolith/export.go
package regolith

import (
	"io/fs"
	"os"
	"path/filepath"

	"github.com/otiai10/copy"
)

// GetExportPaths returns file paths for exporting behavior pack and
// resource pack based on exportTarget (a structure with data related to
// export settings) and the name of the project.
func GetExportPaths(
	exportTarget ExportTarget, name string,
) (bpPath string, rpPath string, err error) {
	if exportTarget.Target == "development" {
		comMojang, err := FindMojangDir()
		if err != nil {
			return "", "", WrapError(
				err, "Failed to find \"com.mojang\" directory.")
		}
		// TODO - I don't like the _rp and _bp suffixes. Can we get rid of that?
		// I for example always name my packs "0".
		bpPath = comMojang + "/development_behavior_packs/" + name + "_bp"
		rpPath = comMojang + "/development_resource_packs/" + name + "_rp"
	} else if exportTarget.Target == "preview" {
		comMojang, err := FindPreviewDir()
		if err != nil {
			return "", "", WrapError(
				err, "Failed to find preview \"com.mojang\" directory.")
		}
		// TODO - I don't like the _rp and _bp suffixes. Can we get rid of that?
		// I for example always name my packs "0".
		bpPath = comMojang + "/development_behavior_packs/" + name + "_bp"
		rpPath = comMojang + "/development_resource_packs/" + name + "_rp"
	} else if exportTarget.Target == "exact" {
		bpPath = exportTarget.BpPath
		rpPath = exportTarget.RpPath
	} else if exportTarget.Target == "world" {
		if exportTarget.WorldPath != "" {
			if exportTarget.WorldName != "" {
				return "", "", WrappedError(
					"Using both \"worldName\" and \"worldPath\" is not" +
						" allowed.")
			}
			bpPath = filepath.Join(
				exportTarget.WorldPath, "behavior_packs", name+"_bp")
			rpPath = filepath.Join(
				exportTarget.WorldPath, "resource_packs", name+"_rp")
		} else if exportTarget.WorldName != "" {
			dir, err := FindMojangDir()
			if err != nil {
				return "", "", WrapError(
					err, "Failed to find \"com.mojang\" directory.")
			}
			worlds, err := ListWorlds(dir)
			if err != nil {
				return "", "", WrapError(err, "Failed to list worlds.")
			}
			for _, world := range worlds {
				if world.Name == exportTarget.WorldName {
					bpPath = filepath.Join(
						world.Path, "behavior_packs", name+"_bp")
					rpPath = filepath.Join(
						world.Path, "resource_packs", name+"_rp")
				}
			}
		} else {
			err = WrappedError(
				"The \"world\" export target requires either a " +
					"\"worldName\" or \"worldPath\" property")
		}
	} else if exportTarget.Target == "local" {
		bpPath = "build/BP/"
		rpPath = "build/RP/"
	} else {
		err = WrappedErrorf(
			"Export target %q is not valid", exportTarget.Target)
	}
	return
}

// ExportProject copies files from the tmp paths (tmp/BP and tmp/RP) into
// the project's export target. The paths are generated with GetExportPaths.
func ExportProject(profile Profile, name string, dataPath string) error {
	exportTarget := profile.ExportTarget
	bpPath, rpPath, err := GetExportPaths(exportTarget, name)
	if err != nil {
		return WrapError(
			err, "Failed to generate export paths.")
	}
	// Loading edited_files.json or creating empty object
	editedFiles := LoadEditedFiles()
	err = editedFiles.CheckDeletionSafety(rpPath, bpPath)
	if err != nil {
		return WrapErrorf(
			err,
			"Safety mechanism stopped Regolith to protect unexpected files "+
				"from your export targets.\n"+
				"Did you edit the exported files manually?\n"+
				"Please clear your export paths and try again.\n"+
				"Resource pack export path: %s\n"+
				"Behavior pack export path: %s",
			rpPath, bpPath)
	}
	// Clearing output locations
	// Spooky, I hope file protection works, and it won't do any damage
	err = os.RemoveAll(bpPath)
	if err != nil {
		return WrapErrorf(
			err,
			"Failed to clear behavior pack from build path %q.\n"+
				"Are user permissions correct?", bpPath)
	}
	err = os.RemoveAll(rpPath)
	if err != nil {
		return WrapErrorf(
			err,
			"Failed to clear resource pack from build path %q.\n"+
				"Are user permissions correct?", rpPath)
	}
	err = os.RemoveAll(dataPath)
	if err != nil {
		return WrapErrorf(
			err, "Failed to clear filter data path %q.", dataPath)
	}
	Logger.Infof("Exporting behavior pack to \"%s\".", bpPath)
	err = MoveOrCopy(".regolith/tmp/BP", bpPath, exportTarget.ReadOnly, true)
	if err != nil {
		return WrapError(err, "Failed to export behavior pack.")
	}
	Logger.Infof("Exporting resource pack to \"%s\".", rpPath)
	err = MoveOrCopy(".regolith/tmp/RP", rpPath, exportTarget.ReadOnly, true)
	if err != nil {
		return WrapError(err, "Failed to export resource pack.")
	}
	err = MoveOrCopy(".regolith/tmp/data", dataPath, false, false)
	if err != nil {
		return WrapError(
			err,
			"Failed to move the filter data back to the project's "+
				"data folder.")
	}
	// Update or create edited_files.json
	err = editedFiles.UpdateFromPaths(rpPath, bpPath)
	if err != nil {
		return WrapError(
			err,
			"Failed to create a list of files edited by this 'regolith run'.")
	}
	err = editedFiles.Dump()
	if err != nil {
		return WrapError(
			err, "Failed to update the list of the files edited by Regolith. "+
				"This may cause the next run to fail.")
	}
	return nil
}

// MoveOrCopy tries to move the source to destination first and in case
// of failure it copies the files instead.
func MoveOrCopy(
	source string,
	destination string,
	makeReadOnly bool,
	copyParentAcl bool,
) error {
	if err := os.Rename(source, destination); err != nil {
		Logger.Infof(
			"Couldn't move files to \"%s\".\n"+
				" Trying to copy files instead...", destination)
		copyOptions := copy.Options{PreserveTimes: false, Sync: false}
		err := copy.Copy(source, destination, copyOptions)
		if err != nil {
			return WrapErrorf(
				err, "Couldn't copy data files to \"%s\", aborting.",
				destination)
		}
	} else if copyParentAcl {
		// No errors with moving files but needs ACL copy
		parent := filepath.Dir(destination)
		if _, err := os.Stat(parent); os.IsNotExist(err) {
			return WrapError(
				err,
				"Couldn't copy ACLs - parent directory (used as a source of "+
					"ACL data) doesn't exist.")
		}
		err = copyFileSecurityInfo(parent, destination)
		if err != nil {
			return WrapErrorf(
				err, "Couldn't copy ACLs to the target file \"%s\".",
				destination,
			)
		}
	}
	// Make files read only if this option is selected
	if makeReadOnly {
		err := filepath.WalkDir(destination,
			func(s string, d fs.DirEntry, e error) error {
				if e != nil {
					return WrapErrorf(
						e, "Failed to walk directory \"%s\".", destination)
				}
				if !d.IsDir() {
					os.Chmod(s, 0444)
				}
				return nil
			})
		if err != nil {
			Logger.Warnf(
				"Unable to change file permissions of \"%s\" into read-only",
				destination)
		}
	}
	return nil
}
[]
[]
[]
[]
[]
go
null
null
null
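MoveOrCopy above applies a common pattern: attempt a cheap os.Rename first and fall back to a byte-for-byte copy when the rename fails, for example across filesystems, where rename typically returns EXDEV. The single-file sketch below is a minimal illustration of that fallback, not Regolith's implementation; the function name is made up for the example.

package main

import (
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// moveOrCopyFile tries a rename first; on failure it copies the bytes
// and removes the source only after the copy fully succeeded.
func moveOrCopyFile(src, dst string) error {
	if err := os.Rename(src, dst); err == nil {
		return nil
	}
	in, err := os.Open(src)
	if err != nil {
		return err
	}
	defer in.Close()
	out, err := os.Create(dst)
	if err != nil {
		return err
	}
	defer out.Close()
	if _, err := io.Copy(out, in); err != nil {
		return err
	}
	return os.Remove(src)
}

func main() {
	dir, _ := os.MkdirTemp("", "move-or-copy")
	defer os.RemoveAll(dir)
	src := filepath.Join(dir, "a.txt")
	_ = os.WriteFile(src, []byte("payload"), 0o644)
	fmt.Println(moveOrCopyFile(src, filepath.Join(dir, "b.txt"))) // <nil>
}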
nwModularRiggingTool/Modules/System/animation_UI.py
import pymel.core as pm import os import System.utils as utils import System.controlObject as controlObject from functools import partial reload(utils) reload(controlObject) class Animation_UI: def __init__(self): self.directory = "%s/nwModularRiggingTool" %pm.internalVar(userScriptDir = True) self.previousBlueprintListEntry = None self.previousBlueprintModule = None self.previousAnimationModule = None #baseIconsDir = "%s/Icons/" %self.directory baseIconsDir = "%s/Icons/" %os.environ["RIGGING_TOOL_ROOT"] self.selectedCharacter = self.FindSelectedCharacter() if self.selectedCharacter == None: return self.characterName = self.selectedCharacter.partition("__")[2] self.windowName = "%s_window" %self.characterName # Create UI self.UIElements = {} if pm.window(self.windowName, exists = True): pm.deleteUI(self.windowName) self.windowWidth = 420 self.windowHeight = 730 self.frameColumnHeight = 125 self.UIElements["window"] = pm.window(self.windowName, width = self.windowWidth, height = self.windowHeight, title = "Animation UI: %s" %self.characterName, sizeable = False) self.UIElements["topColumnLayout"] = pm.columnLayout(adjustableColumn = True, rowSpacing = 3, parent = self.UIElements["window"]) buttonWidth = 32 columnOffset = 5 buttonColumnWidth = buttonWidth + (2 * columnOffset) textScrollWidth = (self.windowWidth - buttonColumnWidth - 8) / 2 self.UIElements["listboxRowLayout"] = pm.rowLayout(numberOfColumns = 3, columnWidth3 = [textScrollWidth, textScrollWidth, buttonColumnWidth], columnAttach = ([1, "both", columnOffset], [2, "both", columnOffset], [3, "both", columnOffset]), parent = self.UIElements["topColumnLayout"]) self.UIElements["blueprintModule_textScroll"] = pm.textScrollList(numberOfRows = 12, allowMultiSelection = False, selectCommand = self.RefreshAnimationModuleList, parent = self.UIElements["listboxRowLayout"]) self.InitializeBlueprintModuleList() self.UIElements["animationModule_textScroll"] = pm.textScrollList(numberOfRows = 12, allowMultiSelection = False, selectCommand = self.SetupModuleSpecificControls, parent = self.UIElements["listboxRowLayout"]) self.UIElements["buttonColumnLayout"] = pm.columnLayout(parent = self.UIElements["listboxRowLayout"]) self.UIElements["pinButton"] = pm.symbolCheckBox(onImage = "%s_pinned.xpm" %baseIconsDir, offImage = "%s_unpinned.xpm" %baseIconsDir, width = buttonWidth, height = buttonWidth, onCommand = self.DeleteScriptJob, offCommand = self.SetupScriptjob, parent = self.UIElements["buttonColumnLayout"]) if pm.objExists("%s:non_blueprint_grp" %self.selectedCharacter): value = pm.getAttr("%s:non_blueprint_grp.display" %self.selectedCharacter) self.UIElements["non_blueprintVisibility"] = pm.symbolCheckBox(image = "%s_shelf_character.xpm" %baseIconsDir, value = value, width = buttonWidth, height = buttonWidth, onCommand = self.ToggleNonBlueprintVisibility, offCommand = self.ToggleNonBlueprintVisibility, parent = self.UIElements["buttonColumnLayout"]) value = pm.getAttr("%s:character_grp.animationControlVisibility" %self.selectedCharacter) self.UIElements["animControlVisibility"] = pm.symbolCheckBox(image = "%s_visibility.xpm" %baseIconsDir, value = value, width = buttonWidth, height = buttonWidth, onCommand = self.ToggleAnimControlVisibility, offCommand = self.ToggleAnimControlVisibility, parent = self.UIElements["buttonColumnLayout"]) self.UIElements["deleteModuleButton"] = pm.symbolButton(image = "%s_shelf_delete.xpm" %baseIconsDir, width = buttonWidth, height = buttonWidth, enable = False, command = self.DeleteSelectedModule, parent = 
self.UIElements["buttonColumnLayout"]) self.UIElements["duplicateModuleButton"] = pm.symbolButton(image = "%s_duplicate.xpm" %baseIconsDir, width = buttonWidth, height = buttonWidth, enable = False, parent = self.UIElements["buttonColumnLayout"]) pm.separator(style = "in", parent = self.UIElements["topColumnLayout"]) self.UIElements["activeModuleColumn"] = pm.columnLayout(adjustableColumn = True, parent = self.UIElements["topColumnLayout"]) self.SetupActiveModuleControls() pm.separator(style = "in", parent = self.UIElements["topColumnLayout"]) self.UIElements["matchingButton"] = pm.button(label = "Match Controls to Result", enable = False, parent = self.UIElements["topColumnLayout"]) pm.separator(style = "in", parent = self.UIElements["topColumnLayout"]) pm.rowColumnLayout("module_rowColumn", numberOfRows = 1, rowAttach = [1, "both", 0], rowHeight = [1, self.windowHeight - 395], parent = self.UIElements["topColumnLayout"]) self.UIElements["moduleSpecificControlsScroll"] = pm.scrollLayout(width = self.windowWidth + 10, horizontalScrollBarThickness = 0, parent = "module_rowColumn") self.UIElements["moduleSpecificControlsColumn"] = pm.columnLayout(columnWidth = self.windowWidth, columnAttach = ["both", 5], parent = self.UIElements["moduleSpecificControlsScroll"]) self.RefreshAnimationModuleList() self.SetupScriptjob() pm.showWindow(self.UIElements["window"]) self.SelectionChanged() def InitializeBlueprintModuleList(self): pm.namespace(setNamespace = self.selectedCharacter) blueprintNamespaces = pm.namespaceInfo(listOnlyNamespaces = True) pm.namespace(setNamespace = ":") self.blueprintModules = {} if len(blueprintNamespaces) > 0: for namespace in blueprintNamespaces: blueprintModule = utils.StripLeadingNamespace(namespace)[1] userSpecifiedName = blueprintModule.partition("__")[2] pm.textScrollList(self.UIElements["blueprintModule_textScroll"], edit = True, append = userSpecifiedName) self.blueprintModules[userSpecifiedName] = namespace pm.textScrollList(self.UIElements["blueprintModule_textScroll"], edit = True, selectIndexedItem = 1) selectedBlueprintModule = pm.textScrollList(self.UIElements["blueprintModule_textScroll"], query = True, selectItem = True) self.selectedBlueprintModule = self.blueprintModules[selectedBlueprintModule[0]] def RefreshAnimationModuleList(self, _index = 1): pm.textScrollList(self.UIElements["animationModule_textScroll"], edit = True, removeAll = True) pm.symbolButton(self.UIElements["deleteModuleButton"], edit = True, enable = False) pm.symbolButton(self.UIElements["duplicateModuleButton"], edit = True, enable = False) selectedBlueprintModule = pm.textScrollList(self.UIElements["blueprintModule_textScroll"], query = True, selectItem = True) self.selectedBlueprintModule = self.blueprintModules[selectedBlueprintModule[0]] self.SetupActiveModuleControls() pm.namespace(setNamespace = self.selectedBlueprintModule) controlModuleNamespaces = pm.namespaceInfo(listOnlyNamespaces = True) pm.namespace(setNamespace = ":") if len(controlModuleNamespaces) != 0: for module in controlModuleNamespaces: moduleName = utils.StripAllNamespaces(module)[1] pm.textScrollList(self.UIElements["animationModule_textScroll"], edit = True, append = moduleName) pm.textScrollList(self.UIElements["animationModule_textScroll"], edit = True, selectIndexedItem = _index) pm.symbolButton(self.UIElements["deleteModuleButton"], edit = True, enable = True) pm.symbolButton(self.UIElements["duplicateModuleButton"], edit = True, enable = True) self.SetupModuleSpecificControls() self.previousBlueprintListEntry 
= selectedBlueprintModule def FindSelectedCharacter(self): selection = pm.ls(selection = True, transforms = True) character = None if len(selection) > 0: selected = selection[0] selectedNamespaceInfo = utils.StripLeadingNamespace(selected) if selectedNamespaceInfo != None: selectedNamespace = selectedNamespaceInfo[0] if selectedNamespace.find("Character__") == 0: character = selectedNamespace return character def ToggleNonBlueprintVisibility(self, *args): visibility = not pm.getAttr("%s:non_blueprint_grp.display" %self.selectedCharacter) pm.setAttr("%s:non_blueprint_grp.display" %self.selectedCharacter, visibility) def ToggleAnimControlVisibility(self, *args): visibility = not pm.getAttr("%s:character_grp.animationControlVisibility" %self.selectedCharacter) pm.setAttr("%s:character_grp.animationControlVisibility" %self.selectedCharacter, visibility) def SetupScriptjob(self, *args): self.scriptJobNum = pm.scriptJob(parent = self.UIElements["window"], event = ["SelectionChanged", self.SelectionChanged]) def DeleteScriptJob(self, *args): pm.scriptJob(kill = self.scriptJobNum) def SelectionChanged(self): selection = pm.ls(selection = True, transforms = True) if len(selection) > 0: selectedNode = selection[0] characterNamespaceInfo = utils.StripLeadingNamespace(selectedNode) if characterNamespaceInfo != None and characterNamespaceInfo[0] == self.selectedCharacter: blueprintNamespaceInfo = utils.StripLeadingNamespace(characterNamespaceInfo[1]) if blueprintNamespaceInfo != None: listEntry = blueprintNamespaceInfo[0].partition("__")[2] allEntries = pm.textScrollList(self.UIElements["blueprintModule_textScroll"], query = True, allItems = True) if listEntry in allEntries: pm.textScrollList(self.UIElements["blueprintModule_textScroll"], edit = True, selectItem = listEntry) if listEntry != self.previousBlueprintListEntry: self.RefreshAnimationModuleList() moduleNamespaceInfo = utils.StripLeadingNamespace(blueprintNamespaceInfo[1]) if moduleNamespaceInfo != None: allEntries = pm.textScrollList(self.UIElements["animationModule_textScroll"], query = True, allItems = True) if moduleNamespaceInfo[0] in allEntries: pm.textScrollList(self.UIElements["animationModule_textScroll"], edit = True, selectItem = moduleNamespaceInfo[0]) self.SetupModuleSpecificControls() def SetupActiveModuleControls(self): existingControls = pm.columnLayout(self.UIElements["activeModuleColumn"], query = True, childArray = True) if existingControls != None: pm.deleteUI(existingControls) largeButtonSize = 100 enumOptionWidth = self.windowWidth - (2 * largeButtonSize) self.settingsLocator = "%s:SETTINGS" %self.selectedBlueprintModule activeModuleAttribute = "%s.activeModule" %self.settingsLocator currentEntries = pm.attributeQuery("activeModule", node = self.settingsLocator, listEnum = True) enable = True if currentEntries[0] == "None": enable = False self.UIElements["activeModue_rowLayout"] = pm.rowLayout(numberOfColumns = 3, adjustableColumn = 1, columnAttach3 = ("both", "both", "both"), columnWidth3 = (enumOptionWidth, largeButtonSize, largeButtonSize), parent = self.UIElements["activeModuleColumn"]) attributes = pm.listAttr(self.settingsLocator, keyable = False) weightAttributes = [] for attr in attributes: if attr.find("_weight") != -1: weightAttributes.append(attr) self.UIElements["activeModule"] = pm.attrEnumOptionMenu(label = "Active Module", width = enumOptionWidth, attribute = activeModuleAttribute, changeCommand = partial(self.ActiveModule_enumCallback, weightAttributes), enable = enable, parent = 
self.UIElements["activeModue_rowLayout"]) self.UIElements["keyModuleWeights"] = pm.button(label = "Key All", command = partial(self.KeyModuleWeights, weightAttributes), enable = enable, parent = self.UIElements["activeModue_rowLayout"]) self.UIElements["graphModuleWeights"] = pm.button(label = "Graph Weights", command = self.GraphModuleWeights, enable = enable, parent = self.UIElements["activeModue_rowLayout"]) self.UIElements["moduleWeights_frameLayout"] = pm.frameLayout(collapsable = True, collapse = False, label = "Module Weights", height = 100, collapseCommand = self.ModuleWeights_UICollapse, expandCommand = self.ModuleWeights_UIExpand, parent = self.UIElements["activeModuleColumn"]) pm.scrollLayout("frame_scroll", horizontalScrollBarThickness = 0, parent = self.UIElements["moduleWeights_frameLayout"]) pm.columnLayout("frameScroll_column", adjustableColumn = True, parent = "frame_scroll") pm.attrFieldSliderGrp(attribute = "%s.creationPoseWeight" %self.settingsLocator, enable = False, parent = "frameScroll_column") pm.separator(style = "in", parent = "frameScroll_column") for attr in weightAttributes: self.UIElements[attr] = pm.floatSliderGrp(label = attr, field = True, precision = 4, minValue = 0.0, maxValue = 1.0, value = pm.getAttr("%s.%s" %(self.settingsLocator, attr)), changeCommand = partial(self.ModuleWeights_sliderCallback, attr, weightAttributes), parent = "frameScroll_column") parentUIElement = self.UIElements["moduleWeights_frameLayout"] self.Create_moduleWeightScriptJob(parentUIElement, weightAttributes) self.ModuleWeights_updateMatchingButton() def ModuleWeights_UICollapse(self, *args): pm.columnLayout(self.UIElements["activeModuleColumn"], edit = True, height = 47) def ModuleWeights_UIExpand(self, *args): pm.columnLayout(self.UIElements["activeModuleColumn"], edit = True, height = self.frameColumnHeight) def ActiveModule_enumCallback(self, _weightAttributes, *args): enumValue = args[0] for attr in _weightAttributes: value = 0 if "%s_weight" %enumValue == attr: value = 1 pm.setAttr("%s.%s" %(self.settingsLocator, attr), value) pm.setAttr("%s.creationPoseWeight" %self.settingsLocator, 0) self.ModuleWeights_timeUpdateScriptJobCallback(_weightAttributes) self.ModuleWeights_updateMatchingButton() def ModuleWeights_sliderCallback(self, _controlledAttribute, _weightAttributes, *args): value = float(args[0]) currentTotalWeight = 0.0 for attr in _weightAttributes: if attr != _controlledAttribute: currentTotalWeight += pm.getAttr("%s.%s" %(self.settingsLocator, attr)) if currentTotalWeight + value > 1.0: value = 1.0 - currentTotalWeight pm.setAttr("%s.%s" %(self.settingsLocator, _controlledAttribute), value) pm.floatSliderGrp(self.UIElements[_controlledAttribute], edit = True, value = value) newTotalWeight = currentTotalWeight + value creationPoseWeight = 1.0 - newTotalWeight pm.setAttr("%s.creationPoseWeight" %self.settingsLocator, creationPoseWeight) self.ModuleWeights_updateMatchingButton() def Create_moduleWeightScriptJob(self, _parentUIElement, _weightAttributes): pm.scriptJob(event = ["timeChanged", partial(self.ModuleWeights_timeUpdateScriptJobCallback, _weightAttributes)], parent = _parentUIElement) def ModuleWeights_timeUpdateScriptJobCallback(self, _weightAttributes): for attr in _weightAttributes: value = pm.getAttr("%s.%s" %(self.settingsLocator, attr)) pm.floatSliderGrp(self.UIElements[attr], edit = True, value = value) self.ModuleWeights_updateMatchingButton() def ModuleWeights_updateMatchingButton(self): currentlySelectedModuleInfo = 
pm.textScrollList(self.UIElements["animationModule_textScroll"], query = True, selectItem = True) if len(currentlySelectedModuleInfo) != 0: currentlySelectedModuleNamespace = currentlySelectedModuleInfo[0] moduleWeightValue = pm.getAttr("%s.%s_weight" %(self.settingsLocator, currentlySelectedModuleNamespace)) matchButtonEnable = moduleWeightValue > 0.0001 pm.button(self.UIElements["matchingButton"], edit = True, enable = matchButtonEnable) def KeyModuleWeights(self, _weightAttributes, *args): for attr in _weightAttributes: pm.setKeyframe(self.settingsLocator, attribute = attr, inTangentType = "linear", outTangentType = "linear") pm.setKeyframe(self.settingsLocator, attribute = "creationPoseWeight", inTangentType = "linear", outTangentType = "linear") def GraphModuleWeights(self, *args): import maya.mel as mel pm.select(self.settingsLocator, replace = True) mel.eval('tearOffPanel "Graph Editor" graphEditor true') def SetupModuleSpecificControls(self): currentlySelectedModuleInfo = pm.textScrollList(self.UIElements["animationModule_textScroll"], query = True, selectItem = True) currentlySelectedModuleNamespace = None if len(currentlySelectedModuleInfo) != 0: currentlySelectedModuleNamespace = currentlySelectedModuleInfo[0] if currentlySelectedModuleNamespace == self.previousAnimationModule and self.selectedBlueprintModule == self.previousBlueprintModule: return existingControls = pm.columnLayout(self.UIElements["moduleSpecificControlsColumn"], query = True, childArray = True) if existingControls != None: pm.deleteUI(existingControls) pm.button(self.UIElements["matchingButton"], edit = True, enable = False) pm.setParent(self.UIElements["moduleSpecificControlsColumn"]) moduleNameInfo = utils.FindAllModuleNames("/Modules/Animation") modules = moduleNameInfo[0] moduleNames = moduleNameInfo[1] if len(currentlySelectedModuleInfo) != 0: currentlySelectedModule = currentlySelectedModuleNamespace.rpartition("_")[0] if currentlySelectedModule in moduleNames: moduleWeightValue = pm.getAttr("%s:SETTINGS.%s_weight" %(self.selectedBlueprintModule, currentlySelectedModuleNamespace)) matchButtonEnable = moduleWeightValue > 0.0001 moduleIndex = moduleNames.index(currentlySelectedModule) module = modules[moduleIndex] pm.attrControlGrp(attribute = "%s:%s:module_grp.levelOfDetail" %(self.selectedBlueprintModule, currentlySelectedModuleNamespace), label = "Module LOD") mod = __import__("Animation.%s" %module, (), (), [module]) reload(mod) moduleClass = getattr(mod, mod.CLASS_NAME) moduleInst = moduleClass("%s:%s" %(self.selectedBlueprintModule, currentlySelectedModuleNamespace)) moduleInst.UI(self.UIElements["moduleSpecificControlsColumn"]) self.UIElements["moduleSpecificControls_preferenceFrame"] = pm.frameLayout(borderVisible = False, label = "preferences", collapsable = True, parent = self.UIElements["moduleSpecificControlsColumn"]) self.UIElements["moduleSpecificControls_preferenceColumn"] = pm.columnLayout(columnAttach = ["both", 5], adjustableColumn = True, parent = self.UIElements["moduleSpecificControls_preferenceFrame"]) pm.attrControlGrp(attribute = "%s:%s:module_grp.iconScale" %(self.selectedBlueprintModule, currentlySelectedModuleNamespace), label = "Icon Scale") value = pm.getAttr("%s:%s:module_grp.overrideColor" %(self.selectedBlueprintModule, currentlySelectedModuleNamespace)) + 1 self.UIElements["iconColor"] = pm.colorIndexSliderGrp(label = "Icon Color", maxValue = 32, value = value, changeCommand = partial(self.IconColor_callback, currentlySelectedModuleNamespace), parent = 
self.UIElements["moduleSpecificControls_preferenceColumn"]) moduleInst.UI_preferences(self.UIElements["moduleSpecificControls_preferenceColumn"]) pm.button(self.UIElements["matchingButton"], edit = True, enable = matchButtonEnable, command = moduleInst.Match) self.previousBlueprintModule = self.selectedBlueprintModule self.previousAnimationModule = currentlySelectedModuleNamespace def IconColor_callback(self, _moduleNamespace, *args): value = pm.colorIndexSliderGrp(self.UIElements["iconColor"], query = True, value = True) - 1 pm.setAttr("%s:%s:module_grp.overrideColor" %(self.selectedBlueprintModule, _moduleNamespace), value) def DeleteSelectedModule(self, *args): selectedModule = pm.textScrollList(self.UIElements["animationModule_textScroll"], query = True, selectItem = True)[0] selectedModuleNamespace = "%s:%s" %(self.selectedBlueprintModule, selectedModule) moduleNameInfo = utils.FindAllModuleNames("/Modules/Animation") modules = moduleNameInfo[0] moduleNames = moduleNameInfo[1] selectedModuleName = selectedModule.rpartition("_")[0] if selectedModuleName in moduleNames: moduleIndex = moduleNames.index(selectedModuleName) module = modules[moduleIndex] mod = __import__("Animation.%s" %module, (), (), [module]) reload(mod) moduleClass = getattr(mod, mod.CLASS_NAME) moduleInst = moduleClass(selectedModuleNamespace) moduleInst.Uninstall() self.RefreshAnimationModuleList()
[]
[]
[ "RIGGING_TOOL_ROOT" ]
[]
["RIGGING_TOOL_ROOT"]
python
1
0
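The animation UI above leans on two string conventions from the rigging tool: node names carry colon-separated Maya namespaces (StripLeadingNamespace splits off the part before the first ":"), and a character namespace like "Character__hero" stores the user-given name after the "__" separator (partition("__")[2]). The Go sketch below reimplements both conventions for illustration; the helper names are assumptions, not the tool's real utils module.

package main

import (
	"fmt"
	"strings"
)

// stripLeadingNamespace splits a node name at the first colon,
// mirroring the utils.StripLeadingNamespace call used above.
func stripLeadingNamespace(nodeName string) (namespace, rest string, ok bool) {
	return strings.Cut(nodeName, ":")
}

// userSpecifiedName returns the part after "__", like partition("__")[2].
func userSpecifiedName(namespace string) string {
	_, name, _ := strings.Cut(namespace, "__")
	return name
}

func main() {
	ns, rest, _ := stripLeadingNamespace("Character__hero:module_grp")
	fmt.Println(ns, rest, userSpecifiedName(ns)) // Character__hero module_grp hero
}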
mock.go
package main import ( "context" "encoding/json" "os" tink "github.com/tinkerbell/tink/protos/hardware" "github.com/packethost/cacher/protos/cacher" "google.golang.org/grpc" ) // hardwareGetterMock is a mock implentation of the type hardwareGetterMock struct { hardwareResp string } func (hg hardwareGetterMock) ByIP(ctx context.Context, in getRequest, opts ...grpc.CallOption) (hardware, error) { var hw hardware dataModelVersion := os.Getenv("DATA_MODEL_VERSION") switch dataModelVersion { case "1": hw = &tink.Hardware{} err := json.Unmarshal([]byte(hg.hardwareResp), hw) if err != nil { return nil, err } default: hw = &cacher.Hardware{JSON: hg.hardwareResp} } return hw, nil } func (hg hardwareGetterMock) Watch(ctx context.Context, in getRequest, opts ...grpc.CallOption) (watchClient, error) { // TODO (kdeng3849) return nil, nil } const ( cacherDataModel = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "arch": "x86_64", "name": "node-name", "state": "provisioning", "allow_pxe": true, "allow_workflow": true, "plan_slug": "t1.small.x86", "facility_code": "onprem", "efi_boot": false, "instance": { "storage": { "disks": [ { "device": "/dev/sda", "wipeTable": true, "partitions": [ { "size": 4096, "label": "BIOS", "number": 1 }, { "size": "3993600", "label": "SWAP", "number": 2 }, { "size": 0, "label": "ROOT", "number": 3 } ] } ], "filesystems": [ { "mount": { "point": "/", "create": { "options": ["-L", "ROOT"] }, "device": "/dev/sda3", "format": "ext4" } }, { "mount": { "point": "none", "create": { "options": ["-L", "SWAP"] }, "device": "/dev/sda2", "format": "swap" } } ] }, "crypted_root_password": "$6$qViImWbWFfH/a4pq$s1bpFFXMpQj1eQbHWsruLy6/", "operating_system_version": { "distro": "ubuntu", "version": "16.04", "os_slug": "ubuntu_16_04" } }, "ip_addresses": [ { "cidr": 29, "public": false, "address": "192.168.1.5", "enabled": true, "gateway": "192.168.1.1", "netmask": "255.255.255.248", "network": "192.168.1.0", "address_family": 4 } ], "network_ports": [ { "data": { "mac": "98:03:9b:48:de:bc" }, "name": "eth0", "type": "data" } ] } ` cacherPartitionSizeInt = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": 4096, "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeString = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "3333", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeStringLeadingZeros = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "007", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeWhitespace = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": " \t 1234\n ", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeInterceptingWhitespace = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "12\tmb", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeBLower = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "1000000b", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeBUpper = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "1000000B", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeK = ` { "id": 
"8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "24K", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeKBLower = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "24kb", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeKBUpper = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "24KB", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeKBMixed = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "24Kb", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeM = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "3m", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeTB = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "2TB", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeInvalidSuffix = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "3kmgtb", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeInvalidIntertwined = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "12kb3", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeInvalidIntertwined2 = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "k123b", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeEmpty = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "", "label": "BIOS", "number": 1 } ] } ] } } } ` cacherPartitionSizeReversedPlacement = ` { "id": "8978e7d4-1a55-4845-8a66-a5259236b104", "instance": { "storage": { "disks": [ { "partitions": [ { "size": "mb10", "label": "BIOS", "number": 1 } ] } ] } } } ` tinkerbellDataModel = ` { "network":{ "interfaces":[ { "dhcp":{ "mac":"ec:0d:9a:c0:01:0c", "hostname":"server001", "lease_time":86400, "arch":"x86_64", "ip":{ "address":"192.168.1.5", "netmask":"255.255.255.248", "gateway":"192.168.1.1" } }, "netboot":{ "allow_pxe":true, "allow_workflow":true, "ipxe":{ "url":"http://url/menu.ipxe", "contents":"#!ipxe" }, "osie":{ "kernel":"vmlinuz-x86_64" } } } ] }, "id":"fde7c87c-d154-447e-9fce-7eb7bdec90c0", "metadata":"{\"bonding_mode\":5,\"custom\":{\"preinstalled_operating_system_version\":{},\"private_subnets\":[]},\"facility\":{\"facility_code\":\"ewr1\",\"plan_slug\":\"c2.medium.x86\",\"plan_version_slug\":\"\"},\"instance\":{\"crypted_root_password\":\"redacted/\",\"operating_system_version\":{\"distro\":\"ubuntu\",\"os_slug\":\"ubuntu_18_04\",\"version\":\"18.04\"},\"storage\":{\"disks\":[{\"device\":\"/dev/sda\",\"partitions\":[{\"label\":\"BIOS\",\"number\":1,\"size\":4096},{\"label\":\"SWAP\",\"number\":2,\"size\":3993600},{\"label\":\"ROOT\",\"number\":3,\"size\":0}],\"wipe_table\":true}],\"filesystems\":[{\"mount\":{\"create\":{\"options\":[\"-L\",\"ROOT\"]},\"device\":\"/dev/sda3\",\"format\":\"ext4\",\"point\":\"/\"}},{\"mount\":{\"create\":{\"options\":[\"-L\",\"SWAP\"]},\"device\":\"/dev/sda2\",\"format\":\"swap\",\"point\":\"none\"}}]}},\"manufacturer\":{\"id\":\"\",\"slug\":\"\"},\"state\":\"\"}" } ` 
tinkerbellNoMetadata = ` { "network":{ "interfaces":[ { "dhcp":{ "mac":"ec:0d:9a:c0:01:0c", "hostname":"server001", "lease_time":86400, "arch":"x86_64", "ip":{ "address":"192.168.1.5", "netmask":"255.255.255.248", "gateway":"192.168.1.1" } }, "netboot":{ "allow_pxe":true, "allow_workflow":true, "ipxe":{ "url":"http://url/menu.ipxe", "contents":"#!ipxe" }, "osie":{ "kernel":"vmlinuz-x86_64" } } } ] }, "id":"363115b0-f03d-4ce5-9a15-5514193d131a" } ` )
[ "\"DATA_MODEL_VERSION\"" ]
[]
[ "DATA_MODEL_VERSION" ]
[]
["DATA_MODEL_VERSION"]
go
1
0
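The mock above dispatches on the DATA_MODEL_VERSION environment variable: version "1" parses the canned JSON into the tinkerbell hardware proto, anything else wraps the raw JSON for cacher. The sketch below shows that env-switched decoding with two stand-in payload types, so it stays self-contained; the type names are illustrative and not the real tink/cacher protos.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Stand-in payload types for the two data models.
type tinkHardware struct {
	ID string `json:"id"`
}
type cacherHardware struct {
	JSON string
}

// hardwareFor mirrors the mock's dispatch: parse structured JSON for
// data model "1", otherwise pass the raw JSON through verbatim.
func hardwareFor(raw string) (interface{}, error) {
	if os.Getenv("DATA_MODEL_VERSION") == "1" {
		hw := &tinkHardware{}
		if err := json.Unmarshal([]byte(raw), hw); err != nil {
			return nil, err
		}
		return hw, nil
	}
	return &cacherHardware{JSON: raw}, nil
}

func main() {
	os.Setenv("DATA_MODEL_VERSION", "1")
	hw, err := hardwareFor(`{"id":"fde7c87c-d154-447e-9fce-7eb7bdec90c0"}`)
	fmt.Println(hw, err) // &{fde7c87c-d154-447e-9fce-7eb7bdec90c0} <nil>
}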
visual-chatbot/manage.py
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "visdial.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
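manage.py above uses os.environ.setdefault, which sets DJANGO_SETTINGS_MODULE only when the variable is not already present, so a value exported by the caller always wins. Go's standard library has no direct equivalent; the sketch below shows one way to express the same set-if-unset behavior, with an illustrative helper name.

package main

import (
	"fmt"
	"os"
)

// setenvDefault is a Go counterpart of os.environ.setdefault: it sets
// the variable only when it is not already set in the environment.
func setenvDefault(key, value string) error {
	if _, ok := os.LookupEnv(key); ok {
		return nil
	}
	return os.Setenv(key, value)
}

func main() {
	_ = setenvDefault("DJANGO_SETTINGS_MODULE", "visdial.settings")
	fmt.Println(os.Getenv("DJANGO_SETTINGS_MODULE")) // visdial.settings
}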
integration-cli/docker_cli_build_test.go
package main import ( "archive/tar" "bytes" "encoding/json" "fmt" "io/ioutil" "os" "os/exec" "path/filepath" "reflect" "regexp" "runtime" "strconv" "strings" "text/template" "time" "github.com/docker/docker/builder/dockerfile/command" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/integration/checker" "github.com/docker/docker/pkg/stringutils" "github.com/go-check/check" ) func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { name := "testbuildjsonemptyrun" _, err := buildImage( name, ` FROM busybox RUN [] `, true) if err != nil { c.Fatal("error when dealing with a RUN statement with empty JSON array") } } func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) { name := "testbuildshcmdjsonentrypoint" _, err := buildImage( name, ` FROM busybox ENTRYPOINT ["echo"] CMD echo test `, true) if err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "run", "--rm", name) if daemonPlatform == "windows" { if !strings.Contains(out, "cmd /S /C echo test") { c.Fatalf("CMD did not contain cmd /S /C echo test : %q", out) } } else { if strings.TrimSpace(out) != "/bin/sh -c echo test" { c.Fatalf("CMD did not contain /bin/sh -c : %q", out) } } } func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) { // Windows does not support FROM scratch or the USER command testRequires(c, DaemonIsLinux) name := "testbuildenvironmentreplacement" _, err := buildImage(name, ` FROM scratch ENV user foo USER ${user} `, true) if err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "Config.User") if res != `"foo"` { c.Fatal("User foo from environment not in Config.User on image") } } func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) { name := "testbuildenvironmentreplacement" var volumePath string if daemonPlatform == "windows" { volumePath = "c:/quux" } else { volumePath = "/quux" } _, err := buildImage(name, ` FROM `+minimalBaseImage()+` ENV volume `+volumePath+` VOLUME ${volume} `, true) if err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "Config.Volumes") var volumes map[string]interface{} if err := json.Unmarshal([]byte(res), &volumes); err != nil { c.Fatal(err) } if _, ok := volumes[volumePath]; !ok { c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image") } } func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { // Windows does not support FROM scratch or the EXPOSE command testRequires(c, DaemonIsLinux) name := "testbuildenvironmentreplacement" _, err := buildImage(name, ` FROM scratch ENV port 80 EXPOSE ${port} ENV ports " 99 100 " EXPOSE ${ports} `, true) if err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "Config.ExposedPorts") var exposedPorts map[string]interface{} if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { c.Fatal(err) } exp := []int{80, 99, 100} for _, p := range exp { tmp := fmt.Sprintf("%d/tcp", p) if _, ok := exposedPorts[tmp]; !ok { c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p) } } } func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { name := "testbuildenvironmentreplacement" _, err := buildImage(name, ` FROM busybox ENV MYWORKDIR /work RUN mkdir ${MYWORKDIR} WORKDIR ${MYWORKDIR} `, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { name := "testbuildenvironmentreplacement" ctx, err := fakeContext(` FROM `+minimalBaseImage()+` ENV baz foo ENV quux bar ENV dot . 
ENV fee fff ENV gee ggg ADD ${baz} ${dot} COPY ${quux} ${dot} ADD ${zzz:-${fee}} ${dot} COPY ${zzz:-${gee}} ${dot} `, map[string]string{ "foo": "test1", "bar": "test2", "fff": "test3", "ggg": "test4", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) { // ENV expansions work differently in Windows testRequires(c, DaemonIsLinux) name := "testbuildenvironmentreplacement" _, err := buildImage(name, ` FROM busybox ENV foo zzz ENV bar ${foo} ENV abc1='$foo' ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}" RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo) ENV abc2="\$foo" RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo) ENV abc3 '$foo' RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo) ENV abc4 "\$foo" RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo) `, true) if err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "Config.Env") envResult := []string{} if err = unmarshalJSON([]byte(res), &envResult); err != nil { c.Fatal(err) } found := false envCount := 0 for _, env := range envResult { parts := strings.SplitN(env, "=", 2) if parts[0] == "bar" { found = true if parts[1] != "zzz" { c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1]) } } else if strings.HasPrefix(parts[0], "env") { envCount++ if parts[1] != "zzz" { c.Fatalf("%s should be 'zzz' but instead it's %q", parts[0], parts[1]) } } } if !found { c.Fatal("Never found the `bar` env variable") } if envCount != 4 { c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult) } } func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) { // The volume paths used in this test are invalid on Windows testRequires(c, DaemonIsLinux) name := "testbuildhandleescapes" _, err := buildImage(name, ` FROM scratch ENV FOO bar VOLUME ${FOO} `, true) if err != nil { c.Fatal(err) } var result map[string]map[string]struct{} res := inspectFieldJSON(c, name, "Config.Volumes") if err = unmarshalJSON([]byte(res), &result); err != nil { c.Fatal(err) } if _, ok := result["bar"]; !ok { c.Fatal("Could not find volume bar set from env foo in volumes table") } deleteImages(name) _, err = buildImage(name, ` FROM scratch ENV FOO bar VOLUME \${FOO} `, true) if err != nil { c.Fatal(err) } res = inspectFieldJSON(c, name, "Config.Volumes") if err = unmarshalJSON([]byte(res), &result); err != nil { c.Fatal(err) } if _, ok := result["${FOO}"]; !ok { c.Fatal("Could not find volume ${FOO} set from env foo in volumes table") } deleteImages(name) // this test in particular provides *7* backslashes and expects 6 to come back. // Like above, the first escape is swallowed and the rest are treated as // literals, this one is just less obvious because of all the character noise.
	_, err = buildImage(name, `
  FROM scratch
  ENV FOO bar
  VOLUME \\\\\\\${FOO}
  `, true)
	if err != nil {
		c.Fatal(err)
	}

	res = inspectFieldJSON(c, name, "Config.Volumes")

	if err = unmarshalJSON([]byte(res), &result); err != nil {
		c.Fatal(err)
	}

	if _, ok := result[`\\\${FOO}`]; !ok {
		c.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result)
	}
}

func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) {
	name := "testbuildonbuildlowercase"
	name2 := "testbuildonbuildlowercase2"

	_, err := buildImage(name, `
  FROM busybox
  onbuild run echo quux
  `, true)
	if err != nil {
		c.Fatal(err)
	}

	_, out, err := buildImageWithOut(name2, fmt.Sprintf(`
  FROM %s
  `, name), true)
	if err != nil {
		c.Fatal(err)
	}

	if !strings.Contains(out, "quux") {
		c.Fatalf("Did not receive the expected echo text, got %s", out)
	}

	if strings.Contains(out, "ONBUILD ONBUILD") {
		c.Fatalf("Got an ONBUILD ONBUILD error when none was expected: got %s", out)
	}
}

func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) {
	// ENV expansions work differently in Windows
	testRequires(c, DaemonIsLinux)
	name := "testbuildenvescapes"
	_, err := buildImage(name, `
    FROM busybox
    ENV TEST foo
    CMD echo \$
    `, true)
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "-t", name)

	if strings.TrimSpace(out) != "$" {
		c.Fatalf("Escaped $ in CMD was not preserved as a literal dollar sign: got %q", strings.TrimSpace(out))
	}
}

func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) {
	// ENV expansions work differently in Windows
	testRequires(c, DaemonIsLinux)
	name := "testbuildenvoverwrite"

	_, err := buildImage(name, `
    FROM busybox
    ENV TEST foo
    CMD echo ${TEST}
    `, true)
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name)

	if strings.TrimSpace(out) != "bar" {
		c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out))
	}
}
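// The next three tests commit a source image whose OnBuild triggers use
// forbidden instructions (MAINTAINER, FROM, and a chained ONBUILD,
// respectively) and expect a build FROM that image to fail with a
// descriptive error.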
func (s *DockerSuite) TestBuildOnBuildForbiddenMaintainerInSourceImage(c *check.C) {
	name := "testbuildonbuildforbiddenmaintainerinsourceimage"

	out, _ := dockerCmd(c, "create", "busybox", "true")

	cleanedContainerID := strings.TrimSpace(out)

	dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"MAINTAINER docker.io\"]}", cleanedContainerID, "onbuild")

	_, err := buildImage(name, `FROM onbuild`, true)
	if err != nil {
		if !strings.Contains(err.Error(), "maintainer isn't allowed as an ONBUILD trigger") {
			c.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err)
		}
	} else {
		c.Fatal("Error must not be nil")
	}
}

func (s *DockerSuite) TestBuildOnBuildForbiddenFromInSourceImage(c *check.C) {
	name := "testbuildonbuildforbiddenfrominsourceimage"

	out, _ := dockerCmd(c, "create", "busybox", "true")

	cleanedContainerID := strings.TrimSpace(out)

	dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"FROM busybox\"]}", cleanedContainerID, "onbuild")

	_, err := buildImage(name, `FROM onbuild`, true)
	if err != nil {
		if !strings.Contains(err.Error(), "from isn't allowed as an ONBUILD trigger") {
			c.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err)
		}
	} else {
		c.Fatal("Error must not be nil")
	}
}

func (s *DockerSuite) TestBuildOnBuildForbiddenChainedInSourceImage(c *check.C) {
	name := "testbuildonbuildforbiddenchainedinsourceimage"

	out, _ := dockerCmd(c, "create", "busybox", "true")

	cleanedContainerID := strings.TrimSpace(out)

	dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"ONBUILD RUN ls\"]}", cleanedContainerID, "onbuild")

	_, err := buildImage(name, `FROM onbuild`, true)
	if err != nil {
		if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") {
			c.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err)
		}
	} else {
		c.Fatal("Error must not be nil")
	}
}

func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) {
	name1 := "onbuildcmd"
	name2 := "onbuildgenerated"

	_, err := buildImage(name1, `
FROM busybox
ONBUILD CMD ["hello world"]
ONBUILD ENTRYPOINT ["echo"]
ONBUILD RUN ["true"]`,
		false)
	if err != nil {
		c.Fatal(err)
	}

	_, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false)
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", name2)

	if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
		c.Fatalf("did not get echo output from onbuild. Got: %q", out)
	}
}

func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) {
	name1 := "onbuildcmd"
	name2 := "onbuildgenerated"

	_, err := buildImage(name1, `
FROM busybox
ONBUILD ENTRYPOINT ["echo"]`,
		false)
	if err != nil {
		c.Fatal(err)
	}

	_, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false)
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", name2)

	if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) {
		c.Fatal("got malformed output from onbuild", out)
	}
}

func (s *DockerSuite) TestBuildCacheAdd(c *check.C) {
	testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
	name := "testbuildtwoimageswithadd"
	server, err := fakeStorage(map[string]string{
		"robots.txt": "hello",
		"index.html": "world",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer server.Close()

	if _, err := buildImage(name,
		fmt.Sprintf(`FROM scratch
		ADD %s/robots.txt /`, server.URL()),
		true); err != nil {
		c.Fatal(err)
	}

	deleteImages(name)

	_, out, err := buildImageWithOut(name,
		fmt.Sprintf(`FROM scratch
		ADD %s/index.html /`, server.URL()),
		true)
	if err != nil {
		c.Fatal(err)
	}

	if strings.Contains(out, "Using cache") {
		c.Fatal("2nd build used cache on ADD, it shouldn't")
	}
}

func (s *DockerSuite) TestBuildLastModified(c *check.C) {
	testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
	name := "testbuildlastmodified"

	server, err := fakeStorage(map[string]string{
		"file": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer server.Close()

	var out, out2 string

	dFmt := `FROM busybox
ADD %s/file /
RUN ls -le /file`

	dockerfile := fmt.Sprintf(dFmt, server.URL())

	if _, out, err = buildImageWithOut(name, dockerfile, false); err != nil {
		c.Fatal(err)
	}

	originMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out)
	// Make sure our regexp is correct
	if strings.Index(originMTime, "/file") < 0 {
		c.Fatalf("Missing ls info on 'file':\n%s", out)
	}

	// Build it again and make sure the mtime of the file didn't change.
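	// (Presumably the builder caches a remote ADD on the fetched content's
	// checksum rather than on timestamps, so serving the same bytes again
	// should reuse the cached layer -- stated here as an assumption about
	// the cache's keying, not a guarantee.)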
// Wait a few seconds to make sure the time changed enough to notice time.Sleep(2 * time.Second) if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { c.Fatal(err) } newMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out2) if newMTime != originMTime { c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", originMTime, newMTime) } // Now 'touch' the file and make sure the timestamp DID change this time // Create a new fakeStorage instead of just using Add() to help windows server, err = fakeStorage(map[string]string{ "file": "hello", }) if err != nil { c.Fatal(err) } defer server.Close() dockerfile = fmt.Sprintf(dFmt, server.URL()) if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { c.Fatal(err) } newMTime = regexp.MustCompile(`root.*/file.*\n`).FindString(out2) if newMTime == originMTime { c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", originMTime, newMTime) } } func (s *DockerSuite) TestBuildSixtySteps(c *check.C) { testRequires(c, DaemonIsLinux) // TODO Windows: This test passes on Windows, // but currently adds a disproportionate amount of time for the value it has. // Removing it from Windows CI for now, but this will be revisited in the // TP5 timeframe when perf is better. name := "foobuildsixtysteps" ctx, err := fakeContext("FROM "+minimalBaseImage()+"\n"+strings.Repeat("ADD foo /\n", 60), map[string]string{ "foo": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testaddimg" ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists ADD test_file / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // Issue #3960: "ADD src ." 
hangs func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { name := "testaddsinglefiletoworkdir" ctx, err := fakeContext(`FROM busybox ADD test_file .`, map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() errChan := make(chan error) go func() { _, err := buildImageFromContext(name, ctx, true) errChan <- err close(errChan) }() select { case <-time.After(15 * time.Second): c.Fatal("Build with adding to workdir timed out") case err := <-errChan: c.Assert(err, check.IsNil) } } func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testaddsinglefiletoexistdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists ADD test_file /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test server, err := fakeStorage(map[string]string{ "robots.txt": "hello", }) if err != nil { c.Fatal(err) } defer server.Close() name := "testcopymultiplefilestofile" ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists COPY test_file1 test_file2 /exists/ ADD test_file3 test_file4 %s/robots.txt /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] `, server.URL()), map[string]string{ "test_file1": "test1", "test_file2": "test2", "test_file3": "test3", "test_file4": "test4", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // This test is mainly for user namespaces to verify that new directories // are created as the remapped root uid/gid pair func (s *DockerSuite) TestBuildAddToNewDestination(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testaddtonewdest" ctx, err := fakeContext(`FROM busybox ADD . 
/new_dir RUN ls -l / RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, map[string]string{ "test_dir/test_file": "test file", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // This test is mainly for user namespaces to verify that new directories // are created as the remapped root uid/gid pair func (s *DockerSuite) TestBuildCopyToNewParentDirectory(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testcopytonewdir" ctx, err := fakeContext(`FROM busybox COPY test_dir /new_dir RUN ls -l /new_dir RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, map[string]string{ "test_dir/test_file": "test file", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // This test is mainly for user namespaces to verify that new directories // are created as the remapped root uid/gid pair func (s *DockerSuite) TestBuildWorkdirIsContainerRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testworkdirownership" if _, err := buildImage(name, `FROM busybox WORKDIR /new_dir RUN ls -l / RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) { testRequires(c, DaemonIsLinux) // Not currently passing on Windows name := "testaddfilewithwhitespace" ctx, err := fakeContext(`FROM busybox RUN mkdir "/test dir" RUN mkdir "/test_dir" ADD [ "test file1", "/test_file1" ] ADD [ "test_file2", "/test file2" ] ADD [ "test file3", "/test file3" ] ADD [ "test dir/test_file4", "/test_dir/test_file4" ] ADD [ "test_dir/test_file5", "/test dir/test_file5" ] ADD [ "test dir/test_file6", "/test dir/test_file6" ] RUN [ $(cat "/test_file1") = 'test1' ] RUN [ $(cat "/test file2") = 'test2' ] RUN [ $(cat "/test file3") = 'test3' ] RUN [ $(cat "/test_dir/test_file4") = 'test4' ] RUN [ $(cat "/test dir/test_file5") = 'test5' ] RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, map[string]string{ "test file1": "test1", "test_file2": "test2", "test file3": "test3", "test dir/test_file4": "test4", "test_dir/test_file5": "test5", "test dir/test_file6": "test6", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) { testRequires(c, DaemonIsLinux) // Not currently passing on Windows name := "testcopyfilewithwhitespace" ctx, err := fakeContext(`FROM busybox RUN mkdir "/test dir" RUN mkdir "/test_dir" COPY [ "test file1", "/test_file1" ] COPY [ "test_file2", "/test file2" ] COPY [ "test file3", "/test file3" ] COPY [ "test dir/test_file4", "/test_dir/test_file4" ] COPY [ "test_dir/test_file5", "/test dir/test_file5" ] COPY [ "test dir/test_file6", "/test dir/test_file6" ] RUN [ $(cat "/test_file1") = 'test1' ] RUN [ $(cat "/test file2") = 'test2' ] RUN [ $(cat "/test file3") = 'test3' ] RUN [ $(cat "/test_dir/test_file4") = 'test4' ] RUN [ $(cat "/test dir/test_file5") = 'test5' ] RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, map[string]string{ "test file1": "test1", "test_file2": "test2", "test file3": "test3", "test dir/test_file4": "test4", "test_dir/test_file5": "test5", "test dir/test_file6": "test6", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != 
nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet name := "testcopywildcard" server, err := fakeStorage(map[string]string{ "robots.txt": "hello", "index.html": "world", }) if err != nil { c.Fatal(err) } defer server.Close() ctx, err := fakeContext(fmt.Sprintf(`FROM busybox COPY file*.txt /tmp/ RUN ls /tmp/file1.txt /tmp/file2.txt RUN mkdir /tmp1 COPY dir* /tmp1/ RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file RUN mkdir /tmp2 ADD dir/*dir %s/robots.txt /tmp2/ RUN ls /tmp2/nest_nest_file /tmp2/robots.txt `, server.URL()), map[string]string{ "file1.txt": "test1", "file2.txt": "test2", "dir/nested_file": "nested file", "dir/nested_dir/nest_nest_file": "2 times nested", "dirt": "dirty", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } // Now make sure we use a cache the 2nd time id2, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } if id1 != id2 { c.Fatal("didn't use the cache") } } func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) { name := "testcopywildcardinname" ctx, err := fakeContext(`FROM busybox COPY *.txt /tmp/ RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ] `, map[string]string{"*.txt": "hi there"}) if err != nil { // Normally we would do c.Fatal(err) here but given that // the odds of this failing are so rare, it must be because // the OS we're running the client on doesn't support * in // filenames (like windows). So, instead of failing the test // just let it pass. Then we don't need to explicitly // say which OSs this works on or not. return } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { c.Fatalf("should have built: %q", err) } } func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { name := "testcopywildcardcache" ctx, err := fakeContext(`FROM busybox COPY file1.txt /tmp/`, map[string]string{ "file1.txt": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() id1, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } // Now make sure we use a cache the 2nd time even with wild cards. 
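	// (The instruction changes from "COPY file1.txt" to "COPY file*.txt", but
	// the wildcard resolves to the same single file; assuming the cache keys
	// on the resolved source checksum, the rebuild should still be a cache
	// hit.)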
// Use the same context so the file is the same and the checksum will match ctx.Add("Dockerfile", `FROM busybox COPY file*.txt /tmp/`) id2, err := buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } if id1 != id2 { c.Fatal("didn't use the cache") } } func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testaddsinglefiletononexistingdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists ADD test_file /test_dir/ RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testadddircontenttoroot" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists ADD test_dir / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testadddircontenttoexistingdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists ADD test_dir/ /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testaddwholedirtoroot" ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists ADD test_dir /test_dir RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // Testing #5941 func 
(s *DockerSuite) TestBuildAddEtcToRoot(c *check.C) { name := "testaddetctoroot" ctx, err := fakeContext(`FROM `+minimalBaseImage()+` ADD . /`, map[string]string{ "etc/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // Testing #9401 func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testaddpreservesfilesspecialbits" ctx, err := fakeContext(`FROM busybox ADD suidbin /usr/bin/suidbin RUN chmod 4755 /usr/bin/suidbin RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] ADD ./data/ / RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, map[string]string{ "suidbin": "suidbin", "/data/usr/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testcopysinglefiletoroot" ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists COPY test_file / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // Issue #3960: "ADD src ." hangs - adapted for COPY func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { name := "testcopysinglefiletoworkdir" ctx, err := fakeContext(`FROM busybox COPY test_file .`, map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() errChan := make(chan error) go func() { _, err := buildImageFromContext(name, ctx, true) errChan <- err close(errChan) }() select { case <-time.After(15 * time.Second): c.Fatal("Build with adding to workdir timed out") case err := <-errChan: c.Assert(err, check.IsNil) } } func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testcopysinglefiletoexistdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists COPY test_file /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testcopysinglefiletononexistdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio /exists COPY test_file /test_dir/ RUN [ 
$(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testcopydircontenttoroot" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists COPY test_dir / RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testcopydircontenttoexistdir" ctx, err := fakeContext(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN mkdir /exists RUN touch /exists/exists_file RUN chown -R dockerio.dockerio /exists COPY test_dir/ /exists/ RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { testRequires(c, DaemonIsLinux) // Linux specific test name := "testcopywholedirtoroot" ctx, err := fakeContext(fmt.Sprintf(`FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd RUN echo 'dockerio:x:1001:' >> /etc/group RUN touch /exists RUN chown dockerio.dockerio exists COPY test_dir /test_dir RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), map[string]string{ "test_dir/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) { name := "testcopyetctoroot" ctx, err := fakeContext(`FROM `+minimalBaseImage()+` COPY . 
/`, map[string]string{ "etc/test_file": "test1", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { testRequires(c, DaemonIsLinux) // Not currently working on Windows dockerfile := ` FROM scratch ADD links.tar / ADD foo.txt /symlink/ ` targetFile := "foo.txt" var ( name = "test-link-absolute" ) ctx, err := fakeContext(dockerfile, nil) if err != nil { c.Fatal(err) } defer ctx.Close() tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") if err != nil { c.Fatalf("failed to create temporary directory: %s", tempDir) } defer os.RemoveAll(tempDir) var symlinkTarget string if runtime.GOOS == "windows" { var driveLetter string if abs, err := filepath.Abs(tempDir); err != nil { c.Fatal(err) } else { driveLetter = abs[:1] } tempDirWithoutDrive := tempDir[2:] symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive) } else { symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) } tarPath := filepath.Join(ctx.Dir, "links.tar") nonExistingFile := filepath.Join(tempDir, targetFile) fooPath := filepath.Join(ctx.Dir, targetFile) tarOut, err := os.Create(tarPath) if err != nil { c.Fatal(err) } tarWriter := tar.NewWriter(tarOut) header := &tar.Header{ Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: symlinkTarget, Mode: 0755, Uid: 0, Gid: 0, } err = tarWriter.WriteHeader(header) if err != nil { c.Fatal(err) } tarWriter.Close() tarOut.Close() foo, err := os.Create(fooPath) if err != nil { c.Fatal(err) } defer foo.Close() if _, err := foo.WriteString("test"); err != nil { c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } } func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox const ( dockerfileTemplate = ` FROM busybox RUN ln -s /../../../../../../../../%s /x VOLUME /x ADD foo.txt /x/` targetFile = "foo.txt" ) var ( name = "test-link-absolute-volume" dockerfile = "" ) tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") if err != nil { c.Fatalf("failed to create temporary directory: %s", tempDir) } defer os.RemoveAll(tempDir) dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) nonExistingFile := filepath.Join(tempDir, targetFile) ctx, err := fakeContext(dockerfile, nil) if err != nil { c.Fatal(err) } defer ctx.Close() fooPath := filepath.Join(ctx.Dir, targetFile) foo, err := os.Create(fooPath) if err != nil { c.Fatal(err) } defer foo.Close() if _, err := foo.WriteString("test"); err != nil { c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) } } // Issue #5270 - ensure we throw a better error than "unexpected EOF" // when we can't access files in the context. func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows { name := "testbuildinaccessiblefiles" ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", map[string]string{"fileWithoutReadAccess": "foo"}) if err != nil { c.Fatal(err) } defer ctx.Close() // This is used to ensure we detect inaccessible files early during build in the cli client pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { c.Fatalf("failed to chown file to root: %s", err) } if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { c.Fatalf("failed to chmod file to 700: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err == nil { c.Fatalf("build should have failed: %s %s", err, out) } // check if we've detected the failure before we started building if !strings.Contains(out, "no permission to read from ") { c.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) } if !strings.Contains(out, "Error checking context") { c.Fatalf("output should've contained the string: Error checking context") } } { name := "testbuildinaccessibledirectory" ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) if err != nil { c.Fatal(err) } defer ctx.Close() // This is used to ensure we detect inaccessible directories early during build in the cli client pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { c.Fatalf("failed to chown directory to root: %s", err) } if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { c.Fatalf("failed to chmod directory to 444: %s", err) } if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { c.Fatalf("failed to chmod file to 700: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir out, _, err := runCommandWithOutput(buildCmd) if err == nil { c.Fatalf("build should have failed: %s %s", err, out) } // check if we've detected the failure before we started building if !strings.Contains(out, "can't stat") { c.Fatalf("output should've contained the string: can't access %s", out) } if !strings.Contains(out, "Error checking context") { c.Fatalf("output should've contained the string: Error checking context\ngot:%s", out) } } { name := "testlinksok" ctx, err := fakeContext("FROM scratch\nADD . /foo/", nil) if err != nil { c.Fatal(err) } defer ctx.Close() target := "../../../../../../../../../../../../../../../../../../../azA" if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { c.Fatal(err) } defer os.Remove(target) // This is used to ensure we don't follow links when checking if everything in the context is accessible // This test doesn't require that we run commands as an unprivileged user if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } { name := "testbuildignoredinaccessible" ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", map[string]string{ "directoryWeCantStat/bar": "foo", ".dockerignore": "directoryWeCantStat", }) if err != nil { c.Fatal(err) } defer ctx.Close() // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { c.Fatalf("failed to chown directory to root: %s", err) } if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { c.Fatalf("failed to chmod directory to 755: %s", err) } if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { c.Fatalf("failed to chmod file to 444: %s", err) } buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) buildCmd.Dir = ctx.Dir if out, _, err := runCommandWithOutput(buildCmd); err != nil { c.Fatalf("build should have worked: %s %s", err, out) } } } func (s *DockerSuite) TestBuildForceRm(c *check.C) { containerCountBefore, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } name := "testbuildforcerm" ctx, err := fakeContext(`FROM `+minimalBaseImage()+` RUN true RUN thiswillfail`, nil) if err != nil { c.Fatal(err) } defer ctx.Close() dockerCmdInDir(c, ctx.Dir, "build", "-t", name, "--force-rm", ".") containerCountAfter, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { c.Fatalf("--force-rm shouldn't have left containers behind") } } func (s *DockerSuite) TestBuildRm(c *check.C) { name := "testbuildrm" ctx, err := fakeContext(`FROM `+minimalBaseImage()+` ADD foo / ADD foo /`, map[string]string{"foo": "bar"}) if err != nil { c.Fatal(err) } defer ctx.Close() { containerCountBefore, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".") if err != nil { c.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { c.Fatalf("-rm shouldn't have left containers behind") } deleteImages(name) } { containerCountBefore, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".") if err != nil { c.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } if containerCountBefore != containerCountAfter { c.Fatalf("--rm shouldn't have left containers behind") } deleteImages(name) } { containerCountBefore, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".") if err != nil { c.Fatal("failed to build the image", out) } containerCountAfter, err := getContainerCount() if err != nil { c.Fatalf("failed to get the container count: %s", err) } if containerCountBefore == containerCountAfter { c.Fatalf("--rm=false should have left containers behind") } deleteImages(name) } } func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { testRequires(c, 
DaemonIsLinux) // Invalid volume paths on Windows var ( result map[string]map[string]struct{} name = "testbuildvolumes" emptyMap = make(map[string]struct{}) expected = map[string]map[string]struct{}{ "/test1": emptyMap, "/test2": emptyMap, "/test3": emptyMap, "/test4": emptyMap, "/test5": emptyMap, "/test6": emptyMap, "[/test7": emptyMap, "/test8]": emptyMap, } ) _, err := buildImage(name, `FROM scratch VOLUME /test1 VOLUME /test2 VOLUME /test3 /test4 VOLUME ["/test5", "/test6"] VOLUME [/test7 /test8] `, true) if err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "Config.Volumes") err = unmarshalJSON([]byte(res), &result) if err != nil { c.Fatal(err) } equal := reflect.DeepEqual(&result, &expected) if !equal { c.Fatalf("Volumes %s, expected %s", result, expected) } } func (s *DockerSuite) TestBuildMaintainer(c *check.C) { name := "testbuildmaintainer" expected := "dockerio" _, err := buildImage(name, `FROM `+minimalBaseImage()+` MAINTAINER dockerio`, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Author") if res != expected { c.Fatalf("Maintainer %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildUser(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuilduser" expected := "dockerio" _, err := buildImage(name, `FROM busybox RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd USER dockerio RUN [ $(whoami) = 'dockerio' ]`, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.User") if res != expected { c.Fatalf("User %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { name := "testbuildrelativeworkdir" var ( expected1 string expected2 string expected3 string expected4 string expectedFinal string ) if daemonPlatform == "windows" { expected1 = `C:/` expected2 = `C:/test1` expected3 = `C:/test2` expected4 = `C:/test2/test3` expectedFinal = `C:\test2\test3` // Note inspect is going to return Windows paths, as it's not in busybox } else { expected1 = `/` expected2 = `/test1` expected3 = `/test2` expected4 = `/test2/test3` expectedFinal = `/test2/test3` } _, err := buildImage(name, `FROM busybox RUN sh -c "[ "$PWD" = "`+expected1+`" ]" WORKDIR test1 RUN sh -c "[ "$PWD" = "`+expected2+`" ]" WORKDIR /test2 RUN sh -c "[ "$PWD" = "`+expected3+`" ]" WORKDIR test3 RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.WorkingDir") if res != expectedFinal { c.Fatalf("Workdir %s, expected %s", res, expectedFinal) } } // #22181 Regression test. Single end-to-end test of using // Windows semantics. Most path handling verifications are in unit tests func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) { testRequires(c, DaemonIsWindows) name := "testbuildwindowsworkdirprocessing" _, err := buildImage(name, `FROM busybox WORKDIR C:\\foo WORKDIR bar RUN sh -c "[ "$PWD" = "C:/foo/bar" ]" `, true) if err != nil { c.Fatal(err) } } // #22181 Regression test. Most paths handling verifications are in unit test. // One functional test for end-to-end func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) { testRequires(c, DaemonIsWindows) name := "testbuildwindowsaddcopypathprocessing" // TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to // support backslash such as .\\ being equivalent to ./ and c:\\ being // equivalent to c:/. This is not currently (nor ever has been) supported // by docker on the Windows platform. 
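	// The Dockerfile below therefore uses forward-slash c:/ style paths only.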
dockerfile := ` FROM busybox # No trailing slash on COPY/ADD # Results in dir being changed to a file WORKDIR /wc1 COPY wc1 c:/wc1 WORKDIR /wc2 ADD wc2 c:/wc2 WORKDIR c:/ RUN sh -c "[ $(cat c:/wc1) = 'hellowc1' ]" RUN sh -c "[ $(cat c:/wc2) = 'worldwc2' ]" # Trailing slash on COPY/ADD, Windows-style path. WORKDIR /wd1 COPY wd1 c:/wd1/ WORKDIR /wd2 ADD wd2 c:/wd2/ RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]" RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]" ` ctx, err := fakeContext(dockerfile, map[string]string{ "wc1": "hellowc1", "wc2": "worldwc2", "wd1": "hellowd1", "wd2": "worldwd2", }) if err != nil { c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, false) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) { name := "testbuildworkdirwithenvvariables" var expected string if daemonPlatform == "windows" { expected = `C:\test1\test2` } else { expected = `/test1/test2` } _, err := buildImage(name, `FROM busybox ENV DIRPATH /test1 ENV SUBDIRNAME test2 WORKDIR $DIRPATH WORKDIR $SUBDIRNAME/$MISSING_VAR`, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.WorkingDir") if res != expected { c.Fatalf("Workdir %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { // cat /test1/test2/foo gets permission denied for the user testRequires(c, NotUserNamespace) var expected string if daemonPlatform == "windows" { expected = `C:/test1/test2` } else { expected = `/test1/test2` } name := "testbuildrelativecopy" dockerfile := ` FROM busybox WORKDIR /test1 WORKDIR test2 RUN sh -c "[ "$PWD" = '` + expected + `' ]" COPY foo ./ RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]" ADD foo ./bar/baz RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]" COPY foo ./bar/baz2 RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]" WORKDIR .. COPY foo ./ RUN sh -c "[ $(cat /test1/foo) = 'hello' ]" COPY foo /test3/ RUN sh -c "[ $(cat /test3/foo) = 'hello' ]" WORKDIR /test4 COPY . . 
RUN sh -c "[ $(cat /test4/foo) = 'hello' ]" WORKDIR /test5/test6 COPY foo ../ RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" ` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, false) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEnv(c *check.C) { testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows name := "testbuildenv" expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" _, err := buildImage(name, `FROM busybox ENV PATH /test:$PATH ENV PORT 2375 RUN [ $(env | grep PORT) = 'PORT=2375' ]`, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.Env") if res != expected { c.Fatalf("Env %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildPATH(c *check.C) { testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" fn := func(dockerfile string, exp string) { _, err := buildImage("testbldpath", dockerfile, true) c.Assert(err, check.IsNil) res := inspectField(c, "testbldpath", "Config.Env") if res != exp { c.Fatalf("Env %q, expected %q for dockerfile:%q", res, exp, dockerfile) } } tests := []struct{ dockerfile, exp string }{ {"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"}, {"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"}, {"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, {"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, {"FROM scratch\nENV PATH=/test", "[PATH=/test]"}, {"FROM busybox\nENV PATH=/test", "[PATH=/test]"}, {"FROM scratch\nENV PATH=''", "[PATH=]"}, {"FROM busybox\nENV PATH=''", "[PATH=]"}, } for _, test := range tests { fn(test.dockerfile, test.exp) } } func (s *DockerSuite) TestBuildContextCleanup(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon) name := "testbuildcontextcleanup" entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } _, err = buildImage(name, `FROM scratch ENTRYPOINT ["/bin/echo"]`, true) if err != nil { c.Fatal(err) } entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } if err = compareDirectoryEntries(entries, entriesFinal); err != nil { c.Fatalf("context should have been deleted, but wasn't") } } func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { testRequires(c, DaemonIsLinux) testRequires(c, SameHostDaemon) name := "testbuildcontextcleanup" entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } _, err = buildImage(name, `FROM scratch RUN /non/existing/command`, true) if err == nil { c.Fatalf("expected build to fail, but it didn't") } entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) if err != nil { c.Fatalf("failed to list contents of tmp dir: %s", err) } if err = compareDirectoryEntries(entries, entriesFinal); err != nil { c.Fatalf("context should have been deleted, but wasn't") } } func (s *DockerSuite) TestBuildCmd(c *check.C) { name := "testbuildcmd" expected := "[/bin/echo Hello World]" _, err := buildImage(name, `FROM `+minimalBaseImage()+` CMD ["/bin/echo", "Hello World"]`, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.Cmd") if res != 
expected { c.Fatalf("Cmd %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildExpose(c *check.C) { testRequires(c, DaemonIsLinux) // Expose not implemented on Windows name := "testbuildexpose" expected := "map[2375/tcp:{}]" _, err := buildImage(name, `FROM scratch EXPOSE 2375`, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.ExposedPorts") if res != expected { c.Fatalf("Exposed ports %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { testRequires(c, DaemonIsLinux) // Expose not implemented on Windows // start building docker file with a large number of ports portList := make([]string, 50) line := make([]string, 100) expectedPorts := make([]int, len(portList)*len(line)) for i := 0; i < len(portList); i++ { for j := 0; j < len(line); j++ { p := i*len(line) + j + 1 line[j] = strconv.Itoa(p) expectedPorts[p-1] = p } if i == len(portList)-1 { portList[i] = strings.Join(line, " ") } else { portList[i] = strings.Join(line, " ") + ` \` } } dockerfile := `FROM scratch EXPOSE {{range .}} {{.}} {{end}}` tmpl := template.Must(template.New("dockerfile").Parse(dockerfile)) buf := bytes.NewBuffer(nil) tmpl.Execute(buf, portList) name := "testbuildexpose" _, err := buildImage(name, buf.String(), true) if err != nil { c.Fatal(err) } // check if all the ports are saved inside Config.ExposedPorts res := inspectFieldJSON(c, name, "Config.ExposedPorts") var exposedPorts map[string]interface{} if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { c.Fatal(err) } for _, p := range expectedPorts { ep := fmt.Sprintf("%d/tcp", p) if _, ok := exposedPorts[ep]; !ok { c.Errorf("Port(%s) is not exposed", ep) } else { delete(exposedPorts, ep) } } if len(exposedPorts) != 0 { c.Errorf("Unexpected extra exposed ports %v", exposedPorts) } } func (s *DockerSuite) TestBuildExposeOrder(c *check.C) { testRequires(c, DaemonIsLinux) // Expose not implemented on Windows buildID := func(name, exposed string) string { _, err := buildImage(name, fmt.Sprintf(`FROM scratch EXPOSE %s`, exposed), true) if err != nil { c.Fatal(err) } id := inspectField(c, name, "Id") return id } id1 := buildID("testbuildexpose1", "80 2375") id2 := buildID("testbuildexpose2", "2375 80") if id1 != id2 { c.Errorf("EXPOSE should invalidate the cache only when ports actually changed") } } func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) { testRequires(c, DaemonIsLinux) // Expose not implemented on Windows name := "testbuildexposeuppercaseproto" expected := "map[5678/udp:{}]" _, err := buildImage(name, `FROM scratch EXPOSE 5678/UDP`, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.ExposedPorts") if res != expected { c.Fatalf("Exposed ports %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { name := "testbuildentrypointinheritance" name2 := "testbuildentrypointinheritance2" _, err := buildImage(name, `FROM busybox ENTRYPOINT ["/bin/echo"]`, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.Entrypoint") expected := "[/bin/echo]" if res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } _, err = buildImage(name2, fmt.Sprintf(`FROM %s ENTRYPOINT []`, name), true) if err != nil { c.Fatal(err) } res = inspectField(c, name2, "Config.Entrypoint") expected = "[]" if res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) { name := "testbuildentrypoint" 
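	// An explicit `ENTRYPOINT []` should be recorded as an empty entrypoint,
	// not inherited from the base image.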
expected := "[]" _, err := buildImage(name, `FROM busybox ENTRYPOINT []`, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.Entrypoint") if res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { name := "testbuildentrypoint" expected := "[/bin/echo]" _, err := buildImage(name, `FROM `+minimalBaseImage()+` ENTRYPOINT ["/bin/echo"]`, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.Entrypoint") if res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } } // #6445 ensure ONBUILD triggers aren't committed to grandchildren func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) { var ( out2, out3 string ) { name1 := "testonbuildtrigger1" dockerfile1 := ` FROM busybox RUN echo "GRANDPARENT" ONBUILD RUN echo "ONBUILD PARENT" ` ctx, err := fakeContext(dockerfile1, nil) if err != nil { c.Fatal(err) } defer ctx.Close() out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".") if err != nil { c.Fatalf("build failed to complete: %s, %v", out1, err) } } { name2 := "testonbuildtrigger2" dockerfile2 := ` FROM testonbuildtrigger1 ` ctx, err := fakeContext(dockerfile2, nil) if err != nil { c.Fatal(err) } defer ctx.Close() out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".") if err != nil { c.Fatalf("build failed to complete: %s, %v", out2, err) } } { name3 := "testonbuildtrigger3" dockerfile3 := ` FROM testonbuildtrigger2 ` ctx, err := fakeContext(dockerfile3, nil) if err != nil { c.Fatal(err) } defer ctx.Close() out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".") if err != nil { c.Fatalf("build failed to complete: %s, %v", out3, err) } } // ONBUILD should be run in second build. if !strings.Contains(out2, "ONBUILD PARENT") { c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") } // ONBUILD should *not* be run in third build. 
	if strings.Contains(out3, "ONBUILD PARENT") {
		c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent")
	}
}

func (s *DockerSuite) TestBuildWithCache(c *check.C) {
	testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
	name := "testbuildwithcache"
	id1, err := buildImage(name,
		`FROM scratch
		MAINTAINER dockerio
		EXPOSE 5432
		ENTRYPOINT ["/bin/echo"]`,
		true)
	if err != nil {
		c.Fatal(err)
	}
	id2, err := buildImage(name,
		`FROM scratch
		MAINTAINER dockerio
		EXPOSE 5432
		ENTRYPOINT ["/bin/echo"]`,
		true)
	if err != nil {
		c.Fatal(err)
	}
	if id1 != id2 {
		c.Fatal("The cache should have been used but hasn't.")
	}
}

func (s *DockerSuite) TestBuildWithoutCache(c *check.C) {
	testRequires(c, DaemonIsLinux) // Expose not implemented on Windows
	name := "testbuildwithoutcache"
	name2 := "testbuildwithoutcache2"
	id1, err := buildImage(name,
		`FROM scratch
		MAINTAINER dockerio
		EXPOSE 5432
		ENTRYPOINT ["/bin/echo"]`,
		true)
	if err != nil {
		c.Fatal(err)
	}
	id2, err := buildImage(name2,
		`FROM scratch
		MAINTAINER dockerio
		EXPOSE 5432
		ENTRYPOINT ["/bin/echo"]`,
		false)
	if err != nil {
		c.Fatal(err)
	}
	if id1 == id2 {
		c.Fatal("The cache should have been invalidated but hasn't.")
	}
}

func (s *DockerSuite) TestBuildConditionalCache(c *check.C) {
	name := "testbuildconditionalcache"

	dockerfile := `
		FROM busybox
		ADD foo /tmp/`
	ctx, err := fakeContext(dockerfile, map[string]string{
		"foo": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()

	id1, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatalf("Error building #1: %s", err)
	}

	if err := ctx.Add("foo", "bye"); err != nil {
		c.Fatalf("Error modifying foo: %s", err)
	}

	id2, err := buildImageFromContext(name, ctx, false)
	if err != nil {
		c.Fatalf("Error building #2: %s", err)
	}
	if id2 == id1 {
		c.Fatal("Should not have used the cache")
	}

	id3, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatalf("Error building #3: %s", err)
	}
	if id3 != id2 {
		c.Fatal("Should have used the cache")
	}
}

func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) {
	// local files are not owned by the correct user
	testRequires(c, NotUserNamespace)
	name := "testbuildaddlocalfilewithcache"
	name2 := "testbuildaddlocalfilewithcache2"
	dockerfile := `
		FROM busybox
		MAINTAINER dockerio
		ADD foo /usr/lib/bla/bar
		RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"`
	ctx, err := fakeContext(dockerfile, map[string]string{
		"foo": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()
	id1, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	id2, err := buildImageFromContext(name2, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	if id1 != id2 {
		c.Fatal("The cache should have been used but hasn't.")
	}
}

func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) {
	name := "testbuildaddmultiplelocalfilewithcache"
	name2 := "testbuildaddmultiplelocalfilewithcache2"
	dockerfile := `
		FROM busybox
		MAINTAINER dockerio
		ADD foo Dockerfile /usr/lib/bla/
		RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"`
	ctx, err := fakeContext(dockerfile, map[string]string{
		"foo": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()
	id1, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	id2, err := buildImageFromContext(name2, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	if id1 != id2 {
		c.Fatal("The cache should have been used but hasn't.")
	}
}

func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) {
	// local files are not owned by the correct user
	testRequires(c, NotUserNamespace)
	name := "testbuildaddlocalfilewithoutcache"
	name2 := "testbuildaddlocalfilewithoutcache2"
	dockerfile := `
		FROM busybox
		MAINTAINER dockerio
		ADD foo /usr/lib/bla/bar
		RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"`
	ctx, err := fakeContext(dockerfile, map[string]string{
		"foo": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()
	id1, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	id2, err := buildImageFromContext(name2, ctx, false)
	if err != nil {
		c.Fatal(err)
	}
	if id1 == id2 {
		c.Fatal("The cache should have been invalidated but hasn't.")
	}
}

func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) {
	name := "testbuildcopydirbutnotfile"
	name2 := "testbuildcopydirbutnotfile2"
	dockerfile := `
		FROM ` + minimalBaseImage() + `
		COPY dir /tmp/`
	ctx, err := fakeContext(dockerfile, map[string]string{
		"dir/foo": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()
	id1, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	// Check that adding a file with a similar name doesn't mess with the cache
	if err := ctx.Add("dir_file", "hello2"); err != nil {
		c.Fatal(err)
	}
	id2, err := buildImageFromContext(name2, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	if id1 != id2 {
		c.Fatal("The cache should have been used but wasn't")
	}
}

func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) {
	name := "testbuildaddcurrentdirwithcache"
	name2 := name + "2"
	name3 := name + "3"
	name4 := name + "4"
	dockerfile := `
		FROM ` + minimalBaseImage() + `
		MAINTAINER dockerio
		ADD . /usr/lib/bla`
	ctx, err := fakeContext(dockerfile, map[string]string{
		"foo": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()
	id1, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	// Check that adding a file invalidates the cache of "ADD ."
	if err := ctx.Add("bar", "hello2"); err != nil {
		c.Fatal(err)
	}
	id2, err := buildImageFromContext(name2, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	if id1 == id2 {
		c.Fatal("The cache should have been invalidated but hasn't.")
	}
	// Check that changing a file invalidates the cache of "ADD ."
	if err := ctx.Add("foo", "hello1"); err != nil {
		c.Fatal(err)
	}
	id3, err := buildImageFromContext(name3, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	if id2 == id3 {
		c.Fatal("The cache should have been invalidated but hasn't.")
	}
	// Check that changing a file to the same content with a different mtime does
	// not invalidate the cache of "ADD ."
	time.Sleep(1 * time.Second) // wait a second because of mtime precision
	if err := ctx.Add("foo", "hello1"); err != nil {
		c.Fatal(err)
	}
	id4, err := buildImageFromContext(name4, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	if id3 != id4 {
		c.Fatal("The cache should have been used but hasn't.")
	}
}

func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) {
	name := "testbuildaddcurrentdirwithoutcache"
	name2 := "testbuildaddcurrentdirwithoutcache2"
	dockerfile := `
		FROM ` + minimalBaseImage() + `
		MAINTAINER dockerio
		ADD . /usr/lib/bla`
	ctx, err := fakeContext(dockerfile, map[string]string{
		"foo": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()
	id1, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	id2, err := buildImageFromContext(name2, ctx, false)
	if err != nil {
		c.Fatal(err)
	}
	if id1 == id2 {
		c.Fatal("The cache should have been invalidated but hasn't.")
	}
}

func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) {
	testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
	name := "testbuildaddremotefilewithcache"
	server, err := fakeStorage(map[string]string{
		"baz": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer server.Close()

	id1, err := buildImage(name,
		fmt.Sprintf(`FROM scratch
		MAINTAINER dockerio
		ADD %s/baz /usr/lib/baz/quux`, server.URL()),
		true)
	if err != nil {
		c.Fatal(err)
	}
	id2, err := buildImage(name,
		fmt.Sprintf(`FROM scratch
		MAINTAINER dockerio
		ADD %s/baz /usr/lib/baz/quux`, server.URL()),
		true)
	if err != nil {
		c.Fatal(err)
	}
	if id1 != id2 {
		c.Fatal("The cache should have been used but hasn't.")
	}
}

func (s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) {
	testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
	name := "testbuildaddremotefilewithoutcache"
	name2 := "testbuildaddremotefilewithoutcache2"
	server, err := fakeStorage(map[string]string{
		"baz": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer server.Close()

	id1, err := buildImage(name,
		fmt.Sprintf(`FROM scratch
		MAINTAINER dockerio
		ADD %s/baz /usr/lib/baz/quux`, server.URL()),
		true)
	if err != nil {
		c.Fatal(err)
	}
	id2, err := buildImage(name2,
		fmt.Sprintf(`FROM scratch
		MAINTAINER dockerio
		ADD %s/baz /usr/lib/baz/quux`, server.URL()),
		false)
	if err != nil {
		c.Fatal(err)
	}
	if id1 == id2 {
		c.Fatal("The cache should have been invalidated but hasn't.")
	}
}

func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) {
	testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
	name := "testbuildaddremotefilemtime"
	name2 := name + "2"
	name3 := name + "3"

	files := map[string]string{"baz": "hello"}
	server, err := fakeStorage(files)
	if err != nil {
		c.Fatal(err)
	}
	defer server.Close()

	ctx, err := fakeContext(fmt.Sprintf(`FROM scratch
		MAINTAINER dockerio
		ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil)
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()

	id1, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	id2, err := buildImageFromContext(name2, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	if id1 != id2 {
		c.Fatal("The cache should have been used but wasn't - #1")
	}

	// Now create a different server with the same contents (causes a different mtime)
	// The cache should still be used

	// allow some time for the clock to pass as mtime precision is only 1s
	time.Sleep(2 * time.Second)

	server2, err := fakeStorage(files)
	if err != nil {
		c.Fatal(err)
	}
	defer server2.Close()

	ctx2, err := fakeContext(fmt.Sprintf(`FROM scratch
		MAINTAINER dockerio
		ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil)
	if err != nil {
		c.Fatal(err)
	}
	defer ctx2.Close()
	id3, err := buildImageFromContext(name3, ctx2, true)
	if err != nil {
		c.Fatal(err)
	}
	if id1 != id3 {
		c.Fatal("The cache should have been used but wasn't")
	}
}

func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) {
	testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
	name := "testbuildaddlocalandremotefilewithcache"
	server, err := fakeStorage(map[string]string{
		"baz": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer server.Close()

	ctx, err := fakeContext(fmt.Sprintf(`FROM scratch
		MAINTAINER dockerio
		ADD foo /usr/lib/bla/bar
		ADD %s/baz /usr/lib/baz/quux`, server.URL()),
		map[string]string{
			"foo": "hello world",
		})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()
	id1, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	id2, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	if id1 != id2 {
		c.Fatal("The cache should have been used but hasn't.")
	}
}

func testContextTar(c *check.C, compression archive.Compression) {
	ctx, err := fakeContext(
		`FROM busybox
ADD foo /foo
CMD ["cat", "/foo"]`,
		map[string]string{
			"foo": "bar",
		},
	)
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()
	context, err := archive.Tar(ctx.Dir, compression)
	if err != nil {
		c.Fatalf("failed to build context tar: %v", err)
	}
	name := "contexttar"
	buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-")
	buildCmd.Stdin = context

	if out, _, err := runCommandWithOutput(buildCmd); err != nil {
		c.Fatalf("build failed to complete: %v %v", out, err)
	}
}

func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) {
	testContextTar(c, archive.Gzip)
}

func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) {
	testContextTar(c, archive.Uncompressed)
}

func (s *DockerSuite) TestBuildNoContext(c *check.C) {
	buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-")
	buildCmd.Stdin = strings.NewReader(
		`FROM busybox
		CMD ["echo", "ok"]`)

	if out, _, err := runCommandWithOutput(buildCmd); err != nil {
		c.Fatalf("build failed to complete: %v %v", out, err)
	}

	if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" {
		c.Fatalf("run produced invalid output: %q, expected %q", out, "ok")
	}
}

// TODO: TestCaching
func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) {
	testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet
	name := "testbuildaddlocalandremotefilewithoutcache"
	name2 := "testbuildaddlocalandremotefilewithoutcache2"
	server, err := fakeStorage(map[string]string{
		"baz": "hello",
	})
	if err != nil {
		c.Fatal(err)
	}
	defer server.Close()

	ctx, err := fakeContext(fmt.Sprintf(`FROM scratch
		MAINTAINER dockerio
		ADD foo /usr/lib/bla/bar
		ADD %s/baz /usr/lib/baz/quux`, server.URL()),
		map[string]string{
			"foo": "hello world",
		})
	if err != nil {
		c.Fatal(err)
	}
	defer ctx.Close()
	id1, err := buildImageFromContext(name, ctx, true)
	if err != nil {
		c.Fatal(err)
	}
	id2, err := buildImageFromContext(name2, ctx, false)
	if err != nil {
		c.Fatal(err)
	}
	if id1 == id2 {
		c.Fatal("The cache should have been invalidated but hasn't.")
	}
}

func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) {
	testRequires(c, DaemonIsLinux)
	name := "testbuildimg"

	_, err := buildImage(name,
		`FROM busybox:latest
		RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test
		VOLUME /test`,
		true)
	if err != nil {
		c.Fatal(err)
	}

	out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test")
	if expected := "drw-------"; !strings.Contains(out, expected) {
		c.Fatalf("expected %s received %s", expected, out)
	}
	if expected := "daemon daemon"; !strings.Contains(out, expected) {
		c.Fatalf("expected %s received %s", expected, out)
	}
}

// testing #1405 - config.Cmd does not get cleaned up if
// utilizing cache
func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) {
	name := "testbuildcmdcleanup"
	if _, err := buildImage(name,
		`FROM busybox
		RUN echo "hello"`,
		true); err != nil {
		c.Fatal(err)
	}

	ctx, err := fakeContext(`FROM busybox
		RUN echo "hello"
		ADD foo /foo
		ENTRYPOINT ["/bin/echo"]`,
map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.Cmd") // Cmd must be cleaned up if res != "[]" { c.Fatalf("Cmd %s, expected nil", res) } } func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) { name := "testbuildaddnotfound" expected := "foo: no such file or directory" if daemonPlatform == "windows" { expected = "foo: The system cannot find the file specified" } ctx, err := fakeContext(`FROM `+minimalBaseImage()+` ADD foo /usr/local/bar`, map[string]string{"bar": "hello"}) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { if !strings.Contains(err.Error(), expected) { c.Fatalf("Wrong error %v, must be about missing foo file or directory", err) } } else { c.Fatal("Error must not be nil") } } func (s *DockerSuite) TestBuildInheritance(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildinheritance" _, err := buildImage(name, `FROM scratch EXPOSE 2375`, true) if err != nil { c.Fatal(err) } ports1 := inspectField(c, name, "Config.ExposedPorts") _, err = buildImage(name, fmt.Sprintf(`FROM %s ENTRYPOINT ["/bin/echo"]`, name), true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.Entrypoint") if expected := "[/bin/echo]"; res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } ports2 := inspectField(c, name, "Config.ExposedPorts") if ports1 != ports2 { c.Fatalf("Ports must be same: %s != %s", ports1, ports2) } } func (s *DockerSuite) TestBuildFails(c *check.C) { name := "testbuildfails" _, err := buildImage(name, `FROM busybox RUN sh -c "exit 23"`, true) if err != nil { if !strings.Contains(err.Error(), "returned a non-zero code: 23") { c.Fatalf("Wrong error %v, must be about non-zero code 23", err) } } else { c.Fatal("Error must not be nil") } } func (s *DockerSuite) TestBuildOnBuild(c *check.C) { name := "testbuildonbuild" _, err := buildImage(name, `FROM busybox ONBUILD RUN touch foobar`, true) if err != nil { c.Fatal(err) } _, err = buildImage(name, fmt.Sprintf(`FROM %s RUN [ -f foobar ]`, name), true) if err != nil { c.Fatal(err) } } // gh #2446 func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddtosymlinkdest" ctx, err := fakeContext(`FROM busybox RUN mkdir /foo RUN ln -s /foo /bar ADD foo /bar/ RUN [ -f /bar/foo ] RUN [ -f /foo/foo ]`, map[string]string{ "foo": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) { name := "testbuildescapewhitespace" _, err := buildImage(name, ` # ESCAPE=\ FROM busybox MAINTAINER "Docker \ IO <io@\ docker.com>" `, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Author") if res != "\"Docker IO <[email protected]>\"" { c.Fatalf("Parsed string did not match the escaped string. 
Got: %q", res) } } func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { // Verify that strings that look like ints are still passed as strings name := "testbuildstringing" _, err := buildImage(name, ` FROM busybox MAINTAINER 123 `, true) if err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "inspect", name) if !strings.Contains(out, "\"123\"") { c.Fatalf("Output does not contain the int as a string:\n%s", out) } } func (s *DockerSuite) TestBuildDockerignore(c *check.C) { testRequires(c, DaemonIsLinux) // TODO Windows: This test passes on Windows, // but currently adds a disproportionate amount of time for the value it has. // Removing it from Windows CI for now, but this will be revisited in the // TP5 timeframe when perf is better. name := "testbuilddockerignore" dockerfile := ` FROM busybox ADD . /bla RUN sh -c "[[ -f /bla/src/x.go ]]" RUN sh -c "[[ -f /bla/Makefile ]]" RUN sh -c "[[ ! -e /bla/src/_vendor ]]" RUN sh -c "[[ ! -e /bla/.gitignore ]]" RUN sh -c "[[ ! -e /bla/README.md ]]" RUN sh -c "[[ ! -e /bla/dir/foo ]]" RUN sh -c "[[ ! -e /bla/foo ]]" RUN sh -c "[[ ! -e /bla/.git ]]" RUN sh -c "[[ ! -e v.cc ]]" RUN sh -c "[[ ! -e src/v.cc ]]" RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"` ctx, err := fakeContext(dockerfile, map[string]string{ "Makefile": "all:", ".git/HEAD": "ref: foo", "src/x.go": "package main", "src/_vendor/v.go": "package main", "src/_vendor/v.cc": "package main", "src/v.cc": "package main", "v.cc": "package main", "dir/foo": "", ".gitignore": "", "README.md": "readme", ".dockerignore": ` .git pkg .gitignore src/_vendor *.md **/*.cc dir`, }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { name := "testbuilddockerignorecleanpaths" dockerfile := ` FROM busybox ADD . /tmp/ RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "foo", "foo2": "foo2", "dir1/foo": "foo in dir1", ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { testRequires(c, DaemonIsLinux) // TODO Windows: This test passes on Windows, // but currently adds a disproportionate amount of time for the value it has. // Removing it from Windows CI for now, but this will be revisited in the // TP5 timeframe when perf is better. name := "testbuilddockerignoreexceptions" dockerfile := ` FROM busybox ADD . /bla RUN sh -c "[[ -f /bla/src/x.go ]]" RUN sh -c "[[ -f /bla/Makefile ]]" RUN sh -c "[[ ! -e /bla/src/_vendor ]]" RUN sh -c "[[ ! -e /bla/.gitignore ]]" RUN sh -c "[[ ! -e /bla/README.md ]]" RUN sh -c "[[ -e /bla/dir/dir/foo ]]" RUN sh -c "[[ ! -e /bla/dir/foo1 ]]" RUN sh -c "[[ -f /bla/dir/e ]]" RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]" RUN sh -c "[[ ! -e /bla/foo ]]" RUN sh -c "[[ ! 
-e /bla/.git ]]" RUN sh -c "[[ -e /bla/dir/a.cc ]]"` ctx, err := fakeContext(dockerfile, map[string]string{ "Makefile": "all:", ".git/HEAD": "ref: foo", "src/x.go": "package main", "src/_vendor/v.go": "package main", "dir/foo": "", "dir/foo1": "", "dir/dir/f1": "", "dir/dir/foo": "", "dir/e": "", "dir/e-dir/foo": "", ".gitignore": "", "README.md": "readme", "dir/a.cc": "hello", ".dockerignore": ` .git pkg .gitignore src/_vendor *.md dir !dir/e* !dir/dir/foo **/*.cc !**/*.cc`, }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { name := "testbuilddockerignoredockerfile" dockerfile := ` FROM busybox ADD . /tmp/ RUN sh -c "! ls /tmp/Dockerfile" RUN ls /tmp/.dockerignore` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": dockerfile, ".dockerignore": "Dockerfile\n", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't ignore Dockerfile correctly:%s", err) } // now try it with ./Dockerfile ctx.Add(".dockerignore", "./Dockerfile\n") if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err) } } func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { name := "testbuilddockerignoredockerfile" dockerfile := ` FROM busybox ADD . /tmp/ RUN ls /tmp/Dockerfile RUN sh -c "! ls /tmp/MyDockerfile" RUN ls /tmp/.dockerignore` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "Should not use me", "MyDockerfile": dockerfile, ".dockerignore": "MyDockerfile\n", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err) } // now try it with ./MyDockerfile ctx.Add(".dockerignore", "./MyDockerfile\n") if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err) } } func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { name := "testbuilddockerignoredockerignore" dockerfile := ` FROM busybox ADD . /tmp/ RUN sh -c "! ls /tmp/.dockerignore" RUN ls /tmp/Dockerfile` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": dockerfile, ".dockerignore": ".dockerignore\n", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't ignore .dockerignore correctly:%s", err) } } func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { var id1 string var id2 string name := "testbuilddockerignoretouchdockerfile" dockerfile := ` FROM busybox ADD . 
/tmp/` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": dockerfile, ".dockerignore": "Dockerfile\n", }) if err != nil { c.Fatal(err) } defer ctx.Close() if id1, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't build it correctly:%s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { c.Fatalf("Didn't use the cache - 1") } // Now make sure touching Dockerfile doesn't invalidate the cache if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { c.Fatalf("Didn't add Dockerfile: %s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { c.Fatalf("Didn't use the cache - 2") } // One more time but just 'touch' it instead of changing the content if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { c.Fatalf("Didn't add Dockerfile: %s", err) } if id2, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("Didn't build it correctly:%s", err) } if id1 != id2 { c.Fatalf("Didn't use the cache - 3") } } func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { name := "testbuilddockerignorewholedir" dockerfile := ` FROM busybox COPY . / RUN sh -c "[[ ! -e /.gitignore ]]" RUN sh -c "[[ -f /Makefile ]]"` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "FROM scratch", "Makefile": "all:", ".gitignore": "", ".dockerignore": ".*\n", }) c.Assert(err, check.IsNil) defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } c.Assert(ctx.Add(".dockerfile", "*"), check.IsNil) if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } c.Assert(ctx.Add(".dockerfile", "."), check.IsNil) if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } c.Assert(ctx.Add(".dockerfile", "?"), check.IsNil) if _, err = buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) { name := "testbuilddockerignorebadexclusion" dockerfile := ` FROM busybox COPY . / RUN sh -c "[[ ! -e /.gitignore ]]" RUN sh -c "[[ -f /Makefile ]]"` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "FROM scratch", "Makefile": "all:", ".gitignore": "", ".dockerignore": "!\n", }) c.Assert(err, check.IsNil) defer ctx.Close() if _, err = buildImageFromContext(name, ctx, true); err == nil { c.Fatalf("Build was supposed to fail but didn't") } if err.Error() != "failed to build the image: Error checking context: 'Illegal exclusion pattern: !'.\n" { c.Fatalf("Incorrect output, got:%q", err.Error()) } } func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) { dockerfile := ` FROM busybox COPY . / RUN sh -c "[[ ! -e /.dockerignore ]]" RUN sh -c "[[ ! -e /Dockerfile ]]" RUN sh -c "[[ ! -e /file1 ]]" RUN sh -c "[[ ! 
-e /dir ]]"` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "FROM scratch", "file1": "", "dir/dfile1": "", }) c.Assert(err, check.IsNil) defer ctx.Close() // All of these should result in ignoring all files for _, variant := range []string{"**", "**/", "**/**", "*"} { ctx.Add(".dockerignore", variant) _, err = buildImageFromContext("noname", ctx, true) c.Assert(err, check.IsNil, check.Commentf("variant: %s", variant)) } } func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) { testRequires(c, DaemonIsLinux) // TODO Windows: Fix this test; also perf dockerfile := ` FROM busybox COPY . / #RUN sh -c "[[ -e /.dockerignore ]]" RUN sh -c "[[ -e /Dockerfile ]] && \ [[ ! -e /file0 ]] && \ [[ ! -e /dir1/file0 ]] && \ [[ ! -e /dir2/file0 ]] && \ [[ ! -e /file1 ]] && \ [[ ! -e /dir1/file1 ]] && \ [[ ! -e /dir1/dir2/file1 ]] && \ [[ ! -e /dir1/file2 ]] && \ [[ -e /dir1/dir2/file2 ]] && \ [[ ! -e /dir1/dir2/file4 ]] && \ [[ ! -e /dir1/dir2/file5 ]] && \ [[ ! -e /dir1/dir2/file6 ]] && \ [[ ! -e /dir1/dir3/file7 ]] && \ [[ ! -e /dir1/dir3/file8 ]] && \ [[ -e /dir1/dir3 ]] && \ [[ -e /dir1/dir4 ]] && \ [[ ! -e 'dir1/dir5/fileAA' ]] && \ [[ -e 'dir1/dir5/fileAB' ]] && \ [[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing RUN echo all done!` ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": "FROM scratch", "file0": "", "dir1/file0": "", "dir1/dir2/file0": "", "file1": "", "dir1/file1": "", "dir1/dir2/file1": "", "dir1/file2": "", "dir1/dir2/file2": "", // remains "dir1/dir2/file4": "", "dir1/dir2/file5": "", "dir1/dir2/file6": "", "dir1/dir3/file7": "", "dir1/dir3/file8": "", "dir1/dir4/file9": "", "dir1/dir5/fileAA": "", "dir1/dir5/fileAB": "", "dir1/dir5/fileB": "", ".dockerignore": ` **/file0 **/*file1 **/dir1/file2 dir1/**/file4 **/dir2/file5 **/dir1/dir2/file6 dir1/dir3/** **/dir4/** **/file?A **/file\?B **/dir5/file. `, }) c.Assert(err, check.IsNil) defer ctx.Close() _, err = buildImageFromContext("noname", ctx, true) c.Assert(err, check.IsNil) } func (s *DockerSuite) TestBuildLineBreak(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildlinebreak" _, err := buildImage(name, `FROM busybox RUN sh -c 'echo root:testpass \ > /tmp/passwd' RUN mkdir -p /var/run/sshd RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEOLInLine(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildeolinline" _, err := buildImage(name, `FROM busybox RUN sh -c 'echo root:testpass > /tmp/passwd' RUN echo "foo \n bar"; echo "baz" RUN mkdir -p /var/run/sshd RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildcomments" _, err := buildImage(name, `FROM busybox # This is an ordinary comment. RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh RUN [ ! 
-x /hello.sh ] # comment with line break \ RUN chmod +x /hello.sh RUN [ -x /hello.sh ] RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] RUN [ "$(/hello.sh)" = "hello world" ]`, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildusers" _, err := buildImage(name, `FROM busybox # Make sure our defaults work RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] # TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) USER root RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] # Setup dockerio user and group RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \ echo 'dockerio:x:1001:' >> /etc/group # Make sure we can switch to our user and all the information is exactly as we expect it to be USER dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] # Switch back to root and double check that worked exactly as we might expect it to USER root RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \ # Add a "supplementary" group for our dockerio user \ echo 'supplementary:x:1002:dockerio' >> /etc/group # ... and then go verify that we get it like we expect USER dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] USER 1001 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] # super test the new "user:group" syntax USER dockerio:dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER 1001:dockerio RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER dockerio:1001 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER 1001:1001 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] USER dockerio:supplementary RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] USER dockerio:1002 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] USER 1001:supplementary RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] USER 1001:1002 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] # make sure unknown uid/gid still works properly USER 1042:1043 RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEnvUsage(c *check.C) { // /docker/world/hello is not owned by the correct user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildenvusage" dockerfile := `FROM busybox ENV HOME /root ENV PATH $HOME/bin:$PATH ENV PATH /tmp:$PATH RUN [ "$PATH" = 
"/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] ENV FOO /foo/baz ENV BAR /bar ENV BAZ $BAR ENV FOOPATH $PATH:$FOO RUN [ "$BAR" = "$BAZ" ] RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] ENV FROM hello/docker/world ENV TO /docker/world/hello ADD $FROM $TO RUN [ "$(cat $TO)" = "hello" ] ENV abc=def ENV ghi=$abc RUN [ "$ghi" = "def" ] ` ctx, err := fakeContext(dockerfile, map[string]string{ "hello/docker/world": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) { // /docker/world/hello is not owned by the correct user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildenvusage2" dockerfile := `FROM busybox ENV abc=def def="hello world" RUN [ "$abc,$def" = "def,hello world" ] ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too" RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ] ENV abc=zzz FROM=hello/docker/world ENV abc=zzz TO=/docker/world/hello ADD $FROM $TO RUN [ "$abc,$(cat $TO)" = "zzz,hello" ] ENV abc 'yyy' RUN [ $abc = 'yyy' ] ENV abc= RUN [ "$abc" = "" ] # use grep to make sure if the builder substitutes \$foo by mistake # we don't get a false positive ENV abc=\$foo RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) ENV abc \$foo RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) ENV abc=\'foo\' abc2=\"foo\" RUN [ "$abc,$abc2" = "'foo',\"foo\"" ] ENV abc "foo" RUN [ "$abc" = "foo" ] ENV abc 'foo' RUN [ "$abc" = 'foo' ] ENV abc \'foo\' RUN [ "$abc" = "'foo'" ] ENV abc \"foo\" RUN [ "$abc" = '"foo"' ] ENV abc=ABC RUN [ "$abc" = "ABC" ] ENV def1=${abc:-DEF} def2=${ccc:-DEF} ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:} RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ] ENV mypath=${mypath:+$mypath:}/home ENV mypath=${mypath:+$mypath:}/away RUN [ "$mypath" = '/home:/away' ] ENV e1=bar ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11 RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ] ENV ee1 bar ENV ee2 $ee1 ENV ee3 $ee11 ENV ee4 \$ee1 ENV ee5 \$ee11 RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ] ENV eee1="foo" eee2='foo' ENV eee3 "foo" ENV eee4 'foo' RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] ` ctx, err := fakeContext(dockerfile, map[string]string{ "hello/docker/world": "hello", }) if err != nil { c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddScript(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddscript" dockerfile := ` FROM busybox ADD test /test RUN ["chmod","+x","/test"] RUN ["/test"] RUN [ "$(cat /testfile)" = 'test!' ]` ctx, err := fakeContext(dockerfile, map[string]string{ "test": "#!/bin/sh\necho 'test!' 
> /testfile", }) if err != nil { c.Fatal(err) } defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildAddTar(c *check.C) { // /test/foo is not owned by the correct user testRequires(c, NotUserNamespace) name := "testbuildaddtar" ctx := func() *FakeContext { dockerfile := ` FROM busybox ADD test.tar / RUN cat /test/foo | grep Hi ADD test.tar /test.tar RUN cat /test.tar/test/foo | grep Hi ADD test.tar /unlikely-to-exist RUN cat /unlikely-to-exist/test/foo | grep Hi ADD test.tar /unlikely-to-exist-trailing-slash/ RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir ADD test.tar /existing-directory RUN cat /existing-directory/test/foo | grep Hi ADD test.tar /existing-directory-trailing-slash/ RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` tmpDir, err := ioutil.TempDir("", "fake-context") c.Assert(err, check.IsNil) testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { c.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() tw := tar.NewWriter(testTar) if err := tw.WriteHeader(&tar.Header{ Name: "test/foo", Size: 2, }); err != nil { c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { c.Fatalf("failed to close tar archive: %v", err) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("build failed to complete for TestBuildAddTar: %v", err) } } func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) { name := "testbuildaddbrokentar" ctx := func() *FakeContext { dockerfile := ` FROM busybox ADD test.tar /` tmpDir, err := ioutil.TempDir("", "fake-context") c.Assert(err, check.IsNil) testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { c.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() tw := tar.NewWriter(testTar) if err := tw.WriteHeader(&tar.Header{ Name: "test/foo", Size: 2, }); err != nil { c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { c.Fatalf("failed to close tar archive: %v", err) } // Corrupt the tar by removing one byte off the end stat, err := testTar.Stat() if err != nil { c.Fatalf("failed to stat tar archive: %v", err) } if err := testTar.Truncate(stat.Size() - 1); err != nil { c.Fatalf("failed to truncate tar archive: %v", err) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err == nil { c.Fatalf("build should have failed for TestBuildAddBrokenTar") } } func (s *DockerSuite) TestBuildAddNonTar(c *check.C) { name := "testbuildaddnontar" // Should not try to extract test.tar ctx, err := fakeContext(` FROM busybox ADD test.tar / RUN test -f /test.tar`, map[string]string{"test.tar": "not_a_tar_file"}) if err != nil { c.Fatal(err) } defer ctx.Close() if _, 
err := buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("build failed for TestBuildAddNonTar") } } func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { // /test/foo is not owned by the correct user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildaddtarxz" ctx := func() *FakeContext { dockerfile := ` FROM busybox ADD test.tar.xz / RUN cat /test/foo | grep Hi` tmpDir, err := ioutil.TempDir("", "fake-context") c.Assert(err, check.IsNil) testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { c.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() tw := tar.NewWriter(testTar) if err := tw.WriteHeader(&tar.Header{ Name: "test/foo", Size: 2, }); err != nil { c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { c.Fatalf("failed to close tar archive: %v", err) } xzCompressCmd := exec.Command("xz", "-k", "test.tar") xzCompressCmd.Dir = tmpDir out, _, err := runCommandWithOutput(xzCompressCmd) if err != nil { c.Fatal(err, out) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) } } func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildaddtarxzgz" ctx := func() *FakeContext { dockerfile := ` FROM busybox ADD test.tar.xz.gz / RUN ls /test.tar.xz.gz` tmpDir, err := ioutil.TempDir("", "fake-context") c.Assert(err, check.IsNil) testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) if err != nil { c.Fatalf("failed to create test.tar archive: %v", err) } defer testTar.Close() tw := tar.NewWriter(testTar) if err := tw.WriteHeader(&tar.Header{ Name: "test/foo", Size: 2, }); err != nil { c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write([]byte("Hi")); err != nil { c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { c.Fatalf("failed to close tar archive: %v", err) } xzCompressCmd := exec.Command("xz", "-k", "test.tar") xzCompressCmd.Dir = tmpDir out, _, err := runCommandWithOutput(xzCompressCmd) if err != nil { c.Fatal(err, out) } gzipCompressCmd := exec.Command("gzip", "test.tar.xz") gzipCompressCmd.Dir = tmpDir out, _, err = runCommandWithOutput(gzipCompressCmd) if err != nil { c.Fatal(err, out) } if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { c.Fatalf("failed to open destination dockerfile: %v", err) } return fakeContextFromDir(tmpDir) }() defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) } } func (s *DockerSuite) TestBuildFromGIT(c *check.C) { name := "testbuildfromgit" git, err := newFakeGit("repo", map[string]string{ "Dockerfile": `FROM busybox ADD first /first RUN [ -f /first ] MAINTAINER docker`, "first": "test git data", }, true) if err != nil { c.Fatal(err) } defer git.Close() _, err = buildImageFromPath(name, git.RepoURL, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Author") if res != "docker" { c.Fatalf("Maintainer should be docker, got %s", res) } } func (s 
*DockerSuite) TestBuildFromGITWithContext(c *check.C) { name := "testbuildfromgit" git, err := newFakeGit("repo", map[string]string{ "docker/Dockerfile": `FROM busybox ADD first /first RUN [ -f /first ] MAINTAINER docker`, "docker/first": "test git data", }, true) if err != nil { c.Fatal(err) } defer git.Close() u := fmt.Sprintf("%s#master:docker", git.RepoURL) _, err = buildImageFromPath(name, u, true) if err != nil { c.Fatal(err) } res := inspectField(c, name, "Author") if res != "docker" { c.Fatalf("Maintainer should be docker, got %s", res) } } func (s *DockerSuite) TestBuildFromGITwithF(c *check.C) { name := "testbuildfromgitwithf" git, err := newFakeGit("repo", map[string]string{ "myApp/myDockerfile": `FROM busybox RUN echo hi from Dockerfile`, }, true) if err != nil { c.Fatal(err) } defer git.Close() out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "myApp/myDockerfile", git.RepoURL) if err != nil { c.Fatalf("Error on build. Out: %s\nErr: %v", out, err) } if !strings.Contains(out, "hi from Dockerfile") { c.Fatalf("Missing expected output, got:\n%s", out) } } func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildfromremotetarball" buffer := new(bytes.Buffer) tw := tar.NewWriter(buffer) defer tw.Close() dockerfile := []byte(`FROM busybox MAINTAINER docker`) if err := tw.WriteHeader(&tar.Header{ Name: "Dockerfile", Size: int64(len(dockerfile)), }); err != nil { c.Fatalf("failed to write tar file header: %v", err) } if _, err := tw.Write(dockerfile); err != nil { c.Fatalf("failed to write tar file content: %v", err) } if err := tw.Close(); err != nil { c.Fatalf("failed to close tar archive: %v", err) } server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ "testT.tar": buffer, }) c.Assert(err, check.IsNil) defer server.Close() _, err = buildImageFromPath(name, server.URL()+"/testT.tar", true) c.Assert(err, check.IsNil) res := inspectField(c, name, "Author") if res != "docker" { c.Fatalf("Maintainer should be docker, got %s", res) } } func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) { name := "testbuildcmdcleanuponentrypoint" if _, err := buildImage(name, `FROM `+minimalBaseImage()+` CMD ["test"] ENTRYPOINT ["echo"]`, true); err != nil { c.Fatal(err) } if _, err := buildImage(name, fmt.Sprintf(`FROM %s ENTRYPOINT ["cat"]`, name), true); err != nil { c.Fatal(err) } res := inspectField(c, name, "Config.Cmd") if res != "[]" { c.Fatalf("Cmd %s, expected nil", res) } res = inspectField(c, name, "Config.Entrypoint") if expected := "[cat]"; res != expected { c.Fatalf("Entrypoint %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildClearCmd(c *check.C) { name := "testbuildclearcmd" _, err := buildImage(name, `From `+minimalBaseImage()+` ENTRYPOINT ["/bin/bash"] CMD []`, true) if err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "Config.Cmd") if res != "[]" { c.Fatalf("Cmd %s, expected %s", res, "[]") } } func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) { // Windows Server 2016 RS1 builds load the windowsservercore image from a tar rather than // a .WIM file, and the tar layer has the default CMD set (same as the Linux ubuntu image), // where-as the TP5 .WIM had a blank CMD. 
Hence this test is not applicable on RS1 or later // builds if daemonPlatform == "windows" && windowsDaemonKV >= 14375 { c.Skip("Not applicable on Windows RS1 or later builds") } name := "testbuildemptycmd" if _, err := buildImage(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true); err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "Config.Cmd") if res != "null" { c.Fatalf("Cmd %s, expected %s", res, "null") } } func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) { name := "testbuildonbuildparent" if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil { c.Fatal(err) } _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true) if err != nil { c.Fatal(err) } if !strings.Contains(out, "# Executing 1 build trigger") { c.Fatal("failed to find the build trigger output", out) } } func (s *DockerSuite) TestBuildInvalidTag(c *check.C) { name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200) _, out, err := buildImageWithOut(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true) // if the error doesn't check for illegal tag name, or the image is built // then this should fail if !strings.Contains(out, "Error parsing reference") || strings.Contains(out, "Sending build context to Docker daemon") { c.Fatalf("failed to stop before building. Error: %s, Output: %s", err, out) } } func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) { name := "testbuildcmdshc" if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "Config.Cmd") expected := `["/bin/sh","-c","echo cmd"]` if daemonPlatform == "windows" { expected = `["cmd","/S","/C","echo cmd"]` } if res != expected { c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) } } func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) { // Test to make sure that when we strcat arrays we take into account // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't // look the same name := "testbuildcmdspaces" var id1 string var id2 string var err error if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil { c.Fatal(err) } if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil { c.Fatal(err) } if id1 == id2 { c.Fatal("Should not have resulted in the same CMD") } // Now do the same with ENTRYPOINT if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil { c.Fatal(err) } if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil { c.Fatal(err) } if id1 == id2 { c.Fatal("Should not have resulted in the same ENTRYPOINT") } } func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { name := "testbuildcmdjson" if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "Config.Cmd") expected := `["echo","cmd"]` if res != expected { c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) } } func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) { if _, err := buildImage("parent", ` FROM busybox ENTRYPOINT exit 130 `, true); err != nil { c.Fatal(err) } if _, status, _ := dockerCmdWithError("run", "parent"); status != 130 { c.Fatalf("expected exit code 130 but received %d", status) } if _, err := buildImage("child", ` FROM parent ENTRYPOINT exit 5 `, true); err != nil { c.Fatal(err) } if _, status, _ := dockerCmdWithError("run", "child"); 
status != 5 { c.Fatalf("expected exit code 5 but received %d", status) } } func (s *DockerSuite) TestBuildEntrypointInheritanceInspect(c *check.C) { var ( name = "testbuildepinherit" name2 = "testbuildepinherit2" expected = `["/bin/sh","-c","echo quux"]` ) if daemonPlatform == "windows" { expected = `["cmd","/S","/C","echo quux"]` } if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { c.Fatal(err) } if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name2, "Config.Entrypoint") if res != expected { c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) } out, _ := dockerCmd(c, "run", name2) expected = "quux" if strings.TrimSpace(out) != expected { c.Fatalf("Expected output is %s, got %s", expected, out) } } func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) { name := "testbuildentrypoint" _, err := buildImage(name, `FROM busybox ENTRYPOINT echo`, true) if err != nil { c.Fatal(err) } dockerCmd(c, "run", "--rm", name) } func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildexoticshellinterpolation" _, err := buildImage(name, ` FROM busybox ENV SOME_VAR a.b.c RUN [ "$SOME_VAR" = 'a.b.c' ] RUN [ "${SOME_VAR}" = 'a.b.c' ] RUN [ "${SOME_VAR%.*}" = 'a.b' ] RUN [ "${SOME_VAR%%.*}" = 'a' ] RUN [ "${SOME_VAR#*.}" = 'b.c' ] RUN [ "${SOME_VAR##*.}" = 'c' ] RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ] RUN [ "${#SOME_VAR}" = '5' ] RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ] RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] `, false) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) { // This testcase is supposed to generate an error because the // JSON array we're passing in on the CMD uses single quotes instead // of double quotes (per the JSON spec). This means we interpret it // as a "string" instead of "JSON array" and pass it on to "sh -c" and // it should barf on it. 
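	// For illustration: a double-quoted variant such as
	//   CMD ["/bin/sh", "-c", "echo hi"]
	// would parse as a JSON array (exec form). The single-quoted variant below
	// is not valid JSON, so the builder should fall back to shell form and wrap
	// the whole literal in an extra "/bin/sh -c", which is why running the
	// resulting image is expected to fail.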
name := "testbuildsinglequotefails" if _, err := buildImage(name, `FROM busybox CMD [ '/bin/sh', '-c', 'echo hi' ]`, true); err != nil { c.Fatal(err) } if _, _, err := dockerCmdWithError("run", "--rm", name); err == nil { c.Fatal("The image was not supposed to be able to run") } } func (s *DockerSuite) TestBuildVerboseOut(c *check.C) { name := "testbuildverboseout" expected := "\n123\n" if daemonPlatform == "windows" { expected = "\n123\r\n" } _, out, err := buildImageWithOut(name, `FROM busybox RUN echo 123`, false) if err != nil { c.Fatal(err) } if !strings.Contains(out, expected) { c.Fatalf("Output should contain %q: %q", "123", out) } } func (s *DockerSuite) TestBuildWithTabs(c *check.C) { name := "testbuildwithtabs" _, err := buildImage(name, "FROM busybox\nRUN echo\tone\t\ttwo", true) if err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]` expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates if daemonPlatform == "windows" { expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]` expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates } if res != expected1 && res != expected2 { c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2) } } func (s *DockerSuite) TestBuildLabels(c *check.C) { name := "testbuildlabel" expected := `{"License":"GPL","Vendor":"Acme"}` _, err := buildImage(name, `FROM busybox LABEL Vendor=Acme LABEL License GPL`, true) if err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) } } func (s *DockerSuite) TestBuildLabelsCache(c *check.C) { name := "testbuildlabelcache" id1, err := buildImage(name, `FROM busybox LABEL Vendor=Acme`, false) if err != nil { c.Fatalf("Build 1 should have worked: %v", err) } id2, err := buildImage(name, `FROM busybox LABEL Vendor=Acme`, true) if err != nil || id1 != id2 { c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err) } id2, err = buildImage(name, `FROM busybox LABEL Vendor=Acme1`, true) if err != nil || id1 == id2 { c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) } id2, err = buildImage(name, `FROM busybox LABEL Vendor Acme`, true) // Note: " " and "=" should be same if err != nil || id1 != id2 { c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err) } // Now make sure the cache isn't used by mistake id1, err = buildImage(name, `FROM busybox LABEL f1=b1 f2=b2`, false) if err != nil { c.Fatalf("Build 5 should have worked: %q", err) } id2, err = buildImage(name, `FROM busybox LABEL f1="b1 f2=b2"`, true) if err != nil || id1 == id2 { c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err) } } func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { // This test makes sure that -q works correctly when build is successful: // stdout has only the image ID (long image ID) and stderr is empty. 
	var stdout, stderr string
	var err error
	outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$")

	tt := []struct {
		Name      string
		BuildFunc func(string)
	}{
		{
			Name: "quiet_build_stdin_success",
			BuildFunc: func(name string) {
				_, stdout, stderr, err = buildImageWithStdoutStderr(name, "FROM busybox", true, "-q", "--force-rm", "--rm")
			},
		},
		{
			Name: "quiet_build_ctx_success",
			BuildFunc: func(name string) {
				// use a distinct name for the setup error so the outer err
				// checked after BuildFunc is not shadowed
				ctx, ctxErr := fakeContext("FROM busybox", map[string]string{
					"quiet_build_success_fctx": "test",
				})
				if ctxErr != nil {
					c.Fatalf("Failed to create context: %s", ctxErr.Error())
				}
				defer ctx.Close()
				_, stdout, stderr, err = buildImageFromContextWithStdoutStderr(name, ctx, true, "-q", "--force-rm", "--rm")
			},
		},
		{
			Name: "quiet_build_git_success",
			BuildFunc: func(name string) {
				git, gitErr := newFakeGit("repo", map[string]string{
					"Dockerfile": "FROM busybox",
				}, true)
				if gitErr != nil {
					c.Fatalf("Failed to create the git repo: %s", gitErr.Error())
				}
				defer git.Close()
				_, stdout, stderr, err = buildImageFromGitWithStdoutStderr(name, git, true, "-q", "--force-rm", "--rm")
			},
		},
	}

	for _, te := range tt {
		te.BuildFunc(te.Name)
		if err != nil {
			c.Fatalf("Test %s shouldn't fail, but got the following error: %s", te.Name, err.Error())
		}
		if outRegexp.Find([]byte(stdout)) == nil {
			c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, stdout)
		}
		if stderr != "" {
			c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, stderr)
		}
	}
}

func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) {
	// This test makes sure that -q works correctly when the build fails, by
	// comparing the stderr output in quiet mode with the combined stdout and
	// stderr output in verbose mode
	testRequires(c, Network)

	testName := "quiet_build_not_exists_image"
	buildCmd := "FROM busybox11"
	_, _, qstderr, qerr := buildImageWithStdoutStderr(testName, buildCmd, false, "-q", "--force-rm", "--rm")
	_, vstdout, vstderr, verr := buildImageWithStdoutStderr(testName, buildCmd, false, "--force-rm", "--rm")
	if verr == nil || qerr == nil {
		c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", testName))
	}
	if qstderr != vstdout+vstderr {
		c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, qstderr, vstdout+vstderr))
	}
}

func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) {
	// This test makes sure that -q works correctly when the build fails, by
	// comparing the stderr output in quiet mode with the combined stdout and
	// stderr output in verbose mode
	tt := []struct {
		TestName  string
		BuildCmds string
	}{
		{"quiet_build_no_from_at_the_beginning", "RUN whoami"},
		{"quiet_build_unknown_instr", "FROMD busybox"},
	}

	for _, te := range tt {
		_, _, qstderr, qerr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "-q", "--force-rm", "--rm")
		_, vstdout, vstderr, verr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "--force-rm", "--rm")
		if verr == nil || qerr == nil {
			c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", te.TestName))
		}
		if qstderr != vstdout+vstderr {
			c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", te.TestName, qstderr, vstdout+vstderr))
		}
	}
}

func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) {
	// This test ensures that when given a wrong URL, stderr in quiet mode and
	// stderr in verbose mode are identical.
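	// (The URL used below, http://something.invalid, relies on the reserved
	// ".invalid" TLD, which can never resolve; the remote-context fetch is
	// therefore guaranteed to fail in both quiet and verbose modes.)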
// TODO(vdemeester) with cobra, stdout has a carriage return too much so this test should not check stdout URL := "http://something.invalid" Name := "quiet_build_wrong_remote" _, _, qstderr, qerr := buildImageWithStdoutStderr(Name, "", false, "-q", "--force-rm", "--rm", URL) _, _, vstderr, verr := buildImageWithStdoutStderr(Name, "", false, "--force-rm", "--rm", URL) if qerr == nil || verr == nil { c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", Name)) } if qstderr != vstderr { c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", Name, qstderr, vstderr)) } } func (s *DockerSuite) TestBuildStderr(c *check.C) { // This test just makes sure that no non-error output goes // to stderr name := "testbuildstderr" _, _, stderr, err := buildImageWithStdoutStderr(name, "FROM busybox\nRUN echo one", true) if err != nil { c.Fatal(err) } if runtime.GOOS == "windows" && daemonPlatform != "windows" { // Windows to non-Windows should have a security warning if !strings.Contains(stderr, "SECURITY WARNING:") { c.Fatalf("Stderr contains unexpected output: %q", stderr) } } else { // Other platform combinations should have no stderr written too if stderr != "" { c.Fatalf("Stderr should have been empty, instead it's: %q", stderr) } } } func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) { testRequires(c, UnixCli) // test uses chown: not available on windows testRequires(c, DaemonIsLinux) name := "testbuildchownsinglefile" ctx, err := fakeContext(` FROM busybox COPY test / RUN ls -l /test RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] `, map[string]string{ "test": "test", }) if err != nil { c.Fatal(err) } defer ctx.Close() if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { c.Fatal(err) } if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { name := "testbuildsymlinkbreakout" tmpdir, err := ioutil.TempDir("", name) c.Assert(err, check.IsNil) defer os.RemoveAll(tmpdir) ctx := filepath.Join(tmpdir, "context") if err := os.MkdirAll(ctx, 0755); err != nil { c.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` from busybox add symlink.tar / add inject /symlink/ `), 0644); err != nil { c.Fatal(err) } inject := filepath.Join(ctx, "inject") if err := ioutil.WriteFile(inject, nil, 0644); err != nil { c.Fatal(err) } f, err := os.Create(filepath.Join(ctx, "symlink.tar")) if err != nil { c.Fatal(err) } w := tar.NewWriter(f) w.WriteHeader(&tar.Header{ Name: "symlink2", Typeflag: tar.TypeSymlink, Linkname: "/../../../../../../../../../../../../../../", Uid: os.Getuid(), Gid: os.Getgid(), }) w.WriteHeader(&tar.Header{ Name: "symlink", Typeflag: tar.TypeSymlink, Linkname: filepath.Join("symlink2", tmpdir), Uid: os.Getuid(), Gid: os.Getgid(), }) w.Close() f.Close() if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil { c.Fatal(err) } if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { c.Fatal("symlink breakout - inject") } else if !os.IsNotExist(err) { c.Fatalf("unexpected error: %v", err) } } func (s *DockerSuite) TestBuildXZHost(c *check.C) { // /usr/local/sbin/xz gets permission denied for the user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) name := "testbuildxzhost" ctx, err := fakeContext(` FROM busybox ADD xz /usr/local/sbin/ RUN chmod 755 /usr/local/sbin/xz ADD test.xz / RUN [ ! 
-e /injected ]`, map[string]string{ "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" + "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" + "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21", "xz": "#!/bin/sh\ntouch /injected", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { // /foo/file gets permission denied for the user testRequires(c, NotUserNamespace) testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127 var ( name = "testbuildvolumescontent" expected = "some text" volName = "/foo" ) if daemonPlatform == "windows" { volName = "C:/foo" } ctx, err := fakeContext(` FROM busybox COPY content /foo/file VOLUME `+volName+` CMD cat /foo/file`, map[string]string{ "content": expected, }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, false); err != nil { c.Fatal(err) } out, _ := dockerCmd(c, "run", "--rm", name) if out != expected { c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out) } } func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{ "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", "files/dFile": "FROM busybox\nRUN echo from files/dFile", "dFile": "FROM busybox\nRUN echo from dFile", "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", }) if err != nil { c.Fatal(err) } defer ctx.Close() out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") if err != nil { c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from Dockerfile") { c.Fatalf("test1 should have used Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".") if err != nil { c.Fatal(err) } if !strings.Contains(out, "from files/Dockerfile") { c.Fatalf("test2 should have used files/Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".") if err != nil { c.Fatal(err) } if !strings.Contains(out, "from files/dFile") { c.Fatalf("test3 should have used files/dFile, output:%s", out) } out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".") if err != nil { c.Fatal(err) } if !strings.Contains(out, "from dFile") { c.Fatalf("test4 should have used dFile, output:%s", out) } dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5") c.Assert(err, check.IsNil) nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") if _, err = os.Create(nonDockerfileFile); err != nil { c.Fatal(err) } out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".") if err == nil { c.Fatalf("test5 was supposed to fail to find passwd") } if expected := fmt.Sprintf("The Dockerfile (%s) must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) { c.Fatalf("wrong error message:%v\nexpected to contain=%v", out, expected) } out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..") if err != nil { c.Fatalf("test6 failed: %s", err) } if !strings.Contains(out, "from Dockerfile") { 
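		// test6 builds from inside files/ with context ".." and -f "../Dockerfile",
		// so the root Dockerfile (which echoes "from Dockerfile") should be the one
		// used; reaching this branch means that marker was missing from the output.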
c.Fatalf("test6 should have used root Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..") if err != nil { c.Fatalf("test7 failed: %s", err) } if !strings.Contains(out, "from files/Dockerfile") { c.Fatalf("test7 should have used files Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".") if err == nil || !strings.Contains(out, "must be within the build context") { c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err) } tmpDir := os.TempDir() out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir) if err != nil { c.Fatalf("test9 - failed: %s", err) } if !strings.Contains(out, "from Dockerfile") { c.Fatalf("test9 should have used root Dockerfile, output:%s", out) } out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".") if err != nil { c.Fatalf("test10 should have worked: %s", err) } if !strings.Contains(out, "from files/dFile2") { c.Fatalf("test10 should have used files/dFile2, output:%s", out) } } func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows testRequires(c, DaemonIsLinux) ctx, err := fakeContext(`FROM busybox RUN echo from dockerfile`, map[string]string{ "dockerfile": "FROM busybox\nRUN echo from dockerfile", }) if err != nil { c.Fatal(err) } defer ctx.Close() out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") if err != nil { c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from dockerfile") { c.Fatalf("Missing proper output: %s", out) } } func (s *DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) { testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows testRequires(c, DaemonIsLinux) ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{ "dockerfile": "FROM busybox\nRUN echo from dockerfile", }) if err != nil { c.Fatal(err) } defer ctx.Close() out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") if err != nil { c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from Dockerfile") { c.Fatalf("Missing proper output: %s", out) } } func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { testRequires(c, DaemonIsLinux) server, err := fakeStorage(map[string]string{"baz": `FROM busybox RUN echo from baz COPY * /tmp/ RUN find /tmp/`}) if err != nil { c.Fatal(err) } defer server.Close() ctx, err := fakeContext(`FROM busybox RUN echo from Dockerfile`, map[string]string{}) if err != nil { c.Fatal(err) } defer ctx.Close() // Make sure that -f is ignored and that we don't use the Dockerfile // that's in the current dir out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz") if err != nil { c.Fatalf("Failed to build: %s\n%s", out, err) } if !strings.Contains(out, "from baz") || strings.Contains(out, "/tmp/baz") || !strings.Contains(out, "/tmp/Dockerfile") { c.Fatalf("Missing proper output: %s", out) } } func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why ctx, err := fakeContext(`FROM busybox RUN echo "from Dockerfile"`, map[string]string{}) if err != nil { c.Fatal(err) } defer ctx.Close() // Make sure that -f is 
ignored and that we don't use the Dockerfile // that's in the current dir dockerCommand := exec.Command(dockerBinary, "build", "-f", "baz", "-t", "test1", "-") dockerCommand.Dir = ctx.Dir dockerCommand.Stdin = strings.NewReader(`FROM busybox RUN echo "from baz" COPY * /tmp/ RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`) out, status, err := runCommandWithOutput(dockerCommand) if err != nil || status != 0 { c.Fatalf("Error building: %s", err) } if !strings.Contains(out, "from baz") || strings.Contains(out, "/tmp/baz") || !strings.Contains(out, "/tmp/Dockerfile") { c.Fatalf("Missing proper output: %s", out) } } func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { name := "testbuildfromofficial" fromNames := []string{ "busybox", "docker.io/busybox", "index.docker.io/busybox", "library/busybox", "docker.io/library/busybox", "index.docker.io/library/busybox", } for idx, fromName := range fromNames { imgName := fmt.Sprintf("%s%d", name, idx) _, err := buildImage(imgName, "FROM "+fromName, true) if err != nil { c.Errorf("Build failed using FROM %s: %s", fromName, err) } deleteImages(imgName) } } func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) testRequires(c, DaemonIsLinux) name := "testbuilddockerfileoutsidecontext" tmpdir, err := ioutil.TempDir("", name) c.Assert(err, check.IsNil) defer os.RemoveAll(tmpdir) ctx := filepath.Join(tmpdir, "context") if err := os.MkdirAll(ctx, 0755); err != nil { c.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil { c.Fatal(err) } wd, err := os.Getwd() if err != nil { c.Fatal(err) } defer os.Chdir(wd) if err := os.Chdir(ctx); err != nil { c.Fatal(err) } if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { c.Fatal(err) } if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { c.Fatal(err) } if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { c.Fatal(err) } for _, dockerfilePath := range []string{ filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1"), filepath.Join(ctx, "dockerfile2"), } { out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".") if err == nil { c.Fatalf("Expected error with %s. Out: %s", dockerfilePath, out) } if !strings.Contains(out, "must be within the build context") && !strings.Contains(out, "Cannot locate Dockerfile") { c.Fatalf("Unexpected error with %s. Out: %s", dockerfilePath, out) } deleteImages(name) } os.Chdir(tmpdir) // Path to Dockerfile should be resolved relative to working directory, not relative to context. // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx) if err == nil { c.Fatalf("Expected error. 
Out: %s", out) } } func (s *DockerSuite) TestBuildSpaces(c *check.C) { // Test to make sure that leading/trailing spaces on a command // doesn't change the error msg we get var ( err1 error err2 error ) name := "testspaces" ctx, err := fakeContext("FROM busybox\nCOPY\n", map[string]string{ "Dockerfile": "FROM busybox\nCOPY\n", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil { c.Fatal("Build 1 was supposed to fail, but didn't") } ctx.Add("Dockerfile", "FROM busybox\nCOPY ") if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { c.Fatal("Build 2 was supposed to fail, but didn't") } removeLogTimestamps := func(s string) string { return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) } // Skip over the times e1 := removeLogTimestamps(err1.Error()) e2 := removeLogTimestamps(err2.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2) } ctx.Add("Dockerfile", "FROM busybox\n COPY") if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { c.Fatal("Build 3 was supposed to fail, but didn't") } // Skip over the times e1 = removeLogTimestamps(err1.Error()) e2 = removeLogTimestamps(err2.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2) } ctx.Add("Dockerfile", "FROM busybox\n COPY ") if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { c.Fatal("Build 4 was supposed to fail, but didn't") } // Skip over the times e1 = removeLogTimestamps(err1.Error()) e2 = removeLogTimestamps(err2.Error()) // Ignore whitespace since that's what were verifying doesn't change stuff if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2) } } func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) { testRequires(c, DaemonIsLinux) // Test to make sure that spaces in quotes aren't lost name := "testspacesquotes" dockerfile := `FROM busybox RUN echo " \ foo "` _, out, err := buildImageWithOut(name, dockerfile, false) if err != nil { c.Fatal("Build failed:", err) } expecting := "\n foo \n" if !strings.Contains(out, expecting) { c.Fatalf("Bad output: %q expecting to contain %q", out, expecting) } } // #4393 func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) { testRequires(c, DaemonIsLinux) // TODO Windows: This should error out buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-") buildCmd.Stdin = strings.NewReader(` FROM busybox RUN touch /foo VOLUME /foo `) out, _, err := runCommandWithOutput(buildCmd) if err == nil || !strings.Contains(out, "file exists") { c.Fatalf("expected build to fail when file exists in container at requested volume path") } } func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { // Test to make sure that all Dockerfile commands (except the ones listed // in skipCmds) will generate an error if no args are provided. // Note: INSERT is deprecated so we exclude it because of that. 
skipCmds := map[string]struct{}{ "CMD": {}, "RUN": {}, "ENTRYPOINT": {}, "INSERT": {}, } if daemonPlatform == "windows" { skipCmds = map[string]struct{}{ "CMD": {}, "RUN": {}, "ENTRYPOINT": {}, "INSERT": {}, "STOPSIGNAL": {}, "ARG": {}, "USER": {}, "EXPOSE": {}, } } for cmd := range command.Commands { cmd = strings.ToUpper(cmd) if _, ok := skipCmds[cmd]; ok { continue } var dockerfile string if cmd == "FROM" { dockerfile = cmd } else { // Add FROM to make sure we don't complain about it missing dockerfile = "FROM busybox\n" + cmd } ctx, err := fakeContext(dockerfile, map[string]string{}) if err != nil { c.Fatal(err) } defer ctx.Close() var out string if out, err = buildImageFromContext("args", ctx, true); err == nil { c.Fatalf("%s was supposed to fail. Out:%s", cmd, out) } if !strings.Contains(err.Error(), cmd+" requires") { c.Fatalf("%s returned the wrong type of error:%s", cmd, err) } } } func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) { testRequires(c, DaemonIsLinux) _, out, err := buildImageWithOut("sc", "FROM scratch", true) if err == nil { c.Fatalf("Build was supposed to fail") } if !strings.Contains(out, "No image was generated") { c.Fatalf("Wrong error message: %v", out) } } func (s *DockerSuite) TestBuildDotDotFile(c *check.C) { ctx, err := fakeContext("FROM busybox\n", map[string]string{ "..gitme": "", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err = buildImageFromContext("sc", ctx, false); err != nil { c.Fatalf("Build was supposed to work: %s", err) } } func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) { testRequires(c, DaemonIsLinux) // No hello-world Windows image name := "testbuildrunonejson" ctx, err := fakeContext(`FROM hello-world:frozen RUN [ "/hello" ]`, map[string]string{}) if err != nil { c.Fatal(err) } defer ctx.Close() out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-t", name, ".") if err != nil { c.Fatalf("failed to build the image: %s, %v", out, err) } if !strings.Contains(out, "Hello from Docker") { c.Fatalf("bad output: %s", out) } } func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) { name := "testbuildemptystringvolume" _, err := buildImage(name, ` FROM busybox ENV foo="" VOLUME $foo `, false) if err == nil { c.Fatal("Should have failed to build") } } func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) { testRequires(c, SameHostDaemon) testRequires(c, DaemonIsLinux) cgroupParent := "test" data, err := ioutil.ReadFile("/proc/self/cgroup") if err != nil { c.Fatalf("failed to read '/proc/self/cgroup - %v", err) } selfCgroupPaths := parseCgroupPaths(string(data)) _, found := selfCgroupPaths["memory"] if !found { c.Fatalf("unable to find self memory cgroup path. 
CgroupsPath: %v", selfCgroupPaths) } cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-") cmd.Stdin = strings.NewReader(` FROM busybox RUN cat /proc/self/cgroup `) out, _, err := runCommandWithOutput(cmd) if err != nil { c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) } m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), out) c.Assert(err, check.IsNil) if !m { c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, out) } } func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) { // Check to make sure our build output prints the Dockerfile cmd // property - there was a bug that caused it to be duplicated on the // Step X line name := "testbuildnodupoutput" _, out, err := buildImageWithOut(name, ` FROM busybox RUN env`, false) if err != nil { c.Fatalf("Build should have worked: %q", err) } exp := "\nStep 2 : RUN env\n" if !strings.Contains(out, exp) { c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) } } // GH15826 func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) { // Explicit check to ensure that build starts from step 1 rather than 0 name := "testbuildstartsfromone" _, out, err := buildImageWithOut(name, ` FROM busybox`, false) if err != nil { c.Fatalf("Build should have worked: %q", err) } exp := "\nStep 1 : FROM busybox\n" if !strings.Contains(out, exp) { c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) } } func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) { // Test to make sure the bad command is quoted with just "s and // not as a Go []string name := "testbuildbadrunerrmsg" _, out, err := buildImageWithOut(name, ` FROM busybox RUN badEXE a1 \& a2 a3`, false) // tab between a2 and a3 if err == nil { c.Fatal("Should have failed to build") } shell := "/bin/sh -c" exitCode := "127" if daemonPlatform == "windows" { shell = "cmd /S /C" // architectural - Windows has to start the container to determine the exe is bad, Linux does not exitCode = "1" } exp := `The command '` + shell + ` badEXE a1 \& a2 a3' returned a non-zero code: ` + exitCode if !strings.Contains(out, exp) { c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp) } } func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) { repoName := s.setupTrustedImage(c, "trusted-build") dockerFile := fmt.Sprintf(` FROM %s RUN [] `, repoName) name := "testtrustedbuild" buildCmd := buildImageCmd(name, dockerFile, true) s.trustedCmd(buildCmd) out, _, err := runCommandWithOutput(buildCmd) if err != nil { c.Fatalf("Error running trusted build: %s\n%s", err, out) } if !strings.Contains(out, fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7])) { c.Fatalf("Unexpected output on trusted build:\n%s", out) } // We should also have a tag reference for the image. if out, exitCode := dockerCmd(c, "inspect", repoName); exitCode != 0 { c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) } // We should now be able to remove the tag reference. 
if out, exitCode := dockerCmd(c, "rmi", repoName); exitCode != 0 { c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) } } func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) { repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL) dockerFile := fmt.Sprintf(` FROM %s RUN [] `, repoName) name := "testtrustedbuilduntrustedtag" buildCmd := buildImageCmd(name, dockerFile, true) s.trustedCmd(buildCmd) out, _, err := runCommandWithOutput(buildCmd) if err == nil { c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out) } if !strings.Contains(out, "does not have trust data for") { c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out) } } func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) { testRequires(c, DaemonIsLinux) tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-") c.Assert(err, check.IsNil) defer os.RemoveAll(tempDir) // Make a real context directory in this temp directory with a simple // Dockerfile. realContextDirname := filepath.Join(tempDir, "context") if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil { c.Fatal(err) } if err = ioutil.WriteFile( filepath.Join(realContextDirname, "Dockerfile"), []byte(` FROM busybox RUN echo hello world `), os.FileMode(0644), ); err != nil { c.Fatal(err) } // Make a symlink to the real context directory. contextSymlinkName := filepath.Join(tempDir, "context_link") if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil { c.Fatal(err) } // Executing the build with the symlink as the specified context should // *not* fail. if out, exitStatus := dockerCmd(c, "build", contextSymlinkName); exitStatus != 0 { c.Fatalf("build failed with exit status %d: %s", exitStatus, out) } } func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) { testRequires(c, NotaryHosting) latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") repoName := strings.TrimSuffix(latestTag, ":latest") // Now create the releases role s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) s.notaryPublish(c, repoName) // push a different tag to the releases role otherTag := fmt.Sprintf("%s:other", repoName) dockerCmd(c, "tag", "busybox", otherTag) pushCmd := exec.Command(dockerBinary, "push", otherTag) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) s.assertTargetInRoles(c, repoName, "other", "targets/releases") s.assertTargetNotInRoles(c, repoName, "other", "targets") out, status := dockerCmd(c, "rmi", otherTag) c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) dockerFile := fmt.Sprintf(` FROM %s RUN [] `, otherTag) name := "testtrustedbuildreleasesrole" buildCmd := buildImageCmd(name, dockerFile, true) s.trustedCmd(buildCmd) out, _, err = runCommandWithOutput(buildCmd) c.Assert(err, check.IsNil, check.Commentf("Trusted build failed: %s", out)) c.Assert(out, checker.Contains, fmt.Sprintf("FROM %s@sha", repoName)) } func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) { testRequires(c, NotaryHosting) latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") repoName := strings.TrimSuffix(latestTag, ":latest") // Now create a non-releases delegation role s.notaryCreateDelegation(c, repoName, "targets/other", 
s.not.keys[0].Public) s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) s.notaryPublish(c, repoName) // push a different tag to the other role otherTag := fmt.Sprintf("%s:other", repoName) dockerCmd(c, "tag", "busybox", otherTag) pushCmd := exec.Command(dockerBinary, "push", otherTag) s.trustedCmd(pushCmd) out, _, err := runCommandWithOutput(pushCmd) c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) s.assertTargetInRoles(c, repoName, "other", "targets/other") s.assertTargetNotInRoles(c, repoName, "other", "targets") out, status := dockerCmd(c, "rmi", otherTag) c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) dockerFile := fmt.Sprintf(` FROM %s RUN [] `, otherTag) name := "testtrustedbuildotherrole" buildCmd := buildImageCmd(name, dockerFile, true) s.trustedCmd(buildCmd) out, _, err = runCommandWithOutput(buildCmd) c.Assert(err, check.NotNil, check.Commentf("Trusted build expected to fail: %s", out)) } // Issue #15634: COPY fails when path starts with "null" func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) { name := "testbuildnullstringinaddcopyvolume" volName := "nullvolume" if daemonPlatform == "windows" { volName = `C:\\nullvolume` } ctx, err := fakeContext(` FROM busybox ADD null / COPY nullfile / VOLUME `+volName+` `, map[string]string{ "null": "test1", "nullfile": "test2", }, ) c.Assert(err, check.IsNil) defer ctx.Close() _, err = buildImageFromContext(name, ctx, true) c.Assert(err, check.IsNil) } func (s *DockerSuite) TestBuildStopSignal(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support STOPSIGNAL yet imgName := "test_build_stop_signal" _, err := buildImage(imgName, `FROM busybox STOPSIGNAL SIGKILL`, true) c.Assert(err, check.IsNil) res := inspectFieldJSON(c, imgName, "Config.StopSignal") if res != `"SIGKILL"` { c.Fatalf("Signal %s, expected SIGKILL", res) } containerName := "test-container-stop-signal" dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top") res = inspectFieldJSON(c, containerName, "Config.StopSignal") if res != `"SIGKILL"` { c.Fatalf("Signal %s, expected SIGKILL", res) } } func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) { imgName := "bldargtest" envKey := "foo" envVal := "bar" args := []string{"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)} var dockerfile string if daemonPlatform == "windows" { // Bugs in Windows busybox port - use the default base image and native cmd stuff dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+` ARG %s RUN echo %%%s%% CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey) } else { dockerfile = fmt.Sprintf(`FROM busybox ARG %s RUN echo $%s CMD echo $%s`, envKey, envKey, envKey) } if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) } containerName := "bldargCont" out, _ := dockerCmd(c, "run", "--name", containerName, imgName) out = strings.Trim(out, " \r\n'") if out != "" { c.Fatalf("run produced invalid output: %q, expected empty string", out) } } func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" envVal := "bar" envDef := "bar1" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile 
:= fmt.Sprintf(`FROM busybox ARG %s=%s`, envKey, envDef) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) } out, _ := dockerCmd(c, "history", "--no-trunc", imgName) outputTabs := strings.Split(out, "\n")[1] if !strings.Contains(outputTabs, envDef) { c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef) } } func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" envVal := "bar" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s RUN echo $%s`, envKey, envKey) origImgID := "" var err error if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { c.Fatal(err) } imgNameCache := "bldargtestcachehit" if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID != origImgID { if err != nil { c.Fatal(err) } c.Fatalf("build didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID) } } func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" envVal := "bar" extraEnvKey := "foo1" extraEnvVal := "bar1" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s ARG %s RUN echo $%s`, envKey, extraEnvKey, envKey) origImgID := "" var err error if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { c.Fatal(err) } imgNameCache := "bldargtestcachemiss" args = append(args, "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal)) if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { if err != nil { c.Fatal(err) } c.Fatalf("build used cache, expected a miss!") } } func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" envVal := "bar" newEnvVal := "bar1" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s RUN echo $%s`, envKey, envKey) origImgID := "" var err error if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { c.Fatal(err) } imgNameCache := "bldargtestcachemiss" args = []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal), } if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { if err != nil { c.Fatal(err) } c.Fatalf("build used cache, expected a miss!") } } func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" envVal := "bar" envValOveride := "barOverride" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s ENV %s %s RUN echo $%s CMD echo $%s `, envKey, envKey, envValOveride, envKey, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { if 
err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) } } func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" envVal := "bar" envValOveride := "barOverride" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ENV %s %s ARG %s RUN echo $%s CMD echo $%s `, envKey, envValOveride, envKey, envKey, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) } } func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldvarstest" wdVar := "WDIR" wdVal := "/tmp/" addVar := "AFILE" addVal := "addFile" copyVar := "CFILE" copyVal := "copyFile" envVar := "foo" envVal := "bar" exposeVar := "EPORT" exposeVal := "9999" userVar := "USER" userVal := "testUser" volVar := "VOL" volVal := "/testVol/" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal), "--build-arg", fmt.Sprintf("%s=%s", addVar, addVal), "--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal), "--build-arg", fmt.Sprintf("%s=%s", envVar, envVal), "--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal), "--build-arg", fmt.Sprintf("%s=%s", userVar, userVal), "--build-arg", fmt.Sprintf("%s=%s", volVar, volVal), } ctx, err := fakeContext(fmt.Sprintf(`FROM busybox ARG %s WORKDIR ${%s} ARG %s ADD ${%s} testDir/ ARG %s COPY $%s testDir/ ARG %s ENV %s=${%s} ARG %s EXPOSE $%s ARG %s USER $%s ARG %s VOLUME ${%s}`, wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar, envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar), map[string]string{ addVal: "some stuff", copyVal: "some stuff", }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(imgName, ctx, true, args...); err != nil { c.Fatal(err) } var resMap map[string]interface{} var resArr []string res := "" res = inspectField(c, imgName, "Config.WorkingDir") if res != filepath.ToSlash(filepath.Clean(wdVal)) { c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res) } inspectFieldAndMarshall(c, imgName, "Config.Env", &resArr) found := false for _, v := range resArr { if fmt.Sprintf("%s=%s", envVar, envVal) == v { found = true break } } if !found { c.Fatalf("Config.Env value mismatch. Expected <key=value> to exist: %s=%s, got: %v", envVar, envVal, resArr) } inspectFieldAndMarshall(c, imgName, "Config.ExposedPorts", &resMap) if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok { c.Fatalf("Config.ExposedPorts value mismatch. 
Expected exposed port: %s/tcp, got: %v", exposeVal, resMap) } res = inspectField(c, imgName, "Config.User") if res != userVal { c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res) } inspectFieldAndMarshall(c, imgName, "Config.Volumes", &resMap) if _, ok := resMap[volVal]; !ok { c.Fatalf("Config.Volumes value mismatch. Expected volume: %s, got: %v", volVal, resMap) } } func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldvarstest" envKey := "foo" envVal := "bar" envKey1 := "foo1" envValOveride := "barOverride" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s ENV %s %s ENV %s ${%s} RUN echo $%s CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) } } func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" envVal := "bar" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox RUN echo $%s ARG %s CMD echo $%s`, envKey, envKey, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Contains(out, envVal) { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("able to access environment variable in output: %q expected to be missing", out) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { c.Fatalf("run produced invalid output: %q, expected empty string", out) } } func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support --build-arg imgName := "bldargtest" envKey := "HTTP_PROXY" envVal := "bar" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox RUN echo $%s CMD echo $%s`, envKey, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { c.Fatalf("run produced invalid output: %q, expected empty string", out) } } func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" envVal := "bar" envValOveride := "barOverride" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride), } dockerfile := fmt.Sprintf(`FROM busybox ARG %s=%s ENV %s $%s RUN echo $%s CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey) if _, out, err := buildImageWithOut(imgName, 
dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 1 { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) } containerName := "bldargCont" if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) } } func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support --build-arg imgName := "bldargtest" envKey := "foo" envVal := "bar" args := []string{ "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), } dockerfile := fmt.Sprintf(`FROM busybox RUN echo $%s CMD echo $%s`, envKey, envKey) errStr := "One or more build-args" if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err == nil { c.Fatalf("build succeeded, expected to fail. Output: %v", out) } else if !strings.Contains(out, errStr) { c.Fatalf("Unexpected error. output: %q, expected error: %q", out, errStr) } } func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" envKey1 := "foo1" envKey2 := "foo2" envKey3 := "foo3" args := []string{} dockerfile := fmt.Sprintf(`FROM busybox ARG %s="" ARG %s='' ARG %s="''" ARG %s='""' RUN [ "$%s" != "$%s" ] RUN [ "$%s" != "$%s" ] RUN [ "$%s" != "$%s" ] RUN [ "$%s" != "$%s" ] RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3, envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3, envKey2, envKey3) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } } func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" envKey1 := "foo1" envKey2 := "foo2" args := []string{} dockerfile := fmt.Sprintf(`FROM busybox ARG %s= ARG %s="" ARG %s='' RUN [ "$%s" == "$%s" ] RUN [ "$%s" == "$%s" ] RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } } func (s *DockerSuite) TestBuildBuildTimeArgDefintionWithNoEnvInjection(c *check.C) { testRequires(c, DaemonIsLinux) // Windows does not support ARG imgName := "bldargtest" envKey := "foo" args := []string{} dockerfile := fmt.Sprintf(`FROM busybox ARG %s RUN env`, envKey) if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envKey) != 1 { if err != nil { c.Fatalf("build failed to complete: %q %q", out, err) } c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", out) } } func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) { volName := "testname:/foo" if daemonPlatform == "windows" { volName = "testname:C:\\foo" } dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops") dockerFile := `FROM busybox VOLUME ` + volName + ` RUN ls /foo/oops ` _, err := buildImage("test", dockerFile, false) c.Assert(err, check.NotNil, check.Commentf("image build should have failed")) } func (s *DockerSuite) TestBuildTagEvent(c *check.C) { since := daemonUnixTime(c) dockerFile := `FROM busybox RUN echo events ` 
_, err := buildImage("test", dockerFile, false) c.Assert(err, check.IsNil) until := daemonUnixTime(c) out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image") events := strings.Split(strings.TrimSpace(out), "\n") actions := eventActionsByIDAndType(c, events, "test:latest", "image") var foundTag bool for _, a := range actions { if a == "tag" { foundTag = true break } } c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out)) } // #15780 func (s *DockerSuite) TestBuildMultipleTags(c *check.C) { dockerfile := ` FROM busybox MAINTAINER test-15780 ` cmd := exec.Command(dockerBinary, "build", "-t", "tag1", "-t", "tag2:v2", "-t", "tag1:latest", "-t", "tag1", "--no-cache", "-") cmd.Stdin = strings.NewReader(dockerfile) _, err := runCommand(cmd) c.Assert(err, check.IsNil) id1, err := getIDByName("tag1") c.Assert(err, check.IsNil) id2, err := getIDByName("tag2:v2") c.Assert(err, check.IsNil) c.Assert(id1, check.Equals, id2) } // #17290 func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildbrokensymlink" ctx, err := fakeContext(` FROM busybox COPY . ./`, map[string]string{ "foo": "bar", }) c.Assert(err, checker.IsNil) defer ctx.Close() err = os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) // warm up cache _, err = buildImageFromContext(name, ctx, true) c.Assert(err, checker.IsNil) // add new file to context, should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644) c.Assert(err, checker.IsNil) _, out, err := buildImageFromContextWithOut(name, ctx, true) c.Assert(err, checker.IsNil) c.Assert(out, checker.Not(checker.Contains), "Using cache") } func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildbrokensymlink" ctx, err := fakeContext(` FROM busybox COPY asymlink target`, map[string]string{ "foo": "bar", }) c.Assert(err, checker.IsNil) defer ctx.Close() err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) id, err := buildImageFromContext(name, ctx, true) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "run", "--rm", id, "cat", "target") c.Assert(out, checker.Matches, "bar") // change target file should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) c.Assert(err, checker.IsNil) id, out, err = buildImageFromContextWithOut(name, ctx, true) c.Assert(err, checker.IsNil) c.Assert(out, checker.Not(checker.Contains), "Using cache") out, _ = dockerCmd(c, "run", "--rm", id, "cat", "target") c.Assert(out, checker.Matches, "baz") } func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildbrokensymlink" ctx, err := fakeContext(` FROM busybox COPY asymlink /`, map[string]string{ "foo/abc": "bar", "foo/def": "baz", }) c.Assert(err, checker.IsNil) defer ctx.Close() err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) id, err := buildImageFromContext(name, ctx, true) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") c.Assert(out, checker.Matches, "barbaz") // change target file should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644) c.Assert(err, checker.IsNil) id, out, err = buildImageFromContextWithOut(name, ctx, true) c.Assert(err, checker.IsNil) c.Assert(out, 
checker.Not(checker.Contains), "Using cache") out, _ = dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") c.Assert(out, checker.Matches, "barbax") } // TestBuildSymlinkBasename tests that target file gets basename from symlink, // not from the target file. func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) { testRequires(c, DaemonIsLinux) name := "testbuildbrokensymlink" ctx, err := fakeContext(` FROM busybox COPY asymlink /`, map[string]string{ "foo": "bar", }) c.Assert(err, checker.IsNil) defer ctx.Close() err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) c.Assert(err, checker.IsNil) id, err := buildImageFromContext(name, ctx, true) c.Assert(err, checker.IsNil) out, _ := dockerCmd(c, "run", "--rm", id, "cat", "asymlink") c.Assert(out, checker.Matches, "bar") } // #17827 func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { name := "testbuildrootsource" ctx, err := fakeContext(` FROM busybox COPY / /data`, map[string]string{ "foo": "bar", }) c.Assert(err, checker.IsNil) defer ctx.Close() // warm up cache _, err = buildImageFromContext(name, ctx, true) c.Assert(err, checker.IsNil) // change file, should invalidate cache err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) c.Assert(err, checker.IsNil) _, out, err := buildImageFromContextWithOut(name, ctx, true) c.Assert(err, checker.IsNil) c.Assert(out, checker.Not(checker.Contains), "Using cache") } // #19375 func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) { cmd := exec.Command(dockerBinary, "build", "github.com/docker/v1.10-migrator.git") cmd.Env = append(cmd.Env, "PATH=") out, _, err := runCommandWithOutput(cmd) c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") cmd = exec.Command(dockerBinary, "build", "https://github.com/docker/v1.10-migrator.git") cmd.Env = append(cmd.Env, "PATH=") out, _, err = runCommandWithOutput(cmd) c.Assert(err, checker.NotNil) c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") } // TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) { testRequires(c, DaemonIsWindows) name := "testbuildworkdirwindowspath" _, err := buildImage(name, ` FROM `+WindowsBaseImage+` RUN mkdir C:\\work WORKDIR C:\\work RUN if "%CD%" NEQ "C:\work" exit -1 `, true) if err != nil { c.Fatal(err) } } func (s *DockerSuite) TestBuildLabel(c *check.C) { name := "testbuildlabel" testLabel := "foo" _, err := buildImage(name, ` FROM `+minimalBaseImage()+` LABEL default foo `, false, "--label", testLabel) c.Assert(err, checker.IsNil) res := inspectFieldJSON(c, name, "Config.Labels") var labels map[string]string if err := json.Unmarshal([]byte(res), &labels); err != nil { c.Fatal(err) } if _, ok := labels[testLabel]; !ok { c.Fatal("label not found in image") } } func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) { name := "testbuildlabel" _, err := buildImage(name, "FROM busybox", false, "--label", "foo=bar") c.Assert(err, checker.IsNil) res, err := inspectImage(name, "json .Config.Labels") c.Assert(err, checker.IsNil) var labels map[string]string if err := json.Unmarshal([]byte(res), &labels); err != nil { c.Fatal(err) } v, ok := labels["foo"] if !ok { c.Fatal("label `foo` not found in image") } c.Assert(v, checker.Equals, "bar") } func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) { name := "testbuildlabelcachecommit" testLabel := "foo" if _, err := buildImage(name, ` FROM 
`+minimalBaseImage()+` LABEL default foo `, false); err != nil { c.Fatal(err) } _, err := buildImage(name, ` FROM `+minimalBaseImage()+` LABEL default foo `, true, "--label", testLabel) c.Assert(err, checker.IsNil) res := inspectFieldJSON(c, name, "Config.Labels") var labels map[string]string if err := json.Unmarshal([]byte(res), &labels); err != nil { c.Fatal(err) } if _, ok := labels[testLabel]; !ok { c.Fatal("label not found in image") } } func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) { name := "testbuildlabelmultiple" testLabels := map[string]string{ "foo": "bar", "123": "456", } labelArgs := []string{} for k, v := range testLabels { labelArgs = append(labelArgs, "--label", k+"="+v) } _, err := buildImage(name, ` FROM `+minimalBaseImage()+` LABEL default foo `, false, labelArgs...) if err != nil { c.Fatal("error building image with labels", err) } res := inspectFieldJSON(c, name, "Config.Labels") var labels map[string]string if err := json.Unmarshal([]byte(res), &labels); err != nil { c.Fatal(err) } for k, v := range testLabels { if x, ok := labels[k]; !ok || x != v { c.Fatalf("label %s=%s not found in image", k, v) } } } func (s *DockerSuite) TestBuildLabelOverwrite(c *check.C) { name := "testbuildlabeloverwrite" testLabel := "foo" testValue := "bar" _, err := buildImage(name, ` FROM `+minimalBaseImage()+` LABEL `+testLabel+`+ foo `, false, []string{"--label", testLabel + "=" + testValue}...) if err != nil { c.Fatal("error building image with labels", err) } res := inspectFieldJSON(c, name, "Config.Labels") var labels map[string]string if err := json.Unmarshal([]byte(res), &labels); err != nil { c.Fatal(err) } v, ok := labels[testLabel] if !ok { c.Fatal("label not found in image") } if v != testValue { c.Fatal("label not overwritten") } } func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) { dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) baseImage := privateRegistryURL + "/baseimage" _, err := buildImage(baseImage, ` FROM busybox ENV env1 val1 `, true) c.Assert(err, checker.IsNil) dockerCmd(c, "push", baseImage) dockerCmd(c, "rmi", baseImage) _, err = buildImage(baseImage, fmt.Sprintf(` FROM %s ENV env2 val2 `, baseImage), true) c.Assert(err, checker.IsNil) } func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) { osPath := os.Getenv("PATH") defer os.Setenv("PATH", osPath) workingDir, err := os.Getwd() c.Assert(err, checker.IsNil) absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) c.Assert(err, checker.IsNil) testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) os.Setenv("PATH", testPath) repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) tmp, err := ioutil.TempDir("", "integration-cli-") c.Assert(err, checker.IsNil) externalAuthConfig := `{ "credsStore": "shell-test" }` configPath := filepath.Join(tmp, "config.json") err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) c.Assert(err, checker.IsNil) dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) b, err := ioutil.ReadFile(configPath) c.Assert(err, checker.IsNil) c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) dockerCmd(c, "--config", tmp, "push", repoName) // make sure the image is pulled when building dockerCmd(c, "rmi", repoName) buildCmd := exec.Command(dockerBinary, "--config", tmp, "build", "-") 
buildCmd.Stdin = strings.NewReader(fmt.Sprintf("FROM %s", repoName)) out, _, err := runCommandWithOutput(buildCmd) c.Assert(err, check.IsNil, check.Commentf(out)) } // Test cases in #22036 func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { testRequires(c, DaemonIsLinux) // Command line option labels will always override name := "scratchy" expected := `{"bar":"from-flag","foo":"from-flag"}` _, err := buildImage(name, `FROM scratch LABEL foo=from-dockerfile`, true, "--label", "foo=from-flag", "--label", "bar=from-flag") c.Assert(err, check.IsNil) res := inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) } name = "from" expected = `{"foo":"from-dockerfile"}` _, err = buildImage(name, `FROM scratch LABEL foo from-dockerfile`, true) c.Assert(err, check.IsNil) res = inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) } // Command line option label will override even via `FROM` name = "new" expected = `{"bar":"from-dockerfile2","foo":"new"}` _, err = buildImage(name, `FROM from LABEL bar from-dockerfile2`, true, "--label", "foo=new") c.Assert(err, check.IsNil) res = inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) } // Command line option without a value set (--label foo, --label bar=) // will be treated as --label foo="", --label bar="" name = "scratchy2" expected = `{"bar":"","foo":""}` _, err = buildImage(name, `FROM scratch LABEL foo=from-dockerfile`, true, "--label", "foo", "--label", "bar=") c.Assert(err, check.IsNil) res = inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) } // Command line option without a value set (--label foo, --label bar=) // will be treated as --label foo="", --label bar="" // This time is for inherited images name = "new2" expected = `{"bar":"","foo":""}` _, err = buildImage(name, `FROM from LABEL bar from-dockerfile2`, true, "--label", "foo=", "--label", "bar") c.Assert(err, check.IsNil) res = inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) } // Command line option labels with only `FROM` name = "scratchy" expected = `{"bar":"from-flag","foo":"from-flag"}` _, err = buildImage(name, `FROM scratch`, true, "--label", "foo=from-flag", "--label", "bar=from-flag") c.Assert(err, check.IsNil) res = inspectFieldJSON(c, name, "Config.Labels") if res != expected { c.Fatalf("Labels %s, expected %s", res, expected) } } // Test case for #22855 func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) { name := "test-delete-committed-file" _, err := buildImage(name, `FROM busybox RUN echo test > file RUN test -e file RUN rm file RUN sh -c "! test -e file"`, false) if err != nil { c.Fatal(err) } } // #20083 func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) { // TODO Windows: Figure out why this test is flakey on TP5. If you add // something like RUN sleep 5, or even RUN ls /tmp after the ADD line, // it is more reliable, but that's not a good fix. testRequires(c, DaemonIsLinux) name := "testbuilddockerignorecleanpaths" dockerfile := ` FROM busybox ADD . /tmp/ RUN sh -c "(ls -la /tmp/#1)" RUN sh -c "(! ls -la /tmp/#2)" RUN sh -c "(! ls /tmp/foo) && (! 
ls /tmp/foo2) && (ls /tmp/dir1/foo)"` ctx, err := fakeContext(dockerfile, map[string]string{ "foo": "foo", "foo2": "foo2", "dir1/foo": "foo in dir1", "#1": "# file 1", "#2": "# file 2", ".dockerignore": `# Visual C++ cache files # because we have git ;-) # The above comment is from #20083 foo #dir1/foo foo2 # The following is considered as comment as # is at the beginning #1 # The following is not considered as comment as # is not at the beginning #2 `, }) if err != nil { c.Fatal(err) } defer ctx.Close() if _, err := buildImageFromContext(name, ctx, true); err != nil { c.Fatal(err) } } // Test case for #23221 func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) { name := "test-with-utf8-bom" dockerfile := []byte(`FROM busybox`) bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...) ctx, err := fakeContextFromNewTempDir() c.Assert(err, check.IsNil) defer ctx.Close() err = ctx.addFile("Dockerfile", bomDockerfile) c.Assert(err, check.IsNil) _, err = buildImageFromContext(name, ctx, true) c.Assert(err, check.IsNil) } // Test case for UTF-8 BOM in .dockerignore, related to #23221 func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) { name := "test-with-utf8-bom-dockerignore" dockerfile := ` FROM busybox ADD . /tmp/ RUN ls -la /tmp RUN sh -c "! ls /tmp/Dockerfile" RUN ls /tmp/.dockerignore` dockerignore := []byte("./Dockerfile\n") bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...) ctx, err := fakeContext(dockerfile, map[string]string{ "Dockerfile": dockerfile, }) c.Assert(err, check.IsNil) defer ctx.Close() err = ctx.addFile(".dockerignore", bomDockerignore) c.Assert(err, check.IsNil) _, err = buildImageFromContext(name, ctx, true) if err != nil { c.Fatal(err) } } // #22489 Shell test to confirm config gets updated correctly func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) { name := "testbuildshellupdatesconfig" expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]` _, err := buildImage(name, `FROM `+minimalBaseImage()+` SHELL ["foo", "-bar"]`, true) if err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") if res != expected { c.Fatalf("%s, expected %s", res, expected) } res = inspectFieldJSON(c, name, "ContainerConfig.Shell") if res != `["foo","-bar"]` { c.Fatalf(`%s, expected ["foo","-bar"]`, res) } } // #22489 Changing the shell multiple times and CMD after. func (s *DockerSuite) TestBuildShellMultiple(c *check.C) { name := "testbuildshellmultiple" _, out, _, err := buildImageWithStdoutStderr(name, `FROM busybox RUN echo defaultshell SHELL ["echo"] RUN echoshell SHELL ["ls"] RUN -l CMD -l`, true) if err != nil { c.Fatal(err) } // Must contain 'defaultshell' twice if len(strings.Split(out, "defaultshell")) != 3 { c.Fatalf("defaultshell should have appeared twice in %s", out) } // Must contain 'echoshell' twice if len(strings.Split(out, "echoshell")) != 3 { c.Fatalf("echoshell should have appeared twice in %s", out) } // Must contain "total " (part of ls -l) if !strings.Contains(out, "total ") { c.Fatalf("%s should have contained 'total '", out) } // A container started from the image uses the shell-form CMD. // Last shell is ls. CMD is -l. So should contain 'total '. outrun, _ := dockerCmd(c, "run", "--rm", name) if !strings.Contains(outrun, "total ") { c.Fatalf("Expected started container to run ls -l. %s", outrun) } } // #22489. 
Changed SHELL with ENTRYPOINT func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) { name := "testbuildshellentrypoint" _, err := buildImage(name, `FROM busybox SHELL ["ls"] ENTRYPOINT -l`, true) if err != nil { c.Fatal(err) } // A container started from the image uses the shell-form ENTRYPOINT. // Shell is ls. ENTRYPOINT is -l. So should contain 'total '. outrun, _ := dockerCmd(c, "run", "--rm", name) if !strings.Contains(outrun, "total ") { c.Fatalf("Expected started container to run ls -l. %s", outrun) } } // #22489 Shell test to confirm shell is inherited in a subsequent build func (s *DockerSuite) TestBuildShellInherited(c *check.C) { name1 := "testbuildshellinherited1" _, err := buildImage(name1, `FROM busybox SHELL ["ls"]`, true) if err != nil { c.Fatal(err) } name2 := "testbuildshellinherited2" _, out, _, err := buildImageWithStdoutStderr(name2, `FROM `+name1+` RUN -l`, true) if err != nil { c.Fatal(err) } // ls -l has "total " followed by some number in it, ls without -l does not. if !strings.Contains(out, "total ") { c.Fatalf("Should have seen total in 'ls -l'.\n%s", out) } } // #22489 Shell test to confirm non-JSON doesn't work func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) { name := "testbuildshellnotjson" _, err := buildImage(name, `FROM `+minimalBaseImage()+` sHeLl exec -form`, // Casing explicit to ensure error is upper-cased. true) if err == nil { c.Fatal("Image build should have failed") } if !strings.Contains(err.Error(), "SHELL requires the arguments to be in JSON form") { c.Fatal("Error didn't indicate that arguments must be in JSON form") } } // #22489 Windows shell test to confirm native is powershell if executing a PS command // This would error if the default shell were still cmd. func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) { testRequires(c, DaemonIsWindows) name := "testbuildshellpowershell" _, out, err := buildImageWithOut(name, `FROM `+minimalBaseImage()+` SHELL ["powershell", "-command"] RUN Write-Host John`, true) if err != nil { c.Fatal(err) } if !strings.Contains(out, "\nJohn\n") { c.Fatalf("Line with 'John' not found in output %q", out) } } // #22868. Make sure shell-form CMD is marked as escaped in the config of the image func (s *DockerSuite) TestBuildCmdShellArgsEscaped(c *check.C) { testRequires(c, DaemonIsWindows) name := "testbuildcmdshellescaped" _, err := buildImage(name, ` FROM `+minimalBaseImage()+` CMD "ipconfig" `, true) if err != nil { c.Fatal(err) } res := inspectFieldJSON(c, name, "Config.ArgsEscaped") if res != "true" { c.Fatalf("CMD did not update Config.ArgsEscaped on image: %v", res) } dockerCmd(c, "run", "--name", "inspectme", name) dockerCmd(c, "wait", "inspectme") res = inspectFieldJSON(c, name, "Config.Cmd") if res != `["cmd","/S","/C","\"ipconfig\""]` { c.Fatalf("CMD was not escaped Config.Cmd: got %v", res) } }
[ "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
djBackend/manage.py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    """Run administrative tasks."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djBackend.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
[]
[]
[]
[]
[]
python
0
0
mesonbuild/interpreter.py
# Copyright 2012-2019 The Meson development team

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from . import mparser
from . import environment
from . import coredata
from . import dependencies
from . import mlog
from . import build
from . import optinterpreter
from . import compilers
from .wrap import wrap, WrapMode
from . import mesonlib
from .mesonlib import FileMode, MachineChoice, Popen_safe, listify, extract_as_list, has_path_sep, unholder
from .dependencies import ExternalProgram
from .dependencies import InternalDependency, Dependency, NotFoundDependency, DependencyException
from .depfile import DepFile
from .interpreterbase import InterpreterBase
from .interpreterbase import check_stringlist, flatten, noPosargs, noKwargs, stringArgs, permittedKwargs, noArgsFlattening
from .interpreterbase import InterpreterException, InvalidArguments, InvalidCode, SubdirDoneRequest
from .interpreterbase import InterpreterObject, MutableInterpreterObject, Disabler, disablerIfNotFound
from .interpreterbase import FeatureNew, FeatureDeprecated, FeatureNewKwargs, FeatureDeprecatedKwargs
from .interpreterbase import ObjectHolder, MesonVersionString
from .interpreterbase import TYPE_var, TYPE_nkwargs
from .modules import ModuleReturnValue, ExtensionModule
from .cmake import CMakeInterpreter
from .backend.backends import TestProtocol, Backend
from ._pathlib import Path, PurePath

import os
import shutil
import uuid
import re
import shlex
import stat
import subprocess
import collections
import functools
import typing as T
import importlib

permitted_method_kwargs = {
    'partial_dependency': {'compile_args', 'link_args', 'links', 'includes',
                           'sources'},
}

def stringifyUserArguments(args):
    if isinstance(args, list):
        return '[%s]' % ', '.join([stringifyUserArguments(x) for x in args])
    elif isinstance(args, dict):
        return '{%s}' % ', '.join(['%s : %s' % (stringifyUserArguments(k), stringifyUserArguments(v)) for k, v in args.items()])
    elif isinstance(args, int):
        return str(args)
    elif isinstance(args, str):
        return "'%s'" % args
    raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.')


class OverrideProgram(dependencies.ExternalProgram):
    pass


class FeatureOptionHolder(InterpreterObject, ObjectHolder):
    def __init__(self, env, name, option):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, option)
        if option.is_auto():
            self.held_object = env.coredata.builtins['auto_features']
        self.name = name
        self.methods.update({'enabled': self.enabled_method,
                             'disabled': self.disabled_method,
                             'auto': self.auto_method,
                             })

    @noPosargs
    @permittedKwargs({})
    def enabled_method(self, args, kwargs):
        return self.held_object.is_enabled()

    @noPosargs
    @permittedKwargs({})
    def disabled_method(self, args, kwargs):
        return self.held_object.is_disabled()

    @noPosargs
    @permittedKwargs({})
    def auto_method(self, args, kwargs):
        return self.held_object.is_auto()

def extract_required_kwarg(kwargs, subproject, feature_check=None, default=True): val = kwargs.get('required', default) disabled = False required = False feature = None if
isinstance(val, FeatureOptionHolder): if not feature_check: feature_check = FeatureNew('User option "feature"', '0.47.0') feature_check.use(subproject) option = val.held_object feature = val.name if option.is_disabled(): disabled = True elif option.is_enabled(): required = True elif isinstance(val, bool): required = val else: raise InterpreterException('required keyword argument must be boolean or a feature option') # Keep boolean value in kwargs to simplify other places where this kwarg is # checked. kwargs['required'] = required return disabled, required, feature def extract_search_dirs(kwargs): search_dirs = mesonlib.stringlistify(kwargs.get('dirs', [])) search_dirs = [Path(d).expanduser() for d in search_dirs] for d in search_dirs: if mesonlib.is_windows() and d.root.startswith('\\'): # a Unix-path starting with `/` that is not absolute on Windows. # discard without failing for end-user ease of cross-platform directory arrays continue if not d.is_absolute(): raise InvalidCode('Search directory {} is not an absolute path.'.format(d)) return list(map(str, search_dirs)) class TryRunResultHolder(InterpreterObject): def __init__(self, res): super().__init__() self.res = res self.methods.update({'returncode': self.returncode_method, 'compiled': self.compiled_method, 'stdout': self.stdout_method, 'stderr': self.stderr_method, }) @noPosargs @permittedKwargs({}) def returncode_method(self, args, kwargs): return self.res.returncode @noPosargs @permittedKwargs({}) def compiled_method(self, args, kwargs): return self.res.compiled @noPosargs @permittedKwargs({}) def stdout_method(self, args, kwargs): return self.res.stdout @noPosargs @permittedKwargs({}) def stderr_method(self, args, kwargs): return self.res.stderr class RunProcess(InterpreterObject): def __init__(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir=False, check=False, capture=True): super().__init__() if not isinstance(cmd, ExternalProgram): raise AssertionError('BUG: RunProcess must be passed an ExternalProgram') self.capture = capture pc, self.stdout, self.stderr = self.run_command(cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check) self.returncode = pc.returncode self.methods.update({'returncode': self.returncode_method, 'stdout': self.stdout_method, 'stderr': self.stderr_method, }) def run_command(self, cmd, args, env, source_dir, build_dir, subdir, mesonintrospect, in_builddir, check=False): command_array = cmd.get_command() + args menv = {'MESON_SOURCE_ROOT': source_dir, 'MESON_BUILD_ROOT': build_dir, 'MESON_SUBDIR': subdir, 'MESONINTROSPECT': ' '.join([shlex.quote(x) for x in mesonintrospect]), } if in_builddir: cwd = os.path.join(build_dir, subdir) else: cwd = os.path.join(source_dir, subdir) child_env = os.environ.copy() child_env.update(menv) child_env = env.get_env(child_env) stdout = subprocess.PIPE if self.capture else subprocess.DEVNULL mlog.debug('Running command:', ' '.join(command_array)) try: p, o, e = Popen_safe(command_array, stdout=stdout, env=child_env, cwd=cwd) if self.capture: mlog.debug('--- stdout ---') mlog.debug(o) else: o = '' mlog.debug('--- stdout disabled ---') mlog.debug('--- stderr ---') mlog.debug(e) mlog.debug('') if check and p.returncode != 0: raise InterpreterException('Command "{}" failed with status {}.'.format(' '.join(command_array), p.returncode)) return p, o, e except FileNotFoundError: raise InterpreterException('Could not execute command "%s".' 
% ' '.join(command_array)) @noPosargs @permittedKwargs({}) def returncode_method(self, args, kwargs): return self.returncode @noPosargs @permittedKwargs({}) def stdout_method(self, args, kwargs): return self.stdout @noPosargs @permittedKwargs({}) def stderr_method(self, args, kwargs): return self.stderr class ConfigureFileHolder(InterpreterObject, ObjectHolder): def __init__(self, subdir, sourcename, targetname, configuration_data): InterpreterObject.__init__(self) obj = build.ConfigureFile(subdir, sourcename, targetname, configuration_data) ObjectHolder.__init__(self, obj) class EnvironmentVariablesHolder(MutableInterpreterObject, ObjectHolder): def __init__(self, initial_values=None): MutableInterpreterObject.__init__(self) ObjectHolder.__init__(self, build.EnvironmentVariables()) self.methods.update({'set': self.set_method, 'append': self.append_method, 'prepend': self.prepend_method, }) if isinstance(initial_values, dict): for k, v in initial_values.items(): self.set_method([k, v], {}) elif isinstance(initial_values, list): for e in initial_values: if '=' not in e: raise InterpreterException('Env var definition must be of type key=val.') (k, val) = e.split('=', 1) k = k.strip() val = val.strip() if ' ' in k: raise InterpreterException('Env var key must not have spaces in it.') self.set_method([k, val], {}) elif initial_values: raise AssertionError('Unsupported EnvironmentVariablesHolder initial_values') def __repr__(self): repr_str = "<{0}: {1}>" return repr_str.format(self.__class__.__name__, self.held_object.envvars) def add_var(self, method, args, kwargs): if not isinstance(kwargs.get("separator", ""), str): raise InterpreterException("EnvironmentVariablesHolder methods 'separator'" " argument needs to be a string.") if len(args) < 2: raise InterpreterException("EnvironmentVariablesHolder methods require at least" "2 arguments, first is the name of the variable and" " following one are values") # Warn when someone tries to use append() or prepend() on an env var # which already has an operation set on it. People seem to think that # multiple append/prepend operations stack, but they don't. if method != self.held_object.set and self.held_object.has_name(args[0]): mlog.warning('Overriding previous value of environment variable {!r} with a new one' .format(args[0]), location=self.current_node) self.held_object.add_var(method, args[0], args[1:], kwargs) @stringArgs @permittedKwargs({'separator'}) def set_method(self, args, kwargs): self.add_var(self.held_object.set, args, kwargs) @stringArgs @permittedKwargs({'separator'}) def append_method(self, args, kwargs): self.add_var(self.held_object.append, args, kwargs) @stringArgs @permittedKwargs({'separator'}) def prepend_method(self, args, kwargs): self.add_var(self.held_object.prepend, args, kwargs) class ConfigurationDataHolder(MutableInterpreterObject, ObjectHolder): def __init__(self, pv, initial_values=None): MutableInterpreterObject.__init__(self) self.used = False # These objects become immutable after use in configure_file. 
ObjectHolder.__init__(self, build.ConfigurationData(), pv) self.methods.update({'set': self.set_method, 'set10': self.set10_method, 'set_quoted': self.set_quoted_method, 'has': self.has_method, 'get': self.get_method, 'get_unquoted': self.get_unquoted_method, 'merge_from': self.merge_from_method, }) if isinstance(initial_values, dict): for k, v in initial_values.items(): self.set_method([k, v], {}) elif initial_values: raise AssertionError('Unsupported ConfigurationDataHolder initial_values') def is_used(self): return self.used def mark_used(self): self.used = True def validate_args(self, args, kwargs): if len(args) == 1 and isinstance(args[0], list) and len(args[0]) == 2: mlog.deprecation('Passing a list as the single argument to ' 'configuration_data.set is deprecated. This will ' 'become a hard error in the future.', location=self.current_node) args = args[0] if len(args) != 2: raise InterpreterException("Configuration set requires 2 arguments.") if self.used: raise InterpreterException("Can not set values on configuration object that has been used.") name, val = args if not isinstance(val, (int, str)): msg = 'Setting a configuration data value to {!r} is invalid, ' \ 'and will fail at configure_file(). If you are using it ' \ 'just to store some values, please use a dict instead.' mlog.deprecation(msg.format(val), location=self.current_node) desc = kwargs.get('description', None) if not isinstance(name, str): raise InterpreterException("First argument to set must be a string.") if desc is not None and not isinstance(desc, str): raise InterpreterException('Description must be a string.') return name, val, desc @noArgsFlattening def set_method(self, args, kwargs): (name, val, desc) = self.validate_args(args, kwargs) self.held_object.values[name] = (val, desc) def set_quoted_method(self, args, kwargs): (name, val, desc) = self.validate_args(args, kwargs) if not isinstance(val, str): raise InterpreterException("Second argument to set_quoted must be a string.") escaped_val = '\\"'.join(val.split('"')) self.held_object.values[name] = ('"' + escaped_val + '"', desc) def set10_method(self, args, kwargs): (name, val, desc) = self.validate_args(args, kwargs) if val: self.held_object.values[name] = (1, desc) else: self.held_object.values[name] = (0, desc) def has_method(self, args, kwargs): return args[0] in self.held_object.values @FeatureNew('configuration_data.get()', '0.38.0') @noArgsFlattening def get_method(self, args, kwargs): if len(args) < 1 or len(args) > 2: raise InterpreterException('Get method takes one or two arguments.') name = args[0] if name in self.held_object: return self.held_object.get(name)[0] if len(args) > 1: return args[1] raise InterpreterException('Entry %s not in configuration data.' % name) @FeatureNew('configuration_data.get_unquoted()', '0.44.0') def get_unquoted_method(self, args, kwargs): if len(args) < 1 or len(args) > 2: raise InterpreterException('Get method takes one or two arguments.') name = args[0] if name in self.held_object: val = self.held_object.get(name)[0] elif len(args) > 1: val = args[1] else: raise InterpreterException('Entry %s not in configuration data.' 
% name) if val[0] == '"' and val[-1] == '"': return val[1:-1] return val def get(self, name): return self.held_object.values[name] # (val, desc) def keys(self): return self.held_object.values.keys() def merge_from_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Merge_from takes one positional argument.') from_object = args[0] if not isinstance(from_object, ConfigurationDataHolder): raise InterpreterException('Merge_from argument must be a configuration data object.') from_object = from_object.held_object for k, v in from_object.values.items(): self.held_object.values[k] = v # Interpreter objects can not be pickled so we must have # these wrappers. class DependencyHolder(InterpreterObject, ObjectHolder): def __init__(self, dep, pv): InterpreterObject.__init__(self) ObjectHolder.__init__(self, dep, pv) self.methods.update({'found': self.found_method, 'type_name': self.type_name_method, 'version': self.version_method, 'name': self.name_method, 'get_pkgconfig_variable': self.pkgconfig_method, 'get_configtool_variable': self.configtool_method, 'get_variable': self.variable_method, 'partial_dependency': self.partial_dependency_method, 'include_type': self.include_type_method, 'as_system': self.as_system_method, 'as_link_whole': self.as_link_whole_method, }) def found(self): return self.found_method([], {}) @noPosargs @permittedKwargs({}) def type_name_method(self, args, kwargs): return self.held_object.type_name @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): if self.held_object.type_name == 'internal': return True return self.held_object.found() @noPosargs @permittedKwargs({}) def version_method(self, args, kwargs): return self.held_object.get_version() @noPosargs @permittedKwargs({}) def name_method(self, args, kwargs): return self.held_object.get_name() @FeatureDeprecated('Dependency.get_pkgconfig_variable', '0.56.0', 'use Dependency.get_variable(pkgconfig : ...) instead') @permittedKwargs({'define_variable', 'default'}) def pkgconfig_method(self, args, kwargs): args = listify(args) if len(args) != 1: raise InterpreterException('get_pkgconfig_variable takes exactly one argument.') varname = args[0] if not isinstance(varname, str): raise InterpreterException('Variable name must be a string.') return self.held_object.get_pkgconfig_variable(varname, kwargs) @FeatureNew('dep.get_configtool_variable', '0.44.0') @FeatureDeprecated('Dependency.get_configtool_variable', '0.56.0', 'use Dependency.get_variable(configtool : ...) 
instead') @permittedKwargs({}) def configtool_method(self, args, kwargs): args = listify(args) if len(args) != 1: raise InterpreterException('get_configtool_variable takes exactly one argument.') varname = args[0] if not isinstance(varname, str): raise InterpreterException('Variable name must be a string.') return self.held_object.get_configtool_variable(varname) @FeatureNew('dep.partial_dependency', '0.46.0') @noPosargs @permittedKwargs(permitted_method_kwargs['partial_dependency']) def partial_dependency_method(self, args, kwargs): pdep = self.held_object.get_partial_dependency(**kwargs) return DependencyHolder(pdep, self.subproject) @FeatureNew('dep.get_variable', '0.51.0') @noPosargs @permittedKwargs({'cmake', 'pkgconfig', 'configtool', 'internal', 'default_value', 'pkgconfig_define'}) @FeatureNewKwargs('dep.get_variable', '0.54.0', ['internal']) def variable_method(self, args, kwargs): return self.held_object.get_variable(**kwargs) @FeatureNew('dep.include_type', '0.52.0') @noPosargs @permittedKwargs({}) def include_type_method(self, args, kwargs): return self.held_object.get_include_type() @FeatureNew('dep.as_system', '0.52.0') @permittedKwargs({}) def as_system_method(self, args, kwargs): args = listify(args) new_is_system = 'system' if len(args) > 1: raise InterpreterException('as_system takes only one optional value') if len(args) == 1: new_is_system = args[0] new_dep = self.held_object.generate_system_dependency(new_is_system) return DependencyHolder(new_dep, self.subproject) @FeatureNew('dep.as_link_whole', '0.56.0') @permittedKwargs({}) @noPosargs def as_link_whole_method(self, args, kwargs): if not isinstance(self.held_object, InternalDependency): raise InterpreterException('as_link_whole method is only supported on declare_dependency() objects') new_dep = self.held_object.generate_link_whole_dependency() return DependencyHolder(new_dep, self.subproject) class ExternalProgramHolder(InterpreterObject, ObjectHolder): def __init__(self, ep, subproject, backend=None): InterpreterObject.__init__(self) ObjectHolder.__init__(self, ep) self.subproject = subproject self.backend = backend self.methods.update({'found': self.found_method, 'path': self.path_method, 'full_path': self.full_path_method}) self.cached_version = None @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): return self.found() @noPosargs @permittedKwargs({}) @FeatureDeprecated('ExternalProgram.path', '0.55.0', 'use ExternalProgram.full_path() instead') def path_method(self, args, kwargs): return self._full_path() @noPosargs @permittedKwargs({}) @FeatureNew('ExternalProgram.full_path', '0.55.0') def full_path_method(self, args, kwargs): return self._full_path() def _full_path(self): exe = self.held_object if isinstance(exe, build.Executable): return self.backend.get_target_filename_abs(exe) return exe.get_path() def found(self): return isinstance(self.held_object, build.Executable) or self.held_object.found() def get_command(self): return self.held_object.get_command() def get_name(self): exe = self.held_object if isinstance(exe, build.Executable): return exe.name return exe.get_name() def get_version(self, interpreter): if isinstance(self.held_object, build.Executable): return self.held_object.project_version if not self.cached_version: raw_cmd = self.get_command() + ['--version'] cmd = [self, '--version'] res = interpreter.run_command_impl(interpreter.current_node, cmd, {}, True) if res.returncode != 0: m = 'Running {!r} failed' raise InterpreterException(m.format(raw_cmd)) output = 
res.stdout.strip() if not output: output = res.stderr.strip() match = re.search(r'([0-9][0-9\.]+)', output) if not match: m = 'Could not find a version number in output of {!r}' raise InterpreterException(m.format(raw_cmd)) self.cached_version = match.group(1) return self.cached_version class ExternalLibraryHolder(InterpreterObject, ObjectHolder): def __init__(self, el, pv): InterpreterObject.__init__(self) ObjectHolder.__init__(self, el, pv) self.methods.update({'found': self.found_method, 'type_name': self.type_name_method, 'partial_dependency': self.partial_dependency_method, }) def found(self): return self.held_object.found() @noPosargs @permittedKwargs({}) def type_name_method(self, args, kwargs): return self.held_object.type_name @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): return self.found() def get_name(self): return self.held_object.name def get_compile_args(self): return self.held_object.get_compile_args() def get_link_args(self): return self.held_object.get_link_args() def get_exe_args(self): return self.held_object.get_exe_args() @FeatureNew('dep.partial_dependency', '0.46.0') @noPosargs @permittedKwargs(permitted_method_kwargs['partial_dependency']) def partial_dependency_method(self, args, kwargs): pdep = self.held_object.get_partial_dependency(**kwargs) return DependencyHolder(pdep, self.subproject) class GeneratorHolder(InterpreterObject, ObjectHolder): @FeatureNewKwargs('generator', '0.43.0', ['capture']) def __init__(self, interp, args, kwargs): self.interpreter = interp InterpreterObject.__init__(self) ObjectHolder.__init__(self, build.Generator(args, kwargs), interp.subproject) self.methods.update({'process': self.process_method}) @FeatureNewKwargs('generator.process', '0.45.0', ['preserve_path_from']) @permittedKwargs({'extra_args', 'preserve_path_from'}) def process_method(self, args, kwargs): extras = mesonlib.stringlistify(kwargs.get('extra_args', [])) if 'preserve_path_from' in kwargs: preserve_path_from = kwargs['preserve_path_from'] if not isinstance(preserve_path_from, str): raise InvalidArguments('Preserve_path_from must be a string.') preserve_path_from = os.path.normpath(preserve_path_from) if not os.path.isabs(preserve_path_from): # This is a bit of a hack. Fix properly before merging. raise InvalidArguments('Preserve_path_from must be an absolute path for now. 
Sorry.') else: preserve_path_from = None gl = self.held_object.process_files('Generator', args, self.interpreter, preserve_path_from, extra_args=extras) return GeneratedListHolder(gl) class GeneratedListHolder(InterpreterObject, ObjectHolder): def __init__(self, arg1, extra_args=None): InterpreterObject.__init__(self) if isinstance(arg1, GeneratorHolder): ObjectHolder.__init__(self, build.GeneratedList(arg1.held_object, extra_args if extra_args is not None else [])) else: ObjectHolder.__init__(self, arg1) def __repr__(self): r = '<{}: {!r}>' return r.format(self.__class__.__name__, self.held_object.get_outputs()) def add_file(self, a): self.held_object.add_file(a) # A machine that's statically known from the cross file class MachineHolder(InterpreterObject, ObjectHolder): def __init__(self, machine_info): InterpreterObject.__init__(self) ObjectHolder.__init__(self, machine_info) self.methods.update({'system': self.system_method, 'cpu': self.cpu_method, 'cpu_family': self.cpu_family_method, 'endian': self.endian_method, }) @noPosargs @permittedKwargs({}) def cpu_family_method(self, args: T.List[TYPE_var], kwargs: TYPE_nkwargs) -> str: return self.held_object.cpu_family @noPosargs @permittedKwargs({}) def cpu_method(self, args: T.List[TYPE_var], kwargs: TYPE_nkwargs) -> str: return self.held_object.cpu @noPosargs @permittedKwargs({}) def system_method(self, args: T.List[TYPE_var], kwargs: TYPE_nkwargs) -> str: return self.held_object.system @noPosargs @permittedKwargs({}) def endian_method(self, args: T.List[TYPE_var], kwargs: TYPE_nkwargs) -> str: return self.held_object.endian class IncludeDirsHolder(InterpreterObject, ObjectHolder): def __init__(self, idobj): InterpreterObject.__init__(self) ObjectHolder.__init__(self, idobj) class Headers(InterpreterObject): def __init__(self, sources, kwargs): InterpreterObject.__init__(self) self.sources = sources self.install_subdir = kwargs.get('subdir', '') if os.path.isabs(self.install_subdir): mlog.deprecation('Subdir keyword must not be an absolute path. 
This will be a hard error in the next release.') self.custom_install_dir = kwargs.get('install_dir', None) self.custom_install_mode = kwargs.get('install_mode', None) if self.custom_install_dir is not None: if not isinstance(self.custom_install_dir, str): raise InterpreterException('Custom_install_dir must be a string.') def set_install_subdir(self, subdir): self.install_subdir = subdir def get_install_subdir(self): return self.install_subdir def get_sources(self): return self.sources def get_custom_install_dir(self): return self.custom_install_dir def get_custom_install_mode(self): return self.custom_install_mode class DataHolder(InterpreterObject, ObjectHolder): def __init__(self, data): InterpreterObject.__init__(self) ObjectHolder.__init__(self, data) def get_source_subdir(self): return self.held_object.source_subdir def get_sources(self): return self.held_object.sources def get_install_dir(self): return self.held_object.install_dir class InstallDir(InterpreterObject): def __init__(self, src_subdir, inst_subdir, install_dir, install_mode, exclude, strip_directory, from_source_dir=True): InterpreterObject.__init__(self) self.source_subdir = src_subdir self.installable_subdir = inst_subdir self.install_dir = install_dir self.install_mode = install_mode self.exclude = exclude self.strip_directory = strip_directory self.from_source_dir = from_source_dir class Man(InterpreterObject): def __init__(self, sources, kwargs): InterpreterObject.__init__(self) self.sources = sources self.validate_sources() self.custom_install_dir = kwargs.get('install_dir', None) self.custom_install_mode = kwargs.get('install_mode', None) if self.custom_install_dir is not None and not isinstance(self.custom_install_dir, str): raise InterpreterException('Custom_install_dir must be a string.') def validate_sources(self): for s in self.sources: try: num = int(s.split('.')[-1]) except (IndexError, ValueError): num = 0 if num < 1 or num > 8: raise InvalidArguments('Man file must have a file extension of a number between 1 and 8') def get_custom_install_dir(self): return self.custom_install_dir def get_custom_install_mode(self): return self.custom_install_mode def get_sources(self): return self.sources class GeneratedObjectsHolder(InterpreterObject, ObjectHolder): def __init__(self, held_object): InterpreterObject.__init__(self) ObjectHolder.__init__(self, held_object) class TargetHolder(InterpreterObject, ObjectHolder): def __init__(self, target, interp): InterpreterObject.__init__(self) ObjectHolder.__init__(self, target, interp.subproject) self.interpreter = interp class BuildTargetHolder(TargetHolder): def __init__(self, target, interp): super().__init__(target, interp) self.methods.update({'extract_objects': self.extract_objects_method, 'extract_all_objects': self.extract_all_objects_method, 'name': self.name_method, 'get_id': self.get_id_method, 'outdir': self.outdir_method, 'full_path': self.full_path_method, 'private_dir_include': self.private_dir_include_method, }) def __repr__(self): r = '<{} {}: {}>' h = self.held_object return r.format(self.__class__.__name__, h.get_id(), h.filename) def is_cross(self): return not self.held_object.environment.machines.matches_build_machine(self.held_object.for_machine) @noPosargs @permittedKwargs({}) def private_dir_include_method(self, args, kwargs): return IncludeDirsHolder(build.IncludeDirs('', [], False, [self.interpreter.backend.get_target_private_dir(self.held_object)])) @noPosargs @permittedKwargs({}) def full_path_method(self, args, kwargs): return 
self.interpreter.backend.get_target_filename_abs(self.held_object) @noPosargs @permittedKwargs({}) def outdir_method(self, args, kwargs): return self.interpreter.backend.get_target_dir(self.held_object) @permittedKwargs({}) def extract_objects_method(self, args, kwargs): gobjs = self.held_object.extract_objects(args) return GeneratedObjectsHolder(gobjs) @FeatureNewKwargs('extract_all_objects', '0.46.0', ['recursive']) @noPosargs @permittedKwargs({'recursive'}) def extract_all_objects_method(self, args, kwargs): recursive = kwargs.get('recursive', False) gobjs = self.held_object.extract_all_objects(recursive) if gobjs.objlist and 'recursive' not in kwargs: mlog.warning('extract_all_objects called without setting recursive ' 'keyword argument. Meson currently defaults to ' 'non-recursive to maintain backward compatibility but ' 'the default will be changed in the future.', location=self.current_node) return GeneratedObjectsHolder(gobjs) @noPosargs @permittedKwargs({}) def get_id_method(self, args, kwargs): return self.held_object.get_id() @FeatureNew('name', '0.54.0') @noPosargs @permittedKwargs({}) def name_method(self, args, kwargs): return self.held_object.name class ExecutableHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class StaticLibraryHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class SharedLibraryHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) # Set to True only when called from self.func_shared_lib(). target.shared_library_only = False class BothLibrariesHolder(BuildTargetHolder): def __init__(self, shared_holder, static_holder, interp): # FIXME: This build target always represents the shared library, but # that should be configurable. 
super().__init__(shared_holder.held_object, interp) self.shared_holder = shared_holder self.static_holder = static_holder self.methods.update({'get_shared_lib': self.get_shared_lib_method, 'get_static_lib': self.get_static_lib_method, }) def __repr__(self): r = '<{} {}: {}, {}: {}>' h1 = self.shared_holder.held_object h2 = self.static_holder.held_object return r.format(self.__class__.__name__, h1.get_id(), h1.filename, h2.get_id(), h2.filename) @noPosargs @permittedKwargs({}) def get_shared_lib_method(self, args, kwargs): return self.shared_holder @noPosargs @permittedKwargs({}) def get_static_lib_method(self, args, kwargs): return self.static_holder class SharedModuleHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class JarHolder(BuildTargetHolder): def __init__(self, target, interp): super().__init__(target, interp) class CustomTargetIndexHolder(TargetHolder): def __init__(self, target, interp): super().__init__(target, interp) self.methods.update({'full_path': self.full_path_method, }) @FeatureNew('custom_target[i].full_path', '0.54.0') @noPosargs @permittedKwargs({}) def full_path_method(self, args, kwargs): return self.interpreter.backend.get_target_filename_abs(self.held_object) class CustomTargetHolder(TargetHolder): def __init__(self, target, interp): super().__init__(target, interp) self.methods.update({'full_path': self.full_path_method, 'to_list': self.to_list_method, }) def __repr__(self): r = '<{} {}: {}>' h = self.held_object return r.format(self.__class__.__name__, h.get_id(), h.command) @noPosargs @permittedKwargs({}) def full_path_method(self, args, kwargs): return self.interpreter.backend.get_target_filename_abs(self.held_object) @FeatureNew('custom_target.to_list', '0.54.0') @noPosargs @permittedKwargs({}) def to_list_method(self, args, kwargs): result = [] for i in self.held_object: result.append(CustomTargetIndexHolder(i, self.interpreter)) return result def __getitem__(self, index): return CustomTargetIndexHolder(self.held_object[index], self.interpreter) def __setitem__(self, index, value): # lgtm[py/unexpected-raise-in-special-method] raise InterpreterException('Cannot set a member of a CustomTarget') def __delitem__(self, index): # lgtm[py/unexpected-raise-in-special-method] raise InterpreterException('Cannot delete a member of a CustomTarget') def outdir_include(self): return IncludeDirsHolder(build.IncludeDirs('', [], False, [os.path.join('@BUILD_ROOT@', self.interpreter.backend.get_target_dir(self.held_object))])) class RunTargetHolder(TargetHolder): def __init__(self, target, interp): super().__init__(target, interp) def __repr__(self): r = '<{} {}: {}>' h = self.held_object return r.format(self.__class__.__name__, h.get_id(), h.command) class Test(InterpreterObject): def __init__(self, name: str, project: str, suite: T.List[str], exe: build.Executable, depends: T.List[T.Union[build.CustomTarget, build.BuildTarget]], is_parallel: bool, cmd_args: T.List[str], env: build.EnvironmentVariables, should_fail: bool, timeout: int, workdir: T.Optional[str], protocol: str, priority: int): InterpreterObject.__init__(self) self.name = name self.suite = suite self.project_name = project self.exe = exe self.depends = depends self.is_parallel = is_parallel self.cmd_args = cmd_args self.env = env self.should_fail = should_fail self.timeout = timeout self.workdir = workdir self.protocol = TestProtocol.from_str(protocol) self.priority = priority def get_exe(self): return self.exe def get_name(self): return self.name class 
SubprojectHolder(InterpreterObject, ObjectHolder): def __init__(self, subinterpreter, subdir, warnings=0, disabled_feature=None, exception=None): InterpreterObject.__init__(self) ObjectHolder.__init__(self, subinterpreter) self.warnings = warnings self.disabled_feature = disabled_feature self.exception = exception self.subdir = PurePath(subdir).as_posix() self.methods.update({'get_variable': self.get_variable_method, 'found': self.found_method, }) @noPosargs @permittedKwargs({}) def found_method(self, args, kwargs): return self.found() def found(self): return self.held_object is not None @permittedKwargs({}) @noArgsFlattening def get_variable_method(self, args, kwargs): if len(args) < 1 or len(args) > 2: raise InterpreterException('Get_variable takes one or two arguments.') if not self.found(): raise InterpreterException('Subproject "%s" disabled can\'t get_variable on it.' % (self.subdir)) varname = args[0] if not isinstance(varname, str): raise InterpreterException('Get_variable first argument must be a string.') try: return self.held_object.variables[varname] except KeyError: pass if len(args) == 2: return args[1] raise InvalidArguments('Requested variable "{0}" not found.'.format(varname)) header_permitted_kwargs = set([ 'required', 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', ]) find_library_permitted_kwargs = set([ 'has_headers', 'required', 'dirs', 'static', ]) find_library_permitted_kwargs |= set(['header_' + k for k in header_permitted_kwargs]) class CompilerHolder(InterpreterObject): def __init__(self, compiler, env, subproject): InterpreterObject.__init__(self) self.compiler = compiler self.environment = env self.subproject = subproject self.methods.update({'compiles': self.compiles_method, 'links': self.links_method, 'get_id': self.get_id_method, 'get_linker_id': self.get_linker_id_method, 'compute_int': self.compute_int_method, 'sizeof': self.sizeof_method, 'get_define': self.get_define_method, 'check_header': self.check_header_method, 'has_header': self.has_header_method, 'has_header_symbol': self.has_header_symbol_method, 'run': self.run_method, 'has_function': self.has_function_method, 'has_member': self.has_member_method, 'has_members': self.has_members_method, 'has_type': self.has_type_method, 'alignment': self.alignment_method, 'version': self.version_method, 'cmd_array': self.cmd_array_method, 'find_library': self.find_library_method, 'has_argument': self.has_argument_method, 'has_function_attribute': self.has_func_attribute_method, 'get_supported_function_attributes': self.get_supported_function_attributes_method, 'has_multi_arguments': self.has_multi_arguments_method, 'get_supported_arguments': self.get_supported_arguments_method, 'first_supported_argument': self.first_supported_argument_method, 'has_link_argument': self.has_link_argument_method, 'has_multi_link_arguments': self.has_multi_link_arguments_method, 'get_supported_link_arguments': self.get_supported_link_arguments_method, 'first_supported_link_argument': self.first_supported_link_argument_method, 'unittest_args': self.unittest_args_method, 'symbols_have_underscore_prefix': self.symbols_have_underscore_prefix_method, 'get_argument_syntax': self.get_argument_syntax_method, }) def _dep_msg(self, deps, endl): msg_single = 'with dependency {}' msg_many = 'with dependencies {}' if not deps: return endl if endl is None: endl = '' tpl = msg_many if len(deps) > 1 else msg_single names = [] for d in deps: if isinstance(d, dependencies.ExternalLibrary): name = '-l' + d.name else: name = 
d.name names.append(name) return tpl.format(', '.join(names)) + endl @noPosargs @permittedKwargs({}) def version_method(self, args, kwargs): return self.compiler.version @noPosargs @permittedKwargs({}) def cmd_array_method(self, args, kwargs): return self.compiler.exelist def determine_args(self, kwargs, mode='link'): nobuiltins = kwargs.get('no_builtin_args', False) if not isinstance(nobuiltins, bool): raise InterpreterException('Type of no_builtin_args not a boolean.') args = [] incdirs = extract_as_list(kwargs, 'include_directories') for i in incdirs: if not isinstance(i, IncludeDirsHolder): raise InterpreterException('Include directories argument must be an include_directories object.') for idir in i.held_object.get_incdirs(): idir = os.path.join(self.environment.get_source_dir(), i.held_object.get_curdir(), idir) args += self.compiler.get_include_args(idir, False) if not nobuiltins: for_machine = Interpreter.machine_from_native_kwarg(kwargs) opts = self.environment.coredata.compiler_options[for_machine][self.compiler.language] args += self.compiler.get_option_compile_args(opts) if mode == 'link': args += self.compiler.get_option_link_args(opts) args += mesonlib.stringlistify(kwargs.get('args', [])) return args def determine_dependencies(self, kwargs, endl=':'): deps = kwargs.get('dependencies', None) if deps is not None: deps = listify(deps) final_deps = [] for d in deps: try: d = d.held_object except Exception: pass if isinstance(d, InternalDependency) or not isinstance(d, Dependency): raise InterpreterException('Dependencies must be external dependencies') final_deps.append(d) deps = final_deps return deps, self._dep_msg(deps, endl) @permittedKwargs({ 'prefix', 'args', 'dependencies', }) def alignment_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Alignment method takes exactly one positional argument.') check_stringlist(args) typename = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of alignment must be a string.') extra_args = mesonlib.stringlistify(kwargs.get('args', [])) deps, msg = self.determine_dependencies(kwargs) result = self.compiler.alignment(typename, prefix, self.environment, extra_args=extra_args, dependencies=deps) mlog.log('Checking for alignment of', mlog.bold(typename, True), msg, result) return result @permittedKwargs({ 'name', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def run_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Run method takes exactly one positional argument.') code = args[0] if isinstance(code, mesonlib.File): code = mesonlib.File.from_absolute_file( code.rel_to_builddir(self.environment.source_dir)) elif not isinstance(code, str): raise InvalidArguments('Argument must be string or file.') testname = kwargs.get('name', '') if not isinstance(testname, str): raise InterpreterException('Testname argument must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs, endl=None) result = self.compiler.run(code, self.environment, extra_args=extra_args, dependencies=deps) if len(testname) > 0: if not result.compiled: h = mlog.red('DID NOT COMPILE') elif result.returncode == 0: h = mlog.green('YES') else: h = mlog.red('NO (%d)' % result.returncode) mlog.log('Checking if', mlog.bold(testname, True), msg, 'runs:', h) return TryRunResultHolder(result) @noPosargs @permittedKwargs({}) def get_id_method(self, args, kwargs): return 
self.compiler.get_id() @noPosargs @permittedKwargs({}) @FeatureNew('compiler.get_linker_id', '0.53.0') def get_linker_id_method(self, args, kwargs): return self.compiler.get_linker_id() @noPosargs @permittedKwargs({}) def symbols_have_underscore_prefix_method(self, args, kwargs): ''' Check if the compiler prefixes _ (underscore) to global C symbols See: https://en.wikipedia.org/wiki/Name_mangling#C ''' return self.compiler.symbols_have_underscore_prefix(self.environment) @noPosargs @permittedKwargs({}) def unittest_args_method(self, args, kwargs): ''' This function is deprecated and should not be used. It can be removed in a future version of Meson. ''' if not hasattr(self.compiler, 'get_feature_args'): raise InterpreterException('This {} compiler has no feature arguments.'.format(self.compiler.get_display_language())) build_to_src = os.path.relpath(self.environment.get_source_dir(), self.environment.get_build_dir()) return self.compiler.get_feature_args({'unittest': 'true'}, build_to_src) @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_member_method(self, args, kwargs): if len(args) != 2: raise InterpreterException('Has_member takes exactly two arguments.') check_stringlist(args) typename, membername = args prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_member must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) had, cached = self.compiler.has_members(typename, [membername], prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') mlog.log('Checking whether type', mlog.bold(typename, True), 'has member', mlog.bold(membername, True), msg, hadtxt, cached) return had @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_members_method(self, args, kwargs): if len(args) < 2: raise InterpreterException('Has_members needs at least two arguments.') check_stringlist(args) typename, *membernames = args prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_members must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) had, cached = self.compiler.has_members(typename, membernames, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') members = mlog.bold(', '.join(['"{}"'.format(m) for m in membernames])) mlog.log('Checking whether type', mlog.bold(typename, True), 'has members', members, msg, hadtxt, cached) return had @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_function_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Has_function takes exactly one argument.') check_stringlist(args) funcname = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_function must be a string.') extra_args = self.determine_args(kwargs) deps, msg = self.determine_dependencies(kwargs) had, cached = self.compiler.has_function(funcname, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if 
cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') mlog.log('Checking for function', mlog.bold(funcname, True), msg, hadtxt, cached) return had @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def has_type_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Has_type takes exactly one argument.') check_stringlist(args) typename = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_type must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) had, cached = self.compiler.has_type(typename, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if had: hadtxt = mlog.green('YES') else: hadtxt = mlog.red('NO') mlog.log('Checking for type', mlog.bold(typename, True), msg, hadtxt, cached) return had @FeatureNew('compiler.compute_int', '0.40.0') @permittedKwargs({ 'prefix', 'low', 'high', 'guess', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def compute_int_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Compute_int takes exactly one argument.') check_stringlist(args) expression = args[0] prefix = kwargs.get('prefix', '') low = kwargs.get('low', None) high = kwargs.get('high', None) guess = kwargs.get('guess', None) if not isinstance(prefix, str): raise InterpreterException('Prefix argument of compute_int must be a string.') if low is not None and not isinstance(low, int): raise InterpreterException('Low argument of compute_int must be an int.') if high is not None and not isinstance(high, int): raise InterpreterException('High argument of compute_int must be an int.') if guess is not None and not isinstance(guess, int): raise InterpreterException('Guess argument of compute_int must be an int.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) res = self.compiler.compute_int(expression, low, high, guess, prefix, self.environment, extra_args=extra_args, dependencies=deps) mlog.log('Computing int of', mlog.bold(expression, True), msg, res) return res @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def sizeof_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('Sizeof takes exactly one argument.') check_stringlist(args) element = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of sizeof must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) esize = self.compiler.sizeof(element, prefix, self.environment, extra_args=extra_args, dependencies=deps) mlog.log('Checking for size of', mlog.bold(element, True), msg, esize) return esize @FeatureNew('compiler.get_define', '0.40.0') @permittedKwargs({ 'prefix', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def get_define_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('get_define() takes exactly one argument.') check_stringlist(args) element = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of get_define() must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = 
self.determine_dependencies(kwargs) value, cached = self.compiler.get_define(element, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' mlog.log('Fetching value of define', mlog.bold(element, True), msg, value, cached) return value @permittedKwargs({ 'name', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def compiles_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('compiles method takes exactly one argument.') code = args[0] if isinstance(code, mesonlib.File): code = mesonlib.File.from_absolute_file( code.rel_to_builddir(self.environment.source_dir)) elif not isinstance(code, str): raise InvalidArguments('Argument must be string or file.') testname = kwargs.get('name', '') if not isinstance(testname, str): raise InterpreterException('Testname argument must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs, endl=None) result, cached = self.compiler.compiles(code, self.environment, extra_args=extra_args, dependencies=deps) if len(testname) > 0: if result: h = mlog.green('YES') else: h = mlog.red('NO') cached = mlog.blue('(cached)') if cached else '' mlog.log('Checking if', mlog.bold(testname, True), msg, 'compiles:', h, cached) return result @permittedKwargs({ 'name', 'no_builtin_args', 'include_directories', 'args', 'dependencies', }) def links_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('links method takes exactly one argument.') code = args[0] if isinstance(code, mesonlib.File): code = mesonlib.File.from_absolute_file( code.rel_to_builddir(self.environment.source_dir)) elif not isinstance(code, str): raise InvalidArguments('Argument must be string or file.') testname = kwargs.get('name', '') if not isinstance(testname, str): raise InterpreterException('Testname argument must be a string.') extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs, endl=None) result, cached = self.compiler.links(code, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if len(testname) > 0: if result: h = mlog.green('YES') else: h = mlog.red('NO') mlog.log('Checking if', mlog.bold(testname, True), msg, 'links:', h, cached) return result @FeatureNew('compiler.check_header', '0.47.0') @FeatureNewKwargs('compiler.check_header', '0.50.0', ['required']) @permittedKwargs(header_permitted_kwargs) def check_header_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('check_header method takes exactly one argument.') check_stringlist(args) hname = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_header must be a string.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False) if disabled: mlog.log('Check usable header', mlog.bold(hname, True), 'skipped: feature', mlog.bold(feature), 'disabled') return False extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) haz, cached = self.compiler.check_header(hname, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if required and not haz: raise InterpreterException('{} header {!r} not usable'.format(self.compiler.get_display_language(), hname)) elif haz: h = mlog.green('YES') else: h = mlog.red('NO') 
mlog.log('Check usable header', mlog.bold(hname, True), msg, h, cached) return haz @FeatureNewKwargs('compiler.has_header', '0.50.0', ['required']) @permittedKwargs(header_permitted_kwargs) def has_header_method(self, args, kwargs): if len(args) != 1: raise InterpreterException('has_header method takes exactly one argument.') check_stringlist(args) hname = args[0] prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_header must be a string.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False) if disabled: mlog.log('Has header', mlog.bold(hname, True), 'skipped: feature', mlog.bold(feature), 'disabled') return False extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) haz, cached = self.compiler.has_header(hname, prefix, self.environment, extra_args=extra_args, dependencies=deps) cached = mlog.blue('(cached)') if cached else '' if required and not haz: raise InterpreterException('{} header {!r} not found'.format(self.compiler.get_display_language(), hname)) elif haz: h = mlog.green('YES') else: h = mlog.red('NO') mlog.log('Has header', mlog.bold(hname, True), msg, h, cached) return haz @FeatureNewKwargs('compiler.has_header_symbol', '0.50.0', ['required']) @permittedKwargs(header_permitted_kwargs) def has_header_symbol_method(self, args, kwargs): if len(args) != 2: raise InterpreterException('has_header_symbol method takes exactly two arguments.') check_stringlist(args) hname, symbol = args prefix = kwargs.get('prefix', '') if not isinstance(prefix, str): raise InterpreterException('Prefix argument of has_header_symbol must be a string.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject, default=False) if disabled: mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), 'skipped: feature', mlog.bold(feature), 'disabled') return False extra_args = functools.partial(self.determine_args, kwargs) deps, msg = self.determine_dependencies(kwargs) haz, cached = self.compiler.has_header_symbol(hname, symbol, prefix, self.environment, extra_args=extra_args, dependencies=deps) if required and not haz: raise InterpreterException('{} symbol {} not found in header {}'.format(self.compiler.get_display_language(), symbol, hname)) elif haz: h = mlog.green('YES') else: h = mlog.red('NO') cached = mlog.blue('(cached)') if cached else '' mlog.log('Header <{0}> has symbol'.format(hname), mlog.bold(symbol, True), msg, h, cached) return haz def notfound_library(self, libname): lib = dependencies.ExternalLibrary(libname, None, self.environment, self.compiler.language, silent=True) return ExternalLibraryHolder(lib, self.subproject) @FeatureNewKwargs('compiler.find_library', '0.51.0', ['static']) @FeatureNewKwargs('compiler.find_library', '0.50.0', ['has_headers']) @FeatureNewKwargs('compiler.find_library', '0.49.0', ['disabler']) @disablerIfNotFound @permittedKwargs(find_library_permitted_kwargs) def find_library_method(self, args, kwargs): # TODO add dependencies support? 
if len(args) != 1: raise InterpreterException('find_library method takes one argument.') libname = args[0] if not isinstance(libname, str): raise InterpreterException('Library name not a string.') disabled, required, feature = extract_required_kwarg(kwargs, self.subproject) if disabled: mlog.log('Library', mlog.bold(libname), 'skipped: feature', mlog.bold(feature), 'disabled') return self.notfound_library(libname) has_header_kwargs = {k[7:]: v for k, v in kwargs.items() if k.startswith('header_')} has_header_kwargs['required'] = required headers = mesonlib.stringlistify(kwargs.get('has_headers', [])) for h in headers: if not self.has_header_method([h], has_header_kwargs): return self.notfound_library(libname) search_dirs = extract_search_dirs(kwargs) libtype = mesonlib.LibType.PREFER_SHARED if 'static' in kwargs: if not isinstance(kwargs['static'], bool): raise InterpreterException('static must be a boolean') libtype = mesonlib.LibType.STATIC if kwargs['static'] else mesonlib.LibType.SHARED linkargs = self.compiler.find_library(libname, self.environment, search_dirs, libtype) if required and not linkargs: if libtype == mesonlib.LibType.PREFER_SHARED: libtype = 'shared or static' else: libtype = libtype.name.lower() raise InterpreterException('{} {} library {!r} not found' .format(self.compiler.get_display_language(), libtype, libname)) lib = dependencies.ExternalLibrary(libname, linkargs, self.environment, self.compiler.language) return ExternalLibraryHolder(lib, self.subproject) @permittedKwargs({}) def has_argument_method(self, args: T.Sequence[str], kwargs) -> bool: args = mesonlib.stringlistify(args) if len(args) != 1: raise InterpreterException('has_argument takes exactly one argument.') return self.has_multi_arguments_method(args, kwargs) @permittedKwargs({}) def has_multi_arguments_method(self, args: T.Sequence[str], kwargs: dict): args = mesonlib.stringlistify(args) result, cached = self.compiler.has_multi_arguments(args, self.environment) if result: h = mlog.green('YES') else: h = mlog.red('NO') cached = mlog.blue('(cached)') if cached else '' mlog.log( 'Compiler for {} supports arguments {}:'.format( self.compiler.get_display_language(), ' '.join(args)), h, cached) return result @FeatureNew('compiler.get_supported_arguments', '0.43.0') @permittedKwargs({}) def get_supported_arguments_method(self, args, kwargs): args = mesonlib.stringlistify(args) supported_args = [] for arg in args: if self.has_argument_method(arg, kwargs): supported_args.append(arg) return supported_args @permittedKwargs({}) def first_supported_argument_method(self, args: T.Sequence[str], kwargs: dict) -> T.List[str]: for arg in mesonlib.stringlistify(args): if self.has_argument_method(arg, kwargs): mlog.log('First supported argument:', mlog.bold(arg)) return [arg] mlog.log('First supported argument:', mlog.red('None')) return [] @FeatureNew('compiler.has_link_argument', '0.46.0') @permittedKwargs({}) def has_link_argument_method(self, args, kwargs): args = mesonlib.stringlistify(args) if len(args) != 1: raise InterpreterException('has_link_argument takes exactly one argument.') return self.has_multi_link_arguments_method(args, kwargs) @FeatureNew('compiler.has_multi_link_argument', '0.46.0') @permittedKwargs({}) def has_multi_link_arguments_method(self, args, kwargs): args = mesonlib.stringlistify(args) result, cached = self.compiler.has_multi_link_arguments(args, self.environment) cached = mlog.blue('(cached)') if cached else '' if result: h = mlog.green('YES') else: h = mlog.red('NO') mlog.log( 'Compiler for 
    @FeatureNew('compiler.get_supported_link_arguments', '0.46.0')
    @permittedKwargs({})
    def get_supported_link_arguments_method(self, args, kwargs):
        args = mesonlib.stringlistify(args)
        supported_args = []
        for arg in args:
            if self.has_link_argument_method(arg, kwargs):
                supported_args.append(arg)
        return supported_args

    @FeatureNew('compiler.first_supported_link_argument', '0.46.0')
    @permittedKwargs({})
    def first_supported_link_argument_method(self, args, kwargs):
        for i in mesonlib.stringlistify(args):
            if self.has_link_argument_method(i, kwargs):
                mlog.log('First supported link argument:', mlog.bold(i))
                return [i]
        mlog.log('First supported link argument:', mlog.red('None'))
        return []

    @FeatureNew('compiler.has_function_attribute', '0.48.0')
    @permittedKwargs({})
    def has_func_attribute_method(self, args, kwargs):
        args = mesonlib.stringlistify(args)
        if len(args) != 1:
            raise InterpreterException('has_func_attribute takes exactly one argument.')
        result, cached = self.compiler.has_func_attribute(args[0], self.environment)
        cached = mlog.blue('(cached)') if cached else ''
        h = mlog.green('YES') if result else mlog.red('NO')
        mlog.log('Compiler for {} supports function attribute {}:'.format(
            self.compiler.get_display_language(), args[0]), h, cached)
        return result

    @FeatureNew('compiler.get_supported_function_attributes', '0.48.0')
    @permittedKwargs({})
    def get_supported_function_attributes_method(self, args, kwargs):
        args = mesonlib.stringlistify(args)
        return [a for a in args if self.has_func_attribute_method(a, kwargs)]

    @FeatureNew('compiler.get_argument_syntax', '0.49.0')
    @noPosargs
    @noKwargs
    def get_argument_syntax_method(self, args, kwargs):
        return self.compiler.get_argument_syntax()


ModuleState = collections.namedtuple('ModuleState', [
    'source_root', 'build_to_src', 'subproject', 'subdir', 'current_lineno',
    'environment', 'project_name', 'project_version', 'backend', 'targets',
    'data', 'headers', 'man', 'global_args', 'project_args', 'build_machine',
    'host_machine', 'target_machine', 'current_node'])


class ModuleHolder(InterpreterObject, ObjectHolder):
    def __init__(self, modname, module, interpreter):
        InterpreterObject.__init__(self)
        ObjectHolder.__init__(self, module)
        self.modname = modname
        self.interpreter = interpreter

    def method_call(self, method_name, args, kwargs):
        try:
            fn = getattr(self.held_object, method_name)
        except AttributeError:
            raise InvalidArguments('Module %s does not have method %s.' % (self.modname, method_name))
        if method_name.startswith('_'):
            raise InvalidArguments('Function {!r} in module {!r} is private.'.format(method_name, self.modname))
        if not getattr(fn, 'no-args-flattening', False):
            args = flatten(args)
        # This is not 100% reliable but we can't use hash()
        # because the Build object contains dicts and lists.
        num_targets = len(self.interpreter.build.targets)
        state = ModuleState(
            source_root=self.interpreter.environment.get_source_dir(),
            build_to_src=mesonlib.relpath(self.interpreter.environment.get_source_dir(),
                                          self.interpreter.environment.get_build_dir()),
            subproject=self.interpreter.subproject,
            subdir=self.interpreter.subdir,
            current_lineno=self.interpreter.current_lineno,
            environment=self.interpreter.environment,
            project_name=self.interpreter.build.project_name,
            project_version=self.interpreter.build.dep_manifest[self.interpreter.active_projectname],
            # The backend object is under-used right now, but we will need it:
            # https://github.com/mesonbuild/meson/issues/1419
            backend=self.interpreter.backend,
            targets=self.interpreter.build.targets,
            data=self.interpreter.build.data,
            headers=self.interpreter.build.get_headers(),
            man=self.interpreter.build.get_man(),
            # global_args_for_build = self.interpreter.build.global_args.build,
            global_args=self.interpreter.build.global_args.host,
            # project_args_for_build = self.interpreter.build.projects_args.build.get(self.interpreter.subproject, {}),
            project_args=self.interpreter.build.projects_args.host.get(self.interpreter.subproject, {}),
            build_machine=self.interpreter.builtin['build_machine'].held_object,
            host_machine=self.interpreter.builtin['host_machine'].held_object,
            target_machine=self.interpreter.builtin['target_machine'].held_object,
            current_node=self.current_node)
        # Many modules do for example self.interpreter.find_program_impl(),
        # so we have to ensure they use the current interpreter and not the one
        # that first imported that module, otherwise it will use outdated
        # overrides.
        self.held_object.interpreter = self.interpreter
        if self.held_object.is_snippet(method_name):
            value = fn(self.interpreter, state, args, kwargs)
            return self.interpreter.holderify(value)
        else:
            value = fn(state, args, kwargs)
            if num_targets != len(self.interpreter.build.targets):
                raise InterpreterException('Extension module altered internal state illegally.')
            return self.interpreter.module_method_callback(value)


class Summary:
    def __init__(self, project_name, project_version):
        self.project_name = project_name
        self.project_version = project_version
        self.sections = collections.defaultdict(dict)
        self.max_key_len = 0

    def add_section(self, section, values, kwargs):
        bool_yn = kwargs.get('bool_yn', False)
        if not isinstance(bool_yn, bool):
            raise InterpreterException('bool_yn keyword argument must be boolean')
        list_sep = kwargs.get('list_sep')
        if list_sep is not None and not isinstance(list_sep, str):
            raise InterpreterException('list_sep keyword argument must be string')
        for k, v in values.items():
            if k in self.sections[section]:
                raise InterpreterException('Summary section {!r} already has key {!r}'.format(section, k))
            formatted_values = []
            for i in listify(v):
                if not isinstance(i, (str, int)):
                    m = 'Summary value in section {!r}, key {!r}, must be string, integer or boolean'
                    raise InterpreterException(m.format(section, k))
                if bool_yn and isinstance(i, bool):
                    formatted_values.append(mlog.green('YES') if i else mlog.red('NO'))
                else:
                    formatted_values.append(str(i))
            self.sections[section][k] = (formatted_values, list_sep)
            self.max_key_len = max(self.max_key_len, len(k))

    def text_len(self, v):
        if isinstance(v, str):
            return len(v)
        elif isinstance(v, mlog.AnsiDecorator):
            return len(v.text)
        else:
            raise RuntimeError('Expecting only strings or AnsiDecorator')
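    # Roughly the layout dump() below produces (a sketch, not verbatim output;
    # the project name, version and keys are hypothetical):
    #
    #   myproject 1.0.0
    #
    #     Features
    #       docs : YES
    #       tests: NO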
    def dump(self):
        mlog.log(self.project_name, mlog.normal_cyan(self.project_version))
        for section, values in self.sections.items():
            mlog.log('')  # newline
            if section:
                mlog.log(' ', mlog.bold(section))
            for k, v in values.items():
                v, list_sep = v
                indent = self.max_key_len - len(k) + 3
                end = ' ' if v else ''
                mlog.log(' ' * indent, k + ':', end=end)
                indent = self.max_key_len + 6
                self.dump_value(v, list_sep, indent)
        mlog.log('')  # newline

    def dump_value(self, arr, list_sep, indent):
        lines_sep = '\n' + ' ' * indent
        if list_sep is None:
            mlog.log(*arr, sep=lines_sep)
            return
        max_len = shutil.get_terminal_size().columns
        line = []
        line_len = indent
        lines_sep = list_sep.rstrip() + lines_sep
        for v in arr:
            v_len = self.text_len(v) + len(list_sep)
            if line and line_len + v_len > max_len:
                mlog.log(*line, sep=list_sep, end=lines_sep)
                line_len = indent
                line = []
            line.append(v)
            line_len += v_len
        mlog.log(*line, sep=list_sep)


class MesonMain(InterpreterObject):
    def __init__(self, build, interpreter):
        InterpreterObject.__init__(self)
        self.build = build
        self.interpreter = interpreter
        self._found_source_scripts = {}
        self.methods.update({
            'get_compiler': self.get_compiler_method,
            'is_cross_build': self.is_cross_build_method,
            'has_exe_wrapper': self.has_exe_wrapper_method,
            'can_run_host_binaries': self.can_run_host_binaries_method,
            'is_unity': self.is_unity_method,
            'is_subproject': self.is_subproject_method,
            'current_source_dir': self.current_source_dir_method,
            'current_build_dir': self.current_build_dir_method,
            'source_root': self.source_root_method,
            'build_root': self.build_root_method,
            'project_source_root': self.project_source_root_method,
            'project_build_root': self.project_build_root_method,
            'add_install_script': self.add_install_script_method,
            'add_postconf_script': self.add_postconf_script_method,
            'add_dist_script': self.add_dist_script_method,
            'install_dependency_manifest': self.install_dependency_manifest_method,
            'override_dependency': self.override_dependency_method,
            'override_find_program': self.override_find_program_method,
            'project_version': self.project_version_method,
            'project_license': self.project_license_method,
            'version': self.version_method,
            'project_name': self.project_name_method,
            'get_cross_property': self.get_cross_property_method,
            'get_external_property': self.get_external_property_method,
            'backend': self.backend_method,
        })

    def _find_source_script(self, prog: T.Union[str, ExecutableHolder], args):
        if isinstance(prog, ExecutableHolder):
            prog_path = self.interpreter.backend.get_target_filename(prog.held_object)
            return build.RunScript([prog_path], args)
        elif isinstance(prog, ExternalProgramHolder):
            return build.RunScript(prog.get_command(), args)
        # Prefer scripts in the current source directory
        search_dir = os.path.join(self.interpreter.environment.source_dir, self.interpreter.subdir)
        key = (prog, search_dir)
        if key in self._found_source_scripts:
            found = self._found_source_scripts[key]
        else:
            found = dependencies.ExternalProgram(prog, search_dir=search_dir)
            if found.found():
                self._found_source_scripts[key] = found
            else:
                m = 'Script or command {!r} not found or not executable'
                raise InterpreterException(m.format(prog))
        return build.RunScript(found.get_command(), args)

    def _process_script_args(
            self, name: str, args: T.List[T.Union[
                str, mesonlib.File, CustomTargetHolder,
                CustomTargetIndexHolder, ConfigureFileHolder,
                ExternalProgramHolder, ExecutableHolder,
            ]], allow_built: bool = False) -> T.List[str]:
        script_args = []  # T.List[str]
        new = False
        for a in args:
            a = unholder(a)
            if isinstance(a, str):
                script_args.append(a)
            elif isinstance(a, mesonlib.File):
                new = True
                script_args.append(a.rel_to_builddir(self.interpreter.environment.source_dir))
            elif isinstance(a, (build.BuildTarget, build.CustomTarget, build.CustomTargetIndex)):
                if not allow_built:
                    raise InterpreterException('Arguments to {} cannot be built'.format(name))
                new = True
                script_args.extend([os.path.join(a.get_subdir(), o) for o in a.get_outputs()])

                # This feels really hacky, but I'm not sure how else to fix
                # this without completely rewriting install script handling.
                # This is complicated by the fact that the install target
                # depends on all.
                if isinstance(a, build.CustomTargetIndex):
                    a.target.build_by_default = True
                else:
                    a.build_by_default = True
            elif isinstance(a, build.ConfigureFile):
                new = True
                script_args.append(os.path.join(a.subdir, a.targetname))
            elif isinstance(a, dependencies.ExternalProgram):
                script_args.extend(a.command)
                new = True
            else:
                raise InterpreterException(
                    'Arguments to {} must be strings, Files, CustomTargets, '
                    'Indexes of CustomTargets, or ConfigureFiles'.format(name))
        if new:
            FeatureNew.single_use(
                'Calling "{}" with File, CustomTarget, Index of CustomTarget, '
                'ConfigureFile, Executable, or ExternalProgram'.format(name),
                '0.55.0', self.interpreter.subproject)
        return script_args

    @permittedKwargs(set())
    def add_install_script_method(self, args: 'T.Tuple[T.Union[str, ExecutableHolder], T.Union[str, mesonlib.File, CustomTargetHolder, CustomTargetIndexHolder, ConfigureFileHolder], ...]', kwargs):
        if len(args) < 1:
            raise InterpreterException('add_install_script takes one or more arguments')
        script_args = self._process_script_args('add_install_script', args[1:], allow_built=True)
        script = self._find_source_script(args[0], script_args)
        self.build.install_scripts.append(script)

    @permittedKwargs(set())
    def add_postconf_script_method(self, args, kwargs):
        if len(args) < 1:
            raise InterpreterException('add_postconf_script takes one or more arguments')
        script_args = self._process_script_args('add_postconf_script', args[1:], allow_built=True)
        script = self._find_source_script(args[0], script_args)
        self.build.postconf_scripts.append(script)

    @permittedKwargs(set())
    def add_dist_script_method(self, args, kwargs):
        if len(args) < 1:
            raise InterpreterException('add_dist_script takes one or more arguments')
        if len(args) > 1:
            FeatureNew.single_use('Calling "add_dist_script" with multiple arguments',
                                  '0.49.0', self.interpreter.subproject)
        if self.interpreter.subproject != '':
            raise InterpreterException('add_dist_script may not be used in a subproject.')
        script_args = self._process_script_args('add_dist_script', args[1:], allow_built=True)
        script = self._find_source_script(args[0], script_args)
        self.build.dist_scripts.append(script)
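    # Illustrative meson.build usage of the three script-registration methods
    # above; the script names and arguments are hypothetical:
    #
    #   meson.add_install_script('scripts/post_install.py', get_option('prefix'))
    #   meson.add_postconf_script('scripts/postconf.py')
    #   meson.add_dist_script('scripts/dist_cleanup.sh')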
    @noPosargs
    @permittedKwargs({})
    def current_source_dir_method(self, args, kwargs):
        src = self.interpreter.environment.source_dir
        sub = self.interpreter.subdir
        if sub == '':
            return src
        return os.path.join(src, sub)

    @noPosargs
    @permittedKwargs({})
    def current_build_dir_method(self, args, kwargs):
        src = self.interpreter.environment.build_dir
        sub = self.interpreter.subdir
        if sub == '':
            return src
        return os.path.join(src, sub)

    @noPosargs
    @permittedKwargs({})
    def backend_method(self, args, kwargs):
        return self.interpreter.backend.name

    @noPosargs
    @permittedKwargs({})
    @FeatureDeprecated('meson.source_root', '0.56.0', 'use meson.current_source_dir instead.')
    def source_root_method(self, args, kwargs):
        return self.interpreter.environment.source_dir

    @noPosargs
    @permittedKwargs({})
    @FeatureDeprecated('meson.build_root', '0.56.0', 'use meson.current_build_dir instead.')
    def build_root_method(self, args, kwargs):
        return self.interpreter.environment.build_dir

    @noPosargs
    @permittedKwargs({})
    @FeatureNew('meson.project_source_root', '0.56.0')
    def project_source_root_method(self, args, kwargs):
        src = self.interpreter.environment.source_dir
        sub = self.interpreter.root_subdir
        if sub == '':
            return src
        return os.path.join(src, sub)

    @noPosargs
    @permittedKwargs({})
    @FeatureNew('meson.project_build_root', '0.56.0')
    def project_build_root_method(self, args, kwargs):
        src = self.interpreter.environment.build_dir
        sub = self.interpreter.root_subdir
        if sub == '':
            return src
        return os.path.join(src, sub)

    @noPosargs
    @permittedKwargs({})
    @FeatureDeprecated('meson.has_exe_wrapper', '0.55.0', 'use meson.can_run_host_binaries instead.')
    def has_exe_wrapper_method(self, args: T.Tuple[object, ...], kwargs: T.Dict[str, object]) -> bool:
        return self.can_run_host_binaries_impl(args, kwargs)

    @noPosargs
    @permittedKwargs({})
    @FeatureNew('meson.can_run_host_binaries', '0.55.0')
    def can_run_host_binaries_method(self, args: T.Tuple[object, ...], kwargs: T.Dict[str, object]) -> bool:
        return self.can_run_host_binaries_impl(args, kwargs)

    def can_run_host_binaries_impl(self, args, kwargs):
        if (self.is_cross_build_method(None, None) and
                self.build.environment.need_exe_wrapper()):
            if self.build.environment.exe_wrapper is None:
                return False
        # We return True when exe_wrap is defined, when it's not needed, and
        # when we're compiling natively. The last two are semantically confusing.
        # Need to revisit this.
        return True

    @noPosargs
    @permittedKwargs({})
    def is_cross_build_method(self, args, kwargs):
        return self.build.environment.is_cross_build()

    @permittedKwargs({'native'})
    def get_compiler_method(self, args, kwargs):
        if len(args) != 1:
            raise InterpreterException('get_compiler_method must have one and only one argument.')
        cname = args[0]
        for_machine = Interpreter.machine_from_native_kwarg(kwargs)
        clist = self.interpreter.coredata.compilers[for_machine]
        if cname in clist:
            return CompilerHolder(clist[cname], self.build.environment, self.interpreter.subproject)
        raise InterpreterException('Tried to access compiler for language "%s", not specified for %s machine.'
                                   % (cname, for_machine.get_lower_case_name()))

    @noPosargs
    @permittedKwargs({})
    def is_unity_method(self, args, kwargs):
        optval = self.interpreter.environment.coredata.get_builtin_option('unity')
        if optval == 'on' or (optval == 'subprojects' and self.interpreter.is_subproject()):
            return True
        return False

    @noPosargs
    @permittedKwargs({})
    def is_subproject_method(self, args, kwargs):
        return self.interpreter.is_subproject()

    @permittedKwargs({})
    def install_dependency_manifest_method(self, args, kwargs):
        if len(args) != 1:
            raise InterpreterException('Must specify manifest install file name')
        if not isinstance(args[0], str):
            raise InterpreterException('Argument must be a string.')
        self.build.dep_manifest_name = args[0]
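    # Illustrative meson.build usage of the override methods defined below;
    # the program and dependency names are hypothetical:
    #
    #   meson.override_find_program('mygen', find_program('tools/mygen.py'))
    #   meson.override_dependency('zlib', zlib_dep)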
    @FeatureNew('meson.override_find_program', '0.46.0')
    @permittedKwargs({})
    def override_find_program_method(self, args, kwargs):
        if len(args) != 2:
            raise InterpreterException('Override needs two arguments')
        name, exe = args
        if not isinstance(name, str):
            raise InterpreterException('First argument must be a string')
        if hasattr(exe, 'held_object'):
            exe = exe.held_object
        if isinstance(exe, mesonlib.File):
            abspath = exe.absolute_path(self.interpreter.environment.source_dir,
                                        self.interpreter.environment.build_dir)
            if not os.path.exists(abspath):
                raise InterpreterException('Tried to override %s with a file that does not exist.' % name)
            exe = OverrideProgram(name, abspath)
        if not isinstance(exe, (dependencies.ExternalProgram, build.Executable)):
            raise InterpreterException('Second argument must be an external program or executable.')
        self.interpreter.add_find_program_override(name, exe)

    @FeatureNew('meson.override_dependency', '0.54.0')
    @permittedKwargs({'native'})
    def override_dependency_method(self, args, kwargs):
        if len(args) != 2:
            raise InterpreterException('Override needs two arguments')
        name = args[0]
        dep = args[1]
        if not isinstance(name, str) or not name:
            raise InterpreterException('First argument must be a string and cannot be empty')
        if hasattr(dep, 'held_object'):
            dep = dep.held_object
        if not isinstance(dep, dependencies.Dependency):
            raise InterpreterException('Second argument must be a dependency object')
        identifier = dependencies.get_dep_identifier(name, kwargs)
        for_machine = self.interpreter.machine_from_native_kwarg(kwargs)
        override = self.build.dependency_overrides[for_machine].get(identifier)
        if override:
            m = 'Tried to override dependency {!r} which has already been resolved or overridden at {}'
            location = mlog.get_error_location_string(override.node.filename, override.node.lineno)
            raise InterpreterException(m.format(name, location))
        self.build.dependency_overrides[for_machine][identifier] = \
            build.DependencyOverride(dep, self.interpreter.current_node)

    @noPosargs
    @permittedKwargs({})
    def project_version_method(self, args, kwargs):
        return self.build.dep_manifest[self.interpreter.active_projectname]['version']

    @FeatureNew('meson.project_license()', '0.45.0')
    @noPosargs
    @permittedKwargs({})
    def project_license_method(self, args, kwargs):
        return self.build.dep_manifest[self.interpreter.active_projectname]['license']

    @noPosargs
    @permittedKwargs({})
    def version_method(self, args, kwargs):
        return MesonVersionString(coredata.version)

    @noPosargs
    @permittedKwargs({})
    def project_name_method(self, args, kwargs):
        return self.interpreter.active_projectname

    @noArgsFlattening
    @permittedKwargs({})
    def get_cross_property_method(self, args, kwargs) -> str:
        if len(args) < 1 or len(args) > 2:
            raise InterpreterException('Must have one or two arguments.')
        propname = args[0]
        if not isinstance(propname, str):
            raise InterpreterException('Property name must be string.')
        try:
            props = self.interpreter.environment.properties.host
            return props[propname]
        except Exception:
            if len(args) == 2:
                return args[1]
            raise InterpreterException('Unknown cross property: %s.' % propname)
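    # Illustrative meson.build usage of the property lookups above and below;
    # the property name is hypothetical, and the optional second argument is
    # the fallback returned when the property is not defined:
    #
    #   sysroot = meson.get_external_property('sys_root', '/')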
    @noArgsFlattening
    @permittedKwargs({'native'})
    @FeatureNew('meson.get_external_property', '0.54.0')
    def get_external_property_method(self, args: T.Sequence[str], kwargs: dict) -> str:
        if len(args) < 1 or len(args) > 2:
            raise InterpreterException('Must have one or two positional arguments.')
        propname = args[0]
        if not isinstance(propname, str):
            raise InterpreterException('Property name must be string.')

        def _get_native() -> str:
            try:
                props = self.interpreter.environment.properties.build
                return props[propname]
            except Exception:
                if len(args) == 2:
                    return args[1]
                raise InterpreterException('Unknown native property: %s.' % propname)

        if 'native' in kwargs:
            if kwargs['native']:
                return _get_native()
            else:
                return self.get_cross_property_method(args, {})
        else:  # native: not specified
            if self.build.environment.is_cross_build():
                return self.get_cross_property_method(args, kwargs)
            else:
                return _get_native()


known_library_kwargs = (
    build.known_shlib_kwargs |
    build.known_stlib_kwargs
)

known_build_target_kwargs = (
    known_library_kwargs |
    build.known_exe_kwargs |
    build.known_jar_kwargs |
    {'target_type'}
)

_base_test_args = {'args', 'depends', 'env', 'should_fail', 'timeout', 'workdir',
                   'suite', 'priority', 'protocol'}

permitted_kwargs = {
    'add_global_arguments': {'language', 'native'},
    'add_global_link_arguments': {'language', 'native'},
    'add_languages': {'required', 'native'},
    'add_project_link_arguments': {'language', 'native'},
    'add_project_arguments': {'language', 'native'},
    'add_test_setup': {'exe_wrapper', 'gdb', 'timeout_multiplier', 'env', 'is_default'},
    'benchmark': _base_test_args,
    'build_target': known_build_target_kwargs,
    'configure_file': {'input', 'output', 'configuration', 'command', 'copy',
                       'depfile', 'install_dir', 'install_mode', 'capture',
                       'install', 'format', 'output_format', 'encoding'},
    'custom_target': {'input', 'output', 'command', 'install', 'install_dir',
                      'install_mode', 'build_always', 'capture', 'depends',
                      'depend_files', 'depfile', 'build_by_default',
                      'build_always_stale', 'console'},
    'dependency': {'default_options', 'embed', 'fallback', 'language', 'main',
                   'method', 'modules', 'components', 'cmake_module_path',
                   'optional_modules', 'native', 'not_found_message', 'required',
                   'static', 'version', 'private_headers', 'cmake_args',
                   'include_type'},
    'declare_dependency': {'include_directories', 'link_with', 'sources',
                           'dependencies', 'compile_args', 'link_args',
                           'link_whole', 'version', 'variables'},
    'executable': build.known_exe_kwargs,
    'find_program': {'required', 'native', 'version', 'dirs'},
    'generator': {'arguments', 'output', 'depends', 'depfile', 'capture',
                  'preserve_path_from'},
    'include_directories': {'is_system'},
    'install_data': {'install_dir', 'install_mode', 'rename', 'sources'},
    'install_headers': {'install_dir', 'install_mode', 'subdir'},
    'install_man': {'install_dir', 'install_mode'},
    'install_subdir': {'exclude_files', 'exclude_directories', 'install_dir',
                       'install_mode', 'strip_directory'},
    'jar': build.known_jar_kwargs,
    'project': {'version', 'meson_version', 'default_options', 'license', 'subproject_dir'},
    'run_command': {'check', 'capture', 'env'},
    'run_target': {'command', 'depends'},
    'shared_library': build.known_shlib_kwargs,
    'shared_module': build.known_shmod_kwargs,
    'static_library': build.known_stlib_kwargs,
    'both_libraries': known_library_kwargs,
    'library': known_library_kwargs,
    'subdir': {'if_found'},
    'subproject': {'version', 'default_options', 'required'},
    'test': set.union(_base_test_args, {'is_parallel'}),
    'vcs_tag': {'input', 'output', 'fallback', 'command', 'replace_string'},
}
class Interpreter(InterpreterBase):

    def __init__(
                self,
                build: build.Build,
                backend: T.Optional[Backend] = None,
                subproject: str = '',
                subdir: str = '',
                subproject_dir: str = 'subprojects',
                modules: T.Optional[T.Dict[str, ExtensionModule]] = None,
                default_project_options: T.Optional[T.Dict[str, str]] = None,
                mock: bool = False,
                ast: T.Optional[mparser.CodeBlockNode] = None,
                is_translated: bool = False,
            ) -> None:
        super().__init__(build.environment.get_source_dir(), subdir, subproject)
        self.an_unpicklable_object = mesonlib.an_unpicklable_object
        self.build = build
        self.environment = build.environment
        self.coredata = self.environment.get_coredata()
        self.backend = backend
        self.summary = {}
        if modules is None:
            self.modules = {}
        else:
            self.modules = modules
        # Subproject directory is usually the name of the subproject, but can
        # be different for dependencies provided by wrap files.
        self.subproject_directory_name = subdir.split(os.path.sep)[-1]
        self.subproject_dir = subproject_dir
        self.option_file = os.path.join(self.source_root, self.subdir, 'meson_options.txt')
        if not mock and ast is None:
            self.load_root_meson_file()
            self.sanity_check_ast()
        elif ast is not None:
            self.ast = ast
            self.sanity_check_ast()
        self.builtin.update({'meson': MesonMain(build, self)})
        self.generators = []
        self.visited_subdirs = {}
        self.project_args_frozen = False
        self.global_args_frozen = False  # implies self.project_args_frozen
        self.subprojects = {}
        self.subproject_stack = []
        self.configure_file_outputs = {}
        # Passed from the outside, only used in subprojects.
        if default_project_options:
            self.default_project_options = default_project_options.copy()
        else:
            self.default_project_options = {}
        self.project_default_options = {}
        self.build_func_dict()

        # build_def_files needs to be defined before parse_project is called
        #
        # For non-meson subprojects, we'll be using the ast. Even if it does
        # exist we don't want to add a dependency on it, it's autogenerated
        # from the actual build files, and is just for reference.
        self.build_def_files = []
        build_filename = os.path.join(self.subdir, environment.build_filename)
        if not is_translated:
            self.build_def_files.append(build_filename)
        if not mock:
            self.parse_project()
        self._redetect_machines()

    def _redetect_machines(self):
        # Re-initialize machine descriptions. We can do a better job now because we
        # have the compilers needed to gain more knowledge, so wipe out old
        # inference and start over.
        machines = self.build.environment.machines.miss_defaulting()
        machines.build = environment.detect_machine_info(self.coredata.compilers.build)
        self.build.environment.machines = machines.default_missing()
        assert self.build.environment.machines.build.cpu is not None
        assert self.build.environment.machines.host.cpu is not None
        assert self.build.environment.machines.target.cpu is not None

        self.builtin['build_machine'] = \
            MachineHolder(self.build.environment.machines.build)
        self.builtin['host_machine'] = \
            MachineHolder(self.build.environment.machines.host)
        self.builtin['target_machine'] = \
            MachineHolder(self.build.environment.machines.target)

    # TODO: Why is this in interpreter.py and not CoreData or Environment?
    def get_non_matching_default_options(self) -> T.Iterator[T.Tuple[str, str, coredata.UserOption]]:
        env = self.environment
        for def_opt_name, def_opt_value in self.project_default_options.items():
            for opts in env.coredata.get_all_options():
                cur_opt_value = opts.get(def_opt_name)
                if cur_opt_value is not None:
                    def_opt_value = env.coredata.validate_option_value(def_opt_name, def_opt_value)
                    if def_opt_value != cur_opt_value.value:
                        yield (def_opt_name, def_opt_value, cur_opt_value)

    def build_func_dict(self):
        self.funcs.update({
            'add_global_arguments': self.func_add_global_arguments,
            'add_project_arguments': self.func_add_project_arguments,
            'add_global_link_arguments': self.func_add_global_link_arguments,
            'add_project_link_arguments': self.func_add_project_link_arguments,
            'add_test_setup': self.func_add_test_setup,
            'add_languages': self.func_add_languages,
            'alias_target': self.func_alias_target,
            'assert': self.func_assert,
            'benchmark': self.func_benchmark,
            'build_target': self.func_build_target,
            'configuration_data': self.func_configuration_data,
            'configure_file': self.func_configure_file,
            'custom_target': self.func_custom_target,
            'declare_dependency': self.func_declare_dependency,
            'dependency': self.func_dependency,
            'disabler': self.func_disabler,
            'environment': self.func_environment,
            'error': self.func_error,
            'executable': self.func_executable,
            'generator': self.func_generator,
            'gettext': self.func_gettext,
            'get_option': self.func_get_option,
            'get_variable': self.func_get_variable,
            'files': self.func_files,
            'find_library': self.func_find_library,
            'find_program': self.func_find_program,
            'include_directories': self.func_include_directories,
            'import': self.func_import,
            'install_data': self.func_install_data,
            'install_headers': self.func_install_headers,
            'install_man': self.func_install_man,
            'install_subdir': self.func_install_subdir,
            'is_disabler': self.func_is_disabler,
            'is_variable': self.func_is_variable,
            'jar': self.func_jar,
            'join_paths': self.func_join_paths,
            'library': self.func_library,
            'message': self.func_message,
            'warning': self.func_warning,
            'option': self.func_option,
            'project': self.func_project,
            'run_target': self.func_run_target,
            'run_command': self.func_run_command,
            'set_variable': self.func_set_variable,
            'subdir': self.func_subdir,
            'subdir_done': self.func_subdir_done,
            'subproject': self.func_subproject,
            'summary': self.func_summary,
            'shared_library': self.func_shared_lib,
            'shared_module': self.func_shared_module,
            'static_library': self.func_static_lib,
            'both_libraries': self.func_both_lib,
            'test': self.func_test,
            'vcs_tag': self.func_vcs_tag,
        })
        if 'MESON_UNIT_TEST' in os.environ:
            self.funcs.update({'exception': self.func_exception})
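    # A minimal sketch of how the dispatch table built above is consumed; the
    # actual lookup lives in InterpreterBase and performs more validation:
    #
    #   def function_call(self, name, args, kwargs):
    #       func = self.funcs[name]  # e.g. 'executable' -> func_executable
    #       return func(node, args, kwargs)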
    def holderify(self, item):
        if isinstance(item, list):
            return [self.holderify(x) for x in item]
        if isinstance(item, dict):
            return {k: self.holderify(v) for k, v in item.items()}

        if isinstance(item, build.CustomTarget):
            return CustomTargetHolder(item, self)
        elif isinstance(item, (int, str, bool, Disabler, InterpreterObject)) or item is None:
            return item
        elif isinstance(item, build.Executable):
            return ExecutableHolder(item, self)
        elif isinstance(item, build.GeneratedList):
            return GeneratedListHolder(item)
        elif isinstance(item, build.RunTarget):
            raise RuntimeError('This is not a pipe.')
        elif isinstance(item, build.RunScript):
            raise RuntimeError('Do not do this.')
        elif isinstance(item, build.Data):
            return DataHolder(item)
        elif isinstance(item, dependencies.Dependency):
            return DependencyHolder(item, self.subproject)
        elif isinstance(item, dependencies.ExternalProgram):
            return ExternalProgramHolder(item, self.subproject)
        elif hasattr(item, 'held_object'):
            return item
        elif isinstance(item, InterpreterObject):
            return item
        else:
            raise InterpreterException('Module returned a value of unknown type.')

    def process_new_values(self, invalues):
        invalues = listify(invalues)
        for v in invalues:
            if isinstance(v, (RunTargetHolder, CustomTargetHolder, BuildTargetHolder)):
                v = v.held_object

            if isinstance(v, (build.BuildTarget, build.CustomTarget, build.RunTarget)):
                self.add_target(v.name, v)
            elif isinstance(v, list):
                self.module_method_callback(v)
            elif isinstance(v, build.GeneratedList):
                pass
            elif isinstance(v, build.RunScript):
                self.build.install_scripts.append(v)
            elif isinstance(v, build.Data):
                self.build.data.append(v)
            elif isinstance(v, dependencies.ExternalProgram):
                return ExternalProgramHolder(v, self.subproject)
            elif isinstance(v, dependencies.InternalDependency):
                # FIXME: This is special cased and not ideal:
                # The first source is our new VapiTarget, the rest are deps
                self.process_new_values(v.sources[0])
            elif isinstance(v, InstallDir):
                self.build.install_dirs.append(v)
            elif hasattr(v, 'held_object'):
                pass
            elif isinstance(v, (int, str, bool, Disabler)):
                pass
            else:
                raise InterpreterException('Module returned a value of unknown type.')

    def module_method_callback(self, return_object):
        if not isinstance(return_object, ModuleReturnValue):
            raise InterpreterException('Bug in module, it returned an invalid object')
        invalues = return_object.new_objects
        self.process_new_values(invalues)
        return self.holderify(return_object.return_value)

    def get_build_def_files(self) -> T.List[str]:
        return self.build_def_files
    def add_build_def_file(self, f):
        # Use relative path for files within source directory, and absolute path
        # for system files. Skip files within build directory. Also skip not regular
        # files (e.g. /dev/stdout) Normalize the path to avoid duplicates, this
        # is especially important to convert '/' to '\' on Windows.
        if isinstance(f, mesonlib.File):
            if f.is_built:
                return
            f = os.path.normpath(f.relative_name())
        elif os.path.isfile(f) and not f.startswith('/dev'):
            srcdir = Path(self.environment.get_source_dir())
            builddir = Path(self.environment.get_build_dir())
            try:
                f = Path(f).resolve()
            except OSError:
                f = Path(f)
                s = f.stat()
                if (hasattr(s, 'st_file_attributes') and
                        s.st_file_attributes & stat.FILE_ATTRIBUTE_REPARSE_POINT != 0 and
                        s.st_reparse_tag == stat.IO_REPARSE_TAG_APPEXECLINK):
                    # This is a Windows Store link which we can't
                    # resolve, so just do our best otherwise.
                    f = f.parent.resolve() / f.name
                else:
                    raise
            if builddir in f.parents:
                return
            if srcdir in f.parents:
                f = f.relative_to(srcdir)
            f = str(f)
        else:
            return
        if f not in self.build_def_files:
            self.build_def_files.append(f)

    def get_variables(self):
        return self.variables

    def check_stdlibs(self):
        machine_choices = [MachineChoice.HOST]
        if self.coredata.is_cross_build():
            machine_choices.append(MachineChoice.BUILD)
        for for_machine in machine_choices:
            props = self.build.environment.properties[for_machine]
            for l in self.coredata.compilers[for_machine].keys():
                try:
                    di = mesonlib.stringlistify(props.get_stdlib(l))
                except KeyError:
                    continue
                if len(di) == 1:
                    FeatureNew.single_use('stdlib without variable name', '0.56.0', self.subproject)
                kwargs = {'fallback': di,
                          'native': for_machine is MachineChoice.BUILD,
                          }
                name = display_name = l + '_stdlib'
                dep = self.dependency_impl(name, display_name, kwargs, force_fallback=True)
                self.build.stdlibs[for_machine][l] = dep

    def import_module(self, modname):
        if modname in self.modules:
            return
        try:
            module = importlib.import_module('mesonbuild.modules.' + modname)
        except ImportError:
            raise InvalidArguments('Module "%s" does not exist' % (modname, ))
        ext_module = module.initialize(self)
        assert isinstance(ext_module, ExtensionModule)
        self.modules[modname] = ext_module
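    # Illustrative meson.build usage of module importing handled by
    # func_import() below; the module names are examples only:
    #
    #   fs = import('fs')                  # stable module
    #   unstable = import('unstable-foo')  # hypothetical unstable module, warns on use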
    @stringArgs
    @noKwargs
    def func_import(self, node, args, kwargs):
        if len(args) != 1:
            raise InvalidCode('Import takes one argument.')
        modname = args[0]
        if modname.startswith('unstable-'):
            plainname = modname.split('-', 1)[1]
            try:
                # check if stable module exists
                self.import_module(plainname)
                mlog.warning('Module %s is now stable, please use the %s module instead.' % (modname, plainname))
                modname = plainname
            except InvalidArguments:
                mlog.warning('Module %s has no backwards or forwards compatibility and might not exist in future releases.' % modname,
                             location=node)
                modname = 'unstable_' + plainname
        self.import_module(modname)
        return ModuleHolder(modname, self.modules[modname], self)

    @stringArgs
    @noKwargs
    def func_files(self, node, args, kwargs):
        return [mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, fname) for fname in args]

    # Used by declare_dependency() and pkgconfig.generate()
    def extract_variables(self, kwargs, argname='variables', list_new=False, dict_new=False):
        variables = kwargs.get(argname, {})
        if isinstance(variables, dict):
            if dict_new and variables:
                FeatureNew.single_use('variables as dictionary', '0.56.0', self.subproject)
        else:
            varlist = mesonlib.stringlistify(variables)
            if list_new:
                FeatureNew.single_use('variables as list of strings', '0.56.0', self.subproject)
            variables = collections.OrderedDict()
            for v in varlist:
                try:
                    (key, value) = v.split('=', 1)
                except ValueError:
                    raise InterpreterException('Variable {!r} must have a value separated by equals sign.'.format(v))
                variables[key.strip()] = value.strip()
        for k, v in variables.items():
            if not k or not v:
                raise InterpreterException('Empty variable name or value')
            if any(c.isspace() for c in k):
                raise InterpreterException('Invalid whitespace in variable name "{}"'.format(k))
            if not isinstance(v, str):
                raise InterpreterException('variables values must be strings.')
        return variables

    @FeatureNewKwargs('declare_dependency', '0.46.0', ['link_whole'])
    @FeatureNewKwargs('declare_dependency', '0.54.0', ['variables'])
    @permittedKwargs(permitted_kwargs['declare_dependency'])
    @noPosargs
    def func_declare_dependency(self, node, args, kwargs):
        version = kwargs.get('version', self.project_version)
        if not isinstance(version, str):
            raise InterpreterException('Version must be a string.')
        incs = self.extract_incdirs(kwargs)
        libs = unholder(extract_as_list(kwargs, 'link_with'))
        libs_whole = unholder(extract_as_list(kwargs, 'link_whole'))
        sources = extract_as_list(kwargs, 'sources')
        sources = unholder(listify(self.source_strings_to_files(sources)))
        deps = unholder(extract_as_list(kwargs, 'dependencies'))
        compile_args = mesonlib.stringlistify(kwargs.get('compile_args', []))
        link_args = mesonlib.stringlistify(kwargs.get('link_args', []))
        variables = self.extract_variables(kwargs, list_new=True)
        final_deps = []
        for d in deps:
            try:
                d = d.held_object
            except Exception:
                pass
            if not isinstance(d, (dependencies.Dependency, dependencies.ExternalLibrary, dependencies.InternalDependency)):
                raise InterpreterException('Dependencies must be external deps')
            final_deps.append(d)
        for l in libs:
            if isinstance(l, dependencies.Dependency):
                raise InterpreterException('''Entries in "link_with" may only be self-built targets,
external dependencies (including libraries) must go to "dependencies".''')
        dep = dependencies.InternalDependency(version, incs, compile_args,
                                              link_args, libs, libs_whole, sources,
                                              final_deps, variables)
        return DependencyHolder(dep, self.subproject)
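    # Illustrative meson.build usage of declare_dependency(); the target and
    # variable names are hypothetical:
    #
    #   foo_dep = declare_dependency(link_with: foo_lib,
    #                                include_directories: foo_inc,
    #                                variables: {'pkgdatadir': '/usr/share/foo'})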
    @noKwargs
    def func_assert(self, node, args, kwargs):
        if len(args) == 1:
            FeatureNew.single_use('assert function without message argument', '0.53.0', self.subproject)
            value = args[0]
            message = None
        elif len(args) == 2:
            value, message = args
            if not isinstance(message, str):
                raise InterpreterException('Assert message not a string.')
        else:
            raise InterpreterException('Assert takes between one and two arguments')
        if not isinstance(value, bool):
            raise InterpreterException('Assert value not bool.')
        if not value:
            if message is None:
                from .ast import AstPrinter
                printer = AstPrinter()
                node.args.arguments[0].accept(printer)
                message = printer.result
            raise InterpreterException('Assert failed: ' + message)

    def validate_arguments(self, args, argcount, arg_types):
        if argcount is not None:
            if argcount != len(args):
                raise InvalidArguments('Expected %d arguments, got %d.' % (argcount, len(args)))
        for actual, wanted in zip(args, arg_types):
            if wanted is not None:
                if not isinstance(actual, wanted):
                    raise InvalidArguments('Incorrect argument type.')

    @FeatureNewKwargs('run_command', '0.50.0', ['env'])
    @FeatureNewKwargs('run_command', '0.47.0', ['check', 'capture'])
    @permittedKwargs(permitted_kwargs['run_command'])
    def func_run_command(self, node, args, kwargs):
        return self.run_command_impl(node, args, kwargs)

    def run_command_impl(self, node, args, kwargs, in_builddir=False):
        if len(args) < 1:
            raise InterpreterException('Not enough arguments')
        cmd, *cargs = args
        capture = kwargs.get('capture', True)
        srcdir = self.environment.get_source_dir()
        builddir = self.environment.get_build_dir()

        check = kwargs.get('check', False)
        if not isinstance(check, bool):
            raise InterpreterException('Check must be boolean.')

        env = self.unpack_env_kwarg(kwargs)

        m = 'must be a string, or the output of find_program(), files() '\
            'or configure_file(), or a compiler object; not {!r}'
        expanded_args = []
        if isinstance(cmd, ExternalProgramHolder):
            cmd = cmd.held_object
            if isinstance(cmd, build.Executable):
                progname = node.args.arguments[0].value
                msg = 'Program {!r} was overridden with the compiled executable {!r}'\
                      ' and therefore cannot be used during configuration'
                raise InterpreterException(msg.format(progname, cmd.description()))
            if not cmd.found():
                raise InterpreterException('command {!r} not found or not executable'.format(cmd.get_name()))
        elif isinstance(cmd, CompilerHolder):
            exelist = cmd.compiler.get_exelist()
            cmd = exelist[0]
            prog = ExternalProgram(cmd, silent=True)
            if not prog.found():
                raise InterpreterException('Program {!r} not found or not executable'.format(cmd))
            cmd = prog
            expanded_args = exelist[1:]
        else:
            if isinstance(cmd, mesonlib.File):
                cmd = cmd.absolute_path(srcdir, builddir)
            elif not isinstance(cmd, str):
                raise InterpreterException('First argument ' + m.format(cmd))
            # Prefer scripts in the current source directory
            search_dir = os.path.join(srcdir, self.subdir)
            prog = ExternalProgram(cmd, silent=True, search_dir=search_dir)
            if not prog.found():
                raise InterpreterException('Program or command {!r} not found or not executable'.format(cmd))
            cmd = prog
        for a in listify(cargs):
            if isinstance(a, str):
                expanded_args.append(a)
            elif isinstance(a, mesonlib.File):
                expanded_args.append(a.absolute_path(srcdir, builddir))
            elif isinstance(a, ExternalProgramHolder):
                expanded_args.append(a.held_object.get_path())
            else:
                raise InterpreterException('Arguments ' + m.format(a))
        # If any file that was used as an argument to the command
        # changes, we must re-run the configuration step.
        self.add_build_def_file(cmd.get_path())
        for a in expanded_args:
            if not os.path.isabs(a):
                a = os.path.join(builddir if in_builddir else srcdir, self.subdir, a)
            self.add_build_def_file(a)
        return RunProcess(cmd, expanded_args, env, srcdir, builddir, self.subdir,
                          self.environment.get_build_command() + ['introspect'],
                          in_builddir=in_builddir, check=check, capture=capture)
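    # Illustrative meson.build usage of run_command() implemented above; the
    # script path is hypothetical:
    #
    #   r = run_command('scripts/version.sh', check: true)
    #   version = r.stdout().strip()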
    @stringArgs
    def func_gettext(self, nodes, args, kwargs):
        raise InterpreterException('Gettext() function has been moved to module i18n. Import it and use i18n.gettext() instead')

    def func_option(self, nodes, args, kwargs):
        raise InterpreterException('Tried to call option() in build description file. All options must be in the option file.')

    @FeatureNewKwargs('subproject', '0.38.0', ['default_options'])
    @permittedKwargs(permitted_kwargs['subproject'])
    @stringArgs
    def func_subproject(self, nodes, args, kwargs):
        if len(args) != 1:
            raise InterpreterException('Subproject takes exactly one argument')
        subp_name = args[0]
        return self.do_subproject(subp_name, 'meson', kwargs)

    def disabled_subproject(self, subp_name, disabled_feature=None, exception=None):
        sub = SubprojectHolder(None, os.path.join(self.subproject_dir, subp_name),
                               disabled_feature=disabled_feature, exception=exception)
        self.subprojects[subp_name] = sub
        return sub

    def get_subproject(self, subp_name):
        sub = self.subprojects.get(subp_name)
        if sub and sub.found():
            return sub
        return None
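    # Illustrative meson.build usage of the subproject machinery implemented
    # in do_subproject() below; the subproject name is hypothetical:
    #
    #   zlib_proj = subproject('zlib', default_options: ['default_library=static'])
    #   zlib_dep = zlib_proj.get_variable('zlib_dep')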
    def do_subproject(self, subp_name: str, method: str, kwargs):
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            mlog.log('Subproject', mlog.bold(subp_name), ':', 'skipped: feature',
                     mlog.bold(feature), 'disabled')
            return self.disabled_subproject(subp_name, disabled_feature=feature)

        default_options = mesonlib.stringlistify(kwargs.get('default_options', []))
        default_options = coredata.create_options_dict(default_options)

        if subp_name == '':
            raise InterpreterException('Subproject name must not be empty.')
        if subp_name[0] == '.':
            raise InterpreterException('Subproject name must not start with a period.')
        if '..' in subp_name:
            raise InterpreterException('Subproject name must not contain a ".." path segment.')
        if os.path.isabs(subp_name):
            raise InterpreterException('Subproject name must not be an absolute path.')
        if has_path_sep(subp_name):
            mlog.warning('Subproject name has a path separator. This may cause unexpected behaviour.',
                         location=self.current_node)
        if subp_name in self.subproject_stack:
            fullstack = self.subproject_stack + [subp_name]
            incpath = ' => '.join(fullstack)
            raise InvalidCode('Recursive include of subprojects: %s.' % incpath)
        if subp_name in self.subprojects:
            subproject = self.subprojects[subp_name]
            if required and not subproject.found():
                raise InterpreterException('Subproject "%s" required but not found.' % (subproject.subdir))
            return subproject

        r = self.environment.wrap_resolver
        try:
            subdir = r.resolve(subp_name, method, self.subproject)
        except wrap.WrapException as e:
            if not required:
                mlog.log(e)
                mlog.log('Subproject ', mlog.bold(subp_name), 'is buildable:', mlog.red('NO'), '(disabling)')
                return self.disabled_subproject(subp_name, exception=e)
            raise e

        subdir_abs = os.path.join(self.environment.get_source_dir(), subdir)
        os.makedirs(os.path.join(self.build.environment.get_build_dir(), subdir), exist_ok=True)
        self.global_args_frozen = True

        mlog.log()
        with mlog.nested():
            mlog.log('Executing subproject', mlog.bold(subp_name), 'method', mlog.bold(method), '\n')
        try:
            if method == 'meson':
                return self._do_subproject_meson(subp_name, subdir, default_options, kwargs)
            elif method == 'cmake':
                return self._do_subproject_cmake(subp_name, subdir, subdir_abs, default_options, kwargs)
            else:
                raise InterpreterException('The method {} is invalid for the subproject {}'.format(method, subp_name))
        # Invalid code is always an error
        except InvalidCode:
            raise
        except Exception as e:
            if not required:
                with mlog.nested():
                    # Suppress the 'ERROR:' prefix because this exception is not
                    # fatal and VS CI treat any logs with "ERROR:" as fatal.
                    mlog.exception(e, prefix=mlog.yellow('Exception:'))
                mlog.log('\nSubproject', mlog.bold(subdir), 'is buildable:', mlog.red('NO'), '(disabling)')
                return self.disabled_subproject(subp_name, exception=e)
            raise e

    def _do_subproject_meson(self, subp_name: str, subdir: str, default_options, kwargs,
                             ast: T.Optional[mparser.CodeBlockNode] = None,
                             build_def_files: T.Optional[T.List[str]] = None,
                             is_translated: bool = False) -> SubprojectHolder:
        with mlog.nested():
            new_build = self.build.copy()
            subi = Interpreter(new_build, self.backend, subp_name, subdir, self.subproject_dir,
                               self.modules, default_options, ast=ast, is_translated=is_translated)
            subi.subprojects = self.subprojects

            subi.subproject_stack = self.subproject_stack + [subp_name]
            current_active = self.active_projectname
            current_warnings_counter = mlog.log_warnings_counter
            mlog.log_warnings_counter = 0
            subi.run()
            subi_warnings = mlog.log_warnings_counter
            mlog.log_warnings_counter = current_warnings_counter
            mlog.log('Subproject', mlog.bold(subp_name), 'finished.')
        mlog.log()

        if 'version' in kwargs:
            pv = subi.project_version
            wanted = kwargs['version']
            if pv == 'undefined' or not mesonlib.version_compare_many(pv, wanted)[0]:
                raise InterpreterException('Subproject %s version is %s but %s required.' % (subp_name, pv, wanted))
        self.active_projectname = current_active
        self.subprojects.update(subi.subprojects)
        self.subprojects[subp_name] = SubprojectHolder(subi, subdir, warnings=subi_warnings)
        # Duplicates are possible when subproject uses files from project root
        if build_def_files:
            self.build_def_files = list(set(self.build_def_files + build_def_files))
        # We always need the subi.build_def_files, to propagate sub-sub-projects
        self.build_def_files = list(set(self.build_def_files + subi.build_def_files))
        self.build.merge(subi.build)
        self.build.subprojects[subp_name] = subi.project_version
        self.summary.update(subi.summary)
        return self.subprojects[subp_name]

    def _do_subproject_cmake(self, subp_name, subdir, subdir_abs, default_options, kwargs):
        with mlog.nested():
            new_build = self.build.copy()
            prefix = self.coredata.builtins['prefix'].value

            from .modules.cmake import CMakeSubprojectOptions
            options = kwargs.get('options', CMakeSubprojectOptions())
            if not isinstance(options, CMakeSubprojectOptions):
                raise InterpreterException('"options" kwarg must be CMakeSubprojectOptions'
                                           ' object (created by cmake.subproject_options())')

            cmake_options = mesonlib.stringlistify(kwargs.get('cmake_options', []))
            cmake_options += options.cmake_options
            cm_int = CMakeInterpreter(new_build, Path(subdir), Path(subdir_abs), Path(prefix),
                                      new_build.environment, self.backend)
            cm_int.initialise(cmake_options)
            cm_int.analyse()

            # Generate a meson ast and execute it with the normal do_subproject_meson
            ast = cm_int.pretend_to_be_meson(options.target_options)

            mlog.log()
            with mlog.nested():
                mlog.log('Processing generated meson AST')

                # Debug print the generated meson file
                from .ast import AstIndentationGenerator, AstPrinter
                printer = AstPrinter()
                ast.accept(AstIndentationGenerator())
                ast.accept(printer)
                printer.post_process()
                meson_filename = os.path.join(self.build.environment.get_build_dir(), subdir, 'meson.build')
                with open(meson_filename, "w") as f:
                    f.write(printer.result)

                mlog.log('Build file:', meson_filename)
                mlog.cmd_ci_include(meson_filename)
                mlog.log()

            result = self._do_subproject_meson(subp_name, subdir, default_options, kwargs,
                                               ast, cm_int.bs_files, is_translated=True)
            result.cm_interpreter = cm_int

        mlog.log()
        return result
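    # A hedged summary of the lookup order implemented by get_option_internal()
    # below: the subproject-scoped name ('sub:opt') is tried first in each
    # option table, falling back to the unscoped name when the scoped option is
    # missing or yielding, and finally to per-project user options.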
    def get_option_internal(self, optname):
        raw_optname = optname
        if self.is_subproject():
            optname = self.subproject + ':' + optname

        for opts in [
                self.coredata.base_options,
                compilers.base_options,
                self.coredata.builtins,
                dict(self.coredata.get_prefixed_options_per_machine(self.coredata.builtins_per_machine)),
                dict(self.coredata.flatten_lang_iterator(
                    self.coredata.get_prefixed_options_per_machine(self.coredata.compiler_options))),
        ]:
            v = opts.get(optname)
            if v is None or v.yielding:
                v = opts.get(raw_optname)
            if v is not None:
                return v

        try:
            opt = self.coredata.user_options[optname]
            if opt.yielding and ':' in optname and raw_optname in self.coredata.user_options:
                popt = self.coredata.user_options[raw_optname]
                if type(opt) is type(popt):
                    opt = popt
                else:
                    # Get class name, then option type as a string
                    opt_type = opt.__class__.__name__[4:][:-6].lower()
                    popt_type = popt.__class__.__name__[4:][:-6].lower()
                    # This is not a hard error to avoid dependency hell, the workaround
                    # when this happens is to simply set the subproject's option directly.
                    mlog.warning('Option {0!r} of type {1!r} in subproject {2!r} cannot yield '
                                 'to parent option of type {3!r}, ignoring parent value. '
                                 'Use -D{2}:{0}=value to set the value for this option manually'
                                 '.'.format(raw_optname, opt_type, self.subproject, popt_type),
                                 location=self.current_node)
            return opt
        except KeyError:
            pass

        raise InterpreterException('Tried to access unknown option "%s".' % optname)

    @stringArgs
    @noKwargs
    def func_get_option(self, nodes, args, kwargs):
        if len(args) != 1:
            raise InterpreterException('Argument required for get_option.')
        optname = args[0]
        if ':' in optname:
            raise InterpreterException('Having a colon in option name is forbidden, '
                                       'projects are not allowed to directly access '
                                       'options of other subprojects.')
        opt = self.get_option_internal(optname)
        if isinstance(opt, coredata.UserFeatureOption):
            return FeatureOptionHolder(self.environment, optname, opt)
        elif isinstance(opt, coredata.UserOption):
            return opt.value
        return opt

    @noKwargs
    def func_configuration_data(self, node, args, kwargs):
        if len(args) > 1:
            raise InterpreterException('configuration_data takes only one optional positional argument')
        elif len(args) == 1:
            FeatureNew.single_use('configuration_data dictionary', '0.49.0', self.subproject)
            initial_values = args[0]
            if not isinstance(initial_values, dict):
                raise InterpreterException('configuration_data first argument must be a dictionary')
        else:
            initial_values = {}
        return ConfigurationDataHolder(self.subproject, initial_values)
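    # Illustrative meson.build usage of configuration_data(); the keys and
    # file names are hypothetical:
    #
    #   conf = configuration_data({'HAVE_FOO': true})
    #   configure_file(input: 'config.h.in', output: 'config.h',
    #                  configuration: conf)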
    def set_backend(self):
        # The backend is already set when parsing subprojects
        if self.backend is not None:
            return
        backend = self.coredata.get_builtin_option('backend')
        from .backend import backends
        self.backend = backends.get_backend_from_name(backend, self.build, self)

        if self.backend is None:
            raise InterpreterException('Unknown backend "%s".' % backend)
        if backend != self.backend.name:
            if self.backend.name.startswith('vs'):
                mlog.log('Auto detected Visual Studio backend:', mlog.bold(self.backend.name))
            self.coredata.set_builtin_option('backend', self.backend.name)

        # Only init backend options on first invocation otherwise it would
        # override values previously set from command line.
        if self.environment.first_invocation:
            self.coredata.init_backend_options(backend)

        options = {k: v for k, v in self.environment.raw_options.items() if k.startswith('backend_')}
        self.coredata.set_options(options)

    @stringArgs
    @permittedKwargs(permitted_kwargs['project'])
    def func_project(self, node, args, kwargs):
        if len(args) < 1:
            raise InvalidArguments('Not enough arguments to project(). Needs at least the project name.')
        proj_name, *proj_langs = args
        if ':' in proj_name:
            raise InvalidArguments("Project name {!r} must not contain ':'".format(proj_name))

        # This needs to be evaluated as early as possible, as meson uses this
        # for things like deprecation testing.
        if 'meson_version' in kwargs:
            cv = coredata.version
            pv = kwargs['meson_version']
            if not mesonlib.version_compare(cv, pv):
                raise InterpreterException('Meson version is %s but project requires %s' % (cv, pv))
            mesonlib.project_meson_versions[self.subproject] = kwargs['meson_version']

        if os.path.exists(self.option_file):
            oi = optinterpreter.OptionInterpreter(self.subproject)
            oi.process(self.option_file)
            self.coredata.merge_user_options(oi.options)
            self.add_build_def_file(self.option_file)

        # Do not set default_options on reconfigure otherwise it would override
        # values previously set from command line. That means that changing
        # default_options in a project will trigger a reconfigure but won't
        # have any effect.
        self.project_default_options = mesonlib.stringlistify(kwargs.get('default_options', []))
        self.project_default_options = coredata.create_options_dict(self.project_default_options)
        if self.environment.first_invocation:
            default_options = self.project_default_options.copy()
            default_options.update(self.default_project_options)
            self.coredata.init_builtins(self.subproject)
        else:
            default_options = {}
        self.coredata.set_default_options(default_options, self.subproject, self.environment)

        if not self.is_subproject():
            self.build.project_name = proj_name
        self.active_projectname = proj_name
        self.project_version = kwargs.get('version', 'undefined')
        if self.build.project_version is None:
            self.build.project_version = self.project_version
        proj_license = mesonlib.stringlistify(kwargs.get('license', 'unknown'))
        self.build.dep_manifest[proj_name] = {'version': self.project_version,
                                              'license': proj_license}
        if self.subproject in self.build.projects:
            raise InvalidCode('Second call to project().')

        # spdirname is the subproject_dir for this project, relative to self.subdir.
        # self.subproject_dir is the subproject_dir for the main project, relative to top source dir.
        spdirname = kwargs.get('subproject_dir')
        if spdirname:
            if not isinstance(spdirname, str):
                raise InterpreterException('Subproject_dir must be a string')
            if os.path.isabs(spdirname):
                raise InterpreterException('Subproject_dir must not be an absolute path.')
            if spdirname.startswith('.'):
                raise InterpreterException('Subproject_dir must not begin with a period.')
            if '..' in spdirname:
                raise InterpreterException('Subproject_dir must not contain a ".." segment.')
            if not self.is_subproject():
                self.subproject_dir = spdirname
        else:
            spdirname = 'subprojects'
        self.build.subproject_dir = self.subproject_dir

        # Load wrap files from this (sub)project.
        wrap_mode = self.coredata.get_builtin_option('wrap_mode')
        if not self.is_subproject() or wrap_mode != WrapMode.nopromote:
            subdir = os.path.join(self.subdir, spdirname)
            r = wrap.Resolver(self.environment.get_source_dir(), subdir, wrap_mode)
            if self.is_subproject():
                self.environment.wrap_resolver.merge_wraps(r)
            else:
                self.environment.wrap_resolver = r

        self.build.projects[self.subproject] = proj_name
        mlog.log('Project name:', mlog.bold(proj_name))
        mlog.log('Project version:', mlog.bold(self.project_version))

        self.add_languages(proj_langs, True, MachineChoice.HOST)
        self.add_languages(proj_langs, False, MachineChoice.BUILD)

        self.set_backend()
        if not self.is_subproject():
            self.check_stdlibs()
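    # Illustrative meson.build usage of project() handled by func_project()
    # above; the names and versions are hypothetical:
    #
    #   project('mylib', 'c',
    #           version: '1.2.3',
    #           meson_version: '>= 0.56.0',
    #           default_options: ['warning_level=3'])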
    @FeatureNewKwargs('add_languages', '0.54.0', ['native'])
    @permittedKwargs(permitted_kwargs['add_languages'])
    @stringArgs
    def func_add_languages(self, node, args, kwargs):
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            for lang in sorted(args, key=compilers.sort_clink):
                mlog.log('Compiler for language', mlog.bold(lang), 'skipped: feature', mlog.bold(feature), 'disabled')
            return False
        if 'native' in kwargs:
            return self.add_languages(args, required, self.machine_from_native_kwarg(kwargs))
        else:
            # absent 'native' means 'both' for backwards compatibility
            tv = FeatureNew.get_target_version(self.subproject)
            if FeatureNew.check_version(tv, '0.54.0'):
                mlog.warning('add_languages is missing native:, assuming languages are wanted for both host and build.',
                             location=self.current_node)

            success = self.add_languages(args, False, MachineChoice.BUILD)
            success &= self.add_languages(args, required, MachineChoice.HOST)
            return success

    def get_message_string_arg(self, arg):
        if isinstance(arg, list):
            argstr = stringifyUserArguments(arg)
        elif isinstance(arg, dict):
            argstr = stringifyUserArguments(arg)
        elif isinstance(arg, str):
            argstr = arg
        elif isinstance(arg, int):
            argstr = str(arg)
        else:
            raise InvalidArguments('Function accepts only strings, integers, lists and lists thereof.')
        return argstr

    @noArgsFlattening
    @noKwargs
    def func_message(self, node, args, kwargs):
        if len(args) > 1:
            FeatureNew.single_use('message with more than one argument', '0.54.0', self.subproject)
        args_str = [self.get_message_string_arg(i) for i in args]
        self.message_impl(args_str)

    def message_impl(self, args):
        mlog.log(mlog.bold('Message:'), *args)

    @noArgsFlattening
    @FeatureNewKwargs('summary', '0.54.0', ['list_sep'])
    @permittedKwargs({'section', 'bool_yn', 'list_sep'})
    @FeatureNew('summary', '0.53.0')
    def func_summary(self, node, args, kwargs):
        if len(args) == 1:
            if not isinstance(args[0], dict):
                raise InterpreterException('Summary first argument must be dictionary.')
            values = args[0]
        elif len(args) == 2:
            if not isinstance(args[0], str):
                raise InterpreterException('Summary first argument must be string.')
            values = {args[0]: args[1]}
        else:
            raise InterpreterException('Summary accepts at most 2 arguments.')
        section = kwargs.get('section', '')
        if not isinstance(section, str):
            raise InterpreterException('Summary\'s section keyword argument must be string.')
        self.summary_impl(section, values, kwargs)

    def summary_impl(self, section, values, kwargs):
        if self.subproject not in self.summary:
            self.summary[self.subproject] = Summary(self.active_projectname, self.project_version)
        self.summary[self.subproject].add_section(section, values, kwargs)
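    # Illustrative meson.build usage of summary() handled above; the section
    # and keys are hypothetical:
    #
    #   summary({'docs': get_option('docs')}, section: 'Features', bool_yn: true)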
        all_subprojects = collections.OrderedDict()
        for name, subp in sorted(self.subprojects.items()):
            value = subp.found()
            if subp.disabled_feature:
                value = [value, 'Feature {!r} disabled'.format(subp.disabled_feature)]
            elif subp.exception:
                value = [value, str(subp.exception)]
            elif subp.warnings > 0:
                value = [value, '{} warnings'.format(subp.warnings)]
            all_subprojects[name] = value
        if all_subprojects:
            self.summary_impl('Subprojects', all_subprojects,
                              {'bool_yn': True,
                               'list_sep': ' ',
                               })
        # Print all summaries, main project last.
        mlog.log('')  # newline
        main_summary = self.summary.pop('', None)
        for _, summary in sorted(self.summary.items()):
            summary.dump()
        if main_summary:
            main_summary.dump()

    @noArgsFlattening
    @FeatureNew('warning', '0.44.0')
    @noKwargs
    def func_warning(self, node, args, kwargs):
        if len(args) > 1:
            FeatureNew.single_use('warning with more than one argument', '0.54.0', self.subproject)
        args_str = [self.get_message_string_arg(i) for i in args]
        mlog.warning(*args_str, location=node)

    @noKwargs
    def func_error(self, node, args, kwargs):
        self.validate_arguments(args, 1, [str])
        raise InterpreterException('Problem encountered: ' + args[0])

    @noKwargs
    def func_exception(self, node, args, kwargs):
        self.validate_arguments(args, 0, [])
        raise Exception()

    def add_languages(self, args: T.Sequence[str], required: bool, for_machine: MachineChoice) -> bool:
        success = self.add_languages_for(args, required, for_machine)
        if not self.coredata.is_cross_build():
            self.coredata.copy_build_options_from_regular_ones()
        self._redetect_machines()
        return success

    def should_skip_sanity_check(self, for_machine: MachineChoice) -> bool:
        should = self.environment.properties.host.get('skip_sanity_check', False)
        if not isinstance(should, bool):
            raise InterpreterException('Option skip_sanity_check must be a boolean.')
        if for_machine != MachineChoice.HOST and not should:
            return False
        if not self.environment.is_cross_build() and not should:
            return False
        return should

    def add_languages_for(self, args, required, for_machine: MachineChoice):
        args = [a.lower() for a in args]
        langs = set(self.coredata.compilers[for_machine].keys())
        langs.update(args)
        if 'vala' in langs:
            if 'c' not in langs:
                raise InterpreterException('Compiling Vala requires C. Add C to your project languages and rerun Meson.')

        success = True
        for lang in sorted(args, key=compilers.sort_clink):
            clist = self.coredata.compilers[for_machine]
            machine_name = for_machine.get_lower_case_name()
            if lang in clist:
                comp = clist[lang]
            else:
                try:
                    comp = self.environment.detect_compiler_for(lang, for_machine)
                    if comp is None:
                        raise InvalidArguments('Tried to use unknown language "%s".' % lang)
                    if self.should_skip_sanity_check(for_machine):
                        mlog.log_once('Cross compiler sanity tests disabled via the cross file.')
                    else:
                        comp.sanity_check(self.environment.get_scratch_dir(), self.environment)
                except Exception:
                    if not required:
                        mlog.log('Compiler for language',
                                 mlog.bold(lang), 'for the', machine_name,
                                 'machine not found.')
                        success = False
                        continue
                    else:
                        raise

            if for_machine == MachineChoice.HOST or self.environment.is_cross_build():
                logger_fun = mlog.log
            else:
                logger_fun = mlog.debug
            logger_fun(comp.get_display_language(), 'compiler for the', machine_name, 'machine:',
                       mlog.bold(' '.join(comp.get_exelist())), comp.get_version_string())
            if comp.linker is not None:
                logger_fun(comp.get_display_language(), 'linker for the', machine_name, 'machine:',
                           mlog.bold(' '.join(comp.linker.get_exelist())), comp.linker.id, comp.linker.version)
            self.build.ensure_static_linker(comp)

        return success

    def program_from_file_for(self, for_machine, prognames):
        for p in unholder(prognames):
            if isinstance(p, mesonlib.File):
                continue  # Always points to a local (i.e. self generated) file.
            if not isinstance(p, str):
                raise InterpreterException('Executable name must be a string')
            prog = ExternalProgram.from_bin_list(self.environment, for_machine, p)
            if prog.found():
                return ExternalProgramHolder(prog, self.subproject)
        return None

    def program_from_system(self, args, search_dirs, extra_info):
        # Search for scripts relative to current subdir.
        # Do not cache found programs because find_program('foobar')
        # might give different results when run from different source dirs.
        source_dir = os.path.join(self.environment.get_source_dir(), self.subdir)
        for exename in args:
            if isinstance(exename, mesonlib.File):
                if exename.is_built:
                    search_dir = os.path.join(self.environment.get_build_dir(),
                                              exename.subdir)
                else:
                    search_dir = os.path.join(self.environment.get_source_dir(),
                                              exename.subdir)
                exename = exename.fname
                extra_search_dirs = []
            elif isinstance(exename, str):
                search_dir = source_dir
                extra_search_dirs = search_dirs
            else:
                raise InvalidArguments('find_program only accepts strings and '
                                       'files, not {!r}'.format(exename))
            extprog = dependencies.ExternalProgram(exename, search_dir=search_dir,
                                                   extra_search_dirs=extra_search_dirs,
                                                   silent=True)
            progobj = ExternalProgramHolder(extprog, self.subproject)
            if progobj.found():
                extra_info.append('({})'.format(' '.join(progobj.get_command())))
                return progobj

    def program_from_overrides(self, command_names, extra_info):
        for name in command_names:
            if not isinstance(name, str):
                continue
            if name in self.build.find_overrides:
                exe = self.build.find_overrides[name]
                extra_info.append(mlog.blue('(overridden)'))
                return ExternalProgramHolder(exe, self.subproject, self.backend)
        return None

    def store_name_lookups(self, command_names):
        for name in command_names:
            if isinstance(name, str):
                self.build.searched_programs.add(name)

    def add_find_program_override(self, name, exe):
        if name in self.build.searched_programs:
            raise InterpreterException('Tried to override finding of executable "%s" which has already been found.'
                                       % name)
        if name in self.build.find_overrides:
            raise InterpreterException('Tried to override executable "%s" which has already been overridden.'
                                       % name)
        self.build.find_overrides[name] = exe

    def notfound_program(self, args):
        return ExternalProgramHolder(dependencies.NonExistingExternalProgram(' '.join(args)), self.subproject)

    # TODO update modules to always pass `for_machine`. It is bad-form to assume
    # the host machine.
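    # Lookup order: user overrides first, then wrap providers (immediately when
    # forced), then machine-file binaries, and finally the system PATH.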
    def find_program_impl(self, args, for_machine: MachineChoice = MachineChoice.HOST,
                          required=True, silent=True, wanted='', search_dirs=None,
                          version_func=None):
        args = mesonlib.listify(args)

        extra_info = []
        progobj = self.program_lookup(args, for_machine, required, search_dirs, extra_info)
        if progobj is None:
            progobj = self.notfound_program(args)

        if not progobj.found():
            mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO'))
            if required:
                m = 'Program {!r} not found'
                raise InterpreterException(m.format(progobj.get_name()))
            return progobj

        if wanted:
            if version_func:
                version = version_func(progobj)
            else:
                version = progobj.get_version(self)
            is_found, not_found, found = mesonlib.version_compare_many(version, wanted)
            if not is_found:
                mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.red('NO'),
                         'found', mlog.normal_cyan(version), 'but need:',
                         mlog.bold(', '.join(["'{}'".format(e) for e in not_found])), *extra_info)
                if required:
                    m = 'Invalid version of program, need {!r} {!r} found {!r}.'
                    raise InterpreterException(m.format(progobj.get_name(), not_found, version))
                return self.notfound_program(args)
            extra_info.insert(0, mlog.normal_cyan(version))

        # Only store successful lookups
        self.store_name_lookups(args)
        mlog.log('Program', mlog.bold(progobj.get_name()), 'found:', mlog.green('YES'), *extra_info)
        return progobj

    def program_lookup(self, args, for_machine, required, search_dirs, extra_info):
        progobj = self.program_from_overrides(args, extra_info)
        if progobj:
            return progobj

        fallback = None
        wrap_mode = self.coredata.get_builtin_option('wrap_mode')
        if wrap_mode != WrapMode.nofallback and self.environment.wrap_resolver:
            fallback = self.environment.wrap_resolver.find_program_provider(args)
        if fallback and wrap_mode == WrapMode.forcefallback:
            return self.find_program_fallback(fallback, args, required, extra_info)

        progobj = self.program_from_file_for(for_machine, args)
        if progobj is None:
            progobj = self.program_from_system(args, search_dirs, extra_info)
        if progobj is None and args[0].endswith('python3'):
            prog = dependencies.ExternalProgram('python3', mesonlib.python_command, silent=True)
            progobj = ExternalProgramHolder(prog, self.subproject) if prog.found() else None

        if progobj is None and fallback and required:
            progobj = self.find_program_fallback(fallback, args, required, extra_info)

        return progobj

    def find_program_fallback(self, fallback, args, required, extra_info):
        mlog.log('Fallback to subproject', mlog.bold(fallback), 'which provides program',
                 mlog.bold(' '.join(args)))
        sp_kwargs = { 'required': required }
        self.do_subproject(fallback, 'meson', sp_kwargs)
        return self.program_from_overrides(args, extra_info)

    @FeatureNewKwargs('find_program', '0.53.0', ['dirs'])
    @FeatureNewKwargs('find_program', '0.52.0', ['version'])
    @FeatureNewKwargs('find_program', '0.49.0', ['disabler'])
    @disablerIfNotFound
    @permittedKwargs(permitted_kwargs['find_program'])
    def func_find_program(self, node, args, kwargs):
        if not args:
            raise InterpreterException('No program name specified.')

        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            mlog.log('Program', mlog.bold(' '.join(args)), 'skipped: feature', mlog.bold(feature), 'disabled')
            return self.notfound_program(args)

        search_dirs = extract_search_dirs(kwargs)
        wanted = mesonlib.stringlistify(kwargs.get('version', []))
        for_machine = self.machine_from_native_kwarg(kwargs)
        return self.find_program_impl(args, for_machine, required=required,
                                      silent=False, wanted=wanted,
                                      search_dirs=search_dirs)
    def func_find_library(self, node, args, kwargs):
        raise InvalidCode('find_library() is removed, use meson.get_compiler(\'name\').find_library() instead.\n'
                          'Look here for documentation: http://mesonbuild.com/Reference-manual.html#compiler-object\n'
                          'Look here for example: http://mesonbuild.com/howtox.html#add-math-library-lm-portably\n'
                          )

    def _find_cached_dep(self, name, display_name, kwargs):
        # Check if we want this as a build-time / build machine or run-time /
        # host machine dep.
        for_machine = self.machine_from_native_kwarg(kwargs)
        identifier = dependencies.get_dep_identifier(name, kwargs)
        wanted_vers = mesonlib.stringlistify(kwargs.get('version', []))

        override = self.build.dependency_overrides[for_machine].get(identifier)
        if override:
            info = [mlog.blue('(overridden)' if override.explicit else '(cached)')]
            cached_dep = override.dep
            # We don't implicitly override not-found dependencies, but user could
            # have explicitly called meson.override_dependency() with a not-found
            # dep.
            if not cached_dep.found():
                mlog.log('Dependency', mlog.bold(display_name),
                         'found:', mlog.red('NO'), *info)
                return identifier, cached_dep
            found_vers = cached_dep.get_version()
            if not self.check_version(wanted_vers, found_vers):
                mlog.log('Dependency', mlog.bold(name),
                         'found:', mlog.red('NO'),
                         'found', mlog.normal_cyan(found_vers), 'but need:',
                         mlog.bold(', '.join(["'{}'".format(e) for e in wanted_vers])),
                         *info)
                return identifier, NotFoundDependency(self.environment)
        else:
            info = [mlog.blue('(cached)')]
            cached_dep = self.coredata.deps[for_machine].get(identifier)
            if cached_dep:
                found_vers = cached_dep.get_version()
                if not self.check_version(wanted_vers, found_vers):
                    return identifier, None

        if cached_dep:
            if found_vers:
                info = [mlog.normal_cyan(found_vers), *info]
            mlog.log('Dependency', mlog.bold(display_name),
                     'found:', mlog.green('YES'), *info)
            return identifier, cached_dep

        return identifier, None

    @staticmethod
    def check_version(wanted, found):
        if not wanted:
            return True
        if found == 'undefined' or not mesonlib.version_compare_many(found, wanted)[0]:
            return False
        return True

    def notfound_dependency(self):
        return DependencyHolder(NotFoundDependency(self.environment), self.subproject)

    def verify_fallback_consistency(self, subp_name, varname, cached_dep):
        subi = self.get_subproject(subp_name)
        if not cached_dep or not varname or not subi or not cached_dep.found():
            return
        dep = subi.get_variable_method([varname], {})
        if dep.held_object != cached_dep:
            m = 'Inconsistency: Subproject has overridden the dependency with another variable than {!r}'
            raise DependencyException(m.format(varname))

    def get_subproject_dep(self, name, display_name, subp_name, varname, kwargs):
        required = kwargs.get('required', True)
        wanted = mesonlib.stringlistify(kwargs.get('version', []))
        dep = self.notfound_dependency()

        # Verify the subproject is found
        subproject = self.subprojects.get(subp_name)
        if not subproject or not subproject.found():
            mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                     mlog.bold(subproject.subdir), 'found:', mlog.red('NO'),
                     mlog.blue('(subproject failed to configure)'))
            if required:
                m = 'Subproject {} failed to configure for dependency {}'
                raise DependencyException(m.format(subproject.subdir, display_name))
            return dep

        extra_info = []
        try:
            # Check if the subproject has overridden the dependency
            _, cached_dep = self._find_cached_dep(name, display_name, kwargs)
            if cached_dep:
                if varname:
                    self.verify_fallback_consistency(subp_name, varname, cached_dep)
                if required and not cached_dep.found():
                    m = 'Dependency {!r} is not satisfied'
                    raise DependencyException(m.format(display_name))
                return DependencyHolder(cached_dep, self.subproject)
            elif varname is None:
                mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                         mlog.bold(subproject.subdir), 'found:', mlog.red('NO'))
                if required:
                    m = 'Subproject {} did not override dependency {}'
                    raise DependencyException(m.format(subproject.subdir, display_name))
                return self.notfound_dependency()
            else:
                # The subproject did not override the dependency, but we know the
                # variable name to take.
                dep = subproject.get_variable_method([varname], {})
        except InvalidArguments:
            # This is raised by get_variable_method() if varname does not exist
            # in the subproject. Just add the reason in the not-found message
            # that will be printed later.
            extra_info.append(mlog.blue('(Variable {!r} not found)'.format(varname)))

        if not isinstance(dep, DependencyHolder):
            raise InvalidCode('Fetched variable {!r} in the subproject {!r} is '
                              'not a dependency object.'.format(varname, subp_name))

        if not dep.found():
            mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                     mlog.bold(subproject.subdir), 'found:', mlog.red('NO'), *extra_info)
            if required:
                raise DependencyException('Could not find dependency {} in subproject {}'
                                          ''.format(varname, subp_name))
            return dep

        found = dep.held_object.get_version()
        if not self.check_version(wanted, found):
            mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                     mlog.bold(subproject.subdir), 'found:', mlog.red('NO'),
                     'found', mlog.normal_cyan(found), 'but need:',
                     mlog.bold(', '.join(["'{}'".format(e) for e in wanted])))
            if required:
                raise DependencyException('Version {} of subproject dependency {} already '
                                          'cached, requested incompatible version {} for '
                                          'dep {}'.format(found, subp_name, wanted, display_name))
            return self.notfound_dependency()

        found = mlog.normal_cyan(found) if found else None
        mlog.log('Dependency', mlog.bold(display_name), 'from subproject',
                 mlog.bold(subproject.subdir), 'found:', mlog.green('YES'), found)
        return dep

    def _handle_featurenew_dependencies(self, name):
        'Do a feature check on dependencies used by this subproject'
        if name == 'mpi':
            FeatureNew.single_use('MPI Dependency', '0.42.0', self.subproject)
        elif name == 'pcap':
            FeatureNew.single_use('Pcap Dependency', '0.42.0', self.subproject)
        elif name == 'vulkan':
            FeatureNew.single_use('Vulkan Dependency', '0.42.0', self.subproject)
        elif name == 'libwmf':
            FeatureNew.single_use('LibWMF Dependency', '0.44.0', self.subproject)
        elif name == 'openmp':
            FeatureNew.single_use('OpenMP Dependency', '0.46.0', self.subproject)

    @FeatureNewKwargs('dependency', '0.54.0', ['components'])
    @FeatureNewKwargs('dependency', '0.52.0', ['include_type'])
    @FeatureNewKwargs('dependency', '0.50.0', ['not_found_message', 'cmake_module_path', 'cmake_args'])
    @FeatureNewKwargs('dependency', '0.49.0', ['disabler'])
    @FeatureNewKwargs('dependency', '0.40.0', ['method'])
    @FeatureNewKwargs('dependency', '0.38.0', ['default_options'])
    @disablerIfNotFound
    @permittedKwargs(permitted_kwargs['dependency'])
    def func_dependency(self, node, args, kwargs):
        self.validate_arguments(args, 1, [str])
        name = args[0]
        display_name = name if name else '(anonymous)'
        mods = extract_as_list(kwargs, 'modules')
        if mods:
            display_name += ' (modules: {})'.format(', '.join(str(i) for i in mods))
        not_found_message = kwargs.get('not_found_message', '')
        if not isinstance(not_found_message, str):
            raise InvalidArguments('The not_found_message must be a string.')
        try:
            d = self.dependency_impl(name, display_name, kwargs)
        except Exception:
            if not_found_message:
                self.message_impl([not_found_message])
            raise
        assert isinstance(d, DependencyHolder)
        if not d.found() and not_found_message:
            self.message_impl([not_found_message])
        # Ensure the correct include type
        if 'include_type' in kwargs:
            wanted = kwargs['include_type']
            actual = d.include_type_method([], {})
            if wanted != actual:
                mlog.debug('Current include type of {} is {}. Converting to requested {}'.format(name, actual, wanted))
                d = d.as_system_method([wanted], {})
        # Override this dependency to have consistent results in subsequent
        # dependency lookups.
        if name and d.found():
            for_machine = self.machine_from_native_kwarg(kwargs)
            identifier = dependencies.get_dep_identifier(name, kwargs)
            if identifier not in self.build.dependency_overrides[for_machine]:
                self.build.dependency_overrides[for_machine][identifier] = \
                    build.DependencyOverride(d.held_object, node, explicit=False)
        return d

    def dependency_impl(self, name, display_name, kwargs, force_fallback=False):
        disabled, required, feature = extract_required_kwarg(kwargs, self.subproject)
        if disabled:
            mlog.log('Dependency', mlog.bold(display_name), 'skipped: feature', mlog.bold(feature), 'disabled')
            return self.notfound_dependency()

        fallback = kwargs.get('fallback', None)
        allow_fallback = kwargs.get('allow_fallback', None)
        if allow_fallback is not None:
            FeatureNew.single_use('"allow_fallback" keyword argument for dependency', '0.56.0', self.subproject)
            if fallback is not None:
                raise InvalidArguments('"fallback" and "allow_fallback" arguments are mutually exclusive')
            if not isinstance(allow_fallback, bool):
                raise InvalidArguments('"allow_fallback" argument must be boolean')

        # If "fallback" is absent, look for an implicit fallback.
        if name and fallback is None and allow_fallback is not False:
            # Add an implicit fallback if we have a wrap file or a directory with the same name,
            # but only if this dependency is required. It is common to first check for a pkg-config,
            # then fallback to use find_library() and only afterward check again the dependency
            # with a fallback. If the fallback has already been configured then we have to use it
            # even if the dependency is not required.
            provider = self.environment.wrap_resolver.find_dep_provider(name)
            if not provider and allow_fallback is True:
                raise InvalidArguments('Fallback wrap or subproject not found for dependency \'%s\'' % name)
            subp_name = mesonlib.listify(provider)[0]
            if provider and (allow_fallback is True or required or self.get_subproject(subp_name)):
                fallback = provider

        if 'default_options' in kwargs and not fallback:
            mlog.warning('The "default_options" keyword argument does nothing without a fallback subproject.',
                         location=self.current_node)

        # writing just "dependency('')" is an error, because it can only fail
        if name == '' and required and not fallback:
            raise InvalidArguments('Dependency is both required and not-found')

        if '<' in name or '>' in name or '=' in name:
            raise InvalidArguments('Characters <, > and = are forbidden in dependency names. To specify '
                                   'version\n requirements use the \'version\' keyword argument instead.')
        identifier, cached_dep = self._find_cached_dep(name, display_name, kwargs)
        if cached_dep:
            if fallback:
                subp_name, varname = self.get_subproject_infos(fallback)
                self.verify_fallback_consistency(subp_name, varname, cached_dep)
            if required and not cached_dep.found():
                m = 'Dependency {!r} was already checked and was not found'
                raise DependencyException(m.format(display_name))
            return DependencyHolder(cached_dep, self.subproject)

        if fallback:
            # If the dependency has already been configured, possibly by
            # a higher level project, try to use it first.
            subp_name, varname = self.get_subproject_infos(fallback)
            if self.get_subproject(subp_name):
                return self.get_subproject_dep(name, display_name, subp_name, varname, kwargs)

            wrap_mode = self.coredata.get_builtin_option('wrap_mode')
            force_fallback_for = self.coredata.get_builtin_option('force_fallback_for')
            force_fallback = (force_fallback or
                              wrap_mode == WrapMode.forcefallback or
                              name in force_fallback_for or
                              subp_name in force_fallback_for)

        if name != '' and (not fallback or not force_fallback):
            self._handle_featurenew_dependencies(name)
            kwargs['required'] = required and not fallback
            dep = dependencies.find_external_dependency(name, self.environment, kwargs)
            kwargs['required'] = required
            # Only store found-deps in the cache
            # Never add fallback deps to self.coredata.deps since we
            # cannot cache them. They must always be evaluated else
            # we won't actually read all the build files.
            if dep.found():
                for_machine = self.machine_from_native_kwarg(kwargs)
                self.coredata.deps[for_machine].put(identifier, dep)
                return DependencyHolder(dep, self.subproject)

        if fallback:
            return self.dependency_fallback(name, display_name, fallback, kwargs)

        return self.notfound_dependency()

    @FeatureNew('disabler', '0.44.0')
    @noKwargs
    @noPosargs
    def func_disabler(self, node, args, kwargs):
        return Disabler()

    def get_subproject_infos(self, fbinfo):
        fbinfo = mesonlib.stringlistify(fbinfo)
        if len(fbinfo) == 1:
            FeatureNew.single_use('Fallback without variable name', '0.53.0', self.subproject)
            return fbinfo[0], None
        elif len(fbinfo) != 2:
            raise InterpreterException('Fallback info must have one or two items.')
        return fbinfo

    def dependency_fallback(self, name, display_name, fallback, kwargs):
        subp_name, varname = self.get_subproject_infos(fallback)
        required = kwargs.get('required', True)

        # Explicitly listed fallback preferences for specific subprojects
        # take precedence over wrap-mode
        force_fallback_for = self.coredata.get_builtin_option('force_fallback_for')
        if name in force_fallback_for or subp_name in force_fallback_for:
            mlog.log('Looking for a fallback subproject for the dependency',
                     mlog.bold(display_name), 'because:\nUse of fallback was forced for that specific subproject')
        elif self.coredata.get_builtin_option('wrap_mode') == WrapMode.nofallback:
            mlog.log('Not looking for a fallback subproject for the dependency',
                     mlog.bold(display_name), 'because:\nUse of fallback '
                     'dependencies is disabled.')
            if required:
                m = 'Dependency {!r} not found and fallback is disabled'
                raise DependencyException(m.format(display_name))
            return self.notfound_dependency()
        elif self.coredata.get_builtin_option('wrap_mode') == WrapMode.forcefallback:
            mlog.log('Looking for a fallback subproject for the dependency',
                     mlog.bold(display_name), 'because:\nUse of fallback dependencies is forced.')
        else:
            mlog.log('Looking for a fallback subproject for the dependency',
                     mlog.bold(display_name))
        sp_kwargs = {
            'default_options': kwargs.get('default_options', []),
            'required': required,
        }
        self.do_subproject(subp_name, 'meson', sp_kwargs)
        return self.get_subproject_dep(name, display_name, subp_name, varname, kwargs)

    @FeatureNewKwargs('executable', '0.42.0', ['implib'])
    @FeatureNewKwargs('executable', '0.56.0', ['win_subsystem'])
    @FeatureDeprecatedKwargs('executable', '0.56.0', ['gui_app'], extra_message="Use 'win_subsystem' instead.")
    @permittedKwargs(permitted_kwargs['executable'])
    def func_executable(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, ExecutableHolder)

    @permittedKwargs(permitted_kwargs['static_library'])
    def func_static_lib(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, StaticLibraryHolder)

    @permittedKwargs(permitted_kwargs['shared_library'])
    def func_shared_lib(self, node, args, kwargs):
        holder = self.build_target(node, args, kwargs, SharedLibraryHolder)
        holder.held_object.shared_library_only = True
        return holder

    @permittedKwargs(permitted_kwargs['both_libraries'])
    def func_both_lib(self, node, args, kwargs):
        return self.build_both_libraries(node, args, kwargs)

    @FeatureNew('shared_module', '0.37.0')
    @permittedKwargs(permitted_kwargs['shared_module'])
    def func_shared_module(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, SharedModuleHolder)

    @permittedKwargs(permitted_kwargs['library'])
    def func_library(self, node, args, kwargs):
        return self.build_library(node, args, kwargs)

    @permittedKwargs(permitted_kwargs['jar'])
    def func_jar(self, node, args, kwargs):
        return self.build_target(node, args, kwargs, JarHolder)

    @FeatureNewKwargs('build_target', '0.40.0', ['link_whole', 'override_options'])
    @permittedKwargs(permitted_kwargs['build_target'])
    def func_build_target(self, node, args, kwargs):
        if 'target_type' not in kwargs:
            raise InterpreterException('Missing target_type keyword argument')
        target_type = kwargs.pop('target_type')
        if target_type == 'executable':
            return self.build_target(node, args, kwargs, ExecutableHolder)
        elif target_type == 'shared_library':
            return self.build_target(node, args, kwargs, SharedLibraryHolder)
        elif target_type == 'shared_module':
            FeatureNew('build_target(target_type: \'shared_module\')',
                       '0.51.0').use(self.subproject)
            return self.build_target(node, args, kwargs, SharedModuleHolder)
        elif target_type == 'static_library':
            return self.build_target(node, args, kwargs, StaticLibraryHolder)
        elif target_type == 'both_libraries':
            return self.build_both_libraries(node, args, kwargs)
        elif target_type == 'library':
            return self.build_library(node, args, kwargs)
        elif target_type == 'jar':
            return self.build_target(node, args, kwargs, JarHolder)
        else:
            raise InterpreterException('Unknown target_type.')

    @permittedKwargs(permitted_kwargs['vcs_tag'])
    @FeatureDeprecatedKwargs('custom_target', '0.47.0', ['build_always'],
                             'combine build_by_default and build_always_stale instead.')
    def func_vcs_tag(self, node, args, kwargs):
        if 'input' not in kwargs or 'output' not in kwargs:
            raise InterpreterException('Keyword arguments input and output must exist')
        if 'fallback' not in kwargs:
            FeatureNew.single_use('Optional fallback in vcs_tag', '0.41.0', self.subproject)
        fallback = kwargs.pop('fallback', self.project_version)
        if not isinstance(fallback, str):
            raise InterpreterException('Keyword argument fallback must be a string.')
        replace_string = kwargs.pop('replace_string', '@VCS_TAG@')
        regex_selector = '(.*)'  # default regex selector for custom command: use complete output
        vcs_cmd = kwargs.get('command', None)
        if vcs_cmd and not isinstance(vcs_cmd, list):
            vcs_cmd = [vcs_cmd]
        source_dir = os.path.normpath(os.path.join(self.environment.get_source_dir(), self.subdir))
        if vcs_cmd:
            # Is the command an executable in path or maybe a script in the source tree?
            vcs_cmd[0] = shutil.which(vcs_cmd[0]) or os.path.join(source_dir, vcs_cmd[0])
        else:
            vcs = mesonlib.detect_vcs(source_dir)
            if vcs:
                mlog.log('Found %s repository at %s' % (vcs['name'], vcs['wc_dir']))
                vcs_cmd = vcs['get_rev'].split()
                regex_selector = vcs['rev_regex']
            else:
                vcs_cmd = [' ']  # executing this cmd will fail in vcstagger.py and force to use the fallback string
        # vcstagger.py parameters: infile, outfile, fallback, source_dir, replace_string, regex_selector, command...
        kwargs['command'] = self.environment.get_build_command() + \
            ['--internal',
             'vcstagger',
             '@INPUT0@',
             '@OUTPUT0@',
             fallback,
             source_dir,
             replace_string,
             regex_selector] + vcs_cmd
        kwargs.setdefault('build_by_default', True)
        kwargs.setdefault('build_always_stale', True)
        return self._func_custom_target_impl(node, [kwargs['output']], kwargs)

    @FeatureNew('subdir_done', '0.46.0')
    @stringArgs
    def func_subdir_done(self, node, args, kwargs):
        if len(kwargs) > 0:
            raise InterpreterException('exit does not take named arguments')
        if len(args) > 0:
            raise InterpreterException('exit does not take any arguments')
        raise SubdirDoneRequest()

    @stringArgs
    @FeatureNewKwargs('custom_target', '0.48.0', ['console'])
    @FeatureNewKwargs('custom_target', '0.47.0', ['install_mode', 'build_always_stale'])
    @FeatureNewKwargs('custom_target', '0.40.0', ['build_by_default'])
    @permittedKwargs(permitted_kwargs['custom_target'])
    def func_custom_target(self, node, args, kwargs):
        if len(args) != 1:
            raise InterpreterException('custom_target: Only one positional argument is allowed, and it must be a string name')
        if 'depfile' in kwargs and ('@BASENAME@' in kwargs['depfile'] or '@PLAINNAME@' in kwargs['depfile']):
            FeatureNew.single_use('substitutions in custom_target depfile', '0.47.0', self.subproject)
        return self._func_custom_target_impl(node, args, kwargs)

    def _func_custom_target_impl(self, node, args, kwargs):
        'Implementation-only, without FeatureNew checks, for internal use'
        name = args[0]
        kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
        if 'input' in kwargs:
            try:
                kwargs['input'] = self.source_strings_to_files(extract_as_list(kwargs, 'input'))
            except mesonlib.MesonException:
                mlog.warning('''Custom target input \'%s\' can\'t be converted to File object(s).
This will become a hard error in the future.''' % kwargs['input'], location=self.current_node)
        tg = CustomTargetHolder(build.CustomTarget(name, self.subdir, self.subproject, kwargs, backend=self.backend), self)
        self.add_target(name, tg.held_object)
        return tg

    @permittedKwargs(permitted_kwargs['run_target'])
    def func_run_target(self, node, args, kwargs):
        if len(args) > 1:
            raise InvalidCode('Run_target takes only one positional argument: the target name.')
        elif len(args) == 1:
            if 'command' not in kwargs:
                raise InterpreterException('Missing "command" keyword argument')
            all_args = extract_as_list(kwargs, 'command')
            deps = unholder(extract_as_list(kwargs, 'depends'))
        else:
            raise InterpreterException('Run_target needs at least one positional argument.')

        cleaned_args = []
        for i in unholder(listify(all_args)):
            if not isinstance(i, (str, build.BuildTarget, build.CustomTarget, dependencies.ExternalProgram, mesonlib.File)):
                mlog.debug('Wrong type:', str(i))
                raise InterpreterException('Invalid argument to run_target.')
            if isinstance(i, dependencies.ExternalProgram) and not i.found():
                raise InterpreterException('Tried to use non-existing executable {!r}'.format(i.name))
            cleaned_args.append(i)
        name = args[0]
        if not isinstance(name, str):
            raise InterpreterException('First argument must be a string.')
        cleaned_deps = []
        for d in deps:
            if not isinstance(d, (build.BuildTarget, build.CustomTarget)):
                raise InterpreterException('Depends items must be build targets.')
            cleaned_deps.append(d)
        command, *cmd_args = cleaned_args
        tg = RunTargetHolder(build.RunTarget(name, command, cmd_args, cleaned_deps, self.subdir, self.subproject), self)
        self.add_target(name, tg.held_object)
        full_name = (self.subproject, name)
        assert(full_name not in self.build.run_target_names)
        self.build.run_target_names.add(full_name)
        return tg

    @FeatureNew('alias_target', '0.52.0')
    @noKwargs
    def func_alias_target(self, node, args, kwargs):
        if len(args) < 2:
            raise InvalidCode('alias_target takes at least 2 arguments.')
        name = args[0]
        if not isinstance(name, str):
            raise InterpreterException('First argument must be a string.')
        deps = unholder(listify(args[1:]))
        for d in deps:
            if not isinstance(d, (build.BuildTarget, build.CustomTarget)):
                raise InterpreterException('Depends items must be build targets.')
        tg = RunTargetHolder(build.AliasTarget(name, deps, self.subdir, self.subproject), self)
        self.add_target(name, tg.held_object)
        return tg

    @permittedKwargs(permitted_kwargs['generator'])
    def func_generator(self, node, args, kwargs):
        gen = GeneratorHolder(self, args, kwargs)
        self.generators.append(gen)
        return gen

    @FeatureNewKwargs('benchmark', '0.46.0', ['depends'])
    @FeatureNewKwargs('benchmark', '0.52.0', ['priority'])
    @permittedKwargs(permitted_kwargs['benchmark'])
    def func_benchmark(self, node, args, kwargs):
        # is_parallel isn't valid here, so make sure it isn't passed
        if 'is_parallel' in kwargs:
            del kwargs['is_parallel']
        self.add_test(node, args, kwargs, False)

    @FeatureNewKwargs('test', '0.46.0', ['depends'])
    @FeatureNewKwargs('test', '0.52.0', ['priority'])
    @permittedKwargs(permitted_kwargs['test'])
    def func_test(self, node, args, kwargs):
        if kwargs.get('protocol') == 'gtest':
            FeatureNew.single_use('"gtest" protocol for tests', '0.55.0', self.subproject)
        self.add_test(node, args, kwargs, True)

    def unpack_env_kwarg(self, kwargs) -> build.EnvironmentVariables:
        envlist = kwargs.get('env', EnvironmentVariablesHolder())
        if isinstance(envlist, EnvironmentVariablesHolder):
            env = envlist.held_object
        elif isinstance(envlist, dict):
            FeatureNew.single_use('environment dictionary', '0.52.0', self.subproject)
            env = EnvironmentVariablesHolder(envlist)
            env = env.held_object
        else:
            envlist = listify(envlist)
            # Convert from array to environment object
            env = EnvironmentVariablesHolder(envlist)
            env = env.held_object
        return env

    def add_test(self, node, args, kwargs, is_base_test):
        if len(args) != 2:
            raise InterpreterException('test expects 2 arguments, {} given'.format(len(args)))
        name = args[0]
        if not isinstance(name, str):
            raise InterpreterException('First argument of test must be a string.')
        if ':' in name:
            mlog.deprecation('":" is not allowed in test name "{}", it has been replaced with "_"'.format(name),
                             location=node)
            name = name.replace(':', '_')
        exe = args[1]
        if not isinstance(exe, (ExecutableHolder, JarHolder, ExternalProgramHolder)):
            if isinstance(exe, mesonlib.File):
                exe = self.func_find_program(node, args[1], {})
            else:
                raise InterpreterException('Second argument must be executable.')
        par = kwargs.get('is_parallel', True)
        if not isinstance(par, bool):
            raise InterpreterException('Keyword argument is_parallel must be a boolean.')
        cmd_args = unholder(extract_as_list(kwargs, 'args'))
        for i in cmd_args:
            if not isinstance(i, (str, mesonlib.File, build.Target)):
                raise InterpreterException('Command line arguments must be strings, files or targets.')
        env = self.unpack_env_kwarg(kwargs)
        should_fail = kwargs.get('should_fail', False)
        if not isinstance(should_fail, bool):
            raise InterpreterException('Keyword argument should_fail must be a boolean.')
        timeout = kwargs.get('timeout', 30)
        if 'workdir' in kwargs:
            workdir = kwargs['workdir']
            if not isinstance(workdir, str):
                raise InterpreterException('Workdir keyword argument must be a string.')
            if not os.path.isabs(workdir):
                raise InterpreterException('Workdir keyword argument must be an absolute path.')
        else:
            workdir = None
        if not isinstance(timeout, int):
            raise InterpreterException('Timeout must be an integer.')
        protocol = kwargs.get('protocol', 'exitcode')
        if protocol not in {'exitcode', 'tap', 'gtest'}:
            raise InterpreterException('Protocol must be "exitcode", "tap", or "gtest".')
        suite = []
        prj = self.subproject if self.is_subproject() else self.build.project_name
        for s in mesonlib.stringlistify(kwargs.get('suite', '')):
            if len(s) > 0:
                s = ':' + s
            suite.append(prj.replace(' ', '_').replace(':', '_') + s)
        depends = unholder(extract_as_list(kwargs, 'depends'))
        for dep in depends:
            if not isinstance(dep, (build.CustomTarget, build.BuildTarget)):
                raise InterpreterException('Depends items must be build targets.')
        priority = kwargs.get('priority', 0)
        if not isinstance(priority, int):
            raise InterpreterException('Keyword argument priority must be an integer.')
        t = Test(name, prj, suite, exe.held_object, depends, par, cmd_args,
                 env, should_fail, timeout, workdir, protocol, priority)
        if is_base_test:
            self.build.tests.append(t)
            mlog.debug('Adding test', mlog.bold(name, True))
        else:
            self.build.benchmarks.append(t)
            mlog.debug('Adding benchmark', mlog.bold(name, True))

    @FeatureNewKwargs('install_headers', '0.47.0', ['install_mode'])
    @permittedKwargs(permitted_kwargs['install_headers'])
    def func_install_headers(self, node, args, kwargs):
        source_files = self.source_strings_to_files(args)
        kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
        h = Headers(source_files, kwargs)
        self.build.headers.append(h)
        return h

    @FeatureNewKwargs('install_man', '0.47.0', ['install_mode'])
    @permittedKwargs(permitted_kwargs['install_man'])
    def func_install_man(self, node, args, kwargs):
        fargs = self.source_strings_to_files(args)
        kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs)
        m = Man(fargs, kwargs)
        self.build.man.append(m)
        return m

    @FeatureNewKwargs('subdir', '0.44.0', ['if_found'])
    @permittedKwargs(permitted_kwargs['subdir'])
    def func_subdir(self, node, args, kwargs):
        self.validate_arguments(args, 1, [str])
        mesonlib.check_direntry_issues(args)
        if '..' in args[0]:
            raise InvalidArguments('Subdir contains ..')
        if self.subdir == '' and args[0] == self.subproject_dir:
            raise InvalidArguments('Must not go into subprojects dir with subdir(), use subproject() instead.')
        if self.subdir == '' and args[0].startswith('meson-'):
            raise InvalidArguments('The "meson-" prefix is reserved and cannot be used for top-level subdir().')
        for i in mesonlib.extract_as_list(kwargs, 'if_found'):
            if not hasattr(i, 'found_method'):
                raise InterpreterException('Object used in if_found does not have a found method.')
            if not i.found_method([], {}):
                return
        prev_subdir = self.subdir
        subdir = os.path.join(prev_subdir, args[0])
        if os.path.isabs(subdir):
            raise InvalidArguments('Subdir argument must be a relative path.')
        absdir = os.path.join(self.environment.get_source_dir(), subdir)
        symlinkless_dir = os.path.realpath(absdir)
        if symlinkless_dir in self.visited_subdirs:
            raise InvalidArguments('Tried to enter directory "%s", which has already been visited.'
                                   % subdir)
        self.visited_subdirs[symlinkless_dir] = True
        self.subdir = subdir
        os.makedirs(os.path.join(self.environment.build_dir, subdir), exist_ok=True)
        buildfilename = os.path.join(self.subdir, environment.build_filename)
        self.build_def_files.append(buildfilename)
        absname = os.path.join(self.environment.get_source_dir(), buildfilename)
        if not os.path.isfile(absname):
            self.subdir = prev_subdir
            raise InterpreterException("Non-existent build file '{!s}'".format(buildfilename))
        with open(absname, encoding='utf8') as f:
            code = f.read()
        assert(isinstance(code, str))
        try:
            codeblock = mparser.Parser(code, absname).parse()
        except mesonlib.MesonException as me:
            me.file = absname
            raise me
        try:
            self.evaluate_codeblock(codeblock)
        except SubdirDoneRequest:
            pass
        self.subdir = prev_subdir

    def _get_kwarg_install_mode(self, kwargs):
        if kwargs.get('install_mode', None) is None:
            return None
        install_mode = []
        mode = mesonlib.typeslistify(kwargs.get('install_mode', []), (str, int))
        for m in mode:
            # We skip any arguments that are set to `false`
            if m is False:
                m = None
            install_mode.append(m)
        if len(install_mode) > 3:
            raise InvalidArguments('Keyword argument install_mode takes at '
                                   'most 3 arguments.')
        if len(install_mode) > 0 and install_mode[0] is not None and \
           not isinstance(install_mode[0], str):
            raise InvalidArguments('Keyword argument install_mode requires the '
                                   'permissions arg to be a string or false')
        return FileMode(*install_mode)

    @FeatureNewKwargs('install_data', '0.46.0', ['rename'])
    @FeatureNewKwargs('install_data', '0.38.0', ['install_mode'])
    @permittedKwargs(permitted_kwargs['install_data'])
    def func_install_data(self, node, args, kwargs):
        kwsource = mesonlib.stringlistify(kwargs.get('sources', []))
        raw_sources = args + kwsource
        sources = []
        source_strings = []
        for s in raw_sources:
            if isinstance(s, mesonlib.File):
                sources.append(s)
            elif isinstance(s, str):
                source_strings.append(s)
            else:
                raise InvalidArguments('Argument must be string or file.')
        sources += self.source_strings_to_files(source_strings)
        install_dir = kwargs.get('install_dir', None)
        if not isinstance(install_dir, (str, type(None))):
            raise InvalidArguments('Keyword argument install_dir not a string.')
        install_mode = self._get_kwarg_install_mode(kwargs)
        rename = kwargs.get('rename', None)
        data = DataHolder(build.Data(sources, install_dir, install_mode, rename))
        self.build.data.append(data.held_object)
        return data

    @FeatureNewKwargs('install_subdir', '0.42.0', ['exclude_files', 'exclude_directories'])
    @FeatureNewKwargs('install_subdir', '0.38.0', ['install_mode'])
    @permittedKwargs(permitted_kwargs['install_subdir'])
    @stringArgs
    def func_install_subdir(self, node, args, kwargs):
        if len(args) != 1:
            raise InvalidArguments('Install_subdir requires exactly one argument.')
        subdir = args[0]
        if 'install_dir' not in kwargs:
            raise InvalidArguments('Missing keyword argument install_dir')
        install_dir = kwargs['install_dir']
        if not isinstance(install_dir, str):
            raise InvalidArguments('Keyword argument install_dir not a string.')
        if 'strip_directory' in kwargs:
            if not isinstance(kwargs['strip_directory'], bool):
                raise InterpreterException('"strip_directory" keyword must be a boolean.')
            strip_directory = kwargs['strip_directory']
        else:
            strip_directory = False
        if 'exclude_files' in kwargs:
            exclude = extract_as_list(kwargs, 'exclude_files')
            for f in exclude:
                if not isinstance(f, str):
                    raise InvalidArguments('Exclude argument not a string.')
                elif os.path.isabs(f):
                    raise InvalidArguments('Exclude argument cannot be absolute.')
            exclude_files = set(exclude)
        else:
            exclude_files = set()
        if 'exclude_directories' in kwargs:
            exclude = extract_as_list(kwargs, 'exclude_directories')
            for d in exclude:
                if not isinstance(d, str):
                    raise InvalidArguments('Exclude argument not a string.')
                elif os.path.isabs(d):
                    raise InvalidArguments('Exclude argument cannot be absolute.')
            exclude_directories = set(exclude)
        else:
            exclude_directories = set()
        exclude = (exclude_files, exclude_directories)
        install_mode = self._get_kwarg_install_mode(kwargs)
        idir = InstallDir(self.subdir, subdir, install_dir, install_mode, exclude, strip_directory)
        self.build.install_dirs.append(idir)
        return idir

    @FeatureNewKwargs('configure_file', '0.47.0', ['copy', 'output_format', 'install_mode', 'encoding'])
    @FeatureNewKwargs('configure_file', '0.46.0', ['format'])
    @FeatureNewKwargs('configure_file', '0.41.0', ['capture'])
    @FeatureNewKwargs('configure_file', '0.50.0', ['install'])
    @FeatureNewKwargs('configure_file', '0.52.0', ['depfile'])
    @permittedKwargs(permitted_kwargs['configure_file'])
    def func_configure_file(self, node, args, kwargs):
        if len(args) > 0:
            raise InterpreterException("configure_file takes only keyword arguments.")
        if 'output' not in kwargs:
            raise InterpreterException('Required keyword argument "output" not defined.')
        actions = set(['configuration', 'command', 'copy']).intersection(kwargs.keys())
        if len(actions) == 0:
            raise InterpreterException('Must specify an action with one of these '
                                       'keyword arguments: \'configuration\', '
                                       '\'command\', or \'copy\'.')
        elif len(actions) == 2:
            raise InterpreterException('Must not specify both {!r} and {!r} '
                                       'keyword arguments since they are '
                                       'mutually exclusive.'.format(*actions))
        elif len(actions) == 3:
            raise InterpreterException('Must specify one of {!r}, {!r}, and '
                                       '{!r} keyword arguments since they are '
                                       'mutually exclusive.'.format(*actions))
        if 'capture' in kwargs:
            if not isinstance(kwargs['capture'], bool):
                raise InterpreterException('"capture" keyword must be a boolean.')
            if 'command' not in kwargs:
                raise InterpreterException('"capture" keyword requires "command" keyword.')

        if 'format' in kwargs:
            fmt = kwargs['format']
            if not isinstance(fmt, str):
                raise InterpreterException('"format" keyword must be a string.')
InterpreterException('"format" keyword must be a string.') else: fmt = 'meson' if fmt not in ('meson', 'cmake', 'cmake@'): raise InterpreterException('"format" possible values are "meson", "cmake" or "cmake@".') if 'output_format' in kwargs: output_format = kwargs['output_format'] if not isinstance(output_format, str): raise InterpreterException('"output_format" keyword must be a string.') else: output_format = 'c' if output_format not in ('c', 'nasm'): raise InterpreterException('"format" possible values are "c" or "nasm".') if 'depfile' in kwargs: depfile = kwargs['depfile'] if not isinstance(depfile, str): raise InterpreterException('depfile file name must be a string') else: depfile = None # Validate input inputs = self.source_strings_to_files(extract_as_list(kwargs, 'input')) inputs_abs = [] for f in inputs: if isinstance(f, mesonlib.File): inputs_abs.append(f.absolute_path(self.environment.source_dir, self.environment.build_dir)) self.add_build_def_file(f) else: raise InterpreterException('Inputs can only be strings or file objects') # Validate output output = kwargs['output'] if not isinstance(output, str): raise InterpreterException('Output file name must be a string') if inputs_abs: values = mesonlib.get_filenames_templates_dict(inputs_abs, None) outputs = mesonlib.substitute_values([output], values) output = outputs[0] if depfile: depfile = mesonlib.substitute_values([depfile], values)[0] ofile_rpath = os.path.join(self.subdir, output) if ofile_rpath in self.configure_file_outputs: mesonbuildfile = os.path.join(self.subdir, 'meson.build') current_call = "{}:{}".format(mesonbuildfile, self.current_lineno) first_call = "{}:{}".format(mesonbuildfile, self.configure_file_outputs[ofile_rpath]) mlog.warning('Output file', mlog.bold(ofile_rpath, True), 'for configure_file() at', current_call, 'overwrites configure_file() output at', first_call) else: self.configure_file_outputs[ofile_rpath] = self.current_lineno if os.path.dirname(output) != '': raise InterpreterException('Output file name must not contain a subdirectory.') (ofile_path, ofile_fname) = os.path.split(os.path.join(self.subdir, output)) ofile_abs = os.path.join(self.environment.build_dir, ofile_path, ofile_fname) # Perform the appropriate action if 'configuration' in kwargs: conf = kwargs['configuration'] if isinstance(conf, dict): FeatureNew.single_use('configure_file.configuration dictionary', '0.49.0', self.subproject) conf = ConfigurationDataHolder(self.subproject, conf) elif not isinstance(conf, ConfigurationDataHolder): raise InterpreterException('Argument "configuration" is not of type configuration_data') mlog.log('Configuring', mlog.bold(output), 'using configuration') if len(inputs) > 1: raise InterpreterException('At most one input file can given in configuration mode') if inputs: os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True) file_encoding = kwargs.setdefault('encoding', 'utf-8') missing_variables, confdata_useless = \ mesonlib.do_conf_file(inputs_abs[0], ofile_abs, conf.held_object, fmt, file_encoding) if missing_variables: var_list = ", ".join(map(repr, sorted(missing_variables))) mlog.warning( "The variable(s) %s in the input file '%s' are not " "present in the given configuration data." % ( var_list, inputs[0]), location=node) if confdata_useless: ifbase = os.path.basename(inputs_abs[0]) mlog.warning('Got an empty configuration_data() object and found no ' 'substitutions in the input file {!r}. 
            else:
                mesonlib.dump_conf_header(ofile_abs, conf.held_object, output_format)
            conf.mark_used()
        elif 'command' in kwargs:
            if len(inputs) > 1:
                FeatureNew.single_use('multiple inputs in configure_file()', '0.52.0', self.subproject)
            # We use absolute paths for input and output here because the cwd
            # that the command is run from is 'unspecified', so it could change.
            # Currently it's builddir/subdir for in_builddir else srcdir/subdir.
            values = mesonlib.get_filenames_templates_dict(inputs_abs, [ofile_abs])
            if depfile:
                depfile = os.path.join(self.environment.get_scratch_dir(), depfile)
                values['@DEPFILE@'] = depfile
            # Substitute @INPUT@, @OUTPUT@, etc here.
            cmd = mesonlib.substitute_values(kwargs['command'], values)
            mlog.log('Configuring', mlog.bold(output), 'with command')
            res = self.run_command_impl(node, cmd, {}, True)
            if res.returncode != 0:
                raise InterpreterException('Running configure command failed.\n%s\n%s' %
                                           (res.stdout, res.stderr))
            if 'capture' in kwargs and kwargs['capture']:
                dst_tmp = ofile_abs + '~'
                file_encoding = kwargs.setdefault('encoding', 'utf-8')
                with open(dst_tmp, 'w', encoding=file_encoding) as f:
                    f.writelines(res.stdout)
                if inputs_abs:
                    shutil.copymode(inputs_abs[0], dst_tmp)
                mesonlib.replace_if_different(ofile_abs, dst_tmp)
            if depfile:
                mlog.log('Reading depfile:', mlog.bold(depfile))
                with open(depfile, 'r') as f:
                    df = DepFile(f.readlines())
                    deps = df.get_all_dependencies(ofile_fname)
                    for dep in deps:
                        self.add_build_def_file(dep)
        elif 'copy' in kwargs:
            if len(inputs_abs) != 1:
                raise InterpreterException('Exactly one input file must be given in copy mode')
            os.makedirs(os.path.join(self.environment.build_dir, self.subdir), exist_ok=True)
            shutil.copy2(inputs_abs[0], ofile_abs)
        else:
            # Not reachable
            raise AssertionError

        # Install file if requested, we check for the empty string
        # for backwards compatibility. That was the behaviour before
        # 0.45.0 so preserve it.
        idir = kwargs.get('install_dir', '')
        if idir is False:
            idir = ''
            mlog.deprecation('Please use the new `install:` kwarg instead of passing '
                             '`false` to `install_dir:`', location=node)
        if not isinstance(idir, str):
            if isinstance(idir, list) and len(idir) == 0:
                mlog.deprecation('install_dir: kwarg must be a string and not an empty array. '
                                 'Please use the install: kwarg to enable or disable installation. '
                                 'This will be a hard error in the next release.')
            else:
                raise InterpreterException('"install_dir" must be a string')
        install = kwargs.get('install', idir != '')
        if not isinstance(install, bool):
            raise InterpreterException('"install" must be a boolean')
        if install:
            if not idir:
                raise InterpreterException('"install_dir" must be specified '
                                           'when "install" in a configure_file '
                                           'is true')
            cfile = mesonlib.File.from_built_file(ofile_path, ofile_fname)
            install_mode = self._get_kwarg_install_mode(kwargs)
            self.build.data.append(build.Data([cfile], idir, install_mode))
        return mesonlib.File.from_built_file(self.subdir, output)

    def extract_incdirs(self, kwargs):
        prospectives = unholder(extract_as_list(kwargs, 'include_directories'))
        result = []
        for p in prospectives:
            if isinstance(p, build.IncludeDirs):
                result.append(p)
            elif isinstance(p, str):
                result.append(self.build_incdir_object([p]).held_object)
            else:
                raise InterpreterException('Include directory objects can only be created from strings or include directories.')
        return result

    @permittedKwargs(permitted_kwargs['include_directories'])
    @stringArgs
    def func_include_directories(self, node, args, kwargs):
        return self.build_incdir_object(args, kwargs.get('is_system', False))

    def build_incdir_object(self, incdir_strings, is_system=False):
        if not isinstance(is_system, bool):
            raise InvalidArguments('Is_system must be boolean.')
        src_root = self.environment.get_source_dir()
        build_root = self.environment.get_build_dir()
        absbase_src = os.path.join(src_root, self.subdir)
        absbase_build = os.path.join(build_root, self.subdir)

        for a in incdir_strings:
            if a.startswith(src_root):
                raise InvalidArguments('Tried to form an absolute path to a source dir. '
                                       'You should not do that but use relative paths instead.'
                                       '''

To get include path to any directory relative to the current dir do

incdir = include_directories(dirname)

After this incdir will contain both the current source dir as well as the
corresponding build dir. It can then be used in any subdirectory and
Meson will take care of all the busywork to make paths work.

Dirname can even be '.' to mark the current directory. Though you should
remember that the current source and build directories are always
put in the include directories by default so you only need to do
include_directories('.') if you intend to use the result in a
different subdirectory.
''')
            absdir_src = os.path.join(absbase_src, a)
            absdir_build = os.path.join(absbase_build, a)
            if not os.path.isdir(absdir_src) and not os.path.isdir(absdir_build):
                raise InvalidArguments('Include dir %s does not exist.' % a)
        i = IncludeDirsHolder(build.IncludeDirs(self.subdir, incdir_strings, is_system))
        return i

    @permittedKwargs(permitted_kwargs['add_test_setup'])
    @stringArgs
    def func_add_test_setup(self, node, args, kwargs):
        if len(args) != 1:
            raise InterpreterException('Add_test_setup needs one argument for the setup name.')
        setup_name = args[0]
        if re.fullmatch('([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*', setup_name) is None:
            raise InterpreterException('Setup name may only contain alphanumeric characters.')
        if ":" not in setup_name:
            setup_name = (self.subproject if self.subproject else self.build.project_name) + ":" + setup_name
        try:
            inp = unholder(extract_as_list(kwargs, 'exe_wrapper'))
            exe_wrapper = []
            for i in inp:
                if isinstance(i, str):
                    exe_wrapper.append(i)
                elif isinstance(i, dependencies.ExternalProgram):
                    if not i.found():
                        raise InterpreterException('Tried to use non-found executable.')
                    exe_wrapper += i.get_command()
                else:
                    raise InterpreterException('Exe wrapper can only contain strings or external binaries.')
        except KeyError:
            exe_wrapper = None
        gdb = kwargs.get('gdb', False)
        if not isinstance(gdb, bool):
            raise InterpreterException('Gdb option must be a boolean')
        timeout_multiplier = kwargs.get('timeout_multiplier', 1)
        if not isinstance(timeout_multiplier, int):
            raise InterpreterException('Timeout multiplier must be a number.')
        is_default = kwargs.get('is_default', False)
        if not isinstance(is_default, bool):
            raise InterpreterException('is_default option must be a boolean')
        if is_default:
            if self.build.test_setup_default_name is not None:
                raise InterpreterException('\'%s\' is already set as default. '
                                           'is_default can be set to true only once' % self.build.test_setup_default_name)
            self.build.test_setup_default_name = setup_name
        env = self.unpack_env_kwarg(kwargs)
        self.build.test_setups[setup_name] = build.TestSetup(exe_wrapper, gdb, timeout_multiplier, env)

    @permittedKwargs(permitted_kwargs['add_global_arguments'])
    @stringArgs
    def func_add_global_arguments(self, node, args, kwargs):
        for_machine = self.machine_from_native_kwarg(kwargs)
        self.add_global_arguments(node, self.build.global_args[for_machine], args, kwargs)

    @permittedKwargs(permitted_kwargs['add_global_link_arguments'])
    @stringArgs
    def func_add_global_link_arguments(self, node, args, kwargs):
        for_machine = self.machine_from_native_kwarg(kwargs)
        self.add_global_arguments(node, self.build.global_link_args[for_machine], args, kwargs)

    @permittedKwargs(permitted_kwargs['add_project_arguments'])
    @stringArgs
    def func_add_project_arguments(self, node, args, kwargs):
        for_machine = self.machine_from_native_kwarg(kwargs)
        self.add_project_arguments(node, self.build.projects_args[for_machine], args, kwargs)

    @permittedKwargs(permitted_kwargs['add_project_link_arguments'])
    @stringArgs
    def func_add_project_link_arguments(self, node, args, kwargs):
        for_machine = self.machine_from_native_kwarg(kwargs)
        self.add_project_arguments(node, self.build.projects_link_args[for_machine], args, kwargs)

    def warn_about_builtin_args(self, args):
        warnargs = ('/W1', '/W2', '/W3', '/W4', '/Wall', '-Wall', '-Wextra', '-Wpedantic')
        optargs = ('-O0', '-O2', '-O3', '-Os', '/O1', '/O2', '/Os')
        for arg in args:
            if arg in warnargs:
                mlog.warning('Consider using the built-in warning_level option instead of using "{}".'.format(arg),
                             location=self.current_node)
            elif arg in optargs:
                mlog.warning('Consider using the built-in optimization level instead of using "{}".'.format(arg),
                             location=self.current_node)
            elif arg == '-g':
                mlog.warning('Consider using the built-in debug option instead of using "{}".'.format(arg),
                             location=self.current_node)
            elif arg == '-pipe':
                mlog.warning("You don't need to add -pipe, Meson will use it automatically when it is available.",
                             location=self.current_node)
            elif arg.startswith('-fsanitize'):
                mlog.warning('Consider using the built-in option for sanitizers instead of using "{}".'.format(arg),
                             location=self.current_node)
            elif arg.startswith('-std=') or arg.startswith('/std:'):
                mlog.warning('Consider using the built-in option for language standard version instead of using "{}".'.format(arg),
                             location=self.current_node)

    def add_global_arguments(self, node, argsdict, args, kwargs):
        if self.is_subproject():
            msg = 'Function \'{}\' cannot be used in subprojects because ' \
                  'there is no way to make that reliable.\nPlease only call ' \
                  'this if is_subproject() returns false. Alternatively, ' \
                  'define a variable that\ncontains your language-specific ' \
                  'arguments and add it to the appropriate *_args kwarg ' \
                  'in each target.'.format(node.func_name)
            raise InvalidCode(msg)
        frozen = self.project_args_frozen or self.global_args_frozen
        self.add_arguments(node, argsdict, frozen, args, kwargs)

    def add_project_arguments(self, node, argsdict, args, kwargs):
        if self.subproject not in argsdict:
            argsdict[self.subproject] = {}
        self.add_arguments(node, argsdict[self.subproject],
                           self.project_args_frozen, args, kwargs)

    def add_arguments(self, node, argsdict, args_frozen, args, kwargs):
        if args_frozen:
            msg = 'Tried to use \'{}\' after a build target has been declared.\n' \
                  'This is not permitted. Please declare all ' \
                  'arguments before your targets.'.format(node.func_name)
            raise InvalidCode(msg)

        if 'language' not in kwargs:
            raise InvalidCode('Missing language definition in {}'.format(node.func_name))

        self.warn_about_builtin_args(args)

        for lang in mesonlib.stringlistify(kwargs['language']):
            lang = lang.lower()
            argsdict[lang] = argsdict.get(lang, []) + args

    @noKwargs
    @noArgsFlattening
    def func_environment(self, node, args, kwargs):
        if len(args) > 1:
            raise InterpreterException('environment takes only one optional positional arguments')
        elif len(args) == 1:
            FeatureNew.single_use('environment positional arguments', '0.52.0', self.subproject)
            initial_values = args[0]
            if not isinstance(initial_values, dict) and not isinstance(initial_values, list):
                raise InterpreterException('environment first argument must be a dictionary or a list')
        else:
            initial_values = {}
        return EnvironmentVariablesHolder(initial_values)

    @stringArgs
    @noKwargs
    def func_join_paths(self, node, args, kwargs):
        return self.join_path_strings(args)

    def run(self) -> None:
        super().run()
        mlog.log('Build targets in project:', mlog.bold(str(len(self.build.targets))))
        FeatureNew.report(self.subproject)
        FeatureDeprecated.report(self.subproject)
        if not self.is_subproject():
            self.print_extra_warnings()
        if self.subproject == '':
            self._print_summary()

    def print_extra_warnings(self) -> None:
        # TODO cross compilation
        for c in self.coredata.compilers.host.values():
            if c.get_id() == 'clang':
                self.check_clang_asan_lundef()
                break

    def check_clang_asan_lundef(self) -> None:
        if 'b_lundef' not in self.coredata.base_options:
            return
        if 'b_sanitize' not in self.coredata.base_options:
            return
        if (self.coredata.base_options['b_lundef'].value and
                self.coredata.base_options['b_sanitize'].value != 'none'):
            mlog.warning('''Trying to use {} sanitizer on Clang with b_lundef. This will probably not work.
Try setting b_lundef to false instead.'''.format(self.coredata.base_options['b_sanitize'].value), location=self.current_node) def evaluate_subproject_info(self, path_from_source_root, subproject_dir): depth = 0 subproj_name = '' segs = PurePath(path_from_source_root).parts segs_spd = PurePath(subproject_dir).parts while segs and segs[0] == segs_spd[0]: if len(segs_spd) == 1: subproj_name = segs[1] segs = segs[2:] depth += 1 else: segs_spd = segs_spd[1:] segs = segs[1:] return (depth, subproj_name) # Check that the indicated file is within the same subproject # as we currently are. This is to stop people doing # nasty things like: # # f = files('../../master_src/file.c') # # Note that this is validated only when the file # object is generated. The result can be used in a different # subproject than it is defined in (due to e.g. a # declare_dependency). def validate_within_subproject(self, subdir, fname): norm = os.path.normpath(os.path.join(subdir, fname)) if os.path.isabs(norm): if not norm.startswith(self.environment.source_dir): # Grabbing files outside the source tree is ok. # This is for vendor stuff like: # # /opt/vendorsdk/src/file_with_license_restrictions.c return norm = os.path.relpath(norm, self.environment.source_dir) assert(not os.path.isabs(norm)) (num_sps, sproj_name) = self.evaluate_subproject_info(norm, self.subproject_dir) plain_filename = os.path.basename(norm) if num_sps == 0: if not self.is_subproject(): return raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename) if num_sps > 1: raise InterpreterException('Sandbox violation: Tried to grab file %s from a nested subproject.' % plain_filename) if sproj_name != self.subproject_directory_name: raise InterpreterException('Sandbox violation: Tried to grab file %s from a different subproject.' % plain_filename) def source_strings_to_files(self, sources): results = [] mesonlib.check_direntry_issues(sources) if not isinstance(sources, list): sources = [sources] for s in sources: if isinstance(s, (mesonlib.File, GeneratedListHolder, TargetHolder, CustomTargetIndexHolder, GeneratedObjectsHolder)): pass elif isinstance(s, str): self.validate_within_subproject(self.subdir, s) s = mesonlib.File.from_source_file(self.environment.source_dir, self.subdir, s) else: raise InterpreterException('Source item is {!r} instead of ' 'string or File-type object'.format(s)) results.append(s) return results def add_target(self, name, tobj): if name == '': raise InterpreterException('Target name must not be empty.') if name.strip() == '': raise InterpreterException('Target name must not consist only of whitespace.') if name.startswith('meson-'): raise InvalidArguments("Target names starting with 'meson-' are reserved " "for Meson's internal use. Please rename.") if name in coredata.FORBIDDEN_TARGET_NAMES: raise InvalidArguments("Target name '%s' is reserved for Meson's " "internal use. Please rename." % name) # To permit an executable and a shared library to have the # same name, such as "foo.exe" and "libfoo.a". idname = tobj.get_id() if idname in self.build.targets: raise InvalidCode('Tried to create target "%s", but a target of that name already exists.' 
% name) self.build.targets[idname] = tobj if idname not in self.coredata.target_guids: self.coredata.target_guids[idname] = str(uuid.uuid4()).upper() @FeatureNew('both_libraries', '0.46.0') def build_both_libraries(self, node, args, kwargs): shared_holder = self.build_target(node, args, kwargs, SharedLibraryHolder) # Check if user forces non-PIC static library. pic = True if 'pic' in kwargs: pic = kwargs['pic'] elif 'b_staticpic' in self.environment.coredata.base_options: pic = self.environment.coredata.base_options['b_staticpic'].value if pic: # Exclude sources from args and kwargs to avoid building them twice static_args = [args[0]] static_kwargs = kwargs.copy() static_kwargs['sources'] = [] static_kwargs['objects'] = shared_holder.held_object.extract_all_objects() else: static_args = args static_kwargs = kwargs static_holder = self.build_target(node, static_args, static_kwargs, StaticLibraryHolder) return BothLibrariesHolder(shared_holder, static_holder, self) def build_library(self, node, args, kwargs): default_library = self.coredata.get_builtin_option('default_library', self.subproject) if default_library == 'shared': return self.build_target(node, args, kwargs, SharedLibraryHolder) elif default_library == 'static': return self.build_target(node, args, kwargs, StaticLibraryHolder) elif default_library == 'both': return self.build_both_libraries(node, args, kwargs) else: raise InterpreterException('Unknown default_library value: %s.' % default_library) def build_target(self, node, args, kwargs, targetholder): @FeatureNewKwargs('build target', '0.42.0', ['rust_crate_type', 'build_rpath', 'implicit_include_directories']) @FeatureNewKwargs('build target', '0.41.0', ['rust_args']) @FeatureNewKwargs('build target', '0.40.0', ['build_by_default']) @FeatureNewKwargs('build target', '0.48.0', ['gnu_symbol_visibility']) def build_target_decorator_caller(self, node, args, kwargs): return True build_target_decorator_caller(self, node, args, kwargs) if not args: raise InterpreterException('Target does not have a name.') name, *sources = args for_machine = self.machine_from_native_kwarg(kwargs) if 'sources' in kwargs: sources += listify(kwargs['sources']) sources = self.source_strings_to_files(sources) objs = extract_as_list(kwargs, 'objects') kwargs['dependencies'] = extract_as_list(kwargs, 'dependencies') kwargs['install_mode'] = self._get_kwarg_install_mode(kwargs) if 'extra_files' in kwargs: ef = extract_as_list(kwargs, 'extra_files') kwargs['extra_files'] = self.source_strings_to_files(ef) self.check_sources_exist(os.path.join(self.source_root, self.subdir), sources) if targetholder == ExecutableHolder: targetclass = build.Executable elif targetholder == SharedLibraryHolder: targetclass = build.SharedLibrary elif targetholder == SharedModuleHolder: targetclass = build.SharedModule elif targetholder == StaticLibraryHolder: targetclass = build.StaticLibrary elif targetholder == JarHolder: targetclass = build.Jar else: mlog.debug('Unknown target type:', str(targetholder)) raise RuntimeError('Unreachable code') self.kwarg_strings_to_includedirs(kwargs) # Filter out kwargs from other target types. For example 'soversion' # passed to library() when default_library == 'static'.
kwargs = {k: v for k, v in kwargs.items() if k in targetclass.known_kwargs} kwargs['include_directories'] = self.extract_incdirs(kwargs) target = targetclass(name, self.subdir, self.subproject, for_machine, sources, objs, self.environment, kwargs) target.project_version = self.project_version self.add_stdlib_info(target) l = targetholder(target, self) self.add_target(name, l.held_object) self.project_args_frozen = True return l def kwarg_strings_to_includedirs(self, kwargs): if 'd_import_dirs' in kwargs: items = mesonlib.extract_as_list(kwargs, 'd_import_dirs') cleaned_items = [] for i in items: if isinstance(i, str): # BW compatibility. This was permitted so we must support it # for a few releases so people can transition to "correct" # path declarations. if os.path.normpath(i).startswith(self.environment.get_source_dir()): mlog.warning('''Building a path to the source dir is not supported. Use a relative path instead. This will become a hard error in the future.''', location=self.current_node) i = os.path.relpath(i, os.path.join(self.environment.get_source_dir(), self.subdir)) i = self.build_incdir_object([i]) cleaned_items.append(i) kwargs['d_import_dirs'] = cleaned_items def get_used_languages(self, target): result = set() for i in target.sources: for lang, c in self.coredata.compilers[target.for_machine].items(): if c.can_compile(i): result.add(lang) break return result def add_stdlib_info(self, target): for l in self.get_used_languages(target): dep = self.build.stdlibs[target.for_machine].get(l, None) if dep: target.add_deps(dep) def check_sources_exist(self, subdir, sources): for s in sources: if not isinstance(s, str): continue # This means a generated source and they always exist. fname = os.path.join(subdir, s) if not os.path.isfile(fname): raise InterpreterException('Tried to add non-existing source file %s.' % s) # Only permit object extraction from the same subproject def validate_extraction(self, buildtarget: InterpreterObject) -> None: if self.subproject != buildtarget.subproject: raise InterpreterException('Tried to extract objects from a different subproject.') def is_subproject(self): return self.subproject != '' @noKwargs @noArgsFlattening def func_set_variable(self, node, args, kwargs): if len(args) != 2: raise InvalidCode('Set_variable takes two arguments.') varname, value = args self.set_variable(varname, value) @noKwargs @noArgsFlattening def func_get_variable(self, node, args, kwargs): if len(args) < 1 or len(args) > 2: raise InvalidCode('Get_variable takes one or two arguments.') varname = args[0] if isinstance(varname, Disabler): return varname if not isinstance(varname, str): raise InterpreterException('First argument must be a string.') try: return self.variables[varname] except KeyError: pass if len(args) == 2: return args[1] raise InterpreterException('Tried to get unknown variable "%s".' 
% varname) @stringArgs @noKwargs def func_is_variable(self, node, args, kwargs): if len(args) != 1: raise InvalidCode('Is_variable takes one argument.') varname = args[0] return varname in self.variables @staticmethod def machine_from_native_kwarg(kwargs: T.Dict[str, T.Any]) -> MachineChoice: native = kwargs.get('native', False) if not isinstance(native, bool): raise InvalidArguments('Argument to "native" must be a boolean.') return MachineChoice.BUILD if native else MachineChoice.HOST @FeatureNew('is_disabler', '0.52.0') @noKwargs def func_is_disabler(self, node, args, kwargs): if len(args) != 1: raise InvalidCode('Is_disabler takes one argument.') varname = args[0] return isinstance(varname, Disabler)
[]
[]
[]
[]
[]
python
0
0
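The add_test_setup fragment above validates an optionally project-qualified setup name with an anchored regular expression, then prefixes the (sub)project name when no explicit qualifier is given. A minimal Go sketch of the same check (function and variable names here are mine, not Meson's; Go is used for consistency with the rest of this collection):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// Anchored on both ends so it behaves like Python's re.fullmatch: an
// optional "project:" qualifier followed by an identifier.
var setupNameRe = regexp.MustCompile(`^([_a-zA-Z][_0-9a-zA-Z]*:)?[_a-zA-Z][_0-9a-zA-Z]*$`)

// qualifySetupName validates name and, as in func_add_test_setup above,
// prefixes the (sub)project name when no explicit qualifier is present.
func qualifySetupName(name, project string) (string, error) {
	if !setupNameRe.MatchString(name) {
		return "", fmt.Errorf("setup name %q may only contain alphanumeric characters", name)
	}
	if !strings.Contains(name, ":") {
		name = project + ":" + name
	}
	return name, nil
}

func main() {
	qualified, _ := qualifySetupName("valgrind", "myproj")
	fmt.Println(qualified) // myproj:valgrind
}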
daemon/graphdriver/driver.go
package graphdriver import ( "errors" "fmt" "github.com/dotcloud/docker/archive" "os" "path" ) type InitFunc func(root string) (Driver, error) type Driver interface { String() string Create(id, parent string) error Remove(id string) error Get(id, mountLabel string) (dir string, err error) Put(id string) Exists(id string) bool Status() [][2]string Cleanup() error } type Differ interface { Diff(id string) (archive.Archive, error) Changes(id string) ([]archive.Change, error) ApplyDiff(id string, diff archive.ArchiveReader) error DiffSize(id string) (bytes int64, err error) } var ( DefaultDriver string // All registered drivers drivers map[string]InitFunc // Slice of drivers that should be tried in priority order priority = []string{ "aufs", "btrfs", "devicemapper", "vfs", } ErrNotSupported = errors.New("driver not supported") ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") ) func init() { drivers = make(map[string]InitFunc) } func Register(name string, initFunc InitFunc) error { if _, exists := drivers[name]; exists { return fmt.Errorf("Name already registered %s", name) } drivers[name] = initFunc return nil } func GetDriver(name, home string) (Driver, error) { if initFunc, exists := drivers[name]; exists { return initFunc(path.Join(home, name)) } return nil, ErrNotSupported } func New(root string) (driver Driver, err error) { for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} { if name != "" { return GetDriver(name, root) } } // Check for priority drivers first for _, name := range priority { driver, err = GetDriver(name, root) if err != nil { if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { continue } return nil, err } return driver, nil } // Check all registered drivers if no priority driver is found for _, initFunc := range drivers { if driver, err = initFunc(root); err != nil { if err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS { continue } return nil, err } return driver, nil } return nil, fmt.Errorf("No supported storage backend found") }
[ "\"DOCKER_DRIVER\"" ]
[]
[ "DOCKER_DRIVER" ]
[]
["DOCKER_DRIVER"]
go
1
0
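graphdriver.New resolves a storage backend in three steps: an explicit DOCKER_DRIVER environment variable wins outright, then a fixed priority list is probed, and "not supported"-style sentinel errors let probing fall through to the next candidate. A stripped-down sketch of that selection pattern (the string-returning initFunc and the two registered names below are stand-ins, not the real Driver interface):

package main

import (
	"errors"
	"fmt"
	"os"
)

// errNotSupported plays the role of graphdriver's ErrNotSupported sentinel:
// "skip this backend and try the next one".
var errNotSupported = errors.New("driver not supported")

type initFunc func(root string) (string, error)

var drivers = map[string]initFunc{}

func register(name string, fn initFunc) { drivers[name] = fn }

// newDriver follows the resolution order of graphdriver.New: an explicit
// DOCKER_DRIVER env var is used unconditionally, then a priority list is
// probed, and only sentinel errors allow falling through.
func newDriver(root string) (string, error) {
	if name := os.Getenv("DOCKER_DRIVER"); name != "" {
		if fn, ok := drivers[name]; ok {
			return fn(root)
		}
		return "", errNotSupported
	}
	for _, name := range []string{"aufs", "vfs"} { // stand-in priority list
		fn, ok := drivers[name]
		if !ok {
			continue
		}
		d, err := fn(root)
		if errors.Is(err, errNotSupported) {
			continue
		}
		return d, err
	}
	return "", errors.New("no supported storage backend found")
}

func main() {
	register("vfs", func(root string) (string, error) { return "vfs@" + root, nil })
	fmt.Println(newDriver("/var/lib/docker")) // vfs@/var/lib/docker <nil>
}

Note the asymmetry this preserves: a driver forced via DOCKER_DRIVER is not probed at all, so a misconfigured name fails fast instead of silently falling back.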
ssh/ssh.go
package ssh import ( "errors" "net" "os" "strconv" "strings" "time" gossh "github.com/coreos/fleet/third_party/code.google.com/p/gosshnew/ssh" gosshagent "github.com/coreos/fleet/third_party/code.google.com/p/gosshnew/ssh/agent" "github.com/coreos/fleet/third_party/code.google.com/p/gosshnew/ssh/terminal" ) type SSHForwardingClient struct { agentForwarding bool *gossh.Client } func (s *SSHForwardingClient) ForwardAgentAuthentication(session *gossh.Session) error { if s.agentForwarding { return gosshagent.RequestAgentForwarding(session) } return nil } func newSSHForwardingClient(client *gossh.Client, agentForwarding bool) (*SSHForwardingClient, error) { a, err := SSHAgentClient() if err != nil { return nil, err } err = gosshagent.ForwardToAgent(client, a) if err != nil { return nil, err } return &SSHForwardingClient{agentForwarding, client}, nil } // makeSession initializes a gossh.Session connected to the invoking process's stdin/stdout/stderr. // If the invoking session is a terminal, a TTY will be requested for the SSH session. // It returns a gossh.Session, a finalizing function used to clean up after the session terminates, // and any error encountered in setting up the session. func makeSession(client *SSHForwardingClient) (session *gossh.Session, finalize func(), err error) { session, err = client.NewSession() if err != nil { return } if err = client.ForwardAgentAuthentication(session); err != nil { return } session.Stdout = os.Stdout session.Stderr = os.Stderr session.Stdin = os.Stdin modes := gossh.TerminalModes{ gossh.ECHO: 1, // enable echoing gossh.TTY_OP_ISPEED: 14400, // input speed = 14.4kbaud gossh.TTY_OP_OSPEED: 14400, // output speed = 14.4kbaud } fd := int(os.Stdin.Fd()) if terminal.IsTerminal(fd) { var termWidth, termHeight int var oldState *terminal.State oldState, err = terminal.MakeRaw(fd) if err != nil { return } finalize = func() { session.Close() terminal.Restore(fd, oldState) } termWidth, termHeight, err = terminal.GetSize(fd) if err != nil { return } err = session.RequestPty("xterm-256color", termHeight, termWidth, modes) } else { finalize = func() { session.Close() } } return } // Execute runs the given command on the given client with stdin/stdout/stderr // connected to the controlling terminal. It returns any error encountered in // the SSH session, and the exit status of the remote command. func Execute(client *SSHForwardingClient, cmd string) (error, int) { session, finalize, err := makeSession(client) if err != nil { return err, -1 } defer finalize() if err = session.Start(cmd); err != nil { return err, -1 } err = session.Wait() // the command ran and exited successfully if err == nil { return nil, 0 } // if the session terminated normally, err should be ExitError; in that // case, return nil error and actual exit status of command if werr, ok := err.(*gossh.ExitError); ok { return nil, werr.ExitStatus() } // otherwise, we had an actual SSH error return err, -1 } // Shell launches an interactive shell on the given client. It returns any // error encountered in setting up the SSH session. func Shell(client *SSHForwardingClient) error { session, finalize, err := makeSession(client) if err != nil { return err } defer finalize() if err = session.Shell(); err != nil { return err } session.Wait() return nil } // SSHAgentClient returns an Agent that talks to the local ssh-agent func SSHAgentClient() (gosshagent.Agent, error) { sock := os.Getenv("SSH_AUTH_SOCK") if sock == "" { return nil, errors.New("SSH_AUTH_SOCK environment variable is not set. Verify ssh-agent is running.
See https://github.com/coreos/fleet/blob/master/Documentation/remote-access.md for help.") } agent, err := net.Dial("unix", sock) if err != nil { return nil, err } return gosshagent.NewClient(agent), nil } func sshClientConfig(user string, checker *HostKeyChecker) (*gossh.ClientConfig, error) { agentClient, err := SSHAgentClient() if err != nil { return nil, err } signers, err := agentClient.Signers() if err != nil { return nil, err } cfg := gossh.ClientConfig{ User: user, Auth: []gossh.AuthMethod{ gossh.PublicKeys(signers...), }, } if checker != nil { cfg.HostKeyCallback = checker.Check } return &cfg, nil } func maybeAddDefaultPort(addr string) string { if strings.Contains(addr, ":") { return addr } return net.JoinHostPort(addr, strconv.Itoa(sshDefaultPort)) } func NewSSHClient(user, addr string, checker *HostKeyChecker, agentForwarding bool) (*SSHForwardingClient, error) { clientConfig, err := sshClientConfig(user, checker) if err != nil { return nil, err } addr = maybeAddDefaultPort(addr) var client *gossh.Client dialFunc := func(echan chan error) { var err error client, err = gossh.Dial("tcp", addr, clientConfig) echan <- err } err = timeoutSSHDial(dialFunc) if err != nil { return nil, err } return newSSHForwardingClient(client, agentForwarding) } func NewTunnelledSSHClient(user, tunaddr, tgtaddr string, checker *HostKeyChecker, agentForwarding bool) (*SSHForwardingClient, error) { clientConfig, err := sshClientConfig(user, checker) if err != nil { return nil, err } tunaddr = maybeAddDefaultPort(tunaddr) tgtaddr = maybeAddDefaultPort(tgtaddr) var tunnelClient *gossh.Client dialFunc := func(echan chan error) { var err error tunnelClient, err = gossh.Dial("tcp", tunaddr, clientConfig) echan <- err } err = timeoutSSHDial(dialFunc) if err != nil { return nil, err } var targetConn net.Conn dialFunc = func(echan chan error) { tgtTCPAddr, err := net.ResolveTCPAddr("tcp", tgtaddr) if err != nil { echan <- err return } targetConn, err = tunnelClient.DialTCP("tcp", nil, tgtTCPAddr) echan <- err } err = timeoutSSHDial(dialFunc) if err != nil { return nil, err } c, chans, reqs, err := gossh.NewClientConn(targetConn, tgtaddr, clientConfig) if err != nil { return nil, err } return newSSHForwardingClient(gossh.NewClient(c, chans, reqs), agentForwarding) } func timeoutSSHDial(dial func(chan error)) error { var err error echan := make(chan error) go dial(echan) select { case <-time.After(time.Duration(time.Second * 10)): return errors.New("timed out while initiating SSH connection") case err = <-echan: return err } }
[ "\"SSH_AUTH_SOCK\"" ]
[]
[ "SSH_AUTH_SOCK" ]
[]
["SSH_AUTH_SOCK"]
go
1
0
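timeoutSSHDial above bounds a blocking dial with a goroutine-plus-select race against time.After. One detail worth noting: the snippet's channel is unbuffered, so a dial that completes after the timeout leaves its goroutine blocked on the send forever; buffering the channel by one slot, as in the sketch below, lets it exit. A self-contained sketch of the pattern (dialWithTimeout and slowDial are illustrative names, not fleet's):

package main

import (
	"errors"
	"fmt"
	"time"
)

// dialWithTimeout reproduces the timeoutSSHDial pattern: the dial runs in a
// goroutine and reports on a channel, while the caller races that channel
// against a deadline.
func dialWithTimeout(dial func(chan error), timeout time.Duration) error {
	echan := make(chan error, 1) // buffered: a late dial result won't block the goroutine forever
	go dial(echan)
	select {
	case <-time.After(timeout):
		return errors.New("timed out while initiating connection")
	case err := <-echan:
		return err
	}
}

func main() {
	slowDial := func(echan chan error) {
		time.Sleep(2 * time.Second) // stand-in for gossh.Dial
		echan <- nil
	}
	fmt.Println(dialWithTimeout(slowDial, time.Second)) // times out
}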
main.go
package main import ( "bytes" "encoding/base64" "encoding/json" "html/template" "io/ioutil" "log" "math/rand" "net/http" "os" "strings" "time" "github.com/joho/godotenv" "github.com/gorilla/mux" "gopkg.in/mgo.v2" "gopkg.in/mgo.v2/bson" ) const ( letterBytes = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" letterIdxBits = 6 // 6 bits to represent a letter index letterIdxMask = 1<<letterIdxBits - 1 // All 1-bits, as many as letterIdxBits letterIdxMax = 63 / letterIdxBits // # of letter indices fitting in 63 bits ) type User struct { Comment_karma int `json:"comment_karma"` Created float32 `json:"created"` Created_utc float32 `json:"created_utc"` Has_mail bool `json:"has_mail"` Has_mod_mail bool `json:"has_mod_mail"` Id string `json:"id"` Is_gold bool `json:"is_gold"` Is_mod bool `json:"is_mod"` Link_karma int `json:"link_karma"` Over_18 bool `json:"over_18"` Name string `bson:"name" json:"name"` Level string `bson:"level" json:"level"` Active string `bson:"active" json:"active"` Activation_token string `bson:"activation_token" json:"activation_token"` Created_at string `bson:"created_at" json:"created_at"` Auth RedditAuth IP string } type Chatroom struct { Id bson.ObjectId `bson:"_id,omitempty" json:"_id,omitempty" inline` Name string `bson:"name" json:"name"` Level string `bson:"level" json:"level"` Active string `bson:"active" json:"active"` Timestamp time.Time `bson:"timestamp,omitempty" json:"timestamp,omitempty"` Messages []bson.ObjectId `bson:"messages,omitempty" json:"messages" inline` } type Message struct { MessageId bson.ObjectId `bson:"_id,omitempty" json:"_id,omitempty" inline` Level int `bson:"level" json:"level"` Text string `bson:"text" json:"text"` UserName string `bson:"name" json:"name"` ChatRoomName string `bson:"room_name" json:"room_name"` ChatRoomId bson.ObjectId `bson:"chatRoomId,omitempty" json:"chatRoomId,omitempty"` Timestamp time.Time `bson:"timestamp,omitempty" json:"timestamp,omitempty"` } type RedditAuth struct { Access_token string `json:"access_token"` Token_type string `json:"token_type"` Expires_in int `json:"expires_in"` Scope string `json:"scope"` } type MongoDBConnections struct { Session *mgo.Session Messages *mgo.Collection Chatrooms *mgo.Collection } // env var CLIENT_ID = "YOUR_APP_ID" var CLIENT_SECRET = "YOUR_APP_SECRET" var DOMAIN = "192.168.1.43" var PORT = "9000" var REDIRECT_URI = SERVER_ADDRESS + "/reddit_callback" var SERVER_ADDRESS = "http://192.168.1.43:9000" var COOKIE_NAME = "goddit" var PROJ_ROOT = "" var MONGO_ADDR = "YOUR_MONGO_ADDR" var MONGO_USER = "YOUR_MONGO_USR" var MONGO_PASS = "YOUR_MONGO_PASS" // mem var users map[string]User var authorizedIPs []string var Mongo *MongoDBConnections var MessageChannel chan []byte func newMongoDBConnections() *MongoDBConnections { for { // connect to the database mongoDBDialInfo := &mgo.DialInfo{ Addrs: []string{MONGO_ADDR}, Timeout: 60 * time.Hour, Database: "admin", Username: MONGO_USER, Password: MONGO_PASS, } session, err := mgo.DialWithInfo(mongoDBDialInfo) if err == nil { log.Println("Connected to MongoDB") session.SetMode(mgo.Monotonic, true) return &MongoDBConnections{ Session: session, Messages: session.DB("views").C("messages"), Chatrooms: session.DB("views").C("chatrooms"), } } log.Println("Attempting MongoDB connection", err) time.Sleep(1 * time.Second) } } /** * Serve the /chat route * * Checks the cookie in the request, if the cookie is not found or the value * is not found in the server memory map, then return 403.
*/ func chat(w http.ResponseWriter, r *http.Request) { if r.Method != "GET" { http.Error(w, "Method not allowed", 405) return } var Rooms []Chatroom err := Mongo.Chatrooms.Find(nil).All(&Rooms) if err != nil { log.Println(err) panic(err) // didn't find any rooms, something wrong with the DB } cookie, err := r.Cookie(COOKIE_NAME) /** * Cookie not found or user not logged in */ if err != nil || users[cookie.Value].Name == "" { // respond with forbidden template.Must(template.New("403.html").ParseFiles( PROJ_ROOT+"/403.html")).Execute(w, "") } else { template.Must( template.New("chat.html").ParseFiles( PROJ_ROOT+"/chat.html")).Execute(w, struct { CookieName string ServerAddr string Username string Chatrooms []Chatroom }{COOKIE_NAME, SERVER_ADDRESS, users[cookie.Value].Name, Rooms}) } } // If the user is already logged in, redirect to the /chat func index(w http.ResponseWriter, r *http.Request) { if r.Method != "GET" { http.Error(w, "Method not allowed", 405) return } cookie, err := r.Cookie(COOKIE_NAME) if err != nil || users[cookie.Value].Name == "" { state := getRandomString(8) url := "https://ssl.reddit.com/api/v1/authorize.compact?" + "client_id=" + CLIENT_ID + "&response_type=code&state=" + state + "&redirect_uri=" + REDIRECT_URI + "&duration=temporary&scope=identity" template.Must(template.New("index.html").ParseFiles( PROJ_ROOT+"/index.html")).Execute(w, struct { Url string }{url}) } else { http.Redirect(w, r, SERVER_ADDRESS+"/chat", 302) } } func redditCallback(w http.ResponseWriter, r *http.Request) { err := r.FormValue("error") if err != "" { log.Println(err) } authData := getRedditAuth(r.FormValue("code")) user := getRedditUserData(authData) // failure to get data, redirect to / if user.Name == "" { http.Redirect(w, r, SERVER_ADDRESS+"/", 302) return } clientIp := strings.Split(r.RemoteAddr, ":")[0] authorizedIPs = append(authorizedIPs, clientIp) user.Auth = authData user.IP = clientIp // store reddit auth data in the map, Username -> RedditAuth data users[user.Name] = *user expire := time.Now().AddDate(0, 0, 1) cookie := &http.Cookie{ Expires: expire, MaxAge: 86400, Name: COOKIE_NAME, Value: user.Name, Path: "/", Domain: DOMAIN, } http.SetCookie(w, cookie) http.Redirect(w, r, SERVER_ADDRESS+"/chat", 302) } func getRedditUserData(auth RedditAuth) *User { client := &http.Client{} req, err := http.NewRequest("GET", "https://oauth.reddit.com/api/v1/me", nil) if err != nil { log.Println(err) } req.Header.Set("User-agent", "Web 1x83QLDFHequ8w 1.9.3 (by /u/SEND_ME_RARE_PEPES)") req.Header.Add("Authorization", "bearer "+auth.Access_token) res, err := client.Do(req) if err != nil { log.Println(err) } user := User{} body, err := ioutil.ReadAll(res.Body) err = json.Unmarshal(body, &user) if err != nil { log.Println(err) } return &user } func getPopularSubreddits() { client := &http.Client{} req, err := http.NewRequest("GET", "https://www.reddit.com/subreddits/popular/.json", nil) if err != nil { log.Println(err) } req.Header.Set("User-agent", "Web 1x83QLDFHequ8w 1.9.3 (by /u/SEND_ME_RARE_PEPES)") res, err := client.Do(req) if err != nil { log.Println("ERROR DOING REQUEST", err) } body, err := ioutil.ReadAll(res.Body) if err != nil { log.Println("ERROR READING BODY", err) } var pS PopularSubreddits err = json.Unmarshal(body, &pS) if err != nil { log.Println(err) return } bulkT := Mongo.Chatrooms.Bulk() bulkT.Unordered() // Avoid dupes (?) 
// Index index := mgo.Index{ Key: []string{"name"}, Unique: true, DropDups: true, Background: true, Sparse: true, } err = Mongo.Chatrooms.EnsureIndex(index) for i := 0; i < 25; i++ { if err != nil { log.Println(err) } subreddit := Chatroom{ Id: bson.NewObjectId(), Name: pS.Data.Children[i].Data.DisplayName, Level: "0", Active: "1", Timestamp: time.Now(), } bulkT.Insert(subreddit) } _, err = bulkT.Run() if err != nil { log.Println("Found duplicate subreddits...") } } func getRedditAuth(code string) RedditAuth { client := &http.Client{} req, err := http.NewRequest("POST", "https://ssl.reddit.com/api/v1/access_token", bytes.NewBufferString( "grant_type=authorization_code&code="+code+"&redirect_uri="+REDIRECT_URI)) if err != nil { log.Fatal(err) } req.Header.Add("User-agent", "Web 1x83QLDFHequ8w 1.9.3 (by /u/SEND_ME_RARE_PEPES)") encoded := base64.StdEncoding.EncodeToString( []byte(CLIENT_ID + ":" + CLIENT_SECRET)) req.Header.Add("Authorization", "Basic "+encoded) res, err := client.Do(req) if err != nil { log.Fatal(err) } defer res.Body.Close() redditAuth := RedditAuth{} body, err := ioutil.ReadAll(res.Body) err = json.Unmarshal(body, &redditAuth) return redditAuth } /** * Load the previous messages from this channel from the database */ func channelHistory(w http.ResponseWriter, r *http.Request) { vars := mux.Vars(r) name := r.Header.Get("name") if name == "" || users[name].Name == "" { http.Error(w, "Forbidden", 403) return } var room Chatroom // find the chatroom at this request err := Mongo.Chatrooms.Find(bson.M{"name": vars["channel"]}).One(&room) if err != nil { // channel not found log.Printf("Creating new channel: %s ...", vars["channel"]) // create new channel room.Id = bson.NewObjectId() room.Name = vars["channel"] room.Level = "0" room.Active = "true" err := Mongo.Chatrooms.Insert(room) if err != nil { log.Println(err) } else { // new welcome message for the room welcomeMessage := Message{ MessageId: bson.NewObjectId(), Text: "Welcome to the new " + vars["channel"] + " chat", ChatRoomName: vars["channel"], UserName: "Moderator", ChatRoomId: room.Id, Timestamp: time.Now(), Level: 1, // level = power } room.Messages = append(room.Messages, welcomeMessage.MessageId) // insert the new welcome message into the messages // collection, with this chatroom id and the user id err = Mongo.Messages.Insert(welcomeMessage) if err != nil { panic(err) // error inserting } } } // slice to hold the messages loaded from the database var messageSlice []Message // find the last 150 messages in the room err = Mongo.Messages.Find( bson.M{"chatRoomId": room.Id}).Sort( "-timestamp").Limit(150).All(&messageSlice) js, err := json.Marshal(messageSlice) if err != nil { panic(err) } // serve if r.Method != "GET" { http.Error(w, "Method not allowed", 405) return } w.Header().Set("Content-Type", "application/json") w.Write(js) } /** * Channel to save messages to the database */ func saveMessages(m *chan []byte) { for { message, ok := <-*m if !ok { log.Println("Error when trying to save") return } saveMessage(&message) } } func saveMessage(msg *[]byte) { message := Message{} err := json.Unmarshal(*msg, &message) message.MessageId = bson.NewObjectId() message.Timestamp = time.Now() var room Chatroom // find the chatroom at this request err = Mongo.Chatrooms.Find(bson.M{"name": message.ChatRoomName}).One(&room) if err != nil { // channel not found // create new channel room.Name = message.ChatRoomName room.Level = "0" room.Active = "true" room.Id = bson.NewObjectId() err := Mongo.Chatrooms.Insert(room) if err != nil { log.Println(err) }
else { room.Messages = append(room.Messages, message.MessageId) } } // construct the new message message.ChatRoomId = room.Id // insert the message into the messages collection, with this chatroom // and the user id err = Mongo.Messages.Insert(message) if err != nil { log.Println(err) // panic(err) // error inserting } var messageSlice []Message var bsonMessageSlice []bson.ObjectId // find all the messages that have this room as chatRoomId err = Mongo.Messages.Find( bson.M{"chatRoomId": room.Id}).Sort("-timestamp").All(&messageSlice) if err != nil { panic(err) } if len(messageSlice) > 0 { if err != nil { log.Println(err) } // if there are no messages it won't enter the loop for i := 0; i < len(messageSlice); i++ { bsonMessageSlice = append(bsonMessageSlice, messageSlice[i].MessageId) } } // append the new message bsonMessageSlice = append(bsonMessageSlice, message.MessageId) // update the room with the new message err = Mongo.Chatrooms.Update(bson.M{"_id": room.Id}, bson.M{"$set": bson.M{"messages": bsonMessageSlice}}) if err != nil { panic(err) } } func init() { err := godotenv.Load() if err != nil { log.Fatal("Error loading .env file") } // env variables CLIENT_ID = os.Getenv("APPID") CLIENT_SECRET = os.Getenv("APPSECRET") SERVER_ADDRESS = os.Getenv("GODDITADDR") DOMAIN = os.Getenv("GODDITDOMAIN") PORT = os.Getenv("PORT") COOKIE_NAME = os.Getenv("GCOOKIE") MONGO_ADDR = os.Getenv("MONGO_ADDR") MONGO_USER = os.Getenv("MONGO_USER") MONGO_PASS = os.Getenv("MONGO_PASS") log.Println("ENVIRONMENT", CLIENT_ID, CLIENT_SECRET, SERVER_ADDRESS, DOMAIN, PORT, COOKIE_NAME, MONGO_ADDR) ROOT, err := os.Getwd() if err != nil { log.Println(err) } PROJ_ROOT = ROOT } func main() { REDIRECT_URI = SERVER_ADDRESS + "/reddit_callback" // set database Mongo = newMongoDBConnections() MessageChannel = make(chan []byte, 256) // a goroutine for saving messages go saveMessages(&MessageChannel) // crawl popular subreddits go getPopularSubreddits() //for keeping track of users in memory users = make(map[string]User) r := mux.NewRouter() hub := newHub() go hub.run() r.HandleFunc("/", index) r.HandleFunc("/chat", chat) r.HandleFunc("/reddit_callback", redditCallback) r.HandleFunc("/history/{channel}", channelHistory) r.HandleFunc("/room/{channel}", func(w http.ResponseWriter, r *http.Request) { serveWs(hub, w, r) }) r.PathPrefix("/").Handler(http.StripPrefix("/", http.FileServer(http.Dir(PROJ_ROOT+"/icons")))) srv := &http.Server{ Handler: r, Addr: ":" + PORT, WriteTimeout: 5 * time.Second, ReadTimeout: 5 * time.Second, } log.Println("Serving on port", PORT) err := srv.ListenAndServe() if err != nil { log.Fatal("ListenAndServe: ", err) } } func getRandomString(n int) string { b := make([]byte, n) src := rand.NewSource(time.Now().UnixNano()) // A src.Int63() generates 63 random bits, enough for letterIdxMax characters! for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; { if remain == 0 { cache, remain = src.Int63(), letterIdxMax } if idx := int(cache & letterIdxMask); idx < len(letterBytes) { b[i] = letterBytes[idx] i-- } cache >>= letterIdxBits remain-- } return string(b) }
[ "\"APPID\"", "\"APPSECRET\"", "\"GODDITADDR\"", "\"GODDITDOMAIN\"", "\"PORT\"", "\"GCOOKIE\"", "\"MONGO_ADDR\"", "\"MONGO_USER\"", "\"MONGO_PASS\"" ]
[]
[ "PORT", "MONGO_PASS", "GODDITDOMAIN", "GODDITADDR", "APPSECRET", "MONGO_USER", "GCOOKIE", "MONGO_ADDR", "APPID" ]
[]
["PORT", "MONGO_PASS", "GODDITDOMAIN", "GODDITADDR", "APPSECRET", "MONGO_USER", "GCOOKIE", "MONGO_ADDR", "APPID"]
go
9
0
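getRandomString above uses the classic masking trick: each src.Int63() call yields 63 random bits, which are consumed six at a time (letterIdxBits), and any 6-bit index that falls outside the 52-letter alphabet is rejected so the distribution stays uniform, while the random source is called far less often than one rand.Intn per character. The same routine isolated as a runnable sketch (note that math/rand is tolerable for the OAuth state string used here, but anything security-sensitive should use crypto/rand instead):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

const (
	letterBytes   = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
	letterIdxBits = 6                    // 6 bits are enough to index 52 letters
	letterIdxMask = 1<<letterIdxBits - 1 // 0b111111
	letterIdxMax  = 63 / letterIdxBits   // 10 indices per Int63 call
)

// randomString is the masking technique used by getRandomString above: one
// Int63() yields up to ten 6-bit indices, and indices >= len(letterBytes)
// are rejected to keep the distribution uniform.
func randomString(n int) string {
	src := rand.NewSource(time.Now().UnixNano())
	b := make([]byte, n)
	for i, cache, remain := n-1, src.Int63(), letterIdxMax; i >= 0; {
		if remain == 0 {
			cache, remain = src.Int63(), letterIdxMax
		}
		if idx := int(cache & letterIdxMask); idx < len(letterBytes) {
			b[i] = letterBytes[idx]
			i--
		}
		cache >>= letterIdxBits
		remain--
	}
	return string(b)
}

func main() {
	fmt.Println(randomString(8)) // e.g. "qYhTzLkP"
}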
main.go
package main import ( "context" "crypto/rand" "encoding/base64" "encoding/json" "fmt" "html/template" "io/ioutil" "math" "net/http" "net/http/httptrace" "os" "path/filepath" "sort" "strconv" "time" "github.com/apex/log" "github.com/gorilla/mux" ) type key int const ( logger key = iota visitor ) // NextBus describes when the bus is coming type NextBus struct { OriginCode string `json:"OriginCode"` DestinationCode string `json:"DestinationCode"` EstimatedArrival string `json:"EstimatedArrival"` Latitude string `json:"Latitude"` Longitude string `json:"Longitude"` VisitNumber string `json:"VisitNumber"` Load string `json:"Load"` Feature string `json:"Feature"` Type string `json:"Type"` } // SGBusArrivals describes the response from the datamall API type SGBusArrivals struct { OdataMetadata string `json:"odata.metadata"` BusStopCode string `json:"BusStopCode"` Services []struct { ServiceNo string `json:"ServiceNo"` Operator string `json:"Operator"` NextBus NextBus `json:"NextBus"` NextBus2 NextBus `json:"NextBus2"` NextBus3 NextBus `json:"NextBus3"` } `json:"Services"` } type Server struct { router *mux.Router busStops BusStops } func main() { if _, ok := os.LookupEnv("accountkey"); !ok { log.Errorf("Missing accountKey") os.Exit(1) } server, err := NewServer("all.json") if err != nil { log.Fatalf("failed to create server: %v", err) } err = http.ListenAndServe(":"+os.Getenv("PORT"), server.router) if err != nil { log.Fatalf("failed to start server: %v", err) } } func NewServer(busStopsPath string) (*Server, error) { bs, err := loadBusJSON(busStopsPath) if err != nil { log.WithError(err).Fatal("unable to load bus stops") } srv := Server{ router: mux.NewRouter(), busStops: bs, } srv.routes() return &srv, nil } func (s *Server) routes() { s.router.HandleFunc("/", s.handleIndex) s.router.HandleFunc("/closest", s.handleClosest) s.router.HandleFunc("/icon", handleIcon) staticDir := "/static/" s.router.PathPrefix(staticDir). 
Handler(http.StripPrefix(staticDir, http.FileServer(http.Dir("."+staticDir)))) s.router.Use(addContextMiddleware) } func (s *Server) handleClosest(w http.ResponseWriter, r *http.Request) { lat, err := strconv.ParseFloat(r.URL.Query().Get("lat"), 32) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } lng, err := strconv.ParseFloat(r.URL.Query().Get("lng"), 32) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } closest := s.busStops.closest(Point{lat: lat, lng: lng}) http.Redirect(w, r, fmt.Sprintf("/?id=%s", closest.BusStopCode), 302) } func (s *Server) handleIndex(w http.ResponseWriter, r *http.Request) { if os.Getenv("UP_STAGE") != "production" { w.Header().Set("X-Robots-Tag", "none") } log, ok := r.Context().Value(logger).(*log.Entry) if !ok { http.Error(w, "Unable to get logging context", http.StatusInternalServerError) return } funcs := template.FuncMap{ "totalStops": func() int { return len(s.busStops) }, "nameBusStop": func(id string) string { return s.busStops.nameBusStop(id) }, "getEnv": os.Getenv, } t, err := template.New("").Funcs(funcs).ParseFiles("templates/index.html") if err != nil { log.WithError(err).Error("template failed to parse") http.Error(w, err.Error(), http.StatusInternalServerError) return } id := r.URL.Query().Get("id") var arriving SGBusArrivals if id != "" { arriving, err = busArrivals(id) if err != nil { log.WithError(err).Error("failed to retrieve bus timings") } log.WithField("input", id).Info("serving") } err = t.ExecuteTemplate(w, "index.html", arriving) if err != nil { log.WithError(err).Error("template failed to parse") http.Error(w, err.Error(), http.StatusInternalServerError) } } func busArrivals(stopID string) (arrivals SGBusArrivals, err error) { if stopID == "" { return } ctx := log.WithFields( log.Fields{ "stopID": stopID, }) url := fmt.Sprintf("http://datamall2.mytransport.sg/ltaodataservice/BusArrivalv2?BusStopCode=%s", stopID) req, err := http.NewRequest("GET", url, nil) if err != nil { return } req.Header.Add("AccountKey", os.Getenv("accountkey")) start := time.Now() timings := log.Fields{} trace := &httptrace.ClientTrace{ DNSStart: func(_ httptrace.DNSStartInfo) { timings["DNSStart"] = ms(time.Since(start)) }, GotFirstResponseByte: func() { timings["GotFirstResponseByte"] = ms(time.Since(start)) }, } req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace)) res, err := http.DefaultClient.Do(req) if err != nil { return } defer res.Body.Close() timings["Total"] = ms(time.Since(start)) ctx.WithFields(timings).Info("LTA API") if res.StatusCode != http.StatusOK { return arrivals, fmt.Errorf("Bad response: %d", res.StatusCode) } decoder := json.NewDecoder(res.Body) err = decoder.Decode(&arrivals) if err != nil { log.WithError(err).Error("failed to decode response") return } // Sort by buses arriving first sort.Slice(arrivals.Services, func(i, j int) bool { return arrivals.Services[i].NextBus.EstimatedArrival < arrivals.Services[j].NextBus.EstimatedArrival }) return } func addContextMiddleware(next http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { cookie, _ := r.Cookie("visitor") logging := log.WithFields( log.Fields{ "id": r.Header.Get("X-Request-Id"), "country": r.Header.Get("Cloudfront-Viewer-Country"), "ua": r.UserAgent(), }) if cookie != nil { cvisitor := context.WithValue(r.Context(), visitor, cookie.Value) logging = logging.WithField("visitor", cookie.Value) clog := context.WithValue(cvisitor, logger, logging) next.ServeHTTP(w, 
r.WithContext(clog)) } else { visitorID, _ := generateRandomString(24) // log.Infof("Generating visitor id: %s", visitorID) expiration := time.Now().Add(365 * 24 * time.Hour) setCookie := http.Cookie{Name: "visitor", Value: visitorID, Expires: expiration} http.SetCookie(w, &setCookie) cvisitor := context.WithValue(r.Context(), visitor, visitorID) logging = logging.WithField("visitor", visitorID) clog := context.WithValue(cvisitor, logger, logging) next.ServeHTTP(w, r.WithContext(clog)) } }) } func generateRandomBytes(n int) ([]byte, error) { b := make([]byte, n) _, err := rand.Read(b) if err != nil { return nil, err } return b, nil } func generateRandomString(s int) (string, error) { b, err := generateRandomBytes(s) return base64.URLEncoding.EncodeToString(b), err } func ms(d time.Duration) int { return int(d / time.Millisecond) } // Point is a geo co-ordinate type Point struct { lat float64 lng float64 } // BusStop describes a Singaporean (LTA) bus stop type BusStop struct { BusStopCode string `json:"BusStopCode"` RoadName string `json:"RoadName"` Description string `json:"Description"` Latitude float64 `json:"Latitude"` Longitude float64 `json:"Longitude"` } // BusStops are many bus stops type BusStops []BusStop func loadBusJSON(jsonfile string) (bs BusStops, err error) { content, err := ioutil.ReadFile(filepath.Clean(jsonfile)) if err != nil { return } err = json.Unmarshal(content, &bs) if err != nil { return } return } func (bs BusStops) closest(location Point) BusStop { c := -1 closestSoFar := math.Inf(1) for i := range bs { distance := location.distance(Point{bs[i].Latitude, bs[i].Longitude}) if distance < closestSoFar { // Set the return c = i // Record closest distance closestSoFar = distance } } return bs[c] } func (bs BusStops) nameBusStop(busid string) (description string) { for _, p := range bs { if busid == p.BusStopCode { return p.Description } } return "" } // distance returns the squared distance between two points; the square root // is omitted since the value is only ever compared, never reported func (p Point) distance(p2 Point) float64 { latd := p2.lat - p.lat lngd := p2.lng - p.lng return latd*latd + lngd*lngd }
[ "\"PORT\"", "\"UP_STAGE\"", "\"accountkey\"" ]
[]
[ "PORT", "UP_STAGE", "accountkey" ]
[]
["PORT", "UP_STAGE", "accountkey"]
go
3
0
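BusStops.closest ranks stops by Point.distance, which returns the squared planar distance: since sqrt is monotonic, comparing squares picks the same winner while skipping the sqrt call, and treating lat/lng as planar coordinates is a fair approximation for ranking at city scale. A compact sketch of the idea (point, squaredDistance, and closest are illustrative names; unlike the snippet, this version guards against an empty slice, which would make bs[c] panic with c == -1):

package main

import (
	"fmt"
	"math"
)

type point struct{ lat, lng float64 }

// squaredDistance matches Point.distance above: for ranking nearby stops
// the square root is unnecessary, since x < y implies sqrt(x) < sqrt(y).
func squaredDistance(a, b point) float64 {
	dLat := b.lat - a.lat
	dLng := b.lng - a.lng
	return dLat*dLat + dLng*dLng
}

// closest returns the index of the nearest point and false if stops is empty.
func closest(stops []point, loc point) (int, bool) {
	best, bestDist := -1, math.Inf(1)
	for i, s := range stops {
		if d := squaredDistance(loc, s); d < bestDist {
			best, bestDist = i, d
		}
	}
	return best, best >= 0
}

func main() {
	stops := []point{{1.30, 103.85}, {1.35, 103.99}}
	i, ok := closest(stops, point{1.31, 103.86})
	fmt.Println(i, ok) // 0 true
}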
vendor/code.cloudfoundry.org/cli/command/v2/create_buildpack_command.go
package v2 import ( "os" "code.cloudfoundry.org/cli/cf/cmd" "code.cloudfoundry.org/cli/command" "code.cloudfoundry.org/cli/command/flag" "code.cloudfoundry.org/cli/command/translatableerror" ) type CreateBuildpackCommand struct { RequiredArgs flag.CreateBuildpackArgs `positional-args:"yes"` Disable bool `long:"disable" description:"Disable the buildpack from being used for staging"` Enable bool `long:"enable" description:"Enable the buildpack to be used for staging"` usage interface{} `usage:"CF_NAME create-buildpack BUILDPACK PATH POSITION [--enable|--disable]\n\nTIP:\n Path should be a zip file, a url to a zip file, or a local directory. Position is a positive integer, sets priority, and is sorted from lowest to highest."` relatedCommands interface{} `related_commands:"buildpacks, push"` } func (CreateBuildpackCommand) Setup(config command.Config, ui command.UI) error { return nil } func (c CreateBuildpackCommand) Execute(args []string) error { _, err := flag.ParseStringToInt(c.RequiredArgs.Position) if err != nil { return translatableerror.ParseArgumentError{ ArgumentName: "POSITION", ExpectedType: "integer", } } cmd.Main(os.Getenv("CF_TRACE"), os.Args) return nil }
[ "\"CF_TRACE\"" ]
[]
[ "CF_TRACE" ]
[]
["CF_TRACE"]
go
1
0
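This command defers all real work to the legacy cmd.Main and only pre-validates that the POSITION positional argument parses as an integer, returning a translatable error otherwise. A sketch of that shape of check (checkPosition is an invented name; the actual parsing lives in the cli flag package's ParseStringToInt):

package main

import (
	"fmt"
	"strconv"
)

// checkPosition mirrors the pre-flight validation in Execute above:
// POSITION arrives as a string positional argument and must parse as an
// integer before the command is handed to the legacy implementation.
func checkPosition(raw string) error {
	if _, err := strconv.Atoi(raw); err != nil {
		return fmt.Errorf("incorrect usage: POSITION must be an integer")
	}
	return nil
}

func main() {
	fmt.Println(checkPosition("5"))   // <nil>
	fmt.Println(checkPosition("top")) // incorrect usage: POSITION must be an integer
}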
tests/conftest.py
import os import shutil import sys import tempfile from pathlib import Path import django # Path to the temp mezzanine project folder TMP_PATH = Path(tempfile.mkdtemp()) / "project_template" TEST_SETTINGS = """ from . import settings globals().update(i for i in settings.__dict__.items() if i[0].isupper()) # Add our own tests folder to installed apps (required to test models) INSTALLED_APPS = list(INSTALLED_APPS) INSTALLED_APPS.append("tests") if "mezzanine.accounts" not in INSTALLED_APPS: INSTALLED_APPS.append("mezzanine.accounts") # Use the MD5 password hasher by default for quicker test runs. PASSWORD_HASHERS = ('django.contrib.auth.hashers.MD5PasswordHasher',) """ def pytest_report_header(config): """ Have pytest report the path of the project folder """ return f"mezzanine proj (tmp): {TMP_PATH}" def pytest_configure(): """ Hack the `project_template` dir into an actual project to test against. """ from mezzanine.utils.importing import path_for_import template_path = Path(path_for_import("mezzanine")) / "project_template" shutil.copytree(str(template_path), str(TMP_PATH)) proj_path = TMP_PATH / "project_name" local_settings = (proj_path / "local_settings.py.template").read_text() (proj_path / "test_settings.py").write_text(TEST_SETTINGS + local_settings) # Setup the environment for Django sys.path.insert(0, str(TMP_PATH)) os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project_name.test_settings") django.setup() def pytest_unconfigure(): """ Remove the temporary folder """ try: shutil.rmtree(str(TMP_PATH)) except OSError: pass
[]
[]
[]
[]
[]
python
0
0
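pytest_configure builds a throwaway copy of the project template in a temp directory, writes a derived settings file into it, and points Django at it with os.environ.setdefault, so an externally provided DJANGO_SETTINGS_MODULE still wins. The same two ingredients, temp-dir scaffolding and set-if-unset environment defaults, sketched in Go to match the rest of this collection (all names are illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// setdefaultEnv mirrors Python's os.environ.setdefault used in the conftest:
// the variable is only set when the caller has not already provided one.
func setdefaultEnv(key, value string) {
	if _, ok := os.LookupEnv(key); !ok {
		os.Setenv(key, value)
	}
}

func main() {
	// Build a throwaway project directory, as pytest_configure does with
	// tempfile.mkdtemp, and remove it when the run ends.
	tmp, err := os.MkdirTemp("", "projtest")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(tmp)

	settings := filepath.Join(tmp, "test_settings.py")
	if err := os.WriteFile(settings, []byte("DEBUG = True\n"), 0o644); err != nil {
		panic(err)
	}
	setdefaultEnv("DJANGO_SETTINGS_MODULE", "project_name.test_settings")
	fmt.Println(tmp, os.Getenv("DJANGO_SETTINGS_MODULE"))
}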
config/pgoconfig.go
package config /* Copyright 2018 - 2020 Crunchy Data Solutions, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ import ( "errors" "fmt" "io/ioutil" "os" "strconv" "strings" "text/template" crv1 "github.com/crunchydata/postgres-operator/apis/crunchydata.com/v1" "github.com/crunchydata/postgres-operator/kubeapi" log "github.com/sirupsen/logrus" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/util/validation" "k8s.io/client-go/kubernetes" "sigs.k8s.io/yaml" ) const CustomConfigMapName = "pgo-config" const DefaultConfigsPath = "/default-pgo-config/" const CustomConfigsPath = "/pgo-config/" var PgoDefaultServiceAccountTemplate *template.Template const PGODefaultServiceAccountPath = "pgo-default-sa.json" var PgoTargetRoleBindingTemplate *template.Template const PGOTargetRoleBindingPath = "pgo-target-role-binding.json" var PgoBackrestServiceAccountTemplate *template.Template const PGOBackrestServiceAccountPath = "pgo-backrest-sa.json" var PgoTargetServiceAccountTemplate *template.Template const PGOTargetServiceAccountPath = "pgo-target-sa.json" var PgoBackrestRoleTemplate *template.Template const PGOBackrestRolePath = "pgo-backrest-role.json" var PgoBackrestRoleBindingTemplate *template.Template const PGOBackrestRoleBindingPath = "pgo-backrest-role-binding.json" var PgoTargetRoleTemplate *template.Template const PGOTargetRolePath = "pgo-target-role.json" var PgoPgServiceAccountTemplate *template.Template const PGOPgServiceAccountPath = "pgo-pg-sa.json" var PgoPgRoleTemplate *template.Template const PGOPgRolePath = "pgo-pg-role.json" var PgoPgRoleBindingTemplate *template.Template const PGOPgRoleBindingPath = "pgo-pg-role-binding.json" var PolicyJobTemplate *template.Template const policyJobTemplatePath = "pgo.sqlrunner-template.json" var PVCTemplate *template.Template const pvcPath = "pvc.json" var ContainerResourcesTemplate *template.Template const containerResourcesTemplatePath = "container-resources.json" var LoadTemplate *template.Template const loadTemplatePath = "pgo.load-template.json" var AffinityTemplate *template.Template const affinityTemplatePath = "affinity.json" var PodAntiAffinityTemplate *template.Template const podAntiAffinityTemplatePath = "pod-anti-affinity.json" var PgoBackrestRepoServiceTemplate *template.Template const pgoBackrestRepoServiceTemplatePath = "pgo-backrest-repo-service-template.json" var PgoBackrestRepoTemplate *template.Template const pgoBackrestRepoTemplatePath = "pgo-backrest-repo-template.json" var PgmonitorEnvVarsTemplate *template.Template const pgmonitorEnvVarsPath = "pgmonitor-env-vars.json" var PgbackrestEnvVarsTemplate *template.Template const pgbackrestEnvVarsPath = "pgbackrest-env-vars.json" var PgbackrestS3EnvVarsTemplate *template.Template const pgbackrestS3EnvVarsPath = "pgbackrest-s3-env-vars.json" var PgbouncerTemplate *template.Template const pgbouncerTemplatePath = "pgbouncer-template.json" var PgbouncerConfTemplate *template.Template const pgbouncerConfTemplatePath = "pgbouncer.ini" var PgbouncerUsersTemplate *template.Template const 
pgbouncerUsersTemplatePath = "users.txt" var PgbouncerHBATemplate *template.Template const pgbouncerHBATemplatePath = "pgbouncer_hba.conf" var ServiceTemplate *template.Template const serviceTemplatePath = "cluster-service.json" var RmdatajobTemplate *template.Template const rmdatajobPath = "rmdata-job.json" var BackrestjobTemplate *template.Template const backrestjobPath = "backrest-job.json" var BackrestRestorejobTemplate *template.Template const backrestRestorejobPath = "backrest-restore-job.json" var PgDumpBackupJobTemplate *template.Template const pgDumpBackupJobPath = "pgdump-job.json" var PgRestoreJobTemplate *template.Template const pgRestoreJobPath = "pgrestore-job.json" var PVCMatchLabelsTemplate *template.Template const pvcMatchLabelsPath = "pvc-matchlabels.json" var PVCStorageClassTemplate *template.Template const pvcSCPath = "pvc-storageclass.json" var CollectTemplate *template.Template const collectTemplatePath = "collect.json" var BadgerTemplate *template.Template const badgerTemplatePath = "pgbadger.json" var DeploymentTemplate *template.Template const deploymentTemplatePath = "cluster-deployment.json" type ClusterStruct struct { CCPImagePrefix string CCPImageTag string PrimaryNodeLabel string ReplicaNodeLabel string Policies string Metrics bool Badger bool Port string PGBadgerPort string ExporterPort string User string Database string PasswordAgeDays string PasswordLength string Replicas string ServiceType string BackrestPort int Backrest bool BackrestS3Bucket string BackrestS3Endpoint string BackrestS3Region string DisableAutofail bool PgmonitorPassword string EnableCrunchyadm bool DisableReplicaStartFailReinit bool PodAntiAffinity string PodAntiAffinityPgBackRest string PodAntiAffinityPgBouncer string SyncReplication bool DefaultInstanceResourceMemory resource.Quantity `json:"DefaultInstanceMemory"` DefaultBackrestResourceMemory resource.Quantity `json:"DefaultBackrestMemory"` DefaultPgBouncerResourceMemory resource.Quantity `json:"DefaultPgBouncerMemory"` } type StorageStruct struct { AccessMode string Size string StorageType string StorageClass string SupplementalGroups string MatchLabels string } type PgoStruct struct { Audit bool PGOImagePrefix string PGOImageTag string } type PgoConfig struct { BasicAuth string Cluster ClusterStruct Pgo PgoStruct PrimaryStorage string BackupStorage string ReplicaStorage string BackrestStorage string Storage map[string]StorageStruct } const DEFAULT_SERVICE_TYPE = "ClusterIP" const LOAD_BALANCER_SERVICE_TYPE = "LoadBalancer" const NODEPORT_SERVICE_TYPE = "NodePort" const CONFIG_PATH = "pgo.yaml" var log_statement_values = []string{"ddl", "none", "mod", "all"} const DEFAULT_BACKREST_PORT = 2022 const DEFAULT_PGBADGER_PORT = "10000" const DEFAULT_EXPORTER_PORT = "9187" const DEFAULT_POSTGRES_PORT = "5432" const DEFAULT_PATRONI_PORT = "8009" func (c *PgoConfig) Validate() error { var err error errPrefix := "Error in pgoconfig: check pgo.yaml: " if c.Cluster.BackrestPort == 0 { c.Cluster.BackrestPort = DEFAULT_BACKREST_PORT log.Infof("setting BackrestPort to default %d", c.Cluster.BackrestPort) } if c.Cluster.PGBadgerPort == "" { c.Cluster.PGBadgerPort = DEFAULT_PGBADGER_PORT log.Infof("setting PGBadgerPort to default %s", c.Cluster.PGBadgerPort) } else { if _, err := strconv.Atoi(c.Cluster.PGBadgerPort); err != nil { return errors.New(errPrefix + "Invalid PGBadgerPort: " + err.Error()) } } if c.Cluster.ExporterPort == "" { c.Cluster.ExporterPort = DEFAULT_EXPORTER_PORT log.Infof("setting ExporterPort to default %s", 
c.Cluster.ExporterPort) } else { if _, err := strconv.Atoi(c.Cluster.ExporterPort); err != nil { return errors.New(errPrefix + "Invalid ExporterPort: " + err.Error()) } } if c.Cluster.Port == "" { c.Cluster.Port = DEFAULT_POSTGRES_PORT log.Infof("setting Postgres Port to default %s", c.Cluster.Port) } else { if _, err := strconv.Atoi(c.Cluster.Port); err != nil { return errors.New(errPrefix + "Invalid Port: " + err.Error()) } } if c.Cluster.PrimaryNodeLabel != "" { parts := strings.Split(c.Cluster.PrimaryNodeLabel, "=") if len(parts) != 2 { return errors.New(errPrefix + "Cluster.PrimaryNodeLabel does not follow key=value format") } } if c.Cluster.ReplicaNodeLabel != "" { parts := strings.Split(c.Cluster.ReplicaNodeLabel, "=") if len(parts) != 2 { return errors.New(errPrefix + "Cluster.ReplicaNodeLabel does not follow key=value format") } } log.Infof("pgo.yaml Cluster.Backrest is %v", c.Cluster.Backrest) _, ok := c.Storage[c.PrimaryStorage] if !ok { return errors.New(errPrefix + "PrimaryStorage setting required") } _, ok = c.Storage[c.BackupStorage] if !ok { return errors.New(errPrefix + "BackupStorage setting required") } _, ok = c.Storage[c.BackrestStorage] if !ok { log.Warning("BackrestStorage setting not set, will use PrimaryStorage setting") c.Storage[c.BackrestStorage] = c.Storage[c.PrimaryStorage] } _, ok = c.Storage[c.ReplicaStorage] if !ok { return errors.New(errPrefix + "ReplicaStorage setting required") } for k := range c.Storage { _, err = c.GetStorageSpec(k) if err != nil { return err } } if c.Pgo.PGOImagePrefix == "" { return errors.New(errPrefix + "Pgo.PGOImagePrefix is required") } if c.Pgo.PGOImageTag == "" { return errors.New(errPrefix + "Pgo.PGOImageTag is required") } if c.Cluster.ServiceType == "" { log.Warn("Cluster.ServiceType not set, using default, ClusterIP ") c.Cluster.ServiceType = DEFAULT_SERVICE_TYPE } else { if c.Cluster.ServiceType != DEFAULT_SERVICE_TYPE && c.Cluster.ServiceType != LOAD_BALANCER_SERVICE_TYPE && c.Cluster.ServiceType != NODEPORT_SERVICE_TYPE { return errors.New(errPrefix + "Cluster.ServiceType is required to be either ClusterIP, NodePort, or LoadBalancer") } } if c.Cluster.CCPImagePrefix == "" { return errors.New(errPrefix + "Cluster.CCPImagePrefix is required") } if c.Cluster.CCPImageTag == "" { return errors.New(errPrefix + "Cluster.CCPImageTag is required") } if c.Cluster.User == "" { return errors.New(errPrefix + "Cluster.User is required") } else { // validates that username can be used as the kubernetes secret name // Must consist of lower case alphanumeric characters, // '-' or '.', and must start and end with an alphanumeric character errs := validation.IsDNS1123Subdomain(c.Cluster.User) if len(errs) > 0 { var msg string for i := range errs { msg = msg + errs[i] } return errors.New(errPrefix + msg) } // validate any of the resources and if they are unavailable, set defaults if c.Cluster.DefaultInstanceResourceMemory.IsZero() { c.Cluster.DefaultInstanceResourceMemory = DefaultInstanceResourceMemory } log.Infof("default instance memory set to [%s]", c.Cluster.DefaultInstanceResourceMemory.String()) if c.Cluster.DefaultBackrestResourceMemory.IsZero() { c.Cluster.DefaultBackrestResourceMemory = DefaultBackrestResourceMemory } log.Infof("default pgbackrest repository memory set to [%s]", c.Cluster.DefaultBackrestResourceMemory.String()) if c.Cluster.DefaultPgBouncerResourceMemory.IsZero() { c.Cluster.DefaultPgBouncerResourceMemory = DefaultPgBouncerResourceMemory } log.Infof("default pgbouncer memory set to [%s]", 
c.Cluster.DefaultPgBouncerResourceMemory.String()) } // if provided, ensure that the type of pod anti-affinity values are valid podAntiAffinityType := crv1.PodAntiAffinityType(c.Cluster.PodAntiAffinity) if err := podAntiAffinityType.Validate(); err != nil { return errors.New(errPrefix + "Invalid value provided for Cluster.PodAntiAffinityType") } podAntiAffinityType = crv1.PodAntiAffinityType(c.Cluster.PodAntiAffinityPgBackRest) if err := podAntiAffinityType.Validate(); err != nil { return errors.New(errPrefix + "Invalid value provided for Cluster.PodAntiAffinityPgBackRest") } podAntiAffinityType = crv1.PodAntiAffinityType(c.Cluster.PodAntiAffinityPgBouncer) if err := podAntiAffinityType.Validate(); err != nil { return errors.New(errPrefix + "Invalid value provided for Cluster.PodAntiAffinityPgBouncer") } return err } // GetPodAntiAffinitySpec accepts possible user-defined values for what the // pod anti-affinity spec should be, which include rules for: // - PostgreSQL instances // - pgBackRest // - pgBouncer func (c *PgoConfig) GetPodAntiAffinitySpec(cluster, pgBackRest, pgBouncer crv1.PodAntiAffinityType) (crv1.PodAntiAffinitySpec, error) { spec := crv1.PodAntiAffinitySpec{} // first, set the values for the PostgreSQL cluster, which is the "default" // value. Otherwise, set the default to that in the configuration if cluster != "" { spec.Default = cluster } else { spec.Default = crv1.PodAntiAffinityType(c.Cluster.PodAntiAffinity) } // perform a validation check against the default type if err := spec.Default.Validate(); err != nil { log.Error(err) return spec, err } // now that the default is set, determine if the user or the configuration // overrode the settings for pgBackRest and pgBouncer. The heuristic is as // such: // // 1. If the user provides a value, use that value // 2. If there is a value provided in the configuration, use that value // 3. If there is a value in the cluster default, use that value, which also // encompasses using the default value in the config at this point in the // execution. 
// // First, do pgBackRest: switch { case pgBackRest != "": spec.PgBackRest = pgBackRest case c.Cluster.PodAntiAffinityPgBackRest != "": spec.PgBackRest = crv1.PodAntiAffinityType(c.Cluster.PodAntiAffinityPgBackRest) case spec.Default != "": spec.PgBackRest = spec.Default } // perform a validation check against the pgBackRest type if err := spec.PgBackRest.Validate(); err != nil { log.Error(err) return spec, err } // Now, pgBouncer: switch { case pgBouncer != "": spec.PgBouncer = pgBouncer case c.Cluster.PodAntiAffinityPgBouncer != "": spec.PgBouncer = crv1.PodAntiAffinityType(c.Cluster.PodAntiAffinityPgBouncer) case spec.Default != "": spec.PgBouncer = spec.Default } // perform a validation check against the pgBouncer type if err := spec.PgBouncer.Validate(); err != nil { log.Error(err) return spec, err } return spec, nil } func (c *PgoConfig) GetStorageSpec(name string) (crv1.PgStorageSpec, error) { var err error storage := crv1.PgStorageSpec{} s, ok := c.Storage[name] if !ok { err = errors.New("invalid Storage name " + name) log.Error(err) return storage, err } storage.StorageClass = s.StorageClass storage.AccessMode = s.AccessMode storage.Size = s.Size storage.StorageType = s.StorageType storage.MatchLabels = s.MatchLabels storage.SupplementalGroups = s.SupplementalGroups if storage.MatchLabels != "" { test := strings.Split(storage.MatchLabels, "=") if len(test) != 2 { err = errors.New("invalid Storage config " + name + " MatchLabels needs to be in key=value format.") log.Error(err) return storage, err } } return storage, err } func (c *PgoConfig) GetConfig(clientset *kubernetes.Clientset, namespace string) error { cMap, rootPath := getRootPath(clientset, namespace) var yamlFile []byte var err error //get the pgo.yaml config file if cMap != nil { str := cMap.Data[CONFIG_PATH] if str == "" { errMsg := fmt.Sprintf("could not get %s from ConfigMap", CONFIG_PATH) return errors.New(errMsg) } yamlFile = []byte(str) } else { yamlFile, err = ioutil.ReadFile(rootPath + CONFIG_PATH) if err != nil { log.Errorf("yamlFile.Get err #%v ", err) return err } } err = yaml.Unmarshal(yamlFile, c) if err != nil { log.Errorf("Unmarshal: %v", err) return err } // validate the pgo.yaml config file if err := c.Validate(); err != nil { log.Error(err) return err } c.CheckEnv() //load up all the templates PgoDefaultServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGODefaultServiceAccountPath) if err != nil { return err } PgoBackrestServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGOBackrestServiceAccountPath) if err != nil { return err } PgoTargetServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGOTargetServiceAccountPath) if err != nil { return err } PgoTargetRoleBindingTemplate, err = c.LoadTemplate(cMap, rootPath, PGOTargetRoleBindingPath) if err != nil { return err } PgoBackrestRoleTemplate, err = c.LoadTemplate(cMap, rootPath, PGOBackrestRolePath) if err != nil { return err } PgoBackrestRoleBindingTemplate, err = c.LoadTemplate(cMap, rootPath, PGOBackrestRoleBindingPath) if err != nil { return err } PgoTargetRoleTemplate, err = c.LoadTemplate(cMap, rootPath, PGOTargetRolePath) if err != nil { return err } PgoPgServiceAccountTemplate, err = c.LoadTemplate(cMap, rootPath, PGOPgServiceAccountPath) if err != nil { return err } PgoPgRoleTemplate, err = c.LoadTemplate(cMap, rootPath, PGOPgRolePath) if err != nil { return err } PgoPgRoleBindingTemplate, err = c.LoadTemplate(cMap, rootPath, PGOPgRoleBindingPath) if err != nil { return err } PVCTemplate, err =
c.LoadTemplate(cMap, rootPath, pvcPath) if err != nil { return err } PolicyJobTemplate, err = c.LoadTemplate(cMap, rootPath, policyJobTemplatePath) if err != nil { return err } ContainerResourcesTemplate, err = c.LoadTemplate(cMap, rootPath, containerResourcesTemplatePath) if err != nil { return err } LoadTemplate, err = c.LoadTemplate(cMap, rootPath, loadTemplatePath) if err != nil { return err } PgoBackrestRepoServiceTemplate, err = c.LoadTemplate(cMap, rootPath, pgoBackrestRepoServiceTemplatePath) if err != nil { return err } PgoBackrestRepoTemplate, err = c.LoadTemplate(cMap, rootPath, pgoBackrestRepoTemplatePath) if err != nil { return err } PgmonitorEnvVarsTemplate, err = c.LoadTemplate(cMap, rootPath, pgmonitorEnvVarsPath) if err != nil { return err } PgbackrestEnvVarsTemplate, err = c.LoadTemplate(cMap, rootPath, pgbackrestEnvVarsPath) if err != nil { return err } PgbackrestS3EnvVarsTemplate, err = c.LoadTemplate(cMap, rootPath, pgbackrestS3EnvVarsPath) if err != nil { return err } PgbouncerTemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerTemplatePath) if err != nil { return err } PgbouncerConfTemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerConfTemplatePath) if err != nil { return err } PgbouncerUsersTemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerUsersTemplatePath) if err != nil { return err } PgbouncerHBATemplate, err = c.LoadTemplate(cMap, rootPath, pgbouncerHBATemplatePath) if err != nil { return err } ServiceTemplate, err = c.LoadTemplate(cMap, rootPath, serviceTemplatePath) if err != nil { return err } RmdatajobTemplate, err = c.LoadTemplate(cMap, rootPath, rmdatajobPath) if err != nil { return err } BackrestjobTemplate, err = c.LoadTemplate(cMap, rootPath, backrestjobPath) if err != nil { return err } BackrestRestorejobTemplate, err = c.LoadTemplate(cMap, rootPath, backrestRestorejobPath) if err != nil { return err } PgDumpBackupJobTemplate, err = c.LoadTemplate(cMap, rootPath, pgDumpBackupJobPath) if err != nil { return err } PgRestoreJobTemplate, err = c.LoadTemplate(cMap, rootPath, pgRestoreJobPath) if err != nil { return err } PVCMatchLabelsTemplate, err = c.LoadTemplate(cMap, rootPath, pvcMatchLabelsPath) if err != nil { return err } PVCStorageClassTemplate, err = c.LoadTemplate(cMap, rootPath, pvcSCPath) if err != nil { return err } AffinityTemplate, err = c.LoadTemplate(cMap, rootPath, affinityTemplatePath) if err != nil { return err } PodAntiAffinityTemplate, err = c.LoadTemplate(cMap, rootPath, podAntiAffinityTemplatePath) if err != nil { return err } CollectTemplate, err = c.LoadTemplate(cMap, rootPath, collectTemplatePath) if err != nil { return err } BadgerTemplate, err = c.LoadTemplate(cMap, rootPath, badgerTemplatePath) if err != nil { return err } DeploymentTemplate, err = c.LoadTemplate(cMap, rootPath, deploymentTemplatePath) if err != nil { return err } return nil } func getRootPath(clientset *kubernetes.Clientset, namespace string) (*v1.ConfigMap, string) { cMap, found := kubeapi.GetConfigMap(clientset, CustomConfigMapName, namespace) if found { log.Infof("Config: %s ConfigMap found, using config files from the configmap", CustomConfigMapName) return cMap, "" } log.Infof("Config: %s ConfigMap NOT found, using default baked-in config files from %s", CustomConfigMapName, DefaultConfigsPath) return nil, DefaultConfigsPath } // LoadTemplate will load a JSON template from a path func (c *PgoConfig) LoadTemplate(cMap *v1.ConfigMap, rootPath, path string) (*template.Template, error) { var value string var err error // Determine if there 
exists a configmap entry for the template file. if cMap != nil { // Get the data that is stored in the configmap value = cMap.Data[path] } // if the configmap does not exist, or there is no data in the configmap for // this particular configuration template, attempt to load the template from // the default configuration if cMap == nil || value == "" { value, err = c.DefaultTemplate(path) if err != nil { return nil, err } } // if we have a value for the templated file, return return template.Must(template.New(path).Parse(value)), nil } // DefaultTemplate attempts to load a default configuration template file func (c *PgoConfig) DefaultTemplate(path string) (string, error) { // set the lookup value for the file path based on the default configuration // path and the template file requested to be loaded fullPath := DefaultConfigsPath + path log.Debugf("No entry in cmap loading default path [%s]", fullPath) // read in the file from the default path buf, err := ioutil.ReadFile(fullPath) if err != nil { log.Errorf("error: could not read %s", fullPath) log.Error(err) return "", err } // extract the value of the default configuration file and return value := string(buf) return value, nil } // CheckEnv supports the OLM deployment use case: a user deploying with OLM // keeps the baked-in configuration but may want a different set of images. // By setting these env vars in the OLM CSV, users can override the baked-in // images. func (c *PgoConfig) CheckEnv() { pgoImageTag := os.Getenv("PGO_IMAGE_TAG") if pgoImageTag != "" { c.Pgo.PGOImageTag = pgoImageTag log.Infof("CheckEnv: using PGO_IMAGE_TAG env var: %s", pgoImageTag) } pgoImagePrefix := os.Getenv("PGO_IMAGE_PREFIX") if pgoImagePrefix != "" { c.Pgo.PGOImagePrefix = pgoImagePrefix log.Infof("CheckEnv: using PGO_IMAGE_PREFIX env var: %s", pgoImagePrefix) } ccpImageTag := os.Getenv("CCP_IMAGE_TAG") if ccpImageTag != "" { c.Cluster.CCPImageTag = ccpImageTag log.Infof("CheckEnv: using CCP_IMAGE_TAG env var: %s", ccpImageTag) } ccpImagePrefix := os.Getenv("CCP_IMAGE_PREFIX") if ccpImagePrefix != "" { c.Cluster.CCPImagePrefix = ccpImagePrefix log.Infof("CheckEnv: using CCP_IMAGE_PREFIX env var: %s", ccpImagePrefix) } }
[ "\"PGO_IMAGE_TAG\"", "\"PGO_IMAGE_PREFIX\"", "\"CCP_IMAGE_TAG\"", "\"CCP_IMAGE_PREFIX\"" ]
[]
[ "PGO_IMAGE_PREFIX", "CCP_IMAGE_PREFIX", "PGO_IMAGE_TAG", "CCP_IMAGE_TAG" ]
[]
["PGO_IMAGE_PREFIX", "CCP_IMAGE_PREFIX", "PGO_IMAGE_TAG", "CCP_IMAGE_TAG"]
go
4
0
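The pod anti-affinity resolution above boils down to a first-non-empty choice across three tiers: user input, then the configuration file, then the cluster default. A minimal sketch of that heuristic, with illustrative names rather than the operator's own types:

package main

import "fmt"

// resolve returns the first non-empty value: a user-provided value wins over
// the configuration file, which wins over the cluster-wide default.
func resolve(user, config, def string) string {
	switch {
	case user != "":
		return user
	case config != "":
		return config
	default:
		return def
	}
}

func main() {
	// pgBackRest: no user override, config says "required", default "preferred"
	fmt.Println(resolve("", "required", "preferred")) // required
	// pgBouncer: the user override wins
	fmt.Println(resolve("disabled", "required", "preferred")) // disabled
}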
pkg/libratopublisher/publisher.go
package libratopublisher

import (
	"fmt"
	"os"

	"github.com/tnorris/canary/pkg/libratoaggregator"
	"github.com/tnorris/canary/pkg/sampler"
	"github.com/tnorris/canary/pkg/sensor"
)

// Publisher implements the canary.Publisher interface and
// is our means of ingesting canary.Measurements and converting
// them to Librato metrics.
type Publisher struct {
	aggregator *libratoaggregator.Aggregator
}

// New takes a user, token and source and returns a pointer
// to a Publisher.
func New(user, token, source string) (p *Publisher) {
	p = &Publisher{
		aggregator: libratoaggregator.New(user, token, source),
	}
	return
}

// NewFromEnv is a convenience func that wraps New,
// and populates the required arguments via environment variables.
// If required variables cannot be found, errors are returned.
func NewFromEnv() (*Publisher, error) {
	user := os.Getenv("LIBRATO_USER")
	if user == "" {
		return nil, fmt.Errorf("LIBRATO_USER not set in ENV")
	}

	token := os.Getenv("LIBRATO_TOKEN")
	if token == "" {
		return nil, fmt.Errorf("LIBRATO_TOKEN not set in ENV")
	}

	var err error
	source := os.Getenv("SOURCE")
	if source == "" {
		source, err = os.Hostname()
		if err != nil {
			return nil, err
		}
	}
	return New(user, token, source), nil
}

// Publish takes a canary.Measurement and delivers it to the aggregator.
func (p *Publisher) Publish(m sensor.Measurement) (err error) {
	// convert our measurement into a map of metrics and
	// send the map on to the librato aggregator
	p.aggregator.C <- mapMeasurement(m)
	return
}

// mapMeasurement takes a canary.Measurement and returns a map with all of the appropriate metrics
func mapMeasurement(m sensor.Measurement) map[string]float64 {
	metrics := make(map[string]float64)

	// latency
	latency := m.Sample.TimeEnd.Sub(m.Sample.TimeStart).Seconds() * 1000
	metrics["canary."+m.Target.Name+".latency"] = latency

	if m.Error != nil {
		// increment a general error metric
		metrics["canary."+m.Target.Name+".errors"] = 1

		// increment a specific error metric
		switch m.Error.(type) {
		case sampler.StatusCodeError:
			metrics["canary."+m.Target.Name+".errors.http"] = 1
		default:
			metrics["canary."+m.Target.Name+".errors.sampler"] = 1
		}
	}
	return metrics
}
[ "\"LIBRATO_USER\"", "\"LIBRATO_TOKEN\"", "\"SOURCE\"" ]
[]
[ "SOURCE", "LIBRATO_TOKEN", "LIBRATO_USER" ]
[]
["SOURCE", "LIBRATO_TOKEN", "LIBRATO_USER"]
go
3
0
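A hypothetical caller of NewFromEnv above might look like this; the credentials are placeholders and error handling is deliberately minimal:

package main

import (
	"log"
	"os"

	"github.com/tnorris/canary/pkg/libratopublisher"
)

func main() {
	// Assumed placeholder credentials; in real use these come from the
	// process environment rather than being set inline.
	os.Setenv("LIBRATO_USER", "metrics@example.com")
	os.Setenv("LIBRATO_TOKEN", "secret-token")
	// SOURCE is optional; NewFromEnv falls back to os.Hostname().

	p, err := libratopublisher.NewFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	_ = p // hand p to the canary scheduler in real use
}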
main.py
import numpy as np
from keras import backend as K
from plotting import lossplot, roc, prcurve
from performance import ACC, MCC
from load_data import load_data
from model import build_model, compileModel, build_model_CNN

K.set_image_data_format('channels_last')

import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "4"


def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-gene', dest='gene', default=None, type=str, help='select the gene')
    parser.add_argument('-condition', dest='condition', default=None, type=str, help='select full or exon')
    parser.add_argument('-length', dest='length', default=None, type=str, help='specify the two ends sequence length 125/250/500/1000')
    parser.add_argument('-mode', default=None, type=str, help='select your framework, CNN or CNN+RNN')
    args = parser.parse_args()

    ## assign the input value to variables
    gene = args.gene
    condition = args.condition
    length = args.length
    mode = args.mode

    data_path = '/home/yuxuan/dp/longer_seq_data/{}_{}_{}.csv'.format(gene, condition, length)
    x_train, x_test, x_val, y_test, y_train, y_val = load_data(data_path)

    if mode == 'CNN+RNN':
        model = build_model(x_train)
    else:
        model = build_model_CNN(x_train)

    history = compileModel(model, x_train, x_val, y_val, y_train, gene, condition, length)
    lossplot(history, gene, condition, length)
    auc = roc(model, x_val, y_val, gene, condition, length)
    prauc = prcurve(model, x_val, y_val, gene, condition, length)
    mcc = MCC(model, x_val, y_val)
    acc = ACC(model, x_val, y_val)
    results = np.array([auc, prauc, mcc, acc])
    np.savetxt('/home/yuxuan/dp/CNN/longseq/{}_{}_{}(RNN)_test.csv'.format(gene, condition, length), results, delimiter=',', fmt='%.3f')


if __name__ == '__main__':
    main()
[]
[]
[ "CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
python
2
0
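main.py pins the process to one GPU by exporting CUDA_VISIBLE_DEVICES before any CUDA context exists. Expressed in Go (kept in Go for consistency with the other sketches here), the same trick sets the variables in a child process's environment; "./train" is a hypothetical binary, not part of the project above:

package main

import (
	"os"
	"os/exec"
)

func main() {
	cmd := exec.Command("./train") // hypothetical training binary
	cmd.Env = append(os.Environ(),
		"CUDA_DEVICE_ORDER=PCI_BUS_ID",
		"CUDA_VISIBLE_DEVICES=4", // expose only GPU 4 to the child
	)
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	_ = cmd.Run() // error handling elided in this sketch
}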
build/tools/roomservice.py
#!/usr/bin/env python # Copyright (C) 2012-2013, The CyanogenMod Project # (C) 2017, The FuseOS Project # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import print_function import base64 import json import netrc import os import sys from xml.etree import ElementTree try: # For python3 import urllib.error import urllib.parse import urllib.request except ImportError: # For python2 import imp import urllib2 import urlparse urllib = imp.new_module('urllib') urllib.error = urllib2 urllib.parse = urlparse urllib.request = urllib2 DEBUG = False custom_local_manifest = ".repo/manifests/snippets/fuse.xml" custom_default_revision = os.getenv('ROOMSERVICE_DEFAULT_BRANCH', 'raft') custom_dependencies = "fuse.dependencies" org_manifest = "FuseOS-Devices" # leave empty if org is provided in manifest org_display = "FuseOS-Devices" # needed for displaying github_auth = None local_manifests = '.repo/manifests' if not os.path.exists(local_manifests): os.makedirs(local_manifests) def debug(*args, **kwargs): if DEBUG: print(*args, **kwargs) def add_auth(g_req): global github_auth if github_auth is None: try: auth = netrc.netrc().authenticators("api.github.com") except (netrc.NetrcParseError, IOError): auth = None if auth: github_auth = base64.b64encode( ('%s:%s' % (auth[0], auth[2])).encode() ) else: github_auth = "" if github_auth: g_req.add_header("Authorization", "Basic %s" % github_auth) def indent(elem, level=0): # in-place prettyprint formatter i = "\n" + " " * level if len(elem): if not elem.text or not elem.text.strip(): elem.text = i + " " if not elem.tail or not elem.tail.strip(): elem.tail = i for elem in elem: indent(elem, level+1) if not elem.tail or not elem.tail.strip(): elem.tail = i else: if level and (not elem.tail or not elem.tail.strip()): elem.tail = i def get_manifest_path(): '''Find the current manifest path In old versions of repo this is at .repo/manifest.xml In new versions, .repo/manifest.xml includes an include to some arbitrary file in .repo/manifests''' m = ElementTree.parse(".repo/manifest.xml") try: m.findall('default')[0] return '.repo/manifest.xml' except IndexError: return ".repo/manifests/{}".format(m.find("include").get("name")) def load_manifest(manifest): try: man = ElementTree.parse(manifest).getroot() except (IOError, ElementTree.ParseError): man = ElementTree.Element("manifest") return man def get_default(manifest=None): m = manifest or load_manifest(get_manifest_path()) d = m.findall('default')[0] return d def get_remote(manifest=None, remote_name=None): m = manifest or load_manifest(get_manifest_path()) if not remote_name: remote_name = get_default(manifest=m).get('remote') remotes = m.findall('remote') for remote in remotes: if remote_name == remote.get('name'): return remote def get_from_manifest(device_name): if os.path.exists(custom_local_manifest): man = load_manifest(custom_local_manifest) for local_path in man.findall("project"): lp = local_path.get("path").strip('/') if lp.startswith("device/") and lp.endswith("/" + device_name): return lp return None def
is_in_manifest(project_path): for man in (custom_local_manifest, get_manifest_path()): man = load_manifest(man) for local_path in man.findall("project"): if local_path.get("path") == project_path: return True return False def add_to_manifest(repos, fallback_branch=None): lm = load_manifest(custom_local_manifest) for repo in repos: repo_name = repo['repository'] repo_path = repo['target_path'] if 'branch' in repo: repo_branch=repo['branch'] else: repo_branch=custom_default_revision if 'remote' in repo: repo_remote=repo['remote'] elif "/" not in repo_name: repo_remote=org_manifest elif "/" in repo_name: repo_remote="github" if is_in_manifest(repo_path): print('already exists: %s' % repo_path) continue print('Adding dependency:\nRepository: %s\nBranch: %s\nRemote: %s\nPath: %s\n' % (repo_name, repo_branch,repo_remote, repo_path)) project = ElementTree.Element( "project", attrib={"path": repo_path, "remote": repo_remote, "name": "%s" % repo_name} ) clone_depth = os.getenv('ROOMSERVICE_CLONE_DEPTH') if clone_depth: project.set('clone-depth', clone_depth) if repo_branch is not None: project.set('revision', repo_branch) elif fallback_branch: print("Using branch %s for %s" % (fallback_branch, repo_name)) project.set('revision', fallback_branch) else: print("Using default branch for %s" % repo_name) if 'clone-depth' in repo: print("Setting clone-depth to %s for %s" % (repo['clone-depth'], repo_name)) project.set('clone-depth', repo['clone-depth']) lm.append(project) indent(lm) raw_xml = "\n".join(('<?xml version="1.0" encoding="UTF-8"?>', ElementTree.tostring(lm).decode())) f = open(custom_local_manifest, 'w') f.write(raw_xml) f.close() _fetch_dep_cache = [] def fetch_dependencies(repo_path, fallback_branch=None): global _fetch_dep_cache if repo_path in _fetch_dep_cache: return _fetch_dep_cache.append(repo_path) print('Looking for dependencies') dep_p = '/'.join((repo_path, custom_dependencies)) if os.path.exists(dep_p): with open(dep_p) as dep_f: dependencies = json.load(dep_f) else: dependencies = {} print('%s has no additional dependencies.' 
% repo_path) fetch_list = [] syncable_repos = [] for dependency in dependencies: if not is_in_manifest(dependency['target_path']): if not dependency.get('branch'): dependency['branch'] = custom_default_revision fetch_list.append(dependency) syncable_repos.append(dependency['target_path']) else: print("Dependency already present in manifest: %s => %s" % (dependency['repository'], dependency['target_path'])) if fetch_list: print('Adding dependencies to manifest\n') add_to_manifest(fetch_list, fallback_branch) if syncable_repos: print('Syncing dependencies') os.system('repo sync --force-sync --no-tags --current-branch --no-clone-bundle %s' % ' '.join(syncable_repos)) for deprepo in syncable_repos: fetch_dependencies(deprepo) def has_branch(branches, revision): return revision in (branch['name'] for branch in branches) def detect_revision(repo): """ returns None if using the default revision, else return the branch name if using a different revision """ print("Checking branch info") githubreq = urllib.request.Request( repo['branches_url'].replace('{/branch}', '')) add_auth(githubreq) result = json.loads(urllib.request.urlopen(githubreq).read().decode()) print("Calculated revision: %s" % custom_default_revision) if has_branch(result, custom_default_revision): return custom_default_revision print("Branch %s not found" % custom_default_revision) sys.exit() def main(): global DEBUG try: depsonly = bool(sys.argv[2] in ['true', 1]) except IndexError: depsonly = False if os.getenv('ROOMSERVICE_DEBUG'): DEBUG = True product = sys.argv[1] device = product[product.find("_") + 1:] or product if depsonly: repo_path = get_from_manifest(device) if repo_path: fetch_dependencies(repo_path) else: print("Trying dependencies-only mode on a " "non-existing device tree?") sys.exit() print("Device {0} not found. Attempting to retrieve device repository from " "{1} Github (http://github.com/{1}).".format(device, org_display)) githubreq = urllib.request.Request( "https://api.github.com/search/repositories?" "q={0}+user:{1}+in:name+fork:true".format(device, org_display)) add_auth(githubreq) repositories = [] try: result = json.loads(urllib.request.urlopen(githubreq).read().decode()) except urllib.error.URLError: print("Failed to search GitHub") sys.exit() except ValueError: print("Failed to parse return data from GitHub") sys.exit() for res in result.get('items', []): repositories.append(res) for repository in repositories: repo_name = repository['name'] if not (repo_name.startswith("device_") and repo_name.endswith("_" + device)): continue print("Found repository: %s" % repository['name']) fallback_branch = detect_revision(repository) manufacturer = repo_name[7:-(len(device)+1)] repo_path = "device/%s/%s" % (manufacturer, device) adding = [{'repository': repo_name, 'target_path': repo_path}] add_to_manifest(adding, fallback_branch) print("Syncing repository to retrieve project.") os.system('repo sync --force-sync --no-tags --current-branch --no-clone-bundle %s' % repo_path) print("Repository synced!") fetch_dependencies(repo_path, fallback_branch) print("Done") sys.exit() print("Repository for %s not found in the %s Github repository list." % (device, org_display)) print("If this is in error, you may need to manually add it to your " "%s" % custom_local_manifest) if __name__ == "__main__": main()
[]
[]
[ "ROOMSERVICE_DEBUG", "ROOMSERVICE_CLONE_DEPTH", "ROOMSERVICE_DEFAULT_BRANCH" ]
[]
["ROOMSERVICE_DEBUG", "ROOMSERVICE_CLONE_DEPTH", "ROOMSERVICE_DEFAULT_BRANCH"]
python
3
0
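roomservice.py reads ROOMSERVICE_DEFAULT_BRANCH with a fallback of 'raft'. Go's os.Getenv has no default parameter, so the usual equivalent goes through os.LookupEnv; a minimal sketch (kept in Go for consistency with the other examples in this collection):

package main

import (
	"fmt"
	"os"
)

// getenvDefault returns the value of key, or def when the variable is unset.
// Like Python's os.getenv(key, default), an empty-but-set value is returned
// as-is rather than replaced by the default.
func getenvDefault(key, def string) string {
	if v, ok := os.LookupEnv(key); ok {
		return v
	}
	return def
}

func main() {
	branch := getenvDefault("ROOMSERVICE_DEFAULT_BRANCH", "raft")
	fmt.Println("default branch:", branch)
}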
cmd/sailfish/sdnotify.go
// Code forked from Docker project package main import ( "errors" "net" "os" ) var SdNotifyNoSocket = errors.New("No socket") // SdNotify sends a message to the init daemon. It is common to ignore the error. func SdNotify(state string) error { socketAddr := &net.UnixAddr{ Name: os.Getenv("NOTIFY_SOCKET"), Net: "unixgram", } if socketAddr.Name == "" { return SdNotifyNoSocket } conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) if err != nil { return err } defer conn.Close() _, err = conn.Write([]byte(state)) return err }
[ "\"NOTIFY_SOCKET\"" ]
[]
[ "NOTIFY_SOCKET" ]
[]
["NOTIFY_SOCKET"]
go
1
0
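A hypothetical call site for SdNotify above, assuming it sits in the same package as the file (cmd/sailfish is package main). Under systemd, a Type=notify unit reports readiness once startup work is done; a missing socket just means the process is not running under systemd:

// notifyReady reports readiness to systemd once startup work is complete.
func notifyReady() {
	if err := SdNotify("READY=1"); err != nil && err != SdNotifyNoSocket {
		// Only surface unexpected failures; SdNotifyNoSocket is expected
		// outside of systemd and is commonly ignored.
		println("sd_notify:", err.Error())
	}
}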
train.py
import argparse
import os

import torch


def train(cfg):
    # helpers such as build_model, make_optimizer, make_lr_scheduler,
    # make_dataloader, reduce_loss_dict, meters, checkpointer, start_iter,
    # max_iter, checkpoint_period, eta_string and arguments come from the
    # surrounding FCOS training utilities and are not defined in this file
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = build_model(cfg).to(device)
    optimizer = make_optimizer(cfg, model)
    scheduler = make_lr_scheduler(cfg, optimizer)
    dataset = make_dataloader(cfg, is_train=True)
    for iteration, (images, targets, _) in enumerate(dataset, start_iter):
        images = images.to(device)
        targets = [target.to(device) for target in targets]
        loss_dict = model(images, targets)
        losses = sum(loss for loss in loss_dict.values())

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = reduce_loss_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        meters.update(loss=losses_reduced, **loss_dict_reduced)

        optimizer.zero_grad()
        losses.backward()
        optimizer.step()
        scheduler.step()

        if iteration % 20 == 0 or iteration == max_iter:
            logger.info(
                meters.delimiter.join(
                    [
                        "eta: {eta}",
                        "iter: {iter}",
                        "{meters}",
                        "lr: {lr:.6f}",
                        "max mem: {memory:.0f}",
                    ]
                ).format(
                    eta=eta_string,
                    iter=iteration,
                    meters=str(meters),
                    lr=optimizer.param_groups[0]["lr"],
                    memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
                )
            )
        if iteration % checkpoint_period == 0:
            checkpointer.save("model_{:07d}".format(iteration), **arguments)
        if iteration == max_iter:
            checkpointer.save("model_final", **arguments)


def main():
    parser = argparse.ArgumentParser(description="FCOS Training")
    parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file", type=str)
    parser.add_argument("--local_rank", type=int, default=0)
    parser.add_argument("--skip-test", dest="skip_test", help="Do not test the final model", action="store_true")
    parser.add_argument("opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER)
    args = parser.parse_args()

    num_gpus = int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
    logger = setup_logger("fcos_core", output_dir, get_rank())
    logger.info("Using {} GPUs".format(num_gpus))
    logger.info(args)

    logger.info("Collecting env info (might take some time)")
    logger.info("\n" + collect_env_info())
    logger.info("Loaded configuration file {}".format(args.config_file))
[]
[]
[ "WORLD_SIZE" ]
[]
["WORLD_SIZE"]
python
1
0
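train.py derives its GPU count from WORLD_SIZE, defaulting to 1 when the variable is absent. The same guard in Go (a sketch; unlike the Python int() cast, a non-numeric value falls back to 1 here instead of raising):

package main

import (
	"fmt"
	"os"
	"strconv"
)

// numGPUs mirrors the WORLD_SIZE lookup: unset, malformed, or non-positive
// values all collapse to a single-GPU default.
func numGPUs() int {
	v, ok := os.LookupEnv("WORLD_SIZE")
	if !ok {
		return 1
	}
	n, err := strconv.Atoi(v)
	if err != nil || n < 1 {
		return 1
	}
	return n
}

func main() {
	fmt.Println("Using", numGPUs(), "GPUs")
}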
parsl/executors/high_throughput/process_worker_pool.py
#!/usr/bin/env python3 import argparse import logging import os import sys import platform # import random import threading import pickle import time import datetime import queue import uuid import zmq import math import json import psutil from parsl.version import VERSION as PARSL_VERSION from parsl.app.errors import RemoteExceptionWrapper from parsl.executors.high_throughput.errors import WorkerLost import multiprocessing from ipyparallel.serialize import unpack_apply_message # pack_apply_message, from ipyparallel.serialize import serialize_object RESULT_TAG = 10 TASK_REQUEST_TAG = 11 HEARTBEAT_CODE = (2 ** 32) - 1 class Manager(object): """ Manager manages task execution by the workers | 0mq | Manager | Worker Processes | | | | <-----Request N task-----+--Count task reqs | Request task<--+ Interchange | -------------------------+->Receive task batch| | | | | Distribute tasks--+----> Get(block) & | | | | Execute task | | | | | | | <------------------------+--Return results----+---- Post result | | | | | | | | | +----------+ | | IPC-Queues """ def __init__(self, task_q_url="tcp://127.0.0.1:50097", result_q_url="tcp://127.0.0.1:50098", cores_per_worker=1, mem_per_worker=None, max_workers=float('inf'), prefetch_capacity=0, uid=None, block_id=None, heartbeat_threshold=120, heartbeat_period=30, poll_period=10): """ Parameters ---------- task_q_url : str Interchange url from which the manager pulls tasks result_q_url : str Interchange url to which the manager posts results uid : str string unique identifier block_id : str Block identifier that maps managers to the provider blocks they belong to. cores_per_worker : float cores to be assigned to each worker. Oversubscription is possible by setting cores_per_worker < 1.0. Default=1 mem_per_worker : float GB of memory required per worker. If this option is specified, the node manager will check the available memory at startup and limit the number of workers such that there's sufficient memory for each worker. If set to None, memory on node is not considered in the determination of workers to be launched on node by the manager. Default: None max_workers : int caps the maximum number of workers that can be launched. default: infinity prefetch_capacity : int Number of tasks that could be prefetched over available worker capacity. When there are a few tasks (<100) or when tasks are long running, this option should be set to 0 for better load balancing. Default is 0. heartbeat_threshold : int Seconds since the last message from the interchange after which the interchange is assumed to be un-available, and the manager initiates shutdown. Default: 120 heartbeat_period : int Number of seconds after which a heartbeat message is sent to the interchange poll_period : int Timeout period used by the manager in milliseconds.
Default: 10ms """ logger.info("Manager started") self.context = zmq.Context() self.task_incoming = self.context.socket(zmq.DEALER) self.task_incoming.setsockopt(zmq.IDENTITY, uid.encode('utf-8')) # Linger is set to 0, so that the manager can exit even when there might be # messages in the pipe self.task_incoming.setsockopt(zmq.LINGER, 0) self.task_incoming.connect(task_q_url) self.result_outgoing = self.context.socket(zmq.DEALER) self.result_outgoing.setsockopt(zmq.IDENTITY, uid.encode('utf-8')) self.result_outgoing.setsockopt(zmq.LINGER, 0) self.result_outgoing.connect(result_q_url) logger.info("Manager connected") self.uid = uid self.block_id = block_id if os.environ.get('PARSL_CORES'): cores_on_node = int(os.environ['PARSL_CORES']) else: cores_on_node = multiprocessing.cpu_count() if os.environ.get('PARSL_MEMORY_GB'): available_mem_on_node = float(os.environ['PARSL_MEMORY_GB']) else: available_mem_on_node = round(psutil.virtual_memory().available / (2**30), 1) self.max_workers = max_workers self.prefetch_capacity = prefetch_capacity mem_slots = max_workers # Avoid a divide by 0 error. if mem_per_worker and mem_per_worker > 0: mem_slots = math.floor(available_mem_on_node / mem_per_worker) self.worker_count = min(max_workers, mem_slots, math.floor(cores_on_node / cores_per_worker)) logger.info("Manager will spawn {} workers".format(self.worker_count)) self.pending_task_queue = multiprocessing.Queue() self.pending_result_queue = multiprocessing.Queue() self.ready_worker_queue = multiprocessing.Queue() self.max_queue_size = self.prefetch_capacity + self.worker_count self.tasks_per_round = 1 self.heartbeat_period = heartbeat_period self.heartbeat_threshold = heartbeat_threshold self.poll_period = poll_period def create_reg_message(self): """ Creates a registration message to identify the worker to the interchange """ msg = {'parsl_v': PARSL_VERSION, 'python_v': "{}.{}.{}".format(sys.version_info.major, sys.version_info.minor, sys.version_info.micro), 'worker_count': self.worker_count, 'block_id': self.block_id, 'prefetch_capacity': self.prefetch_capacity, 'max_capacity': self.worker_count + self.prefetch_capacity, 'os': platform.system(), 'hostname': platform.node(), 'dir': os.getcwd(), 'cpu_count': psutil.cpu_count(logical=False), 'total_memory': psutil.virtual_memory().total, 'reg_time': datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), } b_msg = json.dumps(msg).encode('utf-8') return b_msg def heartbeat(self): """ Send heartbeat to the incoming task queue """ heartbeat = (HEARTBEAT_CODE).to_bytes(4, "little") r = self.task_incoming.send(heartbeat) logger.debug("Return from heartbeat: {}".format(r)) def pull_tasks(self, kill_event): """ Pull tasks from the incoming tasks 0mq pipe onto the internal pending task queue Parameters: ----------- kill_event : threading.Event Event to let the thread know when it is time to die. 
""" logger.info("[TASK PULL THREAD] starting") poller = zmq.Poller() poller.register(self.task_incoming, zmq.POLLIN) # Send a registration message msg = self.create_reg_message() logger.debug("Sending registration message: {}".format(msg)) self.task_incoming.send(msg) last_beat = time.time() last_interchange_contact = time.time() task_recv_counter = 0 poll_timer = self.poll_period while not kill_event.is_set(): ready_worker_count = self.ready_worker_queue.qsize() pending_task_count = self.pending_task_queue.qsize() logger.debug("[TASK_PULL_THREAD] ready workers:{}, pending tasks:{}".format(ready_worker_count, pending_task_count)) if time.time() > last_beat + self.heartbeat_period: self.heartbeat() last_beat = time.time() if pending_task_count < self.max_queue_size and ready_worker_count > 0: logger.debug("[TASK_PULL_THREAD] Requesting tasks: {}".format(ready_worker_count)) msg = ((ready_worker_count).to_bytes(4, "little")) self.task_incoming.send(msg) socks = dict(poller.poll(timeout=poll_timer)) if self.task_incoming in socks and socks[self.task_incoming] == zmq.POLLIN: poll_timer = 0 _, pkl_msg = self.task_incoming.recv_multipart() tasks = pickle.loads(pkl_msg) last_interchange_contact = time.time() if tasks == 'STOP': logger.critical("[TASK_PULL_THREAD] Received stop request") kill_event.set() break elif tasks == HEARTBEAT_CODE: logger.debug("Got heartbeat from interchange") else: task_recv_counter += len(tasks) logger.debug("[TASK_PULL_THREAD] Got tasks: {} of {}".format([t['task_id'] for t in tasks], task_recv_counter)) for task in tasks: self.pending_task_queue.put(task) # logger.debug("[TASK_PULL_THREAD] Ready tasks: {}".format( # [i['task_id'] for i in self.pending_task_queue])) else: logger.debug("[TASK_PULL_THREAD] No incoming tasks") # Limit poll duration to heartbeat_period # heartbeat_period is in s vs poll_timer in ms if not poll_timer: poll_timer = self.poll_period poll_timer = min(self.heartbeat_period * 1000, poll_timer * 2) # Only check if no messages were received. if time.time() > last_interchange_contact + self.heartbeat_threshold: logger.critical("[TASK_PULL_THREAD] Missing contact with interchange beyond heartbeat_threshold") kill_event.set() logger.critical("[TASK_PULL_THREAD] Exiting") break def push_results(self, kill_event): """ Listens on the pending_result_queue and sends out results via 0mq Parameters: ----------- kill_event : threading.Event Event to let the thread know when it is time to die. """ logger.debug("[RESULT_PUSH_THREAD] Starting thread") push_poll_period = max(10, self.poll_period) / 1000 # push_poll_period must be atleast 10 ms logger.debug("[RESULT_PUSH_THREAD] push poll period: {}".format(push_poll_period)) last_beat = time.time() items = [] while not kill_event.is_set(): try: r = self.pending_result_queue.get(block=True, timeout=push_poll_period) items.append(r) except queue.Empty: pass except Exception as e: logger.exception("[RESULT_PUSH_THREAD] Got an exception: {}".format(e)) # If we have reached poll_period duration or timer has expired, we send results if len(items) >= self.max_queue_size or time.time() > last_beat + push_poll_period: last_beat = time.time() if items: self.result_outgoing.send_multipart(items) items = [] logger.critical("[RESULT_PUSH_THREAD] Exiting") def worker_watchdog(self, kill_event): """ Listens on the pending_result_queue and sends out results via 0mq Parameters: ----------- kill_event : threading.Event Event to let the thread know when it is time to die. 
""" logger.debug("[WORKER_WATCHDOG_THREAD] Starting thread") while not kill_event.is_set(): for worker_id, p in self.procs.items(): if not p.is_alive(): logger.info("[WORKER_WATCHDOG_THREAD] Worker {} has died".format(worker_id)) try: task = self._tasks_in_progress.pop(worker_id) logger.info("[WORKER_WATCHDOG_THREAD] Worker {} was busy when it died".format(worker_id)) try: raise WorkerLost(worker_id, platform.node()) except Exception: logger.info("[WORKER_WATCHDOG_THREAD] Putting exception for task {} in the pending result queue".format(task['task_id'])) result_package = {'task_id': task['task_id'], 'exception': serialize_object(RemoteExceptionWrapper(*sys.exc_info()))} pkl_package = pickle.dumps(result_package) self.pending_result_queue.put(pkl_package) except KeyError: logger.info("[WORKER_WATCHDOG_THREAD] Worker {} was not busy when it died".format(worker_id)) p = multiprocessing.Process(target=worker, args=(worker_id, self.uid, self.pending_task_queue, self.pending_result_queue, self.ready_worker_queue, self._tasks_in_progress ), name="HTEX-Worker-{}".format(worker_id)) self.procs[worker_id] = p logger.info("[WORKER_WATCHDOG_THREAD] Worker {} has been restarted".format(worker_id)) time.sleep(self.poll_period) logger.critical("[WORKER_WATCHDOG_THREAD] Exiting") def start(self): """ Start the worker processes. TODO: Move task receiving to a thread """ start = time.time() self._kill_event = threading.Event() self._tasks_in_progress = multiprocessing.Manager().dict() self.procs = {} for worker_id in range(self.worker_count): p = multiprocessing.Process(target=worker, args=(worker_id, self.uid, self.pending_task_queue, self.pending_result_queue, self.ready_worker_queue, self._tasks_in_progress ), name="HTEX-Worker-{}".format(worker_id)) p.start() self.procs[worker_id] = p logger.debug("Manager synced with workers") self._task_puller_thread = threading.Thread(target=self.pull_tasks, args=(self._kill_event,), name="Task-Puller") self._result_pusher_thread = threading.Thread(target=self.push_results, args=(self._kill_event,), name="Result-Pusher") self._worker_watchdog_thread = threading.Thread(target=self.worker_watchdog, args=(self._kill_event,), name="worker-watchdog") self._task_puller_thread.start() self._result_pusher_thread.start() self._worker_watchdog_thread.start() logger.info("Loop start") # TODO : Add mechanism in this loop to stop the worker pool # This might need a multiprocessing event to signal back. self._kill_event.wait() logger.critical("[MAIN] Received kill event, terminating worker processes") self._task_puller_thread.join() self._result_pusher_thread.join() self._worker_watchdog_thread.join() for proc_id in self.procs: self.procs[proc_id].terminate() logger.critical("Terminating worker {}:{}".format(self.procs[proc_id], self.procs[proc_id].is_alive())) self.procs[proc_id].join() logger.debug("Worker:{} joined successfully".format(self.procs[proc_id])) self.task_incoming.close() self.result_outgoing.close() self.context.term() delta = time.time() - start logger.info("process_worker_pool ran for {} seconds".format(delta)) return def execute_task(bufs): """Deserialize the buffer and execute the task. Returns the result or throws exception. 
""" user_ns = locals() user_ns.update({'__builtins__': __builtins__}) f, args, kwargs = unpack_apply_message(bufs, user_ns, copy=False) # We might need to look into callability of the function from itself # since we change it's name in the new namespace prefix = "parsl_" fname = prefix + "f" argname = prefix + "args" kwargname = prefix + "kwargs" resultname = prefix + "result" user_ns.update({fname: f, argname: args, kwargname: kwargs, resultname: resultname}) code = "{0} = {1}(*{2}, **{3})".format(resultname, fname, argname, kwargname) try: # logger.debug("[RUNNER] Executing: {0}".format(code)) exec(code, user_ns, user_ns) except Exception as e: logger.warning("Caught exception; will raise it: {}".format(e), exc_info=True) raise e else: # logger.debug("[RUNNER] Result: {0}".format(user_ns.get(resultname))) return user_ns.get(resultname) def worker(worker_id, pool_id, task_queue, result_queue, worker_queue, tasks_in_progress): """ Put request token into queue Get task from task_queue Pop request from queue Put result into result_queue """ start_file_logger('{}/{}/worker_{}.log'.format(args.logdir, pool_id, worker_id), worker_id, name="worker_log", level=logging.DEBUG if args.debug else logging.INFO) # Sync worker with master logger.info('Worker {} started'.format(worker_id)) if args.debug: logger.debug("Debug logging enabled") while True: worker_queue.put(worker_id) # The worker will receive {'task_id':<tid>, 'buffer':<buf>} req = task_queue.get() tasks_in_progress[worker_id] = req tid = req['task_id'] logger.info("Received task {}".format(tid)) try: worker_queue.get() except queue.Empty: logger.warning("Worker ID: {} failed to remove itself from ready_worker_queue".format(worker_id)) pass try: result = execute_task(req['buffer']) serialized_result = serialize_object(result) except Exception as e: logger.info('Caught an exception: {}'.format(e)) result_package = {'task_id': tid, 'exception': serialize_object(RemoteExceptionWrapper(*sys.exc_info()))} else: result_package = {'task_id': tid, 'result': serialized_result} # logger.debug("Result: {}".format(result)) logger.info("Completed task {}".format(tid)) pkl_package = pickle.dumps(result_package) result_queue.put(pkl_package) tasks_in_progress.pop(worker_id) def start_file_logger(filename, rank, name='parsl', level=logging.DEBUG, format_string=None): """Add a stream log handler. Args: - filename (string): Name of the file to write logs to - name (string): Logger name - level (logging.LEVEL): Set the logging level. - format_string (string): Set the format string Returns: - None """ if format_string is None: format_string = "%(asctime)s.%(msecs)03d %(name)s:%(lineno)d Rank:{0} [%(levelname)s] %(message)s".format(rank) global logger logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) handler = logging.FileHandler(filename) handler.setLevel(level) formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S') handler.setFormatter(formatter) logger.addHandler(handler) def set_stream_logger(name='parsl', level=logging.DEBUG, format_string=None): """Add a stream log handler. Args: - name (string) : Set the logger name. - level (logging.LEVEL) : Set to logging.DEBUG by default. - format_string (sting) : Set to None by default. 
Returns: - None """ if format_string is None: format_string = "%(asctime)s %(name)s [%(levelname)s] Thread:%(thread)d %(message)s" # format_string = "%(asctime)s %(name)s:%(lineno)d [%(levelname)s] %(message)s" global logger logger = logging.getLogger(name) logger.setLevel(logging.DEBUG) handler = logging.StreamHandler() handler.setLevel(level) formatter = logging.Formatter(format_string, datefmt='%Y-%m-%d %H:%M:%S') handler.setFormatter(formatter) logger.addHandler(handler) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-d", "--debug", action='store_true', help="Enable debug logging") parser.add_argument("-l", "--logdir", default="process_worker_pool_logs", help="Process worker pool log directory") parser.add_argument("-u", "--uid", default=str(uuid.uuid4()).split('-')[-1], help="Unique identifier string for Manager") parser.add_argument("-b", "--block_id", default=None, help="Block identifier for Manager") parser.add_argument("-c", "--cores_per_worker", default="1.0", help="Number of cores assigned to each worker process. Default=1.0") parser.add_argument("-m", "--mem_per_worker", default=0, help="GB of memory assigned to each worker process. Default=0, no assignment") parser.add_argument("-t", "--task_url", required=True, help="REQUIRED: ZMQ url for receiving tasks") parser.add_argument("--max_workers", default=float('inf'), help="Caps the maximum workers that can be launched, default:infinity") parser.add_argument("-p", "--prefetch_capacity", default=0, help="Number of tasks that can be prefetched to the manager. Default is 0.") parser.add_argument("--hb_period", default=30, help="Heartbeat period in seconds. Uses manager default unless set") parser.add_argument("--hb_threshold", default=120, help="Heartbeat threshold in seconds.
Uses manager default unless set") parser.add_argument("--poll", default=10, help="Poll period used in milliseconds") parser.add_argument("-r", "--result_url", required=True, help="REQUIRED: ZMQ url for posting results") args = parser.parse_args() os.makedirs(os.path.join(args.logdir, args.uid), exist_ok=True) try: start_file_logger('{}/{}/manager.log'.format(args.logdir, args.uid), 0, level=logging.DEBUG if args.debug is True else logging.INFO) logger.info("Python version: {}".format(sys.version)) logger.info("Debug logging: {}".format(args.debug)) logger.info("Log dir: {}".format(args.logdir)) logger.info("Manager ID: {}".format(args.uid)) logger.info("Block ID: {}".format(args.block_id)) logger.info("cores_per_worker: {}".format(args.cores_per_worker)) logger.info("mem_per_worker: {}".format(args.mem_per_worker)) logger.info("task_url: {}".format(args.task_url)) logger.info("result_url: {}".format(args.result_url)) logger.info("max_workers: {}".format(args.max_workers)) logger.info("poll_period: {}".format(args.poll)) logger.info("Prefetch capacity: {}".format(args.prefetch_capacity)) manager = Manager(task_q_url=args.task_url, result_q_url=args.result_url, uid=args.uid, block_id=args.block_id, cores_per_worker=float(args.cores_per_worker), mem_per_worker=None if args.mem_per_worker == 'None' else float(args.mem_per_worker), max_workers=args.max_workers if args.max_workers == float('inf') else int(args.max_workers), prefetch_capacity=int(args.prefetch_capacity), heartbeat_threshold=int(args.hb_threshold), heartbeat_period=int(args.hb_period), poll_period=int(args.poll)) manager.start() except Exception as e: logger.critical("process_worker_pool exiting from an exception") logger.exception("Caught error: {}".format(e)) raise else: logger.info("process_worker_pool exiting") print("PROCESS_WORKER_POOL exiting")
[]
[]
[ "PARSL_MEMORY_GB", "PARSL_CORES" ]
[]
["PARSL_MEMORY_GB", "PARSL_CORES"]
python
2
0
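The Manager above sizes its pool as the minimum of three limits: an explicit cap, memory slots (available GB divided by GB per worker), and core slots (cores divided by cores per worker). A standalone sketch of that arithmetic with made-up inputs:

package main

import (
	"fmt"
	"math"
)

// workerCount reproduces the sizing rule: min(max cap, memory slots, core
// slots). A memPerWorkerGB of 0 disables the memory limit, matching the
// mem_per_worker default above.
func workerCount(maxWorkers int, availMemGB, memPerWorkerGB, cores, coresPerWorker float64) int {
	memSlots := maxWorkers
	if memPerWorkerGB > 0 {
		memSlots = int(math.Floor(availMemGB / memPerWorkerGB))
	}
	coreSlots := int(math.Floor(cores / coresPerWorker))
	n := maxWorkers
	if memSlots < n {
		n = memSlots
	}
	if coreSlots < n {
		n = coreSlots
	}
	return n
}

func main() {
	// 16 cores, 62 GB free, 4 GB per worker, 1 core per worker, cap of 100:
	// memory is the binding constraint, so 15 workers are launched.
	fmt.Println(workerCount(100, 62, 4, 16, 1)) // 15
}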
djangoapp/MainApp/settings.py
""" Django settings for MainApp project. Generated by 'django-admin startproject' using Django 3.2.7. For more information on this file, see https://docs.djangoproject.com/en/3.2/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.2/ref/settings/ """ import os from pathlib import Path # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get('SECRET_KEY') # SECURITY WARNING: don't run with debug turned on in production! DEBUG = bool(int(os.environ.get('DEBUG', 0))) ALLOWED_HOSTS = [] ALLOWED_HOSTS.extend( filter( None, os.environ.get('ALLOWED_HOSTS', '').split(','), ) ) # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'rest_framework', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'MainApp.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'MainApp.wsgi.application' # Database # https://docs.djangoproject.com/en/3.2/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql', 'HOST': os.environ.get('DB_HOST'), 'NAME': os.environ.get('DB_NAME'), 'USER': os.environ.get('DB_USER'), 'PASSWORD': os.environ.get('DB_PASS'), } } # Password validation # https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/3.2/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/3.2/howto/static-files/ STATIC_URL = '/static/' # Default primary key field type # https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
[]
[]
[ "ALLOWED_HOSTS", "DB_HOST", "DB_NAME", "DB_PASS", "SECRET_KEY", "DEBUG", "DB_USER" ]
[]
["ALLOWED_HOSTS", "DB_HOST", "DB_NAME", "DB_PASS", "SECRET_KEY", "DEBUG", "DB_USER"]
python
7
0
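settings.py builds DEBUG from an integer env var and ALLOWED_HOSTS by comma-splitting and dropping empty entries. Roughly the same parsing in Go (a sketch; the == "1" test simplifies Python's bool(int(...)) and nothing here is Django API):

package main

import (
	"fmt"
	"os"
	"strings"
)

func main() {
	// settings.py: bool(int(os.environ.get('DEBUG', 0))); simplified here to
	// treating "1" as true and everything else as false.
	debug := os.Getenv("DEBUG") == "1"

	// Split on commas and drop empty entries, like filter(None, ...).
	var allowedHosts []string
	for _, h := range strings.Split(os.Getenv("ALLOWED_HOSTS"), ",") {
		if h = strings.TrimSpace(h); h != "" {
			allowedHosts = append(allowedHosts, h)
		}
	}
	fmt.Println(debug, allowedHosts)
}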
eval.py
#!/usr/bin/python

# set default GPU to gpu 0
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

import argparse
import glob
import re
import numpy as np


def recog_file(filename, ground_truth_path):
    # read ground truth
    gt_file = ground_truth_path + re.sub('.*/', '/', filename) + '.txt'
    with open(gt_file, 'r') as f:
        ground_truth = f.read().split('\n')[0:-1]
    # read recognized sequence
    with open(filename, 'r') as f:
        recognized = f.read().split('\n')[5].split()  # framelevel recognition is in 6-th line of file

    n_frame_errors = 0
    n_subactions = 0
    for i in range(len(recognized)):
        if not recognized[i] == ground_truth[i]:
            n_frame_errors += 1
        if i == 0 or not ground_truth[i] == ground_truth[i-1]:
            n_subactions += 1
    return n_frame_errors, len(recognized), n_subactions


### MAIN #######################################################################
### arguments ###
### --recog_dir: the directory where the recognition files from inference.py are placed
### --ground_truth_dir: the directory where the framelevel ground truth can be found
parser = argparse.ArgumentParser()
parser.add_argument('--recog_dir', default='results')
parser.add_argument('--ground_truth_dir', default='data/groundTruth')
args = parser.parse_args()

filelist = glob.glob(args.recog_dir + '/P*')
print('Evaluate %d video files...' % len(filelist))

n_frames = 0
n_errors = 0
stats = []
# loop over all recognition files and evaluate the frame error
for filename in filelist:
    errors, frames, n_subactions = recog_file(filename, args.ground_truth_dir)
    n_errors += errors
    n_frames += frames
    stats.append([errors, frames, n_subactions])

# print frame accuracy (1.0 - frame error rate)
print('frame accuracy: %f' % (1.0 - float(n_errors) / n_frames))
# np.save expects the filename first; the original call had the arguments swapped
np.save('errors_frames_subactions_stats.npy', np.array(stats))
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
stdlib/go1_13_os.go
// Code generated by 'goexports os'. DO NOT EDIT. // +build go1.13,!go1.14 package stdlib import ( "go/constant" "go/token" "os" "reflect" "time" ) func init() { Symbols["os"] = map[string]reflect.Value{ // function, constant and variable definitions "Args": reflect.ValueOf(&os.Args).Elem(), "Chdir": reflect.ValueOf(os.Chdir), "Chmod": reflect.ValueOf(os.Chmod), "Chown": reflect.ValueOf(os.Chown), "Chtimes": reflect.ValueOf(os.Chtimes), "Clearenv": reflect.ValueOf(os.Clearenv), "Create": reflect.ValueOf(os.Create), "DevNull": reflect.ValueOf(os.DevNull), "Environ": reflect.ValueOf(os.Environ), "ErrClosed": reflect.ValueOf(&os.ErrClosed).Elem(), "ErrExist": reflect.ValueOf(&os.ErrExist).Elem(), "ErrInvalid": reflect.ValueOf(&os.ErrInvalid).Elem(), "ErrNoDeadline": reflect.ValueOf(&os.ErrNoDeadline).Elem(), "ErrNotExist": reflect.ValueOf(&os.ErrNotExist).Elem(), "ErrPermission": reflect.ValueOf(&os.ErrPermission).Elem(), "Executable": reflect.ValueOf(os.Executable), "Exit": reflect.ValueOf(os.Exit), "Expand": reflect.ValueOf(os.Expand), "ExpandEnv": reflect.ValueOf(os.ExpandEnv), "FindProcess": reflect.ValueOf(os.FindProcess), "Getegid": reflect.ValueOf(os.Getegid), "Getenv": reflect.ValueOf(os.Getenv), "Geteuid": reflect.ValueOf(os.Geteuid), "Getgid": reflect.ValueOf(os.Getgid), "Getgroups": reflect.ValueOf(os.Getgroups), "Getpagesize": reflect.ValueOf(os.Getpagesize), "Getpid": reflect.ValueOf(os.Getpid), "Getppid": reflect.ValueOf(os.Getppid), "Getuid": reflect.ValueOf(os.Getuid), "Getwd": reflect.ValueOf(os.Getwd), "Hostname": reflect.ValueOf(os.Hostname), "Interrupt": reflect.ValueOf(&os.Interrupt).Elem(), "IsExist": reflect.ValueOf(os.IsExist), "IsNotExist": reflect.ValueOf(os.IsNotExist), "IsPathSeparator": reflect.ValueOf(os.IsPathSeparator), "IsPermission": reflect.ValueOf(os.IsPermission), "IsTimeout": reflect.ValueOf(os.IsTimeout), "Kill": reflect.ValueOf(&os.Kill).Elem(), "Lchown": reflect.ValueOf(os.Lchown), "Link": reflect.ValueOf(os.Link), "LookupEnv": reflect.ValueOf(os.LookupEnv), "Lstat": reflect.ValueOf(os.Lstat), "Mkdir": reflect.ValueOf(os.Mkdir), "MkdirAll": reflect.ValueOf(os.MkdirAll), "ModeAppend": reflect.ValueOf(os.ModeAppend), "ModeCharDevice": reflect.ValueOf(os.ModeCharDevice), "ModeDevice": reflect.ValueOf(os.ModeDevice), "ModeDir": reflect.ValueOf(os.ModeDir), "ModeExclusive": reflect.ValueOf(os.ModeExclusive), "ModeIrregular": reflect.ValueOf(os.ModeIrregular), "ModeNamedPipe": reflect.ValueOf(os.ModeNamedPipe), "ModePerm": reflect.ValueOf(os.ModePerm), "ModeSetgid": reflect.ValueOf(os.ModeSetgid), "ModeSetuid": reflect.ValueOf(os.ModeSetuid), "ModeSocket": reflect.ValueOf(os.ModeSocket), "ModeSticky": reflect.ValueOf(os.ModeSticky), "ModeSymlink": reflect.ValueOf(os.ModeSymlink), "ModeTemporary": reflect.ValueOf(os.ModeTemporary), "ModeType": reflect.ValueOf(os.ModeType), "NewFile": reflect.ValueOf(os.NewFile), "NewSyscallError": reflect.ValueOf(os.NewSyscallError), "O_APPEND": reflect.ValueOf(os.O_APPEND), "O_CREATE": reflect.ValueOf(os.O_CREATE), "O_EXCL": reflect.ValueOf(os.O_EXCL), "O_RDONLY": reflect.ValueOf(os.O_RDONLY), "O_RDWR": reflect.ValueOf(os.O_RDWR), "O_SYNC": reflect.ValueOf(os.O_SYNC), "O_TRUNC": reflect.ValueOf(os.O_TRUNC), "O_WRONLY": reflect.ValueOf(os.O_WRONLY), "Open": reflect.ValueOf(os.Open), "OpenFile": reflect.ValueOf(os.OpenFile), "PathListSeparator": reflect.ValueOf(constant.MakeFromLiteral("58", token.INT, 0)), "PathSeparator": reflect.ValueOf(constant.MakeFromLiteral("47", token.INT, 0)), "Pipe": reflect.ValueOf(os.Pipe), 
"Readlink": reflect.ValueOf(os.Readlink), "Remove": reflect.ValueOf(os.Remove), "RemoveAll": reflect.ValueOf(os.RemoveAll), "Rename": reflect.ValueOf(os.Rename), "SEEK_CUR": reflect.ValueOf(os.SEEK_CUR), "SEEK_END": reflect.ValueOf(os.SEEK_END), "SEEK_SET": reflect.ValueOf(os.SEEK_SET), "SameFile": reflect.ValueOf(os.SameFile), "Setenv": reflect.ValueOf(os.Setenv), "StartProcess": reflect.ValueOf(os.StartProcess), "Stat": reflect.ValueOf(os.Stat), "Stderr": reflect.ValueOf(&os.Stderr).Elem(), "Stdin": reflect.ValueOf(&os.Stdin).Elem(), "Stdout": reflect.ValueOf(&os.Stdout).Elem(), "Symlink": reflect.ValueOf(os.Symlink), "TempDir": reflect.ValueOf(os.TempDir), "Truncate": reflect.ValueOf(os.Truncate), "Unsetenv": reflect.ValueOf(os.Unsetenv), "UserCacheDir": reflect.ValueOf(os.UserCacheDir), "UserConfigDir": reflect.ValueOf(os.UserConfigDir), "UserHomeDir": reflect.ValueOf(os.UserHomeDir), // type definitions "File": reflect.ValueOf((*os.File)(nil)), "FileInfo": reflect.ValueOf((*os.FileInfo)(nil)), "FileMode": reflect.ValueOf((*os.FileMode)(nil)), "LinkError": reflect.ValueOf((*os.LinkError)(nil)), "PathError": reflect.ValueOf((*os.PathError)(nil)), "ProcAttr": reflect.ValueOf((*os.ProcAttr)(nil)), "Process": reflect.ValueOf((*os.Process)(nil)), "ProcessState": reflect.ValueOf((*os.ProcessState)(nil)), "Signal": reflect.ValueOf((*os.Signal)(nil)), "SyscallError": reflect.ValueOf((*os.SyscallError)(nil)), // interface wrapper definitions "_FileInfo": reflect.ValueOf((*_os_FileInfo)(nil)), "_Signal": reflect.ValueOf((*_os_Signal)(nil)), } } // _os_FileInfo is an interface wrapper for FileInfo type type _os_FileInfo struct { WIsDir func() bool WModTime func() time.Time WMode func() os.FileMode WName func() string WSize func() int64 WSys func() interface{} } func (W _os_FileInfo) IsDir() bool { return W.WIsDir() } func (W _os_FileInfo) ModTime() time.Time { return W.WModTime() } func (W _os_FileInfo) Mode() os.FileMode { return W.WMode() } func (W _os_FileInfo) Name() string { return W.WName() } func (W _os_FileInfo) Size() int64 { return W.WSize() } func (W _os_FileInfo) Sys() interface{} { return W.WSys() } // _os_Signal is an interface wrapper for Signal type type _os_Signal struct { WSignal func() WString func() string } func (W _os_Signal) Signal() { W.WSignal() } func (W _os_Signal) String() string { return W.WString() }
[]
[]
[]
[]
[]
go
0
0
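The generated Symbols table above exists so an interpreter can resolve stdlib names through reflection. A minimal sketch of such a lookup-and-call, using a one-entry map rather than the generated table:

package main

import (
	"fmt"
	"os"
	"reflect"
)

func main() {
	// A tiny stand-in for the generated Symbols["os"] map.
	symbols := map[string]reflect.Value{
		"Getenv": reflect.ValueOf(os.Getenv),
	}

	// Call os.Getenv("HOME") through the table, as an interpreter would:
	// build the argument list as reflect.Values and invoke Call.
	out := symbols["Getenv"].Call([]reflect.Value{reflect.ValueOf("HOME")})
	fmt.Println(out[0].String())
}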
orc8r/cloud/go/tools/swaggergen/main.go
/* * Copyright 2020 The Magma Authors. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* Package main holds the swaggergen executable combined with combine-swagger The combine_swagger and swaggergen tools are strongly related and share a lot of functionality. Merging these tools as separate sub-commands under a single executable would reduce final container image size, as well as providing improved conceptual cohesion. combine_swagger command is now accessible by argument: swaggergen --combine */ package main import ( "flag" "os" combine_swagger "magma/orc8r/cloud/go/tools/swaggergen/combine_swagger" swaggergen "magma/orc8r/cloud/go/tools/swaggergen/swaggergen" ) func main() { cmdCombine := flag.Bool("combine", false, "calls combine_swagger command") //swaggergen commands targetFilepath := flag.String("target", "", "Target swagger spec to generate code from") configFilepath := flag.String("config", "", "Config file for go-swagger command") rootDir := flag.String("root", os.Getenv("MAGMA_ROOT"), "Root path to resolve dependency and output directories based on") //combine_swagger commands inDir := flag.String("in", "", "Input directory") commonFilepath := flag.String("common", "", "Common definitions filepath") outFilepath := flag.String("out", "", "Output directory") generateStandAloneSpec := flag.Bool("standalone", true, "Generate standalone specs") flag.Parse() if *cmdCombine { combine_swagger.Run(inDir, commonFilepath, outFilepath, generateStandAloneSpec) } else { swaggergen.Run(targetFilepath, configFilepath, rootDir) } }
[ "\"MAGMA_ROOT\"" ]
[]
[ "MAGMA_ROOT" ]
[]
["MAGMA_ROOT"]
go
1
0
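swaggergen folds two tools into one binary by branching on a boolean flag, with MAGMA_ROOT as the default root path. A stripped-down sketch of that dispatch pattern; the print statements stand in for the magma packages:

package main

import (
	"flag"
	"fmt"
	"os"
)

func main() {
	combine := flag.Bool("combine", false, "run the combine step instead of generation")
	root := flag.String("root", os.Getenv("MAGMA_ROOT"), "root path (defaults to $MAGMA_ROOT)")
	flag.Parse()

	if *combine {
		fmt.Println("combining specs under", *root) // stand-in for combine_swagger.Run
		return
	}
	fmt.Println("generating code under", *root) // stand-in for swaggergen.Run
}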
internal/pprof/driver/cli.go
// Copyright 2014 Google Inc. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package driver import ( "errors" "fmt" "os" "strings" "github.com/parca-dev/parca-agent/internal/pprof/binutils" "github.com/parca-dev/parca-agent/internal/pprof/plugin" ) type source struct { Sources []string ExecName string BuildID string Base []string DiffBase bool Normalize bool Seconds int Timeout int Symbolize string HTTPHostport string HTTPDisableBrowser bool Comment string } // parseFlags parses the command lines through the specified flags package // and returns the source of the profile and optionally the command // for the kind of report to generate (nil for interactive use). func parseFlags(o *plugin.Options) (*source, []string, error) { flag := o.Flagset // Comparisons. flagDiffBase := flag.StringList("diff_base", "", "Source of base profile for comparison") flagBase := flag.StringList("base", "", "Source of base profile for profile subtraction") // Source options. flagSymbolize := flag.String("symbolize", "", "Options for profile symbolization") flagBuildID := flag.String("buildid", "", "Override build id for first mapping") flagTimeout := flag.Int("timeout", -1, "Timeout in seconds for fetching a profile") flagAddComment := flag.String("add_comment", "", "Annotation string to record in the profile") // CPU profile options flagSeconds := flag.Int("seconds", -1, "Length of time for dynamic profiles") // Heap profile options flagInUseSpace := flag.Bool("inuse_space", false, "Display in-use memory size") flagInUseObjects := flag.Bool("inuse_objects", false, "Display in-use object counts") flagAllocSpace := flag.Bool("alloc_space", false, "Display allocated memory size") flagAllocObjects := flag.Bool("alloc_objects", false, "Display allocated object counts") // Contention profile options flagTotalDelay := flag.Bool("total_delay", false, "Display total delay at each region") flagContentions := flag.Bool("contentions", false, "Display number of delays at each region") flagMeanDelay := flag.Bool("mean_delay", false, "Display mean delay at each region") flagTools := flag.String("tools", os.Getenv("PPROF_TOOLS"), "Path for object tool pathnames") flagHTTP := flag.String("http", "", "Present interactive web UI at the specified http host:port") flagNoBrowser := flag.Bool("no_browser", false, "Skip opening a browser for the interactive web UI") // Flags that set configuration properties.
cfg := currentConfig() configFlagSetter := installConfigFlags(flag, &cfg) flagCommands := make(map[string]*bool) flagParamCommands := make(map[string]*string) for name, cmd := range pprofCommands { if cmd.hasParam { flagParamCommands[name] = flag.String(name, "", "Generate a report in "+name+" format, matching regexp") } else { flagCommands[name] = flag.Bool(name, false, "Generate a report in "+name+" format") } } args := flag.Parse(func() { o.UI.Print(usageMsgHdr + usage(true) + usageMsgSrc + flag.ExtraUsage() + usageMsgVars) }) if len(args) == 0 { return nil, nil, errors.New("no profile source specified") } var execName string // Recognize first argument as an executable or buildid override. if len(args) > 1 { arg0 := args[0] if file, err := o.Obj.Open(arg0, 0, ^uint64(0), 0, ""); err == nil { file.Close() execName = arg0 args = args[1:] } else if *flagBuildID == "" && isBuildID(arg0) { *flagBuildID = arg0 args = args[1:] } } // Apply any specified flags to cfg. if err := configFlagSetter(); err != nil { return nil, nil, err } cmd, err := outputFormat(flagCommands, flagParamCommands) if err != nil { return nil, nil, err } if cmd != nil && *flagHTTP != "" { return nil, nil, errors.New("-http is not compatible with an output format on the command line") } if *flagNoBrowser && *flagHTTP == "" { return nil, nil, errors.New("-no_browser only makes sense with -http") } si := cfg.SampleIndex si = sampleIndex(flagTotalDelay, si, "delay", "-total_delay", o.UI) si = sampleIndex(flagMeanDelay, si, "delay", "-mean_delay", o.UI) si = sampleIndex(flagContentions, si, "contentions", "-contentions", o.UI) si = sampleIndex(flagInUseSpace, si, "inuse_space", "-inuse_space", o.UI) si = sampleIndex(flagInUseObjects, si, "inuse_objects", "-inuse_objects", o.UI) si = sampleIndex(flagAllocSpace, si, "alloc_space", "-alloc_space", o.UI) si = sampleIndex(flagAllocObjects, si, "alloc_objects", "-alloc_objects", o.UI) cfg.SampleIndex = si if *flagMeanDelay { cfg.Mean = true } source := &source{ Sources: args, ExecName: execName, BuildID: *flagBuildID, Seconds: *flagSeconds, Timeout: *flagTimeout, Symbolize: *flagSymbolize, HTTPHostport: *flagHTTP, HTTPDisableBrowser: *flagNoBrowser, Comment: *flagAddComment, } if err := source.addBaseProfiles(*flagBase, *flagDiffBase); err != nil { return nil, nil, err } normalize := cfg.Normalize if normalize && len(source.Base) == 0 { return nil, nil, errors.New("must have base profile to normalize by") } source.Normalize = normalize if bu, ok := o.Obj.(*binutils.Binutils); ok { bu.SetTools(*flagTools) } setCurrentConfig(cfg) return source, cmd, nil } // addBaseProfiles adds the list of base profiles or diff base profiles to // the source. This function will return an error if both base and diff base // profiles are specified. func (source *source) addBaseProfiles(flagBase, flagDiffBase []*string) error { base, diffBase := dropEmpty(flagBase), dropEmpty(flagDiffBase) if len(base) > 0 && len(diffBase) > 0 { return errors.New("-base and -diff_base flags cannot both be specified") } source.Base = base if len(diffBase) > 0 { source.Base, source.DiffBase = diffBase, true } return nil } // dropEmpty list takes a slice of string pointers, and outputs a slice of // non-empty strings associated with the flag. 
func dropEmpty(list []*string) []string { var l []string for _, s := range list { if *s != "" { l = append(l, *s) } } return l } // installConfigFlags creates command line flags for configuration // fields and returns a function which can be called after flags have // been parsed to copy any flags specified on the command line to // *cfg. func installConfigFlags(flag plugin.FlagSet, cfg *config) func() error { // List of functions for setting the different parts of a config. var setters []func() var err error // Holds any errors encountered while running setters. for _, field := range configFields { n := field.name help := configHelp[n] var setter func() switch ptr := cfg.fieldPtr(field).(type) { case *bool: f := flag.Bool(n, *ptr, help) setter = func() { *ptr = *f } case *int: f := flag.Int(n, *ptr, help) setter = func() { *ptr = *f } case *float64: f := flag.Float64(n, *ptr, help) setter = func() { *ptr = *f } case *string: if len(field.choices) == 0 { f := flag.String(n, *ptr, help) setter = func() { *ptr = *f } } else { // Make a separate flag per possible choice. // Set all flags to initially false so we can // identify conflicts. bools := make(map[string]*bool) for _, choice := range field.choices { bools[choice] = flag.Bool(choice, false, configHelp[choice]) } setter = func() { var set []string for k, v := range bools { if *v { set = append(set, k) } } switch len(set) { case 0: // Leave as default value. case 1: *ptr = set[0] default: err = fmt.Errorf("conflicting options set: %v", set) } } } } setters = append(setters, setter) } return func() error { // Apply the setter for every flag. for _, setter := range setters { setter() if err != nil { return err } } return nil } } // isBuildID determines if the profile may contain a build ID, by // checking that it is a string of hex digits. func isBuildID(id string) bool { return strings.Trim(id, "0123456789abcdefABCDEF") == "" } func sampleIndex(flag *bool, si string, sampleType, option string, ui plugin.UI) string { if *flag { if si == "" { return sampleType } ui.PrintErr("Multiple value selections, ignoring ", option) } return si } func outputFormat(bcmd map[string]*bool, acmd map[string]*string) (cmd []string, err error) { for n, b := range bcmd { if *b { if cmd != nil { return nil, errors.New("must set at most one output format") } cmd = []string{n} } } for n, s := range acmd { if *s != "" { if cmd != nil { return nil, errors.New("must set at most one output format") } cmd = []string{n, *s} } } return cmd, nil } var usageMsgHdr = `usage: Produce output in the specified format. pprof <format> [options] [binary] <source> ... Omit the format to get an interactive shell whose commands can be used to generate various views of a profile pprof [options] [binary] <source> ... Omit the format and provide the "-http" flag to get an interactive web interface at the specified host:port that can be used to navigate through various views of a profile. pprof -http [host]:[port] [options] [binary] <source> ... 
Details: ` var usageMsgSrc = "\n\n" + " Source options:\n" + " -seconds Duration for time-based profile collection\n" + " -timeout Timeout in seconds for profile collection\n" + " -buildid Override build id for main binary\n" + " -add_comment Free-form annotation to add to the profile\n" + " Displayed on some reports or with pprof -comments\n" + " -diff_base source Source of base profile for comparison\n" + " -base source Source of base profile for profile subtraction\n" + " profile.pb.gz Profile in compressed protobuf format\n" + " legacy_profile Profile in legacy pprof format\n" + " http://host/profile URL for profile handler to retrieve\n" + " -symbolize= Controls source of symbol information\n" + " none Do not attempt symbolization\n" + " local Examine only local binaries\n" + " fastlocal Only get function names from local binaries\n" + " remote Do not examine local binaries\n" + " force Force re-symbolization\n" + " Binary Local path or build id of binary for symbolization\n" var usageMsgVars = "\n\n" + " Misc options:\n" + " -http Provide web interface at host:port.\n" + " Host is optional and 'localhost' by default.\n" + " Port is optional and a randomly available port by default.\n" + " -no_browser Skip opening a browser for the interactive web UI.\n" + " -tools Search path for object tools\n" + "\n" + " Legacy convenience options:\n" + " -inuse_space Same as -sample_index=inuse_space\n" + " -inuse_objects Same as -sample_index=inuse_objects\n" + " -alloc_space Same as -sample_index=alloc_space\n" + " -alloc_objects Same as -sample_index=alloc_objects\n" + " -total_delay Same as -sample_index=delay\n" + " -contentions Same as -sample_index=contentions\n" + " -mean_delay Same as -mean -sample_index=delay\n" + "\n" + " Environment Variables:\n" + " PPROF_TMPDIR Location for saved profiles (default $HOME/pprof)\n" + " PPROF_TOOLS Search path for object-level tools\n" + " PPROF_BINARY_PATH Search path for local binary files\n" + " default: $HOME/pprof/binaries\n" + " searches $name, $path, $buildid/$name, $path/$buildid\n" + " * On Windows, %USERPROFILE% is used instead of $HOME"
[ "\"PPROF_TOOLS\"" ]
[]
[ "PPROF_TOOLS" ]
[]
["PPROF_TOOLS"]
go
1
0
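The parseFlags code above registers flags first and copies them into the config only after parsing, via the setter closure returned by installConfigFlags. A stripped-down sketch of that deferred-setter pattern; the config fields here are invented for illustration and are not pprof's real options:

package main

import (
	"flag"
	"fmt"
)

type config struct {
	NodeCount int
	Sort      string
}

// installConfigFlags registers one flag per config field and returns a
// closure that copies the parsed values back into cfg after Parse runs.
func installConfigFlags(fs *flag.FlagSet, cfg *config) func() {
	n := fs.Int("nodecount", cfg.NodeCount, "max nodes to show")
	s := fs.String("sort", cfg.Sort, "sort order")
	return func() {
		cfg.NodeCount = *n
		cfg.Sort = *s
	}
}

func main() {
	cfg := config{NodeCount: 80, Sort: "flat"}
	fs := flag.NewFlagSet("pprof-like", flag.ExitOnError)
	setter := installConfigFlags(fs, &cfg)
	fs.Parse([]string{"-nodecount", "20"})
	setter() // apply flag values only after parsing
	fmt.Printf("%+v\n", cfg) // {NodeCount:20 Sort:flat}
}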
doc/conf.py
# -*- coding: utf-8 -*- # # MNE documentation build configuration file, created by # sphinx-quickstart on Fri Jun 11 10:45:48 2010. # # This file is execfile()d with the current directory set to its containing # dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. from datetime import datetime, timezone from distutils.version import LooseVersion import gc import os import os.path as op import sys import time import warnings import sphinx_gallery from sphinx_gallery.sorting import FileNameSortKey, ExplicitOrder from numpydoc import docscrape import matplotlib import mne from mne.viz import Brain # noqa from mne.utils import (linkcode_resolve, # noqa, analysis:ignore _assert_no_instances, sizeof_fmt) if LooseVersion(sphinx_gallery.__version__) < LooseVersion('0.2'): raise ImportError('Must have at least version 0.2 of sphinx-gallery, got ' '%s' % (sphinx_gallery.__version__,)) matplotlib.use('agg') # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. curdir = os.path.dirname(__file__) sys.path.append(os.path.abspath(os.path.join(curdir, '..', 'mne'))) sys.path.append(os.path.abspath(os.path.join(curdir, 'sphinxext'))) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. needs_sphinx = '2.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.linkcode', 'sphinx.ext.mathjax', 'sphinx.ext.todo', 'sphinx.ext.graphviz', 'numpydoc', 'sphinx_gallery.gen_gallery', 'sphinx_fontawesome', 'gen_commands', 'gh_substitutions', 'mne_substitutions', 'sphinx_bootstrap_theme', 'sphinx_bootstrap_divs', 'sphinxcontrib.bibtex', ] linkcheck_ignore = [ 'https://doi.org/10.1088/0031-9155/57/7/1937', # noqa 403 Client Error: Forbidden for url: http://iopscience.iop.org/article/10.1088/0031-9155/57/7/1937/meta 'https://doi.org/10.1088/0031-9155/51/7/008', # noqa 403 Client Error: Forbidden for url: https://iopscience.iop.org/article/10.1088/0031-9155/51/7/008 'https://sccn.ucsd.edu/wiki/.*', # noqa HTTPSConnectionPool(host='sccn.ucsd.edu', port=443): Max retries exceeded with url: /wiki/Firfilt_FAQ (Caused by SSLError(SSLError(1, '[SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed (_ssl.c:847)'),)) 'https://docs.python.org/dev/howto/logging.html', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer')) 'https://docs.python.org/3/library/.*', # noqa ('Connection aborted.', ConnectionResetError(104, 'Connection reset by peer')) 'https://hal.archives-ouvertes.fr/hal-01848442/', # noqa Sometimes: 503 Server Error: Service Unavailable for url: https://hal.archives-ouvertes.fr/hal-01848442/ ] linkcheck_anchors = False # saves a bit of time autosummary_generate = True autodoc_default_options = {'inherited-members': None} # Add any paths that contain templates here, relative to this directory. 
templates_path = ['_templates'] # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_includes'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The main toctree document. master_doc = 'index' # General information about the project. project = u'MNE' td = datetime.now(tz=timezone.utc) copyright = ( '2012-%(year)s, MNE Developers. Last updated ' '<time datetime="%(iso)s" class="localized">%(short)s</time>\n' '<script type="text/javascript">$(function () { $("time.localized").each(function () { var el = $(this); el.text(new Date(el.attr("datetime")).toLocaleString([], {dateStyle: "medium", timeStyle: "long"})); }); } )</script>' # noqa: E501 ) % dict(year=td.year, iso=td.isoformat(), short=td.strftime('%Y-%m-%d %H:%M %Z')) nitpicky = True nitpick_ignore = [ ("py:class", "None. Remove all items from D."), ("py:class", "a set-like object providing a view on D's items"), ("py:class", "a set-like object providing a view on D's keys"), ("py:class", "v, remove specified key and return the corresponding value."), # noqa: E501 ("py:class", "None. Update D from dict/iterable E and F."), ("py:class", "an object providing a view on D's values"), ("py:class", "a shallow copy of D"), ("py:class", "(k, v), remove and return some (key, value) pair as a"), ] for key in ('AcqParserFIF', 'BiHemiLabel', 'Dipole', 'DipoleFixed', 'Label', 'MixedSourceEstimate', 'MixedVectorSourceEstimate', 'Report', 'SourceEstimate', 'SourceMorph', 'VectorSourceEstimate', 'VolSourceEstimate', 'VolVectorSourceEstimate', 'channels.DigMontage', 'channels.Layout', 'decoding.CSP', 'decoding.EMS', 'decoding.FilterEstimator', 'decoding.GeneralizingEstimator', 'decoding.LinearModel', 'decoding.PSDEstimator', 'decoding.ReceptiveField', 'decoding.SSD', 'decoding.SPoC', 'decoding.Scaler', 'decoding.SlidingEstimator', 'decoding.TemporalFilter', 'decoding.TimeDelayingRidge', 'decoding.TimeFrequency', 'decoding.UnsupervisedSpatialFilter', 'decoding.Vectorizer', 'preprocessing.ICA', 'preprocessing.Xdawn', 'simulation.SourceSimulator', 'time_frequency.CrossSpectralDensity', 'utils.deprecated', 'viz.ClickableImage'): nitpick_ignore.append(('py:obj', f'mne.{key}.__hash__')) suppress_warnings = ['image.nonlocal_uri'] # we intentionally link outside # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = mne.__version__ # The full version, including alpha/beta/rc tags. release = version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of documents that shouldn't be included in the build. unused_docs = [] # List of directories, relative to source directory, that shouldn't be searched # for source files. exclude_trees = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. default_role = "py:obj" # If true, '()' will be appended to :func: etc. cross-reference text. 
# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'default' # A list of ignored prefixes for module index sorting. modindex_common_prefix = ['mne.'] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'bootstrap' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'navbar_title': ' ', # we replace this with an image 'source_link_position': "nav", # default 'bootswatch_theme': "flatly", # yeti paper lumen 'navbar_sidebarrel': False, # Render the next/prev links in navbar? 'navbar_pagenav': False, 'navbar_class': "navbar", 'bootstrap_version': "3", # default 'navbar_links': [ ("Install", "install/index"), ("Overview", "overview/index"), ("Tutorials", "auto_tutorials/index"), ("Examples", "auto_examples/index"), ("Glossary", "glossary"), ("API", "python_reference"), ], } # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "_static/mne_logo_small.svg" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = "_static/favicon.ico" # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. html_extra_path = [ 'contributing.html', 'documentation.html', 'getting_started.html', 'install_mne_python.html', ] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = False html_copy_source = False # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. 
html_show_sphinx = False # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # variables to pass to HTML templating engine build_dev_html = bool(int(os.environ.get('BUILD_DEV_HTML', False))) html_context = {'use_google_analytics': True, 'use_media_buttons': True, 'build_dev_html': build_dev_html} # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'mne-doc' # -- Options for LaTeX output --------------------------------------------- # The paper size ('letter' or 'a4'). # latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). # latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ # ('index', 'MNE.tex', u'MNE Manual', # u'MNE Contributors', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. latex_logo = "_static/logo.png" # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. latex_toplevel_sectioning = 'part' # Additional stuff for the LaTeX preamble. # latex_preamble = '' # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True trim_doctests_flags = True # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = { 'python': ('https://docs.python.org/3', None), 'numpy': ('https://numpy.org/devdocs', None), 'scipy': ('https://scipy.github.io/devdocs', None), 'matplotlib': ('https://matplotlib.org', None), 'sklearn': ('https://scikit-learn.org/stable', None), 'numba': ('https://numba.pydata.org/numba-doc/latest', None), 'joblib': ('https://joblib.readthedocs.io/en/latest', None), 'mayavi': ('http://docs.enthought.com/mayavi/mayavi', None), 'nibabel': ('https://nipy.org/nibabel', None), 'nilearn': ('http://nilearn.github.io', None), 'surfer': ('https://pysurfer.github.io/', None), 'pandas': ('https://pandas.pydata.org/pandas-docs/stable', None), 'seaborn': ('https://seaborn.pydata.org/', None), 'statsmodels': ('https://www.statsmodels.org/dev', None), 'patsy': ('https://patsy.readthedocs.io/en/latest', None), 'pyvista': ('https://docs.pyvista.org', None), 'imageio': ('https://imageio.readthedocs.io/en/latest', None), # We need to stick with 1.2.0 for now: # https://github.com/dipy/dipy/issues/2290 'dipy': ('https://dipy.org/documentation/1.2.0.', None), 'mne_realtime': ('https://mne.tools/mne-realtime', None), 'picard': ('https://pierreablin.github.io/picard/', None), } ############################################################################## # sphinxcontrib-bibtex bibtex_bibfiles = ['./references.bib'] bibtex_style = 'unsrt' bibtex_footbibliography_header = '' ############################################################################## # sphinx-gallery examples_dirs = ['../tutorials', '../examples'] gallery_dirs = ['auto_tutorials', 'auto_examples'] os.environ['_MNE_BUILDING_DOC'] = 'true' scrapers = ('matplotlib',) try: mlab = mne.utils._import_mlab() # Do not pop up any mayavi windows while running the # examples. 
These are very annoying since they steal the focus. mlab.options.offscreen = True # hack to initialize the Mayavi Engine mlab.test_plot3d() mlab.close() except Exception: pass else: scrapers += ('mayavi',) try: with warnings.catch_warnings(): warnings.filterwarnings("ignore", category=DeprecationWarning) import pyvista pyvista.OFF_SCREEN = False except Exception: pass else: scrapers += ('pyvista',) if any(x in scrapers for x in ('pyvista', 'mayavi')): from traits.api import push_exception_handler push_exception_handler(reraise_exceptions=True) report_scraper = mne.report._ReportScraper() scrapers += (report_scraper,) else: report_scraper = None if 'pyvista' in scrapers: brain_scraper = mne.viz._brain._BrainScraper() scrapers = list(scrapers) scrapers.insert(scrapers.index('pyvista'), brain_scraper) scrapers = tuple(scrapers) def append_attr_meth_examples(app, what, name, obj, options, lines): """Append SG examples backreferences to method and attr docstrings.""" # NumpyDoc nicely embeds method and attribute docstrings for us, but it # does not respect the autodoc templates that would otherwise insert # the .. include:: lines, so we need to do it. # Eventually this could perhaps live in SG. if what in ('attribute', 'method'): size = os.path.getsize(op.join( op.dirname(__file__), 'generated', '%s.examples' % (name,))) if size > 0: lines += """ .. _sphx_glr_backreferences_{1}: .. rubric:: Examples using ``{0}``: .. minigallery:: {1} """.format(name.split('.')[-1], name).split('\n') def setup(app): """Set up the Sphinx app.""" app.connect('autodoc-process-docstring', append_attr_meth_examples) if report_scraper is not None: report_scraper.app = app app.connect('build-finished', report_scraper.copyfiles) class Resetter(object): """Simple class to make the str(obj) static for Sphinx build env hash.""" def __init__(self): self.t0 = time.time() def __repr__(self): return '<%s>' % (self.__class__.__name__,) def __call__(self, gallery_conf, fname): import matplotlib.pyplot as plt try: from pyvista import Plotter # noqa except ImportError: Plotter = None # noqa reset_warnings(gallery_conf, fname) # in case users have interactive mode turned on in matplotlibrc, # turn it off here (otherwise the build can be very slow) plt.ioff() plt.rcParams['animation.embed_limit'] = 30. gc.collect() # _assert_no_instances(Brain, 'running') # calls gc.collect() # if Plotter is not None: # _assert_no_instances(Plotter, 'running') # This will overwrite some Sphinx printing but it's useful # for memory timestamps if os.getenv('SG_STAMP_STARTS', '').lower() == 'true': import psutil process = psutil.Process(os.getpid()) mem = sizeof_fmt(process.memory_info().rss) print(f'{time.time() - self.t0:6.1f} s : {mem}'.ljust(22)) def reset_warnings(gallery_conf, fname): """Ensure we are future compatible and ignore silly warnings.""" # In principle, our examples should produce no warnings. # Here we cause warnings to become errors, with a few exceptions. 
# This list should be considered alongside # setup.cfg -> [tool:pytest] -> filterwarnings # remove tweaks from other module imports or example runs warnings.resetwarnings() # restrict warnings.filterwarnings('error') # allow these, but show them warnings.filterwarnings('always', '.*non-standard config type: "foo".*') warnings.filterwarnings('always', '.*config type: "MNEE_USE_CUUDAA".*') warnings.filterwarnings('always', '.*cannot make axes width small.*') warnings.filterwarnings('always', '.*Axes that are not compatible.*') warnings.filterwarnings('always', '.*FastICA did not converge.*') warnings.filterwarnings( # xhemi morph (should probably update sample) 'always', '.*does not exist, creating it and saving it.*') warnings.filterwarnings('default', module='sphinx') # internal warnings warnings.filterwarnings( 'always', '.*converting a masked element to nan.*') # matplotlib? # allow these warnings, but don't show them warnings.filterwarnings( 'ignore', '.*OpenSSL\\.rand is deprecated.*') warnings.filterwarnings('ignore', '.*is currently using agg.*') warnings.filterwarnings( # SciPy-related warning (maybe 1.2.0 will fix it) 'ignore', '.*the matrix subclass is not the recommended.*') warnings.filterwarnings( # some joblib warning 'ignore', '.*semaphore_tracker: process died unexpectedly.*') warnings.filterwarnings( # needed until SciPy 1.2.0 is released 'ignore', '.*will be interpreted as an array index.*', module='scipy') for key in ('HasTraits', r'numpy\.testing', 'importlib', r'np\.loads', 'Using or importing the ABCs from', # internal modules on 3.7 r"it will be an error for 'np\.bool_'", # ndimage "DocumenterBridge requires a state object", # sphinx dev "'U' mode is deprecated", # sphinx io r"joblib is deprecated in 0\.21", # nilearn 'The usage of `cmp` is deprecated and will', # sklearn/pytest 'scipy.* is deprecated and will be removed in', # dipy r'Converting `np\.character` to a dtype is deprecated', # vtk r'sphinx\.util\.smartypants is deprecated', 'is a deprecated alias for the builtin', # NumPy ): warnings.filterwarnings( # deal with other modules having bad imports 'ignore', message=".*%s.*" % key, category=DeprecationWarning) warnings.filterwarnings( # deal with bootstrap-theme bug 'ignore', message=".*modify script_files in the theme.*", category=Warning) warnings.filterwarnings( # nilearn 'ignore', message=r'sklearn\.externals\.joblib is deprecated.*', category=FutureWarning) warnings.filterwarnings( # nilearn 'ignore', message=r'The sklearn.* module is.*', category=FutureWarning) warnings.filterwarnings( # deal with other modules having bad imports 'ignore', message=".*ufunc size changed.*", category=RuntimeWarning) warnings.filterwarnings( # realtime 'ignore', message=".*unclosed file.*", category=ResourceWarning) warnings.filterwarnings('ignore', message='Exception ignored in.*') # allow this ImportWarning, but don't show it warnings.filterwarnings( 'ignore', message="can't resolve package from", category=ImportWarning) warnings.filterwarnings( 'ignore', message='.*mne-realtime.*', category=DeprecationWarning) reset_warnings(None, None) sphinx_gallery_conf = { 'doc_module': ('mne',), 'reference_url': dict(mne=None), 'examples_dirs': examples_dirs, 'subsection_order': ExplicitOrder(['../examples/io/', '../examples/simulation/', '../examples/preprocessing/', '../examples/visualization/', '../examples/time_frequency/', '../examples/stats/', '../examples/decoding/', '../examples/connectivity/', '../examples/forward/', '../examples/inverse/', '../examples/realtime/', 
'../examples/datasets/', '../tutorials/intro/', '../tutorials/io/', '../tutorials/raw/', '../tutorials/preprocessing/', '../tutorials/epochs/', '../tutorials/evoked/', '../tutorials/time-freq/', '../tutorials/source-modeling/', '../tutorials/stats-sensor-space/', '../tutorials/stats-source-space/', '../tutorials/machine-learning/', '../tutorials/simulation/', '../tutorials/sample-datasets/', '../tutorials/discussions/', '../tutorials/misc/']), 'gallery_dirs': gallery_dirs, 'default_thumb_file': os.path.join('_static', 'mne_helmet.png'), 'backreferences_dir': 'generated', 'plot_gallery': 'True', # Avoid annoying Unicode/bool default warning 'thumbnail_size': (160, 112), 'remove_config_comments': True, 'min_reported_time': 1., 'abort_on_example_error': False, 'reset_modules': ('matplotlib', Resetter()), # called w/each script 'image_scrapers': scrapers, 'show_memory': not sys.platform.startswith('win'), 'line_numbers': False, # XXX currently (0.3.dev0) messes with style 'within_subsection_order': FileNameSortKey, 'capture_repr': ('_repr_html_',), 'junit': op.join('..', 'test-results', 'sphinx-gallery', 'junit.xml'), 'matplotlib_animations': True, 'compress_images': ('images', 'thumbnails'), } ############################################################################## # numpydoc # XXX This hack defines what extra methods numpydoc will document docscrape.ClassDoc.extra_public_methods = mne.utils._doc_special_members numpydoc_class_members_toctree = False numpydoc_attributes_as_param_list = True numpydoc_xref_param_type = True numpydoc_xref_aliases = { # Python 'file-like': ':term:`file-like <python:file object>`', # Matplotlib 'colormap': ':doc:`colormap <matplotlib:tutorials/colors/colormaps>`', 'color': ':doc:`color <matplotlib:api/colors_api>`', 'collection': ':doc:`collections <matplotlib:api/collections_api>`', 'Axes': 'matplotlib.axes.Axes', 'Figure': 'matplotlib.figure.Figure', 'Axes3D': 'mpl_toolkits.mplot3d.axes3d.Axes3D', 'ColorbarBase': 'matplotlib.colorbar.ColorbarBase', # Mayavi 'mayavi.mlab.Figure': 'mayavi.core.api.Scene', 'mlab.Figure': 'mayavi.core.api.Scene', # sklearn 'LeaveOneOut': 'sklearn.model_selection.LeaveOneOut', # joblib 'joblib.Parallel': 'joblib.Parallel', # nibabel 'Nifti1Image': 'nibabel.nifti1.Nifti1Image', 'Nifti2Image': 'nibabel.nifti2.Nifti2Image', 'SpatialImage': 'nibabel.spatialimages.SpatialImage', # MNE 'Label': 'mne.Label', 'Forward': 'mne.Forward', 'Evoked': 'mne.Evoked', 'Info': 'mne.Info', 'SourceSpaces': 'mne.SourceSpaces', 'SourceMorph': 'mne.SourceMorph', 'Epochs': 'mne.Epochs', 'Layout': 'mne.channels.Layout', 'EvokedArray': 'mne.EvokedArray', 'BiHemiLabel': 'mne.BiHemiLabel', 'AverageTFR': 'mne.time_frequency.AverageTFR', 'EpochsTFR': 'mne.time_frequency.EpochsTFR', 'Raw': 'mne.io.Raw', 'ICA': 'mne.preprocessing.ICA', 'Covariance': 'mne.Covariance', 'Annotations': 'mne.Annotations', 'DigMontage': 'mne.channels.DigMontage', 'VectorSourceEstimate': 'mne.VectorSourceEstimate', 'VolSourceEstimate': 'mne.VolSourceEstimate', 'VolVectorSourceEstimate': 'mne.VolVectorSourceEstimate', 'MixedSourceEstimate': 'mne.MixedSourceEstimate', 'MixedVectorSourceEstimate': 'mne.MixedVectorSourceEstimate', 'SourceEstimate': 'mne.SourceEstimate', 'Projection': 'mne.Projection', 'ConductorModel': 'mne.bem.ConductorModel', 'Dipole': 'mne.Dipole', 'DipoleFixed': 'mne.DipoleFixed', 'InverseOperator': 'mne.minimum_norm.InverseOperator', 'CrossSpectralDensity': 'mne.time_frequency.CrossSpectralDensity', 'SourceMorph': 'mne.SourceMorph', 'Xdawn': 
'mne.preprocessing.Xdawn', 'Report': 'mne.Report', 'Forward': 'mne.Forward', 'TimeDelayingRidge': 'mne.decoding.TimeDelayingRidge', 'Vectorizer': 'mne.decoding.Vectorizer', 'UnsupervisedSpatialFilter': 'mne.decoding.UnsupervisedSpatialFilter', 'TemporalFilter': 'mne.decoding.TemporalFilter', 'SSD': 'mne.decoding.SSD', 'Scaler': 'mne.decoding.Scaler', 'SPoC': 'mne.decoding.SPoC', 'PSDEstimator': 'mne.decoding.PSDEstimator', 'LinearModel': 'mne.decoding.LinearModel', 'FilterEstimator': 'mne.decoding.FilterEstimator', 'EMS': 'mne.decoding.EMS', 'CSP': 'mne.decoding.CSP', 'Beamformer': 'mne.beamformer.Beamformer', 'Transform': 'mne.transforms.Transform', } numpydoc_xref_ignore = { # words 'instance', 'instances', 'of', 'default', 'shape', 'or', 'with', 'length', 'pair', 'matplotlib', 'optional', 'kwargs', 'in', 'dtype', 'object', 'self.verbose', # shapes 'n_vertices', 'n_faces', 'n_channels', 'm', 'n', 'n_events', 'n_colors', 'n_times', 'obj', 'n_chan', 'n_epochs', 'n_picks', 'n_ch_groups', 'n_dipoles', 'n_ica_components', 'n_pos', 'n_node_names', 'n_tapers', 'n_signals', 'n_step', 'n_freqs', 'wsize', 'Tx', 'M', 'N', 'p', 'q', 'n_observations', 'n_regressors', 'n_cols', 'n_frequencies', 'n_tests', 'n_samples', 'n_permutations', 'nchan', 'n_points', 'n_features', 'n_parts', 'n_features_new', 'n_components', 'n_labels', 'n_events_in', 'n_splits', 'n_scores', 'n_outputs', 'n_trials', 'n_estimators', 'n_tasks', 'nd_features', 'n_classes', 'n_targets', 'n_slices', 'n_hpi', 'n_fids', 'n_elp', 'n_pts', 'n_tris', 'n_nodes', 'n_nonzero', 'n_events_out', 'n_segments', 'n_orient_inv', 'n_orient_fwd', 'n_orient', 'n_dipoles_lcmv', 'n_dipoles_fwd', 'n_picks_ref', 'n_coords', # Undocumented (on purpose) 'RawKIT', 'RawEximia', 'RawEGI', 'RawEEGLAB', 'RawEDF', 'RawCTF', 'RawBTi', 'RawBrainVision', 'RawCurry', 'RawNIRX', 'RawGDF', 'RawSNIRF', 'RawBOXY', 'RawPersyst', 'RawNihon', 'RawNedf', # sklearn subclasses 'mapping', 'to', 'any', # unlinkable 'mayavi.mlab.pipeline.surface', 'CoregFrame', 'Kit2FiffFrame', 'FiducialsFrame', }
[]
[]
[ "_MNE_BUILDING_DOC", "SG_STAMP_STARTS", "BUILD_DEV_HTML" ]
[]
["_MNE_BUILDING_DOC", "SG_STAMP_STARTS", "BUILD_DEV_HTML"]
python
3
0
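The conf.py above derives a build switch from an environment variable with a false default (BUILD_DEV_HTML). A small Go sketch of the same env-var-with-default idea; envBool is our own helper, not part of any library:

package main

import (
	"fmt"
	"os"
	"strconv"
)

// envBool returns the variable parsed as a bool ("1", "true", ...),
// or def when it is unset or unparsable.
func envBool(key string, def bool) bool {
	v, ok := os.LookupEnv(key)
	if !ok {
		return def
	}
	b, err := strconv.ParseBool(v)
	if err != nil {
		return def
	}
	return b
}

func main() {
	buildDevHTML := envBool("BUILD_DEV_HTML", false)
	fmt.Println("build_dev_html:", buildDevHTML)
}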
singleinitiatorzone/initiatorscheck.py
#!/usr/bin/python # initiatorscheck.py # # check fcp initiator show on a filer for logged in initiators with no # igroup members or multiple igroup members import sys import argparse import json import os import getpass import time import pprint sys.path.append("./library") from na_funcs import * from cisco_funcs import * debug = False # parse command line arguments and optional environment variables arguments = argparse.ArgumentParser( description='Get connected fcp initiators from cDOT filer and check that they are mapped to an igroup and also not mapped to multiple igroups ') arguments.add_argument( '--filer_hostname', required=True, type=str, help='filer fqdn or IP') arguments.add_argument( '--filer_username', required=False, type=str, help='optional username to ssh into the filer. Alternate: set environment variable FILER_USERNAME. If neither exists, defaults to admin') arguments.add_argument( '--filer_password', required=False, type=str, help='optional password to ssh into the filer. Alternate: set environment variable FILER_PASSWORD. If unset use_keys defaults to True.') arguments.add_argument( '--svm', '--vserver', required=False, type=str, help='limit "fcp initiator show" query to named netapp vserver') arguments.add_argument( '--lif', '--adapter', required=False, type=str, help='limit "fcp initiator show" query to named netapp vserver lif') args = arguments.parse_args() if args.filer_password : filer_password = args.filer_password elif os.getenv('FILER_PASSWORD') : filer_password = os.getenv('FILER_PASSWORD') else : filer_password = '' if args.filer_username : filer_username = args.filer_username elif os.getenv('FILER_USERNAME') : filer_username = os.getenv('FILER_USERNAME') else: filer_username = 'admin' if args.svm : svm = args.svm else : svm = False if args.lif : lif = args.lif else: lif = False filer_hostname = args.filer_hostname # main loop pp = pprint.PrettyPrinter(indent=4) filerconnect = cdotconnect(filer_hostname,filer_username,filer_password) api = NaElement("fcp-initiator-get-iter") ## can stack elements on to the query to limit output xi = NaElement("query") api.child_add(xi) xi1 = NaElement("fcp-adapter-initiators-info") xi.child_add(xi1) if svm : xi1.child_add_string("vserver",svm) if lif : xi1.child_add_string("adapter",lif) xo = filerconnect.invoke_elem(api) if (xo.results_status() == "failed") : print ("Error:\n") print (xo.sprintf()) sys.exit (1) if debug : print(xo.sprintf()) # debugging initiators_list = [] initiators_list = getfcpinitiators(xo) i = 1 for item in range(len(initiators_list)) : if i > len(initiators_list) : break # leave the loop when iterator > length of our list x = 0 for initiators in range(len(initiators_list[i])) : if len(initiators_list[i][x]['igroups']) > 1 : print("[ { 'results' : 'multiple igroups found'},") print("[ %s," % initiators_list[i-1]) print(" [ %s ] ]" % initiators_list[i][x]) elif initiators_list[i][x]['igroups'][0]['igroup'] is None : print("[ { 'results' : 'no igroup found'},") print("[ %s," % initiators_list[i-1]) print(" [ %s ] ] ]" % initiators_list[i][x]) x = x+1 i = i+2 if debug : print(json.dumps(initiators_list, indent=2, sort_keys=True))
[]
[]
[ "FILER_USERNAME", "FILER_PASSWORD" ]
[]
["FILER_USERNAME", "FILER_PASSWORD"]
python
2
0
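The script above resolves credentials with a fixed precedence: command-line flag, then environment variable (FILER_USERNAME / FILER_PASSWORD), then a hard default. A sketch of that precedence chain in Go; firstNonEmpty is our own helper and the flag names are illustrative:

package main

import (
	"flag"
	"fmt"
	"os"
)

// firstNonEmpty returns the first non-empty string, so callers can list
// sources in precedence order.
func firstNonEmpty(vals ...string) string {
	for _, v := range vals {
		if v != "" {
			return v
		}
	}
	return ""
}

func main() {
	userFlag := flag.String("filer_username", "", "username (optional)")
	passFlag := flag.String("filer_password", "", "password (optional)")
	flag.Parse()

	username := firstNonEmpty(*userFlag, os.Getenv("FILER_USERNAME"), "admin")
	password := firstNonEmpty(*passFlag, os.Getenv("FILER_PASSWORD")) // empty means use keys
	fmt.Println("user:", username, "password set:", password != "")
}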
routes/addr/addr.go
package addr import ( "database/sql" "encoding/json" "fmt" "log" "net/http" "os" "strconv" ) type Addr struct { IDADDR int `json:"idaddr"` Addr string `json:"addr"` Lat string `json:"lat"` Lng string `json:"lng"` Postcode string `json:"postcode"` } var db *sql.DB var err error func GetAddr(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "GET") w.Header().Set("Access-Control-Allow-Headers", "Content-Type") db, err = sql.Open("mysql", os.Getenv("MYSQL_URL")) if err != nil { panic(err.Error()) } defer db.Close() var addrs []Addr result, err := db.Query("SELECT idaddr, addr, lat, lng, postcode FROM addr ORDER BY `addr`") if err != nil { panic(err.Error()) } defer result.Close() for result.Next() { var addr Addr err := result.Scan(&addr.IDADDR, &addr.Addr, &addr.Lat, &addr.Lng, &addr.Postcode) if err != nil { panic(err.Error()) } addrs = append(addrs, addr) } json.NewEncoder(w).Encode(addrs) } func GetOneAddr(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "GET") w.Header().Set("Access-Control-Allow-Headers", "Content-Type") db, err = sql.Open("mysql", os.Getenv("MYSQL_URL")) if err != nil { panic(err.Error()) } defer db.Close() idaddr, err := strconv.Atoi(r.URL.Query().Get("idaddr")) if err != nil || idaddr < 1 { http.NotFound(w, r) return } result, err := db.Query("SELECT idaddr, addr, lat, lng, postcode FROM addr WHERE idaddr like ? LIMIT 1", idaddr) if err != nil { panic(err.Error()) } defer result.Close() var addr Addr for result.Next() { err := result.Scan(&addr.IDADDR, &addr.Addr, &addr.Lat, &addr.Lng, &addr.Postcode) if err != nil { panic(err.Error()) } } json.NewEncoder(w).Encode(addr) } func GetListAddr(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "GET") w.Header().Set("Access-Control-Allow-Headers", "Content-Type") db, err = sql.Open("mysql", os.Getenv("MYSQL_URL")) if err != nil { panic(err.Error()) } defer db.Close() query := r.URL.Query().Get("query") var addrs []Addr result, err := db.Query("SELECT idaddr, addr, lat, lng, postcode FROM addr WHERE addr like concat('%', ?, '%') LIMIT 5", query) if err != nil { panic(err.Error()) } defer result.Close() for result.Next() { var addr Addr err := result.Scan(&addr.IDADDR, &addr.Addr, &addr.Lat, &addr.Lng, &addr.Postcode) if err != nil { panic(err.Error()) } addrs = append(addrs, addr) } json.NewEncoder(w).Encode(addrs) } func CreateAddr(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "POST") w.Header().Set("Access-Control-Allow-Headers", "Content-Type") db, err = sql.Open("mysql", os.Getenv("MYSQL_URL")) if err != nil { panic(err.Error()) } defer db.Close() if r.Method != "POST" { fmt.Println("Not POST") return } var ca Addr err := json.NewDecoder(r.Body).Decode(&ca) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } addr := ca.Addr lat := ca.Lat lng := ca.Lng postcode := ca.Postcode if addr == "" { fmt.Println("Field is empty") } res, err := db.Exec("INSERT INTO addr (addr, lat, lng, postcode) VALUES (?, ?, ?, ?)", addr, lat, lng, postcode) if err
!= nil { panic(err) } lastId, err := res.LastInsertId() if err != nil { log.Fatal(err) } fmt.Printf("The last inserted row id: %d\n", lastId) } func UpdateAddr(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "PUT") w.Header().Set("Access-Control-Allow-Headers", "Content-Type") db, err = sql.Open("mysql", os.Getenv("MYSQL_URL")) if err != nil { panic(err.Error()) } defer db.Close() if r.Method != "PUT" { fmt.Println("Not PUT") return } var ea Addr err := json.NewDecoder(r.Body).Decode(&ea) if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } idaddr := ea.IDADDR addr := ea.Addr lat := ea.Lat lng := ea.Lng postcode := ea.Postcode if addr == "" { fmt.Println("Field is empty") } _, err = db.Exec("UPDATE addr SET addr = ?, postcode = ?, lat = ?, lng = ? WHERE idaddr = ?", addr, postcode, lat, lng, idaddr) if err != nil { panic(err.Error()) } fmt.Fprintf(w, "Addr with ID = %s was updated", strconv.Itoa(idaddr)) } func DeleteAddr(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json") w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Methods", "DELETE") w.Header().Set("Access-Control-Allow-Headers", "Content-Type") db, err = sql.Open("mysql", os.Getenv("MYSQL_URL")) if err != nil { panic(err.Error()) } defer db.Close() idaddr, err := strconv.Atoi(r.URL.Query().Get("idaddr")) if err != nil || idaddr < 1 { http.NotFound(w, r) return } stmt, err := db.Prepare("DELETE FROM addr WHERE idaddr = ?") if err != nil { panic(err.Error()) } _, err = stmt.Exec(idaddr) if err != nil { panic(err.Error()) } fmt.Fprintf(w, "Addr with ID = %s was deleted", strconv.Itoa(idaddr)) }
[ "\"MYSQL_URL\"", "\"MYSQL_URL\"", "\"MYSQL_URL\"", "\"MYSQL_URL\"", "\"MYSQL_URL\"", "\"MYSQL_URL\"" ]
[]
[ "MYSQL_URL" ]
[]
["MYSQL_URL"]
go
1
0
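Each handler in the entry above calls sql.Open per request. Since *sql.DB is itself a connection pool, a common alternative is to open it once at startup and share the handle across handlers. A sketch under that assumption; the go-sql-driver import is itself an assumption, since the entry does not show where the mysql driver is registered:

package main

import (
	"database/sql"
	"log"
	"net/http"
	"os"

	_ "github.com/go-sql-driver/mysql" // assumed driver; the entry relies on one being registered
)

var db *sql.DB

func getAddr(w http.ResponseWriter, r *http.Request) {
	// Reuse the shared pool instead of opening a new connection per request.
	if err := db.Ping(); err != nil {
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
	w.Write([]byte("ok"))
}

func main() {
	var err error
	db, err = sql.Open("mysql", os.Getenv("MYSQL_URL"))
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	http.HandleFunc("/addr", getAddr)
	log.Fatal(http.ListenAndServe(":8080", nil))
}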
HIRS_Utils/src/test/java/hirs/persist/DBPortalInfoManagerTest.java
package hirs.persist; import java.lang.reflect.Field; import java.net.URI; import java.util.Collections; import java.util.HashMap; import java.util.Map; import hirs.data.persist.SpringPersistenceTest; import org.apache.logging.log4j.LogManager; import org.apache.logging.log4j.Logger; import hirs.data.persist.info.PortalInfo; import hirs.data.persist.enums.PortalScheme; import org.testng.Assert; import org.testng.annotations.AfterClass; import org.testng.annotations.AfterMethod; import org.testng.annotations.BeforeClass; import org.testng.annotations.Test; /** * Tests for the DBPortalInfoManager. */ public class DBPortalInfoManagerTest extends SpringPersistenceTest { private static final Logger LOGGER = LogManager.getLogger(DBPortalInfoManagerTest.class); /** * Initializes a <code>SessionFactory</code>. The factory is used for an in-memory database that * is used for testing. Sets up an initial Alert Service equivalent to the HIRS SystemConfig */ @BeforeClass public final void beforeClass() { LOGGER.debug("retrieving session factory"); } /** * Closes the <code>SessionFactory</code> from setup. */ @AfterClass public final void afterClass() { LOGGER.debug("cleaning up AlertServiceConfigs closing session factory"); } /** * Cleans up the DB after each Test. */ @AfterMethod public final void afterMethod() { DBUtility.removeAllInstances(sessionFactory, PortalInfo.class); } /** * Test for the deletePortalInfo method. */ @Test public final void deletePortalInfo() { final PortalScheme scheme = PortalScheme.HTTPS; LOGGER.debug("creating DBPortalInfoManager"); PortalInfoManager dbpim = new DBPortalInfoManager(sessionFactory); LOGGER.debug("creating a Portal Info"); PortalInfo info = new PortalInfo(); info.setSchemeName(scheme); dbpim.savePortalInfo(info); LOGGER.debug("saving a Portal Info"); PortalInfo info2 = dbpim.getPortalInfo(scheme); Assert.assertEquals(info2.getSchemeName(), scheme.name()); LOGGER.debug("deleting a Portal Info"); dbpim.deletePortalInfo(scheme); PortalInfo info3 = dbpim.getPortalInfo(scheme); Assert.assertNull(info3); } /** * Test for the getPortalInfo method. */ @Test public final void getPortalInfo() { final PortalScheme scheme = PortalScheme.HTTPS; PortalInfoManager dbpim = new DBPortalInfoManager(sessionFactory); PortalInfo info = new PortalInfo(); info.setSchemeName(scheme); LOGGER.debug("saving a Portal Info"); dbpim.savePortalInfo(info); LOGGER.debug("retrieving a Portal Info"); PortalInfo info2 = dbpim.getPortalInfo(scheme); Assert.assertEquals(info2.getSchemeName(), scheme.name()); } /** * Test for the savePortalInfo method. */ @Test public final void savePortalInfo() { final PortalScheme scheme = PortalScheme.HTTPS; PortalInfoManager dbpim = new DBPortalInfoManager(sessionFactory); PortalInfo info = new PortalInfo(); info.setSchemeName(scheme); dbpim.savePortalInfo(info); PortalInfo info2 = dbpim.getPortalInfo(scheme); Assert.assertEquals(info2.getSchemeName(), scheme.name()); } /** * Test for the updatePortalInfo method. 
*/ @Test public final void updatePortalInfo() { final PortalScheme scheme = PortalScheme.HTTPS; final int port = 127; PortalInfoManager dbpim = new DBPortalInfoManager(sessionFactory); PortalInfo info = new PortalInfo(); info.setSchemeName(scheme); dbpim.savePortalInfo(info); LOGGER.debug("Updating a Portal Info"); PortalInfo info2 = dbpim.getPortalInfo(scheme); info2.setPort(port); dbpim.updatePortalInfo(info2); LOGGER.debug("Verifying changes to the updated Portal Info"); PortalInfo info3 = dbpim.getPortalInfo(scheme); Assert.assertEquals(info3.getPort(), port); } /** * Test for the getPortalUrlBase static method. * @throws Exception To report problems. */ @Test public final void testGetPortalUrl() throws Exception { final PortalScheme scheme = PortalScheme.HTTPS; final int port = 127; final String contextName = "HIRS_Portal"; final String address = "localhost"; try { HashMap<String, String> envMap = new HashMap<>(System.getenv()); setEnv(envMap); PortalInfoManager dbpim = new DBPortalInfoManager(sessionFactory); PortalInfo info = new PortalInfo(); info.setSchemeName(scheme); info.setPort(port); info.setContextName(contextName); info.setIpAddress(address); dbpim.savePortalInfo(info); String url = dbpim.getPortalUrlBase(); Assert.assertEquals(url, "https://localhost:127/HIRS_Portal/"); Assert.assertEquals(url, URI.create(url).toString()); String urlExtension = "jsp/alerts.jsp?UUID=1342-ABCD"; Assert.assertEquals(url + urlExtension, URI.create(url + urlExtension).toString()); } finally { // Unset the process environment variable for other tests. HashMap<String, String> envMap = new HashMap<>(System.getenv()); envMap.remove("HIRS_HIBERNATE_CONFIG"); setEnv(envMap); } } /** * Test getPortalUrl works as expected when there is no PortalInfo object. * @throws Exception To report problems. */ @Test public final void testGetPortalUrlNoPortalInfoObject() throws Exception { PortalInfoManager dbpim = new DBPortalInfoManager(sessionFactory); dbpim.getPortalInfo(PortalScheme.HTTPS); String url = dbpim.getPortalUrlBase(); Assert.assertEquals(url, "Your_HIRS_Portal/"); Assert.assertEquals(url, URI.create(url).toString()); } /** * Set an environment variable for the process. * @param newenv envMap to use */ @SuppressWarnings("unchecked") public static void setEnv(final Map<String, String> newenv) { try { Class<?> processEnvironmentClass = Class.forName("java.lang.ProcessEnvironment"); Field theEnvironmentField = processEnvironmentClass.getDeclaredField("theEnvironment"); theEnvironmentField.setAccessible(true); Map<String, String> env = (Map<String, String>) theEnvironmentField.get(null); env.putAll(newenv); Field theCaseInsensitiveEnvironmentField = processEnvironmentClass.getDeclaredField("theCaseInsensitiveEnvironment"); theCaseInsensitiveEnvironmentField.setAccessible(true); Map<String, String> cienv = (Map<String, String>) theCaseInsensitiveEnvironmentField.get(null); cienv.putAll(newenv); } catch (ReflectiveOperationException e) { try { Class[] classes = Collections.class.getDeclaredClasses(); Map<String, String> env = System.getenv(); for (Class cl : classes) { if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) { Field field = cl.getDeclaredField("m"); field.setAccessible(true); Object obj = field.get(env); Map<String, String> map = (Map<String, String>) obj; map.clear(); map.putAll(newenv); } } } catch (ReflectiveOperationException e2) { LOGGER.error(e2.getMessage()); } } } }
[]
[]
[]
[]
[]
java
0
0
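The Java test above has to mutate the process environment through reflection on JDK internals. Go's testing package has the equivalent built in since Go 1.17: (*testing.T).Setenv sets a variable and restores it automatically when the test ends. A minimal sketch using the same variable the Java test cleans up:

package config_test

import (
	"os"
	"testing"
)

func TestEnvOverride(t *testing.T) {
	t.Setenv("HIRS_HIBERNATE_CONFIG", "/tmp/hibernate.cfg") // restored after the test
	if got := os.Getenv("HIRS_HIBERNATE_CONFIG"); got == "" {
		t.Fatal("expected override to be visible")
	}
}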
numpy/distutils/mingw32ccompiler.py
""" Support code for building Python extensions on Windows. # NT stuff # 1. Make sure libpython<version>.a exists for gcc. If not, build it. # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) # 3. Force windows to use g77 """ import os import sys import subprocess import re import textwrap # Overwrite certain distutils.ccompiler functions: import numpy.distutils.ccompiler # noqa: F401 from numpy.distutils import log # NT stuff # 1. Make sure libpython<version>.a exists for gcc. If not, build it. # 2. Force windows to use gcc (we're struggling with MSVC and g77 support) # --> this is done in numpy/distutils/ccompiler.py # 3. Force windows to use g77 import distutils.cygwinccompiler from distutils.version import StrictVersion from distutils.unixccompiler import UnixCCompiler from distutils.msvccompiler import get_build_version as get_build_msvc_version from distutils.errors import UnknownFileError from numpy.distutils.misc_util import (msvc_runtime_library, msvc_runtime_version, msvc_runtime_major, get_build_architecture) def get_msvcr_replacement(): """Replacement for outdated version of get_msvcr from cygwinccompiler""" msvcr = msvc_runtime_library() return [] if msvcr is None else [msvcr] # monkey-patch cygwinccompiler with our updated version from misc_util # to avoid getting an exception raised on Python 3.5 distutils.cygwinccompiler.get_msvcr = get_msvcr_replacement # Useful to generate table of symbols from a dll _START = re.compile(r'\[Ordinal/Name Pointer\] Table') _TABLE = re.compile(r'^\s+\[([\s*[0-9]*)\] ([a-zA-Z0-9_]*)') # the same as cygwin plus some additional parameters class Mingw32CCompiler(distutils.cygwinccompiler.CygwinCCompiler): """ A modified MingW32 compiler compatible with an MSVC built Python. """ compiler_type = 'mingw32' def __init__ (self, verbose=0, dry_run=0, force=0): distutils.cygwinccompiler.CygwinCCompiler.__init__ (self, verbose, dry_run, force) # we need to support 3.2 which doesn't match the standard # get_versions methods regex if self.gcc_version is None: p = subprocess.Popen(['gcc', '-dumpversion'], shell=True, stdout=subprocess.PIPE) out_string = p.stdout.read() p.stdout.close() result = re.search(r'(\d+\.\d+)', out_string) if result: self.gcc_version = StrictVersion(result.group(1)) # A real mingw32 doesn't need to specify a different entry point, # but cygwin 2.91.57 in no-cygwin-mode needs it. if self.gcc_version <= "2.91.57": entry_point = '--entry _DllMain@12' else: entry_point = '' if self.linker_dll == 'dllwrap': # Commented out '--driver-name g++' part that fixes weird # g++.exe: g++: No such file or directory # error (mingw 1.0 in Enthon24 tree, gcc-3.4.5). # If the --driver-name part is required for some environment # then make the inclusion of this part specific to that # environment. self.linker = 'dllwrap' # --driver-name g++' elif self.linker_dll == 'gcc': self.linker = 'g++' # **changes: eric jones 4/11/01 # 1. Check for import library on Windows. Build if it doesn't exist. build_import_library() # Check for custom msvc runtime library on Windows. Build if it doesn't exist. 
msvcr_success = build_msvcr_library() msvcr_dbg_success = build_msvcr_library(debug=True) if msvcr_success or msvcr_dbg_success: # add preprocessor statement for using customized msvcr lib self.define_macro('NPY_MINGW_USE_CUSTOM_MSVCR') # Define the MSVC version as hint for MinGW msvcr_version = msvc_runtime_version() if msvcr_version: self.define_macro('__MSVCRT_VERSION__', '0x%04i' % msvcr_version) # MS_WIN64 should be defined when building for amd64 on windows, # but python headers define it only for MS compilers, which has all # kind of bad consequences, like using Py_ModuleInit4 instead of # Py_ModuleInit4_64, etc... So we add it here if get_build_architecture() == 'AMD64': if self.gcc_version < "4.0": self.set_executables( compiler='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0 -Wall', compiler_so='gcc -g -DDEBUG -DMS_WIN64 -mno-cygwin -O0' ' -Wall -Wstrict-prototypes', linker_exe='gcc -g -mno-cygwin', linker_so='gcc -g -mno-cygwin -shared') else: # gcc-4 series releases do not support -mno-cygwin option self.set_executables( compiler='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall', compiler_so='gcc -g -DDEBUG -DMS_WIN64 -O0 -Wall -Wstrict-prototypes', linker_exe='gcc -g', linker_so='gcc -g -shared') else: if self.gcc_version <= "3.0.0": self.set_executables( compiler='gcc -mno-cygwin -O2 -w', compiler_so='gcc -mno-cygwin -mdll -O2 -w' ' -Wstrict-prototypes', linker_exe='g++ -mno-cygwin', linker_so='%s -mno-cygwin -mdll -static %s' % (self.linker, entry_point)) elif self.gcc_version < "4.0": self.set_executables( compiler='gcc -mno-cygwin -O2 -Wall', compiler_so='gcc -mno-cygwin -O2 -Wall' ' -Wstrict-prototypes', linker_exe='g++ -mno-cygwin', linker_so='g++ -mno-cygwin -shared') else: # gcc-4 series releases do not support -mno-cygwin option self.set_executables(compiler='gcc -O2 -Wall', compiler_so='gcc -O2 -Wall -Wstrict-prototypes', linker_exe='g++ ', linker_so='g++ -shared') # added for python2.3 support # we can't pass it through set_executables because pre 2.2 would fail self.compiler_cxx = ['g++'] # Maybe we should also append -mthreads, but then the finished dlls # need another dll (mingwm10.dll see Mingw32 docs) (-mthreads: Support # thread-safe exception handling on `Mingw32') # no additional libraries needed #self.dll_libraries=[] return # __init__ () def link(self, target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols = None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None): # Include the appropriate MSVC runtime library if Python was built # with MSVC >= 7.0 (MinGW standard is msvcrt) runtime_library = msvc_runtime_library() if runtime_library: if not libraries: libraries = [] libraries.append(runtime_library) args = (self, target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, None, #export_symbols, we do this in our def-file debug, extra_preargs, extra_postargs, build_temp, target_lang) if self.gcc_version < "3.0.0": func = distutils.cygwinccompiler.CygwinCCompiler.link else: func = UnixCCompiler.link func(*args[:func.__code__.co_argcount]) return def object_filenames (self, source_filenames, strip_dir=0, output_dir=''): if output_dir is None: output_dir = '' obj_names = [] for src_name in source_filenames: # use normcase to make sure '.rc' is really '.rc' and not '.RC' (base, ext) = os.path.splitext (os.path.normcase(src_name)) # added these lines to strip off windows drive letters # without it, .o files are placed next to .c files # instead of 
the build directory drv, base = os.path.splitdrive(base) if drv: base = base[1:] if ext not in (self.src_extensions + ['.rc', '.res']): raise UnknownFileError( "unknown file type '%s' (from '%s')" % \ (ext, src_name)) if strip_dir: base = os.path.basename (base) if ext == '.res' or ext == '.rc': # these need to be compiled to object files obj_names.append (os.path.join (output_dir, base + ext + self.obj_extension)) else: obj_names.append (os.path.join (output_dir, base + self.obj_extension)) return obj_names # object_filenames () def find_python_dll(): # We can't do much here: # - find it in the virtualenv (sys.prefix) # - find it in python main dir (sys.base_prefix, if in a virtualenv) # - sys.real_prefix is main dir for virtualenvs in Python 2.7 # - in system32, # - ortherwise (Sxs), I don't know how to get it. stems = [sys.prefix] if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: stems.append(sys.base_prefix) elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: stems.append(sys.real_prefix) sub_dirs = ['', 'lib', 'bin'] # generate possible combinations of directory trees and sub-directories lib_dirs = [] for stem in stems: for folder in sub_dirs: lib_dirs.append(os.path.join(stem, folder)) # add system directory as well if 'SYSTEMROOT' in os.environ: lib_dirs.append(os.path.join(os.environ['SYSTEMROOT'], 'System32')) # search in the file system for possible candidates major_version, minor_version = tuple(sys.version_info[:2]) patterns = ['python%d%d.dll'] for pat in patterns: dllname = pat % (major_version, minor_version) print("Looking for %s" % dllname) for folder in lib_dirs: dll = os.path.join(folder, dllname) if os.path.exists(dll): return dll raise ValueError("%s not found in %s" % (dllname, lib_dirs)) def dump_table(dll): st = subprocess.Popen(["objdump.exe", "-p", dll], stdout=subprocess.PIPE) return st.stdout.readlines() def generate_def(dll, dfile): """Given a dll file location, get all its exported symbols and dump them into the given def file. The .def file will be overwritten""" dump = dump_table(dll) for i in range(len(dump)): if _START.match(dump[i].decode()): break else: raise ValueError("Symbol table not found") syms = [] for j in range(i+1, len(dump)): m = _TABLE.match(dump[j].decode()) if m: syms.append((int(m.group(1).strip()), m.group(2))) else: break if len(syms) == 0: log.warn('No symbols found in %s' % dll) d = open(dfile, 'w') d.write('LIBRARY %s\n' % os.path.basename(dll)) d.write(';CODE PRELOAD MOVEABLE DISCARDABLE\n') d.write(';DATA PRELOAD SINGLE\n') d.write('\nEXPORTS\n') for s in syms: #d.write('@%d %s\n' % (s[0], s[1])) d.write('%s\n' % s[1]) d.close() def find_dll(dll_name): arch = {'AMD64' : 'amd64', 'Intel' : 'x86'}[get_build_architecture()] def _find_dll_in_winsxs(dll_name): # Walk through the WinSxS directory to find the dll. winsxs_path = os.path.join(os.environ.get('WINDIR', r'C:\WINDOWS'), 'winsxs') if not os.path.exists(winsxs_path): return None for root, dirs, files in os.walk(winsxs_path): if dll_name in files and arch in root: return os.path.join(root, dll_name) return None def _find_dll_in_path(dll_name): # First, look in the Python directory, then scan PATH for # the given dll name. 
for path in [sys.prefix] + os.environ['PATH'].split(';'): filepath = os.path.join(path, dll_name) if os.path.exists(filepath): return os.path.abspath(filepath) return _find_dll_in_winsxs(dll_name) or _find_dll_in_path(dll_name) def build_msvcr_library(debug=False): if os.name != 'nt': return False # If the version number is None, then we couldn't find the MSVC runtime at # all, because we are running on a Python distribution which is custom # compiled; trust that the compiler is the same as the one available to us # now, and that it is capable of linking with the correct runtime without # any extra options. msvcr_ver = msvc_runtime_major() if msvcr_ver is None: log.debug('Skip building import library: ' 'Runtime is not compiled with MSVC') return False # Skip using a custom library for versions < MSVC 8.0 if msvcr_ver < 80: log.debug('Skip building msvcr library:' ' custom functionality not present') return False msvcr_name = msvc_runtime_library() if debug: msvcr_name += 'd' # Skip if custom library already exists out_name = "lib%s.a" % msvcr_name out_file = os.path.join(sys.prefix, 'libs', out_name) if os.path.isfile(out_file): log.debug('Skip building msvcr library: "%s" exists' % (out_file,)) return True # Find the msvcr dll msvcr_dll_name = msvcr_name + '.dll' dll_file = find_dll(msvcr_dll_name) if not dll_file: log.warn('Cannot build msvcr library: "%s" not found' % msvcr_dll_name) return False def_name = "lib%s.def" % msvcr_name def_file = os.path.join(sys.prefix, 'libs', def_name) log.info('Building msvcr library: "%s" (from %s)' \ % (out_file, dll_file)) # Generate a symbol definition file from the msvcr dll generate_def(dll_file, def_file) # Create a custom mingw library for the given symbol definitions cmd = ['dlltool', '-d', def_file, '-l', out_file] retcode = subprocess.call(cmd) # Clean up symbol definitions os.remove(def_file) return (not retcode) def build_import_library(): if os.name != 'nt': return arch = get_build_architecture() if arch == 'AMD64': return _build_import_library_amd64() elif arch == 'Intel': return _build_import_library_x86() else: raise ValueError("Unhandled arch %s" % arch) def _check_for_import_lib(): """Check if an import library for the Python runtime already exists.""" major_version, minor_version = tuple(sys.version_info[:2]) # patterns for the file name of the library itself patterns = ['libpython%d%d.a', 'libpython%d%d.dll.a', 'libpython%d.%d.dll.a'] # directory trees that may contain the library stems = [sys.prefix] if hasattr(sys, 'base_prefix') and sys.base_prefix != sys.prefix: stems.append(sys.base_prefix) elif hasattr(sys, 'real_prefix') and sys.real_prefix != sys.prefix: stems.append(sys.real_prefix) # possible subdirectories within those trees where it is placed sub_dirs = ['libs', 'lib'] # generate a list of candidate locations candidates = [] for pat in patterns: filename = pat % (major_version, minor_version) for stem_dir in stems: for folder in sub_dirs: candidates.append(os.path.join(stem_dir, folder, filename)) # test the filesystem to see if we can find any of these for fullname in candidates: if os.path.isfile(fullname): # already exists, in location given return (True, fullname) # needs to be built, preferred location given first return (False, candidates[0]) def _build_import_library_amd64(): out_exists, out_file = _check_for_import_lib() if out_exists: log.debug('Skip building import library: "%s" exists', out_file) return # get the runtime dll for which we are building the import library dll_file = find_python_dll()
log.info('Building import library (arch=AMD64): "%s" (from %s)' % (out_file, dll_file)) # generate symbol list from this library def_name = "python%d%d.def" % tuple(sys.version_info[:2]) def_file = os.path.join(sys.prefix, 'libs', def_name) generate_def(dll_file, def_file) # generate import library from this symbol list; wait for dlltool to # finish so the library actually exists before anything links against it cmd = ['dlltool', '-d', def_file, '-l', out_file] subprocess.check_call(cmd) def _build_import_library_x86(): """ Build the import libraries for Mingw32-gcc on Windows """ out_exists, out_file = _check_for_import_lib() if out_exists: log.debug('Skip building import library: "%s" exists', out_file) return lib_name = "python%d%d.lib" % tuple(sys.version_info[:2]) lib_file = os.path.join(sys.prefix, 'libs', lib_name) if not os.path.isfile(lib_file): # didn't find library file in virtualenv, try base distribution, too, # and use that instead if found there. For Python 2.7 venvs, the base # directory is in attribute real_prefix instead of base_prefix. if hasattr(sys, 'base_prefix'): base_lib = os.path.join(sys.base_prefix, 'libs', lib_name) elif hasattr(sys, 'real_prefix'): base_lib = os.path.join(sys.real_prefix, 'libs', lib_name) else: base_lib = '' # os.path.isfile('') == False if os.path.isfile(base_lib): lib_file = base_lib else: log.warn('Cannot build import library: "%s" not found', lib_file) return log.info('Building import library (ARCH=x86): "%s"', out_file) from numpy.distutils import lib2def def_name = "python%d%d.def" % tuple(sys.version_info[:2]) def_file = os.path.join(sys.prefix, 'libs', def_name) nm_cmd = '%s %s' % (lib2def.DEFAULT_NM, lib_file) nm_output = lib2def.getnm(nm_cmd) dlist, flist = lib2def.parse_nm(nm_output) lib2def.output_def(dlist, flist, lib2def.DEF_HEADER, open(def_file, 'w')) dll_name = find_python_dll() args = (dll_name, def_file, out_file) cmd = 'dlltool --dllname "%s" --def "%s" --output-lib "%s"' % args status = os.system(cmd) # for now, fail silently if status: log.warn('Failed to build import library for gcc. Linking will fail.') return #===================================== # Dealing with Visual Studio MANIFESTS #===================================== # Functions to deal with Visual Studio manifests. Manifests are a mechanism to # enforce strong DLL versioning on Windows, and have nothing to do with # distutils MANIFEST. Manifests are XML files with version info, and are used # by the OS loader; they are necessary when linking against a DLL not in the # system path; in particular, the official python 2.6 binary is built against # the MS runtime 9 (the one from VS 2008), which is not available on most # Windows systems; the python 2.6 installer does install it in the Win SxS # (Side by side) directory, but the manifest is required for this to work. # This is a big mess, thanks MS for a wonderful system. # XXX: ideally, we should use exactly the same version as used by python. I # submitted a patch to get this version, but it was only included for python # 2.6.1 and above. So for versions below, we use a "best guess".
_MSVCRVER_TO_FULLVER = {} if sys.platform == 'win32': try: import msvcrt # I took one version in my SxS directory: no idea if it is the right # one, and we can't retrieve it from python _MSVCRVER_TO_FULLVER['80'] = "8.0.50727.42" _MSVCRVER_TO_FULLVER['90'] = "9.0.21022.8" # Value from msvcrt.CRT_ASSEMBLY_VERSION under Python 3.3.0 # on Windows XP: _MSVCRVER_TO_FULLVER['100'] = "10.0.30319.460" if hasattr(msvcrt, "CRT_ASSEMBLY_VERSION"): major, minor, rest = msvcrt.CRT_ASSEMBLY_VERSION.split(".", 2) _MSVCRVER_TO_FULLVER[major + minor] = msvcrt.CRT_ASSEMBLY_VERSION del major, minor, rest except ImportError: # If we are here, it means python was not built with MSVC. Not sure what # to do in that case: manifest building will fail, but it should not be # used in that case anyway log.warn('Cannot import msvcrt: using manifest will not be possible') def msvc_manifest_xml(maj, min): """Given a major and minor version of the MSVCR, returns the corresponding XML file.""" try: fullver = _MSVCRVER_TO_FULLVER[str(maj * 10 + min)] except KeyError: raise ValueError("Version %d,%d of MSVCRT not supported yet" % (maj, min)) # Don't be fooled, it looks like XML, but it is not. In particular, it # should not have any space before starting, and its size should be # divisible by 4, most likely for alignment constraints when the xml is # embedded in the binary... # This template was copied directly from the python 2.6 binary (using # strings.exe from mingw on python.exe). template = textwrap.dedent("""\ <assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> <security> <requestedPrivileges> <requestedExecutionLevel level="asInvoker" uiAccess="false"></requestedExecutionLevel> </requestedPrivileges> </security> </trustInfo> <dependency> <dependentAssembly> <assemblyIdentity type="win32" name="Microsoft.VC%(maj)d%(min)d.CRT" version="%(fullver)s" processorArchitecture="*" publicKeyToken="1fc8b3b9a1e18e3b"></assemblyIdentity> </dependentAssembly> </dependency> </assembly>""") return template % {'fullver': fullver, 'maj': maj, 'min': min} def manifest_rc(name, type='dll'): """Return the rc file used to generate the res file which will be embedded as a manifest for the given manifest file name, of the given type ('dll' or 'exe').
Parameters ---------- name : str name of the manifest file to embed type : str {'dll', 'exe'} type of the binary which will embed the manifest """ if type == 'dll': rctype = 2 elif type == 'exe': rctype = 1 else: raise ValueError("Type %s not supported" % type) return """\ #include "winuser.h" %d RT_MANIFEST %s""" % (rctype, name) def check_embedded_msvcr_match_linked(msver): """msver is the ms runtime version used for the MANIFEST.""" # check that the msvcr major versions used for linking and # embedding are the same maj = msvc_runtime_major() if maj: if maj != int(msver): raise ValueError( "Discrepancy between linked msvcr " \ "(%d) and the one about to be embedded " \ "(%d)" % (int(msver), maj)) def configtest_name(config): base = os.path.basename(config._gen_temp_sourcefile("yo", [], "c")) return os.path.splitext(base)[0] def manifest_name(config): # Get configtest name (including suffix) root = configtest_name(config) exext = config.compiler.exe_extension return root + exext + ".manifest" def rc_name(config): # Get configtest name (including suffix) root = configtest_name(config) return root + ".rc" def generate_manifest(config): msver = get_build_msvc_version() if msver is not None: if msver >= 8: check_embedded_msvcr_match_linked(msver) ma = int(msver) mi = int((msver - ma) * 10) # Write the manifest file manxml = msvc_manifest_xml(ma, mi) man = open(manifest_name(config), "w") config.temp_files.append(manifest_name(config)) man.write(manxml) man.close()
[]
[]
[ "SYSTEMROOT", "PATH", "WINDIR" ]
[]
["SYSTEMROOT", "PATH", "WINDIR"]
python
3
0
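The record above extracts a DLL's export table by parsing `objdump -p` output (the dump_table/generate_def pair) and then feeds the resulting .def file to dlltool. As a rough, self-contained sketch of that extraction step in Go, under stated assumptions: the header and entry regular expressions and the python39.dll path are illustrative guesses, since the module's real _START/_TABLE patterns are not shown in this record.

package main

import (
	"fmt"
	"os"
	"os/exec"
	"path/filepath"
	"regexp"
	"strings"
)

// writeDefFile runs `objdump -p` on a DLL and writes a minimal .def file,
// mirroring what dump_table/generate_def do in the record above.
func writeDefFile(dll, defPath string) error {
	out, err := exec.Command("objdump", "-p", dll).Output() // the record uses objdump.exe
	if err != nil {
		return err
	}
	// Assumed markers; the record's actual regexes are defined elsewhere in the module.
	start := regexp.MustCompile(`\[Ordinal/Name Pointer\] Table`)
	entry := regexp.MustCompile(`^\s*\[\s*(\d+)\]\s+(\S+)`)
	var syms []string
	inTable := false
	for _, line := range strings.Split(string(out), "\n") {
		switch {
		case start.MatchString(line):
			inTable = true
		case inTable:
			m := entry.FindStringSubmatch(line)
			if m == nil {
				inTable = false // table ended
				continue
			}
			syms = append(syms, m[2])
		}
	}
	f, err := os.Create(defPath)
	if err != nil {
		return err
	}
	defer f.Close()
	fmt.Fprintf(f, "LIBRARY %s\n\nEXPORTS\n", filepath.Base(dll))
	for _, s := range syms {
		fmt.Fprintln(f, s)
	}
	return nil
}

func main() {
	// python39.dll is an illustrative target; any DLL with exports works.
	if err := writeDefFile("python39.dll", "python39.def"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}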
cmd/ipfs-cluster-follow/main.go
// The ipfs-cluster-follow application. package main import ( "fmt" "os" "os/signal" "os/user" "path/filepath" "syscall" "github.com/ipfs/ipfs-cluster/api/rest/client" "github.com/ipfs/ipfs-cluster/cmdutils" "github.com/ipfs/ipfs-cluster/version" "github.com/multiformats/go-multiaddr" "github.com/pkg/errors" semver "github.com/blang/semver" cli "github.com/urfave/cli/v2" ) const ( // ProgramName of this application programName = "ipfs-cluster-follow" clusterNameFlag = "clusterName" logLevel = "info" ) // Default location for the configurations and data var ( // DefaultFolder is the name of the cluster folder DefaultFolder = ".ipfs-cluster-follow" // DefaultPath is set on init() to $HOME/DefaultFolder // and holds all the ipfs-cluster data DefaultPath string // The name of the configuration file inside DefaultPath DefaultConfigFile = "service.json" // The name of the identity file inside DefaultPath DefaultIdentityFile = "identity.json" DefaultGateway = "127.0.0.1:8080" ) var ( commit string configPath string identityPath string signalChan = make(chan os.Signal, 20) ) // Description provides a short summary of the functionality of this tool var Description = fmt.Sprintf(` %s helps running IPFS Cluster follower peers. Follower peers subscribe to a Cluster controlled by a set of "trusted peers". They collaborate in pinning items as dictated by the trusted peers and do not have the power to make Cluster-wide modifications to the pinset. Follower peers cannot access information nor trigger actions in other peers. %s can be used to follow different clusters by launching it with different options. Each Cluster has an identity, a configuration and a datastore associated to it, which are kept under "~/%s/<cluster_name>". For feedback, bug reports or any additional information, visit https://github.com/ipfs/ipfs-cluster. EXAMPLES: List configured follower peers: $ %s Display information for a follower peer: $ %s <clusterName> info Initialize a follower peer: $ %s <clusterName> init <example.url> Launch a follower peer (will stay running): $ %s <clusterName> run List items in the pinset for a given cluster: $ %s <clusterName> list Getting help and usage info: $ %s --help $ %s <clusterName> --help $ %s <clusterName> info --help $ %s <clusterName> init --help $ %s <clusterName> run --help $ %s <clusterName> list --help `, programName, programName, DefaultFolder, programName, programName, programName, programName, programName, programName, programName, programName, programName, programName, programName, ) func init() { // Set build information. if build, err := semver.NewBuildVersion(commit); err == nil { version.Version.Build = []string{"git" + build} } // We try guessing user's home from the HOME variable. This // allows HOME hacks for things like Snapcraft builds. HOME // should be set in all UNIX by the OS. Alternatively, we fall back to // usr.HomeDir (which should work on Windows etc.). home := os.Getenv("HOME") if home == "" { usr, err := user.Current() if err != nil { panic(fmt.Sprintf("cannot get current user: %s", err)) } home = usr.HomeDir } DefaultPath = filepath.Join(home, DefaultFolder) // This will abort the program on signal. We close the signal channel // when launching the peer so that we can do an orderly shutdown in // that case though. go func() { signal.Notify( signalChan, syscall.SIGINT, syscall.SIGTERM, syscall.SIGHUP, ) _, ok := <-signalChan // channel closed. 
if !ok { return } os.Exit(1) }() } func main() { app := cli.NewApp() app.Name = programName app.Usage = "IPFS Cluster Follower" app.UsageText = fmt.Sprintf("%s [global options] <clusterName> [subcommand]...", programName) app.Description = Description //app.Copyright = "© Protocol Labs, Inc." app.Version = version.Version.String() app.Flags = []cli.Flag{ &cli.StringFlag{ Name: "config, c", Value: DefaultPath, Usage: "path to the follower's configuration and data `FOLDER`", EnvVars: []string{"IPFS_CLUSTER_PATH"}, }, } app.Action = func(c *cli.Context) error { if !c.Args().Present() { return listClustersCmd(c) } clusterName := c.Args().Get(0) clusterApp := cli.NewApp() clusterApp.Name = fmt.Sprintf("%s %s", programName, clusterName) clusterApp.HelpName = clusterApp.Name clusterApp.Usage = fmt.Sprintf("Follower peer management for \"%s\"", clusterName) clusterApp.UsageText = fmt.Sprintf("%s %s [subcommand]", programName, clusterName) clusterApp.Action = infoCmd clusterApp.HideVersion = true clusterApp.Flags = []cli.Flag{ &cli.StringFlag{ // pass clusterName to subcommands Name: clusterNameFlag, Value: clusterName, Hidden: true, }, } clusterApp.Commands = []*cli.Command{ { Name: "info", Usage: "displays information for this peer", ArgsUsage: "", Description: fmt.Sprintf(` This command displays useful information for "%s"'s follower peer. `, clusterName), Action: infoCmd, }, { Name: "init", Usage: "initializes the follower peer", ArgsUsage: "<template_URL>", Description: fmt.Sprintf(` This command initializes a follower peer for the cluster named "%s". You will need to pass the peer configuration URL. The command will generate a new peer identity and leave things ready to run "%s %s run". An error will be returned if a configuration folder for a cluster peer with this name already exists. If you wish to re-initialize from scratch, delete this folder first. `, clusterName, programName, clusterName), Action: initCmd, Flags: []cli.Flag{ &cli.StringFlag{ Name: "gateway", Value: DefaultGateway, Usage: "gateway URL", EnvVars: []string{"IPFS_GATEWAY"}, Hidden: true, }, }, }, { Name: "run", Usage: "runs the follower peer", ArgsUsage: "", Description: fmt.Sprintf(` This command runs a "%s" cluster follower peer. The peer should have already been initialized with "init"; alternatively, the --init flag needs to be passed. Before running, ensure that you have connectivity and that the IPFS daemon is running. You can obtain more information about this follower peer by running "%s %s" (without any arguments). The peer will stay running in the foreground until manually stopped. `, clusterName, programName, clusterName), Action: runCmd, Flags: []cli.Flag{ &cli.StringFlag{ Name: "init", Usage: "initialize cluster peer with the given URL before running", }, &cli.StringFlag{ Name: "gateway", Value: DefaultGateway, Usage: "gateway URL", EnvVars: []string{"IPFS_GATEWAY"}, Hidden: true, }, }, }, { Name: "list", Usage: "list items in the peer's pinset", ArgsUsage: "", Description: ` This command lists all the items pinned by this follower cluster peer on IPFS. If the peer is currently running, it will display status information for each pin (such as PINNING). If not, it will just display the current list of pins as obtained from the internal state on disk. `, Action: listCmd, }, } return clusterApp.RunAsSubcommand(c) } app.Run(os.Args) } // buildPaths returns the path to the configuration folder, // the identity.json and the service.json files.
func buildPaths(c *cli.Context, clusterName string) (string, string, string) { absPath, err := filepath.Abs(c.String("config")) if err != nil { cmdutils.ErrorOut("error getting absolute path for %s: %s", clusterName, err) os.Exit(1) } // ~/.ipfs-cluster-follow/clusterName absPath = filepath.Join(absPath, clusterName) // ~/.ipfs-cluster-follow/clusterName/service.json configPath = filepath.Join(absPath, DefaultConfigFile) // ~/.ipfs-cluster-follow/clusterName/identity.json identityPath = filepath.Join(absPath, DefaultIdentityFile) return absPath, configPath, identityPath } func socketAddress(absPath, clusterName string) (multiaddr.Multiaddr, error) { socket := fmt.Sprintf("/unix/%s", filepath.Join(absPath, "api-socket")) ma, err := multiaddr.NewMultiaddr(socket) if err != nil { return nil, errors.Wrapf(err, "error parsing socket: %s", socket) } return ma, nil } // getClient returns a REST API client. It points to the socket address unless // CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS is set, in which case it uses it. func getClient(absPath, clusterName string) (client.Client, error) { var endp multiaddr.Multiaddr var err error if endpStr := os.Getenv("CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS"); endpStr != "" { endp, err = multiaddr.NewMultiaddr(endpStr) if err != nil { return nil, errors.Wrapf(err, "error parsing the value of CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS: %s", endpStr) } } else { endp, err = socketAddress(absPath, clusterName) } if err != nil { return nil, err } cfg := client.Config{ APIAddr: endp, } return client.NewDefaultClient(&cfg) }
[ "\"HOME\"", "\"CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS\"" ]
[]
[ "CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS", "HOME" ]
[]
["CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS", "HOME"]
go
2
0
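One detail worth pulling out of the record above: getClient prefers the CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS override and only falls back to the per-cluster unix-socket multiaddr. A minimal sketch of just that selection logic, kept dependency-free by returning the multiaddr as a string rather than a parsed multiaddr; the "mycluster" folder name is hypothetical.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// endpointFor mirrors getClient's selection: prefer the environment
// override, otherwise fall back to the cluster's api-socket multiaddr.
func endpointFor(absPath string) string {
	if endp := os.Getenv("CLUSTER_RESTAPI_HTTPLISTENMULTIADDRESS"); endp != "" {
		return endp
	}
	return fmt.Sprintf("/unix/%s", filepath.Join(absPath, "api-socket"))
}

func main() {
	home, err := os.UserHomeDir()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// "mycluster" is a hypothetical cluster name.
	fmt.Println(endpointFor(filepath.Join(home, ".ipfs-cluster-follow", "mycluster")))
}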
internal/tool/notes/amygdala/amygdala.go
package amygdala import ( "errors" "flag" "fmt" "io" "net/http" "os" "runtime/debug" "github.com/frioux/leatherman/internal/log" "github.com/frioux/leatherman/internal/middleware" "github.com/frioux/leatherman/internal/notes" "github.com/frioux/leatherman/internal/twilio" ) func Amygdala(args []string, _ io.Reader) error { var ( dropboxAccessToken, myCell, version string twilioAuthToken, twilioURL string port int ) dropboxAccessToken = os.Getenv("LM_DROPBOX_TOKEN") if dropboxAccessToken == "" { return errors.New("LM_DROPBOX_TOKEN is missing") } myCell = os.Getenv("LM_MY_CELL") if myCell == "" { myCell = "+15555555555" } twilioAuthToken = os.Getenv("LM_TWILIO_TOKEN") if len(twilioAuthToken) == 0 { twilioAuthToken = "xyzzy" } twilioURL = os.Getenv("LM_TWILIO_URL") if len(twilioURL) == 0 { twilioURL = "http://localhost:8080/twilio" } fs := flag.NewFlagSet("amygdala", flag.ContinueOnError) fs.IntVar(&port, "port", 8080, "port to listen on") if version == "" { version = "unknown" } if err := fs.Parse(args[1:]); err != nil { return err } cl := &http.Client{} mux := http.NewServeMux() mux.Handle("/version", http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) { rw.Header().Set("Content-Type", "text/plain") rw.Header().Set("Cache-Control", "no-cache") bi, ok := debug.ReadBuildInfo() if !ok { // bi is nil without build info; bail out instead of dereferencing it rw.WriteHeader(500) return } fmt.Fprintln(rw, "version:", version) for _, dep := range bi.Deps { fmt.Fprintf(rw, "%s@%s (%s)\n", dep.Path, dep.Version, dep.Sum) if dep.Replace != nil { r := dep.Replace fmt.Fprintf(rw, " replaced by %s@%s (%s)\n", r.Path, r.Version, r.Sum) } } })) mux.Handle("/twilio", receiveSMS(cl, dropboxAccessToken, twilioAuthToken, twilioURL, myCell)) h := middleware.Adapt(mux, middleware.Log(os.Stdout)) return http.ListenAndServe(fmt.Sprintf(":%d", port), h) } // receiveSMS handles https://www.twilio.com/docs/sms/twiml func receiveSMS(cl *http.Client, tok, twilioAuthToken, twilioURL, myCell string) http.HandlerFunc { rules, err := notes.NewRules(tok) if err != nil { panic(err) } return func(rw http.ResponseWriter, r *http.Request) { rw.Header().Set("Cache-Control", "no-cache") if err := r.ParseForm(); err != nil { rw.WriteHeader(http.StatusBadRequest) io.WriteString(rw, "Couldn't Parse Form") log.Err(fmt.Errorf("http.Request.ParseForm: %w", err)) return } if ok, err := twilio.CheckMAC([]byte(twilioAuthToken), []byte(twilioURL), r); err != nil || !ok { rw.WriteHeader(403) if err != nil { log.Err(fmt.Errorf("twilio.CheckMAC: %w", err)) } return } if r.Form.Get("From") != myCell { rw.WriteHeader(http.StatusForbidden) io.WriteString(rw, "Wrong Cell\n") return } message := r.Form.Get("Body") media, _ := twilio.ExtractMedia(r.Form) if message == "" && len(media) == 0 { rw.WriteHeader(http.StatusBadRequest) io.WriteString(rw, "No Message\n") return } resp, err := rules.Dispatch(message, media) if err != nil { // normally it's a really bad idea to use other values if the error is // non-nil, but care has been taken to propagate cheeky responses even // in that situation. // // Note that the cheeky values won't work unless we return a 200 OK. io.WriteString(rw, resp+"\n") log.Err(fmt.Errorf("notes.Dispatch: %w", err)) return } // headers must be set before WriteHeader; the reverse order has no effect rw.Header().Set("Content-Type", "text/plain") rw.WriteHeader(http.StatusOK) io.WriteString(rw, resp+"\n") } }
[ "\"LM_DROPBOX_TOKEN\"", "\"LM_MY_CELL\"", "\"LM_TWILIO_TOKEN\"", "\"LM_TWILIO_URL\"" ]
[]
[ "LM_DROPBOX_TOKEN", "LM_TWILIO_URL", "LM_MY_CELL", "LM_TWILIO_TOKEN" ]
[]
["LM_DROPBOX_TOKEN", "LM_TWILIO_URL", "LM_MY_CELL", "LM_TWILIO_TOKEN"]
go
4
0
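The record above reads LM_MY_CELL, LM_TWILIO_TOKEN and LM_TWILIO_URL with an inline "use a default when unset" pattern. A minimal sketch of that pattern factored into a helper; the defaults are copied from the record, and the helper name getenvDefault is made up for illustration.

package main

import (
	"fmt"
	"os"
)

// getenvDefault reads an environment variable and falls back to a
// default when it is unset or empty, as the record does inline.
func getenvDefault(key, fallback string) string {
	if v := os.Getenv(key); v != "" {
		return v
	}
	return fallback
}

func main() {
	// Defaults copied from the record above.
	cell := getenvDefault("LM_MY_CELL", "+15555555555")
	twilioURL := getenvDefault("LM_TWILIO_URL", "http://localhost:8080/twilio")
	fmt.Println(cell, twilioURL)
}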
services/dynamodb.go
package services import ( "log" "os" "strconv" "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/session" "github.com/aws/aws-sdk-go/service/dynamodb" "github.com/aws/aws-sdk-go/service/dynamodb/dynamodbattribute" "github.com/pkg/errors" "github.com/roryhow/cravack/db" ) func PutCravackUser(user *db.CravackUser) (*dynamodb.PutItemOutput, error) { sess := session.Must(session.NewSession()) svc := dynamodb.New(sess) av, err := dynamodbattribute.MarshalMap(user) if err != nil { log.Printf("Error when trying to marshal map") return nil, err } input := &dynamodb.PutItemInput{ Item: av, TableName: aws.String(os.Getenv("CRAVACK_USER_TABLE")), } output, err := svc.PutItem(input) return output, err } func GetCravackUser(athleteID int) (*db.CravackUser, error) { sess := session.Must(session.NewSession()) svc := dynamodb.New(sess) result, err := svc.GetItem(&dynamodb.GetItemInput{ TableName: aws.String(os.Getenv("CRAVACK_USER_TABLE")), Key: map[string]*dynamodb.AttributeValue{ "UserID": { N: aws.String(strconv.Itoa(athleteID)), }, }, }) if err != nil { log.Printf("Error when fetching from database\n%s", err.Error()) return nil, err } user := db.CravackUser{} err = dynamodbattribute.UnmarshalMap(result.Item, &user) if err != nil { log.Printf("Error when unmarshalling result from DB into CravackUser\n%s", err.Error()) return nil, err } return &user, nil } func GetCravackUserBySlackID(slackUserID string) (*db.CravackUser, error) { sess := session.Must(session.NewSession()) svc := dynamodb.New(sess) result, err := svc.Scan(&dynamodb.ScanInput{ TableName: aws.String(os.Getenv("CRAVACK_USER_TABLE")), ExpressionAttributeValues: map[string]*dynamodb.AttributeValue{ ":user": { S: aws.String(slackUserID), }, }, FilterExpression: aws.String("SlackUser.UserID = :user"), }) if err != nil { log.Printf("Error when fetching from database\n%s", err.Error()) return nil, err } if *result.Count < 1 { return nil, errors.Errorf("No such user exists for slackID %s", slackUserID) } user := db.CravackUser{} err = dynamodbattribute.UnmarshalMap(result.Items[0], &user) if err != nil { log.Printf("Error when unmarshalling result from DB into CravackUser\n%s", err.Error()) return nil, err } return &user, nil } func DeleteCravackUser(user *db.CravackUser) error { sess := session.Must(session.NewSession()) svc := dynamodb.New(sess) _, err := svc.DeleteItem(&dynamodb.DeleteItemInput{ TableName: aws.String(os.Getenv("CRAVACK_USER_TABLE")), Key: map[string]*dynamodb.AttributeValue{ "UserID": { N: aws.String(strconv.Itoa(user.UserID)), }, }, }) return err } func UpdateCravackStravaToken(refreshedUser *db.StravaRefreshToken, athleteID int) (*db.CravackUser, error) { sess := session.Must(session.NewSession()) svc := dynamodb.New(sess) expr, err := dynamodbattribute.MarshalMap(refreshedUser) if err != nil { log.Printf("Error when marshalling refresh token:\n%s", err.Error()) return nil, err } input := &dynamodb.UpdateItemInput{ ExpressionAttributeValues: expr, TableName: aws.String(os.Getenv("CRAVACK_USER_TABLE")), Key: map[string]*dynamodb.AttributeValue{ "UserID": { N: aws.String(strconv.Itoa(athleteID)), }, }, ReturnValues: aws.String("ALL_NEW"), UpdateExpression: aws.String(` set StravaUser.TokenType = :t, StravaUser.AccessToken = :a, StravaUser.ExpiresIn = :ei, StravaUser.ExpiresAt = :ea, StravaUser.RefreshToken = :r`), } result, err := svc.UpdateItem(input) if err != nil { log.Printf("Error when updating user token in database for athlete: %d\n%s", athleteID, err.Error()) return nil, err } updatedAthlete := db.CravackUser{} err
= dynamodbattribute.UnmarshalMap(result.Attributes, &updatedAthlete) if err != nil { log.Printf("Error when unmarshalling results from dynamodb update into CravackUser\n%s", err.Error()) return nil, err } return &updatedAthlete, nil } func PutCravackActivityEvent(event *db.StravaEvent, slackChannelId, slackMsgTs string) (*dynamodb.PutItemOutput, error) { sess := session.Must(session.NewSession()) svc := dynamodb.New(sess) c := db.NewCravackActivityEvent(event, slackChannelId, slackMsgTs) av, err := dynamodbattribute.MarshalMap(c) if err != nil { log.Printf("Error when trying to marshal map") return nil, err } input := &dynamodb.PutItemInput{ Item: av, TableName: aws.String(os.Getenv("CRAVACK_EVENT_TABLE")), } output, err := svc.PutItem(input) return output, err } func GetCravackActivityEvent(event *db.StravaEvent) (*db.CravackActivityEvent, error) { sess := session.Must(session.NewSession()) svc := dynamodb.New(sess) result, err := svc.GetItem(&dynamodb.GetItemInput{ TableName: aws.String(os.Getenv("CRAVACK_EVENT_TABLE")), Key: map[string]*dynamodb.AttributeValue{ "UserID": { N: aws.String(strconv.Itoa(event.AthleteID)), }, "EventID": { N: aws.String(strconv.Itoa(event.ObjectID)), }, }, }) if err != nil { log.Printf("Error when fetching from database\n%s", err.Error()) return nil, err } cravackEvent := db.CravackActivityEvent{} err = dynamodbattribute.UnmarshalMap(result.Item, &cravackEvent) if err != nil { log.Printf("Error when unmarshalling result from DB into CravackActivityEvent\n%s", err.Error()) return nil, err } return &cravackEvent, nil } func DeleteCravackActivityEvent(event *db.StravaEvent) (*db.CravackActivityEvent, error) { sess := session.Must(session.NewSession()) svc := dynamodb.New(sess) result, err := svc.DeleteItem(&dynamodb.DeleteItemInput{ TableName: aws.String(os.Getenv("CRAVACK_EVENT_TABLE")), Key: map[string]*dynamodb.AttributeValue{ "UserID": { N: aws.String(strconv.Itoa(event.AthleteID)), }, "EventID": { N: aws.String(strconv.Itoa(event.ObjectID)), }, }, ReturnValues: aws.String("ALL_OLD"), }) if err != nil { return nil, err } cravackEvent := db.CravackActivityEvent{} err = dynamodbattribute.UnmarshalMap(result.Attributes, &cravackEvent) if err != nil { log.Printf("Error when unmarshalling result from DB into CravackActivityEvent\n%s", err.Error()) return nil, err } return &cravackEvent, nil }
[ "\"CRAVACK_USER_TABLE\"", "\"CRAVACK_USER_TABLE\"", "\"CRAVACK_USER_TABLE\"", "\"CRAVACK_USER_TABLE\"", "\"CRAVACK_USER_TABLE\"", "\"CRAVACK_EVENT_TABLE\"", "\"CRAVACK_EVENT_TABLE\"", "\"CRAVACK_EVENT_TABLE\"" ]
[]
[ "CRAVACK_USER_TABLE", "CRAVACK_EVENT_TABLE" ]
[]
["CRAVACK_USER_TABLE", "CRAVACK_EVENT_TABLE"]
go
2
0
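Every accessor in the record above follows the same shape: build a session, read the table name from CRAVACK_USER_TABLE or CRAVACK_EVENT_TABLE, and key items by a numeric UserID. A stripped-down sketch of the bare GetItem call that GetCravackUser wraps; the athlete ID 42 is illustrative, and real code would reuse one session rather than building one per call.

package main

import (
	"fmt"
	"os"
	"strconv"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/session"
	"github.com/aws/aws-sdk-go/service/dynamodb"
)

// fetchRawUser issues the GetItem call underlying GetCravackUser:
// table name from CRAVACK_USER_TABLE, numeric UserID key.
func fetchRawUser(athleteID int) (*dynamodb.GetItemOutput, error) {
	sess := session.Must(session.NewSession())
	svc := dynamodb.New(sess)
	return svc.GetItem(&dynamodb.GetItemInput{
		TableName: aws.String(os.Getenv("CRAVACK_USER_TABLE")),
		Key: map[string]*dynamodb.AttributeValue{
			"UserID": {N: aws.String(strconv.Itoa(athleteID))},
		},
	})
}

func main() {
	out, err := fetchRawUser(42) // 42 is an illustrative athlete ID
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(out.Item)
}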
tests/test_main.py
# Copyright 2020-2021 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # For further info, check https://github.com/canonical/charmcraft import argparse import os import subprocess import sys from unittest.mock import patch import pytest from craft_cli import emit, EmitterMode, CraftError from charmcraft import __version__ from charmcraft.main import ( _DEFAULT_GLOBAL_ARGS, ArgumentParsingError, COMMAND_GROUPS, CommandGroup, Dispatcher, GlobalArgument, ProvideHelpException, main, ) from charmcraft.cmdbase import BaseCommand, CommandError from tests.factory import create_command # --- Tests for the Dispatcher def test_dispatcher_pre_parsing(): """Parse and return the global arguments.""" groups = [CommandGroup("title", [create_command("somecommand")])] dispatcher = Dispatcher(groups) global_args = dispatcher.pre_parse_args(["-q", "somecommand"]) assert global_args == {"help": False, "verbose": False, "quiet": True, "trace": False} def test_dispatcher_command_loading(): """Load the command, passing the config to it.""" cmd = create_command("somecommand") groups = [CommandGroup("title", [cmd])] dispatcher = Dispatcher(groups) dispatcher.pre_parse_args(["somecommand"]) command = dispatcher.load_command("test-config") assert isinstance(command, cmd) assert command.config == "test-config" def test_dispatcher_command_execution_ok(): """Command execution depends on the name indicated in the command line; return code ok.""" class MyCommandControl(BaseCommand): help_msg = "some help" def run(self, parsed_args): self._executed.append(parsed_args) class MyCommand1(MyCommandControl): name = "name1" _executed = [] class MyCommand2(MyCommandControl): name = "name2" _executed = [] groups = [CommandGroup("title", [MyCommand1, MyCommand2])] dispatcher = Dispatcher(groups) dispatcher.pre_parse_args(["name2"]) dispatcher.load_command("config") dispatcher.run() assert MyCommand1._executed == [] assert isinstance(MyCommand2._executed[0], argparse.Namespace) def test_dispatcher_command_return_code(): """Command ends indicating the return code to be used.""" class MyCommand(BaseCommand): help_msg = "some help" name = "cmdname" def run(self, parsed_args): return 17 groups = [CommandGroup("title", [MyCommand])] dispatcher = Dispatcher(groups) dispatcher.pre_parse_args(["cmdname"]) dispatcher.load_command("config") retcode = dispatcher.run() assert retcode == 17 def test_dispatcher_command_execution_crash(): """Command crashing doesn't pass through, we inform nicely.""" class MyCommand(BaseCommand): help_msg = "some help" name = "cmdname" def run(self, parsed_args): raise ValueError() groups = [CommandGroup("title", [MyCommand])] dispatcher = Dispatcher(groups) dispatcher.pre_parse_args(["cmdname"]) dispatcher.load_command("config") with pytest.raises(ValueError): dispatcher.run() def test_dispatcher_generic_setup_default(): """Generic parameter handling for default values.""" cmd = create_command("somecommand") groups = [CommandGroup("title", [cmd])] emit.set_mode(EmitterMode.NORMAL) # this is how `main` will init the Emitter
dispatcher = Dispatcher(groups) dispatcher.pre_parse_args(["somecommand"]) assert emit.get_mode() == EmitterMode.NORMAL @pytest.mark.parametrize( "options", [ ["somecommand", "--verbose"], ["somecommand", "-v"], ["-v", "somecommand"], ["--verbose", "somecommand"], ["--verbose", "somecommand", "-v"], ], ) def test_dispatcher_generic_setup_verbose(options): """Generic parameter handling for verbose log setup, directly or after the command.""" cmd = create_command("somecommand") groups = [CommandGroup("title", [cmd])] emit.set_mode(EmitterMode.NORMAL) # this is how `main` will init the Emitter dispatcher = Dispatcher(groups) dispatcher.pre_parse_args(options) assert emit.get_mode() == EmitterMode.VERBOSE @pytest.mark.parametrize( "options", [ ["somecommand", "--quiet"], ["somecommand", "-q"], ["-q", "somecommand"], ["--quiet", "somecommand"], ["--quiet", "somecommand", "-q"], ], ) def test_dispatcher_generic_setup_quiet(options): """Generic parameter handling for quiet log setup, directly or after the command.""" cmd = create_command("somecommand") groups = [CommandGroup("title", [cmd])] emit.set_mode(EmitterMode.NORMAL) # this is how `main` will init the Emitter dispatcher = Dispatcher(groups) dispatcher.pre_parse_args(options) assert emit.get_mode() == EmitterMode.QUIET @pytest.mark.parametrize( "options", [ ["somecommand", "--trace"], ["somecommand", "-t"], ["-t", "somecommand"], ["--trace", "somecommand"], ["--trace", "somecommand", "-t"], ], ) def test_dispatcher_generic_setup_trace(options): """Generic parameter handling for trace log setup, directly or after the command.""" cmd = create_command("somecommand") groups = [CommandGroup("title", [cmd])] emit.set_mode(EmitterMode.NORMAL) # this is how `main` will init the Emitter dispatcher = Dispatcher(groups) dispatcher.pre_parse_args(options) assert emit.get_mode() == EmitterMode.TRACE @pytest.mark.parametrize( "options", [ ["--quiet", "--verbose", "somecommand"], ["-v", "-q", "somecommand"], ["somecommand", "--quiet", "--verbose"], ["somecommand", "-v", "-q"], ["--verbose", "somecommand", "--quiet"], ["-q", "somecommand", "-v"], ["--trace", "--verbose", "somecommand"], ["-v", "-t", "somecommand"], ["somecommand", "--trace", "--verbose"], ["somecommand", "-v", "-t"], ["--verbose", "somecommand", "--trace"], ["-t", "somecommand", "-v"], ["--quiet", "--trace", "somecommand"], ["-t", "-q", "somecommand"], ["somecommand", "--quiet", "--trace"], ["somecommand", "-t", "-q"], ["--trace", "somecommand", "--quiet"], ["-q", "somecommand", "-t"], ], ) def test_dispatcher_generic_setup_mutually_exclusive(options): """Disallow mutually exclusive generic options.""" cmd = create_command("somecommand") groups = [CommandGroup("title", [cmd])] dispatcher = Dispatcher(groups) with pytest.raises(ArgumentParsingError) as err: dispatcher.pre_parse_args(options) assert str(err.value) == "The 'verbose', 'trace' and 'quiet' options are mutually exclusive." 
@pytest.mark.parametrize( "options", [ ["somecommand", "--globalparam", "foobar"], ["somecommand", "--globalparam=foobar"], ["somecommand", "-g", "foobar"], ["-g", "foobar", "somecommand"], ["--globalparam", "foobar", "somecommand"], ["--globalparam=foobar", "somecommand"], ], ) def test_dispatcher_generic_setup_paramglobal_with_param(options): """Generic parameter handling for a param type global arg, directly or after the cmd.""" cmd = create_command("somecommand") groups = [CommandGroup("title", [cmd])] extra = GlobalArgument("globalparam", "option", "-g", "--globalparam", "Test global param.") dispatcher = Dispatcher(groups, [extra]) global_args = dispatcher.pre_parse_args(options) assert global_args["globalparam"] == "foobar" @pytest.mark.parametrize( "options", [ ["somecommand", "--globalparam"], ["somecommand", "--globalparam="], ["somecommand", "-g"], ["--globalparam=", "somecommand"], ], ) def test_dispatcher_generic_setup_paramglobal_without_param_simple(options): """Generic parameter handling for a param type global arg without the requested parameter.""" cmd = create_command("somecommand") groups = [CommandGroup("title", [cmd])] extra = GlobalArgument("globalparam", "option", "-g", "--globalparam", "Test global param.") dispatcher = Dispatcher(groups, [extra]) with pytest.raises(ArgumentParsingError) as err: dispatcher.pre_parse_args(options) assert str(err.value) == "The 'globalparam' option expects one argument." @pytest.mark.parametrize( "options", [ ["-g", "somecommand"], ["--globalparam", "somecommand"], ], ) def test_dispatcher_generic_setup_paramglobal_without_param_confusing(options): """Generic parameter handling for a param type global arg confusing the command as the arg.""" cmd = create_command("somecommand") groups = [CommandGroup("title", [cmd])] extra = GlobalArgument("globalparam", "option", "-g", "--globalparam", "Test global param.") dispatcher = Dispatcher(groups, [extra]) with patch("charmcraft.helptexts.HelpBuilder.get_full_help") as mock_helper: mock_helper.return_value = "help text" with pytest.raises(ArgumentParsingError) as err: dispatcher.pre_parse_args(options) # generic usage message because "no command" (as 'somecommand' was consumed by --globalparam) assert str(err.value) == "help text" def test_dispatcher_build_commands_ok(): """Correct command loading.""" cmd0, cmd1, cmd2 = [create_command("cmd-name-{}".format(n), "cmd help") for n in range(3)] groups = [ CommandGroup("whatever title", [cmd0]), CommandGroup("other title", [cmd1, cmd2]), ] dispatcher = Dispatcher(groups) assert len(dispatcher.commands) == 3 for cmd in [cmd0, cmd1, cmd2]: expected_class = dispatcher.commands[cmd.name] assert expected_class == cmd def test_dispatcher_build_commands_repeated(): """Error while loading commands with repeated name.""" class Foo(BaseCommand): help_msg = "some help" name = "repeated" class Bar(BaseCommand): help_msg = "some help" name = "cool" class Baz(BaseCommand): help_msg = "some help" name = "repeated" groups = [ CommandGroup("whatever title", [Foo, Bar]), CommandGroup("other title", [Baz]), ] expected_msg = "Multiple commands with same name: (Foo|Baz) and (Baz|Foo)" with pytest.raises(RuntimeError, match=expected_msg): Dispatcher(groups) def test_dispatcher_commands_are_not_loaded_if_not_needed(): class MyCommand1(BaseCommand): """Expected to be executed.""" name = "command1" help_msg = "some help" _executed = [] def run(self, parsed_args): self._executed.append(parsed_args) class MyCommand2(BaseCommand): """Expected to not be instantiated, or parse 
args, or run.""" name = "command2" help_msg = "some help" def __init__(self, *args): raise AssertionError def fill_parser(self, parser): raise AssertionError def run(self, parsed_args): raise AssertionError groups = [CommandGroup("title", [MyCommand1, MyCommand2])] dispatcher = Dispatcher(groups) dispatcher.pre_parse_args(["command1"]) dispatcher.load_command("config") dispatcher.run() assert isinstance(MyCommand1._executed[0], argparse.Namespace) def test_dispatcher_global_arguments_default(): """The dispatcher uses the default global arguments.""" cmd = create_command("somecommand") groups = [CommandGroup("title", [cmd])] dispatcher = Dispatcher(groups) assert dispatcher.global_arguments == _DEFAULT_GLOBAL_ARGS def test_dispatcher_global_arguments_extra_arguments(): """The dispatcher uses the default global arguments.""" cmd = create_command("somecommand") groups = [CommandGroup("title", [cmd])] extra_arg = GlobalArgument("other", "flag", "-o", "--other", "Other stuff") dispatcher = Dispatcher(groups, extra_global_args=[extra_arg]) assert dispatcher.global_arguments == _DEFAULT_GLOBAL_ARGS + [extra_arg] # --- Tests for the main entry point # In all the test methods below we patch Dispatcher.run so we don't really exercise any # command machinery, even if we call to main using a real command (which is to just # make argument parsing system happy). def test_main_ok(): """Work ended ok: message handler notified properly, return code in 0.""" with patch("charmcraft.main.emit") as emit_mock: with patch("charmcraft.main.Dispatcher.run") as d_mock: d_mock.return_value = None retcode = main(["charmcraft", "version"]) assert retcode == 0 emit_mock.ended_ok.assert_called_once_with() # check how Emitter was initted emit_mock.init.assert_called_once_with( EmitterMode.NORMAL, "charmcraft", f"Starting charmcraft version {__version__}" ) def test_main_load_config_ok(create_config): """Command is properly executed, after loading and receiving the config.""" tmp_path = create_config( """ type: charm """ ) class MyCommand(BaseCommand): help_msg = "some help" name = "cmdname" def run(self, parsed_args): assert self.config.type == "charm" with patch("charmcraft.main.COMMAND_GROUPS", [CommandGroup("title", [MyCommand])]): retcode = main(["charmcraft", "cmdname", f"--project-dir={tmp_path}"]) assert retcode == 0 def test_main_load_config_not_present_ok(): """Command ends indicating the return code to be used.""" class MyCommand(BaseCommand): help_msg = "some help" name = "cmdname" def run(self, parsed_args): assert self.config.type is None assert not self.config.project.config_provided with patch("charmcraft.main.COMMAND_GROUPS", [CommandGroup("title", [MyCommand])]): retcode = main(["charmcraft", "cmdname", "--project-dir=/whatever"]) assert retcode == 0 def test_main_load_config_not_present_but_needed(capsys): """Command ends indicating the return code to be used.""" cmd = create_command("cmdname", needs_config_=True) with patch("charmcraft.main.COMMAND_GROUPS", [CommandGroup("title", [cmd])]): retcode = main(["charmcraft", "cmdname", "--project-dir=/whatever"]) assert retcode == 1 out, err = capsys.readouterr() assert not out assert err == ( "The specified command needs a valid 'charmcraft.yaml' configuration file (in " "the current directory or where specified with --project-dir option); see " "the reference: https://discourse.charmhub.io/t/charmcraft-configuration/4138\n" ) def test_main_no_args(): """The setup.py entry_point function needs to work with no arguments.""" with patch("sys.argv", 
["charmcraft"]): retcode = main() assert retcode == 1 def test_main_controlled_error(): """Work raised CommandError: message handler notified properly, use indicated return code.""" simulated_exception = CommandError("boom", retcode=33) with patch("charmcraft.main.emit") as emit_mock: with patch("charmcraft.main.Dispatcher.run") as d_mock: d_mock.side_effect = simulated_exception retcode = main(["charmcraft", "version"]) assert retcode == 33 emit_mock.error.assert_called_once_with(simulated_exception) def test_main_controlled_return_code(): """Work ended ok, and the command indicated the return code.""" with patch("charmcraft.main.emit") as emit_mock: with patch("charmcraft.main.Dispatcher.run") as d_mock: d_mock.return_value = 9 retcode = main(["charmcraft", "version"]) assert retcode == 9 emit_mock.ended_ok.assert_called_once_with() def test_main_crash(): """Work crashed: message handler notified properly, return code in 1.""" simulated_exception = ValueError("boom") with patch("charmcraft.main.emit") as emit_mock: with patch("charmcraft.main.Dispatcher.run") as d_mock: d_mock.side_effect = simulated_exception retcode = main(["charmcraft", "version"]) assert retcode == 1 (call,) = emit_mock.error.mock_calls (exc,) = call.args assert isinstance(exc, CraftError) assert str(exc) == "charmcraft internal error: ValueError('boom')" assert exc.__cause__ == simulated_exception def test_main_interrupted(): """Work interrupted: message handler notified properly, return code in 1.""" simulated_exception = KeyboardInterrupt() with patch("charmcraft.main.emit") as emit_mock: with patch("charmcraft.main.Dispatcher.run") as d_mock: d_mock.side_effect = simulated_exception retcode = main(["charmcraft", "version"]) assert retcode == 1 (call,) = emit_mock.error.mock_calls (exc,) = call.args assert isinstance(exc, CraftError) assert str(exc) == "Interrupted." assert exc.__cause__ == simulated_exception def test_main_controlled_arguments_error(capsys): """The execution failed because an argument parsing error.""" with patch("charmcraft.main.emit") as emit_mock: with patch("charmcraft.main.Dispatcher.run") as d_mock: d_mock.side_effect = ArgumentParsingError("test error") retcode = main(["charmcraft", "version"]) assert retcode == 1 emit_mock.ended_ok.assert_called_once_with() out, err = capsys.readouterr() assert not out assert err == "test error\n" def test_main_providing_help(capsys): """The execution ended up providing a help message.""" with patch("charmcraft.main.emit") as emit_mock: with patch("charmcraft.main.Dispatcher.run") as d_mock: d_mock.side_effect = ProvideHelpException("nice and shiny help message") retcode = main(["charmcraft", "version"]) assert retcode == 0 emit_mock.ended_ok.assert_called_once_with() out, err = capsys.readouterr() assert not out assert err == "nice and shiny help message\n" @pytest.mark.parametrize( "cmd_name", [cmd.name for cgroup in COMMAND_GROUPS for cmd in cgroup.commands] ) def test_commands(cmd_name): """Sanity validation of a command. This is done through asking help for it *in real life*, which would mean that the command is usable by the tool: that can be imported, instantiated, parse arguments, etc. """ env = os.environ.copy() # Bypass unsupported environment error. 
env["CHARMCRAFT_DEVELOPER"] = "1" env_paths = [p for p in sys.path if "env/lib/python" in p] if env_paths: if "PYTHONPATH" in env: env["PYTHONPATH"] += ":" + ":".join(env_paths) else: env["PYTHONPATH"] = ":".join(env_paths) external_command = [sys.executable, "-m", "charmcraft", cmd_name, "-h"] subprocess.run(external_command, check=True, env=env, stdout=subprocess.DEVNULL)
[]
[]
[]
[]
[]
python
0
0
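The record's final test (test_commands) shells out to `python -m charmcraft <cmd> -h` with CHARMCRAFT_DEVELOPER=1 added to the environment. A hedged sketch of that env-augmented subprocess pattern, written in Go for consistency with the other sketches in this dump; the hard-coded "version" command is just an example.

package main

import (
	"fmt"
	"os"
	"os/exec"
)

// runHelp mirrors test_commands above: invoke `python -m charmcraft <cmd> -h`
// with CHARMCRAFT_DEVELOPER=1 appended to the inherited environment.
func runHelp(cmd string) error {
	c := exec.Command("python", "-m", "charmcraft", cmd, "-h")
	c.Env = append(os.Environ(), "CHARMCRAFT_DEVELOPER=1")
	return c.Run() // help output is discarded; only the exit status matters
}

func main() {
	if err := runHelp("version"); err != nil { // "version" is just an example command
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}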
sdk/keyvault/azure-keyvault-secrets/samples/backup_restore_operations_async.py
# ------------------------------------ # Copyright (c) Microsoft Corporation. # Licensed under the MIT License. # ------------------------------------ import asyncio import os from azure.keyvault.secrets.aio import SecretClient from azure.identity.aio import DefaultAzureCredential from azure.core.exceptions import HttpResponseError # ---------------------------------------------------------------------------------------------------------- # Prerequisites: # 1. An Azure Key Vault (https://docs.microsoft.com/en-us/azure/key-vault/quick-create-cli) # # 2. Microsoft Azure Key Vault PyPI package - # https://pypi.python.org/pypi/azure-keyvault-secrets/ # # 3. Set environment variables AZURE_CLIENT_ID, AZURE_TENANT_ID, AZURE_CLIENT_SECRET, VAULT_URL # (See https://github.com/Azure/azure-sdk-for-python/tree/master/sdk/keyvault/azure-keyvault-keys#authenticate-the-client) # # ---------------------------------------------------------------------------------------------------------- # Sample - demonstrates the basic backup and restore operations on a vault (secret) resource for Azure Key Vault # # 1. Create a secret (set_secret) # # 2. Backup a secret (backup_secret) # # 3. Delete a secret (delete_secret) # # 4. Restore a secret (restore_secret_backup) # ---------------------------------------------------------------------------------------------------------- async def run_sample(): # Instantiate a secret client that will be used to call the service. # Notice that the client is using default Azure credentials. # To make default credentials work, ensure that environment variables 'AZURE_CLIENT_ID', # 'AZURE_CLIENT_SECRET' and 'AZURE_TENANT_ID' are set with the service principal credentials. VAULT_URL = os.environ["VAULT_URL"] credential = DefaultAzureCredential() client = SecretClient(vault_url=VAULT_URL, credential=credential) try: # Let's create a secret holding storage account credentials. # If the secret already exists in the Key Vault, then a new version of the secret is created. print("\n.. Create Secret") secret = await client.set_secret("backupRestoreSecretName", "backupRestoreSecretValue") print("Secret with name '{0}' created with value '{1}'".format(secret.name, secret.value)) # Backups are good to have in case secrets get deleted accidentally. # For long term storage, it is ideal to write the backup to a file. print("\n.. Create a backup for an existing Secret") secret_backup = await client.backup_secret(secret.name) print("Backup created for secret with name '{0}'.".format(secret.name)) # The storage account secret is no longer in use, so you delete it. print("\n.. Deleting secret...") await client.delete_secret(secret.name) print("Deleted Secret with name '{0}'".format(secret.name)) # In the future, if the secret is required again, we can use the backup value to restore it in the Key Vault. print("\n.. Restore the secret using the backed up secret bytes") secret = await client.restore_secret_backup(secret_backup) print("Restored Secret with name '{0}'".format(secret.name)) except HttpResponseError as e: print("\nrun_sample has caught an error. {0}".format(e.message)) finally: print("\nrun_sample done") if __name__ == "__main__": try: loop = asyncio.get_event_loop() loop.run_until_complete(run_sample()) loop.close() except Exception as e: print("Top level Error: {0}".format(str(e)))
[]
[]
[ "VAULT_URL" ]
[]
["VAULT_URL"]
python
1
0
src/net/http/server.go
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // HTTP server. See RFC 7230 through 7235. package http import ( "bufio" "bytes" "context" "crypto/tls" "errors" "fmt" "io" "io/ioutil" "log" "net" "net/textproto" "net/url" urlpkg "net/url" "os" "path" "runtime" "sort" "strconv" "strings" "sync" "sync/atomic" "time" "golang.org/x/net/http/httpguts" ) // Errors used by the HTTP server. var ( // ErrBodyNotAllowed is returned by ResponseWriter.Write calls // when the HTTP method or response code does not permit a // body. ErrBodyNotAllowed = errors.New("http: request method or response status code does not allow body") // ErrHijacked is returned by ResponseWriter.Write calls when // the underlying connection has been hijacked using the // Hijacker interface. A zero-byte write on a hijacked // connection will return ErrHijacked without any other side // effects. ErrHijacked = errors.New("http: connection has been hijacked") // ErrContentLength is returned by ResponseWriter.Write calls // when a Handler set a Content-Length response header with a // declared size and then attempted to write more bytes than // declared. ErrContentLength = errors.New("http: wrote more than the declared Content-Length") // Deprecated: ErrWriteAfterFlush is no longer returned by // anything in the net/http package. Callers should not // compare errors against this variable. ErrWriteAfterFlush = errors.New("unused") ) // A Handler responds to an HTTP request. // // ServeHTTP should write reply headers and data to the ResponseWriter // and then return. Returning signals that the request is finished; it // is not valid to use the ResponseWriter or read from the // Request.Body after or concurrently with the completion of the // ServeHTTP call. // // Depending on the HTTP client software, HTTP protocol version, and // any intermediaries between the client and the Go server, it may not // be possible to read from the Request.Body after writing to the // ResponseWriter. Cautious handlers should read the Request.Body // first, and then reply. // // Except for reading the body, handlers should not modify the // provided Request. // // If ServeHTTP panics, the server (the caller of ServeHTTP) assumes // that the effect of the panic was isolated to the active request. // It recovers the panic, logs a stack trace to the server error log, // and either closes the network connection or sends an HTTP/2 // RST_STREAM, depending on the HTTP protocol. To abort a handler so // the client sees an interrupted response but the server doesn't log // an error, panic with the value ErrAbortHandler. type Handler interface { ServeHTTP(ResponseWriter, *Request) } // A ResponseWriter interface is used by an HTTP handler to // construct an HTTP response. // // A ResponseWriter may not be used after the Handler.ServeHTTP method // has returned. type ResponseWriter interface { // Header returns the header map that will be sent by // WriteHeader. The Header map also is the mechanism with which // Handlers can set HTTP trailers. // // Changing the header map after a call to WriteHeader (or // Write) has no effect unless the modified headers are // trailers. // // There are two ways to set Trailers. The preferred way is to // predeclare in the headers which trailers you will later // send by setting the "Trailer" header to the names of the // trailer keys which will come later. 
In this case, those // keys of the Header map are treated as if they were // trailers. See the example. The second way, for trailer // keys not known to the Handler until after the first Write, // is to prefix the Header map keys with the TrailerPrefix // constant value. See TrailerPrefix. // // To suppress automatic response headers (such as "Date"), set // their value to nil. Header() Header // Write writes the data to the connection as part of an HTTP reply. // // If WriteHeader has not yet been called, Write calls // WriteHeader(http.StatusOK) before writing the data. If the Header // does not contain a Content-Type line, Write adds a Content-Type set // to the result of passing the initial 512 bytes of written data to // DetectContentType. Additionally, if the total size of all written // data is under a few KB and there are no Flush calls, the // Content-Length header is added automatically. // // Depending on the HTTP protocol version and the client, calling // Write or WriteHeader may prevent future reads on the // Request.Body. For HTTP/1.x requests, handlers should read any // needed request body data before writing the response. Once the // headers have been flushed (due to either an explicit Flusher.Flush // call or writing enough data to trigger a flush), the request body // may be unavailable. For HTTP/2 requests, the Go HTTP server permits // handlers to continue to read the request body while concurrently // writing the response. However, such behavior may not be supported // by all HTTP/2 clients. Handlers should read before writing if // possible to maximize compatibility. Write([]byte) (int, error) // WriteHeader sends an HTTP response header with the provided // status code. // // If WriteHeader is not called explicitly, the first call to Write // will trigger an implicit WriteHeader(http.StatusOK). // Thus explicit calls to WriteHeader are mainly used to // send error codes. // // The provided code must be a valid HTTP 1xx-5xx status code. // Only one header may be written. Go does not currently // support sending user-defined 1xx informational headers, // with the exception of 100-continue response header that the // Server sends automatically when the Request.Body is read. WriteHeader(statusCode int) } // The Flusher interface is implemented by ResponseWriters that allow // an HTTP handler to flush buffered data to the client. // // The default HTTP/1.x and HTTP/2 ResponseWriter implementations // support Flusher, but ResponseWriter wrappers may not. Handlers // should always test for this ability at runtime. // // Note that even for ResponseWriters that support Flush, // if the client is connected through an HTTP proxy, // the buffered data may not reach the client until the response // completes. type Flusher interface { // Flush sends any buffered data to the client. Flush() } // The Hijacker interface is implemented by ResponseWriters that allow // an HTTP handler to take over the connection. // // The default ResponseWriter for HTTP/1.x connections supports // Hijacker, but HTTP/2 connections intentionally do not. // ResponseWriter wrappers may also not support Hijacker. Handlers // should always test for this ability at runtime. type Hijacker interface { // Hijack lets the caller take over the connection. // After a call to Hijack the HTTP server library // will not do anything else with the connection. // // It becomes the caller's responsibility to manage // and close the connection. 
// // The returned net.Conn may have read or write deadlines // already set, depending on the configuration of the // Server. It is the caller's responsibility to set // or clear those deadlines as needed. // // The returned bufio.Reader may contain unprocessed buffered // data from the client. // // After a call to Hijack, the original Request.Body must not // be used. The original Request's Context remains valid and // is not canceled until the Request's ServeHTTP method // returns. Hijack() (net.Conn, *bufio.ReadWriter, error) } // The CloseNotifier interface is implemented by ResponseWriters which // allow detecting when the underlying connection has gone away. // // This mechanism can be used to cancel long operations on the server // if the client has disconnected before the response is ready. // // Deprecated: the CloseNotifier interface predates Go's context package. // New code should use Request.Context instead. type CloseNotifier interface { // CloseNotify returns a channel that receives at most a // single value (true) when the client connection has gone // away. // // CloseNotify may wait to notify until Request.Body has been // fully read. // // After the Handler has returned, there is no guarantee // that the channel receives a value. // // If the protocol is HTTP/1.1 and CloseNotify is called while // processing an idempotent request (such a GET) while // HTTP/1.1 pipelining is in use, the arrival of a subsequent // pipelined request may cause a value to be sent on the // returned channel. In practice HTTP/1.1 pipelining is not // enabled in browsers and not seen often in the wild. If this // is a problem, use HTTP/2 or only use CloseNotify on methods // such as POST. CloseNotify() <-chan bool } var ( // ServerContextKey is a context key. It can be used in HTTP // handlers with Context.Value to access the server that // started the handler. The associated value will be of // type *Server. ServerContextKey = &contextKey{"http-server"} // LocalAddrContextKey is a context key. It can be used in // HTTP handlers with Context.Value to access the local // address the connection arrived on. // The associated value will be of type net.Addr. LocalAddrContextKey = &contextKey{"local-addr"} ) // A conn represents the server side of an HTTP connection. type conn struct { // server is the server on which the connection arrived. // Immutable; never nil. server *Server // cancelCtx cancels the connection-level context. cancelCtx context.CancelFunc // rwc is the underlying network connection. // This is never wrapped by other types and is the value given out // to CloseNotifier callers. It is usually of type *net.TCPConn or // *tls.Conn. rwc net.Conn // remoteAddr is rwc.RemoteAddr().String(). It is not populated synchronously // inside the Listener's Accept goroutine, as some implementations block. // It is populated immediately inside the (*conn).serve goroutine. // This is the value of a Handler's (*Request).RemoteAddr. remoteAddr string // tlsState is the TLS connection state when using TLS. // nil means not TLS. tlsState *tls.ConnectionState // werr is set to the first write error to rwc. // It is set via checkConnErrorWriter{w}, where bufw writes. werr error // r is bufr's read source. It's a wrapper around rwc that provides // io.LimitedReader-style limiting (while reading request headers) // and functionality to support CloseNotifier. See *connReader docs. r *connReader // bufr reads from r. 
bufr *bufio.Reader // bufw writes to checkConnErrorWriter{c}, which populates werr on error. bufw *bufio.Writer // lastMethod is the method of the most recent request // on this connection, if any. lastMethod string curReq atomic.Value // of *response (which has a Request in it) curState struct{ atomic uint64 } // packed (unixtime<<8|uint8(ConnState)) // mu guards hijackedv mu sync.Mutex // hijackedv is whether this connection has been hijacked // by a Handler with the Hijacker interface. // It is guarded by mu. hijackedv bool } func (c *conn) hijacked() bool { c.mu.Lock() defer c.mu.Unlock() return c.hijackedv } // c.mu must be held. func (c *conn) hijackLocked() (rwc net.Conn, buf *bufio.ReadWriter, err error) { if c.hijackedv { return nil, nil, ErrHijacked } c.r.abortPendingRead() c.hijackedv = true rwc = c.rwc rwc.SetDeadline(time.Time{}) buf = bufio.NewReadWriter(c.bufr, bufio.NewWriter(rwc)) if c.r.hasByte { if _, err := c.bufr.Peek(c.bufr.Buffered() + 1); err != nil { return nil, nil, fmt.Errorf("unexpected Peek failure reading buffered byte: %v", err) } } c.setState(rwc, StateHijacked) return } // This should be >= 512 bytes for DetectContentType, // but otherwise it's somewhat arbitrary. const bufferBeforeChunkingSize = 2048 // chunkWriter writes to a response's conn buffer, and is the writer // wrapped by the response.bufw buffered writer. // // chunkWriter also is responsible for finalizing the Header, including // conditionally setting the Content-Type and setting a Content-Length // in cases where the handler's final output is smaller than the buffer // size. It also conditionally adds chunk headers, when in chunking mode. // // See the comment above (*response).Write for the entire write flow. type chunkWriter struct { res *response // header is either nil or a deep clone of res.handlerHeader // at the time of res.writeHeader, if res.writeHeader is // called and extra buffering is being done to calculate // Content-Type and/or Content-Length. header Header // wroteHeader tells whether the header's been written to "the // wire" (or rather: w.conn.buf). this is unlike // (*response).wroteHeader, which tells only whether it was // logically written. wroteHeader bool // set by the writeHeader method: chunking bool // using chunked transfer encoding for reply body } var ( crlf = []byte("\r\n") colonSpace = []byte(": ") ) func (cw *chunkWriter) Write(p []byte) (n int, err error) { if !cw.wroteHeader { cw.writeHeader(p) } if cw.res.req.Method == "HEAD" { // Eat writes. return len(p), nil } if cw.chunking { _, err = fmt.Fprintf(cw.res.conn.bufw, "%x\r\n", len(p)) if err != nil { cw.res.conn.rwc.Close() return } } n, err = cw.res.conn.bufw.Write(p) if cw.chunking && err == nil { _, err = cw.res.conn.bufw.Write(crlf) } if err != nil { cw.res.conn.rwc.Close() } return } func (cw *chunkWriter) flush() { if !cw.wroteHeader { cw.writeHeader(nil) } cw.res.conn.bufw.Flush() } func (cw *chunkWriter) close() { if !cw.wroteHeader { cw.writeHeader(nil) } if cw.chunking { bw := cw.res.conn.bufw // conn's bufio writer // zero chunk to mark EOF bw.WriteString("0\r\n") if trailers := cw.res.finalTrailers(); trailers != nil { trailers.Write(bw) // the writer handles noting errors } // final blank line after the trailers (whether // present or not) bw.WriteString("\r\n") } } // A response represents the server side of an HTTP response. 
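// It is the concrete type behind the ResponseWriter that handlers receive,
// and it additionally implements Flusher, Hijacker, and CloseNotifier (see
// the Flush, Hijack, and CloseNotify methods below).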
type response struct { conn *conn req *Request // request for this response reqBody io.ReadCloser cancelCtx context.CancelFunc // when ServeHTTP exits wroteHeader bool // reply header has been (logically) written wroteContinue bool // 100 Continue response was written wants10KeepAlive bool // HTTP/1.0 w/ Connection "keep-alive" wantsClose bool // HTTP request has Connection "close" w *bufio.Writer // buffers output in chunks to chunkWriter cw chunkWriter // handlerHeader is the Header that Handlers get access to, // which may be retained and mutated even after WriteHeader. // handlerHeader is copied into cw.header at WriteHeader // time, and privately mutated thereafter. handlerHeader Header calledHeader bool // handler accessed handlerHeader via Header written int64 // number of bytes written in body contentLength int64 // explicitly-declared Content-Length; or -1 status int // status code passed to WriteHeader // close connection after this reply. set on request and // updated after response from handler if there's a // "Connection: keep-alive" response header and a // Content-Length. closeAfterReply bool // requestBodyLimitHit is set by requestTooLarge when // maxBytesReader hits its max size. It is checked in // WriteHeader, to make sure we don't consume the // remaining request body to try to advance to the next HTTP // request. Instead, when this is set, we stop reading // subsequent requests on this connection and stop reading // input from it. requestBodyLimitHit bool // trailers are the headers to be sent after the handler // finishes writing the body. This field is initialized from // the Trailer response header when the response header is // written. trailers []string handlerDone atomicBool // set true when the handler exits // Buffers for Date, Content-Length, and status code dateBuf [len(TimeFormat)]byte clenBuf [10]byte statusBuf [3]byte // closeNotifyCh is the channel returned by CloseNotify. // TODO(bradfitz): this is currently (for Go 1.8) always // non-nil. Make this lazily-created again as it used to be? closeNotifyCh chan bool didCloseNotify int32 // atomic (only 0->1 winner should send) } // TrailerPrefix is a magic prefix for ResponseWriter.Header map keys // that, if present, signals that the map entry is actually for // the response trailers, and not the response headers. The prefix // is stripped after the ServeHTTP call finishes and the values are // sent in the trailers. // // This mechanism is intended only for trailers that are not known // prior to the headers being written. If the set of trailers is fixed // or known before the header is written, the normal Go trailers mechanism // is preferred: // https://golang.org/pkg/net/http/#ResponseWriter // https://golang.org/pkg/net/http/#example_ResponseWriter_trailers const TrailerPrefix = "Trailer:" // finalTrailers is called after the Handler exits and returns a non-nil // value if the Handler set any trailers. func (w *response) finalTrailers() Header { var t Header for k, vv := range w.handlerHeader { if strings.HasPrefix(k, TrailerPrefix) { if t == nil { t = make(Header) } t[strings.TrimPrefix(k, TrailerPrefix)] = vv } } for _, k := range w.trailers { if t == nil { t = make(Header) } for _, v := range w.handlerHeader[k] { t.Add(k, v) } } return t } type atomicBool int32 func (b *atomicBool) isSet() bool { return atomic.LoadInt32((*int32)(b)) != 0 } func (b *atomicBool) setTrue() { atomic.StoreInt32((*int32)(b), 1) } // declareTrailer is called for each Trailer header when the // response header is written. 
It notes that a header will need to be // written in the trailers at the end of the response. func (w *response) declareTrailer(k string) { k = CanonicalHeaderKey(k) if !httpguts.ValidTrailerHeader(k) { // Forbidden by RFC 7230, section 4.1.2 return } w.trailers = append(w.trailers, k) } // requestTooLarge is called by maxBytesReader when too much input has // been read from the client. func (w *response) requestTooLarge() { w.closeAfterReply = true w.requestBodyLimitHit = true if !w.wroteHeader { w.Header().Set("Connection", "close") } } // needsSniff reports whether a Content-Type still needs to be sniffed. func (w *response) needsSniff() bool { _, haveType := w.handlerHeader["Content-Type"] return !w.cw.wroteHeader && !haveType && w.written < sniffLen } // writerOnly hides an io.Writer value's optional ReadFrom method // from io.Copy. type writerOnly struct { io.Writer } func srcIsRegularFile(src io.Reader) (isRegular bool, err error) { switch v := src.(type) { case *os.File: fi, err := v.Stat() if err != nil { return false, err } return fi.Mode().IsRegular(), nil case *io.LimitedReader: return srcIsRegularFile(v.R) default: return } } // ReadFrom is here to optimize copying from an *os.File regular file // to a *net.TCPConn with sendfile. func (w *response) ReadFrom(src io.Reader) (n int64, err error) { // Our underlying w.conn.rwc is usually a *TCPConn (with its // own ReadFrom method). If not, or if our src isn't a regular // file, just fall back to the normal copy method. rf, ok := w.conn.rwc.(io.ReaderFrom) regFile, err := srcIsRegularFile(src) if err != nil { return 0, err } if !ok || !regFile { bufp := copyBufPool.Get().(*[]byte) defer copyBufPool.Put(bufp) return io.CopyBuffer(writerOnly{w}, src, *bufp) } // sendfile path: if !w.wroteHeader { w.WriteHeader(StatusOK) } if w.needsSniff() { n0, err := io.Copy(writerOnly{w}, io.LimitReader(src, sniffLen)) n += n0 if err != nil { return n, err } } w.w.Flush() // get rid of any previous writes w.cw.flush() // make sure Header is written; flush data to rwc // Now that cw has been flushed, its chunking field is guaranteed initialized. if !w.cw.chunking && w.bodyAllowed() { n0, err := rf.ReadFrom(src) n += n0 w.written += n0 return n, err } n0, err := io.Copy(writerOnly{w}, src) n += n0 return n, err } // debugServerConnections controls whether all server connections are wrapped // with a verbose logging wrapper. const debugServerConnections = false // Create new connection from rwc. func (srv *Server) newConn(rwc net.Conn) *conn { c := &conn{ server: srv, rwc: rwc, } if debugServerConnections { c.rwc = newLoggingConn("server", c.rwc) } return c } type readResult struct { n int err error b byte // byte read, if n == 1 } // connReader is the io.Reader wrapper used by *conn. It combines a // selectively-activated io.LimitedReader (to bound request header // read sizes) with support for selectively keeping an io.Reader.Read // call blocked in a background goroutine to wait for activity and // trigger a CloseNotifier channel. 
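// The background read is at most one byte (byteBuf): if that byte arrives,
// it is the start of a pipelined request, and it is handed back to the
// next foreground Read via hasByte rather than being dropped.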
type connReader struct { conn *conn mu sync.Mutex // guards following hasByte bool byteBuf [1]byte cond *sync.Cond inRead bool aborted bool // set true before conn.rwc deadline is set to past remain int64 // bytes remaining } func (cr *connReader) lock() { cr.mu.Lock() if cr.cond == nil { cr.cond = sync.NewCond(&cr.mu) } } func (cr *connReader) unlock() { cr.mu.Unlock() } func (cr *connReader) startBackgroundRead() { cr.lock() defer cr.unlock() if cr.inRead { panic("invalid concurrent Body.Read call") } if cr.hasByte { return } cr.inRead = true cr.conn.rwc.SetReadDeadline(time.Time{}) go cr.backgroundRead() } func (cr *connReader) backgroundRead() { n, err := cr.conn.rwc.Read(cr.byteBuf[:]) cr.lock() if n == 1 { cr.hasByte = true // We were past the end of the previous request's body already // (since we wouldn't be in a background read otherwise), so // this is a pipelined HTTP request. Prior to Go 1.11 we used to // send on the CloseNotify channel and cancel the context here, // but the behavior was documented as only "may", and we only // did that because that's how CloseNotify accidentally behaved // in very early Go releases prior to context support. Once we // added context support, people used a Handler's // Request.Context() and passed it along. Having that context // cancel on pipelined HTTP requests caused problems. // Fortunately, almost nothing uses HTTP/1.x pipelining. // Unfortunately, apt-get does, or sometimes does. // New Go 1.11 behavior: don't fire CloseNotify or cancel // contexts on pipelined requests. Shouldn't affect people, but // fixes cases like Issue 23921. This does mean that a client // closing their TCP connection after sending a pipelined // request won't cancel the context, but we'll catch that on any // write failure (in checkConnErrorWriter.Write). // If the server never writes, yes, there are still contrived // server & client behaviors where this fails to ever cancel the // context, but that's kinda why HTTP/1.x pipelining died // anyway. } if ne, ok := err.(net.Error); ok && cr.aborted && ne.Timeout() { // Ignore this error. It's the expected error from // another goroutine calling abortPendingRead. } else if err != nil { cr.handleReadError(err) } cr.aborted = false cr.inRead = false cr.unlock() cr.cond.Broadcast() } func (cr *connReader) abortPendingRead() { cr.lock() defer cr.unlock() if !cr.inRead { return } cr.aborted = true cr.conn.rwc.SetReadDeadline(aLongTimeAgo) for cr.inRead { cr.cond.Wait() } cr.conn.rwc.SetReadDeadline(time.Time{}) } func (cr *connReader) setReadLimit(remain int64) { cr.remain = remain } func (cr *connReader) setInfiniteReadLimit() { cr.remain = maxInt64 } func (cr *connReader) hitReadLimit() bool { return cr.remain <= 0 } // handleReadError is called whenever a Read from the client returns a // non-nil error. // // The provided non-nil err is almost always io.EOF or a "use of // closed network connection". In any case, the error is not // particularly interesting, except perhaps for debugging during // development. Any error means the connection is dead and we should // down its context. // // It may be called from multiple goroutines. func (cr *connReader) handleReadError(_ error) { cr.conn.cancelCtx() cr.closeNotify() } // may be called from multiple goroutines. 
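//
// For reference, a handler typically consumes the resulting channel like
// this (sketch; done is a hypothetical completion channel):
//
//	cn, ok := w.(CloseNotifier)
//	if ok {
//		select {
//		case <-cn.CloseNotify():
//			// client went away; abandon the work
//		case <-done:
//		}
//	}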
func (cr *connReader) closeNotify() { res, _ := cr.conn.curReq.Load().(*response) if res != nil && atomic.CompareAndSwapInt32(&res.didCloseNotify, 0, 1) { res.closeNotifyCh <- true } } func (cr *connReader) Read(p []byte) (n int, err error) { cr.lock() if cr.inRead { cr.unlock() if cr.conn.hijacked() { panic("invalid Body.Read call. After hijacked, the original Request must not be used") } panic("invalid concurrent Body.Read call") } if cr.hitReadLimit() { cr.unlock() return 0, io.EOF } if len(p) == 0 { cr.unlock() return 0, nil } if int64(len(p)) > cr.remain { p = p[:cr.remain] } if cr.hasByte { p[0] = cr.byteBuf[0] cr.hasByte = false cr.unlock() return 1, nil } cr.inRead = true cr.unlock() n, err = cr.conn.rwc.Read(p) cr.lock() cr.inRead = false if err != nil { cr.handleReadError(err) } cr.remain -= int64(n) cr.unlock() cr.cond.Broadcast() return n, err } var ( bufioReaderPool sync.Pool bufioWriter2kPool sync.Pool bufioWriter4kPool sync.Pool ) var copyBufPool = sync.Pool{ New: func() interface{} { b := make([]byte, 32*1024) return &b }, } func bufioWriterPool(size int) *sync.Pool { switch size { case 2 << 10: return &bufioWriter2kPool case 4 << 10: return &bufioWriter4kPool } return nil } func newBufioReader(r io.Reader) *bufio.Reader { if v := bufioReaderPool.Get(); v != nil { br := v.(*bufio.Reader) br.Reset(r) return br } // Note: if this reader size is ever changed, update // TestHandlerBodyClose's assumptions. return bufio.NewReader(r) } func putBufioReader(br *bufio.Reader) { br.Reset(nil) bufioReaderPool.Put(br) } func newBufioWriterSize(w io.Writer, size int) *bufio.Writer { pool := bufioWriterPool(size) if pool != nil { if v := pool.Get(); v != nil { bw := v.(*bufio.Writer) bw.Reset(w) return bw } } return bufio.NewWriterSize(w, size) } func putBufioWriter(bw *bufio.Writer) { bw.Reset(nil) if pool := bufioWriterPool(bw.Available()); pool != nil { pool.Put(bw) } } // DefaultMaxHeaderBytes is the maximum permitted size of the headers // in an HTTP request. // This can be overridden by setting Server.MaxHeaderBytes. const DefaultMaxHeaderBytes = 1 << 20 // 1 MB func (srv *Server) maxHeaderBytes() int { if srv.MaxHeaderBytes > 0 { return srv.MaxHeaderBytes } return DefaultMaxHeaderBytes } func (srv *Server) initialReadLimitSize() int64 { return int64(srv.maxHeaderBytes()) + 4096 // bufio slop } // wrapper around io.ReadCloser which on first read, sends an // HTTP/1.1 100 Continue header type expectContinueReader struct { resp *response readCloser io.ReadCloser closed bool sawEOF bool } func (ecr *expectContinueReader) Read(p []byte) (n int, err error) { if ecr.closed { return 0, ErrBodyReadAfterClose } if !ecr.resp.wroteContinue && !ecr.resp.conn.hijacked() { ecr.resp.wroteContinue = true ecr.resp.conn.bufw.WriteString("HTTP/1.1 100 Continue\r\n\r\n") ecr.resp.conn.bufw.Flush() } n, err = ecr.readCloser.Read(p) if err == io.EOF { ecr.sawEOF = true } return } func (ecr *expectContinueReader) Close() error { ecr.closed = true return ecr.readCloser.Close() } // TimeFormat is the time format to use when generating times in HTTP // headers. It is like time.RFC1123 but hard-codes GMT as the time // zone. The time being formatted must be in UTC for Format to // generate the correct format. // // For parsing this time format, see ParseTime. 
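//
// For example, a handler setting a date-valued header by hand might write
// (modTime is a hypothetical time.Time):
//
//	w.Header().Set("Last-Modified", modTime.UTC().Format(TimeFormat))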
const TimeFormat = "Mon, 02 Jan 2006 15:04:05 GMT" // appendTime is a non-allocating version of []byte(t.UTC().Format(TimeFormat)) func appendTime(b []byte, t time.Time) []byte { const days = "SunMonTueWedThuFriSat" const months = "JanFebMarAprMayJunJulAugSepOctNovDec" t = t.UTC() yy, mm, dd := t.Date() hh, mn, ss := t.Clock() day := days[3*t.Weekday():] mon := months[3*(mm-1):] return append(b, day[0], day[1], day[2], ',', ' ', byte('0'+dd/10), byte('0'+dd%10), ' ', mon[0], mon[1], mon[2], ' ', byte('0'+yy/1000), byte('0'+(yy/100)%10), byte('0'+(yy/10)%10), byte('0'+yy%10), ' ', byte('0'+hh/10), byte('0'+hh%10), ':', byte('0'+mn/10), byte('0'+mn%10), ':', byte('0'+ss/10), byte('0'+ss%10), ' ', 'G', 'M', 'T') } var errTooLarge = errors.New("http: request too large") // Read next request from connection. func (c *conn) readRequest(ctx context.Context) (w *response, err error) { if c.hijacked() { return nil, ErrHijacked } var ( wholeReqDeadline time.Time // or zero if none hdrDeadline time.Time // or zero if none ) t0 := time.Now() if d := c.server.readHeaderTimeout(); d != 0 { hdrDeadline = t0.Add(d) } if d := c.server.ReadTimeout; d != 0 { wholeReqDeadline = t0.Add(d) } c.rwc.SetReadDeadline(hdrDeadline) if d := c.server.WriteTimeout; d != 0 { defer func() { c.rwc.SetWriteDeadline(time.Now().Add(d)) }() } c.r.setReadLimit(c.server.initialReadLimitSize()) if c.lastMethod == "POST" { // RFC 7230 section 3 tolerance for old buggy clients. peek, _ := c.bufr.Peek(4) // ReadRequest will get err below c.bufr.Discard(numLeadingCRorLF(peek)) } req, err := readRequest(c.bufr, keepHostHeader) if err != nil { if c.r.hitReadLimit() { return nil, errTooLarge } return nil, err } if !http1ServerSupportsRequest(req) { return nil, badRequestError("unsupported protocol version") } c.lastMethod = req.Method c.r.setInfiniteReadLimit() hosts, haveHost := req.Header["Host"] isH2Upgrade := req.isH2Upgrade() if req.ProtoAtLeast(1, 1) && (!haveHost || len(hosts) == 0) && !isH2Upgrade && req.Method != "CONNECT" { return nil, badRequestError("missing required Host header") } if len(hosts) > 1 { return nil, badRequestError("too many Host headers") } if len(hosts) == 1 && !httpguts.ValidHostHeader(hosts[0]) { return nil, badRequestError("malformed Host header") } for k, vv := range req.Header { if !httpguts.ValidHeaderFieldName(k) { return nil, badRequestError("invalid header name") } for _, v := range vv { if !httpguts.ValidHeaderFieldValue(v) { return nil, badRequestError("invalid header value") } } } delete(req.Header, "Host") ctx, cancelCtx := context.WithCancel(ctx) req.ctx = ctx req.RemoteAddr = c.remoteAddr req.TLS = c.tlsState if body, ok := req.Body.(*body); ok { body.doEarlyClose = true } // Adjust the read deadline if necessary. if !hdrDeadline.Equal(wholeReqDeadline) { c.rwc.SetReadDeadline(wholeReqDeadline) } w = &response{ conn: c, cancelCtx: cancelCtx, req: req, reqBody: req.Body, handlerHeader: make(Header), contentLength: -1, closeNotifyCh: make(chan bool, 1), // We populate these ahead of time so we're not // reading from req.Header after their Handler starts // and maybe mutates it (Issue 14940) wants10KeepAlive: req.wantsHttp10KeepAlive(), wantsClose: req.wantsClose(), } if isH2Upgrade { w.closeAfterReply = true } w.cw.res = w w.w = newBufioWriterSize(&w.cw, bufferBeforeChunkingSize) return w, nil } // http1ServerSupportsRequest reports whether Go's HTTP/1.x server // supports the given request. 
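// In practice that means HTTP/1.0 and HTTP/1.1 requests are accepted, the
// "PRI * HTTP/2.0" client preface is allowed through for handlers that do
// their own HTTP/2 upgrades, and HTTP/0.x is rejected.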
func http1ServerSupportsRequest(req *Request) bool {
	if req.ProtoMajor == 1 {
		return true
	}
	// Accept "PRI * HTTP/2.0" upgrade requests, so Handlers can
	// wire up their own HTTP/2 upgrades.
	if req.ProtoMajor == 2 && req.ProtoMinor == 0 &&
		req.Method == "PRI" && req.RequestURI == "*" {
		return true
	}
	// Reject HTTP/0.x, and all other HTTP/2+ requests (which
	// aren't encoded in ASCII anyway).
	return false
}

func (w *response) Header() Header {
	if w.cw.header == nil && w.wroteHeader && !w.cw.wroteHeader {
		// Accessing the header between logically writing it
		// and physically writing it means we need to allocate
		// a clone to snapshot the logically written state.
		w.cw.header = w.handlerHeader.Clone()
	}
	w.calledHeader = true
	return w.handlerHeader
}

// maxPostHandlerReadBytes is the max number of Request.Body bytes not
// consumed by a handler that the server will read from the client
// in order to keep a connection alive. If there are more bytes than
// this, the server, to be paranoid, instead sends a
// "Connection: close" response.
//
// This number is approximately what a typical machine's TCP buffer
// size is anyway. (if we have the bytes on the machine, we might as
// well read them)
const maxPostHandlerReadBytes = 256 << 10

func checkWriteHeaderCode(code int) {
	// Issue 22880: require valid WriteHeader status codes.
	// For now we only enforce that it's three digits.
	// In the future we might block things over 599 (600 and above aren't defined
	// at https://httpwg.org/specs/rfc7231.html#status.codes)
	// and we might block under 200 (once we have more mature 1xx support).
	// But for now any three digits.
	//
	// We used to send "HTTP/1.1 000 0" on the wire in responses but there's
	// no equivalent bogus thing we can realistically send in HTTP/2,
	// so we'll consistently panic instead and help people find their bugs
	// early. (We can't return an error from WriteHeader even if we wanted to.)
	if code < 100 || code > 999 {
		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
	}
}

// relevantCaller searches the call stack for the first function outside of net/http.
// The purpose of this function is to provide more helpful error messages.
func relevantCaller() runtime.Frame {
	pc := make([]uintptr, 16)
	n := runtime.Callers(1, pc)
	frames := runtime.CallersFrames(pc[:n])
	var frame runtime.Frame
	for {
		frame, more := frames.Next()
		if !strings.HasPrefix(frame.Function, "net/http.") {
			return frame
		}
		if !more {
			break
		}
	}
	return frame
}

func (w *response) WriteHeader(code int) {
	if w.conn.hijacked() {
		caller := relevantCaller()
		w.conn.server.logf("http: response.WriteHeader on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		return
	}
	if w.wroteHeader {
		caller := relevantCaller()
		w.conn.server.logf("http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line)
		return
	}
	checkWriteHeaderCode(code)
	w.wroteHeader = true
	w.status = code

	if w.calledHeader && w.cw.header == nil {
		w.cw.header = w.handlerHeader.Clone()
	}

	if cl := w.handlerHeader.get("Content-Length"); cl != "" {
		v, err := strconv.ParseInt(cl, 10, 64)
		if err == nil && v >= 0 {
			w.contentLength = v
		} else {
			w.conn.server.logf("http: invalid Content-Length of %q", cl)
			w.handlerHeader.Del("Content-Length")
		}
	}
}

// extraHeader is the set of headers sometimes added by chunkWriter.writeHeader.
// This type is used to avoid extra allocations from cloning and/or populating
// the response Header map and all its 1-element slices.
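// For instance, when only date and contentType are set, Write emits just
// (values illustrative):
//
//	Date: Mon, 02 Jan 2006 15:04:05 GMT
//	Content-Type: text/html; charset=utf-8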
type extraHeader struct { contentType string connection string transferEncoding string date []byte // written if not nil contentLength []byte // written if not nil } // Sorted the same as extraHeader.Write's loop. var extraHeaderKeys = [][]byte{ []byte("Content-Type"), []byte("Connection"), []byte("Transfer-Encoding"), } var ( headerContentLength = []byte("Content-Length: ") headerDate = []byte("Date: ") ) // Write writes the headers described in h to w. // // This method has a value receiver, despite the somewhat large size // of h, because it prevents an allocation. The escape analysis isn't // smart enough to realize this function doesn't mutate h. func (h extraHeader) Write(w *bufio.Writer) { if h.date != nil { w.Write(headerDate) w.Write(h.date) w.Write(crlf) } if h.contentLength != nil { w.Write(headerContentLength) w.Write(h.contentLength) w.Write(crlf) } for i, v := range []string{h.contentType, h.connection, h.transferEncoding} { if v != "" { w.Write(extraHeaderKeys[i]) w.Write(colonSpace) w.WriteString(v) w.Write(crlf) } } } // writeHeader finalizes the header sent to the client and writes it // to cw.res.conn.bufw. // // p is not written by writeHeader, but is the first chunk of the body // that will be written. It is sniffed for a Content-Type if none is // set explicitly. It's also used to set the Content-Length, if the // total body size was small and the handler has already finished // running. func (cw *chunkWriter) writeHeader(p []byte) { if cw.wroteHeader { return } cw.wroteHeader = true w := cw.res keepAlivesEnabled := w.conn.server.doKeepAlives() isHEAD := w.req.Method == "HEAD" // header is written out to w.conn.buf below. Depending on the // state of the handler, we either own the map or not. If we // don't own it, the exclude map is created lazily for // WriteSubset to remove headers. The setHeader struct holds // headers we need to add. header := cw.header owned := header != nil if !owned { header = w.handlerHeader } var excludeHeader map[string]bool delHeader := func(key string) { if owned { header.Del(key) return } if _, ok := header[key]; !ok { return } if excludeHeader == nil { excludeHeader = make(map[string]bool) } excludeHeader[key] = true } var setHeader extraHeader // Don't write out the fake "Trailer:foo" keys. See TrailerPrefix. trailers := false for k := range cw.header { if strings.HasPrefix(k, TrailerPrefix) { if excludeHeader == nil { excludeHeader = make(map[string]bool) } excludeHeader[k] = true trailers = true } } for _, v := range cw.header["Trailer"] { trailers = true foreachHeaderElement(v, cw.res.declareTrailer) } te := header.get("Transfer-Encoding") hasTE := te != "" // If the handler is done but never sent a Content-Length // response header and this is our first (and last) write, set // it, even to zero. This helps HTTP/1.0 clients keep their // "keep-alive" connections alive. // Exceptions: 304/204/1xx responses never get Content-Length, and if // it was a HEAD request, we don't know the difference between // 0 actual bytes and 0 bytes because the handler noticed it // was a HEAD request and chose not to write anything. So for // HEAD, the handler should either write the Content-Length or // write non-zero bytes. If it's actually 0 bytes and the // handler never looked at the Request.Method, we just don't // send a Content-Length header. // Further, we don't send an automatic Content-Length if they // set a Transfer-Encoding, because they're generally incompatible. 
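	// Concretely: a handler that makes a single small Write and returns
	// gets an automatic "Content-Length: n" header rather than chunked
	// encoding, since the whole body is still in the chunking buffer here.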
if w.handlerDone.isSet() && !trailers && !hasTE && bodyAllowedForStatus(w.status) && header.get("Content-Length") == "" && (!isHEAD || len(p) > 0) { w.contentLength = int64(len(p)) setHeader.contentLength = strconv.AppendInt(cw.res.clenBuf[:0], int64(len(p)), 10) } // If this was an HTTP/1.0 request with keep-alive and we sent a // Content-Length back, we can make this a keep-alive response ... if w.wants10KeepAlive && keepAlivesEnabled { sentLength := header.get("Content-Length") != "" if sentLength && header.get("Connection") == "keep-alive" { w.closeAfterReply = false } } // Check for an explicit (and valid) Content-Length header. hasCL := w.contentLength != -1 if w.wants10KeepAlive && (isHEAD || hasCL || !bodyAllowedForStatus(w.status)) { _, connectionHeaderSet := header["Connection"] if !connectionHeaderSet { setHeader.connection = "keep-alive" } } else if !w.req.ProtoAtLeast(1, 1) || w.wantsClose { w.closeAfterReply = true } if header.get("Connection") == "close" || !keepAlivesEnabled { w.closeAfterReply = true } // If the client wanted a 100-continue but we never sent it to // them (or, more strictly: we never finished reading their // request body), don't reuse this connection because it's now // in an unknown state: we might be sending this response at // the same time the client is now sending its request body // after a timeout. (Some HTTP clients send Expect: // 100-continue but knowing that some servers don't support // it, the clients set a timer and send the body later anyway) // If we haven't seen EOF, we can't skip over the unread body // because we don't know if the next bytes on the wire will be // the body-following-the-timer or the subsequent request. // See Issue 11549. if ecr, ok := w.req.Body.(*expectContinueReader); ok && !ecr.sawEOF { w.closeAfterReply = true } // Per RFC 2616, we should consume the request body before // replying, if the handler hasn't already done so. But we // don't want to do an unbounded amount of reading here for // DoS reasons, so we only try up to a threshold. // TODO(bradfitz): where does RFC 2616 say that? See Issue 15527 // about HTTP/1.x Handlers concurrently reading and writing, like // HTTP/2 handlers can do. Maybe this code should be relaxed? if w.req.ContentLength != 0 && !w.closeAfterReply { var discard, tooBig bool switch bdy := w.req.Body.(type) { case *expectContinueReader: if bdy.resp.wroteContinue { discard = true } case *body: bdy.mu.Lock() switch { case bdy.closed: if !bdy.sawEOF { // Body was closed in handler with non-EOF error. w.closeAfterReply = true } case bdy.unreadDataSizeLocked() >= maxPostHandlerReadBytes: tooBig = true default: discard = true } bdy.mu.Unlock() default: discard = true } if discard { _, err := io.CopyN(ioutil.Discard, w.reqBody, maxPostHandlerReadBytes+1) switch err { case nil: // There must be even more data left over. tooBig = true case ErrBodyReadAfterClose: // Body was already consumed and closed. case io.EOF: // The remaining body was just consumed, close it. err = w.reqBody.Close() if err != nil { w.closeAfterReply = true } default: // Some other kind of error occurred, like a read timeout, or // corrupt chunked encoding. In any case, whatever remains // on the wire must not be parsed as another HTTP request. w.closeAfterReply = true } } if tooBig { w.requestTooLarge() delHeader("Connection") setHeader.connection = "close" } } code := w.status if bodyAllowedForStatus(code) { // If no content type, apply sniffing algorithm to body. 
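		// (For example, a body beginning with "<html>" is typically
		// sniffed as "text/html; charset=utf-8"; see DetectContentType.)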
_, haveType := header["Content-Type"] // If the Content-Encoding was set and is non-blank, // we shouldn't sniff the body. See Issue 31753. ce := header.Get("Content-Encoding") hasCE := len(ce) > 0 if !hasCE && !haveType && !hasTE && len(p) > 0 { setHeader.contentType = DetectContentType(p) } } else { for _, k := range suppressedHeaders(code) { delHeader(k) } } if !header.has("Date") { setHeader.date = appendTime(cw.res.dateBuf[:0], time.Now()) } if hasCL && hasTE && te != "identity" { // TODO: return an error if WriteHeader gets a return parameter // For now just ignore the Content-Length. w.conn.server.logf("http: WriteHeader called with both Transfer-Encoding of %q and a Content-Length of %d", te, w.contentLength) delHeader("Content-Length") hasCL = false } if w.req.Method == "HEAD" || !bodyAllowedForStatus(code) { // do nothing } else if code == StatusNoContent { delHeader("Transfer-Encoding") } else if hasCL { delHeader("Transfer-Encoding") } else if w.req.ProtoAtLeast(1, 1) { // HTTP/1.1 or greater: Transfer-Encoding has been set to identity, and no // content-length has been provided. The connection must be closed after the // reply is written, and no chunking is to be done. This is the setup // recommended in the Server-Sent Events candidate recommendation 11, // section 8. if hasTE && te == "identity" { cw.chunking = false w.closeAfterReply = true } else { // HTTP/1.1 or greater: use chunked transfer encoding // to avoid closing the connection at EOF. cw.chunking = true setHeader.transferEncoding = "chunked" if hasTE && te == "chunked" { // We will send the chunked Transfer-Encoding header later. delHeader("Transfer-Encoding") } } } else { // HTTP version < 1.1: cannot do chunked transfer // encoding and we don't know the Content-Length so // signal EOF by closing connection. w.closeAfterReply = true delHeader("Transfer-Encoding") // in case already set } // Cannot use Content-Length with non-identity Transfer-Encoding. if cw.chunking { delHeader("Content-Length") } if !w.req.ProtoAtLeast(1, 0) { return } if w.closeAfterReply && (!keepAlivesEnabled || !hasToken(cw.header.get("Connection"), "close")) { delHeader("Connection") if w.req.ProtoAtLeast(1, 1) { setHeader.connection = "close" } } writeStatusLine(w.conn.bufw, w.req.ProtoAtLeast(1, 1), code, w.statusBuf[:]) cw.header.WriteSubset(w.conn.bufw, excludeHeader) setHeader.Write(w.conn.bufw) w.conn.bufw.Write(crlf) } // foreachHeaderElement splits v according to the "#rule" construction // in RFC 7230 section 7 and calls fn for each non-empty element. func foreachHeaderElement(v string, fn func(string)) { v = textproto.TrimString(v) if v == "" { return } if !strings.Contains(v, ",") { fn(v) return } for _, f := range strings.Split(v, ",") { if f = textproto.TrimString(f); f != "" { fn(f) } } } // writeStatusLine writes an HTTP/1.x Status-Line (RFC 7230 Section 3.1.2) // to bw. is11 is whether the HTTP request is HTTP/1.1. false means HTTP/1.0. // code is the response status code. // scratch is an optional scratch buffer. If it has at least capacity 3, it's used. 
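//
// For example, writeStatusLine(bw, true, 200, scratch) writes
//
//	HTTP/1.1 200 OK\r\n
//
// to bw.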
func writeStatusLine(bw *bufio.Writer, is11 bool, code int, scratch []byte) { if is11 { bw.WriteString("HTTP/1.1 ") } else { bw.WriteString("HTTP/1.0 ") } if text, ok := statusText[code]; ok { bw.Write(strconv.AppendInt(scratch[:0], int64(code), 10)) bw.WriteByte(' ') bw.WriteString(text) bw.WriteString("\r\n") } else { // don't worry about performance fmt.Fprintf(bw, "%03d status code %d\r\n", code, code) } } // bodyAllowed reports whether a Write is allowed for this response type. // It's illegal to call this before the header has been flushed. func (w *response) bodyAllowed() bool { if !w.wroteHeader { panic("") } return bodyAllowedForStatus(w.status) } // The Life Of A Write is like this: // // Handler starts. No header has been sent. The handler can either // write a header, or just start writing. Writing before sending a header // sends an implicitly empty 200 OK header. // // If the handler didn't declare a Content-Length up front, we either // go into chunking mode or, if the handler finishes running before // the chunking buffer size, we compute a Content-Length and send that // in the header instead. // // Likewise, if the handler didn't set a Content-Type, we sniff that // from the initial chunk of output. // // The Writers are wired together like: // // 1. *response (the ResponseWriter) -> // 2. (*response).w, a *bufio.Writer of bufferBeforeChunkingSize bytes // 3. chunkWriter.Writer (whose writeHeader finalizes Content-Length/Type) // and which writes the chunk headers, if needed. // 4. conn.buf, a bufio.Writer of default (4kB) bytes, writing to -> // 5. checkConnErrorWriter{c}, which notes any non-nil error on Write // and populates c.werr with it if so. but otherwise writes to: // 6. the rwc, the net.Conn. // // TODO(bradfitz): short-circuit some of the buffering when the // initial header contains both a Content-Type and Content-Length. // Also short-circuit in (1) when the header's been sent and not in // chunking mode, writing directly to (4) instead, if (2) has no // buffered data. More generally, we could short-circuit from (1) to // (3) even in chunking mode if the write size from (1) is over some // threshold and nothing is in (2). The answer might be mostly making // bufferBeforeChunkingSize smaller and having bufio's fast-paths deal // with this instead. func (w *response) Write(data []byte) (n int, err error) { return w.write(len(data), data, "") } func (w *response) WriteString(data string) (n int, err error) { return w.write(len(data), nil, data) } // either dataB or dataS is non-zero. func (w *response) write(lenData int, dataB []byte, dataS string) (n int, err error) { if w.conn.hijacked() { if lenData > 0 { caller := relevantCaller() w.conn.server.logf("http: response.Write on hijacked connection from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line) } return 0, ErrHijacked } if !w.wroteHeader { w.WriteHeader(StatusOK) } if lenData == 0 { return 0, nil } if !w.bodyAllowed() { return 0, ErrBodyNotAllowed } w.written += int64(lenData) // ignoring errors, for errorKludge if w.contentLength != -1 && w.written > w.contentLength { return 0, ErrContentLength } if dataB != nil { return w.w.Write(dataB) } else { return w.w.WriteString(dataS) } } func (w *response) finishRequest() { w.handlerDone.setTrue() if !w.wroteHeader { w.WriteHeader(StatusOK) } w.w.Flush() putBufioWriter(w.w) w.cw.close() w.conn.bufw.Flush() w.conn.r.abortPendingRead() // Close the body (regardless of w.closeAfterReply) so we can // re-use its bufio.Reader later safely. 
	w.reqBody.Close()

	if w.req.MultipartForm != nil {
		w.req.MultipartForm.RemoveAll()
	}
}

// shouldReuseConnection reports whether the underlying TCP connection can be reused.
// It must only be called after the handler is done executing.
func (w *response) shouldReuseConnection() bool {
	if w.closeAfterReply {
		// The request or something set while executing the
		// handler indicated we shouldn't reuse this
		// connection.
		return false
	}

	if w.req.Method != "HEAD" && w.contentLength != -1 && w.bodyAllowed() && w.contentLength != w.written {
		// Did not write enough. Avoid getting out of sync.
		return false
	}

	// There was some error writing to the underlying connection
	// during the request, so don't re-use this conn.
	if w.conn.werr != nil {
		return false
	}

	if w.closedRequestBodyEarly() {
		return false
	}

	return true
}

func (w *response) closedRequestBodyEarly() bool {
	body, ok := w.req.Body.(*body)
	return ok && body.didEarlyClose()
}

func (w *response) Flush() {
	if !w.wroteHeader {
		w.WriteHeader(StatusOK)
	}
	w.w.Flush()
	w.cw.flush()
}

func (c *conn) finalFlush() {
	if c.bufr != nil {
		// Steal the bufio.Reader (~4KB worth of memory) and its associated
		// reader for a future connection.
		putBufioReader(c.bufr)
		c.bufr = nil
	}

	if c.bufw != nil {
		c.bufw.Flush()
		// Steal the bufio.Writer (~4KB worth of memory) and its associated
		// writer for a future connection.
		putBufioWriter(c.bufw)
		c.bufw = nil
	}
}

// Close the connection.
func (c *conn) close() {
	c.finalFlush()
	c.rwc.Close()
}

// rstAvoidanceDelay is the amount of time we sleep after closing the
// write side of a TCP connection before closing the entire socket.
// By sleeping, we increase the chances that the client sees our FIN
// and processes its final data before they process the subsequent RST
// from closing a connection with known unread data.
// This RST seems to occur mostly on BSD systems. (And Windows?)
// This timeout is somewhat arbitrary (~latency around the planet).
const rstAvoidanceDelay = 500 * time.Millisecond

type closeWriter interface {
	CloseWrite() error
}

var _ closeWriter = (*net.TCPConn)(nil)

// closeWrite flushes any outstanding data and sends a FIN packet (if
// client is connected via TCP), signalling that we're done. We then
// pause for a bit, hoping the client processes it before any
// subsequent RST.
//
// See https://golang.org/issue/3595
func (c *conn) closeWriteAndWait() {
	c.finalFlush()
	if tcp, ok := c.rwc.(closeWriter); ok {
		tcp.CloseWrite()
	}
	time.Sleep(rstAvoidanceDelay)
}

// validNextProto reports whether the proto is not a blacklisted ALPN
// protocol name. Empty and built-in protocol types are blacklisted
// and can't be overridden with alternate implementations.
func validNextProto(proto string) bool {
	switch proto {
	case "", "http/1.1", "http/1.0":
		return false
	}
	return true
}

func (c *conn) setState(nc net.Conn, state ConnState) {
	srv := c.server
	switch state {
	case StateNew:
		srv.trackConn(c, true)
	case StateHijacked, StateClosed:
		srv.trackConn(c, false)
	}
	if state > 0xff || state < 0 {
		panic("internal error")
	}
	packedState := uint64(time.Now().Unix()<<8) | uint64(state)
	atomic.StoreUint64(&c.curState.atomic, packedState)
	if hook := srv.ConnState; hook != nil {
		hook(nc, state)
	}
}

func (c *conn) getState() (state ConnState, unixSec int64) {
	packedState := atomic.LoadUint64(&c.curState.atomic)
	return ConnState(packedState & 0xff), int64(packedState >> 8)
}

// badRequestError is a literal string (used by the server in HTML,
// unescaped) to tell the user why their request was bad.
It should // be plain text without user info or other embedded errors. type badRequestError string func (e badRequestError) Error() string { return "Bad Request: " + string(e) } // ErrAbortHandler is a sentinel panic value to abort a handler. // While any panic from ServeHTTP aborts the response to the client, // panicking with ErrAbortHandler also suppresses logging of a stack // trace to the server's error log. var ErrAbortHandler = errors.New("net/http: abort Handler") // isCommonNetReadError reports whether err is a common error // encountered during reading a request off the network when the // client has gone away or had its read fail somehow. This is used to // determine which logs are interesting enough to log about. func isCommonNetReadError(err error) bool { if err == io.EOF { return true } if neterr, ok := err.(net.Error); ok && neterr.Timeout() { return true } if oe, ok := err.(*net.OpError); ok && oe.Op == "read" { return true } return false } // Serve a new connection. func (c *conn) serve(ctx context.Context) { c.remoteAddr = c.rwc.RemoteAddr().String() ctx = context.WithValue(ctx, LocalAddrContextKey, c.rwc.LocalAddr()) defer func() { if err := recover(); err != nil && err != ErrAbortHandler { const size = 64 << 10 buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] c.server.logf("http: panic serving %v: %v\n%s", c.remoteAddr, err, buf) } if !c.hijacked() { c.close() c.setState(c.rwc, StateClosed) } }() if tlsConn, ok := c.rwc.(*tls.Conn); ok { if d := c.server.ReadTimeout; d != 0 { c.rwc.SetReadDeadline(time.Now().Add(d)) } if d := c.server.WriteTimeout; d != 0 { c.rwc.SetWriteDeadline(time.Now().Add(d)) } if err := tlsConn.Handshake(); err != nil { // If the handshake failed due to the client not speaking // TLS, assume they're speaking plaintext HTTP and write a // 400 response on the TLS conn's underlying net.Conn. if re, ok := err.(tls.RecordHeaderError); ok && re.Conn != nil && tlsRecordHeaderLooksLikeHTTP(re.RecordHeader) { io.WriteString(re.Conn, "HTTP/1.0 400 Bad Request\r\n\r\nClient sent an HTTP request to an HTTPS server.\n") re.Conn.Close() return } c.server.logf("http: TLS handshake error from %s: %v", c.rwc.RemoteAddr(), err) return } c.tlsState = new(tls.ConnectionState) *c.tlsState = tlsConn.ConnectionState() if proto := c.tlsState.NegotiatedProtocol; validNextProto(proto) { if fn := c.server.TLSNextProto[proto]; fn != nil { h := initALPNRequest{ctx, tlsConn, serverHandler{c.server}} fn(c.server, tlsConn, h) } return } } // HTTP/1.x from here on. ctx, cancelCtx := context.WithCancel(ctx) c.cancelCtx = cancelCtx defer cancelCtx() c.r = &connReader{conn: c} c.bufr = newBufioReader(c.r) c.bufw = newBufioWriterSize(checkConnErrorWriter{c}, 4<<10) for { w, err := c.readRequest(ctx) if c.r.remain != c.server.initialReadLimitSize() { // If we read any bytes off the wire, we're active. c.setState(c.rwc, StateActive) } if err != nil { const errorHeaders = "\r\nContent-Type: text/plain; charset=utf-8\r\nConnection: close\r\n\r\n" switch { case err == errTooLarge: // Their HTTP client may or may not be // able to read this if we're // responding to them and hanging up // while they're still writing their // request. Undefined behavior. 
const publicErr = "431 Request Header Fields Too Large" fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) c.closeWriteAndWait() return case isUnsupportedTEError(err): // Respond as per RFC 7230 Section 3.3.1 which says, // A server that receives a request message with a // transfer coding it does not understand SHOULD // respond with 501 (Unimplemented). code := StatusNotImplemented // We purposefully aren't echoing back the transfer-encoding's value, // so as to mitigate the risk of cross side scripting by an attacker. fmt.Fprintf(c.rwc, "HTTP/1.1 %d %s%sUnsupported transfer encoding", code, StatusText(code), errorHeaders) return case isCommonNetReadError(err): return // don't reply default: publicErr := "400 Bad Request" if v, ok := err.(badRequestError); ok { publicErr = publicErr + ": " + string(v) } fmt.Fprintf(c.rwc, "HTTP/1.1 "+publicErr+errorHeaders+publicErr) return } } // Expect 100 Continue support req := w.req if req.expectsContinue() { if req.ProtoAtLeast(1, 1) && req.ContentLength != 0 { // Wrap the Body reader with one that replies on the connection req.Body = &expectContinueReader{readCloser: req.Body, resp: w} } } else if req.Header.get("Expect") != "" { w.sendExpectationFailed() return } c.curReq.Store(w) if requestBodyRemains(req.Body) { registerOnHitEOF(req.Body, w.conn.r.startBackgroundRead) } else { w.conn.r.startBackgroundRead() } // HTTP cannot have multiple simultaneous active requests.[*] // Until the server replies to this request, it can't read another, // so we might as well run the handler in this goroutine. // [*] Not strictly true: HTTP pipelining. We could let them all process // in parallel even if their responses need to be serialized. // But we're not going to implement HTTP pipelining because it // was never deployed in the wild and the answer is HTTP/2. serverHandler{c.server}.ServeHTTP(w, w.req) w.cancelCtx() if c.hijacked() { return } w.finishRequest() if !w.shouldReuseConnection() { if w.requestBodyLimitHit || w.closedRequestBodyEarly() { c.closeWriteAndWait() } return } c.setState(c.rwc, StateIdle) c.curReq.Store((*response)(nil)) if !w.conn.server.doKeepAlives() { // We're in shutdown mode. We might've replied // to the user without "Connection: close" and // they might think they can send another // request, but such is life with HTTP/1.1. return } if d := c.server.idleTimeout(); d != 0 { c.rwc.SetReadDeadline(time.Now().Add(d)) if _, err := c.bufr.Peek(4); err != nil { return } } c.rwc.SetReadDeadline(time.Time{}) } } func (w *response) sendExpectationFailed() { // TODO(bradfitz): let ServeHTTP handlers handle // requests with non-standard expectation[s]? Seems // theoretical at best, and doesn't fit into the // current ServeHTTP model anyway. We'd need to // make the ResponseWriter an optional // "ExpectReplier" interface or something. // // For now we'll just obey RFC 7231 5.1.1 which says // "A server that receives an Expect field-value other // than 100-continue MAY respond with a 417 (Expectation // Failed) status code to indicate that the unexpected // expectation cannot be met." w.Header().Set("Connection", "close") w.WriteHeader(StatusExpectationFailed) w.finishRequest() } // Hijack implements the Hijacker.Hijack method. Our response is both a ResponseWriter // and a Hijacker. 
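// Note that after a successful Hijack the response's chunk-writer buffer is
// returned to its pool and w.w is set to nil; any later Write on w logs a
// warning and fails with ErrHijacked.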
func (w *response) Hijack() (rwc net.Conn, buf *bufio.ReadWriter, err error) { if w.handlerDone.isSet() { panic("net/http: Hijack called after ServeHTTP finished") } if w.wroteHeader { w.cw.flush() } c := w.conn c.mu.Lock() defer c.mu.Unlock() // Release the bufioWriter that writes to the chunk writer, it is not // used after a connection has been hijacked. rwc, buf, err = c.hijackLocked() if err == nil { putBufioWriter(w.w) w.w = nil } return rwc, buf, err } func (w *response) CloseNotify() <-chan bool { if w.handlerDone.isSet() { panic("net/http: CloseNotify called after ServeHTTP finished") } return w.closeNotifyCh } func registerOnHitEOF(rc io.ReadCloser, fn func()) { switch v := rc.(type) { case *expectContinueReader: registerOnHitEOF(v.readCloser, fn) case *body: v.registerOnHitEOF(fn) default: panic("unexpected type " + fmt.Sprintf("%T", rc)) } } // requestBodyRemains reports whether future calls to Read // on rc might yield more data. func requestBodyRemains(rc io.ReadCloser) bool { if rc == NoBody { return false } switch v := rc.(type) { case *expectContinueReader: return requestBodyRemains(v.readCloser) case *body: return v.bodyRemains() default: panic("unexpected type " + fmt.Sprintf("%T", rc)) } } // The HandlerFunc type is an adapter to allow the use of // ordinary functions as HTTP handlers. If f is a function // with the appropriate signature, HandlerFunc(f) is a // Handler that calls f. type HandlerFunc func(ResponseWriter, *Request) // ServeHTTP calls f(w, r). func (f HandlerFunc) ServeHTTP(w ResponseWriter, r *Request) { f(w, r) } // Helper handlers // Error replies to the request with the specified error message and HTTP code. // It does not otherwise end the request; the caller should ensure no further // writes are done to w. // The error message should be plain text. func Error(w ResponseWriter, error string, code int) { w.Header().Set("Content-Type", "text/plain; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") w.WriteHeader(code) fmt.Fprintln(w, error) } // NotFound replies to the request with an HTTP 404 not found error. func NotFound(w ResponseWriter, r *Request) { Error(w, "404 page not found", StatusNotFound) } // NotFoundHandler returns a simple request handler // that replies to each request with a ``404 page not found'' reply. func NotFoundHandler() Handler { return HandlerFunc(NotFound) } // StripPrefix returns a handler that serves HTTP requests // by removing the given prefix from the request URL's Path // and invoking the handler h. StripPrefix handles a // request for a path that doesn't begin with prefix by // replying with an HTTP 404 not found error. func StripPrefix(prefix string, h Handler) Handler { if prefix == "" { return h } return HandlerFunc(func(w ResponseWriter, r *Request) { if p := strings.TrimPrefix(r.URL.Path, prefix); len(p) < len(r.URL.Path) { r2 := new(Request) *r2 = *r r2.URL = new(url.URL) *r2.URL = *r.URL r2.URL.Path = p h.ServeHTTP(w, r2) } else { NotFound(w, r) } }) } // Redirect replies to the request with a redirect to url, // which may be a path relative to the request path. // // The provided code should be in the 3xx range and is usually // StatusMovedPermanently, StatusFound or StatusSeeOther. // // If the Content-Type header has not been set, Redirect sets it // to "text/html; charset=utf-8" and writes a small HTML body. // Setting the Content-Type header to any value, including nil, // disables that behavior. 
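//
// Typical use from a handler (sketch; the paths are illustrative):
//
//	func login(w ResponseWriter, r *Request) {
//		// ... authenticate ...
//		Redirect(w, r, "/dashboard", StatusSeeOther)
//	}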
func Redirect(w ResponseWriter, r *Request, url string, code int) { if u, err := urlpkg.Parse(url); err == nil { // If url was relative, make its path absolute by // combining with request path. // The client would probably do this for us, // but doing it ourselves is more reliable. // See RFC 7231, section 7.1.2 if u.Scheme == "" && u.Host == "" { oldpath := r.URL.Path if oldpath == "" { // should not happen, but avoid a crash if it does oldpath = "/" } // no leading http://server if url == "" || url[0] != '/' { // make relative path absolute olddir, _ := path.Split(oldpath) url = olddir + url } var query string if i := strings.Index(url, "?"); i != -1 { url, query = url[:i], url[i:] } // clean up but preserve trailing slash trailing := strings.HasSuffix(url, "/") url = path.Clean(url) if trailing && !strings.HasSuffix(url, "/") { url += "/" } url += query } } h := w.Header() // RFC 7231 notes that a short HTML body is usually included in // the response because older user agents may not understand 301/307. // Do it only if the request didn't already have a Content-Type header. _, hadCT := h["Content-Type"] h.Set("Location", hexEscapeNonASCII(url)) if !hadCT && (r.Method == "GET" || r.Method == "HEAD") { h.Set("Content-Type", "text/html; charset=utf-8") } w.WriteHeader(code) // Shouldn't send the body for POST or HEAD; that leaves GET. if !hadCT && r.Method == "GET" { body := "<a href=\"" + htmlEscape(url) + "\">" + statusText[code] + "</a>.\n" fmt.Fprintln(w, body) } } var htmlReplacer = strings.NewReplacer( "&", "&amp;", "<", "&lt;", ">", "&gt;", // "&#34;" is shorter than "&quot;". `"`, "&#34;", // "&#39;" is shorter than "&apos;" and apos was not in HTML until HTML5. "'", "&#39;", ) func htmlEscape(s string) string { return htmlReplacer.Replace(s) } // Redirect to a fixed URL type redirectHandler struct { url string code int } func (rh *redirectHandler) ServeHTTP(w ResponseWriter, r *Request) { Redirect(w, r, rh.url, rh.code) } // RedirectHandler returns a request handler that redirects // each request it receives to the given url using the given // status code. // // The provided code should be in the 3xx range and is usually // StatusMovedPermanently, StatusFound or StatusSeeOther. func RedirectHandler(url string, code int) Handler { return &redirectHandler{url, code} } // ServeMux is an HTTP request multiplexer. // It matches the URL of each incoming request against a list of registered // patterns and calls the handler for the pattern that // most closely matches the URL. // // Patterns name fixed, rooted paths, like "/favicon.ico", // or rooted subtrees, like "/images/" (note the trailing slash). // Longer patterns take precedence over shorter ones, so that // if there are handlers registered for both "/images/" // and "/images/thumbnails/", the latter handler will be // called for paths beginning "/images/thumbnails/" and the // former will receive requests for any other paths in the // "/images/" subtree. // // Note that since a pattern ending in a slash names a rooted subtree, // the pattern "/" matches all paths not matched by other registered // patterns, not just the URL with Path == "/". // // If a subtree has been registered and a request is received naming the // subtree root without its trailing slash, ServeMux redirects that // request to the subtree root (adding the trailing slash). This behavior can // be overridden with a separate registration for the path without // the trailing slash. 
For example, registering "/images/" causes ServeMux // to redirect a request for "/images" to "/images/", unless "/images" has // been registered separately. // // Patterns may optionally begin with a host name, restricting matches to // URLs on that host only. Host-specific patterns take precedence over // general patterns, so that a handler might register for the two patterns // "/codesearch" and "codesearch.google.com/" without also taking over // requests for "http://www.google.com/". // // ServeMux also takes care of sanitizing the URL request path and the Host // header, stripping the port number and redirecting any request containing . or // .. elements or repeated slashes to an equivalent, cleaner URL. type ServeMux struct { mu sync.RWMutex m map[string]muxEntry es []muxEntry // slice of entries sorted from longest to shortest. hosts bool // whether any patterns contain hostnames } type muxEntry struct { h Handler pattern string } // NewServeMux allocates and returns a new ServeMux. func NewServeMux() *ServeMux { return new(ServeMux) } // DefaultServeMux is the default ServeMux used by Serve. var DefaultServeMux = &defaultServeMux var defaultServeMux ServeMux // cleanPath returns the canonical path for p, eliminating . and .. elements. func cleanPath(p string) string { if p == "" { return "/" } if p[0] != '/' { p = "/" + p } np := path.Clean(p) // path.Clean removes trailing slash except for root; // put the trailing slash back if necessary. if p[len(p)-1] == '/' && np != "/" { // Fast path for common case of p being the string we want: if len(p) == len(np)+1 && strings.HasPrefix(p, np) { np = p } else { np += "/" } } return np } // stripHostPort returns h without any trailing ":<port>". func stripHostPort(h string) string { // If no port on host, return unchanged if strings.IndexByte(h, ':') == -1 { return h } host, _, err := net.SplitHostPort(h) if err != nil { return h // on error, return unchanged } return host } // Find a handler on a handler map given a path string. // Most-specific (longest) pattern wins. func (mux *ServeMux) match(path string) (h Handler, pattern string) { // Check for exact match first. v, ok := mux.m[path] if ok { return v.h, v.pattern } // Check for longest valid match. mux.es contains all patterns // that end in / sorted from longest to shortest. for _, e := range mux.es { if strings.HasPrefix(path, e.pattern) { return e.h, e.pattern } } return nil, "" } // redirectToPathSlash determines if the given path needs appending "/" to it. // This occurs when a handler for path + "/" was already registered, but // not for path itself. If the path needs appending to, it creates a new // URL, setting the path to u.Path + "/" and returning true to indicate so. func (mux *ServeMux) redirectToPathSlash(host, path string, u *url.URL) (*url.URL, bool) { mux.mu.RLock() shouldRedirect := mux.shouldRedirectRLocked(host, path) mux.mu.RUnlock() if !shouldRedirect { return u, false } path = path + "/" u = &url.URL{Path: path, RawQuery: u.RawQuery} return u, true } // shouldRedirectRLocked reports whether the given path and host should be redirected to // path+"/". This should happen if a handler is registered for path+"/" but // not path -- see comments at ServeMux. 
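// For example, with only "/images/" registered, a request for "/images"
// reports true and is redirected to "/images/"; once "/images" itself is
// registered, the redirect no longer applies.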
func (mux *ServeMux) shouldRedirectRLocked(host, path string) bool { p := []string{path, host + path} for _, c := range p { if _, exist := mux.m[c]; exist { return false } } n := len(path) if n == 0 { return false } for _, c := range p { if _, exist := mux.m[c+"/"]; exist { return path[n-1] != '/' } } return false } // Handler returns the handler to use for the given request, // consulting r.Method, r.Host, and r.URL.Path. It always returns // a non-nil handler. If the path is not in its canonical form, the // handler will be an internally-generated handler that redirects // to the canonical path. If the host contains a port, it is ignored // when matching handlers. // // The path and host are used unchanged for CONNECT requests. // // Handler also returns the registered pattern that matches the // request or, in the case of internally-generated redirects, // the pattern that will match after following the redirect. // // If there is no registered handler that applies to the request, // Handler returns a ``page not found'' handler and an empty pattern. func (mux *ServeMux) Handler(r *Request) (h Handler, pattern string) { // CONNECT requests are not canonicalized. if r.Method == "CONNECT" { // If r.URL.Path is /tree and its handler is not registered, // the /tree -> /tree/ redirect applies to CONNECT requests // but the path canonicalization does not. if u, ok := mux.redirectToPathSlash(r.URL.Host, r.URL.Path, r.URL); ok { return RedirectHandler(u.String(), StatusMovedPermanently), u.Path } return mux.handler(r.Host, r.URL.Path) } // All other requests have any port stripped and path cleaned // before passing to mux.handler. host := stripHostPort(r.Host) path := cleanPath(r.URL.Path) // If the given path is /tree and its handler is not registered, // redirect for /tree/. if u, ok := mux.redirectToPathSlash(host, path, r.URL); ok { return RedirectHandler(u.String(), StatusMovedPermanently), u.Path } if path != r.URL.Path { _, pattern = mux.handler(host, path) url := *r.URL url.Path = path return RedirectHandler(url.String(), StatusMovedPermanently), pattern } return mux.handler(host, r.URL.Path) } // handler is the main implementation of Handler. // The path is known to be in canonical form, except for CONNECT methods. func (mux *ServeMux) handler(host, path string) (h Handler, pattern string) { mux.mu.RLock() defer mux.mu.RUnlock() // Host-specific pattern takes precedence over generic ones if mux.hosts { h, pattern = mux.match(host + path) } if h == nil { h, pattern = mux.match(path) } if h == nil { h, pattern = NotFoundHandler(), "" } return } // ServeHTTP dispatches the request to the handler whose // pattern most closely matches the request URL. func (mux *ServeMux) ServeHTTP(w ResponseWriter, r *Request) { if r.RequestURI == "*" { if r.ProtoAtLeast(1, 1) { w.Header().Set("Connection", "close") } w.WriteHeader(StatusBadRequest) return } h, _ := mux.Handler(r) h.ServeHTTP(w, r) } // Handle registers the handler for the given pattern. // If a handler already exists for pattern, Handle panics. 
func (mux *ServeMux) Handle(pattern string, handler Handler) { mux.mu.Lock() defer mux.mu.Unlock() if pattern == "" { panic("http: invalid pattern") } if handler == nil { panic("http: nil handler") } if _, exist := mux.m[pattern]; exist { panic("http: multiple registrations for " + pattern) } if mux.m == nil { mux.m = make(map[string]muxEntry) } e := muxEntry{h: handler, pattern: pattern} mux.m[pattern] = e if pattern[len(pattern)-1] == '/' { mux.es = appendSorted(mux.es, e) } if pattern[0] != '/' { mux.hosts = true } } func appendSorted(es []muxEntry, e muxEntry) []muxEntry { n := len(es) i := sort.Search(n, func(i int) bool { return len(es[i].pattern) < len(e.pattern) }) if i == n { return append(es, e) } // we now know that i points at where we want to insert es = append(es, muxEntry{}) // try to grow the slice in place, any entry works. copy(es[i+1:], es[i:]) // Move shorter entries down es[i] = e return es } // HandleFunc registers the handler function for the given pattern. func (mux *ServeMux) HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { if handler == nil { panic("http: nil handler") } mux.Handle(pattern, HandlerFunc(handler)) } // Handle registers the handler for the given pattern // in the DefaultServeMux. // The documentation for ServeMux explains how patterns are matched. func Handle(pattern string, handler Handler) { DefaultServeMux.Handle(pattern, handler) } // HandleFunc registers the handler function for the given pattern // in the DefaultServeMux. // The documentation for ServeMux explains how patterns are matched. func HandleFunc(pattern string, handler func(ResponseWriter, *Request)) { DefaultServeMux.HandleFunc(pattern, handler) } // Serve accepts incoming HTTP connections on the listener l, // creating a new service goroutine for each. The service goroutines // read requests and then call handler to reply to them. // // The handler is typically nil, in which case the DefaultServeMux is used. // // HTTP/2 support is only enabled if the Listener returns *tls.Conn // connections and they were configured with "h2" in the TLS // Config.NextProtos. // // Serve always returns a non-nil error. func Serve(l net.Listener, handler Handler) error { srv := &Server{Handler: handler} return srv.Serve(l) } // ServeTLS accepts incoming HTTPS connections on the listener l, // creating a new service goroutine for each. The service goroutines // read requests and then call handler to reply to them. // // The handler is typically nil, in which case the DefaultServeMux is used. // // Additionally, files containing a certificate and matching private key // for the server must be provided. If the certificate is signed by a // certificate authority, the certFile should be the concatenation // of the server's certificate, any intermediates, and the CA's certificate. // // ServeTLS always returns a non-nil error. func ServeTLS(l net.Listener, handler Handler, certFile, keyFile string) error { srv := &Server{Handler: handler} return srv.ServeTLS(l, certFile, keyFile) } // A Server defines parameters for running an HTTP server. // The zero value for Server is a valid configuration. type Server struct { // Addr optionally specifies the TCP address for the server to listen on, // in the form "host:port". If empty, ":http" (port 80) is used. // The service names are defined in RFC 6335 and assigned by IANA. // See net.Dial for details of the address format. 
Addr string Handler Handler // handler to invoke, http.DefaultServeMux if nil // TLSConfig optionally provides a TLS configuration for use // by ServeTLS and ListenAndServeTLS. Note that this value is // cloned by ServeTLS and ListenAndServeTLS, so it's not // possible to modify the configuration with methods like // tls.Config.SetSessionTicketKeys. To use // SetSessionTicketKeys, use Server.Serve with a TLS Listener // instead. TLSConfig *tls.Config // ReadTimeout is the maximum duration for reading the entire // request, including the body. // // Because ReadTimeout does not let Handlers make per-request // decisions on each request body's acceptable deadline or // upload rate, most users will prefer to use // ReadHeaderTimeout. It is valid to use them both. ReadTimeout time.Duration // ReadHeaderTimeout is the amount of time allowed to read // request headers. The connection's read deadline is reset // after reading the headers and the Handler can decide what // is considered too slow for the body. If ReadHeaderTimeout // is zero, the value of ReadTimeout is used. If both are // zero, there is no timeout. ReadHeaderTimeout time.Duration // WriteTimeout is the maximum duration before timing out // writes of the response. It is reset whenever a new // request's header is read. Like ReadTimeout, it does not // let Handlers make decisions on a per-request basis. WriteTimeout time.Duration // IdleTimeout is the maximum amount of time to wait for the // next request when keep-alives are enabled. If IdleTimeout // is zero, the value of ReadTimeout is used. If both are // zero, there is no timeout. IdleTimeout time.Duration // MaxHeaderBytes controls the maximum number of bytes the // server will read parsing the request header's keys and // values, including the request line. It does not limit the // size of the request body. // If zero, DefaultMaxHeaderBytes is used. MaxHeaderBytes int // TLSNextProto optionally specifies a function to take over // ownership of the provided TLS connection when an ALPN // protocol upgrade has occurred. The map key is the protocol // name negotiated. The Handler argument should be used to // handle HTTP requests and will initialize the Request's TLS // and RemoteAddr if not already set. The connection is // automatically closed when the function returns. // If TLSNextProto is not nil, HTTP/2 support is not enabled // automatically. TLSNextProto map[string]func(*Server, *tls.Conn, Handler) // ConnState specifies an optional callback function that is // called when a client connection changes state. See the // ConnState type and associated constants for details. ConnState func(net.Conn, ConnState) // ErrorLog specifies an optional logger for errors accepting // connections, unexpected behavior from handlers, and // underlying FileSystem errors. // If nil, logging is done via the log package's standard logger. ErrorLog *log.Logger // BaseContext optionally specifies a function that returns // the base context for incoming requests on this server. // The provided Listener is the specific Listener that's // about to start accepting requests. // If BaseContext is nil, the default is context.Background(). // If non-nil, it must return a non-nil context. BaseContext func(net.Listener) context.Context // ConnContext optionally specifies a function that modifies // the context used for a new connection c. The provided ctx // is derived from the base context and has a ServerContextKey // value. 
ConnContext func(ctx context.Context, c net.Conn) context.Context disableKeepAlives int32 // accessed atomically. inShutdown int32 // accessed atomically (non-zero means we're in Shutdown) nextProtoOnce sync.Once // guards setupHTTP2_* init nextProtoErr error // result of http2.ConfigureServer if used mu sync.Mutex listeners map[*net.Listener]struct{} activeConn map[*conn]struct{} doneChan chan struct{} onShutdown []func() } func (s *Server) getDoneChan() <-chan struct{} { s.mu.Lock() defer s.mu.Unlock() return s.getDoneChanLocked() } func (s *Server) getDoneChanLocked() chan struct{} { if s.doneChan == nil { s.doneChan = make(chan struct{}) } return s.doneChan } func (s *Server) closeDoneChanLocked() { ch := s.getDoneChanLocked() select { case <-ch: // Already closed. Don't close again. default: // Safe to close here. We're the only closer, guarded // by s.mu. close(ch) } } // Close immediately closes all active net.Listeners and any // connections in state StateNew, StateActive, or StateIdle. For a // graceful shutdown, use Shutdown. // // Close does not attempt to close (and does not even know about) // any hijacked connections, such as WebSockets. // // Close returns any error returned from closing the Server's // underlying Listener(s). func (srv *Server) Close() error { atomic.StoreInt32(&srv.inShutdown, 1) srv.mu.Lock() defer srv.mu.Unlock() srv.closeDoneChanLocked() err := srv.closeListenersLocked() for c := range srv.activeConn { c.rwc.Close() delete(srv.activeConn, c) } return err } // shutdownPollInterval is how often we poll for quiescence // during Server.Shutdown. This is lower during tests, to // speed up tests. // Ideally we could find a solution that doesn't involve polling, // but which also doesn't have a high runtime cost (and doesn't // involve any contentious mutexes), but that is left as an // exercise for the reader. var shutdownPollInterval = 500 * time.Millisecond // Shutdown gracefully shuts down the server without interrupting any // active connections. Shutdown works by first closing all open // listeners, then closing all idle connections, and then waiting // indefinitely for connections to return to idle and then shut down. // If the provided context expires before the shutdown is complete, // Shutdown returns the context's error, otherwise it returns any // error returned from closing the Server's underlying Listener(s). // // When Shutdown is called, Serve, ListenAndServe, and // ListenAndServeTLS immediately return ErrServerClosed. Make sure the // program doesn't exit and waits instead for Shutdown to return. // // Shutdown does not attempt to close nor wait for hijacked // connections such as WebSockets. The caller of Shutdown should // separately notify such long-lived connections of shutdown and wait // for them to close, if desired. See RegisterOnShutdown for a way to // register shutdown notification functions. // // Once Shutdown has been called on a server, it may not be reused; // future calls to methods such as Serve will return ErrServerClosed. func (srv *Server) Shutdown(ctx context.Context) error { atomic.StoreInt32(&srv.inShutdown, 1) srv.mu.Lock() lnerr := srv.closeListenersLocked() srv.closeDoneChanLocked() for _, f := range srv.onShutdown { go f() } srv.mu.Unlock() ticker := time.NewTicker(shutdownPollInterval) defer ticker.Stop() for { if srv.closeIdleConns() { return lnerr } select { case <-ctx.Done(): return ctx.Err() case <-ticker.C: } } } // RegisterOnShutdown registers a function to call on Shutdown. 
// This can be used to gracefully shutdown connections that have // undergone ALPN protocol upgrade or that have been hijacked. // This function should start protocol-specific graceful shutdown, // but should not wait for shutdown to complete. func (srv *Server) RegisterOnShutdown(f func()) { srv.mu.Lock() srv.onShutdown = append(srv.onShutdown, f) srv.mu.Unlock() } // closeIdleConns closes all idle connections and reports whether the // server is quiescent. func (s *Server) closeIdleConns() bool { s.mu.Lock() defer s.mu.Unlock() quiescent := true for c := range s.activeConn { st, unixSec := c.getState() // Issue 22682: treat StateNew connections as if // they're idle if we haven't read the first request's // header in over 5 seconds. if st == StateNew && unixSec < time.Now().Unix()-5 { st = StateIdle } if st != StateIdle || unixSec == 0 { // Assume unixSec == 0 means it's a very new // connection, without state set yet. quiescent = false continue } c.rwc.Close() delete(s.activeConn, c) } return quiescent } func (s *Server) closeListenersLocked() error { var err error for ln := range s.listeners { if cerr := (*ln).Close(); cerr != nil && err == nil { err = cerr } delete(s.listeners, ln) } return err } // A ConnState represents the state of a client connection to a server. // It's used by the optional Server.ConnState hook. type ConnState int const ( // StateNew represents a new connection that is expected to // send a request immediately. Connections begin at this // state and then transition to either StateActive or // StateClosed. StateNew ConnState = iota // StateActive represents a connection that has read 1 or more // bytes of a request. The Server.ConnState hook for // StateActive fires before the request has entered a handler // and doesn't fire again until the request has been // handled. After the request is handled, the state // transitions to StateClosed, StateHijacked, or StateIdle. // For HTTP/2, StateActive fires on the transition from zero // to one active request, and only transitions away once all // active requests are complete. That means that ConnState // cannot be used to do per-request work; ConnState only notes // the overall state of the connection. StateActive // StateIdle represents a connection that has finished // handling a request and is in the keep-alive state, waiting // for a new request. Connections transition from StateIdle // to either StateActive or StateClosed. StateIdle // StateHijacked represents a hijacked connection. // This is a terminal state. It does not transition to StateClosed. StateHijacked // StateClosed represents a closed connection. // This is a terminal state. Hijacked connections do not // transition to StateClosed. StateClosed ) var stateName = map[ConnState]string{ StateNew: "new", StateActive: "active", StateIdle: "idle", StateHijacked: "hijacked", StateClosed: "closed", } func (c ConnState) String() string { return stateName[c] } // serverHandler delegates to either the server's Handler or // DefaultServeMux and also handles "OPTIONS *" requests. type serverHandler struct { srv *Server } func (sh serverHandler) ServeHTTP(rw ResponseWriter, req *Request) { handler := sh.srv.Handler if handler == nil { handler = DefaultServeMux } if req.RequestURI == "*" && req.Method == "OPTIONS" { handler = globalOptionsHandler{} } handler.ServeHTTP(rw, req) } // ListenAndServe listens on the TCP network address srv.Addr and then // calls Serve to handle requests on incoming connections. 
// Accepted connections are configured to enable TCP keep-alives. // // If srv.Addr is blank, ":http" is used. // // ListenAndServe always returns a non-nil error. After Shutdown or Close, // the returned error is ErrServerClosed. func (srv *Server) ListenAndServe() error { if srv.shuttingDown() { return ErrServerClosed } addr := srv.Addr if addr == "" { addr = ":http" } ln, err := net.Listen("tcp", addr) if err != nil { return err } return srv.Serve(ln) } var testHookServerServe func(*Server, net.Listener) // used if non-nil // shouldDoServeHTTP2 reports whether Server.Serve should configure // automatic HTTP/2. (which sets up the srv.TLSNextProto map) func (srv *Server) shouldConfigureHTTP2ForServe() bool { if srv.TLSConfig == nil { // Compatibility with Go 1.6: // If there's no TLSConfig, it's possible that the user just // didn't set it on the http.Server, but did pass it to // tls.NewListener and passed that listener to Serve. // So we should configure HTTP/2 (to set up srv.TLSNextProto) // in case the listener returns an "h2" *tls.Conn. return true } // The user specified a TLSConfig on their http.Server. // In this, case, only configure HTTP/2 if their tls.Config // explicitly mentions "h2". Otherwise http2.ConfigureServer // would modify the tls.Config to add it, but they probably already // passed this tls.Config to tls.NewListener. And if they did, // it's too late anyway to fix it. It would only be potentially racy. // See Issue 15908. return strSliceContains(srv.TLSConfig.NextProtos, http2NextProtoTLS) } // ErrServerClosed is returned by the Server's Serve, ServeTLS, ListenAndServe, // and ListenAndServeTLS methods after a call to Shutdown or Close. var ErrServerClosed = errors.New("http: Server closed") // Serve accepts incoming connections on the Listener l, creating a // new service goroutine for each. The service goroutines read requests and // then call srv.Handler to reply to them. // // HTTP/2 support is only enabled if the Listener returns *tls.Conn // connections and they were configured with "h2" in the TLS // Config.NextProtos. // // Serve always returns a non-nil error and closes l. // After Shutdown or Close, the returned error is ErrServerClosed. 
func (srv *Server) Serve(l net.Listener) error { if fn := testHookServerServe; fn != nil { fn(srv, l) // call hook with unwrapped listener } origListener := l l = &onceCloseListener{Listener: l} defer l.Close() if err := srv.setupHTTP2_Serve(); err != nil { return err } if !srv.trackListener(&l, true) { return ErrServerClosed } defer srv.trackListener(&l, false) baseCtx := context.Background() if srv.BaseContext != nil { baseCtx = srv.BaseContext(origListener) if baseCtx == nil { panic("BaseContext returned a nil context") } } var tempDelay time.Duration // how long to sleep on accept failure ctx := context.WithValue(baseCtx, ServerContextKey, srv) for { rw, err := l.Accept() if err != nil { select { case <-srv.getDoneChan(): return ErrServerClosed default: } if ne, ok := err.(net.Error); ok && ne.Temporary() { if tempDelay == 0 { tempDelay = 5 * time.Millisecond } else { tempDelay *= 2 } if max := 1 * time.Second; tempDelay > max { tempDelay = max } srv.logf("http: Accept error: %v; retrying in %v", err, tempDelay) time.Sleep(tempDelay) continue } return err } if cc := srv.ConnContext; cc != nil { ctx = cc(ctx, rw) if ctx == nil { panic("ConnContext returned nil") } } tempDelay = 0 c := srv.newConn(rw) c.setState(c.rwc, StateNew) // before Serve can return go c.serve(ctx) } } // ServeTLS accepts incoming connections on the Listener l, creating a // new service goroutine for each. The service goroutines perform TLS // setup and then read requests, calling srv.Handler to reply to them. // // Files containing a certificate and matching private key for the // server must be provided if neither the Server's // TLSConfig.Certificates nor TLSConfig.GetCertificate are populated. // If the certificate is signed by a certificate authority, the // certFile should be the concatenation of the server's certificate, // any intermediates, and the CA's certificate. // // ServeTLS always returns a non-nil error. After Shutdown or Close, the // returned error is ErrServerClosed. func (srv *Server) ServeTLS(l net.Listener, certFile, keyFile string) error { // Setup HTTP/2 before srv.Serve, to initialize srv.TLSConfig // before we clone it and create the TLS Listener. if err := srv.setupHTTP2_ServeTLS(); err != nil { return err } config := cloneTLSConfig(srv.TLSConfig) if !strSliceContains(config.NextProtos, "http/1.1") { config.NextProtos = append(config.NextProtos, "http/1.1") } configHasCert := len(config.Certificates) > 0 || config.GetCertificate != nil if !configHasCert || certFile != "" || keyFile != "" { var err error config.Certificates = make([]tls.Certificate, 1) config.Certificates[0], err = tls.LoadX509KeyPair(certFile, keyFile) if err != nil { return err } } tlsListener := tls.NewListener(l, config) return srv.Serve(tlsListener) } // trackListener adds or removes a net.Listener to the set of tracked // listeners. // // We store a pointer to interface in the map set, in case the // net.Listener is not comparable. This is safe because we only call // trackListener via Serve and can track+defer untrack the same // pointer to local variable there. We never need to compare a // Listener from another caller. // // It reports whether the server is still up (not Shutdown or Closed). 
func (s *Server) trackListener(ln *net.Listener, add bool) bool { s.mu.Lock() defer s.mu.Unlock() if s.listeners == nil { s.listeners = make(map[*net.Listener]struct{}) } if add { if s.shuttingDown() { return false } s.listeners[ln] = struct{}{} } else { delete(s.listeners, ln) } return true } func (s *Server) trackConn(c *conn, add bool) { s.mu.Lock() defer s.mu.Unlock() if s.activeConn == nil { s.activeConn = make(map[*conn]struct{}) } if add { s.activeConn[c] = struct{}{} } else { delete(s.activeConn, c) } } func (s *Server) idleTimeout() time.Duration { if s.IdleTimeout != 0 { return s.IdleTimeout } return s.ReadTimeout } func (s *Server) readHeaderTimeout() time.Duration { if s.ReadHeaderTimeout != 0 { return s.ReadHeaderTimeout } return s.ReadTimeout } func (s *Server) doKeepAlives() bool { return atomic.LoadInt32(&s.disableKeepAlives) == 0 && !s.shuttingDown() } func (s *Server) shuttingDown() bool { // TODO: replace inShutdown with the existing atomicBool type; // see https://github.com/golang/go/issues/20239#issuecomment-381434582 return atomic.LoadInt32(&s.inShutdown) != 0 } // SetKeepAlivesEnabled controls whether HTTP keep-alives are enabled. // By default, keep-alives are always enabled. Only very // resource-constrained environments or servers in the process of // shutting down should disable them. func (srv *Server) SetKeepAlivesEnabled(v bool) { if v { atomic.StoreInt32(&srv.disableKeepAlives, 0) return } atomic.StoreInt32(&srv.disableKeepAlives, 1) // Close idle HTTP/1 conns: srv.closeIdleConns() // TODO: Issue 26303: close HTTP/2 conns as soon as they become idle. } func (s *Server) logf(format string, args ...interface{}) { if s.ErrorLog != nil { s.ErrorLog.Printf(format, args...) } else { log.Printf(format, args...) } } // logf prints to the ErrorLog of the *Server associated with request r // via ServerContextKey. If there's no associated server, or if ErrorLog // is nil, logging is done via the log package's standard logger. func logf(r *Request, format string, args ...interface{}) { s, _ := r.Context().Value(ServerContextKey).(*Server) if s != nil && s.ErrorLog != nil { s.ErrorLog.Printf(format, args...) } else { log.Printf(format, args...) } } // ListenAndServe listens on the TCP network address addr and then calls // Serve with handler to handle requests on incoming connections. // Accepted connections are configured to enable TCP keep-alives. // // The handler is typically nil, in which case the DefaultServeMux is used. // // ListenAndServe always returns a non-nil error. func ListenAndServe(addr string, handler Handler) error { server := &Server{Addr: addr, Handler: handler} return server.ListenAndServe() } // ListenAndServeTLS acts identically to ListenAndServe, except that it // expects HTTPS connections. Additionally, files containing a certificate and // matching private key for the server must be provided. If the certificate // is signed by a certificate authority, the certFile should be the concatenation // of the server's certificate, any intermediates, and the CA's certificate. func ListenAndServeTLS(addr, certFile, keyFile string, handler Handler) error { server := &Server{Addr: addr, Handler: handler} return server.ListenAndServeTLS(certFile, keyFile) } // ListenAndServeTLS listens on the TCP network address srv.Addr and // then calls ServeTLS to handle requests on incoming TLS connections. // Accepted connections are configured to enable TCP keep-alives. 
// // Filenames containing a certificate and matching private key for the // server must be provided if neither the Server's TLSConfig.Certificates // nor TLSConfig.GetCertificate are populated. If the certificate is // signed by a certificate authority, the certFile should be the // concatenation of the server's certificate, any intermediates, and // the CA's certificate. // // If srv.Addr is blank, ":https" is used. // // ListenAndServeTLS always returns a non-nil error. After Shutdown or // Close, the returned error is ErrServerClosed. func (srv *Server) ListenAndServeTLS(certFile, keyFile string) error { if srv.shuttingDown() { return ErrServerClosed } addr := srv.Addr if addr == "" { addr = ":https" } ln, err := net.Listen("tcp", addr) if err != nil { return err } defer ln.Close() return srv.ServeTLS(ln, certFile, keyFile) } // setupHTTP2_ServeTLS conditionally configures HTTP/2 on // srv and reports whether there was an error setting it up. If it is // not configured for policy reasons, nil is returned. func (srv *Server) setupHTTP2_ServeTLS() error { srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults) return srv.nextProtoErr } // setupHTTP2_Serve is called from (*Server).Serve and conditionally // configures HTTP/2 on srv using a more conservative policy than // setupHTTP2_ServeTLS because Serve is called after tls.Listen, // and may be called concurrently. See shouldConfigureHTTP2ForServe. // // The tests named TestTransportAutomaticHTTP2* and // TestConcurrentServerServe in server_test.go demonstrate some // of the supported use cases and motivations. func (srv *Server) setupHTTP2_Serve() error { srv.nextProtoOnce.Do(srv.onceSetNextProtoDefaults_Serve) return srv.nextProtoErr } func (srv *Server) onceSetNextProtoDefaults_Serve() { if srv.shouldConfigureHTTP2ForServe() { srv.onceSetNextProtoDefaults() } } // onceSetNextProtoDefaults configures HTTP/2, if the user hasn't // configured otherwise. (by setting srv.TLSNextProto non-nil) // It must only be called via srv.nextProtoOnce (use srv.setupHTTP2_*). func (srv *Server) onceSetNextProtoDefaults() { if strings.Contains(os.Getenv("GODEBUG"), "http2server=0") { return } // Enable HTTP/2 by default if the user hasn't otherwise // configured their TLSNextProto map. if srv.TLSNextProto == nil { conf := &http2Server{ NewWriteScheduler: func() http2WriteScheduler { return http2NewPriorityWriteScheduler(nil) }, } srv.nextProtoErr = http2ConfigureServer(srv, conf) } } // TimeoutHandler returns a Handler that runs h with the given time limit. // // The new Handler calls h.ServeHTTP to handle each request, but if a // call runs for longer than its time limit, the handler responds with // a 503 Service Unavailable error and the given message in its body. // (If msg is empty, a suitable default message will be sent.) // After such a timeout, writes by h to its ResponseWriter will return // ErrHandlerTimeout. // // TimeoutHandler supports the Flusher and Pusher interfaces but does not // support the Hijacker interface. func TimeoutHandler(h Handler, dt time.Duration, msg string) Handler { return &timeoutHandler{ handler: h, body: msg, dt: dt, } } // ErrHandlerTimeout is returned on ResponseWriter Write calls // in handlers which have timed out. var ErrHandlerTimeout = errors.New("http: Handler timeout") type timeoutHandler struct { handler Handler body string dt time.Duration // When set, no context will be created and this context will // be used instead. 
testContext context.Context } func (h *timeoutHandler) errorBody() string { if h.body != "" { return h.body } return "<html><head><title>Timeout</title></head><body><h1>Timeout</h1></body></html>" } func (h *timeoutHandler) ServeHTTP(w ResponseWriter, r *Request) { ctx := h.testContext if ctx == nil { var cancelCtx context.CancelFunc ctx, cancelCtx = context.WithTimeout(r.Context(), h.dt) defer cancelCtx() } r = r.WithContext(ctx) done := make(chan struct{}) tw := &timeoutWriter{ w: w, h: make(Header), req: r, } panicChan := make(chan interface{}, 1) go func() { defer func() { if p := recover(); p != nil { panicChan <- p } }() h.handler.ServeHTTP(tw, r) close(done) }() select { case p := <-panicChan: panic(p) case <-done: tw.mu.Lock() defer tw.mu.Unlock() dst := w.Header() for k, vv := range tw.h { dst[k] = vv } if !tw.wroteHeader { tw.code = StatusOK } w.WriteHeader(tw.code) w.Write(tw.wbuf.Bytes()) case <-ctx.Done(): tw.mu.Lock() defer tw.mu.Unlock() w.WriteHeader(StatusServiceUnavailable) io.WriteString(w, h.errorBody()) tw.timedOut = true } } type timeoutWriter struct { w ResponseWriter h Header wbuf bytes.Buffer req *Request mu sync.Mutex timedOut bool wroteHeader bool code int } var _ Pusher = (*timeoutWriter)(nil) // Push implements the Pusher interface. func (tw *timeoutWriter) Push(target string, opts *PushOptions) error { if pusher, ok := tw.w.(Pusher); ok { return pusher.Push(target, opts) } return ErrNotSupported } func (tw *timeoutWriter) Header() Header { return tw.h } func (tw *timeoutWriter) Write(p []byte) (int, error) { tw.mu.Lock() defer tw.mu.Unlock() if tw.timedOut { return 0, ErrHandlerTimeout } if !tw.wroteHeader { tw.writeHeaderLocked(StatusOK) } return tw.wbuf.Write(p) } func (tw *timeoutWriter) writeHeaderLocked(code int) { checkWriteHeaderCode(code) switch { case tw.timedOut: return case tw.wroteHeader: if tw.req != nil { caller := relevantCaller() logf(tw.req, "http: superfluous response.WriteHeader call from %s (%s:%d)", caller.Function, path.Base(caller.File), caller.Line) } default: tw.wroteHeader = true tw.code = code } } func (tw *timeoutWriter) WriteHeader(code int) { tw.mu.Lock() defer tw.mu.Unlock() tw.writeHeaderLocked(code) } // onceCloseListener wraps a net.Listener, protecting it from // multiple Close calls. type onceCloseListener struct { net.Listener once sync.Once closeErr error } func (oc *onceCloseListener) Close() error { oc.once.Do(oc.close) return oc.closeErr } func (oc *onceCloseListener) close() { oc.closeErr = oc.Listener.Close() } // globalOptionsHandler responds to "OPTIONS *" requests. type globalOptionsHandler struct{} func (globalOptionsHandler) ServeHTTP(w ResponseWriter, r *Request) { w.Header().Set("Content-Length", "0") if r.ContentLength != 0 { // Read up to 4KB of OPTIONS body (as mentioned in the // spec as being reserved for future use), but anything // over that is considered a waste of server resources // (or an attack) and we abort and close the connection, // courtesy of MaxBytesReader's EOF behavior. mb := MaxBytesReader(w, r.Body, 4<<10) io.Copy(ioutil.Discard, mb) } } // initALPNRequest is an HTTP handler that initializes certain // uninitialized fields in its *Request. Such partially-initialized // Requests come from ALPN protocol handlers. 
type initALPNRequest struct { ctx context.Context c *tls.Conn h serverHandler } // BaseContext is an exported but unadvertised http.Handler method // recognized by x/net/http2 to pass down a context; the TLSNextProto // API predates context support so we shoehorn through the only // interface we have available. func (h initALPNRequest) BaseContext() context.Context { return h.ctx } func (h initALPNRequest) ServeHTTP(rw ResponseWriter, req *Request) { if req.TLS == nil { req.TLS = &tls.ConnectionState{} *req.TLS = h.c.ConnectionState() } if req.Body == nil { req.Body = NoBody } if req.RemoteAddr == "" { req.RemoteAddr = h.c.RemoteAddr().String() } h.h.ServeHTTP(rw, req) } // loggingConn is used for debugging. type loggingConn struct { name string net.Conn } var ( uniqNameMu sync.Mutex uniqNameNext = make(map[string]int) ) func newLoggingConn(baseName string, c net.Conn) net.Conn { uniqNameMu.Lock() defer uniqNameMu.Unlock() uniqNameNext[baseName]++ return &loggingConn{ name: fmt.Sprintf("%s-%d", baseName, uniqNameNext[baseName]), Conn: c, } } func (c *loggingConn) Write(p []byte) (n int, err error) { log.Printf("%s.Write(%d) = ....", c.name, len(p)) n, err = c.Conn.Write(p) log.Printf("%s.Write(%d) = %d, %v", c.name, len(p), n, err) return } func (c *loggingConn) Read(p []byte) (n int, err error) { log.Printf("%s.Read(%d) = ....", c.name, len(p)) n, err = c.Conn.Read(p) log.Printf("%s.Read(%d) = %d, %v", c.name, len(p), n, err) return } func (c *loggingConn) Close() (err error) { log.Printf("%s.Close() = ...", c.name) err = c.Conn.Close() log.Printf("%s.Close() = %v", c.name, err) return } // checkConnErrorWriter writes to c.rwc and records any write errors to c.werr. // It only contains one field (and a pointer field at that), so it // fits in an interface value without an extra allocation. type checkConnErrorWriter struct { c *conn } func (w checkConnErrorWriter) Write(p []byte) (n int, err error) { n, err = w.c.rwc.Write(p) if err != nil && w.c.werr == nil { w.c.werr = err w.c.cancelCtx() } return } func numLeadingCRorLF(v []byte) (n int) { for _, b := range v { if b == '\r' || b == '\n' { n++ continue } break } return } func strSliceContains(ss []string, s string) bool { for _, v := range ss { if v == s { return true } } return false } // tlsRecordHeaderLooksLikeHTTP reports whether a TLS record header // looks like it might've been a misdirected plaintext HTTP request. func tlsRecordHeaderLooksLikeHTTP(hdr [5]byte) bool { switch string(hdr[:]) { case "GET /", "HEAD ", "POST ", "PUT /", "OPTIO": return true } return false }
[ "\"GODEBUG\"" ]
[]
[ "GODEBUG" ]
[]
["GODEBUG"]
go
1
0
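The record above captures Go's net/http server internals: ServeMux pattern matching, the Server lifecycle, and graceful Shutdown. The short sketch below is illustrative only and not part of any record; it exercises the documented behavior that a rooted-subtree pattern like "/images/" also triggers a redirect for "/images", and that ListenAndServe returns ErrServerClosed once Shutdown has been called.

package main

import (
	"context"
	"log"
	"net/http"
	"os"
	"os/signal"
	"time"
)

func main() {
	mux := http.NewServeMux()
	// "/images/" names a rooted subtree; a request for "/images" is
	// redirected to "/images/" unless "/images" is registered separately.
	mux.HandleFunc("/images/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("images subtree\n"))
	})

	srv := &http.Server{Addr: ":8080", Handler: mux}
	go func() {
		// After Shutdown or Close, ListenAndServe returns ErrServerClosed.
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			log.Fatal(err)
		}
	}()

	stop := make(chan os.Signal, 1)
	signal.Notify(stop, os.Interrupt)
	<-stop

	// Give in-flight requests up to five seconds to finish.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Println("shutdown:", err)
	}
}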
qa/rpc-tests/replace-by-fee.py
#!/usr/bin/env python3 # Copyright (c) 2014-2022 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test replace by fee code # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * from test_framework.script import * from test_framework.mininode import * MAX_REPLACEMENT_LIMIT = 100 def txToHex(tx): return bytes_to_hex_str(tx.serialize()) def make_utxo(node, amount, confirmed=True, scriptPubKey=CScript([1])): """Create a txout with a given amount and scriptPubKey Mines coins as needed. confirmed - txouts created will be confirmed in the blockchain; unconfirmed otherwise. """ fee = 1*COIN while node.getbalance() < satoshi_round((amount + fee)/COIN): node.generate(100) #print (node.getbalance(), amount, fee) new_addr = node.getnewaddress() #print new_addr txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN)) tx1 = node.getrawtransaction(txid, 1) txid = int(txid, 16) i = None for i, txout in enumerate(tx1['vout']): #print i, txout['scriptPubKey']['addresses'] if txout['scriptPubKey']['addresses'] == [new_addr]: #print i break assert i is not None tx2 = CTransaction() tx2.vin = [CTxIn(COutPoint(txid, i))] tx2.vout = [CTxOut(amount, scriptPubKey)] tx2.rehash() signed_tx = node.signrawtransaction(txToHex(tx2)) txid = node.sendrawtransaction(signed_tx['hex'], True) # If requested, ensure txouts are confirmed. if confirmed: mempool_size = len(node.getrawmempool()) while mempool_size > 0: node.generate(1) new_size = len(node.getrawmempool()) # Error out if we have something stuck in the mempool, as this # would likely be a bug. assert(new_size < mempool_size) mempool_size = new_size return COutPoint(int(txid, 16), 0) class ReplaceByFeeTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 self.setup_clean_chain = False def setup_network(self): self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, ["-maxorphantx=1000", "-debug", "-whitelist=127.0.0.1", "-limitancestorcount=50", "-limitancestorsize=101", "-limitdescendantcount=200", "-limitdescendantsize=101" ])) self.is_network_split = False def run_test(self): make_utxo(self.nodes[0], 1*COIN) print("Running test simple doublespend...") self.test_simple_doublespend() print("Running test doublespend chain...") self.test_doublespend_chain() print("Running test doublespend tree...") self.test_doublespend_tree() print("Running test replacement feeperkb...") self.test_replacement_feeperkb() print("Running test spends of conflicting outputs...") self.test_spends_of_conflicting_outputs() print("Running test new unconfirmed inputs...") self.test_new_unconfirmed_inputs() print("Running test too many replacements...") self.test_too_many_replacements() print("Running test opt-in...") self.test_opt_in() print("Running test prioritised transactions...") self.test_prioritised_transactions() print("Passed\n") def test_simple_doublespend(self): """Simple doublespend""" tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) # Should fail because we haven't changed the fee tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(1*COIN, CScript([b'b']))] tx1b_hex = txToHex(tx1b) try: tx1b_txid = 
self.nodes[0].sendrawtransaction(tx1b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) # insufficient fee else: assert(False) # Extra 0.1 BTC fee tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] tx1b_hex = txToHex(tx1b) tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) mempool = self.nodes[0].getrawmempool() assert (tx1a_txid not in mempool) assert (tx1b_txid in mempool) assert_equal(tx1b_hex, self.nodes[0].getrawtransaction(tx1b_txid)) def test_doublespend_chain(self): """Doublespend of a long chain""" initial_nValue = 50*COIN tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) prevout = tx0_outpoint remaining_value = initial_nValue chain_txids = [] while remaining_value > 10*COIN: remaining_value -= 1*COIN tx = CTransaction() tx.vin = [CTxIn(prevout, nSequence=0)] tx.vout = [CTxOut(remaining_value, CScript([1]))] tx_hex = txToHex(tx) txid = self.nodes[0].sendrawtransaction(tx_hex, True) chain_txids.append(txid) prevout = COutPoint(int(txid, 16), 0) # Whether the double-spend is allowed is evaluated by including all # child fees - 40 BTC - so this attempt is rejected. dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - 30*COIN, CScript([1]))] dbl_tx_hex = txToHex(dbl_tx) try: self.nodes[0].sendrawtransaction(dbl_tx_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) # insufficient fee else: assert(False) # transaction mistakenly accepted! # Accepted with sufficient fee dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(1*COIN, CScript([1]))] dbl_tx_hex = txToHex(dbl_tx) self.nodes[0].sendrawtransaction(dbl_tx_hex, True) mempool = self.nodes[0].getrawmempool() for doublespent_txid in chain_txids: assert(doublespent_txid not in mempool) def test_doublespend_tree(self): """Doublespend of a big tree of transactions""" initial_nValue = 50*COIN tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None): if _total_txs is None: _total_txs = [0] if _total_txs[0] >= max_txs: return txout_value = (initial_value - fee) // tree_width if txout_value < fee: return vout = [CTxOut(txout_value, CScript([i+1])) for i in range(tree_width)] tx = CTransaction() tx.vin = [CTxIn(prevout, nSequence=0)] tx.vout = vout tx_hex = txToHex(tx) assert(len(tx.serialize()) < 100000) txid = self.nodes[0].sendrawtransaction(tx_hex, True) yield tx _total_txs[0] += 1 txid = int(txid, 16) for i, txout in enumerate(tx.vout): for x in branch(COutPoint(txid, i), txout_value, max_txs, tree_width=tree_width, fee=fee, _total_txs=_total_txs): yield x fee = int(0.0001*COIN) n = MAX_REPLACEMENT_LIMIT tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) assert_equal(len(tree_txs), n) # Attempt double-spend, will fail because too little fee paid dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - fee*n, CScript([1]))] dbl_tx_hex = txToHex(dbl_tx) try: self.nodes[0].sendrawtransaction(dbl_tx_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) # insufficient fee else: assert(False) # 1 BTC fee is enough dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - fee*n - 1*COIN, CScript([1]))] dbl_tx_hex = txToHex(dbl_tx) 
self.nodes[0].sendrawtransaction(dbl_tx_hex, True) mempool = self.nodes[0].getrawmempool() for tx in tree_txs: tx.rehash() assert (tx.hash not in mempool) # Try again, but with more total transactions than the "max txs # double-spent at once" anti-DoS limit. for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2): fee = int(0.0001*COIN) tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) assert_equal(len(tree_txs), n) dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - 2*fee*n, CScript([1]))] dbl_tx_hex = txToHex(dbl_tx) try: self.nodes[0].sendrawtransaction(dbl_tx_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) assert_equal("too many potential replacements" in exp.error['message'], True) else: assert(False) for tx in tree_txs: tx.rehash() self.nodes[0].getrawtransaction(tx.hash) def test_replacement_feeperkb(self): """Replacement requires fee-per-KB to be higher""" tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) # Higher fee, but the fee per KB is much lower, so the replacement is # rejected. tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))] tx1b_hex = txToHex(tx1b) try: tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) # insufficient fee else: assert(False) def test_spends_of_conflicting_outputs(self): """Replacements that spend conflicting tx outputs are rejected""" utxo1 = make_utxo(self.nodes[0], int(1.2*COIN)) utxo2 = make_utxo(self.nodes[0], 3*COIN) tx1a = CTransaction() tx1a.vin = [CTxIn(utxo1, nSequence=0)] tx1a.vout = [CTxOut(int(1.1*COIN), CScript([b'a']))] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) tx1a_txid = int(tx1a_txid, 16) # Direct spend an output of the transaction we're replacing. tx2 = CTransaction() tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)] tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)) tx2.vout = tx1a.vout tx2_hex = txToHex(tx2) try: tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) # Spend tx1a's output to test the indirect case. 
tx1b = CTransaction() tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] tx1b.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1b_hex = txToHex(tx1b) tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) tx1b_txid = int(tx1b_txid, 16) tx2 = CTransaction() tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0), CTxIn(COutPoint(tx1b_txid, 0))] tx2.vout = tx1a.vout tx2_hex = txToHex(tx2) try: tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) def test_new_unconfirmed_inputs(self): """Replacements that add new unconfirmed inputs are rejected""" confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN)) unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False) tx1 = CTransaction() tx1.vin = [CTxIn(confirmed_utxo)] tx1.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1_hex = txToHex(tx1) tx1_txid = self.nodes[0].sendrawtransaction(tx1_hex, True) tx2 = CTransaction() tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)] tx2.vout = tx1.vout tx2_hex = txToHex(tx2) try: tx2_txid = self.nodes[0].sendrawtransaction(tx2_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) def test_too_many_replacements(self): """Replacements that evict too many transactions are rejected""" # Try directly replacing more than MAX_REPLACEMENT_LIMIT # transactions # Start by creating a single transaction with many outputs initial_nValue = 10*COIN utxo = make_utxo(self.nodes[0], initial_nValue) fee = int(0.0001*COIN) split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1)) outputs = [] for i in range(MAX_REPLACEMENT_LIMIT+1): outputs.append(CTxOut(split_value, CScript([1]))) splitting_tx = CTransaction() splitting_tx.vin = [CTxIn(utxo, nSequence=0)] splitting_tx.vout = outputs splitting_tx_hex = txToHex(splitting_tx) txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, True) txid = int(txid, 16) # Now spend each of those outputs individually for i in range(MAX_REPLACEMENT_LIMIT+1): tx_i = CTransaction() tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)] tx_i.vout = [CTxOut(split_value-fee, CScript([b'a']))] tx_i_hex = txToHex(tx_i) self.nodes[0].sendrawtransaction(tx_i_hex, True) # Now create doublespend of the whole lot; should fail. 
# Need a big enough fee to cover all spending transactions and have # a higher fee rate double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1) inputs = [] for i in range(MAX_REPLACEMENT_LIMIT+1): inputs.append(CTxIn(COutPoint(txid, i), nSequence=0)) double_tx = CTransaction() double_tx.vin = inputs double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] double_tx_hex = txToHex(double_tx) try: self.nodes[0].sendrawtransaction(double_tx_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) assert_equal("too many potential replacements" in exp.error['message'], True) else: assert(False) # If we remove an input, it should pass double_tx = CTransaction() double_tx.vin = inputs[0:-1] double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] double_tx_hex = txToHex(double_tx) self.nodes[0].sendrawtransaction(double_tx_hex, True) def test_opt_in(self): """ Replacing should only work if orig tx opted in """ tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) # Create a non-opting in transaction tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) # Shouldn't be able to double-spend tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] tx1b_hex = txToHex(tx1b) try: tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: print(tx1b_txid) assert(False) tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) # Create a different non-opting in transaction tx2a = CTransaction() tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)] tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx2a_hex = txToHex(tx2a) tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True) # Still shouldn't be able to double-spend tx2b = CTransaction() tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2b.vout = [CTxOut(int(0.9*COIN), CScript([b'b']))] tx2b_hex = txToHex(tx2b) try: tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) # Now create a new transaction that spends from tx1a and tx2a # opt-in on one of the inputs # Transaction should be replaceable on either input tx1a_txid = int(tx1a_txid, 16) tx2a_txid = int(tx2a_txid, 16) tx3a = CTransaction() tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff), CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)] tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))] tx3a_hex = txToHex(tx3a) self.nodes[0].sendrawtransaction(tx3a_hex, True) tx3b = CTransaction() tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] tx3b.vout = [CTxOut(int(0.5*COIN), CScript([b'e']))] tx3b_hex = txToHex(tx3b) tx3c = CTransaction() tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)] tx3c.vout = [CTxOut(int(0.5*COIN), CScript([b'f']))] tx3c_hex = txToHex(tx3c) self.nodes[0].sendrawtransaction(tx3b_hex, True) # If tx3b was accepted, tx3c won't look like a replacement, # but make sure it is accepted anyway self.nodes[0].sendrawtransaction(tx3c_hex, True) def test_prioritised_transactions(self): # Ensure that fee deltas used via prioritisetransaction are # correctly used by replacement logic # 1. 
Check that feeperkb uses modified fees tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx1a_hex = txToHex(tx1a) tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, True) # Higher fee, but the actual fee per KB is much lower. tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))] tx1b_hex = txToHex(tx1b) # Verify tx1b cannot replace tx1a. try: tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) # Use prioritisetransaction to set tx1a's fee to 0. self.nodes[0].prioritisetransaction(tx1a_txid, 0, int(-0.1*COIN)) # Now tx1b should be able to replace tx1a tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, True) assert(tx1b_txid in self.nodes[0].getrawmempool()) # 2. Check that absolute fee checks use modified fee. tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) tx2a = CTransaction() tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2a.vout = [CTxOut(1*COIN, CScript([b'a']))] tx2a_hex = txToHex(tx2a) tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, True) # Lower fee, but we'll prioritise it tx2b = CTransaction() tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2b.vout = [CTxOut(int(1.01*COIN), CScript([b'a']))] tx2b.rehash() tx2b_hex = txToHex(tx2b) # Verify tx2b cannot replace tx2a. try: tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True) except JSONRPCException as exp: assert_equal(exp.error['code'], -26) else: assert(False) # Now prioritise tx2b to have a higher modified fee self.nodes[0].prioritisetransaction(tx2b.hash, 0, int(0.1*COIN)) # tx2b should now be accepted tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, True) assert(tx2b_txid in self.nodes[0].getrawmempool()) if __name__ == '__main__': ReplaceByFeeTest().main()
[]
[]
[]
[]
[]
python
null
null
null
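The test record above exercises Bitcoin Core's replace-by-fee policy. As a rough illustration of the two fee rules the test checks most often, here is a simplified sketch in Go with hypothetical names, not Bitcoin Core's actual types: a replacement must beat each conflicting transaction's fee rate and the conflicts' total absolute fee. The real policy additionally requires a relay-fee increment and enforces the MAX_REPLACEMENT_LIMIT eviction cap seen in the test.

package mempool

import "errors"

// TxFees is a hypothetical stand-in for a mempool entry's fee data.
type TxFees struct {
	Fee  int64 // satoshis
	Size int64 // serialized size in bytes, assumed positive
}

// CheckReplacement applies the two simplified RBF rules: higher fee rate
// than every conflict, and higher absolute fee than all conflicts combined.
func CheckReplacement(replacement TxFees, conflicts []TxFees) error {
	var totalFee int64
	for _, c := range conflicts {
		totalFee += c.Fee
		// Compare fee rates by cross-multiplication to avoid floating
		// point: replacement.Fee/replacement.Size > c.Fee/c.Size.
		if replacement.Fee*c.Size <= c.Fee*replacement.Size {
			return errors.New("insufficient fee rate")
		}
	}
	if replacement.Fee <= totalFee {
		return errors.New("insufficient absolute fee")
	}
	return nil
}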
topology-generator/cmd/instance/instance.go
package main

import (
	"context"
	"fmt"
	"log"
	"net/http"
	"os"
	"os/signal"
	"time"

	"github.com/gorilla/mux"
	"github.com/leandroberetta/mimik/pkg/api"
	"github.com/leandroberetta/mimik/pkg/service"
)

func main() {
	instance, _ := service.NewService(
		os.Getenv("MIMIK_SERVICE_NAME"),
		os.Getenv("MIMIK_SERVICE_PORT"),
		os.Getenv("MIMIK_ENDPOINTS_FILE"),
		service.GetVersion(os.Getenv("MIMIK_LABELS_FILE")))

	client := &http.Client{}

	r := mux.NewRouter()
	r.Path("/").HandlerFunc(service.EndpointHandler(instance, client))

	srv := &http.Server{
		Addr:    ":8080",
		Handler: r,
	}

	log.Println("serving at :8080")
	go srv.ListenAndServe()

	tc := make(chan struct{})
	if tg := os.Getenv("MIMIK_TRAFFIC_GENERATOR"); tg != "" {
		go generateTraffic(&instance, client, tc)
	}

	c := make(chan os.Signal, 1)
	signal.Notify(c, os.Interrupt)
	<-c

	close(tc)
	log.Println("shutting down")

	ctx, cancel := context.WithTimeout(context.Background(), time.Second*15)
	defer cancel()
	srv.Shutdown(ctx)
	log.Println("shutdown complete")
	os.Exit(0)
}

func generateTraffic(service *api.Service, client *http.Client, quit chan struct{}) {
	for {
		select {
		case <-quit:
			log.Println("stopping traffic generator")
			return
		default:
			for _, endpoint := range service.Endpoints {
				req, _ := http.NewRequest(endpoint.Method, fmt.Sprintf("http://localhost:%d", 8080), nil)
				resp, err := client.Do(req)
				if err != nil {
					// No response body exists when Do returns an error;
					// skip to the next endpoint instead of dereferencing nil.
					log.Println(err)
					continue
				}
				// Close immediately rather than defer: deferring inside an
				// endless loop would keep every body open until return.
				resp.Body.Close()
			}
			time.Sleep(1 * time.Second)
		}
	}
}
[ "\"MIMIK_SERVICE_NAME\"", "\"MIMIK_SERVICE_PORT\"", "\"MIMIK_ENDPOINTS_FILE\"", "\"MIMIK_LABELS_FILE\"", "\"MIMIK_TRAFFIC_GENERATOR\"" ]
[]
[ "MIMIK_SERVICE_NAME", "MIMIK_SERVICE_PORT", "MIMIK_TRAFFIC_GENERATOR", "MIMIK_LABELS_FILE", "MIMIK_ENDPOINTS_FILE" ]
[]
["MIMIK_SERVICE_NAME", "MIMIK_SERVICE_PORT", "MIMIK_TRAFFIC_GENERATOR", "MIMIK_LABELS_FILE", "MIMIK_ENDPOINTS_FILE"]
go
5
0
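The traffic-generator loop above was corrected to skip nil responses and to close bodies inside the loop. The hypothetical helper below isolates that pattern and adds one refinement worth noting: draining the body before closing it lets the http.Client reuse keep-alive connections instead of opening a new one per request.

package main

import (
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

// doRequest is an illustrative helper, not part of the record above.
func doRequest(client *http.Client, url string) {
	resp, err := client.Get(url)
	if err != nil {
		log.Println(err) // no response body exists on error
		return
	}
	// Drain the body so the underlying connection can be reused,
	// then close it promptly rather than deferring in a loop.
	io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()
}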
tasks/collections/main.go
package main

import (
	"os"

	"github.com/bjartek/overflow/overflow"
)

func main() {
	o := overflow.NewOverflowMainnet().Start()
	account := os.Getenv("account")
	o.ScriptFromFile("collections").Args(o.Arguments().RawAccount(account)).Run()
}
[ "\"account\"" ]
[]
[ "account" ]
[]
["account"]
go
1
0
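The script above passes os.Getenv("account") straight through, so an unset variable silently becomes an empty argument. A hypothetical fail-fast variant, shown only as a sketch:

package main

import (
	"log"
	"os"

	"github.com/bjartek/overflow/overflow"
)

func main() {
	account := os.Getenv("account")
	if account == "" {
		// Abort early instead of running the script with an empty account.
		log.Fatal("the account environment variable must be set")
	}
	o := overflow.NewOverflowMainnet().Start()
	o.ScriptFromFile("collections").Args(o.Arguments().RawAccount(account)).Run()
}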
pkg/plugins/runtime/gateway/route_configuration_generator.go
package gateway

import (
	mesh_proto "github.com/kumahq/kuma/api/mesh/v1alpha1"
	core_xds "github.com/kumahq/kuma/pkg/core/xds"
	xds_context "github.com/kumahq/kuma/pkg/xds/context"
	envoy_routes "github.com/kumahq/kuma/pkg/xds/envoy/routes"
)

// RouteConfigurationGenerator generates route configurations for Kuma
// gateway listeners.
type RouteConfigurationGenerator struct{}

func (*RouteConfigurationGenerator) SupportsProtocol(p mesh_proto.Gateway_Listener_Protocol) bool {
	switch p {
	case mesh_proto.Gateway_Listener_UDP,
		mesh_proto.Gateway_Listener_TCP,
		mesh_proto.Gateway_Listener_TLS,
		mesh_proto.Gateway_Listener_HTTP,
		mesh_proto.Gateway_Listener_HTTPS:
		return true
	default:
		return false
	}
}

func (*RouteConfigurationGenerator) GenerateHost(ctx xds_context.Context, info *GatewayResourceInfo) (*core_xds.ResourceSet, error) {
	if info.Resources.RouteConfiguration != nil {
		return nil, nil
	}

	info.Resources.RouteConfiguration = envoy_routes.NewRouteConfigurationBuilder(info.Proxy.APIVersion).
		Configure(
			envoy_routes.CommonRouteConfiguration(info.Listener.ResourceName),
			// TODO(jpeach) propagate merged listener tags.
			// Ideally we would propagate the tags header
			// to mesh services but not to external services,
			// but in the route configuration, we don't know
			// yet where the request will route to.
			// envoy_routes.TagsHeader(...),
			envoy_routes.ResetTagsHeader(),
		)

	// TODO(jpeach) apply additional route configuration.
	return nil, nil
}
[]
[]
[]
[]
[]
go
null
null
null
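The SupportsProtocol guard above implies a dispatcher that filters generators by listener protocol. The self-contained sketch below illustrates that pattern; every type and name in it is hypothetical, not Kuma's actual API.

package main

import "fmt"

type protocol string

type generator interface {
	SupportsProtocol(p protocol) bool
	Generate(listener string) error
}

// tcpOnly is a toy generator that only handles TCP listeners.
type tcpOnly struct{}

func (tcpOnly) SupportsProtocol(p protocol) bool { return p == "TCP" }
func (tcpOnly) Generate(listener string) error {
	fmt.Println("generated resources for", listener)
	return nil
}

func main() {
	generators := []generator{tcpOnly{}}
	listeners := map[string]protocol{"edge": "TCP", "dns": "UDP"}
	for name, proto := range listeners {
		for _, g := range generators {
			if !g.SupportsProtocol(proto) {
				continue // skip unsupported protocols, as the guard above intends
			}
			if err := g.Generate(name); err != nil {
				fmt.Println("error:", err)
			}
		}
	}
}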
pkg/virt-launcher/virtwrap/network/common.go
/* * This file is part of the KubeVirt project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Copyright 2018 Red Hat, Inc. * */ //go:generate mockgen -source $GOFILE -package=$GOPACKAGE -destination=generated_mock_$GOFILE package network import ( "crypto/rand" "encoding/json" "fmt" "io/ioutil" "net" "os" "os/exec" "github.com/coreos/go-iptables/iptables" lmf "github.com/subgraph/libmacouflage" "github.com/vishvananda/netlink" v1 "kubevirt.io/client-go/api/v1" "kubevirt.io/client-go/log" "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/api" "kubevirt.io/kubevirt/pkg/virt-launcher/virtwrap/network/dhcp" netutils "k8s.io/utils/net" ) const randomMacGenerationAttempts = 10 type VIF struct { Name string IP netlink.Addr IPv6 netlink.Addr MAC net.HardwareAddr Gateway net.IP GatewayIpv6 net.IP Routes *[]netlink.Route Mtu uint16 IPAMDisabled bool } type CriticalNetworkError struct { msg string } func (e *CriticalNetworkError) Error() string { return e.msg } func (vif VIF) String() string { return fmt.Sprintf( "VIF: { Name: %s, IP: %s, Mask: %s, MAC: %s, Gateway: %s, MTU: %d, IPAMDisabled: %t}", vif.Name, vif.IP.IP, vif.IP.Mask, vif.MAC, vif.Gateway, vif.Mtu, vif.IPAMDisabled, ) } type NetworkHandler interface { LinkByName(name string) (netlink.Link, error) AddrList(link netlink.Link, family int) ([]netlink.Addr, error) RouteList(link netlink.Link, family int) ([]netlink.Route, error) AddrDel(link netlink.Link, addr *netlink.Addr) error AddrAdd(link netlink.Link, addr *netlink.Addr) error LinkSetDown(link netlink.Link) error LinkSetUp(link netlink.Link) error LinkAdd(link netlink.Link) error LinkSetLearningOff(link netlink.Link) error ParseAddr(s string) (*netlink.Addr, error) GetHostAndGwAddressesFromCIDR(s string) (string, string, error) SetRandomMac(iface string) (net.HardwareAddr, error) GenerateRandomMac() (net.HardwareAddr, error) GetMacDetails(iface string) (net.HardwareAddr, error) LinkSetMaster(link netlink.Link, master *netlink.Bridge) error StartDHCP(nic *VIF, serverAddr *netlink.Addr, bridgeInterfaceName string, dhcpOptions *v1.DHCPOptions) error HasNatIptables(proto iptables.Protocol) bool IsIpv6Enabled() bool ConfigureIpv6Forwarding() error IptablesNewChain(proto iptables.Protocol, table, chain string) error IptablesAppendRule(proto iptables.Protocol, table, chain string, rulespec ...string) error NftablesNewChain(proto iptables.Protocol, table, chain string) error NftablesAppendRule(proto iptables.Protocol, table, chain string, rulespec ...string) error NftablesLoad(fnName string) error GetNFTIPString(proto iptables.Protocol) string } type NetworkUtilsHandler struct{} var Handler NetworkHandler func (h *NetworkUtilsHandler) LinkByName(name string) (netlink.Link, error) { return netlink.LinkByName(name) } func (h *NetworkUtilsHandler) AddrList(link netlink.Link, family int) ([]netlink.Addr, error) { return netlink.AddrList(link, family) } func (h *NetworkUtilsHandler) RouteList(link netlink.Link, family int) ([]netlink.Route, error) { return netlink.RouteList(link, family) } func (h 
func (h *NetworkUtilsHandler) AddrDel(link netlink.Link, addr *netlink.Addr) error {
	return netlink.AddrDel(link, addr)
}

func (h *NetworkUtilsHandler) LinkSetDown(link netlink.Link) error {
	return netlink.LinkSetDown(link)
}

func (h *NetworkUtilsHandler) LinkSetUp(link netlink.Link) error {
	return netlink.LinkSetUp(link)
}

func (h *NetworkUtilsHandler) LinkAdd(link netlink.Link) error {
	return netlink.LinkAdd(link)
}

func (h *NetworkUtilsHandler) LinkSetLearningOff(link netlink.Link) error {
	return netlink.LinkSetLearning(link, false)
}

func (h *NetworkUtilsHandler) ParseAddr(s string) (*netlink.Addr, error) {
	return netlink.ParseAddr(s)
}

func (h *NetworkUtilsHandler) AddrAdd(link netlink.Link, addr *netlink.Addr) error {
	return netlink.AddrAdd(link, addr)
}

func (h *NetworkUtilsHandler) LinkSetMaster(link netlink.Link, master *netlink.Bridge) error {
	return netlink.LinkSetMaster(link, master)
}

func (h *NetworkUtilsHandler) HasNatIptables(proto iptables.Protocol) bool {
	iptablesObject, err := iptables.NewWithProtocol(proto)
	if err != nil {
		log.Log.V(5).Reason(err).Infof("No iptables")
		return false
	}

	_, err = iptablesObject.List("nat", "OUTPUT")
	if err != nil {
		log.Log.V(5).Reason(err).Infof("No nat iptables")
		return false
	}

	return true
}

func (h *NetworkUtilsHandler) ConfigureIpv6Forwarding() error {
	_, err := exec.Command("sysctl", "net.ipv6.conf.all.forwarding=1").CombinedOutput()
	return err
}

func (h *NetworkUtilsHandler) IsIpv6Enabled() bool {
	podIp := os.Getenv("MY_POD_IP")
	if !netutils.IsIPv6String(podIp) {
		log.Log.V(5).Info("Since the pod IP is not IPv6, IPv6 is disabled")
		return false
	}
	return true
}

func (h *NetworkUtilsHandler) IptablesNewChain(proto iptables.Protocol, table, chain string) error {
	iptablesObject, err := iptables.NewWithProtocol(proto)
	if err != nil {
		return err
	}

	return iptablesObject.NewChain(table, chain)
}

func (h *NetworkUtilsHandler) IptablesAppendRule(proto iptables.Protocol, table, chain string, rulespec ...string) error {
	iptablesObject, err := iptables.NewWithProtocol(proto)
	if err != nil {
		return err
	}

	return iptablesObject.Append(table, chain, rulespec...)
}

func (h *NetworkUtilsHandler) NftablesNewChain(proto iptables.Protocol, table, chain string) error {
	output, err := exec.Command("nft", "add", "chain", Handler.GetNFTIPString(proto), table, chain).CombinedOutput()
	if err != nil {
		return fmt.Errorf("%s", string(output))
	}

	return nil
}

func (h *NetworkUtilsHandler) NftablesAppendRule(proto iptables.Protocol, table, chain string, rulespec ...string) error {
	cmd := append([]string{"add", "rule", Handler.GetNFTIPString(proto), table, chain}, rulespec...)
	output, err := exec.Command("nft", cmd...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("failed to append new nft rule: %s", string(output))
	}

	return nil
}
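// Illustrative usage of the nftables wrappers above (hypothetical table, chain
// and rulespec values; not part of the original file). The call
//
//	err := Handler.NftablesAppendRule(iptables.ProtocolIPv4, "nat", "KUBEVIRT_PREINBOUND",
//	    "tcp", "dport", "80", "counter", "dnat", "to", "10.0.2.2")
//
// shells out to:
//
//	nft add rule ip nat KUBEVIRT_PREINBOUND tcp dport 80 counter dnat to 10.0.2.2
//
// With iptables.ProtocolIPv6, GetNFTIPString makes the address-family token
// "ip6" instead of "ip".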
func (h *NetworkUtilsHandler) GetNFTIPString(proto iptables.Protocol) string {
	if proto == iptables.ProtocolIPv6 {
		return "ip6"
	}
	return "ip"
}

func (h *NetworkUtilsHandler) NftablesLoad(fnName string) error {
	output, err := exec.Command("nft", "-f", fmt.Sprintf("/etc/nftables/%s.nft", fnName)).CombinedOutput()
	if err != nil {
		log.Log.V(5).Reason(err).Infof("failed to load nftable %s", fnName)
		return fmt.Errorf("failed to load nftable %s: %s", fnName, string(output))
	}

	return nil
}

func (h *NetworkUtilsHandler) GetHostAndGwAddressesFromCIDR(s string) (string, string, error) {
	ip, ipnet, err := net.ParseCIDR(s)
	if err != nil {
		return "", "", err
	}

	subnet, _ := ipnet.Mask.Size()
	var ips []string
	for ip := ip.Mask(ipnet.Mask); ipnet.Contains(ip); inc(ip) {
		ips = append(ips, fmt.Sprintf("%s/%d", ip.String(), subnet))

		if len(ips) == 4 {
			// skip the network address and broadcast address
			return ips[1], ips[2], nil
		}
	}

	return "", "", fmt.Errorf("less than 4 addresses on network")
}

func inc(ip net.IP) {
	for j := len(ip) - 1; j >= 0; j-- {
		ip[j]++
		if ip[j] > 0 {
			break
		}
	}
}

// GetMacDetails returns the current MAC address of an interface
func (h *NetworkUtilsHandler) GetMacDetails(iface string) (net.HardwareAddr, error) {
	currentMac, err := lmf.GetCurrentMac(iface)
	if err != nil {
		log.Log.Reason(err).Errorf("failed to get mac information for interface: %s", iface)
		return nil, err
	}
	return currentMac, nil
}

// SetRandomMac changes the MAC address for a given interface to a randomly
// generated one, preserving the vendor prefix, and returns the original MAC address
func (h *NetworkUtilsHandler) SetRandomMac(iface string) (net.HardwareAddr, error) {
	var mac net.HardwareAddr

	currentMac, err := Handler.GetMacDetails(iface)
	if err != nil {
		return nil, err
	}

	changed := false
	for i := 0; i < randomMacGenerationAttempts; i++ {
		changed, err = lmf.SpoofMacSameVendor(iface, false)
		if err != nil {
			log.Log.Reason(err).Errorf("failed to spoof MAC for an interface: %s", iface)
			return nil, err
		}

		if changed {
			mac, err = Handler.GetMacDetails(iface)
			if err != nil {
				return nil, err
			}
			log.Log.Infof("updated MAC for %s interface: old: %s -> new: %s", iface, currentMac, mac)
			break
		}
	}

	if !changed {
		err := fmt.Errorf("failed to spoof MAC for an interface %s after %d attempts", iface, randomMacGenerationAttempts)
		log.Log.Reason(err)
		return nil, err
	}
	return currentMac, nil
}

func (h *NetworkUtilsHandler) StartDHCP(nic *VIF, serverAddr *netlink.Addr, bridgeInterfaceName string, dhcpOptions *v1.DHCPOptions) error {
	log.Log.V(4).Infof("StartDHCP network Nic: %+v", nic)
	nameservers, searchDomains, err := api.GetResolvConfDetailsFromPod()
	if err != nil {
		return fmt.Errorf("failed to get DNS servers from resolv.conf: %v", err)
	}

	// panic in case the DHCP server failed during the vm creation
	// but ignore dhcp errors when the vm is destroyed or shutting down
	go func() {
		if err = DHCPServer(
			nic.MAC,
			nic.IP.IP,
			nic.IP.Mask,
			bridgeInterfaceName,
			serverAddr.IP,
			nic.Gateway,
			nameservers,
			nic.Routes,
			searchDomains,
			nic.Mtu,
			dhcpOptions,
		); err != nil {
			log.Log.Errorf("failed to run DHCP: %v", err)
			panic(err)
		}
	}()

	return nil
}
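// Illustrative example for GetHostAndGwAddressesFromCIDR (not part of the
// original file): the helper walks the subnet starting at its network address
// and returns the first two usable addresses, so for example
//
//	host, gw, err := Handler.GetHostAndGwAddressesFromCIDR("10.0.2.0/24")
//	// host == "10.0.2.1/24", gw == "10.0.2.2/24", err == nil
//
// Networks with fewer than four addresses (e.g. a /31) yield an error instead.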
// GenerateRandomMac generates a random MAC address for an interface.
// Avoid MAC addresses starting with the reserved value 0xFE
// (https://github.com/kubevirt/kubevirt/issues/1494)
func (h *NetworkUtilsHandler) GenerateRandomMac() (net.HardwareAddr, error) {
	prefix := []byte{0x02, 0x00, 0x00} // local unicast prefix
	suffix := make([]byte, 3)
	_, err := rand.Read(suffix)
	if err != nil {
		return nil, err
	}
	return net.HardwareAddr(append(prefix, suffix...)), nil
}

// Allow mocking for tests
var SetupPodNetworkPhase1 = SetupNetworkInterfacesPhase1
var SetupPodNetworkPhase2 = SetupNetworkInterfacesPhase2
var DHCPServer = dhcp.SingleClientDHCPServer

func initHandler() {
	if Handler == nil {
		Handler = &NetworkUtilsHandler{}
	}
}

func writeToCachedFile(inter interface{}, fileName, pid, name string) error {
	buf, err := json.MarshalIndent(&inter, "", " ")
	if err != nil {
		return fmt.Errorf("error marshaling cached object: %v", err)
	}

	fileName = getInterfaceCacheFile(fileName, pid, name)

	err = ioutil.WriteFile(fileName, buf, 0644)
	if err != nil {
		return fmt.Errorf("error writing cached object: %v", err)
	}
	return nil
}

func readFromCachedFile(pid, name, fileName string, inter interface{}) (bool, error) {
	buf, err := ioutil.ReadFile(getInterfaceCacheFile(fileName, pid, name))
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil
		}
		return false, err
	}

	err = json.Unmarshal(buf, &inter)
	if err != nil {
		return false, fmt.Errorf("error unmarshaling cached object: %v", err)
	}
	return true, nil
}

func getInterfaceCacheFile(filePath, pid, name string) string {
	return fmt.Sprintf(filePath, pid, name)
}

// filterPodNetworkRoutes filters out irrelevant routes
func filterPodNetworkRoutes(routes []netlink.Route, nic *VIF) (filteredRoutes []netlink.Route) {
	for _, route := range routes {
		// don't create empty static routes
		if route.Dst == nil && route.Src.Equal(nil) && route.Gw.Equal(nil) {
			continue
		}

		// don't create static route for src == nic
		if route.Src != nil && route.Src.Equal(nic.IP.IP) {
			continue
		}

		filteredRoutes = append(filteredRoutes, route)
	}
	return
}

// only used by unit test suite
func setInterfaceCacheFile(path string) {
	interfaceCacheFile = path
}

func setVifCacheFile(path string) {
	vifCacheFile = path
}
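// Illustrative round trip through the cache helpers above (not part of the
// original file; the path template here is hypothetical, the real
// interfaceCacheFile/vifCacheFile templates are defined elsewhere in the
// package). getInterfaceCacheFile treats the template as a fmt pattern filled
// with the pid and interface name:
//
//	const cacheTemplate = "/tmp/interface-cache-%s-%s.json"
//	vif := VIF{Name: "eth0", Mtu: 1500}
//	if err := writeToCachedFile(vif, cacheTemplate, "1234", "default"); err == nil {
//	    var cached VIF
//	    found, _ := readFromCachedFile("1234", "default", cacheTemplate, &cached)
//	    // found == true, cached.Name == "eth0"
//	}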
[ "\"MY_POD_IP\"" ]
[]
[ "MY_POD_IP" ]
[]
["MY_POD_IP"]
go
1
0