Dataset columns (name: type, observed length range or distinct values):

  filename          string, length 4 to 198
  content           string, length 25 to 939k
  environment       list
  variablearg       list
  constarg          list
  variableargjson   string, 1 distinct value
  constargjson      string, length 2 to 3.9k
  lang              string, 3 distinct values
  constargcount     float64, 0 to 129
  variableargcount  float64, 0 to 0
  sentence          string, 1 distinct value
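Each record below pairs a source file with the environment-variable lookups extracted from it. If this dump comes from a Hugging Face dataset (the column summary above matches that viewer's output), the rows can be inspected as sketched here; this is a minimal, hedged example in which "user/env-var-usage" is a placeholder repository id and only the field names listed above are assumed.

from datasets import load_dataset

ds = load_dataset("user/env-var-usage", split="train")  # placeholder repo id

for row in ds.select(range(2)):
    print(row["filename"], "(%s)" % row["lang"])
    print("  environment:   ", row["environment"])
    print("  constarg:      ", row["constarg"])
    print("  constargcount: ", row["constargcount"])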
filename: webhawk/services/config_loader.py
content:
import os

from lib import common

__author__ = "Dimi Balaouras"
__copyright__ = "Copyright 2016, Stek.io"
__abs_dirpath__ = os.path.dirname(os.path.abspath(__file__))


class ConfigLoader(object):
    """
    WebHawk Configuration Abstraction
    """

    def __init__(self, cli_options=None):
        """
        Class Initializer
        :param cli_options: (optional) Command line options
        """

        # Initialize instance variables
        self._cli_options = cli_options
        self._config_cache = {}
        self._file_config = None

        # We always load the default yaml file
        config_file_csv = "%s/../../config/config.yaml" % __abs_dirpath__

        # Override config if an additional config file is supplied
        config_file = self._get_configuration("config_file")
        if config_file:
            config_file_csv += ",%s" % config_file

        # Load config from file
        self._file_config = self._sanitize_config(common.load_config(config_file_csv))

    def _sanitize_config(self, config):
        """
        Validates a configuration dictionary
        :param config: The configuration dictionary to validate
        :return: The validated configuration dictionary
        """

        # Ensure we have a path starting with a slash
        if not config["base_path"] or config["base_path"][:1] != "/":
            config["base_path"] = "/%s" % config["base_path"]

        return config

    def _get_configuration(self, name):
        """
        Get configuration from: command line options, OR environment OR config file
        :param name: The name of the configuration
        :return: The configuration value if found; None otherwise
        """

        # Initialize return value
        value = None

        if not self._config_cache.get(name, None):

            # Try cli options first
            if value is None and self._cli_options:
                value = getattr(self._cli_options, name, None)

            # Try environment next
            if value is None:
                value = os.environ.get("WEBHAWK_%s" % name.upper(), None)

            # Finally, try existing config
            if value is None and self._file_config:
                value = self._file_config.get(name, None)

            # Store value in local cache
            self._config_cache[name] = value

        else:
            value = self._config_cache[name]

        return value

    def __getitem__(self, name):
        """
        Load a configuration by name
        :param name: The name of the configuration
        :return: The configuration value
        """
        return self._get_configuration(name=name)
environment: []
variablearg: []
constarg: [ "WEBHAWK_%s\" % name.upper" ]
variableargjson: []
constargjson: ["WEBHAWK_%s\" % name.upper"]
lang: python
constargcount: 1
variableargcount: 0
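For this record, constarg and constargjson hold the expression passed to os.environ.get in the loader above, while variablearg stays empty. The tooling that produced these fields is not shown in this dump; the sketch below is one assumed way to recover the first argument of os.environ.get / os.getenv calls from Python source with the standard ast module. Note that ast.unparse renders the argument as 'WEBHAWK_%s' % name.upper(), a slightly different textual form than the value stored in constarg.

import ast

ENV_LOOKUPS = {"os.environ.get", "os.getenv"}

def env_lookup_args(source):
    """Return the source text of the first argument of each env-var lookup call."""
    args = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, ast.Call) and node.args:
            callee = ast.unparse(node.func)  # ast.unparse requires Python 3.9+
            if callee in ENV_LOOKUPS:
                args.append(ast.unparse(node.args[0]))
    return args

Run against the config_loader.py content above, env_lookup_args returns ["'WEBHAWK_%s' % name.upper()"], i.e. a single lookup argument, consistent with constargcount=1 and variableargcount=0.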
filename: truststore_darwin.go
content:
// Copyright 2018 The mkcert Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package main

import (
	"bytes"
	"encoding/asn1"
	"io/ioutil"
	"log"
	"os"
	"os/exec"
	"path/filepath"

	"howett.net/plist"
)

var (
	FirefoxProfile      = os.Getenv("HOME") + "/Library/Application Support/Firefox/Profiles/*"
	CertutilInstallHelp = "brew install nss"
	NSSBrowsers         = "Firefox"
)

// https://github.com/golang/go/issues/24652#issuecomment-399826583
var trustSettings []interface{}
var _, _ = plist.Unmarshal(trustSettingsData, &trustSettings)
var trustSettingsData = []byte(`
<array>
	<dict>
		<key>kSecTrustSettingsPolicy</key>
		<data>
		KoZIhvdjZAED
		</data>
		<key>kSecTrustSettingsPolicyName</key>
		<string>sslServer</string>
		<key>kSecTrustSettingsResult</key>
		<integer>1</integer>
	</dict>
	<dict>
		<key>kSecTrustSettingsPolicy</key>
		<data>
		KoZIhvdjZAEC
		</data>
		<key>kSecTrustSettingsPolicyName</key>
		<string>basicX509</string>
		<key>kSecTrustSettingsResult</key>
		<integer>1</integer>
	</dict>
</array>
`)

func (m *mkcert) installPlatform() bool {
	cmd := exec.Command("sudo", "security", "add-trusted-cert", "-d", "-k", "/Library/Keychains/System.keychain", filepath.Join(m.CAROOT, rootName))
	out, err := cmd.CombinedOutput()
	fatalIfCmdErr(err, "security add-trusted-cert", out)

	// Make trustSettings explicit, as older Go does not know the defaults.
	// https://github.com/golang/go/issues/24652

	plistFile, err := ioutil.TempFile("", "trust-settings")
	fatalIfErr(err, "failed to create temp file")
	defer os.Remove(plistFile.Name())

	cmd = exec.Command("sudo", "security", "trust-settings-export", "-d", plistFile.Name())
	out, err = cmd.CombinedOutput()
	fatalIfCmdErr(err, "security trust-settings-export", out)

	plistData, err := ioutil.ReadFile(plistFile.Name())
	fatalIfErr(err, "failed to read trust settings")

	var plistRoot map[string]interface{}
	_, err = plist.Unmarshal(plistData, &plistRoot)
	fatalIfErr(err, "failed to parse trust settings")

	rootSubjectASN1, _ := asn1.Marshal(m.caCert.Subject.ToRDNSequence())

	if plistRoot["trustVersion"].(uint64) != 1 {
		log.Fatalln("ERROR: unsupported trust settings version:", plistRoot["trustVersion"])
	}
	trustList := plistRoot["trustList"].(map[string]interface{})
	for key := range trustList {
		entry := trustList[key].(map[string]interface{})
		if _, ok := entry["issuerName"]; !ok {
			continue
		}
		issuerName := entry["issuerName"].([]byte)
		if !bytes.Equal(rootSubjectASN1, issuerName) {
			continue
		}
		entry["trustSettings"] = trustSettings
		break
	}

	plistData, err = plist.MarshalIndent(plistRoot, plist.XMLFormat, "\t")
	fatalIfErr(err, "failed to serialize trust settings")
	err = ioutil.WriteFile(plistFile.Name(), plistData, 0600)
	fatalIfErr(err, "failed to write trust settings")

	cmd = exec.Command("sudo", "security", "trust-settings-import", "-d", plistFile.Name())
	out, err = cmd.CombinedOutput()
	fatalIfCmdErr(err, "security trust-settings-import", out)

	return true
}

func (m *mkcert) uninstallPlatform() bool {
	cmd := exec.Command("sudo", "security", "remove-trusted-cert", "-d", filepath.Join(m.CAROOT, rootName))
	out, err := cmd.CombinedOutput()
	fatalIfCmdErr(err, "security remove-trusted-cert", out)
	return true
}
environment: [ "\"HOME\"" ]
variablearg: []
constarg: [ "HOME" ]
variableargjson: []
constargjson: ["HOME"]
lang: go
constargcount: 1
variableargcount: 0
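The Go record stores the literal passed to os.Getenv: quoted as "HOME" in environment and bare as HOME in constarg/constargjson. The dataset's actual extraction rule for Go is likewise not shown here; the sketch below is a hedged Python illustration that assumes literal-only arguments and uses a simple regular expression, which is enough to reproduce this row's value.

import re

GETENV_RE = re.compile(r'os\.Getenv\(\s*"([^"]*)"\s*\)')

def go_env_literals(source):
    """Return the quoted names passed to os.Getenv in a Go source string."""
    return GETENV_RE.findall(source)

Applied to the truststore_darwin.go content above, go_env_literals returns ['HOME'].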
filename: src/python/turicreate/data_structures/sframe.py
content:
# -*- coding: utf-8 -*- # Copyright © 2017 Apple Inc. All rights reserved. # # Use of this source code is governed by a BSD-3-clause license that can # be found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause ''' This module defines the SFrame class which provides the ability to create, access and manipulate a remote scalable dataframe object. SFrame acts similarly to pandas.DataFrame, but the data is completely immutable and is stored column wise. ''' from __future__ import print_function as _ from __future__ import division as _ from __future__ import absolute_import as _ from .._connect import main as glconnect from .._cython.cy_flexible_type import infer_type_of_list from .._cython.context import debug_trace as cython_context from .._cython.cy_sframe import UnitySFrameProxy from ..util import _is_non_string_iterable, _make_internal_url from ..util import _infer_dbapi2_types from ..util import _get_module_from_object, _pytype_to_printf from ..visualization import _get_client_app_path from .sarray import SArray, _create_sequential_sarray from .. import aggregate from .image import Image as _Image from .._deps import pandas, numpy, HAS_PANDAS, HAS_NUMPY from .grouped_sframe import GroupedSFrame from ..visualization import Plot import array from prettytable import PrettyTable from textwrap import wrap import datetime import time import itertools import logging as _logging import numbers import sys import six import csv from collections import Iterable as _Iterable __all__ = ['SFrame'] __LOGGER__ = _logging.getLogger(__name__) FOOTER_STRS = ['Note: Only the head of the SFrame is printed.', 'You can use print_rows(num_rows=m, num_columns=n) to print more rows and columns.'] LAZY_FOOTER_STRS = ['Note: Only the head of the SFrame is printed. This SFrame is lazily evaluated.', 'You can use sf.materialize() to force materialization.'] if sys.version_info.major > 2: long = int def load_sframe(filename): """ Load an SFrame. The filename extension is used to determine the format automatically. This function is particularly useful for SFrames previously saved in binary format. For CSV imports the ``SFrame.read_csv`` function provides greater control. If the SFrame is in binary format, ``filename`` is actually a directory, created when the SFrame is saved. Parameters ---------- filename : string Location of the file to load. Can be a local path or a remote URL. Returns ------- out : SFrame See Also -------- SFrame.save, SFrame.read_csv Examples -------- >>> sf = turicreate.SFrame({'id':[1,2,3], 'val':['A','B','C']}) >>> sf.save('my_sframe') # 'my_sframe' is a directory >>> sf_loaded = turicreate.load_sframe('my_sframe') """ sf = SFrame(data=filename) return sf def _get_global_dbapi_info(dbapi_module, conn): """ Fetches all needed information from the top-level DBAPI module, guessing at the module if it wasn't passed as a parameter. Returns a dictionary of all the needed variables. This is put in one place to make sure the error message is clear if the module "guess" is wrong. """ module_given_msg = "The DBAPI2 module given ({0}) is missing the global\n"+\ "variable '{1}'. Please make sure you are supplying a module that\n"+\ "conforms to the DBAPI 2.0 standard (PEP 0249)." module_not_given_msg = "Hello! 
I gave my best effort to find the\n"+\ "top-level module that the connection object you gave me came from.\n"+\ "I found '{0}' which doesn't have the global variable '{1}'.\n"+\ "To avoid this confusion, you can pass the module as a parameter using\n"+\ "the 'dbapi_module' argument to either from_sql or to_sql." if dbapi_module is None: dbapi_module = _get_module_from_object(conn) module_given = False else: module_given = True module_name = dbapi_module.__name__ if hasattr(dbapi_module, '__name__') else None needed_vars = ['apilevel','paramstyle','Error','DATETIME','NUMBER','ROWID'] ret_dict = {} ret_dict['module_name'] = module_name for i in needed_vars: tmp = None try: tmp = eval("dbapi_module."+i) except AttributeError as e: # Some DBs don't actually care about types, so they won't define # the types. These are the ACTUALLY needed variables though if i not in ['apilevel','paramstyle','Error']: pass elif module_given: raise AttributeError(module_given_msg.format(module_name, i)) else: raise AttributeError(module_not_given_msg.format(module_name, i)) ret_dict[i] = tmp try: if ret_dict['apilevel'][0:3] != "2.0": raise NotImplementedError("Unsupported API version " +\ str(ret_dict['apilevel']) + ". Only DBAPI 2.0 is supported.") except TypeError as e: e.message = "Module's 'apilevel' value is invalid." raise e acceptable_paramstyles = ['qmark','numeric','named','format','pyformat'] try: if ret_dict['paramstyle'] not in acceptable_paramstyles: raise TypeError("Module's 'paramstyle' value is invalid.") except TypeError as e: raise TypeError("Module's 'paramstyle' value is invalid.") return ret_dict def _convert_rows_to_builtin_seq(data): # Flexible type expects a builtin type (like list or tuple) for conversion. # Some DBAPI modules abstract rows as classes that act as single sequences # and this allows these to work with flexible type. list is chosen to allow # mutation in case we need to force cast any entries if len(data) > 0 and type(data[0]) != list: data = [list(row) for row in data] return data # Expects list of tuples def _force_cast_sql_types(data, result_types, force_cast_cols): if len(force_cast_cols) == 0: return data ret_data = [] for row in data: for idx in force_cast_cols: if row[idx] is not None and result_types[idx] != datetime.datetime: row[idx] = result_types[idx](row[idx]) ret_data.append(row) return ret_data class SFrame(object): """ A tabular, column-mutable dataframe object that can scale to big data. The data in SFrame is stored column-wise, and is stored on persistent storage (e.g. disk) to avoid being constrained by memory size. Each column in an SFrame is a size-immutable :class:`~turicreate.SArray`, but SFrames are mutable in that columns can be added and subtracted with ease. An SFrame essentially acts as an ordered dict of SArrays. Currently, we support constructing an SFrame from the following data formats: * csv file (comma separated value) * sframe directory archive (A directory where an sframe was saved previously) * general text file (with csv parsing options, See :py:meth:`read_csv()`) * a Python dictionary * pandas.DataFrame * JSON and from the following sources: * your local file system * a network file system mounted locally * HDFS * Amazon S3 * HTTP(S). Only basic examples of construction are covered here. For more information and examples, please see the `User Guide <https://apple.github.io/turicreate/docs/user guide/index.html#Working_with_data_Tabular_data>`_. 
Parameters ---------- data : array | pandas.DataFrame | string | dict, optional The actual interpretation of this field is dependent on the ``format`` parameter. If ``data`` is an array or Pandas DataFrame, the contents are stored in the SFrame. If ``data`` is a string, it is interpreted as a file. Files can be read from local file system or urls (local://, hdfs://, s3://, http://). format : string, optional Format of the data. The default, "auto" will automatically infer the input data format. The inference rules are simple: If the data is an array or a dataframe, it is associated with 'array' and 'dataframe' respectively. If the data is a string, it is interpreted as a file, and the file extension is used to infer the file format. The explicit options are: - "auto" - "array" - "dict" - "sarray" - "dataframe" - "csv" - "tsv" - "sframe". See Also -------- read_csv: Create a new SFrame from a csv file. Preferred for text and CSV formats, because it has a lot more options for controlling the parser. save : Save an SFrame for later use. Notes ----- - When reading from HDFS on Linux we must guess the location of your java installation. By default, we will use the location pointed to by the JAVA_HOME environment variable. If this is not set, we check many common installation paths. You may use two environment variables to override this behavior. TURI_JAVA_HOME allows you to specify a specific java installation and overrides JAVA_HOME. TURI_LIBJVM_DIRECTORY overrides all and expects the exact directory that your preferred libjvm.so file is located. Use this ONLY if you'd like to use a non-standard JVM. Examples -------- >>> import turicreate >>> from turicreate import SFrame **Construction** Construct an SFrame from a dataframe and transfers the dataframe object across the network. >>> df = pandas.DataFrame() >>> sf = SFrame(data=df) Construct an SFrame from a local csv file (only works for local server). >>> sf = SFrame(data='~/mydata/foo.csv') Construct an SFrame from a csv file on Amazon S3. This requires the environment variables: *AWS_ACCESS_KEY_ID* and *AWS_SECRET_ACCESS_KEY* to be set before the python session started. >>> sf = SFrame(data='s3://mybucket/foo.csv') Read from HDFS using a specific java installation (environment variable only applies when using Linux) >>> import os >>> os.environ['TURI_JAVA_HOME'] = '/my/path/to/java' >>> from turicreate import SFrame >>> sf = SFrame("hdfs://mycluster.example.com:8020/user/myname/coolfile.txt") An SFrame can be constructed from a dictionary of values or SArrays: >>> sf = tc.SFrame({'id':[1,2,3],'val':['A','B','C']}) >>> sf Columns: id int val str Rows: 3 Data: id val 0 1 A 1 2 B 2 3 C Or equivalently: >>> ids = SArray([1,2,3]) >>> vals = SArray(['A','B','C']) >>> sf = SFrame({'id':ids,'val':vals}) It can also be constructed from an array of SArrays in which case column names are automatically assigned. >>> ids = SArray([1,2,3]) >>> vals = SArray(['A','B','C']) >>> sf = SFrame([ids, vals]) >>> sf Columns: X1 int X2 str Rows: 3 Data: X1 X2 0 1 A 1 2 B 2 3 C If the SFrame is constructed from a list of values, an SFrame of a single column is constructed. >>> sf = SFrame([1,2,3]) >>> sf Columns: X1 int Rows: 3 Data: X1 0 1 1 2 2 3 **Parsing** The :py:func:`turicreate.SFrame.read_csv()` is quite powerful and, can be used to import a variety of row-based formats. 
First, some simple cases: >>> !cat ratings.csv user_id,movie_id,rating 10210,1,1 10213,2,5 10217,2,2 10102,1,3 10109,3,4 10117,5,2 10122,2,4 10114,1,5 10125,1,1 >>> tc.SFrame.read_csv('ratings.csv') Columns: user_id int movie_id int rating int Rows: 9 Data: +---------+----------+--------+ | user_id | movie_id | rating | +---------+----------+--------+ | 10210 | 1 | 1 | | 10213 | 2 | 5 | | 10217 | 2 | 2 | | 10102 | 1 | 3 | | 10109 | 3 | 4 | | 10117 | 5 | 2 | | 10122 | 2 | 4 | | 10114 | 1 | 5 | | 10125 | 1 | 1 | +---------+----------+--------+ [9 rows x 3 columns] Delimiters can be specified, if "," is not the delimiter, for instance space ' ' in this case. Only single character delimiters are supported. >>> !cat ratings.csv user_id movie_id rating 10210 1 1 10213 2 5 10217 2 2 10102 1 3 10109 3 4 10117 5 2 10122 2 4 10114 1 5 10125 1 1 >>> tc.SFrame.read_csv('ratings.csv', delimiter=' ') By default, "NA" or a missing element are interpreted as missing values. >>> !cat ratings2.csv user,movie,rating "tom",,1 harry,5, jack,2,2 bill,, >>> tc.SFrame.read_csv('ratings2.csv') Columns: user str movie int rating int Rows: 4 Data: +---------+-------+--------+ | user | movie | rating | +---------+-------+--------+ | tom | None | 1 | | harry | 5 | None | | jack | 2 | 2 | | missing | None | None | +---------+-------+--------+ [4 rows x 3 columns] Furthermore due to the dictionary types and list types, can handle parsing of JSON-like formats. >>> !cat ratings3.csv business, categories, ratings "Restaurant 1", [1 4 9 10], {"funny":5, "cool":2} "Restaurant 2", [], {"happy":2, "sad":2} "Restaurant 3", [2, 11, 12], {} >>> tc.SFrame.read_csv('ratings3.csv') Columns: business str categories array ratings dict Rows: 3 Data: +--------------+--------------------------------+-------------------------+ | business | categories | ratings | +--------------+--------------------------------+-------------------------+ | Restaurant 1 | array('d', [1.0, 4.0, 9.0, ... | {'funny': 5, 'cool': 2} | | Restaurant 2 | array('d') | {'sad': 2, 'happy': 2} | | Restaurant 3 | array('d', [2.0, 11.0, 12.0]) | {} | +--------------+--------------------------------+-------------------------+ [3 rows x 3 columns] The list and dictionary parsers are quite flexible and can absorb a variety of purely formatted inputs. Also, note that the list and dictionary types are recursive, allowing for arbitrary values to be contained. All these are valid lists: >>> !cat interesting_lists.csv list [] [1,2,3] [1;2,3] [1 2 3] [{a:b}] ["c",d, e] [[a]] >>> tc.SFrame.read_csv('interesting_lists.csv') Columns: list list Rows: 7 Data: +-----------------+ | list | +-----------------+ | [] | | [1, 2, 3] | | [1, 2, 3] | | [1, 2, 3] | | [{'a': 'b'}] | | ['c', 'd', 'e'] | | [['a']] | +-----------------+ [7 rows x 1 columns] All these are valid dicts: >>> !cat interesting_dicts.csv dict {"classic":1,"dict":1} {space:1 separated:1} {emptyvalue:} {} {:} {recursive1:[{a:b}]} {:[{:[a]}]} >>> tc.SFrame.read_csv('interesting_dicts.csv') Columns: dict dict Rows: 7 Data: +------------------------------+ | dict | +------------------------------+ | {'dict': 1, 'classic': 1} | | {'separated': 1, 'space': 1} | | {'emptyvalue': None} | | {} | | {None: None} | | {'recursive1': [{'a': 'b'}]} | | {None: [{None: array('d')}]} | +------------------------------+ [7 rows x 1 columns] **Saving** Save and load the sframe in native format. 
>>> sf.save('mysframedir') >>> sf2 = turicreate.load_sframe('mysframedir') **Column Manipulation** An SFrame is composed of a collection of columns of SArrays, and individual SArrays can be extracted easily. For instance given an SFrame: >>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']}) >>> sf Columns: id int val str Rows: 3 Data: id val 0 1 A 1 2 B 2 3 C The "id" column can be extracted using: >>> sf["id"] dtype: int Rows: 3 [1, 2, 3] And can be deleted using: >>> del sf["id"] Multiple columns can be selected by passing a list of column names: >>> sf = SFrame({'id':[1,2,3],'val':['A','B','C'],'val2':[5,6,7]}) >>> sf Columns: id int val str val2 int Rows: 3 Data: id val val2 0 1 A 5 1 2 B 6 2 3 C 7 >>> sf2 = sf[['id','val']] >>> sf2 Columns: id int val str Rows: 3 Data: id val 0 1 A 1 2 B 2 3 C You can also select columns using types or a list of types: >>> sf2 = sf[int] >>> sf2 Columns: id int val2 int Rows: 3 Data: id val2 0 1 5 1 2 6 2 3 7 Or a mix of types and names: >>> sf2 = sf[['id', str]] >>> sf2 Columns: id int val str Rows: 3 Data: id val 0 1 A 1 2 B 2 3 C The same mechanism can be used to re-order columns: >>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']}) >>> sf Columns: id int val str Rows: 3 Data: id val 0 1 A 1 2 B 2 3 C >>> sf[['val','id']] >>> sf Columns: val str id int Rows: 3 Data: val id 0 A 1 1 B 2 2 C 3 **Element Access and Slicing** SFrames can be accessed by integer keys just like a regular python list. Such operations may not be fast on large datasets so looping over an SFrame should be avoided. >>> sf = SFrame({'id':[1,2,3],'val':['A','B','C']}) >>> sf[0] {'id': 1, 'val': 'A'} >>> sf[2] {'id': 3, 'val': 'C'} >>> sf[5] IndexError: SFrame index out of range Negative indices can be used to access elements from the tail of the array >>> sf[-1] # returns the last element {'id': 3, 'val': 'C'} >>> sf[-2] # returns the second to last element {'id': 2, 'val': 'B'} The SFrame also supports the full range of python slicing operators: >>> sf[1000:] # Returns an SFrame containing rows 1000 to the end >>> sf[:1000] # Returns an SFrame containing rows 0 to row 999 inclusive >>> sf[0:1000:2] # Returns an SFrame containing rows 0 to row 1000 in steps of 2 >>> sf[-100:] # Returns an SFrame containing last 100 rows >>> sf[-100:len(sf):2] # Returns an SFrame containing last 100 rows in steps of 2 **Logical Filter** An SFrame can be filtered using >>> sframe[binary_filter] where sframe is an SFrame and binary_filter is an SArray of the same length. The result is a new SFrame which contains only rows of the SFrame where its matching row in the binary_filter is non zero. This permits the use of boolean operators that can be used to perform logical filtering operations. For instance, given an SFrame >>> sf Columns: id int val str Rows: 3 Data: id val 0 1 A 1 2 B 2 3 C >>> sf[(sf['id'] >= 1) & (sf['id'] <= 2)] Columns: id int val str Rows: 3 Data: id val 0 1 A 1 2 B See :class:`~turicreate.SArray` for more details on the use of the logical filter. This can also be used more generally to provide filtering capability which is otherwise not expressible with simple boolean functions. For instance: >>> sf[sf['id'].apply(lambda x: math.log(x) <= 1)] Columns: id int val str Rows: 3 Data: id val 0 1 A 1 2 B Or alternatively: >>> sf[sf.apply(lambda x: math.log(x['id']) <= 1)] Create an SFrame from a Python dictionary. 
>>> from turicreate import SFrame >>> sf = SFrame({'id':[1,2,3], 'val':['A','B','C']}) >>> sf Columns: id int val str Rows: 3 Data: id val 0 1 A 1 2 B 2 3 C """ __slots__ = ['_proxy', '_cache'] def __init__(self, data=None, format='auto', _proxy=None): """__init__(data=list(), format='auto') Construct a new SFrame from a url or a pandas.DataFrame. """ # emit metrics for num_rows, num_columns, and type (local://, s3, hdfs, http) if (_proxy): self.__proxy__ = _proxy else: self.__proxy__ = UnitySFrameProxy() _format = None if six.PY2 and isinstance(data, unicode): data = data.encode('utf-8') if (format == 'auto'): if (HAS_PANDAS and isinstance(data, pandas.DataFrame)): _format = 'dataframe' elif (isinstance(data, str) or (sys.version_info.major < 3 and isinstance(data, unicode))): if data.endswith(('.csv', '.csv.gz')): _format = 'csv' elif data.endswith(('.tsv', '.tsv.gz')): _format = 'tsv' elif data.endswith(('.txt', '.txt.gz')): print("Assuming file is csv. For other delimiters, " + \ "please use `SFrame.read_csv`.") _format = 'csv' else: _format = 'sframe' elif type(data) == SArray: _format = 'sarray' elif isinstance(data, SFrame): _format = 'sframe_obj' elif isinstance(data, dict): _format = 'dict' elif _is_non_string_iterable(data): _format = 'array' elif data is None: _format = 'empty' else: raise ValueError('Cannot infer input type for data ' + str(data)) else: _format = format with cython_context(): if (_format == 'dataframe'): for c in data.columns.values: self.add_column(SArray(data[c].values), str(c), inplace=True) elif (_format == 'sframe_obj'): for col in data.column_names(): self.__proxy__.add_column(data[col].__proxy__, col) elif (_format == 'sarray'): self.__proxy__.add_column(data.__proxy__, '') elif (_format == 'array'): if len(data) > 0: unique_types = set([type(x) for x in data if x is not None]) if len(unique_types) == 1 and SArray in unique_types: for arr in data: self.add_column(arr, inplace=True) elif SArray in unique_types: raise ValueError("Cannot create SFrame from mix of regular values and SArrays") else: self.__proxy__.add_column(SArray(data).__proxy__, '') elif (_format == 'dict'): # Validate that every column is the same length. if len(set(len(value) for value in data.values())) > 1: # probably should be a value error. But we used to raise # runtime error here... raise RuntimeError("All column should be of the same length") # split into SArray values and other iterable values. 
# We convert the iterable values in bulk, and then add the sarray values as columns sarray_keys = sorted(key for key,value in six.iteritems(data) if isinstance(value, SArray)) self.__proxy__.load_from_dataframe({key:value for key,value in six.iteritems(data) if not isinstance(value, SArray)}) for key in sarray_keys: self.__proxy__.add_column(data[key].__proxy__, key) elif (_format == 'csv'): url = data tmpsf = SFrame.read_csv(url, delimiter=',', header=True) self.__proxy__ = tmpsf.__proxy__ elif (_format == 'tsv'): url = data tmpsf = SFrame.read_csv(url, delimiter='\t', header=True) self.__proxy__ = tmpsf.__proxy__ elif (_format == 'sframe'): url = _make_internal_url(data) self.__proxy__.load_from_sframe_index(url) elif (_format == 'empty'): pass else: raise ValueError('Unknown input type: ' + format) @staticmethod def _infer_column_types_from_lines(first_rows): if (len(first_rows.column_names()) < 1): print("Insufficient number of columns to perform type inference") raise RuntimeError("Insufficient columns ") if len(first_rows) < 1: print("Insufficient number of rows to perform type inference") raise RuntimeError("Insufficient rows") # gets all the values column-wise all_column_values_transposed = [list(first_rows[col]) for col in first_rows.column_names()] # transpose all_column_values = [list(x) for x in list(zip(*all_column_values_transposed))] all_column_type_hints = [[type(t) for t in vals] for vals in all_column_values] # collect the hints # if every line was inferred to have a different number of elements, die if len(set(len(x) for x in all_column_type_hints)) != 1: print("Unable to infer column types. Defaulting to str") return str import types column_type_hints = all_column_type_hints[0] # now perform type combining across rows for i in range(1, len(all_column_type_hints)): currow = all_column_type_hints[i] for j in range(len(column_type_hints)): # combine types d = set([currow[j], column_type_hints[j]]) if (len(d) == 1): # easy case. both agree on the type continue if (((long in d) or (int in d)) and (float in d)): # one is an int, one is a float. its a float column_type_hints[j] = float elif ((array.array in d) and (list in d)): # one is an array , one is a list. its a list column_type_hints[j] = list elif type(None) in d: # one is a NoneType. assign to other type if currow[j] != type(None): column_type_hints[j] = currow[j] else: column_type_hints[j] = str # final pass. everything which is still NoneType is now a str for i in range(len(column_type_hints)): if column_type_hints[i] == type(None): column_type_hints[i] = str return column_type_hints @classmethod def _read_csv_impl(cls, url, delimiter=',', header=True, error_bad_lines=False, comment_char='', escape_char='\\', double_quote=True, quote_char='\"', skip_initial_space=True, column_type_hints=None, na_values=["NA"], line_terminator="\n", usecols=[], nrows=None, skiprows=0, verbose=True, store_errors=True, nrows_to_infer=100, true_values=[], false_values=[], _only_raw_string_substitutions=False, **kwargs): """ Constructs an SFrame from a CSV file or a path to multiple CSVs, and returns a pair containing the SFrame and optionally (if store_errors=True) a dict of filenames to SArrays indicating for each file, what are the incorrectly parsed lines encountered. Parameters ---------- store_errors : bool If true, the output errors dict will be filled. See `read_csv` for the rest of the parameters. 
""" # Pandas argument compatibility if "sep" in kwargs: delimiter = kwargs['sep'] del kwargs['sep'] if "quotechar" in kwargs: quote_char = kwargs['quotechar'] del kwargs['quotechar'] if "doublequote" in kwargs: double_quote = kwargs['doublequote'] del kwargs['doublequote'] if "comment" in kwargs: comment_char = kwargs['comment'] del kwargs['comment'] if comment_char is None: comment_char = '' if "lineterminator" in kwargs: line_terminator = kwargs['lineterminator'] del kwargs['lineterminator'] if len(kwargs) > 0: raise TypeError("Unexpected keyword arguments " + str(kwargs.keys())) parsing_config = dict() parsing_config["delimiter"] = delimiter parsing_config["use_header"] = header parsing_config["continue_on_failure"] = not error_bad_lines parsing_config["comment_char"] = comment_char parsing_config["escape_char"] = '\0' if escape_char is None else escape_char parsing_config["use_escape_char"] = escape_char is None parsing_config["double_quote"] = double_quote parsing_config["quote_char"] = quote_char parsing_config["skip_initial_space"] = skip_initial_space parsing_config["store_errors"] = store_errors parsing_config["line_terminator"] = line_terminator parsing_config["output_columns"] = usecols parsing_config["skip_rows"] =skiprows parsing_config["true_values"] = true_values parsing_config["false_values"] = false_values parsing_config["only_raw_string_substitutions"] = _only_raw_string_substitutions if type(na_values) is str: na_values = [na_values] if na_values is not None and len(na_values) > 0: parsing_config["na_values"] = na_values if nrows is not None: parsing_config["row_limit"] = nrows proxy = UnitySFrameProxy() internal_url = _make_internal_url(url) # Attempt to automatically detect the column types. Either produce a # list of types; otherwise default to all str types. column_type_inference_was_used = False if column_type_hints is None: try: # Get the first nrows_to_infer rows (using all the desired arguments). first_rows = SFrame.read_csv(url, nrows=nrows_to_infer, column_type_hints=type(None), header=header, delimiter=delimiter, comment_char=comment_char, escape_char=escape_char, double_quote=double_quote, quote_char=quote_char, skip_initial_space=skip_initial_space, na_values=na_values, line_terminator=line_terminator, usecols=usecols, skiprows=skiprows, verbose=verbose, true_values=true_values, false_values=false_values, _only_raw_string_substitutions=_only_raw_string_substitutions) column_type_hints = SFrame._infer_column_types_from_lines(first_rows) typelist = '[' + ','.join(t.__name__ for t in column_type_hints) + ']' if verbose: print("------------------------------------------------------") print("Inferred types from first %d line(s) of file as " % nrows_to_infer) print("column_type_hints="+ typelist) print("If parsing fails due to incorrect types, you can correct") print("the inferred type list above and pass it to read_csv in") print( "the column_type_hints argument") print("------------------------------------------------------") column_type_inference_was_used = True except RuntimeError as e: if type(e) == RuntimeError and ("cancel" in str(e.args[0]) or "Cancel" in str(e.args[0])): raise e # If the above fails, default back to str for all columns. column_type_hints = str if verbose: print('Could not detect types. 
Using str for each column.') if type(column_type_hints) is type: type_hints = {'__all_columns__': column_type_hints} elif type(column_type_hints) is list: type_hints = dict(list(zip(['__X%d__' % i for i in range(len(column_type_hints))], column_type_hints))) elif type(column_type_hints) is dict: # we need to fill in a potentially incomplete dictionary try: # Get the first nrows_to_infer rows (using all the desired arguments). first_rows = SFrame.read_csv(url, nrows=nrows_to_infer, column_type_hints=type(None), header=header, delimiter=delimiter, comment_char=comment_char, escape_char=escape_char, double_quote=double_quote, quote_char=quote_char, skip_initial_space=skip_initial_space, na_values=na_values, line_terminator=line_terminator, usecols=usecols, skiprows=skiprows, verbose=verbose, true_values=true_values, false_values=false_values, _only_raw_string_substitutions=_only_raw_string_substitutions) inferred_types = SFrame._infer_column_types_from_lines(first_rows) # make a dict of column_name to type inferred_types = dict(list(zip(first_rows.column_names(), inferred_types))) # overwrite with the user's specified types for key in column_type_hints: inferred_types[key] = column_type_hints[key] column_type_hints = inferred_types except RuntimeError as e: if type(e) == RuntimeError and ("cancel" in str(e) or "Cancel" in str(e)): raise e # If the above fails, default back to str for unmatched columns if verbose: print('Could not detect types. Using str for all unspecified columns.') type_hints = column_type_hints else: raise TypeError("Invalid type for column_type_hints. Must be a dictionary, list or a single type.") try: if (not verbose): glconnect.get_server().set_log_progress(False) with cython_context(): errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints) except Exception as e: if type(e) == RuntimeError and "CSV parsing cancelled" in str(e.args[0]): raise e if column_type_inference_was_used: # try again if verbose: print("Unable to parse the file with automatic type inference.") print("Defaulting to column_type_hints=str") type_hints = {'__all_columns__': str} try: with cython_context(): errors = proxy.load_from_csvs(internal_url, parsing_config, type_hints) except: glconnect.get_server().set_log_progress(True) raise else: glconnect.get_server().set_log_progress(True) raise glconnect.get_server().set_log_progress(True) return (cls(_proxy=proxy), { f: SArray(_proxy = es) for (f, es) in errors.items() }) @classmethod def read_csv_with_errors(cls, url, delimiter=',', header=True, comment_char='', escape_char='\\', double_quote=True, quote_char='\"', skip_initial_space=True, column_type_hints=None, na_values=["NA"], line_terminator='\n', usecols = [], nrows=None, skiprows=0, verbose=True, nrows_to_infer=100, true_values=[], false_values=[], _only_raw_string_substitutions=False, **kwargs): """ Constructs an SFrame from a CSV file or a path to multiple CSVs, and returns a pair containing the SFrame and a dict of filenames to SArrays indicating for each file, what are the incorrectly parsed lines encountered. Parameters ---------- url : string Location of the CSV file or directory to load. If URL is a directory or a "glob" pattern, all matching files will be loaded. delimiter : string, optional This describes the delimiter used for parsing csv files. header : bool, optional If true, uses the first row as the column names. Otherwise use the default column names: 'X1, X2, ...'. 
comment_char : string, optional The character which denotes that the remainder of the line is a comment. escape_char : string, optional Character which begins a C escape sequence. Defaults to backslash(\\) Set to None to disable. double_quote : bool, optional If True, two consecutive quotes in a string are parsed to a single quote. quote_char : string, optional Character sequence that indicates a quote. skip_initial_space : bool, optional Ignore extra spaces at the start of a field column_type_hints : None, type, list[type], dict[string, type], optional This provides type hints for each column. By default, this method attempts to detect the type of each column automatically. Supported types are int, float, str, list, dict, and array.array. * If a single type is provided, the type will be applied to all columns. For instance, column_type_hints=float will force all columns to be parsed as float. * If a list of types is provided, the types applies to each column in order, e.g.[int, float, str] will parse the first column as int, second as float and third as string. * If a dictionary of column name to type is provided, each type value in the dictionary is applied to the key it belongs to. For instance {'user':int} will hint that the column called "user" should be parsed as an integer, and the rest will be type inferred. na_values : str | list of str, optional A string or list of strings to be interpreted as missing values. true_values : str | list of str, optional A string or list of strings to be interpreted as 1 false_values : str | list of str, optional A string or list of strings to be interpreted as 0 line_terminator : str, optional A string to be interpreted as the line terminator. Defaults to "\\n" which will also correctly match Mac, Linux and Windows line endings ("\\r", "\\n" and "\\r\\n" respectively) usecols : list of str, optional A subset of column names to output. If unspecified (default), all columns will be read. This can provide performance gains if the number of columns are large. If the input file has no headers, usecols=['X1','X3'] will read columns 1 and 3. nrows : int, optional If set, only this many rows will be read from the file. skiprows : int, optional If set, this number of rows at the start of the file are skipped. verbose : bool, optional If True, print the progress. Returns ------- out : tuple The first element is the SFrame with good data. The second element is a dictionary of filenames to SArrays indicating for each file, what are the incorrectly parsed lines encountered. See Also -------- read_csv, SFrame Examples -------- >>> bad_url = 'https://static.turi.com/datasets/bad_csv_example.csv' >>> (sf, bad_lines) = turicreate.SFrame.read_csv_with_errors(bad_url) >>> sf +---------+----------+--------+ | user_id | movie_id | rating | +---------+----------+--------+ | 25904 | 1663 | 3 | | 25907 | 1663 | 3 | | 25923 | 1663 | 3 | | 25924 | 1663 | 3 | | 25928 | 1663 | 2 | | ... | ... | ... 
| +---------+----------+--------+ [98 rows x 3 columns] >>> bad_lines {'https://static.turi.com/datasets/bad_csv_example.csv': dtype: str Rows: 1 ['x,y,z,a,b,c']} """ return cls._read_csv_impl(url, delimiter=delimiter, header=header, error_bad_lines=False, # we are storing errors, # thus we must not fail # on bad lines comment_char=comment_char, escape_char=escape_char, double_quote=double_quote, quote_char=quote_char, skip_initial_space=skip_initial_space, column_type_hints=column_type_hints, na_values=na_values, line_terminator=line_terminator, usecols=usecols, nrows=nrows, verbose=verbose, skiprows=skiprows, store_errors=True, nrows_to_infer=nrows_to_infer, true_values=true_values, false_values=false_values, _only_raw_string_substitutions=_only_raw_string_substitutions, **kwargs) @classmethod def read_csv(cls, url, delimiter=',', header=True, error_bad_lines=False, comment_char='', escape_char='\\', double_quote=True, quote_char='\"', skip_initial_space=True, column_type_hints=None, na_values=["NA"], line_terminator='\n', usecols=[], nrows=None, skiprows=0, verbose=True, nrows_to_infer=100, true_values=[], false_values=[], _only_raw_string_substitutions=False, **kwargs): """ Constructs an SFrame from a CSV file or a path to multiple CSVs. Parameters ---------- url : string Location of the CSV file or directory to load. If URL is a directory or a "glob" pattern, all matching files will be loaded. delimiter : string, optional This describes the delimiter used for parsing csv files. header : bool, optional If true, uses the first row as the column names. Otherwise use the default column names : 'X1, X2, ...'. error_bad_lines : bool If true, will fail upon encountering a bad line. If false, will continue parsing skipping lines which fail to parse correctly. A sample of the first 10 encountered bad lines will be printed. comment_char : string, optional The character which denotes that the remainder of the line is a comment. escape_char : string, optional Character which begins a C escape sequence. Defaults to backslash(\\) Set to None to disable. double_quote : bool, optional If True, two consecutive quotes in a string are parsed to a single quote. quote_char : string, optional Character sequence that indicates a quote. skip_initial_space : bool, optional Ignore extra spaces at the start of a field column_type_hints : None, type, list[type], dict[string, type], optional This provides type hints for each column. By default, this method attempts to detect the type of each column automatically. Supported types are int, float, str, list, dict, and array.array. * If a single type is provided, the type will be applied to all columns. For instance, column_type_hints=float will force all columns to be parsed as float. * If a list of types is provided, the types applies to each column in order, e.g.[int, float, str] will parse the first column as int, second as float and third as string. * If a dictionary of column name to type is provided, each type value in the dictionary is applied to the key it belongs to. For instance {'user':int} will hint that the column called "user" should be parsed as an integer, and the rest will be type inferred. na_values : str | list of str, optional A string or list of strings to be interpreted as missing values. 
true_values : str | list of str, optional A string or list of strings to be interpreted as 1 false_values : str | list of str, optional A string or list of strings to be interpreted as 0 line_terminator : str, optional A string to be interpreted as the line terminator. Defaults to "\n" which will also correctly match Mac, Linux and Windows line endings ("\\r", "\\n" and "\\r\\n" respectively) usecols : list of str, optional A subset of column names to output. If unspecified (default), all columns will be read. This can provide performance gains if the number of columns are large. If the input file has no headers, usecols=['X1','X3'] will read columns 1 and 3. nrows : int, optional If set, only this many rows will be read from the file. skiprows : int, optional If set, this number of rows at the start of the file are skipped. verbose : bool, optional If True, print the progress. Returns ------- out : SFrame See Also -------- read_csv_with_errors, SFrame Examples -------- Read a regular csv file, with all default options, automatically determine types: >>> url = 'https://static.turi.com/datasets/rating_data_example.csv' >>> sf = turicreate.SFrame.read_csv(url) >>> sf Columns: user_id int movie_id int rating int Rows: 10000 +---------+----------+--------+ | user_id | movie_id | rating | +---------+----------+--------+ | 25904 | 1663 | 3 | | 25907 | 1663 | 3 | | 25923 | 1663 | 3 | | 25924 | 1663 | 3 | | 25928 | 1663 | 2 | | ... | ... | ... | +---------+----------+--------+ [10000 rows x 3 columns] Read only the first 100 lines of the csv file: >>> sf = turicreate.SFrame.read_csv(url, nrows=100) >>> sf Columns: user_id int movie_id int rating int Rows: 100 +---------+----------+--------+ | user_id | movie_id | rating | +---------+----------+--------+ | 25904 | 1663 | 3 | | 25907 | 1663 | 3 | | 25923 | 1663 | 3 | | 25924 | 1663 | 3 | | 25928 | 1663 | 2 | | ... | ... | ... | +---------+----------+--------+ [100 rows x 3 columns] Read all columns as str type >>> sf = turicreate.SFrame.read_csv(url, column_type_hints=str) >>> sf Columns: user_id str movie_id str rating str Rows: 10000 +---------+----------+--------+ | user_id | movie_id | rating | +---------+----------+--------+ | 25904 | 1663 | 3 | | 25907 | 1663 | 3 | | 25923 | 1663 | 3 | | 25924 | 1663 | 3 | | 25928 | 1663 | 2 | | ... | ... | ... | +---------+----------+--------+ [10000 rows x 3 columns] Specify types for a subset of columns and leave the rest to be str. >>> sf = turicreate.SFrame.read_csv(url, ... column_type_hints={ ... 'user_id':int, 'rating':float ... }) >>> sf Columns: user_id str movie_id str rating float Rows: 10000 +---------+----------+--------+ | user_id | movie_id | rating | +---------+----------+--------+ | 25904 | 1663 | 3.0 | | 25907 | 1663 | 3.0 | | 25923 | 1663 | 3.0 | | 25924 | 1663 | 3.0 | | 25928 | 1663 | 2.0 | | ... | ... | ... | +---------+----------+--------+ [10000 rows x 3 columns] Not treat first line as header: >>> sf = turicreate.SFrame.read_csv(url, header=False) >>> sf Columns: X1 str X2 str X3 str Rows: 10001 +---------+----------+--------+ | X1 | X2 | X3 | +---------+----------+--------+ | user_id | movie_id | rating | | 25904 | 1663 | 3 | | 25907 | 1663 | 3 | | 25923 | 1663 | 3 | | 25924 | 1663 | 3 | | 25928 | 1663 | 2 | | ... | ... | ... 
| +---------+----------+--------+ [10001 rows x 3 columns] Treat '3' as missing value: >>> sf = turicreate.SFrame.read_csv(url, na_values=['3'], column_type_hints=str) >>> sf Columns: user_id str movie_id str rating str Rows: 10000 +---------+----------+--------+ | user_id | movie_id | rating | +---------+----------+--------+ | 25904 | 1663 | None | | 25907 | 1663 | None | | 25923 | 1663 | None | | 25924 | 1663 | None | | 25928 | 1663 | 2 | | ... | ... | ... | +---------+----------+--------+ [10000 rows x 3 columns] Throw error on parse failure: >>> bad_url = 'https://static.turi.com/datasets/bad_csv_example.csv' >>> sf = turicreate.SFrame.read_csv(bad_url, error_bad_lines=True) RuntimeError: Runtime Exception. Unable to parse line "x,y,z,a,b,c" Set error_bad_lines=False to skip bad lines """ return cls._read_csv_impl(url, delimiter=delimiter, header=header, error_bad_lines=error_bad_lines, comment_char=comment_char, escape_char=escape_char, double_quote=double_quote, quote_char=quote_char, skip_initial_space=skip_initial_space, column_type_hints=column_type_hints, na_values=na_values, line_terminator=line_terminator, usecols=usecols, nrows=nrows, skiprows=skiprows, verbose=verbose, store_errors=False, nrows_to_infer=nrows_to_infer, true_values=true_values, false_values=false_values, _only_raw_string_substitutions=_only_raw_string_substitutions, **kwargs)[0] @classmethod def read_json(cls, url, orient='records'): """ Reads a JSON file representing a table into an SFrame. Parameters ---------- url : string Location of the CSV file or directory to load. If URL is a directory or a "glob" pattern, all matching files will be loaded. orient : string, optional. Either "records" or "lines" If orient="records" the file is expected to contain a single JSON array, where each array element is a dictionary. If orient="lines", the file is expected to contain a JSON element per line. Examples -------- The orient parameter describes the expected input format of the JSON file. If orient="records", the JSON file is expected to contain a single JSON Array where each array element is a dictionary describing the row. For instance: >>> !cat input.json [{'a':1,'b':1}, {'a':2,'b':2}, {'a':3,'b':3}] >>> SFrame.read_json('input.json', orient='records') Columns: a int b int Rows: 3 Data: +---+---+ | a | b | +---+---+ | 1 | 1 | | 2 | 2 | | 3 | 3 | +---+---+ If orient="lines", the JSON file is expected to contain a JSON element per line. If each line contains a dictionary, it is automatically unpacked. >>> !cat input.json {'a':1,'b':1} {'a':2,'b':2} {'a':3,'b':3} >>> g = SFrame.read_json('input.json', orient='lines') Columns: a int b int Rows: 3 Data: +---+---+ | a | b | +---+---+ | 1 | 1 | | 2 | 2 | | 3 | 3 | +---+---+ If the lines are not dictionaries, the original format is maintained. 
>>> !cat input.json ['a','b','c'] ['d','e','f'] ['g','h','i'] [1,2,3] >>> g = SFrame.read_json('input.json', orient='lines') Columns: X1 list Rows: 3 Data: +-----------+ | X1 | +-----------+ | [a, b, c] | | [d, e, f] | | [g, h, i] | +-----------+ [3 rows x 1 columns] """ if orient == "records": g = SArray.read_json(url) if len(g) == 0: return SFrame() g = SFrame({'X1':g}) return g.unpack('X1','') elif orient == "lines": g = cls.read_csv(url, header=False,na_values=['null'],true_values=['true'],false_values=['false'], _only_raw_string_substitutions=True) if g.num_rows() == 0: return SFrame() if g.num_columns() != 1: raise RuntimeError("Input JSON not of expected format") if g['X1'].dtype == dict: return g.unpack('X1','') else: return g else: raise ValueError("Invalid value for orient parameter (" + str(orient) + ")") @classmethod def from_sql(cls, conn, sql_statement, params=None, type_inference_rows=100, dbapi_module=None, column_type_hints=None, cursor_arraysize=128): """ Convert the result of a SQL database query to an SFrame. Parameters ---------- conn : dbapi2.Connection A DBAPI2 connection object. Any connection object originating from the 'connect' method of a DBAPI2-compliant package can be used. sql_statement : str The query to be sent to the database through the given connection. No checks are performed on the `sql_statement`. Any side effects from the query will be reflected on the database. If no result rows are returned, an empty SFrame is created. params : iterable | dict, optional Parameters to substitute for any parameter markers in the `sql_statement`. Be aware that the style of parameters may vary between different DBAPI2 packages. type_inference_rows : int, optional The maximum number of rows to use for determining the column types of the SFrame. These rows are held in Python until all column types are determined or the maximum is reached. dbapi_module : module | package, optional The top-level DBAPI2 module/package that constructed the given connection object. By default, a best guess of which module the connection came from is made. In the event that this guess is wrong, this will need to be specified. column_type_hints : dict | list | type, optional Specifies the types of the output SFrame. If a dict is given, it must have result column names as keys, but need not have all of the result column names. If a list is given, the length of the list must match the number of result columns. If a single type is given, all columns in the output SFrame will be this type. If the result type is incompatible with the types given in this argument, a casting error will occur. cursor_arraysize : int, optional The number of rows to fetch from the database at one time. Returns ------- out : SFrame Examples -------- >>> import sqlite3 >>> conn = sqlite3.connect('example.db') >>> turicreate.SFrame.from_sql(conn, "SELECT * FROM foo") Columns: a int b int Rows: 1 Data: +---+---+ | a | b | +---+---+ | 1 | 2 | +---+---+ [1 rows x 2 columns] """ # Mapping types is always the trickiest part about reading from a # database, so the main complexity of this function concerns types. # Much of the heavy-lifting of this is done by the DBAPI2 module, which # holds the burden of the actual mapping from the database-specific # type to a suitable Python type. The problem is that the type that the # module chooses may not be supported by SFrame, and SFrame needs a # list of types to be created, so we must resort to guessing the type # of a column if the query result returns lots of NULL values. 
The goal # of these steps is to fail as little as possible first, and then # preserve data as much as we can. # # Here is how the type for an SFrame column is chosen: # # 1. The column_type_hints parameter is checked. # # Each column specified in the parameter will be forced to the # hinted type via a Python-side cast before it is given to the # SFrame. Only int, float, and str are allowed to be hints. # # 2. The types returned from the cursor are checked. # # The first non-None result for each column is taken to be the type # of that column. The type is checked for whether SFrame supports # it, or whether it can convert to a supported type. If the type is # supported, no Python-side cast takes place. If unsupported, the # SFrame column is set to str and the values are casted in Python to # str before being added to the SFrame. # # 3. DB type codes provided by module are checked # # This case happens for any column that only had None values in the # first `type_inference_rows` rows. In this case we check the # type_code in the cursor description for the columns missing types. # These types often do not match up with an SFrame-supported Python # type, so the utility of this step is limited. It can only result # in labeling datetime.datetime, float, or str. If a suitable # mapping isn't found, we fall back to str. mod_info = _get_global_dbapi_info(dbapi_module, conn) from .sframe_builder import SFrameBuilder c = conn.cursor() try: if params is None: c.execute(sql_statement) else: c.execute(sql_statement, params) except mod_info['Error'] as e: # The rollback method is considered optional by DBAPI2, but some # modules that do implement it won't work again unless it is called # if an error happens on a cursor. if hasattr(conn, 'rollback'): conn.rollback() raise e c.arraysize = cursor_arraysize result_desc = c.description result_names = [i[0] for i in result_desc] result_types = [None for i in result_desc] cols_to_force_cast = set() temp_vals = [] # Set any types that are given to us col_name_to_num = {result_names[i]:i for i in range(len(result_names))} if column_type_hints is not None: if type(column_type_hints) is dict: for k,v in column_type_hints.items(): col_num = col_name_to_num[k] cols_to_force_cast.add(col_num) result_types[col_num] = v elif type(column_type_hints) is list: if len(column_type_hints) != len(result_names): __LOGGER__.warn("If column_type_hints is specified as a "+\ "list, it must be of the same size as the result "+\ "set's number of columns. Ignoring (use dict instead).") else: result_types = column_type_hints cols_to_force_cast.update(range(len(result_desc))) elif type(column_type_hints) is type: result_types = [column_type_hints for i in result_desc] cols_to_force_cast.update(range(len(result_desc))) # Since we will be casting whatever we receive to the types given # before submitting the values to the SFrame, we need to make sure that # these are types that a "cast" makes sense, and we're not calling a # constructor that expects certain input (e.g. 
datetime.datetime), # since we could get lots of different input hintable_types = [int,float,str] if not all([i in hintable_types or i is None for i in result_types]): raise TypeError("Only " + str(hintable_types) + " can be provided as type hints!") # Perform type inference by checking to see what python types are # returned from the cursor if not all(result_types): # Only test the first fetch{one,many} command since the only way it # will raise an exception is if execute didn't produce a result set try: row = c.fetchone() except mod_info['Error'] as e: if hasattr(conn, 'rollback'): conn.rollback() raise e while row is not None: # Assumes that things like dicts are not a "single sequence" temp_vals.append(row) val_count = 0 for val in row: if result_types[val_count] is None and val is not None: result_types[val_count] = type(val) val_count += 1 if all(result_types) or len(temp_vals) >= type_inference_rows: break row = c.fetchone() # This will be true if some columns have all missing values up to this # point. Try using DBAPI2 type_codes to pick a suitable type. If this # doesn't work, fall back to string. if not all(result_types): missing_val_cols = [i for i,v in enumerate(result_types) if v is None] cols_to_force_cast.update(missing_val_cols) inferred_types = _infer_dbapi2_types(c, mod_info) cnt = 0 for i in result_types: if i is None: result_types[cnt] = inferred_types[cnt] cnt += 1 sb = SFrameBuilder(result_types, column_names=result_names) unsupported_cols = [i for i,v in enumerate(sb.column_types()) if v is type(None)] if len(unsupported_cols) > 0: cols_to_force_cast.update(unsupported_cols) for i in unsupported_cols: result_types[i] = str sb = SFrameBuilder(result_types, column_names=result_names) temp_vals = _convert_rows_to_builtin_seq(temp_vals) sb.append_multiple(_force_cast_sql_types(temp_vals, result_types, cols_to_force_cast)) rows = c.fetchmany() while len(rows) > 0: rows = _convert_rows_to_builtin_seq(rows) sb.append_multiple(_force_cast_sql_types(rows, result_types, cols_to_force_cast)) rows = c.fetchmany() cls = sb.close() try: c.close() except mod_info['Error'] as e: if hasattr(conn, 'rollback'): conn.rollback() raise e return cls def to_sql(self, conn, table_name, dbapi_module=None, use_python_type_specifiers=False, use_exact_column_names=True): """ Convert an SFrame to a single table in a SQL database. This function does not attempt to create the table or check if a table named `table_name` exists in the database. It simply assumes that `table_name` exists in the database and appends to it. `to_sql` can be thought of as a convenience wrapper around parameterized SQL insert statements. Parameters ---------- conn : dbapi2.Connection A DBAPI2 connection object. Any connection object originating from the 'connect' method of a DBAPI2-compliant package can be used. table_name : str The name of the table to append the data in this SFrame. dbapi_module : module | package, optional The top-level DBAPI2 module/package that constructed the given connection object. By default, a best guess of which module the connection came from is made. In the event that this guess is wrong, this will need to be specified. use_python_type_specifiers : bool, optional If the DBAPI2 module's parameter marker style is 'format' or 'pyformat', attempt to use accurate type specifiers for each value ('s' for string, 'd' for integer, etc.). Many DBAPI2 modules simply use 's' for all types if they use these parameter markers, so this is False by default. 
use_exact_column_names : bool, optional Specify the column names of the SFrame when inserting its contents into the DB. If the specified table does not have the exact same column names as the SFrame, inserting the data will fail. If False, the columns in the SFrame are inserted in order without care of the schema of the DB table. True by default. """ mod_info = _get_global_dbapi_info(dbapi_module, conn) c = conn.cursor() col_info = list(zip(self.column_names(), self.column_types())) if not use_python_type_specifiers: _pytype_to_printf = lambda x: 's' # DBAPI2 standard allows for five different ways to specify parameters sql_param = { 'qmark' : lambda name,col_num,col_type: '?', 'numeric' : lambda name,col_num,col_type:':'+str(col_num+1), 'named' : lambda name,col_num,col_type:':'+str(name), 'format' : lambda name,col_num,col_type:'%'+_pytype_to_printf(col_type), 'pyformat': lambda name,col_num,col_type:'%('+str(name)+')'+_pytype_to_printf(col_type), } get_sql_param = sql_param[mod_info['paramstyle']] # form insert string ins_str = "INSERT INTO " + str(table_name) value_str = " VALUES (" col_str = " (" count = 0 for i in col_info: col_str += i[0] value_str += get_sql_param(i[0],count,i[1]) if count < len(col_info)-1: col_str += "," value_str += "," count += 1 col_str += ")" value_str += ")" if use_exact_column_names: ins_str += col_str ins_str += value_str # Some formats require values in an iterable, some a dictionary if (mod_info['paramstyle'] == 'named' or\ mod_info['paramstyle'] == 'pyformat'): prepare_sf_row = lambda x:x else: col_names = self.column_names() prepare_sf_row = lambda x: [x[i] for i in col_names] for i in self: try: c.execute(ins_str, prepare_sf_row(i)) except mod_info['Error'] as e: if hasattr(conn, 'rollback'): conn.rollback() raise e conn.commit() c.close() def __hash__(self): ''' Because we override `__eq__` we need to implement this function in Python 3. Just make it match default behavior in Python 2. ''' return id(self) // 16 def __repr__(self): """ Returns a string description of the frame """ ret = self.__get_column_description__() (is_empty, data_str) = self.__str_impl__() if is_empty: data_str = "\t[]" if self.__has_size__(): ret = ret + "Rows: " + str(len(self)) + "\n\n" else: ret = ret + "Rows: Unknown" + "\n\n" ret = ret + "Data:\n" ret = ret + data_str return ret def __get_column_description__(self): colnames = self.column_names() coltypes = self.column_types() ret = "Columns:\n" if len(colnames) > 0: for i in range(len(colnames)): ret = ret + "\t" + colnames[i] + "\t" + coltypes[i].__name__ + "\n" ret = ret + "\n" else: ret = ret + "\tNone\n\n" return ret def __get_pretty_tables__(self, wrap_text=False, max_row_width=80, max_column_width=30, max_columns=20, max_rows_to_display=60): """ Returns a list of pretty print tables representing the current SFrame. If the number of columns is larger than max_columns, the last pretty table will contain an extra column of "...". Parameters ---------- wrap_text : bool, optional max_row_width : int, optional Max number of characters per table. max_column_width : int, optional Max number of characters per column. max_columns : int, optional Max number of columns per table. max_rows_to_display : int, optional Max number of rows to display. Returns ------- out : list[PrettyTable] """ if (len(self) <= max_rows_to_display): headsf = self.__copy__() else: headsf = self.head(max_rows_to_display) if headsf.shape == (0, 0): return [PrettyTable()] # convert array.array column to list column so they print like [...] 
# and not array('d', ...) for col in headsf.column_names(): if headsf[col].dtype is array.array: headsf[col] = headsf[col].astype(list) def _value_to_str(value): if (type(value) is array.array): return str(list(value)) elif (type(value) is numpy.ndarray): return str(value).replace('\n',' ') elif (type(value) is list): return '[' + ", ".join(_value_to_str(x) for x in value) + ']' else: return str(value) def _escape_space(s): if sys.version_info.major == 3: return "".join([ch.encode('unicode_escape').decode() if ch.isspace() else ch for ch in s]) return "".join([ch.encode('string_escape') if ch.isspace() else ch for ch in s]) def _truncate_respect_unicode(s, max_length): if (len(s) <= max_length): return s else: if sys.version_info.major < 3: u = unicode(s, 'utf-8', errors='replace') return u[:max_length].encode('utf-8') else: return s[:max_length] def _truncate_str(s, wrap_str=False): """ Truncate and optionally wrap the input string as unicode, replace unconvertible character with a diamond ?. """ s = _escape_space(s) if len(s) <= max_column_width: if sys.version_info.major < 3: return unicode(s, 'utf-8', errors='replace') else: return s else: ret = '' # if wrap_str is true, wrap the text and take at most 2 rows if wrap_str: wrapped_lines = wrap(s, max_column_width) if len(wrapped_lines) == 1: return wrapped_lines[0] last_line = wrapped_lines[1] if len(last_line) >= max_column_width: last_line = _truncate_respect_unicode(last_line, max_column_width - 4) ret = wrapped_lines[0] + '\n' + last_line + ' ...' else: ret = _truncate_respect_unicode(s, max_column_width - 4) + '...' if sys.version_info.major < 3: return unicode(ret, 'utf-8', errors='replace') else: return ret columns = self.column_names()[:max_columns] columns.reverse() # reverse the order of columns and we will pop from the end num_column_of_last_table = 0 row_of_tables = [] # let's build a list of tables with max_columns # each table should satisfy, max_row_width, and max_column_width while len(columns) > 0: tbl = PrettyTable() table_width = 0 num_column_of_last_table = 0 while len(columns) > 0: col = columns.pop() # check the max length of element in the column if len(headsf) > 0: col_width = min(max_column_width, max(len(str(x)) for x in headsf[col])) else: col_width = max_column_width if (table_width + col_width < max_row_width): # truncate the header if necessary header = _truncate_str(col, wrap_text) tbl.add_column(header, [_truncate_str(_value_to_str(x), wrap_text) for x in headsf[col]]) table_width = str(tbl).find('\n') num_column_of_last_table += 1 else: # the column does not fit in the current table, push it back to columns columns.append(col) break tbl.align = 'c' row_of_tables.append(tbl) # add a column of all "..." if there are more columns than displayed if self.num_columns() > max_columns: row_of_tables[-1].add_column('...', ['...'] * len(headsf)) num_column_of_last_table += 1 # add a row of all "..." if there are more rows than displayed if self.__has_size__() and self.num_rows() > headsf.num_rows(): row_of_tables[-1].add_row(['...'] * num_column_of_last_table) return row_of_tables def print_rows(self, num_rows=10, num_columns=40, max_column_width=30, max_row_width=80, output_file=None): """ Print the first M rows and N columns of the SFrame in human readable format. Parameters ---------- num_rows : int, optional Number of rows to print. num_columns : int, optional Number of columns to print. max_column_width : int, optional Maximum width of a column. Columns use fewer characters if possible. 
max_row_width : int, optional Maximum width of a printed row. Columns beyond this width wrap to a new line. `max_row_width` is automatically reset to be the larger of itself and `max_column_width`. output_file: file, optional The stream or file that receives the output. By default the output goes to sys.stdout, but it can also be redirected to a file or a string (using an object of type StringIO). See Also -------- head, tail """ if output_file is None: output_file = sys.stdout max_row_width = max(max_row_width, max_column_width + 1) printed_sf = self._imagecols_to_stringcols(num_rows) row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False, max_rows_to_display=num_rows, max_columns=num_columns, max_column_width=max_column_width, max_row_width=max_row_width) footer = "[%d rows x %d columns]\n" % self.shape print('\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer, file=output_file) def _imagecols_to_stringcols(self, num_rows=10): # A list of column types types = self.column_types() # A list of indexable column names names = self.column_names() # Constructing names of sframe columns that are of image type image_column_names = [names[i] for i in range(len(names)) if types[i] == _Image] #If there are image-type columns, copy the SFrame and cast the top MAX_NUM_ROWS_TO_DISPLAY of those columns to string printed_sf = self.__copy__() if len(image_column_names) > 0: for t in names: if t in image_column_names: printed_sf[t] = self[t].astype(str) return printed_sf.head(num_rows) def __str_impl__(self, num_rows=10, footer=True): """ Returns a string containing the first num_rows elements of the frame, along with a description of the frame. """ MAX_ROWS_TO_DISPLAY = num_rows printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY) row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=False, max_rows_to_display=MAX_ROWS_TO_DISPLAY) is_empty = len(printed_sf) == 0 if (not footer): return (is_empty, '\n'.join([str(tb) for tb in row_of_tables])) if self.__has_size__(): footer = '[%d rows x %d columns]\n' % self.shape if (self.num_rows() > MAX_ROWS_TO_DISPLAY): footer += '\n'.join(FOOTER_STRS) else: footer = '[? rows x %d columns]\n' % self.num_columns() footer += '\n'.join(LAZY_FOOTER_STRS) return (is_empty, '\n'.join([str(tb) for tb in row_of_tables]) + "\n" + footer) def __str__(self, num_rows=10, footer=True): """ Returns a string containing the first 10 elements of the frame, along with a description of the frame. """ return self.__str_impl__(num_rows, footer)[1] def _repr_html_(self): MAX_ROWS_TO_DISPLAY = 10 printed_sf = self._imagecols_to_stringcols(MAX_ROWS_TO_DISPLAY) row_of_tables = printed_sf.__get_pretty_tables__(wrap_text=True, max_row_width=120, max_columns=40, max_column_width=25, max_rows_to_display=MAX_ROWS_TO_DISPLAY) if self.__has_size__(): footer = '[%d rows x %d columns]<br/>' % self.shape if (self.num_rows() > MAX_ROWS_TO_DISPLAY): footer += '<br/>'.join(FOOTER_STRS) else: footer = '[? rows x %d columns]<br/>' % self.num_columns() footer += '<br/>'.join(LAZY_FOOTER_STRS) begin = '<div style="max-height:1000px;max-width:1500px;overflow:auto;">' end = '\n</div>' return begin + '\n'.join([tb.get_html_string(format=True) for tb in row_of_tables]) + "\n" + footer + end def __nonzero__(self): """ Returns true if the frame is not empty. """ return self.num_rows() != 0 def __len__(self): """ Returns the number of rows of the sframe. """ return self.num_rows() def __copy__(self): """ Returns a shallow copy of the sframe. 
""" return self.select_columns(self.column_names()) def __deepcopy__(self, memo): """ Returns a deep copy of the sframe. As the data in an SFrame is immutable, this is identical to __copy__. """ return self.__copy__() def copy(self): """ Returns a shallow copy of the sframe. """ return self.__copy__() def __eq__(self, other): raise NotImplementedError def __ne__(self, other): raise NotImplementedError def _row_selector(self, other): """ Where other is an SArray of identical length as the current Frame, this returns a selection of a subset of rows in the current SFrame where the corresponding row in the selector is non-zero. """ if type(other) is SArray: if self.__has_size__() and other.__has_size__() and len(other) != len(self): raise IndexError("Cannot perform logical indexing on arrays of different length.") with cython_context(): return SFrame(_proxy=self.__proxy__.logical_filter(other.__proxy__)) @property def dtype(self): """ The type of each column. Returns ------- out : list[type] Column types of the SFrame. See Also -------- column_types """ return self.column_types() def num_rows(self): """ The number of rows in this SFrame. Returns ------- out : int Number of rows in the SFrame. See Also -------- num_columns """ return self.__proxy__.num_rows() def num_columns(self): """ The number of columns in this SFrame. Returns ------- out : int Number of columns in the SFrame. See Also -------- num_rows """ return self.__proxy__.num_columns() def column_names(self): """ The name of each column in the SFrame. Returns ------- out : list[string] Column names of the SFrame. See Also -------- rename """ return self.__proxy__.column_names() def column_types(self): """ The type of each column in the SFrame. Returns ------- out : list[type] Column types of the SFrame. See Also -------- dtype """ return self.__proxy__.dtype() def head(self, n=10): """ The first n rows of the SFrame. Parameters ---------- n : int, optional The number of rows to fetch. Returns ------- out : SFrame A new SFrame which contains the first n rows of the current SFrame See Also -------- tail, print_rows """ return SFrame(_proxy=self.__proxy__.head(n)) def to_dataframe(self): """ Convert this SFrame to pandas.DataFrame. This operation will construct a pandas.DataFrame in memory. Care must be taken when size of the returned object is big. Returns ------- out : pandas.DataFrame The dataframe which contains all rows of SFrame """ assert HAS_PANDAS, 'pandas is not installed.' df = pandas.DataFrame() for i in range(self.num_columns()): column_name = self.column_names()[i] df[column_name] = list(self[column_name]) if len(df[column_name]) == 0: df[column_name] = df[column_name].astype(self.column_types()[i]) return df def to_numpy(self): """ Converts this SFrame to a numpy array This operation will construct a numpy array in memory. Care must be taken when size of the returned object is big. Returns ------- out : numpy.ndarray A Numpy Array containing all the values of the SFrame """ assert HAS_NUMPY, 'numpy is not installed.' import numpy return numpy.transpose(numpy.asarray([self[x] for x in self.column_names()])) def tail(self, n=10): """ The last n rows of the SFrame. Parameters ---------- n : int, optional The number of rows to fetch. 
Returns ------- out : SFrame A new SFrame which contains the last n rows of the current SFrame See Also -------- head, print_rows """ return SFrame(_proxy=self.__proxy__.tail(n)) def apply(self, fn, dtype=None, seed=None): """ Transform each row to an :class:`~turicreate.SArray` according to a specified function. Returns a new SArray of ``dtype`` where each element in this SArray is transformed by `fn(x)` where `x` is a single row in the sframe represented as a dictionary. The ``fn`` should return exactly one value which can be cast into type ``dtype``. If ``dtype`` is not specified, the first 100 rows of the SFrame are used to make a guess of the target data type. Parameters ---------- fn : function The function to transform each row of the SFrame. The return type should be convertible to `dtype` if `dtype` is not None. This can also be a toolkit extension function which is compiled as a native shared library using SDK. dtype : dtype, optional The dtype of the new SArray. If None, the first 100 elements of the array are used to guess the target data type. seed : int, optional Used as the seed if a random number generator is included in `fn`. Returns ------- out : SArray The SArray transformed by fn. Each element of the SArray is of type ``dtype`` Examples -------- Concatenate strings from several columns: >>> sf = turicreate.SFrame({'user_id': [1, 2, 3], 'movie_id': [3, 3, 6], 'rating': [4, 5, 1]}) >>> sf.apply(lambda x: str(x['user_id']) + str(x['movie_id']) + str(x['rating'])) dtype: str Rows: 3 ['134', '235', '361'] """ assert callable(fn), "Input must be callable" test_sf = self[:10] dryrun = [fn(row) for row in test_sf] if dtype is None: dtype = SArray(dryrun).dtype if seed is None: seed = abs(hash("%0.20f" % time.time())) % (2 ** 31) nativefn = None try: from .. import extensions as extensions nativefn = extensions._build_native_function_call(fn) except: pass if nativefn is not None: # this is a toolkit lambda. We can do something about it with cython_context(): return SArray(_proxy=self.__proxy__.transform_native(nativefn, dtype, seed)) with cython_context(): return SArray(_proxy=self.__proxy__.transform(fn, dtype, seed)) def flat_map(self, column_names, fn, column_types='auto', seed=None): """ Map each row of the SFrame to multiple rows in a new SFrame via a function. The output of `fn` must have type List[List[...]]. Each inner list will be a single row in the new output, and the collection of these rows within the outer list make up the data for the output SFrame. All rows must have the same length and the same order of types to make sure the result columns are homogeneously typed. For example, if the first element emitted into in the outer list by `fn` is [43, 2.3, 'string'], then all other elements emitted into the outer list must be a list with three elements, where the first is an int, second is a float, and third is a string. If column_types is not specified, the first 10 rows of the SFrame are used to determine the column types of the returned sframe. Parameters ---------- column_names : list[str] The column names for the returned SFrame. fn : function The function that maps each of the sframe row into multiple rows, returning List[List[...]]. All outputted rows must have the same length and order of types. column_types : list[type], optional The column types of the output SFrame. Default value will be automatically inferred by running `fn` on the first 10 rows of the input. If the types cannot be inferred from the first 10 rows, an error is raised. 
seed : int, optional Used as the seed if a random number generator is included in `fn`. Returns ------- out : SFrame A new SFrame containing the results of the flat_map of the original SFrame. Examples --------- Repeat each row according to the value in the 'number' column. >>> sf = turicreate.SFrame({'letter': ['a', 'b', 'c'], ... 'number': [1, 2, 3]}) >>> sf.flat_map(['number', 'letter'], ... lambda x: [list(x.itervalues()) for i in range(0, x['number'])]) +--------+--------+ | number | letter | +--------+--------+ | 1 | a | | 2 | b | | 2 | b | | 3 | c | | 3 | c | | 3 | c | +--------+--------+ [6 rows x 2 columns] """ assert callable(fn), "Input must be callable" if seed is None: seed = abs(hash("%0.20f" % time.time())) % (2 ** 31) # determine the column_types if column_types == 'auto': types = set() sample = self[0:10] results = [fn(row) for row in sample] for rows in results: if type(rows) is not list: raise TypeError("Output type of the lambda function must be a list of lists") # note: this skips empty lists for row in rows: if type(row) is not list: raise TypeError("Output type of the lambda function must be a list of lists") types.add(tuple([type(v) for v in row])) if len(types) == 0: raise TypeError( "Could not infer output column types from the first ten rows " +\ "of the SFrame. Please use the 'column_types' parameter to " +\ "set the types.") if len(types) > 1: raise TypeError("Mapped rows must have the same length and types") column_types = list(types.pop()) assert type(column_types) is list, "'column_types' must be a list." assert len(column_types) == len(column_names), "Number of output columns must match the size of column names" with cython_context(): return SFrame(_proxy=self.__proxy__.flat_map(fn, column_names, column_types, seed)) def sample(self, fraction, seed=None, exact=False): """ Sample a fraction of the current SFrame's rows. Parameters ---------- fraction : float Fraction of the rows to fetch. Must be between 0 and 1. if exact is False (default), the number of rows returned is approximately the fraction times the number of rows. seed : int, optional Seed for the random number generator used to sample. exact: bool, optional Defaults to False. If exact=True, an exact fraction is returned, but at a performance penalty. Returns ------- out : SFrame A new SFrame containing sampled rows of the current SFrame. Examples -------- Suppose we have an SFrame with 6,145 rows. >>> import random >>> sf = SFrame({'id': range(0, 6145)}) Retrieve about 30% of the SFrame rows with repeatable results by setting the random seed. >>> len(sf.sample(.3, seed=5)) 1783 """ if seed is None: seed = abs(hash("%0.20f" % time.time())) % (2 ** 31) if (fraction > 1 or fraction < 0): raise ValueError('Invalid sampling rate: ' + str(fraction)) if (self.num_rows() == 0 or self.num_columns() == 0): return self else: with cython_context(): return SFrame(_proxy=self.__proxy__.sample(fraction, seed, exact)) def random_split(self, fraction, seed=None, exact=False): """ Randomly split the rows of an SFrame into two SFrames. The first SFrame contains *M* rows, sampled uniformly (without replacement) from the original SFrame. *M* is approximately the fraction times the original number of rows. The second SFrame contains the remaining rows of the original SFrame. An exact fraction partition can be optionally obtained by setting exact=True. Parameters ---------- fraction : float Fraction of the rows to fetch. Must be between 0 and 1. 
if exact is False (default), the number of rows returned is approximately the fraction times the number of rows. seed : int, optional Seed for the random number generator used to split. exact: bool, optional Defaults to False. If exact=True, an exact fraction is returned, but at a performance penalty. Returns ------- out : tuple [SFrame] Two new SFrames. Examples -------- Suppose we have an SFrame with 1,024 rows and we want to randomly split it into training and testing datasets with about a 90%/10% split. >>> sf = turicreate.SFrame({'id': range(1024)}) >>> sf_train, sf_test = sf.random_split(.9, seed=5) >>> print(len(sf_train), len(sf_test)) 922 102 """ if (fraction > 1 or fraction < 0): raise ValueError('Invalid sampling rate: ' + str(fraction)) if (self.num_rows() == 0 or self.num_columns() == 0): return (SFrame(), SFrame()) if seed is None: # Include the nanosecond component as well. seed = abs(hash("%0.20f" % time.time())) % (2 ** 31) # The server side requires this to be an int, so cast if we can try: seed = int(seed) except ValueError: raise ValueError('The \'seed\' parameter must be of type int.') with cython_context(): proxy_pair = self.__proxy__.random_split(fraction, seed, exact) return (SFrame(data=[], _proxy=proxy_pair[0]), SFrame(data=[], _proxy=proxy_pair[1])) def topk(self, column_name, k=10, reverse=False): """ Get top k rows according to the given column. Result is according to and sorted by `column_name` in the given order (default is descending). When `k` is small, `topk` is more efficient than `sort`. Parameters ---------- column_name : string The column to sort on k : int, optional The number of rows to return reverse : bool, optional If True, return the top k rows in ascending order, otherwise, in descending order. Returns ------- out : SFrame an SFrame containing the top k rows sorted by column_name. See Also -------- sort Examples -------- >>> sf = turicreate.SFrame({'id': range(1000)}) >>> sf['value'] = -sf['id'] >>> sf.topk('id', k=3) +--------+--------+ | id | value | +--------+--------+ | 999 | -999 | | 998 | -998 | | 997 | -997 | +--------+--------+ [3 rows x 2 columns] >>> sf.topk('value', k=3) +--------+--------+ | id | value | +--------+--------+ | 1 | -1 | | 2 | -2 | | 3 | -3 | +--------+--------+ [3 rows x 2 columns] """ if type(column_name) is not str: raise TypeError("column_name must be a string") sf = self[self[column_name].is_topk(k, reverse)] return sf.sort(column_name, ascending=reverse) def save(self, filename, format=None): """ Save the SFrame to a file system for later use. Parameters ---------- filename : string The location to save the SFrame. Either a local directory or a remote URL. If the format is 'binary', a directory will be created at the location which will contain the sframe. format : {'binary', 'csv', 'json'}, optional Format in which to save the SFrame. Binary saved SFrames can be loaded much faster and without any format conversion losses. If not given, will try to infer the format from filename given. If file name ends with 'csv' or '.csv.gz', then save as 'csv' format, otherwise save as 'binary' format. See export_csv for more csv saving options. 

        See Also
        --------
        load_sframe, SFrame

        Examples
        --------
        >>> # Save the sframe into binary format
        >>> sf.save('data/training_data_sframe')

        >>> # Save the sframe into csv format
        >>> sf.save('data/training_data.csv', format='csv')
        """

        if format is None:
            if filename.endswith(('.csv', '.csv.gz')):
                format = 'csv'
            elif filename.endswith('.json'):
                format = 'json'
            else:
                format = 'binary'
        else:
            if format == 'csv':
                if not filename.endswith(('.csv', '.csv.gz')):
                    filename = filename + '.csv'
            elif format != 'binary' and format != 'json':
                raise ValueError("Invalid format: {}. Supported formats are 'csv', 'binary', and 'json'".format(format))

        ## Save the SFrame
        url = _make_internal_url(filename)
        with cython_context():
            if format == 'binary':
                self.__proxy__.save(url)
            elif format == 'csv':
                assert filename.endswith(('.csv', '.csv.gz'))
                self.__proxy__.save_as_csv(url, {})
            elif format == 'json':
                self.export_json(url)
            else:
                raise ValueError("Unsupported format: {}".format(format))

    def export_csv(self, filename, delimiter=',', line_terminator='\n',
                   header=True, quote_level=csv.QUOTE_NONNUMERIC, double_quote=True,
                   escape_char='\\', quote_char='\"', na_rep='',
                   file_header='', file_footer='', line_prefix='',
                   _no_prefix_on_first_value=False, **kwargs):
        """
        Writes an SFrame to a CSV file.

        Parameters
        ----------
        filename : string
            The location to save the CSV.

        delimiter : string, optional
            This describes the delimiter used for writing csv files.

        line_terminator: string, optional
            The newline character.

        header : bool, optional
            If True, the column names are emitted as a header.

        quote_level: csv.QUOTE_ALL | csv.QUOTE_NONE | csv.QUOTE_NONNUMERIC, optional
            The quoting level. If csv.QUOTE_ALL, every field is quoted. If
            csv.QUOTE_NONE, no field is quoted. If csv.QUOTE_NONNUMERIC, only
            non-numeric fields are quoted. csv.QUOTE_MINIMAL is interpreted as
            csv.QUOTE_NONNUMERIC.

        double_quote : bool, optional
            If True, quotes are escaped as two consecutive quotes.

        escape_char : string, optional
            Character which begins a C escape sequence.

        quote_char: string, optional
            Character used to quote fields.

        na_rep: string, optional
            The value used to denote a missing value.
        file_header: string, optional
            A string printed to the start of the file.

        file_footer: string, optional
            A string printed to the end of the file.

        line_prefix: string, optional
            A string printed at the start of each value line.
        """
        # Pandas argument compatibility
        if "sep" in kwargs:
            delimiter = kwargs['sep']
            del kwargs['sep']
        if "quotechar" in kwargs:
            quote_char = kwargs['quotechar']
            del kwargs['quotechar']
        if "doublequote" in kwargs:
            double_quote = kwargs['doublequote']
            del kwargs['doublequote']
        if "lineterminator" in kwargs:
            line_terminator = kwargs['lineterminator']
            del kwargs['lineterminator']
        if len(kwargs) > 0:
            raise TypeError("Unexpected keyword arguments " + str(list(kwargs.keys())))

        write_csv_options = {}
        write_csv_options['delimiter'] = delimiter
        write_csv_options['escape_char'] = escape_char
        write_csv_options['double_quote'] = double_quote
        write_csv_options['quote_char'] = quote_char
        if quote_level == csv.QUOTE_MINIMAL:
            write_csv_options['quote_level'] = 0
        elif quote_level == csv.QUOTE_ALL:
            write_csv_options['quote_level'] = 1
        elif quote_level == csv.QUOTE_NONNUMERIC:
            write_csv_options['quote_level'] = 2
        elif quote_level == csv.QUOTE_NONE:
            write_csv_options['quote_level'] = 3
        write_csv_options['header'] = header
        write_csv_options['line_terminator'] = line_terminator
        write_csv_options['na_value'] = na_rep
        write_csv_options['file_header'] = file_header
        write_csv_options['file_footer'] = file_footer
        write_csv_options['line_prefix'] = line_prefix
        # undocumented option. Disables line prefix on the first value line
        write_csv_options['_no_prefix_on_first_value'] = _no_prefix_on_first_value
        url = _make_internal_url(filename)
        self.__proxy__.save_as_csv(url, write_csv_options)

    def export_json(self, filename, orient='records'):
        """
        Writes an SFrame to a JSON file.

        Parameters
        ----------
        filename : string
            The location to save the JSON file.

        orient : string, optional. Either "records" or "lines"
            If orient="records", the file is saved as a single JSON array.
            If orient="lines", the file is saved with one JSON value per line.

        Examples
        --------
        The orient parameter describes the format of the saved JSON file. If
        orient="records", the output will be a single JSON array where each
        array element is a dictionary describing the row.

        >>> g
        Columns:
                a   int
                b   int
        Rows: 3
        Data:
        +---+---+
        | a | b |
        +---+---+
        | 1 | 1 |
        | 2 | 2 |
        | 3 | 3 |
        +---+---+
        >>> g.export_json('output.json', orient='records')
        >>> !cat output.json
        [
        {'a':1,'b':1},
        {'a':2,'b':2},
        {'a':3,'b':3},
        ]

        If orient="lines", each row is emitted as a JSON dictionary on its
        own line of the file.

        >>> g
        Columns:
                a   int
                b   int
        Rows: 3
        Data:
        +---+---+
        | a | b |
        +---+---+
        | 1 | 1 |
        | 2 | 2 |
        | 3 | 3 |
        +---+---+
        >>> g.export_json('output.json', orient='lines')
        >>> !cat output.json
        {'a':1,'b':1}
        {'a':2,'b':2}
        {'a':3,'b':3}
        """
        if orient == "records":
            self.pack_columns(dtype=dict).export_csv(
                    filename, file_header='[', file_footer=']',
                    header=False, double_quote=False,
                    quote_level=csv.QUOTE_NONE,
                    line_prefix=',',
                    _no_prefix_on_first_value=True)
        elif orient == "lines":
            self.pack_columns(dtype=dict).export_csv(
                    filename, header=False, double_quote=False,
                    quote_level=csv.QUOTE_NONE)
        else:
            raise ValueError("Invalid value for orient parameter (" + str(orient) + ")")

    def _save_reference(self, filename):
        """
        Performs an incomplete save of an existing SFrame into a directory.
        This saved SFrame may reference SFrames in other locations in the same
        filesystem for certain resources.

        Parameters
        ----------
        filename : string
            The location to save the SFrame.
Either a local directory or a remote URL. See Also -------- load_sframe, SFrame Examples -------- >>> # Save the sframe into binary format >>> sf.save_reference('data/training_data_sframe') """ ## Save the SFrame url = _make_internal_url(filename) with cython_context(): self.__proxy__.save_reference(url) def select_column(self, column_name): """ Get a reference to the :class:`~turicreate.SArray` that corresponds with the given column_name. Throws an exception if the column_name is something other than a string or if the column name is not found. Parameters ---------- column_name: str The column name. Returns ------- out : SArray The SArray that is referred by ``column_name``. See Also -------- select_columns Examples -------- >>> sf = turicreate.SFrame({'user_id': [1,2,3], ... 'user_name': ['alice', 'bob', 'charlie']}) >>> # This line is equivalent to `sa = sf['user_name']` >>> sa = sf.select_column('user_name') >>> sa dtype: str Rows: 3 ['alice', 'bob', 'charlie'] """ if not isinstance(column_name, str): raise TypeError("Invalid column_nametype: must be str") with cython_context(): return SArray(data=[], _proxy=self.__proxy__.select_column(column_name)) def select_columns(self, column_names): """ Selects all columns where the name of the column or the type of column is included in the column_names. An exception is raised if duplicate columns are selected i.e. sf.select_columns(['a','a']), or non-existent columns are selected. Throws an exception for all other input types. Parameters ---------- column_names: list[str or type] The list of column names or a list of types. Returns ------- out : SFrame A new SFrame that is made up of the columns referred to in ``column_names`` from the current SFrame. See Also -------- select_column Examples -------- >>> sf = turicreate.SFrame({'user_id': [1,2,3], ... 'user_name': ['alice', 'bob', 'charlie'], ... 'zipcode': [98101, 98102, 98103] ... }) >>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]` >>> sf2 = sf.select_columns(['user_id', 'zipcode']) >>> sf2 +---------+---------+ | user_id | zipcode | +---------+---------+ | 1 | 98101 | | 2 | 98102 | | 3 | 98103 | +---------+---------+ [3 rows x 2 columns] """ if not _is_non_string_iterable(column_names): raise TypeError("column_names must be an iterable") if not (all([isinstance(x, six.string_types) or isinstance(x, type) or isinstance(x, bytes) for x in column_names])): raise TypeError("Invalid key type: must be str, unicode, bytes or type") requested_str_columns = [s for s in column_names if isinstance(s, six.string_types)] # Make sure there are no duplicates keys from collections import Counter column_names_counter = Counter(column_names) if (len(column_names)) != len(column_names_counter): for key in column_names_counter: if column_names_counter[key] > 1: raise ValueError("There are duplicate keys in key list: '" + key + "'") colnames_and_types = list(zip(self.column_names(), self.column_types())) # Ok. we want the string columns to be in the ordering defined by the # argument. And then all the type selection columns. selected_columns = requested_str_columns typelist = [s for s in column_names if isinstance(s, type)] # next the type selection columns # loop through all the columns, adding all columns with types in # typelist. But don't add a column if it has already been added. 
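        # e.g. for the sf in the docstring above, sf.select_columns(['user_id', str])
        # yields the columns ['user_id', 'user_name']: the explicitly named column
        # comes first, then every other str-typed column in its original order,
        # skipping any column that was already selected by name.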
for i in colnames_and_types: if i[1] in typelist and i[0] not in selected_columns: selected_columns += [i[0]] selected_columns = selected_columns with cython_context(): return SFrame(data=[], _proxy=self.__proxy__.select_columns(selected_columns)) def add_column(self, data, column_name="", inplace=False): """ Returns an SFrame with a new column. The number of elements in the data given must match the length of every other column of the SFrame. If no name is given, a default name is chosen. If inplace == False (default) this operation does not modify the current SFrame, returning a new SFrame. If inplace == True, this operation modifies the current SFrame, returning self. Parameters ---------- data : SArray The 'column' of data to add. column_name : string, optional The name of the column. If no name is given, a default name is chosen. inplace : bool, optional. Defaults to False. Whether the SFrame is modified in place. Returns ------- out : SFrame The current SFrame. See Also -------- add_columns Examples -------- >>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']}) >>> sa = turicreate.SArray(['cat', 'dog', 'fossa']) >>> # This line is equivalent to `sf['species'] = sa` >>> res = sf.add_column(sa, 'species') >>> res +----+-----+---------+ | id | val | species | +----+-----+---------+ | 1 | A | cat | | 2 | B | dog | | 3 | C | fossa | +----+-----+---------+ [3 rows x 3 columns] """ # Check type for pandas dataframe or SArray? if not isinstance(data, SArray): if isinstance(data, _Iterable): data = SArray(data) else: if self.num_columns() == 0: data = SArray([data]) else: data = SArray.from_const(data, self.num_rows()) if not isinstance(column_name, str): raise TypeError("Invalid column name: must be str") if inplace: ret = self else: ret = self.copy() with cython_context(): ret.__proxy__.add_column(data.__proxy__, column_name) ret._cache = None return ret def add_columns(self, data, column_names=None, inplace=False): """ Returns an SFrame with multiple columns added. The number of elements in all columns must match the length of every other column of the SFrame. If inplace == False (default) this operation does not modify the current SFrame, returning a new SFrame. If inplace == True, this operation modifies the current SFrame, returning self. Parameters ---------- data : list[SArray] or SFrame The columns to add. column_names: list of string, optional A list of column names. All names must be specified. ``column_names`` is ignored if data is an SFrame. inplace : bool, optional. Defaults to False. Whether the SFrame is modified in place. Returns ------- out : SFrame The current SFrame. See Also -------- add_column Examples -------- >>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']}) >>> sf2 = turicreate.SFrame({'species': ['cat', 'dog', 'fossa'], ... 
'age': [3, 5, 9]}) >>> res = sf.add_columns(sf2) >>> res +----+-----+-----+---------+ | id | val | age | species | +----+-----+-----+---------+ | 1 | A | 3 | cat | | 2 | B | 5 | dog | | 3 | C | 9 | fossa | +----+-----+-----+---------+ [3 rows x 4 columns] """ datalist = data if isinstance(data, SFrame): other = data datalist = [other.select_column(name) for name in other.column_names()] column_names = other.column_names() my_columns = set(self.column_names()) for name in column_names: if name in my_columns: raise ValueError("Column '" + name + "' already exists in current SFrame") else: if not _is_non_string_iterable(datalist): raise TypeError("datalist must be an iterable") if not _is_non_string_iterable(column_names): raise TypeError("column_names must be an iterable") if not all([isinstance(x, SArray) for x in datalist]): raise TypeError("Must give column as SArray") if not all([isinstance(x, str) for x in column_names]): raise TypeError("Invalid column name in list : must all be str") if inplace: ret = self else: ret = self.copy() with cython_context(): ret.__proxy__.add_columns([x.__proxy__ for x in datalist], column_names) ret._cache = None return ret def remove_column(self, column_name, inplace=False): """ Returns an SFrame with a column removed. If inplace == False (default) this operation does not modify the current SFrame, returning a new SFrame. If inplace == True, this operation modifies the current SFrame, returning self. Parameters ---------- column_name : string The name of the column to remove. inplace : bool, optional. Defaults to False. Whether the SFrame is modified in place. Returns ------- out : SFrame The SFrame with given column removed. Examples -------- >>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']}) >>> # This is equivalent to `del sf['val']` >>> res = sf.remove_column('val') >>> res +----+ | id | +----+ | 1 | | 2 | | 3 | +----+ [3 rows x 1 columns] """ column_name = str(column_name) if column_name not in self.column_names(): raise KeyError('Cannot find column %s' % column_name) colid = self.column_names().index(column_name) if inplace: ret = self else: ret = self.copy() with cython_context(): ret.__proxy__.remove_column(colid) ret._cache = None return ret def remove_columns(self, column_names, inplace=False): """ Returns an SFrame with one or more columns removed. If inplace == False (default) this operation does not modify the current SFrame, returning a new SFrame. If inplace == True, this operation modifies the current SFrame, returning self. Parameters ---------- column_names : list or iterable A list or iterable of column names. inplace : bool, optional. Defaults to False. Whether the SFrame is modified in place. Returns ------- out : SFrame The SFrame with given columns removed. 
Examples -------- >>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val1': ['A', 'B', 'C'], 'val2' : [10, 11, 12]}) >>> res = sf.remove_columns(['val1', 'val2']) >>> res +----+ | id | +----+ | 1 | | 2 | | 3 | +----+ [3 rows x 1 columns] """ column_names = list(column_names) existing_columns = dict((k, i) for i, k in enumerate(self.column_names())) for name in column_names: if name not in existing_columns: raise KeyError('Cannot find column %s' % name) # Delete it going backwards so we don't invalidate indices deletion_indices = sorted(existing_columns[name] for name in column_names) if inplace: ret = self else: ret = self.copy() for colid in reversed(deletion_indices): with cython_context(): ret.__proxy__.remove_column(colid) ret._cache = None return ret def swap_columns(self, column_name_1, column_name_2, inplace=False): """ Returns an SFrame with two column positions swapped. If inplace == False (default) this operation does not modify the current SFrame, returning a new SFrame. If inplace == True, this operation modifies the current SFrame, returning self. Parameters ---------- column_name_1 : string Name of column to swap column_name_2 : string Name of other column to swap inplace : bool, optional. Defaults to False. Whether the SFrame is modified in place. Returns ------- out : SFrame The SFrame with swapped columns. Examples -------- >>> sf = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']}) >>> res = sf.swap_columns('id', 'val') >>> res +-----+-----+ | val | id | +-----+-----+ | A | 1 | | B | 2 | | C | 3 | +----+-----+ [3 rows x 2 columns] """ colnames = self.column_names() colid_1 = colnames.index(column_name_1) colid_2 = colnames.index(column_name_2) if inplace: ret = self else: ret = self.copy() with cython_context(): ret.__proxy__.swap_columns(colid_1, colid_2) ret._cache = None return ret def rename(self, names, inplace=False): """ Returns an SFrame with columns renamed. ``names`` is expected to be a dict specifying the old and new names. This changes the names of the columns given as the keys and replaces them with the names given as the values. If inplace == False (default) this operation does not modify the current SFrame, returning a new SFrame. If inplace == True, this operation modifies the current SFrame, returning self. Parameters ---------- names : dict [string, string] Dictionary of [old_name, new_name] inplace : bool, optional. Defaults to False. Whether the SFrame is modified in place. Returns ------- out : SFrame The current SFrame. See Also -------- column_names Examples -------- >>> sf = SFrame({'X1': ['Alice','Bob'], ... 'X2': ['123 Fake Street','456 Fake Street']}) >>> res = sf.rename({'X1': 'name', 'X2':'address'}) >>> res +-------+-----------------+ | name | address | +-------+-----------------+ | Alice | 123 Fake Street | | Bob | 456 Fake Street | +-------+-----------------+ [2 rows x 2 columns] """ if (type(names) is not dict): raise TypeError('names must be a dictionary: oldname -> newname') all_columns = set(self.column_names()) for k in names: if not k in all_columns: raise ValueError('Cannot find column %s in the SFrame' % k) if inplace: ret = self else: ret = self.copy() with cython_context(): for k in names: colid = ret.column_names().index(k) ret.__proxy__.set_column_name(colid, names[k]) ret._cache = None return ret def __getitem__(self, key): """ This method does things based on the type of `key`. 
If `key` is: * str selects column with name 'key' * type selects all columns with types matching the type * list of str or type selects all columns with names or type in the list * SArray Performs a logical filter. Expects given SArray to be the same length as all columns in current SFrame. Every row corresponding with an entry in the given SArray that is equivalent to False is filtered from the result. * int Returns a single row of the SFrame (the `key`th one) as a dictionary. * slice Returns an SFrame including only the sliced rows. """ if type(key) is SArray: return self._row_selector(key) elif isinstance(key, six.string_types): if six.PY2 and type(key) == unicode: key = key.encode('utf-8') return self.select_column(key) elif type(key) is type: return self.select_columns([key]) elif _is_non_string_iterable(key): return self.select_columns(key) elif isinstance(key, numbers.Integral): sf_len = len(self) if key < 0: key = sf_len + key if key >= sf_len: raise IndexError("SFrame index out of range") if not hasattr(self, '_cache') or self._cache is None: self._cache = {} try: lb, ub, value_list = self._cache["getitem_cache"] if lb <= key < ub: return value_list[int(key - lb)] except KeyError: pass # Not in cache, need to grab it. Smaller here than with sarray # Do we have a good block size that won't cause memory to blow up? if not "getitem_cache_blocksize" in self._cache: block_size = \ (8*1024) // sum( (2 if dt in [int, long, float] else 8) for dt in self.column_types()) block_size = max(16, block_size) self._cache["getitem_cache_blocksize"] = block_size else: block_size = self._cache["getitem_cache_blocksize"] block_num = int(key // block_size) lb = block_num * block_size ub = min(sf_len, lb + block_size) val_list = list(SFrame(_proxy = self.__proxy__.copy_range(lb, 1, ub))) self._cache["getitem_cache"] = (lb, ub, val_list) return val_list[int(key - lb)] elif type(key) is slice: start = key.start stop = key.stop step = key.step if start is None: start = 0 if stop is None: stop = len(self) if step is None: step = 1 # handle negative indices if start < 0: start = len(self) + start if stop < 0: stop = len(self) + stop return SFrame(_proxy = self.__proxy__.copy_range(start, step, stop)) else: raise TypeError("Invalid index type: must be SArray, list, int, or str") def __setitem__(self, key, value): """ A wrapper around add_column(s). Key can be either a list or a str. If value is an SArray, it is added to the SFrame as a column. If it is a constant value (int, str, or float), then a column is created where every entry is equal to the constant value. Existing columns can also be replaced using this wrapper. """ if type(key) is list: self.add_columns(value, key, inplace=True) elif type(key) is str: sa_value = None if (type(value) is SArray): sa_value = value elif _is_non_string_iterable(value): # wrap list, array... to sarray sa_value = SArray(value) else: # create an sarray of constant value sa_value = SArray.from_const(value, self.num_rows()) # set new column if not key in self.column_names(): with cython_context(): self.add_column(sa_value, key, inplace=True) else: # special case if replacing the only column. # server would fail the replacement if the new column has different # length than current one, which doesn't make sense if we are replacing # the only column. 
To support this, we first take out the only column # and then put it back if exception happens single_column = (self.num_columns() == 1) if (single_column): tmpname = key saved_column = self.select_column(key) self.remove_column(key, inplace=True) else: # add the column to a unique column name. tmpname = '__' + '-'.join(self.column_names()) try: self.add_column(sa_value, tmpname, inplace=True) except Exception: if (single_column): self.add_column(saved_column, key, inplace=True) raise if (not single_column): # if add succeeded, remove the column name and rename tmpname->columnname. self.swap_columns(key, tmpname, inplace=True) self.remove_column(key, inplace=True) self.rename({tmpname: key}, inplace=True) else: raise TypeError('Cannot set column with key type ' + str(type(key))) def __delitem__(self, key): """ Wrapper around remove_column. """ self.remove_column(key, inplace=True) def materialize(self): """ For an SFrame that is lazily evaluated, force the persistence of the SFrame to disk, committing all lazy evaluated operations. """ return self.__materialize__() def __materialize__(self): """ For an SFrame that is lazily evaluated, force the persistence of the SFrame to disk, committing all lazy evaluated operations. """ with cython_context(): self.__proxy__.materialize() def is_materialized(self): """ Returns whether or not the SFrame has been materialized. """ return self.__is_materialized__() def __is_materialized__(self): """ Returns whether or not the SFrame has been materialized. """ return self.__proxy__.is_materialized() def __has_size__(self): """ Returns whether or not the size of the SFrame is known. """ return self.__proxy__.has_size() def __query_plan_str__(self): """ Returns the query plan as a dot graph string """ return self.__proxy__.query_plan_string() def __iter__(self): """ Provides an iterator to the rows of the SFrame. """ def generator(): elems_at_a_time = 262144 self.__proxy__.begin_iterator() ret = self.__proxy__.iterator_get_next(elems_at_a_time) column_names = self.column_names() while(True): for j in ret: yield dict(list(zip(column_names, j))) if len(ret) == elems_at_a_time: ret = self.__proxy__.iterator_get_next(elems_at_a_time) else: break return generator() def append(self, other): """ Add the rows of an SFrame to the end of this SFrame. Both SFrames must have the same set of columns with the same column names and column types. Parameters ---------- other : SFrame Another SFrame whose rows are appended to the current SFrame. Returns ------- out : SFrame The result SFrame from the append operation. 
Examples -------- >>> sf = turicreate.SFrame({'id': [4, 6, 8], 'val': ['D', 'F', 'H']}) >>> sf2 = turicreate.SFrame({'id': [1, 2, 3], 'val': ['A', 'B', 'C']}) >>> sf = sf.append(sf2) >>> sf +----+-----+ | id | val | +----+-----+ | 4 | D | | 6 | F | | 8 | H | | 1 | A | | 2 | B | | 3 | C | +----+-----+ [6 rows x 2 columns] """ if type(other) is not SFrame: raise RuntimeError("SFrame append can only work with SFrame") left_empty = len(self.column_names()) == 0 right_empty = len(other.column_names()) == 0 if (left_empty and right_empty): return SFrame() if (left_empty or right_empty): non_empty_sframe = self if right_empty else other return non_empty_sframe.__copy__() my_column_names = self.column_names() my_column_types = self.column_types() other_column_names = other.column_names() if (len(my_column_names) != len(other_column_names)): raise RuntimeError("Two SFrames have to have the same number of columns") # check if the order of column name is the same column_name_order_match = True for i in range(len(my_column_names)): if other_column_names[i] != my_column_names[i]: column_name_order_match = False break processed_other_frame = other if not column_name_order_match: # we allow name order of two sframes to be different, so we create a new sframe from # "other" sframe to make it has exactly the same shape processed_other_frame = SFrame() for i in range(len(my_column_names)): col_name = my_column_names[i] if(col_name not in other_column_names): raise RuntimeError("Column " + my_column_names[i] + " does not exist in second SFrame") other_column = other.select_column(col_name) processed_other_frame.add_column(other_column, col_name, inplace=True) # check column type if my_column_types[i] != other_column.dtype: raise RuntimeError("Column " + my_column_names[i] + " type is not the same in two SFrames, one is " + str(my_column_types[i]) + ", the other is " + str(other_column.dtype)) with cython_context(): return SFrame(_proxy=self.__proxy__.append(processed_other_frame.__proxy__)) def groupby(self, key_column_names, operations, *args): """ Perform a group on the key_column_names followed by aggregations on the columns listed in operations. The operations parameter is a dictionary that indicates which aggregation operators to use and which columns to use them on. The available operators are SUM, MAX, MIN, COUNT, AVG, VAR, STDV, CONCAT, SELECT_ONE, ARGMIN, ARGMAX, and QUANTILE. For convenience, aggregators MEAN, STD, and VARIANCE are available as synonyms for AVG, STDV, and VAR. See :mod:`~turicreate.aggregate` for more detail on the aggregators. Parameters ---------- key_column_names : string | list[string] Column(s) to group by. Key columns can be of any type other than dictionary. operations : dict, list Dictionary of columns and aggregation operations. Each key is a output column name and each value is an aggregator. This can also be a list of aggregators, in which case column names will be automatically assigned. *args All other remaining arguments will be interpreted in the same way as the operations argument. Returns ------- out_sf : SFrame A new SFrame, with a column for each groupby column and each aggregation operation. See Also -------- aggregate Notes ----- * Numeric aggregators (such as sum, mean, stdev etc.) follow the skip None policy i.e they will omit all missing values from the aggregation. As an example, `sum([None, 5, 10]) = 15` because the `None` value is skipped. * Aggregators have a default value when no values (after skipping all `None` values) are present. 
Default values are `None` for ['ARGMAX', 'ARGMIN', 'AVG', 'STD', 'MEAN', 'MIN', 'MAX'], `0` for ['COUNT' 'COUNT_DISTINCT', 'DISTINCT'] `[]` for 'CONCAT', 'QUANTILE', 'DISTINCT', and `{}` for 'FREQ_COUNT'. Examples -------- Suppose we have an SFrame with movie ratings by many users. >>> import turicreate.aggregate as agg >>> url = 'https://static.turi.com/datasets/rating_data_example.csv' >>> sf = turicreate.SFrame.read_csv(url) >>> sf +---------+----------+--------+ | user_id | movie_id | rating | +---------+----------+--------+ | 25904 | 1663 | 3 | | 25907 | 1663 | 3 | | 25923 | 1663 | 3 | | 25924 | 1663 | 3 | | 25928 | 1663 | 2 | | 25933 | 1663 | 4 | | 25934 | 1663 | 4 | | 25935 | 1663 | 4 | | 25936 | 1663 | 5 | | 25937 | 1663 | 2 | | ... | ... | ... | +---------+----------+--------+ [10000 rows x 3 columns] Compute the number of occurrences of each user. >>> user_count = sf.groupby(key_column_names='user_id', ... operations={'count': agg.COUNT()}) >>> user_count +---------+-------+ | user_id | count | +---------+-------+ | 62361 | 1 | | 30727 | 1 | | 40111 | 1 | | 50513 | 1 | | 35140 | 1 | | 42352 | 1 | | 29667 | 1 | | 46242 | 1 | | 58310 | 1 | | 64614 | 1 | | ... | ... | +---------+-------+ [9852 rows x 2 columns] Compute the mean and standard deviation of ratings per user. >>> user_rating_stats = sf.groupby(key_column_names='user_id', ... operations={ ... 'mean_rating': agg.MEAN('rating'), ... 'std_rating': agg.STD('rating') ... }) >>> user_rating_stats +---------+-------------+------------+ | user_id | mean_rating | std_rating | +---------+-------------+------------+ | 62361 | 5.0 | 0.0 | | 30727 | 4.0 | 0.0 | | 40111 | 2.0 | 0.0 | | 50513 | 4.0 | 0.0 | | 35140 | 4.0 | 0.0 | | 42352 | 5.0 | 0.0 | | 29667 | 4.0 | 0.0 | | 46242 | 5.0 | 0.0 | | 58310 | 2.0 | 0.0 | | 64614 | 2.0 | 0.0 | | ... | ... | ... | +---------+-------------+------------+ [9852 rows x 3 columns] Compute the movie with the minimum rating per user. >>> chosen_movies = sf.groupby(key_column_names='user_id', ... operations={ ... 'worst_movies': agg.ARGMIN('rating','movie_id') ... }) >>> chosen_movies +---------+-------------+ | user_id | worst_movies | +---------+-------------+ | 62361 | 1663 | | 30727 | 1663 | | 40111 | 1663 | | 50513 | 1663 | | 35140 | 1663 | | 42352 | 1663 | | 29667 | 1663 | | 46242 | 1663 | | 58310 | 1663 | | 64614 | 1663 | | ... | ... | +---------+-------------+ [9852 rows x 2 columns] Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user. >>> sf['imdb-ranking'] = sf['rating'] * 10 >>> chosen_movies = sf.groupby(key_column_names='user_id', ... operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie_id')}) >>> chosen_movies +---------+------------------+------------------------+ | user_id | max_rating_movie | max_imdb_ranking_movie | +---------+------------------+------------------------+ | 62361 | 1663 | 16630 | | 30727 | 1663 | 16630 | | 40111 | 1663 | 16630 | | 50513 | 1663 | 16630 | | 35140 | 1663 | 16630 | | 42352 | 1663 | 16630 | | 29667 | 1663 | 16630 | | 46242 | 1663 | 16630 | | 58310 | 1663 | 16630 | | 64614 | 1663 | 16630 | | ... | ... | ... | +---------+------------------+------------------------+ [9852 rows x 3 columns] Compute the movie with the max rating per user. >>> chosen_movies = sf.groupby(key_column_names='user_id', operations={'best_movies': agg.ARGMAX('rating','movie')}) Compute the movie with the max rating per user and also the movie with the maximum imdb-ranking per user. 
>>> chosen_movies = sf.groupby(key_column_names='user_id', operations={('max_rating_movie','max_imdb_ranking_movie'): agg.ARGMAX(('rating','imdb-ranking'),'movie')}) Compute the count, mean, and standard deviation of ratings per (user, time), automatically assigning output column names. >>> sf['time'] = sf.apply(lambda x: (x['user_id'] + x['movie_id']) % 11 + 2000) >>> user_rating_stats = sf.groupby(['user_id', 'time'], ... [agg.COUNT(), ... agg.AVG('rating'), ... agg.STDV('rating')]) >>> user_rating_stats +------+---------+-------+---------------+----------------+ | time | user_id | Count | Avg of rating | Stdv of rating | +------+---------+-------+---------------+----------------+ | 2006 | 61285 | 1 | 4.0 | 0.0 | | 2000 | 36078 | 1 | 4.0 | 0.0 | | 2003 | 47158 | 1 | 3.0 | 0.0 | | 2007 | 34446 | 1 | 3.0 | 0.0 | | 2010 | 47990 | 1 | 3.0 | 0.0 | | 2003 | 42120 | 1 | 5.0 | 0.0 | | 2007 | 44940 | 1 | 4.0 | 0.0 | | 2008 | 58240 | 1 | 4.0 | 0.0 | | 2002 | 102 | 1 | 1.0 | 0.0 | | 2009 | 52708 | 1 | 3.0 | 0.0 | | ... | ... | ... | ... | ... | +------+---------+-------+---------------+----------------+ [10000 rows x 5 columns] The groupby function can take a variable length list of aggregation specifiers so if we want the count and the 0.25 and 0.75 quantiles of ratings: >>> user_rating_stats = sf.groupby(['user_id', 'time'], agg.COUNT(), ... {'rating_quantiles': agg.QUANTILE('rating',[0.25, 0.75])}) >>> user_rating_stats +------+---------+-------+------------------------+ | time | user_id | Count | rating_quantiles | +------+---------+-------+------------------------+ | 2006 | 61285 | 1 | array('d', [4.0, 4.0]) | | 2000 | 36078 | 1 | array('d', [4.0, 4.0]) | | 2003 | 47158 | 1 | array('d', [3.0, 3.0]) | | 2007 | 34446 | 1 | array('d', [3.0, 3.0]) | | 2010 | 47990 | 1 | array('d', [3.0, 3.0]) | | 2003 | 42120 | 1 | array('d', [5.0, 5.0]) | | 2007 | 44940 | 1 | array('d', [4.0, 4.0]) | | 2008 | 58240 | 1 | array('d', [4.0, 4.0]) | | 2002 | 102 | 1 | array('d', [1.0, 1.0]) | | 2009 | 52708 | 1 | array('d', [3.0, 3.0]) | | ... | ... | ... | ... | +------+---------+-------+------------------------+ [10000 rows x 4 columns] To put all items a user rated into one list value by their star rating: >>> user_rating_stats = sf.groupby(["user_id", "rating"], ... {"rated_movie_ids":agg.CONCAT("movie_id")}) >>> user_rating_stats +--------+---------+----------------------+ | rating | user_id | rated_movie_ids | +--------+---------+----------------------+ | 3 | 31434 | array('d', [1663.0]) | | 5 | 25944 | array('d', [1663.0]) | | 4 | 38827 | array('d', [1663.0]) | | 4 | 51437 | array('d', [1663.0]) | | 4 | 42549 | array('d', [1663.0]) | | 4 | 49532 | array('d', [1663.0]) | | 3 | 26124 | array('d', [1663.0]) | | 4 | 46336 | array('d', [1663.0]) | | 4 | 52133 | array('d', [1663.0]) | | 5 | 62361 | array('d', [1663.0]) | | ... | ... | ... | +--------+---------+----------------------+ [9952 rows x 3 columns] To put all items and rating of a given user together into a dictionary value: >>> user_rating_stats = sf.groupby("user_id", ... {"movie_rating":agg.CONCAT("movie_id", "rating")}) >>> user_rating_stats +---------+--------------+ | user_id | movie_rating | +---------+--------------+ | 62361 | {1663: 5} | | 30727 | {1663: 4} | | 40111 | {1663: 2} | | 50513 | {1663: 4} | | 35140 | {1663: 4} | | 42352 | {1663: 5} | | 29667 | {1663: 4} | | 46242 | {1663: 5} | | 58310 | {1663: 2} | | 64614 | {1663: 2} | | ... | ... 
| +---------+--------------+ [9852 rows x 2 columns] """ # some basic checking first # make sure key_column_names is a list if isinstance(key_column_names, str): key_column_names = [key_column_names] # check that every column is a string, and is a valid column name my_column_names = self.column_names() key_columns_array = [] for column in key_column_names: if not isinstance(column, str): raise TypeError("Column name must be a string") if column not in my_column_names: raise KeyError("Column " + column + " does not exist in SFrame") if self[column].dtype == dict: raise TypeError("Cannot group on a dictionary column.") key_columns_array.append(column) group_output_columns = [] group_columns = [] group_ops = [] all_ops = [operations] + list(args) for op_entry in all_ops: # if it is not a dict, nor a list, it is just a single aggregator # element (probably COUNT). wrap it in a list so we can reuse the # list processing code operation = op_entry if not(isinstance(operation, list) or isinstance(operation, dict)): operation = [operation] if isinstance(operation, dict): # now sweep the dict and add to group_columns and group_ops for key in operation: val = operation[key] if type(val) is tuple: (op, column) = val if (op == '__builtin__avg__' and self[column[0]].dtype in [array.array, numpy.ndarray]): op = '__builtin__vector__avg__' if (op == '__builtin__sum__' and self[column[0]].dtype in [array.array, numpy.ndarray]): op = '__builtin__vector__sum__' if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and ((type(column[0]) is tuple) != (type(key) is tuple)): raise TypeError("Output column(s) and aggregate column(s) for aggregate operation should be either all tuple or all string.") if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple: for (col,output) in zip(column[0],key): group_columns = group_columns + [[col,column[1]]] group_ops = group_ops + [op] group_output_columns = group_output_columns + [output] else: group_columns = group_columns + [column] group_ops = group_ops + [op] group_output_columns = group_output_columns + [key] if (op == '__builtin__concat__dict__'): key_column = column[0] key_column_type = self.select_column(key_column).dtype if not key_column_type in (int, float, str): raise TypeError('CONCAT key column must be int, float or str type') elif val == aggregate.COUNT: group_output_columns = group_output_columns + [key] val = aggregate.COUNT() (op, column) = val group_columns = group_columns + [column] group_ops = group_ops + [op] else: raise TypeError("Unexpected type in aggregator definition of output column: " + key) elif isinstance(operation, list): # we will be using automatically defined column names for val in operation: if type(val) is tuple: (op, column) = val if (op == '__builtin__avg__' and self[column[0]].dtype in [array.array, numpy.ndarray]): op = '__builtin__vector__avg__' if (op == '__builtin__sum__' and self[column[0]].dtype in [array.array, numpy.ndarray]): op = '__builtin__vector__sum__' if (op == '__builtin__argmax__' or op == '__builtin__argmin__') and type(column[0]) is tuple: for col in column[0]: group_columns = group_columns + [[col,column[1]]] group_ops = group_ops + [op] group_output_columns = group_output_columns + [""] else: group_columns = group_columns + [column] group_ops = group_ops + [op] group_output_columns = group_output_columns + [""] if (op == '__builtin__concat__dict__'): key_column = column[0] key_column_type = self.select_column(key_column).dtype if not key_column_type in (int, float, 
str): raise TypeError('CONCAT key column must be int, float or str type') elif val == aggregate.COUNT: group_output_columns = group_output_columns + [""] val = aggregate.COUNT() (op, column) = val group_columns = group_columns + [column] group_ops = group_ops + [op] else: raise TypeError("Unexpected type in aggregator definition.") # let's validate group_columns and group_ops are valid for (cols, op) in zip(group_columns, group_ops): for col in cols: if not isinstance(col, str): raise TypeError("Column name must be a string") if not isinstance(op, str): raise TypeError("Operation type not recognized.") if op is not aggregate.COUNT()[0]: for col in cols: if col not in my_column_names: raise KeyError("Column " + col + " does not exist in SFrame") with cython_context(): return SFrame(_proxy=self.__proxy__.groupby_aggregate(key_columns_array, group_columns, group_output_columns, group_ops)) def join(self, right, on=None, how='inner'): """ Merge two SFrames. Merges the current (left) SFrame with the given (right) SFrame using a SQL-style equi-join operation by columns. Parameters ---------- right : SFrame The SFrame to join. on : None | str | list | dict, optional The column name(s) representing the set of join keys. Each row that has the same value in this set of columns will be merged together. * If 'None' is given, join will use all columns that have the same name as the set of join keys. * If a str is given, this is interpreted as a join using one column, where both SFrames have the same column name. * If a list is given, this is interpreted as a join using one or more column names, where each column name given exists in both SFrames. * If a dict is given, each dict key is taken as a column name in the left SFrame, and each dict value is taken as the column name in right SFrame that will be joined together. e.g. {'left_col_name':'right_col_name'}. how : {'left', 'right', 'outer', 'inner'}, optional The type of join to perform. 'inner' is default. * inner: Equivalent to a SQL inner join. Result consists of the rows from the two frames whose join key values match exactly, merged together into one SFrame. * left: Equivalent to a SQL left outer join. Result is the union between the result of an inner join and the rest of the rows from the left SFrame, merged with missing values. * right: Equivalent to a SQL right outer join. Result is the union between the result of an inner join and the rest of the rows from the right SFrame, merged with missing values. * outer: Equivalent to a SQL full outer join. Result is the union between the result of a left outer join and a right outer join. Returns ------- out : SFrame Examples -------- >>> animals = turicreate.SFrame({'id': [1, 2, 3, 4], ... 'name': ['dog', 'cat', 'sheep', 'cow']}) >>> sounds = turicreate.SFrame({'id': [1, 3, 4, 5], ... 
'sound': ['woof', 'baa', 'moo', 'oink']}) >>> animals.join(sounds, how='inner') +----+-------+-------+ | id | name | sound | +----+-------+-------+ | 1 | dog | woof | | 3 | sheep | baa | | 4 | cow | moo | +----+-------+-------+ [3 rows x 3 columns] >>> animals.join(sounds, on='id', how='left') +----+-------+-------+ | id | name | sound | +----+-------+-------+ | 1 | dog | woof | | 3 | sheep | baa | | 4 | cow | moo | | 2 | cat | None | +----+-------+-------+ [4 rows x 3 columns] >>> animals.join(sounds, on=['id'], how='right') +----+-------+-------+ | id | name | sound | +----+-------+-------+ | 1 | dog | woof | | 3 | sheep | baa | | 4 | cow | moo | | 5 | None | oink | +----+-------+-------+ [4 rows x 3 columns] >>> animals.join(sounds, on={'id':'id'}, how='outer') +----+-------+-------+ | id | name | sound | +----+-------+-------+ | 1 | dog | woof | | 3 | sheep | baa | | 4 | cow | moo | | 5 | None | oink | | 2 | cat | None | +----+-------+-------+ [5 rows x 3 columns] """ available_join_types = ['left','right','outer','inner'] if not isinstance(right, SFrame): raise TypeError("Can only join two SFrames") if how not in available_join_types: raise ValueError("Invalid join type") if (self.num_columns() <= 0) or (right.num_columns() <= 0): raise ValueError("Cannot join an SFrame with no columns.") join_keys = dict() if on is None: left_names = self.column_names() right_names = right.column_names() common_columns = [name for name in left_names if name in right_names] for name in common_columns: join_keys[name] = name elif type(on) is str: join_keys[on] = on elif type(on) is list: for name in on: if type(name) is not str: raise TypeError("Join keys must each be a str.") join_keys[name] = name elif type(on) is dict: join_keys = on else: raise TypeError("Must pass a str, list, or dict of join keys") with cython_context(): return SFrame(_proxy=self.__proxy__.join(right.__proxy__, how, join_keys)) def filter_by(self, values, column_name, exclude=False): """ Filter an SFrame by values inside an iterable object. Result is an SFrame that only includes (or excludes) the rows that have a column with the given ``column_name`` which holds one of the values in the given ``values`` :class:`~turicreate.SArray`. If ``values`` is not an SArray, we attempt to convert it to one before filtering. Parameters ---------- values : SArray | list | numpy.ndarray | pandas.Series | str | map | generator | filter | None | range The values to use to filter the SFrame. The resulting SFrame will only include rows that have one of these values in the given column. column_name : str The column of the SFrame to match with the given `values`. exclude : bool If True, the result SFrame will contain all rows EXCEPT those that have one of ``values`` in ``column_name``. Returns ------- out : SFrame The filtered SFrame. Examples -------- >>> sf = turicreate.SFrame({'id': [1, 2, 3, 4], ... 'animal_type': ['dog', 'cat', 'cow', 'horse'], ... 
'name': ['bob', 'jim', 'jimbob', 'bobjim']}) >>> household_pets = ['cat', 'hamster', 'dog', 'fish', 'bird', 'snake'] >>> sf.filter_by(household_pets, 'animal_type') +-------------+----+------+ | animal_type | id | name | +-------------+----+------+ | dog | 1 | bob | | cat | 2 | jim | +-------------+----+------+ [2 rows x 3 columns] >>> sf.filter_by(household_pets, 'animal_type', exclude=True) +-------------+----+--------+ | animal_type | id | name | +-------------+----+--------+ | horse | 4 | bobjim | | cow | 3 | jimbob | +-------------+----+--------+ [2 rows x 3 columns] >>> sf.filter_by(None, 'name', exclude=True) +-------------+----+--------+ | animal_type | id | name | +-------------+----+--------+ | dog | 1 | bob | | cat | 2 | jim | | cow | 3 | jimbob | | horse | 4 | bobjim | +-------------+----+--------+ [4 rows x 3 columns] >>> sf.filter_by(filter(lambda x : len(x) > 3, sf['name']), 'name', exclude=True) +-------------+----+--------+ | animal_type | id | name | +-------------+----+--------+ | dog | 1 | bob | | cat | 2 | jim | +-------------+----+--------+ [2 rows x 3 columns] >>> sf.filter_by(range(3), 'id', exclude=True) +-------------+----+--------+ | animal_type | id | name | +-------------+----+--------+ | cow | 3 | jimbob | | horse | 4 | bobjim | +-------------+----+--------+ [2 rows x 3 columns] """ if type(column_name) is not str: raise TypeError("Must pass a str as column_name") existing_columns = self.column_names() if column_name not in existing_columns: raise KeyError("Column '" + column_name + "' not in SFrame.") existing_type = self[column_name].dtype if type(values) is not SArray: # If we were given a single element, try to put in list and convert # to SArray if not _is_non_string_iterable(values): values = [values] else: # is iterable # if `values` is a map/filter/generator, then we need to convert it to list # so we can repeatedly iterate through the iterable object through `all`. # true that, we don't cover use defined iterators. # I find it's too hard to check whether an iterable can be used repeatedly. # just let em not use. if SArray._is_iterable_required_to_listify(values): values = list(values) # if all vals are None, cast the sarray to existing type # this will enable filter_by(None, column_name) to remove missing vals if all(val is None for val in values): values = SArray(values, existing_type) else: values = SArray(values) value_sf = SFrame() value_sf.add_column(values, column_name, inplace=True) given_type = value_sf.column_types()[0] if given_type != existing_type: raise TypeError(("Type of given values ({0}) does not match type of column '" + column_name + "' ({1}) in SFrame.").format(given_type, existing_type)) # Make sure the values list has unique values, or else join will not # filter. value_sf = value_sf.groupby(column_name, {}) with cython_context(): if exclude: id_name = "id" # Make sure this name is unique so we know what to remove in # the result while id_name in existing_columns: id_name += "1" value_sf = value_sf.add_row_number(id_name) tmp = SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__, 'left', {column_name:column_name})) ret_sf = tmp[tmp[id_name] == None] del ret_sf[id_name] return ret_sf else: return SFrame(_proxy=self.__proxy__.join(value_sf.__proxy__, 'inner', {column_name:column_name})) def explore(self, title=None): """ Explore the SFrame in an interactive GUI. Opens a new app window. Parameters ---------- title : str The plot title to show for the resulting visualization. Defaults to None. 
If the title is None, a default title will be provided. Returns ------- None Examples -------- Suppose 'sf' is an SFrame, we can view it using: >>> sf.explore() To override the default plot title and axis labels: >>> sf.explore(title="My Plot Title") """ import sys import os if sys.platform != 'darwin' and sys.platform != 'linux2' and sys.platform != 'linux': raise NotImplementedError('Visualization is currently supported only on macOS and Linux.') # Suppress visualization output if 'none' target is set from ..visualization._plot import _target if _target == 'none': return path_to_client = _get_client_app_path() if title is None: title = "" self.__proxy__.explore(path_to_client, title) def show(self): """ Visualize a summary of each column in an SFrame. Opens a new app window. Notes ----- - The plot will render either inline in a Jupyter Notebook, in a web browser, or in a native GUI window, depending on the value provided in `turicreate.visualization.set_target` (defaults to 'auto'). Returns ------- None Examples -------- Suppose 'sf' is an SFrame, we can view it using: >>> sf.show() """ returned_plot = self.plot() returned_plot.show() def plot(self): """ Create a Plot object that contains a summary of each column in an SFrame. Returns ------- out : Plot A :class: Plot object that is the columnwise summary of the sframe. Examples -------- Suppose 'sf' is an SFrame, we can make a plot object as: >>> plt = sf.plot() We can then visualize the plot using: >>> plt.show() """ return Plot(_proxy=self.__proxy__.plot()) def pack_columns(self, column_names=None, column_name_prefix=None, dtype=list, fill_na=None, remove_prefix=True, new_column_name=None): """ Pack columns of the current SFrame into one single column. The result is a new SFrame with the unaffected columns from the original SFrame plus the newly created column. The list of columns that are packed is chosen through either the ``column_names`` or ``column_name_prefix`` parameter. Only one of the parameters is allowed to be provided. ``columns_names`` explicitly specifies the list of columns to pack, while ``column_name_prefix`` specifies that all columns that have the given prefix are to be packed. The type of the resulting column is decided by the ``dtype`` parameter. Allowed values for ``dtype`` are dict, array.array and list: - *dict*: pack to a dictionary SArray where column name becomes dictionary key and column value becomes dictionary value - *array.array*: pack all values from the packing columns into an array - *list*: pack all values from the packing columns into a list. Parameters ---------- column_names : list[str], optional A list of column names to be packed. If omitted and `column_name_prefix` is not specified, all columns from current SFrame are packed. This parameter is mutually exclusive with the `column_name_prefix` parameter. column_name_prefix : str, optional Pack all columns with the given `column_name_prefix`. This parameter is mutually exclusive with the `columns_names` parameter. dtype : dict | array.array | list, optional The resulting packed column type. If not provided, dtype is list. fill_na : value, optional Value to fill into packed column if missing value is encountered. If packing to dictionary, `fill_na` is only applicable to dictionary values; missing keys are not replaced. remove_prefix : bool, optional If True and `column_name_prefix` is specified, the dictionary key will be constructed by removing the prefix from the column name. This option is only applicable when packing to dict type. 
new_column_name : str, optional Packed column name. If not given and `column_name_prefix` is given, then the prefix will be used as the new column name, otherwise name is generated automatically. Returns ------- out : SFrame An SFrame that contains columns that are not packed, plus the newly packed column. See Also -------- unpack Notes ----- - If packing to dictionary, missing key is always dropped. Missing values are dropped if fill_na is not provided, otherwise, missing value is replaced by 'fill_na'. If packing to list or array, missing values will be kept. If 'fill_na' is provided, the missing value is replaced with 'fill_na' value. Examples -------- Suppose 'sf' is an an SFrame that maintains business category information: >>> sf = turicreate.SFrame({'business': range(1, 5), ... 'category.retail': [1, None, 1, None], ... 'category.food': [1, 1, None, None], ... 'category.service': [None, 1, 1, None], ... 'category.shop': [1, 1, None, 1]}) >>> sf +----------+-----------------+---------------+------------------+---------------+ | business | category.retail | category.food | category.service | category.shop | +----------+-----------------+---------------+------------------+---------------+ | 1 | 1 | 1 | None | 1 | | 2 | None | 1 | 1 | 1 | | 3 | 1 | None | 1 | None | | 4 | None | 1 | None | 1 | +----------+-----------------+---------------+------------------+---------------+ [4 rows x 5 columns] To pack all category columns into a list: >>> sf.pack_columns(column_name_prefix='category') +----------+-----------------------+ | business | category | +----------+-----------------------+ | 1 | [1, 1, None, 1] | | 2 | [1, None, 1, 1] | | 3 | [None, 1, 1, None] | | 4 | [None, None, None, 1] | +----------+-----------------------+ [4 rows x 2 columns] To pack all category columns into a dictionary, with new column name: >>> sf.pack_columns(column_name_prefix='category', dtype=dict, ... new_column_name='new name') +----------+-------------------------------+ | business | new name | +----------+-------------------------------+ | 1 | {'food': 1, 'shop': 1, 're... | | 2 | {'food': 1, 'shop': 1, 'se... | | 3 | {'retail': 1, 'service': 1} | | 4 | {'shop': 1} | +----------+-------------------------------+ [4 rows x 2 columns] To keep column prefix in the resulting dict key: >>> sf.pack_columns(column_name_prefix='category', dtype=dict, remove_prefix=False) +----------+-------------------------------+ | business | category | +----------+-------------------------------+ | 1 | {'category.retail': 1, 'ca... | | 2 | {'category.food': 1, 'cate... | | 3 | {'category.retail': 1, 'ca... | | 4 | {'category.shop': 1} | +----------+-------------------------------+ [4 rows x 2 columns] To explicitly pack a set of columns: >>> sf.pack_columns(column_names = ['business', 'category.retail', 'category.food', 'category.service', 'category.shop']) +-----------------------+ | X1 | +-----------------------+ | [1, 1, 1, None, 1] | | [2, None, 1, 1, 1] | | [3, 1, None, 1, None] | | [4, None, 1, None, 1] | +-----------------------+ [4 rows x 1 columns] To pack all columns with name starting with 'category' into an array type, and with missing value replaced with 0: >>> import array >>> sf.pack_columns(column_name_prefix="category", dtype=array.array, ... 
fill_na=0) +----------+----------------------+ | business | category | +----------+----------------------+ | 1 | [1.0, 1.0, 0.0, 1.0] | | 2 | [1.0, 0.0, 1.0, 1.0] | | 3 | [0.0, 1.0, 1.0, 0.0] | | 4 | [0.0, 0.0, 0.0, 1.0] | +----------+----------------------+ [4 rows x 2 columns] """ if column_names is not None and column_name_prefix is not None: raise ValueError("'column_names' and 'column_name_prefix' parameter cannot be given at the same time.") if new_column_name is None and column_name_prefix is not None: new_column_name = column_name_prefix if column_name_prefix is not None: if type(column_name_prefix) != str: raise TypeError("'column_name_prefix' must be a string") column_names = [name for name in self.column_names() if name.startswith(column_name_prefix)] if len(column_names) == 0: raise ValueError("There is no column starts with prefix '" + column_name_prefix + "'") elif column_names is None: column_names = self.column_names() else: if not _is_non_string_iterable(column_names): raise TypeError("column_names must be an iterable type") column_name_set = set(self.column_names()) for column in column_names: if (column not in column_name_set): raise ValueError("Current SFrame has no column called '" + str(column) + "'.") # check duplicate names if len(set(column_names)) != len(column_names): raise ValueError("There is duplicate column names in column_names parameter") if (dtype not in (dict, list, array.array)): raise ValueError("Resulting dtype has to be one of dict/array.array/list type") # fill_na value for array needs to be numeric if dtype == array.array: if (fill_na is not None) and (type(fill_na) not in (int, float)): raise ValueError("fill_na value for array needs to be numeric type") # all column_names have to be numeric type for column in column_names: if self[column].dtype not in (int, float): raise TypeError("Column '" + column + "' type is not numeric, cannot pack into array type") # generate dict key names if pack to dictionary # we try to be smart here # if all column names are like: a.b, a.c, a.d,... # we then use "b", "c", "d", etc as the dictionary key during packing if (dtype == dict) and (column_name_prefix is not None) and (remove_prefix == True): size_prefix = len(column_name_prefix) first_char = set([c[size_prefix:size_prefix+1] for c in column_names]) if ((len(first_char) == 1) and first_char.pop() in ['.','-','_']): dict_keys = [name[size_prefix+1:] for name in column_names] else: dict_keys = [name[size_prefix:] for name in column_names] else: dict_keys = column_names rest_columns = [name for name in self.column_names() if name not in column_names] if new_column_name is not None: if type(new_column_name) != str: raise TypeError("'new_column_name' has to be a string") if new_column_name in rest_columns: raise KeyError("Current SFrame already contains a column name " + new_column_name) else: new_column_name = "" ret_sa = None with cython_context(): ret_sa = SArray(_proxy=self.__proxy__.pack_columns(column_names, dict_keys, dtype, fill_na)) new_sf = self.select_columns(rest_columns) new_sf.add_column(ret_sa, new_column_name, inplace=True) return new_sf def split_datetime(self, column_name, column_name_prefix=None, limit=None, timezone=False): """ Splits a datetime column of SFrame to multiple columns, with each value in a separate column. Returns a new SFrame with the expanded column replaced with a list of new columns. The expanded column must be of datetime type. 
For more details regarding name generation and other, refer to :py:func:`turicreate.SArray.split_datetime()` Parameters ---------- column_name : str Name of the unpacked column. column_name_prefix : str, optional If provided, expanded column names would start with the given prefix. If not provided, the default value is the name of the expanded column. limit: list[str], optional Limits the set of datetime elements to expand. Possible values are 'year','month','day','hour','minute','second', 'weekday', 'isoweekday', 'tmweekday', and 'us'. If not provided, only ['year','month','day','hour','minute','second'] are expanded. timezone : bool, optional A boolean parameter that determines whether to show the timezone column or not. Defaults to False. Returns ------- out : SFrame A new SFrame that contains rest of columns from original SFrame with the given column replaced with a collection of expanded columns. Examples --------- >>> sf Columns: id int submission datetime Rows: 2 Data: +----+-------------------------------------------------+ | id | submission | +----+-------------------------------------------------+ | 1 | datetime(2011, 1, 21, 7, 17, 21, tzinfo=GMT(+1))| | 2 | datetime(2011, 1, 21, 5, 43, 21, tzinfo=GMT(+1))| +----+-------------------------------------------------+ >>> sf.split_datetime('submission',limit=['hour','minute']) Columns: id int submission.hour int submission.minute int Rows: 2 Data: +----+-----------------+-------------------+ | id | submission.hour | submission.minute | +----+-----------------+-------------------+ | 1 | 7 | 17 | | 2 | 5 | 43 | +----+-----------------+-------------------+ """ if column_name not in self.column_names(): raise KeyError("column '" + column_name + "' does not exist in current SFrame") if column_name_prefix is None: column_name_prefix = column_name new_sf = self[column_name].split_datetime(column_name_prefix, limit, timezone) # construct return SFrame, check if there is conflict rest_columns = [name for name in self.column_names() if name != column_name] new_names = new_sf.column_names() while set(new_names).intersection(rest_columns): new_names = [name + ".1" for name in new_names] new_sf.rename(dict(list(zip(new_sf.column_names(), new_names))), inplace=True) ret_sf = self.select_columns(rest_columns) ret_sf.add_columns(new_sf, inplace=True) return ret_sf def unpack(self, column_name=None, column_name_prefix=None, column_types=None, na_value=None, limit=None): """ Expand one column of this SFrame to multiple columns with each value in a separate column. Returns a new SFrame with the unpacked column replaced with a list of new columns. The column must be of list/array/dict type. For more details regarding name generation, missing value handling and other, refer to the SArray version of :py:func:`~turicreate.SArray.unpack()`. Parameters ---------- column_name : str, optional Name of the unpacked column, if provided. If not provided and only one column is present then the column is unpacked. In case of multiple columns, name must be provided to know which column to be unpacked. column_name_prefix : str, optional If provided, unpacked column names would start with the given prefix. If not provided, default value is the name of the unpacked column. column_types : [type], optional Column types for the unpacked columns. If not provided, column types are automatically inferred from first 100 rows. For array type, default column types are float. If provided, column_types also restricts how many columns to unpack. 
na_value : flexible_type, optional If provided, convert all values that are equal to "na_value" to missing value (None). limit : list[str] | list[int], optional Control unpacking only a subset of list/array/dict value. For dictionary SArray, `limit` is a list of dictionary keys to restrict. For list/array SArray, `limit` is a list of integers that are indexes into the list/array value. Returns ------- out : SFrame A new SFrame that contains rest of columns from original SFrame with the given column replaced with a collection of unpacked columns. See Also -------- pack_columns, SArray.unpack Examples --------- >>> sf = turicreate.SFrame({'id': [1,2,3], ... 'wc': [{'a': 1}, {'b': 2}, {'a': 1, 'b': 2}]}) +----+------------------+ | id | wc | +----+------------------+ | 1 | {'a': 1} | | 2 | {'b': 2} | | 3 | {'a': 1, 'b': 2} | +----+------------------+ [3 rows x 2 columns] >>> sf.unpack('wc') +----+------+------+ | id | wc.a | wc.b | +----+------+------+ | 1 | 1 | None | | 2 | None | 2 | | 3 | 1 | 2 | +----+------+------+ [3 rows x 3 columns] To not have prefix in the generated column name: >>> sf.unpack('wc', column_name_prefix="") +----+------+------+ | id | a | b | +----+------+------+ | 1 | 1 | None | | 2 | None | 2 | | 3 | 1 | 2 | +----+------+------+ [3 rows x 3 columns] To limit subset of keys to unpack: >>> sf.unpack('wc', limit=['b']) +----+------+ | id | wc.b | +----+------+ | 1 | None | | 2 | 2 | | 3 | 2 | +----+------+ [3 rows x 3 columns] To unpack an array column: >>> import array >>> sf = turicreate.SFrame({'id': [1,2,3], ... 'friends': [array.array('d', [1.0, 2.0, 3.0]), ... array.array('d', [2.0, 3.0, 4.0]), ... array.array('d', [3.0, 4.0, 5.0])]}) >>> sf +-----------------+----+ | friends | id | +-----------------+----+ | [1.0, 2.0, 3.0] | 1 | | [2.0, 3.0, 4.0] | 2 | | [3.0, 4.0, 5.0] | 3 | +-----------------+----+ [3 rows x 2 columns] >>> sf.unpack('friends') +----+-----------+-----------+-----------+ | id | friends.0 | friends.1 | friends.2 | +----+-----------+-----------+-----------+ | 1 | 1.0 | 2.0 | 3.0 | | 2 | 2.0 | 3.0 | 4.0 | | 3 | 3.0 | 4.0 | 5.0 | +----+-----------+-----------+-----------+ [3 rows x 4 columns] >>> sf = turicreate.SFrame([{'a':1,'b':2,'c':3},{'a':4,'b':5,'c':6}]) >>> sf.unpack() +---+---+---+ | a | b | c | +---+---+---+ | 1 | 2 | 3 | | 4 | 5 | 6 | +---+---+---+ [2 rows x 3 columns] """ if column_name is None: if self.num_columns()==0: raise RuntimeError("No column exists in the current SFrame") for t in range(self.num_columns()): column_type = self.column_types()[t] if column_type==dict or column_type==list or column_type==array.array: if column_name is None: column_name = self.column_names()[t] else: raise RuntimeError("Column name needed to unpack") if column_name is None: raise RuntimeError("No columns can be unpacked") elif column_name_prefix is None: column_name_prefix="" elif column_name not in self.column_names(): raise KeyError("Column '" + column_name + "' does not exist in current SFrame") if column_name_prefix is None: column_name_prefix = column_name new_sf = self[column_name].unpack(column_name_prefix, column_types, na_value, limit) # construct return SFrame, check if there is conflict rest_columns = [name for name in self.column_names() if name != column_name] new_names = new_sf.column_names() while set(new_names).intersection(rest_columns): new_names = [name + ".1" for name in new_names] new_sf.rename(dict(list(zip(new_sf.column_names(), new_names))), inplace=True) ret_sf = self.select_columns(rest_columns) ret_sf.add_columns(new_sf, 
inplace=True) return ret_sf def stack(self, column_name, new_column_name=None, drop_na=False, new_column_type=None): """ Convert a "wide" column of an SFrame to one or two "tall" columns by stacking all values. The stack works only for columns of dict, list, or array type. If the column is dict type, two new columns are created as a result of stacking: one column holds the key and another column holds the value. The rest of the columns are repeated for each key/value pair. If the column is array or list type, one new column is created as a result of stacking. With each row holds one element of the array or list value, and the rest columns from the same original row repeated. The returned SFrame includes the newly created column(s) and all columns other than the one that is stacked. Parameters -------------- column_name : str The column to stack. This column must be of dict/list/array type new_column_name : str | list of str, optional The new column name(s). If original column is list/array type, new_column_name must a string. If original column is dict type, new_column_name must be a list of two strings. If not given, column names are generated automatically. drop_na : boolean, optional If True, missing values and empty list/array/dict are all dropped from the resulting column(s). If False, missing values are maintained in stacked column(s). new_column_type : type | list of types, optional The new column types. If original column is a list/array type new_column_type must be a single type, or a list of one type. If original column is of dict type, new_column_type must be a list of two types. If not provided, the types are automatically inferred from the first 100 values of the SFrame. Returns ------- out : SFrame A new SFrame that contains newly stacked column(s) plus columns in original SFrame other than the stacked column. See Also -------- unstack Examples --------- Suppose 'sf' is an SFrame that contains a column of dict type: >>> sf = turicreate.SFrame({'topic':[1,2,3,4], ... 'words': [{'a':3, 'cat':2}, ... {'a':1, 'the':2}, ... {'the':1, 'dog':3}, ... {}] ... }) +-------+----------------------+ | topic | words | +-------+----------------------+ | 1 | {'a': 3, 'cat': 2} | | 2 | {'a': 1, 'the': 2} | | 3 | {'the': 1, 'dog': 3} | | 4 | {} | +-------+----------------------+ [4 rows x 2 columns] Stack would stack all keys in one column and all values in another column: >>> sf.stack('words', new_column_name=['word', 'count']) +-------+------+-------+ | topic | word | count | +-------+------+-------+ | 1 | a | 3 | | 1 | cat | 2 | | 2 | a | 1 | | 2 | the | 2 | | 3 | the | 1 | | 3 | dog | 3 | | 4 | None | None | +-------+------+-------+ [7 rows x 3 columns] Observe that since topic 4 had no words, an empty row is inserted. To drop that row, set drop_na=True in the parameters to stack. Suppose 'sf' is an SFrame that contains a user and his/her friends, where 'friends' columns is an array type. Stack on 'friends' column would create a user/friend list for each user/friend pair: >>> sf = turicreate.SFrame({'topic':[1,2,3], ... 'friends':[[2,3,4], [5,6], ... [4,5,10,None]] ... 
}) >>> sf +-------+------------------+ | topic | friends | +-------+------------------+ | 1 | [2, 3, 4] | | 2 | [5, 6] | | 3 | [4, 5, 10, None] | +----- -+------------------+ [3 rows x 2 columns] >>> sf.stack('friends', new_column_name='friend') +-------+--------+ | topic | friend | +-------+--------+ | 1 | 2 | | 1 | 3 | | 1 | 4 | | 2 | 5 | | 2 | 6 | | 3 | 4 | | 3 | 5 | | 3 | 10 | | 3 | None | +-------+--------+ [9 rows x 2 columns] """ # validate column_name column_name = str(column_name) if column_name not in self.column_names(): raise ValueError("Cannot find column '" + str(column_name) + "' in the SFrame.") stack_column_type = self[column_name].dtype if (stack_column_type not in [dict, array.array, list]): raise TypeError("Stack is only supported for column of dict/list/array type.") # user defined types. do some checking if new_column_type is not None: # if new_column_type is a single type, just make it a list of one type if type(new_column_type) is type: new_column_type = [new_column_type] if (stack_column_type in [list, array.array]) and len(new_column_type) != 1: raise ValueError("Expecting a single column type to unpack list or array columns") if (stack_column_type in [dict]) and len(new_column_type) != 2: raise ValueError("Expecting two column types to unpack a dict column") if (new_column_name is not None): if stack_column_type == dict: if (type(new_column_name) is not list): raise TypeError("new_column_name has to be a list to stack dict type") elif (len(new_column_name) != 2): raise TypeError("new_column_name must have length of two") else: if (type(new_column_name) != str): raise TypeError("new_column_name has to be a str") new_column_name = [new_column_name] # check if the new column name conflicts with existing ones for name in new_column_name: if (name in self.column_names()) and (name != column_name): raise ValueError("Column with name '" + name + "' already exists, pick a new column name") else: if stack_column_type == dict: new_column_name = ["",""] else: new_column_name = [""] # infer column types head_row = SArray(self[column_name].head(100)).dropna() if (len(head_row) == 0): raise ValueError("Cannot infer column type because there is not enough rows to infer value") if new_column_type is None: # we have to perform type inference if stack_column_type == dict: # infer key/value type keys = []; values = [] for row in head_row: for val in row: keys.append(val) if val is not None: values.append(row[val]) new_column_type = [ infer_type_of_list(keys), infer_type_of_list(values) ] else: values = [v for v in itertools.chain.from_iterable(head_row)] new_column_type = [infer_type_of_list(values)] with cython_context(): return SFrame(_proxy=self.__proxy__.stack(column_name, new_column_name, new_column_type, drop_na)) def unstack(self, column_names, new_column_name=None): """ Concatenate values from one or two columns into one column, grouping by all other columns. The resulting column could be of type list, array or dictionary. If ``column_names`` is a numeric column, the result will be of array.array type. If ``column_names`` is a non-numeric column, the new column will be of list type. If ``column_names`` is a list of two columns, the new column will be of dict type where the keys are taken from the first column in the list. Parameters ---------- column_names : str | [str, str] The column(s) that is(are) to be concatenated. If str, then collapsed column type is either array or list. 
If [str, str], then collapsed column type is dict new_column_name : str, optional New column name. If not given, a name is generated automatically. Returns ------- out : SFrame A new SFrame containing the grouped columns as well as the new column. See Also -------- stack : The inverse of unstack. groupby : ``unstack`` is a special version of ``groupby`` that uses the :mod:`~turicreate.aggregate.CONCAT` aggregator Notes ----- - There is no guarantee the resulting SFrame maintains the same order as the original SFrame. - Missing values are maintained during unstack. - When unstacking into a dictionary, if there is more than one instance of a given key for a particular group, an arbitrary value is selected. Examples -------- >>> sf = turicreate.SFrame({'count':[4, 2, 1, 1, 2, None], ... 'topic':['cat', 'cat', 'dog', 'elephant', 'elephant', 'fish'], ... 'word':['a', 'c', 'c', 'a', 'b', None]}) >>> sf.unstack(column_names=['word', 'count'], new_column_name='words') +----------+------------------+ | topic | words | +----------+------------------+ | elephant | {'a': 1, 'b': 2} | | dog | {'c': 1} | | cat | {'a': 4, 'c': 2} | | fish | None | +----------+------------------+ [4 rows x 2 columns] >>> sf = turicreate.SFrame({'friend': [2, 3, 4, 5, 6, 4, 5, 2, 3], ... 'user': [1, 1, 1, 2, 2, 2, 3, 4, 4]}) >>> sf.unstack('friend', new_column_name='new name') +------+-----------+ | user | new name | +------+-----------+ | 3 | [5] | | 1 | [2, 3, 4] | | 2 | [6, 4, 5] | | 4 | [2, 3] | +------+-----------+ [4 rows x 2 columns] """ if (type(column_names) != str and len(column_names) != 2): raise TypeError("'column_names' parameter has to be either a string or a list of two strings.") with cython_context(): if type(column_names) == str: key_columns = [i for i in self.column_names() if i != column_names] if new_column_name is not None: return self.groupby(key_columns, {new_column_name : aggregate.CONCAT(column_names)}) else: return self.groupby(key_columns, aggregate.CONCAT(column_names)) elif len(column_names) == 2: key_columns = [i for i in self.column_names() if i not in column_names] if new_column_name is not None: return self.groupby(key_columns, {new_column_name: aggregate.CONCAT(column_names[0], column_names[1])}) else: return self.groupby(key_columns, aggregate.CONCAT(column_names[0], column_names[1])) def unique(self): """ Remove duplicate rows of the SFrame. Will not necessarily preserve the order of the given SFrame in the new SFrame. Returns ------- out : SFrame A new SFrame that contains the unique rows of the current SFrame. Raises ------ TypeError If any column in the SFrame is a dictionary type. See Also -------- SArray.unique Examples -------- >>> sf = turicreate.SFrame({'id':[1,2,3,3,4], 'value':[1,2,3,3,4]}) >>> sf +----+-------+ | id | value | +----+-------+ | 1 | 1 | | 2 | 2 | | 3 | 3 | | 3 | 3 | | 4 | 4 | +----+-------+ [5 rows x 2 columns] >>> sf.unique() +----+-------+ | id | value | +----+-------+ | 2 | 2 | | 4 | 4 | | 3 | 3 | | 1 | 1 | +----+-------+ [4 rows x 2 columns] """ return self.groupby(self.column_names(),{}) def sort(self, key_column_names, ascending=True): """ Sort current SFrame by the given columns, using the given sort order. Only columns that are type of str, int and float can be sorted. Parameters ---------- key_column_names : str | list of str | list of (str, bool) pairs Names of columns to be sorted. The result will be sorted first by first column, followed by second column, and so on. 
All columns will be sorted in the same order as governed by the `ascending` parameter. To control the sort ordering for each column individually, `key_column_names` must be a list of (str, bool) pairs. Given this case, the first value is the column name and the second value is a boolean indicating whether the sort order is ascending. ascending : bool, optional Sort all columns in the given order. Returns ------- out : SFrame A new SFrame that is sorted according to given sort criteria See Also -------- topk Examples -------- Suppose 'sf' is an sframe that has three columns 'a', 'b', 'c'. To sort by column 'a', ascending >>> sf = turicreate.SFrame({'a':[1,3,2,1], ... 'b':['a','c','b','b'], ... 'c':['x','y','z','y']}) >>> sf +---+---+---+ | a | b | c | +---+---+---+ | 1 | a | x | | 3 | c | y | | 2 | b | z | | 1 | b | y | +---+---+---+ [4 rows x 3 columns] >>> sf.sort('a') +---+---+---+ | a | b | c | +---+---+---+ | 1 | a | x | | 1 | b | y | | 2 | b | z | | 3 | c | y | +---+---+---+ [4 rows x 3 columns] To sort by column 'a', descending >>> sf.sort('a', ascending = False) +---+---+---+ | a | b | c | +---+---+---+ | 3 | c | y | | 2 | b | z | | 1 | a | x | | 1 | b | y | +---+---+---+ [4 rows x 3 columns] To sort by column 'a' and 'b', all ascending >>> sf.sort(['a', 'b']) +---+---+---+ | a | b | c | +---+---+---+ | 1 | a | x | | 1 | b | y | | 2 | b | z | | 3 | c | y | +---+---+---+ [4 rows x 3 columns] To sort by column 'a' ascending, and then by column 'c' descending >>> sf.sort([('a', True), ('c', False)]) +---+---+---+ | a | b | c | +---+---+---+ | 1 | b | y | | 1 | a | x | | 2 | b | z | | 3 | c | y | +---+---+---+ [4 rows x 3 columns] """ sort_column_names = [] sort_column_orders = [] # validate key_column_names if (type(key_column_names) == str): sort_column_names = [key_column_names] elif (type(key_column_names) == list): if (len(key_column_names) == 0): raise ValueError("Please provide at least one column to sort") first_param_types = set([type(i) for i in key_column_names]) if (len(first_param_types) != 1): raise ValueError("key_column_names element are not of the same type") first_param_type = first_param_types.pop() if (first_param_type == tuple): sort_column_names = [i[0] for i in key_column_names] sort_column_orders = [i[1] for i in key_column_names] elif(first_param_type == str): sort_column_names = key_column_names else: raise TypeError("key_column_names type is not supported") else: raise TypeError("key_column_names type is not correct. Supported types are str, list of str or list of (str,bool) pair.") # use the second parameter if the sort order is not given if (len(sort_column_orders) == 0): sort_column_orders = [ascending for i in sort_column_names] # make sure all column exists my_column_names = set(self.column_names()) for column in sort_column_names: if (type(column) != str): raise TypeError("Only string parameter can be passed in as column names") if (column not in my_column_names): raise ValueError("SFrame has no column named: '" + str(column) + "'") if (self[column].dtype not in (str, int, float,datetime.datetime)): raise TypeError("Only columns of type (str, int, float) can be sorted") with cython_context(): return SFrame(_proxy=self.__proxy__.sort(sort_column_names, sort_column_orders)) def dropna(self, columns=None, how='any'): """ Remove missing values from an SFrame. A missing value is either ``None`` or ``NaN``. If ``how`` is 'any', a row will be removed if any of the columns in the ``columns`` parameter contains at least one missing value. 
If ``how`` is 'all', a row will be removed if all of the columns in the ``columns`` parameter are missing values. If the ``columns`` parameter is not specified, the default is to consider all columns when searching for missing values. Parameters ---------- columns : list or str, optional The columns to use when looking for missing values. By default, all columns are used. how : {'any', 'all'}, optional Specifies whether a row should be dropped if at least one column has missing values, or if all columns have missing values. 'any' is default. Returns ------- out : SFrame SFrame with missing values removed (according to the given rules). See Also -------- dropna_split : Drops missing rows from the SFrame and returns them. Examples -------- Drop all missing values. >>> sf = turicreate.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]}) >>> sf.dropna() +---+---+ | a | b | +---+---+ | 1 | a | +---+---+ [1 rows x 2 columns] Drop rows where every value is missing. >>> sf.dropna(any="all") +------+---+ | a | b | +------+---+ | 1 | a | | None | b | +------+---+ [2 rows x 2 columns] Drop rows where column 'a' has a missing value. >>> sf.dropna('a', any="all") +---+---+ | a | b | +---+---+ | 1 | a | +---+---+ [1 rows x 2 columns] """ # If the user gives me an empty list (the indicator to use all columns) # NA values being dropped would not be the expected behavior. This # is a NOOP, so let's not bother the server if type(columns) is list and len(columns) == 0: return SFrame(_proxy=self.__proxy__) (columns, all_behavior) = self.__dropna_errchk(columns, how) with cython_context(): return SFrame(_proxy=self.__proxy__.drop_missing_values(columns, all_behavior, False)) def dropna_split(self, columns=None, how='any'): """ Split rows with missing values from this SFrame. This function has the same functionality as :py:func:`~turicreate.SFrame.dropna`, but returns a tuple of two SFrames. The first item is the expected output from :py:func:`~turicreate.SFrame.dropna`, and the second item contains all the rows filtered out by the `dropna` algorithm. Parameters ---------- columns : list or str, optional The columns to use when looking for missing values. By default, all columns are used. how : {'any', 'all'}, optional Specifies whether a row should be dropped if at least one column has missing values, or if all columns have missing values. 'any' is default. Returns ------- out : (SFrame, SFrame) (SFrame with missing values removed, SFrame with the removed missing values) See Also -------- dropna Examples -------- >>> sf = turicreate.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]}) >>> good, bad = sf.dropna_split() >>> good +---+---+ | a | b | +---+---+ | 1 | a | +---+---+ [1 rows x 2 columns] >>> bad +------+------+ | a | b | +------+------+ | None | b | | None | None | +------+------+ [2 rows x 2 columns] """ # If the user gives me an empty list (the indicator to use all columns) # NA values being dropped would not be the expected behavior. 
This # is a NOOP, so let's not bother the server if type(columns) is list and len(columns) == 0: return (SFrame(_proxy=self.__proxy__), SFrame()) (columns, all_behavior) = self.__dropna_errchk(columns, how) sframe_tuple = self.__proxy__.drop_missing_values(columns, all_behavior, True) if len(sframe_tuple) != 2: raise RuntimeError("Did not return two SFrames!") with cython_context(): return (SFrame(_proxy=sframe_tuple[0]), SFrame(_proxy=sframe_tuple[1])) def __dropna_errchk(self, columns, how): if columns is None: # Default behavior is to consider every column, specified to # the server by an empty list (to avoid sending all the column # in this case, since it is the most common) columns = list() elif type(columns) is str: columns = [columns] elif type(columns) is not list: raise TypeError("Must give columns as a list, str, or 'None'") else: # Verify that we are only passing strings in our list list_types = set([type(i) for i in columns]) if (str not in list_types) or (len(list_types) > 1): raise TypeError("All columns must be of 'str' type") if how not in ['any','all']: raise ValueError("Must specify 'any' or 'all'") if how == 'all': all_behavior = True else: all_behavior = False return (columns, all_behavior) def fillna(self, column_name, value): """ Fill all missing values with a given value in a given column. If the ``value`` is not the same type as the values in ``column_name``, this method attempts to convert the value to the original column's type. If this fails, an error is raised. Parameters ---------- column_name : str The name of the column to modify. value : type convertible to SArray's type The value used to replace all missing values. Returns ------- out : SFrame A new SFrame with the specified value in place of missing values. See Also -------- dropna Examples -------- >>> sf = turicreate.SFrame({'a':[1, None, None], ... 'b':['13.1', '17.2', None]}) >>> sf = sf.fillna('a', 0) >>> sf +---+------+ | a | b | +---+------+ | 1 | 13.1 | | 0 | 17.2 | | 0 | None | +---+------+ [3 rows x 2 columns] """ # Normal error checking if type(column_name) is not str: raise TypeError("column_name must be a str") ret = self[self.column_names()] ret[column_name] = ret[column_name].fillna(value) return ret def add_row_number(self, column_name='id', start=0, inplace=False): """ Returns an SFrame with a new column that numbers each row sequentially. By default the count starts at 0, but this can be changed to a positive or negative number. The new column will be named with the given column name. An error will be raised if the given column name already exists in the SFrame. If inplace == False (default) this operation does not modify the current SFrame, returning a new SFrame. If inplace == True, this operation modifies the current SFrame, returning self. Parameters ---------- column_name : str, optional The name of the new column that will hold the row numbers. start : int, optional The number used to start the row number count. inplace : bool, optional. Defaults to False. Whether the SFrame is modified in place. Returns ------- out : SFrame The new SFrame with a column name Notes ----- The range of numbers is constrained by a signed 64-bit integer, so beware of overflow if you think the results in the row number column will be greater than 9 quintillion. 
Examples -------- >>> sf = turicreate.SFrame({'a': [1, None, None], 'b': ['a', 'b', None]}) >>> sf.add_row_number() +----+------+------+ | id | a | b | +----+------+------+ | 0 | 1 | a | | 1 | None | b | | 2 | None | None | +----+------+------+ [3 rows x 3 columns] """ if type(column_name) is not str: raise TypeError("Must give column_name as strs") if type(start) is not int: raise TypeError("Must give start as int") if column_name in self.column_names(): raise RuntimeError("Column '" + column_name + "' already exists in the current SFrame") the_col = _create_sequential_sarray(self.num_rows(), start) # Make sure the row number column is the first column new_sf = SFrame() new_sf.add_column(the_col, column_name, inplace=True) new_sf.add_columns(self, inplace=True) if inplace: self.__proxy__ = new_sf.__proxy__ return self else: return new_sf def _group(self, key_columns): """ Left undocumented intentionally. """ gsf = GroupedSFrame(self, key_columns) return gsf @property def shape(self): """ The shape of the SFrame, in a tuple. The first entry is the number of rows, the second is the number of columns. Examples -------- >>> sf = turicreate.SFrame({'id':[1,2,3], 'val':['A','B','C']}) >>> sf.shape (3, 2) """ return (self.num_rows(), self.num_columns()) @property def __proxy__(self): return self._proxy @__proxy__.setter def __proxy__(self, value): assert type(value) is UnitySFrameProxy self._cache = None self._proxy = value self._cache = None
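The groupby/join/filter_by docstrings above already carry doctest-style examples; what follows is an additional minimal sketch that exercises the same calls end to end. It assumes only that the turicreate package is installed — the data, column names, and variable names here are hypothetical, not taken from the library or its tests.

import turicreate as tc
import turicreate.aggregate as agg

# a tiny, made-up ratings table (hypothetical data)
ratings = tc.SFrame({'user_id':  [1, 1, 2, 3, 3, 3],
                     'movie_id': [10, 11, 10, 11, 12, 13],
                     'rating':   [4, 5, 3, 2, 5, 4]})

# dict form of groupby: output column names are given explicitly
per_user = ratings.groupby('user_id',
                           {'n':           agg.COUNT(),
                            'mean_rating': agg.MEAN('rating'),
                            'best_movie':  agg.ARGMAX('rating', 'movie_id')})

# list form: output column names are generated automatically
per_user_auto = ratings.groupby(['user_id'], [agg.COUNT(), agg.STD('rating')])

# left join against a second frame on the shared key column
users = tc.SFrame({'user_id': [1, 2, 3], 'name': ['ann', 'bo', 'cy']})
joined = per_user.join(users, on='user_id', how='left')

# keep only the rows whose user_id appears in the given list
subset = ratings.filter_by([1, 3], 'user_id')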
[]
[]
[ "TURI_JAVA_HOME" ]
[]
["TURI_JAVA_HOME"]
python
1
0
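As with the previous sketch, the reshaping helpers documented above (pack_columns, stack, unstack, sort, dropna) can be strung together on a toy frame. This is only an illustration under the assumption that turicreate is installed; every name and value in it is made up.

import turicreate as tc

sf = tc.SFrame({'doc':   [1, 2],
                'tag.a': [1, None],
                'tag.b': [None, 2]})

# pack the prefix-named columns into one dict column named 'tag';
# with dtype=dict, a missing value simply drops its key
packed = sf.pack_columns(column_name_prefix='tag', dtype=dict)

# stack the dict column into (key, value) rows, then rebuild it with unstack
tall = packed.stack('tag', new_column_name=['tag_key', 'tag_value'], drop_na=True)
wide_again = tall.unstack(['tag_key', 'tag_value'], new_column_name='tag')

# sort by the key column and drop any remaining rows with missing values
cleaned = tall.sort('doc').dropna()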
main.go
package main import ( "bytes" "encoding/json" "fmt" "io/ioutil" "log" "net/http" "net/url" "os" "strconv" "strings" "time" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/common/version" "gopkg.in/alecthomas/kingpin.v2" ) var Version string type bridge struct { server *http.Server debug *bool timeout *time.Duration titleAnnotation *string messageAnnotation *string priorityAnnotation *string defaultPriority *int gotifyToken *string gotifyEndpoint *string } type Notification struct { Alerts []Alert } type Alert struct { Annotations map[string]string Status string GeneratorURL string StartsAt string } type GotifyNotification struct { Title string `json:"title"` Message string `json:"message"` Priority int `json:"priority"` Extras map[string]interface{} `json:"extras"` } var ( gotifyEndpoint = kingpin.Flag("gotify_endpoint", "Full path to the Gotify message endpoint ($GOTIFY_ENDPOINT)").Default("http://127.0.0.1:80/message").Envar("GOTIFY_ENDPOINT").String() address = kingpin.Flag("bind_address", "The address the bridge will listen on ($BIND_ADDRESS)").Default("0.0.0.0").Envar("BIND_ADDRESS").IP() port = kingpin.Flag("port", "The port the bridge will listen on ($PORT)").Default("8080").Envar("PORT").Int() webhookPath = kingpin.Flag("webhook_path", "The URL path to handle requests on ($WEBHOOK_PATH)").Default("/gotify_webhook").Envar("WEBHOOK_PATH").String() timeout = kingpin.Flag("timeout", "The number of seconds to wait when connecting to gotify ($TIMEOUT)").Default("5s").Envar("TIMEOUT").Duration() titleAnnotation = kingpin.Flag("title_annotation", "Annotation holding the title of the alert ($TITLE_ANNOTATION)").Default("summary").Envar("TITLE_ANNOTATION").String() messageAnnotation = kingpin.Flag("message_annotation", "Annotation holding the alert message ($MESSAGE_ANNOTATION)").Default("description").Envar("MESSAGE_ANNOTATION").String() priorityAnnotation = kingpin.Flag("priority_annotation", "Annotation holding the priority of the alert ($PRIORITY_ANNOTATION)").Default("priority").Envar("PRIORITY_ANNOTATION").String() defaultPriority = kingpin.Flag("default_priority", "Annotation holding the priority of the alert ($DEFAULT_PRIORITY)").Default("5").Envar("DEFAULT_PRIORITY").Int() authUsername = kingpin.Flag("metrics_auth_username", "Username for metrics interface basic auth ($AUTH_USERNAME and $AUTH_PASSWORD)").Envar("AUTH_USERNAME").String() authPassword = "" metricsNamespace = kingpin.Flag("metrics_namespace", "Metrics Namespace ($METRICS_NAMESPACE)").Envar("METRICS_NAMESPACE").Default("alertmanager_gotify_bridge").String() metricsPath = kingpin.Flag("metrics_path", "Path under which to expose metrics for the bridge ($METRICS_PATH)").Envar("METRICS_PATH").Default("/metrics").String() extendedDetails = kingpin.Flag("extended_details", "When enabled, alerts are presented in HTML format and include colorized status (FIR|RES), alert start time, and a link to the generator of the alert ($EXTENDED_DETAILS)").Default("false").Envar("EXTENDED_DETAILS").Bool() debug = kingpin.Flag("debug", "Enable debug output of the server").Bool() metrics = make(map[string]int) ) func init() { prometheus.MustRegister(version.NewCollector(*metricsNamespace)) } type basicAuthHandler struct { handler http.HandlerFunc username string password string } type metricsHandler struct { svr *bridge } func (h *basicAuthHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { username, password, ok := r.BasicAuth() if !ok || username != 
h.username || password != h.password { log.Printf("Invalid HTTP auth from `%s`", r.RemoteAddr) w.Header().Set("WWW-Authenticate", "Basic realm=\"metrics\"") http.Error(w, "Invalid username or password", http.StatusUnauthorized) return } h.handler(w, r) return } func (h *metricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { collector := NewMetricsCollector(&metrics, h.svr, metricsNamespace) registry := prometheus.NewRegistry() registry.MustRegister(collector) newHandler := promhttp.HandlerFor(registry, promhttp.HandlerOpts{}) newHandler = promhttp.InstrumentMetricHandler(registry, newHandler) newHandler.ServeHTTP(w, r) return } func basicAuthHandlerBuilder(parentHandler http.Handler) http.Handler { if *authUsername != "" && authPassword != "" { return &basicAuthHandler{ handler: parentHandler.ServeHTTP, username: *authUsername, password: authPassword, } } return parentHandler } func main() { kingpin.Version(Version) kingpin.Parse() metrics["requests_received"] = 0 metrics["requests_invalid"] = 0 metrics["alerts_received"] = 0 metrics["alerts_invalid"] = 0 metrics["alerts_processed"] = 0 metrics["alerts_failed"] = 0 gotifyToken := os.Getenv("GOTIFY_TOKEN") if gotifyToken == "" { os.Stderr.WriteString("ERROR: The token for Gotify API must be set in the environment variable GOTIFY_TOKEN\n") os.Exit(1) } authPassword = os.Getenv("NUT_EXPORTER_WEB_AUTH_PASSWORD") if !strings.HasSuffix(*gotifyEndpoint, "/message") { os.Stderr.WriteString(fmt.Sprintf("WARNING: /message not at the end of the gotifyEndpoint parameter (%s). Automatically appending it.\n", *gotifyEndpoint)) toAdd := "/message" if strings.HasSuffix(*gotifyEndpoint, "/") { toAdd = "message" } *gotifyEndpoint += toAdd os.Stderr.WriteString(fmt.Sprintf("New gotifyEndpoint: %s\n", *gotifyEndpoint)) } _, err := url.ParseRequestURI(*gotifyEndpoint) if err != nil { fmt.Printf("Error - invalid gotify endpoint: %s\n", err) os.Exit(1) } serverType := "" if *debug { serverType = "debug " } fmt.Printf("Starting %sserver on http://%s:%d%s translating to %s ...\n", serverType, *address, *port, *webhookPath, *gotifyEndpoint) svr := &bridge{ debug: debug, timeout: timeout, titleAnnotation: titleAnnotation, messageAnnotation: messageAnnotation, priorityAnnotation: priorityAnnotation, defaultPriority: defaultPriority, gotifyToken: &gotifyToken, gotifyEndpoint: gotifyEndpoint, } serverMux := http.NewServeMux() serverMux.HandleFunc(*webhookPath, svr.handleCall) serverMux.Handle(*metricsPath, basicAuthHandlerBuilder(&metricsHandler{svr: svr})) server := &http.Server{ Addr: fmt.Sprintf("%s:%d", *address, *port), Handler: serverMux, } svr.server = server err = server.ListenAndServe() if nil != err { fmt.Printf("Error starting the server: %s", err) os.Exit(1) } } func (svr *bridge) handleCall(w http.ResponseWriter, r *http.Request) { var notification Notification text := []string{} respCode := http.StatusOK metrics["requests_received"]++ /* Assume this will never fail */ b, _ := ioutil.ReadAll(r.Body) if *svr.debug { log.Printf("bridge: Recieved request: %+v\n", r) log.Printf("bridge: Headers:\n") for name, headers := range r.Header { name = strings.ToLower(name) for _, h := range headers { log.Printf("bridge: %v: %v", name, h) } } log.Printf("bridge: BODY: %s\n", string(b)) } /* if data was sent, parse the data */ if string(b) != "" { if *svr.debug { log.Printf("bridge: data sent - unmarshalling from JSON: %s\n", string(b)) } err := json.Unmarshal(b, &notification) if err != nil { /* Failure goes back to the user as a 500. 
Log data here for debugging (which shouldn't ever fail!) */ log.Printf("bridge: Unmarshal of request failed: %s\n", err) log.Printf("\nBEGIN passed data:\n%s\nEND passed data.", string(b)) http.Error(w, fmt.Sprintf("%s", err), http.StatusBadRequest) metrics["requests_invalid"]++ return } if *svr.debug { log.Printf("Detected %d alerts\n", len(notification.Alerts)) } for idx, alert := range notification.Alerts { extras := make(map[string]interface{}) proceed := true title := "" message := "" priority := *svr.defaultPriority metrics["alerts_received"]++ if *svr.debug { log.Printf(" Alert %d", idx) } if *extendedDetails { // set text to html extrasContentType := make(map[string]string) extrasContentType["contentType"] = "text/html" extras["client::display"] = extrasContentType switch alert.Status { case "resolved": message += "<font style='color: #00b339;' data-mx-color='#00b339'>RESOLVED</font><br/> " title += "[RES] " case "firing": message += "<font style='color: #b31e00;' data-mx-color='#b31e00'>FIRING</font><br/> " title += "[FIR] " } } if val, ok := alert.Annotations[*svr.titleAnnotation]; ok { title += val if *svr.debug { log.Printf(" title: %s\n", title) } } else { proceed = false text = []string{fmt.Sprintf("Missing annotation: %s", *svr.titleAnnotation)} if *svr.debug { log.Printf(" title annotation (%s) missing\n", *svr.titleAnnotation) } } if val, ok := alert.Annotations[*svr.messageAnnotation]; ok { message = val if *svr.debug { log.Printf(" message: %s\n", message) } } else { proceed = false text = []string{fmt.Sprintf("Missing annotation: %s", *svr.messageAnnotation)} if *svr.debug { log.Printf(" message annotation (%s) missing\n", *svr.messageAnnotation) } } if val, ok := alert.Annotations[*svr.priorityAnnotation]; ok { tmp, err := strconv.Atoi(val) if err == nil { priority = tmp if *svr.debug { log.Printf(" priority: %d\n", priority) } } } else { if *svr.debug { log.Printf(" priority annotation (%s) missing - falling back to default (%d)\n", *svr.priorityAnnotation, *svr.defaultPriority) } } if *extendedDetails { if strings.HasPrefix(alert.GeneratorURL, "http") { message += "<br/><a href='" + alert.GeneratorURL + "'>go to source</a>" extrasNotification := make(map[string]map[string]string) extrasNotification["click"] = make(map[string]string) extrasNotification["click"]["url"] = alert.GeneratorURL extras["client::notification"] = extrasNotification } if alert.StartsAt != "" { message += "<br/><br/><i><font style='color: #999999;' data-mx-color='#999999'> alert created at: " + alert.StartsAt[:19] + "</font></i><br/>" } } if proceed { if *svr.debug { log.Printf(" Required fields found. 
Dispatching to gotify...\n") } outbound := GotifyNotification{ Title: title, Message: message, Priority: priority, Extras: extras, } msg, _ := json.Marshal(outbound) if *svr.debug { log.Printf(" Outbound: %s\n", string(msg)) } client := http.Client{ Timeout: *svr.timeout * time.Second, } request, err := http.NewRequest("POST", *svr.gotifyEndpoint, bytes.NewBuffer(msg)) if err != nil { log.Printf("Error setting up request: %s", err) http.Error(w, http.StatusText(http.StatusInternalServerError), http.StatusInternalServerError) metrics["alerts_failed"]++ return } request.Header.Set("Content-Type", "application/json") request.Header.Set("X-Gotify-Key", *svr.gotifyToken) resp, err := client.Do(request) if err != nil { log.Printf("Error dispatching to Gotify: %s", err) respCode = http.StatusInternalServerError text = append(text, err.Error()) metrics["alerts_failed"]++ continue } else { defer resp.Body.Close() body, _ := ioutil.ReadAll(resp.Body) if *svr.debug { log.Printf(" Dispatched! Response was %s\n", body) } if resp.StatusCode != 200 { log.Printf("Non-200 response from gotify at %s. Code: %d, Status: %s (enable debug to see body)", *svr.gotifyEndpoint, resp.StatusCode, resp.Status) respCode = resp.StatusCode text = append(text, fmt.Sprintf("Gotify Error: %s", resp.Status)) metrics["alerts_failed"]++ } else { text = append(text, fmt.Sprintf("Message %d dispatched", idx)) metrics["alerts_processed"]++ } continue } } else { if *svr.debug { log.Printf(" Unable to dispatch!\n") respCode = http.StatusBadRequest text = []string{"Incomplete request"} metrics["alerts_invalid"]++ } } } } else { text = []string{"No content sent"} } http.Error(w, strings.Join(text, "\n"), respCode) return }
[ "\"GOTIFY_TOKEN\"", "\"NUT_EXPORTER_WEB_AUTH_PASSWORD\"" ]
[]
[ "NUT_EXPORTER_WEB_AUTH_PASSWORD", "GOTIFY_TOKEN" ]
[]
["NUT_EXPORTER_WEB_AUTH_PASSWORD", "GOTIFY_TOKEN"]
go
2
0
pkg/cmd/roachtest/test.go
// Copyright 2018 The Cockroach Authors. // // Use of this software is governed by the Business Source License included // in the file licenses/BSL.txt and at www.mariadb.com/bsl11. // // Change Date: 2022-10-01 // // On the date above, in accordance with the Business Source License, use // of this software will be governed by the Apache License, Version 2.0, // included in the file licenses/APL.txt and at // https://www.apache.org/licenses/LICENSE-2.0 package main import ( "bytes" "context" "fmt" "io" "os" "os/exec" "os/signal" "path/filepath" "regexp" "runtime" "sort" "strconv" "strings" "sync" "time" "github.com/cockroachdb/cockroach/pkg/cmd/internal/issues" "github.com/cockroachdb/cockroach/pkg/util/syncutil" "github.com/cockroachdb/cockroach/pkg/util/timeutil" "github.com/cockroachdb/cockroach/pkg/util/version" "github.com/petermattis/goid" "github.com/pkg/errors" ) var ( count = 1 debugEnabled = false postIssues = true gceNameRE = regexp.MustCompile(`^[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?$`) ) // testFilter holds the name and tag filters for filtering tests. type testFilter struct { name *regexp.Regexp tag *regexp.Regexp rawTag []string } func newFilter(filter []string) *testFilter { var name []string var tag []string var rawTag []string for _, v := range filter { if strings.HasPrefix(v, "tag:") { tag = append(tag, strings.TrimPrefix(v, "tag:")) rawTag = append(rawTag, v) } else { name = append(name, v) } } if len(tag) == 0 { tag = []string{"default"} rawTag = []string{"tag:default"} } makeRE := func(strs []string) *regexp.Regexp { switch len(strs) { case 0: return regexp.MustCompile(`.`) case 1: return regexp.MustCompile(strs[0]) default: for i := range strs { strs[i] = "(" + strs[i] + ")" } return regexp.MustCompile(strings.Join(strs, "|")) } } return &testFilter{ name: makeRE(name), tag: makeRE(tag), rawTag: rawTag, } } type testSpec struct { Skip string // if non-empty, test will be skipped // When Skip is set, this can contain more text to be printed in the logs // after the "--- SKIP" line. SkipDetails string // For subtests, Name is supposed to originally be assigned to the name of the // subtest when constructing the spec and then, once added to the registry, it // will automatically be expanded to contain all the parents' names. At that // point, subtestName will be populated to the original value of Name. Name string subtestName string // The maximum duration the test is allowed to run before it is considered // failed. If not specified, the default timeout is 10m before the test's // associated cluster expires. The timeout is always truncated to 10m before // the test's cluster expires. Timeout time.Duration // MinVersion indicates the minimum cockroach version that is required for // the test to be run. If MinVersion is less than the version specified // --cockroach-version, Skip will be populated causing the test to be // skipped. MinVersion string minVersion *version.Version // Tags is a set of tags associated with the test that allow grouping // tests. If no tags are specified, the set ["default"] is automatically // given. Tags []string // Cluster provides the specification for the cluster to use for the test. Only // a top-level testSpec may contain a nodes specification. The cluster is // shared by all subtests. Cluster clusterSpec // UseIOBarrier controls the local-ssd-no-ext4-barrier flag passed to // roachprod when creating a cluster. If set, the flag is not passed, and so // you get durable writes. 
If not set (the default!), the filesystem is // mounted without the barrier. // // The default (false) is chosen because it the no-barrier option is needed // explicitly by some tests (particularly benchmarks, ironically, since they'd // rather measure other things than I/O) and the vast majority of other tests // don't care - there's no durability across machine crashes that roachtests // care about. UseIOBarrier bool // A testSpec must specify only one of Run or SubTests. All subtests run in // the same cluster, without concurrency between them. Subtest should not // assume any particular state for the cluster as the SubTest may be run in // isolation. Run func(ctx context.Context, t *test, c *cluster) SubTests []testSpec } // matchOrSkip returns true if the filter matches the test. If the filter does // not match the test because the tag filter does not match, the test is // matched, but marked as skipped. func (t *testSpec) matchOrSkip(filter *testFilter) bool { if !filter.name.MatchString(t.Name) { return false } if len(t.Tags) == 0 { if !filter.tag.MatchString("default") { t.Skip = fmt.Sprintf("%s does not match [default]", filter.rawTag) } return true } for _, t := range t.Tags { if filter.tag.MatchString(t) { return true } } t.Skip = fmt.Sprintf("%s does not match %s", filter.rawTag, t.Tags) return true } // matchRegex returns true if the regex matches the test's name or any of the // subtest names. func (t *testSpec) matchRegex(filter *testFilter) bool { if t.matchOrSkip(filter) { return true } for i := range t.SubTests { if t.SubTests[i].matchRegex(filter) { return true } } return false } func (t *testSpec) matchRegexRecursive(filter *testFilter) []testSpec { var res []testSpec if t.matchOrSkip(filter) { res = append(res, *t) } for i := range t.SubTests { res = append(res, t.SubTests[i].matchRegexRecursive(filter)...) } return res } type registry struct { m map[string]*testSpec clusters map[string]string out io.Writer statusInterval time.Duration buildVersion *version.Version config struct { // skipClusterValidationOnAttach skips validation on existing clusters that // the registry uses for running tests. skipClusterValidationOnAttach bool // skipClusterStopOnAttach skips stopping existing clusters that // the registry uses for running tests. It implies skipClusterWipeOnAttach. skipClusterStopOnAttach bool skipClusterWipeOnAttach bool } status struct { syncutil.Mutex running map[*test]struct{} pass map[*test]struct{} fail map[*test]struct{} skip map[*test]struct{} } } type registryOpt func(r *registry) error var ( // setBuildVersion sets the build version based on the flag variable or loads // the version from git if the flag is not set. setBuildVersion registryOpt = func(r *registry) error { if buildTag != "" { return r.setBuildVersion(buildTag) } return r.loadBuildVersion() } ) // newRegistry constructs a registry and configures it with opts. If any opt // returns an error then the function will log about the error and exit the // process with os.Exit(1). 
func newRegistry(opts ...registryOpt) *registry { r := &registry{ m: make(map[string]*testSpec), clusters: make(map[string]string), out: os.Stdout, } r.config.skipClusterWipeOnAttach = !clusterWipe for _, opt := range opts { if err := opt(r); err != nil { fmt.Fprintf(os.Stderr, "failed to construct registry: %v\n", err) os.Exit(1) } } return r } func (r *registry) setBuildVersion(buildTag string) error { var err error r.buildVersion, err = version.Parse(buildTag) return err } func (r *registry) loadBuildVersion() error { getLatestTag := func() (string, error) { cmd := exec.Command("git", "describe", "--abbrev=0", "--tags", "--match=v[0-9]*") out, err := cmd.CombinedOutput() if err != nil { return "", errors.Wrapf(err, "failed to get version tag from git. Are you running in the "+ "cockroach repo directory? err=%s, out=%s", err, out) } return strings.TrimSpace(string(out)), nil } buildTag, err := getLatestTag() if err != nil { return err } return r.setBuildVersion(buildTag) } // PredecessorVersion returns a recent predecessor of the build version (i.e. // the build tag of the main binary). For example, if the running binary is from // the master branch prior to releasing 19.2.0, this will return a recent // (ideally though not necessarily the latest) 19.1 patch release. func (r *registry) PredecessorVersion() (string, error) { if r.buildVersion == nil { return "", errors.Errorf("buildVersion not set") } buildVersionMajorMinor := fmt.Sprintf("%d.%d", r.buildVersion.Major(), r.buildVersion.Minor()) verMap := map[string]string{ "19.2": "19.1.0-rc.4", "19.1": "2.1.6", "2.2": "2.1.6", "2.1": "2.0.7", } v, ok := verMap[buildVersionMajorMinor] if !ok { return "", errors.Errorf("prev version not set for version: %s", buildVersionMajorMinor) } return v, nil } // verifyValidClusterName verifies that the test name can be turned into a cluster // name when run by TeamCity. Outside of TeamCity runs, depending on the user // running it and the "cluster id" component of a cluster name, the name may // still be invalid; however, this method is designed to catch test names // that will cause errors on TeamCity but not in a developer's local test // environment. func (r *registry) verifyValidClusterName(testName string) error { // Both the name of the cluster, and the names of the individual nodes in the // cluster, must be valid identifiers in GCE when running on TeamCity. An // identifier can be tested using a regular expression. Also note that, due to // the specifics of the regular expression, we cannot assume that a valid // cluster name implies valid node names, or vice-versa; we therefore // construct both a TeamCity cluster name and a TeamCity node name and // validate both. // The name of a cluster is constructed as "[cluster ID][test name]" // In TeamCity runs, the cluster ID is currently a prefix with 6 digits, but // we use 7 here for a bit of breathing room. teamcityClusterName := makeGCEClusterName("teamcity-1234567-" + testName) if !gceNameRE.MatchString(teamcityClusterName) { return fmt.Errorf( "test name '%s' results in invalid cluster name"+ " (generated cluster name '%s' must match regex '%s')."+ " The test name may be too long or have invalid characters", testName, teamcityClusterName, gceNameRE, ) } // The node names are constructed using the cluster name, plus a 4 digit node // ID. 
teamcityNodeName := makeGCEClusterName("teamcity-1234567-" + testName + "-1234") if !gceNameRE.MatchString(teamcityNodeName) { return fmt.Errorf( "test name '%s' results in invalid cluster node names"+ " (generated node name '%s' must match regex '%s')."+ " The test name may be too long or have invalid characters", testName, teamcityNodeName, gceNameRE, ) } // Verify that the cluster name is not shared with an existing test. if t, ok := r.clusters[teamcityClusterName]; ok { return fmt.Errorf("test %s and test %s have equivalent nightly cluster names: %s", testName, t, teamcityClusterName) } r.clusters[teamcityClusterName] = testName return nil } func (r *registry) prepareSpec(spec *testSpec, depth int) error { if depth == 0 { spec.subtestName = spec.Name // Only top-level tests can create clusters, so those are the only ones for // which we need to verify the cluster name. if err := r.verifyValidClusterName(spec.Name); err != nil { return err } } if (spec.Run != nil) == (len(spec.SubTests) > 0) { return fmt.Errorf("%s: must specify only one of Run or SubTests", spec.Name) } if spec.Run == nil && spec.Timeout > 0 { return fmt.Errorf("%s: timeouts only apply to tests specifying Run", spec.Name) } if depth > 0 && spec.Cluster.NodeCount > 0 { return fmt.Errorf("%s: subtest may not provide cluster specification", spec.Name) } for i := range spec.SubTests { spec.SubTests[i].subtestName = spec.SubTests[i].Name spec.SubTests[i].Name = spec.Name + "/" + spec.SubTests[i].Name if err := r.prepareSpec(&spec.SubTests[i], depth+1); err != nil { return err } } if spec.MinVersion != "" { v, err := version.Parse(spec.MinVersion) if err != nil { return fmt.Errorf("%s: unable to parse min-version: %s", spec.Name, err) } if v.PreRelease() != "" { // Specifying a prerelease version as a MinVersion is too confusing // to be useful. The comparison is not straightforward. return fmt.Errorf("invalid version %s, cannot specify a prerelease (-xxx)", v) } // We append "-0" to the min-version spec so that we capture all // prereleases of the specified version. Otherwise, "v2.1.0" would compare // greater than "v2.1.0-alpha.x". spec.minVersion = version.MustParse(spec.MinVersion + "-0") } return nil } func (r *registry) Add(spec testSpec) { if _, ok := r.m[spec.Name]; ok { fmt.Fprintf(os.Stderr, "test %s already registered\n", spec.Name) os.Exit(1) } if err := r.prepareSpec(&spec, 0); err != nil { fmt.Fprintf(os.Stderr, "%s\n", err) os.Exit(1) } r.m[spec.Name] = &spec } // ListTopLevel lists the top level tests that match re, or that have a subtests // that matches re. func (r *registry) ListTopLevel(filter *testFilter) []*testSpec { var results []*testSpec for _, t := range r.m { if t.matchRegex(filter) { results = append(results, t) } } sort.Slice(results, func(i, j int) bool { return results[i].Name < results[j].Name }) return results } // ListAll lists all tests that match one of the filters. If a subtest matches // but a parent doesn't, only the subtest is returned. If a parent matches, all // subtests are returned. func (r *registry) ListAll(filters []string) []string { filter := newFilter(filters) var tests []testSpec for _, t := range r.m { tests = append(tests, t.matchRegexRecursive(filter)...) 
} var names []string for _, t := range tests { if t.Skip == "" && t.minVersion != nil { if !r.buildVersion.AtLeast(t.minVersion) { t.Skip = fmt.Sprintf("build-version (%s) < min-version (%s)", r.buildVersion, t.minVersion) } } name := t.Name if t.Skip != "" { name += " (skipped: " + t.Skip + ")" } names = append(names, name) } sort.Strings(names) return names } // Run runs the tests that match the filter. // // Args: // artifactsDir: The path to the dir where log files will be put. If empty, all // logging will go to stdout/stderr. func (r *registry) Run(filters []string, parallelism int, artifactsDir string, user string) int { filter := newFilter(filters) // Find the top-level tests to run. tests := r.ListTopLevel(filter) if len(tests) == 0 { fmt.Fprintf(r.out, "warning: no tests to run %s\n", filters) fmt.Fprintf(r.out, "FAIL\n") return 1 } // Skip any tests for which the min-version is less than the build-version. for _, t := range tests { if t.Skip == "" && t.minVersion != nil { if !r.buildVersion.AtLeast(t.minVersion) { t.Skip = fmt.Sprintf("build-version (%s) < min-version (%s)", r.buildVersion, t.minVersion) } } } wg := &sync.WaitGroup{} wg.Add(count * len(tests)) // We can't run tests in parallel on local clusters or on an existing // cluster. if local || clusterName != "" { parallelism = 1 } // Limit the parallelism to the number of tests. The primary effect this has // is that we'll log to stdout/stderr if only one test is being run. if parallelism > len(tests) { parallelism = len(tests) } r.status.running = make(map[*test]struct{}) r.status.pass = make(map[*test]struct{}) r.status.fail = make(map[*test]struct{}) r.status.skip = make(map[*test]struct{}) cr := newClusterRegistry() ctx, cancel := context.WithCancel(context.Background()) defer cancel() go func() { sem := make(chan struct{}, parallelism) for j := 0; j < count; j++ { for i := range tests { sem <- struct{}{} runNum := j + 1 if count == 1 { runNum = 0 } // Log to stdout/stderr if we're not running tests in parallel. teeOpt := noTee if parallelism == 1 { teeOpt = teeToStdout } artifactsSuffix := "" if runNum != 0 { artifactsSuffix = "run_" + strconv.Itoa(runNum) } var runDir string if artifactsDir != "" { runDir = filepath.Join( artifactsDir, teamCityNameEscape(tests[i].subtestName), artifactsSuffix) } r.runAsync( ctx, tests[i], filter, nil /* parent */, nil, /* cluster */ runNum, teeOpt, runDir, user, cr, func(failed bool) { wg.Done() <-sem }) } } }() done := make(chan struct{}) go func() { wg.Wait() close(done) }() // Periodically output test status to give an indication of progress. if r.statusInterval == 0 { r.statusInterval = time.Minute } ticker := time.NewTicker(r.statusInterval) defer ticker.Stop() // Shut down test clusters when interrupted (for example CTRL+C). sig := make(chan os.Signal, 1) signal.Notify(sig, os.Interrupt) for i := 1; ; i++ { select { case <-done: r.status.Lock() defer r.status.Unlock() postSlackReport(r.status.pass, r.status.fail, r.status.skip) if len(r.status.fail) > 0 { fmt.Fprintln(r.out, "FAIL") return 1 } fmt.Fprintf(r.out, "PASS\n") return 0 case <-ticker.C: r.status.Lock() runningTests := make([]*test, 0, len(r.status.running)) for t := range r.status.running { runningTests = append(runningTests, t) } sort.Slice(runningTests, func(i, j int) bool { return runningTests[i].Name() < runningTests[j].Name() }) var buf bytes.Buffer for _, t := range runningTests { if t.spec.Run == nil { // Ignore tests with subtests. 
continue } t.mu.Lock() done := t.mu.done var status map[int64]testStatus if !done { status = make(map[int64]testStatus, len(t.mu.status)) for k, v := range t.mu.status { status[k] = v } if len(status) == 0 { // If we have no other status messages display this unknown state. status[0] = testStatus{ msg: "???", time: timeutil.Now(), } } } t.mu.Unlock() if !done { ids := make([]int64, 0, len(status)) for id := range status { ids = append(ids, id) } sort.Slice(ids, func(i, j int) bool { // Force the goroutine ID for the main test goroutine to sort to // the front. NB: goroutine IDs are not monotonically increasing // because each thread has a small cache of IDs for allocation. if ids[j] == t.runnerID { return false } if ids[i] == t.runnerID { return true } return ids[i] < ids[j] }) fmt.Fprintf(&buf, "[%4d] %s: ", i, t.Name()) for j := range ids { s := status[ids[j]] duration := timeutil.Now().Sub(s.time) progStr := "" if s.progress > 0 { progStr = fmt.Sprintf("%.1f%%|", 100*s.progress) } if j > 0 { buf.WriteString(", ") } fmt.Fprintf(&buf, "%s (%s%s)", s.msg, progStr, time.Duration(duration.Seconds()+0.5)*time.Second) } fmt.Fprintf(&buf, "\n") } } fmt.Fprint(r.out, buf.String()) r.status.Unlock() case <-sig: if !debugEnabled { cancel() // Destroy all clusters. Don't wait more than 5 min for that though. ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) cr.destroyAllClusters(ctx) cancel() } } } } type testStatus struct { msg string time time.Time progress float64 } type test struct { spec *testSpec registry *registry // l is the logger that the test will use for its output. l *logger runner string runnerID int64 start time.Time end time.Time // debugEnabled is a test scoped value which enables automated tests to // enable debugging without enabling debugging for all tests. // It is a bit of a hack added to help debug #34458. debugEnabled bool // artifactsDir is the path to the directory holding all the artifacts for // this test. It will contain a test.log file, cluster logs, and // subdirectories for subtests. artifactsDir string mu struct { syncutil.RWMutex done bool failed bool // cancel, if set, is called from the t.Fatal() family of functions when the // test is being marked as failed (i.e. when the failed field above is also // set). This is used to cancel the context passed to t.spec.Run(), so async // test goroutines can be notified. cancel func() failLoc struct { file string line int } status map[int64]testStatus output []byte } } func (t *test) Name() string { return t.spec.Name } func (t *test) logger() *logger { return t.l } func (t *test) status(id int64, args ...interface{}) { t.mu.Lock() defer t.mu.Unlock() if t.mu.status == nil { t.mu.status = make(map[int64]testStatus) } if len(args) == 0 { delete(t.mu.status, id) return } t.mu.status[id] = testStatus{ msg: fmt.Sprint(args...), time: timeutil.Now(), } } // Status sets the main status message for the test. When called from the main // test goroutine (i.e. the goroutine on which testSpec.Run is invoked), this // is equivalent to calling WorkerStatus. If no arguments are specified, the // status message is erased. func (t *test) Status(args ...interface{}) { t.status(t.runnerID, args...) } // WorkerStatus sets the status message for a worker goroutine associated with // the test. The status message should be cleared before the goroutine exits by // calling WorkerStatus with no arguments. func (t *test) WorkerStatus(args ...interface{}) { t.status(goid.Get(), args...) 
} func (t *test) progress(id int64, frac float64) { t.mu.Lock() defer t.mu.Unlock() if t.mu.status == nil { t.mu.status = make(map[int64]testStatus) } status := t.mu.status[id] status.progress = frac t.mu.status[id] = status } // Progress sets the progress (a fraction in the range [0,1]) associated with // the main test status messasge. When called from the main test goroutine // (i.e. the goroutine on which testSpec.Run is invoked), this is equivalent to // calling WorkerProgress. func (t *test) Progress(frac float64) { t.progress(t.runnerID, frac) } // WorkerProgress sets the progress (a fraction in the range [0,1]) associated // with the a worker status messasge. func (t *test) WorkerProgress(frac float64) { t.progress(goid.Get(), frac) } // Skip records msg into t.spec.Skip and calls runtime.Goexit() - thus // interrupting the running of the test. func (t *test) Skip(msg string, details string) { t.spec.Skip = msg t.spec.SkipDetails = details runtime.Goexit() } func (t *test) Fatal(args ...interface{}) { t.fatalfInner("" /* format */, args...) } func (t *test) Fatalf(format string, args ...interface{}) { t.fatalfInner(format, args...) } func (t *test) fatalfInner(format string, args ...interface{}) { // Skip two frames: our own and the caller. if format != "" { t.printfAndFail(2 /* skip */, format, args...) } else { t.printAndFail(2 /* skip */, args...) } runtime.Goexit() } // FatalIfErr calls t.Fatal() if err != nil. func FatalIfErr(t *test, err error) { if err != nil { t.fatalfInner("" /* format */, err) } } func (t *test) printAndFail(skip int, args ...interface{}) { t.mu.Lock() defer t.mu.Unlock() t.mu.output = append(t.mu.output, t.decorate(skip+1, fmt.Sprint(args...))...) t.mu.failed = true if t.mu.cancel != nil { t.mu.cancel() } } func (t *test) printfAndFail(skip int, format string, args ...interface{}) { msg := t.decorate(skip+1, fmt.Sprintf(format, args...)) t.l.Printf("test failure: " + msg) t.mu.Lock() defer t.mu.Unlock() t.mu.output = append(t.mu.output, msg...) t.mu.failed = true if t.mu.cancel != nil { t.mu.cancel() } } // Args: // skip: The number of stack frames to exclude from the result. 0 means that // the caller will be the first frame identified. 1 means the caller's caller // will be the first, etc. func (t *test) decorate(skip int, s string) string { // Skip two extra frames to account for this function and runtime.Callers // itself. var pc [50]uintptr n := runtime.Callers(2+skip, pc[:]) if n == 0 { panic("zero callers found") } buf := new(bytes.Buffer) frames := runtime.CallersFrames(pc[:n]) sep := "\t" runnerFound := false for { if runnerFound { break } frame, more := frames.Next() if !more { break } if frame.Function == t.runner { runnerFound = true // Handle the special case of the runner function being the caller of // t.Fatal(). In that case, that's the line to be used for issue creation. if t.mu.failLoc.file == "" { t.mu.failLoc.file = frame.File t.mu.failLoc.line = frame.Line } } if !t.mu.failed && !runnerFound { // Keep track of the highest stack frame that is lower than the t.runner // stack frame. This is used to determine the author of that line of code // and issue assignment. 
t.mu.failLoc.file = frame.File t.mu.failLoc.line = frame.Line } file := frame.File if index := strings.LastIndexByte(file, '/'); index >= 0 { file = file[index+1:] } fmt.Fprintf(buf, "%s%s:%d", sep, file, frame.Line) sep = "," } buf.WriteString(": ") lines := strings.Split(s, "\n") if l := len(lines); l > 1 && lines[l-1] == "" { lines = lines[:l-1] } for i, line := range lines { if i > 0 { buf.WriteString("\n\t\t") } buf.WriteString(line) } buf.WriteByte('\n') return buf.String() } func (t *test) duration() time.Duration { return t.end.Sub(t.start) } func (t *test) Failed() bool { t.mu.RLock() failed := t.mu.failed t.mu.RUnlock() return failed } func (t *test) ArtifactsDir() string { return t.artifactsDir } // IsBuildVersion returns true if the build version is greater than or equal to // minVersion. This allows a test to optionally perform additional checks // depending on the cockroach version it is running against. Note that the // versions are Cockroach build tag version numbers, not the internal cluster // version number. func (t *test) IsBuildVersion(minVersion string) bool { vers, err := version.Parse(minVersion) if err != nil { t.Fatal(err) } if p := vers.PreRelease(); p != "" { panic("cannot specify a prerelease: " + p) } // We append "-0" to the min-version spec so that we capture all // prereleases of the specified version. Otherwise, "v2.1.0" would compare // greater than "v2.1.0-alpha.x". vers = version.MustParse(minVersion + "-0") return t.registry.buildVersion.AtLeast(vers) } var _ = (*test)(nil).IsBuildVersion // avoid unused lint // runAsync starts a goroutine that runs a test. If the test has subtests, // runAsync will be invoked recursively, but in a blocking manner. // // Args: // parent: The test's parent. Nil if the test is not a subtest. // c: The cluster on which the test (and all subtests) will run. If nil, a new // cluster will be created. // runNum: The 1-based index of this test run, if --count > 1. Otherwise (if // there's a single run), runNum is 0. func (r *registry) runAsync( ctx context.Context, spec *testSpec, filter *testFilter, parent *test, c *cluster, runNum int, teeOpt teeOptType, artifactsDir string, user string, cr *clusterRegistry, done func(failed bool), ) { t := &test{ spec: spec, registry: r, artifactsDir: artifactsDir, } var logPath string if artifactsDir != "" { logPath = filepath.Join(artifactsDir, "test.log") } l, err := rootLogger(logPath, teeOpt) FatalIfErr(t, err) t.l = l out := io.MultiWriter(r.out, t.l.file) if teamCity { fmt.Printf("##teamcity[testStarted name='%s' flowId='%s']\n", t.Name(), t.Name()) } else { var details []string if t.spec.Skip != "" { details = append(details, "skip") } var detail string if len(details) > 0 { detail = fmt.Sprintf(" [%s]", strings.Join(details, ",")) } fmt.Fprintf(out, "=== RUN %s%s\n", t.Name(), detail) } r.status.Lock() r.status.running[t] = struct{}{} r.status.Unlock() callerName := func() string { // Make room for the skip PC. var pc [2]uintptr n := runtime.Callers(2, pc[:]) // skip + runtime.Callers + callerName if n == 0 { panic("zero callers found") } frames := runtime.CallersFrames(pc[:n]) frame, _ := frames.Next() return frame.Function } go func() { t.runner = callerName() t.runnerID = goid.Get() defer func() { t.end = timeutil.Now() if err := recover(); err != nil { t.mu.Lock() t.mu.failed = true t.mu.output = append(t.mu.output, t.decorate(0 /* skip */, fmt.Sprint(err))...) 
t.mu.Unlock() } t.mu.Lock() t.mu.done = true t.mu.Unlock() dstr := fmt.Sprintf("%.2fs", t.duration().Seconds()) if t.Failed() { t.mu.Lock() output := t.mu.output failLoc := t.mu.failLoc t.mu.Unlock() if teamCity { fmt.Fprintf( r.out, "##teamcity[testFailed name='%s' details='%s' flowId='%s']\n", t.Name(), teamCityEscape(string(output)), t.Name(), ) } fmt.Fprintf(out, "--- FAIL: %s (%s)\n%s", t.Name(), dstr, output) if postIssues && issues.CanPost() && t.spec.Run != nil { authorEmail := getAuthorEmail(failLoc.file, failLoc.line) branch := "<unknown branch>" if b := os.Getenv("TC_BUILD_BRANCH"); b != "" { branch = b } msg := fmt.Sprintf("The test failed on branch=%s, cloud=%s:\n%s", branch, cloud, output) if err := issues.Post( context.Background(), fmt.Sprintf("roachtest: %s failed", t.Name()), "roachtest", t.Name(), msg, authorEmail, []string{"O-roachtest"}, ); err != nil { fmt.Fprintf(out, "failed to post issue: %s\n", err) } } } else if t.spec.Skip == "" { fmt.Fprintf(out, "--- PASS: %s (%s)\n", t.Name(), dstr) // If `##teamcity[testFailed ...]` is not present before `##teamCity[testFinished ...]`, // TeamCity regards the test as successful. } else { if teamCity { fmt.Fprintf(r.out, "##teamcity[testIgnored name='%s' message='%s']\n", t.Name(), teamCityEscape(t.spec.Skip)) } fmt.Fprintf(out, "--- SKIP: %s (%s)\n\t%s\n", t.Name(), dstr, t.spec.Skip) if t.spec.SkipDetails != "" { fmt.Fprintf(out, "Details: %s\n", t.spec.SkipDetails) } } if teamCity { fmt.Fprintf(r.out, "##teamcity[testFinished name='%s' flowId='%s']\n", t.Name(), t.Name()) // Only publish artifacts for failed tests. At the time of writing, a full roachtest // suite results in ~6gb of artifacts which we can't retain for more than a few days // (and this in turn delays the resolution of failures). if t.Failed() && artifactsDir != "" { escapedTestName := teamCityNameEscape(t.Name()) artifactsGlobPath := filepath.Join(artifactsDir, "**") artifactsSpec := fmt.Sprintf("%s => %s", artifactsGlobPath, escapedTestName) fmt.Fprintf(r.out, "##teamcity[publishArtifacts '%s']\n", artifactsSpec) } } r.status.Lock() delete(r.status.running, t) // Only include tests with a Run function in the summary output. if t.spec.Run != nil { if t.Failed() { r.status.fail[t] = struct{}{} } else if t.spec.Skip == "" { r.status.pass[t] = struct{}{} } else { r.status.skip[t] = struct{}{} } } r.status.Unlock() done(t.Failed()) }() t.start = timeutil.Now() if t.spec.Skip != "" { return } if c == nil { if clusterName == "" { var name string if !local { name = clusterID if name == "" { name = fmt.Sprintf("%d", timeutil.Now().Unix()) } name += "-" + t.Name() } cfg := clusterConfig{ name: name, nodes: t.spec.Cluster, useIOBarrier: t.spec.UseIOBarrier, artifactsDir: t.ArtifactsDir(), localCluster: local, teeOpt: teeOpt, user: user, } var err error c, err = newCluster(ctx, t.l, cfg, cr) if err != nil { t.Skip("failed to created cluster", err.Error()) } } else { opt := attachOpt{ skipValidation: r.config.skipClusterValidationOnAttach, skipStop: r.config.skipClusterStopOnAttach, skipWipe: r.config.skipClusterWipeOnAttach, } var err error c, err = attachToExistingCluster(ctx, clusterName, t.l, t.spec.Cluster, opt, cr) FatalIfErr(t, err) } if c != nil { defer func() { if (!debugEnabled && !t.debugEnabled) || !t.Failed() { c.Destroy(ctx, closeLogger) } else { c.l.Printf("not destroying cluster to allow debugging\n") } }() } } else { c = c.clone() } c.setTest(t) // If we have subtests, handle them here and return. 
if t.spec.Run == nil { for i := range t.spec.SubTests { childSpec := t.spec.SubTests[i] if childSpec.matchRegex(filter) { var wg sync.WaitGroup wg.Add(1) // Each subtest gets its own subdir in the parent's artifacts dir. var childDir string if t.ArtifactsDir() != "" { childDir = filepath.Join(t.ArtifactsDir(), teamCityNameEscape(childSpec.subtestName)) } r.runAsync(ctx, &childSpec, filter, t, c, runNum, teeOpt, childDir, user, cr, func(failed bool) { if failed { // Mark the parent test as failed since one of the subtests // failed. t.mu.Lock() t.mu.failed = true t.mu.Unlock() } if failed && debugEnabled { // The test failed and debugging is enabled. Don't try to stumble // forward running another test or subtest, just exit // immediately. os.Exit(1) } wg.Done() }) wg.Wait() } } return } // No subtests, so this is a leaf test. timeout := c.expiration.Add(-10 * time.Minute).Sub(timeutil.Now()) if timeout <= 0 { t.spec.Skip = fmt.Sprintf("cluster expired (%s)", timeout) return } if t.spec.Timeout > 0 && timeout > t.spec.Timeout { timeout = t.spec.Timeout } done := make(chan struct{}) defer close(done) // closed only after we've grabbed the debug info below defer func() { if t.Failed() { if err := c.FetchDebugZip(ctx); err != nil { c.l.Printf("failed to download debug zip: %s", err) } if err := c.FetchDmesg(ctx); err != nil { c.l.Printf("failed to fetch dmesg: %s", err) } if err := c.FetchJournalctl(ctx); err != nil { c.l.Printf("failed to fetch journalctl: %s", err) } if err := c.FetchCores(ctx); err != nil { c.l.Printf("failed to fetch cores: %s", err) } if err := c.CopyRoachprodState(ctx); err != nil { c.l.Printf("failed to copy roachprod state: %s", err) } } // NB: fetch the logs even when we have a debug zip because // debug zip can't ever get the logs for down nodes. // We only save artifacts for failed tests in CI, so this // duplication is acceptable. if err := c.FetchLogs(ctx); err != nil { c.l.Printf("failed to download logs: %s", err) } }() // Detect replica divergence (i.e. ranges in which replicas have arrived // at the same log position with different states). defer c.FailOnReplicaDivergence(ctx, t) // Detect dead nodes in an inner defer. Note that this will call t.Fatal // when appropriate, which will cause the closure above to enter the // t.Failed() branch. defer c.FailOnDeadNodes(ctx, t) runCtx, cancel := context.WithCancel(ctx) t.mu.Lock() // t.Fatal() will cancel this context. t.mu.cancel = cancel t.mu.Unlock() go func() { defer cancel() select { case <-time.After(timeout): t.printfAndFail(0 /* skip */, "test timed out (%s)\n", timeout) if err := c.FetchDebugZip(ctx); err != nil { c.l.Printf("failed to download logs: %s", err) } // NB: c.destroyState is nil for cloned clusters (i.e. in subtests). if !debugEnabled && c.destroyState != nil { // We don't close the logger here because the cluster may still be in // use by the test. c.Destroy(ctx, dontCloseLogger) } case <-done: } }() t.spec.Run(runCtx, t, c) }() } // teamCityEscape escapes a string for use as <value> in a key='<value>' attribute // in TeamCity build output marker. // Documentation here: https://confluence.jetbrains.com/display/TCD10/Build+Script+Interaction+with+TeamCity#BuildScriptInteractionwithTeamCity-Escapedvalues func teamCityEscape(s string) string { r := strings.NewReplacer( "\n", "|n", "'", "|'", "|", "||", "[", "|[", "]", "|]", ) return r.Replace(s) } func teamCityNameEscape(name string) string { return strings.Replace(name, ",", "_", -1) } // getAuthorEmail retrieves the author of a line of code. 
Returns the empty // string if the author cannot be determined. func getAuthorEmail(file string, line int) string { const repo = "github.com/cockroachdb/cockroach/" i := strings.Index(file, repo) if i == -1 { return "" } file = file[i+len(repo):] cmd := exec.Command(`/bin/bash`, `-c`, fmt.Sprintf(`git blame --porcelain -L%d,+1 $(git rev-parse --show-toplevel)/%s | grep author-mail`, line, file)) // This command returns output such as: // author-mail <[email protected]> out, err := cmd.CombinedOutput() if err != nil { return "" } re := regexp.MustCompile("author-mail <(.*)>") matches := re.FindSubmatch(out) if matches == nil { return "" } return string(matches[1]) }
[ "\"TC_BUILD_BRANCH\"" ]
[]
[ "TC_BUILD_BRANCH" ]
[]
["TC_BUILD_BRANCH"]
go
1
0
utils/ecs-run.py
#!/usr/bin/env python3 ################################################################################ # Copyright Keith D Gregory # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ################################################################################ import argparse import boto3 import json import os import re import sys # arg_parser is a global so that we can write help text from multiple places arg_parser = None def parse_args(argv): """ Parses all arguments, defaulting to environment variables if present. """ global arg_parser arg_parser = argparse.ArgumentParser(description="Runs an ECS task in Fargate.") arg_parser.add_argument("--cluster", metavar="CLUSTER_NAME", dest='cluster', help="""Cluster where the task will be run; default cluster if not specified. Defaults to the value of the ECS_CLUSTER environment variable. """) arg_parser.add_argument("--subnets", metavar="COMMA_SEPARATED_LIST", dest='subnets', help="""One or more subnets where the task may run; must belong to the VPC associated with the cluster. Defaults to the value of the ECS_SUBNETS environment variable. """) arg_parser.add_argument("--security_groups", "--sg", metavar="COMMA_SEPARATED_LIST", dest='security_groups', help="""Up to five security group IDs that will be associated with task. Defaults to the value of the ECS_SECURITY_GROUPS environment variable. """) arg_parser.add_argument("--task_execution_role", "--te", metavar="NAME_OR_ARN", dest='task_execution_role', help="""Name or ARN of the role used by ECS to launch the task, overriding any role configured in the task definition. Defaults to the value of the ECS_TASK_EXECUTION_ROLE environment variable. """) arg_parser.add_argument("--task_role", "--tr", metavar="NAME_OR_ARN", dest='task_role', help="""Name or ARN of the role used by the task itself, overriding any role configured in the task definition. Defaults to the value of the ECS_TASK_ROLE environment variable. """) arg_parser.add_argument("--assign_public_ip", action='store_const', const=True, default=False, dest='assign_public_ip', help="""Configures the task with a public IP. This only matters when running in a public subnet; a public IP in a private subnet has no effect. ECS will be unable to run the task if it's in a public subnet without a public IP, or in a private subnet without a NAT. Also set if the environment variable ECS_ASSIGN_PUBLIC_IP is set and contains "true" (case-insensitive). """) arg_parser.add_argument("--enable_exec", action='store_const', const=True, default=False, dest='enable_exec', help="""Configures the task to allow SSM Exec. This requires a task role (either provided here as an override or in the task definition) that grants ecs:ExecuteCommand. """) arg_parser.add_argument("--task_definition_version", metavar="VERSION", dest='taskdef_version', help="""The version of the task definition; defaults to the latest version. 
""") arg_parser.add_argument('taskdef', metavar="TASK_DEFINITION_NAME", help="""The name of the task definition """) arg_parser.add_argument('envars', nargs=argparse.REMAINDER, metavar="ENVIRONMENT_OVERRIDE", help="""Environment variable overrides for the task. May be specified as KEY=VALUE or CONTAINER:KEY=VALUE. Former applies to all containers in the task definition, latter to a specific container. """) args = arg_parser.parse_args(argv) args.cluster = args.cluster or os.environ.get("ECS_CLUSTER") args.subnets = args.subnets or os.environ.get("ECS_SUBNETS") args.security_groups = args.security_groups or os.environ.get("ECS_SECURITY_GROUPS") args.assign_public_ip = args.assign_public_ip or (os.environ.get("ECS_ASSIGN_PUBLIC_IP", "false").lower() == "true") args.task_execution_role = args.task_execution_role or os.environ.get("ECS_TASK_EXECUTION_ROLE") args.task_role= args.task_role or os.environ.get("ECS_TASK_ROLE") return args def exit_if_none(value, message): """ If the passed value is None, prints the specified message along with the program help text, and exits. If not None, returns the value. """ if value is None: print(message) print() arg_parser.print_help() sys.exit(1) else: return value def validate_cluster(cluster): """ Verifies that the named cluster exists. """ if cluster is None: return None clusters = boto3.client('ecs').describe_clusters(clusters=["Default"])['clusters'] if len(clusters) != 1: exit_if_none(None, f"invalid cluster: {cluster}") return clusters[0]['clusterArn'] def validate_subnets(subnet_spec): """ Splits the provided string and verifies that each subnet exists. """ exit_if_none(subnet_spec, "Missing subnets") actual_subnets = {} paginator = boto3.client('ec2').get_paginator('describe_subnets') for page in paginator.paginate(): for subnet in page['Subnets']: actual_subnets[subnet['SubnetId']] = subnet['VpcId'] subnets = [] vpcs = set() for subnet_id in subnet_spec.split(","): vpc_id = actual_subnets.get(subnet_id) exit_if_none(vpc_id, f"invalid subnet: {subnet_id}") subnets.append(subnet_id) vpcs.add(vpc_id) if (len(vpcs) > 1): exit_if_none(None, "subnets belong to different VPCs") return subnets def validate_security_groups(sg_spec): """ Splits the provided string and verifies that each security group exists. """ exit_if_none(sg_spec, "Missing security groups") actual_sgs = {} paginator = boto3.client('ec2').get_paginator('describe_security_groups') for page in paginator.paginate(): for sg in page['SecurityGroups']: actual_sgs[sg['GroupId']] = sg.get('VpcId') # some people may still have non-VPC groups security_groups = [] vpcs = set() for sg_id in sg_spec.split(","): vpc_id = actual_sgs.get(sg_id) exit_if_none(vpc_id, f"invalid security group: {sg_id}") security_groups.append(sg_id) vpcs.add(vpc_id) if (len(vpcs) > 1): exit_if_none(None, "security groups belong to different VPCs") return security_groups def validate_role(name_or_arn): """ Verifies that the specified role exists, matching either by name or full ARN. Returns the role ARN if valid. """ paginator = boto3.client('iam').get_paginator('list_roles') for page in paginator.paginate(): for role in page['Roles']: if (name_or_arn == role['Arn']) or (name_or_arn == role['RoleName']): return role['Arn'] exit_if_none(None, f"invalid role name/ARN: {name_or_arn}") def validate_task_definition(taskdef_name, version): """ Verifies that the task definition exists. If not given a version, just checks the name; otherwise both name and version must match. Returns the task definition ARN if valid. 
""" exit_if_none(taskdef_name, "Missing task definition name") if version: taskdef_name = f"{taskdef_name}:{version}" try: # ECS throws if it can't find a task definition taskdef = boto3.client('ecs').describe_task_definition(taskDefinition=taskdef_name).get('taskDefinition') return taskdef['taskDefinitionArn'] except: return exit_if_none(None, f"can't find task definition: {taskdef_name}") def retrieve_container_names(taskdef_name): """ Retrieves the task definition and returns a list of the containers that it contains. """ taskdef = boto3.client('ecs').describe_task_definition(taskDefinition=taskdef_name).get('taskDefinition') containers = [] for container in taskdef['containerDefinitions']: containers.append(container['name']) return containers def apply_environment_overrides(container_names, envar_specs): """ Applies environment variable overrides to the passed list of containers. Returns a dict, keyed by container name, where each item in the dict has name-value pairs for the environment overrides that apply to that container. """ matcher = re.compile(r"(([-\w]+):)*(\w+)=(.*)", re.ASCII) overrides_by_container = dict([[k,dict()] for k in container_names]) for spec in envar_specs: match = matcher.match(spec) exit_if_none(match, f"invalid environment override: {spec}") container_name = match.group(2) env_name = match.group(3) env_value = match.group(4) if container_name: container_override = overrides_by_container.get(container_name) exit_if_none(container_override, f"invalid container for override: {container_name}") container_override[env_name] = env_value else: for container_override in overrides_by_container.values(): container_override[env_name] = env_value return overrides_by_container def construct_container_overrides(taskdef_name, envar_specs): container_names = retrieve_container_names(taskdef_name) env_overrides = apply_environment_overrides(container_names, envar_specs) result = [] for container_name in container_names: container_env = [] for k,v in env_overrides.get(container_name, {}).items(): container_env.append({ "name": k, "value": v }) result.append({ "name": container_name, "environment": container_env }) return result if __name__ == "__main__": args = parse_args(sys.argv[1:]) run_args = { 'taskDefinition': validate_task_definition(args.taskdef, args.taskdef_version), 'count': 1, 'launchType': 'FARGATE', 'enableECSManagedTags': True, 'enableExecuteCommand': False, 'networkConfiguration': { 'awsvpcConfiguration': { 'subnets': validate_subnets(args.subnets), 'securityGroups': validate_security_groups(args.security_groups), 'assignPublicIp': 'ENABLED' if args.assign_public_ip else 'DISABLED' } }, 'overrides': { 'containerOverrides': construct_container_overrides(args.taskdef, args.envars), } } if args.cluster: run_args['cluster'] = validate_cluster(args.cluster) if args.task_execution_role: run_args['overrides']['executionRoleArn'] = validate_role(args.task_execution_role) if args.task_role: run_args['overrides']['taskRoleArn'] = validate_role(args.task_role) if args.enable_exec: run_args['enableExecuteCommand'] = True response = boto3.client('ecs').run_task(**run_args) task_arn = response['tasks'][0]['taskArn'] task_id = re.sub(r".*/", "", task_arn) print(f"task ID: {task_id}")
[]
[]
[ "ECS_SECURITY_GROUPS", "ECS_CLUSTER", "ECS_TASK_ROLE", "ECS_TASK_EXECUTION_ROLE", "ECS_ASSIGN_PUBLIC_IP", "ECS_SUBNETS" ]
[]
["ECS_SECURITY_GROUPS", "ECS_CLUSTER", "ECS_TASK_ROLE", "ECS_TASK_EXECUTION_ROLE", "ECS_ASSIGN_PUBLIC_IP", "ECS_SUBNETS"]
python
6
0
src/gpwm/renderers.py
# Copyright 2017 Gustavo Baratto. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Miscelaneous rendering functions
"""

import os

import requests
from six.moves.urllib.parse import parse_qs
from six.moves.urllib.parse import urlparse
import yaml

import boto3
import jinja2
import mako.exceptions
import mako.template

import gpwm.utils


def get_template_body(url):
    """ Returns the text of the URL

    Args:
        url(str): a RFC 1808 compliant URL

    Returns:
        The text of the target URL

    This function supports 3 different schemes:
    - http/https
    - s3
    - path
    """
    url_prefix = os.environ.get("GPWM_TEMPLATE_URL_PREFIX", "")
    if url_prefix:
        if url_prefix.endswith("/"):
            url_prefix = url_prefix[:-1]
        if url.startswith("/"):
            url = url[1:]
        url = f"{url_prefix}/{url}"
    parsed_url = urlparse(url)
    if "http" in parsed_url.scheme:  # http and https
        try:
            request = requests.get(url)
            request.raise_for_status()
            body = request.text
        except requests.exceptions.RequestException as exc:
            raise SystemExit(exc)
    elif parsed_url.scheme == "s3":
        s3 = boto3.resource("s3")
        obj = s3.Object(parsed_url.netloc, parsed_url.path[1:])
        extra_args = {k: v[0] for k, v in parse_qs(parsed_url.query).items()}
        try:
            body = obj.get(**extra_args)["Body"].read()
        except s3.meta.client.exceptions.NoSuchBucket as exc:
            raise SystemExit(
                f"Error: S3 bucket doesn't exist: {parsed_url.netloc}"
            )
        except s3.meta.client.exceptions.NoSuchKey as exc:
            raise SystemExit(f"Error: S3 object doesn't exist: {url}")
    elif not parsed_url.scheme:
        with open(url) as local_file:
            body = local_file.read()
    else:
        raise SystemExit(f"URL scheme not supported: {parsed_url.scheme}")
    return parsed_url, body


def parse_mako(stack_name, template_body, parameters):
    """ Parses Mako templates
    """
    # The default for strict_undefined is False. Change to True to
    # troubleshoot pesky templates
    mako_template = mako.template.Template(
        template_body,
        strict_undefined=False
    )
    parameters["utils"] = gpwm.utils
    # parameters["get_stack_output"] = get_stack_output
    # parameters["get_stack_resource"] = get_stack_resource
    # parameters["call_aws"] = call_aws
    try:
        rendered_mako_template = mako_template.render(**parameters)
    # Weird Mako exception handling:
    # http://docs.makotemplates.org/en/latest/usage.html#handling-exceptions
    except Exception:
        raise SystemExit(
            mako.exceptions.text_error_template().render()
        )
    # Ignoring yaml tags unknown to this script, because one might want to use
    # the providers tags like !Ref, !Sub, etc in their templates
    try:
        template = yaml.load(rendered_mako_template)
    except yaml.constructor.ConstructorError as exc:
        if "could not determine a constructor for the tag" not in exc.problem:
            raise exc

    # Automatically adds and merges outputs for every resource in the
    # template - outputs are automatically exported.
    # An existing output in the template will not be overriden by an
    # automatic output.
    outputs = {
        k: {
            "Value": {"Ref": k},
            "Export": {"Name": "{}-{}".format(stack_name, k)}
        } for k in template.get("Resources", {}).keys()
    }
    outputs.update(template.get("Outputs", {}))
    if outputs:
        template["Outputs"] = outputs
    return template


def parse_jinja(stack_name, template_body, parameters):
    """ Parses Jinja templates
    """
    jinja_template = jinja2.Template(template_body)
    parameters["utils"] = gpwm.utils
    # parameters["get_stack_output"] = get_stack_output
    # parameters["get_stack_resource"] = get_stack_resource
    # parameters["call_aws"] = call_aws
    try:
        template = yaml.load(jinja_template.render(**parameters))
    # Ignoring yaml tags unknown to this script, because one might want to use
    # the providers tags like !Ref, !Sub, etc in their templates
    except yaml.constructor.ConstructorError as exc:
        if "could not determine a constructor for the tag" not in exc.problem:
            raise exc

    # Automatically adds and merges outputs for every resource in the
    # template - outputs are automatically exported.
    # An existing output in the template will not be overriden by an
    # automatic output.
    outputs = {
        k: {
            "Value": {"Ref": k},
            "Export": {"Name": "{}-{}".format(stack_name, k)}}
        for k in template.get("Resources", {}).keys()
    }
    outputs.update(template.get("Outputs", {}))
    template["Outputs"] = outputs
    return template


def parse_json(stack_name, template_body, parameters):
    """ Parses Json templates
    """
    raise SystemExit("json templates not yet supported")


def parse_yaml(stack_name, template_body, parameters):
    """ Parses YAML templates
    """
    raise SystemExit("yaml templates not yet supported")
[]
[]
[ "GPWM_TEMPLATE_URL_PREFIX" ]
[]
["GPWM_TEMPLATE_URL_PREFIX"]
python
1
0
data/scripts/templates/object/mobile/shared_dressed_rebel_corporal_sullustan_male_01.py
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES

from swgpy.object import *

def create(kernel):
	result = Creature()

	result.template = "object/mobile/shared_dressed_rebel_corporal_sullustan_male_01.iff"
	result.attribute_template_id = 9
	result.stfName("npc_name","sullustan_base_male")

	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####

	return result
[]
[]
[]
[]
[]
python
null
null
null
cvat/settings/base.py
# Copyright (C) 2018 Intel Corporation # # SPDX-License-Identifier: MIT """ Django settings for CVAT project. Generated by 'django-admin startproject' using Django 2.0.1. For more information on this file, see https://docs.djangoproject.com/en/2.0/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/2.0/ref/settings/ """ import os import sys import fcntl import shutil import subprocess from pathlib import Path # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = str(Path(__file__).parents[2]) ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost,127.0.0.1').split(',') INTERNAL_IPS = ['127.0.0.1'] try: sys.path.append(BASE_DIR) from keys.secret_key import SECRET_KEY except ImportError: from django.utils.crypto import get_random_string with open(os.path.join(BASE_DIR, 'keys', 'secret_key.py'), 'w') as f: chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)' f.write("SECRET_KEY = '{}'\n".format(get_random_string(50, chars))) from keys.secret_key import SECRET_KEY def generate_ssh_keys(): keys_dir = '{}/keys'.format(os.getcwd()) ssh_dir = '{}/.ssh'.format(os.getenv('HOME')) pidfile = os.path.join(ssh_dir, 'ssh.pid') with open(pidfile, "w") as pid: fcntl.flock(pid, fcntl.LOCK_EX) try: subprocess.run(['ssh-add {}/*'.format(ssh_dir)], shell = True, stderr = subprocess.PIPE) keys = subprocess.run(['ssh-add -l'], shell = True, stdout = subprocess.PIPE).stdout.decode('utf-8').split('\n') if 'has no identities' in keys[0]: print('SSH keys were not found') volume_keys = os.listdir(keys_dir) if not ('id_rsa' in volume_keys and 'id_rsa.pub' in volume_keys): print('New pair of keys are being generated') subprocess.run(['ssh-keygen -b 4096 -t rsa -f {}/id_rsa -q -N ""'.format(ssh_dir)], shell = True) shutil.copyfile('{}/id_rsa'.format(ssh_dir), '{}/id_rsa'.format(keys_dir)) shutil.copymode('{}/id_rsa'.format(ssh_dir), '{}/id_rsa'.format(keys_dir)) shutil.copyfile('{}/id_rsa.pub'.format(ssh_dir), '{}/id_rsa.pub'.format(keys_dir)) shutil.copymode('{}/id_rsa.pub'.format(ssh_dir), '{}/id_rsa.pub'.format(keys_dir)) else: print('Copying them from keys volume') shutil.copyfile('{}/id_rsa'.format(keys_dir), '{}/id_rsa'.format(ssh_dir)) shutil.copymode('{}/id_rsa'.format(keys_dir), '{}/id_rsa'.format(ssh_dir)) shutil.copyfile('{}/id_rsa.pub'.format(keys_dir), '{}/id_rsa.pub'.format(ssh_dir)) shutil.copymode('{}/id_rsa.pub'.format(keys_dir), '{}/id_rsa.pub'.format(ssh_dir)) subprocess.run(['ssh-add', '{}/id_rsa'.format(ssh_dir)], shell = True) finally: fcntl.flock(pid, fcntl.LOCK_UN) try: if os.getenv("SSH_AUTH_SOCK", None): generate_ssh_keys() except Exception: pass # Application definition JS_3RDPARTY = {} CSS_3RDPARTY = {} INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'cvat.apps.engine', 'cvat.apps.dashboard', 'cvat.apps.authentication', 'cvat.apps.documentation', 'cvat.apps.git', 'cvat.apps.annotation', 'django_rq', 'compressor', 'cacheops', 'sendfile', 'dj_pagination', 'revproxy', 'rules', 'rest_framework', 'rest_framework.authtoken', 'django_filters', 'drf_yasg', 'rest_auth', 'django.contrib.sites', 'allauth', 'allauth.account', 'rest_auth.registration' ] SITE_ID = 1 REST_FRAMEWORK = { 'DEFAULT_PERMISSION_CLASSES': [ 'rest_framework.permissions.IsAuthenticated', ], 'DEFAULT_AUTHENTICATION_CLASSES': [ 'rest_framework.authentication.TokenAuthentication', 
'rest_framework.authentication.SessionAuthentication', 'rest_framework.authentication.BasicAuthentication' ], 'DEFAULT_VERSIONING_CLASS': # Don't try to use URLPathVersioning. It will give you /api/{version} # in path and '/api/docs' will not collapse similar items (flat list # of all possible methods isn't readable). 'rest_framework.versioning.NamespaceVersioning', # Need to add 'api-docs' here as a workaround for include_docs_urls. 'ALLOWED_VERSIONS': ('v1', 'api-docs'), 'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination', 'PAGE_SIZE': 10, 'DEFAULT_FILTER_BACKENDS': ( 'rest_framework.filters.SearchFilter', 'django_filters.rest_framework.DjangoFilterBackend', 'rest_framework.filters.OrderingFilter'), # Disable default handling of the 'format' query parameter by REST framework 'URL_FORMAT_OVERRIDE': None, } REST_AUTH_REGISTER_SERIALIZERS = { 'REGISTER_SERIALIZER': 'cvat.apps.authentication.serializers.RegisterSerializerEx' } if 'yes' == os.environ.get('TF_ANNOTATION', 'no'): INSTALLED_APPS += ['cvat.apps.tf_annotation'] if 'yes' == os.environ.get('OPENVINO_TOOLKIT', 'no'): INSTALLED_APPS += ['cvat.apps.auto_annotation'] if 'yes' == os.environ.get('OPENVINO_TOOLKIT', 'no'): INSTALLED_APPS += ['cvat.apps.reid'] if 'yes' == os.environ.get('WITH_DEXTR', 'no'): INSTALLED_APPS += ['cvat.apps.dextr_segmentation'] if os.getenv('DJANGO_LOG_VIEWER_HOST'): INSTALLED_APPS += ['cvat.apps.log_viewer'] # new feature by Mohammad if 'yes' == os.environ.get('AUTO_SEGMENTATION', 'no'): INSTALLED_APPS += ['cvat.apps.auto_segmentation'] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', 'dj_pagination.middleware.PaginationMiddleware', ] STATICFILES_FINDERS = [ 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', 'compressor.finders.CompressorFinder', ] ROOT_URLCONF = 'cvat.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'cvat.wsgi.application' # Django Auth DJANGO_AUTH_TYPE = 'BASIC' LOGIN_URL = 'login' LOGIN_REDIRECT_URL = '/' AUTH_LOGIN_NOTE = '<p>Have not registered yet? 
<a href="/auth/register">Register here</a>.</p>' AUTHENTICATION_BACKENDS = [ 'rules.permissions.ObjectPermissionBackend', 'django.contrib.auth.backends.ModelBackend' ] # https://github.com/pennersr/django-allauth ACCOUNT_EMAIL_VERIFICATION = 'none' # Django-RQ # https://github.com/rq/django-rq RQ_QUEUES = { 'default': { 'HOST': 'localhost', 'PORT': 6379, 'DB': 0, 'DEFAULT_TIMEOUT': '4h' }, 'low': { 'HOST': 'localhost', 'PORT': 6379, 'DB': 0, 'DEFAULT_TIMEOUT': '24h' } } RQ_SHOW_ADMIN_LINK = True RQ_EXCEPTION_HANDLERS = ['cvat.apps.engine.views.rq_handler'] # JavaScript and CSS compression # https://django-compressor.readthedocs.io COMPRESS_CSS_FILTERS = [ 'compressor.filters.css_default.CssAbsoluteFilter', 'compressor.filters.cssmin.rCSSMinFilter' ] COMPRESS_JS_FILTERS = [] # No compression for js files (template literals were compressed bad) # Password validation # https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, { 'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', }, { 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Cache DB access (e.g. for engine.task.get_frame) # https://github.com/Suor/django-cacheops CACHEOPS_REDIS = { 'host': 'localhost', # redis-server is on same machine 'port': 6379, # default redis port 'db': 1, # SELECT non-default redis database } CACHEOPS = { # Automatically cache any Task.objects.get() calls for 15 minutes # This also includes .first() and .last() calls. 'engine.task': {'ops': 'get', 'timeout': 60*15}, # Automatically cache any Job.objects.get() calls for 15 minutes # This also includes .first() and .last() calls. 
'engine.job': {'ops': 'get', 'timeout': 60*15}, } CACHEOPS_DEGRADE_ON_FAILURE = True # Internationalization # https://docs.djangoproject.com/en/2.0/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = os.getenv('TZ', 'Etc/UTC') USE_I18N = True USE_L10N = True USE_TZ = True CSRF_COOKIE_NAME = "csrftoken" LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format': '[%(asctime)s] %(levelname)s %(name)s: %(message)s' } }, 'handlers': { 'console': { 'class': 'logging.StreamHandler', 'filters': [], 'formatter': 'standard', }, 'server_file': { 'class': 'logging.handlers.RotatingFileHandler', 'level': 'DEBUG', 'filename': os.path.join(BASE_DIR, 'logs', 'cvat_server.log'), 'formatter': 'standard', 'maxBytes': 1024*1024*50, # 50 MB 'backupCount': 5, }, 'logstash': { 'level': 'INFO', 'class': 'logstash.TCPLogstashHandler', 'host': os.getenv('DJANGO_LOG_SERVER_HOST', 'localhost'), 'port': os.getenv('DJANGO_LOG_SERVER_PORT', 5000), 'version': 1, 'message_type': 'django', } }, 'loggers': { 'cvat.server': { 'handlers': ['console', 'server_file'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'), }, 'cvat.client': { 'handlers': [], 'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG'), }, 'revproxy': { 'handlers': ['console', 'server_file'], 'level': os.getenv('DJANGO_LOG_LEVEL', 'DEBUG') }, 'django': { 'handlers': ['console', 'server_file'], 'level': 'INFO', 'propagate': True } }, } if os.getenv('DJANGO_LOG_SERVER_HOST'): LOGGING['loggers']['cvat.server']['handlers'] += ['logstash'] LOGGING['loggers']['cvat.client']['handlers'] += ['logstash'] # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/2.0/howto/static-files/ STATIC_URL = '/static/' STATIC_ROOT = os.path.join(BASE_DIR, 'static') os.makedirs(STATIC_ROOT, exist_ok=True) DATA_ROOT = os.path.join(BASE_DIR, 'data') os.makedirs(DATA_ROOT, exist_ok=True) SHARE_ROOT = os.path.join(BASE_DIR, 'share') os.makedirs(SHARE_ROOT, exist_ok=True) MODELS_ROOT = os.path.join(BASE_DIR, 'models') os.makedirs(MODELS_ROOT, exist_ok=True) DATA_UPLOAD_MAX_MEMORY_SIZE = 100 * 1024 * 1024 # 100 MB DATA_UPLOAD_MAX_NUMBER_FIELDS = None # this django check disabled LOCAL_LOAD_MAX_FILES_COUNT = 500 LOCAL_LOAD_MAX_FILES_SIZE = 512 * 1024 * 1024 # 512 MB
[]
[]
[ "OPENVINO_TOOLKIT", "ALLOWED_HOSTS", "DJANGO_LOG_SERVER_PORT", "SSH_AUTH_SOCK", "DJANGO_LOG_SERVER_HOST", "DJANGO_LOG_VIEWER_HOST", "WITH_DEXTR", "AUTO_SEGMENTATION", "DJANGO_LOG_LEVEL", "TF_ANNOTATION", "TZ", "HOME" ]
[]
["OPENVINO_TOOLKIT", "ALLOWED_HOSTS", "DJANGO_LOG_SERVER_PORT", "SSH_AUTH_SOCK", "DJANGO_LOG_SERVER_HOST", "DJANGO_LOG_VIEWER_HOST", "WITH_DEXTR", "AUTO_SEGMENTATION", "DJANGO_LOG_LEVEL", "TF_ANNOTATION", "TZ", "HOME"]
python
12
0
frontend/__init__.py
import os

from dotenv import load_dotenv
from flask import Flask

load_dotenv()

app = Flask(__name__)
app.config.update(
    REDIS_HOST=os.getenv('REDIS_HOST') or 'localhost',
    REDIS_PORT=os.getenv('REDIS_PORT') or 6379
)

from app import routes
[]
[]
[ "REDIS_PORT", "REDIS_HOST" ]
[]
["REDIS_PORT", "REDIS_HOST"]
python
2
0
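As a usage illustration for the frontend/__init__.py record above (not part of the dataset row itself): a minimal, standalone sketch of the same environment-variable-with-fallback pattern, extended with a hypothetical Redis connection. The redis-py dependency and the connection step are assumptions; only the REDIS_HOST/REDIS_PORT handling comes from the record.

# Standalone sketch; mirrors the fallback pattern from the record above.
# The redis-py client and the ping() check are illustrative assumptions.
import os

import redis  # assumed dependency, not present in the original file

redis_host = os.getenv('REDIS_HOST') or 'localhost'
redis_port = int(os.getenv('REDIS_PORT') or 6379)

client = redis.Redis(host=redis_host, port=redis_port)
client.ping()  # raises redis.exceptions.ConnectionError if no server is reachable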
horovod/spark/task/task_service.py
# Copyright 2019 Uber Technologies, Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== from distutils.version import LooseVersion import os import pyspark import time from horovod.run.common.util import codec, secret, timeout from horovod.run.common.service import task_service class ResourcesRequest(object): """Request Spark resources info for this task.""" class ResourcesResponse(object): def __init__(self, resources): self.resources = resources """Dictionary containing resource info.""" class GetTaskToTaskAddressesRequest(object): def __init__(self, task_index, all_task_addresses): self.task_index = task_index """Task index of other task service.""" self.all_task_addresses = all_task_addresses """Map of interface to list of (ip, port) pairs of other task service.""" class GetTaskToTaskAddressesResponse(object): def __init__(self, task_addresses_for_task): self.task_addresses_for_task = task_addresses_for_task """Map of interface to list of (ip, port) pairs.""" class SparkTaskService(task_service.BasicTaskService): NAME_FORMAT = 'task service #%d' def __init__(self, index, key, nics, minimum_command_lifetime_s, verbose=0): # on a Spark cluster we need our train function to see the Spark worker environment # this includes PYTHONPATH, HADOOP_TOKEN_FILE_LOCATION and _HOROVOD_SECRET_KEY env = os.environ.copy() # we inject the secret key here env[secret.HOROVOD_SECRET_KEY] = codec.dumps_base64(key) # we also need to provide the current working dir to mpirun_exec_fn.py env['HOROVOD_SPARK_WORK_DIR'] = os.getcwd() super(SparkTaskService, self).__init__(SparkTaskService.NAME_FORMAT % index, key, nics, env, verbose) self._key = key self._minimum_command_lifetime_s = minimum_command_lifetime_s self._minimum_command_lifetime = None def _run_command(self, command, env, event): super(SparkTaskService, self)._run_command(command, env, event) if self._minimum_command_lifetime_s is not None: self._minimum_command_lifetime = timeout.Timeout(self._minimum_command_lifetime_s, message='Just measuring runtime') def _handle(self, req, client_address): if isinstance(req, ResourcesRequest): return ResourcesResponse(self._get_resources()) if isinstance(req, GetTaskToTaskAddressesRequest): next_task_index = req.task_index next_task_addresses = req.all_task_addresses # We request interface matching to weed out all the NAT'ed interfaces. 
next_task_client = \ SparkTaskClient(next_task_index, next_task_addresses, self._key, self._verbose, match_intf=True) return GetTaskToTaskAddressesResponse(next_task_client.addresses()) return super(SparkTaskService, self)._handle(req, client_address) def _get_resources(self): if LooseVersion(pyspark.__version__) >= LooseVersion('3.0.0'): task_context = pyspark.TaskContext.get() if task_context: return task_context.resources() else: # task_context is None when not run on Spark worker # this only happens while running test_spark.test_task_fn_run_gloo_exec() print("Not running inside Spark worker, no resources available") return dict() def wait_for_command_termination(self): """ Waits for command termination. Ensures this method takes at least self._minimum_command_lifetime_s seconds to return after command started. """ try: return super(SparkTaskService, self).wait_for_command_termination() finally: # command terminated, make sure this method takes at least # self._minimum_command_lifetime_s seconds after command started # the client that started the command needs some time to connect again # to wait for the result (see horovod.spark.driver.rsh). if self._minimum_command_lifetime is not None: time.sleep(self._minimum_command_lifetime.remaining()) class SparkTaskClient(task_service.BasicTaskClient): def __init__(self, index, task_addresses, key, verbose, match_intf=False): super(SparkTaskClient, self).__init__(SparkTaskService.NAME_FORMAT % index, task_addresses, key, verbose, match_intf=match_intf) def resources(self): resp = self._send(ResourcesRequest()) return resp.resources def get_task_addresses_for_task(self, task_index, all_task_addresses): resp = self._send(GetTaskToTaskAddressesRequest(task_index, all_task_addresses)) return resp.task_addresses_for_task
[]
[]
[]
[]
[]
python
0
0
examples/pwr_run/checkpointing/socket_short/true_random/job26.py
""" #Trains a ResNet on the CIFAR10 dataset. """ from __future__ import print_function import keras from keras.layers import Dense, Conv2D, BatchNormalization, Activation from keras.layers import AveragePooling2D, Input, Flatten from keras.optimizers import Adam from keras.callbacks import ModelCheckpoint, LearningRateScheduler from keras.callbacks import ReduceLROnPlateau, TensorBoard from keras.preprocessing.image import ImageDataGenerator from keras.regularizers import l2 from keras import backend as K from keras.models import Model from keras.datasets import cifar10 from keras.applications.vgg16 import VGG16 from keras.applications.vgg19 import VGG19 from keras import models, layers, optimizers from datetime import datetime import tensorflow as tf import numpy as np import os import pdb import sys import argparse import time import signal import glob import json import send_signal from random import randrange parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training') parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name') parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint') parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use') parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)') parser.set_defaults(resume=False) args = parser.parse_args() os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num # Training parameters batch_size = 128 args_lr = 0.003 args_model = 'vgg19' epoch_begin_time = 0 job_name = sys.argv[0].split('.')[0] save_files = '/scratch/li.baol/checkpoint_true_random/' + job_name + '*' total_epochs = 110 starting_epoch = 0 # first step is to update the PID pid_dict = {} with open('pid_lock.json', 'r') as fp: pid_dict = json.load(fp) pid_dict[job_name] = os.getpid() json_file = json.dumps(pid_dict) with open('pid_lock.json', 'w') as fp: fp.write(json_file) os.rename('pid_lock.json', 'pid.json') if args.resume: save_file = glob.glob(save_files)[0] # epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0]) starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1]) data_augmentation = True num_classes = 10 # Subtracting pixel mean improves accuracy subtract_pixel_mean = True n = 3 # Model name, depth and version model_type = args.tc #'P100_resnet50_he_256_1' # Load the CIFAR10 data. (x_train, y_train), (x_test, y_test) = cifar10.load_data() # Normalize data. x_train = x_train.astype('float32') / 255 x_test = x_test.astype('float32') / 255 # If subtract pixel mean is enabled if subtract_pixel_mean: x_train_mean = np.mean(x_train, axis=0) x_train -= x_train_mean x_test -= x_train_mean print('x_train shape:', x_train.shape) print(x_train.shape[0], 'train samples') print(x_test.shape[0], 'test samples') print('y_train shape:', y_train.shape) # Convert class vectors to binary class matrices. 
y_train = keras.utils.to_categorical(y_train, num_classes) y_test = keras.utils.to_categorical(y_test, num_classes) if args.resume: print('resume from checkpoint') model = keras.models.load_model(save_file) else: print('train from start') model = models.Sequential() if '16' in args_model: base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) elif '19' in args_model: base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None) #base_model.summary() #pdb.set_trace() model.add(base_model) model.add(layers.Flatten()) model.add(layers.BatchNormalization()) model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform')) #model.add(layers.Dropout(0.2)) model.add(layers.BatchNormalization()) model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform')) #model.add(layers.Dropout(0.2)) model.add(layers.BatchNormalization()) model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform')) model.compile(loss='categorical_crossentropy', optimizer=Adam(lr=args_lr), metrics=['accuracy']) #model.summary() print(model_type) #pdb.set_trace() current_epoch = 0 ################### connects interrupt signal to the process ##################### def terminateProcess(signalNumber, frame): # first record the wasted epoch time global epoch_begin_time if epoch_begin_time == 0: epoch_waste_time = 0 else: epoch_waste_time = int(time.time() - epoch_begin_time) epoch_waste_dict = {} with open('epoch_waste.json', 'r') as fp: epoch_waste_dict = json.load(fp) epoch_waste_dict[job_name] += epoch_waste_time json_file3 = json.dumps(epoch_waste_dict) with open('epoch_waste.json', 'w') as fp: fp.write(json_file3) print('checkpointing the model triggered by kill -15 signal') # delete whatever checkpoint that already exists for f in glob.glob(save_files): os.remove(f) model.save('/scratch/li.baol/checkpoint_true_random/' + job_name + '_' + str(current_epoch) + '.h5') print ('(SIGTERM) terminating the process') checkpoint_dict = {} with open('checkpoint.json', 'r') as fp: checkpoint_dict = json.load(fp) checkpoint_dict[job_name] = 1 json_file3 = json.dumps(checkpoint_dict) with open('checkpoint.json', 'w') as fp: fp.write(json_file3) sys.exit() signal.signal(signal.SIGTERM, terminateProcess) ################################################################################# logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch') class PrintEpoch(keras.callbacks.Callback): def on_epoch_begin(self, epoch, logs=None): global current_epoch #remaining_epochs = epochs - epoch current_epoch = epoch print('current epoch ' + str(current_epoch)) global epoch_begin_time epoch_begin_time = time.time() my_callback = PrintEpoch() callbacks = [tensorboard_callback, my_callback] #[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback] # Run training if not args.resume: # randomly assign it a value trainable_count = randrange(1000) # send signal 'jobxx param xxxxx' message = job_name + ' param ' + str(trainable_count) send_signal.send(args.node, 10002, message) # send signal to indicate checkpoint is qualified message = job_name + ' ckpt_qual' send_signal.send(args.node, 10002, message) model.fit(x_train, y_train, batch_size=batch_size, epochs=round(total_epochs/2), validation_data=(x_test, y_test), shuffle=True, callbacks=callbacks, initial_epoch=starting_epoch, verbose=1 ) # Score trained model. 
scores = model.evaluate(x_test, y_test, verbose=1) print('Test loss:', scores[0]) print('Test accuracy:', scores[1]) # send signal to indicate job has finished message = job_name + ' finish' send_signal.send(args.node, 10002, message)
[]
[]
[ "CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES"]
python
2
0
movierater/wsgi.py
""" WSGI config for movierater project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'movierater.settings') application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
source/events/media_events.py
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """ This Lambda is responsible for receiving and storing CloudWatch events originating from Media Services. This Lambda must be installed into each region where Media Services are created. """ import datetime import os import json from random import randint from urllib.parse import unquote import boto3 from botocore.exceptions import ClientError from botocore.config import Config from jsonpath_ng import parse # user-agent config SOLUTION_ID = os.environ['SOLUTION_ID'] USER_AGENT_EXTRA = {"user_agent_extra": SOLUTION_ID} MSAM_BOTO3_CONFIG = Config(**USER_AGENT_EXTRA) DYNAMO_REGION_NAME=os.environ["EVENTS_TABLE_REGION"] DYNAMO_RESOURCE = boto3.resource('dynamodb', region_name=DYNAMO_REGION_NAME, config=MSAM_BOTO3_CONFIG) EVENTS_TABLE = DYNAMO_RESOURCE.Table(os.environ["EVENTS_TABLE_NAME"]) CLOUDWATCH_EVENTS_TABLE = DYNAMO_RESOURCE.Table(os.environ["CLOUDWATCH_EVENTS_TABLE_NAME"]) CONTENT_TABLE_NAME = os.environ["CONTENT_TABLE_NAME"] def lambda_handler(event, _): """ Entry point for CloudWatch event receipt. """ try: print(event) event["timestamp"] = int(datetime.datetime.strptime( event["time"], '%Y-%m-%dT%H:%M:%SZ').timestamp()) event["expires"] = event["timestamp"] + int(os.environ["ITEM_TTL"]) event["detail"]["time"] = event["time"] # catch all the various forms of ARN from the media services arn_expr = parse('$..arn|aRN|resource-arn|channel_arn|multiplex_arn|flowArn|PlaybackConfigurationArn|resourceArn') original_arns = [match.value for match in arn_expr.find(event)] arns = [] # remove arn that is for userIdentity or inputSecurityGroup # note: can't remove an item from a list that's being iterated over so doing it this way for arn in original_arns: if "user" in arn or "role" in arn or "inputSecurityGroup" in arn: pass else: arns.append(arn) if arns: event["resource_arn"] = unquote(arns[0]) # for certain events, the ARN is not labeled as an ARN but instead put in the resources list if not arns and event["resources"]: if "vod" not in event["resources"][0]: event["resource_arn"] = event["resources"][0] # handle alerts if "Alert" in event["detail-type"]: # medialive alerts if "MediaLive" in event["detail-type"]: event["alarm_id"] = event["detail"]["alarm_id"] event["alarm_state"] = event["detail"]["alarm_state"].lower() # mediaconnect alerts elif "MediaConnect" in event["detail-type"]: event["alarm_id"] = event["detail"]["error-id"] if event["detail"]["errored"]: event["alarm_state"] = "set" else: event["alarm_state"] = "cleared" event["detail"]["alert_type"] = event["detail"]["error-code"] del event["detail"]["error-code"] event["detail"]["message"] = event["detail"]["error-message"] del event["detail"]["error-message"] #print(event) EVENTS_TABLE.put_item(Item=event) print(event["detail-type"] + " stored.") # set the rest of the information needed for storing as regular CWE # give timestamp a millisecond precision since it's sort key in CWE table # Bandit B311: randint not used for cryptographic purposes event["timestamp"] = event["timestamp"] * 1000 + randint(1, 999) # nosec event["data"] = json.dumps(event["detail"]) event["type"] = event["detail-type"] if "eventName" in event["detail"]: event["type"] = event["type"] + ": " + event["detail"]["eventName"] # handle specific cases depending on source if event["source"] == "aws.medialive": if "BatchUpdateSchedule" in event["type"]: print("Creating an ARN for BatchUpdateSchedule event.") event["resource_arn"] = "arn:aws:medialive:" + 
event['region'] + ":" + \ event['account'] + ":channel:" + \ event['detail']['requestParameters']['channelId'] elif event["source"] == "aws.mediapackage": if "HarvestJob" in event["type"]: print("Asking MediaPackage for the ARN of endpoint in a HarvestJob event.") # to get the ARN, ask mediapackage to describe the origin endpoint # the ARN available through resources is the HarvestJob ARN, not the endpoint orig_id_expr = parse('$..origin_endpoint_id') orig_id = [match.value for match in orig_id_expr.find(event)] if orig_id: emp_client = boto3.client('mediapackage') response = emp_client.describe_origin_endpoint( Id=orig_id[0]) event["resource_arn"] = response["Arn"] else: print("Skipping this event. Origin ID not present in the HarvestJob event." + event["type"]) elif event["source"] == "aws.mediastore": # for object state change the resource is the object, not the container # so the captured arn needs to be fixed if "MediaStore Object State Change" in event["type"]: temp_arn = event["resource_arn"].split('/') event["resource_arn"] = temp_arn[0] + "/" + temp_arn[1] # if item has no resource arn, don't save in DB if "resource_arn" in event: #print(event) print("Storing media service event.") CLOUDWATCH_EVENTS_TABLE.put_item(Item=event) else: print("Skipping this event. " + event["type"]) except ClientError as error: print(error) return True
[]
[]
[ "CONTENT_TABLE_NAME", "EVENTS_TABLE_NAME", "EVENTS_TABLE_REGION", "CLOUDWATCH_EVENTS_TABLE_NAME", "SOLUTION_ID", "ITEM_TTL" ]
[]
["CONTENT_TABLE_NAME", "EVENTS_TABLE_NAME", "EVENTS_TABLE_REGION", "CLOUDWATCH_EVENTS_TABLE_NAME", "SOLUTION_ID", "ITEM_TTL"]
python
6
0
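As an illustration of the ARN-extraction technique used in the media_events.py record above (not part of the dataset row itself): a minimal sketch of the jsonpath_ng pattern with a simplified path expression and an invented sample event. The production handler uses the longer union expression shown in the record; the payload and printed result here are assumptions for demonstration only.

# Minimal sketch of the jsonpath_ng extraction + filtering pattern.
from jsonpath_ng import parse

sample_event = {  # invented payload for illustration
    "detail": {
        "arn": "arn:aws:medialive:us-east-1:123456789012:channel:42",
        "userIdentity": {"arn": "arn:aws:iam::123456789012:user/someone"},
    },
    "resources": [],
}

arn_expr = parse("$..arn")  # the real handler unions several ARN-like field names
arns = [match.value for match in arn_expr.find(sample_event)]

# Mirror the handler's filtering step: drop user/role/inputSecurityGroup ARNs.
arns = [a for a in arns if not any(s in a for s in ("user", "role", "inputSecurityGroup"))]
print(arns)  # expected: the MediaLive channel ARN only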
example_project/open_news/scraper/settings.py
from __future__ import unicode_literals

# Scrapy settings for open_news project
#
# For simplicity, this file contains only the most important settings by
# default. All the other settings are documented here:
#
# http://doc.scrapy.org/topics/settings.html
#

import os, sys

PROJECT_ROOT = os.path.abspath(os.path.dirname(__file__))

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "example_project.settings")
sys.path.insert(0, os.path.join(PROJECT_ROOT, "../../..")) #only for example_project

BOT_NAME = 'open_news'

LOG_STDOUT = True

SPIDER_MODULES = ['dynamic_scraper.spiders', 'open_news.scraper',]
USER_AGENT = '{b}/{v}'.format(b=BOT_NAME, v='1.0')

ITEM_PIPELINES = {
    'dynamic_scraper.pipelines.DjangoImagesPipeline': 200,
    'dynamic_scraper.pipelines.ValidationPipeline': 400,
    'open_news.scraper.pipelines.DjangoWriterPipeline': 800,
}

IMAGES_STORE = os.path.join(PROJECT_ROOT, '../thumbnails')

IMAGES_THUMBS = {
    'medium': (50, 50),
    'small': (25, 25),
}

DSCRAPER_IMAGES_STORE_FORMAT = 'ALL'

DSCRAPER_LOG_ENABLED = True
DSCRAPER_LOG_LEVEL = 'INFO'
DSCRAPER_LOG_LIMIT = 5
[]
[]
[]
[]
[]
python
0
0
vendor/cloud.google.com/go/datastore/datastore.go
// Copyright 2014 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package datastore import ( "errors" "fmt" "log" "os" "reflect" "cloud.google.com/go/internal/trace" "golang.org/x/net/context" "google.golang.org/api/option" gtransport "google.golang.org/api/transport/grpc" pb "google.golang.org/genproto/googleapis/datastore/v1" "google.golang.org/grpc" ) const ( prodAddr = "datastore.googleapis.com:443" userAgent = "gcloud-golang-datastore/20160401" ) // ScopeDatastore grants permissions to view and/or manage datastore entities const ScopeDatastore = "https://www.googleapis.com/auth/datastore" // resourcePrefixHeader is the name of the metadata header used to indicate // the resource being operated on. const resourcePrefixHeader = "google-cloud-resource-prefix" // Client is a client for reading and writing data in a datastore dataset. type Client struct { conn *grpc.ClientConn client pb.DatastoreClient endpoint string dataset string // Called dataset by the datastore API, synonym for project ID. } // NewClient creates a new Client for a given dataset. // If the project ID is empty, it is derived from the DATASTORE_PROJECT_ID environment variable. // If the DATASTORE_EMULATOR_HOST environment variable is set, client will use its value // to connect to a locally-running datastore emulator. func NewClient(ctx context.Context, projectID string, opts ...option.ClientOption) (*Client, error) { var o []option.ClientOption // Environment variables for gcd emulator: // https://cloud.google.com/datastore/docs/tools/datastore-emulator // If the emulator is available, dial it without passing any credentials. if addr := os.Getenv("DATASTORE_EMULATOR_HOST"); addr != "" { o = []option.ClientOption{ option.WithEndpoint(addr), option.WithoutAuthentication(), option.WithGRPCDialOption(grpc.WithInsecure()), } } else { o = []option.ClientOption{ option.WithEndpoint(prodAddr), option.WithScopes(ScopeDatastore), option.WithUserAgent(userAgent), } } // Warn if we see the legacy emulator environment variables. if os.Getenv("DATASTORE_HOST") != "" && os.Getenv("DATASTORE_EMULATOR_HOST") == "" { log.Print("WARNING: legacy environment variable DATASTORE_HOST is ignored. Use DATASTORE_EMULATOR_HOST instead.") } if os.Getenv("DATASTORE_DATASET") != "" && os.Getenv("DATASTORE_PROJECT_ID") == "" { log.Print("WARNING: legacy environment variable DATASTORE_DATASET is ignored. Use DATASTORE_PROJECT_ID instead.") } if projectID == "" { projectID = os.Getenv("DATASTORE_PROJECT_ID") } if projectID == "" { return nil, errors.New("datastore: missing project/dataset id") } o = append(o, opts...) conn, err := gtransport.Dial(ctx, o...) if err != nil { return nil, fmt.Errorf("dialing: %v", err) } return &Client{ conn: conn, client: newDatastoreClient(conn, projectID), dataset: projectID, }, nil } var ( // ErrInvalidEntityType is returned when functions like Get or Next are // passed a dst or src argument of invalid type. 
ErrInvalidEntityType = errors.New("datastore: invalid entity type") // ErrInvalidKey is returned when an invalid key is presented. ErrInvalidKey = errors.New("datastore: invalid key") // ErrNoSuchEntity is returned when no entity was found for a given key. ErrNoSuchEntity = errors.New("datastore: no such entity") ) type multiArgType int const ( multiArgTypeInvalid multiArgType = iota multiArgTypePropertyLoadSaver multiArgTypeStruct multiArgTypeStructPtr multiArgTypeInterface ) // ErrFieldMismatch is returned when a field is to be loaded into a different // type than the one it was stored from, or when a field is missing or // unexported in the destination struct. // StructType is the type of the struct pointed to by the destination argument // passed to Get or to Iterator.Next. type ErrFieldMismatch struct { StructType reflect.Type FieldName string Reason string } func (e *ErrFieldMismatch) Error() string { return fmt.Sprintf("datastore: cannot load field %q into a %q: %s", e.FieldName, e.StructType, e.Reason) } // GeoPoint represents a location as latitude/longitude in degrees. type GeoPoint struct { Lat, Lng float64 } // Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude. func (g GeoPoint) Valid() bool { return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180 } func keyToProto(k *Key) *pb.Key { if k == nil { return nil } var path []*pb.Key_PathElement for { el := &pb.Key_PathElement{Kind: k.Kind} if k.ID != 0 { el.IdType = &pb.Key_PathElement_Id{Id: k.ID} } else if k.Name != "" { el.IdType = &pb.Key_PathElement_Name{Name: k.Name} } path = append(path, el) if k.Parent == nil { break } k = k.Parent } // The path should be in order [grandparent, parent, child] // We did it backward above, so reverse back. for i := 0; i < len(path)/2; i++ { path[i], path[len(path)-i-1] = path[len(path)-i-1], path[i] } key := &pb.Key{Path: path} if k.Namespace != "" { key.PartitionId = &pb.PartitionId{ NamespaceId: k.Namespace, } } return key } // protoToKey decodes a protocol buffer representation of a key into an // equivalent *Key object. If the key is invalid, protoToKey will return the // invalid key along with ErrInvalidKey. func protoToKey(p *pb.Key) (*Key, error) { var key *Key var namespace string if partition := p.PartitionId; partition != nil { namespace = partition.NamespaceId } for _, el := range p.Path { key = &Key{ Namespace: namespace, Kind: el.Kind, ID: el.GetId(), Name: el.GetName(), Parent: key, } } if !key.valid() { // Also detects key == nil. return key, ErrInvalidKey } return key, nil } // multiKeyToProto is a batch version of keyToProto. func multiKeyToProto(keys []*Key) []*pb.Key { ret := make([]*pb.Key, len(keys)) for i, k := range keys { ret[i] = keyToProto(k) } return ret } // multiKeyToProto is a batch version of keyToProto. func multiProtoToKey(keys []*pb.Key) ([]*Key, error) { hasErr := false ret := make([]*Key, len(keys)) err := make(MultiError, len(keys)) for i, k := range keys { ret[i], err[i] = protoToKey(k) if err[i] != nil { hasErr = true } } if hasErr { return nil, err } return ret, nil } // multiValid is a batch version of Key.valid. It returns an error, not a // []bool. 
func multiValid(key []*Key) error { invalid := false for _, k := range key { if !k.valid() { invalid = true break } } if !invalid { return nil } err := make(MultiError, len(key)) for i, k := range key { if !k.valid() { err[i] = ErrInvalidKey } } return err } // checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct // type S, for some interface type I, or some non-interface non-pointer type P // such that P or *P implements PropertyLoadSaver. // // It returns what category the slice's elements are, and the reflect.Type // that represents S, I or P. // // As a special case, PropertyList is an invalid type for v. // // TODO(djd): multiArg is very confusing. Fold this logic into the // relevant Put/Get methods to make the logic less opaque. func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) { if v.Kind() != reflect.Slice { return multiArgTypeInvalid, nil } if v.Type() == typeOfPropertyList { return multiArgTypeInvalid, nil } elemType = v.Type().Elem() if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) { return multiArgTypePropertyLoadSaver, elemType } switch elemType.Kind() { case reflect.Struct: return multiArgTypeStruct, elemType case reflect.Interface: return multiArgTypeInterface, elemType case reflect.Ptr: elemType = elemType.Elem() if elemType.Kind() == reflect.Struct { return multiArgTypeStructPtr, elemType } } return multiArgTypeInvalid, nil } // Close closes the Client. func (c *Client) Close() error { return c.conn.Close() } // Get loads the entity stored for key into dst, which must be a struct pointer // or implement PropertyLoadSaver. If there is no such entity for the key, Get // returns ErrNoSuchEntity. // // The values of dst's unmatched struct fields are not modified, and matching // slice-typed fields are not reset before appending to them. In particular, it // is recommended to pass a pointer to a zero valued struct on each Get call. // // ErrFieldMismatch is returned when a field is to be loaded into a different // type than the one it was stored from, or when a field is missing or // unexported in the destination struct. ErrFieldMismatch is only returned if // dst is a struct pointer. func (c *Client) Get(ctx context.Context, key *Key, dst interface{}) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Get") defer func() { trace.EndSpan(ctx, err) }() if dst == nil { // get catches nil interfaces; we need to catch nil ptr here return ErrInvalidEntityType } err = c.get(ctx, []*Key{key}, []interface{}{dst}, nil) if me, ok := err.(MultiError); ok { return me[0] } return err } // GetMulti is a batch version of Get. // // dst must be a []S, []*S, []I or []P, for some struct type S, some interface // type I, or some non-interface non-pointer type P such that P or *P // implements PropertyLoadSaver. If an []I, each element must be a valid dst // for Get: it must be a struct pointer or implement PropertyLoadSaver. // // As a special case, PropertyList is an invalid type for dst, even though a // PropertyList is a slice of structs. It is treated as invalid to avoid being // mistakenly passed when []PropertyList was intended. 
func (c *Client) GetMulti(ctx context.Context, keys []*Key, dst interface{}) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.GetMulti") defer func() { trace.EndSpan(ctx, err) }() return c.get(ctx, keys, dst, nil) } func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb.ReadOptions) error { v := reflect.ValueOf(dst) multiArgType, _ := checkMultiArg(v) // Sanity checks if multiArgType == multiArgTypeInvalid { return errors.New("datastore: dst has invalid type") } if len(keys) != v.Len() { return errors.New("datastore: keys and dst slices have different length") } if len(keys) == 0 { return nil } // Go through keys, validate them, serialize then, and create a dict mapping them to their indices. // Equal keys are deduped. multiErr, any := make(MultiError, len(keys)), false keyMap := make(map[string][]int, len(keys)) pbKeys := make([]*pb.Key, 0, len(keys)) for i, k := range keys { if !k.valid() { multiErr[i] = ErrInvalidKey any = true } else if k.Incomplete() { multiErr[i] = fmt.Errorf("datastore: can't get the incomplete key: %v", k) any = true } else { ks := k.String() if _, ok := keyMap[ks]; !ok { pbKeys = append(pbKeys, keyToProto(k)) } keyMap[ks] = append(keyMap[ks], i) } } if any { return multiErr } req := &pb.LookupRequest{ ProjectId: c.dataset, Keys: pbKeys, ReadOptions: opts, } resp, err := c.client.Lookup(ctx, req) if err != nil { return err } found := resp.Found missing := resp.Missing // Upper bound 100 iterations to prevent infinite loop. // We choose 100 iterations somewhat logically: // Max number of Entities you can request from Datastore is 1,000. // Max size for a Datastore Entity is 1 MiB. // Max request size is 10 MiB, so we assume max response size is also 10 MiB. // 1,000 / 10 = 100. // Note that if ctx has a deadline, the deadline will probably // be hit before we reach 100 iterations. for i := 0; len(resp.Deferred) > 0 && i < 100; i++ { req.Keys = resp.Deferred resp, err = c.client.Lookup(ctx, req) if err != nil { return err } found = append(found, resp.Found...) missing = append(missing, resp.Missing...) } filled := 0 for _, e := range found { k, err := protoToKey(e.Entity.Key) if err != nil { return errors.New("datastore: internal error: server returned an invalid key") } filled += len(keyMap[k.String()]) for _, index := range keyMap[k.String()] { elem := v.Index(index) if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { elem = elem.Addr() } if multiArgType == multiArgTypeStructPtr && elem.IsNil() { elem.Set(reflect.New(elem.Type().Elem())) } if err := loadEntityProto(elem.Interface(), e.Entity); err != nil { multiErr[index] = err any = true } } } for _, e := range missing { k, err := protoToKey(e.Entity.Key) if err != nil { return errors.New("datastore: internal error: server returned an invalid key") } filled += len(keyMap[k.String()]) for _, index := range keyMap[k.String()] { multiErr[index] = ErrNoSuchEntity } any = true } if filled != len(keys) { return errors.New("datastore: internal error: server returned the wrong number of entities") } if any { return multiErr } return nil } // Put saves the entity src into the datastore with key k. src must be a struct // pointer or implement PropertyLoadSaver; if a struct pointer then any // unexported fields of that struct will be skipped. If k is an incomplete key, // the returned key will be a unique key generated by the datastore. 
func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) { k, err := c.PutMulti(ctx, []*Key{key}, []interface{}{src}) if err != nil { if me, ok := err.(MultiError); ok { return nil, me[0] } return nil, err } return k[0], nil } // PutMulti is a batch version of Put. // // src must satisfy the same conditions as the dst argument to GetMulti. // TODO(jba): rewrite in terms of Mutate. func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) (ret []*Key, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.PutMulti") defer func() { trace.EndSpan(ctx, err) }() mutations, err := putMutations(keys, src) if err != nil { return nil, err } // Make the request. req := &pb.CommitRequest{ ProjectId: c.dataset, Mutations: mutations, Mode: pb.CommitRequest_NON_TRANSACTIONAL, } resp, err := c.client.Commit(ctx, req) if err != nil { return nil, err } // Copy any newly minted keys into the returned keys. ret = make([]*Key, len(keys)) for i, key := range keys { if key.Incomplete() { // This key is in the mutation results. ret[i], err = protoToKey(resp.MutationResults[i].Key) if err != nil { return nil, errors.New("datastore: internal error: server returned an invalid key") } } else { ret[i] = key } } return ret, nil } func putMutations(keys []*Key, src interface{}) ([]*pb.Mutation, error) { v := reflect.ValueOf(src) multiArgType, _ := checkMultiArg(v) if multiArgType == multiArgTypeInvalid { return nil, errors.New("datastore: src has invalid type") } if len(keys) != v.Len() { return nil, errors.New("datastore: key and src slices have different length") } if len(keys) == 0 { return nil, nil } if err := multiValid(keys); err != nil { return nil, err } mutations := make([]*pb.Mutation, 0, len(keys)) multiErr := make(MultiError, len(keys)) hasErr := false for i, k := range keys { elem := v.Index(i) // Two cases where we need to take the address: // 1) multiArgTypePropertyLoadSaver => &elem implements PLS // 2) multiArgTypeStruct => saveEntity needs *struct if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct { elem = elem.Addr() } p, err := saveEntity(k, elem.Interface()) if err != nil { multiErr[i] = err hasErr = true } var mut *pb.Mutation if k.Incomplete() { mut = &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}} } else { mut = &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}} } mutations = append(mutations, mut) } if hasErr { return nil, multiErr } return mutations, nil } // Delete deletes the entity for the given key. func (c *Client) Delete(ctx context.Context, key *Key) error { err := c.DeleteMulti(ctx, []*Key{key}) if me, ok := err.(MultiError); ok { return me[0] } return err } // DeleteMulti is a batch version of Delete. // TODO(jba): rewrite in terms of Mutate. 
func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) (err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.DeleteMulti") defer func() { trace.EndSpan(ctx, err) }() mutations, err := deleteMutations(keys) if err != nil { return err } req := &pb.CommitRequest{ ProjectId: c.dataset, Mutations: mutations, Mode: pb.CommitRequest_NON_TRANSACTIONAL, } _, err = c.client.Commit(ctx, req) return err } func deleteMutations(keys []*Key) ([]*pb.Mutation, error) { mutations := make([]*pb.Mutation, 0, len(keys)) set := make(map[string]bool, len(keys)) multiErr := make(MultiError, len(keys)) hasErr := false for i, k := range keys { if !k.valid() { multiErr[i] = ErrInvalidKey hasErr = true } else if k.Incomplete() { multiErr[i] = fmt.Errorf("datastore: can't delete the incomplete key: %v", k) hasErr = true } else { ks := k.String() if !set[ks] { mutations = append(mutations, &pb.Mutation{ Operation: &pb.Mutation_Delete{Delete: keyToProto(k)}, }) } set[ks] = true } } if hasErr { return nil, multiErr } return mutations, nil } // Mutate applies one or more mutations atomically. // It returns the keys of the argument Mutations, in the same order. // // If any of the mutations are invalid, Mutate returns a MultiError with the errors. // Mutate returns a MultiError in this case even if there is only one Mutation. func (c *Client) Mutate(ctx context.Context, muts ...*Mutation) (ret []*Key, err error) { ctx = trace.StartSpan(ctx, "cloud.google.com/go/datastore.Mutate") defer func() { trace.EndSpan(ctx, err) }() pmuts, err := mutationProtos(muts) if err != nil { return nil, err } req := &pb.CommitRequest{ ProjectId: c.dataset, Mutations: pmuts, Mode: pb.CommitRequest_NON_TRANSACTIONAL, } resp, err := c.client.Commit(ctx, req) if err != nil { return nil, err } // Copy any newly minted keys into the returned keys. ret = make([]*Key, len(muts)) for i, mut := range muts { if mut.key.Incomplete() { // This key is in the mutation results. ret[i], err = protoToKey(resp.MutationResults[i].Key) if err != nil { return nil, errors.New("datastore: internal error: server returned an invalid key") } } else { ret[i] = mut.key } } return ret, nil }
[ "\"DATASTORE_EMULATOR_HOST\"", "\"DATASTORE_HOST\"", "\"DATASTORE_EMULATOR_HOST\"", "\"DATASTORE_DATASET\"", "\"DATASTORE_PROJECT_ID\"", "\"DATASTORE_PROJECT_ID\"" ]
[]
[ "DATASTORE_DATASET", "DATASTORE_EMULATOR_HOST", "DATASTORE_PROJECT_ID", "DATASTORE_HOST" ]
[]
["DATASTORE_DATASET", "DATASTORE_EMULATOR_HOST", "DATASTORE_PROJECT_ID", "DATASTORE_HOST"]
go
4
0
shared/version/api.go
package version import ( "os" "strconv" ) // APIVersion contains the API base version. Only bumped for backward incompatible changes. var APIVersion = "1.0" // APIExtensions is the list of all API extensions in the order they were added. // // The following kind of changes come with a new extensions: // // - New configuration key // - New valid values for a configuration key // - New REST API endpoint // - New argument inside an existing REST API call // - New HTTPs authentication mechanisms or protocols // // This list is used mainly by the LXD server code, but it's in the shared // package as well for reference. var APIExtensions = []string{ "storage_zfs_remove_snapshots", "container_host_shutdown_timeout", "container_stop_priority", "container_syscall_filtering", "auth_pki", "container_last_used_at", "etag", "patch", "usb_devices", "https_allowed_credentials", "image_compression_algorithm", "directory_manipulation", "container_cpu_time", "storage_zfs_use_refquota", "storage_lvm_mount_options", "network", "profile_usedby", "container_push", "container_exec_recording", "certificate_update", "container_exec_signal_handling", "gpu_devices", "container_image_properties", "migration_progress", "id_map", "network_firewall_filtering", "network_routes", "storage", "file_delete", "file_append", "network_dhcp_expiry", "storage_lvm_vg_rename", "storage_lvm_thinpool_rename", "network_vlan", "image_create_aliases", "container_stateless_copy", "container_only_migration", "storage_zfs_clone_copy", "unix_device_rename", "storage_lvm_use_thinpool", "storage_rsync_bwlimit", "network_vxlan_interface", "storage_btrfs_mount_options", "entity_description", "image_force_refresh", "storage_lvm_lv_resizing", "id_map_base", "file_symlinks", "container_push_target", "network_vlan_physical", "storage_images_delete", "container_edit_metadata", "container_snapshot_stateful_migration", "storage_driver_ceph", "storage_ceph_user_name", "resource_limits", "storage_volatile_initial_source", "storage_ceph_force_osd_reuse", "storage_block_filesystem_btrfs", "resources", "kernel_limits", "storage_api_volume_rename", "macaroon_authentication", "network_sriov", "console", "restrict_devlxd", "migration_pre_copy", "infiniband", "maas_network", "devlxd_events", "proxy", "network_dhcp_gateway", "file_get_symlink", "network_leases", "unix_device_hotplug", "storage_api_local_volume_handling", "operation_description", "clustering", "event_lifecycle", "storage_api_remote_volume_handling", "nvidia_runtime", "container_mount_propagation", "container_backup", "devlxd_images", "container_local_cross_pool_handling", "proxy_unix", "proxy_udp", "clustering_join", "proxy_tcp_udp_multi_port_handling", "network_state", "proxy_unix_dac_properties", "container_protection_delete", "unix_priv_drop", "pprof_http", "proxy_haproxy_protocol", "network_hwaddr", "proxy_nat", "network_nat_order", "container_full", "candid_authentication", "backup_compression", "candid_config", "nvidia_runtime_config", "storage_api_volume_snapshots", "storage_unmapped", "projects", "candid_config_key", "network_vxlan_ttl", "container_incremental_copy", "usb_optional_vendorid", "snapshot_scheduling", "snapshot_schedule_aliases", "container_copy_project", "clustering_server_address", "clustering_image_replication", "container_protection_shift", "snapshot_expiry", "container_backup_override_pool", "snapshot_expiry_creation", "network_leases_location", "resources_cpu_socket", "resources_gpu", "resources_numa", "kernel_features", "id_map_current", "event_location", 
"storage_api_remote_volume_snapshots", "network_nat_address", "container_nic_routes", "rbac", "cluster_internal_copy", "seccomp_notify", "lxc_features", "container_nic_ipvlan", "network_vlan_sriov", "storage_cephfs", "container_nic_ipfilter", "resources_v2", "container_exec_user_group_cwd", "container_syscall_intercept", "container_disk_shift", "storage_shifted", "resources_infiniband", "daemon_storage", "instances", "image_types", "resources_disk_sata", "clustering_roles", "images_expiry", "resources_network_firmware", "backup_compression_algorithm", "ceph_data_pool_name", "container_syscall_intercept_mount", "compression_squashfs", "container_raw_mount", "container_nic_routed", "container_syscall_intercept_mount_fuse", "container_disk_ceph", "virtual-machines", "image_profiles", "clustering_architecture", "resources_disk_id", "storage_lvm_stripes", "vm_boot_priority", "unix_hotplug_devices", "api_filtering", "instance_nic_network", "clustering_sizing", "firewall_driver", "projects_limits", "container_syscall_intercept_hugetlbfs", "limits_hugepages", "container_nic_routed_gateway", "projects_restrictions", "custom_volume_snapshot_expiry", "volume_snapshot_scheduling", "trust_ca_certificates", "snapshot_disk_usage", "clustering_edit_roles", "container_nic_routed_host_address", "container_nic_ipvlan_gateway", "resources_usb_pci", "resources_cpu_threads_numa", "resources_cpu_core_die", "api_os", "container_nic_routed_host_table", "container_nic_ipvlan_host_table", "container_nic_ipvlan_mode", "resources_system", "images_push_relay", "network_dns_search", "container_nic_routed_limits", "instance_nic_bridged_vlan", "network_state_bond_bridge", "usedby_consistency", "custom_block_volumes", "clustering_failure_domains", "resources_gpu_mdev", "console_vga_type", "projects_limits_disk", "network_type_macvlan", "network_type_sriov", "container_syscall_intercept_bpf_devices", "network_type_ovn", "projects_networks", "projects_networks_restricted_uplinks", "custom_volume_backup", "backup_override_name", "storage_rsync_compression", "network_type_physical", "network_ovn_external_subnets", "network_ovn_nat", "network_ovn_external_routes_remove", "tpm_device_type", "storage_zfs_clone_copy_rebase", "gpu_mdev", "resources_pci_iommu", "resources_network_usb", "resources_disk_address", "network_physical_ovn_ingress_mode", "network_ovn_dhcp", "network_physical_routes_anycast", "projects_limits_instances", "network_state_vlan", "instance_nic_bridged_port_isolation", "instance_bulk_state_change", "network_gvrp", "instance_pool_move", "gpu_sriov", "pci_device_type", "storage_volume_state", "network_acl", "migration_stateful", "disk_state_quota", "storage_ceph_features", "projects_compression", "projects_images_remote_cache_expiry", "certificate_project", "network_ovn_acl", "projects_images_auto_update", "projects_restricted_cluster_target", "images_default_architecture", "network_ovn_acl_defaults", "gpu_mig", "project_usage", "network_bridge_acl", "warnings", "projects_restricted_backups_and_snapshots", "clustering_join_token", "clustering_description", } // APIExtensionsCount returns the number of available API extensions. func APIExtensionsCount() int { count := len(APIExtensions) // This environment variable is an internal one to force the code // to believe that we have an API extensions count greater than we // actually have. It's used by integration tests to exercise the // cluster upgrade process. 
artificialBump := os.Getenv("LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS") if artificialBump != "" { n, err := strconv.Atoi(artificialBump) if err == nil { count += n } } return count }
[ "\"LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS\"" ]
[]
[ "LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS" ]
[]
["LXD_ARTIFICIALLY_BUMP_API_EXTENSIONS"]
go
1
0
mutex-scaling/informer/main.go
package main

import (
	"context"
	"fmt"
	"os"
	"sync"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

func getNamespace() string {
	namespace := os.Getenv("MY_POD_NAMESPACE")
	if namespace == "" {
		namespace = "default"
	}
	return namespace
}

var (
	threadLock sync.RWMutex
	// this is a map of ordinals to pod names
	podConfig         map[int]string
	deferredPodConfig []string
)

const (
	invalidOrdinal = -1
)

func removeKey(podName string) {
	fmt.Println("Removing ordinal value for", podName)
	for index, name := range podConfig {
		if name == podName {
			podConfig[index] = "" // mark as free
			break
		}
	}
	// Clean up stopped ones still awaiting to be added
	for index, val := range deferredPodConfig {
		if val == podName {
			fmt.Println("Removing deferred start for", podName)
			// remove maintaining order
			deferredPodConfig = append(deferredPodConfig[:index], deferredPodConfig[index+1:]...)
			// we really should not appear more than once...
			break
		}
	}
}

func getOrdinal(podName string) int {
	fmt.Println("Attempting to get ordinal for", podName)
	ordinalValue := invalidOrdinal
	for index, value := range podConfig {
		if value == podName {
			fmt.Println("Found existing ordinal value", index, "for", podName)
			return index
		}
		if value == "" {
			fmt.Println("Adding ordinal value", index, "for", podName)
			ordinalValue = index
			podConfig[index] = podName
			break
		}
	}
	// increment to get next available
	if ordinalValue == invalidOrdinal {
		fmt.Println("Currently unable to handle", podName, "as too many replicas running")
		addDeferred(podName)
	}
	return ordinalValue
}

func addDeferred(podName string) {
	deferredPodConfig = append(deferredPodConfig, podName)
}

func popDeferred() string {
	podName := ""
	if len(deferredPodConfig) > 0 {
		podName = deferredPodConfig[0]
		fmt.Println("Handling deferred pod start for", podName)
		deferredPodConfig = deferredPodConfig[1:]
	}
	return podName
}

func main() {
	// creates the in-cluster config
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err.Error())
	}
	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	initialiseOrdinalMap(clientset)
	// TODO: leader election here requires RBAC sorting on K8S side really
	serveConfig(clientset)
}

func leaderElectionAndServe(clientset *kubernetes.Clientset) {
	nodeId := os.Getenv("MY_POD_NAME")
	lock := &resourcelock.LeaseLock{
		LeaseMeta: metav1.ObjectMeta{
			Name:      "cbes-informer-lock",
			Namespace: getNamespace(),
		},
		Client: clientset.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{
			Identity: nodeId,
		},
	}

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		ReleaseOnCancel: true,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				fmt.Println("Started leading")
				serveConfig(clientset)
			},
			OnStoppedLeading: func() {
				fmt.Println("Stopped leading")
				os.Exit(0) // just exit to restart but close HTTP server
			},
			OnNewLeader: func(identity string) {
				if identity == nodeId {
					fmt.Println("Just acquired leadership", nodeId)
					return
				}
				fmt.Println("New leader", identity)
			},
		},
	})
}

func getReplicaCount(clientset *kubernetes.Clientset) int {
	deploymentSpec, err := clientset.AppsV1().Deployments(getNamespace()).GetScale(context.TODO(), "cbes-deployment", metav1.GetOptions{})
	if err != nil {
		panic(err.Error())
	}
	return int(deploymentSpec.Spec.Replicas)
}

func initialiseOrdinalMap(clientset *kubernetes.Clientset) {
	// TODO: Want to be notified on this changing as well - use an informer that watches for the scaling event on the deployment
	totalReplicas := getReplicaCount(clientset)
	// TODO: on scaling changes we want to recalculate and provide ordinals - this might be better via a push notification over RPC then
	originalLength := len(podConfig)
	if originalLength != totalReplicas {
		fmt.Println("Detected", totalReplicas, "replicas, currently set to", originalLength)
		// If we have pod C allocated as ordinal 1 then we scale from 3 pods to 2, K8S may kill pod C even though it is ordinal 1.
		// This means every time there is a scaling change we need to just wipe it all and start again.
		podConfig = make(map[int]string, totalReplicas)
		for i := 0; i < int(totalReplicas); i++ {
			podConfig[i] = ""
		}
		// When scaling in either direction, the ordinals for existing pods do not change however their total range does.
	}
}

func serveConfig(clientset *kubernetes.Clientset) {
	// now watch apps we are monitoring
	options := func(options *metav1.ListOptions) {
		options.LabelSelector = "app=cbes"
	}
	sharedOptions := []informers.SharedInformerOption{
		informers.WithNamespace(getNamespace()),
		informers.WithTweakListOptions(options),
	}
	informer := informers.NewSharedInformerFactoryWithOptions(clientset, time.Second, sharedOptions...)

	podInformer := informer.Core().V1().Pods().Informer()

	stopper := make(chan struct{})
	defer close(stopper)
	defer runtime.HandleCrash()

	podInformer.AddEventHandler(&cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			pod := obj.(*v1.Pod)
			fmt.Println("Pod started", pod.Name, pod.Status.Reason)
			threadLock.Lock()
			defer threadLock.Unlock()
			addConfig(pod.Name)
			updateConfigMap(clientset)
		},
		DeleteFunc: func(obj interface{}) {
			pod := obj.(*v1.Pod)
			fmt.Println("Pod stopped", pod.Name, pod.Status.Reason)
			threadLock.Lock()
			defer threadLock.Unlock()
			removeConfig(pod.Name)
			updateConfigMap(clientset)
		},
	})

	podInformer.Run(stopper)
}

// TODO: thread protection - might just be better to guard the two event methods
func updateConfigMap(clientset *kubernetes.Clientset) {
	// load current
	configMap, err := clientset.CoreV1().ConfigMaps(getNamespace()).Get(context.TODO(), "cbes-config-dynamic", metav1.GetOptions{})
	if err != nil {
		panic(err.Error())
	}
	// overwrite with our data - TODO: leader election probably needs to merge this
	configMapData["overall.conf"] = fmt.Sprintf("TOTAL_REPLICAS=%d\n", getReplicaCount(clientset))
	configMap.Data = configMapData

	_, err = clientset.CoreV1().ConfigMaps(getNamespace()).Update(context.Background(), configMap, metav1.UpdateOptions{})
	if err != nil {
		panic(err.Error())
	}
}

var configMapData map[string]string = make(map[string]string)

func getConfigFileName(podName string) string {
	return fmt.Sprintf("%s.conf", podName)
}

func addConfig(podName string) {
	if podName == "" {
		return
	}
	ordinalForPod := getOrdinal(podName)
	if ordinalForPod == invalidOrdinal {
		fmt.Println("Deferred config for", podName)
		return
	}
	fmt.Println("Ordinal", ordinalForPod, "for pod", podName)
	configMapData[getConfigFileName(podName)] = fmt.Sprintf("CBES_ORDINAL=%d\n", ordinalForPod)
}

func removeConfig(podName string) {
	removeKey(podName)
	delete(configMapData, getConfigFileName(podName))
	// Attempt to add anything that is deferred
	addConfig(popDeferred())
}
[ "\"MY_POD_NAMESPACE\"", "\"MY_POD_NAME\"" ]
[]
[ "MY_POD_NAME", "MY_POD_NAMESPACE" ]
[]
["MY_POD_NAME", "MY_POD_NAMESPACE"]
go
2
0
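The record above extracts two environment variables, MY_POD_NAMESPACE and MY_POD_NAME, both read with os.Getenv and (for the namespace) defaulted when unset. Below is a minimal, self-contained Go sketch of that read-with-fallback pattern; the variable names come from the record, while the helper name and the "unknown-pod" fallback are illustrative assumptions, not part of the original file.

package main

import (
	"fmt"
	"os"
)

// getenvDefault returns the value of key, or fallback when the variable is unset or empty.
func getenvDefault(key, fallback string) string {
	if value := os.Getenv(key); value != "" {
		return value
	}
	return fallback
}

func main() {
	namespace := getenvDefault("MY_POD_NAMESPACE", "default")
	podName := getenvDefault("MY_POD_NAME", "unknown-pod") // hypothetical fallback, for illustration only
	fmt.Println("namespace:", namespace, "pod:", podName)
}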
parquet-tools/src/main/java/parquet/tools/util/PrettyPrintWriter.java
/** * Copyright 2013 ARRIS, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package parquet.tools.util; import java.io.IOException; import java.io.OutputStream; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Formatter; import java.util.List; import java.util.Locale; import parquet.tools.Main; import com.google.common.base.Joiner; import com.google.common.base.Strings; public class PrettyPrintWriter extends PrintWriter { public static final String MORE = " [more]..."; public static final String LINE_SEP = System.getProperty("line.separator"); public static final Span DEFAULT_APPEND; public static final char DEFAULT_COLUMN_SEP = ':'; public static final int DEFAULT_MAX_COLUMNS = 1; public static final int DEFAULT_COLUMN_PADDING = 1; public static final int DEFAULT_TABS = 4; public static final int DEFAULT_WIDTH; public static final int DEFAULT_COLORS; private static final String RESET = "\u001B[0m"; public static final String MODE_OFF = "0"; public static final String MODE_BOLD = "1"; public static final String MODE_UNDER = "4"; public static final String MODE_BLINK = "5"; public static final String MODE_REVERSE = "7"; public static final String MODE_CONCEALED = "8"; public static final String FG_COLOR_BLACK = "30"; public static final String FG_COLOR_RED = "31"; public static final String FG_COLOR_GREEN = "32"; public static final String FG_COLOR_YELLOW = "33"; public static final String FG_COLOR_BLUE = "34"; public static final String FG_COLOR_MAGENTA = "35"; public static final String FG_COLOR_CYAN = "36"; public static final String FG_COLOR_WHITE = "37"; public static final String BG_COLOR_BLACK = "40"; public static final String BG_COLOR_RED = "41"; public static final String BG_COLOR_GREEN = "42"; public static final String BG_COLOR_YELLOW = "43"; public static final String BG_COLOR_BLUE = "44"; public static final String BG_COLOR_MAGENTA = "45"; public static final String BG_COLOR_CYAN = "46"; public static final String BG_COLOR_WHITE = "47"; public enum WhiteSpaceHandler { ELIMINATE_NEWLINES, COLLAPSE_WHITESPACE } static { int consoleWidth = 80; int numColors = 0; String columns = System.getenv("COLUMNS"); if (columns != null && !columns.isEmpty()) { try { consoleWidth = Integer.parseInt(columns); } catch (Throwable th) { } } String colors = System.getenv("COLORS"); if (colors != null && !colors.isEmpty()) { try { numColors = Integer.parseInt(colors); if (numColors < 0) numColors = 0; } catch (Throwable th) { } } String termout = System.getenv("TERMOUT"); if (termout != null && !termout.isEmpty()) { if (!"y".equalsIgnoreCase(termout) && !"yes".equalsIgnoreCase(termout) && !"t".equalsIgnoreCase(termout) && !"true".equalsIgnoreCase(termout) && !"on".equalsIgnoreCase(termout)) { consoleWidth = Integer.MAX_VALUE; numColors = 0; } } if (System.getProperty("DISABLE_COLORS", null) != null) { numColors = 0; } DEFAULT_WIDTH = consoleWidth; DEFAULT_COLORS = numColors; if (numColors > 0) { DEFAULT_APPEND = mkspan(MORE, null, FG_COLOR_RED, null); } else { DEFAULT_APPEND = 
mkspan(MORE); } } private final StringBuilder formatString; private final Formatter formatter; private final ArrayList<Line> buffer; private final boolean autoColumn; private final boolean autoCrop; private final Span appendToLongLine; private final int consoleWidth; private final int tabWidth; private final char columnSeparator; private final int maxColumns; private final int columnPadding; private final long maxBufferedLines; private final boolean flushOnTab; private final WhiteSpaceHandler whiteSpaceHandler; private int tabLevel; private String colorMode; private String colorForeground; private String colorBackground; private String tabs; private PrettyPrintWriter(OutputStream out, boolean autoFlush, boolean autoColumn, boolean autoCrop, Span appendToLongLine, int consoleWidth, int tabWidth, char columnSeparator, int maxColumns, int columnPadding, long maxBufferedLines, boolean flushOnTab, WhiteSpaceHandler whiteSpaceHandler) { super(out, autoFlush && !autoColumn); this.autoColumn = autoColumn; this.autoCrop = autoCrop; this.appendToLongLine = appendToLongLine; this.consoleWidth = consoleWidth; this.tabWidth = tabWidth; this.columnSeparator = columnSeparator; this.maxColumns = maxColumns; this.maxBufferedLines = maxBufferedLines; this.columnPadding = columnPadding; this.flushOnTab = flushOnTab; this.whiteSpaceHandler = whiteSpaceHandler; this.buffer = new ArrayList<Line>(); this.formatString = new StringBuilder(); this.formatter = new Formatter(this.formatString); this.colorMode = null; this.colorForeground = null; this.colorBackground = null; this.tabLevel = 0; this.tabs = ""; this.buffer.add(new Line()); } public void setTabLevel(int level) { this.tabLevel = level; this.tabs = Strings.repeat(" ", tabWidth * level); if (flushOnTab) flushColumns(); } public void incrementTabLevel() { setTabLevel(tabLevel + 1); } public void decrementTabLevel() { if (tabLevel == 0) { return; } setTabLevel(tabLevel - 1); } private int determineNumColumns() { int max = 0; for (Line line : buffer) { int num = line.countCharacter(columnSeparator); if (num > max) { max = num; } } return max > maxColumns ? 
maxColumns : max; } private int[] determineColumnWidths() { int columns = determineNumColumns(); if (columns == 0) { return null; } int[] widths = new int[columns]; for (Line line : buffer) { for (int last = 0, idx = 0; last < line.length() && idx < columns; ++idx) { int pos = line.indexOf(columnSeparator, last); if (pos < 0) break; int wid = pos - last + 1 + columnPadding; if (wid > widths[idx]) { widths[idx] = wid; } last = line.firstNonWhiteSpace(idx + 1); } } return widths; } private Line toColumns(int[] widths, Line line) throws IOException { int last = 0; for (int i = 0; i < widths.length; ++i) { int width = widths[i]; int idx = line.indexOf(columnSeparator, last); if (idx < 0) break; if ((idx+1) <= width) { line.spaceOut(width - (idx+1), idx+1); } last = line.firstNonWhiteSpace(idx + 1); } return line; } public void flushColumns() { flushColumns(false); } private void flushColumns(boolean preserveLast) { int size = buffer.size(); int[] widths = null; if (autoColumn) { widths = determineColumnWidths(); } StringBuilder builder = new StringBuilder(); try { for (int i = 0; i < size - 1; ++i) { Line line = buffer.get(i); if (widths != null) { line = toColumns(widths, line); } fixupLine(line); builder.setLength(0); line.toString(builder); super.out.append(builder.toString()); super.out.append(LINE_SEP); } if (!preserveLast) { Line line = buffer.get(size - 1); if (widths != null) { line = toColumns(widths, line); } fixupLine(line); builder.setLength(0); line.toString(builder); super.out.append(builder.toString()); } super.out.flush(); } catch (IOException ex) { } Line addback = null; if (preserveLast) { addback = buffer.get(size - 1); } buffer.clear(); if (addback != null) buffer.add(addback); else buffer.add(new Line()); } private void flushIfNeeded() { flushIfNeeded(false); } private void flushIfNeeded(boolean preserveLast) { if (!autoColumn || buffer.size() > maxBufferedLines) { flushColumns(preserveLast); } } private void appendToCurrent(String s) { int size = buffer.size(); Line value = buffer.get(size - 1); if (value.isEmpty()) { value.append(tabs()); } value.append(span(s)); } private void fixupLine(Line line) { if (autoCrop) { line.trimTo(consoleWidth, appendToLongLine); } } private void print(String s, boolean mayHaveNewlines) { if (s == null) { appendToCurrent("null"); return; } if (s.isEmpty()) { return; } if (LINE_SEP.equals(s)) { buffer.add(new Line()); flushIfNeeded(); return; } if (whiteSpaceHandler != null) { boolean endswith = s.endsWith(LINE_SEP); switch (whiteSpaceHandler) { case ELIMINATE_NEWLINES: s = s.replaceAll("\\r\\n|\\r|\\n", " "); break; case COLLAPSE_WHITESPACE: s = s.replaceAll("\\s+", " "); break; } mayHaveNewlines = endswith; if (endswith) s = s + LINE_SEP; } if (!mayHaveNewlines) { appendToCurrent(s); return; } String lines[] = s.split("\\r?\\n", -1); appendToCurrent(lines[0]); for (int i = 1; i < lines.length; ++i) { String value = lines[i]; if (value.isEmpty()) { buffer.add(new Line()); } else { Line line = new Line(); line.append(tabs()); line.append(span(value, true)); buffer.add(line); } } resetColor(); flushIfNeeded(true); } @Override public void print(String s) { print(s, true); } @Override public void println() { print(LINE_SEP, true); flushIfNeeded(); } @Override public void println(String x) { print(x); println(); } @Override public void print(boolean b) { print(String.valueOf(b), false); } @Override public void print(char c) { print(String.valueOf(c), false); } @Override public void print(int i) { print(String.valueOf(i), false); } @Override 
public void print(long l) { print(String.valueOf(l), false); } @Override public void print(float f) { print(String.valueOf(f), false); } @Override public void print(double d) { print(String.valueOf(d), false); } @Override public void print(char[] s) { print(String.valueOf(s), true); } @Override public void print(Object obj) { print(String.valueOf(obj), true); } @Override public PrintWriter printf(String format, Object... args) { return printf(formatter.locale(), format, args); } @Override public PrintWriter printf(Locale l, String format, Object... args) { formatter.format(l, format, args); String results = formatString.toString(); formatString.setLength(0); print(results); flushIfNeeded(); return this; } @Override public PrintWriter format(String format, Object... args) { return printf(format, args); } @Override public PrintWriter format(Locale l, String format, Object... args) { return printf(l, format, args); } @Override public PrintWriter append(char c) { print(c); return this; } @Override public PrintWriter append(CharSequence csq) { if (csq == null) { print("null"); return this; } return append(csq, 0, csq.length()); } @Override public PrintWriter append(CharSequence csq, int start, int end) { if (csq == null) { print("null"); return this; } print(csq.subSequence(start,end).toString()); return this; } @Override public void println(boolean x) { print(x); println(); } @Override public void println(char x) { print(x); println(); } @Override public void println(int x) { print(x); println(); } @Override public void println(long x) { print(x); println(); } @Override public void println(float x) { print(x); println(); } @Override public void println(double x) { print(x); println(); } @Override public void println(char[] x) { print(x); println(); } @Override public void println(Object x) { print(x); println(); } public void rule(char c) { if (tabs.length() >= consoleWidth) return; int width = consoleWidth; if (width == Integer.MAX_VALUE) { width = 100; } println(Strings.repeat(String.valueOf(c), width - tabs.length())); } public boolean acceptColorModification = true; public PrettyPrintWriter iff(boolean predicate) { if (!predicate && acceptColorModification) { resetColor(); } else { acceptColorModification = false; } return this; } public PrettyPrintWriter otherwise() { acceptColorModification = false; return this; } public PrettyPrintWriter black() { if (!acceptColorModification) return this; colorForeground = FG_COLOR_BLACK; return this; } public PrettyPrintWriter red() { if (!acceptColorModification) return this; colorForeground = FG_COLOR_RED; return this; } public PrettyPrintWriter green() { if (!acceptColorModification) return this; colorForeground = FG_COLOR_GREEN; return this; } public PrettyPrintWriter yellow() { if (!acceptColorModification) return this; colorForeground = FG_COLOR_YELLOW; return this; } public PrettyPrintWriter blue() { if (!acceptColorModification) return this; colorForeground = FG_COLOR_BLUE; return this; } public PrettyPrintWriter magenta() { if (!acceptColorModification) return this; colorForeground = FG_COLOR_MAGENTA; return this; } public PrettyPrintWriter cyan() { if (!acceptColorModification) return this; colorForeground = FG_COLOR_CYAN; return this; } public PrettyPrintWriter white() { if (!acceptColorModification) return this; colorForeground = FG_COLOR_WHITE; return this; } public PrettyPrintWriter bgblack() { if (!acceptColorModification) return this; colorBackground = BG_COLOR_BLACK; return this; } public PrettyPrintWriter bgred() { if 
(!acceptColorModification) return this; colorBackground = BG_COLOR_RED; return this; } public PrettyPrintWriter bggreen() { if (!acceptColorModification) return this; colorBackground = BG_COLOR_GREEN; return this; } public PrettyPrintWriter bgyellow() { if (!acceptColorModification) return this; colorBackground = BG_COLOR_YELLOW; return this; } public PrettyPrintWriter bgblue() { if (!acceptColorModification) return this; colorBackground = BG_COLOR_BLUE; return this; } public PrettyPrintWriter bgmagenta() { if (!acceptColorModification) return this; colorBackground = BG_COLOR_MAGENTA; return this; } public PrettyPrintWriter bgcyan() { if (!acceptColorModification) return this; colorBackground = BG_COLOR_CYAN; return this; } public PrettyPrintWriter bgwhite() { if (!acceptColorModification) return this; colorBackground = BG_COLOR_WHITE; return this; } public PrettyPrintWriter bold() { if (!acceptColorModification) return this; colorMode = MODE_BOLD; return this; } public PrettyPrintWriter blink() { if (!acceptColorModification) return this; colorMode = MODE_BLINK; return this; } public PrettyPrintWriter concealed() { if (!acceptColorModification) return this; colorMode = MODE_CONCEALED; return this; } public PrettyPrintWriter off() { if (!acceptColorModification) return this; colorMode = MODE_OFF; return this; } public PrettyPrintWriter underscore() { if (!acceptColorModification) return this; colorMode = MODE_UNDER; return this; } public PrettyPrintWriter reverse() { if (!acceptColorModification) return this; colorMode = MODE_REVERSE; return this; } public static Builder stdoutPrettyPrinter() { return new Builder(Main.out).withAutoFlush(); } public static Builder stderrPrettyPrinter() { return new Builder(Main.err).withAutoFlush(); } public static Builder newPrettyPrinter(OutputStream out) { return new Builder(out); } public static final class Builder { private final OutputStream out; private boolean autoFlush; private boolean autoColumn; private char columnSeparator; private int maxColumns; private int columnPadding; private long maxBufferedLines; private boolean autoCrop; private int consoleWidth; private Span appendToLongLine; private int tabWidth; private boolean flushOnTab; private WhiteSpaceHandler whiteSpaceHandler; public Builder(OutputStream out) { this.out = out; this.autoFlush = false; this.autoColumn = false; this.flushOnTab = false; this.columnSeparator = DEFAULT_COLUMN_SEP; this.maxColumns = DEFAULT_MAX_COLUMNS; this.columnPadding = DEFAULT_COLUMN_PADDING; this.autoCrop = false; this.consoleWidth = DEFAULT_WIDTH; this.appendToLongLine = null; this.tabWidth = DEFAULT_TABS; this.whiteSpaceHandler = null; this.maxBufferedLines = Long.MAX_VALUE; } public Builder withAutoFlush() { this.autoFlush = true; return this; } public Builder withAutoCrop() { return withAutoCrop(DEFAULT_WIDTH); } public Builder withAutoCrop(int consoleWidth) { return withAutoCrop(consoleWidth, DEFAULT_APPEND); } public Builder withAutoCrop(int consoleWidth, String appendToLong) { return withAutoCrop(consoleWidth, mkspan(appendToLong)); } public Builder withAutoCrop(int consoleWidth, Span appendToLong) { this.consoleWidth = consoleWidth; this.appendToLongLine = appendToLong; this.autoCrop = true; return this; } public Builder withTabSize(int tabWidth) { this.tabWidth = tabWidth; return this; } public Builder withAutoColumn() { return withAutoColumn(DEFAULT_COLUMN_SEP); } public Builder withAutoColumn(char columnSeparator) { return withAutoColumn(columnSeparator, DEFAULT_MAX_COLUMNS); } public Builder 
withAutoColumn(char columnSeparator, int maxColumns) { this.autoColumn = true; this.columnSeparator = columnSeparator; this.maxColumns = maxColumns; return this; } public Builder withColumnPadding(int columnPadding) { this.columnPadding = columnPadding; return this; } public Builder withWhitespaceHandler(WhiteSpaceHandler whiteSpaceHandler) { this.whiteSpaceHandler = whiteSpaceHandler; return this; } public Builder withMaxBufferedLines(long maxBufferedLines) { this.maxBufferedLines = maxBufferedLines; return this; } public Builder withFlushOnTab() { this.flushOnTab = true; return this; } public PrettyPrintWriter build() { return new PrettyPrintWriter(out, autoFlush, autoColumn, autoCrop, appendToLongLine, consoleWidth, tabWidth, columnSeparator, maxColumns, columnPadding, maxBufferedLines, flushOnTab, whiteSpaceHandler); } } private Span tabs() { return new Span(tabs); } private Span span(String span) { return span(span, false); } private void resetColor() { acceptColorModification = true; colorMode = null; colorForeground = null; colorBackground = null; } public static Span mkspan(String span) { return new Span(span); } public static Span mkspan(String span, String color) { return mkspan(span, null, color, null); } public static Span mkspan(String span, String colorMode, String colorForeground, String colorBackground) { if (DEFAULT_COLORS > 0 && (colorMode != null || colorForeground != null || colorBackground != null)) { String color = "\u001B[" + Joiner.on(';').skipNulls().join(colorMode, colorForeground, colorBackground) + "m"; return new Span(span, color); } else { return mkspan(span); } } private Span span(String span, boolean keepColor) { Span result; if (DEFAULT_COLORS > 0 && (colorMode != null || colorForeground != null || colorBackground != null)) { result = mkspan(span, colorMode, colorForeground, colorBackground); } else { result = mkspan(span); } if (!keepColor) { resetColor(); } return result; } public static final class Line { private List<Span> spans; private int length; public Line() { this.spans = new ArrayList<Span>(); } public void append(Span span) { length += span.length(); if (spans.isEmpty()) { spans.add(span); return; } Span last = spans.get(spans.size() - 1); if (last.canAppend(span)) { last.append(span); } else { spans.add(span); } } public boolean isEmpty() { return length == 0; } public int length() { return length; } public int indexOf(char ch, int start) { int offset = 0; for (Span span : spans) { if (start > span.length()) { start -= span.length(); continue; } int idx = span.indexOf(ch, start); if (idx >= 0) return offset + idx; offset += span.length() - start; start = 0; } return -1; } public void spaceOut(int width, int start) { for (Span span : spans) { if (start > span.length()) { start -= span.length(); continue; } span.spaceOut(width, start); return; } } public int firstNonWhiteSpace(int start) { return start; } public int countCharacter(char ch) { int result = 0; for (Span span : spans) { result += span.countCharacter(ch); } return result; } public void trimTo(int width, Span appendToLongLine) { int i = 0; int remaining = width; for (i = 0; i < spans.size(); ++i) { Span next = spans.get(i); if (next.length() > remaining) { ++i; next.trimTo(remaining, appendToLongLine); break; } remaining -= next.length(); } for (; i < spans.size(); ++i) { spans.remove(i); } } public void toString(StringBuilder builder) { for (Span span : spans) { span.toString(builder); } } } public static final class Span { private String span; private final String color; public 
Span(String span) { this(span, null); } public Span(String span, String color) { this.span = span; this.color = color; } public int length() { return span.length(); } public boolean isEmpty() { return span.isEmpty(); } public int indexOf(char ch, int start) { return span.indexOf(ch, start); } public void spaceOut(int width, int start) { int removeTo = start; while (removeTo < span.length() && Character.isWhitespace(span.charAt(removeTo))) { removeTo++; } span = span.substring(0,start) + Strings.repeat(" ", width) + span.substring(removeTo); } public int countCharacter(char ch) { int result = 0; for (int i = 0; i < span.length(); ++i) { if (span.charAt(i) == ch) { result++; } } return result; } public void trimTo(int width, Span appendToLongLine) { if (appendToLongLine != null && !appendToLongLine.isEmpty()) { int shortten = appendToLongLine.length(); if (shortten > width) shortten = width; span = span.substring(0, width - shortten) + appendToLongLine; } else { span = span.substring(0, width+1); } } public String toString() { StringBuilder builder = new StringBuilder(); toString(builder); return builder.toString(); } public void toString(StringBuilder builder) { if (color != null) builder.append(color); builder.append(span); if (color != null) builder.append(RESET); } public void append(Span other) { span = span + other.span; } public boolean canAppend(Span other) { if (color == null && other == null) return true; if (color == null && other != null) return false; return color.equals(other); } } }
[ "\"COLUMNS\"", "\"COLORS\"", "\"TERMOUT\"" ]
[]
[ "COLORS", "TERMOUT", "COLUMNS" ]
[]
["COLORS", "TERMOUT", "COLUMNS"]
java
3
0
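The Java source above derives console width and colour support from COLUMNS, COLORS and TERMOUT in its static initializer, falling back to 80 columns and 0 colours when the values are missing or unparsable, and treating TERMOUT as a yes/no switch that disables cropping and colour. The Go sketch below re-expresses just that detection logic as an assumption-laden illustration; it mirrors the behaviour described in the record rather than being a port of the class itself.

package main

import (
	"fmt"
	"math"
	"os"
	"strconv"
	"strings"
)

// terminalSettings mirrors the COLUMNS/COLORS/TERMOUT handling of the Java static block:
// defaults of 80 columns and no colour, overridden only by parsable, positive values.
func terminalSettings() (width, colors int) {
	width, colors = 80, 0
	if v := os.Getenv("COLUMNS"); v != "" {
		if n, err := strconv.Atoi(v); err == nil {
			width = n
		}
	}
	if v := os.Getenv("COLORS"); v != "" {
		if n, err := strconv.Atoi(v); err == nil && n > 0 {
			colors = n
		}
	}
	// TERMOUT acts as an on/off switch: anything other than y/yes/t/true/on disables both.
	if v := os.Getenv("TERMOUT"); v != "" {
		switch strings.ToLower(v) {
		case "y", "yes", "t", "true", "on":
			// keep detected settings
		default:
			width = math.MaxInt32
			colors = 0
		}
	}
	return width, colors
}

func main() {
	w, c := terminalSettings()
	fmt.Println("width:", w, "colors:", c)
}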
src/util.py
import sys import cv2 import os from ast import literal_eval from pathlib import Path import shutil import logging import random import pickle import yaml import subprocess from PIL import Image from glob import glob import numpy as np import pandas as pd import matplotlib.pyplot as plt from matplotlib import animation, rc plt.rcParams['figure.figsize'] = 30, 30 np.set_printoptions(precision=3, suppress=True) rc('animation', html='jshtml') import torch from augmentations import get_albu_transforms IMAGE_DIR = '~/Kaggle/data/tensorflow-great-barrier-reef/train_images' def load_image(video_id, video_frame, image_dir): img_path = f'{image_dir}/video_{video_id}/{video_frame}.jpg' assert os.path.exists(img_path), f'{img_path} does not exist.' img = cv2.imread(img_path) return img def decode_annotations(annotaitons_str): """decode annotations in string to list of dict""" return literal_eval(annotaitons_str) def load_image_with_annotations(video_id, video_frame, image_dir, annotaitons_str): img = load_image(video_id, video_frame, image_dir) annotations = decode_annotations(annotaitons_str) if len(annotations) > 0: for ann in annotations: cv2.rectangle(img, (ann['x'], ann['y']), (ann['x'] + ann['width'], ann['y'] + ann['height']), (255, 0, 0), thickness=2,) return img def draw_predictions(img, pred_bboxes): img = img.copy() if len(pred_bboxes) > 0: for bbox in pred_bboxes: conf = bbox[0] x, y, w, h = bbox[1:].round().astype(int) cv2.rectangle(img, (x, y),(x+w, y+h),(0, 255, 255), thickness=2,) cv2.putText(img, f"{conf:.2}",(x, max(0, y-5)), cv2.FONT_HERSHEY_SIMPLEX,0.5,(0, 0, 255), thickness=1, ) return img def plot_img(df, idx, image_dir, pred_bboxes=None): row = df.iloc[idx] video_id = row.video_id video_frame = row.video_frame annotations_str = row.annotations img = load_image_with_annotations(video_id, video_frame, image_dir, annotations_str) if pred_bboxes and len(pred_bboxes) > 0: pred_bboxes = pred_bboxes[pred_bboxes[:,0].argsort()[::-1]] # sort by conf img = draw_predictions(img, pred_bboxes) plt.imshow(img[:, :, ::-1]) def calc_iou(bboxes1, bboxes2, bbox_mode='xywh'): assert len(bboxes1.shape) == 2 and bboxes1.shape[1] == 4 assert len(bboxes2.shape) == 2 and bboxes2.shape[1] == 4 bboxes1 = bboxes1.copy() bboxes2 = bboxes2.copy() if bbox_mode == 'xywh': bboxes1[:, 2:] += bboxes1[:, :2] bboxes2[:, 2:] += bboxes2[:, :2] x11, y11, x12, y12 = np.split(bboxes1, 4, axis=1) x21, y21, x22, y22 = np.split(bboxes2, 4, axis=1) xA = np.maximum(x11, np.transpose(x21)) yA = np.maximum(y11, np.transpose(y21)) xB = np.minimum(x12, np.transpose(x22)) yB = np.minimum(y12, np.transpose(y22)) interArea = np.maximum((xB - xA + 1e-9), 0) * np.maximum((yB - yA + 1e-9), 0) boxAArea = (x12 - x11 + 1e-9) * (y12 - y11 + 1e-9) boxBArea = (x22 - x21 + 1e-9) * (y22 - y21 + 1e-9) iou = interArea / (boxAArea + np.transpose(boxBArea) - interArea) return iou def f_beta(tp, fp, fn, beta=2): if tp == 0: return 0 return (1+beta**2)*tp / ((1+beta**2)*tp + beta**2*fn+fp) def calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th, verbose=False): gt_bboxes = gt_bboxes.copy() pred_bboxes = pred_bboxes.copy() tp = 0 fp = 0 for k, pred_bbox in enumerate(pred_bboxes): # fixed in ver.7 if len(gt_bboxes) == 0: fp += len(pred_bboxes) - k # fix in ver.7 break ious = calc_iou(gt_bboxes, pred_bbox[None, 1:]) max_iou = ious.max() if max_iou >= iou_th: tp += 1 gt_bboxes = np.delete(gt_bboxes, ious.argmax(), axis=0) else: fp += 1 fn = len(gt_bboxes) return tp, fp, fn def calc_is_correct(gt_bboxes, pred_bboxes, iou_th=0.5): """ gt_bboxes: 
(N, 4) np.array in xywh format pred_bboxes: (N, 5) np.array in conf+xywh format """ if len(gt_bboxes) == 0 and len(pred_bboxes) == 0: tps, fps, fns = 0, 0, 0 return tps, fps, fns elif len(gt_bboxes) == 0: tps, fps, fns = 0, len(pred_bboxes), 0 return tps, fps, fns elif len(pred_bboxes) == 0: tps, fps, fns = 0, 0, len(gt_bboxes) return tps, fps, fns pred_bboxes = pred_bboxes[pred_bboxes[:,0].argsort()[::-1]] # sort by conf tps, fps, fns = 0, 0, 0 tp, fp, fn = calc_is_correct_at_iou_th(gt_bboxes, pred_bboxes, iou_th) tps += tp fps += fp fns += fn return tps, fps, fns def calc_f2_score(gt_bboxes_list, pred_bboxes_list, verbose=False): """ gt_bboxes_list: list of (N, 4) np.array in xywh format pred_bboxes_list: list of (N, 5) np.array in conf+xywh format """ #f2s = [] f2_dict = {'f2':0, "P":0, "R": 0} all_tps = [list([0] * 11) for _ in range(len(gt_bboxes_list))] all_fps = [list([0] * 11) for _ in range(len(gt_bboxes_list))] all_fns = [list([0] * 11) for _ in range(len(gt_bboxes_list))] for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)): tps, fps, fns = 0, 0, 0 for i, (gt_bboxes, pred_bboxes) in enumerate(zip(gt_bboxes_list, pred_bboxes_list)): tp, fp, fn = calc_is_correct(gt_bboxes, pred_bboxes, iou_th) tps += tp fps += fp fns += fn all_tps[i][k] = tp all_fps[i][k] = fp all_fns[i][k] = fn if verbose: num_gt = len(gt_bboxes) num_pred = len(pred_bboxes) print(f'num_gt:{num_gt:<3} num_pred:{num_pred:<3} tp:{tp:<3} fp:{fp:<3} fn:{fn:<3}') f2 = f_beta(tps, fps, fns, beta=2) precision = f_beta(tps, fps, fns, beta=0) recall = f_beta(tps, fps, fns, beta=100) f2_dict["f2_" + str(round(iou_th,3))] = f2 f2_dict["P_" + str(round(iou_th,3))] = precision f2_dict["R_" + str(round(iou_th,3))] = recall f2_dict['f2'] += f2 / 11 f2_dict['P'] += precision / 11 f2_dict['R'] += recall / 11 f2_dict["tps"] = all_tps f2_dict["fps"] = all_fps f2_dict["fns"] = all_fns return f2_dict def print_f2_dict(d): print("Overall f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(d['f2'], d['precision'], d['recall'])) for k, iou_th in enumerate(np.arange(0.3, 0.85, 0.05)): print(f"IOU {iou_th:.2f}:", end=" ") print("f2: {:.3f}, precision {:.3f}, recall {:.3f}".format(d["f2_" + str(round(iou_th,3))], d["precision_" + str(round(iou_th,3))], d["recall_" + str(round(iou_th,3))])) def get_path(row, params, infer=False): row['old_image_path'] = params['root_dir'] / f'train_images/video_{row.video_id}/{row.video_frame}.jpg' if infer: row['image_path'] = row["old_image_path"] else: row['image_path'] = params['image_dir'] / f'video_{row.video_id}_{row.video_frame}.jpg' row['label_path'] = params['label_dir'] / f'video_{row.video_id}_{row.video_frame}.txt' return row def make_copy(path, params): # TODO: fix split issue data = str(path).split('/') filename = data[-1] video_id = data[-2] new_path = params["image_dir"] / f'{video_id}_{filename}' shutil.copy(path, new_path) return # https://www.kaggle.com/awsaf49/great-barrier-reef-yolov5-train def voc2yolo(image_height, image_width, bboxes): """ voc => [x1, y1, x2, y1] yolo => [xmid, ymid, w, h] (normalized) """ bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int bboxes[..., [0, 2]] = bboxes[..., [0, 2]]/ image_width bboxes[..., [1, 3]] = bboxes[..., [1, 3]]/ image_height w = bboxes[..., 2] - bboxes[..., 0] h = bboxes[..., 3] - bboxes[..., 1] bboxes[..., 0] = bboxes[..., 0] + w/2 bboxes[..., 1] = bboxes[..., 1] + h/2 bboxes[..., 2] = w bboxes[..., 3] = h return bboxes def yolo2voc(image_height, image_width, bboxes): """ yolo => [xmid, ymid, 
w, h] (normalized) voc => [x1, y1, x2, y1] """ bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int bboxes[..., [0, 2]] = bboxes[..., [0, 2]]* image_width bboxes[..., [1, 3]] = bboxes[..., [1, 3]]* image_height bboxes[..., [0, 1]] = bboxes[..., [0, 1]] - bboxes[..., [2, 3]]/2 bboxes[..., [2, 3]] = bboxes[..., [0, 1]] + bboxes[..., [2, 3]] return bboxes def coco2yolo(image_height, image_width, bboxes): """ coco => [xmin, ymin, w, h] yolo => [xmid, ymid, w, h] (normalized) """ bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int # normolizinig bboxes[..., [0, 2]]= bboxes[..., [0, 2]]/ image_width bboxes[..., [1, 3]]= bboxes[..., [1, 3]]/ image_height # converstion (xmin, ymin) => (xmid, ymid) bboxes[..., [0, 1]] = bboxes[..., [0, 1]] + bboxes[..., [2, 3]]/2 return bboxes def yolo2coco(image_height, image_width, bboxes): """ yolo => [xmid, ymid, w, h] (normalized) coco => [xmin, ymin, w, h] """ bboxes = bboxes.copy().astype(float) # otherwise all value will be 0 as voc_pascal dtype is np.int # denormalizing bboxes[..., [0, 2]]= bboxes[..., [0, 2]]* image_width bboxes[..., [1, 3]]= bboxes[..., [1, 3]]* image_height # converstion (xmid, ymid) => (xmin, ymin) bboxes[..., [0, 1]] = bboxes[..., [0, 1]] - bboxes[..., [2, 3]]/2 return bboxes def voc2coco(bboxes, image_height=720, image_width=1280): bboxes = voc2yolo(image_height, image_width, bboxes) bboxes = yolo2coco(image_height, image_width, bboxes) return bboxes def load_image(image_path): return cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB) def plot_one_box(x, img, color=None, label=None, line_thickness=None): # Plots one bounding box on image img tl = line_thickness or round(0.002 * (img.shape[0] + img.shape[1]) / 2) + 1 # line/font thickness color = color or [random.randint(0, 255) for _ in range(3)] c1, c2 = (int(x[0]), int(x[1])), (int(x[2]), int(x[3])) cv2.rectangle(img, c1, c2, color, thickness=tl, lineType=cv2.LINE_AA) if label: tf = max(tl - 1, 1) # font thickness t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0] c2 = c1[0] + t_size[0], c1[1] - t_size[1] - 3 cv2.rectangle(img, c1, c2, color, -1, cv2.LINE_AA) # filled cv2.putText(img, label, (c1[0], c1[1] - 2), 0, tl / 3, [225, 255, 255], thickness=tf, lineType=cv2.LINE_AA) def draw_bboxes(img, bboxes, classes, colors = None, show_classes = None, bbox_format = 'yolo', class_name = False, line_thickness = 1): image = img.copy() show_classes = classes if show_classes is None else show_classes colors = (0, 255 ,0) if colors is None else colors if bbox_format == 'yolo': for idx in range(len(bboxes)): bbox = bboxes[idx] cls = classes[idx] color = colors[idx] if cls in show_classes: x1 = round(float(bbox[0])*image.shape[1]) y1 = round(float(bbox[1])*image.shape[0]) w = round(float(bbox[2])*image.shape[1]/2) #w/2 h = round(float(bbox[3])*image.shape[0]/2) voc_bbox = (x1-w, y1-h, x1+w, y1+h) plot_one_box(voc_bbox, image, color = color, label = cls if class_name else str(get_label(cls)), line_thickness = line_thickness) elif bbox_format == 'coco': for idx in range(len(bboxes)): bbox = bboxes[idx] cls = classes[idx] color = colors[idx] if cls in show_classes: x1 = int(round(bbox[0])) y1 = int(round(bbox[1])) w = int(round(bbox[2])) h = int(round(bbox[3])) voc_bbox = (x1, y1, x1+w, y1+h) plot_one_box(voc_bbox, image, color = color, label = cls, line_thickness = line_thickness) elif bbox_format == 'voc_pascal': for idx in range(len(bboxes)): bbox = bboxes[idx] cls = classes[idx] 
cls_id = class_ids[idx] color = colors[cls_id] if type(colors) is list else colors if cls in show_classes: x1 = int(round(bbox[0])) y1 = int(round(bbox[1])) x2 = int(round(bbox[2])) y2 = int(round(bbox[3])) voc_bbox = (x1, y1, x2, y2) plot_one_box(voc_bbox, image, color = color, label = cls if class_name else str(cls_id), line_thickness = line_thickness) else: raise ValueError('wrong bbox format') return image def get_bbox(annots): bboxes = [list(annot.values()) for annot in annots] return bboxes def get_imgsize(row): row['width'], row['height'] = imagesize.get(row['image_path']) return row # https://www.kaggle.com/diegoalejogm/great-barrier-reefs-eda-with-animations def create_animation(ims): fig = plt.figure(figsize=(16, 12)) plt.axis('off') im = plt.imshow(ims[0]) def animate_func(i): im.set_array(ims[i]) return [im] return animation.FuncAnimation(fig, animate_func, frames = len(ims), interval = 1000//12) # https://github.com/rbgirshick/fast-rcnn/blob/master/lib/utils/nms.py def nms(dets, thresh): x1 = dets[:, 0] y1 = dets[:, 1] x2 = dets[:, 2] y2 = dets[:, 3] scores = dets[:, 4] areas = (x2 - x1 + 1) * (y2 - y1 + 1) order = scores.argsort()[::-1] keep = [] while order.size > 0: i = order[0] keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h ovr = inter / (areas[i] + areas[order[1:]] - inter) inds = np.where(ovr <= thresh)[0] order = order[inds + 1] return keep # https://github.com/DocF/Soft-NMS/blob/master/soft_nms.py def py_cpu_softnms(dets, sc, Nt=0.3, sigma=0.5, thresh=0.001, method=2): """ py_cpu_softnms :param dets: boexs 坐标矩阵 format [y1, x1, y2, x2] :param sc: 每个 boxes 对应的分数 :param Nt: iou 交叠门限 :param sigma: 使用 gaussian 函数的方差 :param thresh: 最后的分数门限 :param method: 使用的方法 :return: 留下的 boxes 的 index """ # indexes concatenate boxes with the last column N = dets.shape[0] indexes = np.array([np.arange(N)]) dets = np.concatenate((dets, indexes.T), axis=1) # the order of boxes coordinate is [y1,x1,y2,x2] y1 = dets[:, 0] x1 = dets[:, 1] y2 = dets[:, 2] x2 = dets[:, 3] scores = sc areas = (x2 - x1 + 1) * (y2 - y1 + 1) for i in range(N): # intermediate parameters for later parameters exchange tBD = dets[i, :].copy() tscore = scores[i].copy() tarea = areas[i].copy() pos = i + 1 # if i != N-1: maxscore = np.max(scores[pos:], axis=0) maxpos = np.argmax(scores[pos:], axis=0) else: maxscore = scores[-1] maxpos = 0 if tscore < maxscore: dets[i, :] = dets[maxpos + i + 1, :] dets[maxpos + i + 1, :] = tBD tBD = dets[i, :] scores[i] = scores[maxpos + i + 1] scores[maxpos + i + 1] = tscore tscore = scores[i] areas[i] = areas[maxpos + i + 1] areas[maxpos + i + 1] = tarea tarea = areas[i] # IoU calculate xx1 = np.maximum(dets[i, 1], dets[pos:, 1]) yy1 = np.maximum(dets[i, 0], dets[pos:, 0]) xx2 = np.minimum(dets[i, 3], dets[pos:, 3]) yy2 = np.minimum(dets[i, 2], dets[pos:, 2]) w = np.maximum(0.0, xx2 - xx1 + 1) h = np.maximum(0.0, yy2 - yy1 + 1) inter = w * h ovr = inter / (areas[i] + areas[pos:] - inter) # Three methods: 1.linear 2.gaussian 3.original NMS if method == 1: # linear weight = np.ones(ovr.shape) weight[ovr > Nt] = weight[ovr > Nt] - ovr[ovr > Nt] elif method == 2: # gaussian weight = np.exp(-(ovr * ovr) / sigma) else: # original NMS weight = np.ones(ovr.shape) weight[ovr > Nt] = 0 scores[pos:] = weight * scores[pos:] # select the boxes and keep the corresponding indexes inds = dets[:, 
4][scores > thresh] keep = inds.astype(int) return keep def seed_torch(seed=42): random.seed(seed) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) torch.backends.cudnn.deterministic = True def create_logger(filename, filemode='a'): # better logging file - output the in terminal as well file_handler = logging.FileHandler(filename=filename, mode=filemode) stdout_handler = logging.StreamHandler(sys.stdout) handlers = [file_handler, stdout_handler] formatter = "%(asctime)s %(levelname)s: %(message)s" datefmt = "%m/%d/%Y %I:%M:%S %p" logging.basicConfig(format=formatter, datefmt=datefmt, level=logging.DEBUG, handlers=handlers) return def save_pickle(obj, folder_path): pickle.dump(obj, open(folder_path, 'wb'), pickle.HIGHEST_PROTOCOL) def load_pickle(folder_path): return pickle.load(open(folder_path, 'rb')) def save_yaml(obj, folder_path): obj2 = obj.copy() for key, value in obj2.items(): if isinstance(value, Path): obj2[key] = str(value.resolve()) else: obj2[key] = value with open(folder_path, 'w') as file: yaml.dump(obj2, file) def load_yaml(folder_path): with open(folder_path) as file: data = yaml.load(file, Loader=yaml.FullLoader) return data def load_model(params): try: model = torch.hub.load(params['repo'], 'custom', path=params['ckpt_path'], source='local', force_reload=True) # local repo except: print("torch.hub.load failed, try torch.load") model = torch.load(params['ckpt_path']) model.conf = params['conf'] # NMS confidence threshold model.iou = params['iou'] # NMS IoU threshold model.classes = None # (optional list) filter by class, i.e. = [0, 15, 16] for persons, cats and dogs model.multi_label = False # NMS multiple labels per box model.max_det = 50 # maximum number of detections per image return model def predict(model, img, size=768, augment=False, use_sahi=False): if use_sahi: from sahi.predict import get_sliced_prediction results = get_sliced_prediction( img, model, slice_height = 512, slice_width = 512, overlap_height_ratio = 0.2, overlap_width_ratio = 0.2 ) preds = results.object_prediction_list bboxes = np.array([pred.bbox.to_voc_bbox() for pred in preds]) else: results = model(img, size=size, augment=augment) # custom inference size preds = results.pandas().xyxy[0] bboxes = preds[['xmin','ymin','xmax','ymax']].values if len(bboxes): height, width = img.shape[:2] bboxes = voc2coco(bboxes,height,width).astype(int) if use_sahi: confs = np.array([pred.score.value for pred in preds]) else: confs = preds.confidence.values return bboxes, confs else: return np.array([]),[] def format_prediction(bboxes, confs): annot = '' if len(bboxes)>0: for idx in range(len(bboxes)): xmin, ymin, w, h = bboxes[idx] conf = confs[idx] annot += f'{conf} {xmin} {ymin} {w} {h}' annot +=' ' annot = annot.strip(' ') return annot def show_img(img, bboxes, confs, colors, bbox_format='yolo'): labels = [str(round(conf,2)) for conf in confs] img = draw_bboxes(img = img, bboxes = bboxes, classes = labels, class_name = True, colors = colors, bbox_format = bbox_format, line_thickness = 2) return Image.fromarray(img) def write_hyp(params): with open(params["hyp_file"], mode="w") as f: for key, val in params["hyp_param"].items(): f.write(f"{key}: {val}\n") def class2dict(f): return dict((name, getattr(f, name)) for name in dir(f) if not name.startswith('__')) def upload(params): data_version = "-".join(params["exp_name"].split("_")) if os.path.exists(params["output_dir"] / "wandb"): 
shutil.move(str(params["output_dir"] / "wandb"), str(params["output_dir"].parent / f"{params['exp_name']}_wandb/") ) with open(params["output_dir"] / "dataset-metadata.json", "w") as f: f.write("{\n") f.write(f""" "title": "{data_version}",\n""") f.write(f""" "id": "vincentwang25/{data_version}",\n""") f.write(""" "licenses": [\n""") f.write(""" {\n""") f.write(""" "name": "CC0-1.0"\n""") f.write(""" }\n""") f.write(""" ]\n""") f.write("""}""") subprocess.call(["kaggle", "datasets", "create", "-p", str(params["output_dir"]), "-r", "zip"]) def coco(df): annotion_id = 0 images = [] annotations = [] categories = [{'id': 0, 'name': 'cots'}] for i, row in df.iterrows(): images.append({ "id": i, "file_name": f"video_{row['video_id']}_{row['video_frame']}.jpg", "height": 720, "width": 1280, }) for bbox in row['annotations']: annotations.append({ "id": annotion_id, "image_id": i, "category_id": 0, "bbox": list(bbox.values()), "area": bbox['width'] * bbox['height'], "segmentation": [], "iscrowd": 0 }) annotion_id += 1 json_file = {'categories':categories, 'images':images, 'annotations':annotations} return json_file def mmcfg_from_param(params): from mmcv import Config # model cfg = Config.fromfile(params['hyp_param']['base_file']) cfg.work_dir = str(params['output_dir']) cfg.seed = 2022 cfg.gpu_ids = range(2) cfg.load_from = params['hyp_param']['load_from'] if params['hyp_param']['model_type'] == 'faster_rcnn': cfg.model.roi_head.bbox_head.num_classes = 1 cfg.model.roi_head.bbox_head.loss_bbox.type = params['hyp_param']['loss_fnc'] cfg.model.rpn_head.loss_bbox.type = params['hyp_param']['loss_fnc'] if params['hyp_param']['loss_fnc'] == "GIoULoss": cfg.model.roi_head.bbox_head.reg_decoded_bbox = True cfg.model.rpn_head.reg_decoded_bbox = True cfg.model.train_cfg.rpn_proposal.nms.type = params['hyp_param']['nms'] cfg.model.test_cfg.rpn.nms.type = params['hyp_param']['nms'] cfg.model.test_cfg.rcnn.nms.type = params['hyp_param']['nms'] cfg.model.train_cfg.rcnn.sampler.type = params['hyp_param']['sampler'] elif params['hyp_param']['model_type'] == 'swin': pass # already changed elif params['hyp_param']['model_type'] == 'vfnet': cfg.model.bbox_head.num_classes = 1 if params['hyp_param'].get("optimizer", cfg.optimizer.type) == "AdamW": cfg.optimizer = dict( type="AdamW", lr=params['hyp_param'].get("lr", cfg.optimizer.lr), weight_decay=params['hyp_param'].get( "weight_decay", cfg.optimizer.weight_decay ), ) else: cfg.optimizer.lr = params['hyp_param'].get("lr", cfg.optimizer.lr) cfg.optimizer.weight_decay = params['hyp_param'].get( "weight_decay", cfg.optimizer.weight_decay) cfg.lr_config = dict( policy='CosineAnnealing', by_epoch=False, warmup='linear', warmup_iters= 1000, warmup_ratio= 1/10, min_lr=1e-07) # data cfg = add_data_pipeline(cfg, params) cfg.runner.max_epochs = params['epochs'] cfg.evaluation.start = 1 cfg.evaluation.interval = 1 cfg.evaluation.save_best='auto' cfg.evaluation.metric ='bbox' cfg.checkpoint_config.interval = -1 cfg.log_config.interval = 500 cfg.log_config.with_step = True cfg.log_config.by_epoch = True cfg.log_config.hooks =[dict(type='TextLoggerHook'), dict(type='TensorboardLoggerHook')] cfg.workflow = [('train',1)] logging.info(str(cfg)) return cfg def add_data_pipeline(cfg, params): cfg.dataset_type = 'COCODataset' cfg.classes = ('cots',) cfg.data_root = str(params['data_path'].resolve()) params['aug_param']['img_scale'] = (params['img_size'], params['img_size']) cfg.img_scale = params['aug_param']['img_scale'] cfg.dataset_type = 'CocoDataset' cfg.filter_empty_gt = False 
cfg.data.filter_empty_gt = False cfg.data.train.type = cfg.dataset_type cfg.data.train.classes = cfg.classes cfg.data.train.ann_file = str(params["cfg_dir"] / 'annotations_train.json') cfg.data.train.img_prefix = cfg.data_root + '/images/' cfg.data.train.filter_empty_gt = False cfg.data.test.type = cfg.dataset_type cfg.data.test.classes = cfg.classes cfg.data.test.ann_file = str(params["cfg_dir"] / 'annotations_valid.json') cfg.data.test.img_prefix = cfg.data_root + '/images/' cfg.data.test.filter_empty_gt = False cfg.data.val.type = cfg.dataset_type cfg.data.val.classes = cfg.classes cfg.data.val.ann_file = str(params["cfg_dir"] / 'annotations_valid.json') cfg.data.val.img_prefix = cfg.data_root + '/images/' cfg.data.val.filter_empty_gt = False cfg.data.samples_per_gpu = params['batch'] // len(cfg.gpu_ids) cfg.data.workers_per_gpu = params['workers'] // len(cfg.gpu_ids) # train pipeline albu_train_transforms = get_albu_transforms(params['aug_param'], is_train=True) if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']: train_pipeline = [] else: train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True)] if params['aug_param']['use_mosaic']: train_pipeline.append(dict(type='Mosaic', img_scale=cfg.img_scale, pad_val=114.0)) else: train_pipeline.append(dict(type='Resize', img_scale=cfg.img_scale, keep_ratio=False)) train_pipeline = train_pipeline +[ dict(type='Pad', size_divisor=32), dict( type='Albu', transforms=albu_train_transforms, bbox_params=dict( type='BboxParams', format='pascal_voc', label_fields=['gt_labels'], min_visibility=0.0, filter_lost_elements=True), keymap={ 'img': 'image', 'gt_bboxes': 'bboxes' }, update_pad_shape=False, skip_img_without_anno=False )] if params['aug_param']['use_mixup']: train_pipeline.append(dict(type='MixUp', img_scale=cfg.img_scale, ratio_range=(0.8, 1.6), pad_val=114.0)) train_pipeline = train_pipeline +\ [ dict(type='Normalize', **cfg.img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'], meta_keys=('filename', 'ori_filename', 'ori_shape', 'img_shape', 'pad_shape', 'scale_factor', 'img_norm_cfg')), ] val_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=cfg.img_scale, flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=[cfg.img_scale], flip=[False], transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Pad', size_divisor=32), dict(type='RandomFlip', direction='horizontal'), dict(type='Normalize', **cfg.img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] cfg.train_pipeline = train_pipeline cfg.val_pipeline = val_pipeline cfg.test_pipeline = test_pipeline if params['aug_param']['use_mixup'] or params['aug_param']['use_mosaic']: cfg.train_dataset = dict( type='MultiImageMixDataset', dataset=dict( type=cfg.dataset_type, classes=cfg.classes, ann_file=str(params["cfg_dir"] / 'annotations_train.json'), img_prefix=cfg.data_root + '/images/', pipeline=[ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True) ], filter_empty_gt=False, ), pipeline=cfg.train_pipeline ) cfg.data.train = cfg.train_dataset else: cfg.data.train.pipeline = cfg.train_pipeline 
cfg.data.val.pipeline = cfg.val_pipeline cfg.data.test.pipeline = cfg.test_pipeline return cfg def find_ckp(output_dir): return glob(output_dir / "best*.pth")[0]
[]
[]
[ "PYTHONHASHSEED" ]
[]
["PYTHONHASHSEED"]
python
1
0
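Besides its PYTHONHASHSEED seeding, the util.py record above contains a vectorised calc_iou that converts xywh boxes to corner form and adds a 1e-9 smoothing term before dividing. As a sanity check on that arithmetic, here is a single-pair Go sketch of the same computation; it is illustrative only and deliberately skips the NumPy broadcasting of the original.

package main

import (
	"fmt"
	"math"
)

// iouXYWH computes intersection-over-union for two boxes given as [x, y, width, height],
// using the same 1e-9 smoothing term as the NumPy implementation above.
func iouXYWH(a, b [4]float64) float64 {
	const eps = 1e-9
	ax1, ay1, ax2, ay2 := a[0], a[1], a[0]+a[2], a[1]+a[3]
	bx1, by1, bx2, by2 := b[0], b[1], b[0]+b[2], b[1]+b[3]

	xA, yA := math.Max(ax1, bx1), math.Max(ay1, by1)
	xB, yB := math.Min(ax2, bx2), math.Min(ay2, by2)

	inter := math.Max(xB-xA+eps, 0) * math.Max(yB-yA+eps, 0)
	areaA := (ax2 - ax1 + eps) * (ay2 - ay1 + eps)
	areaB := (bx2 - bx1 + eps) * (by2 - by1 + eps)
	return inter / (areaA + areaB - inter)
}

func main() {
	// Two 10x10 boxes overlapping by a 5x5 patch: expected IoU is roughly 25/175 ≈ 0.143.
	fmt.Printf("IoU: %.3f\n", iouXYWH([4]float64{0, 0, 10, 10}, [4]float64{5, 5, 10, 10}))
}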
pkg/integration/utils.go
package integration import ( "archive/tar" "bytes" "encoding/json" "errors" "fmt" "io" "os" "os/exec" "path/filepath" "reflect" "strings" "syscall" "time" "github.com/docker/docker/pkg/stringutils" ) // GetExitCode returns the ExitStatus of the specified error if its type is // exec.ExitError, returns 0 and an error otherwise. func GetExitCode(err error) (int, error) { exitCode := 0 if exiterr, ok := err.(*exec.ExitError); ok { if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok { return procExit.ExitStatus(), nil } } return exitCode, fmt.Errorf("failed to get exit code") } // ProcessExitCode process the specified error and returns the exit status code // if the error was of type exec.ExitError, returns nothing otherwise. func ProcessExitCode(err error) (exitCode int) { if err != nil { var exiterr error if exitCode, exiterr = GetExitCode(err); exiterr != nil { // TODO: Fix this so we check the error's text. // we've failed to retrieve exit code, so we set it to 127 exitCode = 127 } } return } // IsKilled process the specified error and returns whether the process was killed or not. func IsKilled(err error) bool { if exitErr, ok := err.(*exec.ExitError); ok { status, ok := exitErr.Sys().(syscall.WaitStatus) if !ok { return false } // status.ExitStatus() is required on Windows because it does not // implement Signal() nor Signaled(). Just check it had a bad exit // status could mean it was killed (and in tests we do kill) return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0 } return false } // RunCommandWithOutput runs the specified command and returns the combined output (stdout/stderr) // with the exitCode different from 0 and the error if something bad happened func RunCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { exitCode = 0 out, err := cmd.CombinedOutput() exitCode = ProcessExitCode(err) output = string(out) return } // RunCommandWithStdoutStderr runs the specified command and returns stdout and stderr separately // with the exitCode different from 0 and the error if something bad happened func RunCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) { var ( stderrBuffer, stdoutBuffer bytes.Buffer ) exitCode = 0 cmd.Stderr = &stderrBuffer cmd.Stdout = &stdoutBuffer err = cmd.Run() exitCode = ProcessExitCode(err) stdout = stdoutBuffer.String() stderr = stderrBuffer.String() return } // RunCommandWithOutputForDuration runs the specified command "timeboxed" by the specified duration. // If the process is still running when the timebox is finished, the process will be killed and . // It will returns the output with the exitCode different from 0 and the error if something bad happened // and a boolean whether it has been killed or not. func RunCommandWithOutputForDuration(cmd *exec.Cmd, duration time.Duration) (output string, exitCode int, timedOut bool, err error) { var outputBuffer bytes.Buffer if cmd.Stdout != nil { err = errors.New("cmd.Stdout already set") return } cmd.Stdout = &outputBuffer if cmd.Stderr != nil { err = errors.New("cmd.Stderr already set") return } cmd.Stderr = &outputBuffer // Start the command in the main thread.. 
err = cmd.Start() if err != nil { err = fmt.Errorf("Fail to start command %v : %v", cmd, err) } type exitInfo struct { exitErr error exitCode int } done := make(chan exitInfo, 1) go func() { // And wait for it to exit in the goroutine :) info := exitInfo{} info.exitErr = cmd.Wait() info.exitCode = ProcessExitCode(info.exitErr) done <- info }() select { case <-time.After(duration): killErr := cmd.Process.Kill() if killErr != nil { fmt.Printf("failed to kill (pid=%d): %v\n", cmd.Process.Pid, killErr) } timedOut = true case info := <-done: err = info.exitErr exitCode = info.exitCode } output = outputBuffer.String() return } var errCmdTimeout = fmt.Errorf("command timed out") // RunCommandWithOutputAndTimeout runs the specified command "timeboxed" by the specified duration. // It returns the output with the exitCode different from 0 and the error if something bad happened or // if the process timed out (and has been killed). func RunCommandWithOutputAndTimeout(cmd *exec.Cmd, timeout time.Duration) (output string, exitCode int, err error) { var timedOut bool output, exitCode, timedOut, err = RunCommandWithOutputForDuration(cmd, timeout) if timedOut { err = errCmdTimeout } return } // RunCommand runs the specified command and returns the exitCode different from 0 // and the error if something bad happened. func RunCommand(cmd *exec.Cmd) (exitCode int, err error) { exitCode = 0 err = cmd.Run() exitCode = ProcessExitCode(err) return } // RunCommandPipelineWithOutput runs the array of commands with the output // of each pipelined with the following (like cmd1 | cmd2 | cmd3 would do). // It returns the final output, the exitCode different from 0 and the error // if something bad happened. func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { if len(cmds) < 2 { return "", 0, errors.New("pipeline does not have multiple cmds") } // connect stdin of each cmd to stdout pipe of previous cmd for i, cmd := range cmds { if i > 0 { prevCmd := cmds[i-1] cmd.Stdin, err = prevCmd.StdoutPipe() if err != nil { return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err) } } } // start all cmds except the last for _, cmd := range cmds[:len(cmds)-1] { if err = cmd.Start(); err != nil { return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err) } } defer func() { // wait all cmds except the last to release their resources for _, cmd := range cmds[:len(cmds)-1] { cmd.Wait() } }() // wait on last cmd return RunCommandWithOutput(cmds[len(cmds)-1]) } // UnmarshalJSON deserialize a JSON in the given interface. func UnmarshalJSON(data []byte, result interface{}) error { if err := json.Unmarshal(data, result); err != nil { return err } return nil } // ConvertSliceOfStringsToMap converts a slices of string in a map // with the strings as key and an empty string as values. func ConvertSliceOfStringsToMap(input []string) map[string]struct{} { output := make(map[string]struct{}) for _, v := range input { output[v] = struct{}{} } return output } // CompareDirectoryEntries compares two sets of FileInfo (usually taken from a directory) // and returns an error if different. 
func CompareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { var ( e1Entries = make(map[string]struct{}) e2Entries = make(map[string]struct{}) ) for _, e := range e1 { e1Entries[e.Name()] = struct{}{} } for _, e := range e2 { e2Entries[e.Name()] = struct{}{} } if !reflect.DeepEqual(e1Entries, e2Entries) { return fmt.Errorf("entries differ") } return nil } // ListTar lists the entries of a tar. func ListTar(f io.Reader) ([]string, error) { tr := tar.NewReader(f) var entries []string for { th, err := tr.Next() if err == io.EOF { // end of tar archive return entries, nil } if err != nil { return entries, err } entries = append(entries, th.Name) } } // RandomTmpDirPath provides a temporary path with rand string appended. // does not create or checks if it exists. func RandomTmpDirPath(s string, platform string) string { tmp := "/tmp" if platform == "windows" { tmp = os.Getenv("TEMP") } path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10))) if platform == "windows" { return filepath.FromSlash(path) // Using \ } return filepath.ToSlash(path) // Using / } // ConsumeWithSpeed reads chunkSize bytes from reader after every interval. // Returns total read bytes. func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { buffer := make([]byte, chunkSize) for { select { case <-stop: return default: var readBytes int readBytes, err = reader.Read(buffer) n += readBytes if err != nil { if err == io.EOF { err = nil } return } time.Sleep(interval) } } } // ParseCgroupPaths arses 'procCgroupData', which is output of '/proc/<pid>/cgroup', and returns // a map which cgroup name as key and path as value. func ParseCgroupPaths(procCgroupData string) map[string]string { cgroupPaths := map[string]string{} for _, line := range strings.Split(procCgroupData, "\n") { parts := strings.Split(line, ":") if len(parts) != 3 { continue } cgroupPaths[parts[1]] = parts[2] } return cgroupPaths } // ChannelBuffer holds a chan of byte array that can be populate in a goroutine. type ChannelBuffer struct { C chan []byte } // Write implements Writer. func (c *ChannelBuffer) Write(b []byte) (int, error) { c.C <- b return len(b), nil } // Close closes the go channel. func (c *ChannelBuffer) Close() error { close(c.C) return nil } // ReadTimeout reads the content of the channel in the specified byte array with // the specified duration as timeout. func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) { select { case b := <-c.C: return copy(p[0:], b), nil case <-time.After(n): return -1, fmt.Errorf("timeout reading from channel") } } // RunAtDifferentDate runs the specifed function with the given time. // It changes the date of the system, which can led to weird behaviors. func RunAtDifferentDate(date time.Time, block func()) { // Layout for date. MMDDhhmmYYYY const timeLayout = "010203042006" // Ensure we bring time back to now now := time.Now().Format(timeLayout) dateReset := exec.Command("date", now) defer RunCommand(dateReset) dateChange := exec.Command("date", date.Format(timeLayout)) RunCommand(dateChange) block() return }
[ "\"TEMP\"" ]
[]
[ "TEMP" ]
[]
["TEMP"]
go
1
0
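RandomTmpDirPath in the record above picks /tmp on Unix-like platforms and the TEMP environment variable on Windows, then appends a random suffix and normalises the slashes. The sketch below isolates that platform/TEMP decision; the timestamp suffix stands in for the repository's stringutils random string purely so the example stays self-contained, and is not what the original helper uses.

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"time"
)

// tmpDirFor returns a platform-appropriate scratch path, mirroring the TEMP lookup above.
// The nanosecond timestamp is an illustrative substitute for the random alpha suffix.
func tmpDirFor(name, platform string) string {
	base := "/tmp"
	if platform == "windows" {
		base = os.Getenv("TEMP")
	}
	path := filepath.Join(base, fmt.Sprintf("%s.%d", name, time.Now().UnixNano()))
	if platform == "windows" {
		return filepath.FromSlash(path) // Using \
	}
	return filepath.ToSlash(path) // Using /
}

func main() {
	fmt.Println(tmpDirFor("integration-test", "linux"))
}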
qa/rpc-tests/util.py
# Copyright (c) 2014 The Bitcoin Core developers # Copyright (c) 2014-2015 The Dash developers # Copyright (c) 2015-2017 The Krait developers # Distributed under the MIT/X11 software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Helpful routines for regression testing # # Add python-bitcoinrpc to module search path: import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "python-bitcoinrpc")) from decimal import Decimal, ROUND_DOWN import json import random import shutil import subprocess import time import re from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException from util import * def p2p_port(n): return 11000 + n + os.getpid()%999 def rpc_port(n): return 12000 + n + os.getpid()%999 def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def sync_blocks(rpc_connections): """ Wait until everybody has the same block count """ while True: counts = [ x.getblockcount() for x in rpc_connections ] if counts == [ counts[0] ]*len(counts): break time.sleep(1) def sync_mempools(rpc_connections): """ Wait until everybody has the same transactions in their memory pools """ while True: pool = set(rpc_connections[0].getrawmempool()) num_match = 1 for i in range(1, len(rpc_connections)): if set(rpc_connections[i].getrawmempool()) == pool: num_match = num_match+1 if num_match == len(rpc_connections): break time.sleep(1) bitcoind_processes = {} def initialize_datadir(dirname, n): datadir = os.path.join(dirname, "node"+str(n)) if not os.path.isdir(datadir): os.makedirs(datadir) with open(os.path.join(datadir, "krait.conf"), 'w') as f: f.write("regtest=1\n"); f.write("rpcuser=rt\n"); f.write("rpcpassword=rt\n"); f.write("port="+str(p2p_port(n))+"\n"); f.write("rpcport="+str(rpc_port(n))+"\n"); return datadir def initialize_chain(test_dir): """ Create (or copy from cache) a 200-block-long chain and 4 wallets. kraitd and krait-cli must be in search path. """ if not os.path.isdir(os.path.join("cache", "node0")): devnull = open("/dev/null", "w+") # Create cache directories, run kraitd: for i in range(4): datadir=initialize_datadir("cache", i) args = [ os.getenv("BITCOIND", "kraitd"), "-keypool=1", "-datadir="+datadir, "-discover=0" ] if i > 0: args.append("-connect=127.0.0.1:"+str(p2p_port(0))) bitcoind_processes[i] = subprocess.Popen(args) subprocess.check_call([ os.getenv("BITCOINCLI", "krait-cli"), "-datadir="+datadir, "-rpcwait", "getblockcount"], stdout=devnull) devnull.close() rpcs = [] for i in range(4): try: url = "http://rt:[email protected]:%d"%(rpc_port(i),) rpcs.append(AuthServiceProxy(url)) except: sys.stderr.write("Error connecting to "+url+"\n") sys.exit(1) # Create a 200-block-long chain; each of the 4 nodes # gets 25 mature blocks and 25 immature. 
# blocks are created with timestamps 10 minutes apart, starting # at 1 Jan 2014 block_time = 1388534400 for i in range(2): for peer in range(4): for j in range(25): set_node_times(rpcs, block_time) rpcs[peer].setgenerate(True, 1) block_time += 10*60 # Must sync before next peer starts generating blocks sync_blocks(rpcs) # Shut them down, and clean up cache directories: stop_nodes(rpcs) wait_bitcoinds() for i in range(4): os.remove(log_filename("cache", i, "debug.log")) os.remove(log_filename("cache", i, "db.log")) os.remove(log_filename("cache", i, "peers.dat")) os.remove(log_filename("cache", i, "fee_estimates.dat")) for i in range(4): from_dir = os.path.join("cache", "node"+str(i)) to_dir = os.path.join(test_dir, "node"+str(i)) shutil.copytree(from_dir, to_dir) initialize_datadir(test_dir, i) # Overwrite port/rpcport in krait.conf def initialize_chain_clean(test_dir, num_nodes): """ Create an empty blockchain and num_nodes wallets. Useful if a test case wants complete control over initialization. """ for i in range(num_nodes): datadir=initialize_datadir(test_dir, i) def _rpchost_to_args(rpchost): '''Convert optional IP:port spec to rpcconnect/rpcport args''' if rpchost is None: return [] match = re.match('(\[[0-9a-fA-f:]+\]|[^:]+)(?::([0-9]+))?$', rpchost) if not match: raise ValueError('Invalid RPC host spec ' + rpchost) rpcconnect = match.group(1) rpcport = match.group(2) if rpcconnect.startswith('['): # remove IPv6 [...] wrapping rpcconnect = rpcconnect[1:-1] rv = ['-rpcconnect=' + rpcconnect] if rpcport: rv += ['-rpcport=' + rpcport] return rv def start_node(i, dirname, extra_args=None, rpchost=None): """ Start a kraitd and return RPC connection to it """ datadir = os.path.join(dirname, "node"+str(i)) args = [ os.getenv("BITCOIND", "kraitd"), "-datadir="+datadir, "-keypool=1", "-discover=0", "-rest" ] if extra_args is not None: args.extend(extra_args) bitcoind_processes[i] = subprocess.Popen(args) devnull = open("/dev/null", "w+") subprocess.check_call([ os.getenv("BITCOINCLI", "krait-cli"), "-datadir="+datadir] + _rpchost_to_args(rpchost) + ["-rpcwait", "getblockcount"], stdout=devnull) devnull.close() url = "http://rt:rt@%s:%d" % (rpchost or '127.0.0.1', rpc_port(i)) proxy = AuthServiceProxy(url) proxy.url = url # store URL on proxy for info return proxy def start_nodes(num_nodes, dirname, extra_args=None, rpchost=None): """ Start multiple kraitds, return RPC connections to them """ if extra_args is None: extra_args = [ None for i in range(num_nodes) ] return [ start_node(i, dirname, extra_args[i], rpchost) for i in range(num_nodes) ] def log_filename(dirname, n_node, logname): return os.path.join(dirname, "node"+str(n_node), "regtest", logname) def stop_node(node, i): node.stop() bitcoind_processes[i].wait() del bitcoind_processes[i] def stop_nodes(nodes): for node in nodes: node.stop() del nodes[:] # Emptying array closes connections as a side effect def set_node_times(nodes, t): for node in nodes: node.setmocktime(t) def wait_bitcoinds(): # Wait for all bitcoinds to cleanly exit for bitcoind in bitcoind_processes.values(): bitcoind.wait() bitcoind_processes.clear() def connect_nodes(from_connection, node_num): ip_port = "127.0.0.1:"+str(p2p_port(node_num)) from_connection.addnode(ip_port, "onetry") # poll until version handshake complete to avoid race conditions # with transaction relaying while any(peer['version'] == 0 for peer in from_connection.getpeerinfo()): time.sleep(0.1) def connect_nodes_bi(nodes, a, b): connect_nodes(nodes[a], b) connect_nodes(nodes[b], a) def 
find_output(node, txid, amount): """ Return index to output of txid with value amount Raises exception if there is none. """ txdata = node.getrawtransaction(txid, 1) for i in range(len(txdata["vout"])): if txdata["vout"][i]["value"] == amount: return i raise RuntimeError("find_output txid %s : %s not found"%(txid,str(amount))) def gather_inputs(from_node, amount_needed, confirmations_required=1): """ Return a random set of unspent txouts that are enough to pay amount_needed """ assert(confirmations_required >=0) utxo = from_node.listunspent(confirmations_required) random.shuffle(utxo) inputs = [] total_in = Decimal("0.00000000") while total_in < amount_needed and len(utxo) > 0: t = utxo.pop() total_in += t["amount"] inputs.append({ "txid" : t["txid"], "vout" : t["vout"], "address" : t["address"] } ) if total_in < amount_needed: raise RuntimeError("Insufficient funds: need %d, have %d"%(amount_needed, total_in)) return (total_in, inputs) def make_change(from_node, amount_in, amount_out, fee): """ Create change output(s), return them """ outputs = {} amount = amount_out+fee change = amount_in - amount if change > amount*2: # Create an extra change output to break up big inputs change_address = from_node.getnewaddress() # Split change in two, being careful of rounding: outputs[change_address] = Decimal(change/2).quantize(Decimal('0.00000001'), rounding=ROUND_DOWN) change = amount_in - amount - outputs[change_address] if change > 0: outputs[from_node.getnewaddress()] = change return outputs def send_zeropri_transaction(from_node, to_node, amount, fee): """ Create&broadcast a zero-priority transaction. Returns (txid, hex-encoded-txdata) Ensures transaction is zero-priority by first creating a send-to-self, then using it's output """ # Create a send-to-self with confirmed inputs: self_address = from_node.getnewaddress() (total_in, inputs) = gather_inputs(from_node, amount+fee*2) outputs = make_change(from_node, total_in, amount+fee, fee) outputs[self_address] = float(amount+fee) self_rawtx = from_node.createrawtransaction(inputs, outputs) self_signresult = from_node.signrawtransaction(self_rawtx) self_txid = from_node.sendrawtransaction(self_signresult["hex"], True) vout = find_output(from_node, self_txid, amount+fee) # Now immediately spend the output to create a 1-input, 1-output # zero-priority transaction: inputs = [ { "txid" : self_txid, "vout" : vout } ] outputs = { to_node.getnewaddress() : float(amount) } rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"]) def random_zeropri_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random zero-priority transaction. Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment*random.randint(0,fee_variants) (txid, txhex) = send_zeropri_transaction(from_node, to_node, amount, fee) return (txid, txhex, fee) def random_transaction(nodes, amount, min_fee, fee_increment, fee_variants): """ Create a random transaction. 
Returns (txid, hex-encoded-transaction-data, fee) """ from_node = random.choice(nodes) to_node = random.choice(nodes) fee = min_fee + fee_increment*random.randint(0,fee_variants) (total_in, inputs) = gather_inputs(from_node, amount+fee) outputs = make_change(from_node, total_in, amount, fee) outputs[to_node.getnewaddress()] = float(amount) rawtx = from_node.createrawtransaction(inputs, outputs) signresult = from_node.signrawtransaction(rawtx) txid = from_node.sendrawtransaction(signresult["hex"], True) return (txid, signresult["hex"], fee) def assert_equal(thing1, thing2): if thing1 != thing2: raise AssertionError("%s != %s"%(str(thing1),str(thing2))) def assert_greater_than(thing1, thing2): if thing1 <= thing2: raise AssertionError("%s <= %s"%(str(thing1),str(thing2))) def assert_raises(exc, fun, *args, **kwds): try: fun(*args, **kwds) except exc: pass except Exception as e: raise AssertionError("Unexpected exception raised: "+type(e).__name__) else: raise AssertionError("No exception raised")
[]
[]
[ "BITCOINCLI", "BITCOIND" ]
[]
["BITCOINCLI", "BITCOIND"]
python
2
0
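Note on the record above: it resolves its daemon and CLI binaries with os.getenv("BITCOIND", "kraitd") and os.getenv("BITCOINCLI", "krait-cli"), i.e. the environment variables listed in the constarg fields override hard-coded defaults. Below is a minimal sketch of that override pattern; the helper names are hypothetical, and only the variable names and default binary names are taken from the record.

import os

def bitcoind_binary():
    # Environment override with a default, mirroring os.getenv("BITCOIND", "kraitd") above.
    return os.getenv("BITCOIND", "kraitd")

def bitcoincli_binary():
    # Same pattern for the CLI tool; "krait-cli" is the default used in the record.
    return os.getenv("BITCOINCLI", "krait-cli")

if __name__ == "__main__":
    # Exporting BITCOIND=/path/to/other/daemon before running would swap the binary
    # without editing the test script itself.
    print(bitcoind_binary(), bitcoincli_binary())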
test/other/extract_workload_from_kylin_logs.py
import os from collections import Counter from datetime import datetime import matplotlib.pyplot as plt import pandas as pd def process_logfile(log_file) -> list: with open(log_file, mode='r', encoding='utf-8') as file: lines = file.readlines() return [line.split(" : ")[0] for line in lines] if __name__ == '__main__': # extract datetime strings from log files query_timestamps = [] # logs_dir = os.path.join(os.environ['RAVEN_HOME'], 'resources', 'sql_logs-customer4', '4', 'logs') logs_dir = os.path.join(os.environ['RAVEN_HOME'], 'resources', 'sql_logs-customer1-3') for filename in os.listdir(logs_dir): query_timestamps += process_logfile(os.path.join(logs_dir, filename)) # print(query_timestamps) conv_time = [datetime.strptime(_, '%Y-%m-%d %H:%M:%S') for _ in query_timestamps] query_timestamps = [dt.strftime('%Y-%m-%d %H:%M:%S') for dt in conv_time] query_timestamps.sort() for t in query_timestamps: print(t) # print(query_timestamps) # mylist = dict(Counter(query_timestamps)).items() # dt = [datetime.strptime(t[0], '%Y-%m-%d %H:%M') for t in mylist] # qps = [t[1] / 60.0 for t in mylist] # df = pd.DataFrame({'qps': qps}, index=pd.DatetimeIndex(dt)) # df['qps'].plot(xlabel='time', ylabel='QPS') # plt.ylim(0, 2.0) # plt.show() # # convert strings into datetime objects # conv_time = [datetime.strptime(_, '%Y-%m-%d %H:%M:%S') for _ in query_timestamps] # # print(conv_time) # # # define bin number # bin_nr = 150 # fig, ax = plt.subplots(1, 1) # # # create histogram, get bin position for label # _counts, bins, _patches = ax.hist(conv_time, bins=bin_nr) # # # set xticks at bin edges # plt.xticks(bins) # # # reformat bin label into format hour:minute # # ax.xaxis.set_major_formatter(mdates.DateFormatter("%H:%M")) # # plt.show()
[]
[]
[ "RAVEN_HOME" ]
[]
["RAVEN_HOME"]
python
1
0
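Note on the record above: the log directory is built from os.environ['RAVEN_HOME'], which raises a bare KeyError when the variable is unset. The sketch below is a hypothetical variant of that lookup with an explicit error message; the function name and message are assumptions, and only the RAVEN_HOME variable and the path components come from the record.

import os

def raven_logs_dir(*parts):
    # Same RAVEN_HOME lookup as in the record above, but failing with a clearer message.
    home = os.environ.get('RAVEN_HOME')
    if home is None:
        raise RuntimeError('RAVEN_HOME is not set; export it before extracting the workload')
    return os.path.join(home, *parts)

# Usage (assumes RAVEN_HOME is exported):
# logs_dir = raven_logs_dir('resources', 'sql_logs-customer1-3')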
java/org/apache/catalina/servlets/CGIServlet.java
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.catalina.servlets; import java.io.BufferedOutputStream; import java.io.BufferedReader; import java.io.File; import java.io.IOException; import java.io.InputStream; import java.io.InputStreamReader; import java.io.OutputStream; import java.io.UnsupportedEncodingException; import java.net.URLDecoder; import java.nio.file.Files; import java.util.ArrayList; import java.util.Date; import java.util.Enumeration; import java.util.HashSet; import java.util.Hashtable; import java.util.List; import java.util.Locale; import java.util.Map.Entry; import java.util.Set; import java.util.StringTokenizer; import java.util.Vector; import java.util.regex.Pattern; import javax.servlet.RequestDispatcher; import javax.servlet.ServletConfig; import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.http.Cookie; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.http.HttpSession; import org.apache.catalina.util.IOTools; import org.apache.juli.logging.Log; import org.apache.juli.logging.LogFactory; import org.apache.tomcat.util.compat.JrePlatform; import org.apache.tomcat.util.res.StringManager; /** * CGI-invoking servlet for web applications, used to execute scripts which * comply to the Common Gateway Interface (CGI) specification and are named * in the path-info used to invoke this servlet. * * <p> * <i>Note: This code compiles and even works for simple CGI cases. * Exhaustive testing has not been done. Please consider it beta * quality. Feedback is appreciated to the author (see below).</i> * </p> * <p> * * <b>Example</b>:<br> * If an instance of this servlet was mapped (using * <code>&lt;web-app&gt;/WEB-INF/web.xml</code>) to: * </p> * <p> * <code> * &lt;web-app&gt;/cgi-bin/* * </code> * </p> * <p> * then the following request: * </p> * <p> * <code> * http://localhost:8080/&lt;web-app&gt;/cgi-bin/dir1/script/pathinfo1 * </code> * </p> * <p> * would result in the execution of the script * </p> * <p> * <code> * &lt;web-app-root&gt;/WEB-INF/cgi/dir1/script * </code> * </p> * <p> * with the script's <code>PATH_INFO</code> set to <code>/pathinfo1</code>. * </p> * <p> * Recommendation: House all your CGI scripts under * <code>&lt;webapp&gt;/WEB-INF/cgi</code>. This will ensure that you do not * accidentally expose your cgi scripts' code to the outside world and that * your cgis will be cleanly ensconced underneath the WEB-INF (i.e., * non-content) area. * </p> * <p> * The default CGI location is mentioned above. 
You have the flexibility to * put CGIs wherever you want, however: * </p> * <p> * The CGI search path will start at * webAppRootDir + File.separator + cgiPathPrefix * (or webAppRootDir alone if cgiPathPrefix is * null). * </p> * <p> * cgiPathPrefix is defined by setting * this servlet's cgiPathPrefix init parameter * </p> * * <p> * * <B>CGI Specification</B>:<br> derived from * <a href="http://cgi-spec.golux.com">http://cgi-spec.golux.com</a>. * A work-in-progress &amp; expired Internet Draft. Note no actual RFC describing * the CGI specification exists. Where the behavior of this servlet differs * from the specification cited above, it is either documented here, a bug, * or an instance where the specification cited differs from Best * Community Practice (BCP). * Such instances should be well-documented here. Please email the * <a href="https://tomcat.apache.org/lists.html">Tomcat group</a> * with amendments. * * </p> * <p> * * <b>Canonical metavariables</b>:<br> * The CGI specification defines the following canonical metavariables: * <br> * [excerpt from CGI specification] * <PRE> * AUTH_TYPE * CONTENT_LENGTH * CONTENT_TYPE * GATEWAY_INTERFACE * PATH_INFO * PATH_TRANSLATED * QUERY_STRING * REMOTE_ADDR * REMOTE_HOST * REMOTE_IDENT * REMOTE_USER * REQUEST_METHOD * SCRIPT_NAME * SERVER_NAME * SERVER_PORT * SERVER_PROTOCOL * SERVER_SOFTWARE * </PRE> * <p> * Metavariables with names beginning with the protocol name (<EM>e.g.</EM>, * "HTTP_ACCEPT") are also canonical in their description of request header * fields. The number and meaning of these fields may change independently * of this specification. (See also section 6.1.5 [of the CGI specification].) * </p> * [end excerpt] * * <h2> Implementation notes</h2> * <p> * * <b>standard input handling</b>: If your script accepts standard input, * then the client must start sending input within a certain timeout period, * otherwise the servlet will assume no input is coming and carry on running * the script. The script's the standard input will be closed and handling of * any further input from the client is undefined. Most likely it will be * ignored. If this behavior becomes undesirable, then this servlet needs * to be enhanced to handle threading of the spawned process' stdin, stdout, * and stderr (which should not be too hard). * <br> * If you find your cgi scripts are timing out receiving input, you can set * the init parameter <code>stderrTimeout</code> of your webapps' cgi-handling * servlet. * </p> * <p> * * <b>Metavariable Values</b>: According to the CGI specification, * implementations may choose to represent both null or missing values in an * implementation-specific manner, but must define that manner. This * implementation chooses to always define all required metavariables, but * set the value to "" for all metavariables whose value is either null or * undefined. PATH_TRANSLATED is the sole exception to this rule, as per the * CGI Specification. * * </p> * <p> * * <b>NPH -- Non-parsed-header implementation</b>: This implementation does * not support the CGI NPH concept, whereby server ensures that the data * supplied to the script are precisely as supplied by the client and * unaltered by the server. * </p> * <p> * The function of a servlet container (including Tomcat) is specifically * designed to parse and possible alter CGI-specific variables, and as * such makes NPH functionality difficult to support. * </p> * <p> * The CGI specification states that compliant servers MAY support NPH output. 
* It does not state servers MUST support NPH output to be unconditionally * compliant. Thus, this implementation maintains unconditional compliance * with the specification though NPH support is not present. * </p> * <p> * * The CGI specification is located at * <a href="http://cgi-spec.golux.com">http://cgi-spec.golux.com</a>. * * </p> * <h3>TODO:</h3> * <ul> * <li> Support for setting headers (for example, Location headers don't work) * <li> Support for collapsing multiple header lines (per RFC 2616) * <li> Ensure handling of POST method does not interfere with 2.3 Filters * <li> Refactor some debug code out of core * <li> Ensure header handling preserves encoding * <li> Possibly rewrite CGIRunner.run()? * <li> Possibly refactor CGIRunner and CGIEnvironment as non-inner classes? * <li> Document handling of cgi stdin when there is no stdin * <li> Revisit IOException handling in CGIRunner.run() * <li> Better documentation * <li> Confirm use of ServletInputStream.available() in CGIRunner.run() is * not needed * <li> [add more to this TODO list] * </ul> * * @author Martin T Dengler [[email protected]] * @author Amy Roh */ public final class CGIServlet extends HttpServlet { private static final Log log = LogFactory.getLog(CGIServlet.class); private static final StringManager sm = StringManager.getManager(CGIServlet.class); /* some vars below copied from Craig R. McClanahan's InvokerServlet */ private static final long serialVersionUID = 1L; private static final Set<String> DEFAULT_SUPER_METHODS = new HashSet<>(); private static final Pattern DEFAULT_CMD_LINE_ARGUMENTS_DECODED_PATTERN; private static final String ALLOW_ANY_PATTERN = ".*"; static { DEFAULT_SUPER_METHODS.add("HEAD"); DEFAULT_SUPER_METHODS.add("OPTIONS"); DEFAULT_SUPER_METHODS.add("TRACE"); if (JrePlatform.IS_WINDOWS) { DEFAULT_CMD_LINE_ARGUMENTS_DECODED_PATTERN = Pattern.compile("[a-zA-Z0-9\\Q-_.\\/:\\E]+"); } else { // No restrictions DEFAULT_CMD_LINE_ARGUMENTS_DECODED_PATTERN = null; } } /** * The CGI search path will start at * webAppRootDir + File.separator + cgiPathPrefix * (or webAppRootDir alone if cgiPathPrefix is * null) */ private String cgiPathPrefix = null; /** the executable to use with the script */ private String cgiExecutable = "perl"; /** additional arguments for the executable */ private List<String> cgiExecutableArgs = null; /** the encoding to use for parameters */ private String parameterEncoding = System.getProperty("file.encoding", "UTF-8"); /* The HTTP methods this Servlet will pass to the CGI script */ private Set<String> cgiMethods = new HashSet<>(); private boolean cgiMethodsAll = false; /** * The time (in milliseconds) to wait for the reading of stderr to complete * before terminating the CGI process. */ private long stderrTimeout = 2000; /** * The regular expression used to select HTTP headers to be passed to the * CGI process as environment variables. The name of the environment * variable will be the name of the HTTP header converter to upper case, * prefixed with <code>HTTP_</code> and with all <code>-</code> characters * converted to <code>_</code>. 
*/ private Pattern envHttpHeadersPattern = Pattern.compile( "ACCEPT[-0-9A-Z]*|CACHE-CONTROL|COOKIE|HOST|IF-[-0-9A-Z]*|REFERER|USER-AGENT"); /** object used to ensure multiple threads don't try to expand same file */ private static final Object expandFileLock = new Object(); /** the shell environment variables to be passed to the CGI script */ private final Hashtable<String,String> shellEnv = new Hashtable<>(); /** * Enable creation of script command line arguments from query-string. * See https://tools.ietf.org/html/rfc3875#section-4.4 * 4.4. The Script Command Line */ private boolean enableCmdLineArguments = false; /** * Limits the encoded form of individual command line arguments. By default * values are limited to those allowed by the RFC. * See https://tools.ietf.org/html/rfc3875#section-4.4 * * Uses \Q...\E to avoid individual quoting. */ private Pattern cmdLineArgumentsEncodedPattern = Pattern.compile("[a-zA-Z0-9\\Q%;/?:@&,$-_.!~*'()\\E]+"); /** * Limits the decoded form of individual command line arguments. Default * varies by platform. */ private Pattern cmdLineArgumentsDecodedPattern = DEFAULT_CMD_LINE_ARGUMENTS_DECODED_PATTERN; /** * Sets instance variables. * <P> * Modified from Craig R. McClanahan's InvokerServlet * </P> * * @param config a <code>ServletConfig</code> object * containing the servlet's * configuration and initialization * parameters * * @exception ServletException if an exception has occurred that * interferes with the servlet's normal * operation */ @Override public void init(ServletConfig config) throws ServletException { super.init(config); // Set our properties from the initialization parameters cgiPathPrefix = getServletConfig().getInitParameter("cgiPathPrefix"); boolean passShellEnvironment = Boolean.parseBoolean(getServletConfig().getInitParameter("passShellEnvironment")); if (passShellEnvironment) { shellEnv.putAll(System.getenv()); } Enumeration<String> e = config.getInitParameterNames(); while(e.hasMoreElements()) { String initParamName = e.nextElement(); if (initParamName.startsWith("environment-variable-")) { if (initParamName.length() == 21) { throw new ServletException(sm.getString("cgiServlet.emptyEnvVarName")); } shellEnv.put(initParamName.substring(21), config.getInitParameter(initParamName)); } } if (getServletConfig().getInitParameter("executable") != null) { cgiExecutable = getServletConfig().getInitParameter("executable"); } if (getServletConfig().getInitParameter("executable-arg-1") != null) { List<String> args = new ArrayList<>(); for (int i = 1;; i++) { String arg = getServletConfig().getInitParameter( "executable-arg-" + i); if (arg == null) { break; } args.add(arg); } cgiExecutableArgs = args; } if (getServletConfig().getInitParameter("parameterEncoding") != null) { parameterEncoding = getServletConfig().getInitParameter("parameterEncoding"); } if (getServletConfig().getInitParameter("stderrTimeout") != null) { stderrTimeout = Long.parseLong(getServletConfig().getInitParameter( "stderrTimeout")); } if (getServletConfig().getInitParameter("envHttpHeaders") != null) { envHttpHeadersPattern = Pattern.compile(getServletConfig().getInitParameter("envHttpHeaders")); } if (getServletConfig().getInitParameter("enableCmdLineArguments") != null) { enableCmdLineArguments = Boolean.parseBoolean(config.getInitParameter("enableCmdLineArguments")); } if (getServletConfig().getInitParameter("cgiMethods") != null) { String paramValue = getServletConfig().getInitParameter("cgiMethods"); paramValue = paramValue.trim(); if ("*".equals(paramValue)) { 
cgiMethodsAll = true; } else { String[] methods = paramValue.split(","); for (String method : methods) { String trimmedMethod = method.trim(); cgiMethods.add(trimmedMethod); } } } else { cgiMethods.add("GET"); cgiMethods.add("POST"); } if (getServletConfig().getInitParameter("cmdLineArgumentsEncoded") != null) { cmdLineArgumentsEncodedPattern = Pattern.compile(getServletConfig().getInitParameter("cmdLineArgumentsEncoded")); } String value = getServletConfig().getInitParameter("cmdLineArgumentsDecoded"); if (ALLOW_ANY_PATTERN.equals(value)) { // Optimisation for case where anything is allowed cmdLineArgumentsDecodedPattern = null; } else if (value != null) { cmdLineArgumentsDecodedPattern = Pattern.compile(value); } } /** * Logs important Servlet API and container information. * * <p> * Based on SnoopAllServlet by Craig R. McClanahan * </p> * * @param req HttpServletRequest object used as source of information * * @exception IOException if a write operation exception occurs */ private void printServletEnvironment(HttpServletRequest req) throws IOException { // Document the properties from ServletRequest log.trace("ServletRequest Properties"); Enumeration<String> attrs = req.getAttributeNames(); while (attrs.hasMoreElements()) { String attr = attrs.nextElement(); log.trace("Request Attribute: " + attr + ": [ " + req.getAttribute(attr) +"]"); } log.trace("Character Encoding: [" + req.getCharacterEncoding() + "]"); log.trace("Content Length: [" + req.getContentLengthLong() + "]"); log.trace("Content Type: [" + req.getContentType() + "]"); Enumeration<Locale> locales = req.getLocales(); while (locales.hasMoreElements()) { Locale locale = locales.nextElement(); log.trace("Locale: [" +locale + "]"); } Enumeration<String> params = req.getParameterNames(); while (params.hasMoreElements()) { String param = params.nextElement(); for (String value : req.getParameterValues(param)) { log.trace("Request Parameter: " + param + ": [" + value + "]"); } } log.trace("Protocol: [" + req.getProtocol() + "]"); log.trace("Remote Address: [" + req.getRemoteAddr() + "]"); log.trace("Remote Host: [" + req.getRemoteHost() + "]"); log.trace("Scheme: [" + req.getScheme() + "]"); log.trace("Secure: [" + req.isSecure() + "]"); log.trace("Server Name: [" + req.getServerName() + "]"); log.trace("Server Port: [" + req.getServerPort() + "]"); // Document the properties from HttpServletRequest log.trace("HttpServletRequest Properties"); log.trace("Auth Type: [" + req.getAuthType() + "]"); log.trace("Context Path: [" + req.getContextPath() + "]"); Cookie cookies[] = req.getCookies(); if (cookies != null) { for (Cookie cookie : cookies) { log.trace("Cookie: " + cookie.getName() + ": [" + cookie.getValue() + "]"); } } Enumeration<String> headers = req.getHeaderNames(); while (headers.hasMoreElements()) { String header = headers.nextElement(); log.trace("HTTP Header: " + header + ": [" + req.getHeader(header) + "]"); } log.trace("Method: [" + req.getMethod() + "]"); log.trace("Path Info: [" + req.getPathInfo() + "]"); log.trace("Path Translated: [" + req.getPathTranslated() + "]"); log.trace("Query String: [" + req.getQueryString() + "]"); log.trace("Remote User: [" + req.getRemoteUser() + "]"); log.trace("Requested Session ID: [" + req.getRequestedSessionId() + "]"); log.trace("Requested Session ID From Cookie: [" + req.isRequestedSessionIdFromCookie() + "]"); log.trace("Requested Session ID From URL: [" + req.isRequestedSessionIdFromURL() + "]"); log.trace("Requested Session ID Valid: [" + req.isRequestedSessionIdValid() + 
"]"); log.trace("Request URI: [" + req.getRequestURI() + "]"); log.trace("Servlet Path: [" + req.getServletPath() + "]"); log.trace("User Principal: [" + req.getUserPrincipal() + "]"); // Process the current session (if there is one) HttpSession session = req.getSession(false); if (session != null) { // Document the session properties log.trace("HttpSession Properties"); log.trace("ID: [" + session.getId() + "]"); log.trace("Creation Time: [" + new Date(session.getCreationTime()) + "]"); log.trace("Last Accessed Time: [" + new Date(session.getLastAccessedTime()) + "]"); log.trace("Max Inactive Interval: [" + session.getMaxInactiveInterval() + "]"); // Document the session attributes attrs = session.getAttributeNames(); while (attrs.hasMoreElements()) { String attr = attrs.nextElement(); log.trace("Session Attribute: " + attr + ": [" + session.getAttribute(attr) + "]"); } } // Document the servlet configuration properties log.trace("ServletConfig Properties"); log.trace("Servlet Name: [" + getServletConfig().getServletName() + "]"); // Document the servlet configuration initialization parameters params = getServletConfig().getInitParameterNames(); while (params.hasMoreElements()) { String param = params.nextElement(); String value = getServletConfig().getInitParameter(param); log.trace("Servlet Init Param: " + param + ": [" + value + "]"); } // Document the servlet context properties log.trace("ServletContext Properties"); log.trace("Major Version: [" + getServletContext().getMajorVersion() + "]"); log.trace("Minor Version: [" + getServletContext().getMinorVersion() + "]"); log.trace("Real Path for '/': [" + getServletContext().getRealPath("/") + "]"); log.trace("Server Info: [" + getServletContext().getServerInfo() + "]"); // Document the servlet context initialization parameters log.trace("ServletContext Initialization Parameters"); params = getServletContext().getInitParameterNames(); while (params.hasMoreElements()) { String param = params.nextElement(); String value = getServletContext().getInitParameter(param); log.trace("Servlet Context Init Param: " + param + ": [" + value + "]"); } // Document the servlet context attributes log.trace("ServletContext Attributes"); attrs = getServletContext().getAttributeNames(); while (attrs.hasMoreElements()) { String attr = attrs.nextElement(); log.trace("Servlet Context Attribute: " + attr + ": [" + getServletContext().getAttribute(attr) + "]"); } } @Override protected void service(HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException { String method = req.getMethod(); if (cgiMethodsAll || cgiMethods.contains(method)) { doGet(req, res); } else if (DEFAULT_SUPER_METHODS.contains(method)){ // If the CGI servlet is explicitly configured to handle one of // these methods it will be handled in the previous condition super.service(req, res); } else { // Unsupported method res.sendError(HttpServletResponse.SC_METHOD_NOT_ALLOWED); } } /** * Provides CGI Gateway service. 
* * @param req HttpServletRequest passed in by servlet container * @param res HttpServletResponse passed in by servlet container * * @exception ServletException if a servlet-specific exception occurs * @exception IOException if a read/write exception occurs */ @Override protected void doGet(HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException { CGIEnvironment cgiEnv = new CGIEnvironment(req, getServletContext()); if (cgiEnv.isValid()) { CGIRunner cgi = new CGIRunner(cgiEnv.getCommand(), cgiEnv.getEnvironment(), cgiEnv.getWorkingDirectory(), cgiEnv.getParameters()); if ("POST".equals(req.getMethod())) { cgi.setInput(req.getInputStream()); } cgi.setResponse(res); cgi.run(); } else { res.sendError(404); } if (log.isTraceEnabled()) { String[] cgiEnvLines = cgiEnv.toString().split(System.lineSeparator()); for (String cgiEnvLine : cgiEnvLines) { log.trace(cgiEnvLine); } printServletEnvironment(req); } } @Override protected void doOptions(HttpServletRequest req, HttpServletResponse res) throws ServletException, IOException { // Note: This method will never be called if cgiMethods is "*" so that // case does nto need to be handled here. Set<String> allowedMethods = new HashSet<>(); allowedMethods.addAll(cgiMethods); allowedMethods.addAll(CGIServlet.DEFAULT_SUPER_METHODS); StringBuilder headerValue = new StringBuilder(); for (String method : allowedMethods) { headerValue.append(method); headerValue.append(','); } // Remove trailing comma headerValue.deleteCharAt(headerValue.length() - 1); res.setHeader("allow", headerValue.toString()); } /** * Behaviour depends on the status code. * * Status < 400 - Calls setStatus. Returns false. CGI servlet will provide * the response body. * Status >= 400 - Calls sendError(status), returns true. Standard error * page mechanism will provide the response body. */ private boolean setStatus(HttpServletResponse response, int status) throws IOException { if (status >= HttpServletResponse.SC_BAD_REQUEST) { response.sendError(status); return true; } else { response.setStatus(status); return false; } } /** * Encapsulates the CGI environment and rules to derive * that environment from the servlet container and request information. */ protected class CGIEnvironment { /** context of the enclosing servlet */ private ServletContext context = null; /** context path of enclosing servlet */ private String contextPath = null; /** servlet URI of the enclosing servlet */ private String servletPath = null; /** pathInfo for the current request */ private String pathInfo = null; /** real file system directory of the enclosing servlet's web app */ private String webAppRootDir = null; /** tempdir for context - used to expand scripts in unexpanded wars */ private File tmpDir = null; /** derived cgi environment */ private Hashtable<String, String> env = null; /** cgi command to be invoked */ private String command = null; /** cgi command's desired working directory */ private final File workingDirectory; /** cgi command's command line parameters */ private final ArrayList<String> cmdLineParameters = new ArrayList<>(); /** whether or not this object is valid or not */ private final boolean valid; /** * Creates a CGIEnvironment and derives the necessary environment, * query parameters, working directory, cgi command, etc. 
* * @param req HttpServletRequest for information provided by * the Servlet API * @param context ServletContext for information provided by the * Servlet API * @throws IOException an IO error occurred */ protected CGIEnvironment(HttpServletRequest req, ServletContext context) throws IOException { setupFromContext(context); boolean valid = setupFromRequest(req); if (valid) { valid = setCGIEnvironment(req); } if (valid) { workingDirectory = new File(command.substring(0, command.lastIndexOf(File.separator))); } else { workingDirectory = null; } this.valid = valid; } /** * Uses the ServletContext to set some CGI variables * * @param context ServletContext for information provided by the * Servlet API */ protected void setupFromContext(ServletContext context) { this.context = context; this.webAppRootDir = context.getRealPath("/"); this.tmpDir = (File) context.getAttribute(ServletContext.TEMPDIR); } /** * Uses the HttpServletRequest to set most CGI variables * * @param req HttpServletRequest for information provided by * the Servlet API * * @return true if the request was parsed without error, false if there * was a problem * @throws UnsupportedEncodingException Unknown encoding */ protected boolean setupFromRequest(HttpServletRequest req) throws UnsupportedEncodingException { boolean isIncluded = false; // Look to see if this request is an include if (req.getAttribute( RequestDispatcher.INCLUDE_REQUEST_URI) != null) { isIncluded = true; } if (isIncluded) { this.contextPath = (String) req.getAttribute( RequestDispatcher.INCLUDE_CONTEXT_PATH); this.servletPath = (String) req.getAttribute( RequestDispatcher.INCLUDE_SERVLET_PATH); this.pathInfo = (String) req.getAttribute( RequestDispatcher.INCLUDE_PATH_INFO); } else { this.contextPath = req.getContextPath(); this.servletPath = req.getServletPath(); this.pathInfo = req.getPathInfo(); } // If getPathInfo() returns null, must be using extension mapping // In this case, pathInfo should be same as servletPath if (this.pathInfo == null) { this.pathInfo = this.servletPath; } // If the request method is GET, POST or HEAD and the query string // does not contain an unencoded "=" this is an indexed query. // The parsed query string becomes the command line parameters // for the cgi command. if (enableCmdLineArguments && (req.getMethod().equals("GET") || req.getMethod().equals("POST") || req.getMethod().equals("HEAD"))) { String qs; if (isIncluded) { qs = (String) req.getAttribute( RequestDispatcher.INCLUDE_QUERY_STRING); } else { qs = req.getQueryString(); } if (qs != null && qs.indexOf('=') == -1) { StringTokenizer qsTokens = new StringTokenizer(qs, "+"); while (qsTokens.hasMoreTokens()) { String encodedArgument = qsTokens.nextToken(); if (!cmdLineArgumentsEncodedPattern.matcher(encodedArgument).matches()) { if (log.isDebugEnabled()) { log.debug(sm.getString("cgiServlet.invalidArgumentEncoded", encodedArgument, cmdLineArgumentsEncodedPattern.toString())); } return false; } String decodedArgument = URLDecoder.decode(encodedArgument, parameterEncoding); if (cmdLineArgumentsDecodedPattern != null && !cmdLineArgumentsDecodedPattern.matcher(decodedArgument).matches()) { if (log.isDebugEnabled()) { log.debug(sm.getString("cgiServlet.invalidArgumentDecoded", decodedArgument, cmdLineArgumentsDecodedPattern.toString())); } return false; } cmdLineParameters.add(decodedArgument); } } } return true; } /** * Resolves core information about the cgi script. 
* * <p> * Example URI: * </p> * <PRE> /servlet/cgigateway/dir1/realCGIscript/pathinfo1 </PRE> * <ul> * <LI><b>path</b> = $CATALINA_HOME/mywebapp/dir1/realCGIscript * <LI><b>scriptName</b> = /servlet/cgigateway/dir1/realCGIscript * <LI><b>cgiName</b> = /dir1/realCGIscript * <LI><b>name</b> = realCGIscript * </ul> * <p> * CGI search algorithm: search the real path below * &lt;my-webapp-root&gt; and find the first non-directory in * the getPathTranslated("/"), reading/searching from left-to-right. *</p> *<p> * The CGI search path will start at * webAppRootDir + File.separator + cgiPathPrefix * (or webAppRootDir alone if cgiPathPrefix is * null). *</p> *<p> * cgiPathPrefix is defined by setting * this servlet's cgiPathPrefix init parameter * *</p> * * @param pathInfo String from HttpServletRequest.getPathInfo() * @param webAppRootDir String from context.getRealPath("/") * @param contextPath String as from * HttpServletRequest.getContextPath() * @param servletPath String as from * HttpServletRequest.getServletPath() * @param cgiPathPrefix subdirectory of webAppRootDir below which * the web app's CGIs may be stored; can be null. * The CGI search path will start at * webAppRootDir + File.separator + cgiPathPrefix * (or webAppRootDir alone if cgiPathPrefix is * null). cgiPathPrefix is defined by setting * the servlet's cgiPathPrefix init parameter. * * * @return * <ul> * <li> * <code>path</code> - full file-system path to valid cgi script, * or null if no cgi was found * <li> * <code>scriptName</code> - * CGI variable SCRIPT_NAME; the full URL path * to valid cgi script or null if no cgi was * found * <li> * <code>cgiName</code> - servlet pathInfo fragment corresponding to * the cgi script itself, or null if not found * <li> * <code>name</code> - simple name (no directories) of the * cgi script, or null if no cgi was found * </ul> */ protected String[] findCGI(String pathInfo, String webAppRootDir, String contextPath, String servletPath, String cgiPathPrefix) { String path = null; String name = null; String scriptname = null; if (webAppRootDir != null && webAppRootDir.lastIndexOf(File.separator) == (webAppRootDir.length() - 1)) { //strip the trailing "/" from the webAppRootDir webAppRootDir = webAppRootDir.substring(0, (webAppRootDir.length() - 1)); } if (cgiPathPrefix != null) { webAppRootDir = webAppRootDir + File.separator + cgiPathPrefix; } if (log.isDebugEnabled()) { log.debug(sm.getString("cgiServlet.find.path", pathInfo, webAppRootDir)); } File currentLocation = new File(webAppRootDir); StringTokenizer dirWalker = new StringTokenizer(pathInfo, "/"); if (log.isDebugEnabled()) { log.debug(sm.getString("cgiServlet.find.location", currentLocation.getAbsolutePath())); } StringBuilder cginameBuilder = new StringBuilder(); while (!currentLocation.isFile() && dirWalker.hasMoreElements()) { String nextElement = (String) dirWalker.nextElement(); currentLocation = new File(currentLocation, nextElement); cginameBuilder.append('/').append(nextElement); if (log.isDebugEnabled()) { log.debug(sm.getString("cgiServlet.find.location", currentLocation.getAbsolutePath())); } } String cginame = cginameBuilder.toString(); if (!currentLocation.isFile()) { return new String[] { null, null, null, null }; } path = currentLocation.getAbsolutePath(); name = currentLocation.getName(); if (servletPath.startsWith(cginame)) { scriptname = contextPath + cginame; } else { scriptname = contextPath + servletPath + cginame; } if (log.isDebugEnabled()) { log.debug(sm.getString("cgiServlet.find.found", name, path, scriptname, 
cginame)); } return new String[] { path, scriptname, cginame, name }; } /** * Constructs the CGI environment to be supplied to the invoked CGI * script; relies heavily on Servlet API methods and findCGI * * @param req request associated with the CGI * Invocation * * @return true if environment was set OK, false if there * was a problem and no environment was set * @throws IOException an IO error occurred */ protected boolean setCGIEnvironment(HttpServletRequest req) throws IOException { /* * This method is slightly ugly; c'est la vie. * "You cannot stop [ugliness], you can only hope to contain [it]" * (apologies to Marv Albert regarding MJ) */ // Add the shell environment variables (if any) Hashtable<String, String> envp = new Hashtable<>(shellEnv); // Add the CGI environment variables String sPathInfoOrig = null; String sPathInfoCGI = null; String sPathTranslatedCGI = null; String sCGIFullPath = null; String sCGIScriptName = null; String sCGIFullName = null; String sCGIName = null; String[] sCGINames; sPathInfoOrig = this.pathInfo; sPathInfoOrig = sPathInfoOrig == null ? "" : sPathInfoOrig; if (webAppRootDir == null ) { // The app has not been deployed in exploded form webAppRootDir = tmpDir.toString(); expandCGIScript(); } sCGINames = findCGI(sPathInfoOrig, webAppRootDir, contextPath, servletPath, cgiPathPrefix); sCGIFullPath = sCGINames[0]; sCGIScriptName = sCGINames[1]; sCGIFullName = sCGINames[2]; sCGIName = sCGINames[3]; if (sCGIFullPath == null || sCGIScriptName == null || sCGIFullName == null || sCGIName == null) { return false; } envp.put("SERVER_SOFTWARE", "TOMCAT"); envp.put("SERVER_NAME", nullsToBlanks(req.getServerName())); envp.put("GATEWAY_INTERFACE", "CGI/1.1"); envp.put("SERVER_PROTOCOL", nullsToBlanks(req.getProtocol())); int port = req.getServerPort(); Integer iPort = (port == 0 ? Integer.valueOf(-1) : Integer.valueOf(port)); envp.put("SERVER_PORT", iPort.toString()); envp.put("REQUEST_METHOD", nullsToBlanks(req.getMethod())); envp.put("REQUEST_URI", nullsToBlanks(req.getRequestURI())); /*- * PATH_INFO should be determined by using sCGIFullName: * 1) Let sCGIFullName not end in a "/" (see method findCGI) * 2) Let sCGIFullName equal the pathInfo fragment which * corresponds to the actual cgi script. * 3) Thus, PATH_INFO = request.getPathInfo().substring( * sCGIFullName.length()) * * (see method findCGI, where the real work is done) * */ if (pathInfo == null || (pathInfo.substring(sCGIFullName.length()).length() <= 0)) { sPathInfoCGI = ""; } else { sPathInfoCGI = pathInfo.substring(sCGIFullName.length()); } envp.put("PATH_INFO", sPathInfoCGI); /*- * PATH_TRANSLATED must be determined after PATH_INFO (and the * implied real cgi-script) has been taken into account. * * The following example demonstrates: * * servlet info = /servlet/cgigw/dir1/dir2/cgi1/trans1/trans2 * cgifullpath = /servlet/cgigw/dir1/dir2/cgi1 * path_info = /trans1/trans2 * webAppRootDir = servletContext.getRealPath("/") * * path_translated = servletContext.getRealPath("/trans1/trans2") * * That is, PATH_TRANSLATED = webAppRootDir + sPathInfoCGI * (unless sPathInfoCGI is null or blank, then the CGI * specification dictates that the PATH_TRANSLATED metavariable * SHOULD NOT be defined. 
* */ if (!sPathInfoCGI.isEmpty()) { sPathTranslatedCGI = context.getRealPath(sPathInfoCGI); } if (sPathTranslatedCGI == null || "".equals(sPathTranslatedCGI)) { //NOOP } else { envp.put("PATH_TRANSLATED", nullsToBlanks(sPathTranslatedCGI)); } envp.put("SCRIPT_NAME", nullsToBlanks(sCGIScriptName)); envp.put("QUERY_STRING", nullsToBlanks(req.getQueryString())); envp.put("REMOTE_HOST", nullsToBlanks(req.getRemoteHost())); envp.put("REMOTE_ADDR", nullsToBlanks(req.getRemoteAddr())); envp.put("AUTH_TYPE", nullsToBlanks(req.getAuthType())); envp.put("REMOTE_USER", nullsToBlanks(req.getRemoteUser())); envp.put("REMOTE_IDENT", ""); //not necessary for full compliance envp.put("CONTENT_TYPE", nullsToBlanks(req.getContentType())); /* Note CGI spec says CONTENT_LENGTH must be NULL ("") or undefined * if there is no content, so we cannot put 0 or -1 in as per the * Servlet API spec. */ long contentLength = req.getContentLengthLong(); String sContentLength = (contentLength <= 0 ? "" : Long.toString(contentLength)); envp.put("CONTENT_LENGTH", sContentLength); Enumeration<String> headers = req.getHeaderNames(); String header = null; while (headers.hasMoreElements()) { header = null; header = headers.nextElement().toUpperCase(Locale.ENGLISH); //REMIND: rewrite multiple headers as if received as single //REMIND: change character set //REMIND: I forgot what the previous REMIND means if (envHttpHeadersPattern.matcher(header).matches()) { envp.put("HTTP_" + header.replace('-', '_'), req.getHeader(header)); } } File fCGIFullPath = new File(sCGIFullPath); command = fCGIFullPath.getCanonicalPath(); envp.put("X_TOMCAT_SCRIPT_PATH", command); //for kicks envp.put("SCRIPT_FILENAME", command); //for PHP this.env = envp; return true; } /** * Extracts requested resource from web app archive to context work * directory to enable CGI script to be executed. 
*/ protected void expandCGIScript() { StringBuilder srcPath = new StringBuilder(); StringBuilder destPath = new StringBuilder(); InputStream is = null; // paths depend on mapping if (cgiPathPrefix == null ) { srcPath.append(pathInfo); is = context.getResourceAsStream(srcPath.toString()); destPath.append(tmpDir); destPath.append(pathInfo); } else { // essentially same search algorithm as findCGI() srcPath.append(cgiPathPrefix); StringTokenizer pathWalker = new StringTokenizer (pathInfo, "/"); // start with first element while (pathWalker.hasMoreElements() && (is == null)) { srcPath.append('/'); srcPath.append(pathWalker.nextElement()); is = context.getResourceAsStream(srcPath.toString()); } destPath.append(tmpDir); destPath.append('/'); destPath.append(srcPath); } if (is == null) { // didn't find anything, give up now log.warn(sm.getString("cgiServlet.expandNotFound", srcPath)); return; } try { File f = new File(destPath.toString()); if (f.exists()) { // Don't need to expand if it already exists return; } // create directories File dir = f.getParentFile(); if (!dir.mkdirs() && !dir.isDirectory()) { log.warn(sm.getString("cgiServlet.expandCreateDirFail", dir.getAbsolutePath())); return; } try { synchronized (expandFileLock) { // make sure file doesn't exist if (f.exists()) { return; } // create file if (!f.createNewFile()) { return; } Files.copy(is, f.toPath()); if (log.isDebugEnabled()) { log.debug(sm.getString("cgiServlet.expandOk", srcPath, destPath)); } } } catch (IOException ioe) { log.warn(sm.getString("cgiServlet.expandFail", srcPath, destPath), ioe); // delete in case file is corrupted if (f.exists()) { if (!f.delete()) { log.warn(sm.getString("cgiServlet.expandDeleteFail", f.getAbsolutePath())); } } } } finally { try { is.close(); } catch (IOException e) { log.warn(sm.getString("cgiServlet.expandCloseFail", srcPath), e); } } } /** * Returns important CGI environment information in a multi-line text * format. * * @return CGI environment info */ @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("CGIEnvironment Info:"); sb.append(System.lineSeparator()); if (isValid()) { sb.append("Validity: [true]"); sb.append(System.lineSeparator()); sb.append("Environment values:"); sb.append(System.lineSeparator()); for (Entry<String,String> entry : env.entrySet()) { sb.append(" "); sb.append(entry.getKey()); sb.append(": ["); sb.append(blanksToString(entry.getValue(), "will be set to blank")); sb.append(']'); sb.append(System.lineSeparator()); } sb.append("Derived Command :["); sb.append(nullsToBlanks(command)); sb.append(']'); sb.append(System.lineSeparator()); sb.append("Working Directory: ["); if (workingDirectory != null) { sb.append(workingDirectory.toString()); } sb.append(']'); sb.append(System.lineSeparator()); sb.append("Command Line Params:"); sb.append(System.lineSeparator()); for (String param : cmdLineParameters) { sb.append(" ["); sb.append(param); sb.append(']'); sb.append(System.lineSeparator()); } } else { sb.append("Validity: [false]"); sb.append(System.lineSeparator()); sb.append("CGI script not found or not specified."); sb.append(System.lineSeparator()); sb.append("Check the HttpServletRequest pathInfo property to see if it is what "); sb.append(System.lineSeparator()); sb.append("you meant it to be. 
You must specify an existent and executable file "); sb.append(System.lineSeparator()); sb.append("as part of the path-info."); sb.append(System.lineSeparator()); } return sb.toString(); } /** * Gets derived command string * * @return command string * */ protected String getCommand() { return command; } /** * Gets derived CGI working directory * * @return working directory * */ protected File getWorkingDirectory() { return workingDirectory; } /** * Gets derived CGI environment * * @return CGI environment * */ protected Hashtable<String,String> getEnvironment() { return env; } /** * Gets derived CGI query parameters * * @return CGI query parameters * */ protected ArrayList<String> getParameters() { return cmdLineParameters; } /** * Gets validity status * * @return true if this environment is valid, false * otherwise * */ protected boolean isValid() { return valid; } /** * Converts null strings to blank strings ("") * * @param s string to be converted if necessary * @return a non-null string, either the original or the empty string * ("") if the original was <code>null</code> */ protected String nullsToBlanks(String s) { return nullsToString(s, ""); } /** * Converts null strings to another string * * @param couldBeNull string to be converted if necessary * @param subForNulls string to return instead of a null string * @return a non-null string, either the original or the substitute * string if the original was <code>null</code> */ protected String nullsToString(String couldBeNull, String subForNulls) { return (couldBeNull == null ? subForNulls : couldBeNull); } /** * Converts blank strings to another string * * @param couldBeBlank string to be converted if necessary * @param subForBlanks string to return instead of a blank string * @return a non-null string, either the original or the substitute * string if the original was <code>null</code> or empty ("") */ protected String blanksToString(String couldBeBlank, String subForBlanks) { return (couldBeBlank == null || couldBeBlank.isEmpty()) ? subForBlanks : couldBeBlank; } } //class CGIEnvironment /** * Encapsulates the knowledge of how to run a CGI script, given the * script's desired environment and (optionally) input/output streams * * <p> * * Exposes a <code>run</code> method used to actually invoke the * CGI. * * </p> * <p> * * The CGI environment and settings are derived from the information * passed to the constructor. * * </p> * <p> * * The input and output streams can be set by the <code>setInput</code> * and <code>setResponse</code> methods, respectively. * </p> */ protected class CGIRunner { /** script/command to be executed */ private final String command; /** environment used when invoking the cgi script */ private final Hashtable<String,String> env; /** working directory used when invoking the cgi script */ private final File wd; /** command line parameters to be passed to the invoked script */ private final ArrayList<String> params; /** stdin to be passed to cgi script */ private InputStream stdin = null; /** response object used to set headers & get output stream */ private HttpServletResponse response = null; /** boolean tracking whether this object has enough info to run() */ private boolean readyToRun = false; /** * Creates a CGIRunner and initializes its environment, working * directory, and query parameters. * <BR> * Input/output streams (optional) are set using the * <code>setInput</code> and <code>setResponse</code> methods, * respectively. 
* * @param command string full path to command to be executed * @param env Hashtable with the desired script environment * @param wd File with the script's desired working directory * @param params ArrayList with the script's query command line * parameters as strings */ protected CGIRunner(String command, Hashtable<String,String> env, File wd, ArrayList<String> params) { this.command = command; this.env = env; this.wd = wd; this.params = params; updateReadyStatus(); } /** * Checks and sets ready status */ protected void updateReadyStatus() { if (command != null && env != null && wd != null && params != null && response != null) { readyToRun = true; } else { readyToRun = false; } } /** * Gets ready status * * @return false if not ready (<code>run</code> will throw * an exception), true if ready */ protected boolean isReady() { return readyToRun; } /** * Sets HttpServletResponse object used to set headers and send * output to * * @param response HttpServletResponse to be used * */ protected void setResponse(HttpServletResponse response) { this.response = response; updateReadyStatus(); } /** * Sets standard input to be passed on to the invoked cgi script * * @param stdin InputStream to be used * */ protected void setInput(InputStream stdin) { this.stdin = stdin; updateReadyStatus(); } /** * Converts a Hashtable to a String array by converting each * key/value pair in the Hashtable to a String in the form * "key=value" (hashkey + "=" + hash.get(hashkey).toString()) * * @param h Hashtable to convert * * @return converted string array * * @exception NullPointerException if a hash key has a null value * */ protected String[] hashToStringArray(Hashtable<String,?> h) throws NullPointerException { Vector<String> v = new Vector<>(); Enumeration<String> e = h.keys(); while (e.hasMoreElements()) { String k = e.nextElement(); v.add(k + "=" + h.get(k).toString()); } String[] strArr = new String[v.size()]; v.copyInto(strArr); return strArr; } /** * Executes a CGI script with the desired environment, current working * directory, and input/output streams * * <p> * This implements the following CGI specification recommendations: * </p> * <UL> * <LI> Servers SHOULD provide the "<code>query</code>" component of * the script-URI as command-line arguments to scripts if it * does not contain any unencoded "=" characters and the * command-line arguments can be generated in an unambiguous * manner. * <LI> Servers SHOULD set the AUTH_TYPE metavariable to the value * of the "<code>auth-scheme</code>" token of the * "<code>Authorization</code>" if it was supplied as part of the * request header. See <code>getCGIEnvironment</code> method. * <LI> Where applicable, servers SHOULD set the current working * directory to the directory in which the script is located * before invoking it. 
* <LI> Server implementations SHOULD define their behavior for the * following cases: * <ul> * <LI> <u>Allowed characters in pathInfo</u>: This implementation * does not allow ASCII NUL nor any character which cannot * be URL-encoded according to internet standards; * <LI> <u>Allowed characters in path segments</u>: This * implementation does not allow non-terminal NULL * segments in the the path -- IOExceptions may be thrown; * <LI> <u>"<code>.</code>" and "<code>..</code>" path * segments</u>: * This implementation does not allow "<code>.</code>" and * "<code>..</code>" in the the path, and such characters * will result in an IOException being thrown (this should * never happen since Tomcat normalises the requestURI * before determining the contextPath, servletPath and * pathInfo); * <LI> <u>Implementation limitations</u>: This implementation * does not impose any limitations except as documented * above. This implementation may be limited by the * servlet container used to house this implementation. * In particular, all the primary CGI variable values * are derived either directly or indirectly from the * container's implementation of the Servlet API methods. * </ul> * </UL> * * @exception IOException if problems during reading/writing occur * * @see java.lang.Runtime#exec(String command, String[] envp, * File dir) */ protected void run() throws IOException { /* * REMIND: this method feels too big; should it be re-written? */ if (!isReady()) { throw new IOException(this.getClass().getName() + ": not ready to run."); } if (log.isDebugEnabled()) { log.debug("envp: [" + env + "], command: [" + command + "]"); } if ((command.contains(File.separator + "." + File.separator)) || (command.contains(File.separator + "..")) || (command.contains(".." + File.separator))) { throw new IOException(this.getClass().getName() + "Illegal Character in CGI command path " + "('.' or '..') detected. 
Not running CGI [" + command + "]."); } /* original content/structure of this section taken from * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4216884 * with major modifications by Martin Dengler */ Runtime rt = null; BufferedReader cgiHeaderReader = null; InputStream cgiOutput = null; BufferedReader commandsStdErr = null; Thread errReaderThread = null; BufferedOutputStream commandsStdIn = null; Process proc = null; int bufRead = -1; List<String> cmdAndArgs = new ArrayList<>(); if (cgiExecutable.length() != 0) { cmdAndArgs.add(cgiExecutable); } if (cgiExecutableArgs != null) { cmdAndArgs.addAll(cgiExecutableArgs); } cmdAndArgs.add(command); cmdAndArgs.addAll(params); try { rt = Runtime.getRuntime(); proc = rt.exec( cmdAndArgs.toArray(new String[0]), hashToStringArray(env), wd); String sContentLength = env.get("CONTENT_LENGTH"); if(!"".equals(sContentLength)) { commandsStdIn = new BufferedOutputStream(proc.getOutputStream()); IOTools.flow(stdin, commandsStdIn); commandsStdIn.flush(); commandsStdIn.close(); } /* we want to wait for the process to exit, Process.waitFor() * is useless in our situation; see * http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4223650 */ boolean isRunning = true; commandsStdErr = new BufferedReader (new InputStreamReader(proc.getErrorStream())); final BufferedReader stdErrRdr = commandsStdErr ; errReaderThread = new Thread() { @Override public void run () { sendToLog(stdErrRdr); } }; errReaderThread.start(); InputStream cgiHeaderStream = new HTTPHeaderInputStream(proc.getInputStream()); cgiHeaderReader = new BufferedReader(new InputStreamReader(cgiHeaderStream)); // Need to be careful here. If sendError() is called the // response body should be provided by the standard error page // process. But, if the output of the CGI process isn't read // then that process can hang. 
boolean skipBody = false; while (isRunning) { try { //set headers String line = null; while (((line = cgiHeaderReader.readLine()) != null) && !line.isEmpty()) { if (log.isTraceEnabled()) { log.trace("addHeader(\"" + line + "\")"); } if (line.startsWith("HTTP")) { skipBody = setStatus(response, getSCFromHttpStatusLine(line)); } else if (line.indexOf(':') >= 0) { String header = line.substring(0, line.indexOf(':')).trim(); String value = line.substring(line.indexOf(':') + 1).trim(); if (header.equalsIgnoreCase("status")) { skipBody = setStatus(response, getSCFromCGIStatusHeader(value)); } else { response.addHeader(header , value); } } else { log.info(sm.getString("cgiServlet.runBadHeader", line)); } } //write output byte[] bBuf = new byte[2048]; OutputStream out = response.getOutputStream(); cgiOutput = proc.getInputStream(); try { while (!skipBody && (bufRead = cgiOutput.read(bBuf)) != -1) { if (log.isTraceEnabled()) { log.trace("output " + bufRead + " bytes of data"); } out.write(bBuf, 0, bufRead); } } finally { // Attempt to consume any leftover byte if something bad happens, // such as a socket disconnect on the servlet side; otherwise, the // external process could hang if (bufRead != -1) { while ((bufRead = cgiOutput.read(bBuf)) != -1) { // NOOP - just read the data } } } proc.exitValue(); // Throws exception if alive isRunning = false; } catch (IllegalThreadStateException e) { try { Thread.sleep(500); } catch (InterruptedException ignored) { // Ignore } } } //replacement for Process.waitFor() } catch (IOException e){ log.warn(sm.getString("cgiServlet.runFail"), e); throw e; } finally { // Close the header reader if (cgiHeaderReader != null) { try { cgiHeaderReader.close(); } catch (IOException ioe) { log.warn(sm.getString("cgiServlet.runHeaderReaderFail"), ioe); } } // Close the output stream if used if (cgiOutput != null) { try { cgiOutput.close(); } catch (IOException ioe) { log.warn(sm.getString("cgiServlet.runOutputStreamFail"), ioe); } } // Make sure the error stream reader has finished if (errReaderThread != null) { try { errReaderThread.join(stderrTimeout); } catch (InterruptedException e) { log.warn(sm.getString("cgiServlet.runReaderInterrupt")); } } if (proc != null){ proc.destroy(); proc = null; } } } /** * Parses the Status-Line and extracts the status code. * * @param line The HTTP Status-Line (RFC2616, section 6.1) * @return The extracted status code or the code representing an * internal error if a valid status code cannot be extracted. */ private int getSCFromHttpStatusLine(String line) { int statusStart = line.indexOf(' ') + 1; if (statusStart < 1 || line.length() < statusStart + 3) { // Not a valid HTTP Status-Line log.warn(sm.getString("cgiServlet.runInvalidStatus", line)); return HttpServletResponse.SC_INTERNAL_SERVER_ERROR; } String status = line.substring(statusStart, statusStart + 3); int statusCode; try { statusCode = Integer.parseInt(status); } catch (NumberFormatException nfe) { // Not a valid status code log.warn(sm.getString("cgiServlet.runInvalidStatus", status)); return HttpServletResponse.SC_INTERNAL_SERVER_ERROR; } return statusCode; } /** * Parses the CGI Status Header value and extracts the status code. * * @param value The CGI Status value of the form <code> * digit digit digit SP reason-phrase</code> * @return The extracted status code or the code representing an * internal error if a valid status code cannot be extracted. 
*/ private int getSCFromCGIStatusHeader(String value) { if (value.length() < 3) { // Not a valid status value log.warn(sm.getString("cgiServlet.runInvalidStatus", value)); return HttpServletResponse.SC_INTERNAL_SERVER_ERROR; } String status = value.substring(0, 3); int statusCode; try { statusCode = Integer.parseInt(status); } catch (NumberFormatException nfe) { // Not a valid status code log.warn(sm.getString("cgiServlet.runInvalidStatus", status)); return HttpServletResponse.SC_INTERNAL_SERVER_ERROR; } return statusCode; } private void sendToLog(BufferedReader rdr) { String line = null; int lineCount = 0 ; try { while ((line = rdr.readLine()) != null) { log.warn(sm.getString("cgiServlet.runStdErr", line)); lineCount++ ; } } catch (IOException e) { log.warn(sm.getString("cgiServlet.runStdErrFail"), e); } finally { try { rdr.close(); } catch (IOException e) { log.warn(sm.getString("cgiServlet.runStdErrFail"), e); } } if (lineCount > 0) { log.warn(sm.getString("cgiServlet.runStdErrCount", Integer.valueOf(lineCount))); } } } //class CGIRunner /** * This is an input stream specifically for reading HTTP headers. It reads * upto and including the two blank lines terminating the headers. It * allows the content to be read using bytes or characters as appropriate. */ protected static class HTTPHeaderInputStream extends InputStream { private static final int STATE_CHARACTER = 0; private static final int STATE_FIRST_CR = 1; private static final int STATE_FIRST_LF = 2; private static final int STATE_SECOND_CR = 3; private static final int STATE_HEADER_END = 4; private final InputStream input; private int state; HTTPHeaderInputStream(InputStream theInput) { input = theInput; state = STATE_CHARACTER; } /** * @see java.io.InputStream#read() */ @Override public int read() throws IOException { if (state == STATE_HEADER_END) { return -1; } int i = input.read(); // Update the state // State machine looks like this // // -------->-------- // | (CR) | // | | // CR1--->--- | // | | | // ^(CR) |(LF) | // | | | // CHAR--->--LF1--->--EOH // (LF) | (LF) | // |(CR) ^(LF) // | | // (CR2)-->--- if (i == 10) { // LF switch(state) { case STATE_CHARACTER: state = STATE_FIRST_LF; break; case STATE_FIRST_CR: state = STATE_FIRST_LF; break; case STATE_FIRST_LF: case STATE_SECOND_CR: state = STATE_HEADER_END; break; } } else if (i == 13) { // CR switch(state) { case STATE_CHARACTER: state = STATE_FIRST_CR; break; case STATE_FIRST_CR: state = STATE_HEADER_END; break; case STATE_FIRST_LF: state = STATE_SECOND_CR; break; } } else { state = STATE_CHARACTER; } return i; } } // class HTTPHeaderInputStream } //class CGIServlet
[]
[]
[]
[]
[]
java
0
0
ffbot/bot/asgi.py
#coding=utf-8 import os import django from channels.routing import get_default_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bot.settings') django.setup() application = get_default_application()
[]
[]
[]
[]
[]
python
0
0
setup.py
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import os from setuptools import setup, find_packages, Extension import sys if sys.version_info < (3, 6): sys.exit('Sorry, Python >= 3.6 is required for fairseq.') with open('README.md') as f: readme = f.read() if sys.platform == 'darwin': extra_compile_args = ['-stdlib=libc++', '-O3'] else: extra_compile_args = ['-std=c++11', '-O3'] class NumpyExtension(Extension): """Source: https://stackoverflow.com/a/54128391""" def __init__(self, *args, **kwargs): self.__include_dirs = [] super().__init__(*args, **kwargs) @property def include_dirs(self): import numpy return self.__include_dirs + [numpy.get_include()] @include_dirs.setter def include_dirs(self, dirs): self.__include_dirs = dirs extensions = [ Extension( 'fairseq.libbleu', sources=[ 'fairseq/clib/libbleu/libbleu.cpp', 'fairseq/clib/libbleu/module.cpp', ], extra_compile_args=extra_compile_args, ), NumpyExtension( 'fairseq.data.data_utils_fast', sources=['fairseq/data/data_utils_fast.pyx'], language='c++', extra_compile_args=extra_compile_args, ), NumpyExtension( 'fairseq.data.token_block_utils_fast', sources=['fairseq/data/token_block_utils_fast.pyx'], language='c++', extra_compile_args=extra_compile_args, ), ] cmdclass = {} try: # torch is not available when generating docs from torch.utils import cpp_extension extensions.extend([ cpp_extension.CppExtension( 'fairseq.libnat', sources=[ 'fairseq/clib/libnat/edit_dist.cpp', ], ) ]) if 'CUDA_HOME' in os.environ: extensions.extend([ cpp_extension.CppExtension( 'fairseq.libnat_cuda', sources=[ 'fairseq/clib/libnat_cuda/edit_dist.cu', 'fairseq/clib/libnat_cuda/binding.cpp' ], )]) cmdclass['build_ext'] = cpp_extension.BuildExtension except ImportError: pass if 'READTHEDOCS' in os.environ: # don't build extensions when generating docs extensions = [] if 'build_ext' in cmdclass: del cmdclass['build_ext'] # use CPU build of PyTorch dependency_links = [ 'https://download.pytorch.org/whl/cpu/torch-1.3.0%2Bcpu-cp36-cp36m-linux_x86_64.whl' ] else: dependency_links = [] if 'clean' in sys.argv[1:]: # Source: https://bit.ly/2NLVsgE print("deleting Cython files...") import subprocess subprocess.run(['rm -f fairseq/*.so fairseq/**/*.so fairseq/*.pyd fairseq/**/*.pyd'], shell=True) setup( name='fairseq', version='0.9.0', description='Facebook AI Research Sequence-to-Sequence Toolkit', url='https://github.com/pytorch/fairseq', classifiers=[ 'Intended Audience :: Science/Research', 'License :: OSI Approved :: MIT License', 'Programming Language :: Python :: 3.6', 'Topic :: Scientific/Engineering :: Artificial Intelligence', ], long_description=readme, long_description_content_type='text/markdown', setup_requires=[ 'cython', 'numpy', 'setuptools>=18.0', ], install_requires=[ 'cffi', 'cython', 'numpy', 'regex', 'sacrebleu', # 'torch', 'tqdm', ], dependency_links=dependency_links, packages=find_packages(exclude=['scripts', 'tests']), ext_modules=extensions, test_suite='tests', entry_points={ 'console_scripts': [ 'fairseq-eval-lm = fairseq_cli.eval_lm:cli_main', 'fairseq-generate = fairseq_cli.generate:cli_main', 'fairseq-interactive = fairseq_cli.interactive:cli_main', 'fairseq-preprocess = fairseq_cli.preprocess:cli_main', 'fairseq-score = fairseq_cli.score:cli_main', 'fairseq-train = fairseq_cli.train:cli_main', 'fairseq-validate = fairseq_cli.validate:cli_main', ], }, cmdclass=cmdclass, zip_safe=False, )
[]
[]
[]
[]
[]
python
0
0
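The setup.py record above gates its optional native extensions on environment variables: extensions are skipped entirely under READTHEDOCS, and the CUDA extension is only added when CUDA_HOME is present. The sketch below is a minimal, hypothetical restatement of that gating pattern (the function and parameter names are illustrative, not part of the record).

import os

def select_extensions(cpu_extensions, cuda_extensions):
    """Return the extension list to build, mirroring the gating in the record above."""
    if 'READTHEDOCS' in os.environ:
        # Documentation builds skip all native extensions.
        return []
    selected = list(cpu_extensions)
    if 'CUDA_HOME' in os.environ:
        # A CUDA toolkit is available, so GPU extensions can be compiled as well.
        selected.extend(cuda_extensions)
    return selected

# Example usage with placeholder extension objects:
# extensions = select_extensions(cpu_extensions=[...], cuda_extensions=[...])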
src/main/java/com/openbank/util/VariablesExpander.java
/* * All rights reserved. * * This software is distributable under the BSD license. * See the terms of the BSD license in the documentation provided with this software. */ package com.openbank.util; import net.thucydides.core.util.SystemEnvironmentVariables; import java.io.Serializable; import java.util.Properties; /** * This class is used to expand variables in the format <code>${variable}</code>$, using values from * {@link System#getenv()}, {@link System#getProperties()} and the <code>Properties</code> object specified in the * constructor (in inverse order; first match is accepted). * * @author Luigi R. Viggiano */ public class VariablesExpander implements Serializable { private static final long serialVersionUID = 1L; public static VariablesExpander INSTANCE; private final StrSubstitutor substitutor; private VariablesExpander() { Properties variables = new Properties(); variables.putAll(SystemEnvironmentVariables.createEnvironmentVariables().getProperties()); variables.putAll(System.getenv()); variables.putAll(System.getProperties()); setDefaultProperty(variables,"mongo.server", "localhost:27017"); //** setDefaultProperty(variables,"mongo.db", "config"); setDefaultProperty(variables,"country", "es"); setDefaultProperty(variables,"env", "qa"); substitutor = new StrSubstitutor(variables); } private void setDefaultProperty(Properties existing,String key, String value) { if (!existing.containsKey(key)) { existing.put(key, value); } } public static VariablesExpander get() { if (INSTANCE == null) INSTANCE = new VariablesExpander(); return INSTANCE; } private static String expandUserHome(String text) { if (text.equals("~")) { return System.getProperty("user.home"); } else if (text.indexOf("~/") != 0 && text.indexOf("file:~/") != 0 && text.indexOf("jar:file:~/") != 0) { return text.indexOf("~\\") != 0 && text.indexOf("file:~\\") != 0 && text.indexOf("jar:file:~\\") != 0 ? text : text.replaceFirst("~\\\\", fixBackslashForRegex(System.getProperty("user.home")) + "\\\\"); } else { return text.replaceFirst("~/", fixBackslashForRegex(System.getProperty("user.home")) + "/"); } } private static String fixBackslashForRegex(String text) { return text.replace("\\", "\\\\"); } public String expand(String path) { String expanded = expandUserHome(path); return replace(expanded); } public String replace(String str) { return substitutor.replace(str); } }
[]
[]
[]
[]
[]
java
0
0
backend/weatherwarner/wsgi.py
""" WSGI config for weatherwarner project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "weatherwarner.settings.dev") application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
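Both Django entry points above (bot/asgi.py and weatherwarner/wsgi.py) use os.environ.setdefault, so an externally exported DJANGO_SETTINGS_MODULE takes precedence over the in-code default. A small sketch of that precedence follows; the production settings path is a hypothetical example, only the dev default comes from the record.

import os

def resolve_settings_module(default='weatherwarner.settings.dev'):
    # setdefault writes the default only when the variable is not already set, so
    # e.g. `export DJANGO_SETTINGS_MODULE=weatherwarner.settings.prod` (hypothetical)
    # would win over the default baked into the module.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', default)
    return os.environ['DJANGO_SETTINGS_MODULE']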
clientv2/main.go
// Copyright 2018 The Chubao Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or // implied. See the License for the specific language governing // permissions and limitations under the License. package main // // Usage: ./client -c fuse.json & // // Default mountpoint is specified in fuse.json, which is "/mnt". // import ( "context" "flag" "fmt" syslog "log" "net/http" _ "net/http/pprof" "os" "os/signal" "path" "path/filepath" "runtime" "strings" "syscall" "github.com/chubaofs/chubaofs/sdk/master" sysutil "github.com/chubaofs/chubaofs/util/sys" "github.com/jacobsa/daemonize" "github.com/jacobsa/fuse" "github.com/jacobsa/fuse/fuseutil" cfs "github.com/chubaofs/chubaofs/clientv2/fs" "github.com/chubaofs/chubaofs/proto" "github.com/chubaofs/chubaofs/util/config" "github.com/chubaofs/chubaofs/util/errors" "github.com/chubaofs/chubaofs/util/exporter" "github.com/chubaofs/chubaofs/util/log" ) const ( MaxReadAhead = 512 * 1024 ) const ( LoggerDir = "client" LoggerPrefix = "client" LoggerOutput = "output.log" ModuleName = "fuseclient" ConfigKeyExporterPort = "exporterKey" ControlCommandSetRate = "/rate/set" ControlCommandGetRate = "/rate/get" ) var ( CommitID string BranchName string BuildTime string ) var ( configFile = flag.String("c", "", "FUSE client config file") configVersion = flag.Bool("v", false, "show version") configForeground = flag.Bool("f", false, "run foreground") ) var GlobalMountOptions []proto.MountOption func init() { GlobalMountOptions = proto.NewMountOptions() proto.InitMountOptions(GlobalMountOptions) } func main() { runtime.GOMAXPROCS(runtime.NumCPU()) flag.Parse() if *configVersion { fmt.Printf("ChubaoFS Client v2\n") fmt.Printf("Branch: %s\n", BranchName) fmt.Printf("Commit: %s\n", CommitID) fmt.Printf("Build: %s %s %s %s\n", runtime.Version(), runtime.GOOS, runtime.GOARCH, BuildTime) os.Exit(0) } if !*configForeground { if err := startDaemon(); err != nil { fmt.Printf("Mount failed: %v\n", err) os.Exit(1) } os.Exit(0) } /* * We are in daemon from here. * Must notify the parent process through SignalOutcome anyway. 
*/ cfg, _ := config.LoadConfigFile(*configFile) opt, err := parseMountOption(cfg) if err != nil { daemonize.SignalOutcome(err) os.Exit(1) } exporter.Init(ModuleName, opt.Config) level := parseLogLevel(opt.Loglvl) _, err = log.InitLog(opt.Logpath, LoggerPrefix, level, nil) if err != nil { daemonize.SignalOutcome(err) os.Exit(1) } defer log.LogFlush() outputFilePath := path.Join(opt.Logpath, LoggerPrefix, LoggerOutput) outputFile, err := os.OpenFile(outputFilePath, os.O_CREATE|os.O_RDWR|os.O_APPEND, 0666) if err != nil { daemonize.SignalOutcome(err) os.Exit(1) } defer func() { outputFile.Sync() outputFile.Close() }() syslog.SetOutput(outputFile) syslog.Println("*** Final Mount Options ***") for _, o := range GlobalMountOptions { syslog.Println(o) } syslog.Println("*** End ***") if err = sysutil.RedirectFD(int(outputFile.Fd()), int(os.Stderr.Fd())); err != nil { daemonize.SignalOutcome(err) os.Exit(1) } registerInterceptedSignal(opt.MountPoint) if err = checkPermission(opt); err != nil { syslog.Println("check permission failed: ", err) log.LogFlush() _ = daemonize.SignalOutcome(err) os.Exit(1) } mfs, err := mount(opt) if err != nil { log.LogFlush() daemonize.SignalOutcome(err) os.Exit(1) } else { daemonize.SignalOutcome(nil) } if err = mfs.Join(context.Background()); err != nil { log.LogFlush() syslog.Printf("mfs Joint returns error: %v", err) os.Exit(1) } } func startDaemon() error { cmdPath, err := os.Executable() if err != nil { return fmt.Errorf("startDaemon failed: cannot get absolute command path, err(%v)", err) } configPath, err := filepath.Abs(*configFile) if err != nil { return fmt.Errorf("startDaemon failed: cannot get absolute command path of config file(%v) , err(%v)", *configFile, err) } args := []string{"-f"} args = append(args, "-c") args = append(args, configPath) env := []string{ fmt.Sprintf("PATH=%s", os.Getenv("PATH")), } err = daemonize.Run(cmdPath, args, env, os.Stdout) if err != nil { return fmt.Errorf("startDaemon failed: daemon start failed, cmd(%v) args(%v) env(%v) err(%v)\n", cmdPath, args, env, err) } return nil } func mount(opt *proto.MountOptions) (*fuse.MountedFileSystem, error) { super, err := cfs.NewSuper(opt) if err != nil { log.LogError(errors.Stack(err)) return nil, err } http.HandleFunc(ControlCommandSetRate, super.SetRate) http.HandleFunc(ControlCommandGetRate, super.GetRate) http.HandleFunc(log.SetLogLevelPath, log.SetLogLevel) go func() { fmt.Println(http.ListenAndServe(":"+opt.Profport, nil)) }() exporter.RegistConsul(super.ClusterName(), ModuleName, opt.Config) server := fuseutil.NewFileSystemServer(super) mntcfg := &fuse.MountConfig{ FSName: "chubaofs-" + opt.Volname, Subtype: "chubaofs", ReadOnly: opt.Rdonly, DisableWritebackCaching: true, } if opt.WriteCache { mntcfg.DisableWritebackCaching = false } // define extra options mntcfg.Options = make(map[string]string) mntcfg.Options["allow_other"] = "" mfs, err := fuse.Mount(opt.MountPoint, server, mntcfg) if err != nil { return nil, err } return mfs, nil } func registerInterceptedSignal(mnt string) { sigC := make(chan os.Signal, 1) signal.Notify(sigC, syscall.SIGINT, syscall.SIGTERM) go func() { sig := <-sigC syslog.Printf("Killed due to a received signal (%v)\n", sig) }() } func parseMountOption(cfg *config.Config) (*proto.MountOptions, error) { var err error opt := new(proto.MountOptions) opt.Config = cfg proto.ParseMountOptions(GlobalMountOptions, cfg) rawmnt := GlobalMountOptions[proto.MountPoint].GetString() opt.MountPoint, err = filepath.Abs(rawmnt) if err != nil { return nil, errors.Trace(err, 
"invalide mount point (%v) ", rawmnt) } opt.Volname = GlobalMountOptions[proto.VolName].GetString() opt.Owner = GlobalMountOptions[proto.Owner].GetString() opt.Master = GlobalMountOptions[proto.Master].GetString() opt.Logpath = GlobalMountOptions[proto.LogDir].GetString() opt.Loglvl = GlobalMountOptions[proto.LogLevel].GetString() opt.Profport = GlobalMountOptions[proto.ProfPort].GetString() opt.IcacheTimeout = GlobalMountOptions[proto.IcacheTimeout].GetInt64() opt.LookupValid = GlobalMountOptions[proto.LookupValid].GetInt64() opt.AttrValid = GlobalMountOptions[proto.AttrValid].GetInt64() opt.ReadRate = GlobalMountOptions[proto.ReadRate].GetInt64() opt.WriteRate = GlobalMountOptions[proto.WriteRate].GetInt64() opt.EnSyncWrite = GlobalMountOptions[proto.EnSyncWrite].GetInt64() opt.AutoInvalData = GlobalMountOptions[proto.AutoInvalData].GetInt64() opt.UmpDatadir = GlobalMountOptions[proto.WarnLogDir].GetString() opt.Rdonly = GlobalMountOptions[proto.Rdonly].GetBool() opt.WriteCache = GlobalMountOptions[proto.WriteCache].GetBool() opt.KeepCache = GlobalMountOptions[proto.KeepCache].GetBool() opt.FollowerRead = GlobalMountOptions[proto.FollowerRead].GetBool() opt.Authenticate = GlobalMountOptions[proto.Authenticate].GetBool() if opt.Authenticate { opt.TicketMess.ClientKey = GlobalMountOptions[proto.ClientKey].GetString() ticketHostConfig := GlobalMountOptions[proto.TicketHost].GetString() ticketHosts := strings.Split(ticketHostConfig, ",") opt.TicketMess.TicketHosts = ticketHosts opt.TicketMess.EnableHTTPS = GlobalMountOptions[proto.EnableHTTPS].GetBool() if opt.TicketMess.EnableHTTPS { opt.TicketMess.CertFile = GlobalMountOptions[proto.CertFile].GetString() } } opt.TokenKey = GlobalMountOptions[proto.TokenKey].GetString() opt.AccessKey = GlobalMountOptions[proto.AccessKey].GetString() opt.SecretKey = GlobalMountOptions[proto.SecretKey].GetString() if opt.MountPoint == "" || opt.Volname == "" || opt.Owner == "" || opt.Master == "" { return nil, errors.New(fmt.Sprintf("invalid config file: lack of mandatory fields, mountPoint(%v), volName(%v), owner(%v), masterAddr(%v)", opt.MountPoint, opt.Volname, opt.Owner, opt.Master)) } return opt, nil } func checkPermission(opt *proto.MountOptions) (err error) { var mc = master.NewMasterClientFromString(opt.Master, false) // Check token permission var info *proto.VolStatInfo if info, err = mc.ClientAPI().GetVolumeStat(opt.Volname); err != nil { return } if info.EnableToken { var token *proto.Token if token, err = mc.ClientAPI().GetToken(opt.Volname, opt.TokenKey); err != nil { log.LogWarnf("checkPermission: get token type failed: volume(%v) tokenKey(%v) err(%v)", opt.Volname, opt.TokenKey, err) return } log.LogInfof("checkPermission: get token: token(%v)", token) opt.Rdonly = token.TokenType == int8(proto.ReadOnlyToken) || opt.Rdonly } // Check user access policy is enabled if opt.AccessKey != "" { var userInfo *proto.UserInfo if userInfo, err = mc.UserAPI().GetAKInfo(opt.AccessKey); err != nil { return } if userInfo.SecretKey != opt.SecretKey { err = proto.ErrNoPermission return } var policy = userInfo.Policy if policy.IsOwn(opt.Volname) { return } if policy.IsAuthorized(opt.Volname, proto.POSIXWriteAction) && policy.IsAuthorized(opt.Volname, proto.POSIXReadAction) { return } if policy.IsAuthorized(opt.Volname, proto.POSIXReadAction) && !policy.IsAuthorized(opt.Volname, proto.POSIXWriteAction) { opt.Rdonly = true return } err = proto.ErrNoPermission return } return } // ParseLogLevel returns the log level based on the given string. 
func parseLogLevel(loglvl string) log.Level { var level log.Level switch strings.ToLower(loglvl) { case "debug": level = log.DebugLevel case "info": level = log.InfoLevel case "warn": level = log.WarnLevel case "error": level = log.ErrorLevel default: level = log.ErrorLevel } return level }
[ "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
train_senet_cpn_onebyone.py
# Copyright 2018 Changan Wang # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================= from __future__ import absolute_import from __future__ import division from __future__ import print_function import os import sys import numpy as np #from scipy.misc import imread, imsave, imshow, imresize import tensorflow as tf from net import seresnet_cpn as cpn from utility import train_helper from utility import mertric from preprocessing import preprocessing from preprocessing import dataset import config # hardware related configuration tf.app.flags.DEFINE_integer( 'num_readers', 16,#16 'The number of parallel readers that read data from the dataset.') tf.app.flags.DEFINE_integer( 'num_preprocessing_threads', 48,#48 'The number of threads used to create the batches.') tf.app.flags.DEFINE_integer( 'num_cpu_threads', 0, 'The number of cpu cores used to train.') tf.app.flags.DEFINE_float( 'gpu_memory_fraction', 1., 'GPU memory fraction to use.') # scaffold related configuration tf.app.flags.DEFINE_string( 'data_dir', '../Datasets/tfrecords',#'/media/rs/0E06CD1706CD0127/Kapok/Chi/Datasets/tfrecords', 'The directory where the dataset input data is stored.') tf.app.flags.DEFINE_string( 'dataset_name', '{}_????', 'The pattern of the dataset name to load.') tf.app.flags.DEFINE_string( 'model_dir', './logs_sext_cpn/', 'The parent directory where the model will be stored.') tf.app.flags.DEFINE_integer( 'log_every_n_steps', 10, 'The frequency with which logs are print.') tf.app.flags.DEFINE_integer( 'save_summary_steps', 100, 'The frequency with which summaries are saved, in seconds.') tf.app.flags.DEFINE_integer( 'save_checkpoints_secs', 3600, 'The frequency with which the model is saved, in seconds.') # model related configuration tf.app.flags.DEFINE_integer( 'train_image_size', 384, 'The size of the input image for the model to use.') tf.app.flags.DEFINE_integer( 'heatmap_size', 96, 'The size of the output heatmap of the model.') tf.app.flags.DEFINE_string( 'backbone', 'seresnext50',#or seresnext50 seresnet50 'The backbone network to use for feature pyramid.') tf.app.flags.DEFINE_float( 'heatmap_sigma', 1., 'The sigma of Gaussian which generate the target heatmap.') tf.app.flags.DEFINE_float( 'bbox_border', 25., 'The nearest distance of the crop border to al keypoints.') tf.app.flags.DEFINE_integer( 'train_epochs', 50, 'The number of epochs to use for training.') tf.app.flags.DEFINE_integer( 'epochs_per_eval', 20, 'The number of training epochs to run between evaluations.') tf.app.flags.DEFINE_integer( 'batch_size', 10, 'Batch size for training and evaluation.') tf.app.flags.DEFINE_integer( 'xt_batch_size', 10, 'Batch size for training and evaluation.') tf.app.flags.DEFINE_boolean( 'use_ohkm', True, 'Wether we will use the ohkm for hard keypoints.') tf.app.flags.DEFINE_string( 'data_format', 'channels_first', # 'channels_first' or 'channels_last' 'A flag to override the data format used in the model. 
channels_first ' 'provides a performance boost on GPU but is not always compatible ' 'with CPU. If left unspecified, the data format will be chosen ' 'automatically based on whether TensorFlow was built for CPU or GPU.') # optimizer related configuration tf.app.flags.DEFINE_integer( 'tf_random_seed', 20180417, 'Random seed for TensorFlow initializers.') tf.app.flags.DEFINE_float( 'weight_decay', 1e-5, 'The weight decay on the model weights.') tf.app.flags.DEFINE_float( 'mse_weight', 1., 'The weight decay on the model weights.') tf.app.flags.DEFINE_float( 'momentum', 0.9, 'The momentum for the MomentumOptimizer and RMSPropOptimizer.') tf.app.flags.DEFINE_float('learning_rate', 1e-4, 'Initial learning rate.')#1e-3 tf.app.flags.DEFINE_float( 'end_learning_rate', 0.000001, 'The minimal end learning rate used by a polynomial decay learning rate.') tf.app.flags.DEFINE_float( 'warmup_learning_rate', 0.00001, 'The start warm-up learning rate to avoid NAN.') tf.app.flags.DEFINE_integer( 'warmup_steps', 100, 'The total steps to warm-up.') # for learning rate piecewise_constant decay tf.app.flags.DEFINE_string( 'decay_boundaries', '2, 3', 'Learning rate decay boundaries by global_step (comma-separated list).') tf.app.flags.DEFINE_string( 'lr_decay_factors', '1, 0.5, 0.1', 'The values of learning_rate decay factor for each segment between boundaries (comma-separated list).') # checkpoint related configuration tf.app.flags.DEFINE_string( 'checkpoint_path', './model', 'The path to a checkpoint from which to fine-tune.') tf.app.flags.DEFINE_string( 'checkpoint_model_scope', '', 'Model scope in the checkpoint. None if the same as the trained model.') tf.app.flags.DEFINE_string( #'blouse', 'dress', 'outwear', 'skirt', 'trousers', 'all' 'model_scope', None, 'Model scope name used to replace the name_scope in checkpoint.') tf.app.flags.DEFINE_string( 'checkpoint_exclude_scopes', None, 'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.') tf.app.flags.DEFINE_boolean( 'ignore_missing_vars', True, 'When restoring a checkpoint would ignore missing variables.') tf.app.flags.DEFINE_boolean( 'run_on_cloud', False, 'Wether we will train on cloud.') tf.app.flags.DEFINE_boolean( 'seq_train', False, 'Wether we will train a sequence model.') tf.app.flags.DEFINE_string(# 'model_to_train', 'blouse, dress, outwear, skirt, trousers', #'all, blouse, dress, outwear, skirt, trousers', 'skirt, dress, outwear, trousers', 'The sub-model to train (comma-separated list).') FLAGS = tf.app.flags.FLAGS #--model_scope=blouse --checkpoint_path=./logs/all --data_format=channels_last --batch_size=1 def input_pipeline(is_training=True, model_scope=FLAGS.model_scope, num_epochs=FLAGS.epochs_per_eval): if 'all' in model_scope: lnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.global_norm_key, dtype=tf.int64), tf.constant(config.global_norm_lvalues, dtype=tf.int64)), 0) rnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.global_norm_key, dtype=tf.int64), tf.constant(config.global_norm_rvalues, dtype=tf.int64)), 1) else: lnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.local_norm_key, dtype=tf.int64), tf.constant(config.local_norm_lvalues, dtype=tf.int64)), 0) rnorm_table = tf.contrib.lookup.HashTable(tf.contrib.lookup.KeyValueTensorInitializer(tf.constant(config.local_norm_key, dtype=tf.int64), tf.constant(config.local_norm_rvalues, 
dtype=tf.int64)), 1) preprocessing_fn = lambda org_image, classid, shape, key_x, key_y, key_v: preprocessing.preprocess_image(org_image, classid, shape, FLAGS.train_image_size, FLAGS.train_image_size, key_x, key_y, key_v, (lnorm_table, rnorm_table), is_training=is_training, data_format=('NCHW' if FLAGS.data_format=='channels_first' else 'NHWC'), category=(model_scope if 'all' not in model_scope else '*'), bbox_border=FLAGS.bbox_border, heatmap_sigma=FLAGS.heatmap_sigma, heatmap_size=FLAGS.heatmap_size) images, shape, classid, targets, key_v, isvalid, norm_value = dataset.slim_get_split(FLAGS.data_dir, preprocessing_fn, (FLAGS.xt_batch_size if 'seresnext50' in FLAGS.backbone else FLAGS.batch_size), FLAGS.num_readers, FLAGS.num_preprocessing_threads, num_epochs=num_epochs, is_training=is_training, file_pattern=FLAGS.dataset_name, category=(model_scope if 'all' not in model_scope else '*'), reader=None) return images, {'targets': targets, 'key_v': key_v, 'shape': shape, 'classid': classid, 'isvalid': isvalid, 'norm_value': norm_value} if config.PRED_DEBUG: from scipy.misc import imread, imsave, imshow, imresize def save_image_with_heatmap(image, height, width, heatmap_size, targets, pred_heatmap, indR, indG, indB): if not hasattr(save_image_with_heatmap, "counter"): save_image_with_heatmap.counter = 0 # it doesn't exist yet, so initialize it save_image_with_heatmap.counter += 1 img_to_save = np.array(image.tolist()) + 128 #print(img_to_save.shape) img_to_save = img_to_save.astype(np.uint8) heatmap0 = np.sum(targets[indR, ...], axis=0).astype(np.uint8) heatmap1 = np.sum(targets[indG, ...], axis=0).astype(np.uint8) heatmap2 = np.sum(targets[indB, ...], axis=0).astype(np.uint8) if len(indB) > 0 else np.zeros((heatmap_size, heatmap_size), dtype=np.float32) img_to_save = imresize(img_to_save, (height, width), interp='lanczos') heatmap0 = imresize(heatmap0, (height, width), interp='lanczos') heatmap1 = imresize(heatmap1, (height, width), interp='lanczos') heatmap2 = imresize(heatmap2, (height, width), interp='lanczos') img_to_save = img_to_save/2 img_to_save[:,:,0] = np.clip((img_to_save[:,:,0] + heatmap0 + heatmap2), 0, 255) img_to_save[:,:,1] = np.clip((img_to_save[:,:,1] + heatmap1 + heatmap2), 0, 255) #img_to_save[:,:,2] = np.clip((img_to_save[:,:,2]/4. 
+ heatmap2), 0, 255) file_name = 'targets_{}.jpg'.format(save_image_with_heatmap.counter) imsave(os.path.join(config.DEBUG_DIR, file_name), img_to_save.astype(np.uint8)) pred_heatmap = np.array(pred_heatmap.tolist()) #print(pred_heatmap.shape) for ind in range(pred_heatmap.shape[0]): img = pred_heatmap[ind] img = img - img.min() img *= 255.0/img.max() file_name = 'heatmap_{}_{}.jpg'.format(save_image_with_heatmap.counter, ind) imsave(os.path.join(config.DEBUG_DIR, file_name), img.astype(np.uint8)) return save_image_with_heatmap.counter def get_keypoint(image, targets, predictions, heatmap_size, height, width, category, clip_at_zero=True, data_format='channels_last', name=None): predictions = tf.reshape(predictions, [1, -1, heatmap_size*heatmap_size]) pred_max = tf.reduce_max(predictions, axis=-1) pred_indices = tf.argmax(predictions, axis=-1) pred_x, pred_y = tf.cast(tf.floormod(pred_indices, heatmap_size), tf.float32), tf.cast(tf.floordiv(pred_indices, heatmap_size), tf.float32) width, height = tf.cast(width, tf.float32), tf.cast(height, tf.float32) pred_x, pred_y = pred_x * width / tf.cast(heatmap_size, tf.float32), pred_y * height / tf.cast(heatmap_size, tf.float32) if clip_at_zero: pred_x, pred_y = pred_x * tf.cast(pred_max>0, tf.float32), pred_y * tf.cast(pred_max>0, tf.float32) pred_x = pred_x * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (width / 2.) pred_y = pred_y * tf.cast(pred_max>0, tf.float32) + tf.cast(pred_max<=0, tf.float32) * (height / 2.) if config.PRED_DEBUG: pred_indices_ = tf.squeeze(pred_indices) image_ = tf.squeeze(image) * 255. pred_heatmap = tf.one_hot(pred_indices_, heatmap_size*heatmap_size, on_value=1., off_value=0., axis=-1, dtype=tf.float32) pred_heatmap = tf.reshape(pred_heatmap, [-1, heatmap_size, heatmap_size]) if data_format == 'channels_first': image_ = tf.transpose(image_, perm=(1, 2, 0)) save_image_op = tf.py_func(save_image_with_heatmap, [image_, height, width, heatmap_size, tf.reshape(pred_heatmap * 255., [-1, heatmap_size, heatmap_size]), tf.reshape(predictions, [-1, heatmap_size, heatmap_size]), config.left_right_group_map[category][0], config.left_right_group_map[category][1], config.left_right_group_map[category][2]], tf.int64, stateful=True) with tf.control_dependencies([save_image_op]): pred_x, pred_y = pred_x * 1., pred_y * 1. return pred_x, pred_y def gaussian_blur(inputs, inputs_filters, sigma, data_format, name=None): with tf.name_scope(name, "gaussian_blur", [inputs]): data_format_ = 'NHWC' if data_format=='channels_last' else 'NCHW' if data_format_ == 'NHWC': inputs = tf.transpose(inputs, [0, 2, 3, 1]) ksize = int(6 * sigma + 1.) x = tf.expand_dims(tf.range(ksize, delta=1, dtype=tf.float32), axis=1) y = tf.transpose(x, [1, 0]) kernel_matrix = tf.exp(- ((x - ksize/2.) ** 2 + (y - ksize/2.) 
** 2) / (2 * sigma ** 2)) #print(kernel_matrix) kernel_filter = tf.reshape(kernel_matrix, [ksize, ksize, 1, 1]) kernel_filter = tf.tile(kernel_filter, [1, 1, inputs_filters, 1]) #kernel_filter = tf.transpose(kernel_filter, [1, 0, 2, 3]) outputs = tf.nn.depthwise_conv2d(inputs, kernel_filter, strides=[1, 1, 1, 1], padding='SAME', data_format=data_format_, name='blur') if data_format_ == 'NHWC': outputs = tf.transpose(outputs, [0, 3, 1, 2]) return outputs cpn_backbone = cpn.cascaded_pyramid_net if 'seresnext50' in FLAGS.backbone: cpn_backbone = cpn.xt_cascaded_pyramid_net def keypoint_model_fn(features, labels, mode, params): targets = labels['targets'] shape = labels['shape'] classid = labels['classid'] key_v = labels['key_v'] isvalid = labels['isvalid'] norm_value = labels['norm_value'] cur_batch_size = tf.shape(features)[0] #features= tf.ones_like(features) with tf.variable_scope(params['model_scope'], default_name=None, values=[features], reuse=tf.AUTO_REUSE): pred_outputs = cpn_backbone(features, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], params['heatmap_size'], (mode == tf.estimator.ModeKeys.TRAIN), params['data_format']) if params['data_format'] == 'channels_last': pred_outputs = [tf.transpose(pred_outputs[ind], [0, 3, 1, 2], name='outputs_trans_{}'.format(ind)) for ind in list(range(len(pred_outputs)))] score_map = pred_outputs[-1] pred_x, pred_y = get_keypoint(features, targets, score_map, params['heatmap_size'], params['train_image_size'], params['train_image_size'], (params['model_scope'] if 'all' not in params['model_scope'] else '*'), clip_at_zero=True, data_format=params['data_format']) # this is important!!! targets = 255. * targets blur_list = [1., 1.37, 1.73, 2.4, None]#[1., 1.5, 2., 3., None] #blur_list = [None, None, None, None, None] targets_list = [] for sigma in blur_list: if sigma is None: targets_list.append(targets) else: # always channels first foe targets targets_list.append(gaussian_blur(targets, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], sigma, params['data_format'], 'blur_{}'.format(sigma))) # print(key_v) #targets = tf.reshape(255.*tf.one_hot(tf.ones_like(key_v,tf.int64)*(params['heatmap_size']*params['heatmap_size']//2+params['heatmap_size']), params['heatmap_size']*params['heatmap_size']), [cur_batch_size,-1,params['heatmap_size'],params['heatmap_size']]) #norm_value = tf.ones_like(norm_value) # score_map = tf.reshape(tf.one_hot(tf.ones_like(key_v,tf.int64)*(31*64+31), params['heatmap_size']*params['heatmap_size']), [cur_batch_size,-1,params['heatmap_size'],params['heatmap_size']]) #with tf.control_dependencies([pred_x, pred_y]): ne_mertric = mertric.normalized_error(targets, score_map, norm_value, key_v, isvalid, cur_batch_size, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], params['heatmap_size'], params['train_image_size']) # last_pred_mse = tf.metrics.mean_squared_error(score_map, targets, # weights=1.0 / tf.cast(cur_batch_size, tf.float32), # name='last_pred_mse') # filter all invisible keypoint maybe better for this task # all_visible = tf.logical_and(key_v>0, isvalid>0) # targets_list = [tf.boolean_mask(targets_list[ind], all_visible) for ind in list(range(len(targets_list)))] # pred_outputs = [tf.boolean_mask(pred_outputs[ind], all_visible, name='boolean_mask_{}'.format(ind)) for ind in list(range(len(pred_outputs)))] all_visible = tf.expand_dims(tf.expand_dims(tf.cast(tf.logical_and(key_v>0, 
isvalid>0), tf.float32), axis=-1), axis=-1) targets_list = [targets_list[ind] * all_visible for ind in list(range(len(targets_list)))] pred_outputs = [pred_outputs[ind] * all_visible for ind in list(range(len(pred_outputs)))] sq_diff = tf.reduce_sum(tf.squared_difference(targets, pred_outputs[-1]), axis=-1) last_pred_mse = tf.metrics.mean_absolute_error(sq_diff, tf.zeros_like(sq_diff), name='last_pred_mse') metrics = {'normalized_error': ne_mertric, 'last_pred_mse':last_pred_mse} predictions = {'normalized_error': ne_mertric[1]} ne_mertric = tf.identity(ne_mertric[1], name='ne_mertric') base_learning_rate = params['learning_rate'] mse_loss_list = [] if params['use_ohkm']: base_learning_rate = 1. * base_learning_rate for pred_ind in list(range(len(pred_outputs) - 1)): mse_loss_list.append(0.5 * tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind], weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(pred_ind), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN))# SUM, SUM_OVER_BATCH_SIZE, default mean by all elements temp_loss = tf.reduce_mean(tf.reshape(tf.losses.mean_squared_error(targets_list[-1], pred_outputs[-1], weights=1.0, loss_collection=None, reduction=tf.losses.Reduction.NONE), [cur_batch_size, config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')], -1]), axis=-1) num_topk = config.class_num_joints[(params['model_scope'] if 'all' not in params['model_scope'] else '*')] // 2 gather_col = tf.nn.top_k(temp_loss, k=num_topk, sorted=True)[1] gather_row = tf.reshape(tf.tile(tf.reshape(tf.range(cur_batch_size), [-1, 1]), [1, num_topk]), [-1, 1]) gather_indcies = tf.stop_gradient(tf.stack([gather_row, tf.reshape(gather_col, [-1, 1])], axis=-1)) select_targets = tf.gather_nd(targets_list[-1], gather_indcies) select_heatmap = tf.gather_nd(pred_outputs[-1], gather_indcies) mse_loss_list.append(tf.losses.mean_squared_error(select_targets, select_heatmap, weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(len(pred_outputs) - 1), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN)) else: for pred_ind in list(range(len(pred_outputs))): mse_loss_list.append(tf.losses.mean_squared_error(targets_list[pred_ind], pred_outputs[pred_ind], weights=1.0 / tf.cast(cur_batch_size, tf.float32), scope='loss_{}'.format(pred_ind), loss_collection=None,#tf.GraphKeys.LOSSES, # mean all elements of all pixels in all batch reduction=tf.losses.Reduction.MEAN))# SUM, SUM_OVER_BATCH_SIZE, default mean by all elements mse_loss = tf.multiply(params['mse_weight'], tf.add_n(mse_loss_list), name='mse_loss') tf.summary.scalar('mse', mse_loss) tf.losses.add_loss(mse_loss) # bce_loss_list = [] # for pred_ind in list(range(len(pred_outputs))): # bce_loss_list.append(tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=pred_outputs[pred_ind], labels=targets_list[pred_ind]/255., name='loss_{}'.format(pred_ind)), name='loss_mean_{}'.format(pred_ind))) # mse_loss = tf.multiply(params['mse_weight'] / params['num_stacks'], tf.add_n(bce_loss_list), name='mse_loss') # tf.summary.scalar('mse', mse_loss) # tf.losses.add_loss(mse_loss) # Add weight decay to the loss. We exclude the batch norm variables because # doing so leads to a small improvement in accuracy. 
loss = mse_loss + params['weight_decay'] * tf.add_n([tf.nn.l2_loss(v) for v in tf.trainable_variables() if 'batch_normalization' not in v.name]) total_loss = tf.identity(loss, name='total_loss') tf.summary.scalar('loss', total_loss) if mode == tf.estimator.ModeKeys.EVAL: return tf.estimator.EstimatorSpec(mode=mode, loss=loss, predictions=predictions, eval_metric_ops=metrics) if mode == tf.estimator.ModeKeys.TRAIN: global_step = tf.train.get_or_create_global_step() lr_values = [params['warmup_learning_rate']] + [base_learning_rate * decay for decay in params['lr_decay_factors']] learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), [params['warmup_steps']] + [int(float(ep)*params['steps_per_epoch']) for ep in params['decay_boundaries']], lr_values) truncated_learning_rate = tf.maximum(learning_rate, tf.constant(params['end_learning_rate'], dtype=learning_rate.dtype), name='learning_rate') tf.summary.scalar('lr', truncated_learning_rate) optimizer = tf.train.MomentumOptimizer(learning_rate=truncated_learning_rate, momentum=params['momentum']) # Batch norm requires update_ops to be added as a train_op dependency. update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): train_op = optimizer.minimize(loss, global_step) else: train_op = None return tf.estimator.EstimatorSpec( mode=mode, predictions=predictions, loss=loss, train_op=train_op, eval_metric_ops=metrics, scaffold=tf.train.Scaffold(init_fn=train_helper.get_init_fn_for_scaffold_(params['checkpoint_path'], params['model_dir'], params['checkpoint_exclude_scopes'], params['model_scope'], params['checkpoint_model_scope'], params['ignore_missing_vars']))) def parse_comma_list(args): return [float(s.strip()) for s in args.split(',')] def sub_loop(model_fn, model_scope, model_dir, run_config, train_epochs, epochs_per_eval, lr_decay_factors, decay_boundaries, checkpoint_path=None, checkpoint_exclude_scopes='', checkpoint_model_scope='', ignore_missing_vars=True): steps_per_epoch = config.split_size[(model_scope if 'all' not in model_scope else '*')]['train'] // (FLAGS.xt_batch_size if 'seresnext50' in FLAGS.backbone else FLAGS.batch_size) fashionAI = tf.estimator.Estimator( model_fn=model_fn, model_dir=model_dir, config=run_config, params={ 'checkpoint_path': checkpoint_path, 'model_dir': model_dir, 'checkpoint_exclude_scopes': checkpoint_exclude_scopes, 'model_scope': model_scope, 'checkpoint_model_scope': checkpoint_model_scope, 'ignore_missing_vars': ignore_missing_vars, 'train_image_size': FLAGS.train_image_size, 'heatmap_size': FLAGS.heatmap_size, 'data_format': FLAGS.data_format, 'steps_per_epoch': steps_per_epoch, 'use_ohkm': FLAGS.use_ohkm, 'batch_size': (FLAGS.xt_batch_size if 'seresnext50' in FLAGS.backbone else FLAGS.batch_size), 'weight_decay': FLAGS.weight_decay, 'mse_weight': FLAGS.mse_weight, 'momentum': FLAGS.momentum, 'learning_rate': FLAGS.learning_rate, 'end_learning_rate': FLAGS.end_learning_rate, 'warmup_learning_rate': FLAGS.warmup_learning_rate, 'warmup_steps': FLAGS.warmup_steps, 'decay_boundaries': parse_comma_list(decay_boundaries), 'lr_decay_factors': parse_comma_list(lr_decay_factors), }) tf.gfile.MakeDirs(model_dir) tf.logging.info('Starting to train model {}.'.format(model_scope)) for _ in range(train_epochs // epochs_per_eval): tensors_to_log = { 'lr': 'learning_rate', 'loss': 'total_loss', 'mse': 'mse_loss', 'ne': 'ne_mertric', } logging_hook = tf.train.LoggingTensorHook(tensors=tensors_to_log, every_n_iter=FLAGS.log_every_n_steps, formatter=lambda 
dicts: '{}:'.format(model_scope) + (', '.join(['%s=%.6f' % (k, v) for k, v in dicts.items()]))) # FIXME: augment error:tensorflow.python.framework.errors_impl.InvalidArgumentError: indices[0] = 0 is not in [0, 0) tf.logging.info('Starting a training cycle.') fashionAI.train(input_fn=lambda : input_pipeline(True, model_scope, epochs_per_eval), hooks=[logging_hook], max_steps=(steps_per_epoch*train_epochs)) tf.logging.info('Starting to evaluate.') eval_results = fashionAI.evaluate(input_fn=lambda : input_pipeline(False, model_scope, 1)) tf.logging.info(eval_results) tf.logging.info('Finished model {}.'.format(model_scope)) def main(_): # Using the Winograd non-fused algorithms provides a small performance boost. os.environ['TF_ENABLE_WINOGRAD_NONFUSED'] = '1' gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction = FLAGS.gpu_memory_fraction) sess_config = tf.ConfigProto(allow_soft_placement = True, log_device_placement = False, intra_op_parallelism_threads = FLAGS.num_cpu_threads, inter_op_parallelism_threads = FLAGS.num_cpu_threads, gpu_options = gpu_options) # Set up a RunConfig to only save checkpoints once per training cycle. run_config = tf.estimator.RunConfig().replace( save_checkpoints_secs=FLAGS.save_checkpoints_secs).replace( save_checkpoints_steps=None).replace( save_summary_steps=FLAGS.save_summary_steps).replace( keep_checkpoint_max=5).replace( tf_random_seed=FLAGS.tf_random_seed).replace( log_step_count_steps=FLAGS.log_every_n_steps).replace( session_config=sess_config) if FLAGS.seq_train: detail_params = { 'all': { 'model_dir' : os.path.join(FLAGS.model_dir, 'all'), 'train_epochs': 6, 'epochs_per_eval': 4, 'lr_decay_factors': '1, 0.5, 0.1', 'decay_boundaries': '3, 4', 'model_scope': 'all', 'checkpoint_path': None, 'checkpoint_model_scope': '', 'checkpoint_exclude_scopes': '', 'ignore_missing_vars': True, }, 'blouse': { 'model_dir' : os.path.join(FLAGS.model_dir, 'blouse'), 'train_epochs': 50, 'epochs_per_eval': 30, 'lr_decay_factors': '1, 0.5, 0.1', 'decay_boundaries': '15, 30', 'model_scope': 'blouse', 'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'), 'checkpoint_model_scope': 'all', 'checkpoint_exclude_scopes': 'blouse/feature_pyramid/conv_heatmap, blouse/global_net/conv_heatmap', 'ignore_missing_vars': True, }, 'dress': { 'model_dir' : os.path.join(FLAGS.model_dir, 'dress'), 'train_epochs': 50, 'epochs_per_eval': 30, 'lr_decay_factors': '1, 0.5, 0.1', 'decay_boundaries': '15, 30', 'model_scope': 'dress', 'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'), 'checkpoint_model_scope': 'all', 'checkpoint_exclude_scopes': 'dress/feature_pyramid/conv_heatmap, dress/global_net/conv_heatmap', 'ignore_missing_vars': True, }, 'outwear': { 'model_dir' : os.path.join(FLAGS.model_dir, 'outwear'), 'train_epochs': 50, 'epochs_per_eval': 30, 'lr_decay_factors': '1, 0.5, 0.1', 'decay_boundaries': '15, 30', 'model_scope': 'outwear', 'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'), 'checkpoint_model_scope': 'all', 'checkpoint_exclude_scopes': 'outwear/feature_pyramid/conv_heatmap, outwear/global_net/conv_heatmap', 'ignore_missing_vars': True, }, 'skirt': { 'model_dir' : os.path.join(FLAGS.model_dir, 'skirt'), 'train_epochs': 50, 'epochs_per_eval': 30, 'lr_decay_factors': '1, 0.5, 0.1', 'decay_boundaries': '15, 30', 'model_scope': 'skirt', 'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'), 'checkpoint_model_scope': 'all', 'checkpoint_exclude_scopes': 'skirt/feature_pyramid/conv_heatmap, skirt/global_net/conv_heatmap', 'ignore_missing_vars': True, }, 'trousers': { 
'model_dir' : os.path.join(FLAGS.model_dir, 'trousers'), 'train_epochs': 50, 'epochs_per_eval': 30, 'lr_decay_factors': '1, 0.5, 0.1', 'decay_boundaries': '15, 30', 'model_scope': 'trousers', 'checkpoint_path': os.path.join(FLAGS.model_dir, 'all'), 'checkpoint_model_scope': 'all', 'checkpoint_exclude_scopes': 'trousers/feature_pyramid/conv_heatmap, trousers/global_net/conv_heatmap', 'ignore_missing_vars': True, }, } else: detail_params = { 'blouse': { 'model_dir' : os.path.join(FLAGS.model_dir, 'blouse'), 'train_epochs': 28, 'epochs_per_eval': 7, 'lr_decay_factors': '1, 0.5, 0.1', 'decay_boundaries': '10, 20', 'model_scope': 'blouse', 'checkpoint_path': os.path.join(FLAGS.data_dir, FLAGS.backbone) if FLAGS.run_on_cloud else os.path.join(FLAGS.checkpoint_path, FLAGS.backbone), 'checkpoint_model_scope': '', 'checkpoint_exclude_scopes': 'blouse/feature_pyramid, blouse/global_net', 'ignore_missing_vars': True, }, 'dress': { 'model_dir' : os.path.join(FLAGS.model_dir, 'dress'), 'train_epochs': 28, 'epochs_per_eval': 7, 'lr_decay_factors': '1, 0.5, 0.1', 'decay_boundaries': '10, 20', 'model_scope': 'dress', 'checkpoint_path': os.path.join(FLAGS.data_dir, FLAGS.backbone) if FLAGS.run_on_cloud else os.path.join(FLAGS.checkpoint_path, FLAGS.backbone), 'checkpoint_model_scope': '', 'checkpoint_exclude_scopes': 'dress/feature_pyramid, dress/global_net', 'ignore_missing_vars': True, }, 'outwear': { 'model_dir' : os.path.join(FLAGS.model_dir, 'outwear'), 'train_epochs': 28, 'epochs_per_eval': 7, 'lr_decay_factors': '1, 0.5, 0.1', 'decay_boundaries': '10, 20', 'model_scope': 'outwear', 'checkpoint_path': os.path.join(FLAGS.data_dir, FLAGS.backbone) if FLAGS.run_on_cloud else os.path.join(FLAGS.checkpoint_path, FLAGS.backbone), 'checkpoint_model_scope': '', 'checkpoint_exclude_scopes': 'outwear/feature_pyramid, outwear/global_net', 'ignore_missing_vars': True, }, 'skirt': { 'model_dir' : os.path.join(FLAGS.model_dir, 'skirt'), 'train_epochs': 28, 'epochs_per_eval': 7, 'lr_decay_factors': '1, 0.5, 0.1', 'decay_boundaries': '10, 20', 'model_scope': 'skirt', 'checkpoint_path': os.path.join(FLAGS.data_dir, FLAGS.backbone) if FLAGS.run_on_cloud else os.path.join(FLAGS.checkpoint_path, FLAGS.backbone), 'checkpoint_model_scope': '', 'checkpoint_exclude_scopes': 'skirt/feature_pyramid, skirt/global_net', 'ignore_missing_vars': True, }, 'trousers': { 'model_dir' : os.path.join(FLAGS.model_dir, 'trousers'), 'train_epochs': 28, 'epochs_per_eval': 7, 'lr_decay_factors': '1, 0.5, 0.1', 'decay_boundaries': '10, 20', 'model_scope': 'trousers', 'checkpoint_path': os.path.join(FLAGS.data_dir, FLAGS.backbone) if FLAGS.run_on_cloud else os.path.join(FLAGS.checkpoint_path, FLAGS.backbone), 'checkpoint_model_scope': '', 'checkpoint_exclude_scopes': 'trousers/feature_pyramid, trousers/global_net', 'ignore_missing_vars': True, }, } model_to_train = [s.strip() for s in FLAGS.model_to_train.split(',')] for m in model_to_train: sub_loop(keypoint_model_fn, m, detail_params[m]['model_dir'], run_config, detail_params[m]['train_epochs'], detail_params[m]['epochs_per_eval'], detail_params[m]['lr_decay_factors'], detail_params[m]['decay_boundaries'], detail_params[m]['checkpoint_path'], detail_params[m]['checkpoint_exclude_scopes'], detail_params[m]['checkpoint_model_scope'], detail_params[m]['ignore_missing_vars']) if __name__ == '__main__': tf.logging.set_verbosity(tf.logging.INFO) tf.app.run() # 0.04473711425469029 # blouse: 0.042138283111307795 # dress: 0.04147867224643174 # outwear: 0.04511445541161763 # skirt: 0.05388678376709799 
# trousers: 0.04985801318493035
[]
[]
[ "TF_ENABLE_WINOGRAD_NONFUSED" ]
[]
["TF_ENABLE_WINOGRAD_NONFUSED"]
python
1
0
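In the training script above, main() exports TF_ENABLE_WINOGRAD_NONFUSED='1' before any session configuration or estimator is created, so the framework sees the flag when it initializes. The sketch below is a framework-free illustration of that "set the flag before the library reads it" ordering; the helper name and default flag tuple are assumptions for illustration.

import os

def enable_performance_flags(flags=('TF_ENABLE_WINOGRAD_NONFUSED',)):
    """Export opt-in flags before the framework initializes and caches them."""
    for name in flags:
        os.environ[name] = '1'

enable_performance_flags()
# Any session/config setup performed after this point observes the flag,
# matching the ordering used in main() of the record above.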
awx/main/tasks.py
# -*- coding: utf-8 -*- # Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. # Python from collections import OrderedDict, namedtuple import errno import functools import importlib import json import logging import os import shutil import stat import tempfile import time import traceback from distutils.dir_util import copy_tree from distutils.version import LooseVersion as Version import yaml import fcntl from pathlib import Path from uuid import uuid4 try: import psutil except Exception: psutil = None import urllib.parse as urlparse # Django from django.conf import settings from django.db import transaction, DatabaseError, IntegrityError from django.db.models.fields.related import ForeignKey from django.utils.timezone import now, timedelta from django.utils.encoding import smart_str from django.core.mail import send_mail from django.contrib.auth.models import User from django.utils.translation import ugettext_lazy as _ from django.core.cache import cache from django.core.exceptions import ObjectDoesNotExist # Django-CRUM from crum import impersonate # GitPython import git from gitdb.exc import BadName as BadGitName # Runner import ansible_runner # AWX from awx import __version__ as awx_application_version from awx.main.constants import CLOUD_PROVIDERS, PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV from awx.main.access import access_registry from awx.main.models import ( Schedule, TowerScheduleState, Instance, InstanceGroup, UnifiedJob, Notification, Inventory, InventorySource, SmartInventoryMembership, Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob, JobEvent, ProjectUpdateEvent, InventoryUpdateEvent, AdHocCommandEvent, SystemJobEvent, build_safe_env ) from awx.main.constants import ACTIVE_STATES from awx.main.exceptions import AwxTaskError from awx.main.queue import CallbackQueueDispatcher from awx.main.isolated import manager as isolated_manager from awx.main.dispatch.publish import task from awx.main.dispatch import get_local_queuename, reaper from awx.main.utils import (get_ssh_version, update_scm_url, get_licenser, ignore_inventory_computed_fields, ignore_inventory_group_removal, extract_ansible_vars, schedule_task_manager, get_awx_version) from awx.main.utils.common import get_ansible_version, _get_ansible_version, get_custom_venv_choices from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja from awx.main.utils.reload import stop_local_services from awx.main.utils.pglock import advisory_lock from awx.main.consumers import emit_channel_notification from awx.main import analytics from awx.conf import settings_registry from awx.conf.license import get_license from rest_framework.exceptions import PermissionDenied __all__ = ['RunJob', 'RunSystemJob', 'RunProjectUpdate', 'RunInventoryUpdate', 'RunAdHocCommand', 'handle_work_error', 'handle_work_success', 'apply_cluster_membership_policies', 'update_inventory_computed_fields', 'update_host_smart_inventory_memberships', 'send_notifications', 'run_administrative_checks', 'purge_old_stdout_files'] HIDDEN_PASSWORD = '**********' OPENSSH_KEY_ERROR = u'''\ It looks like you're trying to use a private key in OpenSSH format, which \ isn't supported by the installed version of OpenSSH on this instance. \ Try upgrading OpenSSH or providing your private key in an different format. 
\ ''' logger = logging.getLogger('awx.main.tasks') class InvalidVirtualenvError(Exception): def __init__(self, message): self.message = message def dispatch_startup(): startup_logger = logging.getLogger('awx.main.tasks') startup_logger.debug("Syncing Schedules") for sch in Schedule.objects.all(): try: sch.update_computed_fields() except Exception: logger.exception("Failed to rebuild schedule {}.".format(sch)) # # When the dispatcher starts, if the instance cannot be found in the database, # automatically register it. This is mostly useful for openshift-based # deployments where: # # 2 Instances come online # Instance B encounters a network blip, Instance A notices, and # deprovisions it # Instance B's connectivity is restored, the dispatcher starts, and it # re-registers itself # # In traditional container-less deployments, instances don't get # deprovisioned when they miss their heartbeat, so this code is mostly a # no-op. # apply_cluster_membership_policies() cluster_node_heartbeat() if Instance.objects.me().is_controller(): awx_isolated_heartbeat() def inform_cluster_of_shutdown(): try: this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID) this_inst.capacity = 0 # No thank you to new jobs while shut down this_inst.save(update_fields=['capacity', 'modified']) try: reaper.reap(this_inst) except Exception: logger.exception('failed to reap jobs for {}'.format(this_inst.hostname)) logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname)) except Exception: logger.exception('Encountered problem with normal shutdown signal.') @task() def apply_cluster_membership_policies(): started_waiting = time.time() with advisory_lock('cluster_policy_lock', wait=True): lock_time = time.time() - started_waiting if lock_time > 1.0: to_log = logger.info else: to_log = logger.debug to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time)) started_compute = time.time() all_instances = list(Instance.objects.order_by('id')) all_groups = list(InstanceGroup.objects.prefetch_related('instances')) iso_hostnames = set([]) for ig in all_groups: if ig.controller_id is not None: iso_hostnames.update(ig.policy_instance_list) considered_instances = [inst for inst in all_instances if inst.hostname not in iso_hostnames] total_instances = len(considered_instances) actual_groups = [] actual_instances = [] Group = namedtuple('Group', ['obj', 'instances', 'prior_instances']) Node = namedtuple('Instance', ['obj', 'groups']) # Process policy instance list first, these will represent manually managed memberships instance_hostnames_map = {inst.hostname: inst for inst in all_instances} for ig in all_groups: group_actual = Group(obj=ig, instances=[], prior_instances=[ instance.pk for instance in ig.instances.all() # obtained in prefetch ]) for hostname in ig.policy_instance_list: if hostname not in instance_hostnames_map: logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name)) continue inst = instance_hostnames_map[hostname] group_actual.instances.append(inst.id) # NOTE: arguable behavior: policy-list-group is not added to # instance's group count for consideration in minimum-policy rules if group_actual.instances: logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name)) if ig.controller_id is None: actual_groups.append(group_actual) else: # For isolated groups, _only_ apply the policy_instance_list # do not add to in-memory list, so minimum rules not applied 
logger.debug('Committing instances to isolated group {}'.format(ig.name)) ig.instances.set(group_actual.instances) # Process Instance minimum policies next, since it represents a concrete lower bound to the # number of instances to make available to instance groups actual_instances = [Node(obj=i, groups=[]) for i in considered_instances if i.managed_by_policy] logger.debug("Total non-isolated instances:{} available for policy: {}".format( total_instances, len(actual_instances))) for g in sorted(actual_groups, key=lambda x: len(x.instances)): policy_min_added = [] for i in sorted(actual_instances, key=lambda x: len(x.groups)): if len(g.instances) >= g.obj.policy_instance_minimum: break if i.obj.id in g.instances: # If the instance is already _in_ the group, it was # applied earlier via the policy list continue g.instances.append(i.obj.id) i.groups.append(g.obj.id) policy_min_added.append(i.obj.id) if policy_min_added: logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name)) # Finally, process instance policy percentages for g in sorted(actual_groups, key=lambda x: len(x.instances)): policy_per_added = [] for i in sorted(actual_instances, key=lambda x: len(x.groups)): if i.obj.id in g.instances: # If the instance is already _in_ the group, it was # applied earlier via a minimum policy or policy list continue if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage: break g.instances.append(i.obj.id) i.groups.append(g.obj.id) policy_per_added.append(i.obj.id) if policy_per_added: logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name)) # Determine if any changes need to be made needs_change = False for g in actual_groups: if set(g.instances) != set(g.prior_instances): needs_change = True break if not needs_change: logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute)) return # On a differential basis, apply instances to non-isolated groups with transaction.atomic(): for g in actual_groups: instances_to_add = set(g.instances) - set(g.prior_instances) instances_to_remove = set(g.prior_instances) - set(g.instances) if instances_to_add: logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name)) g.obj.instances.add(*instances_to_add) if instances_to_remove: logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name)) g.obj.instances.remove(*instances_to_remove) logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute)) @task(queue='tower_broadcast_all', exchange_type='fanout') def handle_setting_changes(setting_keys): orig_len = len(setting_keys) for i in range(orig_len): for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]): setting_keys.append(dependent_key) cache_keys = set(setting_keys) logger.debug('cache delete_many(%r)', cache_keys) cache.delete_many(cache_keys) @task(queue='tower_broadcast_all', exchange_type='fanout') def delete_project_files(project_path): # TODO: possibly implement some retry logic lock_file = project_path + '.lock' if os.path.exists(project_path): try: shutil.rmtree(project_path) logger.debug('Success removing project files {}'.format(project_path)) except Exception: logger.exception('Could not remove project directory {}'.format(project_path)) if os.path.exists(lock_file): try: os.remove(lock_file) logger.debug('Success removing {}'.format(lock_file)) except 
Exception: logger.exception('Could not remove lock file {}'.format(lock_file)) @task(queue='tower_broadcast_all', exchange_type='fanout') def profile_sql(threshold=1, minutes=1): if threshold == 0: cache.delete('awx-profile-sql-threshold') logger.error('SQL PROFILING DISABLED') else: cache.set( 'awx-profile-sql-threshold', threshold, timeout=minutes * 60 ) logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes)) @task() def send_notifications(notification_list, job_id=None): if not isinstance(notification_list, list): raise TypeError("notification_list should be of type list") if job_id is not None: job_actual = UnifiedJob.objects.get(id=job_id) notifications = Notification.objects.filter(id__in=notification_list) if job_id is not None: job_actual.notifications.add(*notifications) for notification in notifications: update_fields = ['status', 'notifications_sent'] try: sent = notification.notification_template.send(notification.subject, notification.body) notification.status = "successful" notification.notifications_sent = sent except Exception as e: logger.exception("Send Notification Failed {}".format(e)) notification.status = "failed" notification.error = smart_str(e) update_fields.append('error') finally: try: notification.save(update_fields=update_fields) except Exception: logger.exception('Error saving notification {} result.'.format(notification.id)) @task() def gather_analytics(): if not settings.INSIGHTS_TRACKING_STATE: return try: tgz = analytics.gather() if not tgz: return logger.debug('gathered analytics: {}'.format(tgz)) analytics.ship(tgz) finally: if os.path.exists(tgz): os.remove(tgz) @task() def run_administrative_checks(): logger.warn("Running administrative checks.") if not settings.TOWER_ADMIN_ALERTS: return validation_info = get_licenser().validate() if validation_info['license_type'] != 'open' and validation_info.get('instance_count', 0) < 1: return used_percentage = float(validation_info.get('current_instances', 0)) / float(validation_info.get('instance_count', 100)) tower_admin_emails = User.objects.filter(is_superuser=True).values_list('email', flat=True) if (used_percentage * 100) > 90: send_mail("Ansible Tower host usage over 90%", _("Ansible Tower host usage over 90%"), tower_admin_emails, fail_silently=True) if validation_info.get('date_warning', False): send_mail("Ansible Tower license will expire soon", _("Ansible Tower license will expire soon"), tower_admin_emails, fail_silently=True) @task(queue=get_local_queuename) def purge_old_stdout_files(): nowtime = time.time() for f in os.listdir(settings.JOBOUTPUT_ROOT): if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT,f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME: os.unlink(os.path.join(settings.JOBOUTPUT_ROOT,f)) logger.debug("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT,f))) @task(queue=get_local_queuename) def cluster_node_heartbeat(): logger.debug("Cluster node heartbeat task.") nowtime = now() instance_list = list(Instance.objects.all_non_isolated()) this_inst = None lost_instances = [] (changed, instance) = Instance.objects.get_or_register() if changed: logger.info("Registered tower node '{}'".format(instance.hostname)) for inst in list(instance_list): if inst.hostname == settings.CLUSTER_HOST_ID: this_inst = inst instance_list.remove(inst) elif inst.is_lost(ref_time=nowtime): lost_instances.append(inst) instance_list.remove(inst) if this_inst: startup_event = this_inst.is_lost(ref_time=nowtime) this_inst.refresh_capacity() if startup_event: 
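            # This node had been marked lost and is now back: log the rejoin and
            # skip the remaining heartbeat work (version checks, reaping of lost
            # instances) for this cycle.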
logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname)) return else: raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID)) # IFF any node has a greater version than we do, then we'll shutdown services for other_inst in instance_list: if other_inst.version == "": continue if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG: logger.error("Host {} reports version {}, but this node {} is at {}, shutting down".format( other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version )) # Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance. # The heartbeat task will reset the capacity to the system capacity after upgrade. stop_local_services(communicate=False) raise RuntimeError("Shutting down.") for other_inst in lost_instances: try: reaper.reap(other_inst) except Exception: logger.exception('failed to reap jobs for {}'.format(other_inst.hostname)) try: # Capacity could already be 0 because: # * It's a new node and it never had a heartbeat # * It was set to 0 by another tower node running this method # * It was set to 0 by this node, but auto deprovisioning is off # # If auto deprovisining is on, don't bother setting the capacity to 0 # since we will delete the node anyway. if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES: other_inst.capacity = 0 other_inst.save(update_fields=['capacity']) logger.error("Host {} last checked in at {}, marked as lost.".format( other_inst.hostname, other_inst.modified)) elif settings.AWX_AUTO_DEPROVISION_INSTANCES: deprovision_hostname = other_inst.hostname other_inst.delete() logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname)) except DatabaseError as e: if 'did not affect any rows' in str(e): logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname)) else: logger.exception('Error marking {} as lost'.format(other_inst.hostname)) @task(queue=get_local_queuename) def awx_isolated_heartbeat(): local_hostname = settings.CLUSTER_HOST_ID logger.debug("Controlling node checking for any isolated management tasks.") poll_interval = settings.AWX_ISOLATED_PERIODIC_CHECK # Get isolated instances not checked since poll interval - some buffer nowtime = now() accept_before = nowtime - timedelta(seconds=(poll_interval - 10)) isolated_instance_qs = Instance.objects.filter( rampart_groups__controller__instances__hostname=local_hostname, ) isolated_instance_qs = isolated_instance_qs.filter( last_isolated_check__lt=accept_before ) | isolated_instance_qs.filter( last_isolated_check=None ) # Fast pass of isolated instances, claiming the nodes to update with transaction.atomic(): for isolated_instance in isolated_instance_qs: isolated_instance.last_isolated_check = nowtime # Prevent modified time from being changed, as in normal heartbeat isolated_instance.save(update_fields=['last_isolated_check']) # Slow pass looping over isolated IGs and their isolated instances if len(isolated_instance_qs) > 0: logger.debug("Managing isolated instances {}.".format(','.join([inst.hostname for inst in isolated_instance_qs]))) isolated_manager.IsolatedManager().health_check(isolated_instance_qs) @task() def awx_periodic_scheduler(): with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired: if acquired is False: logger.debug("Not running periodic scheduler, another task holds lock") return logger.debug("Starting periodic scheduler") 
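        # Schedules whose next occurrence falls between the last recorded run and
        # now are launched below; schedules that only have occurrences before the
        # last run just get their computed fields (next_run) refreshed.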
run_now = now() state = TowerScheduleState.get_solo() last_run = state.schedule_last_run logger.debug("Last scheduler run was: %s", last_run) state.schedule_last_run = run_now state.save() old_schedules = Schedule.objects.enabled().before(last_run) for schedule in old_schedules: schedule.update_computed_fields() schedules = Schedule.objects.enabled().between(last_run, run_now) invalid_license = False try: access_registry[Job](None).check_license() except PermissionDenied as e: invalid_license = e for schedule in schedules: template = schedule.unified_job_template schedule.update_computed_fields() # To update next_run timestamp. if template.cache_timeout_blocked: logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id)) continue try: job_kwargs = schedule.get_job_kwargs() new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs) logger.debug('Spawned {} from schedule {}-{}.'.format( new_unified_job.log_format, schedule.name, schedule.pk)) if invalid_license: new_unified_job.status = 'failed' new_unified_job.job_explanation = str(invalid_license) new_unified_job.save(update_fields=['status', 'job_explanation']) new_unified_job.websocket_emit_status("failed") raise invalid_license can_start = new_unified_job.signal_start() except Exception: logger.exception('Error spawning scheduled job.') continue if not can_start: new_unified_job.status = 'failed' new_unified_job.job_explanation = "Scheduled job could not start because it was not in the right state or required manual credentials" new_unified_job.save(update_fields=['status', 'job_explanation']) new_unified_job.websocket_emit_status("failed") emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules")) state.save() @task() def handle_work_success(task_actual): try: instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id']) except ObjectDoesNotExist: logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id'])) return if not instance: return schedule_task_manager() @task() def handle_work_error(task_id, *args, **kwargs): subtasks = kwargs.get('subtasks', None) logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks))) first_instance = None first_instance_type = '' if subtasks is not None: for each_task in subtasks: try: instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id']) if not instance: # Unknown task type logger.warn("Unknown task type: {}".format(each_task['type'])) continue except ObjectDoesNotExist: logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id'])) continue if first_instance is None: first_instance = instance first_instance_type = each_task['type'] if instance.celery_task_id != task_id and not instance.cancel_flag: instance.status = 'failed' instance.failed = True if not instance.job_explanation: instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % \ (first_instance_type, first_instance.name, first_instance.id) instance.save() instance.websocket_emit_status("failed") # We only send 1 job complete message since all the job completion message # handling does is trigger the scheduler. If we extend the functionality of # what the job complete message handler does then we may want to send a # completion event for each job here. 
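    # A single kick of the task manager is sufficient; it re-evaluates all
    # pending work when it runs.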
if first_instance: schedule_task_manager() pass @task() def update_inventory_computed_fields(inventory_id, should_update_hosts=True): ''' Signal handler and wrapper around inventory.update_computed_fields to prevent unnecessary recursive calls. ''' i = Inventory.objects.filter(id=inventory_id) if not i.exists(): logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id)) return i = i[0] try: i.update_computed_fields(update_hosts=should_update_hosts) except DatabaseError as e: if 'did not affect any rows' in str(e): logger.debug('Exiting duplicate update_inventory_computed_fields task.') return raise def update_smart_memberships_for_inventory(smart_inventory): current = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True)) new = set(smart_inventory.hosts.values_list('id', flat=True)) additions = new - current removals = current - new if additions or removals: with transaction.atomic(): if removals: SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=removals).delete() if additions: add_for_inventory = [ SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in additions ] SmartInventoryMembership.objects.bulk_create(add_for_inventory, ignore_conflicts=True) logger.debug('Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format( smart_inventory.pk, len(additions), len(removals), len(new) )) return True # changed return False @task() def update_host_smart_inventory_memberships(): smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False) changed_inventories = set([]) for smart_inventory in smart_inventories: try: changed = update_smart_memberships_for_inventory(smart_inventory) if changed: changed_inventories.add(smart_inventory) except IntegrityError: logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk)) # Update computed fields for changed inventories outside atomic action for smart_inventory in changed_inventories: smart_inventory.update_computed_fields(update_groups=False, update_hosts=False) @task() def delete_inventory(inventory_id, user_id, retries=5): # Delete inventory as user if user_id is None: user = None else: try: user = User.objects.get(id=user_id) except Exception: user = None with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user): try: i = Inventory.objects.get(id=inventory_id) for host in i.hosts.iterator(): host.job_events_as_primary_host.update(host=None) i.delete() emit_channel_notification( 'inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'} ) logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id)) except Inventory.DoesNotExist: logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id)) return except DatabaseError: logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id)) if retries > 0: time.sleep(10) delete_inventory(inventory_id, user_id, retries=retries - 1) def with_path_cleanup(f): @functools.wraps(f) def _wrapped(self, *args, **kwargs): try: return f(self, *args, **kwargs) finally: for p in self.cleanup_paths: try: if os.path.isdir(p): shutil.rmtree(p, ignore_errors=True) elif os.path.exists(p): os.remove(p) except OSError: logger.exception("Failed to remove tmp file: {}".format(p)) self.cleanup_paths = 
[] return _wrapped class BaseTask(object): model = None event_model = None abstract = True proot_show_paths = [] def __init__(self): self.cleanup_paths = [] self.parent_workflow_job_id = None def update_model(self, pk, _attempt=0, **updates): """Reload the model instance from the database and update the given fields. """ try: with transaction.atomic(): # Retrieve the model instance. instance = self.model.objects.get(pk=pk) # Update the appropriate fields and save the model # instance, then return the new instance. if updates: update_fields = ['modified'] for field, value in updates.items(): setattr(instance, field, value) update_fields.append(field) if field == 'status': update_fields.append('failed') instance.save(update_fields=update_fields) return instance except DatabaseError as e: # Log out the error to the debug logger. logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e) # Attempt to retry the update, assuming we haven't already # tried too many times. if _attempt < 5: time.sleep(5) return self.update_model( pk, _attempt=_attempt + 1, **updates ) else: logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt) def get_ansible_version(self, instance): if not hasattr(self, '_ansible_version'): self._ansible_version = _get_ansible_version( ansible_path=self.get_path_to_ansible(instance, executable='ansible')) return self._ansible_version def get_path_to(self, *args): ''' Return absolute path relative to this file. ''' return os.path.abspath(os.path.join(os.path.dirname(__file__), *args)) def get_path_to_ansible(self, instance, executable='ansible-playbook', **kwargs): venv_path = getattr(instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH) venv_exe = os.path.join(venv_path, 'bin', executable) if os.path.exists(venv_exe): return venv_exe return shutil.which(executable) def build_private_data(self, instance, private_data_dir): ''' Return SSH private key data (only if stored in DB as ssh_key_data). Return structure is a dict of the form: ''' def build_private_data_dir(self, instance): ''' Create a temporary directory for job-related files. ''' path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=settings.AWX_PROOT_BASE_PATH) os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) if settings.AWX_CLEANUP_PATHS: self.cleanup_paths.append(path) runner_project_folder = os.path.join(path, 'project') if not os.path.exists(runner_project_folder): # Ansible Runner requires that this directory exists. # Specifically, when using process isolation os.mkdir(runner_project_folder) return path def build_private_data_files(self, instance, private_data_dir): ''' Creates temporary files containing the private data. Returns a dictionary i.e., { 'credentials': { <awx.main.models.Credential>: '/path/to/decrypted/data', <awx.main.models.Credential>: '/path/to/decrypted/data', ... }, 'certificates': { <awx.main.models.Credential>: /path/to/signed/ssh/certificate, <awx.main.models.Credential>: /path/to/signed/ssh/certificate, ... 
} } ''' private_data = self.build_private_data(instance, private_data_dir) private_data_files = {'credentials': {}} if private_data is not None: ssh_ver = get_ssh_version() ssh_too_old = True if ssh_ver == "unknown" else Version(ssh_ver) < Version("6.0") openssh_keys_supported = ssh_ver != "unknown" and Version(ssh_ver) >= Version("6.5") for credential, data in private_data.get('credentials', {}).items(): # Bail out now if a private key was provided in OpenSSH format # and we're running an earlier version (<6.5). if 'OPENSSH PRIVATE KEY' in data and not openssh_keys_supported: raise RuntimeError(OPENSSH_KEY_ERROR) # OpenSSH formatted keys must have a trailing newline to be # accepted by ssh-add. if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'): data += '\n' # For credentials used with ssh-add, write to a named pipe which # will be read then closed, instead of leaving the SSH key on disk. if credential and credential.credential_type.namespace in ('ssh', 'scm') and not ssh_too_old: try: os.mkdir(os.path.join(private_data_dir, 'env')) except OSError as e: if e.errno != errno.EEXIST: raise path = os.path.join(private_data_dir, 'env', 'ssh_key') ansible_runner.utils.open_fifo_write(path, data.encode()) private_data_files['credentials']['ssh'] = path # Ansible network modules do not yet support ssh-agent. # Instead, ssh private key file is explicitly passed via an # env variable. else: handle, path = tempfile.mkstemp(dir=private_data_dir) f = os.fdopen(handle, 'w') f.write(data) f.close() os.chmod(path, stat.S_IRUSR | stat.S_IWUSR) private_data_files['credentials'][credential] = path for credential, data in private_data.get('certificates', {}).items(): artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id)) if not os.path.exists(artifact_dir): os.makedirs(artifact_dir, mode=0o700) path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub') with open(path, 'w') as f: f.write(data) f.close() os.chmod(path, stat.S_IRUSR | stat.S_IWUSR) return private_data_files def build_passwords(self, instance, runtime_passwords): ''' Build a dictionary of passwords for responding to prompts. ''' return { 'yes': 'yes', 'no': 'no', '': '', } def build_extra_vars_file(self, instance, private_data_dir): ''' Build ansible yaml file filled with extra vars to be passed via [email protected] ''' def build_params_process_isolation(self, instance, private_data_dir, cwd): ''' Build ansible runner .run() parameters for process isolation. 
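        Returns an empty dict when process isolation is not in use for this task.
        When it is enabled, the returned dict looks roughly like this (paths are
        illustrative only):

            {
                'process_isolation': True,
                'process_isolation_path': '/tmp/ansible_runner_pi_xxxxxx',
                'process_isolation_show_paths': [...],
                'process_isolation_hide_paths': [...],
                'process_isolation_ro_paths': [...],
            }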
''' process_isolation_params = dict() if self.should_use_proot(instance): local_paths = [private_data_dir] if cwd != private_data_dir and Path(private_data_dir) not in Path(cwd).parents: local_paths.append(cwd) show_paths = self.proot_show_paths + local_paths + \ settings.AWX_PROOT_SHOW_PATHS pi_path = settings.AWX_PROOT_BASE_PATH if not self.instance.is_isolated(): pi_path = tempfile.mkdtemp( prefix='ansible_runner_pi_', dir=settings.AWX_PROOT_BASE_PATH ) os.chmod(pi_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) self.cleanup_paths.append(pi_path) process_isolation_params = { 'process_isolation': True, 'process_isolation_path': pi_path, 'process_isolation_show_paths': show_paths, 'process_isolation_hide_paths': [ settings.AWX_PROOT_BASE_PATH, '/etc/tower', '/etc/ssh', '/var/lib/awx', '/var/log', settings.PROJECTS_ROOT, settings.JOBOUTPUT_ROOT, ] + getattr(settings, 'AWX_PROOT_HIDE_PATHS', None) or [], 'process_isolation_ro_paths': [settings.ANSIBLE_VENV_PATH, settings.AWX_VENV_PATH], } if getattr(instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH) != settings.ANSIBLE_VENV_PATH: process_isolation_params['process_isolation_ro_paths'].append(instance.ansible_virtualenv_path) return process_isolation_params def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}): env_path = os.path.join(private_data_dir, 'env') try: os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC) except OSError as e: if e.errno != errno.EEXIST: raise path = os.path.join(env_path, 'extravars') handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE) f = os.fdopen(handle, 'w') if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always': f.write(yaml.safe_dump(vars)) else: f.write(safe_dump(vars, safe_dict)) f.close() os.chmod(path, stat.S_IRUSR) return path def add_ansible_venv(self, venv_path, env, isolated=False): env['VIRTUAL_ENV'] = venv_path env['PATH'] = os.path.join(venv_path, "bin") + ":" + env['PATH'] venv_libdir = os.path.join(venv_path, "lib") if not isolated and ( not os.path.exists(venv_libdir) or os.path.join(venv_path, '') not in get_custom_venv_choices() ): raise InvalidVirtualenvError(_( 'Invalid virtual environment selected: {}'.format(venv_path) )) isolated_manager.set_pythonpath(venv_libdir, env) def add_awx_venv(self, env): env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH'] def build_env(self, instance, private_data_dir, isolated, private_data_files=None): ''' Build environment dictionary for ansible-playbook. ''' env = dict(os.environ.items()) # Add ANSIBLE_* settings to the subprocess environment. for attr in dir(settings): if attr == attr.upper() and attr.startswith('ANSIBLE_'): env[attr] = str(getattr(settings, attr)) # Also set environment variables configured in AWX_TASK_ENV setting. for key, value in settings.AWX_TASK_ENV.items(): env[key] = str(value) # Set environment variables needed for inventory and job event # callbacks to work. # Update PYTHONPATH to use local site-packages. # NOTE: # Derived class should call add_ansible_venv() or add_awx_venv() if self.should_use_proot(instance): env['PROOT_TMP_DIR'] = settings.AWX_PROOT_BASE_PATH env['AWX_PRIVATE_DATA_DIR'] = private_data_dir return env def should_use_proot(self, instance): ''' Return whether this task should use proot. 
''' return False def build_inventory(self, instance, private_data_dir): script_params = dict(hostvars=True) if hasattr(instance, 'job_slice_number'): script_params['slice_number'] = instance.job_slice_number script_params['slice_count'] = instance.job_slice_count script_data = instance.inventory.get_script_data(**script_params) json_data = json.dumps(script_data) handle, path = tempfile.mkstemp(dir=private_data_dir) f = os.fdopen(handle, 'w') f.write('#! /usr/bin/env python\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data) f.close() os.chmod(path, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR) return path def build_args(self, instance, private_data_dir, passwords): raise NotImplementedError def write_args_file(self, private_data_dir, args): env_path = os.path.join(private_data_dir, 'env') try: os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC) except OSError as e: if e.errno != errno.EEXIST: raise path = os.path.join(env_path, 'cmdline') handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE) f = os.fdopen(handle, 'w') f.write(ansible_runner.utils.args2cmdline(*args)) f.close() os.chmod(path, stat.S_IRUSR) return path def build_cwd(self, instance, private_data_dir): raise NotImplementedError def build_credentials_list(self, instance): return [] def get_instance_timeout(self, instance): global_timeout_setting_name = instance._global_timeout_setting() if global_timeout_setting_name: global_timeout = getattr(settings, global_timeout_setting_name, 0) local_timeout = getattr(instance, 'timeout', 0) job_timeout = global_timeout if local_timeout == 0 else local_timeout job_timeout = 0 if local_timeout < 0 else job_timeout else: job_timeout = 0 return job_timeout def get_password_prompts(self, passwords={}): ''' Return a dictionary where keys are strings or regular expressions for prompts, and values are password lookup keys (keys that are returned from build_passwords). ''' return OrderedDict() def create_expect_passwords_data_struct(self, password_prompts, passwords): expect_passwords = {} for k, v in password_prompts.items(): expect_passwords[k] = passwords.get(v, '') or '' return expect_passwords def pre_run_hook(self, instance, private_data_dir): ''' Hook for any steps to run before the job/task starts ''' def post_run_hook(self, instance, status): ''' Hook for any steps to run before job/task is marked as complete. ''' def final_run_hook(self, instance, status, private_data_dir, fact_modification_times, isolated_manager_instance=None): ''' Hook for any steps to run after job/task is marked as complete. ''' def event_handler(self, event_data): # # ⚠️ D-D-D-DANGER ZONE ⚠️ # This method is called once for *every event* emitted by Ansible # Runner as a playbook runs. That means that changes to the code in # this method are _very_ likely to introduce performance regressions. # # Even if this function is made on average .05s slower, it can have # devastating performance implications for playbooks that emit # tens or hundreds of thousands of events. # # Proceed with caution! # ''' Ansible runner puts a parent_uuid on each event, no matter what the type. AWX only saves the parent_uuid if the event is for a Job. 
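        Each event is stamped with this instance's id under ``self.event_data_key``
        and handed to the CallbackQueueDispatcher; ``self.event_ct`` is incremented
        so the final EOF event can report how many events were emitted. Any
        ``artifact_data`` found in the event is saved onto the instance's
        ``artifacts`` field.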
''' if event_data.get(self.event_data_key, None): if self.event_data_key != 'job_id': event_data.pop('parent_uuid', None) if self.parent_workflow_job_id: event_data['workflow_job_id'] = self.parent_workflow_job_id should_write_event = False event_data.setdefault(self.event_data_key, self.instance.id) self.dispatcher.dispatch(event_data) self.event_ct += 1 ''' Handle artifacts ''' if event_data.get('event_data', {}).get('artifact_data', {}): self.instance.artifacts = event_data['event_data']['artifact_data'] self.instance.save(update_fields=['artifacts']) return should_write_event def cancel_callback(self): ''' Ansible runner callback to tell the job when/if it is canceled ''' self.instance = self.update_model(self.instance.pk) if self.instance.cancel_flag or self.instance.status == 'canceled': cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0 if cancel_wait > 5: logger.warn('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait)) return True return False def finished_callback(self, runner_obj): ''' Ansible runner callback triggered on finished run ''' event_data = { 'event': 'EOF', 'final_counter': self.event_ct, } event_data.setdefault(self.event_data_key, self.instance.id) self.dispatcher.dispatch(event_data) def status_handler(self, status_data, runner_config): ''' Ansible runner callback triggered on status transition ''' if status_data['status'] == 'starting': job_env = dict(runner_config.env) ''' Take the safe environment variables and overwrite ''' for k, v in self.safe_env.items(): if k in job_env: job_env[k] = v self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env) def check_handler(self, config): ''' IsolatedManager callback triggered by the repeated checks of the isolated node ''' job_env = build_safe_env(config['env']) for k, v in self.safe_cred_env.items(): if k in job_env: job_env[k] = v self.instance = self.update_model(self.instance.pk, job_args=json.dumps(config['command']), job_cwd=config['cwd'], job_env=job_env) @with_path_cleanup def run(self, pk, **kwargs): ''' Run the job/task and capture its output. ''' # self.instance because of the update_model pattern and when it's used in callback handlers self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords self.instance.websocket_emit_status("running") status, rc = 'error', None extra_update_fields = {} fact_modification_times = {} self.event_ct = 0 ''' Needs to be an object property because status_handler uses it in a callback context ''' self.safe_env = {} self.safe_cred_env = {} private_data_dir = None isolated_manager_instance = None # store a reference to the parent workflow job (if any) so we can include # it in event data JSON if self.instance.spawned_by_workflow: self.parent_workflow_job_id = self.instance.get_workflow_job().id try: isolated = self.instance.is_isolated() self.instance.send_notification_templates("running") private_data_dir = self.build_private_data_dir(self.instance) self.pre_run_hook(self.instance, private_data_dir) if self.instance.cancel_flag: self.instance = self.update_model(self.instance.pk, status='canceled') if self.instance.status != 'running': # Stop the task chain and prevent starting the job if it has # already been canceled. 
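                # Reload the job one more time so the status reported below
                # reflects the cancellation.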
self.instance = self.update_model(pk) status = self.instance.status raise RuntimeError('not starting %s task' % self.instance.status) if not os.path.exists(settings.AWX_PROOT_BASE_PATH): raise RuntimeError('AWX_PROOT_BASE_PATH=%s does not exist' % settings.AWX_PROOT_BASE_PATH) # store a record of the venv used at runtime if hasattr(self.instance, 'custom_virtualenv'): self.update_model(pk, custom_virtualenv=getattr(self.instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH)) # Fetch "cached" fact data from prior runs and put on the disk # where ansible expects to find it if getattr(self.instance, 'use_fact_cache', False): self.instance.start_job_fact_cache( os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'), fact_modification_times, ) # May have to serialize the value private_data_files = self.build_private_data_files(self.instance, private_data_dir) passwords = self.build_passwords(self.instance, kwargs) self.build_extra_vars_file(self.instance, private_data_dir) args = self.build_args(self.instance, private_data_dir, passwords) cwd = self.build_cwd(self.instance, private_data_dir) process_isolation_params = self.build_params_process_isolation(self.instance, private_data_dir, cwd) env = self.build_env(self.instance, private_data_dir, isolated, private_data_files=private_data_files) self.safe_env = build_safe_env(env) credentials = self.build_credentials_list(self.instance) for credential in credentials: if credential: credential.credential_type.inject_credential( credential, env, self.safe_cred_env, args, private_data_dir ) self.safe_env.update(self.safe_cred_env) self.write_args_file(private_data_dir, args) password_prompts = self.get_password_prompts(passwords) expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords) params = { 'ident': self.instance.id, 'private_data_dir': private_data_dir, 'project_dir': cwd, 'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir), 'inventory': self.build_inventory(self.instance, private_data_dir), 'passwords': expect_passwords, 'envvars': env, 'event_handler': self.event_handler, 'cancel_callback': self.cancel_callback, 'finished_callback': self.finished_callback, 'status_handler': self.status_handler, 'settings': { 'job_timeout': self.get_instance_timeout(self.instance), 'pexpect_timeout': getattr(settings, 'PEXPECT_TIMEOUT', 5), 'suppress_ansible_output': True, **process_isolation_params, }, } if isinstance(self.instance, AdHocCommand): params['module'] = self.build_module_name(self.instance) params['module_args'] = self.build_module_args(self.instance) if getattr(self.instance, 'use_fact_cache', False): # Enable Ansible fact cache. params['fact_cache_type'] = 'jsonfile' else: # Disable Ansible fact cache. 
params['fact_cache_type'] = '' ''' Delete parameters if the values are None or empty array ''' for v in ['passwords', 'playbook', 'inventory']: if not params[v]: del params[v] if self.instance.is_isolated() is True: module_args = None if 'module_args' in params: # if it's adhoc, copy the module args module_args = ansible_runner.utils.args2cmdline( params.get('module_args'), ) shutil.move( params.pop('inventory'), os.path.join(private_data_dir, 'inventory') ) ansible_runner.utils.dump_artifacts(params) isolated_manager_instance = isolated_manager.IsolatedManager( cancelled_callback=lambda: self.update_model(self.instance.pk).cancel_flag, check_callback=self.check_handler, ) status, rc = isolated_manager_instance.run(self.instance, private_data_dir, params.get('playbook'), params.get('module'), module_args, event_data_key=self.event_data_key, ident=str(self.instance.pk)) self.event_ct = len(isolated_manager_instance.handled_events) else: self.dispatcher = CallbackQueueDispatcher() res = ansible_runner.interface.run(**params) status = res.status rc = res.rc if status == 'timeout': self.instance.job_explanation = "Job terminated due to timeout" status = 'failed' extra_update_fields['job_explanation'] = self.instance.job_explanation except InvalidVirtualenvError as e: extra_update_fields['job_explanation'] = e.message logger.error('{} {}'.format(self.instance.log_format, e.message)) except Exception: # this could catch programming or file system errors extra_update_fields['result_traceback'] = traceback.format_exc() logger.exception('%s Exception occurred while running task', self.instance.log_format) finally: logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct) try: self.post_run_hook(self.instance, status) except Exception: logger.exception('{} Post run hook errored.'.format(self.instance.log_format)) self.instance = self.update_model(pk) self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields) try: self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times, isolated_manager_instance=isolated_manager_instance) except Exception: logger.exception('{} Final run hook errored.'.format(self.instance.log_format)) self.instance.websocket_emit_status(status) if status != 'successful': if status == 'canceled': raise AwxTaskError.TaskCancel(self.instance, rc) else: raise AwxTaskError.TaskError(self.instance, rc) @task() class RunJob(BaseTask): ''' Run a job using ansible-playbook. ''' model = Job event_model = JobEvent event_data_key = 'job_id' def build_private_data(self, job, private_data_dir): ''' Returns a dict of the form { 'credentials': { <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>, <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>, ... }, 'certificates': { <awx.main.models.Credential>: <signed SSH certificate data>, <awx.main.models.Credential>: <signed SSH certificate data>, ... } } ''' private_data = {'credentials': {}} for credential in job.credentials.prefetch_related('input_sources__source_credential').all(): # If we were sent SSH credentials, decrypt them and send them # back (they will be written to a temporary file). 
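            # Signed SSH certificates (ssh_public_key_data) are collected under the
            # separate 'certificates' key so build_private_data_files() can write
            # them out as ssh_key_data-cert.pub in the job's artifacts directory.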
if credential.has_input('ssh_key_data'): private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='') if credential.has_input('ssh_public_key_data'): private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='') return private_data def build_passwords(self, job, runtime_passwords): ''' Build a dictionary of passwords for SSH private key, SSH user, sudo/su and ansible-vault. ''' passwords = super(RunJob, self).build_passwords(job, runtime_passwords) cred = job.machine_credential if cred: for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'): value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default='')) if value not in ('', 'ASK'): passwords[field] = value for cred in job.vault_credentials: field = 'vault_password' vault_id = cred.get_input('vault_id', default=None) if vault_id: field = 'vault_password.{}'.format(vault_id) if field in passwords: raise RuntimeError( 'multiple vault credentials were specified with --vault-id {}@prompt'.format( vault_id ) ) value = runtime_passwords.get(field, cred.get_input('vault_password', default='')) if value not in ('', 'ASK'): passwords[field] = value ''' Only 1 value can be provided for a unique prompt string. Prefer ssh key unlock over network key unlock. ''' if 'ssh_key_unlock' not in passwords: for cred in job.network_credentials: if cred.inputs.get('ssh_key_unlock'): passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default='')) break return passwords def add_ansible_venv(self, venv_path, env, isolated=False): super(RunJob, self).add_ansible_venv(venv_path, env, isolated=isolated) # Add awx/lib to PYTHONPATH. env['PYTHONPATH'] = env.get('PYTHONPATH', '') + self.get_path_to('..', 'lib') + ':' def build_env(self, job, private_data_dir, isolated=False, private_data_files=None): ''' Build environment dictionary for ansible-playbook. ''' plugin_dir = self.get_path_to('..', 'plugins', 'callback') plugin_dirs = [plugin_dir] if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and \ settings.AWX_ANSIBLE_CALLBACK_PLUGINS: plugin_dirs.extend(settings.AWX_ANSIBLE_CALLBACK_PLUGINS) plugin_path = ':'.join(plugin_dirs) env = super(RunJob, self).build_env(job, private_data_dir, isolated=isolated, private_data_files=private_data_files) if private_data_files is None: private_data_files = {} self.add_ansible_venv(job.ansible_virtualenv_path, env, isolated=isolated) # Set environment variables needed for inventory and job event # callbacks to work. env['JOB_ID'] = str(job.pk) env['INVENTORY_ID'] = str(job.inventory.pk) if job.use_fact_cache: library_path = env.get('ANSIBLE_LIBRARY') env['ANSIBLE_LIBRARY'] = ':'.join( filter(None, [ library_path, self.get_path_to('..', 'plugins', 'library') ]) ) if job.project: env['PROJECT_REVISION'] = job.project.scm_revision env['ANSIBLE_RETRY_FILES_ENABLED'] = "False" env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA) if not isolated: env['ANSIBLE_CALLBACK_PLUGINS'] = plugin_path env['AWX_HOST'] = settings.TOWER_URL_BASE # Create a directory for ControlPath sockets that is unique to each # job and visible inside the proot environment (when enabled). cp_dir = os.path.join(private_data_dir, 'cp') if not os.path.exists(cp_dir): os.mkdir(cp_dir, 0o700) env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = cp_dir # Set environment variables for cloud credentials. 
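        # Only OpenStack needs special handling here (OS_CLIENT_CONFIG_FILE);
        # other credential types contribute their environment variables through
        # credential_type.inject_credential() in BaseTask.run().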
cred_files = private_data_files.get('credentials', {}) for cloud_cred in job.cloud_credentials: if cloud_cred and cloud_cred.credential_type.namespace == 'openstack': env['OS_CLIENT_CONFIG_FILE'] = cred_files.get(cloud_cred, '') for network_cred in job.network_credentials: env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='') env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='') ssh_keyfile = cred_files.get(network_cred, '') if ssh_keyfile: env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile authorize = network_cred.get_input('authorize', default=False) env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize)) if authorize: env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='') for env_key, folder, default in ( ('ANSIBLE_COLLECTIONS_PATHS', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'), ('ANSIBLE_ROLES_PATH', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles')): paths = default.split(':') if env_key in env: for path in env[env_key].split(':'): if path not in paths: paths = [env[env_key]] + paths paths = [os.path.join(private_data_dir, folder)] + paths env[env_key] = os.pathsep.join(paths) return env def build_args(self, job, private_data_dir, passwords): ''' Build command line argument list for running ansible-playbook, optionally using ssh-agent for public/private key authentication. ''' creds = job.machine_credential ssh_username, become_username, become_method = '', '', '' if creds: ssh_username = creds.get_input('username', default='') become_method = creds.get_input('become_method', default='') become_username = creds.get_input('become_username', default='') else: become_method = None become_username = "" # Always specify the normal SSH user as root by default. Since this # task is normally running in the background under a service account, # it doesn't make sense to rely on ansible-playbook's default of using # the current user. ssh_username = ssh_username or 'root' args = [] if job.job_type == 'check': args.append('--check') args.extend(['-u', sanitize_jinja(ssh_username)]) if 'ssh_password' in passwords: args.append('--ask-pass') if job.become_enabled: args.append('--become') if job.diff_mode: args.append('--diff') if become_method: args.extend(['--become-method', sanitize_jinja(become_method)]) if become_username: args.extend(['--become-user', sanitize_jinja(become_username)]) if 'become_password' in passwords: args.append('--ask-become-pass') # Support prompting for multiple vault passwords for k, v in passwords.items(): if k.startswith('vault_password'): if k == 'vault_password': args.append('--ask-vault-pass') else: # split only on the first dot in case the vault ID itself contains a dot vault_id = k.split('.', 1)[1] args.append('--vault-id') args.append('{}@prompt'.format(vault_id)) if job.forks: # FIXME: Max limit? 
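            # The forks value from the job is passed through as-is; no upper
            # bound is enforced here.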
args.append('--forks=%d' % job.forks) if job.force_handlers: args.append('--force-handlers') if job.limit: args.extend(['-l', job.limit]) if job.verbosity: args.append('-%s' % ('v' * min(5, job.verbosity))) if job.job_tags: args.extend(['-t', job.job_tags]) if job.skip_tags: args.append('--skip-tags=%s' % job.skip_tags) if job.start_at_task: args.append('--start-at-task=%s' % job.start_at_task) return args def build_cwd(self, job, private_data_dir): return os.path.join(private_data_dir, 'project') def build_playbook_path_relative_to_cwd(self, job, private_data_dir): return job.playbook def build_extra_vars_file(self, job, private_data_dir): # Define special extra_vars for AWX, combine with job.extra_vars. extra_vars = job.awx_meta_vars() if job.extra_vars_dict: extra_vars.update(json.loads(job.decrypted_extra_vars())) # By default, all extra vars disallow Jinja2 template usage for # security reasons; top level key-values defined in JT.extra_vars, however, # are whitelisted as "safe" (because they can only be set by users with # higher levels of privilege - those that have the ability create and # edit Job Templates) safe_dict = {} if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template': safe_dict = job.job_template.extra_vars_dict return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict) def build_credentials_list(self, job): return job.credentials.prefetch_related('input_sources__source_credential').all() def get_password_prompts(self, passwords={}): d = super(RunJob, self).get_password_prompts(passwords) d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock' d[r'Bad passphrase, try again for .*:\s*?$'] = '' for method in PRIVILEGE_ESCALATION_METHODS: d[r'%s password.*:\s*?$' % (method[0])] = 'become_password' d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password' d[r'BECOME password.*:\s*?$'] = 'become_password' d[r'SSH password:\s*?$'] = 'ssh_password' d[r'Password:\s*?$'] = 'ssh_password' d[r'Vault password:\s*?$'] = 'vault_password' for k, v in passwords.items(): if k.startswith('vault_password.'): # split only on the first dot in case the vault ID itself contains a dot vault_id = k.split('.', 1)[1] d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k return d def should_use_proot(self, job): ''' Return whether this task should use proot. ''' return getattr(settings, 'AWX_PROOT_ENABLED', False) def pre_run_hook(self, job, private_data_dir): if job.inventory is None: error = _('Job could not start because it does not have a valid inventory.') self.update_model(job.pk, status='failed', job_explanation=error) raise RuntimeError(error) elif job.project is None: error = _('Job could not start because it does not have a valid project.') self.update_model(job.pk, status='failed', job_explanation=error) raise RuntimeError(error) elif job.project.status in ('error', 'failed'): msg = _( 'The project revision for this job template is unknown due to a failed update.' 
) job = self.update_model(job.pk, status='failed', job_explanation=msg) raise RuntimeError(msg) project_path = job.project.get_project_path(check_if_exists=False) job_revision = job.project.scm_revision needs_sync = True if not job.project.scm_type: # manual projects are not synced, user has responsibility for that needs_sync = False elif not os.path.exists(project_path): logger.debug('Performing fresh clone of {} on this instance.'.format(job.project)) elif not job.project.scm_revision: logger.debug('Revision not known for {}, will sync with remote'.format(job.project)) elif job.project.scm_type == 'git': git_repo = git.Repo(project_path) try: desired_revision = job.project.scm_revision if job.scm_branch and job.scm_branch != job.project.scm_branch: desired_revision = job.scm_branch # could be commit or not, but will try as commit current_revision = git_repo.head.commit.hexsha if desired_revision == current_revision: job_revision = desired_revision logger.info('Skipping project sync for {} because commit is locally available'.format(job.log_format)) needs_sync = False except (ValueError, BadGitName): logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format)) # Galaxy requirements are not supported for manual projects if not needs_sync and job.project.scm_type: # see if we need a sync because of presence of roles galaxy_req_path = os.path.join(project_path, 'roles', 'requirements.yml') if os.path.exists(galaxy_req_path): logger.debug('Running project sync for {} because of galaxy role requirements.'.format(job.log_format)) needs_sync = True galaxy_collections_req_path = os.path.join(project_path, 'collections', 'requirements.yml') if os.path.exists(galaxy_collections_req_path): logger.debug('Running project sync for {} because of galaxy collections requirements.'.format(job.log_format)) needs_sync = True if needs_sync: pu_ig = job.instance_group pu_en = job.execution_node if job.is_isolated() is True: pu_ig = pu_ig.controller pu_en = settings.CLUSTER_HOST_ID sync_metafields = dict( launch_type="sync", job_type='run', status='running', instance_group = pu_ig, execution_node=pu_en, celery_task_id=job.celery_task_id ) if job.scm_branch and job.scm_branch != job.project.scm_branch: sync_metafields['scm_branch'] = job.scm_branch local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields) # save the associated job before calling run() so that a # cancel() call on the job can cancel the project update job = self.update_model(job.pk, project_update=local_project_sync) project_update_task = local_project_sync._get_task_class() try: # the job private_data_dir is passed so sync can download roles and collections there sync_task = project_update_task(job_private_data_dir=private_data_dir) sync_task.run(local_project_sync.id) local_project_sync.refresh_from_db() job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision) except Exception: local_project_sync.refresh_from_db() if local_project_sync.status != 'canceled': job = self.update_model(job.pk, status='failed', job_explanation=('Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % ('project_update', local_project_sync.name, local_project_sync.id))) raise job.refresh_from_db() if job.cancel_flag: return else: # Case where a local sync is not needed, meaning that local tree is # up-to-date with project, job is running project current version if job_revision: job = self.update_model(job.pk, scm_revision=job_revision) # Project update 
does not copy the folder, so copy here RunProjectUpdate.make_local_copy( project_path, os.path.join(private_data_dir, 'project'), job.project.scm_type, job_revision ) if job.inventory.kind == 'smart': # cache smart inventory memberships so that the host_filter query is not # ran inside of the event saving code update_smart_memberships_for_inventory(job.inventory) def final_run_hook(self, job, status, private_data_dir, fact_modification_times, isolated_manager_instance=None): super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times) if not private_data_dir: # If there's no private data dir, that means we didn't get into the # actual `run()` call; this _usually_ means something failed in # the pre_run_hook method return if job.use_fact_cache: job.finish_job_fact_cache( os.path.join(private_data_dir, 'artifacts', str(job.id), 'fact_cache'), fact_modification_times, ) if isolated_manager_instance: isolated_manager_instance.cleanup() try: inventory = job.inventory except Inventory.DoesNotExist: pass else: update_inventory_computed_fields.delay(inventory.id, True) @task() class RunProjectUpdate(BaseTask): model = ProjectUpdate event_model = ProjectUpdateEvent event_data_key = 'project_update_id' @property def proot_show_paths(self): show_paths = [settings.PROJECTS_ROOT] if self.job_private_data_dir: show_paths.append(self.job_private_data_dir) return show_paths def __init__(self, *args, job_private_data_dir=None, **kwargs): super(RunProjectUpdate, self).__init__(*args, **kwargs) self.playbook_new_revision = None self.original_branch = None self.job_private_data_dir = job_private_data_dir def event_handler(self, event_data): super(RunProjectUpdate, self).event_handler(event_data) returned_data = event_data.get('event_data', {}) if returned_data.get('task_action', '') == 'set_fact': returned_facts = returned_data.get('res', {}).get('ansible_facts', {}) if 'scm_version' in returned_facts: self.playbook_new_revision = returned_facts['scm_version'] def build_private_data(self, project_update, private_data_dir): ''' Return SSH private key data needed for this project update. Returns a dict of the form { 'credentials': { <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>, <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>, <awx.main.models.Credential>: <credential_decrypted_ssh_key_data> } } ''' private_data = {'credentials': {}} if project_update.credential: credential = project_update.credential if credential.has_input('ssh_key_data'): private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='') # Create dir where collections will live for the job run if project_update.job_type != 'check' and getattr(self, 'job_private_data_dir'): for folder_name in ('requirements_collections', 'requirements_roles'): folder_path = os.path.join(self.job_private_data_dir, folder_name) os.mkdir(folder_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC) return private_data def build_passwords(self, project_update, runtime_passwords): ''' Build a dictionary of passwords for SSH private key unlock and SCM username/password. 
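        On top of the defaults inherited from BaseTask.build_passwords(), the
        returned dict may include ``scm_key_unlock``, ``scm_username`` and
        ``scm_password`` (empty strings when the credential does not define them).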
''' passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords) if project_update.credential: passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='') passwords['scm_username'] = project_update.credential.get_input('username', default='') passwords['scm_password'] = project_update.credential.get_input('password', default='') return passwords def build_env(self, project_update, private_data_dir, isolated=False, private_data_files=None): ''' Build environment dictionary for ansible-playbook. ''' env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, isolated=isolated, private_data_files=private_data_files) self.add_ansible_venv(settings.ANSIBLE_VENV_PATH, env) env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False) env['ANSIBLE_ASK_PASS'] = str(False) env['ANSIBLE_BECOME_ASK_PASS'] = str(False) env['DISPLAY'] = '' # Prevent stupid password popup when running tests. # give ansible a hint about the intended tmpdir to work around issues # like https://github.com/ansible/ansible/issues/30064 env['TMP'] = settings.AWX_PROOT_BASE_PATH env['PROJECT_UPDATE_ID'] = str(project_update.pk) env['ANSIBLE_CALLBACK_PLUGINS'] = self.get_path_to('..', 'plugins', 'callback') return env def _build_scm_url_extra_vars(self, project_update): ''' Helper method to build SCM url and extra vars with parameters needed for authentication. ''' extra_vars = {} if project_update.credential: scm_username = project_update.credential.get_input('username', default='') scm_password = project_update.credential.get_input('password', default='') else: scm_username = '' scm_password = '' scm_type = project_update.scm_type scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False) scm_url_parts = urlparse.urlsplit(scm_url) # Prefer the username/password in the URL, if provided. scm_username = scm_url_parts.username or scm_username scm_password = scm_url_parts.password or scm_password if scm_username: if scm_type == 'svn': extra_vars['scm_username'] = scm_username extra_vars['scm_password'] = scm_password scm_password = False if scm_url_parts.scheme != 'svn+ssh': scm_username = False elif scm_url_parts.scheme.endswith('ssh'): scm_password = False elif scm_type == 'insights': extra_vars['scm_username'] = scm_username extra_vars['scm_password'] = scm_password scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True) else: scm_url = update_scm_url(scm_type, scm_url, scp_format=True) # Pass the extra accept_hostkey parameter to the git module. if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'): extra_vars['scm_accept_hostkey'] = 'true' return scm_url, extra_vars def build_inventory(self, instance, private_data_dir): return 'localhost,' def build_args(self, project_update, private_data_dir, passwords): ''' Build command line argument list for running ansible-playbook, optionally using ssh-agent for public/private key authentication. 
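        In practice this only controls verbosity (``-vvv`` when the
        PROJECT_UPDATE_VVV setting is enabled, ``-v`` otherwise); the SCM URL,
        branch and clean/delete behaviour are passed to the project_update.yml
        playbook via extra vars instead.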
''' args = [] if getattr(settings, 'PROJECT_UPDATE_VVV', False): args.append('-vvv') else: args.append('-v') return args def build_extra_vars_file(self, project_update, private_data_dir): extra_vars = {} scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update) extra_vars.update(extra_vars_new) scm_branch = project_update.scm_branch branch_override = bool(scm_branch and project_update.scm_branch != project_update.project.scm_branch) if project_update.job_type == 'run' and (not branch_override): scm_branch = project_update.project.scm_revision elif not scm_branch: scm_branch = {'hg': 'tip'}.get(project_update.scm_type, 'HEAD') if project_update.job_type == 'check': roles_enabled = False collections_enabled = False else: roles_enabled = getattr(settings, 'AWX_ROLES_ENABLED', True) collections_enabled = getattr(settings, 'AWX_COLLECTIONS_ENABLED', True) # collections were introduced in Ansible version 2.8 if Version(get_ansible_version()) <= Version('2.8'): collections_enabled = False extra_vars.update({ 'project_path': project_update.get_project_path(check_if_exists=False), 'insights_url': settings.INSIGHTS_URL_BASE, 'awx_license_type': get_license(show_key=False).get('license_type', 'UNLICENSED'), 'awx_version': get_awx_version(), 'scm_type': project_update.scm_type, 'scm_url': scm_url, 'scm_branch': scm_branch, 'scm_clean': project_update.scm_clean, 'scm_delete_on_update': project_update.scm_delete_on_update if project_update.job_type == 'check' else False, 'scm_full_checkout': True if project_update.job_type == 'run' else False, 'roles_enabled': roles_enabled, 'collections_enabled': collections_enabled, }) if project_update.job_type != 'check' and self.job_private_data_dir: extra_vars['collections_destination'] = os.path.join(self.job_private_data_dir, 'requirements_collections') extra_vars['roles_destination'] = os.path.join(self.job_private_data_dir, 'requirements_roles') # apply custom refspec from user for PR refs and the like if project_update.scm_refspec: extra_vars['scm_refspec'] = project_update.scm_refspec elif project_update.project.allow_override: # If branch is override-able, do extra fetch for all branches extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*' self._write_extra_vars_file(private_data_dir, extra_vars) def build_cwd(self, project_update, private_data_dir): return self.get_path_to('..', 'playbooks') def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir): return os.path.join('project_update.yml') def get_password_prompts(self, passwords={}): d = super(RunProjectUpdate, self).get_password_prompts(passwords) d[r'Username for.*:\s*?$'] = 'scm_username' d[r'Password for.*:\s*?$'] = 'scm_password' d['Password:\s*?$'] = 'scm_password' # noqa d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password' d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock' d[r'Bad passphrase, try again for .*:\s*?$'] = '' # FIXME: Configure whether we should auto accept host keys? 
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes' return d def _update_dependent_inventories(self, project_update, dependent_inventory_sources): scm_revision = project_update.project.scm_revision inv_update_class = InventoryUpdate._get_task_class() for inv_src in dependent_inventory_sources: if not inv_src.update_on_project_update: continue if inv_src.scm_last_revision == scm_revision: logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name)) continue logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name)) with transaction.atomic(): if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists(): logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name)) continue local_inv_update = inv_src.create_inventory_update( _eager_fields=dict( launch_type='scm', status='running', instance_group=project_update.instance_group, execution_node=project_update.execution_node, source_project_update=project_update, celery_task_id=project_update.celery_task_id)) try: inv_update_class().run(local_inv_update.id) except Exception: logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format( project_update.log_format )) try: project_update.refresh_from_db() except ProjectUpdate.DoesNotExist: logger.warning('Project update deleted during updates of dependent SCM inventory sources.') break try: local_inv_update.refresh_from_db() except InventoryUpdate.DoesNotExist: logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format) continue if project_update.cancel_flag: logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format)) break if local_inv_update.cancel_flag: logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format)) if local_inv_update.status == 'successful': inv_src.scm_last_revision = scm_revision inv_src.save(update_fields=['scm_last_revision']) def release_lock(self, instance): try: fcntl.lockf(self.lock_fd, fcntl.LOCK_UN) except IOError as e: logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror)) os.close(self.lock_fd) raise os.close(self.lock_fd) self.lock_fd = None ''' Note: We don't support blocking=False ''' def acquire_lock(self, instance, blocking=True): lock_path = instance.get_lock_file() if lock_path is None: raise RuntimeError(u'Invalid lock file path') try: self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT) except OSError as e: logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror)) raise start_time = time.time() while True: try: instance.refresh_from_db(fields=['cancel_flag']) if instance.cancel_flag: logger.debug("ProjectUpdate({0}) was cancelled".format(instance.pk)) return fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) break except IOError as e: if e.errno not in (errno.EAGAIN, errno.EACCES): os.close(self.lock_fd) logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror)) raise else: time.sleep(1.0) waiting_time = time.time() - start_time if waiting_time > 1.0: logger.info( '{} spent {} waiting to acquire lock for local source tree ' 'for path {}.'.format(instance.log_format, waiting_time, lock_path)) def 
pre_run_hook(self, instance, private_data_dir): # re-create root project folder if a natural disaster has destroyed it if not os.path.exists(settings.PROJECTS_ROOT): os.mkdir(settings.PROJECTS_ROOT) self.acquire_lock(instance) self.original_branch = None if (instance.scm_type == 'git' and instance.job_type == 'run' and instance.project and instance.scm_branch != instance.project.scm_branch): project_path = instance.project.get_project_path(check_if_exists=False) if os.path.exists(project_path): git_repo = git.Repo(project_path) self.original_branch = git_repo.active_branch @staticmethod def make_local_copy(project_path, destination_folder, scm_type, scm_revision): if scm_type == 'git': git_repo = git.Repo(project_path) if not os.path.exists(destination_folder): os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC) tmp_branch_name = 'awx_internal/{}'.format(uuid4()) # always clone based on specific job revision if not scm_revision: raise RuntimeError('Unexpectedly could not determine a revision to run from project.') source_branch = git_repo.create_head(tmp_branch_name, scm_revision) # git clone must take file:// syntax for source repo or else options like depth will be ignored source_as_uri = Path(project_path).as_uri() git.Repo.clone_from( source_as_uri, destination_folder, branch=source_branch, depth=1, single_branch=True, # shallow, do not copy full history ) # submodules copied in loop because shallow copies from local HEADs are ideal # and no git clone submodule options are compatible with minimum requirements for submodule in git_repo.submodules: subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path)) subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path)) subrepo_uri = Path(subrepo_path).as_uri() git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True) # force option is necessary because remote refs are not counted, although no information is lost git_repo.delete_head(tmp_branch_name, force=True) else: copy_tree(project_path, destination_folder) def post_run_hook(self, instance, status): if self.job_private_data_dir: # copy project folder before resetting to default branch # because some git-tree-specific resources (like submodules) might matter self.make_local_copy( instance.get_project_path(check_if_exists=False), os.path.join(self.job_private_data_dir, 'project'), instance.scm_type, self.playbook_new_revision ) if self.original_branch: # for git project syncs, non-default branches can be problems # restore to branch the repo was on before this run try: self.original_branch.checkout() except Exception: # this could have failed due to dirty tree, but difficult to predict all cases logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format)) self.release_lock(instance) p = instance.project if self.playbook_new_revision: instance.scm_revision = self.playbook_new_revision instance.save(update_fields=['scm_revision']) if instance.job_type == 'check' and status not in ('failed', 'canceled',): if self.playbook_new_revision: p.scm_revision = self.playbook_new_revision else: if status == 'successful': logger.error("{} Could not find scm revision in check".format(instance.log_format)) p.playbook_files = p.playbooks p.inventory_files = p.inventories p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files']) # Update any inventories that depend on this project dependent_inventory_sources = 
p.scm_inventory_sources.filter(update_on_project_update=True) if len(dependent_inventory_sources) > 0: if status == 'successful' and instance.launch_type != 'sync': self._update_dependent_inventories(instance, dependent_inventory_sources) def should_use_proot(self, project_update): ''' Return whether this task should use proot. ''' return getattr(settings, 'AWX_PROOT_ENABLED', False) @task() class RunInventoryUpdate(BaseTask): model = InventoryUpdate event_model = InventoryUpdateEvent event_data_key = 'inventory_update_id' @property def proot_show_paths(self): return [self.get_path_to('..', 'plugins', 'inventory')] def build_private_data(self, inventory_update, private_data_dir): """ Return private data needed for inventory update. Returns a dict of the form { 'credentials': { <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>, <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>, <awx.main.models.Credential>: <credential_decrypted_ssh_key_data> } } If no private data is needed, return None. """ if inventory_update.source in InventorySource.injectors: injector = InventorySource.injectors[inventory_update.source](self.get_ansible_version(inventory_update)) return injector.build_private_data(inventory_update, private_data_dir) def build_env(self, inventory_update, private_data_dir, isolated, private_data_files=None): """Build environment dictionary for inventory import. This used to be the mechanism by which any data that needs to be passed to the inventory update script is set up. In particular, this is how inventory update is aware of its proper credentials. Most environment injection is now accomplished by the credential injectors. The primary purpose this still serves is to still point to the inventory update INI or config file. """ env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, isolated, private_data_files=private_data_files) if private_data_files is None: private_data_files = {} self.add_awx_venv(env) # Pass inventory source ID to inventory script. env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id) env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk) env.update(STANDARD_INVENTORY_UPDATE_ENV) injector = None if inventory_update.source in InventorySource.injectors: injector = InventorySource.injectors[inventory_update.source](self.get_ansible_version(inventory_update)) if injector is not None: env = injector.build_env(inventory_update, env, private_data_dir, private_data_files) # All CLOUD_PROVIDERS sources implement as either script or auto plugin if injector.should_use_plugin(): env['ANSIBLE_INVENTORY_ENABLED'] = 'auto' else: env['ANSIBLE_INVENTORY_ENABLED'] = 'script' if inventory_update.source in ['scm', 'custom']: for env_k in inventory_update.source_vars_dict: if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLACKLIST: env[str(env_k)] = str(inventory_update.source_vars_dict[env_k]) elif inventory_update.source == 'file': raise NotImplementedError('Cannot update file sources through the task system.') return env def write_args_file(self, private_data_dir, args): path = os.path.join(private_data_dir, 'args') handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE) f = os.fdopen(handle, 'w') f.write(' '.join(args)) f.close() os.chmod(path, stat.S_IRUSR) return path def build_args(self, inventory_update, private_data_dir, passwords): """Build the command line argument list for running an inventory import. """ # Get the inventory source and inventory. 
inventory_source = inventory_update.inventory_source inventory = inventory_source.inventory if inventory is None: raise RuntimeError('Inventory Source is not associated with an Inventory.') # Piece together the initial command to run via. the shell. args = ['awx-manage', 'inventory_import'] args.extend(['--inventory-id', str(inventory.pk)]) # Add appropriate arguments for overwrite if the inventory_update # object calls for it. if inventory_update.overwrite: args.append('--overwrite') if inventory_update.overwrite_vars: args.append('--overwrite-vars') # Declare the virtualenv the management command should activate # as it calls ansible-inventory args.extend(['--venv', inventory_update.ansible_virtualenv_path]) src = inventory_update.source # Add several options to the shell arguments based on the # inventory-source-specific setting in the AWX configuration. # These settings are "per-source"; it's entirely possible that # they will be different between cloud providers if an AWX user # actively uses more than one. if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False): args.extend(['--enabled-var', getattr(settings, '%s_ENABLED_VAR' % src.upper())]) if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False): args.extend(['--enabled-value', getattr(settings, '%s_ENABLED_VALUE' % src.upper())]) if getattr(settings, '%s_GROUP_FILTER' % src.upper(), False): args.extend(['--group-filter', getattr(settings, '%s_GROUP_FILTER' % src.upper())]) if getattr(settings, '%s_HOST_FILTER' % src.upper(), False): args.extend(['--host-filter', getattr(settings, '%s_HOST_FILTER' % src.upper())]) if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()): args.append('--exclude-empty-groups') if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False): args.extend(['--instance-id-var', getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper()),]) # Add arguments for the source inventory script args.append('--source') args.append(self.pseudo_build_inventory(inventory_update, private_data_dir)) if src == 'custom': args.append("--custom") args.append('-v%d' % inventory_update.verbosity) if settings.DEBUG: args.append('--traceback') return args def build_inventory(self, inventory_update, private_data_dir): return None # what runner expects in order to not deal with inventory def pseudo_build_inventory(self, inventory_update, private_data_dir): """Inventory imports are ran through a management command we pass the inventory in args to that command, so this is not considered to be "Ansible" inventory (by runner) even though it is Eventually, we would like to cut out the management command, and thus use this as the real inventory """ src = inventory_update.source injector = None if inventory_update.source in InventorySource.injectors: injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update)) if injector is not None: if injector.should_use_plugin(): content = injector.inventory_contents(inventory_update, private_data_dir) # must be a statically named file inventory_path = os.path.join(private_data_dir, injector.filename) with open(inventory_path, 'w') as f: f.write(content) os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) else: # Use the vendored script path inventory_path = self.get_path_to('..', 'plugins', 'inventory', injector.script_name) elif src == 'scm': inventory_path = inventory_update.get_actual_source_path() elif src == 'custom': handle, inventory_path = tempfile.mkstemp(dir=private_data_dir) f = os.fdopen(handle, 'w') if inventory_update.source_script 
is None: raise RuntimeError('Inventory Script does not exist') f.write(inventory_update.source_script.script) f.close() os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR) return inventory_path def build_cwd(self, inventory_update, private_data_dir): ''' There are two cases where the inventory "source" is in a different location from the private data: - deprecated vendored inventory scripts in awx/plugins/inventory - SCM, where source needs to live in the project folder in these cases, the inventory does not exist in the standard tempdir ''' src = inventory_update.source if src == 'scm' and inventory_update.source_project_update: return inventory_update.source_project_update.get_project_path(check_if_exists=False) if src in CLOUD_PROVIDERS: injector = None if src in InventorySource.injectors: injector = InventorySource.injectors[src](self.get_ansible_version(inventory_update)) if (not injector) or (not injector.should_use_plugin()): return self.get_path_to('..', 'plugins', 'inventory') return private_data_dir def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir): return None def build_credentials_list(self, inventory_update): # All credentials not used by inventory source injector return inventory_update.get_extra_credentials() def pre_run_hook(self, inventory_update, private_data_dir): source_project = None if inventory_update.inventory_source: source_project = inventory_update.inventory_source.source_project if (inventory_update.source=='scm' and inventory_update.launch_type!='scm' and source_project): # In project sync, pulling galaxy roles is not needed local_project_sync = source_project.create_project_update( _eager_fields=dict( launch_type="sync", job_type='run', status='running', execution_node=inventory_update.execution_node, instance_group = inventory_update.instance_group, celery_task_id=inventory_update.celery_task_id)) # associate the inventory update before calling run() so that a # cancel() call on the inventory update can cancel the project update local_project_sync.scm_inventory_updates.add(inventory_update) project_update_task = local_project_sync._get_task_class() try: project_update_task().run(local_project_sync.id) inventory_update.inventory_source.scm_last_revision = local_project_sync.project.scm_revision inventory_update.inventory_source.save(update_fields=['scm_last_revision']) except Exception: inventory_update = self.update_model( inventory_update.pk, status='failed', job_explanation=('Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % ('project_update', local_project_sync.name, local_project_sync.id))) raise @task() class RunAdHocCommand(BaseTask): ''' Run an ad hoc command using ansible. ''' model = AdHocCommand event_model = AdHocCommandEvent event_data_key = 'ad_hoc_command_id' def build_private_data(self, ad_hoc_command, private_data_dir): ''' Return SSH private key data needed for this ad hoc command (only if stored in DB as ssh_key_data). Returns a dict of the form { 'credentials': { <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>, <awx.main.models.Credential>: <credential_decrypted_ssh_key_data>, ... }, 'certificates': { <awx.main.models.Credential>: <signed SSH certificate data>, <awx.main.models.Credential>: <signed SSH certificate data>, ... } } ''' # If we were sent SSH credentials, decrypt them and send them # back (they will be written to a temporary file). 
creds = ad_hoc_command.credential private_data = {'credentials': {}} if creds and creds.has_input('ssh_key_data'): private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='') if creds and creds.has_input('ssh_public_key_data'): private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='') return private_data def build_passwords(self, ad_hoc_command, runtime_passwords): ''' Build a dictionary of passwords for SSH private key, SSH user and sudo/su. ''' passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords) cred = ad_hoc_command.credential if cred: for field in ('ssh_key_unlock', 'ssh_password', 'become_password'): value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default='')) if value not in ('', 'ASK'): passwords[field] = value return passwords def build_env(self, ad_hoc_command, private_data_dir, isolated=False, private_data_files=None): ''' Build environment dictionary for ansible. ''' plugin_dir = self.get_path_to('..', 'plugins', 'callback') env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, isolated=isolated, private_data_files=private_data_files) self.add_ansible_venv(settings.ANSIBLE_VENV_PATH, env) # Set environment variables needed for inventory and ad hoc event # callbacks to work. env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk) env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk) env['INVENTORY_HOSTVARS'] = str(True) env['ANSIBLE_CALLBACK_PLUGINS'] = plugin_dir env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1' env['ANSIBLE_SFTP_BATCH_MODE'] = 'False' # Specify empty SSH args (should disable ControlPersist entirely for # ad hoc commands). env.setdefault('ANSIBLE_SSH_ARGS', '') return env def build_args(self, ad_hoc_command, private_data_dir, passwords): ''' Build command line argument list for running ansible, optionally using ssh-agent for public/private key authentication. ''' creds = ad_hoc_command.credential ssh_username, become_username, become_method = '', '', '' if creds: ssh_username = creds.get_input('username', default='') become_method = creds.get_input('become_method', default='') become_username = creds.get_input('become_username', default='') else: become_method = None become_username = "" # Always specify the normal SSH user as root by default. Since this # task is normally running in the background under a service account, # it doesn't make sense to rely on ansible's default of using the # current user. ssh_username = ssh_username or 'root' args = [] if ad_hoc_command.job_type == 'check': args.append('--check') args.extend(['-u', sanitize_jinja(ssh_username)]) if 'ssh_password' in passwords: args.append('--ask-pass') # We only specify sudo/su user and password if explicitly given by the # credential. Credential should never specify both sudo and su. if ad_hoc_command.become_enabled: args.append('--become') if become_method: args.extend(['--become-method', sanitize_jinja(become_method)]) if become_username: args.extend(['--become-user', sanitize_jinja(become_username)]) if 'become_password' in passwords: args.append('--ask-become-pass') if ad_hoc_command.forks: # FIXME: Max limit? 
args.append('--forks=%d' % ad_hoc_command.forks) if ad_hoc_command.diff_mode: args.append('--diff') if ad_hoc_command.verbosity: args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity))) extra_vars = ad_hoc_command.awx_meta_vars() if ad_hoc_command.extra_vars_dict: redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict) if removed_vars: raise ValueError(_( "{} are prohibited from use in ad hoc commands." ).format(", ".join(removed_vars))) extra_vars.update(ad_hoc_command.extra_vars_dict) if ad_hoc_command.limit: args.append(ad_hoc_command.limit) else: args.append('all') return args def build_extra_vars_file(self, ad_hoc_command, private_data_dir): extra_vars = ad_hoc_command.awx_meta_vars() if ad_hoc_command.extra_vars_dict: redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict) if removed_vars: raise ValueError(_( "{} are prohibited from use in ad hoc commands." ).format(", ".join(removed_vars))) extra_vars.update(ad_hoc_command.extra_vars_dict) self._write_extra_vars_file(private_data_dir, extra_vars) def build_module_name(self, ad_hoc_command): return ad_hoc_command.module_name def build_module_args(self, ad_hoc_command): module_args = ad_hoc_command.module_args if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always': module_args = sanitize_jinja(module_args) return module_args def build_cwd(self, ad_hoc_command, private_data_dir): return private_data_dir def build_playbook_path_relative_to_cwd(self, job, private_data_dir): return None def get_password_prompts(self, passwords={}): d = super(RunAdHocCommand, self).get_password_prompts() d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock' d[r'Bad passphrase, try again for .*:\s*?$'] = '' for method in PRIVILEGE_ESCALATION_METHODS: d[r'%s password.*:\s*?$' % (method[0])] = 'become_password' d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password' d[r'BECOME password.*:\s*?$'] = 'become_password' d[r'SSH password:\s*?$'] = 'ssh_password' d[r'Password:\s*?$'] = 'ssh_password' return d def should_use_proot(self, ad_hoc_command): ''' Return whether this task should use proot. 
''' return getattr(settings, 'AWX_PROOT_ENABLED', False) def final_run_hook(self, adhoc_job, status, private_data_dir, fact_modification_times, isolated_manager_instance=None): super(RunAdHocCommand, self).final_run_hook(adhoc_job, status, private_data_dir, fact_modification_times) if isolated_manager_instance: isolated_manager_instance.cleanup() @task() class RunSystemJob(BaseTask): model = SystemJob event_model = SystemJobEvent event_data_key = 'system_job_id' def build_args(self, system_job, private_data_dir, passwords): args = ['awx-manage', system_job.job_type] try: # System Job extra_vars can be blank, must be JSON if not blank if system_job.extra_vars == '': json_vars = {} else: json_vars = json.loads(system_job.extra_vars) if 'days' in json_vars: args.extend(['--days', str(json_vars.get('days', 60))]) if 'dry_run' in json_vars and json_vars['dry_run']: args.extend(['--dry-run']) if system_job.job_type == 'cleanup_jobs': args.extend(['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']) except Exception: logger.exception("{} Failed to parse system job".format(system_job.log_format)) return args def write_args_file(self, private_data_dir, args): path = os.path.join(private_data_dir, 'args') handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE) f = os.fdopen(handle, 'w') f.write(' '.join(args)) f.close() os.chmod(path, stat.S_IRUSR) return path def build_env(self, instance, private_data_dir, isolated=False, private_data_files=None): env = super(RunSystemJob, self).build_env(instance, private_data_dir, isolated=isolated, private_data_files=private_data_files) self.add_awx_venv(env) return env def build_cwd(self, instance, private_data_dir): return settings.BASE_DIR def build_playbook_path_relative_to_cwd(self, job, private_data_dir): return None def build_inventory(self, instance, private_data_dir): return None def _reconstruct_relationships(copy_mapping): for old_obj, new_obj in copy_mapping.items(): model = type(old_obj) for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []): field = model._meta.get_field(field_name) if isinstance(field, ForeignKey): if getattr(new_obj, field_name, None): continue related_obj = getattr(old_obj, field_name) related_obj = copy_mapping.get(related_obj, related_obj) setattr(new_obj, field_name, related_obj) elif field.many_to_many: for related_obj in getattr(old_obj, field_name).all(): logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format( related_obj, new_obj, model, field_name )) getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj)) new_obj.save() @task() def deep_copy_model_obj( model_module, model_name, obj_pk, new_obj_pk, user_pk, sub_obj_list, permission_check_func=None ): logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk)) from awx.api.generics import CopyAPIView from awx.main.signals import disable_activity_stream model = getattr(importlib.import_module(model_module), model_name, None) if model is None: return try: obj = model.objects.get(pk=obj_pk) new_obj = model.objects.get(pk=new_obj_pk) creater = User.objects.get(pk=user_pk) except ObjectDoesNotExist: logger.warning("Object or user no longer exists.") return with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream(): copy_mapping = {} for sub_obj_setup in sub_obj_list: sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None) if sub_model is None: 
continue try: sub_obj = sub_model.objects.get(pk=sub_obj_setup[2]) except ObjectDoesNotExist: continue copy_mapping.update(CopyAPIView.copy_model_obj( obj, new_obj, sub_model, sub_obj, creater )) _reconstruct_relationships(copy_mapping) if permission_check_func: permission_check_func = getattr(getattr( importlib.import_module(permission_check_func[0]), permission_check_func[1] ), permission_check_func[2]) permission_check_func(creater, copy_mapping.values()) if isinstance(new_obj, Inventory): update_inventory_computed_fields.delay(new_obj.id, True)
[]
[]
[]
[]
[]
python
0
0
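The RunProjectUpdate record above maps SCM prompt regexes to password names in get_password_prompts. As a rough, standalone illustration (not part of the dataset) of how such a mapping can be applied, the sketch below matches a few invented sample prompts against the same patterns taken from the record:

import re

# Prompt-pattern -> password-name mapping, copied from the RunProjectUpdate record above
PROMPTS = {
    r'Username for.*:\s*?$': 'scm_username',
    r'Password for.*:\s*?$': 'scm_password',
    r'Password:\s*?$': 'scm_password',
    r'Enter passphrase for .*:\s*?$': 'scm_key_unlock',
}

def expected_password(prompt_line):
    """Return the password key whose pattern matches the prompt, or None."""
    for pattern, password_name in PROMPTS.items():
        if re.search(pattern, prompt_line):
            return password_name
    return None

# Invented sample prompts, purely for illustration
for line in ("Username for 'https://github.com':",
             "Enter passphrase for key '/home/user/.ssh/id_rsa':",
             "Password for 'https://user@github.com':"):
    print(line, '->', expected_password(line))
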
manage.py
#!/usr/bin/env python
import os
import sys

if __name__ == "__main__":
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "shop.settings.development")

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
[]
[]
[]
[]
[]
python
0
0
api/api.py
### IMPORTS ### import os import time from typing import ( List, Dict, Union, ) import threading import uuid import numpy as np from pydantic import ( BaseModel, validator, Field, ) from fastapi import ( FastAPI, Response, status, ) from fastapi.middleware.cors import CORSMiddleware import diskcache as dc from stressypy import create_job ############### ### FastAPI setup ### DEBUG = os.environ['DEBUG'] if os.environ['DEBUG'] else True config = { "DEBUG": DEBUG, "CACHE_TYPE": "SimpleCache", "CACHE_DEFAULT_TIMEOUT": 3600 # one hour } app = FastAPI( debug=DEBUG, title='LinReg API', description='An amazing API for some OP linear regression', version='0.0.1', docs_url='/', ) origins = [ "http://localhost:8000", "http://0.0.0.0:8000", ] app.add_middleware( CORSMiddleware, allow_origins=origins, allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ) cache = dc.FanoutCache('tmp') ##################### WORKERS = int(os.getenv("WORKERS", 1)) class LinRegLocks: """ A commodity class taking care to limit the number of concurrent operations to the number of workers available to the server """ locks = {f'worker_{i}': threading.Lock() for i in range(WORKERS)} def __enter__(self): self.lock = self._get_lock() self.lock.acquire() def __exit__(self, *args, **kwargs): try: self.lock.release() except: pass def _get_lock(self): while self._all_locked(): time.sleep(1) for lock in self.locks.values(): if not lock.locked(): return lock def _all_locked(self): return all([lock.locked() for lock in self.locks.values()]) linreg_lock = LinRegLocks() class DataToFit(BaseModel): """ Pydantic definition of the data users can input to generate a fit together with the required validation """ xs: List[float] = Field(example=[1, 2, 3]) ys: List[float] = Field(example=[1, 2, 3]) @validator('xs') def points_must_be_of_same_size(cls, v, values, **kwargs): if 'xs' in values and len(v) != len(values['ys']): raise ValueError('xs and ys have to be of same size') return v @validator('xs') def points_must_be_at_least_two(cls, v, values, **kwargs): if 'xs' in values and len(v) < 2: raise ValueError('xs and ys have to be at least 2') return v class DataFittedModel(BaseModel): """Pydantic definition of the fitted model""" model_id: int model: Dict class DataToPredict(BaseModel): """ Pydantic definition of the data users can provide for inference """ xs: List[float] def linreg(x: np.array, y: np.array) -> Dict[str, float]: """ The actual workhorse :returns dict with fitted slope and intercept """ A = np.vstack([x, np.ones(len(x))]).T slope, intercept = np.linalg.lstsq(A, y, rcond=None)[0] return {'slope': slope, 'intercept': intercept} @app.post("/fit", status_code=status.HTTP_201_CREATED) def linear_fit(points_to_fit: DataToFit, response: Response) -> Union[Dict[str, Union[str, Dict[str, float]]], Response]: """ The endpoint to fit a line to a set of datapoints :param points_to_fit: :param response: :return: """ # First check if all locks are already used up # If that's the case return 429 if linreg_lock._all_locked(): response.status_code = status.HTTP_429_TOO_MANY_REQUESTS return response # Now we can build the model # We use a thread lock to simulate a single threaded execution with linreg_lock: model = linreg(points_to_fit.xs, points_to_fit.ys) # Simulate that this takes A LOT of CPU for 20 seconds job = create_job(1, 20) job.run() # Create a pseudo-random ID for it model_id = str(uuid.uuid4()) # Store it temporarily cache.set(model_id, model) # Return the model id and its parameters output = { 'model_id': model_id, 
'model': model, } return output @app.post("/predict/{model_id}", status_code=status.HTTP_200_OK) def predict(points_to_predict: DataToPredict, model_id: str): """ The endpoint to predict the ys for the given xs given the previously fitted model :param points_to_predict: :param model_id: :return: """ # Check if model has been fitted before if not (model := cache.get(model_id)): return {'error': f'model_id {model_id} not found in cache. please fit your model first'}, 404 else: # Make predictions predictions = model['intercept'] + model['slope'] * np.array(points_to_predict.xs) response = {'ys': list(predictions)} return response
[]
[]
[ "WORKERS", "DEBUG" ]
[]
["WORKERS", "DEBUG"]
python
2
0
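The api/api.py record above exposes a POST /fit endpoint that returns a model_id plus fitted slope/intercept, and a POST /predict/{model_id} endpoint for inference. A minimal client sketch, assuming the app is served locally at http://localhost:8000 (for example via uvicorn api.api:app) and that the requests package is available:

import requests

BASE_URL = "http://localhost:8000"  # assumed local address; adjust to your deployment

# Fit a line to three points (the /fit call can take ~20 s because the
# service deliberately burns CPU with stressypy after fitting)
fit_resp = requests.post(f"{BASE_URL}/fit", json={"xs": [1, 2, 3], "ys": [2, 4, 6]})
fit_resp.raise_for_status()
fitted = fit_resp.json()
print(fitted["model"])          # e.g. {'slope': ..., 'intercept': ...}

# Use the returned model_id to predict ys for new xs
model_id = fitted["model_id"]
pred_resp = requests.post(f"{BASE_URL}/predict/{model_id}", json={"xs": [4, 5]})
print(pred_resp.json())         # e.g. {'ys': [...]}
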
server.py
# coding:utf-8
from flask import Flask, request, send_from_directory, flash, redirect, url_for, abort, jsonify, render_template, g
import os
from werkzeug.utils import secure_filename
from flask import helpers
import color_transfer
import db
from logger import get_log

os.environ['LD_LIBRARY_PATH'] = '/'.join(__file__.split('/')[:-1]) + '/lib'

app = Flask(__name__, static_url_path='')
app.secret_key = 'A0Zr98j/3yX R~XHH!jmN]LWX/,?RT'
executor = color_transfer.Executor()
fs = color_transfer.FileStore()


@app.before_request
def before_request():
    g.db = db.SqliteDB()


@app.teardown_request
def teardown_request(exception):
    if hasattr(g, 'db'):
        g.db.close()


@app.route('/')
def index():
    return send_from_directory('static', 'index.htm')


@app.route('/file/<path:filename>', methods=['DELETE'])
def delete(filename):
    # TODO
    # filename = secure_filename(filename)
    # filepath = os.path.join(UPLOAD_FILE_PATH, filename)
    # if os.path.isfile(filepath):
    #     os.remove(filepath)
    #     return jsonify({'msg': '删除成功', 'code': 0})
    # else:
    return jsonify({'msg': '删除失败', 'code': 1})


@app.route('/file/<types>', methods=['POST'])
def upload(types):
    if 'file' not in request.files:
        abort(400)
    file = request.files['file']
    if not file or 'image' not in file.mimetype:
        abort(400)
    filename = secure_filename(file.filename)
    if types == 'src':
        filename = 'src_' + filename
    elif types == 'ref':
        filename = 'ref_' + filename
    else:
        abort(400)
    return jsonify({'hash': fs.upload(file, filename)})


@app.route('/file/<types>', methods=['GET'])
def get_list(types):
    # files = next(os.walk(UPLOAD_FILE_PATH))[2]
    # files = list(filter(lambda x: x.startswith(types), files))
    # files.sort(key=lambda f: os.stat(os.path.join(UPLOAD_FILE_PATH, f)).st_mtime, reverse=True)
    return jsonify(fs.list(types))


@app.route('/download/<path:filename>')
def download(filename):
    file_path = fs.download(filename)
    if not file_path:
        abort(404)
    if not os.path.exists(file_path):
        abort(404)
    return helpers.send_file(file_path, cache_timeout=24 * 3600)


@app.route('/work')
def work():
    ref_img = request.args.get('ref_img')
    src_img = request.args.get('src_img')
    al = request.args.get('alg', 'reinhard')
    if al not in ['reinhard', 'welsh']:
        abort(400)
    r_id = g.db.insert_file(src_img, ref_img, al)
    executor.add_task(r_id, src_img, ref_img, al)
    return jsonify({'redirect': 'show'})


@app.route('/show')
def show():
    return send_from_directory('static', 'submission.html')


@app.route('/submission')
def submission():
    return jsonify(g.db.query_db('select * from result order by id desc'))


@app.route('/submission/del/<int:id>')
def submission_del(id):
    # TODO
    # row = g.db.query_db('select * from result where id = ?', (id,), one=True)
    # filenames = [row['src_img'], row['res_img'], row['ref_img']]
    # for row_name in row:
    #     if row[row_name] not in filenames:
    #         continue
    #     size = g.db.query_db('select count(1) from result where {} = ?1'.format(row_name), (row[row_name],), one=True)
    #     if 1 < int(size['count(1)']):
    #         filenames.remove(row[row_name])
    #
    # for i in filenames:
    #     try:
    #         os.remove(os.path.join(UPLOAD_FILE_PATH, i))
    #     except IOError:
    #         pass
    # g.db.del_file(id)
    return redirect(url_for('show', _external=True))


if __name__ == '__main__':
    app.debug = True
    app.run(host='0.0.0.0', port=8080)
[]
[]
[ "LD_LIBRARY_PATH" ]
[]
["LD_LIBRARY_PATH"]
python
1
0
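The server.py record above is a small Flask front end for a color-transfer pipeline: images are uploaded to /file/src and /file/ref, and /work queues a job. A rough client sketch, assuming the server is running on port 8080 as in the record and that src.jpg / ref.jpg are local image files (both file names are invented for illustration):

import requests

BASE_URL = "http://localhost:8080"  # the record starts the app on 0.0.0.0:8080

def upload(kind, path):
    """Upload an image as either the 'src' or 'ref' file and return its stored name/hash."""
    with open(path, "rb") as fh:
        resp = requests.post(f"{BASE_URL}/file/{kind}",
                             files={"file": (path, fh, "image/jpeg")})
    resp.raise_for_status()
    return resp.json()["hash"]

src_name = upload("src", "src.jpg")   # hypothetical local files
ref_name = upload("ref", "ref.jpg")

# Queue a color-transfer job; 'reinhard' and 'welsh' are the algorithms the server accepts
resp = requests.get(f"{BASE_URL}/work",
                    params={"src_img": src_name, "ref_img": ref_name, "alg": "reinhard"})
print(resp.json())  # {'redirect': 'show'}
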
navygem/wsgi.py
'''
WSGI config for navygem project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
'''

import os

from django.core.wsgi import get_wsgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'navygem.settings')

application = get_wsgi_application()
[]
[]
[]
[]
[]
python
0
0
logger/default.go
package logger import ( "context" "fmt" "os" "runtime" "sort" "strings" "sync" "time" dlog "xinhari.com/xinhari/debug/log" ) func init() { lvl, err := GetLevel(os.Getenv("MICRO_LOG_LEVEL")) if err != nil { lvl = InfoLevel } DefaultLogger = NewHelper(NewLogger(WithLevel(lvl))) } type defaultLogger struct { sync.RWMutex opts Options } // Init(opts...) should only overwrite provided options func (l *defaultLogger) Init(opts ...Option) error { for _, o := range opts { o(&l.opts) } return nil } func (l *defaultLogger) String() string { return "default" } func (l *defaultLogger) Fields(fields map[string]interface{}) Logger { l.Lock() l.opts.Fields = copyFields(fields) l.Unlock() return l } func copyFields(src map[string]interface{}) map[string]interface{} { dst := make(map[string]interface{}, len(src)) for k, v := range src { dst[k] = v } return dst } // logCallerfilePath returns a package/file:line description of the caller, // preserving only the leaf directory name and file name. func logCallerfilePath(loggingFilePath string) string { // To make sure we trim the path correctly on Windows too, we // counter-intuitively need to use '/' and *not* os.PathSeparator here, // because the path given originates from Go stdlib, specifically // runtime.Caller() which (as of Mar/17) returns forward slashes even on // Windows. // // See https://github.com/golang/go/issues/3335 // and https://github.com/golang/go/issues/18151 // // for discussion on the issue on Go side. idx := strings.LastIndexByte(loggingFilePath, '/') if idx == -1 { return loggingFilePath } idx = strings.LastIndexByte(loggingFilePath[:idx], '/') if idx == -1 { return loggingFilePath } return loggingFilePath[idx+1:] } func (l *defaultLogger) Log(level Level, v ...interface{}) { // TODO decide does we need to write message if log level not used? if !l.opts.Level.Enabled(level) { return } l.RLock() fields := copyFields(l.opts.Fields) l.RUnlock() fields["level"] = level.String() if _, file, line, ok := runtime.Caller(l.opts.CallerSkipCount); ok { fields["file"] = fmt.Sprintf("%s:%d", logCallerfilePath(file), line) } rec := dlog.Record{ Timestamp: time.Now(), Message: fmt.Sprint(v...), Metadata: make(map[string]string, len(fields)), } keys := make([]string, 0, len(fields)) for k, v := range fields { keys = append(keys, k) rec.Metadata[k] = fmt.Sprintf("%v", v) } sort.Strings(keys) metadata := "" for _, k := range keys { metadata += fmt.Sprintf(" %s=%v", k, fields[k]) } dlog.DefaultLog.Write(rec) t := rec.Timestamp.Format("2006-01-02 15:04:05") fmt.Printf("%s %s %v\n", t, metadata, rec.Message) } func (l *defaultLogger) Logf(level Level, format string, v ...interface{}) { // TODO decide does we need to write message if log level not used? 
if level < l.opts.Level { return } l.RLock() fields := copyFields(l.opts.Fields) l.RUnlock() fields["level"] = level.String() if _, file, line, ok := runtime.Caller(l.opts.CallerSkipCount); ok { fields["file"] = fmt.Sprintf("%s:%d", logCallerfilePath(file), line) } rec := dlog.Record{ Timestamp: time.Now(), Message: fmt.Sprintf(format, v...), Metadata: make(map[string]string, len(fields)), } keys := make([]string, 0, len(fields)) for k, v := range fields { keys = append(keys, k) rec.Metadata[k] = fmt.Sprintf("%v", v) } sort.Strings(keys) metadata := "" for _, k := range keys { metadata += fmt.Sprintf(" %s=%v", k, fields[k]) } dlog.DefaultLog.Write(rec) t := rec.Timestamp.Format("2006-01-02 15:04:05") fmt.Printf("%s %s %v\n", t, metadata, rec.Message) } func (n *defaultLogger) Options() Options { // not guard against options Context values n.RLock() opts := n.opts opts.Fields = copyFields(n.opts.Fields) n.RUnlock() return opts } // NewLogger builds a new logger based on options func NewLogger(opts ...Option) Logger { // Default options options := Options{ Level: InfoLevel, Fields: make(map[string]interface{}), Out: os.Stderr, CallerSkipCount: 2, Context: context.Background(), } l := &defaultLogger{opts: options} if err := l.Init(opts...); err != nil { l.Log(FatalLevel, err) } return l }
[ "\"MICRO_LOG_LEVEL\"" ]
[]
[ "MICRO_LOG_LEVEL" ]
[]
["MICRO_LOG_LEVEL"]
go
1
0
Project/asgi.py
"""
ASGI config for Project project.

It exposes the ASGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""

import os

from django.core.asgi import get_asgi_application

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Project.settings')

application = get_asgi_application()
[]
[]
[]
[]
[]
python
0
0
codalab/worker_manager/gcp_batch_worker_manager.py
try: from google.cloud.container_v1 import ClusterManagerClient # type: ignore from google.oauth2 import service_account # type: ignore from kubernetes import client, utils # type: ignore from kubernetes.utils.create_from_yaml import FailToCreateError # type: ignore except ModuleNotFoundError: raise ModuleNotFoundError( 'Running the worker manager requires the kubernetes module.\n' 'Please run: pip install kubernetes' ) import base64 import logging import os import uuid from argparse import ArgumentParser from typing import Any, Dict, List from .worker_manager import WorkerManager, WorkerJob logger: logging.Logger = logging.getLogger(__name__) class GCPBatchWorkerManager(WorkerManager): NAME: str = 'gcp-batch' DESCRIPTION: str = 'Worker manager for submitting jobs to Google Cloud Platform via Kubernetes' @staticmethod def add_arguments_to_subparser(subparser: ArgumentParser) -> None: # GCP arguments subparser.add_argument('--project', type=str, help='Name of the GCP project', required=True) subparser.add_argument('--cluster', type=str, help='Name of the GKE cluster', required=True) subparser.add_argument( '--zone', type=str, help='The availability zone of the GKE cluster', required=True ) subparser.add_argument( '--credentials-path', type=str, help='Path to the GCP service account json file', required=True, ) subparser.add_argument( '--cert-path', type=str, default='.', help='Path to the generated SSL cert.' ) # Job-related arguments subparser.add_argument( '--cpus', type=int, default=1, help='Default number of CPUs for each worker' ) subparser.add_argument( '--gpus', type=int, default=0, help='Default number of GPUs to request for each worker' ) subparser.add_argument( '--memory-mb', type=int, default=2048, help='Default memory (in MB) for each worker' ) def __init__(self, args): super().__init__(args) self.codalab_username = os.environ.get('CODALAB_USERNAME') self.codalab_password = os.environ.get('CODALAB_PASSWORD') if not self.codalab_username or not self.codalab_password: raise EnvironmentError( 'Valid credentials need to be set as environment variables: CODALAB_USERNAME and CODALAB_PASSWORD' ) # Authenticate via GCP credentials: service_account.Credentials = service_account.Credentials.from_service_account_file( self.args.credentials_path, scopes=['https://www.googleapis.com/auth/cloud-platform'] ) cluster_manager_client: ClusterManagerClient = ClusterManagerClient(credentials=credentials) cluster = cluster_manager_client.get_cluster( name=f'projects/{self.args.project}/locations/{self.args.zone}/clusters/{self.args.cluster}' ) # Save SSL certificate to connect to the GKE cluster securely cert_path = os.path.join(self.args.cert_path, 'gke.crt') with open(cert_path, 'wb') as f: f.write(base64.b64decode(cluster.master_auth.cluster_ca_certificate)) # Configure and initialize Kubernetes client configuration: client.Configuration = client.Configuration() configuration.host = f'https://{cluster.endpoint}:443' configuration.api_key = {'authorization': f'Bearer {credentials.token}'} configuration.verify_ssl = True configuration.ssl_ca_cert = cert_path client.Configuration.set_default(configuration) self.k8_client: client.ApiClient = client.ApiClient(configuration) self.k8_api: client.CoreV1Api = client.CoreV1Api(self.k8_client) def get_worker_jobs(self) -> List[WorkerJob]: try: # Fetch the running pods pods: client.V1PodList = self.k8_api.list_namespaced_pod( 'default', field_selector='status.phase==Running' ) logger.debug(pods.items) return [WorkerJob(True) for _ in pods.items] except 
client.ApiException as e: logger.error(f'Exception when calling Kubernetes CoreV1Api->list_namespaced_pod: {e}') return [] def start_worker_job(self) -> None: # This needs to be a unique directory since jobs may share a host work_dir_prefix: str = ( self.args.worker_work_dir_prefix if self.args.worker_work_dir_prefix else '/tmp/' ) worker_id: str = uuid.uuid4().hex worker_name: str = f'cl-worker-{worker_id}' work_dir: str = os.path.join(work_dir_prefix, f'{worker_name}_work_dir') command: List[str] = self.build_command(worker_id, work_dir) worker_image: str = 'codalab/worker:' + os.environ.get('CODALAB_VERSION', 'latest') config: Dict[str, Any] = { 'apiVersion': 'v1', 'kind': 'Pod', 'metadata': {'name': worker_name}, 'spec': { 'containers': [ { 'name': f'{worker_name}-container', 'image': worker_image, 'command': command, 'securityContext': {'runAsUser': 0}, # Run as root 'env': [ {'name': 'CODALAB_USERNAME', 'value': self.codalab_username}, {'name': 'CODALAB_PASSWORD', 'value': self.codalab_password}, ], 'resources': { 'limits': { 'cpu': self.args.cpus, 'memory': f'{self.args.memory_mb}Mi', 'nvidia.com/gpu': self.args.gpus, # Configure NVIDIA GPUs } }, 'volumeMounts': [ {'name': 'dockersock', 'mountPath': '/var/run/docker.sock'}, {'name': 'workdir', 'mountPath': work_dir}, ], } ], 'volumes': [ {'name': 'dockersock', 'hostPath': {'path': '/var/run/docker.sock'}}, {'name': 'workdir', 'hostPath': {'path': work_dir}}, ], 'restartPolicy': 'Never', # Only run a job once }, } # Use Kubernetes to start a worker on GCP logger.debug('Starting worker {} with image {}'.format(worker_id, worker_image)) try: utils.create_from_dict(self.k8_client, config) except (client.ApiException, FailToCreateError) as e: logger.error(f'Exception when calling Kubernetes utils->create_from_dict: {e}')
[]
[]
[ "CODALAB_USERNAME", "CODALAB_PASSWORD", "CODALAB_VERSION" ]
[]
["CODALAB_USERNAME", "CODALAB_PASSWORD", "CODALAB_VERSION"]
python
3
0
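The worker manager record above reads its CodaLab credentials from CODALAB_USERNAME / CODALAB_PASSWORD and registers its CLI flags through the static add_arguments_to_subparser method. A small sketch of wiring those flags into an argparse parser, assuming the codalab package (and its kubernetes dependency) is importable under the module path implied by the filename; all flag values below are placeholders:

import argparse
import os

# The worker manager refuses to start without these two environment variables
os.environ.setdefault("CODALAB_USERNAME", "example-user")      # placeholder value
os.environ.setdefault("CODALAB_PASSWORD", "example-password")  # placeholder value

from codalab.worker_manager.gcp_batch_worker_manager import GCPBatchWorkerManager  # assumed module path

parser = argparse.ArgumentParser(description="worker manager demo")
subparsers = parser.add_subparsers(dest="manager")
gcp = subparsers.add_parser(GCPBatchWorkerManager.NAME, help=GCPBatchWorkerManager.DESCRIPTION)
GCPBatchWorkerManager.add_arguments_to_subparser(gcp)

# Placeholder values for the required GCP flags
args = parser.parse_args([
    "gcp-batch",
    "--project", "my-project",
    "--cluster", "my-gke-cluster",
    "--zone", "us-west1-a",
    "--credentials-path", "service-account.json",
])
print(args.cpus, args.gpus, args.memory_mb)  # defaults: 1 0 2048
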
vendor/github.com/containers/buildah/pkg/cli/common.go
package cli // the cli package contains urfave/cli related structs that help make up // the command line for buildah commands. it resides here so other projects // that vendor in this code can use them too. import ( "fmt" "os" "runtime" "strings" "github.com/containers/buildah" "github.com/containers/buildah/pkg/parse" "github.com/containers/buildah/util" "github.com/containers/common/pkg/auth" "github.com/containers/common/pkg/config" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/spf13/pflag" ) // LayerResults represents the results of the layer flags type LayerResults struct { ForceRm bool Layers bool } // UserNSResults represents the results for the UserNS flags type UserNSResults struct { UserNS string UserNSUIDMap []string UserNSGIDMap []string UserNSUIDMapUser string UserNSGIDMapGroup string } // NameSpaceResults represents the results for Namespace flags type NameSpaceResults struct { IPC string Network string CNIConfigDir string CNIPlugInPath string PID string UTS string } // BudResults represents the results for Bud flags type BudResults struct { Annotation []string Arch string Authfile string BuildArg []string CacheFrom string CertDir string Compress bool Creds string DisableCompression bool DisableContentTrust bool DecryptionKeys []string File []string Format string Iidfile string Label []string Logfile string Loglevel int NoCache bool OS string Platform string Pull bool PullAlways bool PullNever bool Quiet bool Rm bool Runtime string RuntimeFlags []string SignaturePolicy string SignBy string Squash bool Tag []string Target string TLSVerify bool } // FromAndBugResults represents the results for common flags // in bud and from type FromAndBudResults struct { AddHost []string BlobCache string CapAdd []string CapDrop []string CgroupParent string CPUPeriod uint64 CPUQuota int64 CPUSetCPUs string CPUSetMems string CPUShares uint64 Devices []string DNSSearch []string DNSServers []string DNSOptions []string HTTPProxy bool Isolation string Memory string MemorySwap string OverrideArch string OverrideOS string SecurityOpt []string ShmSize string Ulimit []string Volumes []string } // GetUserNSFlags returns the common flags for usernamespace func GetUserNSFlags(flags *UserNSResults) pflag.FlagSet { usernsFlags := pflag.FlagSet{} usernsFlags.StringVar(&flags.UserNS, "userns", "", "'container', `path` of user namespace to join, or 'host'") usernsFlags.StringSliceVar(&flags.UserNSUIDMap, "userns-uid-map", []string{}, "`containerID:hostID:length` UID mapping to use in user namespace") usernsFlags.StringSliceVar(&flags.UserNSGIDMap, "userns-gid-map", []string{}, "`containerID:hostID:length` GID mapping to use in user namespace") usernsFlags.StringVar(&flags.UserNSUIDMapUser, "userns-uid-map-user", "", "`name` of entries from /etc/subuid to use to set user namespace UID mapping") usernsFlags.StringVar(&flags.UserNSGIDMapGroup, "userns-gid-map-group", "", "`name` of entries from /etc/subgid to use to set user namespace GID mapping") return usernsFlags } // GetNameSpaceFlags returns the common flags for a namespace menu func GetNameSpaceFlags(flags *NameSpaceResults) pflag.FlagSet { fs := pflag.FlagSet{} fs.StringVar(&flags.IPC, string(specs.IPCNamespace), "", "'container', `path` of IPC namespace to join, or 'host'") fs.StringVar(&flags.Network, string(specs.NetworkNamespace), "", "'container', `path` of network namespace to join, or 'host'") // TODO How do we alias net and network? 
fs.StringVar(&flags.Network, "net", "", "'container', `path` of network namespace to join, or 'host'") if err := fs.MarkHidden("net"); err != nil { panic(fmt.Sprintf("error marking net flag as hidden: %v", err)) } fs.StringVar(&flags.CNIConfigDir, "cni-config-dir", util.DefaultCNIConfigDir, "`directory` of CNI configuration files") fs.StringVar(&flags.CNIPlugInPath, "cni-plugin-path", util.DefaultCNIPluginPath, "`path` of CNI network plugins") fs.StringVar(&flags.PID, string(specs.PIDNamespace), "", "container, `path` of PID namespace to join, or 'host'") fs.StringVar(&flags.UTS, string(specs.UTSNamespace), "", "container, :`path` of UTS namespace to join, or 'host'") return fs } // GetLayerFlags returns the common flags for layers func GetLayerFlags(flags *LayerResults) pflag.FlagSet { fs := pflag.FlagSet{} fs.BoolVar(&flags.ForceRm, "force-rm", false, "Always remove intermediate containers after a build, even if the build is unsuccessful.") fs.BoolVar(&flags.Layers, "layers", UseLayers(), fmt.Sprintf("cache intermediate layers during build. Use BUILDAH_LAYERS environment variable to override.")) return fs } // GetBudFlags returns common bud flags func GetBudFlags(flags *BudResults) pflag.FlagSet { fs := pflag.FlagSet{} fs.StringVar(&flags.Arch, "arch", runtime.GOARCH, "set the ARCH of the image to the provided value instead of the architecture of the host") fs.StringArrayVar(&flags.Annotation, "annotation", []string{}, "Set metadata for an image (default [])") fs.StringVar(&flags.Authfile, "authfile", auth.GetDefaultAuthFile(), "path of the authentication file.") fs.StringArrayVar(&flags.BuildArg, "build-arg", []string{}, "`argument=value` to supply to the builder") fs.StringVar(&flags.CacheFrom, "cache-from", "", "Images to utilise as potential cache sources. The build process does not currently support caching so this is a NOOP.") fs.StringVar(&flags.CertDir, "cert-dir", "", "use certificates at the specified path to access the registry") fs.BoolVar(&flags.Compress, "compress", false, "This is legacy option, which has no effect on the image") fs.StringVar(&flags.Creds, "creds", "", "use `[username[:password]]` for accessing the registry") fs.BoolVarP(&flags.DisableCompression, "disable-compression", "D", true, "don't compress layers by default") fs.BoolVar(&flags.DisableContentTrust, "disable-content-trust", false, "This is a Docker specific option and is a NOOP") fs.StringSliceVarP(&flags.File, "file", "f", []string{}, "`pathname or URL` of a Dockerfile") fs.StringVar(&flags.Format, "format", DefaultFormat(), "`format` of the built image's manifest and metadata. Use BUILDAH_FORMAT environment variable to override.") fs.StringVar(&flags.Iidfile, "iidfile", "", "`file` to write the image ID to") fs.StringArrayVar(&flags.Label, "label", []string{}, "Set metadata for an image (default [])") fs.BoolVar(&flags.NoCache, "no-cache", false, "Do not use existing cached images for the container build. 
Build from the start with a new set of cached layers.") fs.StringVar(&flags.Logfile, "logfile", "", "log to `file` instead of stdout/stderr") fs.IntVar(&flags.Loglevel, "loglevel", 0, "adjust logging level (range from -2 to 3)") fs.StringVar(&flags.OS, "os", runtime.GOOS, "set the OS to the provided value instead of the current operating system of the host") fs.StringVar(&flags.Platform, "platform", parse.DefaultPlatform(), "set the OS/ARCH to the provided value instead of the current operating system and architecture of the host (for example `linux/arm`)") fs.BoolVar(&flags.Pull, "pull", true, "pull the image from the registry if newer or not present in store, if false, only pull the image if not present") fs.BoolVar(&flags.PullAlways, "pull-always", false, "pull the image even if the named image is present in store") fs.BoolVar(&flags.PullNever, "pull-never", false, "do not pull the image, use the image present in store if available") fs.BoolVarP(&flags.Quiet, "quiet", "q", false, "refrain from announcing build instructions and image read/write progress") fs.BoolVar(&flags.Rm, "rm", true, "Remove intermediate containers after a successful build") // "runtime" definition moved to avoid name collision in podman build. Defined in cmd/buildah/bud.go. fs.StringSliceVar(&flags.RuntimeFlags, "runtime-flag", []string{}, "add global flags for the container runtime") fs.StringVar(&flags.SignBy, "sign-by", "", "sign the image using a GPG key with the specified `FINGERPRINT`") fs.StringVar(&flags.SignaturePolicy, "signature-policy", "", "`pathname` of signature policy file (not usually used)") fs.BoolVar(&flags.Squash, "squash", false, "squash newly built layers into a single new layer") fs.StringArrayVarP(&flags.Tag, "tag", "t", []string{}, "tagged `name` to apply to the built image") fs.StringVar(&flags.Target, "target", "", "set the target build stage to build") fs.BoolVar(&flags.TLSVerify, "tls-verify", true, "require HTTPS and verify certificates when accessing the registry") return fs } func GetFromAndBudFlags(flags *FromAndBudResults, usernsResults *UserNSResults, namespaceResults *NameSpaceResults) (pflag.FlagSet, error) { fs := pflag.FlagSet{} defaultContainerConfig, err := config.Default() if err != nil { return fs, errors.Wrapf(err, "failed to get container config") } fs.StringSliceVar(&flags.AddHost, "add-host", []string{}, "add a custom host-to-IP mapping (`host:ip`) (default [])") fs.StringVar(&flags.BlobCache, "blob-cache", "", "assume image blobs in the specified directory will be available for pushing") if err := fs.MarkHidden("blob-cache"); err != nil { panic(fmt.Sprintf("error marking net flag as hidden: %v", err)) } fs.StringSliceVar(&flags.CapAdd, "cap-add", []string{}, "add the specified capability when running (default [])") fs.StringSliceVar(&flags.CapDrop, "cap-drop", []string{}, "drop the specified capability when running (default [])") fs.StringVar(&flags.CgroupParent, "cgroup-parent", "", "optional parent cgroup for the container") fs.Uint64Var(&flags.CPUPeriod, "cpu-period", 0, "limit the CPU CFS (Completely Fair Scheduler) period") fs.Int64Var(&flags.CPUQuota, "cpu-quota", 0, "limit the CPU CFS (Completely Fair Scheduler) quota") fs.Uint64VarP(&flags.CPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") fs.StringVar(&flags.CPUSetCPUs, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") fs.StringVar(&flags.CPUSetMems, "cpuset-mems", "", "memory nodes (MEMs) in which to allow execution (0-3, 0,1). 
Only effective on NUMA systems.") fs.StringArrayVar(&flags.Devices, "device", defaultContainerConfig.Containers.Devices, "Additional devices to be used within containers (default [])") fs.StringSliceVar(&flags.DNSSearch, "dns-search", defaultContainerConfig.Containers.DNSSearches, "Set custom DNS search domains") fs.StringSliceVar(&flags.DNSServers, "dns", defaultContainerConfig.Containers.DNSServers, "Set custom DNS servers or disable it completely by setting it to 'none', which prevents the automatic creation of `/etc/resolv.conf`.") fs.StringSliceVar(&flags.DNSOptions, "dns-option", defaultContainerConfig.Containers.DNSOptions, "Set custom DNS options") fs.BoolVar(&flags.HTTPProxy, "http-proxy", true, "pass through HTTP Proxy environment variables") fs.StringVar(&flags.Isolation, "isolation", DefaultIsolation(), "`type` of process isolation to use. Use BUILDAH_ISOLATION environment variable to override.") fs.StringVarP(&flags.Memory, "memory", "m", "", "memory limit (format: <number>[<unit>], where unit = b, k, m or g)") fs.StringVar(&flags.MemorySwap, "memory-swap", "", "swap limit equal to memory plus swap: '-1' to enable unlimited swap") fs.StringVar(&flags.OverrideOS, "override-os", runtime.GOOS, "prefer `OS` instead of the running OS when pulling images") if err := fs.MarkHidden("override-os"); err != nil { panic(fmt.Sprintf("error marking override-os as hidden: %v", err)) } fs.StringVar(&flags.OverrideArch, "override-arch", runtime.GOARCH, "prefer `ARCH` instead of the architecture of the machine when pulling images") if err := fs.MarkHidden("override-arch"); err != nil { panic(fmt.Sprintf("error marking override-arch as hidden: %v", err)) } fs.StringArrayVar(&flags.SecurityOpt, "security-opt", []string{}, "security options (default [])") fs.StringVar(&flags.ShmSize, "shm-size", defaultContainerConfig.Containers.ShmSize, "size of '/dev/shm'. The format is `<number><unit>`.") fs.StringSliceVar(&flags.Ulimit, "ulimit", defaultContainerConfig.Containers.DefaultUlimits, "ulimit options") fs.StringArrayVarP(&flags.Volumes, "volume", "v", defaultContainerConfig.Containers.Volumes, "bind mount a volume into the container") // Add in the usernamespace and namespaceflags usernsFlags := GetUserNSFlags(usernsResults) namespaceFlags := GetNameSpaceFlags(namespaceResults) fs.AddFlagSet(&usernsFlags) fs.AddFlagSet(&namespaceFlags) return fs, nil } // UseLayers returns true if BUILDAH_LAYERS is set to "1" or "true" // otherwise it returns false func UseLayers() bool { layers := os.Getenv("BUILDAH_LAYERS") if strings.ToLower(layers) == "true" || layers == "1" { return true } return false } // DefaultFormat returns the default image format func DefaultFormat() string { format := os.Getenv("BUILDAH_FORMAT") if format != "" { return format } return buildah.OCI } // DefaultIsolation returns the default image format func DefaultIsolation() string { isolation := os.Getenv("BUILDAH_ISOLATION") if isolation != "" { return isolation } return buildah.OCI } // DefaultHistory returns the default add-history setting func DefaultHistory() bool { history := os.Getenv("BUILDAH_HISTORY") if strings.ToLower(history) == "true" || history == "1" { return true } return false } func VerifyFlagsArgsOrder(args []string) error { for _, arg := range args { if strings.HasPrefix(arg, "-") { return errors.Errorf("No options (%s) can be specified after the image or container name", arg) } } return nil }
[ "\"BUILDAH_LAYERS\"", "\"BUILDAH_FORMAT\"", "\"BUILDAH_ISOLATION\"", "\"BUILDAH_HISTORY\"" ]
[]
[ "BUILDAH_ISOLATION", "BUILDAH_HISTORY", "BUILDAH_FORMAT", "BUILDAH_LAYERS" ]
[]
["BUILDAH_ISOLATION", "BUILDAH_HISTORY", "BUILDAH_FORMAT", "BUILDAH_LAYERS"]
go
4
0
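The four BUILDAH_* variables recorded above are all read through os.Getenv inside the helper functions shown in the content (UseLayers, DefaultFormat, DefaultIsolation, DefaultHistory). A minimal self-contained sketch of that lookup pattern — the package, function names, and fallback values here are illustrative, not part of the buildah source — could look like this:

package main

import (
	"fmt"
	"os"
	"strings"
)

// envBool mirrors the UseLayers/DefaultHistory convention: "1" or "true" (any case) enables the feature.
func envBool(name string) bool {
	v := strings.ToLower(os.Getenv(name))
	return v == "true" || v == "1"
}

// envString mirrors DefaultFormat/DefaultIsolation: use the override when set, otherwise a fallback.
func envString(name, fallback string) string {
	if v := os.Getenv(name); v != "" {
		return v
	}
	return fallback
}

func main() {
	fmt.Println("layers:", envBool("BUILDAH_LAYERS"))
	fmt.Println("format:", envString("BUILDAH_FORMAT", "oci"))
	fmt.Println("isolation:", envString("BUILDAH_ISOLATION", "oci"))
	fmt.Println("history:", envBool("BUILDAH_HISTORY"))
}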
chain/sync.go
package chain import ( "bytes" "context" "errors" "fmt" "github.com/filecoin-project/go-state-types/network" "os" "sort" "strings" "sync" "time" "github.com/filecoin-project/lotus/chain/actors/builtin" "github.com/filecoin-project/lotus/node/modules/dtypes" "github.com/Gurpartap/async" "github.com/hashicorp/go-multierror" blocks "github.com/ipfs/go-block-format" "github.com/ipfs/go-cid" cbor "github.com/ipfs/go-ipld-cbor" logging "github.com/ipfs/go-log/v2" "github.com/libp2p/go-libp2p-core/connmgr" "github.com/libp2p/go-libp2p-core/peer" cbg "github.com/whyrusleeping/cbor-gen" "github.com/whyrusleeping/pubsub" "go.opencensus.io/stats" "go.opencensus.io/trace" "golang.org/x/xerrors" "github.com/filecoin-project/go-address" "github.com/filecoin-project/go-state-types/abi" "github.com/filecoin-project/go-state-types/crypto" "github.com/filecoin-project/lotus/extern/sector-storage/ffiwrapper" ffi "github.com/filecoin-project/filecoin-ffi" // named msgarray here to make it clear that these are the types used by // messages, regardless of specs-actors version. blockadt "github.com/filecoin-project/specs-actors/actors/util/adt" proof2 "github.com/filecoin-project/specs-actors/v2/actors/runtime/proof" "github.com/filecoin-project/lotus/api" "github.com/filecoin-project/lotus/build" "github.com/filecoin-project/lotus/chain/actors/builtin/power" "github.com/filecoin-project/lotus/chain/beacon" "github.com/filecoin-project/lotus/chain/exchange" "github.com/filecoin-project/lotus/chain/gen" "github.com/filecoin-project/lotus/chain/state" "github.com/filecoin-project/lotus/chain/stmgr" "github.com/filecoin-project/lotus/chain/store" "github.com/filecoin-project/lotus/chain/types" "github.com/filecoin-project/lotus/chain/vm" bstore "github.com/filecoin-project/lotus/lib/blockstore" "github.com/filecoin-project/lotus/lib/sigs" "github.com/filecoin-project/lotus/metrics" ) // Blocks that are more than MaxHeightDrift epochs above // the theoretical max height based on systime are quickly rejected const MaxHeightDrift = 5 var ( // LocalIncoming is the _local_ pubsub (unrelated to libp2p pubsub) topic // where the Syncer publishes candidate chain heads to be synced. LocalIncoming = "incoming" log = logging.Logger("chain") concurrentSyncRequests = exchange.ShufflePeersPrefix syncRequestBatchSize = 8 syncRequestRetries = 5 ) // Syncer is in charge of running the chain synchronization logic. As such, it // is tasked with these functions, amongst others: // // * Fast-forwards the chain as it learns of new TipSets from the network via // the SyncManager. // * Applies the fork choice rule to select the correct side when confronted // with a fork in the network. // * Requests block headers and messages from other peers when not available // in our BlockStore. // * Tracks blocks marked as bad in a cache. // * Keeps the BlockStore and ChainStore consistent with our view of the world, // the latter of which in turn informs other components when a reorg has been // committed. // // The Syncer does not run workers itself. It's mainly concerned with // ensuring a consistent state of chain consensus. The reactive and network- // interfacing processes are part of other components, such as the SyncManager // (which owns the sync scheduler and sync workers), ChainExchange, the HELLO // protocol, and the gossipsub block propagation layer. 
// // {hint/concept} The fork-choice rule as it currently stands is: "pick the // chain with the heaviest weight, so long as it hasn’t deviated one finality // threshold from our head (900 epochs, parameter determined by spec-actors)". type Syncer struct { // The interface for accessing and putting tipsets into local storage store *store.ChainStore // handle to the random beacon for verification beacon beacon.Schedule // the state manager handles making state queries sm *stmgr.StateManager // The known Genesis tipset Genesis *types.TipSet // TipSets known to be invalid bad *BadBlockCache // handle to the block sync service Exchange exchange.Client self peer.ID syncmgr SyncManager connmgr connmgr.ConnManager incoming *pubsub.PubSub receiptTracker *blockReceiptTracker verifier ffiwrapper.Verifier tickerCtxCancel context.CancelFunc checkptLk sync.Mutex checkpt types.TipSetKey ds dtypes.MetadataDS } type SyncManagerCtor func(syncFn SyncFunc) SyncManager // NewSyncer creates a new Syncer object. func NewSyncer(ds dtypes.MetadataDS, sm *stmgr.StateManager, exchange exchange.Client, syncMgrCtor SyncManagerCtor, connmgr connmgr.ConnManager, self peer.ID, beacon beacon.Schedule, verifier ffiwrapper.Verifier) (*Syncer, error) { gen, err := sm.ChainStore().GetGenesis() if err != nil { return nil, xerrors.Errorf("getting genesis block: %w", err) } gent, err := types.NewTipSet([]*types.BlockHeader{gen}) if err != nil { return nil, err } cp, err := loadCheckpoint(ds) if err != nil { return nil, xerrors.Errorf("error loading mpool config: %w", err) } s := &Syncer{ ds: ds, checkpt: cp, beacon: beacon, bad: NewBadBlockCache(), Genesis: gent, Exchange: exchange, store: sm.ChainStore(), sm: sm, self: self, receiptTracker: newBlockReceiptTracker(), connmgr: connmgr, verifier: verifier, incoming: pubsub.New(50), } if build.InsecurePoStValidation { log.Warn("*********************************************************************************************") log.Warn(" [INSECURE-POST-VALIDATION] Insecure test validation is enabled. If you see this outside of a test, it is a severe bug! 
") log.Warn("*********************************************************************************************") } s.syncmgr = syncMgrCtor(s.Sync) return s, nil } func (syncer *Syncer) Start() { tickerCtx, tickerCtxCancel := context.WithCancel(context.Background()) syncer.syncmgr.Start() syncer.tickerCtxCancel = tickerCtxCancel go syncer.runMetricsTricker(tickerCtx) } func (syncer *Syncer) runMetricsTricker(tickerCtx context.Context) { genesisTime := time.Unix(int64(syncer.Genesis.MinTimestamp()), 0) ticker := build.Clock.Ticker(time.Duration(build.BlockDelaySecs) * time.Second) defer ticker.Stop() for { select { case <-ticker.C: sinceGenesis := build.Clock.Now().Sub(genesisTime) expectedHeight := int64(sinceGenesis.Seconds()) / int64(build.BlockDelaySecs) stats.Record(tickerCtx, metrics.ChainNodeHeightExpected.M(expectedHeight)) case <-tickerCtx.Done(): return } } } func (syncer *Syncer) Stop() { syncer.syncmgr.Stop() syncer.tickerCtxCancel() } // InformNewHead informs the syncer about a new potential tipset // This should be called when connecting to new peers, and additionally // when receiving new blocks from the network func (syncer *Syncer) InformNewHead(from peer.ID, fts *store.FullTipSet) bool { defer func() { if err := recover(); err != nil { log.Errorf("panic in InformNewHead: ", err) } }() ctx := context.Background() if fts == nil { log.Errorf("got nil tipset in InformNewHead") return false } if syncer.IsEpochBeyondCurrMax(fts.TipSet().Height()) { log.Errorf("Received block with impossibly large height %d", fts.TipSet().Height()) return false } for _, b := range fts.Blocks { if reason, ok := syncer.bad.Has(b.Cid()); ok { log.Warnf("InformNewHead called on block marked as bad: %s (reason: %s)", b.Cid(), reason) return false } if err := syncer.ValidateMsgMeta(b); err != nil { log.Warnf("invalid block received: %s", err) return false } } syncer.incoming.Pub(fts.TipSet().Blocks(), LocalIncoming) if from == syncer.self { // TODO: this is kindof a hack... log.Debug("got block from ourselves") if err := syncer.Sync(ctx, fts.TipSet()); err != nil { log.Errorf("failed to sync our own block %s: %+v", fts.TipSet().Cids(), err) return false } return true } // TODO: IMPORTANT(GARBAGE) this needs to be put in the 'temporary' side of // the blockstore if err := syncer.store.PersistBlockHeaders(fts.TipSet().Blocks()...); err != nil { log.Warn("failed to persist incoming block header: ", err) return false } syncer.Exchange.AddPeer(from) hts := syncer.store.GetHeaviestTipSet() bestPweight := hts.ParentWeight() targetWeight := fts.TipSet().ParentWeight() if targetWeight.LessThan(bestPweight) { var miners []string for _, blk := range fts.TipSet().Blocks() { miners = append(miners, blk.Miner.String()) } log.Debugw("incoming tipset does not appear to be better than our best chain, ignoring for now", "miners", miners, "bestPweight", bestPweight, "bestTS", hts.Cids(), "incomingWeight", targetWeight, "incomingTS", fts.TipSet().Cids()) return false } syncer.syncmgr.SetPeerHead(ctx, from, fts.TipSet()) return true } // IncomingBlocks spawns a goroutine that subscribes to the local eventbus to // receive new block headers as they arrive from the network, and sends them to // the returned channel. // // These blocks have not necessarily been incorporated to our view of the chain. 
func (syncer *Syncer) IncomingBlocks(ctx context.Context) (<-chan *types.BlockHeader, error) { sub := syncer.incoming.Sub(LocalIncoming) out := make(chan *types.BlockHeader, 10) go func() { defer syncer.incoming.Unsub(sub, LocalIncoming) for { select { case r := <-sub: hs := r.([]*types.BlockHeader) for _, h := range hs { select { case out <- h: case <-ctx.Done(): return } } case <-ctx.Done(): return } } }() return out, nil } // ValidateMsgMeta performs structural and content hash validation of the // messages within this block. If validation passes, it stores the messages in // the underlying IPLD block store. func (syncer *Syncer) ValidateMsgMeta(fblk *types.FullBlock) error { if msgc := len(fblk.BlsMessages) + len(fblk.SecpkMessages); msgc > build.BlockMessageLimit { return xerrors.Errorf("block %s has too many messages (%d)", fblk.Header.Cid(), msgc) } // TODO: IMPORTANT(GARBAGE). These message puts and the msgmeta // computation need to go into the 'temporary' side of the blockstore when // we implement that // We use a temporary bstore here to avoid writing intermediate pieces // into the blockstore. blockstore := bstore.NewTemporary() cst := cbor.NewCborStore(blockstore) var bcids, scids []cid.Cid for _, m := range fblk.BlsMessages { c, err := store.PutMessage(blockstore, m) if err != nil { return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) } bcids = append(bcids, c) } for _, m := range fblk.SecpkMessages { c, err := store.PutMessage(blockstore, m) if err != nil { return xerrors.Errorf("putting bls message to blockstore after msgmeta computation: %w", err) } scids = append(scids, c) } // Compute the root CID of the combined message trie. smroot, err := computeMsgMeta(cst, bcids, scids) if err != nil { return xerrors.Errorf("validating msgmeta, compute failed: %w", err) } // Check that the message trie root matches with what's in the block. if fblk.Header.Messages != smroot { return xerrors.Errorf("messages in full block did not match msgmeta root in header (%s != %s)", fblk.Header.Messages, smroot) } // Finally, flush. return vm.Copy(context.TODO(), blockstore, syncer.store.Blockstore(), smroot) } func (syncer *Syncer) LocalPeer() peer.ID { return syncer.self } func (syncer *Syncer) ChainStore() *store.ChainStore { return syncer.store } func (syncer *Syncer) InformNewBlock(from peer.ID, blk *types.FullBlock) bool { // TODO: search for other blocks that could form a tipset with this block // and then send that tipset to InformNewHead fts := &store.FullTipSet{Blocks: []*types.FullBlock{blk}} return syncer.InformNewHead(from, fts) } func copyBlockstore(ctx context.Context, from, to bstore.Blockstore) error { ctx, span := trace.StartSpan(ctx, "copyBlockstore") defer span.End() cids, err := from.AllKeysChan(ctx) if err != nil { return err } // TODO: should probably expose better methods on the blockstore for this operation var blks []blocks.Block for c := range cids { b, err := from.Get(c) if err != nil { return err } blks = append(blks, b) } if err := to.PutMany(blks); err != nil { return err } return nil } // TODO: this function effectively accepts unchecked input from the network, // either validate it here, or ensure that its validated elsewhere (maybe make // sure the blocksync code checks it?) // maybe this code should actually live in blocksync?? 
func zipTipSetAndMessages(bs cbor.IpldStore, ts *types.TipSet, allbmsgs []*types.Message, allsmsgs []*types.SignedMessage, bmi, smi [][]uint64) (*store.FullTipSet, error) { if len(ts.Blocks()) != len(smi) || len(ts.Blocks()) != len(bmi) { return nil, fmt.Errorf("msgincl length didnt match tipset size") } fts := &store.FullTipSet{} for bi, b := range ts.Blocks() { if msgc := len(bmi[bi]) + len(smi[bi]); msgc > build.BlockMessageLimit { return nil, fmt.Errorf("block %q has too many messages (%d)", b.Cid(), msgc) } var smsgs []*types.SignedMessage var smsgCids []cid.Cid for _, m := range smi[bi] { smsgs = append(smsgs, allsmsgs[m]) smsgCids = append(smsgCids, allsmsgs[m].Cid()) } var bmsgs []*types.Message var bmsgCids []cid.Cid for _, m := range bmi[bi] { bmsgs = append(bmsgs, allbmsgs[m]) bmsgCids = append(bmsgCids, allbmsgs[m].Cid()) } mrcid, err := computeMsgMeta(bs, bmsgCids, smsgCids) if err != nil { return nil, err } if b.Messages != mrcid { return nil, fmt.Errorf("messages didnt match message root in header for ts %s", ts.Key()) } fb := &types.FullBlock{ Header: b, BlsMessages: bmsgs, SecpkMessages: smsgs, } fts.Blocks = append(fts.Blocks, fb) } return fts, nil } // computeMsgMeta computes the root CID of the combined arrays of message CIDs // of both types (BLS and Secpk). func computeMsgMeta(bs cbor.IpldStore, bmsgCids, smsgCids []cid.Cid) (cid.Cid, error) { // block headers use adt0 store := blockadt.WrapStore(context.TODO(), bs) bmArr := blockadt.MakeEmptyArray(store) smArr := blockadt.MakeEmptyArray(store) for i, m := range bmsgCids { c := cbg.CborCid(m) if err := bmArr.Set(uint64(i), &c); err != nil { return cid.Undef, err } } for i, m := range smsgCids { c := cbg.CborCid(m) if err := smArr.Set(uint64(i), &c); err != nil { return cid.Undef, err } } bmroot, err := bmArr.Root() if err != nil { return cid.Undef, err } smroot, err := smArr.Root() if err != nil { return cid.Undef, err } mrcid, err := store.Put(store.Context(), &types.MsgMeta{ BlsMessages: bmroot, SecpkMessages: smroot, }) if err != nil { return cid.Undef, xerrors.Errorf("failed to put msgmeta: %w", err) } return mrcid, nil } // FetchTipSet tries to load the provided tipset from the store, and falls back // to the network (client) by querying the supplied peer if not found // locally. // // {hint/usage} This is used from the HELLO protocol, to fetch the greeting // peer's heaviest tipset if we don't have it. func (syncer *Syncer) FetchTipSet(ctx context.Context, p peer.ID, tsk types.TipSetKey) (*store.FullTipSet, error) { if fts, err := syncer.tryLoadFullTipSet(tsk); err == nil { return fts, nil } // fall back to the network. return syncer.Exchange.GetFullTipSet(ctx, p, tsk) } // tryLoadFullTipSet queries the tipset in the ChainStore, and returns a full // representation of it containing FullBlocks. If ALL blocks are not found // locally, it errors entirely with blockstore.ErrNotFound. func (syncer *Syncer) tryLoadFullTipSet(tsk types.TipSetKey) (*store.FullTipSet, error) { ts, err := syncer.store.LoadTipSet(tsk) if err != nil { return nil, err } fts := &store.FullTipSet{} for _, b := range ts.Blocks() { bmsgs, smsgs, err := syncer.store.MessagesForBlock(b) if err != nil { return nil, err } fb := &types.FullBlock{ Header: b, BlsMessages: bmsgs, SecpkMessages: smsgs, } fts.Blocks = append(fts.Blocks, fb) } return fts, nil } // Sync tries to advance our view of the chain to `maybeHead`. 
It does nothing // if our current head is heavier than the requested tipset, or if we're already // at the requested head, or if the head is the genesis. // // Most of the heavy-lifting logic happens in syncer#collectChain. Refer to the // godocs on that method for a more detailed view. func (syncer *Syncer) Sync(ctx context.Context, maybeHead *types.TipSet) error { ctx, span := trace.StartSpan(ctx, "chain.Sync") defer span.End() if span.IsRecordingEvents() { span.AddAttributes( trace.StringAttribute("tipset", fmt.Sprint(maybeHead.Cids())), trace.Int64Attribute("height", int64(maybeHead.Height())), ) } hts := syncer.store.GetHeaviestTipSet() if hts.ParentWeight().GreaterThan(maybeHead.ParentWeight()) { return nil } if syncer.Genesis.Equals(maybeHead) || hts.Equals(maybeHead) { return nil } if err := syncer.collectChain(ctx, maybeHead, hts); err != nil { span.AddAttributes(trace.StringAttribute("col_error", err.Error())) span.SetStatus(trace.Status{ Code: 13, Message: err.Error(), }) return xerrors.Errorf("collectChain failed: %w", err) } // At this point we have accepted and synced to the new `maybeHead` // (`StageSyncComplete`). if err := syncer.store.PutTipSet(ctx, maybeHead); err != nil { span.AddAttributes(trace.StringAttribute("put_error", err.Error())) span.SetStatus(trace.Status{ Code: 13, Message: err.Error(), }) return xerrors.Errorf("failed to put synced tipset to chainstore: %w", err) } peers := syncer.receiptTracker.GetPeers(maybeHead) if len(peers) > 0 { syncer.connmgr.TagPeer(peers[0], "new-block", 40) for _, p := range peers[1:] { syncer.connmgr.TagPeer(p, "new-block", 25) } } return nil } func isPermanent(err error) bool { return !errors.Is(err, ErrTemporal) } func (syncer *Syncer) ValidateTipSet(ctx context.Context, fts *store.FullTipSet, useCache bool) error { ctx, span := trace.StartSpan(ctx, "validateTipSet") defer span.End() span.AddAttributes(trace.Int64Attribute("height", int64(fts.TipSet().Height()))) ts := fts.TipSet() if ts.Equals(syncer.Genesis) { return nil } var futures []async.ErrorFuture for _, b := range fts.Blocks { b := b // rebind to a scoped variable futures = append(futures, async.Err(func() error { if err := syncer.ValidateBlock(ctx, b, useCache); err != nil { if isPermanent(err) { syncer.bad.Add(b.Cid(), NewBadBlockReason([]cid.Cid{b.Cid()}, err.Error())) } return xerrors.Errorf("validating block %s: %w", b.Cid(), err) } if err := syncer.sm.ChainStore().AddToTipSetTracker(b.Header); err != nil { return xerrors.Errorf("failed to add validated header to tipset tracker: %w", err) } return nil })) } for _, f := range futures { if err := f.AwaitContext(ctx); err != nil { return err } } return nil } func (syncer *Syncer) minerIsValid(ctx context.Context, maddr address.Address, baseTs *types.TipSet) error { act, err := syncer.sm.LoadActor(ctx, power.Address, baseTs) if err != nil { return xerrors.Errorf("failed to load power actor: %w", err) } powState, err := power.Load(syncer.store.Store(ctx), act) if err != nil { return xerrors.Errorf("failed to load power actor state: %w", err) } _, exist, err := powState.MinerPower(maddr) if err != nil { return xerrors.Errorf("failed to look up miner's claim: %w", err) } if !exist { return xerrors.New("miner isn't valid") } return nil } var ErrTemporal = errors.New("temporal error") func blockSanityChecks(h *types.BlockHeader) error { if h.ElectionProof == nil { return xerrors.Errorf("block cannot have nil election proof") } if h.Ticket == nil { return xerrors.Errorf("block cannot have nil ticket") } if h.BlockSig == 
nil { return xerrors.Errorf("block had nil signature") } if h.BLSAggregate == nil { return xerrors.Errorf("block had nil bls aggregate signature") } return nil } // ValidateBlock should match up with 'Semantical Validation' in validation.md in the spec func (syncer *Syncer) ValidateBlock(ctx context.Context, b *types.FullBlock, useCache bool) (err error) { defer func() { // b.Cid() could panic for empty blocks that are used in tests. if rerr := recover(); rerr != nil { err = xerrors.Errorf("validate block panic: %w", rerr) return } }() if useCache { isValidated, err := syncer.store.IsBlockValidated(ctx, b.Cid()) if err != nil { return xerrors.Errorf("check block validation cache %s: %w", b.Cid(), err) } if isValidated { return nil } } validationStart := build.Clock.Now() defer func() { stats.Record(ctx, metrics.BlockValidationDurationMilliseconds.M(metrics.SinceInMilliseconds(validationStart))) log.Infow("block validation", "took", time.Since(validationStart), "height", b.Header.Height, "age", time.Since(time.Unix(int64(b.Header.Timestamp), 0))) }() ctx, span := trace.StartSpan(ctx, "validateBlock") defer span.End() if err := blockSanityChecks(b.Header); err != nil { return xerrors.Errorf("incoming header failed basic sanity checks: %w", err) } h := b.Header baseTs, err := syncer.store.LoadTipSet(types.NewTipSetKey(h.Parents...)) if err != nil { return xerrors.Errorf("load parent tipset failed (%s): %w", h.Parents, err) } winPoStNv := syncer.sm.GetNtwkVersion(ctx, baseTs.Height()) lbts, lbst, err := stmgr.GetLookbackTipSetForRound(ctx, syncer.sm, baseTs, h.Height) if err != nil { return xerrors.Errorf("failed to get lookback tipset for block: %w", err) } prevBeacon, err := syncer.store.GetLatestBeaconEntry(baseTs) if err != nil { return xerrors.Errorf("failed to get latest beacon entry: %w", err) } // fast checks first nulls := h.Height - (baseTs.Height() + 1) if tgtTs := baseTs.MinTimestamp() + build.BlockDelaySecs*uint64(nulls+1); h.Timestamp != tgtTs { return xerrors.Errorf("block has wrong timestamp: %d != %d", h.Timestamp, tgtTs) } now := uint64(build.Clock.Now().Unix()) if h.Timestamp > now+build.AllowableClockDriftSecs { return xerrors.Errorf("block was from the future (now=%d, blk=%d): %w", now, h.Timestamp, ErrTemporal) } if h.Timestamp > now { log.Warn("Got block from the future, but within threshold", h.Timestamp, build.Clock.Now().Unix()) } msgsCheck := async.Err(func() error { if err := syncer.checkBlockMessages(ctx, b, baseTs); err != nil { return xerrors.Errorf("block had invalid messages: %w", err) } return nil }) minerCheck := async.Err(func() error { if err := syncer.minerIsValid(ctx, h.Miner, baseTs); err != nil { return xerrors.Errorf("minerIsValid failed: %w", err) } return nil }) baseFeeCheck := async.Err(func() error { baseFee, err := syncer.store.ComputeBaseFee(ctx, baseTs) if err != nil { return xerrors.Errorf("computing base fee: %w", err) } if types.BigCmp(baseFee, b.Header.ParentBaseFee) != 0 { return xerrors.Errorf("base fee doesn't match: %s (header) != %s (computed)", b.Header.ParentBaseFee, baseFee) } return nil }) pweight, err := syncer.store.Weight(ctx, baseTs) if err != nil { return xerrors.Errorf("getting parent weight: %w", err) } if types.BigCmp(pweight, b.Header.ParentWeight) != 0 { return xerrors.Errorf("parrent weight different: %s (header) != %s (computed)", b.Header.ParentWeight, pweight) } stateRootCheck := async.Err(func() error { stateroot, precp, err := syncer.sm.TipSetState(ctx, baseTs) if err != nil { return xerrors.Errorf("get 
tipsetstate(%d, %s) failed: %w", h.Height, h.Parents, err) } if stateroot != h.ParentStateRoot { msgs, err := syncer.store.MessagesForTipset(baseTs) if err != nil { log.Error("failed to load messages for tipset during tipset state mismatch error: ", err) } else { log.Warn("Messages for tipset with mismatching state:") for i, m := range msgs { mm := m.VMMessage() log.Warnf("Message[%d]: from=%s to=%s method=%d params=%x", i, mm.From, mm.To, mm.Method, mm.Params) } } return xerrors.Errorf("parent state root did not match computed state (%s != %s)", stateroot, h.ParentStateRoot) } if precp != h.ParentMessageReceipts { return xerrors.Errorf("parent receipts root did not match computed value (%s != %s)", precp, h.ParentMessageReceipts) } return nil }) // Stuff that needs worker address waddr, err := stmgr.GetMinerWorkerRaw(ctx, syncer.sm, lbst, h.Miner) if err != nil { return xerrors.Errorf("GetMinerWorkerRaw failed: %w", err) } winnerCheck := async.Err(func() error { if h.ElectionProof.WinCount < 1 { return xerrors.Errorf("block is not claiming to be a winner") } eligible, err := stmgr.MinerEligibleToMine(ctx, syncer.sm, h.Miner, baseTs, lbts) if err != nil { return xerrors.Errorf("determining if miner has min power failed: %w", err) } if !eligible { return xerrors.New("block's miner is ineligible to mine") } rBeacon := *prevBeacon if len(h.BeaconEntries) != 0 { rBeacon = h.BeaconEntries[len(h.BeaconEntries)-1] } buf := new(bytes.Buffer) if err := h.Miner.MarshalCBOR(buf); err != nil { return xerrors.Errorf("failed to marshal miner address to cbor: %w", err) } vrfBase, err := store.DrawRandomness(rBeacon.Data, crypto.DomainSeparationTag_ElectionProofProduction, h.Height, buf.Bytes()) if err != nil { return xerrors.Errorf("could not draw randomness: %w", err) } if err := VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.ElectionProof.VRFProof); err != nil { return xerrors.Errorf("validating block election proof failed: %w", err) } slashed, err := stmgr.GetMinerSlashed(ctx, syncer.sm, baseTs, h.Miner) if err != nil { return xerrors.Errorf("failed to check if block miner was slashed: %w", err) } if slashed { return xerrors.Errorf("received block was from slashed or invalid miner") } mpow, tpow, _, err := stmgr.GetPowerRaw(ctx, syncer.sm, lbst, h.Miner) if err != nil { return xerrors.Errorf("failed getting power: %w", err) } j := h.ElectionProof.ComputeWinCount(mpow.QualityAdjPower, tpow.QualityAdjPower) if h.ElectionProof.WinCount != j { return xerrors.Errorf("miner claims wrong number of wins: miner: %d, computed: %d", h.ElectionProof.WinCount, j) } return nil }) blockSigCheck := async.Err(func() error { if err := sigs.CheckBlockSignature(ctx, h, waddr); err != nil { return xerrors.Errorf("check block signature failed: %w", err) } return nil }) beaconValuesCheck := async.Err(func() error { if os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_" { return nil } if err := beacon.ValidateBlockValues(syncer.beacon, h, baseTs.Height(), *prevBeacon); err != nil { return xerrors.Errorf("failed to validate blocks random beacon values: %w", err) } return nil }) tktsCheck := async.Err(func() error { buf := new(bytes.Buffer) if err := h.Miner.MarshalCBOR(buf); err != nil { return xerrors.Errorf("failed to marshal miner address to cbor: %w", err) } if h.Height > build.UpgradeSmokeHeight { buf.Write(baseTs.MinTicket().VRFProof) } beaconBase := *prevBeacon if len(h.BeaconEntries) != 0 { beaconBase = h.BeaconEntries[len(h.BeaconEntries)-1] } vrfBase, err := store.DrawRandomness(beaconBase.Data, 
crypto.DomainSeparationTag_TicketProduction, h.Height-build.TicketRandomnessLookback, buf.Bytes()) if err != nil { return xerrors.Errorf("failed to compute vrf base for ticket: %w", err) } err = VerifyElectionPoStVRF(ctx, waddr, vrfBase, h.Ticket.VRFProof) if err != nil { return xerrors.Errorf("validating block tickets failed: %w", err) } return nil }) wproofCheck := async.Err(func() error { if err := syncer.VerifyWinningPoStProof(ctx, winPoStNv, h, *prevBeacon, lbst, waddr); err != nil { return xerrors.Errorf("invalid election post: %w", err) } return nil }) await := []async.ErrorFuture{ minerCheck, tktsCheck, blockSigCheck, beaconValuesCheck, wproofCheck, winnerCheck, msgsCheck, baseFeeCheck, stateRootCheck, } var merr error for _, fut := range await { if err := fut.AwaitContext(ctx); err != nil { merr = multierror.Append(merr, err) } } if merr != nil { mulErr := merr.(*multierror.Error) mulErr.ErrorFormat = func(es []error) string { if len(es) == 1 { return fmt.Sprintf("1 error occurred:\n\t* %+v\n\n", es[0]) } points := make([]string, len(es)) for i, err := range es { points[i] = fmt.Sprintf("* %+v", err) } return fmt.Sprintf( "%d errors occurred:\n\t%s\n\n", len(es), strings.Join(points, "\n\t")) } return mulErr } if useCache { if err := syncer.store.MarkBlockAsValidated(ctx, b.Cid()); err != nil { return xerrors.Errorf("caching block validation %s: %w", b.Cid(), err) } } return nil } func (syncer *Syncer) VerifyWinningPoStProof(ctx context.Context, nv network.Version, h *types.BlockHeader, prevBeacon types.BeaconEntry, lbst cid.Cid, waddr address.Address) error { if build.InsecurePoStValidation { if len(h.WinPoStProof) == 0 { return xerrors.Errorf("[INSECURE-POST-VALIDATION] No winning post proof given") } if string(h.WinPoStProof[0].ProofBytes) == "valid proof" { return nil } return xerrors.Errorf("[INSECURE-POST-VALIDATION] winning post was invalid") } buf := new(bytes.Buffer) if err := h.Miner.MarshalCBOR(buf); err != nil { return xerrors.Errorf("failed to marshal miner address: %w", err) } rbase := prevBeacon if len(h.BeaconEntries) > 0 { rbase = h.BeaconEntries[len(h.BeaconEntries)-1] } rand, err := store.DrawRandomness(rbase.Data, crypto.DomainSeparationTag_WinningPoStChallengeSeed, h.Height, buf.Bytes()) if err != nil { return xerrors.Errorf("failed to get randomness for verifying winning post proof: %w", err) } mid, err := address.IDFromAddress(h.Miner) if err != nil { return xerrors.Errorf("failed to get ID from miner address %s: %w", h.Miner, err) } sectors, err := stmgr.GetSectorsForWinningPoSt(ctx, nv, syncer.verifier, syncer.sm, lbst, h.Miner, rand) if err != nil { return xerrors.Errorf("getting winning post sector set: %w", err) } ok, err := ffiwrapper.ProofVerifier.VerifyWinningPoSt(ctx, proof2.WinningPoStVerifyInfo{ Randomness: rand, Proofs: h.WinPoStProof, ChallengedSectors: sectors, Prover: abi.ActorID(mid), }) if err != nil { return xerrors.Errorf("failed to verify election post: %w", err) } if !ok { log.Errorf("invalid winning post (block: %s, %x; %v)", h.Cid(), rand, sectors) return xerrors.Errorf("winning post was invalid") } return nil } // TODO: We should extract this somewhere else and make the message pool and miner use the same logic func (syncer *Syncer) checkBlockMessages(ctx context.Context, b *types.FullBlock, baseTs *types.TipSet) error { { var sigCids []cid.Cid // this is what we get for people not wanting the marshalcbor method on the cid type var pubks [][]byte for _, m := range b.BlsMessages { sigCids = append(sigCids, m.Cid()) pubk, err := 
syncer.sm.GetBlsPublicKey(ctx, m.From, baseTs) if err != nil { return xerrors.Errorf("failed to load bls public to validate block: %w", err) } pubks = append(pubks, pubk) } if err := syncer.verifyBlsAggregate(ctx, b.Header.BLSAggregate, sigCids, pubks); err != nil { return xerrors.Errorf("bls aggregate signature was invalid: %w", err) } } nonces := make(map[address.Address]uint64) stateroot, _, err := syncer.sm.TipSetState(ctx, baseTs) if err != nil { return err } st, err := state.LoadStateTree(syncer.store.Store(ctx), stateroot) if err != nil { return xerrors.Errorf("failed to load base state tree: %w", err) } pl := vm.PricelistByEpoch(baseTs.Height()) var sumGasLimit int64 checkMsg := func(msg types.ChainMsg) error { m := msg.VMMessage() // Phase 1: syntactic validation, as defined in the spec minGas := pl.OnChainMessage(msg.ChainLength()) if err := m.ValidForBlockInclusion(minGas.Total()); err != nil { return err } // ValidForBlockInclusion checks if any single message does not exceed BlockGasLimit // So below is overflow safe sumGasLimit += m.GasLimit if sumGasLimit > build.BlockGasLimit { return xerrors.Errorf("block gas limit exceeded") } // Phase 2: (Partial) semantic validation: // the sender exists and is an account actor, and the nonces make sense if _, ok := nonces[m.From]; !ok { // `GetActor` does not validate that this is an account actor. act, err := st.GetActor(m.From) if err != nil { return xerrors.Errorf("failed to get actor: %w", err) } if !builtin.IsAccountActor(act.Code) { return xerrors.New("Sender must be an account actor") } nonces[m.From] = act.Nonce } if nonces[m.From] != m.Nonce { return xerrors.Errorf("wrong nonce (exp: %d, got: %d)", nonces[m.From], m.Nonce) } nonces[m.From]++ return nil } // Validate message arrays in a temporary blockstore. tmpbs := bstore.NewTemporary() tmpstore := blockadt.WrapStore(ctx, cbor.NewCborStore(tmpbs)) bmArr := blockadt.MakeEmptyArray(tmpstore) for i, m := range b.BlsMessages { if err := checkMsg(m); err != nil { return xerrors.Errorf("block had invalid bls message at index %d: %w", i, err) } c, err := store.PutMessage(tmpbs, m) if err != nil { return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) } k := cbg.CborCid(c) if err := bmArr.Set(uint64(i), &k); err != nil { return xerrors.Errorf("failed to put bls message at index %d: %w", i, err) } } smArr := blockadt.MakeEmptyArray(tmpstore) for i, m := range b.SecpkMessages { if err := checkMsg(m); err != nil { return xerrors.Errorf("block had invalid secpk message at index %d: %w", i, err) } // `From` being an account actor is only validated inside the `vm.ResolveToKeyAddr` call // in `StateManager.ResolveToKeyAddress` here (and not in `checkMsg`). 
kaddr, err := syncer.sm.ResolveToKeyAddress(ctx, m.Message.From, baseTs) if err != nil { return xerrors.Errorf("failed to resolve key addr: %w", err) } if err := sigs.Verify(&m.Signature, kaddr, m.Message.Cid().Bytes()); err != nil { return xerrors.Errorf("secpk message %s has invalid signature: %w", m.Cid(), err) } c, err := store.PutMessage(tmpbs, m) if err != nil { return xerrors.Errorf("failed to store message %s: %w", m.Cid(), err) } k := cbg.CborCid(c) if err := smArr.Set(uint64(i), &k); err != nil { return xerrors.Errorf("failed to put secpk message at index %d: %w", i, err) } } bmroot, err := bmArr.Root() if err != nil { return err } smroot, err := smArr.Root() if err != nil { return err } mrcid, err := tmpstore.Put(ctx, &types.MsgMeta{ BlsMessages: bmroot, SecpkMessages: smroot, }) if err != nil { return err } if b.Header.Messages != mrcid { return fmt.Errorf("messages didnt match message root in header") } // Finally, flush. return vm.Copy(ctx, tmpbs, syncer.store.Blockstore(), mrcid) } func (syncer *Syncer) verifyBlsAggregate(ctx context.Context, sig *crypto.Signature, msgs []cid.Cid, pubks [][]byte) error { _, span := trace.StartSpan(ctx, "syncer.verifyBlsAggregate") defer span.End() span.AddAttributes( trace.Int64Attribute("msgCount", int64(len(msgs))), ) msgsS := make([]ffi.Message, len(msgs)) pubksS := make([]ffi.PublicKey, len(msgs)) for i := 0; i < len(msgs); i++ { msgsS[i] = msgs[i].Bytes() copy(pubksS[i][:], pubks[i][:ffi.PublicKeyBytes]) } sigS := new(ffi.Signature) copy(sigS[:], sig.Data[:ffi.SignatureBytes]) if len(msgs) == 0 { return nil } valid := ffi.HashVerify(sigS, msgsS, pubksS) if !valid { return xerrors.New("bls aggregate signature failed to verify") } return nil } type syncStateKey struct{} func extractSyncState(ctx context.Context) *SyncerState { v := ctx.Value(syncStateKey{}) if v != nil { return v.(*SyncerState) } return nil } // collectHeaders collects the headers from the blocks between any two tipsets. // // `incoming` is the heaviest/projected/target tipset we have learned about, and // `known` is usually an anchor tipset we already have in our view of the chain // (which could be the genesis). // // collectHeaders checks if portions of the chain are in our ChainStore; falling // down to the network to retrieve the missing parts. If during the process, any // portion we receive is in our denylist (bad list), we short-circuit. // // {hint/usage}: This is used by collectChain, which is in turn called from the // main Sync method (Syncer#Sync), so it's a pretty central method. // // {hint/logic}: The logic of this method is as follows: // // 1. Check that the from tipset is not linked to a parent block known to be // bad. // 2. Check the consistency of beacon entries in the from tipset. We check // total equality of the BeaconEntries in each block. // 3. Traverse the chain backwards, for each tipset: // 3a. Load it from the chainstore; if found, it move on to its parent. // 3b. Query our peers via client in batches, requesting up to a // maximum of 500 tipsets every time. // // Once we've concluded, if we find a mismatching tipset at the height where the // anchor tipset should be, we are facing a fork, and we invoke Syncer#syncFork // to resolve it. Refer to the godocs there. // // All throughout the process, we keep checking if the received blocks are in // the deny list, and short-circuit the process if so. 
func (syncer *Syncer) collectHeaders(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) { ctx, span := trace.StartSpan(ctx, "collectHeaders") defer span.End() ss := extractSyncState(ctx) span.AddAttributes( trace.Int64Attribute("incomingHeight", int64(incoming.Height())), trace.Int64Attribute("knownHeight", int64(known.Height())), ) // Check if the parents of the from block are in the denylist. // i.e. if a fork of the chain has been requested that we know to be bad. for _, pcid := range incoming.Parents().Cids() { if reason, ok := syncer.bad.Has(pcid); ok { newReason := reason.Linked("linked to %s", pcid) for _, b := range incoming.Cids() { syncer.bad.Add(b, newReason) } return nil, xerrors.Errorf("chain linked to block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), pcid, reason) } } { // ensure consistency of beacon entires targetBE := incoming.Blocks()[0].BeaconEntries sorted := sort.SliceIsSorted(targetBE, func(i, j int) bool { return targetBE[i].Round < targetBE[j].Round }) if !sorted { syncer.bad.Add(incoming.Cids()[0], NewBadBlockReason(incoming.Cids(), "wrong order of beacon entires")) return nil, xerrors.Errorf("wrong order of beacon entires") } for _, bh := range incoming.Blocks()[1:] { if len(targetBE) != len(bh.BeaconEntries) { // cannot mark bad, I think @Kubuxu return nil, xerrors.Errorf("tipset contained different number for beacon entires") } for i, be := range bh.BeaconEntries { if targetBE[i].Round != be.Round || !bytes.Equal(targetBE[i].Data, be.Data) { // cannot mark bad, I think @Kubuxu return nil, xerrors.Errorf("tipset contained different beacon entires") } } } } blockSet := []*types.TipSet{incoming} // Parent of the new (possibly better) tipset that we need to fetch next. at := incoming.Parents() // we want to sync all the blocks until the height above our // best tipset so far untilHeight := known.Height() + 1 ss.SetHeight(blockSet[len(blockSet)-1].Height()) var acceptedBlocks []cid.Cid loop: for blockSet[len(blockSet)-1].Height() > untilHeight { for _, bc := range at.Cids() { if reason, ok := syncer.bad.Has(bc); ok { newReason := reason.Linked("change contained %s", bc) for _, b := range acceptedBlocks { syncer.bad.Add(b, newReason) } return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), bc, reason) } } // If, for some reason, we have a suffix of the chain locally, handle that here ts, err := syncer.store.LoadTipSet(at) if err == nil { acceptedBlocks = append(acceptedBlocks, at.Cids()...) blockSet = append(blockSet, ts) at = ts.Parents() continue } if !xerrors.Is(err, bstore.ErrNotFound) { log.Warn("loading local tipset: %s", err) } // NB: GetBlocks validates that the blocks are in-fact the ones we // requested, and that they are correctly linked to one another. It does // not validate any state transitions. window := 500 if gap := int(blockSet[len(blockSet)-1].Height() - untilHeight); gap < window { window = gap } blks, err := syncer.Exchange.GetBlocks(ctx, at, window) if err != nil { // Most likely our peers aren't fully synced yet, but forwarded // new block message (ideally we'd find better peers) log.Errorf("failed to get blocks: %+v", err) span.AddAttributes(trace.StringAttribute("error", err.Error())) // This error will only be logged above, return nil, xerrors.Errorf("failed to get blocks: %w", err) } log.Info("Got blocks: ", blks[0].Height(), len(blks)) // Check that the fetched segment of the chain matches what we already // have. 
Since we fetch from the head backwards our reassembled chain // is sorted in reverse here: we have a child -> parent order, our last // tipset then should be child of the first tipset retrieved. // FIXME: The reassembly logic should be part of the `client` // service, the consumer should not be concerned with the // `MaxRequestLength` limitation, it should just be able to request // an segment of arbitrary length. The same burden is put on // `syncFork()` which needs to be aware this as well. if blockSet[len(blockSet)-1].IsChildOf(blks[0]) == false { return nil, xerrors.Errorf("retrieved segments of the chain are not connected at heights %d/%d", blockSet[len(blockSet)-1].Height(), blks[0].Height()) // A successful `GetBlocks()` call is guaranteed to fetch at least // one tipset so the acess `blks[0]` is safe. } for _, b := range blks { if b.Height() < untilHeight { break loop } for _, bc := range b.Cids() { if reason, ok := syncer.bad.Has(bc); ok { newReason := reason.Linked("change contained %s", bc) for _, b := range acceptedBlocks { syncer.bad.Add(b, newReason) } return nil, xerrors.Errorf("chain contained block marked previously as bad (%s, %s) (reason: %s)", incoming.Cids(), bc, reason) } } blockSet = append(blockSet, b) } acceptedBlocks = append(acceptedBlocks, at.Cids()...) ss.SetHeight(blks[len(blks)-1].Height()) at = blks[len(blks)-1].Parents() } base := blockSet[len(blockSet)-1] if base.Equals(known) { blockSet = blockSet[:len(blockSet)-1] base = blockSet[len(blockSet)-1] } if base.IsChildOf(known) { // common case: receiving blocks that are building on top of our best tipset return blockSet, nil } knownParent, err := syncer.store.LoadTipSet(known.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } if base.IsChildOf(knownParent) { // common case: receiving a block thats potentially part of the same tipset as our best block return blockSet, nil } // We have now ascertained that this is *not* a 'fast forward' log.Warnf("(fork detected) synced header chain (%s - %d) does not link to our best block (%s - %d)", incoming.Cids(), incoming.Height(), known.Cids(), known.Height()) fork, err := syncer.syncFork(ctx, base, known) if err != nil { if xerrors.Is(err, ErrForkTooLong) || xerrors.Is(err, ErrForkCheckpoint) { // TODO: we're marking this block bad in the same way that we mark invalid blocks bad. Maybe distinguish? log.Warn("adding forked chain to our bad tipset cache") for _, b := range incoming.Blocks() { syncer.bad.Add(b.Cid(), NewBadBlockReason(incoming.Cids(), "fork past finality")) } } return nil, xerrors.Errorf("failed to sync fork: %w", err) } blockSet = append(blockSet, fork...) return blockSet, nil } var ErrForkTooLong = fmt.Errorf("fork longer than threshold") var ErrForkCheckpoint = fmt.Errorf("fork would require us to diverge from checkpointed block") // syncFork tries to obtain the chain fragment that links a fork into a common // ancestor in our view of the chain. // // If the fork is too long (build.ForkLengthThreshold), or would cause us to diverge from the checkpoint (ErrForkCheckpoint), // we add the entire subchain to the denylist. Else, we find the common ancestor, and add the missing chain // fragment until the fork point to the returned []TipSet. 
func (syncer *Syncer) syncFork(ctx context.Context, incoming *types.TipSet, known *types.TipSet) ([]*types.TipSet, error) { chkpt := syncer.GetCheckpoint() if known.Key() == chkpt { return nil, ErrForkCheckpoint } // TODO: Does this mean we always ask for ForkLengthThreshold blocks from the network, even if we just need, like, 2? // Would it not be better to ask in smaller chunks, given that an ~ForkLengthThreshold is very rare? tips, err := syncer.Exchange.GetBlocks(ctx, incoming.Parents(), int(build.ForkLengthThreshold)) if err != nil { return nil, err } nts, err := syncer.store.LoadTipSet(known.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load next local tipset: %w", err) } for cur := 0; cur < len(tips); { if nts.Height() == 0 { if !syncer.Genesis.Equals(nts) { return nil, xerrors.Errorf("somehow synced chain that linked back to a different genesis (bad genesis: %s)", nts.Key()) } return nil, xerrors.Errorf("synced chain forked at genesis, refusing to sync; incoming: %s", incoming.Cids()) } if nts.Equals(tips[cur]) { return tips[:cur+1], nil } if nts.Height() < tips[cur].Height() { cur++ } else { // We will be forking away from nts, check that it isn't checkpointed if nts.Key() == chkpt { return nil, ErrForkCheckpoint } nts, err = syncer.store.LoadTipSet(nts.Parents()) if err != nil { return nil, xerrors.Errorf("loading next local tipset: %w", err) } } } return nil, ErrForkTooLong } func (syncer *Syncer) syncMessagesAndCheckState(ctx context.Context, headers []*types.TipSet) error { ss := extractSyncState(ctx) ss.SetHeight(headers[len(headers)-1].Height()) return syncer.iterFullTipsets(ctx, headers, func(ctx context.Context, fts *store.FullTipSet) error { log.Debugw("validating tipset", "height", fts.TipSet().Height(), "size", len(fts.TipSet().Cids())) if err := syncer.ValidateTipSet(ctx, fts, true); err != nil { log.Errorf("failed to validate tipset: %+v", err) return xerrors.Errorf("message processing failed: %w", err) } stats.Record(ctx, metrics.ChainNodeWorkerHeight.M(int64(fts.TipSet().Height()))) ss.SetHeight(fts.TipSet().Height()) return nil }) } // fills out each of the given tipsets with messages and calls the callback with it func (syncer *Syncer) iterFullTipsets(ctx context.Context, headers []*types.TipSet, cb func(context.Context, *store.FullTipSet) error) error { ss := extractSyncState(ctx) ctx, span := trace.StartSpan(ctx, "iterFullTipsets") defer span.End() span.AddAttributes(trace.Int64Attribute("num_headers", int64(len(headers)))) for i := len(headers) - 1; i >= 0; { fts, err := syncer.store.TryFillTipSet(headers[i]) if err != nil { return err } if fts != nil { if err := cb(ctx, fts); err != nil { return err } i-- continue } batchSize := concurrentSyncRequests * syncRequestBatchSize if i < batchSize { batchSize = i + 1 } ss.SetStage(api.StageFetchingMessages) startOffset := i + 1 - batchSize bstout, batchErr := syncer.fetchMessages(ctx, headers[startOffset:startOffset+batchSize], startOffset) ss.SetStage(api.StageMessages) if batchErr != nil { return xerrors.Errorf("failed to fetch messages: %w", batchErr) } for bsi := 0; bsi < len(bstout); bsi++ { // temp storage so we don't persist data we dont want to bs := bstore.NewTemporary() blks := cbor.NewCborStore(bs) this := headers[i-bsi] bstip := bstout[len(bstout)-(bsi+1)] fts, err := zipTipSetAndMessages(blks, this, bstip.Bls, bstip.Secpk, bstip.BlsIncludes, bstip.SecpkIncludes) if err != nil { log.Warnw("zipping failed", "error", err, "bsi", bsi, "i", i, "height", this.Height(), "next-height", 
i+batchSize) return xerrors.Errorf("message processing failed: %w", err) } if err := cb(ctx, fts); err != nil { return err } if err := persistMessages(ctx, bs, bstip); err != nil { return err } if err := copyBlockstore(ctx, bs, syncer.store.Blockstore()); err != nil { return xerrors.Errorf("message processing failed: %w", err) } } i -= batchSize } return nil } func (syncer *Syncer) fetchMessages(ctx context.Context, headers []*types.TipSet, startOffset int) ([]*exchange.CompactedMessages, error) { batchSize := len(headers) batch := make([]*exchange.CompactedMessages, batchSize) var wg sync.WaitGroup var mx sync.Mutex var batchErr error start := build.Clock.Now() for j := 0; j < batchSize; j += syncRequestBatchSize { wg.Add(1) go func(j int) { defer wg.Done() nreq := syncRequestBatchSize if j+nreq > batchSize { nreq = batchSize - j } failed := false for offset := 0; !failed && offset < nreq; { nextI := j + offset lastI := j + nreq var requestErr error var requestResult []*exchange.CompactedMessages for retry := 0; requestResult == nil && retry < syncRequestRetries; retry++ { if retry > 0 { log.Infof("fetching messages at %d (retry %d)", startOffset+nextI, retry) } else { log.Infof("fetching messages at %d", startOffset+nextI) } result, err := syncer.Exchange.GetChainMessages(ctx, headers[nextI:lastI]) if err != nil { requestErr = multierror.Append(requestErr, err) } else { requestResult = result } } mx.Lock() if requestResult != nil { copy(batch[j+offset:], requestResult) offset += len(requestResult) } else { log.Errorf("error fetching messages at %d: %s", nextI, requestErr) batchErr = multierror.Append(batchErr, requestErr) failed = true } mx.Unlock() } }(j) } wg.Wait() if batchErr != nil { return nil, batchErr } log.Infof("fetching messages for %d tipsets at %d done; took %s", batchSize, startOffset, build.Clock.Since(start)) return batch, nil } func persistMessages(ctx context.Context, bs bstore.Blockstore, bst *exchange.CompactedMessages) error { _, span := trace.StartSpan(ctx, "persistMessages") defer span.End() for _, m := range bst.Bls { //log.Infof("putting BLS message: %s", m.Cid()) if _, err := store.PutMessage(bs, m); err != nil { log.Errorf("failed to persist messages: %+v", err) return xerrors.Errorf("BLS message processing failed: %w", err) } } for _, m := range bst.Secpk { if m.Signature.Type != crypto.SigTypeSecp256k1 { return xerrors.Errorf("unknown signature type on message %s: %q", m.Cid(), m.Signature.Type) } //log.Infof("putting secp256k1 message: %s", m.Cid()) if _, err := store.PutMessage(bs, m); err != nil { log.Errorf("failed to persist messages: %+v", err) return xerrors.Errorf("secp256k1 message processing failed: %w", err) } } return nil } // collectChain tries to advance our view of the chain to the purported head. // // It goes through various stages: // // 1. StageHeaders: we proceed in the sync process by requesting block headers // from our peers, moving back from their heads, until we reach a tipset // that we have in common (such a common tipset must exist, thought it may // simply be the genesis block). // // If the common tipset is our head, we treat the sync as a "fast-forward", // else we must drop part of our chain to connect to the peer's head // (referred to as "forking"). // // 2. StagePersistHeaders: now that we've collected the missing headers, // augmented by those on the other side of a fork, we persist them to the // BlockStore. // // 3. 
StageMessages: having acquired the headers and found a common tipset, // we then move forward, requesting the full blocks, including the messages. func (syncer *Syncer) collectChain(ctx context.Context, ts *types.TipSet, hts *types.TipSet) error { ctx, span := trace.StartSpan(ctx, "collectChain") defer span.End() ss := extractSyncState(ctx) ss.Init(hts, ts) headers, err := syncer.collectHeaders(ctx, ts, hts) if err != nil { ss.Error(err) return err } span.AddAttributes(trace.Int64Attribute("syncChainLength", int64(len(headers)))) if !headers[0].Equals(ts) { log.Errorf("collectChain headers[0] should be equal to sync target. Its not: %s != %s", headers[0].Cids(), ts.Cids()) } ss.SetStage(api.StagePersistHeaders) toPersist := make([]*types.BlockHeader, 0, len(headers)*int(build.BlocksPerEpoch)) for _, ts := range headers { toPersist = append(toPersist, ts.Blocks()...) } if err := syncer.store.PersistBlockHeaders(toPersist...); err != nil { err = xerrors.Errorf("failed to persist synced blocks to the chainstore: %w", err) ss.Error(err) return err } toPersist = nil ss.SetStage(api.StageMessages) if err := syncer.syncMessagesAndCheckState(ctx, headers); err != nil { err = xerrors.Errorf("collectChain syncMessages: %w", err) ss.Error(err) return err } ss.SetStage(api.StageSyncComplete) log.Debugw("new tipset", "height", ts.Height(), "tipset", types.LogCids(ts.Cids())) return nil } func VerifyElectionPoStVRF(ctx context.Context, worker address.Address, rand []byte, evrf []byte) error { return gen.VerifyVRF(ctx, worker, rand, evrf) } func (syncer *Syncer) State() []SyncerStateSnapshot { return syncer.syncmgr.State() } // MarkBad manually adds a block to the "bad blocks" cache. func (syncer *Syncer) MarkBad(blk cid.Cid) { syncer.bad.Add(blk, NewBadBlockReason([]cid.Cid{blk}, "manually marked bad")) } // UnmarkBad manually adds a block to the "bad blocks" cache. func (syncer *Syncer) UnmarkBad(blk cid.Cid) { syncer.bad.Remove(blk) } func (syncer *Syncer) UnmarkAllBad() { syncer.bad.Purge() } func (syncer *Syncer) CheckBadBlockCache(blk cid.Cid) (string, bool) { bbr, ok := syncer.bad.Has(blk) return bbr.String(), ok } func (syncer *Syncer) getLatestBeaconEntry(_ context.Context, ts *types.TipSet) (*types.BeaconEntry, error) { cur := ts for i := 0; i < 20; i++ { cbe := cur.Blocks()[0].BeaconEntries if len(cbe) > 0 { return &cbe[len(cbe)-1], nil } if cur.Height() == 0 { return nil, xerrors.Errorf("made it back to genesis block without finding beacon entry") } next, err := syncer.store.LoadTipSet(cur.Parents()) if err != nil { return nil, xerrors.Errorf("failed to load parents when searching back for latest beacon entry: %w", err) } cur = next } return nil, xerrors.Errorf("found NO beacon entries in the 20 latest tipsets") } func (syncer *Syncer) IsEpochBeyondCurrMax(epoch abi.ChainEpoch) bool { if syncer.Genesis == nil { return false } now := uint64(build.Clock.Now().Unix()) return epoch > (abi.ChainEpoch((now-syncer.Genesis.MinTimestamp())/build.BlockDelaySecs) + MaxHeightDrift) }
[ "\"LOTUS_IGNORE_DRAND\"" ]
[]
[ "LOTUS_IGNORE_DRAND" ]
[]
["LOTUS_IGNORE_DRAND"]
go
1
0
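LOTUS_IGNORE_DRAND, the single variable recorded for chain/sync.go, gates the beacon validation step inside ValidateBlock: beaconValuesCheck returns early when the variable equals "_yes_". A stripped-down sketch of that guard (the standalone function names are hypothetical, not lotus API):

package main

import (
	"fmt"
	"os"
)

// skipDrandValidation reflects the check in beaconValuesCheck: only the exact
// value "_yes_" disables validation of a block's beacon entries.
func skipDrandValidation() bool {
	return os.Getenv("LOTUS_IGNORE_DRAND") == "_yes_"
}

// validateBeacon runs the supplied check unless the escape hatch is set.
func validateBeacon(check func() error) error {
	if skipDrandValidation() {
		return nil // test/dev escape hatch, as in the syncer
	}
	return check()
}

func main() {
	err := validateBeacon(func() error { return fmt.Errorf("beacon mismatch") })
	fmt.Println("validation result:", err)
}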
adapter/config/yaml/yaml_test.go
// Copyright 2014 beego Author. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package yaml import ( "fmt" "os" "testing" "github.com/beego/beego/v2/adapter/config" ) func TestYaml(t *testing.T) { var ( yamlcontext = ` "appname": beeapi "httpport": 8080 "mysqlport": 3600 "PI": 3.1415976 "runmode": dev "autorender": false "copyrequestbody": true "PATH": GOPATH "path1": ${GOPATH} "path2": ${GOPATH||/home/go} "empty": "" ` keyValue = map[string]interface{}{ "appname": "beeapi", "httpport": 8080, "mysqlport": int64(3600), "PI": 3.1415976, "runmode": "dev", "autorender": false, "copyrequestbody": true, "PATH": "GOPATH", "path1": os.Getenv("GOPATH"), "path2": os.Getenv("GOPATH"), "error": "", "emptystrings": []string{}, } ) cfgFileName := "testyaml.conf" f, err := os.Create(cfgFileName) if err != nil { t.Fatal(err) } _, err = f.WriteString(yamlcontext) if err != nil { f.Close() t.Fatal(err) } f.Close() defer os.Remove(cfgFileName) yamlconf, err := config.NewConfig("yaml", cfgFileName) if err != nil { t.Fatal(err) } if yamlconf.String("appname") != "beeapi" { t.Fatal("appname not equal to beeapi") } for k, v := range keyValue { var ( value interface{} err error ) switch v.(type) { case int: value, err = yamlconf.Int(k) case int64: value, err = yamlconf.Int64(k) case float64: value, err = yamlconf.Float(k) case bool: value, err = yamlconf.Bool(k) case []string: value = yamlconf.Strings(k) case string: value = yamlconf.String(k) default: value, err = yamlconf.DIY(k) } if err != nil { t.Errorf("get key %q value fatal,%v err %s", k, v, err) } else if fmt.Sprintf("%v", v) != fmt.Sprintf("%v", value) { t.Errorf("get key %q value, want %v got %v .", k, v, value) } } if err = yamlconf.Set("name", "astaxie"); err != nil { t.Fatal(err) } if yamlconf.String("name") != "astaxie" { t.Fatal("get name error") } }
[ "\"GOPATH\"", "\"GOPATH\"" ]
[]
[ "GOPATH" ]
[]
["GOPATH"]
go
1
0
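The test above expects both "${GOPATH}" and "${GOPATH||/home/go}" to resolve to os.Getenv("GOPATH"), i.e. the value after "||" is only a fallback for when the variable is empty. Below is a standalone sketch of that expansion rule, written independently of beego's actual parser; the helper name is made up.

package main

import (
	"fmt"
	"os"
	"strings"
)

// expandEnv resolves values of the form "${NAME}" or "${NAME||default}".
// The default is used only when the environment variable is unset or
// empty; any other string is returned unchanged.
func expandEnv(value string) string {
	if !strings.HasPrefix(value, "${") || !strings.HasSuffix(value, "}") {
		return value
	}
	inner := value[2 : len(value)-1]
	name, def := inner, ""
	if i := strings.Index(inner, "||"); i >= 0 {
		name, def = inner[:i], inner[i+2:]
	}
	if v := os.Getenv(name); v != "" {
		return v
	}
	return def
}

func main() {
	fmt.Println(expandEnv("${GOPATH}"))           // value of GOPATH, or ""
	fmt.Println(expandEnv("${GOPATH||/home/go}")) // GOPATH, falling back to /home/go
	fmt.Println(expandEnv("GOPATH"))              // plain strings pass through
}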
google/stow_test.go
package google import ( "io/ioutil" "os" "reflect" "testing" "github.com/cheekybits/is" "github.com/khalid79/stow" "github.com/khalid79/stow/test" ) func TestStow(t *testing.T) { credFile := os.Getenv("GOOGLE_CREDENTIALS_FILE") projectId := os.Getenv("GOOGLE_PROJECT_ID") if credFile == "" || projectId == "" { t.Skip("skipping test because GOOGLE_CREDENTIALS_FILE or GOOGLE_PROJECT_ID not set.") } b, err := ioutil.ReadFile(credFile) if err != nil { t.Fatal(err) } config := stow.ConfigMap{ "json": string(b), "project_id": projectId, } test.All(t, "google", config) } func TestPrepMetadataSuccess(t *testing.T) { is := is.New(t) m := make(map[string]string) m["one"] = "two" m["3"] = "4" m["ninety-nine"] = "100" m2 := make(map[string]interface{}) for key, value := range m { m2[key] = value } //returns map[string]interface returnedMap, err := prepMetadata(m2) is.NoErr(err) if !reflect.DeepEqual(returnedMap, m) { t.Errorf("Expected map (%+v) and returned map (%+v) are not equal.", m, returnedMap) } } func TestPrepMetadataFailureWithNonStringValues(t *testing.T) { is := is.New(t) m := make(map[string]interface{}) m["float"] = 8.9 m["number"] = 9 _, err := prepMetadata(m) is.Err(err) }
[ "\"GOOGLE_CREDENTIALS_FILE\"", "\"GOOGLE_PROJECT_ID\"" ]
[]
[ "GOOGLE_PROJECT_ID", "GOOGLE_CREDENTIALS_FILE" ]
[]
["GOOGLE_PROJECT_ID", "GOOGLE_CREDENTIALS_FILE"]
go
2
0
Spring_05_JavaConfig/src/main/java/com/callor/jc/exec/MakeDBConnection.java
package com.callor.jc.exec; import org.jasypt.encryption.pbe.StandardPBEStringEncryptor; import java.io.FileNotFoundException; import java.io.PrintWriter; import java.util.Map; import java.util.Scanner; public class MakeDBConnection { public static void main(String[] args) throws FileNotFoundException { final String propsFile = "./src/main/resources/db.properties"; final Map<String, String> sysEnv = System.getenv(); final String driver = "com.mysql.cj.jdbc.Driver"; final String url = "jdbc:mysql://localhost:3306/naraDB"; final Scanner scan = new Scanner(System.in); String saltPass = sysEnv.get("callor.com"); System.out.println("시스템 환경변수 : " + saltPass); System.out.print("username : "); String username = scan.nextLine(); System.out.print("password : "); String password = scan.nextLine(); StandardPBEStringEncryptor pbe = new StandardPBEStringEncryptor(); pbe.setAlgorithm("PBEWithMD5AndDES"); pbe.setPassword(saltPass); String encUsername = pbe.encrypt(username); String encPassword = pbe.encrypt(password); PrintWriter out = new PrintWriter(propsFile); out.println("# MySQL 연결 설정"); out.printf("db.driver=%s\n",driver); out.printf("db.url=%s\n",url); out.printf("db.username=%s\n",encUsername); out.printf("db.password=%s\n",encPassword); out.flush(); out.close(); System.out.println("DB Connection Properties Complete!!"); } }
[]
[]
[]
[]
[]
java
0
0
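The Java class above reads the whole process environment as a map via System.getenv() and then looks up one key, which is why the extracted argument lists for this record are empty. Go has no direct map accessor, but os.Environ() can be folded into one; the following is a small illustrative sketch, with only the key name copied from the file and the rest assumed.

package main

import (
	"fmt"
	"os"
	"strings"
)

// environMap converts os.Environ()'s "KEY=VALUE" slice into a map,
// roughly the Go counterpart of Java's System.getenv().
func environMap() map[string]string {
	env := make(map[string]string)
	for _, kv := range os.Environ() {
		if i := strings.IndexByte(kv, '='); i >= 0 {
			env[kv[:i]] = kv[i+1:]
		}
	}
	return env
}

func main() {
	env := environMap()
	// Same lookup the Java class performs on its environment map.
	fmt.Println("callor.com =", env["callor.com"])
}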
config/config.go
package config import ( "fmt" "io/ioutil" "os" "strings" "github.com/jmervine/hipcat/Godeps/_workspace/src/gopkg.in/yaml.v2" ) var DefaultConfig = "~/.hipcat" func ReplaceHome(path string) (string, error) { if os.Getenv("HOME") == "" { return path, fmt.Errorf("HOME could not be fetched from your environment") } return strings.Replace(path, "~", os.Getenv("HOME"), 1), nil } type Config struct { Room string `yaml:"room"` Token string `yaml:"token"` Sender string `yaml:"sender"` Host string `yaml:"host"` Code string `yaml:"code"` Color string `yaml:"color"` Notify string `yaml:"notify"` Conf string Message []byte } func ToBool(s string) bool { s = string([]byte(strings.ToLower(s))[0]) return (s == "t" || s == "y") } func (config *Config) LoadConfig(source string) error { source, err := ReplaceHome(source) if err != nil { return err } raw, err := ioutil.ReadFile(source) if err == nil { err = yaml.Unmarshal(raw, config) } return err } func (c *Config) Require() error { err := "Missing required argument: %s" if c.Room == "" { return fmt.Errorf(err, "room") } if c.Token == "" { return fmt.Errorf(err, "token") } return nil } func (c *Config) ReadMessage() error { stdin, err := ioutil.ReadAll(os.Stdin) if err != nil { return err } c.Message = stdin return err } func (c *Config) FormattedMessage() string { format := "%s" if ToBool(c.Code) { format = "/code %s" } return fmt.Sprintf(format, string(c.Message[:len(c.Message)-1])) } func (c *Config) FormattedNotification() string { return fmt.Sprintf("<pre>%s</pre>", string(c.Message[:len(c.Message)-1])) }
[ "\"HOME\"", "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
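ReplaceHome above swaps a leading "~" for $HOME and returns an error when HOME is not set. Purely as a point of comparison, and not how hipcat itself does it, the same expansion can be sketched with os.UserHomeDir, which also consults HOME on Unix but adds platform fallbacks.

package main

import (
	"fmt"
	"os"
	"strings"
)

// expandTilde replaces a leading "~" with the current user's home
// directory, as reported by os.UserHomeDir (which reads $HOME on Unix).
func expandTilde(path string) (string, error) {
	if !strings.HasPrefix(path, "~") {
		return path, nil
	}
	home, err := os.UserHomeDir()
	if err != nil {
		return path, fmt.Errorf("could not resolve home directory: %w", err)
	}
	return home + strings.TrimPrefix(path, "~"), nil
}

func main() {
	p, err := expandTilde("~/.hipcat")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(p)
}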
api/src/main/java/com/expedia/www/haystack/agent/core/config/ConfigurationHelpers.java
/* * Copyright 2017 Expedia, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ package com.expedia.www.haystack.agent.core.config; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; import org.apache.commons.lang3.StringUtils; import java.util.*; public class ConfigurationHelpers { // this property is injected into the config object passed to every dispatcher // it can be used by the logger or build metric names with agent name as prefix public static String AGENT_NAME_KEY = "agentName"; private final static String HAYSTACK_AGENT_ENV_VAR_PREFIX = "haystack_env_"; private ConfigurationHelpers() { /* suppress pmd violation */ } /** * convert the map of [string,string] to properties object * @param config map of key value pairs * @return a properties object */ public static Properties generatePropertiesFromMap(Map<String, String> config) { final Properties properties = new Properties(); properties.putAll(config); return properties; } /** * create typesafe config object by first reading the configuration from environment variables * and then doing a fallback on the actual configuration string passed as argument. * Environment variables can be used to override the config. * @param configStr configuration passed to the app * @return final config object with env variables as overrides over actual configuration passed to the app */ public static Config load(final String configStr) { return loadFromEnvVars().withFallback(ConfigFactory.parseString(configStr)); } /** * parse the agent configurations from the root config * @param config main configuration * @return map of agentNames and their corresponding config object */ public static Map<String, Config> readAgentConfigs(final Config config) { final Map<String, Config> agentConfigs = new HashMap<>(); final Config agentsConfig = config.getConfig("agents"); final Set<String> agentNames = new HashSet<>(); agentsConfig.entrySet().forEach((e) -> agentNames.add(findRootKeyName(e.getKey()))); agentNames.forEach((name) -> agentConfigs.put(name, agentsConfig.getConfig(name))); return agentConfigs; } /** * parse the dispatcher configurations from the agent's config section * agent's name is injected into each dispatcher config object, by default * @param agentConfig agent's config section * @param agentName name of agent * @return map of dispatcherNames and their corresponding config object */ public static Map<String, Config> readDispatchersConfig(final Config agentConfig, final String agentName) { final Config dispatchers = agentConfig.getConfig("dispatchers"); final Map<String, Config> dispatchersConfigMap = new HashMap<>(); final Set<String> dispatcherNames = new HashSet<>(); dispatchers.entrySet().forEach((e) -> dispatcherNames.add(findRootKeyName(e.getKey()))); dispatcherNames.forEach((name) -> dispatchersConfigMap.put(name, addAgentNameToConfig(dispatchers.getConfig(name), agentName))); return dispatchersConfigMap; } /** * converts typesafe config object to a map of string,string * @param conf typesafe config object * @return map of 
key, value pairs */ public static Map<String, String> convertToPropertyMap(final Config conf) { final Map<String, String> props = new HashMap<>(); conf.entrySet().forEach((e) -> props.put(e.getKey(), e.getValue().unwrapped().toString())); return props; } private static Config addAgentNameToConfig(final Config config, final String agentName) { return config.withFallback(ConfigFactory.parseString(AGENT_NAME_KEY + " = " + agentName)); } private static boolean isHaystackAgentEnvVar(final String envKey) { return envKey.startsWith(HAYSTACK_AGENT_ENV_VAR_PREFIX); } private static Config loadFromEnvVars() { final Map<String, String> envMap = new HashMap<>(); System.getenv().entrySet().stream() .filter((e) -> isHaystackAgentEnvVar(e.getKey())) .forEach((e) -> { final String normalizedKey = e.getKey().replaceFirst(HAYSTACK_AGENT_ENV_VAR_PREFIX, "").replace('_', '.'); envMap.put(normalizedKey, e.getValue()); }); return ConfigFactory.parseMap(envMap); } // extracts the root keyname, for e.g. if the path given is 'x.y.z' then rootKey is 'x' private static String findRootKeyName(final String path) { return StringUtils.split(path, ".")[0]; } }
[]
[]
[]
[]
[]
java
0
0
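loadFromEnvVars above turns every variable prefixed with haystack_env_ into a config key by stripping the prefix and replacing underscores with dots. Below is a self-contained Go sketch of the same prefix-override idea; the prefix is copied from the Java file, while the key and port used in main are invented for illustration.

package main

import (
	"fmt"
	"os"
	"strings"
)

const envPrefix = "haystack_env_"

// overridesFromEnv collects configuration overrides from prefixed
// environment variables: "haystack_env_agents_spans_port=8080"
// becomes {"agents.spans.port": "8080"}.
func overridesFromEnv() map[string]string {
	overrides := make(map[string]string)
	for _, kv := range os.Environ() {
		i := strings.IndexByte(kv, '=')
		if i < 0 || !strings.HasPrefix(kv[:i], envPrefix) {
			continue
		}
		key := strings.ReplaceAll(strings.TrimPrefix(kv[:i], envPrefix), "_", ".")
		overrides[key] = kv[i+1:]
	}
	return overrides
}

func main() {
	// Example only: set one override in-process so the sketch prints something.
	os.Setenv("haystack_env_agents_spans_port", "8080")
	for k, v := range overridesFromEnv() {
		fmt.Printf("%s = %s\n", k, v)
	}
}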
vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/quotaset_test.go
// +build acceptance compute quotasets package v2 import ( "fmt" "os" "testing" "github.com/gophercloud/gophercloud" "github.com/gophercloud/gophercloud/acceptance/clients" "github.com/gophercloud/gophercloud/acceptance/tools" "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets" "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" th "github.com/gophercloud/gophercloud/testhelper" ) func TestQuotasetGet(t *testing.T) { client, err := clients.NewComputeV2Client() th.AssertNoErr(t, err) identityClient, err := clients.NewIdentityV2Client() th.AssertNoErr(t, err) tenantID, err := getTenantID(t, identityClient) th.AssertNoErr(t, err) quotaSet, err := quotasets.Get(client, tenantID).Extract() th.AssertNoErr(t, err) tools.PrintResource(t, quotaSet) th.AssertEquals(t, quotaSet.FixedIPs, -1) } func getTenantID(t *testing.T, client *gophercloud.ServiceClient) (string, error) { allPages, err := tenants.List(client, nil).AllPages() th.AssertNoErr(t, err) allTenants, err := tenants.ExtractTenants(allPages) th.AssertNoErr(t, err) for _, tenant := range allTenants { return tenant.ID, nil } return "", fmt.Errorf("Unable to get tenant ID") } func getTenantIDByName(t *testing.T, client *gophercloud.ServiceClient, name string) (string, error) { allPages, err := tenants.List(client, nil).AllPages() th.AssertNoErr(t, err) allTenants, err := tenants.ExtractTenants(allPages) th.AssertNoErr(t, err) for _, tenant := range allTenants { if tenant.Name == name { return tenant.ID, nil } } return "", fmt.Errorf("Unable to get tenant ID") } // What will be sent as desired Quotas to the Server var UpdateQuotaOpts = quotasets.UpdateOpts{ FixedIPs: gophercloud.IntToPointer(10), FloatingIPs: gophercloud.IntToPointer(10), InjectedFileContentBytes: gophercloud.IntToPointer(10240), InjectedFilePathBytes: gophercloud.IntToPointer(255), InjectedFiles: gophercloud.IntToPointer(5), KeyPairs: gophercloud.IntToPointer(10), MetadataItems: gophercloud.IntToPointer(128), RAM: gophercloud.IntToPointer(20000), SecurityGroupRules: gophercloud.IntToPointer(20), SecurityGroups: gophercloud.IntToPointer(10), Cores: gophercloud.IntToPointer(10), Instances: gophercloud.IntToPointer(4), ServerGroups: gophercloud.IntToPointer(2), ServerGroupMembers: gophercloud.IntToPointer(3), } // What the Server hopefully returns as the new Quotas var UpdatedQuotas = quotasets.QuotaSet{ FixedIPs: 10, FloatingIPs: 10, InjectedFileContentBytes: 10240, InjectedFilePathBytes: 255, InjectedFiles: 5, KeyPairs: 10, MetadataItems: 128, RAM: 20000, SecurityGroupRules: 20, SecurityGroups: 10, Cores: 10, Instances: 4, ServerGroups: 2, ServerGroupMembers: 3, } func TestQuotasetUpdateDelete(t *testing.T) { clients.RequireAdmin(t) client, err := clients.NewComputeV2Client() th.AssertNoErr(t, err) idclient, err := clients.NewIdentityV2Client() th.AssertNoErr(t, err) tenantid, err := getTenantIDByName(t, idclient, os.Getenv("OS_TENANT_NAME")) th.AssertNoErr(t, err) // save original quotas orig, err := quotasets.Get(client, tenantid).Extract() th.AssertNoErr(t, err) // Test Update res, err := quotasets.Update(client, tenantid, UpdateQuotaOpts).Extract() th.AssertNoErr(t, err) th.AssertEquals(t, UpdatedQuotas, *res) // Test Delete _, err = quotasets.Delete(client, tenantid).Extract() th.AssertNoErr(t, err) // We dont know the default quotas, so just check if the quotas are not the same as before newres, err := quotasets.Get(client, tenantid).Extract() th.AssertNoErr(t, err) if newres.RAM == res.RAM { t.Fatalf("Failed to update quotas") } 
restore := quotasets.UpdateOpts{} FillUpdateOptsFromQuotaSet(*orig, &restore) // restore original quotas res, err = quotasets.Update(client, tenantid, restore).Extract() th.AssertNoErr(t, err) orig.ID = "" th.AssertDeepEquals(t, orig, res) }
[ "\"OS_TENANT_NAME\"" ]
[]
[ "OS_TENANT_NAME" ]
[]
["OS_TENANT_NAME"]
go
1
0
local.go
// Copyright 2013 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // This file implements a stand-alone blog server. package main import ( "flag" "log" "net" "net/http" "os" "os/exec" "path/filepath" "strings" "golang.org/x/tools/blog" ) var ( httpAddr = flag.String("http", "localhost:8080", "HTTP listen address") contentPath = flag.String("content", "content/", "path to content files") templatePath = flag.String("template", "template/", "path to template files") staticPath = flag.String("static", "static/", "path to static files") godocPath = flag.String("godoc", defaultGodocPath(), "path to lib/godoc static files") reload = flag.Bool("reload", false, "reload content on each page load") ) func defaultGodocPath() string { out, err := exec.Command("go", "list", "-f", "{{.Dir}}", "golang.org/x/tools/godoc").CombinedOutput() if err != nil { log.Printf("warning: locating -godoc directory: %v", err) return "" } dir := strings.TrimSpace(string(out)) return filepath.Join(dir, "static") } // maybeStatic serves from one of the two static directories // (-static and -godoc) if possible, or else defers to the fallback handler. func maybeStatic(fallback http.Handler) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { p := r.URL.Path if strings.Contains(p, ".") && !strings.HasSuffix(p, "/") { f := filepath.Join(*staticPath, p) if _, err := os.Stat(f); err == nil { http.ServeFile(w, r, f) return } } if strings.HasPrefix(p, "/lib/godoc/") { f := filepath.Join(*godocPath, p[len("/lib/godoc/"):]) if _, err := os.Stat(f); err == nil { http.ServeFile(w, r, f) return } } fallback.ServeHTTP(w, r) } } func newServer(reload bool, staticPath string, config blog.Config) (http.Handler, error) { mux := http.NewServeMux() var h http.Handler if reload { h = http.HandlerFunc(reloadingBlogServer) } else { s, err := blog.NewServer(config) if err != nil { return nil, err } h = s } mux.Handle("/", maybeStatic(h)) return mux, nil } func main() { flag.Parse() if os.Getenv("GAE_ENV") == "standard" { log.Println("running in App Engine Standard mode") gaeMain() return } config.ContentPath = *contentPath config.TemplatePath = *templatePath mux, err := newServer(*reload, *staticPath, config) if err != nil { log.Fatal(err) } ln, err := net.Listen("tcp", *httpAddr) if err != nil { log.Fatal(err) } log.Println("Listening on addr", *httpAddr) log.Fatal(http.Serve(ln, mux)) } // reloadingBlogServer is an handler that restarts the blog server on each page // view. Inefficient; don't enable by default. Handy when editing blog content. func reloadingBlogServer(w http.ResponseWriter, r *http.Request) { s, err := blog.NewServer(config) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } s.ServeHTTP(w, r) }
[ "\"GAE_ENV\"" ]
[]
[ "GAE_ENV" ]
[]
["GAE_ENV"]
go
1
0
src/server/pachyderm_test.go
package server import ( "archive/tar" "bytes" "context" "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "io" "io/ioutil" "math" "math/rand" "net" "net/http" "net/url" "os" "path" "reflect" "strconv" "strings" "sync" "testing" "time" "github.com/segmentio/kafka-go" "golang.org/x/sync/errgroup" "github.com/pachyderm/pachyderm/src/client" "github.com/pachyderm/pachyderm/src/client/auth" "github.com/pachyderm/pachyderm/src/client/pfs" "github.com/pachyderm/pachyderm/src/client/pkg/require" "github.com/pachyderm/pachyderm/src/client/pps" pfspretty "github.com/pachyderm/pachyderm/src/server/pfs/pretty" "github.com/pachyderm/pachyderm/src/server/pkg/ancestry" "github.com/pachyderm/pachyderm/src/server/pkg/backoff" col "github.com/pachyderm/pachyderm/src/server/pkg/collection" "github.com/pachyderm/pachyderm/src/server/pkg/ppsconsts" "github.com/pachyderm/pachyderm/src/server/pkg/ppsutil" "github.com/pachyderm/pachyderm/src/server/pkg/pretty" tu "github.com/pachyderm/pachyderm/src/server/pkg/testutil" "github.com/pachyderm/pachyderm/src/server/pkg/uuid" "github.com/pachyderm/pachyderm/src/server/pkg/workload" ppspretty "github.com/pachyderm/pachyderm/src/server/pps/pretty" pps_server "github.com/pachyderm/pachyderm/src/server/pps/server" "github.com/pachyderm/pachyderm/src/server/pps/server/githook" etcd "github.com/coreos/etcd/clientv3" "github.com/gogo/protobuf/types" prom_api "github.com/prometheus/client_golang/api" prom_api_v1 "github.com/prometheus/client_golang/api/prometheus/v1" prom_model "github.com/prometheus/common/model" apps "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/watch" ) const ( // If this environment variable is set, then the tests are being run // in a real cluster in the cloud. InCloudEnv = "PACH_TEST_CLOUD" ) func TestSimplePipeline(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestSimplePipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("TestSimplePipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf)) require.Equal(t, "foo", buf.String()) } // TestRepoSize ensures that a repo's size is equal to it's master branch's // HEAD's size. This test should prevent a regression where output repos would // incorrectly report their size to be 0B. 
See here for more details: // https://github.com/pachyderm/pachyderm/issues/3330 func TestRepoSize(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create a data repo dataRepo := tu.UniqueString("TestRepoSize_data") require.NoError(t, c.CreateRepo(dataRepo)) // create a pipeline pipeline := tu.UniqueString("TestRepoSize") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) // put a file without an open commit - should count towards repo size _, err := c.PutFile(dataRepo, "master", "file2", strings.NewReader("foo")) require.NoError(t, err) // put a file on another branch - should not count towards repo size _, err = c.PutFile(dataRepo, "develop", "file3", strings.NewReader("foo")) require.NoError(t, err) // put a file on an open commit - should count toward repo size commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file1", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) // wait for everything to be processed commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) // check data repo size repoInfo, err := pachClient.InspectRepo(dataRepo) require.NoError(t, err) require.Equal(t, uint64(6), repoInfo.SizeBytes) // check pipeline repo size repoInfo, err = pachClient.InspectRepo(pipeline) require.NoError(t, err) require.Equal(t, uint64(6), repoInfo.SizeBytes) // ensure size is updated when we delete a commit require.NoError(t, c.DeleteCommit(dataRepo, commit1.ID)) repoInfo, err = pachClient.InspectRepo(dataRepo) require.NoError(t, err) require.Equal(t, uint64(3), repoInfo.SizeBytes) repoInfo, err = pachClient.InspectRepo(pipeline) require.NoError(t, err) require.Equal(t, uint64(3), repoInfo.SizeBytes) } func TestPFSPipeline(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPFSPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("TestPFSPipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf)) require.Equal(t, "foo", buf.String()) } func TestPipelineWithParallelism(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineWithParallelism_data") require.NoError(t, c.CreateRepo(dataRepo)) numFiles := 1000 commit1, err := 
c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(fmt.Sprintf("%d", i))) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, &pps.ParallelismSpec{ Constant: 4, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) for i := 0; i < numFiles; i++ { var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file-%d", i), 0, 0, &buf)) require.Equal(t, fmt.Sprintf("%d", i), buf.String()) } } func TestPipelineWithLargeFiles(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineWithLargeFiles_data") require.NoError(t, c.CreateRepo(dataRepo)) r := rand.New(rand.NewSource(99)) numFiles := 10 var fileContents []string commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) chunkSize := int(pfs.ChunkSize / 32) // We used to use a full ChunkSize, but it was increased which caused this test to take too long. for i := 0; i < numFiles; i++ { fileContent := workload.RandString(r, chunkSize+i*MB) _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(fileContent)) require.NoError(t, err) fileContents = append(fileContents, fileContent) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, nil, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) commit := commitInfos[0].Commit for i := 0; i < numFiles; i++ { var buf bytes.Buffer fileName := fmt.Sprintf("file-%d", i) fileInfo, err := c.InspectFile(commit.Repo.Name, commit.ID, fileName) require.NoError(t, err) require.Equal(t, chunkSize+i*MB, int(fileInfo.SizeBytes)) require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, fileName, 0, 0, &buf)) // we don't wanna use the `require` package here since it prints // the strings, which would clutter the output. 
if fileContents[i] != buf.String() { t.Fatalf("file content does not match") } } } func TestDatumDedup(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestDatumDedup_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") // This pipeline sleeps for 10 secs per datum require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "sleep 10", }, nil, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) commit2, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit2.ID)) // Since we did not change the datum, the datum should not be processed // again, which means that the job should complete instantly. ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() stream, err := c.PfsAPIClient.FlushCommit( ctx, &pfs.FlushCommitRequest{ Commits: []*pfs.Commit{commit2}, }) require.NoError(t, err) _, err = stream.Recv() require.NoError(t, err) } func TestPipelineInputDataModification(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineInputDataModification_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, nil, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf)) require.Equal(t, "foo", buf.String()) // replace the contents of 'file' in dataRepo (from "foo" to "bar") commit2, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) require.NoError(t, c.DeleteFile(dataRepo, commit2.ID, "file")) _, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit2.ID)) commitIter, err = c.FlushCommit([]*pfs.Commit{commit2}, nil) require.NoError(t, err) commitInfos = collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) buf.Reset() require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf)) require.Equal(t, "bar", buf.String()) // Add a file to dataRepo commit3, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) require.NoError(t, c.DeleteFile(dataRepo, commit3.ID, "file")) _, err = c.PutFile(dataRepo, commit3.ID, "file2", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, 
c.FinishCommit(dataRepo, commit3.ID)) commitIter, err = c.FlushCommit([]*pfs.Commit{commit3}, nil) require.NoError(t, err) commitInfos = collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) require.YesError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf)) buf.Reset() require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file2", 0, 0, &buf)) require.Equal(t, "foo", buf.String()) commitInfos, err = c.ListCommit(pipeline, "master", "", 0) require.NoError(t, err) require.Equal(t, 3, len(commitInfos)) } func TestMultipleInputsFromTheSameBranch(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestMultipleInputsFromTheSameBranch_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "dirA/file", strings.NewReader("foo\n")) require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "dirB/file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cat /pfs/out/file", fmt.Sprintf("cat /pfs/dirA/dirA/file >> /pfs/out/file"), fmt.Sprintf("cat /pfs/dirB/dirB/file >> /pfs/out/file"), }, nil, client.NewCrossInput( client.NewPFSInputOpts("dirA", dataRepo, "", "/dirA/*", "", false), client.NewPFSInputOpts("dirB", dataRepo, "", "/dirB/*", "", false), ), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf)) require.Equal(t, "foo\nfoo\n", buf.String()) commit2, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit2.ID, "dirA/file", strings.NewReader("bar\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit2.ID)) commitIter, err = c.FlushCommit([]*pfs.Commit{commit2}, nil) require.NoError(t, err) commitInfos = collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) buf.Reset() require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf)) require.Equal(t, "foo\nbar\nfoo\n", buf.String()) commit3, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit3.ID, "dirB/file", strings.NewReader("buzz\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit3.ID)) commitIter, err = c.FlushCommit([]*pfs.Commit{commit3}, nil) require.NoError(t, err) commitInfos = collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) buf.Reset() require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf)) require.Equal(t, "foo\nbar\nfoo\nbuzz\n", buf.String()) commitInfos, err = c.ListCommit(pipeline, "master", "", 0) require.NoError(t, err) require.Equal(t, 3, len(commitInfos)) } func TestMultipleInputsFromTheSameRepoDifferentBranches(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := 
tu.UniqueString("TestMultipleInputsFromTheSameRepoDifferentBranches_data") require.NoError(t, c.CreateRepo(dataRepo)) branchA := "branchA" branchB := "branchB" pipeline := tu.UniqueString("pipeline") // Creating this pipeline should error, because the two inputs are // from the same repo but they don't specify different names. require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cat /pfs/branch-a/file >> /pfs/out/file", "cat /pfs/branch-b/file >> /pfs/out/file", }, nil, client.NewCrossInput( client.NewPFSInputOpts("branch-a", dataRepo, branchA, "/*", "", false), client.NewPFSInputOpts("branch-b", dataRepo, branchB, "/*", "", false), ), "", false, )) commitA, err := c.StartCommit(dataRepo, branchA) require.NoError(t, err) c.PutFile(dataRepo, commitA.ID, "/file", strings.NewReader("data A\n")) c.FinishCommit(dataRepo, commitA.ID) commitB, err := c.StartCommit(dataRepo, branchB) require.NoError(t, err) c.PutFile(dataRepo, commitB.ID, "/file", strings.NewReader("data B\n")) c.FinishCommit(dataRepo, commitB.ID) iter, err := c.FlushCommit([]*pfs.Commit{commitA, commitB}, nil) require.NoError(t, err) commits := collectCommitInfos(t, iter) require.Equal(t, 1, len(commits)) buffer := bytes.Buffer{} require.NoError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "file", 0, 0, &buffer)) require.Equal(t, "data A\ndata B\n", buffer.String()) } func TestRunPipeline(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // Test on cross pipeline t.Run("RunPipelineCross", func(t *testing.T) { dataRepo := tu.UniqueString("TestRunPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) branchA := "branchA" branchB := "branchB" pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cat /pfs/branch-a/file >> /pfs/out/file", "cat /pfs/branch-b/file >> /pfs/out/file", "echo ran-pipeline", }, nil, client.NewCrossInput( client.NewPFSInputOpts("branch-a", dataRepo, branchA, "/*", "", false), client.NewPFSInputOpts("branch-b", dataRepo, branchB, "/*", "", false), ), "", false, )) commitA, err := c.StartCommit(dataRepo, branchA) require.NoError(t, err) c.PutFile(dataRepo, commitA.ID, "/file", strings.NewReader("data A\n")) c.FinishCommit(dataRepo, commitA.ID) commitB, err := c.StartCommit(dataRepo, branchB) require.NoError(t, err) c.PutFile(dataRepo, commitB.ID, "/file", strings.NewReader("data B\n")) c.FinishCommit(dataRepo, commitB.ID) iter, err := c.FlushCommit([]*pfs.Commit{commitA, commitB}, nil) require.NoError(t, err) commits := collectCommitInfos(t, iter) require.Equal(t, 1, len(commits)) buffer := bytes.Buffer{} require.NoError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "file", 0, 0, &buffer)) require.Equal(t, "data A\ndata B\n", buffer.String()) commitM, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) err = c.FinishCommit(dataRepo, commitM.ID) require.NoError(t, err) // we should have two jobs ji, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 2, len(ji)) // now run the pipeline require.NoError(t, c.RunPipeline(pipeline, nil)) // running the pipeline should create a new job require.NoError(t, backoff.Retry(func() error { jobInfos, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) if len(jobInfos) != 3 { return fmt.Errorf("expected 3 jobs, got %d", len(jobInfos)) } return nil }, backoff.NewTestingBackOff())) // 
now run the pipeline with non-empty provenance require.NoError(t, backoff.Retry(func() error { return c.RunPipeline(pipeline, []*pfs.CommitProvenance{ client.NewCommitProvenance(dataRepo, "branchA", commitA.ID), }) }, backoff.NewTestingBackOff())) // running the pipeline should create a new job require.NoError(t, backoff.Retry(func() error { jobInfos, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) if len(jobInfos) != 4 { return fmt.Errorf("expected 4 jobs, got %d", len(jobInfos)) } return nil }, backoff.NewTestingBackOff())) // add some new commits with some new info commitA2, err := c.StartCommit(dataRepo, branchA) require.NoError(t, err) c.PutFile(dataRepo, commitA2.ID, "/file", strings.NewReader("data A2\n")) c.FinishCommit(dataRepo, commitA2.ID) commitB2, err := c.StartCommit(dataRepo, branchB) require.NoError(t, err) c.PutFile(dataRepo, commitB2.ID, "/file", strings.NewReader("data B2\n")) c.FinishCommit(dataRepo, commitB2.ID) // and make sure the output file is updated appropriately iter, err = c.FlushCommit([]*pfs.Commit{commitA2, commitB2}, nil) require.NoError(t, err) commits = collectCommitInfos(t, iter) require.Equal(t, 1, len(commits)) buffer = bytes.Buffer{} require.NoError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "file", 0, 0, &buffer)) require.Equal(t, "data A\ndata A2\ndata B\ndata B2\n", buffer.String()) // now run the pipeline provenant on the old commits require.NoError(t, c.RunPipeline(pipeline, []*pfs.CommitProvenance{ client.NewCommitProvenance(dataRepo, "branchA", commitA.ID), client.NewCommitProvenance(dataRepo, "branchB", commitB2.ID), })) // and ensure that the file now has the info from the correct versions of the commits iter, err = c.FlushCommit([]*pfs.Commit{commitA, commitB2}, nil) require.NoError(t, err) commits = collectCommitInfos(t, iter) require.Equal(t, 1, len(commits)) buffer = bytes.Buffer{} require.NoError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "file", 0, 0, &buffer)) require.Equal(t, "data A\ndata B\ndata B2\n", buffer.String()) // make sure no commits with this provenance combination exist iter, err = c.FlushCommit([]*pfs.Commit{commitA2, commitB}, nil) require.NoError(t, err) commits = collectCommitInfos(t, iter) require.Equal(t, 0, len(commits)) }) // Test on pipeline with no commits t.Run("RunPipelineEmpty", func(t *testing.T) { dataRepo := tu.UniqueString("TestRunPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) pipeline := tu.UniqueString("empty-pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", nil, nil, nil, nil, "", false, )) // we should have two jobs ji, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 0, len(ji)) // now run the pipeline require.YesError(t, c.RunPipeline(pipeline, nil)) }) // Test on unrelated branch t.Run("RunPipelineUnrelated", func(t *testing.T) { dataRepo := tu.UniqueString("TestRunPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) branchA := "branchA" branchB := "branchB" pipeline := tu.UniqueString("unrelated-pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cat /pfs/branch-a/file >> /pfs/out/file", "cat /pfs/branch-b/file >> /pfs/out/file", "echo ran-pipeline", }, nil, client.NewCrossInput( client.NewPFSInputOpts("branch-a", dataRepo, branchA, "/*", "", false), client.NewPFSInputOpts("branch-b", dataRepo, branchB, "/*", "", false), ), "", false, )) commitA, err := c.StartCommit(dataRepo, branchA) require.NoError(t, err) 
c.PutFile(dataRepo, commitA.ID, "/file", strings.NewReader("data A\n")) c.FinishCommit(dataRepo, commitA.ID) commitM, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) err = c.FinishCommit(dataRepo, commitM.ID) require.NoError(t, err) require.NoError(t, c.CreateBranch(dataRepo, "unrelated", "", nil)) commitU, err := c.StartCommit(dataRepo, "unrelated") require.NoError(t, err) err = c.FinishCommit(dataRepo, commitU.ID) require.NoError(t, err) _, err = c.FlushCommit([]*pfs.Commit{commitA, commitM, commitU}, nil) require.NoError(t, err) // now run the pipeline with unrelated provenance require.YesError(t, c.RunPipeline(pipeline, []*pfs.CommitProvenance{ client.NewCommitProvenance(dataRepo, "unrelated", commitU.ID)})) }) // Test with downstream pipeline t.Run("RunPipelineDownstream", func(t *testing.T) { dataRepo := tu.UniqueString("TestRunPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) branchA := "branchA" branchB := "branchB" pipeline := tu.UniqueString("original-pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cat /pfs/branch-a/file >> /pfs/out/file", "cat /pfs/branch-b/file >> /pfs/out/file", "echo ran-pipeline", }, nil, client.NewCrossInput( client.NewPFSInputOpts("branch-a", dataRepo, branchA, "/*", "", false), client.NewPFSInputOpts("branch-b", dataRepo, branchB, "/*", "", false), ), "", false, )) commitA, err := c.StartCommit(dataRepo, branchA) require.NoError(t, err) c.PutFile(dataRepo, commitA.ID, "/file", strings.NewReader("data A\n")) c.FinishCommit(dataRepo, commitA.ID) commitB, err := c.StartCommit(dataRepo, branchB) require.NoError(t, err) c.PutFile(dataRepo, commitB.ID, "/file", strings.NewReader("data B\n")) c.FinishCommit(dataRepo, commitB.ID) iter, err := c.FlushCommit([]*pfs.Commit{commitA, commitB}, nil) require.NoError(t, err) commits := collectCommitInfos(t, iter) require.Equal(t, 1, len(commits)) buffer := bytes.Buffer{} require.NoError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "file", 0, 0, &buffer)) require.Equal(t, "data A\ndata B\n", buffer.String()) // and make sure we can attatch a downstream pipeline downstreamPipeline := tu.UniqueString("downstream-pipeline") require.NoError(t, c.CreatePipeline( downstreamPipeline, "", []string{"/bin/bash"}, []string{"cp " + fmt.Sprintf("/pfs/%s/*", pipeline) + " /pfs/out/"}, nil, client.NewPFSInput(pipeline, "/*"), "", false, )) commitA2, err := c.StartCommit(dataRepo, branchA) require.NoError(t, err) err = c.FinishCommit(dataRepo, commitA2.ID) require.NoError(t, err) // there should be one job on the old commit for downstreamPipeline jobInfos, err := c.FlushJobAll([]*pfs.Commit{commitA}, []string{downstreamPipeline}) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) // now run the pipeline require.NoError(t, backoff.Retry(func() error { return c.RunPipeline(pipeline, []*pfs.CommitProvenance{ client.NewCommitProvenance(dataRepo, branchA, commitA.ID), }) }, backoff.NewTestingBackOff())) // the downstream pipeline shouldn't have any new jobs, since runpipeline jobs don't propagate jobInfos, err = c.FlushJobAll([]*pfs.Commit{commitA}, []string{downstreamPipeline}) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) }) // Test with a downstream pipeline who's upstream has no datum, but where the downstream still needs to succeed t.Run("RunPipelineEmptyUpstream", func(t *testing.T) { dataRepo := tu.UniqueString("TestRunPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) branchA := "branchA" branchB := "branchB" pipeline 
:= tu.UniqueString("pipeline-downstream") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cat /pfs/branch-a/file >> /pfs/out/file", "cat /pfs/branch-b/file >> /pfs/out/file", "echo ran-pipeline", }, nil, client.NewCrossInput( client.NewPFSInputOpts("branch-a", dataRepo, branchA, "/*", "", false), client.NewPFSInputOpts("branch-b", dataRepo, branchB, "/*", "", false), ), "", false, )) commitA, err := c.StartCommit(dataRepo, branchA) require.NoError(t, err) c.PutFile(dataRepo, commitA.ID, "/file", strings.NewReader("data A\n")) c.FinishCommit(dataRepo, commitA.ID) iter, err := c.FlushCommit([]*pfs.Commit{commitA}, nil) require.NoError(t, err) commits := collectCommitInfos(t, iter) require.Equal(t, 1, len(commits)) buffer := bytes.Buffer{} // no commit to branch-b so "file" should not exist require.YesError(t, c.GetFile(commits[0].Commit.Repo.Name, commits[0].Commit.ID, "file", 0, 0, &buffer)) // and make sure we can attatch a downstream pipeline downstreamPipeline := tu.UniqueString("pipelinedownstream") require.NoError(t, c.CreatePipeline( downstreamPipeline, "", []string{"/bin/bash"}, []string{ "cat /pfs/branch-a/file >> /pfs/out/file", fmt.Sprintf("cat /pfs/%s/file >> /pfs/out/file", pipeline), "echo ran-pipeline", }, nil, client.NewUnionInput( client.NewPFSInputOpts("branch-a", dataRepo, branchA, "/*", "", false), client.NewPFSInput(pipeline, "/*"), ), "", false, )) commitA2, err := c.StartCommit(dataRepo, branchA) require.NoError(t, err) err = c.FinishCommit(dataRepo, commitA2.ID) require.NoError(t, err) // there should be one job on the old commit for downstreamPipeline jobInfos, err := c.FlushJobAll([]*pfs.Commit{commitA}, []string{downstreamPipeline}) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) // now run the pipeline require.NoError(t, backoff.Retry(func() error { return c.RunPipeline(pipeline, []*pfs.CommitProvenance{ client.NewCommitProvenance(dataRepo, branchA, commitA.ID), }) }, backoff.NewTestingBackOff())) buffer2 := bytes.Buffer{} require.NoError(t, c.GetFile(jobInfos[0].OutputCommit.Repo.Name, jobInfos[0].OutputCommit.ID, "file", 0, 0, &buffer2)) // the union of an empty output and datA should only return a file with "data A" in it. require.Equal(t, "data A\n", buffer2.String()) // add another commit to see that we can successfully do the cross and union together commitB, err := c.StartCommit(dataRepo, branchB) require.NoError(t, err) c.PutFile(dataRepo, commitB.ID, "/file", strings.NewReader("data B\n")) c.FinishCommit(dataRepo, commitB.ID) _, err = c.FlushCommit([]*pfs.Commit{commitA, commitB}, nil) jobInfos, err = c.FlushJobAll([]*pfs.Commit{commitB}, []string{downstreamPipeline}) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) buffer3 := bytes.Buffer{} require.NoError(t, c.GetFile(jobInfos[0].OutputCommit.Repo.Name, jobInfos[0].OutputCommit.ID, "file", 0, 0, &buffer3)) // now that we've added data to the other branch of the cross, we should see the union of data A along with the the crossed data. 
require.Equal(t, "data A\ndata A\ndata B\n", buffer3.String()) }) // Test on commits from the same branch t.Run("RunPipelineSameBranch", func(t *testing.T) { dataRepo := tu.UniqueString("TestRunPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) branchA := "branchA" branchB := "branchB" pipeline := tu.UniqueString("sameBranch-pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cat /pfs/branch-a/file >> /pfs/out/file", "cat /pfs/branch-b/file >> /pfs/out/file", "echo ran-pipeline", }, nil, client.NewCrossInput( client.NewPFSInputOpts("branch-a", dataRepo, branchA, "/*", "", false), client.NewPFSInputOpts("branch-b", dataRepo, branchB, "/*", "", false), ), "", false, )) commitA1, err := c.StartCommit(dataRepo, branchA) require.NoError(t, err) c.PutFile(dataRepo, commitA1.ID, "/file", strings.NewReader("data A1\n")) c.FinishCommit(dataRepo, commitA1.ID) commitA2, err := c.StartCommit(dataRepo, branchA) require.NoError(t, err) c.PutFile(dataRepo, commitA2.ID, "/file", strings.NewReader("data A2\n")) c.FinishCommit(dataRepo, commitA2.ID) _, err = c.FlushCommit([]*pfs.Commit{commitA1, commitA2}, nil) require.NoError(t, err) // now run the pipeline with provenance from the same branch require.YesError(t, c.RunPipeline(pipeline, []*pfs.CommitProvenance{ client.NewCommitProvenance(dataRepo, branchA, commitA1.ID), client.NewCommitProvenance(dataRepo, branchA, commitA2.ID)}, )) }) // Test on pipeline that should always fail t.Run("RerunPipeline", func(t *testing.T) { dataRepo := tu.UniqueString("TestRerunPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) // jobs on this pipeline should always fail pipeline := tu.UniqueString("rerun-pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{"false"}, nil, client.NewPFSInputOpts("branch-a", dataRepo, "branchA", "/*", "", false), "", false, )) commitA1, err := c.StartCommit(dataRepo, "branchA") require.NoError(t, err) c.PutFile(dataRepo, commitA1.ID, "/file", strings.NewReader("data A1\n")) c.FinishCommit(dataRepo, commitA1.ID) iter, err := c.FlushCommit([]*pfs.Commit{commitA1}, nil) require.NoError(t, err) commits := collectCommitInfos(t, iter) require.Equal(t, 1, len(commits)) // now run the pipeline require.NoError(t, c.RunPipeline(pipeline, nil)) // running the pipeline should create a new job require.NoError(t, backoff.Retry(func() error { jobInfos, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) if len(jobInfos) != 2 { return fmt.Errorf("expected 2 jobs, got %d", len(jobInfos)) } // but both of these jobs should fail for i, job := range jobInfos { if "JOB_FAILURE" != job.State.String() { return fmt.Errorf("expected job %v to fail, but got %v", i, job.State.String()) } } return nil }, backoff.NewTestingBackOff())) // Shouldn't error if you try to delete an already deleted pipeline require.NoError(t, c.DeletePipeline(pipeline, false)) require.NoError(t, c.DeletePipeline(pipeline, false)) }) } func TestPipelineFailure(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineFailure_data") require.NoError(t, c.CreateRepo(dataRepo)) commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) pipeline := tu.UniqueString("pipeline") require.NoError(t, 
c.CreatePipeline( pipeline, "", []string{"exit 1"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) var jobInfos []*pps.JobInfo require.NoError(t, backoff.Retry(func() error { jobInfos, err = c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) if len(jobInfos) != 1 { return fmt.Errorf("expected 1 jobs, got %d", len(jobInfos)) } return nil }, backoff.NewTestingBackOff())) jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{ Job: jobInfos[0].Job, BlockState: true, }) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State) require.True(t, strings.Contains(jobInfo.Reason, "datum")) } func TestPipelineErrorHandling(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineErrorHandling_data") require.NoError(t, c.CreateRepo(dataRepo)) _, err := c.PutFile(dataRepo, "master", "file1", strings.NewReader("foo\n")) require.NoError(t, err) _, err = c.PutFile(dataRepo, "master", "file2", strings.NewReader("bar\n")) require.NoError(t, err) _, err = c.PutFile(dataRepo, "master", "file3", strings.NewReader("bar\n")) require.NoError(t, err) // In this pipeline, we'll have a command that fails for files 2 and 3, and an error handler that fails for file 2 pipeline := tu.UniqueString("pipeline1") _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{"if", fmt.Sprintf("[ -a pfs/%v/file1 ]", dataRepo), "then", "exit 0", "fi", "exit 1"}, ErrCmd: []string{"bash"}, ErrStdin: []string{"if", fmt.Sprintf("[ -a pfs/%v/file3 ]", dataRepo), "then", "exit 0", "fi", "exit 1"}, }, Input: client.NewPFSInput(dataRepo, "/*"), }) require.NoError(t, err) var jobInfos []*pps.JobInfo require.NoError(t, backoff.Retry(func() error { jobInfos, err = c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) if len(jobInfos) != 1 { return fmt.Errorf("expected 1 job, got %d", len(jobInfos)) } return nil }, backoff.NewTestingBackOff())) jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{ Job: jobInfos[0].Job, BlockState: true, }) require.NoError(t, err) // We expect the job to fail, and have 1 datum processed, recovered, and failed each require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State) require.Equal(t, int64(1), jobInfo.DataProcessed) require.Equal(t, int64(1), jobInfo.DataRecovered) require.Equal(t, int64(1), jobInfo.DataFailed) // For this pipeline, we have the same command as before, but this time the error handling passes for all pipeline = tu.UniqueString("pipeline2") _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{"if", fmt.Sprintf("[ -a pfs/%v/file1 ]", dataRepo), "then", "exit 0", "fi", "exit 1"}, ErrCmd: []string{"true"}, }, Input: client.NewPFSInput(dataRepo, "/*"), }) require.NoError(t, err) require.NoError(t, backoff.Retry(func() error { jobInfos, err = c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) if len(jobInfos) != 1 { return fmt.Errorf("expected 1 job, got %d", len(jobInfos)) } return nil }, backoff.NewTestingBackOff())) jobInfo, err = c.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{ Job: 
jobInfos[0].Job, BlockState: true, }) require.NoError(t, err) // so we expect the job to succeed, and to have recovered 2 datums require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State) require.Equal(t, int64(1), jobInfo.DataProcessed) require.Equal(t, int64(2), jobInfo.DataRecovered) require.Equal(t, int64(0), jobInfo.DataFailed) } func TestEgressFailure(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestEgressFailure_data") require.NoError(t, c.CreateRepo(dataRepo)) commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) // This pipeline should fail because the egress URL is invalid pipeline := tu.UniqueString("pipeline") _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, }, Input: client.NewPFSInput(dataRepo, "/"), Egress: &pps.Egress{URL: "invalid://blahblah"}, }) require.NoError(t, err) var jobInfos []*pps.JobInfo require.NoError(t, backoff.Retry(func() error { jobInfos, err = c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) if len(jobInfos) != 1 { return fmt.Errorf("expected 1 jobs, got %d", len(jobInfos)) } return nil }, backoff.NewTestingBackOff())) jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{ Job: jobInfos[0].Job, BlockState: true, }) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State) require.True(t, strings.Contains(jobInfo.Reason, "egress")) } func TestLazyPipelinePropagation(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestLazyPipelinePropagation_data") require.NoError(t, c.CreateRepo(dataRepo)) pipelineA := tu.UniqueString("pipeline-A") require.NoError(t, c.CreatePipeline( pipelineA, "", []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInputOpts("", dataRepo, "", "/*", "", true), "", false, )) pipelineB := tu.UniqueString("pipeline-B") require.NoError(t, c.CreatePipeline( pipelineB, "", []string{"cp", path.Join("/pfs", pipelineA, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInputOpts("", pipelineA, "", "/*", "", true), "", false, )) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, commit1.ID)}, nil) require.NoError(t, err) collectCommitInfos(t, commitIter) jobInfos, err := c.ListJob(pipelineA, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) require.NotNil(t, jobInfos[0].Input.Pfs) require.Equal(t, true, jobInfos[0].Input.Pfs.Lazy) jobInfos, err = c.ListJob(pipelineB, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) require.NotNil(t, jobInfos[0].Input.Pfs) require.Equal(t, true, jobInfos[0].Input.Pfs.Lazy) } func TestLazyPipeline(t *testing.T) { if testing.Short() 
{ t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestLazyPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Glob: "/", Lazy: true, }, }, }) require.NoError(t, err) // Do a commit commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n")) require.NoError(t, err) // We put 2 files, 1 of which will never be touched by the pipeline code. // This is an important part of the correctness of this test because the // job-shim sets up a goro for each pipe, pipes that are never opened will // leak but that shouldn't prevent the job from completing. _, err = c.PutFile(dataRepo, "master", "file2", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) buffer := bytes.Buffer{} require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer)) require.Equal(t, "foo\n", buffer.String()) } func TestEmptyFiles(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestShufflePipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ fmt.Sprintf("if [ -s /pfs/%s/file]; then exit 1; fi", dataRepo), fmt.Sprintf("ln -s /pfs/%s/file /pfs/out/file", dataRepo), }, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Glob: "/*", EmptyFiles: true, }, }, }) require.NoError(t, err) // Do a commit commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) buffer := bytes.Buffer{} require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer)) require.Equal(t, "foo\n", buffer.String()) } // There's an issue where if you use cp with certain flags, it might copy // special files without reading from them. In our case, we use named pipes // to simulate lazy files, so the pipes themselves might get copied into // the output directory, blocking upload. // // We've updated the code such that we are able to detect if the files we // are uploading are pipes, and make the job fail in that case. 
func TestLazyPipelineCPPipes(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestLazyPipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	// create pipeline
	pipeline := tu.UniqueString("pipeline")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				// cp with the -r flag copies the named pipe itself rather than
				// reading from it, which is the failure mode this test exercises.
				Cmd: []string{"cp", "-r", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
			},
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Input: &pps.Input{
				Pfs: &pps.PFSInput{
					Repo: dataRepo,
					Glob: "/",
					Lazy: true,
				},
			},
		})
	require.NoError(t, err)
	// Do a commit
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	// wait for job to spawn
	time.Sleep(15 * time.Second)
	var jobID string
	require.NoError(t, backoff.Retry(func() error {
		jobInfos, err := c.ListJob(pipeline, nil, nil, -1, true)
		if err != nil {
			return err
		}
		if len(jobInfos) != 1 {
			return fmt.Errorf("len(jobInfos) should be 1")
		}
		jobID = jobInfos[0].Job.ID
		jobInfo, err := c.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{
			Job:        client.NewJob(jobID),
			BlockState: true,
		})
		if err != nil {
			return err
		}
		if jobInfo.State != pps.JobState_JOB_FAILURE {
			return fmt.Errorf("job did not fail, even though it tried to copy " +
				"pipes, which should be disallowed by Pachyderm")
		}
		return nil
	}, backoff.NewTestingBackOff()))
}

// TestProvenance creates a pipeline DAG that's not a transitive reduction.
// It looks like this:
// A
// | \
// v  v
// B-->C
// When we commit to A we expect to see 1 commit on C rather than 2.
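//
// A hedged sketch of how that expectation could also be checked directly via
// commit provenance (illustrative only; aRepo and cPipeline are the variables
// defined in the test below, and the provenance field layout shown here follows
// the 1.x pfs API, which may differ between Pachyderm versions):
//
//	cCommitInfo, err := c.InspectCommit(cPipeline, "master")
//	if err != nil {
//		return err
//	}
//	// C's head commit should reference the triggering A commit exactly once,
//	// even though C consumes A both directly and via B.
//	seen := 0
//	for _, p := range cCommitInfo.Provenance {
//		if p.Repo.Name == aRepo {
//			seen++
//		}
//	}
//	// expect seen == 1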
func TestProvenance(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) aRepo := tu.UniqueString("A") require.NoError(t, c.CreateRepo(aRepo)) bPipeline := tu.UniqueString("B") require.NoError(t, c.CreatePipeline( bPipeline, "", []string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(aRepo, "/*"), "", false, )) cPipeline := tu.UniqueString("C") require.NoError(t, c.CreatePipeline( cPipeline, "", []string{"sh"}, []string{fmt.Sprintf("diff %s %s >/pfs/out/file", path.Join("/pfs", aRepo, "file"), path.Join("/pfs", bPipeline, "file"))}, &pps.ParallelismSpec{ Constant: 1, }, client.NewCrossInput( client.NewPFSInput(aRepo, "/*"), client.NewPFSInput(bPipeline, "/*"), ), "", false, )) // commit to aRepo commit1, err := c.StartCommit(aRepo, "master") require.NoError(t, err) _, err = c.PutFile(aRepo, commit1.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(aRepo, commit1.ID)) commit2, err := c.StartCommit(aRepo, "master") require.NoError(t, err) _, err = c.PutFile(aRepo, commit2.ID, "file", strings.NewReader("bar\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(aRepo, commit2.ID)) aCommit := commit2 commitIter, err := c.FlushCommit([]*pfs.Commit{aCommit}, []*pfs.Repo{client.NewRepo(bPipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) bCommit := commitInfos[0].Commit commitIter, err = c.FlushCommit([]*pfs.Commit{aCommit, bCommit}, nil) require.NoError(t, err) commitInfos = collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) cCommitInfo := commitInfos[0] require.Equal(t, uint64(0), cCommitInfo.SizeBytes) // We should only see two commits in aRepo commitInfos, err = c.ListCommit(aRepo, "master", "", 0) require.NoError(t, err) require.Equal(t, 2, len(commitInfos)) // There are three commits in the pipeline repos (two from input commits, and // one from the CreatePipeline call that created each repo) commitInfos, err = c.ListCommit(bPipeline, "master", "", 0) require.NoError(t, err) require.Equal(t, 2, len(commitInfos)) commitInfos, err = c.ListCommit(cPipeline, "master", "", 0) require.NoError(t, err) require.Equal(t, 2, len(commitInfos)) } // TestProvenance2 tests the following DAG: // A // / \ // B C // \ / // D func TestProvenance2(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) aRepo := tu.UniqueString("A") require.NoError(t, c.CreateRepo(aRepo)) bPipeline := tu.UniqueString("B") require.NoError(t, c.CreatePipeline( bPipeline, "", []string{"cp", path.Join("/pfs", aRepo, "bfile"), "/pfs/out/bfile"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(aRepo, "/b*"), "", false, )) cPipeline := tu.UniqueString("C") require.NoError(t, c.CreatePipeline( cPipeline, "", []string{"cp", path.Join("/pfs", aRepo, "cfile"), "/pfs/out/cfile"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(aRepo, "/c*"), "", false, )) dPipeline := tu.UniqueString("D") require.NoError(t, c.CreatePipeline( dPipeline, "", []string{"sh"}, []string{ fmt.Sprintf("diff /pfs/%s/bfile /pfs/%s/cfile >/pfs/out/file", bPipeline, cPipeline), }, &pps.ParallelismSpec{ Constant: 1, }, client.NewCrossInput( client.NewPFSInput(bPipeline, "/*"), client.NewPFSInput(cPipeline, "/*"), ), "", false, )) // commit to 
aRepo commit1, err := c.StartCommit(aRepo, "master") require.NoError(t, err) _, err = c.PutFile(aRepo, commit1.ID, "bfile", strings.NewReader("foo\n")) require.NoError(t, err) _, err = c.PutFile(aRepo, commit1.ID, "cfile", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(aRepo, commit1.ID)) commit2, err := c.StartCommit(aRepo, "master") require.NoError(t, err) _, err = c.PutFile(aRepo, commit2.ID, "bfile", strings.NewReader("bar\n")) require.NoError(t, err) _, err = c.PutFile(aRepo, commit2.ID, "cfile", strings.NewReader("bar\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(aRepo, commit2.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{commit2}, []*pfs.Repo{client.NewRepo(dPipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) // We should only see two commits in each repo. commitInfos, err = c.ListCommit(bPipeline, "master", "", 0) require.NoError(t, err) require.Equal(t, 2, len(commitInfos)) commitInfos, err = c.ListCommit(cPipeline, "master", "", 0) require.NoError(t, err) require.Equal(t, 2, len(commitInfos)) commitInfos, err = c.ListCommit(dPipeline, "master", "", 0) require.NoError(t, err) require.Equal(t, 2, len(commitInfos)) for _, commitInfo := range commitInfos { commit := commitInfo.Commit buffer := bytes.Buffer{} require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, "file", 0, 0, &buffer)) require.Equal(t, "", buffer.String()) } } // TestStopPipelineExtraCommit generates the following DAG: // A -> B -> C // and ensures that calling StopPipeline on B does not create an commit in C. func TestStopPipelineExtraCommit(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) aRepo := tu.UniqueString("A") require.NoError(t, c.CreateRepo(aRepo)) bPipeline := tu.UniqueString("B") require.NoError(t, c.CreatePipeline( bPipeline, "", []string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(aRepo, "/*"), "", false, )) cPipeline := tu.UniqueString("C") require.NoError(t, c.CreatePipeline( cPipeline, "", []string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(bPipeline, "/*"), "", false, )) // commit to aRepo commit1, err := c.StartCommit(aRepo, "master") require.NoError(t, err) _, err = c.PutFile(aRepo, commit1.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(aRepo, commit1.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, []*pfs.Repo{client.NewRepo(bPipeline), client.NewRepo(cPipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) // We should only see one commit in aRepo, bPipeline, and cPipeline commitInfos, err = c.ListCommit(aRepo, "master", "", 0) require.NoError(t, err) require.Equal(t, 1, len(commitInfos)) commitInfos, err = c.ListCommit(bPipeline, "master", "", 0) require.NoError(t, err) require.Equal(t, 1, len(commitInfos)) commitInfos, err = c.ListCommit(cPipeline, "master", "", 0) require.NoError(t, err) require.Equal(t, 1, len(commitInfos)) require.NoError(t, c.StopPipeline(bPipeline)) commitInfos, err = c.ListCommit(cPipeline, "master", "", 0) require.NoError(t, err) require.Equal(t, 1, len(commitInfos)) } // TestFlushCommit func TestFlushCommit(t *testing.T) { if testing.Short() { t.Skip("Skipping 
integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) prefix := tu.UniqueString("repo") makeRepoName := func(i int) string { return fmt.Sprintf("%s-%d", prefix, i) } sourceRepo := makeRepoName(0) require.NoError(t, c.CreateRepo(sourceRepo)) // Create a five-stage pipeline numStages := 5 for i := 0; i < numStages; i++ { repo := makeRepoName(i) require.NoError(t, c.CreatePipeline( makeRepoName(i+1), "", []string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(repo, "/*"), "", false, )) } for i := 0; i < 10; i++ { commit, err := c.StartCommit(sourceRepo, "master") require.NoError(t, err) _, err = c.PutFile(sourceRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(sourceRepo, commit.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, numStages, len(commitInfos)) jobInfos, err := c.FlushJobAll([]*pfs.Commit{client.NewCommit(sourceRepo, commit.ID)}, nil) require.NoError(t, err) require.Equal(t, numStages, len(jobInfos)) } } func TestFlushCommitFailures(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestFlushCommitFailures") require.NoError(t, c.CreateRepo(dataRepo)) prefix := tu.UniqueString("TestFlushCommitFailures") pipelineName := func(i int) string { return prefix + fmt.Sprintf("%d", i) } require.NoError(t, c.CreatePipeline( pipelineName(0), "", []string{"sh"}, []string{fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo)}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) require.NoError(t, c.CreatePipeline( pipelineName(1), "", []string{"sh"}, []string{ fmt.Sprintf("if [ -f /pfs/%s/file1 ]; then exit 1; fi", pipelineName(0)), fmt.Sprintf("cp /pfs/%s/* /pfs/out/", pipelineName(0)), }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(pipelineName(0), "/*"), "", false, )) require.NoError(t, c.CreatePipeline( pipelineName(2), "", []string{"sh"}, []string{fmt.Sprintf("cp /pfs/%s/* /pfs/out/", pipelineName(1))}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(pipelineName(1), "/*"), "", false, )) for i := 0; i < 2; i++ { commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) jobInfos, err := c.FlushJobAll([]*pfs.Commit{client.NewCommit(dataRepo, commit.ID)}, nil) require.NoError(t, err) require.Equal(t, 3, len(jobInfos)) if i == 0 { for _, ji := range jobInfos { require.Equal(t, pps.JobState_JOB_SUCCESS.String(), ji.State.String()) } } else { for _, ji := range jobInfos { if ji.Pipeline.Name != pipelineName(0) { require.Equal(t, pps.JobState_JOB_FAILURE.String(), ji.State.String()) } } } } } func TestFlushCommitAfterCreatePipeline(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) repo := tu.UniqueString("data") require.NoError(t, c.CreateRepo(repo)) var commit *pfs.Commit var err error for i := 0; i < 10; i++ { commit, err = c.StartCommit(repo, "") require.NoError(t, err) _, err = c.PutFile(repo, commit.ID, "file", 
strings.NewReader(fmt.Sprintf("foo%d\n", i))) require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, commit.ID)) } require.NoError(t, c.SetBranch(repo, commit.ID, "master")) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(repo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(repo, "master")}, nil) require.NoError(t, err) collectCommitInfos(t, commitIter) } // TestRecreatePipeline tracks #432 func TestRecreatePipeline(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) repo := tu.UniqueString("data") require.NoError(t, c.CreateRepo(repo)) commit, err := c.StartCommit(repo, "master") require.NoError(t, err) _, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, commit.ID)) pipeline := tu.UniqueString("pipeline") createPipeline := func() { require.NoError(t, c.CreatePipeline( pipeline, "", []string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(repo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) require.Equal(t, 1, len(collectCommitInfos(t, commitIter))) } // Do it twice. We expect jobs to be created on both runs. createPipeline() time.Sleep(5 * time.Second) require.NoError(t, c.DeletePipeline(pipeline, false)) time.Sleep(5 * time.Second) createPipeline() } func TestDeletePipeline(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) repo := tu.UniqueString("data") require.NoError(t, c.CreateRepo(repo)) commit, err := c.StartCommit(repo, "master") require.NoError(t, err) _, err = c.PutFile(repo, commit.ID, uuid.NewWithoutDashes(), strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, commit.ID)) pipelines := []string{tu.UniqueString("TestDeletePipeline1"), tu.UniqueString("TestDeletePipeline2")} createPipelines := func() { require.NoError(t, c.CreatePipeline( pipelines[0], "", []string{"sleep", "20"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(repo, "/*"), "", false, )) require.NoError(t, c.CreatePipeline( pipelines[1], "", []string{"sleep", "20"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(pipelines[0], "/*"), "", false, )) time.Sleep(10 * time.Second) // Wait for the pipeline to start running require.NoErrorWithinTRetry(t, 90*time.Second, func() error { pipelineInfos, err := c.ListPipeline() if err != nil { return err } // Check number of pipelines names := make([]string, 0, len(pipelineInfos)) for _, pi := range pipelineInfos { names = append(names, fmt.Sprintf("(%s, %s)", pi.Pipeline.Name, pi.State)) } if len(pipelineInfos) != 2 { return fmt.Errorf("Expected two pipelines, but got: %+v", names) } // make sure second pipeline is running pipelineInfo, err := c.InspectPipeline(pipelines[1]) if err != nil { return err } if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING { return fmt.Errorf("no running pipeline (only %+v)", names) } return nil }) } createPipelines() deletePipeline := func(pipeline string) { require.NoError(t, c.DeletePipeline(pipeline, false)) time.Sleep(5 * time.Second) // Wait for the 
pipeline to disappear require.NoError(t, backoff.Retry(func() error { _, err := c.InspectPipeline(pipeline) if err == nil { return fmt.Errorf("expected pipeline to be missing, but it's still present") } return nil }, backoff.NewTestingBackOff())) } // Can't delete a pipeline from the middle of the dag require.YesError(t, c.DeletePipeline(pipelines[0], false)) deletePipeline(pipelines[1]) deletePipeline(pipelines[0]) // The jobs should be gone jobs, err := c.ListJob("", nil, nil, -1, true) require.NoError(t, err) require.Equal(t, len(jobs), 0) // Listing jobs for a deleted pipeline should error _, err = c.ListJob(pipelines[0], nil, nil, -1, true) require.YesError(t, err) createPipelines() // Can force delete pipelines from the middle of the dag. require.NoError(t, c.DeletePipeline(pipelines[0], true)) } func TestPipelineState(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) repo := tu.UniqueString("data") require.NoError(t, c.CreateRepo(repo)) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(repo, "/*"), "", false, )) // Wait for pipeline to get picked up time.Sleep(15 * time.Second) require.NoError(t, backoff.Retry(func() error { pipelineInfo, err := c.InspectPipeline(pipeline) if err != nil { return err } if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING { return fmt.Errorf("pipeline should be in state running, not: %s", pipelineInfo.State.String()) } return nil }, backoff.NewTestingBackOff())) // Stop pipeline and wait for the pipeline to pause require.NoError(t, c.StopPipeline(pipeline)) time.Sleep(5 * time.Second) require.NoError(t, backoff.Retry(func() error { pipelineInfo, err := c.InspectPipeline(pipeline) if err != nil { return err } if !pipelineInfo.Stopped { return fmt.Errorf("pipeline never paused, even though StopPipeline() was called, state: %s", pipelineInfo.State.String()) } return nil }, backoff.NewTestingBackOff())) // Restart pipeline and wait for the pipeline to resume require.NoError(t, c.StartPipeline(pipeline)) time.Sleep(15 * time.Second) require.NoError(t, backoff.Retry(func() error { pipelineInfo, err := c.InspectPipeline(pipeline) if err != nil { return err } if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING { return fmt.Errorf("pipeline never restarted, even though StartPipeline() was called, state: %s", pipelineInfo.State.String()) } return nil }, backoff.NewTestingBackOff())) } func TestPipelineJobCounts(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) repo := tu.UniqueString("data") require.NoError(t, c.CreateRepo(repo)) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"cp", path.Join("/pfs", repo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(repo, "/*"), "", false, )) // Trigger a job by creating a commit commit, err := c.StartCommit(repo, "master") require.NoError(t, err) _, err = c.PutFile(repo, commit.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, commit.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) collectCommitInfos(t, commitIter) jobInfos, err := c.ListJob(pipeline, nil, nil, -1, true) 
require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) inspectJobRequest := &pps.InspectJobRequest{ Job: jobInfos[0].Job, BlockState: true, } ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() //cleanup resources _, err = c.PpsAPIClient.InspectJob(ctx, inspectJobRequest) require.NoError(t, err) // check that the job has been accounted for pipelineInfo, err := c.InspectPipeline(pipeline) require.NoError(t, err) require.Equal(t, int32(1), pipelineInfo.JobCounts[int32(pps.JobState_JOB_SUCCESS)]) } // TODO(msteffen): This test breaks the suite when run against cloud providers, // because killing the pachd pod breaks the connection with pachctl port-forward func TestDeleteAfterMembershipChange(t *testing.T) { t.Skip("This is causing intermittent CI failures") test := func(up bool) { repo := tu.UniqueString("TestDeleteAfterMembershipChange") c := getPachClient(t) require.NoError(t, c.DeleteAll()) require.NoError(t, c.CreateRepo(repo)) _, err := c.StartCommit(repo, "master") require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, "master")) scalePachdRandom(t, up) c = getUsablePachClient(t) require.NoError(t, c.DeleteRepo(repo, false)) } test(true) test(false) } // TODO(msteffen): This test breaks the suite when run against cloud providers, // because killing the pachd pod breaks the connection with pachctl port-forward func TestPachdRestartResumesRunningJobs(t *testing.T) { t.Skip("This is causing intermittent CI failures") // this test cannot be run in parallel because it restarts everything which breaks other tests. c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPachdRestartPickUpRunningJobs") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"bash"}, []string{ "sleep 10", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/"), "", false, )) commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) time.Sleep(5 * time.Second) jobInfos, err := c.ListJob(pipelineName, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) require.EqualOneOf(t, []pps.JobState{pps.JobState_JOB_RUNNING, pps.JobState_JOB_MERGING}, jobInfos[0].State) restartOne(t) // need a new client because the old one will have a defunct connection c = getUsablePachClient(t) jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State) } // TestUpdatePipelineThatHasNoOutput tracks #1637 func TestUpdatePipelineThatHasNoOutput(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestUpdatePipelineThatHasNoOutput") require.NoError(t, c.CreateRepo(dataRepo)) commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"sh"}, []string{"exit 1"}, nil, client.NewPFSInput(dataRepo, "/"), "", false, )) // Wait for job to spawn var jobInfos 
[]*pps.JobInfo time.Sleep(10 * time.Second) require.NoError(t, backoff.Retry(func() error { var err error jobInfos, err = c.ListJob(pipeline, nil, nil, -1, true) if err != nil { return err } if len(jobInfos) < 1 { return fmt.Errorf("job not spawned") } return nil }, backoff.NewTestingBackOff())) jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State) // Now we update the pipeline require.NoError(t, c.CreatePipeline( pipeline, "", []string{"sh"}, []string{"exit 1"}, nil, client.NewPFSInput(dataRepo, "/"), "", true, )) } func TestAcceptReturnCode(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestAcceptReturnCode") require.NoError(t, c.CreateRepo(dataRepo)) commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) pipelineName := tu.UniqueString("pipeline") _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"sh"}, Stdin: []string{"exit 1"}, AcceptReturnCode: []int64{1}, }, Input: client.NewPFSInput(dataRepo, "/*"), }, ) require.NoError(t, err) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) jobInfos, err := c.ListJob(pipelineName, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State) } // TODO(msteffen): This test breaks the suite when run against cloud providers, // because killing the pachd pod breaks the connection with pachctl port-forward func TestRestartAll(t *testing.T) { t.Skip("This is causing intermittent CI failures") // this test cannot be run in parallel because it restarts everything which breaks other tests. 
c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestRestartAll_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) // Do first commit to repo commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) collectCommitInfos(t, commitIter) restartAll(t) // need a new client because the old one will have a defunct connection c = getUsablePachClient(t) // Wait a little for pipelines to restart time.Sleep(10 * time.Second) pipelineInfo, err := c.InspectPipeline(pipelineName) require.NoError(t, err) require.Equal(t, pps.PipelineState_PIPELINE_RUNNING, pipelineInfo.State) _, err = c.InspectRepo(dataRepo) require.NoError(t, err) _, err = c.InspectCommit(dataRepo, commit.ID) require.NoError(t, err) } // TODO(msteffen): This test breaks the suite when run against cloud providers, // because killing the pachd pod breaks the connection with pachctl port-forward func TestRestartOne(t *testing.T) { t.Skip("This is causing intermittent CI failures") // this test cannot be run in parallel because it restarts everything which breaks other tests. c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestRestartOne_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/"), "", false, )) // Do first commit to repo commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) collectCommitInfos(t, commitIter) restartOne(t) // need a new client because the old one will have a defunct connection c = getUsablePachClient(t) _, err = c.InspectPipeline(pipelineName) require.NoError(t, err) _, err = c.InspectRepo(dataRepo) require.NoError(t, err) _, err = c.InspectCommit(dataRepo, commit.ID) require.NoError(t, err) } func TestPrettyPrinting(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPrettyPrinting_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, ResourceRequests: &pps.ResourceSpec{ Memory: "100M", Cpu: 0.5, }, Input: client.NewPFSInput(dataRepo, "/*"), }) require.NoError(t, err) // Do a commit to 
repo commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) repoInfo, err := c.InspectRepo(dataRepo) require.NoError(t, err) require.NoError(t, pfspretty.PrintDetailedRepoInfo(pfspretty.NewPrintableRepoInfo(repoInfo))) for _, commitInfo := range commitInfos { require.NoError(t, pfspretty.PrintDetailedCommitInfo(pfspretty.NewPrintableCommitInfo(commitInfo))) } fileInfo, err := c.InspectFile(dataRepo, commit.ID, "file") require.NoError(t, err) require.NoError(t, pfspretty.PrintDetailedFileInfo(fileInfo)) pipelineInfo, err := c.InspectPipeline(pipelineName) require.NoError(t, err) require.NoError(t, ppspretty.PrintDetailedPipelineInfo(ppspretty.NewPrintablePipelineInfo(pipelineInfo))) jobInfos, err := c.ListJob("", nil, nil, -1, true) require.NoError(t, err) require.True(t, len(jobInfos) > 0) require.NoError(t, ppspretty.PrintDetailedJobInfo(ppspretty.NewPrintableJobInfo(jobInfos[0]))) } func TestDeleteAll(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } // this test cannot be run in parallel because it deletes everything c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestDeleteAll_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/"), "", false, )) // Do commit to repo commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) require.Equal(t, 1, len(collectCommitInfos(t, commitIter))) require.NoError(t, c.DeleteAll()) repoInfos, err := c.ListRepo() require.NoError(t, err) require.Equal(t, 0, len(repoInfos)) pipelineInfos, err := c.ListPipeline() require.NoError(t, err) require.Equal(t, 0, len(pipelineInfos)) jobInfos, err := c.ListJob("", nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 0, len(jobInfos)) } func TestRecursiveCp(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestRecursiveCp_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("TestRecursiveCp") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"sh"}, []string{ fmt.Sprintf("cp -r /pfs/%s /pfs/out", dataRepo), }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) // Do commit to repo commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < 100; i++ { _, err = c.PutFile( dataRepo, commit.ID, fmt.Sprintf("file%d", i), strings.NewReader(strings.Repeat("foo\n", 10000)), ) require.NoError(t, err) } require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) 
require.NoError(t, err)
	require.Equal(t, 1, len(collectCommitInfos(t, commitIter)))
}

func TestPipelineUniqueness(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	repo := tu.UniqueString("data")
	require.NoError(t, c.CreateRepo(repo))
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{""},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewPFSInput(repo, "/"),
		"",
		false,
	))
	err := c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{""},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewPFSInput(repo, "/"),
		"",
		false,
	)
	require.YesError(t, err)
	require.Matches(t, "pipeline .*? already exists", err.Error())
}

func TestUpdatePipeline(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	// create repos
	dataRepo := tu.UniqueString("TestUpdatePipeline_data")
	require.NoError(t, c.CreateRepo(dataRepo))
	pipelineName := tu.UniqueString("pipeline")
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"echo foo >/pfs/out/file"},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewPFSInput(dataRepo, "/*"),
		"",
		true,
	))
	_, err := c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("1"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	iter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, iter)
	var buffer bytes.Buffer
	require.NoError(t, c.GetFile(pipelineName, "master", "file", 0, 0, &buffer))
	require.Equal(t, "foo\n", buffer.String())

	// Update the pipeline
	require.NoError(t, c.CreatePipeline(
		pipelineName,
		"",
		[]string{"bash"},
		[]string{"echo bar >/pfs/out/file"},
		&pps.ParallelismSpec{
			Constant: 1,
		},
		client.NewPFSInput(dataRepo, "/*"),
		"",
		true,
	))
	_, err = c.StartCommit(dataRepo, "master")
	require.NoError(t, err)
	_, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("2"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(dataRepo, "master"))
	iter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
	require.NoError(t, err)
	collectCommitInfos(t, iter)
	buffer.Reset()
	require.NoError(t, c.GetFile(pipelineName, "master", "file", 0, 0, &buffer))
	require.Equal(t, "bar\n", buffer.String())

	// Inspect the jobs to make sure the oldest one still records the original
	// transform (ListJob returns jobs newest-first).
	jis, err := c.ListJob(pipelineName, nil, nil, -1, true)
	require.NoError(t, err)
	require.Equal(t, 3, len(jis))
	require.Equal(t, "echo bar >/pfs/out/file", jis[0].Transform.Stdin[0])
	require.Equal(t, "echo bar >/pfs/out/file", jis[1].Transform.Stdin[0])
	require.Equal(t, "echo foo >/pfs/out/file", jis[2].Transform.Stdin[0])

	// Update the pipeline again, this time with Reprocess: true set.
Now we // should see a different output file _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{"echo buzz >/pfs/out/file"}, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, Input: client.NewPFSInput(dataRepo, "/*"), Update: true, Reprocess: true, }) require.NoError(t, err) iter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) collectCommitInfos(t, iter) buffer.Reset() require.NoError(t, c.GetFile(pipelineName, "master", "file", 0, 0, &buffer)) require.Equal(t, "buzz\n", buffer.String()) } // TestManyPipelineUpdate updates a single pipeline several (currently 8) times, // and watches the jobs it creates to make sure they start and run successfully. // This catches issues with output commit provenance, and any other basic // problems with updating pipelines. It's very slow, so it can only be run // manually func TestManyPipelineUpdate(t *testing.T) { t.Skip(t.Name() + " should only be run manually; it takes ~10m and is too " + "slow for CI") testUpdates := func(reprocess bool) func(t *testing.T) { return func(t *testing.T) { c := getPachClient(t) require.NoError(t, c.DeleteAll()) require.NoError(t, c.GarbageCollect(0)) dataRepo := tu.UniqueString("input-") require.NoError(t, c.CreateRepo(dataRepo)) _, err := c.PutFile(dataRepo, "master", "file", strings.NewReader(fmt.Sprintf("-"))) require.NoError(t, err) pipeline := "p" count := 8 jobsSeen := 0 for i := 0; i < count; i++ { fmt.Printf("creating pipeline %d (reprocess: %t)...", i, reprocess) require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{fmt.Sprintf("echo %d >/pfs/out/f", i)}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", true, )) fmt.Printf("flushing commit...") require.NoErrorWithinTRetry(t, 60*time.Second, func() error { iter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) _, err = iter.Next() if err != nil { if err == io.EOF { return fmt.Errorf("expected %d commits, but only got %d", jobsSeen+1, i) } return err } jis, err := c.ListJob(pipeline, []*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil, 0, false) require.NoError(t, err) if len(jis) < 1 { return fmt.Errorf("expected to see %d jobs, but only saw %d", jobsSeen+1, len(jis)) } jobsSeen = len(jis) return nil }) fmt.Printf("done\n") } } } t.Run("Reprocess", testUpdates(true)) t.Run("NoReprocess", testUpdates(false)) } func TestUpdateFailedPipeline(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestUpdateFailedPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "imagethatdoesntexist", []string{"bash"}, []string{"echo foo >/pfs/out/file"}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) _, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("1")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) // Wait for pod to try and pull the bad image time.Sleep(10 * time.Second) pipelineInfo, err := c.InspectPipeline(pipelineName) require.NoError(t, err) require.Equal(t, 
pps.PipelineState_PIPELINE_FAILURE, pipelineInfo.State) require.NoError(t, c.CreatePipeline( pipelineName, "bash:4", []string{"bash"}, []string{"echo bar >/pfs/out/file"}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", true, )) time.Sleep(10 * time.Second) pipelineInfo, err = c.InspectPipeline(pipelineName) require.NoError(t, err) require.Equal(t, pps.PipelineState_PIPELINE_RUNNING, pipelineInfo.State) // Sanity check run some actual data through the pipeline: _, err = c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("2")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) iter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) collectCommitInfos(t, iter) var buffer bytes.Buffer require.NoError(t, c.GetFile(pipelineName, "master", "file", 0, 0, &buffer)) require.Equal(t, "bar\n", buffer.String()) } func TestUpdateStoppedPipeline(t *testing.T) { // Pipeline should be updated, but should not be restarted if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repo & pipeline dataRepo := tu.UniqueString("TestUpdateStoppedPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"bash"}, []string{"cp /pfs/*/file /pfs/out/file"}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) commits, err := c.ListCommit(pipelineName, "master", "", 0) require.NoError(t, err) require.Equal(t, 0, len(commits)) // Add input data _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo")) require.NoError(t, err) commits, err = c.ListCommit(pipelineName, "master", "", 0) require.NoError(t, err) require.Equal(t, 1, len(commits)) // Make sure the pipeline runs once (i.e. 
it's all the way up) commitIter, err := c.FlushCommit( []*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) // Stop the pipeline (and confirm that it's stopped) require.NoError(t, c.StopPipeline(pipelineName)) pipelineInfo, err := c.InspectPipeline(pipelineName) require.NoError(t, err) require.Equal(t, true, pipelineInfo.Stopped) require.NoError(t, backoff.Retry(func() error { pipelineInfo, err = c.InspectPipeline(pipelineName) if err != nil { return err } if pipelineInfo.State != pps.PipelineState_PIPELINE_PAUSED { return fmt.Errorf("expected pipeline to be in state PAUSED, but was in %s", pipelineInfo.State) } return nil }, backoff.NewTestingBackOff())) commits, err = c.ListCommit(pipelineName, "master", "", 0) require.NoError(t, err) require.Equal(t, 1, len(commits)) // Update shouldn't restart it (wait for version to increment) require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"bash"}, []string{"cp /pfs/*/file /pfs/out/file"}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", true, )) time.Sleep(10 * time.Second) require.NoError(t, backoff.Retry(func() error { pipelineInfo, err = c.InspectPipeline(pipelineName) if err != nil { return err } if pipelineInfo.State != pps.PipelineState_PIPELINE_PAUSED { return fmt.Errorf("expected pipeline to be in state PAUSED, but was in %s", pipelineInfo.State) } if pipelineInfo.Version != 2 { return fmt.Errorf("expected pipeline to be on v2, but was on v%d", pipelineInfo.Version) } return nil }, backoff.NewTestingBackOff())) commits, err = c.ListCommit(pipelineName, "master", "", 0) require.NoError(t, err) require.Equal(t, 1, len(commits)) // Create a commit (to give the pipeline pending work), then start the pipeline _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("bar")) require.NoError(t, err) require.NoError(t, c.StartPipeline(pipelineName)) // Pipeline should start and create a job should succeed -- fix // https://github.com/pachyderm/pachyderm/issues/3934) commitIter, err = c.FlushCommit( []*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) commitInfos = collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) commits, err = c.ListCommit(pipelineName, "master", "", 0) require.NoError(t, err) require.Equal(t, 2, len(commits)) var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf)) require.Equal(t, "foobar", buf.String()) } func TestUpdatePipelineRunningJob(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestUpdatePipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"bash"}, []string{"sleep 1000"}, &pps.ParallelismSpec{ Constant: 2, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) numFiles := 50 commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader("")) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) commit2, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit2.ID, 
fmt.Sprintf("file-%d", i+numFiles), strings.NewReader("")) } require.NoError(t, c.FinishCommit(dataRepo, commit2.ID)) b := backoff.NewTestingBackOff() b.MaxElapsedTime = 30 * time.Second require.NoError(t, backoff.Retry(func() error { jobInfos, err := c.ListJob(pipelineName, nil, nil, -1, true) if err != nil { return err } if len(jobInfos) != 1 { return fmt.Errorf("wrong number of jobs") } state := jobInfos[0].State if state != pps.JobState_JOB_RUNNING && state != pps.JobState_JOB_MERGING { return fmt.Errorf("wrong state: %v for %s", state, jobInfos[0].Job.ID) } return nil }, b)) // Update the pipeline. This will not create a new pipeline as reprocess // isn't set to true. require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"bash"}, []string{"true"}, &pps.ParallelismSpec{ Constant: 2, }, client.NewPFSInput(dataRepo, "/*"), "", true, )) iter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) collectCommitInfos(t, iter) // Currently, commits finish shortly before their respecive JobInfo documents // are updated (the pipeline master receives the commit update and then // updates the JobInfo document). Wait briefly for this to happen time.Sleep(10 * time.Second) jobInfos, err := c.ListJob(pipelineName, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 2, len(jobInfos)) require.Equal(t, pps.JobState_JOB_SUCCESS.String(), jobInfos[0].State.String()) require.Equal(t, pps.JobState_JOB_KILLED.String(), jobInfos[1].State.String()) } func TestManyFilesSingleCommit(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestManyFilesSingleCommit_data") require.NoError(t, c.CreateRepo(dataRepo)) numFiles := 5000 _, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, "master", fmt.Sprintf("file-%d", i), strings.NewReader("")) require.NoError(t, err) } require.NoError(t, c.FinishCommit(dataRepo, "master")) fileInfos, err := c.ListFile(dataRepo, "master", "") require.NoError(t, err) require.Equal(t, numFiles, len(fileInfos)) } func TestManyFilesSingleOutputCommit(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestManyFilesSingleOutputCommit_data") require.NoError(t, c.CreateRepo(dataRepo)) branch := "master" file := "file" // Setup input. _, err := c.StartCommit(dataRepo, branch) require.NoError(t, err) numFiles := 20000 var data string for i := 0; i < numFiles; i++ { data += strconv.Itoa(i) + "\n" } _, err = c.PutFile(dataRepo, branch, file, strings.NewReader(data)) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, branch)) // Setup pipeline. pipelineName := tu.UniqueString("TestManyFilesSingleOutputCommit") _, err = c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"sh"}, Stdin: []string{"while read line; do echo $line > /pfs/out/$line; done < " + path.Join("/pfs", dataRepo, file)}, }, Input: client.NewPFSInput(dataRepo, "/*"), }, ) require.NoError(t, err) // Check results. 
jis, err := c.FlushJobAll([]*pfs.Commit{client.NewCommit(dataRepo, branch)}, nil) require.Equal(t, 1, len(jis)) fileInfos, err := c.ListFile(pipelineName, branch, "") require.NoError(t, err) require.Equal(t, numFiles, len(fileInfos)) fileInfos, err = c.ListFile(pipelineName, branch, "/1*") require.NoError(t, err) require.Equal(t, 11111, len(fileInfos)) fileInfos, err = c.ListFile(pipelineName, branch, "/5*") require.NoError(t, err) require.Equal(t, 1111, len(fileInfos)) fileInfos, err = c.ListFile(pipelineName, branch, "/9*") require.NoError(t, err) require.Equal(t, 1111, len(fileInfos)) } func TestStopPipeline(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) // Stop the pipeline, so it doesn't process incoming commits require.NoError(t, c.StopPipeline(pipelineName)) // Do first commit to repo commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) // wait for 10 seconds and check that no commit has been outputted time.Sleep(10 * time.Second) commits, err := c.ListCommit(pipelineName, "master", "", 0) require.NoError(t, err) require.Equal(t, len(commits), 0) // Restart pipeline, and make sure old commit is processed require.NoError(t, c.StartPipeline(pipelineName)) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) var buffer bytes.Buffer require.NoError(t, c.GetFile(pipelineName, commitInfos[0].Commit.ID, "file", 0, 0, &buffer)) require.Equal(t, "foo\n", buffer.String()) } func TestStandby(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) t.Run("ChainOf10", func(t *testing.T) { require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestStandby_data") require.NoError(t, c.CreateRepo(dataRepo)) numPipelines := 10 pipelines := make([]string, numPipelines) for i := 0; i < numPipelines; i++ { pipelines[i] = tu.UniqueString("TestStandby") input := dataRepo if i > 0 { input = pipelines[i-1] } _, err := c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelines[i]), Transform: &pps.Transform{ Cmd: []string{"true"}, }, Input: client.NewPFSInput(input, "/*"), Standby: true, }, ) require.NoError(t, err) } require.NoErrorWithinTRetry(t, time.Second*30, func() error { pis, err := c.ListPipeline() require.NoError(t, err) var standby int for _, pi := range pis { if pi.State == pps.PipelineState_PIPELINE_STANDBY { standby++ } } if standby != numPipelines { return fmt.Errorf("should have %d pipelines in standby, not %d", numPipelines, standby) } return nil }) _, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) var eg errgroup.Group var finished bool eg.Go(func() error { commitIter, err := 
c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) collectCommitInfos(t, commitIter) finished = true return nil }) eg.Go(func() error { for !finished { pis, err := c.ListPipeline() require.NoError(t, err) var active int for _, pi := range pis { if pi.State != pps.PipelineState_PIPELINE_STANDBY { active++ } } // We tolerate having 2 pipelines out of standby because there's // latency associated with entering and exiting standby. require.True(t, active <= 2, "active: %d", active) } return nil }) eg.Wait() }) t.Run("ManyCommits", func(t *testing.T) { require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestStandby_data") pipeline := tu.UniqueString("TestStandby") require.NoError(t, c.CreateRepo(dataRepo)) _, err := c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"sh"}, Stdin: []string{"echo $PPS_POD_NAME >/pfs/out/pod"}, }, Input: client.NewPFSInput(dataRepo, "/"), Standby: true, }, ) require.NoError(t, err) numCommits := 100 for i := 0; i < numCommits; i++ { _, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) } commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) pod := "" cis, err := c.ListCommit(pipeline, "master", "", 0) require.NoError(t, err) for _, ci := range cis { var buffer bytes.Buffer require.NoError(t, c.GetFile(pipeline, ci.Commit.ID, "pod", 0, 0, &buffer)) if pod == "" { pod = buffer.String() } else { require.True(t, pod == buffer.String(), "multiple pods were used to process commits") } } pi, err := c.InspectPipeline(pipeline) require.NoError(t, err) require.Equal(t, pps.PipelineState_PIPELINE_STANDBY.String(), pi.State.String()) }) } func TestPipelineEnv(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } // make a secret to reference k := tu.GetKubeClient(t) secretName := tu.UniqueString("test-secret") _, err := k.CoreV1().Secrets(v1.NamespaceDefault).Create( &v1.Secret{ ObjectMeta: metav1.ObjectMeta{ Name: secretName, }, Data: map[string][]byte{ "foo": []byte("foo\n"), }, }, ) require.NoError(t, err) c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPipelineEnv_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"sh"}, Stdin: []string{ "ls /var/secret", "cat /var/secret/foo > /pfs/out/foo", "echo $bar> /pfs/out/bar", "echo $foo> /pfs/out/foo_env", fmt.Sprintf("echo $%s >/pfs/out/job_id", client.JobIDEnv), fmt.Sprintf("echo $%s >/pfs/out/output_commit_id", client.OutputCommitIDEnv), fmt.Sprintf("echo $%s >/pfs/out/input", dataRepo), fmt.Sprintf("echo $%s_COMMIT >/pfs/out/input_commit", dataRepo), }, Env: map[string]string{"bar": "bar"}, Secrets: []*pps.Secret{ { Name: secretName, Key: "foo", MountPath: "/var/secret", EnvVar: "foo", }, }, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, Input: client.NewPFSInput(dataRepo, "/*"), }) require.NoError(t, err) // Do first commit to repo _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n")) 
    require.NoError(t, err)
    jis, err := c.FlushJobAll([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(jis))
    var buffer bytes.Buffer
    require.NoError(t, c.GetFile(pipelineName, jis[0].OutputCommit.ID, "foo", 0, 0, &buffer))
    require.Equal(t, "foo\n", buffer.String())
    buffer.Reset()
    require.NoError(t, c.GetFile(pipelineName, jis[0].OutputCommit.ID, "foo_env", 0, 0, &buffer))
    require.Equal(t, "foo\n", buffer.String())
    buffer.Reset()
    require.NoError(t, c.GetFile(pipelineName, jis[0].OutputCommit.ID, "bar", 0, 0, &buffer))
    require.Equal(t, "bar\n", buffer.String())
    buffer.Reset()
    require.NoError(t, c.GetFile(pipelineName, jis[0].OutputCommit.ID, "job_id", 0, 0, &buffer))
    require.Equal(t, fmt.Sprintf("%s\n", jis[0].Job.ID), buffer.String())
    buffer.Reset()
    require.NoError(t, c.GetFile(pipelineName, jis[0].OutputCommit.ID, "output_commit_id", 0, 0, &buffer))
    require.Equal(t, fmt.Sprintf("%s\n", jis[0].OutputCommit.ID), buffer.String())
    buffer.Reset()
    require.NoError(t, c.GetFile(pipelineName, jis[0].OutputCommit.ID, "input", 0, 0, &buffer))
    require.Equal(t, fmt.Sprintf("/pfs/%s/file\n", dataRepo), buffer.String())
    buffer.Reset()
    require.NoError(t, c.GetFile(pipelineName, jis[0].OutputCommit.ID, "input_commit", 0, 0, &buffer))
    require.Equal(t, fmt.Sprintf("%s\n", jis[0].Input.Pfs.Commit), buffer.String())
}

func TestPipelineWithFullObjects(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration tests in short mode")
    }
    c := getPachClient(t)
    require.NoError(t, c.DeleteAll())
    // create repos
    dataRepo := tu.UniqueString("TestPipeline_data")
    require.NoError(t, c.CreateRepo(dataRepo))
    // create pipeline
    pipelineName := tu.UniqueString("pipeline")
    require.NoError(t, c.CreatePipeline(
        pipelineName,
        "",
        []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"},
        nil,
        &pps.ParallelismSpec{
            Constant: 1,
        },
        client.NewPFSInput(dataRepo, "/*"),
        "",
        false,
    ))
    // Do first commit to repo
    commit1, err := c.StartCommit(dataRepo, "master")
    require.NoError(t, err)
    _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n"))
    require.NoError(t, err)
    require.NoError(t, c.FinishCommit(dataRepo, commit1.ID))
    commitInfoIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, commit1.ID)}, nil)
    require.NoError(t, err)
    commitInfos := collectCommitInfos(t, commitInfoIter)
    require.Equal(t, 1, len(commitInfos))
    var buffer bytes.Buffer
    require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
    require.Equal(t, "foo\n", buffer.String())
    // Do second commit to repo
    commit2, err := c.StartCommit(dataRepo, "master")
    require.NoError(t, err)
    _, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n"))
    require.NoError(t, err)
    require.NoError(t, c.FinishCommit(dataRepo, commit2.ID))
    commitInfoIter, err = c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil)
    require.NoError(t, err)
    commitInfos = collectCommitInfos(t, commitInfoIter)
    require.Equal(t, 1, len(commitInfos))
    buffer = bytes.Buffer{}
    require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer))
    require.Equal(t, "foo\nbar\n", buffer.String())
}

func TestPipelineWithExistingInputCommits(t *testing.T) {
    if testing.Short() {
        t.Skip("Skipping integration tests in short mode")
    }
    c := getPachClient(t)
    require.NoError(t, c.DeleteAll())
    // create repos
    dataRepo := tu.UniqueString("TestPipeline_data")
    require.NoError(t, c.CreateRepo(dataRepo))
    // Do first commit to repo
    commit1,
err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) // Do second commit to repo commit2, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("bar\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit2.ID)) // create pipeline pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitInfoIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitInfoIter) require.Equal(t, 1, len(commitInfos)) buffer := bytes.Buffer{} require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buffer)) require.Equal(t, "foo\nbar\n", buffer.String()) // Check that one output commit is created (processing the inputs' head commits) commitInfos, err = c.ListCommit(pipelineName, "master", "", 0) require.NoError(t, err) require.Equal(t, 1, len(commitInfos)) } func TestPipelineThatSymlinks(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"bash"}, []string{ // Symlinks to input files fmt.Sprintf("ln -s /pfs/%s/foo /pfs/out/foo", dataRepo), fmt.Sprintf("ln -s /pfs/%s/dir1/bar /pfs/out/bar", dataRepo), "mkdir /pfs/out/dir", fmt.Sprintf("ln -s /pfs/%s/dir2 /pfs/out/dir/dir2", dataRepo), // Symlinks to external files "echo buzz > /tmp/buzz", "ln -s /tmp/buzz /pfs/out/buzz", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/"), "", false, )) // Do first commit to repo commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "foo", strings.NewReader("foo")) require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "dir1/bar", strings.NewReader("bar")) require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "dir2/foo", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) commitInfoIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitInfoIter) require.Equal(t, 1, len(commitInfos)) // Check that the output files are identical to the input files. 
buffer := bytes.Buffer{} require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "foo", 0, 0, &buffer)) require.Equal(t, "foo", buffer.String()) buffer.Reset() require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "bar", 0, 0, &buffer)) require.Equal(t, "bar", buffer.String()) buffer.Reset() require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "dir/dir2/foo", 0, 0, &buffer)) require.Equal(t, "foo", buffer.String()) buffer.Reset() require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "buzz", 0, 0, &buffer)) require.Equal(t, "buzz\n", buffer.String()) // Make sure that we skipped the upload by checking that the input file // and the output file have the same object refs. inputFooFileInfo, err := c.InspectFile(dataRepo, commit.ID, "foo") require.NoError(t, err) outputFooFileInfo, err := c.InspectFile(pipelineName, commitInfos[0].Commit.ID, "foo") require.NoError(t, err) for i, object := range inputFooFileInfo.Objects { info, err := c.InspectObject(object.Hash) require.NoError(t, err) require.Equal(t, info.BlockRef, outputFooFileInfo.BlockRefs[i]) } inputFooFileInfo, err = c.InspectFile(dataRepo, commit.ID, "dir1/bar") require.NoError(t, err) outputFooFileInfo, err = c.InspectFile(pipelineName, commitInfos[0].Commit.ID, "bar") require.NoError(t, err) for i, object := range inputFooFileInfo.Objects { info, err := c.InspectObject(object.Hash) require.NoError(t, err) require.Equal(t, info.BlockRef, outputFooFileInfo.BlockRefs[i]) } inputFooFileInfo, err = c.InspectFile(dataRepo, commit.ID, "dir2/foo") require.NoError(t, err) outputFooFileInfo, err = c.InspectFile(pipelineName, commitInfos[0].Commit.ID, "dir/dir2/foo") require.NoError(t, err) for i, object := range inputFooFileInfo.Objects { info, err := c.InspectObject(object.Hash) require.NoError(t, err) require.Equal(t, info.BlockRef, outputFooFileInfo.BlockRefs[i]) } } // TestChainedPipelines tracks https://github.com/pachyderm/pachyderm/issues/797 func TestChainedPipelines(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) aRepo := tu.UniqueString("A") require.NoError(t, c.CreateRepo(aRepo)) dRepo := tu.UniqueString("D") require.NoError(t, c.CreateRepo(dRepo)) aCommit, err := c.StartCommit(aRepo, "master") require.NoError(t, err) _, err = c.PutFile(aRepo, "master", "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(aRepo, "master")) dCommit, err := c.StartCommit(dRepo, "master") require.NoError(t, err) _, err = c.PutFile(dRepo, "master", "file", strings.NewReader("bar\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dRepo, "master")) bPipeline := tu.UniqueString("B") require.NoError(t, c.CreatePipeline( bPipeline, "", []string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(aRepo, "/"), "", false, )) cPipeline := tu.UniqueString("C") require.NoError(t, c.CreatePipeline( cPipeline, "", []string{"sh"}, []string{fmt.Sprintf("cp /pfs/%s/file /pfs/out/bFile", bPipeline), fmt.Sprintf("cp /pfs/%s/file /pfs/out/dFile", dRepo)}, &pps.ParallelismSpec{ Constant: 1, }, client.NewCrossInput( client.NewPFSInput(bPipeline, "/"), client.NewPFSInput(dRepo, "/"), ), "", false, )) resultIter, err := c.FlushCommit([]*pfs.Commit{aCommit, dCommit}, nil) require.NoError(t, err) results := collectCommitInfos(t, 
resultIter) require.Equal(t, 1, len(results)) require.Equal(t, cPipeline, results[0].Commit.Repo.Name) var buf bytes.Buffer require.NoError(t, c.GetFile(cPipeline, results[0].Commit.ID, "bFile", 0, 0, &buf)) require.Equal(t, "foo\n", buf.String()) buf.Reset() require.NoError(t, c.GetFile(cPipeline, results[0].Commit.ID, "dFile", 0, 0, &buf)) require.Equal(t, "bar\n", buf.String()) } // DAG: // // A // | // B E // | / // C // | // D func TestChainedPipelinesNoDelay(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) aRepo := tu.UniqueString("A") require.NoError(t, c.CreateRepo(aRepo)) eRepo := tu.UniqueString("E") require.NoError(t, c.CreateRepo(eRepo)) aCommit, err := c.StartCommit(aRepo, "master") require.NoError(t, err) _, err = c.PutFile(aRepo, "master", "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(aRepo, "master")) eCommit, err := c.StartCommit(eRepo, "master") require.NoError(t, err) _, err = c.PutFile(eRepo, "master", "file", strings.NewReader("bar\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(eRepo, "master")) bPipeline := tu.UniqueString("B") require.NoError(t, c.CreatePipeline( bPipeline, "", []string{"cp", path.Join("/pfs", aRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(aRepo, "/"), "", false, )) cPipeline := tu.UniqueString("C") require.NoError(t, c.CreatePipeline( cPipeline, "", []string{"sh"}, []string{fmt.Sprintf("cp /pfs/%s/file /pfs/out/bFile", bPipeline), fmt.Sprintf("cp /pfs/%s/file /pfs/out/eFile", eRepo)}, &pps.ParallelismSpec{ Constant: 1, }, client.NewCrossInput( client.NewPFSInput(bPipeline, "/"), client.NewPFSInput(eRepo, "/"), ), "", false, )) dPipeline := tu.UniqueString("D") require.NoError(t, c.CreatePipeline( dPipeline, "", []string{"sh"}, []string{fmt.Sprintf("cp /pfs/%s/bFile /pfs/out/bFile", cPipeline), fmt.Sprintf("cp /pfs/%s/eFile /pfs/out/eFile", cPipeline)}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(cPipeline, "/"), "", false, )) resultsIter, err := c.FlushCommit([]*pfs.Commit{aCommit, eCommit}, nil) require.NoError(t, err) results := collectCommitInfos(t, resultsIter) require.Equal(t, 2, len(results)) eCommit2, err := c.StartCommit(eRepo, "master") require.NoError(t, err) _, err = c.PutFile(eRepo, "master", "file", strings.NewReader("bar\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(eRepo, "master")) resultsIter, err = c.FlushCommit([]*pfs.Commit{eCommit2}, nil) require.NoError(t, err) results = collectCommitInfos(t, resultsIter) require.Equal(t, 2, len(results)) // Get number of jobs triggered in pipeline D jobInfos, err := c.ListJob(dPipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 2, len(jobInfos)) } func collectCommitInfos(t testing.TB, commitInfoIter client.CommitInfoIterator) []*pfs.CommitInfo { var commitInfos []*pfs.CommitInfo for { commitInfo, err := commitInfoIter.Next() if err == io.EOF { return commitInfos } require.NoError(t, err) commitInfos = append(commitInfos, commitInfo) } } func TestParallelismSpec(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } kubeclient := tu.GetKubeClient(t) nodes, err := kubeclient.CoreV1().Nodes().List(metav1.ListOptions{}) numNodes := len(nodes.Items) // Test Constant strategy parellelism, err := ppsutil.GetExpectedNumWorkers(tu.GetKubeClient(t), &pps.ParallelismSpec{ Constant: 7, }) require.NoError(t, err) 
require.Equal(t, 7, parellelism) // Coefficient == 1 (basic test) // TODO(msteffen): This test can fail when run against cloud providers, if the // remote cluster has more than one node (in which case "Coefficient: 1" will // cause more than 1 worker to start) parellelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{ Coefficient: 1, }) require.NoError(t, err) require.Equal(t, numNodes, parellelism) // Coefficient > 1 parellelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{ Coefficient: 2, }) require.NoError(t, err) require.Equal(t, 2*numNodes, parellelism) // Make sure we start at least one worker parellelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{ Coefficient: 0.01, }) require.NoError(t, err) require.Equal(t, 1, parellelism) // Test 0-initialized JobSpec parellelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, &pps.ParallelismSpec{}) require.NoError(t, err) require.Equal(t, 1, parellelism) // Test nil JobSpec parellelism, err = ppsutil.GetExpectedNumWorkers(kubeclient, nil) require.NoError(t, err) require.Equal(t, 1, parellelism) } func TestPipelineJobDeletion(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/"), "", false, )) commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) _, err = commitIter.Next() require.NoError(t, err) // Now delete the corresponding job jobInfos, err := c.ListJob(pipelineName, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) err = c.DeleteJob(jobInfos[0].Job.ID) require.NoError(t, err) } func TestStopJob(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestStopJob") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline-stop-job") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"sleep", "20"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/"), "", false, )) // Create two input commits to trigger two jobs. // We will stop the first job midway through, and assert that the // second job finishes. 
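    // (The pipeline above sleeps for 20 seconds per datum, which leaves enough
    // time to stop the first job while it is still running.)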
commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) commit2, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit2.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit2.ID)) var jobID string b := backoff.NewTestingBackOff() require.NoError(t, backoff.Retry(func() error { jobInfos, err := c.ListJob(pipelineName, nil, nil, -1, true) require.NoError(t, err) if len(jobInfos) != 1 { return fmt.Errorf("len(jobInfos) should be 1") } jobID = jobInfos[0].Job.ID state := jobInfos[0].State if state != pps.JobState_JOB_RUNNING && state != pps.JobState_JOB_MERGING { return fmt.Errorf("jobInfos[0] has the wrong state") } return nil }, b)) // Now stop the first job err = c.StopJob(jobID) require.NoError(t, err) jobInfo, err := c.InspectJob(jobID, true) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_KILLED, jobInfo.State) b.Reset() // Check that the second job completes require.NoError(t, backoff.Retry(func() error { jobInfos, err := c.ListJob(pipelineName, nil, nil, -1, true) require.NoError(t, err) if len(jobInfos) != 2 { return fmt.Errorf("len(jobInfos) should be 2") } jobID = jobInfos[0].Job.ID return nil }, b)) jobInfo, err = c.InspectJob(jobID, true) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State) } func TestGetLogs(t *testing.T) { testGetLogs(t, false) } func TestGetLogsWithStats(t *testing.T) { testGetLogs(t, true) } func testGetLogs(t *testing.T, enableStats bool) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) iter := c.GetLogs("", "", nil, "", false, false, 0) for iter.Next() { } require.NoError(t, iter.Err()) // create repos dataRepo := tu.UniqueString("data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") _, err := c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"sh"}, Stdin: []string{ fmt.Sprintf("cp /pfs/%s/file /pfs/out/file", dataRepo), "echo foo", "echo %s", // %s tests a formatting bug we had (#2729) }, }, Input: client.NewPFSInput(dataRepo, "/*"), EnableStats: enableStats, ParallelismSpec: &pps.ParallelismSpec{ Constant: 4, }, }) require.NoError(t, err) // Commit data to repo and flush commit commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) _, err = c.FlushJobAll([]*pfs.Commit{commit}, nil) require.NoError(t, err) // Get logs from pipeline, using pipeline iter = c.GetLogs(pipelineName, "", nil, "", false, false, 0) var numLogs int var loglines []string for iter.Next() { if !iter.Message().User { continue } numLogs++ require.True(t, iter.Message().Message != "") loglines = append(loglines, strings.TrimSuffix(iter.Message().Message, "\n")) require.False(t, strings.Contains(iter.Message().Message, "MISSING"), iter.Message().Message) } require.True(t, numLogs >= 2, "logs:\n%s", strings.Join(loglines, "\n")) require.NoError(t, iter.Err()) // Get logs from pipeline, using pipeline (tailing the last two log lines) iter = 
c.GetLogs(pipelineName, "", nil, "", false, false, 2) numLogs = 0 loglines = []string{} for iter.Next() { numLogs++ require.True(t, iter.Message().Message != "") loglines = append(loglines, strings.TrimSuffix(iter.Message().Message, "\n")) } require.True(t, numLogs >= 2, "logs:\n%s", strings.Join(loglines, "\n")) require.NoError(t, iter.Err()) // Get logs from pipeline, using a pipeline that doesn't exist. There should // be an error iter = c.GetLogs("__DOES_NOT_EXIST__", "", nil, "", false, false, 0) require.False(t, iter.Next()) require.YesError(t, iter.Err()) require.Matches(t, "could not get", iter.Err().Error()) // Get logs from pipeline, using job // (1) Get job ID, from pipeline that just ran jobInfos, err := c.ListJob(pipelineName, nil, nil, -1, true) require.NoError(t, err) require.True(t, len(jobInfos) == 1) // (2) Get logs using extracted job ID // wait for logs to be collected time.Sleep(10 * time.Second) iter = c.GetLogs("", jobInfos[0].Job.ID, nil, "", false, false, 0) numLogs = 0 for iter.Next() { numLogs++ require.True(t, iter.Message().Message != "") } // Make sure that we've seen some logs require.NoError(t, iter.Err()) require.True(t, numLogs > 0) // Get logs for datums but don't specify pipeline or job. These should error iter = c.GetLogs("", "", []string{"/foo"}, "", false, false, 0) require.False(t, iter.Next()) require.YesError(t, iter.Err()) resp, err := c.ListDatum(jobInfos[0].Job.ID, 0, 0) require.NoError(t, err) require.True(t, len(resp.DatumInfos) > 0) iter = c.GetLogs("", "", nil, resp.DatumInfos[0].Datum.ID, false, false, 0) require.False(t, iter.Next()) require.YesError(t, iter.Err()) // Get logs from pipeline, using a job that doesn't exist. There should // be an error iter = c.GetLogs("", "__DOES_NOT_EXIST__", nil, "", false, false, 0) require.False(t, iter.Next()) require.YesError(t, iter.Err()) require.Matches(t, "could not get", iter.Err().Error()) // Filter logs based on input (using file that exists). Get logs using file // path, hex hash, and base64 hash, and make sure you get the same log lines fileInfo, err := c.InspectFile(dataRepo, commit.ID, "/file") require.NoError(t, err) // TODO(msteffen) This code shouldn't be wrapped in a backoff, but for some // reason GetLogs is not yet 100% consistent. This reduces flakes in testing. 
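    // The retry below fetches the same logs three ways (filtered by file path,
    // by the hex-encoded file hash, and by the base64-encoded hash) and checks
    // that all three filters return identical, non-empty log lines.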
require.NoError(t, backoff.Retry(func() error { pathLog := c.GetLogs("", jobInfos[0].Job.ID, []string{"/file"}, "", false, false, 0) hexHash := "19fdf57bdf9eb5a9602bfa9c0e6dd7ed3835f8fd431d915003ea82747707be66" require.Equal(t, hexHash, hex.EncodeToString(fileInfo.Hash)) // sanity-check test hexLog := c.GetLogs("", jobInfos[0].Job.ID, []string{hexHash}, "", false, false, 0) base64Hash := "Gf31e9+etalgK/qcDm3X7Tg1+P1DHZFQA+qCdHcHvmY=" require.Equal(t, base64Hash, base64.StdEncoding.EncodeToString(fileInfo.Hash)) base64Log := c.GetLogs("", jobInfos[0].Job.ID, []string{base64Hash}, "", false, false, 0) numLogs = 0 for { havePathLog, haveHexLog, haveBase64Log := pathLog.Next(), hexLog.Next(), base64Log.Next() if havePathLog != haveHexLog || haveHexLog != haveBase64Log { return fmt.Errorf("Unequal log lengths") } if !havePathLog { break } numLogs++ if pathLog.Message().Message != hexLog.Message().Message || hexLog.Message().Message != base64Log.Message().Message { return fmt.Errorf( "unequal logs, pathLogs: \"%s\" hexLog: \"%s\" base64Log: \"%s\"", pathLog.Message().Message, hexLog.Message().Message, base64Log.Message().Message) } } for _, logsiter := range []*client.LogsIter{pathLog, hexLog, base64Log} { if logsiter.Err() != nil { return logsiter.Err() } } if numLogs == 0 { return fmt.Errorf("no logs found") } return nil }, backoff.NewTestingBackOff())) // Filter logs based on input (using file that doesn't exist). There should // be no logs iter = c.GetLogs("", jobInfos[0].Job.ID, []string{"__DOES_NOT_EXIST__"}, "", false, false, 0) require.False(t, iter.Next()) require.NoError(t, iter.Err()) ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() iter = c.WithCtx(ctx).GetLogs(pipelineName, "", nil, "", false, false, 0) numLogs = 0 for iter.Next() { numLogs++ if numLogs == 8 { // Do another commit so there's logs to receive with follow _, err = c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("bar\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) } require.True(t, iter.Message().Message != "") if numLogs == 16 { break } } require.NoError(t, iter.Err()) } func TestAllDatumsAreProcessed(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo1 := tu.UniqueString("TestAllDatumsAreProcessed_data1") require.NoError(t, c.CreateRepo(dataRepo1)) dataRepo2 := tu.UniqueString("TestAllDatumsAreProcessed_data2") require.NoError(t, c.CreateRepo(dataRepo2)) commit1, err := c.StartCommit(dataRepo1, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo1, "master", "file1", strings.NewReader("foo\n")) require.NoError(t, err) _, err = c.PutFile(dataRepo1, "master", "file2", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo1, "master")) commit2, err := c.StartCommit(dataRepo2, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo2, "master", "file1", strings.NewReader("foo\n")) require.NoError(t, err) _, err = c.PutFile(dataRepo2, "master", "file2", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo2, "master")) require.NoError(t, c.CreatePipeline( tu.UniqueString("TestAllDatumsAreProcessed_pipelines"), "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/%s/* /pfs/%s/* > /pfs/out/file", dataRepo1, dataRepo2), }, nil, client.NewCrossInput( client.NewPFSInput(dataRepo1, "/*"), 
client.NewPFSInput(dataRepo2, "/*"), ), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1, commit2}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf)) // should be 8 because each file gets copied twice due to cross product require.Equal(t, strings.Repeat("foo\n", 8), buf.String()) } func TestDatumStatusRestart(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestDatumDedup_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") // This pipeline sleeps for 20 secs per datum require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "sleep 20", }, nil, client.NewPFSInput(dataRepo, "/*"), "", false, )) var jobID string var datumStarted time.Time // checkStatus waits for 'pipeline' to start and makes sure that each time // it's called, the datum being processes was started at a new and later time // (than the last time checkStatus was called) checkStatus := func() { require.NoError(t, backoff.Retry(func() error { // get the jobs, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) if len(jobs) == 0 { return fmt.Errorf("no jobs found") } jobID = jobs[0].Job.ID jobInfo, err := c.InspectJob(jobs[0].Job.ID, false) require.NoError(t, err) if len(jobInfo.WorkerStatus) == 0 { return fmt.Errorf("no worker statuses") } if jobInfo.WorkerStatus[0].JobID == jobInfo.Job.ID { // The first time this function is called, datumStarted is zero // so `Before` is true for any non-zero time. 
_datumStarted, err := types.TimestampFromProto(jobInfo.WorkerStatus[0].Started) require.NoError(t, err) require.True(t, datumStarted.Before(_datumStarted)) datumStarted = _datumStarted return nil } return fmt.Errorf("worker status from wrong job") }, backoff.RetryEvery(time.Second).For(30*time.Second))) } checkStatus() require.NoError(t, c.RestartDatum(jobID, []string{"/file"})) checkStatus() commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) } func TestUseMultipleWorkers(t *testing.T) { t.Skip("flaky") if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestUseMultipleWorkers_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < 20; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo")) require.NoError(t, err) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") // This pipeline sleeps for 10 secs per datum require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "sleep 10", }, &pps.ParallelismSpec{ Constant: 2, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) // Get job info 2x/sec for 20s until we confirm two workers for the current job require.NoError(t, backoff.Retry(func() error { jobs, err := c.ListJob(pipeline, nil, nil, -1, true) if err != nil { return fmt.Errorf("could not list job: %s", err.Error()) } if len(jobs) == 0 { return fmt.Errorf("failed to find job") } jobInfo, err := c.InspectJob(jobs[0].Job.ID, false) if err != nil { return fmt.Errorf("could not inspect job: %s", err.Error()) } if len(jobInfo.WorkerStatus) != 2 { return fmt.Errorf("incorrect number of statuses: %v", len(jobInfo.WorkerStatus)) } return nil }, backoff.RetryEvery(500*time.Millisecond).For(20*time.Second))) } // TestSystemResourceRequest doesn't create any jobs or pipelines, it // just makes sure that when pachyderm is deployed, we give rethinkdb, pachd, // and etcd default resource requests. This prevents them from overloading // nodes and getting evicted, which can slow down or break a cluster. 
func TestSystemResourceRequests(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } kubeClient := tu.GetKubeClient(t) // Expected resource requests for pachyderm system pods: defaultLocalMem := map[string]string{ "pachd": "512M", "etcd": "512M", } defaultLocalCPU := map[string]string{ "pachd": "250m", "etcd": "250m", } defaultCloudMem := map[string]string{ "pachd": "3G", "etcd": "2G", } defaultCloudCPU := map[string]string{ "pachd": "1", "etcd": "1", } // Get Pod info for 'app' from k8s var c v1.Container for _, app := range []string{"pachd", "etcd"} { err := backoff.Retry(func() error { podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List( metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"app": app, "suite": "pachyderm"}, )), }) if err != nil { return err } if len(podList.Items) < 1 { return fmt.Errorf("could not find pod for %s", app) // retry } c = podList.Items[0].Spec.Containers[0] return nil }, backoff.NewTestingBackOff()) require.NoError(t, err) // Make sure the pod's container has resource requests cpu, ok := c.Resources.Requests[v1.ResourceCPU] require.True(t, ok, "could not get CPU request for "+app) require.True(t, cpu.String() == defaultLocalCPU[app] || cpu.String() == defaultCloudCPU[app]) mem, ok := c.Resources.Requests[v1.ResourceMemory] require.True(t, ok, "could not get memory request for "+app) require.True(t, mem.String() == defaultLocalMem[app] || mem.String() == defaultCloudMem[app]) } } // TestPipelineResourceRequest creates a pipeline with a resource request, and // makes sure that's passed to k8s (by inspecting the pipeline's pods) func TestPipelineResourceRequest(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPipelineResourceRequest") pipelineName := tu.UniqueString("TestPipelineResourceRequest_Pipeline") require.NoError(t, c.CreateRepo(dataRepo)) // Resources are not yet in client.CreatePipeline() (we may add them later) _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, ResourceRequests: &pps.ResourceSpec{ Memory: "100M", Cpu: 0.5, Disk: "10M", }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Branch: "master", Glob: "/*", }, }, }) require.NoError(t, err) // Get info about the pipeline pods from k8s & check for resources pipelineInfo, err := c.InspectPipeline(pipelineName) require.NoError(t, err) var container v1.Container rcName := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version) kubeClient := tu.GetKubeClient(t) err = backoff.Retry(func() error { podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List( metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"app": rcName}, )), }) if err != nil { return err // retry } if len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 { return fmt.Errorf("could not find single container for pipeline %s", pipelineInfo.Pipeline.Name) } container = podList.Items[0].Spec.Containers[0] return nil // no more retries }, backoff.NewTestingBackOff()) require.NoError(t, err) // Make sure a CPU and Memory request are both set cpu, ok 
:= container.Resources.Requests[v1.ResourceCPU] require.True(t, ok) require.Equal(t, "500m", cpu.String()) mem, ok := container.Resources.Requests[v1.ResourceMemory] require.True(t, ok) require.Equal(t, "100M", mem.String()) disk, ok := container.Resources.Requests[v1.ResourceEphemeralStorage] require.True(t, ok) require.Equal(t, "10M", disk.String()) } func TestPipelineResourceLimit(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPipelineResourceLimit") pipelineName := tu.UniqueString("TestPipelineResourceLimit_Pipeline") require.NoError(t, c.CreateRepo(dataRepo)) // Resources are not yet in client.CreatePipeline() (we may add them later) _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, ResourceLimits: &pps.ResourceSpec{ Memory: "100M", Cpu: 0.5, }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Branch: "master", Glob: "/*", }, }, }) require.NoError(t, err) // Get info about the pipeline pods from k8s & check for resources pipelineInfo, err := c.InspectPipeline(pipelineName) require.NoError(t, err) var container v1.Container rcName := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version) kubeClient := tu.GetKubeClient(t) err = backoff.Retry(func() error { podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"app": rcName, "suite": "pachyderm"}, )), }) if err != nil { return err // retry } if len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 { return fmt.Errorf("could not find single container for pipeline %s", pipelineInfo.Pipeline.Name) } container = podList.Items[0].Spec.Containers[0] return nil // no more retries }, backoff.NewTestingBackOff()) require.NoError(t, err) // Make sure a CPU and Memory request are both set cpu, ok := container.Resources.Limits[v1.ResourceCPU] require.True(t, ok) require.Equal(t, "500m", cpu.String()) mem, ok := container.Resources.Limits[v1.ResourceMemory] require.True(t, ok) require.Equal(t, "100M", mem.String()) } func TestPipelineResourceLimitDefaults(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPipelineResourceLimit") pipelineName := tu.UniqueString("TestPipelineResourceLimit_Pipeline") require.NoError(t, c.CreateRepo(dataRepo)) // Resources are not yet in client.CreatePipeline() (we may add them later) _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Branch: "master", Glob: "/*", }, }, }) require.NoError(t, err) // Get info about the pipeline pods from k8s & check for resources pipelineInfo, err := c.InspectPipeline(pipelineName) require.NoError(t, err) var container v1.Container rcName := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, 
pipelineInfo.Version) kubeClient := tu.GetKubeClient(t) err = backoff.Retry(func() error { podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"app": rcName, "suite": "pachyderm"}, )), }) if err != nil { return err // retry } if len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 { return fmt.Errorf("could not find single container for pipeline %s", pipelineInfo.Pipeline.Name) } container = podList.Items[0].Spec.Containers[0] return nil // no more retries }, backoff.NewTestingBackOff()) require.NoError(t, err) require.Nil(t, container.Resources.Limits) } func TestPipelinePartialResourceRequest(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPipelinePartialResourceRequest") pipelineName := tu.UniqueString("pipeline") require.NoError(t, c.CreateRepo(dataRepo)) // Resources are not yet in client.CreatePipeline() (we may add them later) _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(fmt.Sprintf("%s-%d", pipelineName, 0)), Transform: &pps.Transform{ Cmd: []string{"true"}, }, ResourceRequests: &pps.ResourceSpec{ Cpu: 0.5, Memory: "100M", }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Branch: "master", Glob: "/*", }, }, }) require.NoError(t, err) _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(fmt.Sprintf("%s-%d", pipelineName, 1)), Transform: &pps.Transform{ Cmd: []string{"true"}, }, ResourceRequests: &pps.ResourceSpec{ Memory: "100M", }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Branch: "master", Glob: "/*", }, }, }) require.NoError(t, err) _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(fmt.Sprintf("%s-%d", pipelineName, 2)), Transform: &pps.Transform{ Cmd: []string{"true"}, }, ResourceRequests: &pps.ResourceSpec{}, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Branch: "master", Glob: "/*", }, }, }) require.NoError(t, err) require.NoError(t, backoff.Retry(func() error { for i := 0; i < 3; i++ { pipelineInfo, err := c.InspectPipeline(fmt.Sprintf("%s-%d", pipelineName, i)) require.NoError(t, err) if pipelineInfo.State != pps.PipelineState_PIPELINE_RUNNING { return fmt.Errorf("pipeline not in running state") } } return nil }, backoff.NewTestingBackOff())) } func TestPodOpts(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPodSpecOpts_data") require.NoError(t, c.CreateRepo(dataRepo)) t.Run("Validation", func(t *testing.T) { pipelineName := tu.UniqueString("TestPodSpecOpts") _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Branch: "master", Glob: "/*", }, }, PodSpec: "not-json", }) require.YesError(t, err) _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"cp", 
path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Branch: "master", Glob: "/*", }, }, PodPatch: "also-not-json", }) require.YesError(t, err) }) t.Run("Spec", func(t *testing.T) { pipelineName := tu.UniqueString("TestPodSpecOpts") _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Branch: "master", Glob: "/*", }, }, SchedulingSpec: &pps.SchedulingSpec{ // This NodeSelector will cause the worker pod to fail to // schedule, but the test can still pass because we just check // for values on the pod, it doesn't need to actually come up. NodeSelector: map[string]string{ "foo": "bar", }, }, PodSpec: `{ "hostname": "hostname" }`, }) require.NoError(t, err) // Get info about the pipeline pods from k8s & check for resources pipelineInfo, err := c.InspectPipeline(pipelineName) require.NoError(t, err) var pod v1.Pod rcName := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version) kubeClient := tu.GetKubeClient(t) err = backoff.Retry(func() error { podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"app": rcName, "suite": "pachyderm"}, )), }) if err != nil { return err // retry } if len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 { return fmt.Errorf("could not find single container for pipeline %s", pipelineInfo.Pipeline.Name) } pod = podList.Items[0] return nil // no more retries }, backoff.NewTestingBackOff()) require.NoError(t, err) // Make sure a CPU and Memory request are both set require.Equal(t, "bar", pod.Spec.NodeSelector["foo"]) require.Equal(t, "hostname", pod.Spec.Hostname) }) t.Run("Patch", func(t *testing.T) { pipelineName := tu.UniqueString("TestPodSpecOpts") _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"cp", path.Join("/pfs", dataRepo, "file"), "/pfs/out/file"}, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: dataRepo, Branch: "master", Glob: "/*", }, }, SchedulingSpec: &pps.SchedulingSpec{ // This NodeSelector will cause the worker pod to fail to // schedule, but the test can still pass because we just check // for values on the pod, it doesn't need to actually come up. 
NodeSelector: map[string]string{ "foo": "bar", }, }, PodPatch: `[ { "op": "add", "path": "/hostname", "value": "hostname" } ]`, }) require.NoError(t, err) // Get info about the pipeline pods from k8s & check for resources pipelineInfo, err := c.InspectPipeline(pipelineName) require.NoError(t, err) var pod v1.Pod rcName := ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version) kubeClient := tu.GetKubeClient(t) err = backoff.Retry(func() error { podList, err := kubeClient.CoreV1().Pods(v1.NamespaceDefault).List(metav1.ListOptions{ LabelSelector: metav1.FormatLabelSelector(metav1.SetAsLabelSelector( map[string]string{"app": rcName, "suite": "pachyderm"}, )), }) if err != nil { return err // retry } if len(podList.Items) != 1 || len(podList.Items[0].Spec.Containers) == 0 { return fmt.Errorf("could not find single container for pipeline %s", pipelineInfo.Pipeline.Name) } pod = podList.Items[0] return nil // no more retries }, backoff.NewTestingBackOff()) require.NoError(t, err) // Make sure a CPU and Memory request are both set require.Equal(t, "bar", pod.Spec.NodeSelector["foo"]) require.Equal(t, "hostname", pod.Spec.Hostname) }) } func TestPipelineLargeOutput(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineInputDataModification_data") require.NoError(t, c.CreateRepo(dataRepo)) numFiles := 100 commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader("")) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "for i in `seq 1 100`; do touch /pfs/out/$RANDOM; done", }, &pps.ParallelismSpec{ Constant: 4, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) } func TestJoinInput(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) var repos []string for i := 0; i < 2; i++ { repos = append(repos, tu.UniqueString(fmt.Sprintf("TestJoinInput%v", i))) require.NoError(t, c.CreateRepo(repos[i])) } numFiles := 16 var commits []*pfs.Commit for r, repo := range repos { commit, err := c.StartCommit(repo, "master") require.NoError(t, err) commits = append(commits, commit) for i := 0; i < numFiles; i++ { _, err = c.PutFile(repo, "master", fmt.Sprintf("file-%v.%4b", r, i), strings.NewReader(fmt.Sprintf("%d\n", i))) } require.NoError(t, c.FinishCommit(repo, "master")) } pipeline := tu.UniqueString("join-pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("touch /pfs/out/$(echo $(ls -r /pfs/%s/)$(ls -r /pfs/%s/))", repos[0], repos[1]), }, &pps.ParallelismSpec{ Constant: 1, }, client.NewJoinInput( client.NewPFSInputOpts("", repos[0], "", "/file-?.(11*)", "$1", false), client.NewPFSInputOpts("", repos[1], "", "/file-?.(*0)", "$1", false), ), "", false, )) commitInfos, err := c.FlushCommitAll(commits, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) require.Equal(t, 1, len(commitInfos)) outCommit := commitInfos[0].Commit fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, 
"") require.NoError(t, err) require.Equal(t, 2, len(fileInfos)) expectedNames := []string{"/file-0.1100file-1.1100", "/file-0.1110file-1.1110"} for i, fi := range fileInfos { // 1 byte per repo require.Equal(t, expectedNames[i], fi.File.Path) } } func TestUnionInput(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) var repos []string for i := 0; i < 4; i++ { repos = append(repos, tu.UniqueString("TestUnionInput")) require.NoError(t, c.CreateRepo(repos[i])) } numFiles := 2 var commits []*pfs.Commit for _, repo := range repos { commit, err := c.StartCommit(repo, "master") require.NoError(t, err) commits = append(commits, commit) for i := 0; i < numFiles; i++ { _, err = c.PutFile(repo, "master", fmt.Sprintf("file-%d", i), strings.NewReader(fmt.Sprintf("%d", i))) } require.NoError(t, c.FinishCommit(repo, "master")) } t.Run("union all", func(t *testing.T) { pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cp /pfs/*/* /pfs/out", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewUnionInput( client.NewPFSInput(repos[0], "/*"), client.NewPFSInput(repos[1], "/*"), client.NewPFSInput(repos[2], "/*"), client.NewPFSInput(repos[3], "/*"), ), "", false, )) commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) outCommit := commitInfos[0].Commit fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, "") require.NoError(t, err) require.Equal(t, 2, len(fileInfos)) for _, fi := range fileInfos { // 1 byte per repo require.Equal(t, uint64(len(repos)), fi.SizeBytes) } }) t.Run("union crosses", func(t *testing.T) { pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cp -r /pfs/TestUnionInput* /pfs/out", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewUnionInput( client.NewCrossInput( client.NewPFSInput(repos[0], "/*"), client.NewPFSInput(repos[1], "/*"), ), client.NewCrossInput( client.NewPFSInput(repos[2], "/*"), client.NewPFSInput(repos[3], "/*"), ), ), "", false, )) commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) outCommit := commitInfos[0].Commit for _, repo := range repos { fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, repo) require.NoError(t, err) require.Equal(t, 2, len(fileInfos)) for _, fi := range fileInfos { // each file should be seen twice require.Equal(t, uint64(2), fi.SizeBytes) } } }) t.Run("cross unions", func(t *testing.T) { pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cp -r /pfs/TestUnionInput* /pfs/out", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewCrossInput( client.NewUnionInput( client.NewPFSInput(repos[0], "/*"), client.NewPFSInput(repos[1], "/*"), ), client.NewUnionInput( client.NewPFSInput(repos[2], "/*"), client.NewPFSInput(repos[3], "/*"), ), ), "", false, )) commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) outCommit := commitInfos[0].Commit for _, repo := range repos { fileInfos, err := c.ListFile(outCommit.Repo.Name, 
outCommit.ID, repo) require.NoError(t, err) require.Equal(t, 2, len(fileInfos)) for _, fi := range fileInfos { // each file should be seen twice require.Equal(t, uint64(4), fi.SizeBytes) } } }) t.Run("union alias", func(t *testing.T) { pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cp -r /pfs/in /pfs/out", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewUnionInput( client.NewPFSInputOpts("in", repos[0], "", "/*", "", false), client.NewPFSInputOpts("in", repos[1], "", "/*", "", false), client.NewPFSInputOpts("in", repos[2], "", "/*", "", false), client.NewPFSInputOpts("in", repos[3], "", "/*", "", false), ), "", false, )) commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) outCommit := commitInfos[0].Commit fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, "in") require.NoError(t, err) require.Equal(t, 2, len(fileInfos)) for _, fi := range fileInfos { require.Equal(t, uint64(4), fi.SizeBytes) } }) t.Run("union cross alias", func(t *testing.T) { pipeline := tu.UniqueString("pipeline") require.YesError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cp -r /pfs/in* /pfs/out", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewUnionInput( client.NewCrossInput( client.NewPFSInputOpts("in1", repos[0], "", "/*", "", false), client.NewPFSInputOpts("in1", repos[1], "", "/*", "", false), ), client.NewCrossInput( client.NewPFSInputOpts("in2", repos[2], "", "/*", "", false), client.NewPFSInputOpts("in2", repos[3], "", "/*", "", false), ), ), "", false, )) require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cp -r /pfs/in* /pfs/out", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewUnionInput( client.NewCrossInput( client.NewPFSInputOpts("in1", repos[0], "", "/*", "", false), client.NewPFSInputOpts("in2", repos[1], "", "/*", "", false), ), client.NewCrossInput( client.NewPFSInputOpts("in1", repos[2], "", "/*", "", false), client.NewPFSInputOpts("in2", repos[3], "", "/*", "", false), ), ), "", false, )) commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) outCommit := commitInfos[0].Commit for _, dir := range []string{"in1", "in2"} { fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, dir) require.NoError(t, err) require.Equal(t, 2, len(fileInfos)) for _, fi := range fileInfos { // each file should be seen twice require.Equal(t, uint64(4), fi.SizeBytes) } } }) t.Run("cross union alias", func(t *testing.T) { pipeline := tu.UniqueString("pipeline") require.YesError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cp -r /pfs/in* /pfs/out", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewCrossInput( client.NewUnionInput( client.NewPFSInputOpts("in1", repos[0], "", "/*", "", false), client.NewPFSInputOpts("in2", repos[1], "", "/*", "", false), ), client.NewUnionInput( client.NewPFSInputOpts("in1", repos[2], "", "/*", "", false), client.NewPFSInputOpts("in2", repos[3], "", "/*", "", false), ), ), "", false, )) require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "cp -r /pfs/in* /pfs/out", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewCrossInput( client.NewUnionInput( client.NewPFSInputOpts("in1", repos[0], "", "/*", "", false), 
client.NewPFSInputOpts("in1", repos[1], "", "/*", "", false), ), client.NewUnionInput( client.NewPFSInputOpts("in2", repos[2], "", "/*", "", false), client.NewPFSInputOpts("in2", repos[3], "", "/*", "", false), ), ), "", false, )) commitIter, err := c.FlushCommit(commits, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) outCommit := commitInfos[0].Commit for _, dir := range []string{"in1", "in2"} { fileInfos, err := c.ListFile(outCommit.Repo.Name, outCommit.ID, dir) require.NoError(t, err) require.Equal(t, 2, len(fileInfos)) for _, fi := range fileInfos { // each file should be seen twice require.Equal(t, uint64(8), fi.SizeBytes) } } }) } func TestGarbageCollection(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) // Delete everything, then run garbage collection and finally check that // we're at a baseline of 0 tags and 0 objects. require.NoError(t, c.DeleteAll()) require.NoError(t, c.GarbageCollect(0)) originalObjects := getAllObjects(t, c) originalTags := getAllTags(t, c) require.Equal(t, 0, len(originalObjects)) require.Equal(t, 0, len(originalTags)) dataRepo := tu.UniqueString(t.Name()) pipeline := tu.UniqueString(t.Name() + "Pipeline") failurePipeline := tu.UniqueString(t.Name() + "FailurePipeline") var commit *pfs.Commit var err error createInputAndPipeline := func() { require.NoError(t, c.CreateRepo(dataRepo)) commit, err = c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "foo", strings.NewReader("foo")) require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "bar", strings.NewReader("bar")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) // This pipeline fails immediately (to test that GC succeeds in the // presence of failed pipelines require.NoError(t, c.CreatePipeline( failurePipeline, "nonexistant-image", []string{"bash"}, []string{"exit 1"}, nil, client.NewPFSInput(dataRepo, "/"), "", false, )) // This pipeline copies foo and modifies bar require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cp /pfs/%s/foo /pfs/out/foo", dataRepo), fmt.Sprintf("cp /pfs/%s/bar /pfs/out/bar", dataRepo), "echo bar >> /pfs/out/bar", }, nil, client.NewPFSInput(dataRepo, "/"), "", false, )) // run FlushJob inside a retry loop, as the pipeline may take a few moments // to start the worker master and create a job require.NoErrorWithinTRetry(t, 60*time.Second, func() error { jobInfos, err := c.FlushJobAll([]*pfs.Commit{commit}, nil) require.NoError(t, err) if len(jobInfos) != 1 { return fmt.Errorf("expected one job but got %d", len(jobInfos)) } if jobInfos[0].State != pps.JobState_JOB_SUCCESS { return fmt.Errorf("Expected job in state SUCCESS but was in %s", jobInfos[0].State) } return nil }) } createInputAndPipeline() objectsBefore := getAllObjects(t, c) tagsBefore := getAllTags(t, c) specObjectCountBefore := getObjectCountForRepo(t, c, ppsconsts.SpecRepo) // Try to GC without stopping the pipeline. 
require.YesError(t, c.GarbageCollect(0)) // Now stop the pipeline and GC require.NoError(t, c.StopPipeline(pipeline)) require.NoErrorWithinTRetry(t, 90*time.Second, func() error { return c.GarbageCollect(0) }) // Check that data still exists in the input repo var buf bytes.Buffer require.NoError(t, c.GetFile(dataRepo, commit.ID, "foo", 0, 0, &buf)) require.Equal(t, "foo", buf.String()) buf.Reset() require.NoError(t, c.GetFile(dataRepo, commit.ID, "bar", 0, 0, &buf)) require.Equal(t, "bar", buf.String()) pis, err := c.ListPipeline() require.NoError(t, err) require.Equal(t, 2, len(pis)) buf.Reset() require.NoError(t, c.GetFile(pipeline, "master", "foo", 0, 0, &buf)) require.Equal(t, "foo", buf.String()) buf.Reset() require.NoError(t, c.GetFile(pipeline, "master", "bar", 0, 0, &buf)) require.Equal(t, "barbar\n", buf.String()) // Check that no objects or tags have been removed, since we just ran GC // without deleting anything. objectsAfter := getAllObjects(t, c) tagsAfter := getAllTags(t, c) require.Equal(t, len(tagsBefore), len(tagsAfter)) // Stopping the pipeline creates/updates the pipeline __spec__ repo, so we need // to account for the number of objects we added there specObjectCountAfter := getObjectCountForRepo(t, c, ppsconsts.SpecRepo) expectedSpecObjectCountDelta := specObjectCountAfter - specObjectCountBefore require.Equal(t, len(objectsBefore)+expectedSpecObjectCountDelta, len(objectsAfter)) objectsBefore = objectsAfter tagsBefore = tagsAfter // Now delete both pipelines and GC require.NoError(t, c.DeletePipeline(pipeline, false)) require.NoError(t, c.DeletePipeline(failurePipeline, false)) require.NoErrorWithinTRetry(t, 30*time.Second, func() error { require.NoError(t, c.GarbageCollect(0)) // We should've deleted one tag since the functioning pipeline only processed // one datum. tagsAfter = getAllTags(t, c) if dTags := len(tagsBefore) - len(tagsAfter); dTags != 1 { return fmt.Errorf("expected 1 tag after GC but found %d", dTags) } // We should've deleted 2 objects: // - the hashtree referenced by the tag (datum hashtree) // - Note that the hashtree for the output commit is the same object as the // datum hashtree. It contains the same metadata, and b/c hashtrees are // stored as objects, it's deduped with the datum hashtree // - The "datums" object attached to 'pipeline's output commit // Note that deleting a pipeline doesn't delete the spec commits objectsAfter = getAllObjects(t, c) if dObjects := len(objectsBefore) - len(objectsAfter); dObjects != 2 { return fmt.Errorf("expected 3 objects but found %d", dObjects) } // The 9 remaining objects are: // - hashtree for input commit // - object w/ contents of /foo + object w/ contents of /bar // - 6 objects in __spec__: // (hashtree + /spec file) * (2 'pipeline' commits + 1 'failurePipeline' commit) if len(objectsAfter) != 9 { return fmt.Errorf("expected 9 objects remaining, but found %d", len(objectsAfter)) } return nil }) // Now we delete everything. require.NoError(t, c.DeleteAll()) require.NoError(t, c.GarbageCollect(0)) // Since we've now deleted everything that we created in this test, // the tag count and object count should be back to the originals. objectsAfter = getAllObjects(t, c) tagsAfter = getAllTags(t, c) require.Equal(t, 0, len(tagsAfter)) require.Equal(t, 0, len(objectsAfter)) // Now we create the pipeline again and check that all data is // accessible. 
This is important because there used to be a bug // where we failed to invalidate the cache such that the objects in // the cache were referencing blocks that had been GC-ed. createInputAndPipeline() buf.Reset() require.NoError(t, c.GetFile(dataRepo, commit.ID, "foo", 0, 0, &buf)) require.Equal(t, "foo", buf.String()) buf.Reset() require.NoError(t, c.GetFile(dataRepo, commit.ID, "bar", 0, 0, &buf)) require.Equal(t, "bar", buf.String()) buf.Reset() require.NoError(t, c.GetFile(pipeline, "master", "foo", 0, 0, &buf)) require.Equal(t, "foo", buf.String()) buf.Reset() require.NoError(t, c.GetFile(pipeline, "master", "bar", 0, 0, &buf)) require.Equal(t, "barbar\n", buf.String()) } func TestPipelineWithStats(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineWithStats_data") require.NoError(t, c.CreateRepo(dataRepo)) numFiles := 10 commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100))) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") _, err = c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, }, Input: client.NewPFSInput(dataRepo, "/*"), EnableStats: true, ParallelismSpec: &pps.ParallelismSpec{ Constant: 4, }, }) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) jobs, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobs)) // Check we can list datums before job completion resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0) require.NoError(t, err) require.Equal(t, numFiles, len(resp.DatumInfos)) require.Equal(t, 1, len(resp.DatumInfos[0].Data)) // Check we can list datums before job completion w pagination resp, err = c.ListDatum(jobs[0].Job.ID, 5, 0) require.NoError(t, err) require.Equal(t, 5, len(resp.DatumInfos)) require.Equal(t, int64(numFiles/5), resp.TotalPages) require.Equal(t, int64(0), resp.Page) // Block on the job being complete before we call ListDatum again so we're // sure the datums have actually been processed. 
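	// Passing true as InspectJob's second argument makes the call block until the job
	// reaches a terminal state.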
	_, err = c.InspectJob(jobs[0].Job.ID, true)
	require.NoError(t, err)
	resp, err = c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	require.Equal(t, 1, len(resp.DatumInfos[0].Data))
	for _, datum := range resp.DatumInfos {
		require.NoError(t, err)
		require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	}

	// Make sure 'inspect datum' works
	datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SUCCESS, datum.State)
}

func TestPipelineWithStatsToggle(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}

	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	dataRepo := tu.UniqueString("TestPipelineWithStatsToggle_data")
	require.NoError(t, c.CreateRepo(dataRepo))

	pipeline := tu.UniqueString("TestPipelineWithStatsToggle")
	updatePipeline := func(enabled bool) error {
		_, err := c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{
			Pipeline: client.NewPipeline(pipeline),
			Transform: &pps.Transform{
				Cmd: []string{"bash"},
				Stdin: []string{
					fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo),
				},
			},
			EnableStats: enabled,
			Input:       client.NewPFSInput(dataRepo, "/*"),
			ParallelismSpec: &pps.ParallelismSpec{
				Constant: 1,
			},
			Update: true,
		})
		return err
	}
	numFiles := 5
	var commits []*pfs.Commit
	updateFiles := func() {
		commitLen := len(commits)
		for i := commitLen; i < commitLen+numFiles; i++ {
			commit, err := c.StartCommit(dataRepo, "master")
			commits = append(commits, commit)
			require.NoError(t, err)
			_, err = c.PutFile(dataRepo, commits[i].ID, fmt.Sprintf("foo-%d", i), strings.NewReader("foo\n"))
			require.NoError(t, err)
			require.NoError(t, c.FinishCommit(dataRepo, commits[i].ID))
		}
	}
	// Start with stats disabled.
	require.NoError(t, updatePipeline(false))
	updateFiles()
	jobs, err := c.FlushJobAll([]*pfs.Commit{commits[len(commits)-1]}, nil)
	require.NoError(t, err)
	// Check no stats.
	resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	_, err = c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.YesError(t, err)
	// Enable stats
	require.NoError(t, updatePipeline(true))
	updateFiles()
	jobs, err = c.FlushJobAll([]*pfs.Commit{commits[len(commits)-1]}, nil)
	require.NoError(t, err)
	// Check stats.
	time.Sleep(3 * time.Minute)
	resp, err = c.ListDatum(jobs[0].Job.ID, 0, 0)
	require.NoError(t, err)
	require.Equal(t, numFiles, len(resp.DatumInfos))
	datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID)
	require.NoError(t, err)
	require.Equal(t, pps.DatumState_SUCCESS, datum.State)
	for i := 1; i < numFiles; i++ {
		datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[i].Datum.ID)
		require.NoError(t, err)
		require.Equal(t, pps.DatumState_SKIPPED, datum.State)
	}
	// Check that disabling stats errors.
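	// Presumably the stats output produced above cannot be discarded, so an update that
	// sets enable_stats back to false is expected to be rejected.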
require.YesError(t, updatePipeline(false)) } func TestPipelineWithStatsFailedDatums(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineWithStatsFailedDatums_data") require.NoError(t, c.CreateRepo(dataRepo)) numFiles := 10 commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100))) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") _, err = c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ fmt.Sprintf("if [ -f /pfs/%s/file-5 ]; then exit 1; fi", dataRepo), fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, }, Input: client.NewPFSInput(dataRepo, "/*"), EnableStats: true, ParallelismSpec: &pps.ParallelismSpec{ Constant: 4, }, }) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) jobs, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobs)) // Block on the job being complete before we call ListDatum _, err = c.InspectJob(jobs[0].Job.ID, true) require.NoError(t, err) resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0) require.NoError(t, err) require.Equal(t, numFiles, len(resp.DatumInfos)) // First entry should be failed require.Equal(t, pps.DatumState_FAILED, resp.DatumInfos[0].State) // Last entry should be success require.Equal(t, pps.DatumState_SUCCESS, resp.DatumInfos[len(resp.DatumInfos)-1].State) // Make sure 'inspect datum' works for failed state datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID) require.NoError(t, err) require.Equal(t, pps.DatumState_FAILED, datum.State) } func TestPipelineWithStatsPaginated(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineWithStatsPaginated_data") require.NoError(t, c.CreateRepo(dataRepo)) numPages := int64(2) pageSize := int64(10) numFiles := int(numPages * pageSize) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100))) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") _, err = c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ fmt.Sprintf("if [ -f /pfs/%s/file-5 ]; then exit 1; fi", dataRepo), fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, }, Input: client.NewPFSInput(dataRepo, "/*"), EnableStats: true, ParallelismSpec: &pps.ParallelismSpec{ Constant: 4, }, }) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) var jobs []*pps.JobInfo require.NoError(t, backoff.Retry(func() error { jobs, err = c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) if len(jobs) != 1 { return 
fmt.Errorf("expected 1 jobs, got %d", len(jobs)) } return nil }, backoff.NewTestingBackOff())) // Block on the job being complete before we call ListDatum _, err = c.InspectJob(jobs[0].Job.ID, true) require.NoError(t, err) resp, err := c.ListDatum(jobs[0].Job.ID, pageSize, 0) require.NoError(t, err) require.Equal(t, pageSize, int64(len(resp.DatumInfos))) require.Equal(t, int64(numFiles)/pageSize, resp.TotalPages) // First entry should be failed require.Equal(t, pps.DatumState_FAILED, resp.DatumInfos[0].State) resp, err = c.ListDatum(jobs[0].Job.ID, pageSize, int64(numPages-1)) require.NoError(t, err) require.Equal(t, pageSize, int64(len(resp.DatumInfos))) require.Equal(t, int64(int64(numFiles)/pageSize-1), resp.Page) // Last entry should be success require.Equal(t, pps.DatumState_SUCCESS, resp.DatumInfos[len(resp.DatumInfos)-1].State) // Make sure we get error when requesting pages too high resp, err = c.ListDatum(jobs[0].Job.ID, pageSize, int64(numPages)) require.YesError(t, err) } func TestPipelineWithStatsAcrossJobs(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineWithStatsAcrossJobs_data") require.NoError(t, c.CreateRepo(dataRepo)) numFiles := 10 commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("foo-%d", i), strings.NewReader(strings.Repeat("foo\n", 100))) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("StatsAcrossJobs") _, err = c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, }, Input: client.NewPFSInput(dataRepo, "/*"), EnableStats: true, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, }) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) jobs, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobs)) // Block on the job being complete before we call ListDatum _, err = c.InspectJob(jobs[0].Job.ID, true) require.NoError(t, err) resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0) require.NoError(t, err) require.Equal(t, numFiles, len(resp.DatumInfos)) datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID) require.NoError(t, err) require.Equal(t, pps.DatumState_SUCCESS, datum.State) commit2, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit2.ID, fmt.Sprintf("bar-%d", i), strings.NewReader(strings.Repeat("bar\n", 100))) } require.NoError(t, c.FinishCommit(dataRepo, commit2.ID)) commitIter, err = c.FlushCommit([]*pfs.Commit{commit2}, nil) require.NoError(t, err) commitInfos = collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) jobs, err = c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 2, len(jobs)) // Block on the job being complete before we call ListDatum _, err = c.InspectJob(jobs[0].Job.ID, true) require.NoError(t, err) resp, err = c.ListDatum(jobs[0].Job.ID, 0, 0) require.NoError(t, err) // we should see all the datums from the first job (which should be skipped) // in addition to all the new 
datums processed in this job require.Equal(t, numFiles*2, len(resp.DatumInfos)) datum, err = c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID) require.NoError(t, err) require.Equal(t, pps.DatumState_SUCCESS, datum.State) // Test datums marked as skipped correctly // (also tests list datums are sorted by state) datum, err = c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[numFiles].Datum.ID) require.NoError(t, err) require.Equal(t, pps.DatumState_SKIPPED, datum.State) } func TestPipelineWithStatsSkippedEdgeCase(t *testing.T) { // If I add a file in commit1, delete it in commit2, add it again in commit 3 ... // the datum will be marked as success on the 3rd job, even though it should be marked as skipped if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineWithStatsSkippedEdgeCase_data") require.NoError(t, c.CreateRepo(dataRepo)) numFiles := 10 commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(strings.Repeat("foo\n", 100))) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("StatsEdgeCase") _, err = c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, }, Input: client.NewPFSInput(dataRepo, "/*"), EnableStats: true, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, }) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) jobs, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobs)) // Block on the job being complete before we call ListDatum _, err = c.InspectJob(jobs[0].Job.ID, true) require.NoError(t, err) resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0) require.NoError(t, err) require.Equal(t, numFiles, len(resp.DatumInfos)) for _, datum := range resp.DatumInfos { require.NoError(t, err) require.Equal(t, pps.DatumState_SUCCESS, datum.State) } // Make sure 'inspect datum' works datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID) require.NoError(t, err) require.Equal(t, pps.DatumState_SUCCESS, datum.State) // Create a second commit that deletes a file in commit1 commit2, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) err = c.DeleteFile(dataRepo, commit2.ID, "file-0") require.NoError(t, c.FinishCommit(dataRepo, commit2.ID)) // Create a third commit that re-adds the file removed in commit2 commit3, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit3.ID, "file-0", strings.NewReader(strings.Repeat("foo\n", 100))) require.NoError(t, c.FinishCommit(dataRepo, commit3.ID)) commitIter, err = c.FlushCommit([]*pfs.Commit{commit3}, nil) require.NoError(t, err) commitInfos = collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) jobs, err = c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 3, len(jobs)) // Block on the job being complete before we call ListDatum _, err = c.InspectJob(jobs[0].Job.ID, true) require.NoError(t, err) resp, err = c.ListDatum(jobs[0].Job.ID, 0, 0) require.NoError(t, err) require.Equal(t, numFiles, 
len(resp.DatumInfos)) var states []interface{} for _, datum := range resp.DatumInfos { require.Equal(t, pps.DatumState_SKIPPED, datum.State) states = append(states, datum.State) } } func TestPipelineOnStatsBranch(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineOnStatsBranch_data") require.NoError(t, c.CreateRepo(dataRepo)) commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) pipeline1, pipeline2 := tu.UniqueString("TestPipelineOnStatsBranch1"), tu.UniqueString("TestPipelineOnStatsBranch2") _, err = c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline1), Transform: &pps.Transform{ Cmd: []string{"bash", "-c", "cp -r $(ls -d /pfs/*|grep -v /pfs/out) /pfs/out"}, }, Input: client.NewPFSInput(dataRepo, "/*"), EnableStats: true, }) require.NoError(t, err) _, err = c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline2), Transform: &pps.Transform{ Cmd: []string{"bash", "-c", "cp -r $(ls -d /pfs/*|grep -v /pfs/out) /pfs/out"}, }, Input: &pps.Input{ Pfs: &pps.PFSInput{ Repo: pipeline1, Branch: "stats", Glob: "/*", }, }, EnableStats: true, }) require.NoError(t, err) jobInfos, err := c.FlushJobAll([]*pfs.Commit{commit}, nil) require.NoError(t, err) require.Equal(t, 2, len(jobInfos)) for _, ji := range jobInfos { require.Equal(t, ji.State.String(), pps.JobState_JOB_SUCCESS.String()) } } func TestSkippedDatums(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) // create pipeline pipelineName := tu.UniqueString("pipeline") // require.NoError(t, c.CreatePipeline( _, err := c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipelineName), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, Input: client.NewPFSInput(dataRepo, "/*"), EnableStats: true, }) require.NoError(t, err) // Do first commit to repo commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) jis, err := c.FlushJobAll([]*pfs.Commit{client.NewCommit(dataRepo, commit1.ID)}, nil) require.NoError(t, err) require.Equal(t, 1, len(jis)) ji := jis[0] require.Equal(t, ji.State, pps.JobState_JOB_SUCCESS) var buffer bytes.Buffer require.NoError(t, c.GetFile(ji.OutputCommit.Repo.Name, ji.OutputCommit.ID, "file", 0, 0, &buffer)) require.Equal(t, "foo\n", buffer.String()) // Do second commit to repo commit2, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit2.ID, "file2", strings.NewReader("bar\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit2.ID)) jis, err = c.FlushJobAll([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) require.Equal(t, 1, len(jis)) ji = 
jis[0] require.Equal(t, ji.State, pps.JobState_JOB_SUCCESS) /* jobs, err := c.ListJob(pipelineName, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 2, len(jobs)) datums, err := c.ListDatum(jobs[1].Job.ID) fmt.Printf("got datums: %v\n", datums) require.NoError(t, err) require.Equal(t, 2, len(datums)) datum, err := c.InspectDatum(jobs[1].Job.ID, datums[0].ID) require.NoError(t, err) require.Equal(t, pps.DatumState_SUCCESS, datum.State) */ } func TestOpencvDemo(t *testing.T) { t.Skip("flaky") if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) require.NoError(t, c.CreateRepo("images")) commit, err := c.StartCommit("images", "master") require.NoError(t, err) require.NoError(t, c.PutFileURL("images", "master", "46Q8nDz.jpg", "http://imgur.com/46Q8nDz.jpg", false, false)) require.NoError(t, c.FinishCommit("images", "master")) bytes, err := ioutil.ReadFile("../../examples/opencv/edges.json") require.NoError(t, err) createPipelineRequest := &pps.CreatePipelineRequest{} require.NoError(t, json.Unmarshal(bytes, createPipelineRequest)) _, err = c.PpsAPIClient.CreatePipeline(context.Background(), createPipelineRequest) require.NoError(t, err) bytes, err = ioutil.ReadFile("../../examples/opencv/montage.json") require.NoError(t, err) createPipelineRequest = &pps.CreatePipelineRequest{} require.NoError(t, json.Unmarshal(bytes, createPipelineRequest)) _, err = c.PpsAPIClient.CreatePipeline(context.Background(), createPipelineRequest) require.NoError(t, err) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) } func TestCronPipeline(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) t.Run("SimpleCron", func(t *testing.T) { pipeline1 := tu.UniqueString("cron1-") require.NoError(t, c.CreatePipeline( pipeline1, "", []string{"/bin/bash"}, []string{"cp /pfs/time/* /pfs/out/"}, nil, client.NewCronInput("time", "@every 20s"), "", false, )) pipeline2 := tu.UniqueString("cron2-") require.NoError(t, c.CreatePipeline( pipeline2, "", []string{"/bin/bash"}, []string{"cp " + fmt.Sprintf("/pfs/%s/*", pipeline1) + " /pfs/out/"}, nil, client.NewPFSInput(pipeline1, "/*"), "", false, )) // subscribe to the pipeline1 cron repo and wait for inputs repo := fmt.Sprintf("%s_%s", pipeline1, "time") ctx, cancel := context.WithTimeout(context.Background(), time.Second*120) defer cancel() //cleanup resources iter, err := c.WithCtx(ctx).SubscribeCommit(repo, "master", nil, "", pfs.CommitState_STARTED) require.NoError(t, err) // We'll look at three commits - with one created in each tick // We expect the first commit to have 1 file, the second to have 2 files, etc... 
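	// Without Overwrite, each cron tick adds another timestamp file to the cron input repo,
	// so the file count should grow by one per tick (compare the CronOverwrite case below).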
for i := 1; i <= 3; i++ { commitInfo, err := iter.Next() require.NoError(t, err) commitIter, err := c.FlushCommit([]*pfs.Commit{commitInfo.Commit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) for _, ci := range commitInfos { files, err := c.ListFile(ci.Commit.Repo.Name, ci.Commit.ID, "") require.NoError(t, err) require.Equal(t, i, len(files)) } } }) // Test a CronInput with the overwrite flag set to true t.Run("CronOverwrite", func(t *testing.T) { pipeline3 := tu.UniqueString("cron3-") overwriteInput := client.NewCronInput("time", "@every 20s") overwriteInput.Cron.Overwrite = true require.NoError(t, c.CreatePipeline( pipeline3, "", []string{"/bin/bash"}, []string{"cp /pfs/time/* /pfs/out/"}, nil, overwriteInput, "", false, )) repo := fmt.Sprintf("%s_%s", pipeline3, "time") ctx, cancel := context.WithTimeout(context.Background(), time.Second*120) defer cancel() //cleanup resources iter, err := c.WithCtx(ctx).SubscribeCommit(repo, "master", nil, "", pfs.CommitState_STARTED) require.NoError(t, err) // We'll look at three commits - with one created in each tick // We expect each of the commits to have just a single file in this case for i := 1; i <= 3; i++ { commitInfo, err := iter.Next() require.NoError(t, err) commitIter, err := c.FlushCommit([]*pfs.Commit{commitInfo.Commit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) for _, ci := range commitInfos { files, err := c.ListFile(ci.Commit.Repo.Name, ci.Commit.ID, "") require.NoError(t, err) require.Equal(t, 1, len(files)) } } }) // Create a non-cron input repo, and test a pipeline with a cross of cron and // non-cron inputs t.Run("CronPFSCross", func(t *testing.T) { dataRepo := tu.UniqueString("TestCronPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) pipeline4 := tu.UniqueString("cron4-") require.NoError(t, c.CreatePipeline( pipeline4, "", []string{"bash"}, []string{ "cp /pfs/time/time /pfs/out/time", fmt.Sprintf("cp /pfs/%s/file /pfs/out/file", dataRepo), }, nil, client.NewCrossInput( client.NewCronInput("time", "@every 20s"), client.NewPFSInput(dataRepo, "/"), ), "", false, )) dataCommit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("file")) require.NoError(t, c.FinishCommit(dataRepo, "master")) repo := fmt.Sprintf("%s_%s", pipeline4, "time") ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() //cleanup resources iter, err := c.WithCtx(ctx).SubscribeCommit(repo, "master", nil, "", pfs.CommitState_STARTED) require.NoError(t, err) commitInfo, err := iter.Next() require.NoError(t, err) commitIter, err := c.FlushCommit([]*pfs.Commit{dataCommit, commitInfo.Commit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) }) } func TestSelfReferentialPipeline(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) pipeline := tu.UniqueString("pipeline") require.YesError(t, c.CreatePipeline( pipeline, "", []string{"true"}, nil, nil, client.NewPFSInput(pipeline, "/"), "", false, )) } func TestPipelineBadImage(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) pipeline1 := tu.UniqueString("bad_pipeline_1_") require.NoError(t, 
c.CreatePipeline( pipeline1, "BadImage", []string{"true"}, nil, nil, client.NewCronInput("time", "@every 20s"), "", false, )) pipeline2 := tu.UniqueString("bad_pipeline_2_") require.NoError(t, c.CreatePipeline( pipeline2, "bs/badimage:vcrap", []string{"true"}, nil, nil, client.NewCronInput("time", "@every 20s"), "", false, )) require.NoError(t, backoff.Retry(func() error { for _, pipeline := range []string{pipeline1, pipeline2} { pipelineInfo, err := c.InspectPipeline(pipeline) if err != nil { return err } if pipelineInfo.State != pps.PipelineState_PIPELINE_FAILURE { return fmt.Errorf("pipeline %s should have failed", pipeline) } require.True(t, pipelineInfo.Reason != "") } return nil }, backoff.NewTestingBackOff())) } func TestFixPipeline(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestFixPipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) _, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("1")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) pipelineName := tu.UniqueString("TestFixPipeline_pipeline") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"exit 1"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) require.NoError(t, backoff.Retry(func() error { jobInfos, err := c.ListJob(pipelineName, nil, nil, -1, true) require.NoError(t, err) if len(jobInfos) != 1 { return fmt.Errorf("expected 1 jobs, got %d", len(jobInfos)) } jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State) return nil }, backoff.NewTestingBackOff())) // Update the pipeline, this will not create a new pipeline as reprocess // isn't set to true. 
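	// The trailing 'true' argument to CreatePipeline marks this call as an update of the
	// existing pipeline rather than the creation of a new one.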
require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"bash"}, []string{"echo bar >/pfs/out/file"}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", true, )) require.NoError(t, backoff.Retry(func() error { jobInfos, err := c.ListJob(pipelineName, nil, nil, -1, true) require.NoError(t, err) if len(jobInfos) != 2 { return fmt.Errorf("expected 2 jobs, got %d", len(jobInfos)) } jobInfo, err := c.InspectJob(jobInfos[0].Job.ID, true) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State) return nil }, backoff.NewTestingBackOff())) } func TestListJobOutput(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) dataRepo := tu.UniqueString("TestListJobOutput_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, &pps.ParallelismSpec{ Constant: 4, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) require.NoError(t, backoff.Retry(func() error { jobInfos, err := c.ListJob("", nil, commitInfos[0].Commit, -1, true) if err != nil { return err } if len(jobInfos) != 1 { return fmt.Errorf("expected 1 job") } jobInfos, err = c.ListJob("", nil, client.NewCommit(pipeline, "master"), -1, true) if err != nil { return err } if len(jobInfos) != 1 { return fmt.Errorf("expected 1 job") } return nil }, backoff.NewTestingBackOff())) } func TestListJobTruncated(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) dataRepo := tu.UniqueString("TestListJobTruncated_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, nil, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) require.NoError(t, backoff.Retry(func() error { var liteJobInfos, fullJobInfos []*pps.JobInfo liteJobInfos, err := c.ListJob("", nil, commitInfos[0].Commit, 0, false) if err != nil { return err } fullJobInfos, err = c.ListJob("", nil, commitInfos[0].Commit, 0, true) if err != nil { return err } if len(liteJobInfos) != 1 { return fmt.Errorf("expected 1 job from truncated ListJob") } if len(fullJobInfos) != 1 { return fmt.Errorf("expected 1 job from ListJob") } // Check that fields stored in PFS are missing, but fields stored in etcd // are not require.Nil(t, liteJobInfos[0].Transform) require.Nil(t, liteJobInfos[0].Input) require.Equal(t, pipeline, liteJobInfos[0].Pipeline.Name) // Check that all fields are present require.NotNil(t, 
fullJobInfos[0].Transform) require.NotNil(t, fullJobInfos[0].Input) require.Equal(t, pipeline, fullJobInfos[0].Pipeline.Name) return nil }, backoff.NewTestingBackOff())) } func TestPipelineEnvVarAlias(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineEnvVarAlias_data") require.NoError(t, c.CreateRepo(dataRepo)) numFiles := 10 commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%d", i), strings.NewReader(fmt.Sprintf("%d", i))) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "env", fmt.Sprintf("cp $%s /pfs/out/", dataRepo), }, nil, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) for i := 0; i < numFiles; i++ { var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file-%d", i), 0, 0, &buf)) require.Equal(t, fmt.Sprintf("%d", i), buf.String()) } } func TestMaxQueueSize(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestMaxQueueSize_input") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) for i := 0; i < 20; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo")) require.NoError(t, err) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("TestMaxQueueSize_output") // This pipeline sleeps for 10 secs per datum _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ "sleep 5", }, }, Input: client.NewPFSInput(dataRepo, "/*"), ParallelismSpec: &pps.ParallelismSpec{ Constant: 2, }, MaxQueueSize: 1, ChunkSpec: &pps.ChunkSpec{ Number: 10, }, }) require.NoError(t, err) var jobInfo *pps.JobInfo for i := 0; i < 10; i++ { require.NoError(t, backoff.Retry(func() error { jobs, err := c.ListJob(pipeline, nil, nil, -1, true) if err != nil { return fmt.Errorf("could not list job: %s", err.Error()) } if len(jobs) == 0 { return fmt.Errorf("failed to find job") } jobInfo, err = c.InspectJob(jobs[0].Job.ID, false) if err != nil { return fmt.Errorf("could not inspect job: %s", err.Error()) } if len(jobInfo.WorkerStatus) != 2 { return fmt.Errorf("incorrect number of statuses: %v", len(jobInfo.WorkerStatus)) } return nil }, backoff.RetryEvery(500*time.Millisecond).For(60*time.Second))) for _, status := range jobInfo.WorkerStatus { if status.QueueSize > 1 { t.Fatalf("queue size too big: %d", status.QueueSize) } } time.Sleep(500 * time.Millisecond) } } func TestHTTPAuth(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) clientAddr := c.GetAddress() host, _, err := net.SplitHostPort(clientAddr) port, ok := os.LookupEnv("PACHD_SERVICE_PORT_API_HTTP_PORT") if !ok { port = "30652" // default NodePort port for Pachd's HTTP API } httpAPIAddr := 
net.JoinHostPort(host, port) // Try to login token := "abbazabbadoo" form := url.Values{} form.Add("Token", token) req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/v1/auth/login", httpAPIAddr), strings.NewReader(form.Encode())) req.Header.Add("Content-Type", "application/x-www-form-urlencoded") require.NoError(t, err) httpClient := &http.Client{} resp, err := httpClient.Do(req) require.NoError(t, err) defer resp.Body.Close() require.Equal(t, 1, len(resp.Cookies())) require.Equal(t, auth.ContextTokenKey, resp.Cookies()[0].Name) require.Equal(t, "*", resp.Header.Get("Access-Control-Allow-Origin")) require.Equal(t, token, resp.Cookies()[0].Value) // Try to logout req, err = http.NewRequest("POST", fmt.Sprintf("http://%s/v1/auth/logout", httpAPIAddr), nil) require.NoError(t, err) resp, err = httpClient.Do(req) require.NoError(t, err) defer resp.Body.Close() require.Equal(t, 1, len(resp.Cookies())) require.Equal(t, auth.ContextTokenKey, resp.Cookies()[0].Name) require.Equal(t, "*", resp.Header.Get("Access-Control-Allow-Origin")) // The cookie should be unset now require.Equal(t, "", resp.Cookies()[0].Value) // Make sure we get 404s for non existent routes req, err = http.NewRequest("POST", fmt.Sprintf("http://%s/v1/auth/logoutzz", httpAPIAddr), nil) require.NoError(t, err) resp, err = httpClient.Do(req) require.NoError(t, err) require.Equal(t, 404, resp.StatusCode) } func TestHTTPGetFile(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) dataRepo := tu.UniqueString("TestHTTPGetFile_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) f, err := os.Open("../../etc/testing/artifacts/giphy.gif") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "giphy.gif", f) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) clientAddr := c.GetAddress() host, _, err := net.SplitHostPort(clientAddr) port, ok := os.LookupEnv("PACHD_SERVICE_PORT_API_HTTP_PORT") if !ok { port = "30652" // default NodePort port for Pachd's HTTP API } httpAPIAddr := net.JoinHostPort(host, port) // Try to get raw contents resp, err := http.Get(fmt.Sprintf("http://%s/v1/pfs/repos/%v/commits/%v/files/file", httpAPIAddr, dataRepo, commit1.ID)) require.NoError(t, err) defer resp.Body.Close() contents, err := ioutil.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, "foo", string(contents)) contentDisposition := resp.Header.Get("Content-Disposition") require.Equal(t, "", contentDisposition) // Try to get file for downloading resp, err = http.Get(fmt.Sprintf("http://%s/v1/pfs/repos/%v/commits/%v/files/file?download=true", httpAPIAddr, dataRepo, commit1.ID)) require.NoError(t, err) defer resp.Body.Close() contents, err = ioutil.ReadAll(resp.Body) require.NoError(t, err) require.Equal(t, "foo", string(contents)) contentDisposition = resp.Header.Get("Content-Disposition") require.Equal(t, "attachment; filename=\"file\"", contentDisposition) // Make sure MIME type is set resp, err = http.Get(fmt.Sprintf("http://%s/v1/pfs/repos/%v/commits/%v/files/giphy.gif", httpAPIAddr, dataRepo, commit1.ID)) require.NoError(t, err) defer resp.Body.Close() contentDisposition = resp.Header.Get("Content-Type") require.Equal(t, "image/gif", contentDisposition) } func TestService(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) 
require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestService_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") _, err = c.PutFile(dataRepo, commit1.ID, "file1", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) annotations := map[string]string{"foo": "bar"} pipeline := tu.UniqueString("pipelineservice") // This pipeline sleeps for 10 secs per datum require.NoError(t, c.CreatePipelineService( pipeline, "trinitronx/python-simplehttpserver", []string{"sh"}, []string{ "cd /pfs", "exec python -m SimpleHTTPServer 8000", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/"), false, 8000, 31800, annotations, )) time.Sleep(10 * time.Second) // Lookup the address for 'pipelineservice' (different inside vs outside k8s) serviceAddr := func() string { // Hack: detect if running inside the cluster by looking for this env var if _, ok := os.LookupEnv("KUBERNETES_PORT"); !ok { // Outside cluster: Re-use external IP and external port defined above clientAddr := c.GetAddress() host, _, err := net.SplitHostPort(clientAddr) require.NoError(t, err) return net.JoinHostPort(host, "31800") } // Get k8s service corresponding to pachyderm service above--must access // via internal cluster IP, but we don't know what that is var address string kubeClient := tu.GetKubeClient(t) backoff.Retry(func() error { svcs, err := kubeClient.CoreV1().Services("default").List(metav1.ListOptions{}) require.NoError(t, err) for _, svc := range svcs.Items { // Pachyderm actually generates two services for pipelineservice: one // for pachyderm (a ClusterIP service) and one for the user container // (a NodePort service, which is the one we want) rightName := strings.Contains(svc.Name, "pipelineservice") rightType := svc.Spec.Type == v1.ServiceTypeNodePort if !rightName || !rightType { continue } host := svc.Spec.ClusterIP port := fmt.Sprintf("%d", svc.Spec.Ports[0].Port) address = net.JoinHostPort(host, port) actualAnnotations := svc.Annotations delete(actualAnnotations, "pipelineName") if !reflect.DeepEqual(actualAnnotations, annotations) { return fmt.Errorf( "expected service annotations map %#v, got %#v", annotations, actualAnnotations, ) } return nil } return fmt.Errorf("no matching k8s service found") }, backoff.NewTestingBackOff()) require.NotEqual(t, "", address) return address }() require.NoError(t, backoff.Retry(func() error { resp, err := http.Get(fmt.Sprintf("http://%s/%s/file1", serviceAddr, dataRepo)) if err != nil { return err } if resp.StatusCode != 200 { return fmt.Errorf("GET returned %d", resp.StatusCode) } content, err := ioutil.ReadAll(resp.Body) if err != nil { return err } if string(content) != "foo" { return fmt.Errorf("wrong content for file1: expected foo, got %s", string(content)) } return nil }, backoff.NewTestingBackOff())) clientAddr := c.GetAddress() host, _, err := net.SplitHostPort(clientAddr) port, ok := os.LookupEnv("PACHD_SERVICE_PORT_API_HTTP_PORT") if !ok { port = "30652" // default NodePort port for Pachd's HTTP API } httpAPIAddr := net.JoinHostPort(host, port) url := fmt.Sprintf("http://%s/v1/pps/services/%s/%s/file1", httpAPIAddr, pipeline, dataRepo) require.NoError(t, backoff.Retry(func() error { resp, err := http.Get(url) if err != nil { return err } if resp.StatusCode != 200 { return fmt.Errorf("GET returned %d", resp.StatusCode) } content, err := ioutil.ReadAll(resp.Body) if err != nil { return err } if string(content) != "foo" { return fmt.Errorf("wrong 
content for file1: expected foo, got %s", string(content)) } return nil }, backoff.NewTestingBackOff())) commit2, err := c.StartCommit(dataRepo, "master") _, err = c.PutFile(dataRepo, commit2.ID, "file2", strings.NewReader("bar")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit2.ID)) require.NoError(t, backoff.Retry(func() error { resp, err := http.Get(fmt.Sprintf("http://%s/%s/file2", serviceAddr, dataRepo)) if err != nil { return err } if resp.StatusCode != 200 { return fmt.Errorf("GET returned %d", resp.StatusCode) } content, err := ioutil.ReadAll(resp.Body) if err != nil { return err } if string(content) != "bar" { return fmt.Errorf("wrong content for file2: expected bar, got %s", string(content)) } return nil }, backoff.NewTestingBackOff())) } func TestChunkSpec(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestChunkSpec_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) numFiles := 101 for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo")) require.NoError(t, err) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) t.Run("number", func(t *testing.T) { pipeline := tu.UniqueString("TestChunkSpec") c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, }, Input: client.NewPFSInput(dataRepo, "/*"), ChunkSpec: &pps.ChunkSpec{Number: 1}, }) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) for i := 0; i < numFiles; i++ { var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file%d", i), 0, 0, &buf)) require.Equal(t, "foo", buf.String()) } }) t.Run("size", func(t *testing.T) { pipeline := tu.UniqueString("TestChunkSpec") c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, }, Input: client.NewPFSInput(dataRepo, "/*"), ChunkSpec: &pps.ChunkSpec{SizeBytes: 5}, }) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) for i := 0; i < numFiles; i++ { var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file%d", i), 0, 0, &buf)) require.Equal(t, "foo", buf.String()) } }) } func TestLongDatums(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestLongDatums_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) numFiles := 8 for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file%d", i), strings.NewReader("foo")) require.NoError(t, err) } require.NoError(t, 
c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("TestLongDatums") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "sleep 1m", fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, &pps.ParallelismSpec{ Constant: 4, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) for i := 0; i < numFiles; i++ { var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, fmt.Sprintf("file%d", i), 0, 0, &buf)) require.Equal(t, "foo", buf.String()) } } func TestPipelineWithGitInputInvalidURLs(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) outputFilename := "commitSHA" pipeline := tu.UniqueString("github_pipeline") // Of the common git URL types (listed below), only the 'clone' url is supported RN // (for several reasons, one of which is that we can't assume we have SSH / an ssh env setup on the user container) //git_url: "git://github.com/sjezewski/testgithook.git", //ssh_url: "[email protected]:sjezewski/testgithook.git", //svn_url: "https://github.com/sjezewski/testgithook", //clone_url: "https://github.com/sjezewski/testgithook.git", require.YesError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename), }, nil, &pps.Input{ Git: &pps.GitInput{ URL: "git://github.com/pachyderm/test-artifacts.git", }, }, "", false, )) require.YesError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename), }, nil, &pps.Input{ Git: &pps.GitInput{ URL: "[email protected]:pachyderm/test-artifacts.git", }, }, "", false, )) require.YesError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename), }, nil, &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com:pachyderm/test-artifacts", }, }, "", false, )) } func TestPipelineWithGitInputPrivateGHRepo(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) outputFilename := "commitSHA" pipeline := tu.UniqueString("github_pipeline") repoName := "pachyderm-dummy" require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename), }, nil, &pps.Input{ Git: &pps.GitInput{ URL: fmt.Sprintf("https://github.com/pachyderm/%v.git", repoName), }, }, "", false, )) // There should be a pachyderm repo created w no commits: repos, err := c.ListRepo() require.NoError(t, err) found := false for _, repo := range repos { if repo.Repo.Name == repoName { found = true } } require.Equal(t, true, found) // To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/private.json") // Need to sleep since the webhook http handler is non blocking time.Sleep(2 * time.Second) // Now there should NOT be a new commit on the pachyderm repo commits, err := c.ListCommit(repoName, "master", "", 0) require.NoError(t, err) require.Equal(t, 0, len(commits)) // We should see that the pipeline has failed 
pipelineInfo, err := c.InspectPipeline(pipeline) require.NoError(t, err) require.Equal(t, pps.PipelineState_PIPELINE_FAILURE, pipelineInfo.State) require.Equal(t, fmt.Sprintf("unable to clone private github repo (https://github.com/pachyderm/%v.git)", repoName), pipelineInfo.Reason) } func TestPipelineWithGitInputDuplicateNames(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) outputFilename := "commitSHA" pipeline := tu.UniqueString("github_pipeline") //Test same name on one pipeline require.YesError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/pachyderm/.git/HEAD > /pfs/out/%v", outputFilename), }, nil, &pps.Input{ Cross: []*pps.Input{ &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", Name: "foo", }, }, &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", Name: "foo", }, }, }, }, "", false, )) //Test same URL on one pipeline require.YesError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/pachyderm/.git/HEAD > /pfs/out/%v", outputFilename), }, nil, &pps.Input{ Cross: []*pps.Input{ &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", }, }, &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", }, }, }, }, "", false, )) // Test same URL but different names require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/pachyderm/.git/HEAD > /pfs/out/%v", outputFilename), }, nil, &pps.Input{ Cross: []*pps.Input{ &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", Name: "foo", }, }, &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", }, }, }, }, "", false, )) } func TestPipelineWithGitInput(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) outputFilename := "commitSHA" pipeline := tu.UniqueString("github_pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename), }, nil, &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", }, }, "", false, )) // There should be a pachyderm repo created w no commits: _, err := c.InspectRepo("test-artifacts") require.NoError(t, err) commits, err := c.ListCommit("test-artifacts", "master", "", 0) require.NoError(t, err) require.Equal(t, 0, len(commits)) // To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json") // Need to sleep since the webhook http handler is non blocking time.Sleep(2 * time.Second) // Now there should be a new commit on the pachyderm repo / master branch branches, err := c.ListBranch("test-artifacts") require.NoError(t, err) require.Equal(t, 1, len(branches)) require.Equal(t, "master", branches[0].Name) commit := branches[0].Head // Now wait for the pipeline complete as normal outputRepo := client.NewRepo(pipeline) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, []*pfs.Repo{outputRepo}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) commit = commitInfos[0].Commit var buf bytes.Buffer 
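	// The pipeline wrote the contents of .git/HEAD from the cloned input, so the output
	// file should hold the SHA of the commit pushed in the simulated webhook payload.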
require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf)) require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String())) } func TestPipelineWithGitInputSequentialPushes(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) outputFilename := "commitSHA" pipeline := tu.UniqueString("github_pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename), }, nil, &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", }, }, "", false, )) // There should be a pachyderm repo created w no commits: _, err := c.InspectRepo("test-artifacts") require.NoError(t, err) commits, err := c.ListCommit("test-artifacts", "master", "", 0) require.NoError(t, err) require.Equal(t, 0, len(commits)) // To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json") // Need to sleep since the webhook http handler is non blocking time.Sleep(2 * time.Second) // Now there should be a new commit on the pachyderm repo / master branch branches, err := c.ListBranch("test-artifacts") require.NoError(t, err) require.Equal(t, 1, len(branches)) require.Equal(t, "master", branches[0].Name) commit := branches[0].Head // Now wait for the pipeline complete as normal commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) commit = commitInfos[0].Commit var buf bytes.Buffer require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf)) require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String())) // To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master-2.json") // Need to sleep since the webhook http handler is non blocking time.Sleep(2 * time.Second) // Now there should be a new commit on the pachyderm repo / master branch branches, err = c.ListBranch("test-artifacts") require.NoError(t, err) require.Equal(t, 1, len(branches)) require.Equal(t, "master", branches[0].Name) commit = branches[0].Head // Now wait for the pipeline complete as normal commitIter, err = c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) commitInfos = collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) commit = commitInfos[0].Commit buf.Reset() require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf)) require.Equal(t, "162963b4adf00cd378488abdedc085ba08e21674", strings.TrimSpace(buf.String())) } func TestPipelineWithGitInputCustomName(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) outputFilename := "commitSHA" pipeline := tu.UniqueString("github_pipeline") repoName := "foo" require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename), }, nil, &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", Name: repoName, }, }, "", false, )) // There should be a pachyderm repo created 
w no commits: _, err := c.InspectRepo(repoName) require.NoError(t, err) commits, err := c.ListCommit(repoName, "", "", 0) require.NoError(t, err) require.Equal(t, 0, len(commits)) // To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json") // Need to sleep since the webhook http handler is non blocking time.Sleep(2 * time.Second) // Now there should be a new commit on the pachyderm repo / master branch branches, err := c.ListBranch(repoName) require.NoError(t, err) require.Equal(t, 1, len(branches)) require.Equal(t, "master", branches[0].Name) commit := branches[0].Head // Now wait for the pipeline complete as normal outputRepo := client.NewRepo(pipeline) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, []*pfs.Repo{outputRepo}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) commit = commitInfos[0].Commit var buf bytes.Buffer require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf)) require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String())) } func TestPipelineWithGitInputMultiPipelineSeparateInputs(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) outputFilename := "commitSHA" repos := []string{"pachyderm", "foo"} pipelines := []string{ tu.UniqueString("github_pipeline_a_"), tu.UniqueString("github_pipeline_b_"), } for i, repoName := range repos { require.NoError(t, c.CreatePipeline( pipelines[i], "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename), }, nil, &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", Name: repoName, }, }, "", false, )) // There should be a pachyderm repo created w no commits: repos, err := c.ListRepo() require.NoError(t, err) found := false for _, repo := range repos { if repo.Repo.Name == repoName { found = true } } require.Equal(t, true, found) commits, err := c.ListCommit(repoName, "", "", 0) require.NoError(t, err) require.Equal(t, 0, len(commits)) } // To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json") // Need to sleep since the webhook http handler is non blocking time.Sleep(2 * time.Second) for i, repoName := range repos { // Now there should be a new commit on the pachyderm repo / master branch branches, err := c.ListBranch(repoName) require.NoError(t, err) require.Equal(t, 1, len(branches)) require.Equal(t, "master", branches[0].Name) commit := branches[0].Head // Now wait for the pipeline complete as normal outputRepo := client.NewRepo(pipelines[i]) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, []*pfs.Repo{outputRepo}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) commit = commitInfos[0].Commit var buf bytes.Buffer require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf)) require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String())) } } func TestPipelineWithGitInputMultiPipelineSameInput(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) 
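	// NOTE: the git-input tests in this file rely on a simulateGitPush helper that is
	// defined elsewhere in this package. As a rough, hypothetical sketch (the endpoint,
	// port, and headers below are assumptions, not taken from this file), it POSTs the
	// recorded GitHub webhook payload to pachd's githook service, roughly:
	//
	//	payload, err := ioutil.ReadFile(payloadPath) // payloadPath is hypothetical
	//	require.NoError(t, err)
	//	req, err := http.NewRequest("POST", githookURL, bytes.NewReader(payload)) // githookURL is hypothetical
	//	require.NoError(t, err)
	//	req.Header.Set("Content-Type", "application/json")
	//	req.Header.Set("X-GitHub-Event", "push")
	//	resp, err := http.DefaultClient.Do(req)
	//	require.NoError(t, err)
	//	require.Equal(t, http.StatusOK, resp.StatusCode)
	//
	// Because the githook handler is non-blocking, these tests sleep briefly after each
	// simulated push before asserting on the resulting commits.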
outputFilename := "commitSHA" repos := []string{"test-artifacts", "test-artifacts"} pipelines := []string{ tu.UniqueString("github_pipeline_a_"), tu.UniqueString("github_pipeline_b_"), } for i, repoName := range repos { require.NoError(t, c.CreatePipeline( pipelines[i], "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/%v/.git/HEAD > /pfs/out/%v", repoName, outputFilename), }, nil, &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", }, }, "", false, )) // There should be a pachyderm repo created w no commits: repos, err := c.ListRepo() require.NoError(t, err) found := false for _, repo := range repos { if repo.Repo.Name == repoName { found = true } } require.Equal(t, true, found) commits, err := c.ListCommit(repoName, "", "", 0) require.NoError(t, err) require.Equal(t, 0, len(commits)) } // To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json") // Need to sleep since the webhook http handler is non blocking time.Sleep(2 * time.Second) // Now there should be a new commit on the pachyderm repo / master branch branches, err := c.ListBranch(repos[0]) require.NoError(t, err) require.Equal(t, 1, len(branches)) require.Equal(t, "master", branches[0].Name) commit := branches[0].Head // Now wait for the pipeline complete as normal commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) commit = commitInfos[0].Commit for _, commitInfo := range commitInfos { commit = commitInfo.Commit var buf bytes.Buffer require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf)) require.Equal(t, "9047fbfc251e7412ef3300868f743f2c24852539", strings.TrimSpace(buf.String())) } } func TestPipelineWithGitInputAndBranch(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) branchName := "foo" outputFilename := "commitSHA" pipeline := tu.UniqueString("github_pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cat /pfs/test-artifacts/.git/HEAD > /pfs/out/%v", outputFilename), }, nil, &pps.Input{ Git: &pps.GitInput{ URL: "https://github.com/pachyderm/test-artifacts.git", Branch: branchName, }, }, "", false, )) // There should be a pachyderm repo created w no commits: _, err := c.InspectRepo("test-artifacts") require.NoError(t, err) // Make sure a push to master does NOT trigger this pipeline simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/master.json") // Need to sleep since the webhook http handler is non blocking time.Sleep(5 * time.Second) // Now there should be a new commit on the pachyderm repo / master branch _, err = c.InspectBranch("test-artifacts", "master") require.YesError(t, err) // To trigger the pipeline, we'll need to simulate the webhook by pushing a POST payload to the githook server simulateGitPush(t, "../../etc/testing/artifacts/githook-payloads/branch.json") // Need to sleep since the webhook http handler is non blocking time.Sleep(5 * time.Second) // Now there should be a new commit on the pachyderm repo / master branch branches, err := c.ListBranch("test-artifacts") require.NoError(t, err) require.Equal(t, 1, len(branches)) require.Equal(t, branchName, branches[0].Name) commit := branches[0].Head require.NotNil(t, commit) // Now wait for 
the pipeline complete as normal outputRepo := client.NewRepo(pipeline) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, []*pfs.Repo{outputRepo}) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) commit = commitInfos[0].Commit var buf bytes.Buffer require.NoError(t, c.GetFile(commit.Repo.Name, commit.ID, outputFilename, 0, 0, &buf)) require.Equal(t, "81269575dcfc6ac2e2a463ad8016163f79c97f5c", strings.TrimSpace(buf.String())) } func TestPipelineWithDatumTimeout(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineWithDatumTimeout_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) timeout := 20 pipeline := tu.UniqueString("pipeline") duration, err := time.ParseDuration(fmt.Sprintf("%vs", timeout)) require.NoError(t, err) _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ "while true; do sleep 1; date; done", fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, }, Input: client.NewPFSInput(dataRepo, "/*"), EnableStats: true, DatumTimeout: types.DurationProto(duration), }, ) require.NoError(t, err) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) jobs, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobs)) // Block on the job being complete before we call ListDatum jobInfo, err := c.InspectJob(jobs[0].Job.ID, true) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_FAILURE, jobInfo.State) // Now validate the datum timed out properly resp, err := c.ListDatum(jobs[0].Job.ID, 0, 0) require.NoError(t, err) require.Equal(t, 1, len(resp.DatumInfos)) datum, err := c.InspectDatum(jobs[0].Job.ID, resp.DatumInfos[0].Datum.ID) require.NoError(t, err) require.Equal(t, pps.DatumState_FAILED, datum.State) // ProcessTime looks like "20 seconds" tokens := strings.Split(pretty.Duration(datum.Stats.ProcessTime), " ") require.Equal(t, 2, len(tokens)) seconds, err := strconv.Atoi(tokens[0]) require.NoError(t, err) require.Equal(t, timeout, seconds) } func TestPipelineWithDatumTimeoutControl(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineWithDatumTimeoutControl_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) timeout := 20 pipeline := tu.UniqueString("pipeline") duration, err := time.ParseDuration(fmt.Sprintf("%vs", timeout)) require.NoError(t, err) _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ fmt.Sprintf("sleep %v", timeout-10), fmt.Sprintf("cp /pfs/%s/* 
/pfs/out/", dataRepo), }, }, Input: client.NewPFSInput(dataRepo, "/*"), DatumTimeout: types.DurationProto(duration), }, ) require.NoError(t, err) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) jobs, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobs)) // Block on the job being complete before we call ListDatum jobInfo, err := c.InspectJob(jobs[0].Job.ID, true) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State) } func TestPipelineWithJobTimeout(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineWithDatumTimeout_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) numFiles := 2 for i := 0; i < numFiles; i++ { _, err = c.PutFile(dataRepo, commit1.ID, fmt.Sprintf("file-%v", i), strings.NewReader("foo")) require.NoError(t, err) } require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) timeout := 20 pipeline := tu.UniqueString("pipeline") duration, err := time.ParseDuration(fmt.Sprintf("%vs", timeout)) require.NoError(t, err) _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{ fmt.Sprintf("sleep %v", timeout), // we have 2 datums, so the total exec time will more than double the timeout value fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, }, Input: client.NewPFSInput(dataRepo, "/*"), EnableStats: true, JobTimeout: types.DurationProto(duration), }, ) require.NoError(t, err) // Wait for the job to get scheduled / appear in listjob // A sleep of 15s is insufficient time.Sleep(25 * time.Second) jobs, err := c.ListJob(pipeline, nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobs)) // Block on the job being complete before we call ListDatum jobInfo, err := c.InspectJob(jobs[0].Job.ID, true) require.NoError(t, err) require.Equal(t, pps.JobState_JOB_KILLED.String(), jobInfo.State.String()) started, err := types.TimestampFromProto(jobInfo.Started) require.NoError(t, err) finished, err := types.TimestampFromProto(jobInfo.Finished) require.NoError(t, err) require.True(t, math.Abs((finished.Sub(started)-(time.Second*20)).Seconds()) <= 1.0) } func TestCommitDescription(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) defer cancel() dataRepo := tu.UniqueString("TestCommitDescription") require.NoError(t, c.CreateRepo(dataRepo)) // Test putting a message in StartCommit commit, err := c.PfsAPIClient.StartCommit(ctx, &pfs.StartCommitRequest{ Branch: "master", Parent: client.NewCommit(dataRepo, ""), Description: "test commit description in 'start commit'", }) require.NoError(t, err) c.FinishCommit(dataRepo, commit.ID) commitInfo, err := c.InspectCommit(dataRepo, commit.ID) require.NoError(t, err) require.Equal(t, "test commit description in 'start commit'", commitInfo.Description) require.NoError(t, pfspretty.PrintDetailedCommitInfo(pfspretty.NewPrintableCommitInfo(commitInfo))) // Test putting a message in FinishCommit commit, err = c.StartCommit(dataRepo, "master") 
	require.NoError(t, err)
	_, err = c.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
		Commit:      commit,
		Description: "test commit description in 'finish commit'",
	})
	require.NoError(t, err)
	commitInfo, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
	require.Equal(t, "test commit description in 'finish commit'", commitInfo.Description)
	require.NoError(t, pfspretty.PrintDetailedCommitInfo(pfspretty.NewPrintableCommitInfo(commitInfo)))

	// Test overwriting a commit message
	commit, err = c.PfsAPIClient.StartCommit(ctx, &pfs.StartCommitRequest{
		Branch:      "master",
		Parent:      client.NewCommit(dataRepo, ""),
		Description: "test commit description in 'start commit'",
	})
	require.NoError(t, err)
	_, err = c.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
		Commit:      commit,
		Description: "test commit description in 'finish commit' that overwrites",
	})
	require.NoError(t, err)
	commitInfo, err = c.InspectCommit(dataRepo, commit.ID)
	require.NoError(t, err)
	require.Equal(t, "test commit description in 'finish commit' that overwrites", commitInfo.Description)
	require.NoError(t, pfspretty.PrintDetailedCommitInfo(pfspretty.NewPrintableCommitInfo(commitInfo)))
}

func TestGetFileWithEmptyCommits(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())
	ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
	defer cancel()

	repoName := tu.UniqueString("TestGetFileWithEmptyCommits")
	require.NoError(t, c.CreateRepo(repoName))

	// Create a real commit in repoName/master
	commit, err := c.StartCommit(repoName, "master")
	require.NoError(t, err)
	_, err = c.PutFile(repoName, commit.ID, "/file", strings.NewReader("data contents"))
	require.NoError(t, err)
	require.NoError(t, c.FinishCommit(repoName, commit.ID))

	// Create an empty commit in repoName/master
	commit, err = c.StartCommit(repoName, "master")
	require.NoError(t, err)
	_, err = c.PfsAPIClient.FinishCommit(ctx, &pfs.FinishCommitRequest{
		Commit: commit,
		Empty:  true,
	})
	require.NoError(t, err)

	// We get a "file not found" error when we try to get a file from repoName/master
	buf := bytes.Buffer{}
	err = c.GetFile(repoName, "master", "/file", 0, 0, &buf)
	require.YesError(t, err)
	require.True(t, strings.Contains(err.Error(), "not found"))
}

func TestPipelineDescription(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	dataRepo := tu.UniqueString("TestPipelineDescription_data")
	require.NoError(t, c.CreateRepo(dataRepo))

	description := "pipeline description"
	pipeline := tu.UniqueString("TestPipelineDescription")
	_, err := c.PpsAPIClient.CreatePipeline(
		context.Background(),
		&pps.CreatePipelineRequest{
			Pipeline:    client.NewPipeline(pipeline),
			Transform:   &pps.Transform{Cmd: []string{"true"}},
			Description: description,
			Input:       client.NewPFSInput(dataRepo, "/"),
		})
	require.NoError(t, err)
	pi, err := c.InspectPipeline(pipeline)
	require.NoError(t, err)
	require.Equal(t, description, pi.Description)
}

func TestListJobInputCommits(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping integration tests in short mode")
	}
	c := getPachClient(t)
	require.NoError(t, c.DeleteAll())

	aRepo := tu.UniqueString("TestListJobInputCommits_data_a")
	require.NoError(t, c.CreateRepo(aRepo))
	bRepo := tu.UniqueString("TestListJobInputCommits_data_b")
	require.NoError(t, c.CreateRepo(bRepo))

	pipeline := tu.UniqueString("TestListJobInputCommits")
	require.NoError(t, c.CreatePipeline(
		pipeline,
		"",
		[]string{"bash"},
		[]string{
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", aRepo),
			fmt.Sprintf("cp /pfs/%s/* /pfs/out/", bRepo),
}, &pps.ParallelismSpec{ Constant: 1, }, client.NewCrossInput( client.NewPFSInput(aRepo, "/*"), client.NewPFSInput(bRepo, "/*"), ), "", false, )) commita1, err := c.StartCommit(aRepo, "master") require.NoError(t, err) _, err = c.PutFile(aRepo, "master", "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(aRepo, "master")) commitb1, err := c.StartCommit(bRepo, "master") require.NoError(t, err) _, err = c.PutFile(bRepo, "master", "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(bRepo, "master")) commitIter, err := c.FlushCommit([]*pfs.Commit{commita1, commitb1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) commita2, err := c.StartCommit(aRepo, "master") require.NoError(t, err) _, err = c.PutFile(aRepo, "master", "file", strings.NewReader("bar")) require.NoError(t, err) require.NoError(t, c.FinishCommit(aRepo, "master")) commitIter, err = c.FlushCommit([]*pfs.Commit{commita2, commitb1}, nil) require.NoError(t, err) commitInfos = collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) commitb2, err := c.StartCommit(bRepo, "master") require.NoError(t, err) _, err = c.PutFile(bRepo, "master", "file", strings.NewReader("bar")) require.NoError(t, err) require.NoError(t, c.FinishCommit(bRepo, "master")) commitIter, err = c.FlushCommit([]*pfs.Commit{commita2, commitb2}, nil) require.NoError(t, err) commitInfos = collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) jobInfos, err := c.ListJob("", []*pfs.Commit{commita1}, nil, -1, true) require.NoError(t, err) require.Equal(t, 2, len(jobInfos)) // a1 + nil and a1 + b1 jobInfos, err = c.ListJob("", []*pfs.Commit{commitb1}, nil, -1, true) require.NoError(t, err) require.Equal(t, 2, len(jobInfos)) // a1 + b1 and a2 + b1 jobInfos, err = c.ListJob("", []*pfs.Commit{commita2}, nil, -1, true) require.NoError(t, err) require.Equal(t, 2, len(jobInfos)) // a2 + b1 and a2 + b2 jobInfos, err = c.ListJob("", []*pfs.Commit{commitb2}, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) // a2 + b2 jobInfos, err = c.ListJob("", []*pfs.Commit{commita1, commitb1}, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) jobInfos, err = c.ListJob("", []*pfs.Commit{commita2, commitb1}, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) jobInfos, err = c.ListJob("", []*pfs.Commit{commita2, commitb2}, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) jobInfos, err = c.ListJob("", []*pfs.Commit{client.NewCommit(aRepo, "master"), client.NewCommit(bRepo, "master")}, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) } func TestManyJobs(t *testing.T) { t.Skip("This test is too long to be run as part of CI") if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestManyJobs_data") require.NoError(t, c.CreateRepo(dataRepo)) numPipelines := 10 for i := 0; i < numPipelines; i++ { pipeline := tu.UniqueString("TestManyJobs") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"true"}, []string{strings.Repeat("words ", 30), strings.Repeat("words ", 30), strings.Repeat("words ", 30), strings.Repeat("words ", 30), strings.Repeat("words ", 30), strings.Repeat("words ", 30)}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) } numCommits := 5000 for 
i := 0; i < numCommits; i++ { _, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) } commitIter, err := c.FlushCommit([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) _, err = c.ListJob("", nil, nil, -1, true) require.NoError(t, err) } // TestCancelJob creates a long-running job and then kills it, testing that the // user process is killed. func TestCancelJob(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // Create an input repo repo := tu.UniqueString("TestCancelJob") require.NoError(t, c.CreateRepo(repo)) // Create an input commit commit, err := c.StartCommit(repo, "master") require.NoError(t, err) _, err = c.PutFile(repo, commit.ID, "/time", strings.NewReader("600")) require.NoError(t, err) _, err = c.PutFile(repo, commit.ID, "/data", strings.NewReader("commit data")) require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, commit.ID)) // Create sleep + copy pipeline pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "sleep `cat /pfs/*/time`", "cp /pfs/*/data /pfs/out/", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(repo, "/"), "", false, )) // Wait until PPS has started processing commit var jobInfo *pps.JobInfo require.NoErrorWithinT(t, 30*time.Second, func() error { return backoff.Retry(func() error { jobInfos, err := c.ListJob(pipeline, []*pfs.Commit{commit}, nil, -1, true) if err != nil { return err } if len(jobInfos) != 1 { return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos) } jobInfo = jobInfos[0] return nil }, backoff.NewTestingBackOff()) }) // stop the job require.NoError(t, c.StopJob(jobInfo.Job.ID)) // Wait until the job is cancelled require.NoErrorWithinT(t, 30*time.Second, func() error { return backoff.Retry(func() error { updatedJobInfo, err := c.InspectJob(jobInfo.Job.ID, false) if err != nil { return err } if updatedJobInfo.State != pps.JobState_JOB_KILLED { return fmt.Errorf("job %s is still running, but should be KILLED", jobInfo.Job.ID) } return nil }, backoff.NewTestingBackOff()) }) // Create one more commit to make sure the pipeline can still process input // commits commit2, err := c.StartCommit(repo, "master") require.NoError(t, err) require.NoError(t, c.DeleteFile(repo, commit2.ID, "/time")) _, err = c.PutFile(repo, commit2.ID, "/time", strings.NewReader("1")) require.NoError(t, err) require.NoError(t, c.DeleteFile(repo, commit2.ID, "/data")) _, err = c.PutFile(repo, commit2.ID, "/data", strings.NewReader("commit 2 data")) require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, commit2.ID)) // Flush commit2, and make sure the output is as expected iter, err := c.FlushCommit([]*pfs.Commit{commit2}, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, iter) require.Equal(t, 1, len(commitInfos)) buf := bytes.Buffer{} err = c.GetFile(pipeline, commitInfos[0].Commit.ID, "/data", 0, 0, &buf) require.NoError(t, err) require.Equal(t, "commit 2 data", buf.String()) } // TestCancelManyJobs creates many jobs to test that the handling of many // incoming job events is correct. 
Each job comes up (which tests that that // cancelling job 'a' does not cancel subsequent job 'b'), must be the only job // running (which tests that only one job can run at a time), and then is // cancelled. func TestCancelManyJobs(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // Create an input repo repo := tu.UniqueString("TestCancelManyJobs") require.NoError(t, c.CreateRepo(repo)) // Create sleep pipeline pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"sleep", "600"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(repo, "/"), "", false, )) // Create 10 input commits, to spawn 10 jobs var commits [10]*pfs.Commit var err error for i := 0; i < 10; i++ { commits[i], err = c.StartCommit(repo, "master") require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, commits[i].ID)) } // For each expected job: watch to make sure the input job comes up, make // sure that it's the only job running, then cancel it for _, commit := range commits { // Wait until PPS has started processing commit var jobInfo *pps.JobInfo require.NoErrorWithinT(t, 30*time.Second, func() error { return backoff.Retry(func() error { jobInfos, err := c.ListJob(pipeline, []*pfs.Commit{commit}, nil, -1, true) if err != nil { return err } if len(jobInfos) != 1 { return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos) } jobInfo = jobInfos[0] return nil }, backoff.NewTestingBackOff()) }) // Stop the job require.NoError(t, c.StopJob(jobInfo.Job.ID)) // Check that the job is now killed require.NoErrorWithinT(t, 30*time.Second, func() error { return backoff.Retry(func() error { // TODO(msteffen): once github.com/pachyderm/pachyderm/pull/2642 is // submitted, change ListJob here to filter on commit1 as the input commit, // rather than inspecting the input in the test updatedJobInfo, err := c.InspectJob(jobInfo.Job.ID, false) if err != nil { return err } if updatedJobInfo.State != pps.JobState_JOB_KILLED { return fmt.Errorf("job %s is still running, but should be KILLED", jobInfo.Job.ID) } return nil }, backoff.NewTestingBackOff()) }) } } // TestDeleteCommitPropagation deletes an input commit and makes sure all // downstream commits are also deleted. 
// DAG in this test: repo -> pipeline[0] -> pipeline[1] func TestDeleteCommitPropagation(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // Create an input repo repo := tu.UniqueString("TestDeleteCommitPropagation") require.NoError(t, c.CreateRepo(repo)) // Create two copy pipelines numPipelines, numCommits := 2, 2 pipeline := make([]string, numPipelines) for i := 0; i < numPipelines; i++ { pipeline[i] = tu.UniqueString(fmt.Sprintf("pipeline%d_", i)) input := []string{repo, pipeline[0]}[i] require.NoError(t, c.CreatePipeline( pipeline[i], "", []string{"bash"}, []string{"cp /pfs/*/* /pfs/out/"}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(input, "/*"), "", false, )) } // Commit twice to the input repo, creating 4 jobs and 4 output commits commit := make([]*pfs.Commit, numCommits) var err error for i := 0; i < numCommits; i++ { commit[i], err = c.StartCommit(repo, "master") require.NoError(t, err) _, err = c.PutFile(repo, commit[i].ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, commit[i].ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{commit[i]}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) } // Delete the first commit in the input repo (not master, but its parent) // Make sure that 'repo' and all downstream repos only have one commit now. // This ensures that commits' parents are updated commits, err := c.ListCommit(repo, "master", "", 0) require.NoError(t, err) require.Equal(t, 2, len(commits)) require.NoError(t, c.DeleteCommit(repo, commit[0].ID)) for _, r := range []string{repo, pipeline[0], pipeline[1]} { commits, err := c.ListCommit(r, "master", "", 0) require.NoError(t, err) require.Equal(t, 1, len(commits)) require.Nil(t, commits[0].ParentCommit) } jis, err := c.ListJob(pipeline[0], nil, nil, -1, true) require.NoError(t, err) require.Equal(t, 1, len(jis)) // Delete the second commit in the input repo (master) // Make sure that 'repo' and all downstream repos have no commits. This // ensures that branches are updated. require.NoError(t, c.DeleteCommit(repo, "master")) for _, r := range []string{repo, pipeline[0], pipeline[1]} { commits, err := c.ListCommit(r, "master", "", 0) require.NoError(t, err) require.Equal(t, 0, len(commits)) } // Make one more input commit, to be sure that the branches are still // connected properly finalCommit, err := c.StartCommit(repo, "master") require.NoError(t, err) _, err = c.PutFile(repo, finalCommit.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, finalCommit.ID)) commitIter, err := c.FlushCommit([]*pfs.Commit{finalCommit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 2, len(commitInfos)) } // TestDeleteCommitRunsJob creates an input reo, commits several times, and then // creates a pipeline. Creating the pipeline will spawn a job and while that // job is running, this test deletes the HEAD commit of the input branch, which // deletes the job's output commit and cancels the job. This should start // another pipeline that processes the original input HEAD commit's parent. 
func TestDeleteCommitRunsJob(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // Create an input repo repo := tu.UniqueString("TestDeleteCommitRunsJob") require.NoError(t, c.CreateRepo(repo)) // Create two input commits. The input commit has two files: 'time' which // determines how long the processing job runs for, and 'data' which // determines the job's output. This ensures that the first job (processing // the second commit) runs for a long time, making it easy to cancel, while // the second job runs quickly, ensuring that the test finishes quickly commit1, err := c.StartCommit(repo, "master") require.NoError(t, err) _, err = c.PutFile(repo, commit1.ID, "/time", strings.NewReader("1")) require.NoError(t, err) _, err = c.PutFile(repo, commit1.ID, "/data", strings.NewReader("commit 1 data")) require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, commit1.ID)) commit2, err := c.StartCommit(repo, "master") require.NoError(t, err) require.NoError(t, c.DeleteFile(repo, commit2.ID, "/time")) _, err = c.PutFile(repo, commit2.ID, "/time", strings.NewReader("600")) require.NoError(t, err) require.NoError(t, c.DeleteFile(repo, commit2.ID, "/data")) _, err = c.PutFile(repo, commit2.ID, "/data", strings.NewReader("commit 2 data")) require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, commit2.ID)) // Create sleep + copy pipeline pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ "sleep `cat /pfs/*/time`", "cp /pfs/*/data /pfs/out/", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(repo, "/"), "", false, )) // Wait until PPS has started processing commit2 require.NoErrorWithinT(t, 30*time.Second, func() error { return backoff.Retry(func() error { // TODO(msteffen): once github.com/pachyderm/pachyderm/pull/2642 is // submitted, change ListJob here to filter on commit1 as the input commit, // rather than inspecting the input in the test jobInfos, err := c.ListJob(pipeline, nil, nil, -1, true) if err != nil { return err } if len(jobInfos) != 1 { return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos) } pps.VisitInput(jobInfos[0].Input, func(input *pps.Input) { if input.Pfs == nil { err = fmt.Errorf("expected a single PFS input, but got: %v", jobInfos[0].Input) return } if input.Pfs.Commit != commit2.ID { err = fmt.Errorf("expected job to process %s, but instead processed: %s", commit2.ID, jobInfos[0].Input) return } }) return err }, backoff.NewTestingBackOff()) }) // Delete the first commit in the input repo require.NoError(t, c.DeleteCommit(repo, commit2.ID)) // Wait until PPS has started processing commit1 require.NoErrorWithinT(t, 30*time.Second, func() error { return backoff.Retry(func() error { // TODO(msteffen): as above, change ListJob here to filter on commit2 as // the input, rather than inspecting the input in the test jobInfos, err := c.ListJob(pipeline, nil, nil, -1, true) if err != nil { return err } if len(jobInfos) != 1 { return fmt.Errorf("Expected one job, but got %d: %v", len(jobInfos), jobInfos) } pps.VisitInput(jobInfos[0].Input, func(input *pps.Input) { if input.Pfs == nil { err = fmt.Errorf("expected a single PFS input, but got: %v", jobInfos[0].Input) return } if input.Pfs.Commit != commit1.ID { err = fmt.Errorf("expected job to process %s, but instead processed: %s", commit1.ID, jobInfos[0].Input) return } }) return err }, backoff.NewTestingBackOff()) }) iter, err := 
c.FlushCommit([]*pfs.Commit{commit1}, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) commitInfos := collectCommitInfos(t, iter) require.Equal(t, 1, len(commitInfos)) // Check that the job processed the right data buf := bytes.Buffer{} err = c.GetFile(repo, "master", "/data", 0, 0, &buf) require.NoError(t, err) require.Equal(t, "commit 1 data", buf.String()) // Create one more commit to make sure the pipeline can still process input // commits commit3, err := c.StartCommit(repo, "master") require.NoError(t, err) require.NoError(t, c.DeleteFile(repo, commit3.ID, "/data")) _, err = c.PutFile(repo, commit3.ID, "/data", strings.NewReader("commit 3 data")) require.NoError(t, err) require.NoError(t, c.FinishCommit(repo, commit3.ID)) // Flush commit3, and make sure the output is as expected iter, err = c.FlushCommit([]*pfs.Commit{commit3}, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) commitInfos = collectCommitInfos(t, iter) require.Equal(t, 1, len(commitInfos)) buf.Reset() err = c.GetFile(pipeline, commitInfos[0].Commit.ID, "/data", 0, 0, &buf) require.NoError(t, err) require.Equal(t, "commit 3 data", buf.String()) } func TestEntryPoint(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString(t.Name() + "-data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString(t.Name()) require.NoError(t, c.CreatePipeline( pipeline, "pachyderm_entrypoint", nil, nil, &pps.ParallelismSpec{ Constant: 1, }, &pps.Input{ Pfs: &pps.PFSInput{ Name: "in", Repo: dataRepo, Glob: "/*", }, }, "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "file", 0, 0, &buf)) require.Equal(t, "foo", buf.String()) } func TestDeleteSpecRepo(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) dataRepo := tu.UniqueString("TestDeleteSpecRepo_data") require.NoError(t, c.CreateRepo(dataRepo)) pipeline := tu.UniqueString("TestSimplePipeline") require.NoError(t, c.CreatePipeline( pipeline, "pachyderm_entrypoint", []string{"echo", "foo"}, nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/"), "", false, )) require.YesError(t, c.DeleteRepo(ppsconsts.SpecRepo, false)) } func TestUserWorkingDir(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) defer require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestUserWorkingDir_data") require.NoError(t, c.CreateRepo(dataRepo)) commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) pipeline := tu.UniqueString("TestSimplePipeline") _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Image: "pachyderm_entrypoint", Cmd: []string{"bash"}, Stdin: []string{ 
"ls -lh /pfs", "whoami >/pfs/out/whoami", "pwd >/pfs/out/pwd", fmt.Sprintf("cat /pfs/%s/file >/pfs/out/file", dataRepo), }, User: "test", WorkingDir: "/home/test", }, Input: client.NewPFSInput(dataRepo, "/"), }) require.NoError(t, err) commitIter, err := c.FlushCommit([]*pfs.Commit{commit}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) var buf bytes.Buffer require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "whoami", 0, 0, &buf)) require.Equal(t, "test\n", buf.String()) buf.Reset() require.NoError(t, c.GetFile(commitInfos[0].Commit.Repo.Name, commitInfos[0].Commit.ID, "pwd", 0, 0, &buf)) require.Equal(t, "/home/test\n", buf.String()) } func TestDontReadStdin(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) dataRepo := tu.UniqueString("TestDontReadStdin_data") require.NoError(t, c.CreateRepo(dataRepo)) pipeline := tu.UniqueString("TestDontReadStdin") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"true"}, []string{"stdin that will never be read"}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/"), "", false, )) numCommits := 20 for i := 0; i < numCommits; i++ { commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, "master")) jobInfos, err := c.FlushJobAll([]*pfs.Commit{commit}, nil) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) require.Equal(t, jobInfos[0].State.String(), pps.JobState_JOB_SUCCESS.String()) } } func TestStatsDeleteAll(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineWithStats_data") require.NoError(t, c.CreateRepo(dataRepo)) commit, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) pipeline := tu.UniqueString("pipeline") _, err = c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"cp", fmt.Sprintf("/pfs/%s/file", dataRepo), "/pfs/out"}, }, Input: client.NewPFSInput(dataRepo, "/"), EnableStats: true, }) jis, err := c.FlushJobAll([]*pfs.Commit{commit}, nil) require.NoError(t, err) require.Equal(t, 1, len(jis)) require.Equal(t, pps.JobState_JOB_SUCCESS.String(), jis[0].State.String()) require.NoError(t, c.DeleteAll()) require.NoError(t, c.CreateRepo(dataRepo)) commit, err = c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit.ID, "file", strings.NewReader("foo\n")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit.ID)) _, err = c.PpsAPIClient.CreatePipeline(context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"cp", fmt.Sprintf("/pfs/%s/file", dataRepo), "/pfs/out"}, }, Input: client.NewPFSInput(dataRepo, "/*"), EnableStats: true, }) jis, err = c.FlushJobAll([]*pfs.Commit{commit}, nil) require.NoError(t, err) require.Equal(t, 1, len(jis)) require.Equal(t, pps.JobState_JOB_SUCCESS.String(), jis[0].State.String()) require.NoError(t, c.DeleteAll()) } func TestCorruption(t *testing.T) { t.Skip("This test takes too long to run on CI.") if testing.Short() { 
t.Skip("Skipping integration tests in short mode") } etcdClient := getEtcdClient(t) c := getPachClient(t) require.NoError(t, c.DeleteAll()) r := rand.New(rand.NewSource(128)) for i := 0; i < 100; i++ { dataRepo := tu.UniqueString("TestSimplePipeline_data") require.NoError(t, c.CreateRepo(dataRepo)) commit1, err := c.StartCommit(dataRepo, "master") require.NoError(t, err) _, err = c.PutFile(dataRepo, commit1.ID, "file", strings.NewReader("foo")) require.NoError(t, err) require.NoError(t, c.FinishCommit(dataRepo, commit1.ID)) pipeline := tu.UniqueString("TestSimplePipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo), }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", false, )) commitIter, err := c.FlushCommit([]*pfs.Commit{commit1}, nil) require.NoError(t, err) commitInfos := collectCommitInfos(t, commitIter) require.Equal(t, 1, len(commitInfos)) resp, err := etcdClient.Get(context.Background(), col.DefaultPrefix, etcd.WithPrefix(), etcd.WithKeysOnly()) require.NoError(t, err) for _, kv := range resp.Kvs { // Delete 1 in 10 keys if r.Intn(10) == 0 { _, err := etcdClient.Delete(context.Background(), string(kv.Key)) require.NoError(t, err) } } require.NoError(t, c.DeleteAll()) } } func TestPachdPrometheusStats(t *testing.T) { t.Skip("flake") if testing.Short() { t.Skip("Skipping integration tests in short mode") } port := os.Getenv("PROM_PORT") promClient, err := prom_api.NewClient(prom_api.Config{ Address: fmt.Sprintf("http://127.0.0.1:%v", port), }) require.NoError(t, err) promAPI := prom_api_v1.NewAPI(promClient) countQuery := func(t *testing.T, query string) float64 { result, err := promAPI.Query(context.Background(), query, time.Now()) require.NoError(t, err) resultVec := result.(prom_model.Vector) require.Equal(t, 1, len(resultVec)) return float64(resultVec[0].Value) } avgQuery := func(t *testing.T, sumQuery string, countQuery string, expected int) { query := "(" + sumQuery + ")/(" + countQuery + ")" result, err := promAPI.Query(context.Background(), query, time.Now()) require.NoError(t, err) resultVec := result.(prom_model.Vector) require.Equal(t, expected, len(resultVec)) } // Check stats reported on pachd pod pod := "app=\"pachd\"" without := "(instance)" // Check PFS API is reported t.Run("GetFileAvgRuntime", func(t *testing.T) { sum := fmt.Sprintf("sum(pachyderm_pachd_get_file_time_sum{%v}) without %v", pod, without) count := fmt.Sprintf("sum(pachyderm_pachd_get_file_time_count{%v}) without %v", pod, without) avgQuery(t, sum, count, 2) // 2 results ... 
one for finished, one for errored }) t.Run("PutFileAvgRuntime", func(t *testing.T) { sum := fmt.Sprintf("sum(pachyderm_pachd_put_file_time_sum{%v}) without %v", pod, without) count := fmt.Sprintf("sum(pachyderm_pachd_put_file_time_count{%v}) without %v", pod, without) avgQuery(t, sum, count, 1) }) t.Run("GetFileSeconds", func(t *testing.T) { query := fmt.Sprintf("sum(pachyderm_pachd_get_file_seconds_count{%v}) without %v", pod, without) countQuery(t, query) // Just check query has a result }) t.Run("PutFileSeconds", func(t *testing.T) { query := fmt.Sprintf("sum(pachyderm_pachd_put_file_seconds_count{%v}) without %v", pod, without) countQuery(t, query) // Just check query has a result }) // Check PPS API is reported t.Run("ListJobSeconds", func(t *testing.T) { query := fmt.Sprintf("sum(pachyderm_pachd_list_job_seconds_count{%v}) without %v", pod, without) countQuery(t, query) }) t.Run("ListJobAvgRuntime", func(t *testing.T) { sum := fmt.Sprintf("sum(pachyderm_pachd_list_job_time_sum{%v}) without %v", pod, without) count := fmt.Sprintf("sum(pachyderm_pachd_list_job_time_count{%v}) without %v", pod, without) avgQuery(t, sum, count, 1) }) } func TestRapidUpdatePipelines(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) pipeline := tu.UniqueString(t.Name() + "-pipeline-") cronInput := client.NewCronInput("time", "@every 30s") cronInput.Cron.Overwrite = true require.NoError(t, c.CreatePipeline( pipeline, "", []string{"/bin/bash"}, []string{"cp /pfs/time/* /pfs/out/"}, nil, cronInput, "", false, )) // TODO(msteffen): remove all sleeps from tests time.Sleep(10 * time.Second) for i := 0; i < 20; i++ { _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"/bin/bash"}, Stdin: []string{"cp /pfs/time/* /pfs/out/"}, }, Input: cronInput, Update: true, Reprocess: true, }) require.NoError(t, err) } // TODO ideally this test would not take 5 minutes (or even 3 minutes) require.NoErrorWithinTRetry(t, 5*time.Minute, func() error { jis, err := c.ListJob(pipeline, nil, nil, -1, true) if err != nil { return err } if len(jis) < 6 { return fmt.Errorf("should have more than 6 jobs in 5 minutes") } for i := 0; i+1 < len(jis); i++ { difference := jis[i].Started.Seconds - jis[i+1].Started.Seconds if difference < 15 { return fmt.Errorf("jobs too close together") } else if difference > 45 { return fmt.Errorf("jobs too far apart") } } return nil }) } func TestDatumTries(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestDatumTries_data") require.NoError(t, c.CreateRepo(dataRepo)) _, err := c.PutFile(dataRepo, "master", "file", strings.NewReader("foo")) require.NoError(t, err) tries := int64(5) pipeline := tu.UniqueString("TestSimplePipeline") _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"unknown"}, // Cmd fails because "unknown" isn't a known command. 
}, Input: client.NewPFSInput(dataRepo, "/"), DatumTries: tries, }) require.NoError(t, err) jobInfos, err := c.FlushJobAll([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) iter := c.GetLogs("", jobInfos[0].Job.ID, nil, "", false, false, 0) var observedTries int64 for iter.Next() { if strings.Contains(iter.Message().Message, "errored running user code after") { observedTries++ } } require.Equal(t, tries, observedTries) } func TestInspectJob(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) _, err := pachClient.PpsAPIClient.InspectJob(context.Background(), &pps.InspectJobRequest{}) require.YesError(t, err) require.True(t, strings.Contains(err.Error(), "must specify either a Job or an OutputCommit")) repo := tu.UniqueString("TestInspectJob") require.NoError(t, c.CreateRepo(repo)) _, err = c.PutFile(repo, "master", "file", strings.NewReader("foo")) require.NoError(t, err) ci, err := c.InspectCommit(repo, "master") require.NoError(t, err) _, err = c.InspectJobOutputCommit(repo, ci.Commit.ID, false) require.YesError(t, err) require.True(t, strings.Contains(err.Error(), "not found")) } func TestPipelineVersions(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestPipelineVersions_data") require.NoError(t, c.CreateRepo(dataRepo)) pipeline := tu.UniqueString("TestPipelineVersions") nVersions := 5 for i := 0; i < nVersions; i++ { require.NoError(t, c.CreatePipeline( pipeline, "", []string{fmt.Sprintf("%d", i)}, // an obviously illegal command, but the pipeline will never run nil, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", i != 0, )) } for i := 0; i < nVersions; i++ { pi, err := c.InspectPipeline(ancestry.Add(pipeline, nVersions-1-i)) require.NoError(t, err) require.Equal(t, fmt.Sprintf("%d", i), pi.Transform.Cmd[0]) } } // TestSplitFileHeader tests putting data in Pachyderm with delimiter == SQL, // and makes sure that every pipeline worker gets a copy of the file header. As // well, adding more data with the same header should not change the contents of // existing data. 
func TestSplitFileHeader(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // put a SQL file w/ header repo := tu.UniqueString("TestSplitFileHeader") require.NoError(t, c.CreateRepo(repo)) _, err := c.PutFileSplit(repo, "master", "d", pfs.Delimiter_SQL, 0, 0, 0, false, strings.NewReader(tu.TestPGDump)) require.NoError(t, err) // Create a pipeline that roughly validates the header pipeline := tu.UniqueString("TestSplitFileHeaderPipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"/bin/bash"}, []string{ `ls /pfs/*/d/*`, // for debugging `cars_tables="$(grep "CREATE TABLE public.cars" /pfs/*/d/* | sort -u | wc -l)"`, `(( cars_tables == 1 )) && exit 0 || exit 1`, }, &pps.ParallelismSpec{Constant: 1}, client.NewPFSInput(repo, "/d/*"), "", false, )) // wait for job to run & check that all rows were processed var jobCount int c.FlushJob([]*pfs.Commit{client.NewCommit(repo, "master")}, nil, func(jobInfo *pps.JobInfo) error { jobCount++ require.Equal(t, 1, jobCount) require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State) require.Equal(t, int64(5), jobInfo.DataProcessed) require.Equal(t, int64(0), jobInfo.DataSkipped) return nil }) // Add new rows with same header data _, err = c.PutFileSplit(repo, "master", "d", pfs.Delimiter_SQL, 0, 0, 0, false, strings.NewReader(tu.TestPGDumpNewRows)) require.NoError(t, err) // old data should be skipped, even though header was uploaded twice (new // header shouldn't append or change the hash or anything) jobCount = 0 c.FlushJob([]*pfs.Commit{client.NewCommit(repo, "master")}, nil, func(jobInfo *pps.JobInfo) error { jobCount++ require.Equal(t, 1, jobCount) require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State) require.Equal(t, int64(3), jobInfo.DataProcessed) // added 3 new rows require.Equal(t, int64(5), jobInfo.DataSkipped) return nil }) } func TestNewHeaderCausesReprocess(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // put a SQL file w/ header repo := tu.UniqueString("TestSplitFileHeader") require.NoError(t, c.CreateRepo(repo)) _, err := c.PutFileSplit(repo, "master", "d", pfs.Delimiter_SQL, 0, 0, 0, false, strings.NewReader(tu.TestPGDump)) require.NoError(t, err) // Create a pipeline that roughly validates the header pipeline := tu.UniqueString("TestSplitFileReprocessPL") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"/bin/bash"}, []string{ `ls /pfs/*/d/*`, // for debugging `cars_tables="$(grep "CREATE TABLE public.cars" /pfs/*/d/* | sort -u | wc -l)"`, `(( cars_tables == 1 )) && exit 0 || exit 1`, }, &pps.ParallelismSpec{Constant: 1}, client.NewPFSInput(repo, "/d/*"), "", false, )) // wait for job to run & check that all rows were processed var jobCount int c.FlushJob([]*pfs.Commit{client.NewCommit(repo, "master")}, nil, func(jobInfo *pps.JobInfo) error { jobCount++ require.Equal(t, 1, jobCount) require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State) require.Equal(t, int64(5), jobInfo.DataProcessed) require.Equal(t, int64(0), jobInfo.DataSkipped) return nil }) // put empty dataset w/ new header _, err = c.PutFileSplit(repo, "master", "d", pfs.Delimiter_SQL, 0, 0, 0, false, strings.NewReader(tu.TestPGDumpNewHeader)) require.NoError(t, err) // everything gets reprocessed (hashes all change even though the files // themselves weren't altered) jobCount = 0 c.FlushJob([]*pfs.Commit{client.NewCommit(repo, "master")}, nil, 
func(jobInfo *pps.JobInfo) error { jobCount++ require.Equal(t, 1, jobCount) require.Equal(t, pps.JobState_JOB_SUCCESS, jobInfo.State) require.Equal(t, int64(5), jobInfo.DataProcessed) // added 3 new rows require.Equal(t, int64(0), jobInfo.DataSkipped) return nil }) } func TestSpout(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) t.Run("SpoutBasic", func(t *testing.T) { dataRepo := tu.UniqueString("TestSpoutBasic_data") require.NoError(t, c.CreateRepo(dataRepo)) // create a spout pipeline pipeline := tu.UniqueString("pipelinespoutbasic") _, err := c.PpsAPIClient.CreatePipeline( c.Ctx(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"/bin/sh"}, Stdin: []string{ "while [ : ]", "do", "sleep 2", "date > date", "tar -cvf /pfs/out ./date*", "done"}, }, Spout: &pps.Spout{}, // this needs to be non-nil to make it a spout }) require.NoError(t, err) // get 5 succesive commits, and ensure that the file size increases each time // since the spout should be appending to that file on each commit iter, err := c.SubscribeCommit(pipeline, "master", nil, "", pfs.CommitState_FINISHED) require.NoError(t, err) var prevLength uint64 for i := 0; i < 5; i++ { commitInfo, err := iter.Next() require.NoError(t, err) files, err := c.ListFile(pipeline, commitInfo.Commit.ID, "") require.NoError(t, err) require.Equal(t, 1, len(files)) fileLength := files[0].SizeBytes if fileLength <= prevLength { t.Errorf("File length was expected to increase. Prev: %v, Cur: %v", prevLength, fileLength) } prevLength = fileLength } // make sure we can delete commits err = c.DeleteCommit(pipeline, "master") require.NoError(t, err) // and make sure we can attatch a downstream pipeline downstreamPipeline := tu.UniqueString("pipelinespoutdownstream") require.NoError(t, c.CreatePipeline( downstreamPipeline, "", []string{"/bin/bash"}, []string{"cp " + fmt.Sprintf("/pfs/%s/*", pipeline) + " /pfs/out/"}, nil, client.NewPFSInput(pipeline, "/*"), "", false, )) // we should have one job between pipeline and downstreamPipeline jobInfos, err := c.FlushJobAll([]*pfs.Commit{client.NewCommit(pipeline, "master")}, []string{downstreamPipeline}) require.NoError(t, err) require.Equal(t, 1, len(jobInfos)) }) t.Run("SpoutOverwrite", func(t *testing.T) { dataRepo := tu.UniqueString("TestSpoutOverwrite_data") require.NoError(t, c.CreateRepo(dataRepo)) pipeline := tu.UniqueString("pipelinespoutoverwrite") _, err := c.PpsAPIClient.CreatePipeline( c.Ctx(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"/bin/sh"}, Stdin: []string{ "while [ : ]", "do", "sleep 2", "date > date", "tar -cvf /pfs/out ./date*", "done"}, }, Spout: &pps.Spout{ Overwrite: true, }, }) require.NoError(t, err) // if the overwrite flag is enabled, then the spout will overwrite the file on each commit // so the commits should have files that stay the same size iter, err := c.SubscribeCommit(pipeline, "master", nil, "", pfs.CommitState_FINISHED) require.NoError(t, err) var prevLength uint64 for i := 0; i < 5; i++ { commitInfo, err := iter.Next() require.NoError(t, err) files, err := c.ListFile(pipeline, commitInfo.Commit.ID, "") require.NoError(t, err) require.Equal(t, 1, len(files)) fileLength := files[0].SizeBytes if i > 0 && fileLength != prevLength { t.Errorf("File length was expected to stay the same. 
Prev: %v, Cur: %v", prevLength, fileLength) } prevLength = fileLength } }) t.Run("SpoutProvenance", func(t *testing.T) { dataRepo := tu.UniqueString("TestSpoutProvenance_data") require.NoError(t, c.CreateRepo(dataRepo)) // create a pipeline pipeline := tu.UniqueString("pipelinespoutprovenance") _, err := c.PpsAPIClient.CreatePipeline( c.Ctx(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"/bin/sh"}, Stdin: []string{ "while [ : ]", "do", "sleep 5", "date > date", "tar -cvf /pfs/out ./date*", "done"}, }, Spout: &pps.Spout{ Overwrite: true, }, }) require.NoError(t, err) // get some commits pipelineInfo, err := c.InspectPipeline(pipeline) require.NoError(t, err) iter, err := c.SubscribeCommit(pipeline, "", client.NewCommitProvenance(ppsconsts.SpecRepo, pipeline, pipelineInfo.SpecCommit.ID), "", pfs.CommitState_FINISHED) require.NoError(t, err) // and we want to make sure that these commits all have the same provenance provenanceID := "" for i := 0; i < 3; i++ { commitInfo, err := iter.Next() require.NoError(t, err) require.Equal(t, 1, len(commitInfo.Provenance)) provenance := commitInfo.Provenance[0].Commit if i == 0 { // set first one provenanceID = provenance.ID } else { require.Equal(t, provenanceID, provenance.ID) } } // now we'll update the pipeline _, err = c.PpsAPIClient.CreatePipeline( c.Ctx(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: []string{"/bin/sh"}, Stdin: []string{ "while [ : ]", "do", "sleep 5", "date > date", "tar -cvf /pfs/out ./date*", "done"}, }, Spout: &pps.Spout{}, Update: true, Reprocess: true, }) require.NoError(t, err) pipelineInfo, err = c.InspectPipeline(pipeline) require.NoError(t, err) iter, err = c.SubscribeCommit(pipeline, "", client.NewCommitProvenance(ppsconsts.SpecRepo, pipeline, pipelineInfo.SpecCommit.ID), "", pfs.CommitState_FINISHED) require.NoError(t, err) for i := 0; i < 3; i++ { commitInfo, err := iter.Next() require.NoError(t, err) require.Equal(t, 1, len(commitInfo.Provenance)) provenance := commitInfo.Provenance[0].Commit if i == 0 { // this time, we expect our commits to have different provenance from the commits earlier require.NotEqual(t, provenanceID, provenance.ID) provenanceID = provenance.ID } else { // but they should still have the same provenance as each other require.Equal(t, provenanceID, provenance.ID) } } }) t.Run("ServiceSpout", func(t *testing.T) { dataRepo := tu.UniqueString("TestServiceSpout_data") require.NoError(t, c.CreateRepo(dataRepo)) annotations := map[string]string{"foo": "bar"} // Create a pipeline that listens for tcp connections // on internal port 8000 and dumps whatever it receives // (should be in the form of a tar stream) to /pfs/out. 
pipeline := tu.UniqueString("pipelineservicespout") _, err := c.PpsAPIClient.CreatePipeline( c.Ctx(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Image: "pachyderm/ubuntuplusnetcat:latest", Cmd: []string{"sh"}, Stdin: []string{ "netcat -l -s 0.0.0.0 -p 8000 >/pfs/out", }, }, ParallelismSpec: &pps.ParallelismSpec{ Constant: 1, }, Input: client.NewPFSInput(dataRepo, "/"), Update: false, Spout: &pps.Spout{ Service: &pps.Service{ InternalPort: 8000, ExternalPort: 31800, Annotations: annotations, }, }, }) require.NoError(t, err) time.Sleep(20 * time.Second) host, _, err := net.SplitHostPort(c.GetAddress()) serviceAddr := net.JoinHostPort(host, "31800") // Write a tar stream with a single file to // the tcp connection of the pipeline service's // external port. backoff.Retry(func() error { raddr, err := net.ResolveTCPAddr("tcp", serviceAddr) if err != nil { return err } conn, err := net.DialTCP("tcp", nil, raddr) if err != nil { return err } tarwriter := tar.NewWriter(conn) defer tarwriter.Close() headerinfo := &tar.Header{ Name: "file1", Size: int64(len("foo")), } err = tarwriter.WriteHeader(headerinfo) if err != nil { return err } _, err = tarwriter.Write([]byte("foo")) if err != nil { return err } return nil }, backoff.NewTestingBackOff()) iter, err := c.SubscribeCommit(pipeline, "master", nil, "", pfs.CommitState_FINISHED) require.NoError(t, err) commitInfo, err := iter.Next() require.NoError(t, err) files, err := c.ListFile(pipeline, commitInfo.Commit.ID, "") require.NoError(t, err) require.Equal(t, 1, len(files)) // Confirm that a commit is made with the file // written to the external port of the pipeline's service. var buf bytes.Buffer err = c.GetFile(pipeline, commitInfo.Commit.ID, files[0].File.Path, 0, 0, &buf) require.NoError(t, err) require.Equal(t, buf.String(), "foo") }) require.NoError(t, c.Fsck(false, func(resp *pfs.FsckResponse) error { if resp.Error != "" { return fmt.Errorf("%v", resp.Error) } return nil })) } func TestKafka(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) host := "localhost" // Open a connection to the kafka cluster conn, err := kafka.Dial("tcp", fmt.Sprintf("%v:%v", host, 32400)) if err != nil { t.Fatal(err) } defer conn.Close() // create the topic port := "" topic := tu.UniqueString("demo") // this part is kinda finnicky because sometimes the zookeeper session will timeout for one of the brokers brokers, err := conn.Brokers() // so to deal with that, we try connecting to each broker for i, b := range brokers { conn, err := kafka.Dial("tcp", fmt.Sprintf("%v:%v", host, b.Port)) if err != nil { t.Fatal(err) } // we keep track of the port number of brokers brokers, err = conn.Brokers() // this is ok since Go does the for loop over brokers as it was for the initial loop port = fmt.Sprint(b.Port) // and try creating the topic err = conn.CreateTopics(kafka.TopicConfig{ Topic: topic, NumPartitions: 1, ReplicationFactor: len(brokers), }) if err != nil { // it's ok if the first n-1 fail if i < len(brokers)-1 { continue } // but if all of them fail, that's bad t.Fatal("Can't create topic", err) } // once we found one that works, we can be done with this part break } // now we want to connect to the leader broker with our topic // so we look up the partiton which will have this information part, err := kafka.LookupPartition(context.Background(), "tcp", fmt.Sprintf("%v:%v", host, port), topic, 0) if err != nil { 
t.Fatal(err) } // we grab the host IP and port to pass to the image host = part.Leader.Host port = fmt.Sprint(part.Leader.Port) // since kafka and pachyderm are in the same kubernetes cluster, we need to adjust the host address to "localhost" here part.Leader.Host = "localhost" // and we can now make a connection to the leader conn, err = kafka.DialPartition(context.Background(), "tcp", fmt.Sprintf("%v:%v", "localhost", port), part) if err != nil { t.Fatal(err) } // now we asynchronously write to the kafka topic quit := make(chan bool) go func(chan bool) { i := 0 for { select { case <-quit: return default: if _, err = conn.WriteMessages( kafka.Message{Value: []byte(fmt.Sprintf("Now it's %v\n", i))}, ); err != nil { t.Fatal(err) } i++ } } }(quit) defer func() { quit <- true }() // create a spout pipeline running the kafka consumer _, err = c.PpsAPIClient.CreatePipeline( c.Ctx(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(topic), Transform: &pps.Transform{ Image: "kafka-demo:latest", Cmd: []string{"go", "run", "./main.go"}, Env: map[string]string{ "HOST": host, "TOPIC": topic, "PORT": port, }, }, Spout: &pps.Spout{}, // this needs to be non-nil to make it a spout }) require.NoError(t, err) // and verify that the spout is consuming it // we'll get 5 succesive commits, and ensure that we find all the kafka messages we wrote // to the first five files. iter, err := c.SubscribeCommit(topic, "master", nil, "", pfs.CommitState_FINISHED) require.NoError(t, err) num := 1 for i := 0; i < 5; i++ { num-- // files end in a newline so we need to decrement here inbetween iterations commitInfo, err := iter.Next() require.NoError(t, err) files, err := c.ListFile(topic, commitInfo.Commit.ID, "") require.NoError(t, err) require.Equal(t, i+1, len(files)) // get the i'th file var buf bytes.Buffer err = c.GetFile(topic, commitInfo.Commit.ID, files[i].File.Path, 0, 0, &buf) if err != nil { t.Errorf("Could not get file %v", err) } // read the lines and verify that we see each line we wrote for err != io.EOF { line := "" line, err = buf.ReadString('\n') if len(line) > 0 && line != fmt.Sprintf("Now it's %v\n", num) { t.Error("Missed a kafka message:", num) } num++ } } // we also check that at least 5 kafka messages were consumed if num < 5 { t.Error("Expected to process more than 5 kafka messages:", num) } } func TestDeferredProcessing(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo := tu.UniqueString("TestDeferredProcessing_data") require.NoError(t, c.CreateRepo(dataRepo)) pipeline1 := tu.UniqueString("TestDeferredProcessing1") _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline1), Transform: &pps.Transform{ Cmd: []string{"bash"}, Stdin: []string{fmt.Sprintf("cp /pfs/%s/* /pfs/out/", dataRepo)}, }, Input: client.NewPFSInput(dataRepo, "/*"), OutputBranch: "staging", }) require.NoError(t, err) pipeline2 := tu.UniqueString("TestDeferredProcessing2") require.NoError(t, c.CreatePipeline( pipeline2, "", []string{"bash"}, []string{ fmt.Sprintf("cp /pfs/%s/* /pfs/out/", pipeline1), }, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(pipeline1, "/*"), "", false, )) _, err = c.PutFile(dataRepo, "staging", "file", strings.NewReader("foo")) require.NoError(t, err) commit := client.NewCommit(dataRepo, "staging") commitInfos, err := c.FlushCommitAll([]*pfs.Commit{commit}, nil) require.NoError(t, err) require.Equal(t, 0, 
len(commitInfos)) c.CreateBranch(dataRepo, "master", "staging", nil) commitInfos, err = c.FlushCommitAll([]*pfs.Commit{commit}, nil) require.NoError(t, err) require.Equal(t, 1, len(commitInfos)) c.CreateBranch(pipeline1, "master", "staging", nil) commitInfos, err = c.FlushCommitAll([]*pfs.Commit{commit}, nil) require.NoError(t, err) require.Equal(t, 2, len(commitInfos)) } func TestPipelineHistory(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // create repos dataRepo := tu.UniqueString("TestPipelineHistory_data") require.NoError(t, c.CreateRepo(dataRepo)) pipelineName := tu.UniqueString("TestPipelineHistory") require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"bash"}, []string{"echo foo >/pfs/out/file"}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", true, )) _, err := c.PutFile(dataRepo, "master", "file", strings.NewReader("1")) require.NoError(t, err) _, err = c.FlushCommitAll([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) jis, err := c.ListJob(pipelineName, nil, nil, 0, true) require.Equal(t, 1, len(jis)) // Update the pipeline require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"bash"}, []string{"echo bar >/pfs/out/file"}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", true, )) _, err = c.PutFile(dataRepo, "master", "file", strings.NewReader("2")) require.NoError(t, err) _, err = c.FlushCommitAll([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) cis, err := c.ListCommit(pipelineName, "master", "", 0) require.NoError(t, err) require.Equal(t, 3, len(cis)) jis, err = c.ListJob(pipelineName, nil, nil, 0, true) require.Equal(t, 2, len(jis)) jis, err = c.ListJob(pipelineName, nil, nil, 1, true) require.Equal(t, 3, len(jis)) jis, err = c.ListJob(pipelineName, nil, nil, -1, true) require.Equal(t, 3, len(jis)) // Update the pipeline again require.NoError(t, c.CreatePipeline( pipelineName, "", []string{"bash"}, []string{"echo buzz >/pfs/out/file"}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", true, )) _, err = c.FlushCommitAll([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) jis, err = c.ListJob(pipelineName, nil, nil, 0, true) require.Equal(t, 1, len(jis)) jis, err = c.ListJob(pipelineName, nil, nil, 1, true) require.Equal(t, 3, len(jis)) jis, err = c.ListJob(pipelineName, nil, nil, 2, true) require.Equal(t, 4, len(jis)) jis, err = c.ListJob(pipelineName, nil, nil, -1, true) require.Equal(t, 4, len(jis)) // Add another pipeline, this shouldn't change the results of the above // commands. 
pipelineName2 := tu.UniqueString("TestPipelineHistory2") require.NoError(t, c.CreatePipeline( pipelineName2, "", []string{"bash"}, []string{"echo foo >/pfs/out/file"}, &pps.ParallelismSpec{ Constant: 1, }, client.NewPFSInput(dataRepo, "/*"), "", true, )) _, err = c.FlushCommitAll([]*pfs.Commit{client.NewCommit(dataRepo, "master")}, nil) require.NoError(t, err) jis, err = c.ListJob(pipelineName, nil, nil, 0, true) require.Equal(t, 1, len(jis)) jis, err = c.ListJob(pipelineName, nil, nil, 1, true) require.Equal(t, 3, len(jis)) jis, err = c.ListJob(pipelineName, nil, nil, 2, true) require.Equal(t, 4, len(jis)) jis, err = c.ListJob(pipelineName, nil, nil, -1, true) require.Equal(t, 4, len(jis)) pipelineInfos, err := c.ListPipeline() require.NoError(t, err) require.Equal(t, 2, len(pipelineInfos)) pipelineInfos, err = c.ListPipelineHistory("", -1) require.Equal(t, 4, len(pipelineInfos)) pipelineInfos, err = c.ListPipelineHistory("", 1) require.Equal(t, 3, len(pipelineInfos)) pipelineInfos, err = c.ListPipelineHistory(pipelineName, -1) require.Equal(t, 3, len(pipelineInfos)) pipelineInfos, err = c.ListPipelineHistory(pipelineName2, -1) require.Equal(t, 1, len(pipelineInfos)) } func TestFileHistory(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) dataRepo1 := tu.UniqueString("TestFileHistory_data1") require.NoError(t, c.CreateRepo(dataRepo1)) dataRepo2 := tu.UniqueString("TestFileHistory_data2") require.NoError(t, c.CreateRepo(dataRepo2)) pipeline := tu.UniqueString("TestSimplePipeline") require.NoError(t, c.CreatePipeline( pipeline, "", []string{"bash"}, []string{ fmt.Sprintf("for a in /pfs/%s/*", dataRepo1), "do", fmt.Sprintf("for b in /pfs/%s/*", dataRepo2), "do", "touch /pfs/out/$(basename $a)_$(basename $b)", "done", "done", }, &pps.ParallelismSpec{ Constant: 1, }, client.NewCrossInput( client.NewPFSInput(dataRepo1, "/*"), client.NewPFSInput(dataRepo2, "/*"), ), "", false, )) _, err := c.PutFile(dataRepo1, "master", "A1", strings.NewReader("")) require.NoError(t, err) _, err = c.PutFile(dataRepo2, "master", "B1", strings.NewReader("")) require.NoError(t, err) _, err = c.PutFile(dataRepo1, "master", "A2", strings.NewReader("")) require.NoError(t, err) _, err = c.PutFile(dataRepo1, "master", "A3", strings.NewReader("")) require.NoError(t, err) _, err = c.PutFile(dataRepo2, "master", "B2", strings.NewReader("")) require.NoError(t, err) _, err = c.PutFile(dataRepo2, "master", "B3", strings.NewReader("")) require.NoError(t, err) _, err = c.FlushCommitAll([]*pfs.Commit{client.NewCommit(dataRepo1, "master"), client.NewCommit(dataRepo2, "master")}, nil) require.NoError(t, err) _, err = c.ListFileHistory(pipeline, "master", "", -1) require.NoError(t, err) } // TestNoOutputRepoDoesntCrashPPSMaster creates a pipeline, then deletes its // output repo while it's running (failing the pipeline and preventing the PPS // master from finishing the pipeline's output commit) and makes sure new // pipelines can be created (i.e. that the PPS master doesn't crashloop due to // the missing output repo). 
func TestNoOutputRepoDoesntCrashPPSMaster(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // Create input repo w/ initial commit repo := tu.UniqueString(t.Name()) require.NoError(t, c.CreateRepo(repo)) _, err := c.PutFile(repo, "master", "/file.1", strings.NewReader("1")) require.NoError(t, err) // Create pipeline pipeline := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline, "", // default image: ubuntu:16.04 []string{"bash"}, []string{ "sleep 10", "cp /pfs/*/* /pfs/out/", }, &pps.ParallelismSpec{Constant: 1}, client.NewPFSInput(repo, "/*"), "", // default output branch: master false, )) // force-delete output repo while 'sleep 10' is running, failing the pipeline require.NoError(t, c.DeleteRepo(pipeline, true)) // make sure the pipeline is failed require.NoErrorWithinTRetry(t, 30*time.Second, func() error { pi, err := c.InspectPipeline(pipeline) if err != nil { return err } if pi.State == pps.PipelineState_PIPELINE_FAILURE { return fmt.Errorf("%q should be in state FAILURE but is in %q", pipeline, pi.State.String()) } return nil }) // Delete the pachd pod, so that it restarts and the PPS master has to process // the failed pipeline tu.DeletePachdPod(t) // delete the pachd pod require.NoErrorWithinTRetry(t, 30*time.Second, func() error { _, err := c.Version() // wait for pachd to come back return err }) // Create a new input commit, and flush its output to 'pipeline', to make sure // the pipeline either restarts the RC and recreates the output repo, or fails _, err = c.PutFile(repo, "master", "/file.2", strings.NewReader("2")) require.NoError(t, err) iter, err := c.FlushCommit( []*pfs.Commit{client.NewCommit(repo, "master")}, []*pfs.Repo{client.NewRepo(pipeline)}) require.NoError(t, err) require.NoErrorWithinT(t, 30*time.Second, func() error { _, err := iter.Next() // TODO(msteffen): While not currently possible, PFS could return // CommitDeleted here. This should detect that error, but first: // - src/server/pfs/pfs.go should be moved to src/client/pfs (w/ other err // handling code) // - packages depending on that code should be migrated // Then this could add "|| pfs.IsCommitDeletedErr(err)" and satisfy the todo if err == io.EOF { return nil // expected--with no output repo, FlushCommit can't return anything } return fmt.Errorf("unexpected error value: %v", err) }) // Create a new pipeline, make sure FlushCommit eventually returns, and check // pipeline output (i.e. 
the PPS master does not crashloop--pipeline2 // eventually starts successfully) pipeline2 := tu.UniqueString("pipeline") require.NoError(t, c.CreatePipeline( pipeline2, "", // default image: ubuntu:16.04 []string{"bash"}, []string{"cp /pfs/*/* /pfs/out/"}, &pps.ParallelismSpec{Constant: 1}, client.NewPFSInput(repo, "/*"), "", // default output branch: master false, )) iter, err = c.FlushCommit( []*pfs.Commit{client.NewCommit(repo, "master")}, []*pfs.Repo{client.NewRepo(pipeline2)}) require.NoError(t, err) require.NoErrorWithinT(t, 30*time.Second, func() error { _, err := iter.Next() return err }) buf := &bytes.Buffer{} require.NoError(t, c.GetFile(pipeline2, "master", "/file.1", 0, 0, buf)) require.Equal(t, "1", buf.String()) buf.Reset() require.NoError(t, c.GetFile(pipeline2, "master", "/file.2", 0, 0, buf)) require.Equal(t, "2", buf.String()) } // TestNoTransform tests that sending a CreatePipeline request to pachd with no // 'transform' field doesn't kill pachd func TestNoTransform(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // Create input repo dataRepo := tu.UniqueString(t.Name() + "-data") require.NoError(t, c.CreateRepo(dataRepo)) // Create pipeline w/ no transform--make sure we get a response (& make sure // it explains the problem) pipeline := tu.UniqueString("no-transform-") _, err := c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: nil, Input: client.NewPFSInput(dataRepo, "/*"), }) require.YesError(t, err) require.Matches(t, "transform", err.Error()) } // TestNoCmd tests that sending a CreatePipeline request to pachd with no // 'transform.cmd' field doesn't kill pachd func TestNoCmd(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) // Create input data dataRepo := tu.UniqueString(t.Name() + "-data") require.NoError(t, c.CreateRepo(dataRepo)) _, err := c.PutFile(dataRepo, "master", "file", strings.NewReader("foo")) require.NoError(t, err) // create pipeline pipeline := tu.UniqueString("no-cmd-") _, err = c.PpsAPIClient.CreatePipeline( context.Background(), &pps.CreatePipelineRequest{ Pipeline: client.NewPipeline(pipeline), Transform: &pps.Transform{ Cmd: nil, Stdin: []string{`cat foo >/pfs/out/file`}, }, Input: client.NewPFSInput(dataRepo, "/*"), }) require.NoError(t, err) time.Sleep(5 * time.Second) // give pipeline time to start require.NoErrorWithinTRetry(t, 30*time.Second, func() error { pipelineInfo, err := c.InspectPipeline(pipeline) if err != nil { return err } if pipelineInfo.State != pps.PipelineState_PIPELINE_FAILURE { return fmt.Errorf("pipeline should be in state FAILURE, not: %s", pipelineInfo.State.String()) } return nil }) } func TestListTag(t *testing.T) { if testing.Short() { t.Skip("Skipping integration tests in short mode") } c := getPachClient(t) require.NoError(t, c.DeleteAll()) require.NoError(t, c.GarbageCollect(0)) // makes ListTags faster // Create a large number of objects w/ tags numTags := 1000 for i := 0; i < numTags; i++ { w, err := c.PutObjectAsync([]*pfs.Tag{{Name: fmt.Sprintf("tag%d", i)}}) require.NoError(t, err) _, err = w.Write([]byte(fmt.Sprintf("Object %d", i))) require.NoError(t, err) require.NoError(t, w.Close()) } // List tags & make sure all expected tags are present respTags := make(map[string]struct{}) require.NoError(t, c.ListTag(func(r *pfs.ListTagsResponse) 
error { respTags[r.Tag.Name] = struct{}{} require.NotEqual(t, "", r.Object.Hash) return nil })) require.Equal(t, numTags, len(respTags)) for i := 0; i < numTags; i++ { _, ok := respTags[fmt.Sprintf("tag%d", i)] require.True(t, ok) } // Check actual results of at least one write. actual := &bytes.Buffer{} require.NoError(t, c.GetTag("tag0", actual)) require.Equal(t, "Object 0", actual.String()) } func getObjectCountForRepo(t testing.TB, c *client.APIClient, repo string) int { pipelineInfos, err := pachClient.ListPipeline() require.NoError(t, err) repoInfo, err := pachClient.InspectRepo(repo) require.NoError(t, err) activeStat, err := pps_server.CollectActiveObjectsAndTags(context.Background(), c, []*pfs.RepoInfo{repoInfo}, pipelineInfos, 0, "") require.NoError(t, err) return activeStat.NObjects } func getAllObjects(t testing.TB, c *client.APIClient) []*pfs.Object { objectsClient, err := c.ListObjects(context.Background(), &pfs.ListObjectsRequest{}) require.NoError(t, err) var objects []*pfs.Object for object, err := objectsClient.Recv(); err != io.EOF; object, err = objectsClient.Recv() { require.NoError(t, err) objects = append(objects, object.Object) } return objects } func getAllTags(t testing.TB, c *client.APIClient) []string { tagsClient, err := c.ListTags(context.Background(), &pfs.ListTagsRequest{}) require.NoError(t, err) var tags []string for resp, err := tagsClient.Recv(); err != io.EOF; resp, err = tagsClient.Recv() { require.NoError(t, err) tags = append(tags, resp.Tag.Name) } return tags } func restartAll(t *testing.T) { k := tu.GetKubeClient(t) podsInterface := k.CoreV1().Pods(v1.NamespaceDefault) podList, err := podsInterface.List( metav1.ListOptions{ LabelSelector: "suite=pachyderm", }) require.NoError(t, err) for _, pod := range podList.Items { require.NoError(t, podsInterface.Delete(pod.Name, &metav1.DeleteOptions{ GracePeriodSeconds: new(int64), })) } waitForReadiness(t) } func restartOne(t *testing.T) { k := tu.GetKubeClient(t) podsInterface := k.CoreV1().Pods(v1.NamespaceDefault) podList, err := podsInterface.List( metav1.ListOptions{ LabelSelector: "app=pachd", }) require.NoError(t, err) require.NoError(t, podsInterface.Delete( podList.Items[rand.Intn(len(podList.Items))].Name, &metav1.DeleteOptions{GracePeriodSeconds: new(int64)})) waitForReadiness(t) } const ( retries = 10 ) // getUsablePachClient is like getPachClient except it blocks until it gets a // connection that actually works func getUsablePachClient(t *testing.T) *client.APIClient { for i := 0; i < retries; i++ { client := getPachClient(t) ctx, cancel := context.WithTimeout(context.Background(), time.Second*30) defer cancel() //cleanup resources _, err := client.PfsAPIClient.ListRepo(ctx, &pfs.ListRepoRequest{}) if err == nil { return client } } t.Fatalf("failed to connect after %d tries", retries) return nil } func podRunningAndReady(e watch.Event) (bool, error) { if e.Type == watch.Deleted { return false, errors.New("received DELETE while watching pods") } pod, ok := e.Object.(*v1.Pod) if !ok { } return pod.Status.Phase == v1.PodRunning, nil } func waitForReadiness(t testing.TB) { k := tu.GetKubeClient(t) deployment := pachdDeployment(t) for { newDeployment, err := k.Apps().Deployments(v1.NamespaceDefault).Get(deployment.Name, metav1.GetOptions{}) require.NoError(t, err) if newDeployment.Status.ObservedGeneration >= deployment.Generation && newDeployment.Status.Replicas == *newDeployment.Spec.Replicas { break } time.Sleep(time.Second * 5) } watch, err := 
k.CoreV1().Pods(v1.NamespaceDefault).Watch(metav1.ListOptions{ LabelSelector: "app=pachd", }) defer watch.Stop() require.NoError(t, err) readyPods := make(map[string]bool) for event := range watch.ResultChan() { ready, err := podRunningAndReady(event) require.NoError(t, err) if ready { pod, ok := event.Object.(*v1.Pod) if !ok { t.Fatal("event.Object should be an object") } readyPods[pod.Name] = true if len(readyPods) == int(*deployment.Spec.Replicas) { break } } } } func simulateGitPush(t *testing.T, pathToPayload string) { payload, err := ioutil.ReadFile(pathToPayload) require.NoError(t, err) req, err := http.NewRequest( "POST", fmt.Sprintf("http://127.0.0.1:%v/v1/handle/push", githook.GitHookPort+30000), bytes.NewBuffer(payload), ) req.Header.Set("X-Github-Delivery", "2984f5d0-c032-11e7-82d7-ed3ee54be25d") req.Header.Set("User-Agent", "GitHub-Hookshot/c1d08eb") req.Header.Set("X-Github-Event", "push") req.Header.Set("Content-Type", "application/json") client := &http.Client{} resp, err := client.Do(req) require.NoError(t, err) defer resp.Body.Close() require.Equal(t, 200, resp.StatusCode) } func pipelineRc(t testing.TB, pipelineInfo *pps.PipelineInfo) (*v1.ReplicationController, error) { k := tu.GetKubeClient(t) rc := k.CoreV1().ReplicationControllers(v1.NamespaceDefault) return rc.Get( ppsutil.PipelineRcName(pipelineInfo.Pipeline.Name, pipelineInfo.Version), metav1.GetOptions{}) } func pachdDeployment(t testing.TB) *apps.Deployment { k := tu.GetKubeClient(t) result, err := k.Apps().Deployments(v1.NamespaceDefault).Get("pachd", metav1.GetOptions{}) require.NoError(t, err) return result } // scalePachd scales the number of pachd nodes up or down. // If up is true, then the number of nodes will be within (n, 2n] // If up is false, then the number of nodes will be within [1, n) func scalePachdRandom(t testing.TB, up bool) { pachdRc := pachdDeployment(t) originalReplicas := *pachdRc.Spec.Replicas for { if up { *pachdRc.Spec.Replicas = originalReplicas + int32(rand.Intn(int(originalReplicas))+1) } else { *pachdRc.Spec.Replicas = int32(rand.Intn(int(originalReplicas)-1) + 1) } if *pachdRc.Spec.Replicas != originalReplicas { break } } scalePachdN(t, int(*pachdRc.Spec.Replicas)) } // scalePachdN scales the number of pachd nodes to N func scalePachdN(t testing.TB, n int) { k := tu.GetKubeClient(t) // Modify the type metadata of the Deployment spec we read from k8s, so that // k8s will accept it if we're talking to a 1.7 cluster pachdDeployment := pachdDeployment(t) *pachdDeployment.Spec.Replicas = int32(n) pachdDeployment.TypeMeta.APIVersion = "apps/v1beta1" _, err := k.Apps().Deployments(v1.NamespaceDefault).Update(pachdDeployment) require.NoError(t, err) waitForReadiness(t) // Unfortunately, even when all pods are ready, the cluster membership // protocol might still be running, thus PFS API calls might fail. So // we wait a little bit for membership to stablize. time.Sleep(15 * time.Second) } // scalePachd reads the number of pachd nodes from an env variable and // scales pachd accordingly. 
func scalePachd(t testing.TB) {
	nStr := os.Getenv("PACHD")
	if nStr == "" {
		return
	}
	n, err := strconv.Atoi(nStr)
	require.NoError(t, err)
	scalePachdN(t, n)
}

var pachClient *client.APIClient
var getPachClientOnce sync.Once

func getPachClient(t testing.TB) *client.APIClient {
	getPachClientOnce.Do(func() {
		var err error
		if addr := os.Getenv("PACHD_PORT_650_TCP_ADDR"); addr != "" {
			pachClient, err = client.NewInCluster()
		} else {
			pachClient, err = client.NewForTest()
		}
		require.NoError(t, err)
	})
	return pachClient
}

var etcdClient *etcd.Client
var getEtcdClientOnce sync.Once

const (
	etcdAddress = "localhost:32379" // etcd must already be serving at this address
)

func getEtcdClient(t testing.TB) *etcd.Client {
	getEtcdClientOnce.Do(func() {
		var err error
		etcdClient, err = etcd.New(etcd.Config{
			Endpoints:   []string{etcdAddress},
			DialOptions: client.DefaultDialOptions(),
		})
		require.NoError(t, err)
	})
	return etcdClient
}
[ "\"PROM_PORT\"", "\"PACHD\"", "\"PACHD_PORT_650_TCP_ADDR\"" ]
[]
[ "PACHD_PORT_650_TCP_ADDR", "PROM_PORT", "PACHD" ]
[]
["PACHD_PORT_650_TCP_ADDR", "PROM_PORT", "PACHD"]
go
3
0
gohelper/helper.go
package gohelper

import (
	"os"
)

func getGoBin() string {
	return os.Getenv("GOROOT")
}

func getGoPath() string {
	return os.Getenv("GOPATH")
}
[ "\"GOROOT\"", "\"GOPATH\"" ]
[]
[ "GOPATH", "GOROOT" ]
[]
["GOPATH", "GOROOT"]
go
2
0
src/bot.py
import telebot
import os

bot = telebot.TeleBot(
    os.environ['TELEGRAM_BOT_TOKEN'],
    parse_mode='HTML'
)
[]
[]
[ "TELEGRAM_BOT_TOKEN" ]
[]
["TELEGRAM_BOT_TOKEN"]
python
1
0
driver/postgres/postgres_test.go
package postgres

import (
	"database/sql"
	"os"
	"testing"

	"github.com/mattes/migrate/file"
	"github.com/mattes/migrate/migrate/direction"
	pipep "github.com/mattes/migrate/pipe"
)

// TestMigrate runs some additional tests on Migrate().
// Basic testing is already done in migrate/migrate_test.go
func TestMigrate(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping test in short mode.")
	}

	host := os.Getenv("POSTGRES_PORT_5432_TCP_ADDR")
	port := os.Getenv("POSTGRES_PORT_5432_TCP_PORT")
	driverURL := "postgres://postgres@" + host + ":" + port + "/template1?sslmode=disable"

	// prepare clean database
	connection, err := sql.Open("postgres", driverURL)
	if err != nil {
		t.Fatal(err)
	}
	if _, err := connection.Exec(`
		DROP TABLE IF EXISTS yolo;
		DROP TABLE IF EXISTS ` + tableName + `;`); err != nil {
		t.Fatal(err)
	}

	d := &Driver{}
	if err := d.Initialize(driverURL); err != nil {
		t.Fatal(err)
	}
	// testing idempotency: second call should be a no-op, since table already exists
	if err := d.Initialize(driverURL); err != nil {
		t.Fatal(err)
	}

	files := []file.File{
		{
			Path:      "/foobar",
			FileName:  "001_foobar.up.sql",
			Version:   1,
			Name:      "foobar",
			Direction: direction.Up,
			Content: []byte(`
				CREATE TABLE yolo (
					id serial not null primary key
				);
			`),
		},
		{
			Path:      "/foobar",
			FileName:  "002_foobar.down.sql",
			Version:   1,
			Name:      "foobar",
			Direction: direction.Down,
			Content: []byte(`
				DROP TABLE yolo;
			`),
		},
		{
			Path:      "/foobar",
			FileName:  "002_foobar.up.sql",
			Version:   2,
			Name:      "foobar",
			Direction: direction.Up,
			Content: []byte(`
				CREATE TABLE error (
					id THIS WILL CAUSE AN ERROR
				)
			`),
		},
		{
			Path:      "/foobar",
			FileName:  "20170118205923_demo.up.sql",
			Version:   20170118205923,
			Name:      "demo",
			Direction: direction.Up,
			Content: []byte(`
				CREATE TABLE demo (
					id serial not null primary key
				)
			`),
		},
		{
			Path:      "/foobar",
			FileName:  "20170118205923_demo.down.sql",
			Version:   20170118205923,
			Name:      "demo",
			Direction: direction.Down,
			Content: []byte(`
				DROP TABLE demo
			`),
		},
	}

	pipe := pipep.New()
	go d.Migrate(files[0], pipe)
	errs := pipep.ReadErrors(pipe)
	if len(errs) > 0 {
		t.Fatal(errs)
	}

	pipe = pipep.New()
	go d.Migrate(files[1], pipe)
	errs = pipep.ReadErrors(pipe)
	if len(errs) > 0 {
		t.Fatal(errs)
	}

	pipe = pipep.New()
	go d.Migrate(files[2], pipe)
	errs = pipep.ReadErrors(pipe)
	if len(errs) == 0 {
		t.Error("Expected test case to fail")
	}

	pipe = pipep.New()
	go d.Migrate(files[3], pipe)
	errs = pipep.ReadErrors(pipe)
	if len(errs) > 0 {
		t.Fatal(errs)
	}

	pipe = pipep.New()
	go d.Migrate(files[4], pipe)
	errs = pipep.ReadErrors(pipe)
	if len(errs) > 0 {
		t.Fatal(errs)
	}

	if err := d.Close(); err != nil {
		t.Fatal(err)
	}
}
[ "\"POSTGRES_PORT_5432_TCP_ADDR\"", "\"POSTGRES_PORT_5432_TCP_PORT\"" ]
[]
[ "POSTGRES_PORT_5432_TCP_PORT", "POSTGRES_PORT_5432_TCP_ADDR" ]
[]
["POSTGRES_PORT_5432_TCP_PORT", "POSTGRES_PORT_5432_TCP_ADDR"]
go
2
0
site_scons/site_tools/command_output.py
#!/usr/bin/python2.4 # Copyright 2008, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """Command output builder for SCons.""" import os import signal import subprocess import sys import threading import time import SCons.Script # TODO(rspangler): Move KillProcessTree() and RunCommand() into their own # module. def KillProcessTree(pid): """Kills the process and all of its child processes. Args: pid: process to kill. Raises: OSError: Unsupported OS. """ if sys.platform in ('win32', 'cygwin'): # Use Windows' taskkill utility killproc_path = '%s;%s\\system32;%s\\system32\\wbem' % ( (os.environ['SYSTEMROOT'],) * 3) killproc_cmd = 'taskkill /F /T /PID %d' % pid killproc_task = subprocess.Popen(killproc_cmd, shell=True, stdout=subprocess.PIPE, env={'PATH':killproc_path}) killproc_task.communicate() elif sys.platform in ('linux', 'linux2', 'darwin'): # Use ps to get a list of processes ps_task = subprocess.Popen(['/bin/ps', 'x', '-o', 'pid,ppid'], stdout=subprocess.PIPE) ps_out = ps_task.communicate()[0] # Parse out a dict of pid->ppid ppid = {} for ps_line in ps_out.split('\n'): w = ps_line.strip().split() if len(w) < 2: continue # Not enough words in this line to be a process list try: ppid[int(w[0])] = int(w[1]) except ValueError: pass # Header or footer # For each process, kill it if it or any of its parents is our child for p in ppid: p2 = p while p2: if p2 == pid: os.kill(p, signal.SIGKILL) break p2 = ppid.get(p2) else: raise OSError('Unsupported OS for KillProcessTree()') def RunCommand(cmdargs, cwdir=None, env=None, echo_output=True, timeout=None, timeout_errorlevel=14): """Runs an external command. Args: cmdargs: A command string, or a tuple containing the command and its arguments. cwdir: Working directory for the command, if not None. env: Environment variables dict, if not None. echo_output: If True, output will be echoed to stdout. timeout: If not None, timeout for command in seconds. If command times out, it will be killed and timeout_errorlevel will be returned. timeout_errorlevel: The value to return if the command times out. 
Returns: The integer errorlevel from the command. The combined stdout and stderr as a string. """ # Force unicode string in the environment to strings. if env: env = dict([(k, str(v)) for k, v in env.items()]) start_time = time.time() child = subprocess.Popen(cmdargs, cwd=cwdir, env=env, shell=True, universal_newlines=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) child_out = [] child_retcode = None def _ReadThread(): """Thread worker function to read output from child process. Necessary since there is no cross-platform way of doing non-blocking reads of the output pipe. """ read_run = True while read_run: # Need to have a delay of 1 cycle between child completing and # thread exit, to pick up the final output from the child. if child_retcode is not None: read_run = False new_out = child.stdout.read() if new_out: if echo_output: print new_out, child_out.append(new_out) read_thread = threading.Thread(target=_ReadThread) read_thread.start() # Wait for child to exit or timeout while child_retcode is None: time.sleep(1) # So we don't poll too frequently child_retcode = child.poll() if timeout and child_retcode is None: elapsed = time.time() - start_time if elapsed > timeout: print '*** RunCommand() timeout:', cmdargs KillProcessTree(child.pid) child_retcode = timeout_errorlevel # Wait for worker thread to pick up final output and die read_thread.join(5) if read_thread.isAlive(): print '*** Error: RunCommand() read thread did not exit.' sys.exit(1) if echo_output: print # end last line of output return child_retcode, ''.join(child_out) def CommandOutputBuilder(target, source, env): """Command output builder. Args: self: Environment in which to build target: List of target nodes source: List of source nodes Returns: None or 0 if successful; nonzero to indicate failure. Runs the command specified in the COMMAND_OUTPUT_CMDLINE environment variable and stores its output in the first target file. Additional target files should be specified if the command creates additional output files. Runs the command in the COMMAND_OUTPUT_RUN_DIR subdirectory. """ env = env.Clone() cmdline = env.subst('$COMMAND_OUTPUT_CMDLINE', target=target, source=source) cwdir = env.subst('$COMMAND_OUTPUT_RUN_DIR', target=target, source=source) if cwdir: cwdir = os.path.normpath(cwdir) env.AppendENVPath('PATH', cwdir) env.AppendENVPath('LD_LIBRARY_PATH', cwdir) else: cwdir = None cmdecho = env.get('COMMAND_OUTPUT_ECHO', True) timeout = env.get('COMMAND_OUTPUT_TIMEOUT') timeout_errorlevel = env.get('COMMAND_OUTPUT_TIMEOUT_ERRORLEVEL') retcode, output = RunCommand(cmdline, cwdir=cwdir, env=env['ENV'], echo_output=cmdecho, timeout=timeout, timeout_errorlevel=timeout_errorlevel) # Save command line output output_file = open(str(target[0]), 'w') output_file.write(output) output_file.close() return retcode def generate(env): # NOTE: SCons requires the use of this name, which fails gpylint. """SCons entry point for this tool.""" # Add the builder and tell it which build environment variables we use. action = SCons.Script.Action( CommandOutputBuilder, 'Output "$COMMAND_OUTPUT_CMDLINE" to $TARGET', varlist=[ 'COMMAND_OUTPUT_CMDLINE', 'COMMAND_OUTPUT_RUN_DIR', 'COMMAND_OUTPUT_TIMEOUT', 'COMMAND_OUTPUT_TIMEOUT_ERRORLEVEL', # We use COMMAND_OUTPUT_ECHO also, but that doesn't change the # command being run or its output. 
], ) builder = SCons.Script.Builder(action = action) env.Append(BUILDERS={'CommandOutput': builder}) # Default command line is to run the first input env['COMMAND_OUTPUT_CMDLINE'] = '$SOURCE' # TODO(rspangler): add a pseudo-builder which takes an additional command # line as an argument.
[]
[]
[ "SYSTEMROOT" ]
[]
["SYSTEMROOT"]
python
1
0
staging/src/k8s.io/apiserver/pkg/storage/etcd3/watcher.go
/* Copyright 2016 The Kubernetes Authors. Copyright 2020 Authors of Arktos - file modified. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package etcd3 import ( "context" "errors" "fmt" "github.com/grafov/bcast" corev1 "k8s.io/api/core/v1" "os" "strconv" "strings" "sync" apierrs "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/watch" "k8s.io/apiserver/pkg/storage" "k8s.io/apiserver/pkg/storage/value" "go.etcd.io/etcd/clientv3" "k8s.io/klog" ) const ( // We have set a buffer in order to reduce times of context switches. incomingBufSize = 100 outgoingBufSize = 100 // To be compatible with system resources, its tenant is set as default systemTenant = "default/" ) // fatalOnDecodeError is used during testing to panic the server if watcher encounters a decoding error var fatalOnDecodeError = false // errTestingDecode is the only error that testingDeferOnDecodeError catches during a panic var errTestingDecode = errors.New("sentinel error only used during testing to indicate watch decoding error") // testingDeferOnDecodeError is used during testing to recover from a panic caused by errTestingDecode, all other values continue to panic func testingDeferOnDecodeError() { if r := recover(); r != nil && r != errTestingDecode { panic(r) } } func init() { // check to see if we are running in a test environment TestOnlySetFatalOnDecodeError(true) fatalOnDecodeError, _ = strconv.ParseBool(os.Getenv("KUBE_PANIC_WATCH_DECODE_ERROR")) } // TestOnlySetFatalOnDecodeError should only be used for cases where decode errors are expected and need to be tested. e.g. conversion webhooks. func TestOnlySetFatalOnDecodeError(b bool) { fatalOnDecodeError = b } type watcher struct { client *clientv3.Client codec runtime.Codec versioner storage.Versioner transformer value.Transformer partitionConfig map[string]storage.Interval updatePartitionCh *bcast.Member updateMux sync.Mutex } // watchChan implements watch.Interface. type watchChan struct { watcher *watcher key string initialRev int64 recursive bool internalPred storage.SelectionPredicate ctx context.Context cancel context.CancelFunc incomingEventChan chan *event resultChan chan watch.Event errChan chan error keyRange keyRange } func newWatcherWithPartitionConfig(client *clientv3.Client, codec runtime.Codec, versioner storage.Versioner, transformer value.Transformer, partitionConfigMap map[string]storage.Interval, updatePartitionCh *bcast.Member) *watcher { return &watcher{ client: client, codec: codec, versioner: versioner, transformer: transformer, partitionConfig: partitionConfigMap, updatePartitionCh: updatePartitionCh, } } // Watch watches on a key and returns a watch.Interface that transfers relevant notifications. // If rev is zero, it will return the existing object(s) and then start watching from // the maximum revision+1 from returned objects. // If rev is non-zero, it will watch events happened after given revision. // If recursive is false, it watches on given key. 
// If recursive is true, it watches any children and directories under the key, excluding the root key itself. // pred must be non-nil. Only if pred matches the change, it will be returned. func (w *watcher) Watch(ctx context.Context, key string, rev int64, recursive bool, pred storage.SelectionPredicate) watch.AggregatedWatchInterface { //klog.Infof("========= watcher watch key %s", key) if recursive && !strings.HasSuffix(key, "/") { key += "/" } res := watch.NewAggregatedWatcherWithReset(ctx) //klog.Infof("Created aggregated watch channel %#v for key %s", res.ResultChan(), key) go w.run(ctx, key, rev, recursive, pred, res) return res } func (w *watcher) run(ctx context.Context, key string, rev int64, recursive bool, pred storage.SelectionPredicate, res *watch.AggregatedWatcher) { for { keyRanges := GetKeyAndOptFromPartitionConfig(key, w.partitionConfig) wcs := make([]*watchChan, 0) for _, kr := range keyRanges { wc := w.createWatchChan(ctx, key, rev, recursive, pred, kr) wcs = append(wcs, wc) go wc.run() res.AddWatchInterface(wc, nil) } if w.updatePartitionCh == nil { return } else { select { case data, ok := <-w.updatePartitionCh.Read: if !ok { klog.Fatalf("Channel closed for data partition update. key %s", key) return } dataPartition, _ := data.(corev1.DataPartitionConfig) for _, wc := range wcs { wc.Stop() } w.updatePartitionConfig(dataPartition) klog.V(4).Infof("Reset data partition DONE. watch key %s, New partition [%+v]", key, dataPartition) } } } } func (w *watcher) updatePartitionConfig(dp corev1.DataPartitionConfig) { // try to avoid concurrent map writes error w.updateMux.Lock() rangeStartValue := "" if dp.IsRangeStartValid { rangeStartValue = dp.RangeStart } rangeEndValue := "" if dp.IsRangeEndValid { rangeEndValue = dp.RangeEnd } interval := storage.Interval{ Begin: rangeStartValue, End: rangeEndValue, } for k := range w.partitionConfig { klog.V(3).Infof("updatePartitionConfig interval key %s, interval [%+v]", k, interval) w.partitionConfig[k] = interval } w.updateMux.Unlock() } func (w *watcher) createWatchChan(ctx context.Context, key string, rev int64, recursive bool, pred storage.SelectionPredicate, kr keyRange) *watchChan { wc := &watchChan{ watcher: w, key: key, initialRev: rev, recursive: recursive, internalPred: pred, incomingEventChan: make(chan *event, incomingBufSize), resultChan: make(chan watch.Event, outgoingBufSize), errChan: make(chan error, 1), keyRange: kr, } if pred.Empty() { // The filter doesn't filter out any object. wc.internalPred = storage.Everything } wc.ctx, wc.cancel = context.WithCancel(ctx) return wc } func (wc *watchChan) run() { watchClosedCh := make(chan struct{}) go wc.startWatching(watchClosedCh) var resultChanWG sync.WaitGroup resultChanWG.Add(1) go wc.processEvent(&resultChanWG) select { case err := <-wc.errChan: if err == context.Canceled { break } errResult := transformErrorToEvent(err) if errResult != nil { // error result is guaranteed to be received by user before closing ResultChan. select { case wc.resultChan <- *errResult: case <-wc.ctx.Done(): // user has given up all results } } case <-watchClosedCh: case <-wc.ctx.Done(): // user cancel } // We use wc.ctx to reap all goroutines. Under whatever condition, we should stop them all. // It's fine to double cancel. 
wc.cancel() // we need to wait until resultChan wouldn't be used anymore resultChanWG.Wait() close(wc.resultChan) klog.V(3).Infof("Result channel closed for key %s, range %+v", wc.key, wc.keyRange) } func (wc *watchChan) Stop() { wc.cancel() } func (wc *watchChan) ResultChan() <-chan watch.Event { return wc.resultChan } // sync tries to retrieve existing data and send them to process. // The revision to watch will be set to the revision in response. // All events sent will have isCreated=true func (wc *watchChan) sync() error { opts := []clientv3.OpOption{} if wc.recursive { opts = append(opts, clientv3.WithPrefix()) } getResp, err := wc.watcher.client.Get(wc.ctx, wc.key, opts...) if err != nil { return err } wc.initialRev = getResp.Header.Revision for _, kv := range getResp.Kvs { wc.sendEvent(parseKV(kv)) } return nil } // startWatching does: // - get current objects if initialRev=0; set initialRev to current rev // - watch on given key and send events to process. func (wc *watchChan) startWatching(watchClosedCh chan struct{}) { if wc.initialRev == 0 { if err := wc.sync(); err != nil { klog.Errorf("failed to sync with latest state: %v", err) wc.sendError(err) return } } opts := []clientv3.OpOption{clientv3.WithRev(wc.initialRev + 1), clientv3.WithPrevKV()} if wc.recursive { opts = append(opts, clientv3.WithPrefix()) } klog.V(3).Infof("Starting watcher for wc.ctx=%v, wc.key=%v", wc.ctx, wc.key) if wc.keyRange.begin != "" && wc.keyRange.end != "" { wc.key = wc.keyRange.begin opts = append(opts, clientv3.WithRange(wc.keyRange.end)) klog.V(3).Infof("The updated key range wc.key=%v, wc.withRange=%v ", wc.key, wc.keyRange.end) } wch := wc.watcher.client.Watch(wc.ctx, wc.key, opts...) for wres := range wch { if wres.Err() != nil { err := wres.Err() // If there is an error on server (e.g. compaction), the channel will return it before closed. klog.Errorf("watch chan error: %v", err) wc.sendError(err) return } for _, e := range wres.Events { parsedEvent, err := parseEvent(e) if err != nil { klog.Errorf("watch chan error: %v", err) wc.sendError(err) return } wc.sendEvent(parsedEvent) } } // When we come to this point, it's only possible that client side ends the watch. // e.g. cancel the context, close the client. // If this watch chan is broken and context isn't cancelled, other goroutines will still hang. // We should notify the main thread that this goroutine has exited. close(watchClosedCh) } // getKeyAndOptFromPartitionConfig does: // - update the watchChan key by adding interval begin / end // - create the opts by adding opened range end or range beginning if either of them applies func GetKeyAndOptFromPartitionConfig(key string, partitionConfig map[string]storage.Interval) []keyRange { var res []keyRange if val, ok := partitionConfig[key]; ok { updatedKey := key updatedEnd := key // The interval end is not empty. if len(val.Begin) > 0 { updatedKey += val.Begin // If the interval begin is not empty, update the key by adding the interval begin, such as [key+val.Begin, key + val.End) if len(val.End) > 0 { updatedEnd = key + val.End // If the interval begin is empty, update the key by adding the interval begin, such as [key+val.Begin, ∞) } else { updatedEnd = clientv3.GetPrefixRangeEnd(key) } } else { // The interval begin is empty and the end is not empty. So that the key remains unchanged, such as [key, key + val.End) if len(val.End) > 0 { updatedEnd = key + val.End } // If the begin and the end are both empty, there is no need to add options. 
} if updatedKey != updatedEnd { res = append(res, keyRange{updatedKey, updatedEnd}) systemTenantKey := key + systemTenant if systemTenantKey < updatedKey || systemTenantKey >= updatedEnd { res = append(res, keyRange{systemTenantKey, clientv3.GetPrefixRangeEnd(systemTenantKey)}) } } else if updatedKey == key { res = append(res, keyRange{key, ""}) } } else { res = append(res, keyRange{key, ""}) } return res } // processEvent processes events from etcd watcher and sends results to resultChan. func (wc *watchChan) processEvent(wg *sync.WaitGroup) { defer wg.Done() for { select { case e := <-wc.incomingEventChan: res := wc.transform(e) if res == nil { continue } if len(wc.resultChan) == outgoingBufSize { klog.V(3).Infof("Fast watcher, slow processing. Number of buffered events: %d."+ "Probably caused by slow dispatching events to watchers", outgoingBufSize) } // If user couldn't receive results fast enough, we also block incoming events from watcher. // Because storing events in local will cause more memory usage. // The worst case would be closing the fast watcher. select { case wc.resultChan <- *res: case <-wc.ctx.Done(): return } case <-wc.ctx.Done(): return } } } func (wc *watchChan) filter(obj runtime.Object) bool { if wc.internalPred.Empty() { return true } matched, err := wc.internalPred.Matches(obj) return err == nil && matched } func (wc *watchChan) acceptAll() bool { return wc.internalPred.Empty() } // transform transforms an event into a result for user if not filtered. func (wc *watchChan) transform(e *event) (res *watch.Event) { curObj, oldObj, err := wc.prepareObjs(e) if err != nil { klog.Errorf("failed to prepare current and previous objects: %v", err) wc.sendError(err) return nil } switch { case e.isDeleted: if !wc.filter(oldObj) { return nil } res = &watch.Event{ Type: watch.Deleted, Object: oldObj, } case e.isCreated: if !wc.filter(curObj) { return nil } res = &watch.Event{ Type: watch.Added, Object: curObj, } default: if wc.acceptAll() { res = &watch.Event{ Type: watch.Modified, Object: curObj, } return res } curObjPasses := wc.filter(curObj) oldObjPasses := wc.filter(oldObj) switch { case curObjPasses && oldObjPasses: res = &watch.Event{ Type: watch.Modified, Object: curObj, } case curObjPasses && !oldObjPasses: res = &watch.Event{ Type: watch.Added, Object: curObj, } case !curObjPasses && oldObjPasses: res = &watch.Event{ Type: watch.Deleted, Object: oldObj, } } } return res } func transformErrorToEvent(err error) *watch.Event { err = interpretWatchError(err) if _, ok := err.(apierrs.APIStatus); !ok { err = apierrs.NewInternalError(err) } status := err.(apierrs.APIStatus).Status() return &watch.Event{ Type: watch.Error, Object: &status, } } func (wc *watchChan) sendError(err error) { select { case wc.errChan <- err: case <-wc.ctx.Done(): } } func (wc *watchChan) sendEvent(e *event) { if len(wc.incomingEventChan) == incomingBufSize { klog.V(3).Infof("Fast watcher, slow processing. 
Number of buffered events: %d."+ "Probably caused by slow decoding, user not receiving fast, or other processing logic", incomingBufSize) } select { case wc.incomingEventChan <- e: case <-wc.ctx.Done(): } } func (wc *watchChan) prepareObjs(e *event) (curObj runtime.Object, oldObj runtime.Object, err error) { if !e.isDeleted { data, _, err := wc.watcher.transformer.TransformFromStorage(e.value, authenticatedDataString(e.key)) if err != nil { return nil, nil, err } curObj, err = decodeObj(wc.watcher.codec, wc.watcher.versioner, data, e.rev) if err != nil { return nil, nil, err } } // We need to decode prevValue, only if this is deletion event or // the underlying filter doesn't accept all objects (otherwise we // know that the filter for previous object will return true and // we need the object only to compute whether it was filtered out // before). if len(e.prevValue) > 0 && (e.isDeleted || !wc.acceptAll()) { data, _, err := wc.watcher.transformer.TransformFromStorage(e.prevValue, authenticatedDataString(e.key)) if err != nil { return nil, nil, err } // Note that this sends the *old* object with the etcd revision for the time at // which it gets deleted. oldObj, err = decodeObj(wc.watcher.codec, wc.watcher.versioner, data, e.rev) if err != nil { return nil, nil, err } } return curObj, oldObj, nil } func decodeObj(codec runtime.Codec, versioner storage.Versioner, data []byte, rev int64) (_ runtime.Object, err error) { obj, err := runtime.Decode(codec, []byte(data)) if err != nil { if fatalOnDecodeError { // catch watch decode error iff we caused it on // purpose during a unit test defer testingDeferOnDecodeError() // we are running in a test environment and thus an // error here is due to a coder mistake if the defer // does not catch it panic(err) } return nil, err } // ensure resource version is set on the object we load from etcd if err := versioner.UpdateObject(obj, uint64(rev)); err != nil { return nil, fmt.Errorf("failure to version api object (%d) %#v: %v", rev, obj, err) } return obj, nil } type keyRange struct { begin, end string }
[ "\"KUBE_PANIC_WATCH_DECODE_ERROR\"" ]
[]
[ "KUBE_PANIC_WATCH_DECODE_ERROR" ]
[]
["KUBE_PANIC_WATCH_DECODE_ERROR"]
go
1
0
example/type-system-extension/server/server.go
package main

import (
	"log"
	"net/http"
	"os"

	"github.com/vndocker/encrypted-graphql/graphql/playground"

	extension "github.com/vndocker/encrypted-graphql/example/type-system-extension"
	"github.com/vndocker/encrypted-graphql/graphql/handler"
)

const defaultPort = "8080"

func main() {
	port := os.Getenv("PORT")
	if port == "" {
		port = defaultPort
	}

	http.Handle("/", playground.Handler("GraphQL playground", "/query"))
	http.Handle("/query", handler.NewDefaultServer(
		extension.NewExecutableSchema(
			extension.Config{
				Resolvers: extension.NewRootResolver(),
				Directives: extension.DirectiveRoot{
					EnumLogging:   extension.EnumLogging,
					FieldLogging:  extension.FieldLogging,
					InputLogging:  extension.InputLogging,
					ObjectLogging: extension.ObjectLogging,
					ScalarLogging: extension.ScalarLogging,
					UnionLogging:  extension.UnionLogging,
				},
			},
		),
	))

	log.Printf("connect to http://localhost:%s/ for GraphQL playground", port)
	log.Fatal(http.ListenAndServe(":"+port, nil))
}
[ "\"PORT\"" ]
[]
[ "PORT" ]
[]
["PORT"]
go
1
0
swift/common/utils.py
# Copyright (c) 2010-2012 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. """Miscellaneous utility functions for use with Swift.""" from __future__ import print_function import errno import fcntl import grp import hmac import json import math import operator import os import pwd import re import sys import time import uuid import functools import platform import email.parser from distutils.version import LooseVersion from hashlib import md5, sha1 from random import random, shuffle from contextlib import contextmanager, closing import ctypes import ctypes.util from optparse import OptionParser from tempfile import mkstemp, NamedTemporaryFile import glob import itertools import stat import datetime import eventlet import eventlet.semaphore from eventlet import GreenPool, sleep, Timeout, tpool from eventlet.green import socket, threading import eventlet.queue import netifaces import codecs utf8_decoder = codecs.getdecoder('utf-8') utf8_encoder = codecs.getencoder('utf-8') import six from six.moves import cPickle as pickle from six.moves.configparser import (ConfigParser, NoSectionError, NoOptionError, RawConfigParser) from six.moves import range from six.moves.urllib.parse import ParseResult from six.moves.urllib.parse import quote as _quote from six.moves.urllib.parse import urlparse as stdlib_urlparse from swift import gettext_ as _ import swift.common.exceptions from swift.common.http import is_success, is_redirection, HTTP_NOT_FOUND, \ HTTP_PRECONDITION_FAILED, HTTP_REQUESTED_RANGE_NOT_SATISFIABLE from swift.common.header_key_dict import HeaderKeyDict from swift.common.linkat import linkat if six.PY3: stdlib_queue = eventlet.patcher.original('queue') else: stdlib_queue = eventlet.patcher.original('Queue') stdlib_threading = eventlet.patcher.original('threading') # logging doesn't import patched as cleanly as one would like from logging.handlers import SysLogHandler import logging logging.thread = eventlet.green.thread logging.threading = eventlet.green.threading logging._lock = logging.threading.RLock() # setup notice level logging NOTICE = 25 logging.addLevelName(NOTICE, 'NOTICE') SysLogHandler.priority_map['NOTICE'] = 'notice' # These are lazily pulled from libc elsewhere _sys_fallocate = None _posix_fadvise = None _libc_socket = None _libc_bind = None _libc_accept = None # see man -s 2 setpriority _libc_setpriority = None # see man -s 2 syscall _posix_syscall = None # If set to non-zero, fallocate routines will fail based on free space # available being at or below this amount, in bytes. FALLOCATE_RESERVE = 0 # Indicates if FALLOCATE_RESERVE is the percentage of free space (True) or # the number of bytes (False). 
FALLOCATE_IS_PERCENT = False # from /usr/src/linux-headers-*/include/uapi/linux/resource.h PRIO_PROCESS = 0 # /usr/include/x86_64-linux-gnu/asm/unistd_64.h defines syscalls there # are many like it, but this one is mine, see man -s 2 ioprio_set def NR_ioprio_set(): """Give __NR_ioprio_set value for your system.""" architecture = os.uname()[4] arch_bits = platform.architecture()[0] # check if supported system, now support x86_64 and AArch64 if architecture == 'x86_64' and arch_bits == '64bit': return 251 elif architecture == 'aarch64' and arch_bits == '64bit': return 30 raise OSError("Swift doesn't support ionice priority for %s %s" % (architecture, arch_bits)) # this syscall integer probably only works on x86_64 linux systems, you # can check if it's correct on yours with something like this: """ #include <stdio.h> #include <sys/syscall.h> int main(int argc, const char* argv[]) { printf("%d\n", __NR_ioprio_set); return 0; } """ # this is the value for "which" that says our who value will be a pid # pulled out of /usr/src/linux-headers-*/include/linux/ioprio.h IOPRIO_WHO_PROCESS = 1 IO_CLASS_ENUM = { 'IOPRIO_CLASS_RT': 1, 'IOPRIO_CLASS_BE': 2, 'IOPRIO_CLASS_IDLE': 3, } # the IOPRIO_PRIO_VALUE "macro" is also pulled from # /usr/src/linux-headers-*/include/linux/ioprio.h IOPRIO_CLASS_SHIFT = 13 def IOPRIO_PRIO_VALUE(class_, data): return (((class_) << IOPRIO_CLASS_SHIFT) | data) # Used by hash_path to offer a bit more security when generating hashes for # paths. It simply appends this value to all paths; guessing the hash a path # will end up with would also require knowing this suffix. HASH_PATH_SUFFIX = '' HASH_PATH_PREFIX = '' SWIFT_CONF_FILE = '/etc/swift/swift.conf' # These constants are Linux-specific, and Python doesn't seem to know # about them. We ask anyway just in case that ever gets fixed. # # The values were copied from the Linux 3.x kernel headers. AF_ALG = getattr(socket, 'AF_ALG', 38) F_SETPIPE_SZ = getattr(fcntl, 'F_SETPIPE_SZ', 1031) O_TMPFILE = getattr(os, 'O_TMPFILE', 0o20000000 | os.O_DIRECTORY) # Used by the parse_socket_string() function to validate IPv6 addresses IPV6_RE = re.compile("^\[(?P<address>.*)\](:(?P<port>[0-9]+))?$") MD5_OF_EMPTY_STRING = 'd41d8cd98f00b204e9800998ecf8427e' class InvalidHashPathConfigError(ValueError): def __str__(self): return "[swift-hash]: both swift_hash_path_suffix and " \ "swift_hash_path_prefix are missing from %s" % SWIFT_CONF_FILE def validate_hash_conf(): global HASH_PATH_SUFFIX global HASH_PATH_PREFIX if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX: hash_conf = ConfigParser() if hash_conf.read(SWIFT_CONF_FILE): try: HASH_PATH_SUFFIX = hash_conf.get('swift-hash', 'swift_hash_path_suffix') except (NoSectionError, NoOptionError): pass try: HASH_PATH_PREFIX = hash_conf.get('swift-hash', 'swift_hash_path_prefix') except (NoSectionError, NoOptionError): pass if not HASH_PATH_SUFFIX and not HASH_PATH_PREFIX: raise InvalidHashPathConfigError() try: validate_hash_conf() except InvalidHashPathConfigError: # could get monkey patched or lazy loaded pass def get_hmac(request_method, path, expires, key): """ Returns the hexdigest string of the HMAC-SHA1 (RFC 2104) for the request. :param request_method: Request method to allow. :param path: The path to the resource to allow access to. :param expires: Unix timestamp as an int for when the URL expires. :param key: HMAC shared secret. :returns: hexdigest str of the HMAC-SHA1 for the request. 
""" return hmac.new( key, '%s\n%s\n%s' % (request_method, expires, path), sha1).hexdigest() # Used by get_swift_info and register_swift_info to store information about # the swift cluster. _swift_info = {} _swift_admin_info = {} def get_swift_info(admin=False, disallowed_sections=None): """ Returns information about the swift cluster that has been previously registered with the register_swift_info call. :param admin: boolean value, if True will additionally return an 'admin' section with information previously registered as admin info. :param disallowed_sections: list of section names to be withheld from the information returned. :returns: dictionary of information about the swift cluster. """ disallowed_sections = disallowed_sections or [] info = dict(_swift_info) for section in disallowed_sections: key_to_pop = None sub_section_dict = info for sub_section in section.split('.'): if key_to_pop: sub_section_dict = sub_section_dict.get(key_to_pop, {}) if not isinstance(sub_section_dict, dict): sub_section_dict = {} break key_to_pop = sub_section sub_section_dict.pop(key_to_pop, None) if admin: info['admin'] = dict(_swift_admin_info) info['admin']['disallowed_sections'] = list(disallowed_sections) return info def register_swift_info(name='swift', admin=False, **kwargs): """ Registers information about the swift cluster to be retrieved with calls to get_swift_info. NOTE: Do not use "." in the param: name or any keys in kwargs. "." is used in the disallowed_sections to remove unwanted keys from /info. :param name: string, the section name to place the information under. :param admin: boolean, if True, information will be registered to an admin section which can optionally be withheld when requesting the information. :param kwargs: key value arguments representing the information to be added. :raises ValueError: if name or any of the keys in kwargs has "." in it """ if name == 'admin' or name == 'disallowed_sections': raise ValueError('\'{0}\' is reserved name.'.format(name)) if admin: dict_to_use = _swift_admin_info else: dict_to_use = _swift_info if name not in dict_to_use: if "." in name: raise ValueError('Cannot use "." in a swift_info key: %s' % name) dict_to_use[name] = {} for key, val in kwargs.items(): if "." in key: raise ValueError('Cannot use "." in a swift_info key: %s' % key) dict_to_use[name][key] = val def backward(f, blocksize=4096): """ A generator returning lines from a file starting with the last line, then the second last line, etc. i.e., it reads lines backwards. Stops when the first line (if any) is read. This is useful when searching for recent activity in very large files. :param f: file object to read :param blocksize: no of characters to go backwards at each block """ f.seek(0, os.SEEK_END) if f.tell() == 0: return last_row = b'' while f.tell() != 0: try: f.seek(-blocksize, os.SEEK_CUR) except IOError: blocksize = f.tell() f.seek(-blocksize, os.SEEK_CUR) block = f.read(blocksize) f.seek(-blocksize, os.SEEK_CUR) rows = block.split(b'\n') rows[-1] = rows[-1] + last_row while rows: last_row = rows.pop(-1) if rows and last_row: yield last_row yield last_row # Used when reading config values TRUE_VALUES = set(('true', '1', 'yes', 'on', 't', 'y')) def config_true_value(value): """ Returns True if the value is either True or a string in TRUE_VALUES. Returns False otherwise. 
""" return value is True or \ (isinstance(value, six.string_types) and value.lower() in TRUE_VALUES) def config_positive_int_value(value): """ Returns positive int value if it can be cast by int() and it's an integer > 0. (not including zero) Raises ValueError otherwise. """ try: value = int(value) if value < 1: raise ValueError() except (TypeError, ValueError): raise ValueError( 'Config option must be an positive int number, not "%s".' % value) return value def config_auto_int_value(value, default): """ Returns default if value is None or 'auto'. Returns value as an int or raises ValueError otherwise. """ if value is None or \ (isinstance(value, six.string_types) and value.lower() == 'auto'): return default try: value = int(value) except (TypeError, ValueError): raise ValueError('Config option must be an integer or the ' 'string "auto", not "%s".' % value) return value def append_underscore(prefix): if prefix and not prefix.endswith('_'): prefix += '_' return prefix def config_read_reseller_options(conf, defaults): """ Read reseller_prefix option and associated options from configuration Reads the reseller_prefix option, then reads options that may be associated with a specific reseller prefix. Reads options such that an option without a prefix applies to all reseller prefixes unless an option has an explicit prefix. :param conf: the configuration :param defaults: a dict of default values. The key is the option name. The value is either an array of strings or a string :return: tuple of an array of reseller prefixes and a dict of option values """ reseller_prefix_opt = conf.get('reseller_prefix', 'AUTH').split(',') reseller_prefixes = [] for prefix in [pre.strip() for pre in reseller_prefix_opt if pre.strip()]: if prefix == "''": prefix = '' prefix = append_underscore(prefix) if prefix not in reseller_prefixes: reseller_prefixes.append(prefix) if len(reseller_prefixes) == 0: reseller_prefixes.append('') # Get prefix-using config options associated_options = {} for prefix in reseller_prefixes: associated_options[prefix] = dict(defaults) associated_options[prefix].update( config_read_prefixed_options(conf, '', defaults)) prefix_name = prefix if prefix != '' else "''" associated_options[prefix].update( config_read_prefixed_options(conf, prefix_name, defaults)) return reseller_prefixes, associated_options def config_read_prefixed_options(conf, prefix_name, defaults): """ Read prefixed options from configuration :param conf: the configuration :param prefix_name: the prefix (including, if needed, an underscore) :param defaults: a dict of default values. The dict supplies the option name and type (string or comma separated string) :return: a dict containing the options """ params = {} for option_name in defaults.keys(): value = conf.get('%s%s' % (prefix_name, option_name)) if value: if isinstance(defaults.get(option_name), list): params[option_name] = [] for role in value.lower().split(','): params[option_name].append(role.strip()) else: params[option_name] = value.strip() return params def noop_libc_function(*args): return 0 def validate_configuration(): try: validate_hash_conf() except InvalidHashPathConfigError as e: sys.exit("Error: %s" % e) def load_libc_function(func_name, log_error=True, fail_if_missing=False, errcheck=False): """ Attempt to find the function in libc, otherwise return a no-op func. :param func_name: name of the function to pull from libc. :param log_error: log an error when a function can't be found :param fail_if_missing: raise an exception when a function can't be found. 
Default behavior is to return a no-op function. :param errcheck: boolean, if true install a wrapper on the function to check for a return values of -1 and call ctype.get_errno and raise an OSError """ try: libc = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True) func = getattr(libc, func_name) except AttributeError: if fail_if_missing: raise if log_error: logging.warning(_("Unable to locate %s in libc. Leaving as a " "no-op."), func_name) return noop_libc_function if errcheck: def _errcheck(result, f, args): if result == -1: errcode = ctypes.get_errno() raise OSError(errcode, os.strerror(errcode)) return result func.errcheck = _errcheck return func def generate_trans_id(trans_id_suffix): return 'tx%s-%010x%s' % ( uuid.uuid4().hex[:21], time.time(), quote(trans_id_suffix)) def get_policy_index(req_headers, res_headers): """ Returns the appropriate index of the storage policy for the request from a proxy server :param req_headers: dict of the request headers. :param res_headers: dict of the response headers. :returns: string index of storage policy, or None """ header = 'X-Backend-Storage-Policy-Index' policy_index = res_headers.get(header, req_headers.get(header)) return str(policy_index) if policy_index is not None else None def get_log_line(req, res, trans_time, additional_info): """ Make a line for logging that matches the documented log line format for backend servers. :param req: the request. :param res: the response. :param trans_time: the time the request took to complete, a float. :param additional_info: a string to log at the end of the line :returns: a properly formatted line for logging. """ policy_index = get_policy_index(req.headers, res.headers) return '%s - - [%s] "%s %s" %s %s "%s" "%s" "%s" %.4f "%s" %d %s' % ( req.remote_addr, time.strftime('%d/%b/%Y:%H:%M:%S +0000', time.gmtime()), req.method, req.path, res.status.split()[0], res.content_length or '-', req.referer or '-', req.headers.get('x-trans-id', '-'), req.user_agent or '-', trans_time, additional_info or '-', os.getpid(), policy_index or '-') def get_trans_id_time(trans_id): if len(trans_id) >= 34 and \ trans_id.startswith('tx') and trans_id[23] == '-': try: return int(trans_id[24:34], 16) except ValueError: pass return None def config_fallocate_value(reserve_value): """ Returns fallocate reserve_value as an int or float. Returns is_percent as a boolean. Returns a ValueError on invalid fallocate value. """ try: if str(reserve_value[-1:]) == '%': reserve_value = float(reserve_value[:-1]) is_percent = True else: reserve_value = int(reserve_value) is_percent = False except ValueError: raise ValueError('Error: %s is an invalid value for fallocate' '_reserve.' % reserve_value) return reserve_value, is_percent class FileLikeIter(object): def __init__(self, iterable): """ Wraps an iterable to behave as a file-like object. The iterable must yield bytes strings. """ self.iterator = iter(iterable) self.buf = None self.closed = False def __iter__(self): return self def next(self): """ next(x) -> the next value, or raise StopIteration """ if self.closed: raise ValueError('I/O operation on closed file') if self.buf: rv = self.buf self.buf = None return rv else: return next(self.iterator) __next__ = next def read(self, size=-1): """ read([size]) -> read at most size bytes, returned as a bytes string. If the size argument is negative or omitted, read until EOF is reached. Notice that when in non-blocking mode, less data than what was requested may be returned, even if no size parameter was given. 
""" if self.closed: raise ValueError('I/O operation on closed file') if size < 0: return b''.join(self) elif not size: chunk = b'' elif self.buf: chunk = self.buf self.buf = None else: try: chunk = next(self.iterator) except StopIteration: return b'' if len(chunk) > size: self.buf = chunk[size:] chunk = chunk[:size] return chunk def readline(self, size=-1): """ readline([size]) -> next line from the file, as a bytes string. Retain newline. A non-negative size argument limits the maximum number of bytes to return (an incomplete line may be returned then). Return an empty string at EOF. """ if self.closed: raise ValueError('I/O operation on closed file') data = b'' while b'\n' not in data and (size < 0 or len(data) < size): if size < 0: chunk = self.read(1024) else: chunk = self.read(size - len(data)) if not chunk: break data += chunk if b'\n' in data: data, sep, rest = data.partition(b'\n') data += sep if self.buf: self.buf = rest + self.buf else: self.buf = rest return data def readlines(self, sizehint=-1): """ readlines([size]) -> list of bytes strings, each a line from the file. Call readline() repeatedly and return a list of the lines so read. The optional size argument, if given, is an approximate bound on the total number of bytes in the lines returned. """ if self.closed: raise ValueError('I/O operation on closed file') lines = [] while True: line = self.readline(sizehint) if not line: break lines.append(line) if sizehint >= 0: sizehint -= len(line) if sizehint <= 0: break return lines def close(self): """ close() -> None or (perhaps) an integer. Close the file. Sets data attribute .closed to True. A closed file cannot be used for further I/O operations. close() may be called more than once without error. Some kinds of file objects (for example, opened by popen()) may return an exit status upon closing. """ self.iterator = None self.closed = True class FallocateWrapper(object): def __init__(self, noop=False): self.noop = noop if self.noop: self.func_name = 'posix_fallocate' self.fallocate = noop_libc_function return # fallocate is preferred because we need the on-disk size to match # the allocated size. Older versions of sqlite require that the # two sizes match. However, fallocate is Linux only. for func in ('fallocate', 'posix_fallocate'): self.func_name = func self.fallocate = load_libc_function(func, log_error=False) if self.fallocate is not noop_libc_function: break if self.fallocate is noop_libc_function: logging.warning(_("Unable to locate fallocate, posix_fallocate in " "libc. Leaving as a no-op.")) def __call__(self, fd, mode, offset, length): """The length parameter must be a ctypes.c_uint64.""" if not self.noop: if FALLOCATE_RESERVE > 0: st = os.fstatvfs(fd) free = st.f_frsize * st.f_bavail - length.value if FALLOCATE_IS_PERCENT: free = \ (float(free) / float(st.f_frsize * st.f_blocks)) * 100 if float(free) <= float(FALLOCATE_RESERVE): raise OSError( errno.ENOSPC, 'FALLOCATE_RESERVE fail %s <= %s' % (free, FALLOCATE_RESERVE)) args = { 'fallocate': (fd, mode, offset, length), 'posix_fallocate': (fd, offset, length) } return self.fallocate(*args[self.func_name]) def disable_fallocate(): global _sys_fallocate _sys_fallocate = FallocateWrapper(noop=True) def fallocate(fd, size): """ Pre-allocate disk space for a file. 
:param fd: file descriptor :param size: size to allocate (in bytes) """ global _sys_fallocate if _sys_fallocate is None: _sys_fallocate = FallocateWrapper() if size < 0: size = 0 # 1 means "FALLOC_FL_KEEP_SIZE", which means it pre-allocates invisibly ret = _sys_fallocate(fd, 1, 0, ctypes.c_uint64(size)) err = ctypes.get_errno() if ret and err not in (0, errno.ENOSYS, errno.EOPNOTSUPP, errno.EINVAL): raise OSError(err, 'Unable to fallocate(%s)' % size) def fsync(fd): """ Sync modified file data and metadata to disk. :param fd: file descriptor """ if hasattr(fcntl, 'F_FULLSYNC'): try: fcntl.fcntl(fd, fcntl.F_FULLSYNC) except IOError as e: raise OSError(e.errno, 'Unable to F_FULLSYNC(%s)' % fd) else: os.fsync(fd) def fdatasync(fd): """ Sync modified file data to disk. :param fd: file descriptor """ try: os.fdatasync(fd) except AttributeError: fsync(fd) def fsync_dir(dirpath): """ Sync directory entries to disk. :param dirpath: Path to the directory to be synced. """ dirfd = None try: dirfd = os.open(dirpath, os.O_DIRECTORY | os.O_RDONLY) fsync(dirfd) except OSError as err: if err.errno == errno.ENOTDIR: # Raise error if someone calls fsync_dir on a non-directory raise logging.warning(_('Unable to perform fsync() on directory %(dir)s:' ' %(err)s'), {'dir': dirpath, 'err': os.strerror(err.errno)}) finally: if dirfd: os.close(dirfd) def drop_buffer_cache(fd, offset, length): """ Drop 'buffer' cache for the given range of the given file. :param fd: file descriptor :param offset: start offset :param length: length """ global _posix_fadvise if _posix_fadvise is None: _posix_fadvise = load_libc_function('posix_fadvise64') # 4 means "POSIX_FADV_DONTNEED" ret = _posix_fadvise(fd, ctypes.c_uint64(offset), ctypes.c_uint64(length), 4) if ret != 0: logging.warning("posix_fadvise64(%(fd)s, %(offset)s, %(length)s, 4) " "-> %(ret)s", {'fd': fd, 'offset': offset, 'length': length, 'ret': ret}) NORMAL_FORMAT = "%016.05f" INTERNAL_FORMAT = NORMAL_FORMAT + '_%016x' SHORT_FORMAT = NORMAL_FORMAT + '_%x' MAX_OFFSET = (16 ** 16) - 1 PRECISION = 1e-5 # Setting this to True will cause the internal format to always display # extended digits - even when the value is equivalent to the normalized form. # This isn't ideal during an upgrade when some servers might not understand # the new time format - but flipping it to True works great for testing. FORCE_INTERNAL = False # or True @functools.total_ordering class Timestamp(object): """ Internal Representation of Swift Time. The normalized form of the X-Timestamp header looks like a float with a fixed width to ensure stable string sorting - normalized timestamps look like "1402464677.04188" To support overwrites of existing data without modifying the original timestamp but still maintain consistency a second internal offset vector is append to the normalized timestamp form which compares and sorts greater than the fixed width float format but less than a newer timestamp. The internalized format of timestamps looks like "1402464677.04188_0000000000000000" - the portion after the underscore is the offset and is a formatted hexadecimal integer. The internalized form is not exposed to clients in responses from Swift. Normal client operations will not create a timestamp with an offset. The Timestamp class in common.utils supports internalized and normalized formatting of timestamps and also comparison of timestamp values. 
When the offset value of a Timestamp is 0 - it's considered insignificant and need not be represented in the string format; to support backwards compatibility during a Swift upgrade the internalized and normalized form of a Timestamp with an insignificant offset are identical. When a timestamp includes an offset it will always be represented in the internalized form, but is still excluded from the normalized form. Timestamps with an equivalent timestamp portion (the float part) will compare and order by their offset. Timestamps with a greater timestamp portion will always compare and order greater than a Timestamp with a lesser timestamp regardless of it's offset. String comparison and ordering is guaranteed for the internalized string format, and is backwards compatible for normalized timestamps which do not include an offset. """ def __init__(self, timestamp, offset=0, delta=0): """ Create a new Timestamp. :param timestamp: time in seconds since the Epoch, may be any of: * a float or integer * normalized/internalized string * another instance of this class (offset is preserved) :param offset: the second internal offset vector, an int :param delta: deca-microsecond difference from the base timestamp param, an int """ if isinstance(timestamp, six.string_types): parts = timestamp.split('_', 1) self.timestamp = float(parts.pop(0)) if parts: self.offset = int(parts[0], 16) else: self.offset = 0 else: self.timestamp = float(timestamp) self.offset = getattr(timestamp, 'offset', 0) # increment offset if offset >= 0: self.offset += offset else: raise ValueError('offset must be non-negative') if self.offset > MAX_OFFSET: raise ValueError('offset must be smaller than %d' % MAX_OFFSET) self.raw = int(round(self.timestamp / PRECISION)) # add delta if delta: self.raw = self.raw + delta if self.raw <= 0: raise ValueError( 'delta must be greater than %d' % (-1 * self.raw)) self.timestamp = float(self.raw * PRECISION) if self.timestamp < 0: raise ValueError('timestamp cannot be negative') if self.timestamp >= 10000000000: raise ValueError('timestamp too large') def __repr__(self): return INTERNAL_FORMAT % (self.timestamp, self.offset) def __str__(self): raise TypeError('You must specify which string format is required') def __float__(self): return self.timestamp def __int__(self): return int(self.timestamp) def __nonzero__(self): return bool(self.timestamp or self.offset) def __bool__(self): return self.__nonzero__() @property def normal(self): return NORMAL_FORMAT % self.timestamp @property def internal(self): if self.offset or FORCE_INTERNAL: return INTERNAL_FORMAT % (self.timestamp, self.offset) else: return self.normal @property def short(self): if self.offset or FORCE_INTERNAL: return SHORT_FORMAT % (self.timestamp, self.offset) else: return self.normal @property def isoformat(self): t = float(self.normal) if six.PY3: # On Python 3, round manually using ROUND_HALF_EVEN rounding # method, to use the same rounding method than Python 2. Python 3 # used a different rounding method, but Python 3.4.4 and 3.5.1 use # again ROUND_HALF_EVEN as Python 2. 
# See https://bugs.python.org/issue23517 frac, t = math.modf(t) us = round(frac * 1e6) if us >= 1000000: t += 1 us -= 1000000 elif us < 0: t -= 1 us += 1000000 dt = datetime.datetime.utcfromtimestamp(t) dt = dt.replace(microsecond=us) else: dt = datetime.datetime.utcfromtimestamp(t) isoformat = dt.isoformat() # python isoformat() doesn't include msecs when zero if len(isoformat) < len("1970-01-01T00:00:00.000000"): isoformat += ".000000" return isoformat def __eq__(self, other): if other is None: return False if not isinstance(other, Timestamp): other = Timestamp(other) return self.internal == other.internal def __ne__(self, other): if other is None: return True if not isinstance(other, Timestamp): other = Timestamp(other) return self.internal != other.internal def __lt__(self, other): if other is None: return False if not isinstance(other, Timestamp): other = Timestamp(other) return self.internal < other.internal def __hash__(self): return hash(self.internal) def encode_timestamps(t1, t2=None, t3=None, explicit=False): """ Encode up to three timestamps into a string. Unlike a Timestamp object, the encoded string does NOT used fixed width fields and consequently no relative chronology of the timestamps can be inferred from lexicographic sorting of encoded timestamp strings. The format of the encoded string is: <t1>[<+/-><t2 - t1>[<+/-><t3 - t2>]] i.e. if t1 = t2 = t3 then just the string representation of t1 is returned, otherwise the time offsets for t2 and t3 are appended. If explicit is True then the offsets for t2 and t3 are always appended even if zero. Note: any offset value in t1 will be preserved, but offsets on t2 and t3 are not preserved. In the anticipated use cases for this method (and the inverse decode_timestamps method) the timestamps passed as t2 and t3 are not expected to have offsets as they will be timestamps associated with a POST request. In the case where the encoding is used in a container objects table row, t1 could be the PUT or DELETE time but t2 and t3 represent the content type and metadata times (if different from the data file) i.e. correspond to POST timestamps. In the case where the encoded form is used in a .meta file name, t1 and t2 both correspond to POST timestamps. """ form = '{0}' values = [t1.short] if t2 is not None: t2_t1_delta = t2.raw - t1.raw explicit = explicit or (t2_t1_delta != 0) values.append(t2_t1_delta) if t3 is not None: t3_t2_delta = t3.raw - t2.raw explicit = explicit or (t3_t2_delta != 0) values.append(t3_t2_delta) if explicit: form += '{1:+x}' if t3 is not None: form += '{2:+x}' return form.format(*values) def decode_timestamps(encoded, explicit=False): """ Parses a string of the form generated by encode_timestamps and returns a tuple of the three component timestamps. If explicit is False, component timestamps that are not explicitly encoded will be assumed to have zero delta from the previous component and therefore take the value of the previous component. If explicit is True, component timestamps that are not explicitly encoded will be returned with value None. """ # TODO: some tests, e.g. in test_replicator, put float timestamps values # into container db's, hence this defensive check, but in real world # this may never happen. if not isinstance(encoded, six.string_types): ts = Timestamp(encoded) return ts, ts, ts parts = [] signs = [] pos_parts = encoded.split('+') for part in pos_parts: # parse time components and their signs # e.g. 
x-y+z --> parts = [x, y, z] and signs = [+1, -1, +1] neg_parts = part.split('-') parts = parts + neg_parts signs = signs + [1] + [-1] * (len(neg_parts) - 1) t1 = Timestamp(parts[0]) t2 = t3 = None if len(parts) > 1: t2 = t1 delta = signs[1] * int(parts[1], 16) # if delta = 0 we want t2 = t3 = t1 in order to # preserve any offset in t1 - only construct a distinct # timestamp if there is a non-zero delta. if delta: t2 = Timestamp((t1.raw + delta) * PRECISION) elif not explicit: t2 = t1 if len(parts) > 2: t3 = t2 delta = signs[2] * int(parts[2], 16) if delta: t3 = Timestamp((t2.raw + delta) * PRECISION) elif not explicit: t3 = t2 return t1, t2, t3 def normalize_timestamp(timestamp): """ Format a timestamp (string or numeric) into a standardized xxxxxxxxxx.xxxxx (10.5) format. Note that timestamps using values greater than or equal to November 20th, 2286 at 17:46 UTC will use 11 digits to represent the number of seconds. :param timestamp: unix timestamp :returns: normalized timestamp as a string """ return Timestamp(timestamp).normal EPOCH = datetime.datetime(1970, 1, 1) def last_modified_date_to_timestamp(last_modified_date_str): """ Convert a last modified date (like you'd get from a container listing, e.g. 2014-02-28T23:22:36.698390) to a float. """ start = datetime.datetime.strptime(last_modified_date_str, '%Y-%m-%dT%H:%M:%S.%f') delta = start - EPOCH # This calculation is based on Python 2.7's Modules/datetimemodule.c, # function delta_to_microseconds(), but written in Python. return Timestamp(delta.total_seconds()) def normalize_delete_at_timestamp(timestamp): """ Format a timestamp (string or numeric) into a standardized xxxxxxxxxx (10) format. Note that timestamps less than 0000000000 are raised to 0000000000 and values greater than November 20th, 2286 at 17:46:39 UTC will be capped at that date and time, resulting in no return value exceeding 9999999999. This cap is because the expirer is already working through a sorted list of strings that were all a length of 10. Adding another digit would mess up the sort and cause the expirer to break from processing early. By 2286, this problem will need to be fixed, probably by creating an additional .expiring_objects account to work from with 11 (or more) digit container names. :param timestamp: unix timestamp :returns: normalized timestamp as a string """ return '%010d' % min(max(0, float(timestamp)), 9999999999) def mkdirs(path): """ Ensures the path is a directory or makes it if not. Errors if the path exists but is a file or on permissions failure. :param path: path to create """ if not os.path.isdir(path): try: os.makedirs(path) except OSError as err: if err.errno != errno.EEXIST or not os.path.isdir(path): raise def makedirs_count(path, count=0): """ Same as os.makedirs() except that this method returns the number of new directories that had to be created. Also, this does not raise an error if target directory already exists. This behaviour is similar to Python 3.x's os.makedirs() called with exist_ok=True. Also similar to swift.common.utils.mkdirs() https://hg.python.org/cpython/file/v3.4.2/Lib/os.py#l212 """ head, tail = os.path.split(path) if not tail: head, tail = os.path.split(head) if head and tail and not os.path.exists(head): count = makedirs_count(head, count) if tail == os.path.curdir: return try: os.mkdir(path) except OSError as e: # EEXIST may also be raised if path exists as a file # Do not let that pass. 
if e.errno != errno.EEXIST or not os.path.isdir(path): raise else: count += 1 return count def renamer(old, new, fsync=True): """ Attempt to fix / hide race conditions like empty object directories being removed by backend processes during uploads, by retrying. The containing directory of 'new' and of all newly created directories are fsync'd by default. This _will_ come at a performance penalty. In cases where these additional fsyncs are not necessary, it is expected that the caller of renamer() turn it off explicitly. :param old: old path to be renamed :param new: new path to be renamed to :param fsync: fsync on containing directory of new and also all the newly created directories. """ dirpath = os.path.dirname(new) try: count = makedirs_count(dirpath) os.rename(old, new) except OSError: count = makedirs_count(dirpath) os.rename(old, new) if fsync: # If count=0, no new directories were created. But we still need to # fsync leaf dir after os.rename(). # If count>0, starting from leaf dir, fsync parent dirs of all # directories created by makedirs_count() for i in range(0, count + 1): fsync_dir(dirpath) dirpath = os.path.dirname(dirpath) def link_fd_to_path(fd, target_path, dirs_created=0, retries=2, fsync=True): """ Creates a link to file descriptor at target_path specified. This method does not close the fd for you. Unlike rename, as linkat() cannot overwrite target_path if it exists, we unlink and try again. Attempts to fix / hide race conditions like empty object directories being removed by backend processes during uploads, by retrying. :param fd: File descriptor to be linked :param target_path: Path in filesystem where fd is to be linked :param dirs_created: Number of newly created directories that needs to be fsync'd. :param retries: number of retries to make :param fsync: fsync on containing directory of target_path and also all the newly created directories. """ dirpath = os.path.dirname(target_path) for _junk in range(0, retries): try: linkat(linkat.AT_FDCWD, "/proc/self/fd/%d" % (fd), linkat.AT_FDCWD, target_path, linkat.AT_SYMLINK_FOLLOW) break except IOError as err: if err.errno == errno.ENOENT: dirs_created = makedirs_count(dirpath) elif err.errno == errno.EEXIST: try: os.unlink(target_path) except OSError as e: if e.errno != errno.ENOENT: raise else: raise if fsync: for i in range(0, dirs_created + 1): fsync_dir(dirpath) dirpath = os.path.dirname(dirpath) def split_path(path, minsegs=1, maxsegs=None, rest_with_last=False): """ Validate and split the given HTTP request path. **Examples**:: ['a'] = split_path('/a') ['a', None] = split_path('/a', 1, 2) ['a', 'c'] = split_path('/a/c', 1, 2) ['a', 'c', 'o/r'] = split_path('/a/c/o/r', 1, 3, True) :param path: HTTP Request path to be split :param minsegs: Minimum number of segments to be extracted :param maxsegs: Maximum number of segments to be extracted :param rest_with_last: If True, trailing data will be returned as part of last segment. If False, and there is trailing data, raises ValueError. 
:returns: list of segments with a length of maxsegs (non-existent segments will return as None) :raises: ValueError if given an invalid path """ if not maxsegs: maxsegs = minsegs if minsegs > maxsegs: raise ValueError('minsegs > maxsegs: %d > %d' % (minsegs, maxsegs)) if rest_with_last: segs = path.split('/', maxsegs) minsegs += 1 maxsegs += 1 count = len(segs) if (segs[0] or count < minsegs or count > maxsegs or '' in segs[1:minsegs]): raise ValueError('Invalid path: %s' % quote(path)) else: minsegs += 1 maxsegs += 1 segs = path.split('/', maxsegs) count = len(segs) if (segs[0] or count < minsegs or count > maxsegs + 1 or '' in segs[1:minsegs] or (count == maxsegs + 1 and segs[maxsegs])): raise ValueError('Invalid path: %s' % quote(path)) segs = segs[1:maxsegs] segs.extend([None] * (maxsegs - 1 - len(segs))) return segs def validate_device_partition(device, partition): """ Validate that a device and a partition are valid and won't lead to directory traversal when used. :param device: device to validate :param partition: partition to validate :raises: ValueError if given an invalid device or partition """ if not device or '/' in device or device in ['.', '..']: raise ValueError('Invalid device: %s' % quote(device or '')) if not partition or '/' in partition or partition in ['.', '..']: raise ValueError('Invalid partition: %s' % quote(partition or '')) class RateLimitedIterator(object): """ Wrap an iterator to only yield elements at a rate of N per second. :param iterable: iterable to wrap :param elements_per_second: the rate at which to yield elements :param limit_after: rate limiting kicks in only after yielding this many elements; default is 0 (rate limit immediately) """ def __init__(self, iterable, elements_per_second, limit_after=0, ratelimit_if=lambda _junk: True): self.iterator = iter(iterable) self.elements_per_second = elements_per_second self.limit_after = limit_after self.running_time = 0 self.ratelimit_if = ratelimit_if def __iter__(self): return self def next(self): next_value = next(self.iterator) if self.ratelimit_if(next_value): if self.limit_after > 0: self.limit_after -= 1 else: self.running_time = ratelimit_sleep(self.running_time, self.elements_per_second) return next_value __next__ = next class GreenthreadSafeIterator(object): """ Wrap an iterator to ensure that only one greenthread is inside its next() method at a time. This is useful if an iterator's next() method may perform network IO, as that may trigger a greenthread context switch (aka trampoline), which can give another greenthread a chance to call next(). At that point, you get an error like "ValueError: generator already executing". By wrapping calls to next() with a mutex, we avoid that error. 
""" def __init__(self, unsafe_iterable): self.unsafe_iter = iter(unsafe_iterable) self.semaphore = eventlet.semaphore.Semaphore(value=1) def __iter__(self): return self def next(self): with self.semaphore: return next(self.unsafe_iter) __next__ = next class NullLogger(object): """A no-op logger for eventlet wsgi.""" def write(self, *args): # "Logs" the args to nowhere pass def exception(self, *args): pass def critical(self, *args): pass def error(self, *args): pass def warning(self, *args): pass def info(self, *args): pass def debug(self, *args): pass def log(self, *args): pass class LoggerFileObject(object): # Note: this is greenthread-local storage _cls_thread_local = threading.local() def __init__(self, logger, log_type='STDOUT'): self.logger = logger self.log_type = log_type def write(self, value): # We can get into a nasty situation when logs are going to syslog # and syslog dies. # # It's something like this: # # (A) someone logs something # # (B) there's an exception in sending to /dev/log since syslog is # not working # # (C) logging takes that exception and writes it to stderr (see # logging.Handler.handleError) # # (D) stderr was replaced with a LoggerFileObject at process start, # so the LoggerFileObject takes the provided string and tells # its logger to log it (to syslog, naturally). # # Then, steps B through D repeat until we run out of stack. if getattr(self._cls_thread_local, 'already_called_write', False): return self._cls_thread_local.already_called_write = True try: value = value.strip() if value: if 'Connection reset by peer' in value: self.logger.error( _('%s: Connection reset by peer'), self.log_type) else: self.logger.error(_('%(type)s: %(value)s'), {'type': self.log_type, 'value': value}) finally: self._cls_thread_local.already_called_write = False def writelines(self, values): if getattr(self._cls_thread_local, 'already_called_writelines', False): return self._cls_thread_local.already_called_writelines = True try: self.logger.error(_('%(type)s: %(value)s'), {'type': self.log_type, 'value': '#012'.join(values)}) finally: self._cls_thread_local.already_called_writelines = False def close(self): pass def flush(self): pass def __iter__(self): return self def next(self): raise IOError(errno.EBADF, 'Bad file descriptor') __next__ = next def read(self, size=-1): raise IOError(errno.EBADF, 'Bad file descriptor') def readline(self, size=-1): raise IOError(errno.EBADF, 'Bad file descriptor') def tell(self): return 0 def xreadlines(self): return self class StatsdClient(object): def __init__(self, host, port, base_prefix='', tail_prefix='', default_sample_rate=1, sample_rate_factor=1, logger=None): self._host = host self._port = port self._base_prefix = base_prefix self.set_prefix(tail_prefix) self._default_sample_rate = default_sample_rate self._sample_rate_factor = sample_rate_factor self.random = random self.logger = logger # Determine if host is IPv4 or IPv6 addr_info = None try: addr_info = socket.getaddrinfo(host, port, socket.AF_INET) self._sock_family = socket.AF_INET except socket.gaierror: try: addr_info = socket.getaddrinfo(host, port, socket.AF_INET6) self._sock_family = socket.AF_INET6 except socket.gaierror: # Don't keep the server from starting from what could be a # transient DNS failure. Any hostname will get re-resolved as # necessary in the .sendto() calls. # However, we don't know if we're IPv4 or IPv6 in this case, so # we assume legacy IPv4. 
self._sock_family = socket.AF_INET # NOTE: we use the original host value, not the DNS-resolved one # because if host is a hostname, we don't want to cache the DNS # resolution for the entire lifetime of this process. Let standard # name resolution caching take effect. This should help operators use # DNS trickery if they want. if addr_info is not None: # addr_info is a list of 5-tuples with the following structure: # (family, socktype, proto, canonname, sockaddr) # where sockaddr is the only thing of interest to us, and we only # use the first result. We want to use the originally supplied # host (see note above) and the remainder of the variable-length # sockaddr: IPv4 has (address, port) while IPv6 has (address, # port, flow info, scope id). sockaddr = addr_info[0][-1] self._target = (host,) + (sockaddr[1:]) else: self._target = (host, port) def set_prefix(self, new_prefix): if new_prefix and self._base_prefix: self._prefix = '.'.join([self._base_prefix, new_prefix, '']) elif new_prefix: self._prefix = new_prefix + '.' elif self._base_prefix: self._prefix = self._base_prefix + '.' else: self._prefix = '' def _send(self, m_name, m_value, m_type, sample_rate): if sample_rate is None: sample_rate = self._default_sample_rate sample_rate = sample_rate * self._sample_rate_factor parts = ['%s%s:%s' % (self._prefix, m_name, m_value), m_type] if sample_rate < 1: if self.random() < sample_rate: parts.append('@%s' % (sample_rate,)) else: return if six.PY3: parts = [part.encode('utf-8') for part in parts] # Ideally, we'd cache a sending socket in self, but that # results in a socket getting shared by multiple green threads. with closing(self._open_socket()) as sock: try: return sock.sendto(b'|'.join(parts), self._target) except IOError as err: if self.logger: self.logger.warning( _('Error sending UDP message to %(target)r: %(err)s'), {'target': self._target, 'err': err}) def _open_socket(self): return socket.socket(self._sock_family, socket.SOCK_DGRAM) def update_stats(self, m_name, m_value, sample_rate=None): return self._send(m_name, m_value, 'c', sample_rate) def increment(self, metric, sample_rate=None): return self.update_stats(metric, 1, sample_rate) def decrement(self, metric, sample_rate=None): return self.update_stats(metric, -1, sample_rate) def timing(self, metric, timing_ms, sample_rate=None): return self._send(metric, timing_ms, 'ms', sample_rate) def timing_since(self, metric, orig_time, sample_rate=None): return self.timing(metric, (time.time() - orig_time) * 1000, sample_rate) def transfer_rate(self, metric, elapsed_time, byte_xfer, sample_rate=None): if byte_xfer: return self.timing(metric, elapsed_time * 1000 / byte_xfer * 1000, sample_rate) def server_handled_successfully(status_int): """ True for successful responses *or* error codes that are not Swift's fault, False otherwise. For example, 500 is definitely the server's fault, but 412 is an error code (4xx are all errors) that is due to a header the client sent. If one is tracking error rates to monitor server health, one would be advised to use a function like this one, lest a client cause a flurry of 404s or 416s and make a spurious spike in your errors graph. """ return (is_success(status_int) or is_redirection(status_int) or status_int == HTTP_NOT_FOUND or status_int == HTTP_PRECONDITION_FAILED or status_int == HTTP_REQUESTED_RANGE_NOT_SATISFIABLE) def timing_stats(**dec_kwargs): """ Returns a decorator that logs timing events or errors for public methods in swift's wsgi server controllers, based on response code. 
""" def decorating_func(func): method = func.__name__ @functools.wraps(func) def _timing_stats(ctrl, *args, **kwargs): start_time = time.time() resp = func(ctrl, *args, **kwargs) if server_handled_successfully(resp.status_int): ctrl.logger.timing_since(method + '.timing', start_time, **dec_kwargs) else: ctrl.logger.timing_since(method + '.errors.timing', start_time, **dec_kwargs) return resp return _timing_stats return decorating_func # double inheritance to support property with setter class LogAdapter(logging.LoggerAdapter, object): """ A Logger like object which performs some reformatting on calls to :meth:`exception`. Can be used to store a threadlocal transaction id and client ip. """ _cls_thread_local = threading.local() def __init__(self, logger, server): logging.LoggerAdapter.__init__(self, logger, {}) self.server = server self.warn = self.warning @property def txn_id(self): if hasattr(self._cls_thread_local, 'txn_id'): return self._cls_thread_local.txn_id @txn_id.setter def txn_id(self, value): self._cls_thread_local.txn_id = value @property def client_ip(self): if hasattr(self._cls_thread_local, 'client_ip'): return self._cls_thread_local.client_ip @client_ip.setter def client_ip(self, value): self._cls_thread_local.client_ip = value @property def thread_locals(self): return (self.txn_id, self.client_ip) @thread_locals.setter def thread_locals(self, value): self.txn_id, self.client_ip = value def getEffectiveLevel(self): return self.logger.getEffectiveLevel() def process(self, msg, kwargs): """ Add extra info to message """ kwargs['extra'] = {'server': self.server, 'txn_id': self.txn_id, 'client_ip': self.client_ip} return msg, kwargs def notice(self, msg, *args, **kwargs): """ Convenience function for syslog priority LOG_NOTICE. The python logging lvl is set to 25, just above info. SysLogHandler is monkey patched to map this log lvl to the LOG_NOTICE syslog priority. """ self.log(NOTICE, msg, *args, **kwargs) def _exception(self, msg, *args, **kwargs): logging.LoggerAdapter.exception(self, msg, *args, **kwargs) def exception(self, msg, *args, **kwargs): _junk, exc, _junk = sys.exc_info() call = self.error emsg = '' if isinstance(exc, (OSError, socket.error)): if exc.errno in (errno.EIO, errno.ENOSPC): emsg = str(exc) elif exc.errno == errno.ECONNREFUSED: emsg = _('Connection refused') elif exc.errno == errno.EHOSTUNREACH: emsg = _('Host unreachable') elif exc.errno == errno.ETIMEDOUT: emsg = _('Connection timeout') else: call = self._exception elif isinstance(exc, eventlet.Timeout): emsg = exc.__class__.__name__ if hasattr(exc, 'seconds'): emsg += ' (%ss)' % exc.seconds if isinstance(exc, swift.common.exceptions.MessageTimeout): if exc.msg: emsg += ' %s' % exc.msg else: call = self._exception call('%s: %s' % (msg, emsg), *args, **kwargs) def set_statsd_prefix(self, prefix): """ The StatsD client prefix defaults to the "name" of the logger. This method may override that default with a specific value. Currently used in the proxy-server to differentiate the Account, Container, and Object controllers. """ if self.logger.statsd_client: self.logger.statsd_client.set_prefix(prefix) def statsd_delegate(statsd_func_name): """ Factory to create methods which delegate to methods on self.logger.statsd_client (an instance of StatsdClient). The created methods conditionally delegate to a method whose name is given in 'statsd_func_name'. The created delegate methods are a no-op when StatsD logging is not configured. :param statsd_func_name: the name of a method on StatsdClient. 
""" func = getattr(StatsdClient, statsd_func_name) @functools.wraps(func) def wrapped(self, *a, **kw): if getattr(self.logger, 'statsd_client'): return func(self.logger.statsd_client, *a, **kw) return wrapped update_stats = statsd_delegate('update_stats') increment = statsd_delegate('increment') decrement = statsd_delegate('decrement') timing = statsd_delegate('timing') timing_since = statsd_delegate('timing_since') transfer_rate = statsd_delegate('transfer_rate') class SwiftLogFormatter(logging.Formatter): """ Custom logging.Formatter will append txn_id to a log message if the record has one and the message does not. Optionally it can shorten overly long log lines. """ def __init__(self, fmt=None, datefmt=None, max_line_length=0): logging.Formatter.__init__(self, fmt=fmt, datefmt=datefmt) self.max_line_length = max_line_length def format(self, record): if not hasattr(record, 'server'): # Catch log messages that were not initiated by swift # (for example, the keystone auth middleware) record.server = record.name # Included from Python's logging.Formatter and then altered slightly to # replace \n with #012 record.message = record.getMessage() if self._fmt.find('%(asctime)') >= 0: record.asctime = self.formatTime(record, self.datefmt) msg = (self._fmt % record.__dict__).replace('\n', '#012') if record.exc_info: # Cache the traceback text to avoid converting it multiple times # (it's constant anyway) if not record.exc_text: record.exc_text = self.formatException( record.exc_info).replace('\n', '#012') if record.exc_text: if not msg.endswith('#012'): msg = msg + '#012' msg = msg + record.exc_text if (hasattr(record, 'txn_id') and record.txn_id and record.txn_id not in msg): msg = "%s (txn: %s)" % (msg, record.txn_id) if (hasattr(record, 'client_ip') and record.client_ip and record.levelno != logging.INFO and record.client_ip not in msg): msg = "%s (client_ip: %s)" % (msg, record.client_ip) if self.max_line_length > 0 and len(msg) > self.max_line_length: if self.max_line_length < 7: msg = msg[:self.max_line_length] else: approxhalf = (self.max_line_length - 5) // 2 msg = msg[:approxhalf] + " ... " + msg[-approxhalf:] return msg def get_logger(conf, name=None, log_to_console=False, log_route=None, fmt="%(server)s: %(message)s"): """ Get the current system logger using config settings. 
**Log config and defaults**:: log_facility = LOG_LOCAL0 log_level = INFO log_name = swift log_max_line_length = 0 log_udp_host = (disabled) log_udp_port = logging.handlers.SYSLOG_UDP_PORT log_address = /dev/log log_statsd_host = (disabled) log_statsd_port = 8125 log_statsd_default_sample_rate = 1.0 log_statsd_sample_rate_factor = 1.0 log_statsd_metric_prefix = (empty-string) :param conf: Configuration dict to read settings from :param name: Name of the logger :param log_to_console: Add handler which writes to console on stderr :param log_route: Route for the logging, not emitted to the log, just used to separate logging configurations :param fmt: Override log format """ if not conf: conf = {} if name is None: name = conf.get('log_name', 'swift') if not log_route: log_route = name logger = logging.getLogger(log_route) logger.propagate = False # all new handlers will get the same formatter formatter = SwiftLogFormatter( fmt=fmt, max_line_length=int(conf.get('log_max_line_length', 0))) # get_logger will only ever add one SysLog Handler to a logger if not hasattr(get_logger, 'handler4logger'): get_logger.handler4logger = {} if logger in get_logger.handler4logger: logger.removeHandler(get_logger.handler4logger[logger]) # facility for this logger will be set by last call wins facility = getattr(SysLogHandler, conf.get('log_facility', 'LOG_LOCAL0'), SysLogHandler.LOG_LOCAL0) udp_host = conf.get('log_udp_host') if udp_host: udp_port = int(conf.get('log_udp_port', logging.handlers.SYSLOG_UDP_PORT)) handler = SysLogHandler(address=(udp_host, udp_port), facility=facility) else: log_address = conf.get('log_address', '/dev/log') try: handler = SysLogHandler(address=log_address, facility=facility) except socket.error as e: # Either /dev/log isn't a UNIX socket or it does not exist at all if e.errno not in [errno.ENOTSOCK, errno.ENOENT]: raise handler = SysLogHandler(facility=facility) handler.setFormatter(formatter) logger.addHandler(handler) get_logger.handler4logger[logger] = handler # setup console logging if log_to_console or hasattr(get_logger, 'console_handler4logger'): # remove pre-existing console handler for this logger if not hasattr(get_logger, 'console_handler4logger'): get_logger.console_handler4logger = {} if logger in get_logger.console_handler4logger: logger.removeHandler(get_logger.console_handler4logger[logger]) console_handler = logging.StreamHandler(sys.__stderr__) console_handler.setFormatter(formatter) logger.addHandler(console_handler) get_logger.console_handler4logger[logger] = console_handler # set the level for the logger logger.setLevel( getattr(logging, conf.get('log_level', 'INFO').upper(), logging.INFO)) # Setup logger with a StatsD client if so configured statsd_host = conf.get('log_statsd_host') if statsd_host: statsd_port = int(conf.get('log_statsd_port', 8125)) base_prefix = conf.get('log_statsd_metric_prefix', '') default_sample_rate = float(conf.get( 'log_statsd_default_sample_rate', 1)) sample_rate_factor = float(conf.get( 'log_statsd_sample_rate_factor', 1)) statsd_client = StatsdClient(statsd_host, statsd_port, base_prefix, name, default_sample_rate, sample_rate_factor, logger=logger) logger.statsd_client = statsd_client else: logger.statsd_client = None adapted_logger = LogAdapter(logger, name) other_handlers = conf.get('log_custom_handlers', None) if other_handlers: log_custom_handlers = [s.strip() for s in other_handlers.split(',') if s.strip()] for hook in log_custom_handlers: try: mod, fnc = hook.rsplit('.', 1) logger_hook = getattr(__import__(mod, 
fromlist=[fnc]), fnc) logger_hook(conf, name, log_to_console, log_route, fmt, logger, adapted_logger) except (AttributeError, ImportError): print('Error calling custom handler [%s]' % hook, file=sys.stderr) except ValueError: print('Invalid custom handler format [%s]' % hook, file=sys.stderr) return adapted_logger def get_hub(): """ Checks whether poll is available and falls back on select if it isn't. Note about epoll: Review: https://review.openstack.org/#/c/18806/ There was a problem where once out of every 30 quadrillion connections, a coroutine wouldn't wake up when the client closed its end. Epoll was not reporting the event or it was getting swallowed somewhere. Then when that file descriptor was re-used, eventlet would freak right out because it still thought it was waiting for activity from it in some other coro. """ try: import select if hasattr(select, "poll"): return "poll" return "selects" except ImportError: return None def drop_privileges(user, call_setsid=True): """ Sets the userid/groupid of the current process, get session leader, etc. :param user: User name to change privileges to """ if os.geteuid() == 0: groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem] os.setgroups(groups) user = pwd.getpwnam(user) os.setgid(user[3]) os.setuid(user[2]) os.environ['HOME'] = user[5] if call_setsid: try: os.setsid() except OSError: pass os.chdir('/') # in case you need to rmdir on where you started the daemon os.umask(0o22) # ensure files are created with the correct privileges def capture_stdio(logger, **kwargs): """ Log unhandled exceptions, close stdio, capture stdout and stderr. param logger: Logger object to use """ # log uncaught exceptions sys.excepthook = lambda * exc_info: \ logger.critical(_('UNCAUGHT EXCEPTION'), exc_info=exc_info) # collect stdio file desc not in use for logging stdio_files = [sys.stdin, sys.stdout, sys.stderr] console_fds = [h.stream.fileno() for _junk, h in getattr( get_logger, 'console_handler4logger', {}).items()] stdio_files = [f for f in stdio_files if f.fileno() not in console_fds] with open(os.devnull, 'r+b') as nullfile: # close stdio (excludes fds open for logging) for f in stdio_files: # some platforms throw an error when attempting an stdin flush try: f.flush() except IOError: pass try: os.dup2(nullfile.fileno(), f.fileno()) except OSError: pass # redirect stdio if kwargs.pop('capture_stdout', True): sys.stdout = LoggerFileObject(logger) if kwargs.pop('capture_stderr', True): sys.stderr = LoggerFileObject(logger, 'STDERR') def parse_options(parser=None, once=False, test_args=None): """Parse standard swift server/daemon options with optparse.OptionParser. :param parser: OptionParser to use. If not sent one will be created. :param once: Boolean indicating the "once" option is available :param test_args: Override sys.argv; used in testing :returns: Tuple of (config, options); config is an absolute path to the config file, options is the parser options as a dictionary. 
:raises SystemExit: First arg (CONFIG) is required, file must exist """ if not parser: parser = OptionParser(usage="%prog CONFIG [options]") parser.add_option("-v", "--verbose", default=False, action="store_true", help="log to console") if once: parser.add_option("-o", "--once", default=False, action="store_true", help="only run one pass of daemon") # if test_args is None, optparse will use sys.argv[:1] options, args = parser.parse_args(args=test_args) if not args: parser.print_usage() print(_("Error: missing config path argument")) sys.exit(1) config = os.path.abspath(args.pop(0)) if not os.path.exists(config): parser.print_usage() print(_("Error: unable to locate %s") % config) sys.exit(1) extra_args = [] # if any named options appear in remaining args, set the option to True for arg in args: if arg in options.__dict__: setattr(options, arg, True) else: extra_args.append(arg) options = vars(options) if extra_args: options['extra_args'] = extra_args return config, options def is_valid_ip(ip): """ Return True if the provided ip is a valid IP-address """ return is_valid_ipv4(ip) or is_valid_ipv6(ip) def is_valid_ipv4(ip): """ Return True if the provided ip is a valid IPv4-address """ try: socket.inet_pton(socket.AF_INET, ip) except socket.error: # not a valid IPv4 address return False return True def is_valid_ipv6(ip): """ Returns True if the provided ip is a valid IPv6-address """ try: socket.inet_pton(socket.AF_INET6, ip) except socket.error: # not a valid IPv6 address return False return True def expand_ipv6(address): """ Expand ipv6 address. :param address: a string indicating valid ipv6 address :returns: a string indicating fully expanded ipv6 address """ packed_ip = socket.inet_pton(socket.AF_INET6, address) return socket.inet_ntop(socket.AF_INET6, packed_ip) def whataremyips(bind_ip=None): """ Get "our" IP addresses ("us" being the set of services configured by one `*.conf` file). If our REST listens on a specific address, return it. Otherwise, if listen on '0.0.0.0' or '::' return all addresses, including the loopback. :param str bind_ip: Optional bind_ip from a config file; may be IP address or hostname. :returns: list of Strings of ip addresses """ if bind_ip: # See if bind_ip is '0.0.0.0'/'::' try: _, _, _, _, sockaddr = socket.getaddrinfo( bind_ip, None, 0, socket.SOCK_STREAM, 0, socket.AI_NUMERICHOST)[0] if sockaddr[0] not in ('0.0.0.0', '::'): return [bind_ip] except socket.gaierror: pass addresses = [] for interface in netifaces.interfaces(): try: iface_data = netifaces.ifaddresses(interface) for family in iface_data: if family not in (netifaces.AF_INET, netifaces.AF_INET6): continue for address in iface_data[family]: addr = address['addr'] # If we have an ipv6 address remove the # %ether_interface at the end if family == netifaces.AF_INET6: addr = expand_ipv6(addr.split('%')[0]) addresses.append(addr) except ValueError: pass return addresses def parse_socket_string(socket_string, default_port): """ Given a string representing a socket, returns a tuple of (host, port). Valid strings are DNS names, IPv4 addresses, or IPv6 addresses, with an optional port. If an IPv6 address is specified it **must** be enclosed in [], like *[::1]* or *[::1]:11211*. This follows the accepted prescription for `IPv6 host literals`_. Examples:: server.org server.org:1337 127.0.0.1:1337 [::1]:1337 [::1] .. 
_IPv6 host literals: https://tools.ietf.org/html/rfc3986#section-3.2.2 """ port = default_port # IPv6 addresses must be between '[]' if socket_string.startswith('['): match = IPV6_RE.match(socket_string) if not match: raise ValueError("Invalid IPv6 address: %s" % socket_string) host = match.group('address') port = match.group('port') or port else: if ':' in socket_string: tokens = socket_string.split(':') if len(tokens) > 2: raise ValueError("IPv6 addresses must be between '[]'") host, port = tokens else: host = socket_string return (host, port) def storage_directory(datadir, partition, name_hash): """ Get the storage directory :param datadir: Base data directory :param partition: Partition :param name_hash: Account, container or object name hash :returns: Storage directory """ return os.path.join(datadir, str(partition), name_hash[-3:], name_hash) def hash_path(account, container=None, object=None, raw_digest=False): """ Get the canonical hash for an account/container/object :param account: Account :param container: Container :param object: Object :param raw_digest: If True, return the raw version rather than a hex digest :returns: hash string """ if object and not container: raise ValueError('container is required if object is provided') paths = [account] if container: paths.append(container) if object: paths.append(object) if raw_digest: return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths) + HASH_PATH_SUFFIX).digest() else: return md5(HASH_PATH_PREFIX + '/' + '/'.join(paths) + HASH_PATH_SUFFIX).hexdigest() @contextmanager def lock_path(directory, timeout=10, timeout_class=None): """ Context manager that acquires a lock on a directory. This will block until the lock can be acquired, or the timeout time has expired (whichever occurs first). For locking exclusively, file or directory has to be opened in Write mode. Python doesn't allow directories to be opened in Write Mode. So we workaround by locking a hidden file in the directory. :param directory: directory to be locked :param timeout: timeout (in seconds) :param timeout_class: The class of the exception to raise if the lock cannot be granted within the timeout. Will be constructed as timeout_class(timeout, lockpath). Default: LockTimeout """ if timeout_class is None: timeout_class = swift.common.exceptions.LockTimeout mkdirs(directory) lockpath = '%s/.lock' % directory fd = os.open(lockpath, os.O_WRONLY | os.O_CREAT) sleep_time = 0.01 slower_sleep_time = max(timeout * 0.01, sleep_time) slowdown_at = timeout * 0.01 time_slept = 0 try: with timeout_class(timeout, lockpath): while True: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) break except IOError as err: if err.errno != errno.EAGAIN: raise if time_slept > slowdown_at: sleep_time = slower_sleep_time sleep(sleep_time) time_slept += sleep_time yield True finally: os.close(fd) @contextmanager def lock_file(filename, timeout=10, append=False, unlink=True): """ Context manager that acquires a lock on a file. This will block until the lock can be acquired, or the timeout time has expired (whichever occurs first). 
:param filename: file to be locked :param timeout: timeout (in seconds) :param append: True if file should be opened in append mode :param unlink: True if the file should be unlinked at the end """ flags = os.O_CREAT | os.O_RDWR if append: flags |= os.O_APPEND mode = 'a+' else: mode = 'r+' while True: fd = os.open(filename, flags) file_obj = os.fdopen(fd, mode) try: with swift.common.exceptions.LockTimeout(timeout, filename): while True: try: fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) break except IOError as err: if err.errno != errno.EAGAIN: raise sleep(0.01) try: if os.stat(filename).st_ino != os.fstat(fd).st_ino: continue except OSError as err: if err.errno == errno.ENOENT: continue raise yield file_obj if unlink: os.unlink(filename) break finally: file_obj.close() def lock_parent_directory(filename, timeout=10): """ Context manager that acquires a lock on the parent directory of the given file path. This will block until the lock can be acquired, or the timeout time has expired (whichever occurs first). :param filename: file path of the parent directory to be locked :param timeout: timeout (in seconds) """ return lock_path(os.path.dirname(filename), timeout=timeout) def get_time_units(time_amount): """ Get a nomralized length of time in the largest unit of time (hours, minutes, or seconds.) :param time_amount: length of time in seconds :returns: A touple of (length of time, unit of time) where unit of time is one of ('h', 'm', 's') """ time_unit = 's' if time_amount > 60: time_amount /= 60 time_unit = 'm' if time_amount > 60: time_amount /= 60 time_unit = 'h' return time_amount, time_unit def compute_eta(start_time, current_value, final_value): """ Compute an ETA. Now only if we could also have a progress bar... :param start_time: Unix timestamp when the operation began :param current_value: Current value :param final_value: Final value :returns: ETA as a tuple of (length of time, unit of time) where unit of time is one of ('h', 'm', 's') """ elapsed = time.time() - start_time completion = (float(current_value) / final_value) or 0.00001 return get_time_units(1.0 / completion * elapsed - elapsed) def unlink_older_than(path, mtime): """ Remove any file in a given path that was last modified before mtime. :param path: path to remove file from :param mtime: timestamp of oldest file to keep """ filepaths = map(functools.partial(os.path.join, path), listdir(path)) return unlink_paths_older_than(filepaths, mtime) def unlink_paths_older_than(filepaths, mtime): """ Remove any files from the given list that were last modified before mtime. 
:param filepaths: a list of strings, the full paths of files to check :param mtime: timestamp of oldest file to keep """ for fpath in filepaths: try: if os.path.getmtime(fpath) < mtime: os.unlink(fpath) except OSError: pass def item_from_env(env, item_name, allow_none=False): """ Get a value from the wsgi environment :param env: wsgi environment dict :param item_name: name of item to get :returns: the value from the environment """ item = env.get(item_name, None) if item is None and not allow_none: logging.error("ERROR: %s could not be found in env!", item_name) return item def cache_from_env(env, allow_none=False): """ Get memcache connection pool from the environment (which had been previously set by the memcache middleware :param env: wsgi environment dict :returns: swift.common.memcached.MemcacheRing from environment """ return item_from_env(env, 'swift.cache', allow_none) def read_conf_dir(parser, conf_dir): conf_files = [] for f in os.listdir(conf_dir): if f.endswith('.conf') and not f.startswith('.'): conf_files.append(os.path.join(conf_dir, f)) return parser.read(sorted(conf_files)) def readconf(conf_path, section_name=None, log_name=None, defaults=None, raw=False): """ Read config file(s) and return config items as a dict :param conf_path: path to config file/directory, or a file-like object (hasattr readline) :param section_name: config section to read (will return all sections if not defined) :param log_name: name to be used with logging (will use section_name if not defined) :param defaults: dict of default values to pre-populate the config with :returns: dict of config items :raises ValueError: if section_name does not exist :raises IOError: if reading the file failed """ if defaults is None: defaults = {} if raw: c = RawConfigParser(defaults) else: c = ConfigParser(defaults) if hasattr(conf_path, 'readline'): if hasattr(conf_path, 'seek'): conf_path.seek(0) c.readfp(conf_path) else: if os.path.isdir(conf_path): # read all configs in directory success = read_conf_dir(c, conf_path) else: success = c.read(conf_path) if not success: raise IOError(_("Unable to read config from %s") % conf_path) if section_name: if c.has_section(section_name): conf = dict(c.items(section_name)) else: raise ValueError( _("Unable to find %(section)s config section in %(conf)s") % {'section': section_name, 'conf': conf_path}) if "log_name" not in conf: if log_name is not None: conf['log_name'] = log_name else: conf['log_name'] = section_name else: conf = {} for s in c.sections(): conf.update({s: dict(c.items(s))}) if 'log_name' not in conf: conf['log_name'] = log_name conf['__file__'] = conf_path return conf def write_pickle(obj, dest, tmp=None, pickle_protocol=0): """ Ensure that a pickle file gets written to disk. 
The file is first written to a tmp location, ensure it is synced to disk, then perform a move to its final location :param obj: python object to be pickled :param dest: path of final destination file :param tmp: path to tmp to use, defaults to None :param pickle_protocol: protocol to pickle the obj with, defaults to 0 """ if tmp is None: tmp = os.path.dirname(dest) mkdirs(tmp) fd, tmppath = mkstemp(dir=tmp, suffix='.tmp') with os.fdopen(fd, 'wb') as fo: pickle.dump(obj, fo, pickle_protocol) fo.flush() os.fsync(fd) renamer(tmppath, dest) def search_tree(root, glob_match, ext='', exts=None, dir_ext=None): """Look in root, for any files/dirs matching glob, recursively traversing any found directories looking for files ending with ext :param root: start of search path :param glob_match: glob to match in root, matching dirs are traversed with os.walk :param ext: only files that end in ext will be returned :param exts: a list of file extensions; only files that end in one of these extensions will be returned; if set this list overrides any extension specified using the 'ext' param. :param dir_ext: if present directories that end with dir_ext will not be traversed and instead will be returned as a matched path :returns: list of full paths to matching files, sorted """ exts = exts or [ext] found_files = [] for path in glob.glob(os.path.join(root, glob_match)): if os.path.isdir(path): for root, dirs, files in os.walk(path): if dir_ext and root.endswith(dir_ext): found_files.append(root) # the root is a config dir, descend no further break for file_ in files: if any(exts) and not any(file_.endswith(e) for e in exts): continue found_files.append(os.path.join(root, file_)) found_dir = False for dir_ in dirs: if dir_ext and dir_.endswith(dir_ext): found_dir = True found_files.append(os.path.join(root, dir_)) if found_dir: # do not descend further into matching directories break else: if ext and not path.endswith(ext): continue found_files.append(path) return sorted(found_files) def write_file(path, contents): """Write contents to file at path :param path: any path, subdirs will be created as needed :param contents: data to write to file, will be converted to string """ dirname, name = os.path.split(path) if not os.path.exists(dirname): try: os.makedirs(dirname) except OSError as err: if err.errno == errno.EACCES: sys.exit('Unable to create %s. Running as ' 'non-root?' % dirname) with open(path, 'w') as f: f.write('%s' % contents) def remove_file(path): """Quiet wrapper for os.unlink, OSErrors are suppressed :param path: first and only argument passed to os.unlink """ try: os.unlink(path) except OSError: pass def audit_location_generator(devices, datadir, suffix='', mount_check=True, logger=None): """ Given a devices path and a data directory, yield (path, device, partition) for all files in that directory :param devices: parent directory of the devices to be audited :param datadir: a directory located under self.devices. This should be one of the DATADIR constants defined in the account, container, and object servers. 
:param suffix: path name suffix required for all names returned :param mount_check: Flag to check if a mount check should be performed on devices :param logger: a logger object """ device_dir = listdir(devices) # randomize devices in case of process restart before sweep completed shuffle(device_dir) for device in device_dir: if mount_check and not ismount(os.path.join(devices, device)): if logger: logger.warning( _('Skipping %s as it is not mounted'), device) continue datadir_path = os.path.join(devices, device, datadir) try: partitions = listdir(datadir_path) except OSError as e: if logger: logger.warning(_('Skipping %(datadir)s because %(err)s'), {'datadir': datadir_path, 'err': e}) continue for partition in partitions: part_path = os.path.join(datadir_path, partition) try: suffixes = listdir(part_path) except OSError as e: if e.errno != errno.ENOTDIR: raise continue for asuffix in suffixes: suff_path = os.path.join(part_path, asuffix) try: hashes = listdir(suff_path) except OSError as e: if e.errno != errno.ENOTDIR: raise continue for hsh in hashes: hash_path = os.path.join(suff_path, hsh) try: files = sorted(listdir(hash_path), reverse=True) except OSError as e: if e.errno != errno.ENOTDIR: raise continue for fname in files: if suffix and not fname.endswith(suffix): continue path = os.path.join(hash_path, fname) yield path, device, partition def ratelimit_sleep(running_time, max_rate, incr_by=1, rate_buffer=5): """ Will eventlet.sleep() for the appropriate time so that the max_rate is never exceeded. If max_rate is 0, will not ratelimit. The maximum recommended rate should not exceed (1000 * incr_by) a second as eventlet.sleep() does involve some overhead. Returns running_time that should be used for subsequent calls. :param running_time: the running time in milliseconds of the next allowable request. Best to start at zero. :param max_rate: The maximum rate per second allowed for the process. :param incr_by: How much to increment the counter. Useful if you want to ratelimit 1024 bytes/sec and have differing sizes of requests. Must be > 0 to engage rate-limiting behavior. :param rate_buffer: Number of seconds the rate counter can drop and be allowed to catch up (at a faster than listed rate). A larger number will result in larger spikes in rate but better average accuracy. Must be > 0 to engage rate-limiting behavior. """ if max_rate <= 0 or incr_by <= 0: return running_time # 1,000 milliseconds = 1 second clock_accuracy = 1000.0 # Convert seconds to milliseconds now = time.time() * clock_accuracy # Calculate time per request in milliseconds time_per_request = clock_accuracy * (float(incr_by) / max_rate) # Convert rate_buffer to milliseconds and compare if now - running_time > rate_buffer * clock_accuracy: running_time = now elif running_time - now > time_per_request: # Convert diff back to a floating point number of seconds and sleep eventlet.sleep((running_time - now) / clock_accuracy) # Return the absolute time for the next interval in milliseconds; note # that time could have passed well beyond that point, but the next call # will catch that and skip the sleep. 
return running_time + time_per_request class ContextPool(GreenPool): """GreenPool subclassed to kill its coros when it gets gc'ed""" def __enter__(self): return self def __exit__(self, type, value, traceback): for coro in list(self.coroutines_running): coro.kill() class GreenAsyncPileWaitallTimeout(Timeout): pass class GreenAsyncPile(object): """ Runs jobs in a pool of green threads, and the results can be retrieved by using this object as an iterator. This is very similar in principle to eventlet.GreenPile, except it returns results as they become available rather than in the order they were launched. Correlating results with jobs (if necessary) is left to the caller. """ def __init__(self, size_or_pool): """ :param size_or_pool: thread pool size or a pool to use """ if isinstance(size_or_pool, GreenPool): self._pool = size_or_pool size = self._pool.size else: self._pool = GreenPool(size_or_pool) size = size_or_pool self._responses = eventlet.queue.LightQueue(size) self._inflight = 0 self._pending = 0 def _run_func(self, func, args, kwargs): try: self._responses.put(func(*args, **kwargs)) finally: self._inflight -= 1 @property def inflight(self): return self._inflight def spawn(self, func, *args, **kwargs): """ Spawn a job in a green thread on the pile. """ self._pending += 1 self._inflight += 1 self._pool.spawn(self._run_func, func, args, kwargs) def waitfirst(self, timeout): """ Wait up to timeout seconds for first result to come in. :param timeout: seconds to wait for results :returns: first item to come back, or None """ for result in self._wait(timeout, first_n=1): return result def waitall(self, timeout): """ Wait timeout seconds for any results to come in. :param timeout: seconds to wait for results :returns: list of results accrued in that time """ return self._wait(timeout) def _wait(self, timeout, first_n=None): results = [] try: with GreenAsyncPileWaitallTimeout(timeout): while True: results.append(next(self)) if first_n and len(results) >= first_n: break except (GreenAsyncPileWaitallTimeout, StopIteration): pass return results def __iter__(self): return self def next(self): try: rv = self._responses.get_nowait() except eventlet.queue.Empty: if self._inflight == 0: raise StopIteration() rv = self._responses.get() self._pending -= 1 return rv __next__ = next class StreamingPile(GreenAsyncPile): """ Runs jobs in a pool of green threads, spawning more jobs as results are retrieved and worker threads become available. When used as a context manager, has the same worker-killing properties as :class:`ContextPool`. """ def __init__(self, size): """:param size: number of worker threads to use""" self.pool = ContextPool(size) super(StreamingPile, self).__init__(self.pool) def asyncstarmap(self, func, args_iter): """ This is the same as :func:`itertools.starmap`, except that *func* is executed in a separate green thread for each item, and results won't necessarily have the same order as inputs. 
""" args_iter = iter(args_iter) # Initialize the pile for args in itertools.islice(args_iter, self.pool.size): self.spawn(func, *args) # Keep populating the pile as greenthreads become available for args in args_iter: yield next(self) self.spawn(func, *args) # Drain the pile for result in self: yield result def __enter__(self): self.pool.__enter__() return self def __exit__(self, type, value, traceback): self.pool.__exit__(type, value, traceback) class ModifiedParseResult(ParseResult): """Parse results class for urlparse.""" @property def hostname(self): netloc = self.netloc.split('@', 1)[-1] if netloc.startswith('['): return netloc[1:].split(']')[0] elif ':' in netloc: return netloc.rsplit(':')[0] return netloc @property def port(self): netloc = self.netloc.split('@', 1)[-1] if netloc.startswith('['): netloc = netloc.rsplit(']')[1] if ':' in netloc: return int(netloc.rsplit(':')[1]) return None def urlparse(url): """ urlparse augmentation. This is necessary because urlparse can't handle RFC 2732 URLs. :param url: URL to parse. """ return ModifiedParseResult(*stdlib_urlparse(url)) def validate_sync_to(value, allowed_sync_hosts, realms_conf): """ Validates an X-Container-Sync-To header value, returning the validated endpoint, realm, and realm_key, or an error string. :param value: The X-Container-Sync-To header value to validate. :param allowed_sync_hosts: A list of allowed hosts in endpoints, if realms_conf does not apply. :param realms_conf: A instance of swift.common.container_sync_realms.ContainerSyncRealms to validate against. :returns: A tuple of (error_string, validated_endpoint, realm, realm_key). The error_string will None if the rest of the values have been validated. The validated_endpoint will be the validated endpoint to sync to. The realm and realm_key will be set if validation was done through realms_conf. """ orig_value = value value = value.rstrip('/') if not value: return (None, None, None, None) if value.startswith('//'): if not realms_conf: return (None, None, None, None) data = value[2:].split('/') if len(data) != 4: return ( _('Invalid X-Container-Sync-To format %r') % orig_value, None, None, None) realm, cluster, account, container = data realm_key = realms_conf.key(realm) if not realm_key: return (_('No realm key for %r') % realm, None, None, None) endpoint = realms_conf.endpoint(realm, cluster) if not endpoint: return ( _('No cluster endpoint for %(realm)r %(cluster)r') % {'realm': realm, 'cluster': cluster}, None, None, None) return ( None, '%s/%s/%s' % (endpoint.rstrip('/'), account, container), realm.upper(), realm_key) p = urlparse(value) if p.scheme not in ('http', 'https'): return ( _('Invalid scheme %r in X-Container-Sync-To, must be "//", ' '"http", or "https".') % p.scheme, None, None, None) if not p.path: return (_('Path required in X-Container-Sync-To'), None, None, None) if p.params or p.query or p.fragment: return ( _('Params, queries, and fragments not allowed in ' 'X-Container-Sync-To'), None, None, None) if p.hostname not in allowed_sync_hosts: return ( _('Invalid host %r in X-Container-Sync-To') % p.hostname, None, None, None) return (None, value, None, None) def affinity_key_function(affinity_str): """Turns an affinity config value into a function suitable for passing to sort(). After doing so, the array will be sorted with respect to the given ordering. 
For example, if affinity_str is "r1=1, r2z7=2, r2z8=2", then the array will be sorted with all nodes from region 1 (r1=1) first, then all the nodes from region 2 zones 7 and 8 (r2z7=2 and r2z8=2), then everything else. Note that the order of the pieces of affinity_str is irrelevant; the priority values are what comes after the equals sign. If affinity_str is empty or all whitespace, then the resulting function will not alter the ordering of the nodes. :param affinity_str: affinity config value, e.g. "r1z2=3" or "r1=1, r2z1=2, r2z2=2" :returns: single-argument function :raises: ValueError if argument invalid """ affinity_str = affinity_str.strip() if not affinity_str: return lambda x: 0 priority_matchers = [] pieces = [s.strip() for s in affinity_str.split(',')] for piece in pieces: # matches r<number>=<number> or r<number>z<number>=<number> match = re.match("r(\d+)(?:z(\d+))?=(\d+)$", piece) if match: region, zone, priority = match.groups() region = int(region) priority = int(priority) zone = int(zone) if zone else None matcher = {'region': region, 'priority': priority} if zone is not None: matcher['zone'] = zone priority_matchers.append(matcher) else: raise ValueError("Invalid affinity value: %r" % affinity_str) priority_matchers.sort(key=operator.itemgetter('priority')) def keyfn(ring_node): for matcher in priority_matchers: if (matcher['region'] == ring_node['region'] and ('zone' not in matcher or matcher['zone'] == ring_node['zone'])): return matcher['priority'] return 4294967296 # 2^32, i.e. "a big number" return keyfn def affinity_locality_predicate(write_affinity_str): """ Turns a write-affinity config value into a predicate function for nodes. The returned value will be a 1-arg function that takes a node dictionary and returns a true value if it is "local" and a false value otherwise. The definition of "local" comes from the affinity_str argument passed in here. For example, if affinity_str is "r1, r2z2", then only nodes where region=1 or where (region=2 and zone=2) are considered local. If affinity_str is empty or all whitespace, then the resulting function will consider everything local :param write_affinity_str: affinity config value, e.g. "r1z2" or "r1, r2z1, r2z2" :returns: single-argument function, or None if affinity_str is empty :raises: ValueError if argument invalid """ affinity_str = write_affinity_str.strip() if not affinity_str: return None matchers = [] pieces = [s.strip() for s in affinity_str.split(',')] for piece in pieces: # matches r<number> or r<number>z<number> match = re.match("r(\d+)(?:z(\d+))?$", piece) if match: region, zone = match.groups() region = int(region) zone = int(zone) if zone else None matcher = {'region': region} if zone is not None: matcher['zone'] = zone matchers.append(matcher) else: raise ValueError("Invalid write-affinity value: %r" % affinity_str) def is_local(ring_node): for matcher in matchers: if (matcher['region'] == ring_node['region'] and ('zone' not in matcher or matcher['zone'] == ring_node['zone'])): return True return False return is_local def get_remote_client(req): # remote host for zeus client = req.headers.get('x-cluster-client-ip') if not client and 'x-forwarded-for' in req.headers: # remote host for other lbs client = req.headers['x-forwarded-for'].split(',')[0].strip() if not client: client = req.remote_addr return client def human_readable(value): """ Returns the number in a human readable format; for example 1048576 = "1Mi". 
""" value = float(value) index = -1 suffixes = 'KMGTPEZY' while value >= 1024 and index + 1 < len(suffixes): index += 1 value = round(value / 1024) if index == -1: return '%d' % value return '%d%si' % (round(value), suffixes[index]) def put_recon_cache_entry(cache_entry, key, item): """ Function that will check if item is a dict, and if so put it under cache_entry[key]. We use nested recon cache entries when the object auditor runs in parallel or else in 'once' mode with a specified subset of devices. """ if isinstance(item, dict): if key not in cache_entry or key in cache_entry and not \ isinstance(cache_entry[key], dict): cache_entry[key] = {} elif key in cache_entry and item == {}: cache_entry.pop(key, None) return for k, v in item.items(): if v == {}: cache_entry[key].pop(k, None) else: cache_entry[key][k] = v else: cache_entry[key] = item def dump_recon_cache(cache_dict, cache_file, logger, lock_timeout=2, set_owner=None): """Update recon cache values :param cache_dict: Dictionary of cache key/value pairs to write out :param cache_file: cache file to update :param logger: the logger to use to log an encountered error :param lock_timeout: timeout (in seconds) :param set_owner: Set owner of recon cache file """ try: with lock_file(cache_file, lock_timeout, unlink=False) as cf: cache_entry = {} try: existing_entry = cf.readline() if existing_entry: cache_entry = json.loads(existing_entry) except ValueError: # file doesn't have a valid entry, we'll recreate it pass for cache_key, cache_value in cache_dict.items(): put_recon_cache_entry(cache_entry, cache_key, cache_value) tf = None try: with NamedTemporaryFile(dir=os.path.dirname(cache_file), delete=False) as tf: tf.write(json.dumps(cache_entry) + '\n') if set_owner: os.chown(tf.name, pwd.getpwnam(set_owner).pw_uid, -1) renamer(tf.name, cache_file, fsync=False) finally: if tf is not None: try: os.unlink(tf.name) except OSError as err: if err.errno != errno.ENOENT: raise except (Exception, Timeout): logger.exception(_('Exception dumping recon cache')) def listdir(path): try: return os.listdir(path) except OSError as err: if err.errno != errno.ENOENT: raise return [] def streq_const_time(s1, s2): """Constant-time string comparison. :params s1: the first string :params s2: the second string :return: True if the strings are equal. This function takes two strings and compares them. It is intended to be used when doing a comparison for authentication purposes to help guard against timing attacks. """ if len(s1) != len(s2): return False result = 0 for (a, b) in zip(s1, s2): result |= ord(a) ^ ord(b) return result == 0 def pairs(item_list): """ Returns an iterator of all pairs of elements from item_list. :param item_list: items (no duplicates allowed) """ for i, item1 in enumerate(item_list): for item2 in item_list[(i + 1):]: yield (item1, item2) def replication(func): """ Decorator to declare which methods are accessible for different type of servers: * If option replication_server is None then this decorator doesn't matter. * If option replication_server is True then ONLY decorated with this decorator methods will be started. * If option replication_server is False then decorated with this decorator methods will NOT be started. 
:param func: function to mark accessible for replication """ func.replication = True return func def public(func): """ Decorator to declare which methods are publicly accessible as HTTP requests :param func: function to make public """ func.publicly_accessible = True return func def majority_size(n): return (n // 2) + 1 def quorum_size(n): """ quorum size as it applies to services that use 'replication' for data integrity (Account/Container services). Object quorum_size is defined on a storage policy basis. Number of successful backend requests needed for the proxy to consider the client request successful. """ return (n + 1) // 2 def rsync_ip(ip): """ Transform ip string to an rsync-compatible form Will return ipv4 addresses unchanged, but will nest ipv6 addresses inside square brackets. :param ip: an ip string (ipv4 or ipv6) :returns: a string ip address """ return '[%s]' % ip if is_valid_ipv6(ip) else ip def rsync_module_interpolation(template, device): """ Interpolate devices variables inside a rsync module template :param template: rsync module template as a string :param device: a device from a ring :returns: a string with all variables replaced by device attributes """ replacements = { 'ip': rsync_ip(device.get('ip', '')), 'port': device.get('port', ''), 'replication_ip': rsync_ip(device.get('replication_ip', '')), 'replication_port': device.get('replication_port', ''), 'region': device.get('region', ''), 'zone': device.get('zone', ''), 'device': device.get('device', ''), 'meta': device.get('meta', ''), } try: module = template.format(**replacements) except KeyError as e: raise ValueError('Cannot interpolate rsync_module, invalid variable: ' '%s' % e) return module def get_valid_utf8_str(str_or_unicode): """ Get valid parts of utf-8 str from str, unicode and even invalid utf-8 str :param str_or_unicode: a string or an unicode which can be invalid utf-8 """ if isinstance(str_or_unicode, six.text_type): (str_or_unicode, _len) = utf8_encoder(str_or_unicode, 'replace') (valid_utf8_str, _len) = utf8_decoder(str_or_unicode, 'replace') return valid_utf8_str.encode('utf-8') def list_from_csv(comma_separated_str): """ Splits the str given and returns a properly stripped list of the comma separated values. """ if comma_separated_str: return [v.strip() for v in comma_separated_str.split(',') if v.strip()] return [] def csv_append(csv_string, item): """ Appends an item to a comma-separated string. If the comma-separated string is empty/None, just returns item. """ if csv_string: return ",".join((csv_string, item)) else: return item class CloseableChain(object): """ Like itertools.chain, but with a close method that will attempt to invoke its sub-iterators' close methods, if any. """ def __init__(self, *iterables): self.iterables = iterables def __iter__(self): return iter(itertools.chain(*(self.iterables))) def close(self): for it in self.iterables: close_method = getattr(it, 'close', None) if close_method: close_method() def reiterate(iterable): """ Consume the first item from an iterator, then re-chain it to the rest of the iterator. This is useful when you want to make sure the prologue to downstream generators have been executed before continuing. :param iterable: an iterable object """ if isinstance(iterable, (list, tuple)): return iterable else: iterator = iter(iterable) try: chunk = '' while not chunk: chunk = next(iterator) return CloseableChain([chunk], iterator) except StopIteration: return [] class InputProxy(object): """ File-like object that counts bytes read. 
To be swapped in for wsgi.input for accounting purposes. """ def __init__(self, wsgi_input): """ :param wsgi_input: file-like object to wrap the functionality of """ self.wsgi_input = wsgi_input self.bytes_received = 0 self.client_disconnect = False def read(self, *args, **kwargs): """ Pass read request to the underlying file-like object and add bytes read to total. """ try: chunk = self.wsgi_input.read(*args, **kwargs) except Exception: self.client_disconnect = True raise self.bytes_received += len(chunk) return chunk def readline(self, *args, **kwargs): """ Pass readline request to the underlying file-like object and add bytes read to total. """ try: line = self.wsgi_input.readline(*args, **kwargs) except Exception: self.client_disconnect = True raise self.bytes_received += len(line) return line class LRUCache(object): """ Decorator for size/time bound memoization that evicts the least recently used members. """ PREV, NEXT, KEY, CACHED_AT, VALUE = 0, 1, 2, 3, 4 # link fields def __init__(self, maxsize=1000, maxtime=3600): self.maxsize = maxsize self.maxtime = maxtime self.reset() def reset(self): self.mapping = {} self.head = [None, None, None, None, None] # oldest self.tail = [self.head, None, None, None, None] # newest self.head[self.NEXT] = self.tail def set_cache(self, value, *key): while len(self.mapping) >= self.maxsize: old_next, old_key = self.head[self.NEXT][self.NEXT:self.NEXT + 2] self.head[self.NEXT], old_next[self.PREV] = old_next, self.head del self.mapping[old_key] last = self.tail[self.PREV] link = [last, self.tail, key, time.time(), value] self.mapping[key] = last[self.NEXT] = self.tail[self.PREV] = link return value def get_cached(self, link, *key): link_prev, link_next, key, cached_at, value = link if cached_at + self.maxtime < time.time(): raise KeyError('%r has timed out' % (key,)) link_prev[self.NEXT] = link_next link_next[self.PREV] = link_prev last = self.tail[self.PREV] last[self.NEXT] = self.tail[self.PREV] = link link[self.PREV] = last link[self.NEXT] = self.tail return value def __call__(self, f): class LRUCacheWrapped(object): @functools.wraps(f) def __call__(im_self, *key): link = self.mapping.get(key, self.head) if link is not self.head: try: return self.get_cached(link, *key) except KeyError: pass value = f(*key) self.set_cache(value, *key) return value def size(im_self): """ Return the size of the cache """ return len(self.mapping) def reset(im_self): return self.reset() def get_maxsize(im_self): return self.maxsize def set_maxsize(im_self, i): self.maxsize = i def get_maxtime(im_self): return self.maxtime def set_maxtime(im_self, i): self.maxtime = i maxsize = property(get_maxsize, set_maxsize) maxtime = property(get_maxtime, set_maxtime) def __repr__(im_self): return '<%s %r>' % (im_self.__class__.__name__, f) return LRUCacheWrapped() class Spliterator(object): """ Takes an iterator yielding sliceable things (e.g. strings or lists) and yields subiterators, each yielding up to the requested number of items from the source. 
>>> si = Spliterator(["abcde", "fg", "hijkl"]) >>> ''.join(si.take(4)) "abcd" >>> ''.join(si.take(3)) "efg" >>> ''.join(si.take(1)) "h" >>> ''.join(si.take(3)) "ijk" >>> ''.join(si.take(3)) "l" # shorter than requested; this can happen with the last iterator """ def __init__(self, source_iterable): self.input_iterator = iter(source_iterable) self.leftovers = None self.leftovers_index = 0 self._iterator_in_progress = False def take(self, n): if self._iterator_in_progress: raise ValueError( "cannot call take() again until the first iterator is" " exhausted (has raised StopIteration)") self._iterator_in_progress = True try: if self.leftovers: # All this string slicing is a little awkward, but it's for # a good reason. Consider a length N string that someone is # taking k bytes at a time. # # With this implementation, we create one new string of # length k (copying the bytes) on each call to take(). Once # the whole input has been consumed, each byte has been # copied exactly once, giving O(N) bytes copied. # # If, instead of this, we were to set leftovers = # leftovers[k:] and omit leftovers_index, then each call to # take() would copy k bytes to create the desired substring, # then copy all the remaining bytes to reset leftovers, # resulting in an overall O(N^2) bytes copied. llen = len(self.leftovers) - self.leftovers_index if llen <= n: n -= llen to_yield = self.leftovers[self.leftovers_index:] self.leftovers = None self.leftovers_index = 0 yield to_yield else: to_yield = self.leftovers[ self.leftovers_index:(self.leftovers_index + n)] self.leftovers_index += n n = 0 yield to_yield while n > 0: chunk = next(self.input_iterator) cl = len(chunk) if cl <= n: n -= cl yield chunk else: self.leftovers = chunk self.leftovers_index = n yield chunk[:n] n = 0 finally: self._iterator_in_progress = False def tpool_reraise(func, *args, **kwargs): """ Hack to work around Eventlet's tpool not catching and reraising Timeouts. """ def inner(): try: return func(*args, **kwargs) except BaseException as err: return err resp = tpool.execute(inner) if isinstance(resp, BaseException): raise resp return resp def ismount(path): """ Test whether a path is a mount point. This will catch any exceptions and translate them into a False return value Use ismount_raw to have the exceptions raised instead. """ try: return ismount_raw(path) except OSError: return False def ismount_raw(path): """ Test whether a path is a mount point. Whereas ismount will catch any exceptions and just return False, this raw version will not catch exceptions. This is code hijacked from C Python 2.6.8, adapted to remove the extra lstat() system call. """ try: s1 = os.lstat(path) except os.error as err: if err.errno == errno.ENOENT: # It doesn't exist -- so not a mount point :-) return False raise if stat.S_ISLNK(s1.st_mode): # A symlink can never be a mount point return False s2 = os.lstat(os.path.join(path, '..')) dev1 = s1.st_dev dev2 = s2.st_dev if dev1 != dev2: # path/.. on a different device as path return True ino1 = s1.st_ino ino2 = s2.st_ino if ino1 == ino2: # path/.. 
is the same i-node as path return True # Device and inode checks are not properly working inside containerized # environments, therefore using a workaround to check if there is a # stubfile placed by an operator if os.path.isfile(os.path.join(path, ".ismount")): return True return False def close_if_possible(maybe_closable): close_method = getattr(maybe_closable, 'close', None) if callable(close_method): return close_method() @contextmanager def closing_if_possible(maybe_closable): """ Like contextlib.closing(), but doesn't crash if the object lacks a close() method. PEP 333 (WSGI) says: "If the iterable returned by the application has a close() method, the server or gateway must call that method upon completion of the current request[.]" This function makes that easier. """ try: yield maybe_closable finally: close_if_possible(maybe_closable) _rfc_token = r'[^()<>@,;:\"/\[\]?={}\x00-\x20\x7f]+' _rfc_extension_pattern = re.compile( r'(?:\s*;\s*(' + _rfc_token + r")\s*(?:=\s*(" + _rfc_token + r'|"(?:[^"\\]|\\.)*"))?)') _content_range_pattern = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$') def parse_content_range(content_range): """ Parse a content-range header into (first_byte, last_byte, total_size). See RFC 7233 section 4.2 for details on the header format, but it's basically "Content-Range: bytes ${start}-${end}/${total}". :param content_range: Content-Range header value to parse, e.g. "bytes 100-1249/49004" :returns: 3-tuple (start, end, total) :raises: ValueError if malformed """ found = re.search(_content_range_pattern, content_range) if not found: raise ValueError("malformed Content-Range %r" % (content_range,)) return tuple(int(x) for x in found.groups()) def parse_content_type(content_type): """ Parse a content-type and its parameters into values. RFC 2616 sec 14.17 and 3.7 are pertinent. **Examples**:: 'text/plain; charset=UTF-8' -> ('text/plain', [('charset, 'UTF-8')]) 'text/plain; charset=UTF-8; level=1' -> ('text/plain', [('charset, 'UTF-8'), ('level', '1')]) :param content_type: content_type to parse :returns: a tuple containing (content type, list of k, v parameter tuples) """ parm_list = [] if ';' in content_type: content_type, parms = content_type.split(';', 1) parms = ';' + parms for m in _rfc_extension_pattern.findall(parms): key = m[0].strip() value = m[1].strip() parm_list.append((key, value)) return content_type, parm_list def extract_swift_bytes(content_type): """ Parse a content-type and return a tuple containing: - the content_type string minus any swift_bytes param, - the swift_bytes value or None if the param was not found :param content_type: a content-type string :return: a tuple of (content-type, swift_bytes or None) """ content_type, params = parse_content_type(content_type) swift_bytes = None for k, v in params: if k == 'swift_bytes': swift_bytes = v else: content_type += ';%s=%s' % (k, v) return content_type, swift_bytes def override_bytes_from_content_type(listing_dict, logger=None): """ Takes a dict from a container listing and overrides the content_type, bytes fields if swift_bytes is set. 
""" listing_dict['content_type'], swift_bytes = extract_swift_bytes( listing_dict['content_type']) if swift_bytes is not None: try: listing_dict['bytes'] = int(swift_bytes) except ValueError: if logger: logger.exception(_("Invalid swift_bytes")) def clean_content_type(value): if ';' in value: left, right = value.rsplit(';', 1) if right.lstrip().startswith('swift_bytes='): return left return value def quote(value, safe='/'): """ Patched version of urllib.quote that encodes utf-8 strings before quoting """ return _quote(get_valid_utf8_str(value), safe) def get_expirer_container(x_delete_at, expirer_divisor, acc, cont, obj): """ Returns a expiring object container name for given X-Delete-At and a/c/o. """ shard_int = int(hash_path(acc, cont, obj), 16) % 100 return normalize_delete_at_timestamp( int(x_delete_at) / expirer_divisor * expirer_divisor - shard_int) class _MultipartMimeFileLikeObject(object): def __init__(self, wsgi_input, boundary, input_buffer, read_chunk_size): self.no_more_data_for_this_file = False self.no_more_files = False self.wsgi_input = wsgi_input self.boundary = boundary self.input_buffer = input_buffer self.read_chunk_size = read_chunk_size def read(self, length=None): if not length: length = self.read_chunk_size if self.no_more_data_for_this_file: return b'' # read enough data to know whether we're going to run # into a boundary in next [length] bytes if len(self.input_buffer) < length + len(self.boundary) + 2: to_read = length + len(self.boundary) + 2 while to_read > 0: try: chunk = self.wsgi_input.read(to_read) except (IOError, ValueError) as e: raise swift.common.exceptions.ChunkReadError(str(e)) to_read -= len(chunk) self.input_buffer += chunk if not chunk: self.no_more_files = True break boundary_pos = self.input_buffer.find(self.boundary) # boundary does not exist in the next (length) bytes if boundary_pos == -1 or boundary_pos > length: ret = self.input_buffer[:length] self.input_buffer = self.input_buffer[length:] # if it does, just return data up to the boundary else: ret, self.input_buffer = self.input_buffer.split(self.boundary, 1) self.no_more_files = self.input_buffer.startswith(b'--') self.no_more_data_for_this_file = True self.input_buffer = self.input_buffer[2:] return ret def readline(self): if self.no_more_data_for_this_file: return b'' boundary_pos = newline_pos = -1 while newline_pos < 0 and boundary_pos < 0: try: chunk = self.wsgi_input.read(self.read_chunk_size) except (IOError, ValueError) as e: raise swift.common.exceptions.ChunkReadError(str(e)) self.input_buffer += chunk newline_pos = self.input_buffer.find(b'\r\n') boundary_pos = self.input_buffer.find(self.boundary) if not chunk: self.no_more_files = True break # found a newline if newline_pos >= 0 and \ (boundary_pos < 0 or newline_pos < boundary_pos): # Use self.read to ensure any logic there happens... ret = b'' to_read = newline_pos + 2 while to_read > 0: chunk = self.read(to_read) # Should never happen since we're reading from input_buffer, # but just for completeness... if not chunk: break to_read -= len(chunk) ret += chunk return ret else: # no newlines, just return up to next boundary return self.read(len(self.input_buffer)) def iter_multipart_mime_documents(wsgi_input, boundary, read_chunk_size=4096): """ Given a multi-part-mime-encoded input file object and boundary, yield file-like objects for each part. Note that this does not split each part into headers and body; the caller is responsible for doing that if necessary. :param wsgi_input: The file-like object to read from. 
:param boundary: The mime boundary to separate new file-like objects on. :returns: A generator of file-like objects for each part. :raises: MimeInvalid if the document is malformed """ boundary = '--' + boundary blen = len(boundary) + 2 # \r\n try: got = wsgi_input.readline(blen) while got == '\r\n': got = wsgi_input.readline(blen) except (IOError, ValueError) as e: raise swift.common.exceptions.ChunkReadError(str(e)) if got.strip() != boundary: raise swift.common.exceptions.MimeInvalid( 'invalid starting boundary: wanted %r, got %r', (boundary, got)) boundary = '\r\n' + boundary input_buffer = '' done = False while not done: it = _MultipartMimeFileLikeObject(wsgi_input, boundary, input_buffer, read_chunk_size) yield it done = it.no_more_files input_buffer = it.input_buffer def parse_mime_headers(doc_file): """ Takes a file-like object containing a MIME document and returns a HeaderKeyDict containing the headers. The body of the message is not consumed: the position in doc_file is left at the beginning of the body. This function was inspired by the Python standard library's http.client.parse_headers. :param doc_file: binary file-like object containing a MIME document :returns: a swift.common.swob.HeaderKeyDict containing the headers """ headers = [] while True: line = doc_file.readline() done = line in (b'\r\n', b'\n', b'') if six.PY3: try: line = line.decode('utf-8') except UnicodeDecodeError: line = line.decode('latin1') headers.append(line) if done: break if six.PY3: header_string = ''.join(headers) else: header_string = b''.join(headers) headers = email.parser.Parser().parsestr(header_string) return HeaderKeyDict(headers) def mime_to_document_iters(input_file, boundary, read_chunk_size=4096): """ Takes a file-like object containing a multipart MIME document and returns an iterator of (headers, body-file) tuples. :param input_file: file-like object with the MIME doc in it :param boundary: MIME boundary, sans dashes (e.g. "divider", not "--divider") :param read_chunk_size: size of strings read via input_file.read() """ doc_files = iter_multipart_mime_documents(input_file, boundary, read_chunk_size) for i, doc_file in enumerate(doc_files): # this consumes the headers and leaves just the body in doc_file headers = parse_mime_headers(doc_file) yield (headers, doc_file) def maybe_multipart_byteranges_to_document_iters(app_iter, content_type): """ Takes an iterator that may or may not contain a multipart MIME document as well as content type and returns an iterator of body iterators. :param app_iter: iterator that may contain a multipart MIME document :param content_type: content type of the app_iter, used to determine whether it conains a multipart document and, if so, what the boundary is between documents """ content_type, params_list = parse_content_type(content_type) if content_type != 'multipart/byteranges': yield app_iter return body_file = FileLikeIter(app_iter) boundary = dict(params_list)['boundary'] for _headers, body in mime_to_document_iters(body_file, boundary): yield (chunk for chunk in iter(lambda: body.read(65536), '')) def document_iters_to_multipart_byteranges(ranges_iter, boundary): """ Takes an iterator of range iters and yields a multipart/byteranges MIME document suitable for sending as the body of a multi-range 206 response. See document_iters_to_http_response_body for parameter descriptions. 
""" divider = "--" + boundary + "\r\n" terminator = "--" + boundary + "--" for range_spec in ranges_iter: start_byte = range_spec["start_byte"] end_byte = range_spec["end_byte"] entity_length = range_spec.get("entity_length", "*") content_type = range_spec["content_type"] part_iter = range_spec["part_iter"] part_header = ''.join(( divider, "Content-Type: ", str(content_type), "\r\n", "Content-Range: ", "bytes %d-%d/%s\r\n" % ( start_byte, end_byte, entity_length), "\r\n" )) yield part_header for chunk in part_iter: yield chunk yield "\r\n" yield terminator def document_iters_to_http_response_body(ranges_iter, boundary, multipart, logger): """ Takes an iterator of range iters and turns it into an appropriate HTTP response body, whether that's multipart/byteranges or not. This is almost, but not quite, the inverse of request_helpers.http_response_to_document_iters(). This function only yields chunks of the body, not any headers. :param ranges_iter: an iterator of dictionaries, one per range. Each dictionary must contain at least the following key: "part_iter": iterator yielding the bytes in the range Additionally, if multipart is True, then the following other keys are required: "start_byte": index of the first byte in the range "end_byte": index of the last byte in the range "content_type": value for the range's Content-Type header Finally, there is one optional key that is used in the multipart/byteranges case: "entity_length": length of the requested entity (not necessarily equal to the response length). If omitted, "*" will be used. Each part_iter will be exhausted prior to calling next(ranges_iter). :param boundary: MIME boundary to use, sans dashes (e.g. "boundary", not "--boundary"). :param multipart: True if the response should be multipart/byteranges, False otherwise. This should be True if and only if you have 2 or more ranges. :param logger: a logger """ if multipart: return document_iters_to_multipart_byteranges(ranges_iter, boundary) else: try: response_body_iter = next(ranges_iter)['part_iter'] except StopIteration: return '' # We need to make sure ranges_iter does not get garbage-collected # before response_body_iter is exhausted. The reason is that # ranges_iter has a finally block that calls close_swift_conn, and # so if that finally block fires before we read response_body_iter, # there's nothing there. def string_along(useful_iter, useless_iter_iter, logger): with closing_if_possible(useful_iter): for x in useful_iter: yield x try: next(useless_iter_iter) except StopIteration: pass else: logger.warning( _("More than one part in a single-part response?")) return string_along(response_body_iter, ranges_iter, logger) def multipart_byteranges_to_document_iters(input_file, boundary, read_chunk_size=4096): """ Takes a file-like object containing a multipart/byteranges MIME document (see RFC 7233, Appendix A) and returns an iterator of (first-byte, last-byte, length, document-headers, body-file) 5-tuples. :param input_file: file-like object with the MIME doc in it :param boundary: MIME boundary, sans dashes (e.g. "divider", not "--divider") :param read_chunk_size: size of strings read via input_file.read() """ for headers, body in mime_to_document_iters(input_file, boundary, read_chunk_size): first_byte, last_byte, length = parse_content_range( headers.get('content-range')) yield (first_byte, last_byte, length, headers.items(), body) #: Regular expression to match form attributes. 
ATTRIBUTES_RE = re.compile(r'(\w+)=(".*?"|[^";]+)(; ?|$)') def parse_content_disposition(header): """ Given the value of a header like: Content-Disposition: form-data; name="somefile"; filename="test.html" Return data like ("form-data", {"name": "somefile", "filename": "test.html"}) :param header: Value of a header (the part after the ': '). :returns: (value name, dict) of the attribute data parsed (see above). """ attributes = {} attrs = '' if ';' in header: header, attrs = [x.strip() for x in header.split(';', 1)] m = True while m: m = ATTRIBUTES_RE.match(attrs) if m: attrs = attrs[len(m.group(0)):] attributes[m.group(1)] = m.group(2).strip('"') return header, attributes class sockaddr_alg(ctypes.Structure): _fields_ = [("salg_family", ctypes.c_ushort), ("salg_type", ctypes.c_ubyte * 14), ("salg_feat", ctypes.c_uint), ("salg_mask", ctypes.c_uint), ("salg_name", ctypes.c_ubyte * 64)] _bound_md5_sockfd = None def get_md5_socket(): """ Get an MD5 socket file descriptor. One can MD5 data with it by writing it to the socket with os.write, then os.read the 16 bytes of the checksum out later. NOTE: It is the caller's responsibility to ensure that os.close() is called on the returned file descriptor. This is a bare file descriptor, not a Python object. It doesn't close itself. """ # Linux's AF_ALG sockets work like this: # # First, initialize a socket with socket() and bind(). This tells the # socket what algorithm to use, as well as setting up any necessary bits # like crypto keys. Of course, MD5 doesn't need any keys, so it's just the # algorithm name. # # Second, to hash some data, get a second socket by calling accept() on # the first socket. Write data to the socket, then when finished, read the # checksum from the socket and close it. This lets you checksum multiple # things without repeating all the setup code each time. # # Since we only need to bind() one socket, we do that here and save it for # future re-use. That way, we only use one file descriptor to get an MD5 # socket instead of two, and we also get to save some syscalls. global _bound_md5_sockfd global _libc_socket global _libc_bind global _libc_accept if _libc_accept is None: _libc_accept = load_libc_function('accept', fail_if_missing=True) if _libc_socket is None: _libc_socket = load_libc_function('socket', fail_if_missing=True) if _libc_bind is None: _libc_bind = load_libc_function('bind', fail_if_missing=True) # Do this at first call rather than at import time so that we don't use a # file descriptor on systems that aren't using any MD5 sockets. if _bound_md5_sockfd is None: sockaddr_setup = sockaddr_alg( AF_ALG, (ord('h'), ord('a'), ord('s'), ord('h'), 0), 0, 0, (ord('m'), ord('d'), ord('5'), 0)) hash_sockfd = _libc_socket(ctypes.c_int(AF_ALG), ctypes.c_int(socket.SOCK_SEQPACKET), ctypes.c_int(0)) if hash_sockfd < 0: raise IOError(ctypes.get_errno(), "Failed to initialize MD5 socket") bind_result = _libc_bind(ctypes.c_int(hash_sockfd), ctypes.pointer(sockaddr_setup), ctypes.c_int(ctypes.sizeof(sockaddr_alg))) if bind_result < 0: os.close(hash_sockfd) raise IOError(ctypes.get_errno(), "Failed to bind MD5 socket") _bound_md5_sockfd = hash_sockfd md5_sockfd = _libc_accept(ctypes.c_int(_bound_md5_sockfd), None, 0) if md5_sockfd < 0: raise IOError(ctypes.get_errno(), "Failed to accept MD5 socket") return md5_sockfd def modify_priority(conf, logger): """ Modify priority by nice and ionice. 
""" global _libc_setpriority if _libc_setpriority is None: _libc_setpriority = load_libc_function('setpriority', errcheck=True) def _setpriority(nice_priority): """ setpriority for this pid :param nice_priority: valid values are -19 to 20 """ try: _libc_setpriority(PRIO_PROCESS, os.getpid(), int(nice_priority)) except (ValueError, OSError): print(_("WARNING: Unable to modify scheduling priority of process." " Keeping unchanged! Check logs for more info. ")) logger.exception('Unable to modify nice priority') else: logger.debug('set nice priority to %s' % nice_priority) nice_priority = conf.get('nice_priority') if nice_priority is not None: _setpriority(nice_priority) global _posix_syscall if _posix_syscall is None: _posix_syscall = load_libc_function('syscall', errcheck=True) def _ioprio_set(io_class, io_priority): """ ioprio_set for this process :param io_class: the I/O class component, can be IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, or IOPRIO_CLASS_IDLE :param io_priority: priority value in the I/O class """ try: io_class = IO_CLASS_ENUM[io_class] io_priority = int(io_priority) _posix_syscall(NR_ioprio_set(), IOPRIO_WHO_PROCESS, os.getpid(), IOPRIO_PRIO_VALUE(io_class, io_priority)) except (KeyError, ValueError, OSError): print(_("WARNING: Unable to modify I/O scheduling class " "and priority of process. Keeping unchanged! " "Check logs for more info.")) logger.exception("Unable to modify ionice priority") else: logger.debug('set ionice class %s priority %s', io_class, io_priority) io_class = conf.get("ionice_class") if io_class is None: return io_priority = conf.get("ionice_priority", 0) _ioprio_set(io_class, io_priority) def o_tmpfile_supported(): """ Returns True if O_TMPFILE flag is supported. O_TMPFILE was introduced in Linux 3.11 but it also requires support from underlying filesystem being used. Some common filesystems and linux versions in which those filesystems added support for O_TMPFILE: xfs (3.15) ext4 (3.11) btrfs (3.16) """ return all([linkat.available, platform.system() == 'Linux', LooseVersion(platform.release()) >= LooseVersion('3.16')]) def safe_json_loads(value): if value: try: return json.loads(value) except (TypeError, ValueError): pass return None MD5_BLOCK_READ_BYTES = 4096 def md5_hash_for_file(fname): """ Get the MD5 checksum of a file. :param fname: path to file :returns: MD5 checksum, hex encoded """ with open(fname, 'rb') as f: md5sum = md5() for block in iter(lambda: f.read(MD5_BLOCK_READ_BYTES), ''): md5sum.update(block) return md5sum.hexdigest()
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
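The Python record above appears to be OpenStack Swift's common utilities module. Below is a minimal usage sketch of a few of its small, pure helpers, assuming the module is importable as swift.common.utils; the import path and the assert-style checks are illustrative assumptions rather than anything stated in the record itself.

# Minimal sketch exercising a few pure helpers defined in the utilities
# module above; assumes it is importable as swift.common.utils.
from swift.common.utils import (
    parse_content_range, parse_socket_string, human_readable, list_from_csv)

# parse_content_range returns (first_byte, last_byte, total_size) as ints.
assert parse_content_range('bytes 100-1249/49004') == (100, 1249, 49004)

# parse_socket_string splits host and port; IPv6 literals must be bracketed.
# A port parsed out of the string comes back as a string, while an applied
# default keeps whatever type was passed in.
assert parse_socket_string('[::1]:1337', 6000) == ('::1', '1337')
assert parse_socket_string('server.org', 6000) == ('server.org', 6000)

# human_readable renders byte counts with binary-prefix suffixes.
assert human_readable(1048576) == '1Mi'

# list_from_csv strips whitespace and drops empty items.
assert list_from_csv('cpu, mem, ,disk') == ['cpu', 'mem', 'disk']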
config/config.go
package config import ( "bytes" "fmt" "io/ioutil" "log" "net/http" "net/url" "os" "path/filepath" "reflect" "regexp" "runtime" "sort" "strconv" "strings" "time" "github.com/influxdata/telegraf" "github.com/influxdata/telegraf/internal" "github.com/influxdata/telegraf/models" "github.com/influxdata/telegraf/plugins/aggregators" "github.com/influxdata/telegraf/plugins/inputs" "github.com/influxdata/telegraf/plugins/outputs" "github.com/influxdata/telegraf/plugins/parsers" "github.com/influxdata/telegraf/plugins/processors" "github.com/influxdata/telegraf/plugins/serializers" "github.com/influxdata/toml" "github.com/influxdata/toml/ast" ) var ( // Default sections sectionDefaults = []string{"global_tags", "agent", "outputs", "processors", "aggregators", "inputs"} // Default input plugins inputDefaults = []string{"cpu", "mem", "swap", "system", "kernel", "processes", "disk", "diskio"} // Default output plugins outputDefaults = []string{"influxdb"} // envVarRe is a regex to find environment variables in the config file envVarRe = regexp.MustCompile(`\$\{(\w+)\}|\$(\w+)`) envVarEscaper = strings.NewReplacer( `"`, `\"`, `\`, `\\`, ) ) // Config specifies the URL/user/password for the database that telegraf // will be logging to, as well as all the plugins that the user has // specified type Config struct { toml *toml.Config errs []error // config load errors. UnusedFields map[string]bool Tags map[string]string InputFilters []string OutputFilters []string Agent *AgentConfig Inputs []*models.RunningInput Outputs []*models.RunningOutput Aggregators []*models.RunningAggregator // Processors have a slice wrapper type because they need to be sorted Processors models.RunningProcessors AggProcessors models.RunningProcessors } // NewConfig creates a new struct to hold the Telegraf config. // For historical reasons, It holds the actual instances of the running plugins // once the configuration is parsed. func NewConfig() *Config { c := &Config{ UnusedFields: map[string]bool{}, // Agent defaults: Agent: &AgentConfig{ Interval: internal.Duration{Duration: 10 * time.Second}, RoundInterval: true, FlushInterval: internal.Duration{Duration: 10 * time.Second}, LogTarget: "file", LogfileRotationMaxArchives: 5, }, Tags: make(map[string]string), Inputs: make([]*models.RunningInput, 0), Outputs: make([]*models.RunningOutput, 0), Processors: make([]*models.RunningProcessor, 0), AggProcessors: make([]*models.RunningProcessor, 0), InputFilters: make([]string, 0), OutputFilters: make([]string, 0), } tomlCfg := &toml.Config{ NormFieldName: toml.DefaultConfig.NormFieldName, FieldToKey: toml.DefaultConfig.FieldToKey, MissingField: c.missingTomlField, } c.toml = tomlCfg return c } // AgentConfig defines configuration that will be used by the Telegraf agent type AgentConfig struct { // Interval at which to gather information Interval internal.Duration // RoundInterval rounds collection interval to 'interval'. // ie, if Interval=10s then always collect on :00, :10, :20, etc. RoundInterval bool // By default or when set to "0s", precision will be set to the same // timestamp order as the collection interval, with the maximum being 1s. // ie, when interval = "10s", precision will be "1s" // when interval = "250ms", precision will be "1ms" // Precision will NOT be used for service inputs. It is up to each individual // service input to set the timestamp at the appropriate precision. Precision internal.Duration // CollectionJitter is used to jitter the collection by a random amount. 
// Each plugin will sleep for a random time within jitter before collecting. // This can be used to avoid many plugins querying things like sysfs at the // same time, which can have a measurable effect on the system. CollectionJitter internal.Duration // FlushInterval is the Interval at which to flush data FlushInterval internal.Duration // FlushJitter Jitters the flush interval by a random amount. // This is primarily to avoid large write spikes for users running a large // number of telegraf instances. // ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s FlushJitter internal.Duration // MetricBatchSize is the maximum number of metrics that is wrote to an // output plugin in one call. MetricBatchSize int // MetricBufferLimit is the max number of metrics that each output plugin // will cache. The buffer is cleared when a successful write occurs. When // full, the oldest metrics will be overwritten. This number should be a // multiple of MetricBatchSize. Due to current implementation, this could // not be less than 2 times MetricBatchSize. MetricBufferLimit int // FlushBufferWhenFull tells Telegraf to flush the metric buffer whenever // it fills up, regardless of FlushInterval. Setting this option to true // does _not_ deactivate FlushInterval. FlushBufferWhenFull bool // deprecated in 0.13; has no effect // TODO(cam): Remove UTC and parameter, they are no longer // valid for the agent config. Leaving them here for now for backwards- // compatibility UTC bool `toml:"utc"` // deprecated in 1.0.0; has no effect // Debug is the option for running in debug mode Debug bool `toml:"debug"` // Quiet is the option for running in quiet mode Quiet bool `toml:"quiet"` // Log target controls the destination for logs and can be one of "file", // "stderr" or, on Windows, "eventlog". When set to "file", the output file // is determined by the "logfile" setting. LogTarget string `toml:"logtarget"` // Name of the file to be logged to when using the "file" logtarget. If set to // the empty string then logs are written to stderr. Logfile string `toml:"logfile"` // The file will be rotated after the time interval specified. When set // to 0 no time based rotation is performed. LogfileRotationInterval internal.Duration `toml:"logfile_rotation_interval"` // The logfile will be rotated when it becomes larger than the specified // size. When set to 0 no size based rotation is performed. LogfileRotationMaxSize internal.Size `toml:"logfile_rotation_max_size"` // Maximum number of rotated archives to keep, any older logs are deleted. // If set to -1, no archives are removed. LogfileRotationMaxArchives int `toml:"logfile_rotation_max_archives"` Hostname string OmitHostname bool } // InputNames returns a list of strings of the configured inputs. func (c *Config) InputNames() []string { var name []string for _, input := range c.Inputs { name = append(name, input.Config.Name) } return PluginNameCounts(name) } // AggregatorNames returns a list of strings of the configured aggregators. func (c *Config) AggregatorNames() []string { var name []string for _, aggregator := range c.Aggregators { name = append(name, aggregator.Config.Name) } return PluginNameCounts(name) } // ProcessorNames returns a list of strings of the configured processors. func (c *Config) ProcessorNames() []string { var name []string for _, processor := range c.Processors { name = append(name, processor.Config.Name) } return PluginNameCounts(name) } // OutputNames returns a list of strings of the configured outputs. 
func (c *Config) OutputNames() []string { var name []string for _, output := range c.Outputs { name = append(name, output.Config.Name) } return PluginNameCounts(name) } // PluginNameCounts returns a list of sorted plugin names and their count func PluginNameCounts(plugins []string) []string { names := make(map[string]int) for _, plugin := range plugins { names[plugin]++ } var namecount []string for name, count := range names { if count == 1 { namecount = append(namecount, name) } else { namecount = append(namecount, fmt.Sprintf("%s (%dx)", name, count)) } } sort.Strings(namecount) return namecount } // ListTags returns a string of tags specified in the config, // line-protocol style func (c *Config) ListTags() string { var tags []string for k, v := range c.Tags { tags = append(tags, fmt.Sprintf("%s=%s", k, v)) } sort.Strings(tags) return strings.Join(tags, " ") } var header = `# Telegraf Configuration # # Telegraf is entirely plugin driven. All metrics are gathered from the # declared inputs, and sent to the declared outputs. # # Plugins must be declared in here to be active. # To deactivate a plugin, comment out the name and any variables. # # Use 'telegraf -config telegraf.conf -test' to see what metrics a config # file would generate. # # Environment variables can be used anywhere in this config file, simply surround # them with ${}. For strings the variable must be within quotes (ie, "${STR_VAR}"), # for numbers and booleans they should be plain (ie, ${INT_VAR}, ${BOOL_VAR}) ` var globalTagsConfig = ` # Global tags can be specified here in key="value" format. [global_tags] # dc = "us-east-1" # will tag all metrics with dc=us-east-1 # rack = "1a" ## Environment variables can be used as tags, and throughout the config file # user = "$USER" ` var agentConfig = ` # Configuration for telegraf agent [agent] ## Default data collection interval for all inputs interval = "10s" ## Rounds collection interval to 'interval' ## ie, if interval="10s" then always collect on :00, :10, :20, etc. round_interval = true ## Telegraf will send metrics to outputs in batches of at most ## metric_batch_size metrics. ## This controls the size of writes that Telegraf sends to output plugins. metric_batch_size = 1000 ## Maximum number of unwritten metrics per output. Increasing this value ## allows for longer periods of output downtime without dropping metrics at the ## cost of higher maximum memory usage. metric_buffer_limit = 10000 ## Collection jitter is used to jitter the collection by a random amount. ## Each plugin will sleep for a random time within jitter before collecting. ## This can be used to avoid many plugins querying things like sysfs at the ## same time, which can have a measurable effect on the system. collection_jitter = "0s" ## Default flushing interval for all outputs. Maximum flush_interval will be ## flush_interval + flush_jitter flush_interval = "10s" ## Jitter the flush interval by a random amount. This is primarily to avoid ## large write spikes for users running a large number of telegraf instances. ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s flush_jitter = "0s" ## By default or when set to "0s", precision will be set to the same ## timestamp order as the collection interval, with the maximum being 1s. ## ie, when interval = "10s", precision will be "1s" ## when interval = "250ms", precision will be "1ms" ## Precision will NOT be used for service inputs. It is up to each individual ## service input to set the timestamp at the appropriate precision. 
## Valid time units are "ns", "us" (or "µs"), "ms", "s". precision = "" ## Log at debug level. # debug = false ## Log only error level messages. # quiet = false ## Log target controls the destination for logs and can be one of "file", ## "stderr" or, on Windows, "eventlog". When set to "file", the output file ## is determined by the "logfile" setting. # logtarget = "file" ## Name of the file to be logged to when using the "file" logtarget. If set to ## the empty string then logs are written to stderr. # logfile = "" ## The logfile will be rotated after the time interval specified. When set ## to 0 no time based rotation is performed. Logs are rotated only when ## written to, if there is no log activity rotation may be delayed. # logfile_rotation_interval = "0d" ## The logfile will be rotated when it becomes larger than the specified ## size. When set to 0 no size based rotation is performed. # logfile_rotation_max_size = "0MB" ## Maximum number of rotated archives to keep, any older logs are deleted. ## If set to -1, no archives are removed. # logfile_rotation_max_archives = 5 ## Override default hostname, if empty use os.Hostname() hostname = "" ## If set to true, do no set the "host" tag in the telegraf agent. omit_hostname = false ` var outputHeader = ` ############################################################################### # OUTPUT PLUGINS # ############################################################################### ` var processorHeader = ` ############################################################################### # PROCESSOR PLUGINS # ############################################################################### ` var aggregatorHeader = ` ############################################################################### # AGGREGATOR PLUGINS # ############################################################################### ` var inputHeader = ` ############################################################################### # INPUT PLUGINS # ############################################################################### ` var serviceInputHeader = ` ############################################################################### # SERVICE INPUT PLUGINS # ############################################################################### ` // PrintSampleConfig prints the sample config func PrintSampleConfig( sectionFilters []string, inputFilters []string, outputFilters []string, aggregatorFilters []string, processorFilters []string, ) { // print headers fmt.Printf(header) if len(sectionFilters) == 0 { sectionFilters = sectionDefaults } printFilteredGlobalSections(sectionFilters) // print output plugins if sliceContains("outputs", sectionFilters) { if len(outputFilters) != 0 { if len(outputFilters) >= 3 && outputFilters[1] != "none" { fmt.Printf(outputHeader) } printFilteredOutputs(outputFilters, false) } else { fmt.Printf(outputHeader) printFilteredOutputs(outputDefaults, false) // Print non-default outputs, commented var pnames []string for pname := range outputs.Outputs { if !sliceContains(pname, outputDefaults) { pnames = append(pnames, pname) } } sort.Strings(pnames) printFilteredOutputs(pnames, true) } } // print processor plugins if sliceContains("processors", sectionFilters) { if len(processorFilters) != 0 { if len(processorFilters) >= 3 && processorFilters[1] != "none" { fmt.Printf(processorHeader) } printFilteredProcessors(processorFilters, false) } else { fmt.Printf(processorHeader) pnames := []string{} for pname := range processors.Processors { pnames = append(pnames, 
pname) } sort.Strings(pnames) printFilteredProcessors(pnames, true) } } // print aggregator plugins if sliceContains("aggregators", sectionFilters) { if len(aggregatorFilters) != 0 { if len(aggregatorFilters) >= 3 && aggregatorFilters[1] != "none" { fmt.Printf(aggregatorHeader) } printFilteredAggregators(aggregatorFilters, false) } else { fmt.Printf(aggregatorHeader) pnames := []string{} for pname := range aggregators.Aggregators { pnames = append(pnames, pname) } sort.Strings(pnames) printFilteredAggregators(pnames, true) } } // print input plugins if sliceContains("inputs", sectionFilters) { if len(inputFilters) != 0 { if len(inputFilters) >= 3 && inputFilters[1] != "none" { fmt.Printf(inputHeader) } printFilteredInputs(inputFilters, false) } else { fmt.Printf(inputHeader) printFilteredInputs(inputDefaults, false) // Print non-default inputs, commented var pnames []string for pname := range inputs.Inputs { if !sliceContains(pname, inputDefaults) { pnames = append(pnames, pname) } } sort.Strings(pnames) printFilteredInputs(pnames, true) } } } func printFilteredProcessors(processorFilters []string, commented bool) { // Filter processors var pnames []string for pname := range processors.Processors { if sliceContains(pname, processorFilters) { pnames = append(pnames, pname) } } sort.Strings(pnames) // Print Outputs for _, pname := range pnames { creator := processors.Processors[pname] output := creator() printConfig(pname, output, "processors", commented) } } func printFilteredAggregators(aggregatorFilters []string, commented bool) { // Filter outputs var anames []string for aname := range aggregators.Aggregators { if sliceContains(aname, aggregatorFilters) { anames = append(anames, aname) } } sort.Strings(anames) // Print Outputs for _, aname := range anames { creator := aggregators.Aggregators[aname] output := creator() printConfig(aname, output, "aggregators", commented) } } func printFilteredInputs(inputFilters []string, commented bool) { // Filter inputs var pnames []string for pname := range inputs.Inputs { if sliceContains(pname, inputFilters) { pnames = append(pnames, pname) } } sort.Strings(pnames) // cache service inputs to print them at the end servInputs := make(map[string]telegraf.ServiceInput) // for alphabetical looping: servInputNames := []string{} // Print Inputs for _, pname := range pnames { if pname == "cisco_telemetry_gnmi" { continue } creator := inputs.Inputs[pname] input := creator() switch p := input.(type) { case telegraf.ServiceInput: servInputs[pname] = p servInputNames = append(servInputNames, pname) continue } printConfig(pname, input, "inputs", commented) } // Print Service Inputs if len(servInputs) == 0 { return } sort.Strings(servInputNames) fmt.Printf(serviceInputHeader) for _, name := range servInputNames { printConfig(name, servInputs[name], "inputs", commented) } } func printFilteredOutputs(outputFilters []string, commented bool) { // Filter outputs var onames []string for oname := range outputs.Outputs { if sliceContains(oname, outputFilters) { onames = append(onames, oname) } } sort.Strings(onames) // Print Outputs for _, oname := range onames { creator := outputs.Outputs[oname] output := creator() printConfig(oname, output, "outputs", commented) } } func printFilteredGlobalSections(sectionFilters []string) { if sliceContains("global_tags", sectionFilters) { fmt.Printf(globalTagsConfig) } if sliceContains("agent", sectionFilters) { fmt.Printf(agentConfig) } } func printConfig(name string, p telegraf.PluginDescriber, op string, commented bool) { comment 
:= "" if commented { comment = "# " } fmt.Printf("\n%s# %s\n%s[[%s.%s]]", comment, p.Description(), comment, op, name) config := p.SampleConfig() if config == "" { fmt.Printf("\n%s # no configuration\n\n", comment) } else { lines := strings.Split(config, "\n") for i, line := range lines { if i == 0 || i == len(lines)-1 { fmt.Print("\n") continue } fmt.Print(strings.TrimRight(comment+line, " ") + "\n") } } } func sliceContains(name string, list []string) bool { for _, b := range list { if b == name { return true } } return false } // PrintInputConfig prints the config usage of a single input. func PrintInputConfig(name string) error { if creator, ok := inputs.Inputs[name]; ok { printConfig(name, creator(), "inputs", false) } else { return fmt.Errorf("Input %s not found", name) } return nil } // PrintOutputConfig prints the config usage of a single output. func PrintOutputConfig(name string) error { if creator, ok := outputs.Outputs[name]; ok { printConfig(name, creator(), "outputs", false) } else { return fmt.Errorf("Output %s not found", name) } return nil } // LoadDirectory loads all toml config files found in the specified path, recursively. func (c *Config) LoadDirectory(path string) error { walkfn := func(thispath string, info os.FileInfo, _ error) error { if info == nil { log.Printf("W! Telegraf is not permitted to read %s", thispath) return nil } if info.IsDir() { if strings.HasPrefix(info.Name(), "..") { // skip Kubernetes mounts, prevening loading the same config twice return filepath.SkipDir } return nil } name := info.Name() if len(name) < 6 || name[len(name)-5:] != ".conf" { return nil } err := c.LoadConfig(thispath) if err != nil { return err } return nil } return filepath.Walk(path, walkfn) } // Try to find a default config file at these locations (in order): // 1. $TELEGRAF_CONFIG_PATH // 2. $HOME/.telegraf/telegraf.conf // 3. /etc/telegraf/telegraf.conf // func getDefaultConfigPath() (string, error) { envfile := os.Getenv("TELEGRAF_CONFIG_PATH") homefile := os.ExpandEnv("${HOME}/.telegraf/telegraf.conf") etcfile := "/etc/telegraf/telegraf.conf" if runtime.GOOS == "windows" { programFiles := os.Getenv("ProgramFiles") if programFiles == "" { // Should never happen programFiles = `C:\Program Files` } etcfile = programFiles + `\Telegraf\telegraf.conf` } for _, path := range []string{envfile, homefile, etcfile} { if _, err := os.Stat(path); err == nil { log.Printf("I! 
Using config file: %s", path) return path, nil } } // if we got here, we didn't find a file in a default location return "", fmt.Errorf("No config file specified, and could not find one"+ " in $TELEGRAF_CONFIG_PATH, %s, or %s", homefile, etcfile) } // LoadConfig loads the given config file and applies it to c func (c *Config) LoadConfig(path string) error { var err error if path == "" { if path, err = getDefaultConfigPath(); err != nil { return err } } data, err := loadConfig(path) if err != nil { return fmt.Errorf("Error loading config file %s: %w", path, err) } if err = c.LoadConfigData(data); err != nil { return fmt.Errorf("Error loading config file %s: %w", path, err) } return nil } // LoadConfigData loads TOML-formatted config data func (c *Config) LoadConfigData(data []byte) error { tbl, err := parseConfig(data) if err != nil { return fmt.Errorf("Error parsing data: %s", err) } // Parse tags tables first: for _, tableName := range []string{"tags", "global_tags"} { if val, ok := tbl.Fields[tableName]; ok { subTable, ok := val.(*ast.Table) if !ok { return fmt.Errorf("invalid configuration, bad table name %q", tableName) } if err = c.toml.UnmarshalTable(subTable, c.Tags); err != nil { return fmt.Errorf("error parsing table name %q: %s", tableName, err) } } } // Parse agent table: if val, ok := tbl.Fields["agent"]; ok { subTable, ok := val.(*ast.Table) if !ok { return fmt.Errorf("invalid configuration, error parsing agent table") } if err = c.toml.UnmarshalTable(subTable, c.Agent); err != nil { return fmt.Errorf("error parsing [agent]: %w", err) } } if !c.Agent.OmitHostname { if c.Agent.Hostname == "" { hostname, err := os.Hostname() if err != nil { return err } c.Agent.Hostname = hostname } c.Tags["host"] = c.Agent.Hostname } if len(c.UnusedFields) > 0 { return fmt.Errorf("line %d: configuration specified the fields %q, but they weren't used", tbl.Line, keys(c.UnusedFields)) } // Parse all the rest of the plugins: for name, val := range tbl.Fields { subTable, ok := val.(*ast.Table) if !ok { return fmt.Errorf("invalid configuration, error parsing field %q as table", name) } switch name { case "agent", "global_tags", "tags": case "outputs": for pluginName, pluginVal := range subTable.Fields { switch pluginSubTable := pluginVal.(type) { // legacy [outputs.influxdb] support case *ast.Table: if err = c.addOutput(pluginName, pluginSubTable); err != nil { return fmt.Errorf("error parsing %s, %w", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addOutput(pluginName, t); err != nil { return fmt.Errorf("error parsing %s array, %w", pluginName, err) } } default: return fmt.Errorf("unsupported config format: %s", pluginName) } if len(c.UnusedFields) > 0 { return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) } } case "inputs", "plugins": for pluginName, pluginVal := range subTable.Fields { switch pluginSubTable := pluginVal.(type) { // legacy [inputs.cpu] support case *ast.Table: if err = c.addInput(pluginName, pluginSubTable); err != nil { return fmt.Errorf("error parsing %s, %w", pluginName, err) } case []*ast.Table: for _, t := range pluginSubTable { if err = c.addInput(pluginName, t); err != nil { return fmt.Errorf("error parsing %s, %w", pluginName, err) } } default: return fmt.Errorf("Unsupported config format: %s", pluginName) } if len(c.UnusedFields) > 0 { return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they 
weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) } } case "processors": for pluginName, pluginVal := range subTable.Fields { switch pluginSubTable := pluginVal.(type) { case []*ast.Table: for _, t := range pluginSubTable { if err = c.addProcessor(pluginName, t); err != nil { return fmt.Errorf("error parsing %s, %w", pluginName, err) } } default: return fmt.Errorf("Unsupported config format: %s", pluginName) } if len(c.UnusedFields) > 0 { return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) } } case "aggregators": for pluginName, pluginVal := range subTable.Fields { switch pluginSubTable := pluginVal.(type) { case []*ast.Table: for _, t := range pluginSubTable { if err = c.addAggregator(pluginName, t); err != nil { return fmt.Errorf("Error parsing %s, %s", pluginName, err) } } default: return fmt.Errorf("Unsupported config format: %s", pluginName) } if len(c.UnusedFields) > 0 { return fmt.Errorf("plugin %s.%s: line %d: configuration specified the fields %q, but they weren't used", name, pluginName, subTable.Line, keys(c.UnusedFields)) } } // Assume it's an input input for legacy config file support if no other // identifiers are present default: if err = c.addInput(name, subTable); err != nil { return fmt.Errorf("Error parsing %s, %s", name, err) } } } if len(c.Processors) > 1 { sort.Sort(c.Processors) } return nil } // trimBOM trims the Byte-Order-Marks from the beginning of the file. // this is for Windows compatibility only. // see https://github.com/influxdata/telegraf/issues/1378 func trimBOM(f []byte) []byte { return bytes.TrimPrefix(f, []byte("\xef\xbb\xbf")) } // escapeEnv escapes a value for inserting into a TOML string. func escapeEnv(value string) string { return envVarEscaper.Replace(value) } func loadConfig(config string) ([]byte, error) { u, err := url.Parse(config) if err != nil { return nil, err } switch u.Scheme { case "https", "http": return fetchConfig(u) default: // If it isn't a https scheme, try it as a file. } return ioutil.ReadFile(config) } func fetchConfig(u *url.URL) ([]byte, error) { req, err := http.NewRequest("GET", u.String(), nil) if err != nil { return nil, err } if v, exists := os.LookupEnv("INFLUX_TOKEN"); exists { req.Header.Add("Authorization", "Token "+v) } req.Header.Add("Accept", "application/toml") req.Header.Set("User-Agent", internal.ProductToken()) resp, err := http.DefaultClient.Do(req) if err != nil { return nil, err } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("failed to retrieve remote config: %s", resp.Status) } defer resp.Body.Close() return ioutil.ReadAll(resp.Body) } // parseConfig loads a TOML configuration from a provided path and // returns the AST produced from the TOML parser. When loading the file, it // will find environment variables and replace them. 
func parseConfig(contents []byte) (*ast.Table, error) { contents = trimBOM(contents) parameters := envVarRe.FindAllSubmatch(contents, -1) for _, parameter := range parameters { if len(parameter) != 3 { continue } var envVar []byte if parameter[1] != nil { envVar = parameter[1] } else if parameter[2] != nil { envVar = parameter[2] } else { continue } envVal, ok := os.LookupEnv(strings.TrimPrefix(string(envVar), "$")) if ok { envVal = escapeEnv(envVal) contents = bytes.Replace(contents, parameter[0], []byte(envVal), 1) } } return toml.Parse(contents) } func (c *Config) addAggregator(name string, table *ast.Table) error { creator, ok := aggregators.Aggregators[name] if !ok { return fmt.Errorf("Undefined but requested aggregator: %s", name) } aggregator := creator() conf, err := c.buildAggregator(name, table) if err != nil { return err } if err := c.toml.UnmarshalTable(table, aggregator); err != nil { return err } c.Aggregators = append(c.Aggregators, models.NewRunningAggregator(aggregator, conf)) return nil } func (c *Config) addProcessor(name string, table *ast.Table) error { creator, ok := processors.Processors[name] if !ok { return fmt.Errorf("Undefined but requested processor: %s", name) } processorConfig, err := c.buildProcessor(name, table) if err != nil { return err } rf, err := c.newRunningProcessor(creator, processorConfig, name, table) if err != nil { return err } c.Processors = append(c.Processors, rf) // save a copy for the aggregator rf, err = c.newRunningProcessor(creator, processorConfig, name, table) if err != nil { return err } c.AggProcessors = append(c.AggProcessors, rf) return nil } func (c *Config) newRunningProcessor( creator processors.StreamingCreator, processorConfig *models.ProcessorConfig, name string, table *ast.Table, ) (*models.RunningProcessor, error) { processor := creator() if p, ok := processor.(unwrappable); ok { if err := c.toml.UnmarshalTable(table, p.Unwrap()); err != nil { return nil, err } } else { if err := c.toml.UnmarshalTable(table, processor); err != nil { return nil, err } } rf := models.NewRunningProcessor(processor, processorConfig) return rf, nil } func (c *Config) addOutput(name string, table *ast.Table) error { if len(c.OutputFilters) > 0 && !sliceContains(name, c.OutputFilters) { return nil } creator, ok := outputs.Outputs[name] if !ok { return fmt.Errorf("Undefined but requested output: %s", name) } output := creator() // If the output has a SetSerializer function, then this means it can write // arbitrary types of output, so build the serializer and set it. switch t := output.(type) { case serializers.SerializerOutput: serializer, err := c.buildSerializer(name, table) if err != nil { return err } t.SetSerializer(serializer) } outputConfig, err := c.buildOutput(name, table) if err != nil { return err } if err := c.toml.UnmarshalTable(table, output); err != nil { return err } ro := models.NewRunningOutput(name, output, outputConfig, c.Agent.MetricBatchSize, c.Agent.MetricBufferLimit) c.Outputs = append(c.Outputs, ro) return nil } func (c *Config) addInput(name string, table *ast.Table) error { if len(c.InputFilters) > 0 && !sliceContains(name, c.InputFilters) { return nil } // Legacy support renaming io input to diskio if name == "io" { name = "diskio" } creator, ok := inputs.Inputs[name] if !ok { return fmt.Errorf("Undefined but requested input: %s", name) } input := creator() // If the input has a SetParser function, then this means it can accept // arbitrary types of input, so build the parser and set it. 
if t, ok := input.(parsers.ParserInput); ok { parser, err := c.buildParser(name, table) if err != nil { return err } t.SetParser(parser) } if t, ok := input.(parsers.ParserFuncInput); ok { config, err := c.getParserConfig(name, table) if err != nil { return err } t.SetParserFunc(func() (parsers.Parser, error) { return parsers.NewParser(config) }) } pluginConfig, err := c.buildInput(name, table) if err != nil { return err } if err := c.toml.UnmarshalTable(table, input); err != nil { return err } rp := models.NewRunningInput(input, pluginConfig) rp.SetDefaultTags(c.Tags) c.Inputs = append(c.Inputs, rp) return nil } // buildAggregator parses Aggregator specific items from the ast.Table, // builds the filter and returns a // models.AggregatorConfig to be inserted into models.RunningAggregator func (c *Config) buildAggregator(name string, tbl *ast.Table) (*models.AggregatorConfig, error) { conf := &models.AggregatorConfig{ Name: name, Delay: time.Millisecond * 100, Period: time.Second * 30, Grace: time.Second * 0, } c.getFieldDuration(tbl, "period", &conf.Period) c.getFieldDuration(tbl, "delay", &conf.Delay) c.getFieldDuration(tbl, "grace", &conf.Grace) c.getFieldBool(tbl, "drop_original", &conf.DropOriginal) c.getFieldString(tbl, "name_prefix", &conf.MeasurementPrefix) c.getFieldString(tbl, "name_suffix", &conf.MeasurementSuffix) c.getFieldString(tbl, "name_override", &conf.NameOverride) c.getFieldString(tbl, "alias", &conf.Alias) conf.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { if err := c.toml.UnmarshalTable(subtbl, conf.Tags); err != nil { return nil, fmt.Errorf("could not parse tags for input %s", name) } } } if c.hasErrs() { return nil, c.firstErr() } var err error conf.Filter, err = c.buildFilter(tbl) if err != nil { return conf, err } return conf, nil } // buildProcessor parses Processor specific items from the ast.Table, // builds the filter and returns a // models.ProcessorConfig to be inserted into models.RunningProcessor func (c *Config) buildProcessor(name string, tbl *ast.Table) (*models.ProcessorConfig, error) { conf := &models.ProcessorConfig{Name: name} c.getFieldInt64(tbl, "order", &conf.Order) c.getFieldString(tbl, "alias", &conf.Alias) if c.hasErrs() { return nil, c.firstErr() } var err error conf.Filter, err = c.buildFilter(tbl) if err != nil { return conf, err } return conf, nil } // buildFilter builds a Filter // (tagpass/tagdrop/namepass/namedrop/fieldpass/fielddrop) to // be inserted into the models.OutputConfig/models.InputConfig // to be used for glob filtering on tags and measurements func (c *Config) buildFilter(tbl *ast.Table) (models.Filter, error) { f := models.Filter{} c.getFieldStringSlice(tbl, "namepass", &f.NamePass) c.getFieldStringSlice(tbl, "namedrop", &f.NameDrop) c.getFieldStringSlice(tbl, "pass", &f.FieldPass) c.getFieldStringSlice(tbl, "fieldpass", &f.FieldPass) c.getFieldStringSlice(tbl, "drop", &f.FieldDrop) c.getFieldStringSlice(tbl, "fielddrop", &f.FieldDrop) c.getFieldTagFilter(tbl, "tagpass", &f.TagPass) c.getFieldTagFilter(tbl, "tagdrop", &f.TagDrop) c.getFieldStringSlice(tbl, "tagexclude", &f.TagExclude) c.getFieldStringSlice(tbl, "taginclude", &f.TagInclude) if c.hasErrs() { return f, c.firstErr() } if err := f.Compile(); err != nil { return f, err } return f, nil } // buildInput parses input specific items from the ast.Table, // builds the filter and returns a // models.InputConfig to be inserted into models.RunningInput func (c *Config) buildInput(name string, tbl 
*ast.Table) (*models.InputConfig, error) { cp := &models.InputConfig{Name: name} c.getFieldDuration(tbl, "interval", &cp.Interval) c.getFieldDuration(tbl, "precision", &cp.Precision) c.getFieldDuration(tbl, "collection_jitter", &cp.CollectionJitter) c.getFieldString(tbl, "name_prefix", &cp.MeasurementPrefix) c.getFieldString(tbl, "name_suffix", &cp.MeasurementSuffix) c.getFieldString(tbl, "name_override", &cp.NameOverride) c.getFieldString(tbl, "alias", &cp.Alias) cp.Tags = make(map[string]string) if node, ok := tbl.Fields["tags"]; ok { if subtbl, ok := node.(*ast.Table); ok { if err := c.toml.UnmarshalTable(subtbl, cp.Tags); err != nil { return nil, fmt.Errorf("could not parse tags for input %s", name) } } } if c.hasErrs() { return nil, c.firstErr() } var err error cp.Filter, err = c.buildFilter(tbl) if err != nil { return cp, err } return cp, nil } // buildParser grabs the necessary entries from the ast.Table for creating // a parsers.Parser object, and creates it, which can then be added onto // an Input object. func (c *Config) buildParser(name string, tbl *ast.Table) (parsers.Parser, error) { config, err := c.getParserConfig(name, tbl) if err != nil { return nil, err } return parsers.NewParser(config) } func (c *Config) getParserConfig(name string, tbl *ast.Table) (*parsers.Config, error) { pc := &parsers.Config{ JSONStrict: true, } c.getFieldString(tbl, "data_format", &pc.DataFormat) // Legacy support, exec plugin originally parsed JSON by default. if name == "exec" && pc.DataFormat == "" { pc.DataFormat = "json" } else if pc.DataFormat == "" { pc.DataFormat = "influx" } c.getFieldString(tbl, "separator", &pc.Separator) c.getFieldStringSlice(tbl, "templates", &pc.Templates) c.getFieldStringSlice(tbl, "tag_keys", &pc.TagKeys) c.getFieldStringSlice(tbl, "json_string_fields", &pc.JSONStringFields) c.getFieldString(tbl, "json_name_key", &pc.JSONNameKey) c.getFieldString(tbl, "json_query", &pc.JSONQuery) c.getFieldString(tbl, "json_time_key", &pc.JSONTimeKey) c.getFieldString(tbl, "json_time_format", &pc.JSONTimeFormat) c.getFieldString(tbl, "json_timezone", &pc.JSONTimezone) c.getFieldBool(tbl, "json_strict", &pc.JSONStrict) c.getFieldString(tbl, "data_type", &pc.DataType) c.getFieldString(tbl, "collectd_auth_file", &pc.CollectdAuthFile) c.getFieldString(tbl, "collectd_security_level", &pc.CollectdSecurityLevel) c.getFieldString(tbl, "collectd_parse_multivalue", &pc.CollectdSplit) c.getFieldStringSlice(tbl, "collectd_typesdb", &pc.CollectdTypesDB) c.getFieldString(tbl, "dropwizard_metric_registry_path", &pc.DropwizardMetricRegistryPath) c.getFieldString(tbl, "dropwizard_time_path", &pc.DropwizardTimePath) c.getFieldString(tbl, "dropwizard_time_format", &pc.DropwizardTimeFormat) c.getFieldString(tbl, "dropwizard_tags_path", &pc.DropwizardTagsPath) c.getFieldStringMap(tbl, "dropwizard_tag_paths", &pc.DropwizardTagPathsMap) //for grok data_format c.getFieldStringSlice(tbl, "grok_named_patterns", &pc.GrokNamedPatterns) c.getFieldStringSlice(tbl, "grok_patterns", &pc.GrokPatterns) c.getFieldString(tbl, "grok_custom_patterns", &pc.GrokCustomPatterns) c.getFieldStringSlice(tbl, "grok_custom_pattern_files", &pc.GrokCustomPatternFiles) c.getFieldString(tbl, "grok_timezone", &pc.GrokTimezone) c.getFieldString(tbl, "grok_unique_timestamp", &pc.GrokUniqueTimestamp) //for csv parser c.getFieldStringSlice(tbl, "csv_column_names", &pc.CSVColumnNames) c.getFieldStringSlice(tbl, "csv_column_types", &pc.CSVColumnTypes) c.getFieldStringSlice(tbl, "csv_tag_columns", &pc.CSVTagColumns) c.getFieldString(tbl, 
"csv_timezone", &pc.CSVTimezone) c.getFieldString(tbl, "csv_delimiter", &pc.CSVDelimiter) c.getFieldString(tbl, "csv_comment", &pc.CSVComment) c.getFieldString(tbl, "csv_measurement_column", &pc.CSVMeasurementColumn) c.getFieldString(tbl, "csv_timestamp_column", &pc.CSVTimestampColumn) c.getFieldString(tbl, "csv_timestamp_format", &pc.CSVTimestampFormat) c.getFieldInt(tbl, "csv_header_row_count", &pc.CSVHeaderRowCount) c.getFieldInt(tbl, "csv_skip_rows", &pc.CSVSkipRows) c.getFieldInt(tbl, "csv_skip_columns", &pc.CSVSkipColumns) c.getFieldBool(tbl, "csv_trim_space", &pc.CSVTrimSpace) c.getFieldStringSlice(tbl, "csv_skip_values", &pc.CSVSkipValues) c.getFieldStringSlice(tbl, "form_urlencoded_tag_keys", &pc.FormUrlencodedTagKeys) pc.MetricName = name if c.hasErrs() { return nil, c.firstErr() } return pc, nil } // buildSerializer grabs the necessary entries from the ast.Table for creating // a serializers.Serializer object, and creates it, which can then be added onto // an Output object. func (c *Config) buildSerializer(name string, tbl *ast.Table) (serializers.Serializer, error) { sc := &serializers.Config{TimestampUnits: time.Duration(1 * time.Second)} c.getFieldString(tbl, "data_format", &sc.DataFormat) if sc.DataFormat == "" { sc.DataFormat = "influx" } c.getFieldString(tbl, "prefix", &sc.Prefix) c.getFieldString(tbl, "template", &sc.Template) c.getFieldStringSlice(tbl, "templates", &sc.Templates) c.getFieldString(tbl, "carbon2_format", &sc.Carbon2Format) c.getFieldInt(tbl, "influx_max_line_bytes", &sc.InfluxMaxLineBytes) c.getFieldBool(tbl, "influx_sort_fields", &sc.InfluxSortFields) c.getFieldBool(tbl, "influx_uint_support", &sc.InfluxUintSupport) c.getFieldBool(tbl, "graphite_tag_support", &sc.GraphiteTagSupport) c.getFieldString(tbl, "graphite_separator", &sc.GraphiteSeparator) c.getFieldDuration(tbl, "json_timestamp_units", &sc.TimestampUnits) c.getFieldBool(tbl, "splunkmetric_hec_routing", &sc.HecRouting) c.getFieldBool(tbl, "splunkmetric_multimetric", &sc.SplunkmetricMultiMetric) c.getFieldStringSlice(tbl, "wavefront_source_override", &sc.WavefrontSourceOverride) c.getFieldBool(tbl, "wavefront_use_strict", &sc.WavefrontUseStrict) c.getFieldBool(tbl, "prometheus_export_timestamp", &sc.PrometheusExportTimestamp) c.getFieldBool(tbl, "prometheus_sort_metrics", &sc.PrometheusSortMetrics) c.getFieldBool(tbl, "prometheus_string_as_label", &sc.PrometheusStringAsLabel) if c.hasErrs() { return nil, c.firstErr() } return serializers.NewSerializer(sc) } // buildOutput parses output specific items from the ast.Table, // builds the filter and returns an // models.OutputConfig to be inserted into models.RunningInput // Note: error exists in the return for future calls that might require error func (c *Config) buildOutput(name string, tbl *ast.Table) (*models.OutputConfig, error) { filter, err := c.buildFilter(tbl) if err != nil { return nil, err } oc := &models.OutputConfig{ Name: name, Filter: filter, } // TODO: support FieldPass/FieldDrop on outputs c.getFieldDuration(tbl, "flush_interval", &oc.FlushInterval) c.getFieldDuration(tbl, "flush_jitter", &oc.FlushJitter) c.getFieldInt(tbl, "metric_buffer_limit", &oc.MetricBufferLimit) c.getFieldInt(tbl, "metric_batch_size", &oc.MetricBatchSize) c.getFieldString(tbl, "alias", &oc.Alias) c.getFieldString(tbl, "name_override", &oc.NameOverride) c.getFieldString(tbl, "name_suffix", &oc.NameSuffix) c.getFieldString(tbl, "name_prefix", &oc.NamePrefix) if c.hasErrs() { return nil, c.firstErr() } return oc, nil } func (c *Config) missingTomlField(typ 
reflect.Type, key string) error { switch key { case "alias", "carbon2_format", "collectd_auth_file", "collectd_parse_multivalue", "collectd_security_level", "collectd_typesdb", "collection_jitter", "csv_column_names", "csv_column_types", "csv_comment", "csv_delimiter", "csv_header_row_count", "csv_measurement_column", "csv_skip_columns", "csv_skip_rows", "csv_tag_columns", "csv_timestamp_column", "csv_timestamp_format", "csv_timezone", "csv_trim_space", "csv_skip_values", "data_format", "data_type", "delay", "drop", "drop_original", "dropwizard_metric_registry_path", "dropwizard_tag_paths", "dropwizard_tags_path", "dropwizard_time_format", "dropwizard_time_path", "fielddrop", "fieldpass", "flush_interval", "flush_jitter", "form_urlencoded_tag_keys", "grace", "graphite_separator", "graphite_tag_support", "grok_custom_pattern_files", "grok_custom_patterns", "grok_named_patterns", "grok_patterns", "grok_timezone", "grok_unique_timestamp", "influx_max_line_bytes", "influx_sort_fields", "influx_uint_support", "interval", "json_name_key", "json_query", "json_strict", "json_string_fields", "json_time_format", "json_time_key", "json_timestamp_units", "json_timezone", "metric_batch_size", "metric_buffer_limit", "name_override", "name_prefix", "name_suffix", "namedrop", "namepass", "order", "pass", "period", "precision", "prefix", "prometheus_export_timestamp", "prometheus_sort_metrics", "prometheus_string_as_label", "separator", "splunkmetric_hec_routing", "splunkmetric_multimetric", "tag_keys", "tagdrop", "tagexclude", "taginclude", "tagpass", "tags", "template", "templates", "wavefront_source_override", "wavefront_use_strict": // ignore fields that are common to all plugins. default: c.UnusedFields[key] = true } return nil } func (c *Config) getFieldString(tbl *ast.Table, fieldName string, target *string) { if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { *target = str.Value } } } } func (c *Config) getFieldDuration(tbl *ast.Table, fieldName string, target interface{}) { if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { d, err := time.ParseDuration(str.Value) if err != nil { c.addError(tbl, fmt.Errorf("error parsing duration: %w", err)) return } targetVal := reflect.ValueOf(target).Elem() targetVal.Set(reflect.ValueOf(d)) } } } } func (c *Config) getFieldBool(tbl *ast.Table, fieldName string, target *bool) { var err error if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { switch t := kv.Value.(type) { case *ast.Boolean: *target, err = t.Boolean() if err != nil { c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value)) return } case *ast.String: *target, err = strconv.ParseBool(t.Value) if err != nil { c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value)) return } default: c.addError(tbl, fmt.Errorf("unknown boolean value type %q, expecting boolean", kv.Value.Source())) return } } } } func (c *Config) getFieldInt(tbl *ast.Table, fieldName string, target *int) { if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if iAst, ok := kv.Value.(*ast.Integer); ok { i, err := iAst.Int() if err != nil { c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value)) return } *target = int(i) } } } } func (c *Config) getFieldInt64(tbl *ast.Table, fieldName string, target *int64) { if node, ok := 
tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if iAst, ok := kv.Value.(*ast.Integer); ok { i, err := iAst.Int() if err != nil { c.addError(tbl, fmt.Errorf("unexpected int type %q, expecting int", iAst.Value)) return } *target = i } } } } func (c *Config) getFieldStringSlice(tbl *ast.Table, fieldName string, target *[]string) { if node, ok := tbl.Fields[fieldName]; ok { if kv, ok := node.(*ast.KeyValue); ok { if ary, ok := kv.Value.(*ast.Array); ok { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { *target = append(*target, str.Value) } } } } } } func (c *Config) getFieldTagFilter(tbl *ast.Table, fieldName string, target *[]models.TagFilter) { if node, ok := tbl.Fields[fieldName]; ok { if subtbl, ok := node.(*ast.Table); ok { for name, val := range subtbl.Fields { if kv, ok := val.(*ast.KeyValue); ok { tagfilter := models.TagFilter{Name: name} if ary, ok := kv.Value.(*ast.Array); ok { for _, elem := range ary.Value { if str, ok := elem.(*ast.String); ok { tagfilter.Filter = append(tagfilter.Filter, str.Value) } } } *target = append(*target, tagfilter) } } } } } func (c *Config) getFieldStringMap(tbl *ast.Table, fieldName string, target *map[string]string) { *target = map[string]string{} if node, ok := tbl.Fields[fieldName]; ok { if subtbl, ok := node.(*ast.Table); ok { for name, val := range subtbl.Fields { if kv, ok := val.(*ast.KeyValue); ok { if str, ok := kv.Value.(*ast.String); ok { (*target)[name] = str.Value } } } } } } func keys(m map[string]bool) []string { result := []string{} for k := range m { result = append(result, k) } return result } func (c *Config) hasErrs() bool { return len(c.errs) > 0 } func (c *Config) firstErr() error { if len(c.errs) == 0 { return nil } return c.errs[0] } func (c *Config) addError(tbl *ast.Table, err error) { c.errs = append(c.errs, fmt.Errorf("line %d:%d: %w", tbl.Line, tbl.Position, err)) } // unwrappable lets you retrieve the original telegraf.Processor from the // StreamingProcessor. This is necessary because the toml Unmarshaller won't // look inside composed types. type unwrappable interface { Unwrap() telegraf.Processor }
[ "\"TELEGRAF_CONFIG_PATH\"", "\"ProgramFiles\"" ]
[]
[ "TELEGRAF_CONFIG_PATH", "ProgramFiles" ]
[]
["TELEGRAF_CONFIG_PATH", "ProgramFiles"]
go
2
0
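The parseConfig function in the config.go record above substitutes ${VAR} and $VAR references in the raw TOML before parsing, leaving unset variables untouched and escaping quotes and backslashes in the substituted value. The following Python sketch mirrors that behaviour for illustration; it is not Telegraf code, and the function name is an assumption of this example.

import os
import re

# Rough Python equivalent of the ${VAR} / $VAR substitution performed by
# parseConfig before the TOML text is handed to the parser. Escaping is the
# same two replacements used by envVarEscaper in the record above.
ENV_VAR_RE = re.compile(r'\$\{(\w+)\}|\$(\w+)')

def substitute_env_vars(contents: str) -> str:
    def replace(match):
        name = match.group(1) or match.group(2)
        value = os.environ.get(name)
        if value is None:
            return match.group(0)  # unset variables are left as-is
        return value.replace('\\', '\\\\').replace('"', '\\"')
    return ENV_VAR_RE.sub(replace, contents)

# With INFLUX_TOKEN set, a line such as token = "${INFLUX_TOKEN}" expands in
# place; with it unset, the literal placeholder is preserved.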
server/router/internal/middleware.go
package internal import ( "bytes" "gf-vue-admin/app/api/request" "gf-vue-admin/app/api/response" api "gf-vue-admin/app/api/system" model "gf-vue-admin/app/model/system" service "gf-vue-admin/app/service/system" "gf-vue-admin/library/global" "gf-vue-admin/library/utils" jwt "github.com/gogf/gf-jwt" "github.com/gogf/gf/frame/g" "github.com/gogf/gf/net/ghttp" "github.com/gogf/gf/util/gconv" "io/ioutil" "strconv" "time" ) var Middleware = new(middleware) type middleware struct { id int err error body []byte result *model.Admin } //@author: [SliverHorn](https://github.com/SliverHorn) //@description: 验证token有效性 func (m *middleware) JwtAuth(r *ghttp.Request) { api.GfJWTMiddleware.MiddlewareFunc()(r) _jwt, err := api.GfJWTMiddleware.ParseToken(r) // 解析token if err != nil { if err == jwt.ErrExpiredToken { _ = r.Response.WriteJson(&response.Response{Code: 7, Data: g.Map{"reload": true}, Message: "授权已过期!"}) r.ExitAll() } _ = r.Response.WriteJson(&response.Response{Code: 7, Data: g.Map{"reload": true}, Error: err}) r.ExitAll() } if _jwt != nil { token := _jwt.Raw if service.JwtBlacklist.IsBlacklist(token) { _ = r.Response.WriteJson(&response.Response{Code: 7, Data: g.Map{"reload": true}, Message: "您的帐户异地登陆或令牌失效!"}) r.ExitAll() } var claims = gconv.Map(_jwt.Claims) r.SetParam("claims", _jwt.Claims) r.SetParam("admin_authority_id", claims["admin_authority_id"]) if global.Config.System.UseMultipoint { if !service.JwtBlacklist.ValidatorRedisToken(gconv.String(claims["admin_uuid"]), token) { _ = r.Response.WriteJson(&response.Response{Code: 7, Data: g.Map{"reload": true}, Message: "Token鉴权失败!"}) r.Exit() } } } r.Middleware.Next() } //@author: [SliverHorn](https://github.com/SliverHorn) //@description: 拦截器 func (m *middleware) CasbinRbac(r *ghttp.Request) { // 获取请求的URI obj := r.Request.URL.RequestURI() // 获取请求方法 act := r.Request.Method // 获取用户的角色 sub := r.GetParam("admin_authority_id") e := service.Casbin.Casbin() // 判断策略中是否存在 success, _ := e.Enforce(sub, obj, act) if global.Config.System.Env == "develop" || success { r.Middleware.Next() } else { _ = r.Response.WriteJson(&response.Response{Code: 7, Data: g.Map{}, Message: "权限不足"}) r.ExitAll() } } //@author: [SliverHorn](https://github.com/SliverHorn) //@description: 操作记录中间件 func (m *middleware) OperationRecord(r *ghttp.Request) { // Request if m.body, m.err = ioutil.ReadAll(r.Request.Body); m.err != nil { g.Log().Error(g.Map{"err": m.err}) } r.Request.Body = ioutil.NopCloser(bytes.NewBuffer(m.body)) if token, err := api.GfJWTMiddleware.ParseToken(r); err != nil { // 优先从jwt获取用户信息 id, _ := strconv.Atoi(r.Request.Header.Get("x-user-id")) if m.result, m.err = service.Admin.FindAdminById(&request.GetById{Id: uint(id)}); m.err != nil { g.Log().Error(`Function service.Admin.FindAdminById() Failed!`, g.Map{"err": m.err}) } } else { claims := gconv.Map(token.Claims) uuid := gconv.String(claims["admin_uuid"]) if m.result, m.err = service.Admin.FindAdmin(&request.GetByUuid{Uuid: uuid}); m.err != nil { g.Log().Error(`Function service.Admin.FindAdmin() Failed!`, g.Map{"err": m.err}) } m.id = int(m.result.ID) } record := request.CreateOperationRecord{ BaseOperationRecord: request.BaseOperationRecord{ Ip: r.GetClientIp(), Method: r.Request.Method, Path: r.Request.URL.Path, Agent: r.Request.UserAgent(), Request: string(m.body), UserID: m.id, }, } now := time.Now() r.Middleware.Next() // Response latency := time.Now().Sub(now) if err := r.GetError(); err != nil { record.ErrorMessage = err.Error() } record.Status = r.Response.Status record.Latency = latency.Microseconds() 
record.Response = string(r.Response.Buffer()) if err := service.OperationRecord.Create(&record); err != nil { g.Log().Error("create operation record error:", g.Map{"err": err}) } str := "接收到的请求为" + record.Request + "\n" + "请求方式为" + record.Method + "\n" + "报错信息如下" + record.ErrorMessage + "\n" + "耗时" + latency.String() + "\n" if global.Config.System.ErrorToEmail { if record.Status != 200 { subject := m.result.Username + "" + record.Ip + "调用了" + record.Path + "报错了" if err := utils.Email.ErrorToEmail(subject, str); err != nil { g.Log().Errorf("Function utils.Email.ErrorToEmail Failed!", g.Map{"err": err}) } } } }
[]
[]
[]
[]
[]
go
null
null
null
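The OperationRecord middleware in the middleware.go record above captures the request body up front, runs the handler, then stores the method, path, status, error message and latency (in microseconds). The sketch below restates that pattern in framework-neutral Python; the request dictionary, handler signature and store callable are all assumptions of this example, not the gf/ghttp API.

import time

def with_operation_record(handler, store):
    # Wrap a request handler so that every call is recorded with its
    # method, path, body, status, error message and latency in microseconds.
    def wrapped(request):
        record = {
            "ip": request.get("ip"),
            "method": request.get("method"),
            "path": request.get("path"),
            "body": request.get("body"),
        }
        start = time.monotonic()
        try:
            response = handler(request)
            record["status"] = response.get("status", 200)
            return response
        except Exception as exc:
            record["status"] = 500
            record["error_message"] = str(exc)
            raise
        finally:
            record["latency_us"] = int((time.monotonic() - start) * 1_000_000)
            store(record)
    return wrapped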
config.py
import pymongo
import telebot
from dotenv import load_dotenv
import os

load_dotenv()

ckey = os.getenv("ckey")
csecret = os.getenv("csecret")
debug = (os.getenv("DEBUG") == 'True')
token = os.getenv("token")
url = os.getenv("url")
db_host = os.getenv("db_host")
api_id = os.getenv("api_id")
api_hash = os.getenv("api_hash")

bot = telebot.TeleBot(
    token,
    threaded=True
)

client = pymongo.MongoClient(db_host)
[]
[]
[ "ckey", "token", "csecret", "api_id", "DEBUG", "url", "api_hash", "db_host" ]
[]
["ckey", "token", "csecret", "api_id", "DEBUG", "url", "api_hash", "db_host"]
python
8
0
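One detail of the config.py record above worth noting: debug = (os.getenv("DEBUG") == 'True') treats only the exact string True as truthy, so DEBUG=true or DEBUG=1 silently disables debug mode. If that behaviour is unwanted, a small helper along these lines is a common hardening step; it is an optional sketch, not part of the original file.

import os

def env_flag(name: str, default: bool = False) -> bool:
    # Accept the usual truthy spellings instead of only the literal "True".
    value = os.getenv(name)
    if value is None:
        return default
    return value.strip().lower() in ("1", "true", "yes", "on")

# env_flag("DEBUG") is True for DEBUG=1, DEBUG=true, DEBUG=YES, and so on.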
dataServer/heartbeat/heartbeat.go
package heartbeat

import (
    "ckoss/pkg/rabbitmq"
    "os"
    "time"
)

// export RABBITMQ_SERVER=amqp://test:test@localhost:5672
func StartHeartbeat() {
    // Heartbeat: create a queue and use it to send this dataServer's
    // listen address to the "apiServers" exchange.
    q := rabbitmq.New(os.Getenv("RABBITMQ_SERVER"))
    defer q.Close()
    for {
        // Publish the dataServer's listen address to the "apiServers" exchange.
        q.Publish("apiServers", os.Getenv("LISTEN_ADDRESS"))
        time.Sleep(5 * time.Second)
    }
}
[ "\"RABBITMQ_SERVER\"", "\"LISTEN_ADDRESS\"" ]
[]
[ "LISTEN_ADDRESS", "RABBITMQ_SERVER" ]
[]
["LISTEN_ADDRESS", "RABBITMQ_SERVER"]
go
2
0
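The heartbeat.go record above publishes the dataServer's listen address to the apiServers exchange every five seconds. The same loop looks like this in Python; the publish callable stands in for the project's rabbitmq helper and is an assumption of this sketch, as is the function name.

import os
import time

def start_heartbeat(publish, interval_seconds: float = 5.0) -> None:
    # Periodically announce this dataServer's listen address on the
    # "apiServers" exchange as a heartbeat.
    listen_address = os.environ["LISTEN_ADDRESS"]
    while True:
        publish("apiServers", listen_address)
        time.sleep(interval_seconds)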
main.go
package main import ( "crypto/tls" "fmt" "os" "path" "strings" "sync" "github.com/gorilla/websocket" "github.com/sirupsen/logrus" "github.com/vxcontrol/sharm/term" ) const defSharmHost = "sharm.io" const defSharmLogFile = "sharm.log" var log *logrus.Logger var cstDialer = websocket.Dialer{ Subprotocols: []string{"sharm-stream"}, ReadBufferSize: 1024 * 100, WriteBufferSize: 1024 * 100, } func exitHandler() { log.Info("Sharm client was exited") } func initLogging(logPath, logLevel string) { logrus.RegisterExitHandler(exitHandler) log = logrus.New() log.Out = os.Stderr if logLevel != "" { logPathFile := path.Join(path.Clean(logPath), defSharmLogFile) logFile, err := os.OpenFile(logPathFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666) if err == nil { log.Out = logFile } else { log.Error("Failed to log to file, using default stderr") } } switch logLevel { case "trace": log.SetLevel(logrus.TraceLevel) case "debug": log.SetLevel(logrus.DebugLevel) case "info": log.SetLevel(logrus.InfoLevel) case "warning": log.SetLevel(logrus.WarnLevel) case "error": log.SetLevel(logrus.ErrorLevel) case "fatal": log.SetLevel(logrus.FatalLevel) case "panic": log.SetLevel(logrus.PanicLevel) default: log.SetLevel(logrus.FatalLevel) } } func main() { token := os.Getenv("TOKEN") if len(os.Args) < 2 && token == "" { fmt.Printf("Usage: %s <token> [command]\n", os.Args[0]) os.Exit(0) } host := defSharmHost logLevel := "" logPath := "." command := "" if os.Getenv("HOST") != "" { host = os.Getenv("HOST") } if os.Getenv("LOG_LEVEL") != "" { logLevel = os.Getenv("LOG_LEVEL") } if os.Getenv("LOG_PATH") != "" { logPath = os.Getenv("LOG_PATH") } if os.Getenv("COMMAND") != "" { command = os.Getenv("COMMAND") } if token == "" { token = os.Args[1] } if command == "" && len(os.Args) >= 3 { command = strings.Join(os.Args[2:], " ") } initLogging(logPath, logLevel) log.WithFields(logrus.Fields{ "HOST": host, "LOG_LEVEL": logLevel, "LOG_PATH": logPath, "COMMAND": command, "TOKEN": token, }).Debug("Sharm client has initialized by next vars") dialer := cstDialer dialer.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} conn, _, err := dialer.Dial(fmt.Sprintf("wss://%s/stream/%s", host, token), nil) if err != nil { log.WithFields(logrus.Fields{ "error": err.Error(), }).Fatal("Can't create connection to Sharm server") } defer conn.Close() pty := &term.Term{} if err = pty.Start(command); err != nil { log.WithFields(logrus.Fields{ "error": err.Error(), }).Fatal("Can't create new terminal") } defer pty.Close() log.Info("Sharm client has started") context := &clientContext{ connection: conn, pty: pty, writeMutex: &sync.Mutex{}, } // TODO: here may use reconnect logic context.goHandleClient() }
[ "\"TOKEN\"", "\"HOST\"", "\"HOST\"", "\"LOG_LEVEL\"", "\"LOG_LEVEL\"", "\"LOG_PATH\"", "\"LOG_PATH\"", "\"COMMAND\"", "\"COMMAND\"" ]
[]
[ "HOST", "LOG_LEVEL", "LOG_PATH", "TOKEN", "COMMAND" ]
[]
["HOST", "LOG_LEVEL", "LOG_PATH", "TOKEN", "COMMAND"]
go
5
0
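The main.go record above resolves its settings in a fixed order: a built-in default, then an environment variable if it is non-empty, with the token (and optionally the command) also accepted on the command line. The Python sketch below mirrors that precedence for illustration; the function name is an assumption, while the default values are the ones visible in the record.

import os
import sys

def load_settings(argv):
    # Environment variables override the built-in defaults; the token and
    # command may also be supplied as positional command-line arguments.
    settings = {
        "host": os.getenv("HOST") or "sharm.io",
        "log_level": os.getenv("LOG_LEVEL") or "",
        "log_path": os.getenv("LOG_PATH") or ".",
        "command": os.getenv("COMMAND") or "",
        "token": os.getenv("TOKEN") or "",
    }
    if not settings["token"] and len(argv) >= 2:
        settings["token"] = argv[1]
    if not settings["command"] and len(argv) >= 3:
        settings["command"] = " ".join(argv[2:])
    return settings

# Example: load_settings(sys.argv)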
getquote.py
#!/usr/bin/python3
"""getquote script for ledger-cli."""
import configparser
import os
import sys
from urllib import error
from urllib import request
from datetime import datetime

cfg = configparser.ConfigParser()
cfg.read('getquote.ini')
IEX_TOKEN = os.getenv('IEX_TOKEN') or cfg.get('IEX', 'TOKEN')


def quote(symbol: str) -> str:
    """Returns the current price of a security.

    Args:
        symbol: security to quote.

    Returns:
        Price of the security.
    """
    url = f'https://cloud.iexapis.com/stable/stock/{symbol}/quote/latestPrice?token={IEX_TOKEN}'
    try:
        with request.urlopen(url) as resp:
            return resp.read().decode('utf-8')
    except error.HTTPError as exp:
        raise LookupError(f'Unable to quote "{symbol}"') from exp


def ledger_format(symbol: str) -> str:
    """Returns a quote formatted for ledger-cli.

    Args:
        symbol: security to quote.

    Returns:
        Ledger-Cli Price db entry for the quote.
    """
    date = datetime.now().astimezone().strftime('%Y/%m/%d %H:%M:%S%z')
    price = quote(symbol)
    return f'{date} {symbol} ${price}'


def main():
    """Entrypoint for getquote."""
    symbol = sys.argv[1]
    print(ledger_format(symbol))


if __name__ == '__main__':
    main()
[]
[]
[ "IEX_TOKEN" ]
[]
["IEX_TOKEN"]
python
1
0
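Because quote() in the getquote.py record above only touches the network through urllib, it can be unit-tested by patching urllib.request.urlopen. The test below is a sketch: the module name getquote is assumed from the record's filename, and a dummy IEX_TOKEN is set before import so the module-level token lookup does not fall through to getquote.ini.

import os
import unittest
from unittest import mock

os.environ.setdefault("IEX_TOKEN", "test-token")  # satisfy the import-time lookup
import getquote  # module name assumed from the record's filename


class QuoteTest(unittest.TestCase):
    def test_quote_returns_decoded_body(self):
        fake_resp = mock.MagicMock()
        fake_resp.read.return_value = b"123.45"
        with mock.patch("urllib.request.urlopen") as urlopen:
            # quote() uses urlopen as a context manager, so stub __enter__.
            urlopen.return_value.__enter__.return_value = fake_resp
            self.assertEqual(getquote.quote("AAPL"), "123.45")


if __name__ == "__main__":
    unittest.main()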
lib/galaxy_test/base/api.py
import os from contextlib import contextmanager from urllib.parse import urlencode from .api_asserts import ( assert_error_code_is, assert_has_keys, assert_not_has_keys, assert_status_code_is, assert_status_code_is_ok, ) from .api_util import ( ADMIN_TEST_USER, get_admin_api_key, get_user_api_key, OTHER_USER, TEST_USER, ) from .interactor import TestCaseGalaxyInteractor as BaseInteractor class UsesApiTestCaseMixin: def tearDown(self): if os.environ.get('GALAXY_TEST_EXTERNAL') is None: # Only kill running jobs after test for managed test instances for job in self.galaxy_interactor.get('jobs?state=running&?user_details=true').json(): self._delete("jobs/%s" % job['id']) def _api_url(self, path, params=None, use_key=None, use_admin_key=None): if not params: params = {} url = f"{self.url}/api/{path}" if use_key: params["key"] = self.galaxy_interactor.api_key if use_admin_key: params["key"] = self.galaxy_interactor.master_api_key query = urlencode(params) if query: url = f"{url}?{query}" return url def _setup_interactor(self): self.user_api_key = get_user_api_key() self.master_api_key = get_admin_api_key() self.galaxy_interactor = self._get_interactor() def _get_interactor(self, api_key=None): return ApiTestInteractor(self, api_key=api_key) def _setup_user(self, email, password=None, is_admin=True): self.galaxy_interactor.ensure_user_with_email(email, password=password) users = self._get("users", admin=is_admin).json() user = [user for user in users if user["email"] == email][0] return user def _setup_user_get_key(self, email, password=None, is_admin=True): user = self._setup_user(email, password, is_admin) return user, self._post("users/%s/api_key" % user["id"], admin=True).json() @contextmanager def _different_user(self, email=OTHER_USER): """ Use in test cases to switch get/post operations to act as new user ..code-block:: python with self._different_user("[email protected]"): self._get("histories") # Gets [email protected] histories. """ original_api_key = self.user_api_key original_interactor_key = self.galaxy_interactor.api_key user, new_key = self._setup_user_get_key(email) try: self.user_api_key = new_key self.galaxy_interactor.api_key = new_key yield finally: self.user_api_key = original_api_key self.galaxy_interactor.api_key = original_interactor_key def _get(self, *args, **kwds): return self.galaxy_interactor.get(*args, **kwds) def _post(self, *args, **kwds): return self.galaxy_interactor.post(*args, **kwds) def _delete(self, *args, **kwds): return self.galaxy_interactor.delete(*args, **kwds) def _put(self, *args, **kwds): return self.galaxy_interactor.put(*args, **kwds) def _patch(self, *args, **kwds): return self.galaxy_interactor.patch(*args, **kwds) def _assert_status_code_is_ok(self, response): assert_status_code_is_ok(response) def _assert_status_code_is(self, response, expected_status_code): assert_status_code_is(response, expected_status_code) def _assert_has_keys(self, response, *keys): assert_has_keys(response, *keys) def _assert_not_has_keys(self, response, *keys): assert_not_has_keys(response, *keys) def _assert_error_code_is(self, response, error_code): assert_error_code_is(response, error_code) def _random_key(self): # Used for invalid request testing... return "1234567890123456" _assert_has_key = _assert_has_keys class ApiTestInteractor(BaseInteractor): """ Specialized variant of the API interactor (originally developed for tool functional tests) for testing the API generally. 
""" def __init__(self, test_case, api_key=None): admin = getattr(test_case, "require_admin_user", False) test_user = TEST_USER if not admin else ADMIN_TEST_USER super().__init__(test_case, test_user=test_user, api_key=api_key) # This variant the lower level get and post methods are meant to be used # directly to test API - instead of relying on higher-level constructs for # specific pieces of the API (the way it is done with the variant for tool) # testing. def get(self, *args, **kwds): return self._get(*args, **kwds) def post(self, *args, **kwds): return self._post(*args, **kwds) def delete(self, *args, **kwds): return self._delete(*args, **kwds) def patch(self, *args, **kwds): return self._patch(*args, **kwds) def put(self, *args, **kwds): return self._put(*args, **kwds)
[]
[]
[ "GALAXY_TEST_EXTERNAL" ]
[]
["GALAXY_TEST_EXTERNAL"]
python
1
0
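A short usage sketch for the mixin above, since _different_user is its main moving part: it swaps both the test case's key and the interactor's key and restores them in a finally block, so every request inside the block acts as the other user. The test body below is hypothetical; only the helper names and the [email protected] address come from the file.

def test_histories_are_per_user(self):
    # Create something as the default test user.
    self._post("histories", data={"name": "owned-by-default-user"})
    # Inside the block, both self.user_api_key and the interactor key belong to the other user.
    with self._different_user("[email protected]"):
        names = [h["name"] for h in self._get("histories").json()]
        assert "owned-by-default-user" not in names
    # On exit the original keys are restored, even if the assertions fail.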
python/pyarrow/hdfs.py
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import os import posixpath import sys from pyarrow.util import implements from pyarrow.filesystem import FileSystem import pyarrow.lib as lib class HadoopFileSystem(lib.HadoopFileSystem, FileSystem): """ FileSystem interface for HDFS cluster. See pyarrow.hdfs.connect for full connection details """ def __init__(self, host="default", port=0, user=None, kerb_ticket=None, driver='libhdfs', extra_conf=None): if driver == 'libhdfs': _maybe_set_hadoop_classpath() self._connect(host, port, user, kerb_ticket, extra_conf) def __reduce__(self): return (HadoopFileSystem, (self.host, self.port, self.user, self.kerb_ticket, self.extra_conf)) def _isfilestore(self): """ Returns True if this FileSystem is a unix-style file store with directories. """ return True @implements(FileSystem.isdir) def isdir(self, path): return super().isdir(path) @implements(FileSystem.isfile) def isfile(self, path): return super().isfile(path) @implements(FileSystem.delete) def delete(self, path, recursive=False): return super().delete(path, recursive) def mkdir(self, path, **kwargs): """ Create directory in HDFS Parameters ---------- path : string Directory path to create, including any parent directories Notes ----- libhdfs does not support create_parents=False, so we ignore this here """ return super().mkdir(path) @implements(FileSystem.rename) def rename(self, path, new_path): return super().rename(path, new_path) @implements(FileSystem.exists) def exists(self, path): return super().exists(path) def ls(self, path, detail=False): """ Retrieve directory contents and metadata, if requested. 
Parameters ---------- path : HDFS path detail : boolean, default False If False, only return list of paths Returns ------- result : list of dicts (detail=True) or strings (detail=False) """ return super().ls(path, detail) def walk(self, top_path): """ Directory tree generator for HDFS, like os.walk Parameters ---------- top_path : string Root directory for tree traversal Returns ------- Generator yielding 3-tuple (dirpath, dirnames, filename) """ contents = self.ls(top_path, detail=True) directories, files = _libhdfs_walk_files_dirs(top_path, contents) yield top_path, directories, files for dirname in directories: yield from self.walk(self._path_join(top_path, dirname)) def _maybe_set_hadoop_classpath(): import re if re.search(r'hadoop-common[^/]+.jar', os.environ.get('CLASSPATH', '')): return if 'HADOOP_HOME' in os.environ: if sys.platform != 'win32': classpath = _derive_hadoop_classpath() else: hadoop_bin = '{}/bin/hadoop'.format(os.environ['HADOOP_HOME']) classpath = _hadoop_classpath_glob(hadoop_bin) else: classpath = _hadoop_classpath_glob('hadoop') os.environ['CLASSPATH'] = classpath.decode('utf-8') def _derive_hadoop_classpath(): import subprocess find_args = ('find', '-L', os.environ['HADOOP_HOME'], '-name', '*.jar') find = subprocess.Popen(find_args, stdout=subprocess.PIPE) xargs_echo = subprocess.Popen(('xargs', 'echo'), stdin=find.stdout, stdout=subprocess.PIPE) jars = subprocess.check_output(('tr', "' '", "':'"), stdin=xargs_echo.stdout) hadoop_conf = os.environ["HADOOP_CONF_DIR"] \ if "HADOOP_CONF_DIR" in os.environ \ else os.environ["HADOOP_HOME"] + "/etc/hadoop" return (hadoop_conf + ":").encode("utf-8") + jars def _hadoop_classpath_glob(hadoop_bin): import subprocess hadoop_classpath_args = (hadoop_bin, 'classpath', '--glob') return subprocess.check_output(hadoop_classpath_args) def _libhdfs_walk_files_dirs(top_path, contents): files = [] directories = [] for c in contents: scrubbed_name = posixpath.split(c['name'])[1] if c['kind'] == 'file': files.append(scrubbed_name) else: directories.append(scrubbed_name) return directories, files def connect(host="default", port=0, user=None, kerb_ticket=None, extra_conf=None): """ Connect to an HDFS cluster. All parameters are optional and should only be set if the defaults need to be overridden. Authentication should be automatic if the HDFS cluster uses Kerberos. However, if a username is specified, then the ticket cache will likely be required. Parameters ---------- host : NameNode. Set to "default" for fs.defaultFS from core-site.xml. port : NameNode's port. Set to 0 for default or logical (HA) nodes. user : Username when connecting to HDFS; None implies login user. kerb_ticket : Path to Kerberos ticket cache. extra_conf : dict, default None extra Key/Value pairs for config; Will override any hdfs-site.xml properties Notes ----- The first time you call this method, it will take longer than usual due to JNI spin-up time. Returns ------- filesystem : HadoopFileSystem """ fs = HadoopFileSystem(host=host, port=port, user=user, kerb_ticket=kerb_ticket, extra_conf=extra_conf) return fs
[]
[]
[ "HADOOP_HOME", "HADOOP_CONF_DIR", "CLASSPATH" ]
[]
["HADOOP_HOME", "HADOOP_CONF_DIR", "CLASSPATH"]
python
3
0
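The classpath logic above follows a fixed precedence: keep a CLASSPATH that already contains a hadoop-common jar, otherwise derive one from HADOOP_HOME (jars found on disk, prefixed with HADOOP_CONF_DIR or HADOOP_HOME/etc/hadoop), and only then fall back to "hadoop classpath --glob". A condensed sketch of that order, assuming a Unix-like system; the function name is a placeholder, not a pyarrow API.

import os
import re
import subprocess

def resolve_hadoop_classpath():
    # 1. A CLASSPATH that already carries the Hadoop jars wins outright.
    current = os.environ.get("CLASSPATH", "")
    if re.search(r"hadoop-common[^/]+\.jar", current):
        return current
    # 2. With HADOOP_HOME set, collect every jar under the installation.
    hadoop_home = os.environ.get("HADOOP_HOME")
    if hadoop_home:
        conf_dir = os.environ.get("HADOOP_CONF_DIR",
                                  os.path.join(hadoop_home, "etc", "hadoop"))
        out = subprocess.check_output(
            ["find", "-L", hadoop_home, "-name", "*.jar"], text=True)
        return conf_dir + ":" + ":".join(out.split())
    # 3. Otherwise ask the hadoop CLI for its glob classpath.
    return subprocess.check_output(
        ["hadoop", "classpath", "--glob"], text=True).strip()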
plugins/meta/portmap/portmap_integ_test.go
// Copyright 2017 CNI authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "net" "os" "path/filepath" "time" "github.com/containernetworking/cni/libcni" "github.com/containernetworking/cni/pkg/types/current" "github.com/containernetworking/plugins/pkg/ns" "github.com/coreos/go-iptables/iptables" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" "github.com/vishvananda/netlink" ) var _ = Describe("portmap integration tests", func() { var configList *libcni.NetworkConfigList var cniConf *libcni.CNIConfig var targetNS ns.NetNS var containerPort int var closeChan chan interface{} BeforeEach(func() { var err error rawConfig := `{ "cniVersion": "0.3.0", "name": "cni-portmap-unit-test", "plugins": [ { "type": "ptp", "ipMasq": true, "ipam": { "type": "host-local", "subnet": "172.16.31.0/24" } }, { "type": "portmap", "capabilities": { "portMappings": true } } ] }` configList, err = libcni.ConfListFromBytes([]byte(rawConfig)) Expect(err).NotTo(HaveOccurred()) // turn PATH in to CNI_PATH dirs := filepath.SplitList(os.Getenv("PATH")) cniConf = &libcni.CNIConfig{Path: dirs} targetNS, err = ns.NewNS() Expect(err).NotTo(HaveOccurred()) fmt.Fprintln(GinkgoWriter, "namespace:", targetNS.Path()) // Start an echo server and get the port containerPort, closeChan, err = RunEchoServerInNS(targetNS) Expect(err).NotTo(HaveOccurred()) }) AfterEach(func() { if targetNS != nil { targetNS.Close() } }) // This needs to be done using Ginkgo's asynchronous testing mode. 
It("forwards a TCP port on ipv4", func(done Done) { var err error hostPort := 9999 runtimeConfig := libcni.RuntimeConf{ ContainerID: "unit-test", NetNS: targetNS.Path(), IfName: "eth0", CapabilityArgs: map[string]interface{}{ "portMappings": []map[string]interface{}{ { "hostPort": hostPort, "containerPort": containerPort, "protocol": "tcp", }, }, }, } // Make delete idempotent, so we can clean up on failure netDeleted := false deleteNetwork := func() error { if netDeleted { return nil } netDeleted = true return cniConf.DelNetworkList(configList, &runtimeConfig) } // we'll also manually check the iptables chains ipt, err := iptables.NewWithProtocol(iptables.ProtocolIPv4) Expect(err).NotTo(HaveOccurred()) dnatChainName := genDnatChain("cni-portmap-unit-test", "unit-test", nil).name // Create the network resI, err := cniConf.AddNetworkList(configList, &runtimeConfig) Expect(err).NotTo(HaveOccurred()) defer deleteNetwork() // Check the chain exists _, err = ipt.List("nat", dnatChainName) Expect(err).NotTo(HaveOccurred()) result, err := current.GetResult(resI) Expect(err).NotTo(HaveOccurred()) var contIP net.IP for _, ip := range result.IPs { if result.Interfaces[ip.Interface].Sandbox == "" { continue } contIP = ip.Address.IP } if contIP == nil { Fail("could not determine container IP") } // Sanity check: verify that the container is reachable directly contOK := testEchoServer(fmt.Sprintf("%s:%d", contIP.String(), containerPort)) // Verify that a connection to the forwarded port works hostIP := getLocalIP() dnatOK := testEchoServer(fmt.Sprintf("%s:%d", hostIP, hostPort)) // Verify that a connection to localhost works snatOK := testEchoServer(fmt.Sprintf("%s:%d", "127.0.0.1", hostPort)) // Cleanup close(closeChan) err = deleteNetwork() Expect(err).NotTo(HaveOccurred()) // Verify iptables rules are gone _, err = ipt.List("nat", dnatChainName) Expect(err).To(MatchError(ContainSubstring("iptables: No chain/target/match by that name."))) // Check that everything succeeded *after* we clean up the network if !contOK { Fail("connection direct to " + contIP.String() + " failed") } if !dnatOK { Fail("Connection to " + hostIP + " was not forwarded") } if !snatOK { Fail("connection to 127.0.0.1 was not forwarded") } close(done) }, 5) }) // testEchoServer returns true if we found an echo server on the port func testEchoServer(address string) bool { fmt.Fprintln(GinkgoWriter, "dialing", address) conn, err := net.Dial("tcp", address) if err != nil { fmt.Fprintln(GinkgoWriter, "connection to", address, "failed:", err) return false } defer conn.Close() conn.SetDeadline(time.Now().Add(20 * time.Second)) fmt.Fprintln(GinkgoWriter, "connected to", address) message := "Aliquid melius quam pessimum optimum non est." _, err = fmt.Fprint(conn, message) if err != nil { fmt.Fprintln(GinkgoWriter, "sending message to", address, " failed:", err) return false } conn.SetDeadline(time.Now().Add(20 * time.Second)) fmt.Fprintln(GinkgoWriter, "reading...") response := make([]byte, len(message)) _, err = conn.Read(response) if err != nil { fmt.Fprintln(GinkgoWriter, "receiving message from", address, " failed:", err) return false } fmt.Fprintln(GinkgoWriter, "read...") if string(response) == message { return true } fmt.Fprintln(GinkgoWriter, "returned message didn't match?") return false } func getLocalIP() string { addrs, err := netlink.AddrList(nil, netlink.FAMILY_V4) Expect(err).NotTo(HaveOccurred()) for _, addr := range addrs { return addr.IP.String() } Fail("no live addresses") return "" }
[ "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
main.py
import argparse import logging import multiprocessing as mp import os import time from pathlib import Path import matplotlib.pyplot as plt import numpy as np from tqdm import tqdm from environments import SimulatedSpe_edEnv, WebsocketEnv from environments.logging import CloudUploader, Spe_edLogger from environments.spe_ed import SavedGame from heuristics import PathLengthHeuristic from policies import HeuristicPolicy, load_named_policy from tournament.tournament import run_tournament # Set up logging logging.basicConfig( level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s', datefmt='%Y-%m-%d %H:%M:%S', ) default_window_size = (720, 720) def play(env, pol, show=False, render_file=None, fps=10, logger=None, silent=True, window_size=default_window_size): obs = env.reset() if show and not env.render(screen_width=window_size[0], screen_height=window_size[1]): return if render_file is not None: # Initialize video writer from imageio_ffmpeg import write_frames writer = write_frames(render_file, window_size, fps=fps, codec="libx264", quality=8) writer.send(None) # seed the generator writer.send( env.render(mode="rgb_array", screen_width=window_size[0], screen_height=window_size[1]).copy(order='C') ) if logger is not None: # Log initial state states = [env.game_state()] time_limits = [] done = False with tqdm(disable=silent) as pbar: while not done: action = pol.act(*obs) obs, reward, done, _ = env.step(action) if show and not env.render(screen_width=window_size[0], screen_height=window_size[1]): return if render_file is not None: writer.send( env.render(mode="rgb_array", screen_width=window_size[0], screen_height=window_size[1]).copy(order='C') ) if logger is not None: states.append(env.game_state()) if isinstance(env, WebsocketEnv): time_limits.append(env.time_limit) pbar.update() if logger is not None: logger.log(states, time_limits) if render_file is not None: writer.close() if show: # Show final state while True: if not env.render(screen_width=window_size[0], screen_height=window_size[1]): return plt.pause(0.01) # Sleep def show_logfile(log_file, window_size=default_window_size): """Render logfile to mp4""" from matplotlib.widgets import Slider from visualization import Spe_edAx def format_state(t): s = "Players:\n" s += "\n".join(str(p) for p in game.player_states[t]) + "\n" s += "\nActions:\n" if t + 1 < len(game.data): s += "\n".join(str(a) for a in game.infer_actions(t)) + "\n" else: s += "\n".join("win" if p.active else "inactive" for p in game.player_states[t]) + "\n" return s game = SavedGame.load(log_file) if game.you is not None: game.move_controlled_player_to_front() fig = plt.figure(figsize=(window_size[0] / 100, window_size[1] / 100), dpi=100) ax1 = plt.subplot(1, 1, 1) viewer = Spe_edAx(fig, ax1, game.cell_states[0], game.player_states[0]) plt.tight_layout() plt.subplots_adjust(bottom=0.1, right=0.6) slider = Slider(plt.axes([0.1, 0.025, 0.8, 0.03]), 't', 0, len(game.data) - 1, valinit=0, valstep=1, valfmt="%d") text_box = fig.text(0.61, 0.975, format_state(0), ha='left', va='top') def change_t(val): t = int(slider.val) viewer.update(game.cell_states[t], game.player_states[t]) text_box.set_text(format_state(t)) slider.on_changed(change_t) plt.show() def render_logfile(log_file, fps=10, silent=False, window_size=default_window_size): """Render logfile to mp4. Resulting .mp4 is placed alongside the .json file. Args: log_file: Log file to render. fps: FPS of generated video. silent: Show no progress bar. 
""" import subprocess import tempfile from imageio_ffmpeg import get_ffmpeg_exe from visualization import Spe_edAx, render_video def temp_file_name(suffix): """Create the name of a temp file with given suffix without opening it.""" return Path(tempfile.gettempdir()) / (next(tempfile._get_candidate_names()) + suffix) game = SavedGame.load(log_file) if game.you: game.move_controlled_player_to_front() fig = plt.figure( figsize=(window_size[0] / 100, window_size[1] / 100), dpi=100, tight_layout=True, ) ax = plt.subplot(1, 1, 1) viewer = Spe_edAx(fig, ax, game.cell_states[0], game.player_states[0]) def frames(): """Draw all game states""" for i in tqdm(range(len(game.cell_states)), desc=f"Rendering {log_file.name}", disable=silent): viewer.update(game.cell_states[i], game.player_states[i]) fig.canvas.draw() frame = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).reshape(window_size[0], window_size[1], 3) yield frame # Render video to temp file tmp_video = temp_file_name(".mp4") width, height = fig.canvas.get_width_height() render_video(tmp_video, frames(), width, height, fps=fps) # Create thumbnail in temp file tmp_thumbnail = temp_file_name(".jpg") plt.savefig(tmp_thumbnail) # Join both in log dir subprocess.run( [ get_ffmpeg_exe(), "-i", str(tmp_video), "-i", str(tmp_thumbnail), "-y", "-map", "0", "-map", "1", "-c", "copy", "-disposition:v:1", "attached_pic", "-v", "warning", str(log_file.parent / (log_file.name[:-5] + ".mp4")) ] ) # Cleanup plt.close(fig) tmp_video.unlink() tmp_thumbnail.unlink() if __name__ == "__main__": parser = argparse.ArgumentParser(description='spe_ed') parser.add_argument( 'mode', nargs='?', choices=['play', 'replay', 'render_logdir', 'plot', 'tournament', 'tournament-plot'], default="play" ) parser.add_argument('--show', action='store_true', help='Display games using an updating matplotlib plot.') parser.add_argument('--render-file', type=str, default=None, help='File to render to. Should end with .mp4') parser.add_argument( '--sim', action='store_true', help='The simulator environment runs a local simulation of Spe_ed instead of using the webserver.' ) parser.add_argument('--log-file', type=str, default=None, help='Path to a log file, used to load and replay games.') parser.add_argument('--log-dir', type=str, default=None, help='Directory for storing or retrieving logs.') parser.add_argument( '--t-config', type=str, default='./tournament/tournament_config.py', help='Path of the tournament config file containing which settings to run.' ) parser.add_argument('--upload', action='store_true', help='Upload generated log to cloud server.') parser.add_argument('--fps', type=int, default=10, help='FPS for rendering.') parser.add_argument( '--cores', type=int, default=None, help='Number of cores for multiprocessing, default uses all.' 
) parser.add_argument('--repeat', type=bool, default=False, help='Play endlessly.') args = parser.parse_args() if args.mode == 'render_logdir': log_dir = Path(args.log_dir) if not log_dir.is_dir(): logging.error(f"{log_dir} is not a directory") quit(1) log_files = [] for log_file in log_dir.iterdir(): if not log_file.name.endswith(".json"): continue if (log_dir / (log_file.name[:-5] + ".mp4")).exists(): continue log_files.append(log_file) with mp.Pool(args.cores) as pool, tqdm(desc="Rendering games", total=len(log_files)) as pbar: for log_file in log_files: pool.apply_async(render_logfile, (log_file, args.fps, True), callback=lambda _: pbar.update()) pool.close() pool.join() elif args.mode == 'replay': show_logfile(args.log_file) elif args.mode == 'tournament': from statistics import create_tournament_plots log_dir = Path(args.log_dir) run_tournament(args.show, log_dir, args.t_config, args.cores) create_tournament_plots(log_dir, log_dir.parent) elif args.mode == 'tournament-plot': from statistics import create_tournament_plots log_dir = Path(args.log_dir) if not log_dir.is_dir(): logging.error(f"{log_dir} is not a directory") quit(1) create_tournament_plots(log_dir, log_dir.parent) elif args.mode == 'plot': from statistics import create_plots log_dir = Path(args.log_dir) if not log_dir.is_dir(): logging.error(f"{log_dir} is not a directory") quit(1) create_plots(log_dir, log_dir.parent / "statistics.csv") else: # Create logger if args.log_dir is not None: logger_callbacks = [] if args.upload: logger_callbacks.append( CloudUploader( os.environ["CLOUD_URL"], os.environ["CLOUD_USER"], os.environ["CLOUD_PASSWORD"], remote_dir="logs/" ).upload ) logger = Spe_edLogger(args.log_dir, logger_callbacks) else: logger = None # Create environment if args.sim: env = SimulatedSpe_edEnv(40, 40, [HeuristicPolicy(PathLengthHeuristic(10)) for _ in range(5)]) else: env = WebsocketEnv(os.environ["URL"], os.environ["KEY"], os.environ["TIME_URL"]) # Create policy pol = load_named_policy("GarrukV3") while True: try: play( env, pol, show=args.show, render_file=args.render_file, fps=args.fps, logger=logger, silent=args.repeat ) except Exception: logging.exception("Exception during play") time.sleep(60) # Sleep for a bit and try again if not args.repeat: break
[]
[]
[ "CLOUD_PASSWORD", "CLOUD_URL", "URL", "KEY", "CLOUD_USER", "TIME_URL" ]
[]
["CLOUD_PASSWORD", "CLOUD_URL", "URL", "KEY", "CLOUD_USER", "TIME_URL"]
python
6
0
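When main.py runs against the live server, the websocket environment and the optional cloud uploader are configured entirely through environment variables (URL, KEY, TIME_URL, plus CLOUD_URL, CLOUD_USER and CLOUD_PASSWORD once --upload is given), and a missing one only surfaces inside the retry loop. A small pre-flight check along those lines; the variable names come from the file, the check itself is illustrative.

import os

REQUIRED = ("URL", "KEY", "TIME_URL")
UPLOAD_REQUIRED = ("CLOUD_URL", "CLOUD_USER", "CLOUD_PASSWORD")

def check_play_env(upload=False):
    # Collect every variable the chosen mode needs and report all gaps at once.
    wanted = REQUIRED + (UPLOAD_REQUIRED if upload else ())
    missing = [name for name in wanted if not os.environ.get(name)]
    if missing:
        raise SystemExit("missing environment variables: " + ", ".join(missing))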
docs/conf.py
# -*- coding: utf-8 -*- import os import sys sys.path.insert(0, os.path.abspath('..')) ## noqa import securedrop_api on_rtd = os.environ.get('READTHEDOCS', None) == 'True' project = 'securedrop_api' copyright = '2018, heartsucker' author = 'heartsucker' version = securedrop_api.__version__ release = version extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.todo', 'sphinx.ext.viewcode', ] source_suffix = '.rst' master_doc = 'index' language = None todo_include_todos = True exclude_patterns = [u'_build', 'Thumbs.db', '.DS_Store'] pygments_style = 'sphinx' if on_rtd: html_theme = 'default' else: try: import sphinx_rtd_theme html_theme = 'sphinx_rtd_theme' html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] except ImportError: html_theme = 'alabaster' htmlhelp_basename = 'securedrop_apidoc' epub_title = project epub_author = author epub_publisher = author epub_copyright = copyright epub_exclude_files = ['search.html']
[]
[]
[ "READTHEDOCS" ]
[]
["READTHEDOCS"]
python
1
0
modules/api/test/api_test.go
// Copyright 2017 Xiaomi, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package test import ( "errors" "fmt" "os" "strings" "testing" log "github.com/Sirupsen/logrus" "github.com/masato25/resty" . "github.com/smartystreets/goconvey/convey" "github.com/spf13/viper" cutils "github.com/open-falcon/falcon-plus/common/utils" "github.com/open-falcon/falcon-plus/modules/api/app/model/uic" "github.com/open-falcon/falcon-plus/modules/api/app/utils" cfg "github.com/open-falcon/falcon-plus/modules/api/config" ) var ( api_v1 = "" test_user_name = "apitest-user1" test_user_password = "password" test_team_name = "apitest-team1" root_user_name = "root" root_user_password = "rootpass" ) func init() { cfg_file := os.Getenv("API_TEST_CFG") if cfg_file == "" { cfg_file = "./cfg_example" } viper.SetConfigName(cfg_file) viper.AddConfigPath(".") viper.AddConfigPath("../") err := viper.ReadInConfig() if err != nil { log.Fatal(err) } err = cfg.InitLog(viper.GetString("log_level")) if err != nil { log.Fatal(err) } db_user := os.Getenv("DB_USER") if db_user == "" { db_user = "root" } db_passwd := os.Getenv("DB_PASSWORD") db_host := os.Getenv("DB_HOST") if db_host == "" { db_host = "127.0.0.1" } db_port := os.Getenv("DB_PORT") if db_port == "" { db_port = "3306" } db_names := []string{"falcon_portal", "graph", "uic", "dashboard", "alarms"} for _, dbn := range db_names { viper.Set(fmt.Sprintf("db.%s", dbn), fmt.Sprintf("%s:%s@tcp(%s:%s)/%s?charset=utf8&parseTime=True&loc=Local", db_user, db_passwd, db_host, db_port, dbn)) } err = cfg.InitDB(viper.GetBool("db.db_bug"), viper.GetViper()) if err != nil { log.Fatalf("db conn failed with error %s", err.Error()) } api_port := os.Getenv("API_PORT") if api_port == "" { api_port = strings.TrimLeft(viper.GetString("web_port"), ":") } api_host := os.Getenv("API_HOST") if api_host == "" { api_host = "127.0.0.1" } api_v1 = fmt.Sprintf("http://%s:%s/api/v1", api_host, api_port) init_testing_data() } func init_testing_data() { password := utils.HashIt(test_user_password) user := uic.User{ Name: test_user_name, Passwd: password, Cnname: test_user_name, Email: test_user_name + "@test.com", Phone: "1234567890", IM: "hellotest", QQ: "3800000", } db := cfg.Con() if db.Uic.Table("user").Where("name = ?", test_user_name).First(&uic.User{}).RecordNotFound() { if err := db.Uic.Table("user").Create(&user).Error; err != nil { log.Fatal(err) } log.Info("create_user:", test_user_name) } db.Uic.Table("user").Where("name = ?", "root").Delete(&uic.User{}) db.Uic.Table("team").Where("name = ?", test_team_name).Delete(&uic.Team{}) } func get_session_token() (string, error) { rr := map[string]interface{}{} resp, _ := resty.R(). SetQueryParam("name", root_user_name). SetQueryParam("password", root_user_password). SetResult(&rr). 
Post(fmt.Sprintf("%s/user/login", api_v1)) if resp.StatusCode() != 200 { return "", errors.New(resp.String()) } api_token := fmt.Sprintf(`{"name": "%v", "sig": "%v"}`, rr["name"], rr["sig"]) return api_token, nil } func TestUser(t *testing.T) { var rr *map[string]interface{} = &map[string]interface{}{} var api_token string = "" Convey("Create root user: POST /user/create", t, func() { resp, _ := resty.R(). SetHeader("Content-Type", "application/json"). SetBody(map[string]string{ "name": root_user_name, "password": root_user_password, "email": "[email protected]", "cnname": "cnroot", }). SetResult(rr). Post(fmt.Sprintf("%s/user/create", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["sig"], ShouldNotBeBlank) api_token = resp.String() }) Convey("Get user info by name: GET /user/name/:user", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(rr). Get(fmt.Sprintf("%s/user/name/%s", api_v1, root_user_name)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["role"], ShouldEqual, 2) So((*rr)["id"], ShouldBeGreaterThanOrEqualTo, 0) }) root_user_id := (*rr)["id"] Convey("Get user info by id: GET /user/u/:uid", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(rr). Get(fmt.Sprintf("%s/user/u/%v", api_v1, root_user_id)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["name"], ShouldEqual, root_user_name) }) Convey("Update current user: PUT /user/update", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Content-Type", "application/json"). SetHeader("Apitoken", api_token). SetBody(map[string]string{ "cnname": "cnroot2", "email": "[email protected]", "phone": "18000000000", }). SetResult(rr). Put(fmt.Sprintf("%s/user/update", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldContainSubstring, "updated") Convey("Get user info by name: GET /user/name/:user", func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(rr). Get(fmt.Sprintf("%s/user/name/%s", api_v1, root_user_name)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["cnname"], ShouldEqual, "cnroot2") }) }) Convey("Change password: PUT /user/cgpasswd", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Content-Type", "application/json"). SetHeader("Apitoken", api_token). SetBody(map[string]string{ "old_password": root_user_password, "new_password": root_user_password, }). SetResult(rr). Put(fmt.Sprintf("%s/user/cgpasswd", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldContainSubstring, "updated") }) Convey("Get user list: GET /user/users", t, func() { r := []map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(&r). Get(fmt.Sprintf("%s/user/users", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(r, ShouldNotBeEmpty) So(r[0]["name"], ShouldNotBeBlank) }) Convey("Get current user: POST /user/current", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(rr). Get(fmt.Sprintf("%s/user/current", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["name"], ShouldEqual, root_user_name) }) Convey("Login user: POST /user/login", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). 
SetQueryParam("name", root_user_name). SetQueryParam("password", root_user_password). SetResult(rr). Post(fmt.Sprintf("%s/user/login", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["name"], ShouldEqual, root_user_name) So((*rr)["sig"], ShouldNotBeBlank) So((*rr)["admin"], ShouldBeTrue) api_token = fmt.Sprintf(`{"name": "%v", "sig": "%v"}`, (*rr)["name"], (*rr)["sig"]) }) Convey("Auth user by session: GET /user/auth_session", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(rr). Get(fmt.Sprintf("%s/user/auth_session", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldContainSubstring, "valid") }) Convey("Logout user: GET /user/logout", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(rr). Get(fmt.Sprintf("%s/user/logout", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldContainSubstring, "successful") }) } func TestAdmin(t *testing.T) { var rr *map[string]interface{} = &map[string]interface{}{} var api_token string = "" Convey("Login as root", t, func() { resp, _ := resty.R(). SetQueryParam("name", root_user_name).SetQueryParam("password", root_user_password).SetResult(rr). Post(fmt.Sprintf("%s/user/login", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["name"], ShouldEqual, root_user_name) So((*rr)["sig"], ShouldNotBeBlank) So((*rr)["admin"], ShouldBeTrue) }) api_token = fmt.Sprintf(`{"name": "%v", "sig": "%v"}`, (*rr)["name"], (*rr)["sig"]) Convey("Get user info by name: GET /user/name/:user", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(rr). Get(fmt.Sprintf("%s/user/name/%s", api_v1, test_user_name)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["id"], ShouldBeGreaterThanOrEqualTo, 0) }) test_user_id := (*rr)["id"] Convey("Change user role: PUT /admin/change_user_role", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetHeader("Content-Type", "application/json"). SetBody(fmt.Sprintf(`{"user_id": %v,"admin": "yes"}`, test_user_id)). SetResult(rr). Put(fmt.Sprintf("%s/admin/change_user_role", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldContainSubstring, "sccuessful") Convey("Get user info by name: GET /user/name/:user", func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(rr). Get(fmt.Sprintf("%s/user/name/%s", api_v1, test_user_name)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["role"], ShouldEqual, 1) }) }) Convey("Change user passwd: PUT /admin/change_user_passwd", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetHeader("Content-Type", "application/json"). SetBody(fmt.Sprintf(`{"user_id": %v,"password": "%s"}`, test_user_id, test_user_password)). SetResult(rr). Put(fmt.Sprintf("%s/admin/change_user_passwd", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldContainSubstring, "updated") }) Convey("Change user profile: PUT /admin/change_user_profile", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetHeader("Content-Type", "application/json"). 
SetBody(fmt.Sprintf(`{"user_id": %v,"cnname": "%s", "email": "%s"}`, test_user_id, test_user_name, "[email protected]")). SetResult(rr). Put(fmt.Sprintf("%s/admin/change_user_profile", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldContainSubstring, "updated") Convey("Get user info by name: GET /user/name/:user", func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(rr). Get(fmt.Sprintf("%s/user/name/%s", api_v1, test_user_name)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["email"], ShouldEqual, "[email protected]") }) }) Convey("Admin login user: POST /admin/login", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Content-Type", "application/json"). SetHeader("Apitoken", api_token). SetBody(map[string]string{ "name": test_user_name, }). SetResult(rr). Post(fmt.Sprintf("%s/admin/login", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["name"], ShouldEqual, test_user_name) }) Convey("Delete user: DELETE /admin/delete_user", t, func() { }) } func TestTeam(t *testing.T) { var rr *map[string]interface{} = &map[string]interface{}{} Convey("Login as root", t, func() { resp, _ := resty.R(). SetQueryParam("name", root_user_name).SetQueryParam("password", root_user_password).SetResult(rr). Post(fmt.Sprintf("%s/user/login", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["name"], ShouldEqual, root_user_name) So((*rr)["sig"], ShouldNotBeBlank) So((*rr)["admin"], ShouldBeTrue) }) api_token := fmt.Sprintf(`{"name": "%v", "sig": "%v"}`, (*rr)["name"], (*rr)["sig"]) Convey("Get user info by name: GET /user/name/:user", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(rr). Get(fmt.Sprintf("%s/user/name/%s", api_v1, root_user_name)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["role"], ShouldEqual, 2) So((*rr)["id"], ShouldBeGreaterThanOrEqualTo, 0) }) root_user_id := (*rr)["id"] Convey("Create team: POST /team", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Content-Type", "application/json"). SetHeader("Apitoken", api_token). SetBody(fmt.Sprintf(`{"team_name": "%s","resume": "i'm descript", "users": [1]}`, test_team_name)). SetResult(rr). Post(fmt.Sprintf("%s/team", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldContainSubstring, "created") }) Convey("Get team by name: GET /team/name/:name", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R().SetHeader("Apitoken", api_token).SetResult(rr). Get(fmt.Sprintf("%s/team/name/%s", api_v1, test_team_name)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["name"], ShouldEqual, test_team_name) So((*rr)["users"], ShouldNotBeEmpty) So((*rr)["id"], ShouldBeGreaterThan, 0) }) test_team_id := (*rr)["id"] Convey("Get team by id: GET /team/t/:tid", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R().SetHeader("Apitoken", api_token).SetResult(rr). Get(fmt.Sprintf("%s/team/t/%v", api_v1, test_team_id)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["name"], ShouldEqual, test_team_name) So((*rr)["users"], ShouldNotBeEmpty) So((*rr)["id"], ShouldEqual, test_team_id) }) Convey("Update team by id: PUT /team", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). 
SetHeader("Content-Type", "application/json"). SetHeader("Apitoken", api_token). SetBody(fmt.Sprintf(`{"team_id": %v,"resume": "descript2", "name":"%v", "users": [1]}`, test_team_id, test_team_name)). SetResult(rr). Put(fmt.Sprintf("%s/team", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldContainSubstring, "updated") Convey("Get team by name: GET /team/name/:name", func() { *rr = map[string]interface{}{} resp, _ := resty.R().SetHeader("Apitoken", api_token).SetResult(rr). Get(fmt.Sprintf("%s/team/name/%s", api_v1, test_team_name)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["resume"], ShouldEqual, "descript2") }) }) Convey("Add users to team: POST /team/user", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Content-Type", "application/json"). SetHeader("Apitoken", api_token). SetBody(map[string]interface{}{ "team_id": test_team_id, "users": []string{root_user_name}, }). SetResult(rr). Post(fmt.Sprintf("%s/team/user", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldContainSubstring, "successful") }) Convey("Get teams which user belong to: GET /user/u/:uid/teams", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R().SetHeader("Apitoken", api_token).SetResult(rr). Get(fmt.Sprintf("%s/user/u/%v/teams", api_v1, root_user_id)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["teams"], ShouldNotBeEmpty) }) Convey("Check user in teams or not: GET /user/u/:uid/in_teams", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetQueryParam("team_names", test_team_name). SetResult(rr). Get(fmt.Sprintf("%s/user/u/%v/in_teams", api_v1, root_user_id)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldEqual, "true") }) Convey("Get team list: GET /team", t, func() { var r []map[string]interface{} resp, _ := resty.R().SetHeader("Apitoken", api_token).SetResult(&r). Get(fmt.Sprintf("%s/team", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(r, ShouldNotBeEmpty) So(r[0]["team"], ShouldNotBeEmpty) So(r[0]["users"], ShouldNotBeEmpty) So(r[0]["creator_name"], ShouldNotBeBlank) }) Convey("Delete team by id: DELETE /team/:tid", t, func() { *rr = map[string]interface{}{} resp, _ := resty.R(). SetHeader("Apitoken", api_token). SetResult(rr). Delete(fmt.Sprintf("%s/team/%v", api_v1, test_team_id)) So(resp.StatusCode(), ShouldEqual, 200) So(*rr, ShouldNotBeEmpty) So((*rr)["message"], ShouldContainSubstring, "deleted") }) } func TestGraph(t *testing.T) { api_token, err := get_session_token() if err != nil { log.Fatal(err) } rc := resty.New() rc.SetHeader("Apitoken", api_token) Convey("Get endpoint list: GET /graph/endpoint", t, func() { r := []map[string]interface{}{} resp, _ := rc.R().SetQueryParam("q", ".+"). SetResult(&r). Get(fmt.Sprintf("%s/graph/endpoint", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(len(r), ShouldBeGreaterThanOrEqualTo, 0) if len(r) == 0 { return } eid := r[0]["id"] r = []map[string]interface{}{} Convey("Get counter list: GET /graph/endpoint_counter", func() { resp, _ := rc.R(). SetQueryParam("eid", fmt.Sprintf("%v", eid)). SetQueryParam("metricQuery", ".+"). SetQueryParam("limit", "1"). SetResult(&r). 
Get(fmt.Sprintf("%s/graph/endpoint_counter", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) So(r, ShouldNotBeEmpty) }) }) } func TestNodata(t *testing.T) { api_token, err := get_session_token() if err != nil { log.Fatal(err) } var rr *map[string]interface{} = &map[string]interface{}{} rc := resty.New() rc.SetHeader("Apitoken", api_token) var nid int = 0 Convey("Create nodata config: POST /nodata", t, func() { nodata_name := fmt.Sprintf("api.testnodata-%s", cutils.RandString(8)) resp, _ := rc.R(). SetHeader("Content-Type", "application/json"). SetBody(fmt.Sprintf(`{"tags": "", "step": 60, "obj_type": "host", "obj": "docker-agent", "name": "%s", "mock": -1, "metric": "api.test.metric", "dstype": "GAUGE"}`, nodata_name)). SetResult(rr). Post(fmt.Sprintf("%s/nodata/", api_v1)) So(resp.StatusCode(), ShouldEqual, 200) if v, ok := (*rr)["id"]; ok { nid = int(v.(float64)) Convey("Delete nodata config", func() { resp, _ := rc.R().Delete(fmt.Sprintf("%s/nodata/%d", api_v1, nid)) So(resp.StatusCode(), ShouldEqual, 200) }) } }) }
[ "\"API_TEST_CFG\"", "\"DB_USER\"", "\"DB_PASSWORD\"", "\"DB_HOST\"", "\"DB_PORT\"", "\"API_PORT\"", "\"API_HOST\"" ]
[]
[ "DB_PASSWORD", "DB_HOST", "DB_PORT", "API_PORT", "API_HOST", "DB_USER", "API_TEST_CFG" ]
[]
["DB_PASSWORD", "DB_HOST", "DB_PORT", "API_PORT", "API_HOST", "DB_USER", "API_TEST_CFG"]
go
7
0
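The init() above assembles one MySQL DSN per schema from DB_USER, DB_PASSWORD, DB_HOST and DB_PORT, defaulting to root, an empty password, 127.0.0.1 and 3306. The same construction, sketched outside the Go test with the defaults copied from the file:

import os

DB_NAMES = ("falcon_portal", "graph", "uic", "dashboard", "alarms")

def build_test_dsns():
    user = os.getenv("DB_USER", "root")
    password = os.getenv("DB_PASSWORD", "")   # empty password unless provided
    host = os.getenv("DB_HOST", "127.0.0.1")
    port = os.getenv("DB_PORT", "3306")
    # One DSN per schema, matching the viper keys db.<name> set in init().
    return {
        name: f"{user}:{password}@tcp({host}:{port})/{name}"
              f"?charset=utf8&parseTime=True&loc=Local"
        for name in DB_NAMES
    }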
wandb/old/retry.py
import datetime import functools import logging import os import random import time import traceback import weakref import sys import wandb from wandb import env from wandb import util from wandb.errors import Error from requests import HTTPError logger = logging.getLogger(__name__) def make_printer(msg): def printer(): print(msg) return printer class TransientException(Exception): """Exception type designated for errors that may only be temporary Can have its own message and/or wrap another exception. """ def __init__(self, msg=None, exc=None): super(TransientException, self).__init__(msg) self.message = msg self.exception = exc class Retry(object): """Creates a retryable version of a function. Calling this will call the passed function, retrying if any exceptions in retryable_exceptions are caught, with exponential backoff. """ MAX_SLEEP_SECONDS = 5 * 60 def __init__(self, call_fn, retry_timedelta=None, num_retries=None, check_retry_fn=lambda e: True, retryable_exceptions=None, error_prefix="Network error", retry_callback=None): self._call_fn = call_fn self._check_retry_fn = check_retry_fn self._error_prefix = error_prefix self._last_print = datetime.datetime.now() - datetime.timedelta(minutes=1) self._retry_timedelta = retry_timedelta self._num_retries = num_retries self._retryable_exceptions = retryable_exceptions if self._retryable_exceptions is None: self._retryable_exceptions = (TransientException,) self._index = 0 self.retry_callback = retry_callback @property def num_iters(self): """The number of iterations the previous __call__ retried.""" return self._num_iter def __call__(self, *args, **kwargs): """Call the wrapped function, with retries. Arguments: retry_timedelta (kwarg): amount of time to retry before giving up. sleep_base (kwarg): amount of time to sleep upon first failure, all other sleeps are derived from this one. 
""" retry_timedelta = kwargs.pop('retry_timedelta', self._retry_timedelta) if retry_timedelta is None: retry_timedelta = datetime.timedelta(days=365) num_retries = kwargs.pop('num_retries', self._num_retries) if num_retries is None: num_retries = 1000000 if os.environ.get('WANDB_TEST'): num_retries = 0 sleep_base = kwargs.pop('retry_sleep_base', 1) # an extra function to allow performing more logic on the filtered exceptiosn check_retry_fn = kwargs.pop('check_retry_fn', self._check_retry_fn) sleep = sleep_base start_time = datetime.datetime.now() now = start_time self._num_iter = 0 while True: try: result = self._call_fn(*args, **kwargs) # Only print resolved attempts once every minute if self._num_iter > 2 and now - self._last_print > datetime.timedelta(minutes=1): self._last_print = datetime.datetime.now() self.retry_callback(200, "{} resolved after {}, resuming normal operation.".format(self._error_prefix, datetime.datetime.now() - start_time)) return result except self._retryable_exceptions as e: # if the secondary check fails, re-raise if not check_retry_fn(e): raise if (datetime.datetime.now() - start_time >= retry_timedelta or self._num_iter >= num_retries): raise if self._num_iter == 2: logger.exception('Retry attempt failed:') if isinstance(e, HTTPError) and e.response is not None and self.retry_callback is not None: self.retry_callback(e.response.status_code, e.response.text) else: # todo: would like to catch other errors, eg wandb.errors.Error, ConnectionError etc # but some of these can be raised before the retry handler thread (RunStatusChecker) is # spawned in wandb_init wandb.termlog("{} ({}), entering retry loop.".format(self._error_prefix, e.__class__.__name__)) # if wandb.env.is_debug(): # traceback.print_exc() time.sleep(sleep + random.random() * 0.25 * sleep) sleep *= 2 if sleep > self.MAX_SLEEP_SECONDS: sleep = self.MAX_SLEEP_SECONDS now = datetime.datetime.now() self._num_iter += 1 def retriable(*args, **kargs): def decorator(fn): retrier = Retry(fn, *args, **kargs) @functools.wraps(fn) def wrapped_fn(*args, **kargs): return retrier(*args, **kargs) return wrapped_fn return decorator
[]
[]
[ "WANDB_TEST" ]
[]
["WANDB_TEST"]
python
1
0
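The retry loop above doubles its sleep after every failed attempt, adds up to 25% random jitter, and never lets the base delay exceed MAX_SLEEP_SECONDS, on top of the wall-clock deadline and attempt budget. The sleep schedule in isolation, with the constant and factors taken from the class and the generator itself only a sketch:

import random

MAX_SLEEP_SECONDS = 5 * 60

def backoff_delays(sleep_base=1, attempts=8):
    """Yield the successive sleep durations the Retry.__call__ loop would use."""
    sleep = sleep_base
    for _ in range(attempts):
        yield sleep + random.random() * 0.25 * sleep  # base plus up to 25% jitter
        sleep = min(sleep * 2, MAX_SLEEP_SECONDS)     # double, but cap the base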
server/server/asgi.py
""" ASGI config for server project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/ """ import os from channels.routing import ProtocolTypeRouter, URLRouter from django.core.asgi import get_asgi_application from websockets.middlewares import TokenAuthMiddlewareStack from websockets.routing import websocket_urlpatterns os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'server.settings') application = ProtocolTypeRouter({ 'websocket': TokenAuthMiddlewareStack(URLRouter(websocket_urlpatterns)) })
[]
[]
[]
[]
[]
python
0
0
src/post/post_app.py
import os import prometheus_client import time import structlog import traceback import requests from flask import Flask, request, Response, abort, logging from pymongo import MongoClient from bson.objectid import ObjectId from bson.json_util import dumps from helpers import http_healthcheck_handler, log_event from py_zipkin.zipkin import zipkin_span, ZipkinAttrs CONTENT_TYPE_LATEST = str('text/plain; version=0.0.4; charset=utf-8') POST_DATABASE_HOST = os.getenv('POST_DATABASE_HOST', '127.0.0.1') POST_DATABASE_PORT = os.getenv('POST_DATABASE_PORT', '27017') ZIPKIN_HOST = os.getenv('ZIPKIN_HOST', 'zipkin') ZIPKIN_PORT = os.getenv('ZIPKIN_PORT', '9411') ZIPKIN_URL = "http://{0}:{1}/api/v1/spans".format(ZIPKIN_HOST, ZIPKIN_PORT) log = structlog.get_logger() app = Flask(__name__) def init(app): # appication version info app.version = None with open('VERSION') as f: app.version = f.read().rstrip() # prometheus metrics app.post_read_db_seconds = prometheus_client.Histogram( 'post_read_db_seconds', 'Request DB time' ) app.post_count = prometheus_client.Counter( 'post_count', 'A counter of new posts' ) # database client connection app.db = MongoClient( POST_DATABASE_HOST, int(POST_DATABASE_PORT) ).users_post.posts def http_transport(encoded_span): # The collector expects a thrift-encoded list of spans. Instead of # decoding and re-encoding the already thrift-encoded message, we can just # add header bytes that specify that what follows is a list of length 1. body = b'\x0c\x00\x00\x00\x01' + encoded_span requests.post(ZIPKIN_URL, data=body, headers={'Content-Type': 'application/x-thrift'}) # Prometheus endpoint @app.route('/metrics') def metrics(): return Response(prometheus_client.generate_latest(), mimetype=CONTENT_TYPE_LATEST) # Retrieve information about all posts @zipkin_span(service_name='post', span_name='db_find_all_posts') def find_posts(): try: posts = app.db.find().sort('created_at', -1) except Exception as e: log_event('error', 'find_all_posts', "Failed to retrieve posts from the database. \ Reason: {}".format(str(e))) abort(500) else: log_event('info', 'find_all_posts', 'Successfully retrieved all posts from the database') return dumps(posts) @app.route("/posts") def posts(): with zipkin_span( service_name='post', zipkin_attrs=ZipkinAttrs( trace_id=request.headers['X-B3-TraceID'], span_id=request.headers['X-B3-SpanID'], parent_span_id=request.headers['X-B3-ParentSpanID'], flags=request.headers['X-B3-Flags'], is_sampled=request.headers['X-B3-Sampled'], ), span_name='/posts', transport_handler=http_transport, port=5000, sample_rate=100, ): posts = find_posts() return posts # Vote for a post @app.route('/vote', methods=['POST']) def vote(): try: post_id = request.values.get('id') vote_type = request.values.get('type') except Exception as e: log_event('error', 'request_error', "Bad input parameters. Reason: {}".format(str(e))) abort(400) try: post = app.db.find_one({'_id': ObjectId(post_id)}) post['votes'] += int(vote_type) app.db.update_one({'_id': ObjectId(post_id)}, {'$set': {'votes': post['votes']}}) except Exception as e: log_event('error', 'post_vote', "Failed to vote for a post. 
Reason: {}".format(str(e)), {'post_id': post_id, 'vote_type': vote_type}) abort(500) else: log_event('info', 'post_vote', 'Successful vote', {'post_id': post_id, 'vote_type': vote_type}) return 'OK' # Add new post @app.route('/add_post', methods=['POST']) def add_post(): try: title = request.values.get('title') link = request.values.get('link') created_at = request.values.get('created_at') except Exception as e: log_event('error', 'request_error', "Bad input parameters. Reason: {}".format(str(e))) abort(400) try: app.db.insert({'title': title, 'link': link, 'created_at': created_at, 'votes': 0}) except Exception as e: log_event('error', 'post_create', "Failed to create a post. Reason: {}".format(str(e)), {'title': title, 'link': link}) abort(500) else: log_event('info', 'post_create', 'Successfully created a new post', {'title': title, 'link': link}) app.post_count.inc() return 'OK' # Retrieve information about a post @zipkin_span(service_name='post', span_name='db_find_single_post') def find_post(id): start_time = time.time() try: post = app.db.find_one({'_id': ObjectId(id)}) except Exception as e: log_event('error', 'post_find', "Failed to find the post. Reason: {}".format(str(e)), request.values) abort(500) else: stop_time = time.time() # + 0.3 resp_time = stop_time - start_time app.post_read_db_seconds.observe(resp_time) # time.sleep(3) log_event('info', 'post_find', 'Successfully found the post information', {'post_id': id}) return dumps(post) # Find a post @app.route('/post/<id>') def get_post(id): with zipkin_span( service_name='post', zipkin_attrs=ZipkinAttrs( trace_id=request.headers['X-B3-TraceID'], span_id=request.headers['X-B3-SpanID'], parent_span_id=request.headers['X-B3-ParentSpanID'], flags=request.headers['X-B3-Flags'], is_sampled=request.headers['X-B3-Sampled'], ), span_name='/post/<id>', transport_handler=http_transport, port=5000, sample_rate=100, ): post = find_post(id) return post # Health check endpoint @app.route('/healthcheck') def healthcheck(): return http_healthcheck_handler(POST_DATABASE_HOST, POST_DATABASE_PORT, app.version) # Log every request @app.after_request def after_request(response): request_id = request.headers['Request-Id'] \ if 'Request-Id' in request.headers else None log.info('request', service='post', request_id=request_id, path=request.full_path, addr=request.remote_addr, method=request.method, response_status=response.status_code) return response # Log Exceptions @app.errorhandler(Exception) def exceptions(e): request_id = request.headers['Request-Id'] \ if 'Request-Id' in request.headers else None tb = traceback.format_exc() log.error('internal_error', service='post', request_id=request_id, path=request.full_path, remote_addr=request.remote_addr, method=request.method, traceback=tb) return 'Internal Server Error', 500 if __name__ == "__main__": init(app) logg = logging.getLogger('werkzeug') logg.disabled = True # disable default logger # define log structure structlog.configure(processors=[ structlog.processors.TimeStamper(fmt="%Y-%m-%d %H:%M:%S"), structlog.stdlib.add_log_level, # to see indented logs in the terminal, uncomment the line below # structlog.processors.JSONRenderer(indent=2, sort_keys=True) # and comment out the one below structlog.processors.JSONRenderer(sort_keys=True) ]) app.run(host='0.0.0.0', debug=True)
[]
[]
[ "POST_DATABASE_PORT", "ZIPKIN_HOST", "ZIPKIN_PORT", "POST_DATABASE_HOST" ]
[]
["POST_DATABASE_PORT", "ZIPKIN_HOST", "ZIPKIN_PORT", "POST_DATABASE_HOST"]
python
4
0
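Both traced handlers in the service above rebuild ZipkinAttrs from the incoming X-B3-* headers so their spans join the caller's trace rather than starting a new one. That extraction, pulled out into a helper; a refactoring sketch, not code from the service.

from py_zipkin.zipkin import ZipkinAttrs

def zipkin_attrs_from(headers):
    # Map the B3 propagation headers onto py_zipkin's attribute tuple.
    return ZipkinAttrs(
        trace_id=headers['X-B3-TraceID'],
        span_id=headers['X-B3-SpanID'],
        parent_span_id=headers['X-B3-ParentSpanID'],
        flags=headers['X-B3-Flags'],
        is_sampled=headers['X-B3-Sampled'],
    )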
lambda/src/main/java/cn/amazon/aws/rp/spapi/dynamodb/impl/SpApiTaskDao.java
package cn.amazon.aws.rp.spapi.dynamodb.impl; import cn.amazon.aws.rp.spapi.common.IdWorker; import cn.amazon.aws.rp.spapi.constants.DateConstants; import cn.amazon.aws.rp.spapi.dynamodb.ISpApiTaskDao; import cn.amazon.aws.rp.spapi.dynamodb.entity.SpApiTask; import cn.amazon.aws.rp.spapi.enums.DateType; import cn.amazon.aws.rp.spapi.enums.StatusEnum; import cn.amazon.aws.rp.spapi.utils.DateUtil; import com.amazonaws.services.dynamodbv2.AmazonDynamoDB; import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClientBuilder; import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBMapper; import com.amazonaws.services.dynamodbv2.datamodeling.DynamoDBQueryExpression; import com.amazonaws.services.dynamodbv2.document.AttributeUpdate; import com.amazonaws.services.dynamodbv2.document.DynamoDB; import com.amazonaws.services.dynamodbv2.document.PrimaryKey; import com.amazonaws.services.dynamodbv2.document.Table; import com.amazonaws.services.dynamodbv2.model.AttributeValue; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.time.LocalDateTime; import java.time.format.DateTimeFormatter; import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Objects; /** * @description: * @className: SpApiTaskDao * @type: JAVA * @date: 2020/11/11 14:22 * @author: zhangkui */ public class SpApiTaskDao implements ISpApiTaskDao { private static final AmazonDynamoDB DDB = AmazonDynamoDBClientBuilder.standard().build(); public static String TABLE_NAME = "sp_api_task"; private static String TABLE_P_KEY = "sellerKey"; private final IdWorker idWorker; public SpApiTaskDao(){ idWorker = new IdWorker(); } @Override public List<SpApiTask> getTask(String sellerKey) { Map<String, AttributeValue> eav = new HashMap<String, AttributeValue>(); eav.put(":sellerKey", new AttributeValue().withS(sellerKey)); DynamoDBQueryExpression<SpApiTask> queryExpression = new DynamoDBQueryExpression<SpApiTask>() .withKeyConditionExpression("sellerKey = :sellerKey") .withExpressionAttributeValues(eav); final DynamoDBMapper dbm = new DynamoDBMapper(DDB); return dbm.query(SpApiTask.class, queryExpression); } @Override public void delTask(String sellerKey, String sellerId) { DynamoDB dynamoDB = new DynamoDB(DDB); final Table table = dynamoDB.getTable(TABLE_NAME); PrimaryKey primaryKey = new PrimaryKey(); primaryKey.addComponent("sellerKey", sellerKey); primaryKey.addComponent("sellerId", sellerId); table.deleteItem(primaryKey); } @Override public void upTaskStatus(String sellerKey, String sellerId, int status) { DynamoDB dynamoDB = new DynamoDB(DDB); final Table table = dynamoDB.getTable(TABLE_NAME); PrimaryKey primaryKey = new PrimaryKey(); primaryKey.addComponent("sellerKey", sellerKey); primaryKey.addComponent("sellerId", sellerId); AttributeUpdate attributeUpdate = new AttributeUpdate("executeStatus"); attributeUpdate.put(status); table.updateItem(primaryKey,attributeUpdate); } @Override public void addTask(SpApiTask spApiTaskVO) { final DynamoDBMapper dbm = new DynamoDBMapper(DDB); dbm.batchSave(spApiTaskVO); } @Override public void addNewTask(SpApiTask spApiTask, String dateType, long space) { SpApiTask apiTask = new SpApiTask(); apiTask.setSellerKey(spApiTask.getSellerId() + "_" + spApiTask.getTaskName()); apiTask.setSellerId(spApiTask.getSellerId()); if(Objects.nonNull(spApiTask.getEndTime())) { apiTask.setStartTime(spApiTask.getEndTime()); LocalDateTime localDateTime = DateUtil.getLocalDateTime(spApiTask.getEndTime()); if (DateType.NANOS.name().equalsIgnoreCase(dateType)) { 
                apiTask.setEndTime(DateUtil.getDateFormat(localDateTime.plusNanos(space)));
            } else if (DateType.SECONDS.name().equalsIgnoreCase(dateType)) {
                apiTask.setEndTime(DateUtil.getDateFormat(localDateTime.plusSeconds(space)));
            } else if (DateType.MINUTES.name().equalsIgnoreCase(dateType)) {
                apiTask.setEndTime(DateUtil.getDateFormat(localDateTime.plusMinutes(space)));
            } else if (DateType.HOURS.name().equalsIgnoreCase(dateType)) {
                apiTask.setEndTime(DateUtil.getDateFormat(localDateTime.plusHours(space)));
            } else if (DateType.DAYS.name().equalsIgnoreCase(dateType)) {
                apiTask.setEndTime(DateUtil.getDateFormat(localDateTime.plusDays(space)));
            } else if (DateType.WEEKS.name().equalsIgnoreCase(dateType)) {
                apiTask.setEndTime(DateUtil.getDateFormat(localDateTime.plusWeeks(space)));
            } else if (DateType.MONTHS.name().equalsIgnoreCase(dateType)) {
                apiTask.setEndTime(DateUtil.getDateFormat(localDateTime.plusMonths(space)));
            } else if (DateType.YEARS.name().equalsIgnoreCase(dateType)) {
                apiTask.setEndTime(DateUtil.getDateFormat(localDateTime.plusYears(space)));
            }
            // Check whether endTime is later than the current time; if so, clamp it to two minutes before now
            if(LocalDateTime.now().isBefore(DateUtil.getLocalDateTime(apiTask.getEndTime()))){
                apiTask.setEndTime(DateUtil.getDateFormat(LocalDateTime.now().plusMinutes(-2L)));
            }
        }
        apiTask.setTaskId(idWorker.nextId());
        apiTask.setTaskName(spApiTask.getTaskName());
        apiTask.setExecuteStatus(StatusEnum.INIT.getStatus());
        this.addTask(apiTask);
    }

    public static String getTaskTableName() {
        // Update the table name from environment. It is expected to be set by CDK script on Lambda.
        final String tableName = System.getenv("DYNAMODB_TASK_TABLE");
        if (tableName != null) {
            TABLE_NAME = tableName;
        }
        return TABLE_NAME;
    }
}
[ "\"DYNAMODB_TASK_TABLE\"" ]
[]
[ "DYNAMODB_TASK_TABLE" ]
[]
["DYNAMODB_TASK_TABLE"]
java
1
0
warbler/test_message_views.py
"""Message View tests.""" # run these tests like: # # FLASK_ENV=production python -m unittest test_message_views.py import os from unittest import TestCase from config import get_config_ipdb_break import ipdb # if get_config_ipdb_break(): ipdb.set_trace() from models import db, Message, User, Likes # BEFORE we import our app, let's set an environmental variable # to use a different database for tests (we need to do this # before we import our app, since that will have already # connected to the database # os.environ['DATABASE_URL'] = "postgresql:///warbler-test" # Now we can import app from app import app, CURR_USER_KEY # Create our tables (we do this here, so we only create the tables # once for all tests --- in each test, we'll delete the data # and create fresh new clean test data db.create_all() # Don't have WTForms use CSRF at all, since it's a pain to test app.config['WTF_CSRF_ENABLED'] = False class MessageViewTestCase(TestCase): """Test views for messages.""" def setUp(self): """Create test client, add sample data.""" # if get_config_ipdb_break(): ipdb.set_trace() Likes.query.delete() Message.query.delete() User.query.delete() self.client = app.test_client() self.testuser = User.signup(username="testuser", email="[email protected]", password="testuser", image_url=None) db.session.commit() def test_unauthorized(self): """Can access message page?""" # if get_config_ipdb_break(): ipdb.set_trace() # Since we need to change the session to mimic logging in, # we need to use the changing-session trick: with self.client as c: resp = c.get("/messages/new") # can the page be found? self.assertEqual(resp.status_code, 302) html = resp.get_data(as_text=True) # is there authorization? self.assertIn('You should be redirected automatically to target URL:', html) def test_unauthorized_post(self): """Can't post to message page?""" # Since we need to change the session to mimic logging in, # we need to use the changing-session trick: with self.client as c: # if get_config_ipdb_break(): ipdb.set_trace() resp = c.post("/messages/new", data={"text": "Hello"}) self.assertEqual(resp.status_code, 302) html = resp.get_data(as_text=True) # is there authorization? self.assertIn('You should be redirected automatically to target URL:', html) def test_add_message(self): """Can user post a message?""" # Since we need to change the session to mimic logging in, # we need to use the changing-session trick: with self.client as c: with c.session_transaction() as sess: sess[CURR_USER_KEY] = self.testuser.id # Now, that session setting is saved, so we can have # the rest of ours test resp = c.post("/messages/new", data={"text": "Hello"}) # Make sure it redirects self.assertEqual(resp.status_code, 302) msg = Message.query.one() self.assertEqual(msg.text, "Hello") def test_list_message(self): """Can view user's posted message?""" # Since we need to change the session to mimic logging in, # we need to use the changing-session trick: with self.client as c: with c.session_transaction() as sess: sess[CURR_USER_KEY] = self.testuser.id # Now, that session setting is saved, so we can have # the rest of ours test if get_config_ipdb_break(): ipdb.set_trace() resp = c.get(f"/users/{self.testuser.id}") html = resp.get_data(as_text=True) self.assertNotIn('Hello', html) resp = c.post("/messages/new", data={"text": "Hello"}) html = resp.get_data(as_text=True) self.assertEqual(resp.status_code, 302) resp = c.get(f"/users/{self.testuser.id}") html = resp.get_data(as_text=True) self.assertIn('Hello', html)
[]
[]
[ "DATABASE_URL" ]
[]
["DATABASE_URL"]
python
1
0
app.py
from tensorflow.keras.models import load_model from tensorflow.python.keras.backend import set_session import tensorflow as tf from flask import Flask, request, render_template, jsonify, send_file, url_for import os from PIL import Image, ImageOps import numpy as np import math import time import base64 app = Flask(__name__) app.static_folder = 'static' dir_path = os.path.dirname(os.path.realpath(__file__)) os.environ["CUDA_VISIBLE_DEVICES"] = "-1" MODEL_PATH = os.path.join(os.getcwd(), 'models', 'keras_model.h5') sess = tf.Session() graph = tf.compat.v1.get_default_graph() set_session(sess) model = load_model(MODEL_PATH) def read_labels(): x = None labels = list() with open(os.path.join(os.getcwd(), 'models', 'labels.txt'), 'r') as f: x = [line.rstrip('\n') for line in f] for item in x: print("Item : {}".format(item)) split_label = item.split(" ") label_index = split_label[0] merger = "" for i in range(1, len(split_label)): merger += split_label[i] + " " print(("Merger Index : {} ".format(merger))) labels.append({"id": label_index, "label": merger[0:len(merger)-1]}) return labels @app.route('/detect', methods=['POST']) def post_example(): global sess global graph with graph.as_default(): # perform the prediction set_session(sess) np.set_printoptions(suppress=True) if not request.headers.get('Content-type') is None: if(request.headers.get('Content-type').split(';')[0] == 'multipart/form-data'): if 'image' in request.files.keys(): print("inside get image statement") file = request.files['image'] img = Image.open(file.stream) # PIL image uploaded_img_path = os.path.join(os.getcwd(), 'static', 'uploads', file.filename) print("Upload Path : {}".format(uploaded_img_path)) img.save(uploaded_img_path) data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32) print("Image path {}".format(uploaded_img_path)) image = Image.open(uploaded_img_path) #resize the image to a 224x224 with the same strategy as in TM2: #resizing the image to be at least 224x224 and then cropping from the center size = (224, 224) image = ImageOps.fit(image, size, Image.ANTIALIAS) image = image.convert('RGB') #turn the image into a numpy array image_array = np.asarray(image) # Normalize the image normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1 # Load the image into the array data[0] = normalized_image_array #load the labels labels = read_labels() print(labels[0]) # run the inference prediction = model.predict(data) print(prediction[0]) scores = list() for i in range(0, len(prediction[0])): print("Id : {}".format(i)) print("Score: {}".format(float(prediction[0][i]))) print("label: {}".format(labels[i]['label'])) scores.append({"id": i, "label": float(prediction[0][i]), "score": labels[i]['label']}) result = { "inference": scores } return jsonify(result), 200 else: return jsonify(get_status_code("Invalid body", "Please provide valid format for Image 2")), 415 elif(request.headers.get('Content-type') == 'application/json'): if(request.data == b''): return jsonify(get_status_code("Invalid body", "Please provide valid format for Image")), 415 else: body = request.get_json() if "image_string" in body.keys(): str_image = body['image_string'] # str_image = img_string.split(',')[1] imgdata = base64.b64decode(str_image) uploaded_img_path = os.path.join(os.getcwd(), 'static', 'uploads', str(time.time())+".jpg") # img = "uploads\\" + str(int(round(time.time() * 1000))) + "image_file.jpg" with open(uploaded_img_path, 'wb') as f: f.write(imgdata) data = np.ndarray(shape=(1, 224, 224, 3), dtype=np.float32) 
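# Note (editor comment, describing the code that follows): the base64/JSON branch repeats the multipart
# preprocessing above — open the saved image, fit it to 224x224, normalize pixel values to [-1, 1],
# place it in the 1x224x224x3 float32 batch, and run model.predict against the loaded labels.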
print("Image path {}".format(uploaded_img_path)) image = Image.open(uploaded_img_path) #resize the image to a 224x224 with the same strategy as in TM2: #resizing the image to be at least 224x224 and then cropping from the center size = (224, 224) image = ImageOps.fit(image, size, Image.ANTIALIAS) image = image.convert('RGB') #turn the image into a numpy array image_array = np.asarray(image) # Normalize the image normalized_image_array = (image_array.astype(np.float32) / 127.0) - 1 # Load the image into the array data[0] = normalized_image_array #load the labels labels = read_labels() print(labels[0]) # run the inference prediction = model.predict(data) print(prediction[0]) scores = list() for i in range(0, len(prediction[0])): scores.append({"id": i, "label": float(prediction[0][i]), "score": labels[i]['label']}) result = { "inference": scores } return jsonify(result), 200 else: return jsonify(get_status_code("Invalid header", "Please provide correct header with correct data")), 415 else: return jsonify(get_status_code("Invalid Header", "Please provide valid header")), 401 def get_status_code(argument, message): res = { "error": { "code": argument, "message": message } } return res if __name__=="__main__": port = int(os.environ.get('PORT', 5000)) app.run(host="0.0.0.0", port=port)
[]
[]
[ "PORT", "CUDA_VISIBLE_DEVICES" ]
[]
["PORT", "CUDA_VISIBLE_DEVICES"]
python
2
0
scripts/lib/zulip_tools.py
#!/usr/bin/env python3 import argparse import datetime import functools import hashlib import logging import os import pwd import re import shlex import shutil import subprocess import sys import tempfile import time import json import uuid import configparser from typing import Sequence, Set, Any, Dict, List DEPLOYMENTS_DIR = "/home/zulip/deployments" LOCK_DIR = os.path.join(DEPLOYMENTS_DIR, "lock") TIMESTAMP_FORMAT = '%Y-%m-%d-%H-%M-%S' # Color codes OKBLUE = '\033[94m' OKGREEN = '\033[92m' WARNING = '\033[93m' FAIL = '\033[91m' ENDC = '\033[0m' BLACKONYELLOW = '\x1b[0;30;43m' WHITEONRED = '\x1b[0;37;41m' BOLDRED = '\x1B[1;31m' GREEN = '\x1b[32m' YELLOW = '\x1b[33m' BLUE = '\x1b[34m' MAGENTA = '\x1b[35m' CYAN = '\x1b[36m' def overwrite_symlink(src: str, dst: str) -> None: while True: tmp = tempfile.mktemp( prefix='.' + os.path.basename(dst) + '.', dir=os.path.dirname(dst)) try: os.symlink(src, tmp) except FileExistsError: continue break try: os.rename(tmp, dst) except Exception: os.remove(tmp) raise def parse_cache_script_args(description: str) -> argparse.Namespace: # Keep this in sync with clean_unused_caches in provision_inner.py parser = argparse.ArgumentParser(description=description) parser.add_argument( "--threshold", dest="threshold_days", type=int, default=14, nargs="?", metavar="<days>", help="Any cache which is not in " "use by a deployment not older than threshold days(current " "installation in dev) and older than threshold days will be " "deleted. (defaults to 14)") parser.add_argument( "--dry-run", dest="dry_run", action="store_true", help="If specified then script will only print the caches " "that it will delete/keep back. It will not delete any cache.") parser.add_argument( "--verbose", dest="verbose", action="store_true", help="If specified then script will print a detailed report " "of what is being will deleted/kept back.") parser.add_argument( "--no-print-headings", dest="no_headings", action="store_true", help="If specified then script will not print headings for " "what will be deleted/kept back.") args = parser.parse_args() args.verbose |= args.dry_run # Always print a detailed report in case of dry run. return args def get_deploy_root() -> str: return os.path.realpath( os.path.normpath(os.path.join(os.path.dirname(__file__), "..", "..")) ) def get_deployment_version(extract_path: str) -> str: version = '0.0.0' for item in os.listdir(extract_path): item_path = os.path.join(extract_path, item) if item.startswith('zulip-server') and os.path.isdir(item_path): with open(os.path.join(item_path, 'version.py')) as f: result = re.search('ZULIP_VERSION = "(.*)"', f.read()) if result: version = result.groups()[0] break return version def is_invalid_upgrade(current_version: str, new_version: str) -> bool: if new_version > '1.4.3' and current_version <= '1.3.10': return True return False def subprocess_text_output(args: Sequence[str]) -> str: return subprocess.check_output(args, universal_newlines=True).strip() def get_zulip_pwent() -> pwd.struct_passwd: deploy_root_uid = os.stat(get_deploy_root()).st_uid if deploy_root_uid != 0: return pwd.getpwuid(deploy_root_uid) # In the case that permissions got messed up and the deployment # directory is unexpectedly owned by root, we fallback to the # `zulip` user as that's the correct value in production. return pwd.getpwnam("zulip") def su_to_zulip(save_suid: bool = False) -> None: """Warning: su_to_zulip assumes that the zulip checkout is owned by the zulip user (or whatever normal user is running the Zulip installation). 
It should never be run from the installer or other production contexts before /home/zulip/deployments/current is created.""" pwent = get_zulip_pwent() os.setgid(pwent.pw_gid) if save_suid: os.setresuid(pwent.pw_uid, pwent.pw_uid, os.getuid()) else: os.setuid(pwent.pw_uid) os.environ['HOME'] = pwent.pw_dir def make_deploy_path() -> str: timestamp = datetime.datetime.now().strftime(TIMESTAMP_FORMAT) return os.path.join(DEPLOYMENTS_DIR, timestamp) TEMPLATE_DATABASE_DIR = "test-backend/databases" def get_dev_uuid_var_path(create_if_missing: bool = False) -> str: zulip_path = get_deploy_root() uuid_path = os.path.join(os.path.realpath(os.path.dirname(zulip_path)), ".zulip-dev-uuid") if os.path.exists(uuid_path): with open(uuid_path) as f: zulip_uuid = f.read().strip() else: if create_if_missing: zulip_uuid = str(uuid.uuid4()) # We need root access here, since the path will be under /srv/ in the # development environment. run_as_root(["sh", "-c", 'echo "$1" > "$2"', "-", zulip_uuid, uuid_path]) else: raise AssertionError("Missing UUID file; please run tools/provision!") result_path = os.path.join(zulip_path, "var", zulip_uuid) os.makedirs(result_path, exist_ok=True) return result_path def get_deployment_lock(error_rerun_script: str) -> None: start_time = time.time() got_lock = False while time.time() - start_time < 300: try: os.mkdir(LOCK_DIR) got_lock = True break except OSError: print(WARNING + "Another deployment in progress; waiting for lock... " + "(If no deployment is running, rmdir %s)" % (LOCK_DIR,) + ENDC) sys.stdout.flush() time.sleep(3) if not got_lock: print(FAIL + "Deployment already in progress. Please run\n" + " %s\n" % (error_rerun_script,) + "manually when the previous deployment finishes, or run\n" + " rmdir %s\n" % (LOCK_DIR,) + "if the previous deployment crashed." + ENDC) sys.exit(1) def release_deployment_lock() -> None: shutil.rmtree(LOCK_DIR) def run(args: Sequence[str], **kwargs: Any) -> None: # Output what we're doing in the `set -x` style print("+ %s" % (" ".join(map(shlex.quote, args)),)) try: subprocess.check_call(args, **kwargs) except subprocess.CalledProcessError: print() print(WHITEONRED + "Error running a subcommand of %s: %s" % (sys.argv[0], " ".join(map(shlex.quote, args))) + ENDC) print(WHITEONRED + "Actual error output for the subcommand is just above this." + ENDC) print() raise def log_management_command(cmd: str, log_path: str) -> None: log_dir = os.path.dirname(log_path) if not os.path.exists(log_dir): os.makedirs(log_dir) formatter = logging.Formatter("%(asctime)s: %(message)s") file_handler = logging.FileHandler(log_path) file_handler.setFormatter(formatter) logger = logging.getLogger("zulip.management") logger.addHandler(file_handler) logger.setLevel(logging.INFO) logger.info("Ran '%s'", cmd) def get_environment() -> str: if os.path.exists(DEPLOYMENTS_DIR): return "prod" return "dev" def get_recent_deployments(threshold_days: int) -> Set[str]: # Returns a list of deployments not older than threshold days # including `/root/zulip` directory if it exists. recent = set() threshold_date = datetime.datetime.now() - datetime.timedelta(days=threshold_days) for dir_name in os.listdir(DEPLOYMENTS_DIR): target_dir = os.path.join(DEPLOYMENTS_DIR, dir_name) if not os.path.isdir(target_dir): # Skip things like uwsgi sockets, symlinks, etc. 
continue if not os.path.exists(os.path.join(target_dir, "zerver")): # Skip things like "lock" that aren't actually a deployment directory continue try: date = datetime.datetime.strptime(dir_name, TIMESTAMP_FORMAT) if date >= threshold_date: recent.add(target_dir) except ValueError: # Always include deployments whose name is not in the format of a timestamp. recent.add(target_dir) # If it is a symlink then include the target as well. if os.path.islink(target_dir): recent.add(os.path.realpath(target_dir)) if os.path.exists("/root/zulip"): recent.add("/root/zulip") return recent def get_threshold_timestamp(threshold_days: int) -> int: # Given number of days, this function returns timestamp corresponding # to the time prior to given number of days. threshold = datetime.datetime.now() - datetime.timedelta(days=threshold_days) threshold_timestamp = int(time.mktime(threshold.utctimetuple())) return threshold_timestamp def get_caches_to_be_purged(caches_dir: str, caches_in_use: Set[str], threshold_days: int) -> Set[str]: # Given a directory containing caches, a list of caches in use # and threshold days, this function return a list of caches # which can be purged. Remove the cache only if it is: # 1: Not in use by the current installation(in dev as well as in prod). # 2: Not in use by a deployment not older than `threshold_days`(in prod). # 3: Not in use by '/root/zulip'. # 4: Not older than `threshold_days`. caches_to_purge = set() threshold_timestamp = get_threshold_timestamp(threshold_days) for cache_dir_base in os.listdir(caches_dir): cache_dir = os.path.join(caches_dir, cache_dir_base) if cache_dir in caches_in_use: # Never purge a cache which is in use. continue if os.path.getctime(cache_dir) < threshold_timestamp: caches_to_purge.add(cache_dir) return caches_to_purge def purge_unused_caches( caches_dir: str, caches_in_use: Set[str], cache_type: str, args: argparse.Namespace ) -> None: all_caches = {os.path.join(caches_dir, cache) for cache in os.listdir(caches_dir)} caches_to_purge = get_caches_to_be_purged(caches_dir, caches_in_use, args.threshold_days) caches_to_keep = all_caches - caches_to_purge may_be_perform_purging( caches_to_purge, caches_to_keep, cache_type, args.dry_run, args.verbose, args.no_headings) if args.verbose: print("Done!") def generate_sha1sum_emoji(zulip_path: str) -> str: ZULIP_EMOJI_DIR = os.path.join(zulip_path, 'tools', 'setup', 'emoji') sha = hashlib.sha1() filenames = ['emoji_map.json', 'build_emoji', 'emoji_setup_utils.py', 'emoji_names.py'] for filename in filenames: file_path = os.path.join(ZULIP_EMOJI_DIR, filename) with open(file_path, 'rb') as reader: sha.update(reader.read()) # Take into account the version of `emoji-datasource-google` package # while generating success stamp. 
PACKAGE_FILE_PATH = os.path.join(zulip_path, 'package.json') with open(PACKAGE_FILE_PATH) as fp: parsed_package_file = json.load(fp) dependency_data = parsed_package_file['dependencies'] if 'emoji-datasource-google' in dependency_data: with open(os.path.join(zulip_path, "yarn.lock")) as fp: (emoji_datasource_version,) = re.findall( r"^emoji-datasource-google@" + re.escape(dependency_data["emoji-datasource-google"]) + r':\n version "(.*)"', fp.read(), re.M, ) else: emoji_datasource_version = "0" sha.update(emoji_datasource_version.encode()) return sha.hexdigest() def may_be_perform_purging( dirs_to_purge: Set[str], dirs_to_keep: Set[str], dir_type: str, dry_run: bool, verbose: bool, no_headings: bool, ) -> None: if dry_run: print("Performing a dry run...") if not no_headings: print("Cleaning unused %ss..." % (dir_type,)) for directory in dirs_to_purge: if verbose: print("Cleaning unused %s: %s" % (dir_type, directory)) if not dry_run: run_as_root(["rm", "-rf", directory]) for directory in dirs_to_keep: if verbose: print("Keeping used %s: %s" % (dir_type, directory)) @functools.lru_cache(None) def parse_os_release() -> Dict[str, str]: """ Example of the useful subset of the data: { 'ID': 'ubuntu', 'VERSION_ID': '18.04', 'NAME': 'Ubuntu', 'VERSION': '18.04.3 LTS (Bionic Beaver)', 'PRETTY_NAME': 'Ubuntu 18.04.3 LTS', } VERSION_CODENAME (e.g. 'bionic') is nice and human-readable, but we avoid using it, as it is not available on RHEL-based platforms. """ distro_info = {} # type: Dict[str, str] with open('/etc/os-release') as fp: for line in fp: line = line.strip() if not line or line.startswith('#'): # The line may be blank or a comment, see: # https://www.freedesktop.org/software/systemd/man/os-release.html continue k, v = line.split('=', 1) [distro_info[k]] = shlex.split(v) return distro_info @functools.lru_cache(None) def os_families() -> Set[str]: """ Known families: debian (includes: debian, ubuntu) ubuntu (includes: ubuntu) fedora (includes: fedora, rhel, centos) rhel (includes: rhel, centos) centos (includes: centos) """ distro_info = parse_os_release() return {distro_info["ID"], *distro_info.get("ID_LIKE", "").split()} def files_and_string_digest(filenames: List[str], extra_strings: List[str]) -> str: # see is_digest_obsolete for more context sha1sum = hashlib.sha1() for fn in filenames: with open(fn, 'rb') as file_to_hash: sha1sum.update(file_to_hash.read()) for extra_string in extra_strings: sha1sum.update(extra_string.encode("utf-8")) return sha1sum.hexdigest() def is_digest_obsolete(hash_name: str, filenames: List[str], extra_strings: List[str]=[]) -> bool: ''' In order to determine if we need to run some process, we calculate a digest of the important files and strings whose respective contents or values may indicate such a need. filenames = files we should hash the contents of extra_strings = strings we should hash directly Grep for callers to see examples of how this is used. To elaborate on extra_strings, they will typically be things like: - package versions (that we import) - settings values (that we stringify with json, deterministically) ''' last_hash_path = os.path.join(get_dev_uuid_var_path(), hash_name) try: with open(last_hash_path) as f: old_hash = f.read() except FileNotFoundError: # This is normal for a fresh checkout--a missing # digest is an obsolete digest. 
return True new_hash = files_and_string_digest(filenames, extra_strings) return new_hash != old_hash def write_new_digest(hash_name: str, filenames: List[str], extra_strings: List[str]=[]) -> None: hash_path = os.path.join(get_dev_uuid_var_path(), hash_name) new_hash = files_and_string_digest(filenames, extra_strings) with open(hash_path, 'w') as f: f.write(new_hash) # Be a little verbose here--our callers ensure we # only write new digests when things have changed, and # making this system more transparent to developers # can help them troubleshoot provisioning glitches. print('New digest written to: ' + hash_path) def is_root() -> bool: if 'posix' in os.name and os.geteuid() == 0: return True return False def run_as_root(args: List[str], **kwargs: Any) -> None: sudo_args = kwargs.pop('sudo_args', []) if not is_root(): args = ['sudo'] + sudo_args + ['--'] + args run(args, **kwargs) def assert_not_running_as_root() -> None: script_name = os.path.abspath(sys.argv[0]) if is_root(): pwent = get_zulip_pwent() msg = ("{shortname} should not be run as root. Use `su {user}` to switch to the 'zulip'\n" "user before rerunning this, or use \n su {user} -c '{name} ...'\n" "to switch users and run this as a single command.").format( name=script_name, shortname=os.path.basename(script_name), user=pwent.pw_name) print(msg) sys.exit(1) def assert_running_as_root(strip_lib_from_paths: bool=False) -> None: script_name = os.path.abspath(sys.argv[0]) # Since these Python scripts are run inside a thin shell wrapper, # we need to replace the paths in order to ensure we instruct # users to (re)run the right command. if strip_lib_from_paths: script_name = script_name.replace("scripts/lib/upgrade", "scripts/upgrade") if not is_root(): print("{} must be run as root.".format(script_name)) sys.exit(1) def get_config( config_file: configparser.RawConfigParser, section: str, key: str, default_value: str = "", ) -> str: if config_file.has_option(section, key): return config_file.get(section, key) return default_value def get_config_file() -> configparser.RawConfigParser: config_file = configparser.RawConfigParser() config_file.read("/etc/zulip/zulip.conf") return config_file def get_deploy_options(config_file: configparser.RawConfigParser) -> List[str]: return get_config(config_file, 'deployment', 'deploy_options', "").strip().split() def get_or_create_dev_uuid_var_path(path: str) -> str: absolute_path = '{}/{}'.format(get_dev_uuid_var_path(), path) os.makedirs(absolute_path, exist_ok=True) return absolute_path def is_vagrant_env_host(path: str) -> bool: return '.vagrant' in os.listdir(path) if __name__ == '__main__': cmd = sys.argv[1] if cmd == 'make_deploy_path': print(make_deploy_path()) elif cmd == 'get_dev_uuid': print(get_dev_uuid_var_path())
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
examples/train_transd_FB15K237.py
import config
import models
import tensorflow as tf
import numpy as np
import os
from sys import argv

os.environ['CUDA_VISIBLE_DEVICES']='0'

#Input training files from benchmarks/FB15K/ folder.
con = config.Config()
#True: Input test files from the same folder.
con.set_in_path("./benchmarks/FB15K237/")
con.set_test_link_prediction(True)
# con.set_test_triple_classification(True)
con.set_work_threads(8)
con.set_train_times(1000)
con.set_nbatches(100)
con.set_alpha(1.0)
con.set_margin(4.0)
con.set_bern(1)
con.set_dimension(200)
con.set_ent_neg_rate(25)
con.set_rel_neg_rate(0)
con.set_opt_method("SGD")
#Models will be exported via tf.Saver() automatically.
con.set_export_files("./res/model.vec.tf", 0)
#Model parameters will be exported to json files automatically.
con.set_out_files("./res/embedding.vec.json")
#Initialize experimental settings.
con.init()
#Set the knowledge embedding model
con.set_model(models.TransD)
#Train the model.
con.run()
#To test models after training needs "set_test_flag(True)".
con.test()
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
vendor/golang.org/x/tools/go/packages/golist.go
// Copyright 2018 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package packages import ( "bytes" "encoding/json" "fmt" "go/types" "io/ioutil" "log" "os" "os/exec" "path" "path/filepath" "reflect" "regexp" "strconv" "strings" "sync" "time" "unicode" "golang.org/x/tools/go/internal/packagesdriver" "golang.org/x/tools/internal/gopathwalk" "golang.org/x/tools/internal/semver" ) // debug controls verbose logging. var debug, _ = strconv.ParseBool(os.Getenv("GOPACKAGESDEBUG")) // A goTooOldError reports that the go command // found by exec.LookPath is too old to use the new go list behavior. type goTooOldError struct { error } // responseDeduper wraps a driverResponse, deduplicating its contents. type responseDeduper struct { seenRoots map[string]bool seenPackages map[string]*Package dr *driverResponse } // init fills in r with a driverResponse. func (r *responseDeduper) init(dr *driverResponse) { r.dr = dr r.seenRoots = map[string]bool{} r.seenPackages = map[string]*Package{} for _, pkg := range dr.Packages { r.seenPackages[pkg.ID] = pkg } for _, root := range dr.Roots { r.seenRoots[root] = true } } func (r *responseDeduper) addPackage(p *Package) { if r.seenPackages[p.ID] != nil { return } r.seenPackages[p.ID] = p r.dr.Packages = append(r.dr.Packages, p) } func (r *responseDeduper) addRoot(id string) { if r.seenRoots[id] { return } r.seenRoots[id] = true r.dr.Roots = append(r.dr.Roots, id) } // goInfo contains global information from the go tool. type goInfo struct { rootDirs map[string]string env goEnv } type goEnv struct { modulesOn bool } func determineEnv(cfg *Config) goEnv { buf, err := invokeGo(cfg, "env", "GOMOD") if err != nil { return goEnv{} } gomod := bytes.TrimSpace(buf.Bytes()) env := goEnv{} env.modulesOn = len(gomod) > 0 return env } // goListDriver uses the go list command to interpret the patterns and produce // the build system package structure. // See driver for more details. func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { var sizes types.Sizes var sizeserr error var sizeswg sync.WaitGroup if cfg.Mode&NeedTypesSizes != 0 || cfg.Mode&NeedTypes != 0 { sizeswg.Add(1) go func() { sizes, sizeserr = getSizes(cfg) sizeswg.Done() }() } // start fetching rootDirs var info goInfo var rootDirsReady, envReady = make(chan struct{}), make(chan struct{}) go func() { info.rootDirs = determineRootDirs(cfg) close(rootDirsReady) }() go func() { info.env = determineEnv(cfg) close(envReady) }() getGoInfo := func() *goInfo { <-rootDirsReady <-envReady return &info } // always pass getGoInfo to golistDriver golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) { return golistDriver(cfg, getGoInfo, patterns...) } // Determine files requested in contains patterns var containFiles []string var packagesNamed []string restPatterns := make([]string, 0, len(patterns)) // Extract file= and other [querytype]= patterns. Report an error if querytype // doesn't exist. 
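// Editor note on the loop below: the extractQueries label lets the default case treat an
// unrecognized "key=value" token that contains non-lowercase characters as an ordinary pattern
// (continue extractQueries) instead of rejecting it as an invalid query type.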
extractQueries: for _, pattern := range patterns { eqidx := strings.Index(pattern, "=") if eqidx < 0 { restPatterns = append(restPatterns, pattern) } else { query, value := pattern[:eqidx], pattern[eqidx+len("="):] switch query { case "file": containFiles = append(containFiles, value) case "pattern": restPatterns = append(restPatterns, value) case "iamashamedtousethedisabledqueryname": packagesNamed = append(packagesNamed, value) case "": // not a reserved query restPatterns = append(restPatterns, pattern) default: for _, rune := range query { if rune < 'a' || rune > 'z' { // not a reserved query restPatterns = append(restPatterns, pattern) continue extractQueries } } // Reject all other patterns containing "=" return nil, fmt.Errorf("invalid query type %q in query pattern %q", query, pattern) } } } response := &responseDeduper{} var err error // See if we have any patterns to pass through to go list. Zero initial // patterns also requires a go list call, since it's the equivalent of // ".". if len(restPatterns) > 0 || len(patterns) == 0 { dr, err := golistDriver(cfg, restPatterns...) if err != nil { return nil, err } response.init(dr) } else { response.init(&driverResponse{}) } sizeswg.Wait() if sizeserr != nil { return nil, sizeserr } // types.SizesFor always returns nil or a *types.StdSizes response.dr.Sizes, _ = sizes.(*types.StdSizes) var containsCandidates []string if len(containFiles) != 0 { if err := runContainsQueries(cfg, golistDriver, response, containFiles, getGoInfo); err != nil { return nil, err } } if len(packagesNamed) != 0 { if err := runNamedQueries(cfg, golistDriver, response, packagesNamed); err != nil { return nil, err } } modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) if err != nil { return nil, err } if len(containFiles) > 0 { containsCandidates = append(containsCandidates, modifiedPkgs...) containsCandidates = append(containsCandidates, needPkgs...) } if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs, getGoInfo); err != nil { return nil, err } // Check candidate packages for containFiles. if len(containFiles) > 0 { for _, id := range containsCandidates { pkg, ok := response.seenPackages[id] if !ok { response.addPackage(&Package{ ID: id, Errors: []Error{ { Kind: ListError, Msg: fmt.Sprintf("package %s expected but not seen", id), }, }, }) continue } for _, f := range containFiles { for _, g := range pkg.GoFiles { if sameFile(f, g) { response.addRoot(id) } } } } } return response.dr, nil } func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string, getGoInfo func() *goInfo) error { if len(pkgs) == 0 { return nil } drivercfg := *cfg if getGoInfo().env.modulesOn { drivercfg.BuildFlags = append(drivercfg.BuildFlags, "-mod=readonly") } dr, err := driver(&drivercfg, pkgs...) if err != nil { return err } for _, pkg := range dr.Packages { response.addPackage(pkg) } _, needPkgs, err := processGolistOverlay(cfg, response, getGoInfo) if err != nil { return err } if err := addNeededOverlayPackages(cfg, driver, response, needPkgs, getGoInfo); err != nil { return err } return nil } func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, goInfo func() *goInfo) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. fdir := filepath.Dir(query) // Pass absolute path of directory to go list so that it knows to treat it as a directory, // not a package path. 
pattern, err := filepath.Abs(fdir) if err != nil { return fmt.Errorf("could not determine absolute path of file= query path %q: %v", query, err) } dirResponse, err := driver(cfg, pattern) if err != nil || (len(dirResponse.Packages) == 1 && len(dirResponse.Packages[0].Errors) == 1) { // There was an error loading the package. Try to load the file as an ad-hoc package. // Usually the error will appear in a returned package, but may not if we're in modules mode // and the ad-hoc is located outside a module. var queryErr error dirResponse, queryErr = driver(cfg, query) if queryErr != nil { // Return the original error if the attempt to fall back failed. return err } // Special case to handle issue #33482: // If this is a file= query for ad-hoc packages where the file only exists on an overlay, // and exists outside of a module, add the file in for the package. if len(dirResponse.Packages) == 1 && (dirResponse.Packages[0].ID == "command-line-arguments" || dirResponse.Packages[0].PkgPath == filepath.ToSlash(query)) { if len(dirResponse.Packages[0].GoFiles) == 0 { filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath // TODO(matloob): check if the file is outside of a root dir? for path := range cfg.Overlay { if path == filename { dirResponse.Packages[0].Errors = nil dirResponse.Packages[0].GoFiles = []string{path} dirResponse.Packages[0].CompiledGoFiles = []string{path} } } } } } isRoot := make(map[string]bool, len(dirResponse.Roots)) for _, root := range dirResponse.Roots { isRoot[root] = true } for _, pkg := range dirResponse.Packages { // Add any new packages to the main set // We don't bother to filter packages that will be dropped by the changes of roots, // that will happen anyway during graph construction outside this function. // Over-reporting packages is not a problem. response.addPackage(pkg) // if the package was not a root one, it cannot have the file if !isRoot[pkg.ID] { continue } for _, pkgFile := range pkg.GoFiles { if filepath.Base(query) == filepath.Base(pkgFile) { response.addRoot(pkg.ID) break } } } } return nil } // modCacheRegexp splits a path in a module cache into module, module version, and package. var modCacheRegexp = regexp.MustCompile(`(.*)@([^/\\]*)(.*)`) func runNamedQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { // calling `go env` isn't free; bail out if there's nothing to do. if len(queries) == 0 { return nil } // Determine which directories are relevant to scan. roots, modRoot, err := roots(cfg) if err != nil { return err } // Scan the selected directories. Simple matches, from GOPATH/GOROOT // or the local module, can simply be "go list"ed. Matches from the // module cache need special treatment. var matchesMu sync.Mutex var simpleMatches, modCacheMatches []string add := func(root gopathwalk.Root, dir string) { // Walk calls this concurrently; protect the result slices. matchesMu.Lock() defer matchesMu.Unlock() path := dir if dir != root.Path { path = dir[len(root.Path)+1:] } if pathMatchesQueries(path, queries) { switch root.Type { case gopathwalk.RootModuleCache: modCacheMatches = append(modCacheMatches, path) case gopathwalk.RootCurrentModule: // We'd need to read go.mod to find the full // import path. Relative's easier. rel, err := filepath.Rel(cfg.Dir, dir) if err != nil { // This ought to be impossible, since // we found dir in the current module. 
panic(err) } simpleMatches = append(simpleMatches, "./"+rel) case gopathwalk.RootGOPATH, gopathwalk.RootGOROOT: simpleMatches = append(simpleMatches, path) } } } startWalk := time.Now() gopathwalk.Walk(roots, add, gopathwalk.Options{ModulesEnabled: modRoot != "", Debug: debug}) cfg.Logf("%v for walk", time.Since(startWalk)) // Weird special case: the top-level package in a module will be in // whatever directory the user checked the repository out into. It's // more reasonable for that to not match the package name. So, if there // are any Go files in the mod root, query it just to be safe. if modRoot != "" { rel, err := filepath.Rel(cfg.Dir, modRoot) if err != nil { panic(err) // See above. } files, err := ioutil.ReadDir(modRoot) for _, f := range files { if strings.HasSuffix(f.Name(), ".go") { simpleMatches = append(simpleMatches, rel) break } } } addResponse := func(r *driverResponse) { for _, pkg := range r.Packages { response.addPackage(pkg) for _, name := range queries { if pkg.Name == name { response.addRoot(pkg.ID) break } } } } if len(simpleMatches) != 0 { resp, err := driver(cfg, simpleMatches...) if err != nil { return err } addResponse(resp) } // Module cache matches are tricky. We want to avoid downloading new // versions of things, so we need to use the ones present in the cache. // go list doesn't accept version specifiers, so we have to write out a // temporary module, and do the list in that module. if len(modCacheMatches) != 0 { // Collect all the matches, deduplicating by major version // and preferring the newest. type modInfo struct { mod string major string } mods := make(map[modInfo]string) var imports []string for _, modPath := range modCacheMatches { matches := modCacheRegexp.FindStringSubmatch(modPath) mod, ver := filepath.ToSlash(matches[1]), matches[2] importPath := filepath.ToSlash(filepath.Join(matches[1], matches[3])) major := semver.Major(ver) if prevVer, ok := mods[modInfo{mod, major}]; !ok || semver.Compare(ver, prevVer) > 0 { mods[modInfo{mod, major}] = ver } imports = append(imports, importPath) } // Build the temporary module. var gomod bytes.Buffer gomod.WriteString("module modquery\nrequire (\n") for mod, version := range mods { gomod.WriteString("\t" + mod.mod + " " + version + "\n") } gomod.WriteString(")\n") tmpCfg := *cfg // We're only trying to look at stuff in the module cache, so // disable the network. This should speed things up, and has // prevented errors in at least one case, #28518. tmpCfg.Env = append(append([]string{"GOPROXY=off"}, cfg.Env...)) var err error tmpCfg.Dir, err = ioutil.TempDir("", "gopackages-modquery") if err != nil { return err } defer os.RemoveAll(tmpCfg.Dir) if err := ioutil.WriteFile(filepath.Join(tmpCfg.Dir, "go.mod"), gomod.Bytes(), 0777); err != nil { return fmt.Errorf("writing go.mod for module cache query: %v", err) } // Run the query, using the import paths calculated from the matches above. resp, err := driver(&tmpCfg, imports...) if err != nil { return fmt.Errorf("querying module cache matches: %v", err) } addResponse(resp) } return nil } func getSizes(cfg *Config) (types.Sizes, error) { return packagesdriver.GetSizesGolist(cfg.Context, cfg.BuildFlags, cfg.Env, cfg.Dir, usesExportData(cfg)) } // roots selects the appropriate paths to walk based on the passed-in configuration, // particularly the environment and the presence of a go.mod in cfg.Dir's parents. 
func roots(cfg *Config) ([]gopathwalk.Root, string, error) { stdout, err := invokeGo(cfg, "env", "GOROOT", "GOPATH", "GOMOD") if err != nil { return nil, "", err } fields := strings.Split(stdout.String(), "\n") if len(fields) != 4 || len(fields[3]) != 0 { return nil, "", fmt.Errorf("go env returned unexpected output: %q", stdout.String()) } goroot, gopath, gomod := fields[0], filepath.SplitList(fields[1]), fields[2] var modDir string if gomod != "" { modDir = filepath.Dir(gomod) } var roots []gopathwalk.Root // Always add GOROOT. roots = append(roots, gopathwalk.Root{filepath.Join(goroot, "/src"), gopathwalk.RootGOROOT}) // If modules are enabled, scan the module dir. if modDir != "" { roots = append(roots, gopathwalk.Root{modDir, gopathwalk.RootCurrentModule}) } // Add either GOPATH/src or GOPATH/pkg/mod, depending on module mode. for _, p := range gopath { if modDir != "" { roots = append(roots, gopathwalk.Root{filepath.Join(p, "/pkg/mod"), gopathwalk.RootModuleCache}) } else { roots = append(roots, gopathwalk.Root{filepath.Join(p, "/src"), gopathwalk.RootGOPATH}) } } return roots, modDir, nil } // These functions were copied from goimports. See further documentation there. // pathMatchesQueries is adapted from pkgIsCandidate. // TODO: is it reasonable to do Contains here, rather than an exact match on a path component? func pathMatchesQueries(path string, queries []string) bool { lastTwo := lastTwoComponents(path) for _, query := range queries { if strings.Contains(lastTwo, query) { return true } if hasHyphenOrUpperASCII(lastTwo) && !hasHyphenOrUpperASCII(query) { lastTwo = lowerASCIIAndRemoveHyphen(lastTwo) if strings.Contains(lastTwo, query) { return true } } } return false } // lastTwoComponents returns at most the last two path components // of v, using either / or \ as the path separator. func lastTwoComponents(v string) string { nslash := 0 for i := len(v) - 1; i >= 0; i-- { if v[i] == '/' || v[i] == '\\' { nslash++ if nslash == 2 { return v[i:] } } } return v } func hasHyphenOrUpperASCII(s string) bool { for i := 0; i < len(s); i++ { b := s[i] if b == '-' || ('A' <= b && b <= 'Z') { return true } } return false } func lowerASCIIAndRemoveHyphen(s string) (ret string) { buf := make([]byte, 0, len(s)) for i := 0; i < len(s); i++ { b := s[i] switch { case b == '-': continue case 'A' <= b && b <= 'Z': buf = append(buf, b+('a'-'A')) default: buf = append(buf, b) } } return string(buf) } // Fields must match go list; // see $GOROOT/src/cmd/go/internal/load/pkg.go. type jsonPackage struct { ImportPath string Dir string Name string Export string GoFiles []string CompiledGoFiles []string CFiles []string CgoFiles []string CXXFiles []string MFiles []string HFiles []string FFiles []string SFiles []string SwigFiles []string SwigCXXFiles []string SysoFiles []string Imports []string ImportMap map[string]string Deps []string TestGoFiles []string TestImports []string XTestGoFiles []string XTestImports []string ForTest string // q in a "p [q.test]" package, else "" DepOnly bool Error *jsonPackageError } type jsonPackageError struct { ImportStack []string Pos string Err string } func otherFiles(p *jsonPackage) [][]string { return [][]string{p.CFiles, p.CXXFiles, p.MFiles, p.HFiles, p.FFiles, p.SFiles, p.SwigFiles, p.SwigCXXFiles, p.SysoFiles} } // golistDriver uses the "go list" command to expand the pattern // words and return metadata for the specified packages. dir may be // "" and env may be nil, as per os/exec.Command. 
func golistDriver(cfg *Config, rootsDirs func() *goInfo, words ...string) (*driverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) // "q.test" -- q's test executable // "p [q.test]" -- variant of p as built for q's test executable // "q_test [q.test]" -- q's external test package // // The packages p that are built differently for a test q.test // are q itself, plus any helpers used by the external test q_test, // typically including "testing" and all its dependencies. // Run "go list" for complete // information on the specified packages. buf, err := invokeGo(cfg, golistargs(cfg, words)...) if err != nil { return nil, err } seen := make(map[string]*jsonPackage) // Decode the JSON and convert it to Package form. var response driverResponse for dec := json.NewDecoder(buf); dec.More(); { p := new(jsonPackage) if err := dec.Decode(p); err != nil { return nil, fmt.Errorf("JSON decoding failed: %v", err) } if p.ImportPath == "" { // The documentation for go list says that “[e]rroneous packages will have // a non-empty ImportPath”. If for some reason it comes back empty, we // prefer to error out rather than silently discarding data or handing // back a package without any way to refer to it. if p.Error != nil { return nil, Error{ Pos: p.Error.Pos, Msg: p.Error.Err, } } return nil, fmt.Errorf("package missing import path: %+v", p) } // Work around https://golang.org/issue/33157: // go list -e, when given an absolute path, will find the package contained at // that directory. But when no package exists there, it will return a fake package // with an error and the ImportPath set to the absolute path provided to go list. // Try to convert that absolute path to what its package path would be if it's // contained in a known module or GOPATH entry. This will allow the package to be // properly "reclaimed" when overlays are processed. if filepath.IsAbs(p.ImportPath) && p.Error != nil { pkgPath, ok := getPkgPath(cfg, p.ImportPath, rootsDirs) if ok { p.ImportPath = pkgPath } } if old, found := seen[p.ImportPath]; found { if !reflect.DeepEqual(p, old) { return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) } // skip the duplicate continue } seen[p.ImportPath] = p pkg := &Package{ Name: p.Name, ID: p.ImportPath, GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), } // Work around https://golang.org/issue/28749: // cmd/go puts assembly, C, and C++ files in CompiledGoFiles. // Filter out any elements of CompiledGoFiles that are also in OtherFiles. // We have to keep this workaround in place until go1.12 is a distant memory. if len(pkg.OtherFiles) > 0 { other := make(map[string]bool, len(pkg.OtherFiles)) for _, f := range pkg.OtherFiles { other[f] = true } out := pkg.CompiledGoFiles[:0] for _, f := range pkg.CompiledGoFiles { if other[f] { continue } out = append(out, f) } pkg.CompiledGoFiles = out } // Extract the PkgPath from the package's ID. if i := strings.IndexByte(pkg.ID, ' '); i >= 0 { pkg.PkgPath = pkg.ID[:i] } else { pkg.PkgPath = pkg.ID } if pkg.PkgPath == "unsafe" { pkg.GoFiles = nil // ignore fake unsafe.go file } // Assume go list emits only absolute paths for Dir. 
if p.Dir != "" && !filepath.IsAbs(p.Dir) { log.Fatalf("internal error: go list returned non-absolute Package.Dir: %s", p.Dir) } if p.Export != "" && !filepath.IsAbs(p.Export) { pkg.ExportFile = filepath.Join(p.Dir, p.Export) } else { pkg.ExportFile = p.Export } // imports // // Imports contains the IDs of all imported packages. // ImportsMap records (path, ID) only where they differ. ids := make(map[string]bool) for _, id := range p.Imports { ids[id] = true } pkg.Imports = make(map[string]*Package) for path, id := range p.ImportMap { pkg.Imports[path] = &Package{ID: id} // non-identity import delete(ids, id) } for id := range ids { if id == "C" { continue } pkg.Imports[id] = &Package{ID: id} // identity import } if !p.DepOnly { response.Roots = append(response.Roots, pkg.ID) } // Work around for pre-go.1.11 versions of go list. // TODO(matloob): they should be handled by the fallback. // Can we delete this? if len(pkg.CompiledGoFiles) == 0 { pkg.CompiledGoFiles = pkg.GoFiles } if p.Error != nil { pkg.Errors = append(pkg.Errors, Error{ Pos: p.Error.Pos, Msg: strings.TrimSpace(p.Error.Err), // Trim to work around golang.org/issue/32363. }) } response.Packages = append(response.Packages, pkg) } return &response, nil } // getPkgPath finds the package path of a directory if it's relative to a root directory. func getPkgPath(cfg *Config, dir string, goInfo func() *goInfo) (string, bool) { absDir, err := filepath.Abs(dir) if err != nil { cfg.Logf("error getting absolute path of %s: %v", dir, err) return "", false } for rdir, rpath := range goInfo().rootDirs { absRdir, err := filepath.Abs(rdir) if err != nil { cfg.Logf("error getting absolute path of %s: %v", rdir, err) continue } // Make sure that the directory is in the module, // to avoid creating a path relative to another module. if !strings.HasPrefix(absDir, absRdir) { cfg.Logf("%s does not have prefix %s", absDir, absRdir) continue } // TODO(matloob): This doesn't properly handle symlinks. r, err := filepath.Rel(rdir, dir) if err != nil { continue } if rpath != "" { // We choose only one root even though the directory even it can belong in multiple modules // or GOPATH entries. This is okay because we only need to work with absolute dirs when a // file is missing from disk, for instance when gopls calls go/packages in an overlay. // Once the file is saved, gopls, or the next invocation of the tool will get the correct // result straight from golist. // TODO(matloob): Implement module tiebreaking? return path.Join(rpath, filepath.ToSlash(r)), true } return filepath.ToSlash(r), true } return "", false } // absJoin absolutizes and flattens the lists of files. func absJoin(dir string, fileses ...[]string) (res []string) { for _, files := range fileses { for _, file := range files { if !filepath.IsAbs(file) { file = filepath.Join(dir, file) } res = append(res, file) } } return res } func golistargs(cfg *Config, words []string) []string { const findFlags = NeedImports | NeedTypes | NeedSyntax | NeedTypesInfo fullargs := []string{ "list", "-e", "-json", fmt.Sprintf("-compiled=%t", cfg.Mode&(NeedCompiledGoFiles|NeedSyntax|NeedTypesInfo|NeedTypesSizes) != 0), fmt.Sprintf("-test=%t", cfg.Tests), fmt.Sprintf("-export=%t", usesExportData(cfg)), fmt.Sprintf("-deps=%t", cfg.Mode&NeedImports != 0), // go list doesn't let you pass -test and -find together, // probably because you'd just get the TestMain. fmt.Sprintf("-find=%t", !cfg.Tests && cfg.Mode&findFlags == 0), } fullargs = append(fullargs, cfg.BuildFlags...) 
fullargs = append(fullargs, "--") fullargs = append(fullargs, words...) return fullargs } // invokeGo returns the stdout of a go command invocation. func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { stdout := new(bytes.Buffer) stderr := new(bytes.Buffer) cmd := exec.CommandContext(cfg.Context, "go", args...) // On darwin the cwd gets resolved to the real path, which breaks anything that // expects the working directory to keep the original path, including the // go command when dealing with modules. // The Go stdlib has a special feature where if the cwd and the PWD are the // same node then it trusts the PWD, so by setting it in the env for the child // process we fix up all the paths returned by the go command. cmd.Env = append(append([]string{}, cfg.Env...), "PWD="+cfg.Dir) cmd.Dir = cfg.Dir cmd.Stdout = stdout cmd.Stderr = stderr defer func(start time.Time) { cfg.Logf("%s for %v, stderr: <<%s>> stdout: <<%s>>\n", time.Since(start), cmdDebugStr(cmd, args...), stderr, stdout) }(time.Now()) if err := cmd.Run(); err != nil { // Check for 'go' executable not being found. if ee, ok := err.(*exec.Error); ok && ee.Err == exec.ErrNotFound { return nil, fmt.Errorf("'go list' driver requires 'go', but %s", exec.ErrNotFound) } exitErr, ok := err.(*exec.ExitError) if !ok { // Catastrophic error: // - context cancellation return nil, fmt.Errorf("couldn't exec 'go %v': %s %T", args, err, err) } // Old go version? if strings.Contains(stderr.String(), "flag provided but not defined") { return nil, goTooOldError{fmt.Errorf("unsupported version of go: %s: %s", exitErr, stderr)} } // Related to #24854 if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "unexpected directory layout") { return nil, fmt.Errorf("%s", stderr.String()) } // Is there an error running the C compiler in cgo? This will be reported in the "Error" field // and should be suppressed by go list -e. // // This condition is not perfect yet because the error message can include other error messages than runtime/cgo. isPkgPathRune := func(r rune) bool { // From https://golang.org/ref/spec#Import_declarations: // Implementation restriction: A compiler may restrict ImportPaths to non-empty strings // using only characters belonging to Unicode's L, M, N, P, and S general categories // (the Graphic characters without spaces) and may also exclude the // characters !"#$%&'()*,:;<=>?[\]^`{|} and the Unicode replacement character U+FFFD. return unicode.IsOneOf([]*unicode.RangeTable{unicode.L, unicode.M, unicode.N, unicode.P, unicode.S}, r) && strings.IndexRune("!\"#$%&'()*,:;<=>?[\\]^`{|}\uFFFD", r) == -1 } if len(stderr.String()) > 0 && strings.HasPrefix(stderr.String(), "# ") { if strings.HasPrefix(strings.TrimLeftFunc(stderr.String()[len("# "):], isPkgPathRune), "\n") { return stdout, nil } } // This error only appears in stderr. See golang.org/cl/166398 for a fix in go list to show // the error in the Err section of stdout in case -e option is provided. // This fix is provided for backwards compatibility. if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must be .go files") { output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, strings.Trim(stderr.String(), "\n")) return bytes.NewBufferString(output), nil } // Similar to the previous error, but currently lacks a fix in Go. 
if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "named files must all be in one directory") { output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, strings.Trim(stderr.String(), "\n")) return bytes.NewBufferString(output), nil } // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. // If the package doesn't exist, put the absolute path of the directory into the error message, // as Go 1.13 list does. const noSuchDirectory = "no such directory" if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) { errstr := stderr.String() abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):]) output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, abspath, strings.Trim(stderr.String(), "\n")) return bytes.NewBufferString(output), nil } // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. // Note that the error message we look for in this case is different that the one looked for above. if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, strings.Trim(stderr.String(), "\n")) return bytes.NewBufferString(output), nil } // Workaround for #34273. go list -e with GO111MODULE=on has incorrect behavior when listing a // directory outside any module. if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "outside available modules") { output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, // TODO(matloob): command-line-arguments isn't correct here. "command-line-arguments", strings.Trim(stderr.String(), "\n")) return bytes.NewBufferString(output), nil } // Workaround for an instance of golang.org/issue/26755: go list -e will return a non-zero exit // status if there's a dependency on a package that doesn't exist. But it should return // a zero exit status and set an error on that package. if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no Go files in") { // try to extract package name from string stderrStr := stderr.String() var importPath string colon := strings.Index(stderrStr, ":") if colon > 0 && strings.HasPrefix(stderrStr, "go build ") { importPath = stderrStr[len("go build "):colon] } output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, importPath, strings.Trim(stderrStr, "\n")) return bytes.NewBufferString(output), nil } // Export mode entails a build. // If that build fails, errors appear on stderr // (despite the -e flag) and the Export field is blank. // Do not fail in that case. // The same is true if an ad-hoc package given to go list doesn't exist. // TODO(matloob): Remove these once we can depend on go list to exit with a zero status with -e even when // packages don't exist or a build fails. if !usesExportData(cfg) && !containsGoFile(args) { return nil, fmt.Errorf("go %v: %s: %s", args, exitErr, stderr) } } // As of writing, go list -export prints some non-fatal compilation // errors to stderr, even with -e set. We would prefer that it put // them in the Package.Error JSON (see https://golang.org/issue/26319). // In the meantime, there's nowhere good to put them, but they can // be useful for debugging. Print them if $GOPACKAGESPRINTGOLISTERRORS // is set. 
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTGOLISTERRORS") != "" { fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd, args...), stderr) } return stdout, nil } func containsGoFile(s []string) bool { for _, f := range s { if strings.HasSuffix(f, ".go") { return true } } return false } func cmdDebugStr(cmd *exec.Cmd, args ...string) string { env := make(map[string]string) for _, kv := range cmd.Env { split := strings.Split(kv, "=") k, v := split[0], split[1] env[k] = v } var quotedArgs []string for _, arg := range args { quotedArgs = append(quotedArgs, strconv.Quote(arg)) } return fmt.Sprintf("GOROOT=%v GOPATH=%v GO111MODULE=%v PWD=%v go %s", env["GOROOT"], env["GOPATH"], env["GO111MODULE"], env["PWD"], strings.Join(quotedArgs, " ")) }
[ "\"GOPACKAGESDEBUG\"", "\"GOPACKAGESPRINTGOLISTERRORS\"" ]
[]
[ "GOPACKAGESDEBUG", "GOPACKAGESPRINTGOLISTERRORS" ]
[]
["GOPACKAGESDEBUG", "GOPACKAGESPRINTGOLISTERRORS"]
go
2
0
fstest/fstests/fstests.go
// Package fstests provides generic integration tests for the Fs and // Object interfaces. // // These tests are concerned with the basic functionality of a // backend. The tests in fs/sync and fs/operations tests more // cornercases that these tests don't. package fstests import ( "bytes" "context" "fmt" "io" "io/ioutil" "math/bits" "os" "path" "path/filepath" "reflect" "sort" "strconv" "strings" "testing" "time" "github.com/pkg/errors" "github.com/rclone/rclone/fs" "github.com/rclone/rclone/fs/config" "github.com/rclone/rclone/fs/fserrors" "github.com/rclone/rclone/fs/fspath" "github.com/rclone/rclone/fs/hash" "github.com/rclone/rclone/fs/object" "github.com/rclone/rclone/fs/operations" "github.com/rclone/rclone/fs/walk" "github.com/rclone/rclone/fstest" "github.com/rclone/rclone/fstest/testserver" "github.com/rclone/rclone/lib/encoder" "github.com/rclone/rclone/lib/random" "github.com/rclone/rclone/lib/readers" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) // InternalTester is an optional interface for Fs which allows to execute internal tests // // This interface should be implemented in 'backend'_internal_test.go and not in 'backend'.go type InternalTester interface { InternalTest(*testing.T) } // ChunkedUploadConfig contains the values used by TestFsPutChunked // to determine the limits of chunked uploading type ChunkedUploadConfig struct { // Minimum allowed chunk size MinChunkSize fs.SizeSuffix // Maximum allowed chunk size, 0 is no limit MaxChunkSize fs.SizeSuffix // Rounds the given chunk size up to the next valid value // nil will disable rounding // e.g. the next power of 2 CeilChunkSize func(fs.SizeSuffix) fs.SizeSuffix // More than one chunk is required on upload NeedMultipleChunks bool } // SetUploadChunkSizer is a test only interface to change the upload chunk size at runtime type SetUploadChunkSizer interface { // Change the configured UploadChunkSize. // Will only be called while no transfer is in progress. SetUploadChunkSize(fs.SizeSuffix) (fs.SizeSuffix, error) } // SetUploadCutoffer is a test only interface to change the upload cutoff size at runtime type SetUploadCutoffer interface { // Change the configured UploadCutoff. // Will only be called while no transfer is in progress. SetUploadCutoff(fs.SizeSuffix) (fs.SizeSuffix, error) } // NextPowerOfTwo returns the current or next bigger power of two. // All values less or equal 0 will return 0 func NextPowerOfTwo(i fs.SizeSuffix) fs.SizeSuffix { return 1 << uint(64-bits.LeadingZeros64(uint64(i)-1)) } // NextMultipleOf returns a function that can be used as a CeilChunkSize function. // This function will return the next multiple of m that is equal or bigger than i. // All values less or equal 0 will return 0. 
func NextMultipleOf(m fs.SizeSuffix) func(fs.SizeSuffix) fs.SizeSuffix { if m <= 0 { panic(fmt.Sprintf("invalid multiplier %s", m)) } return func(i fs.SizeSuffix) fs.SizeSuffix { if i <= 0 { return 0 } return (((i - 1) / m) + 1) * m } } // dirsToNames returns a sorted list of names func dirsToNames(dirs []fs.Directory) []string { names := []string{} for _, dir := range dirs { names = append(names, fstest.Normalize(dir.Remote())) } sort.Strings(names) return names } // objsToNames returns a sorted list of object names func objsToNames(objs []fs.Object) []string { names := []string{} for _, obj := range objs { names = append(names, fstest.Normalize(obj.Remote())) } sort.Strings(names) return names } // findObject finds the object on the remote func findObject(ctx context.Context, t *testing.T, f fs.Fs, Name string) fs.Object { var obj fs.Object var err error sleepTime := 1 * time.Second for i := 1; i <= *fstest.ListRetries; i++ { obj, err = f.NewObject(ctx, Name) if err == nil { break } t.Logf("Sleeping for %v for findObject eventual consistency: %d/%d (%v)", sleepTime, i, *fstest.ListRetries, err) time.Sleep(sleepTime) sleepTime = (sleepTime * 3) / 2 } require.NoError(t, err) return obj } // retry f() until no retriable error func retry(t *testing.T, what string, f func() error) { const maxTries = 10 var err error for tries := 1; tries <= maxTries; tries++ { err = f() // exit if no error, or error is not retriable if err == nil || !fserrors.IsRetryError(err) { break } t.Logf("%s error: %v - low level retry %d/%d", what, err, tries, maxTries) time.Sleep(2 * time.Second) } require.NoError(t, err, what) } // testPut puts file with random contents to the remote func testPut(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) (string, fs.Object) { return PutTestContents(ctx, t, f, file, random.String(100), true) } // PutTestContents puts file with given contents to the remote and checks it but unlike TestPutLarge doesn't remove func PutTestContents(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item, contents string, check bool) (string, fs.Object) { var ( err error obj fs.Object uploadHash *hash.MultiHasher ) retry(t, "Put", func() error { buf := bytes.NewBufferString(contents) uploadHash = hash.NewMultiHasher() in := io.TeeReader(buf, uploadHash) file.Size = int64(buf.Len()) obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil) obj, err = f.Put(ctx, in, obji) return err }) file.Hashes = uploadHash.Sums() if check { file.Check(t, obj, f.Precision()) // Re-read the object and check again obj = findObject(ctx, t, f, file.Path) file.Check(t, obj, f.Precision()) } return contents, obj } // TestPutLarge puts file to the remote, checks it and removes it on success. 
func TestPutLarge(ctx context.Context, t *testing.T, f fs.Fs, file *fstest.Item) { var ( err error obj fs.Object uploadHash *hash.MultiHasher ) retry(t, "PutLarge", func() error { r := readers.NewPatternReader(file.Size) uploadHash = hash.NewMultiHasher() in := io.TeeReader(r, uploadHash) obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil) obj, err = f.Put(ctx, in, obji) if file.Size == 0 && err == fs.ErrorCantUploadEmptyFiles { t.Skip("Can't upload zero length files") } return err }) file.Hashes = uploadHash.Sums() file.Check(t, obj, f.Precision()) // Re-read the object and check again obj = findObject(ctx, t, f, file.Path) file.Check(t, obj, f.Precision()) // Download the object and check it is OK downloadHash := hash.NewMultiHasher() download, err := obj.Open(ctx) require.NoError(t, err) n, err := io.Copy(downloadHash, download) require.NoError(t, err) assert.Equal(t, file.Size, n) require.NoError(t, download.Close()) assert.Equal(t, file.Hashes, downloadHash.Sums()) // Remove the object require.NoError(t, obj.Remove(ctx)) } // read the contents of an object as a string func readObject(ctx context.Context, t *testing.T, obj fs.Object, limit int64, options ...fs.OpenOption) string { what := fmt.Sprintf("readObject(%q) limit=%d, options=%+v", obj, limit, options) in, err := obj.Open(ctx, options...) require.NoError(t, err, what) var r io.Reader = in if limit >= 0 { r = &io.LimitedReader{R: r, N: limit} } contents, err := ioutil.ReadAll(r) require.NoError(t, err, what) err = in.Close() require.NoError(t, err, what) return string(contents) } // ExtraConfigItem describes a config item for the tests type ExtraConfigItem struct{ Name, Key, Value string } // Opt is options for Run type Opt struct { RemoteName string NilObject fs.Object ExtraConfig []ExtraConfigItem SkipBadWindowsCharacters bool // skips unusable characters for windows if set SkipFsMatch bool // if set skip exact matching of Fs value TiersToTest []string // List of tiers which can be tested in setTier test ChunkedUpload ChunkedUploadConfig UnimplementableFsMethods []string // List of methods which can't be implemented in this wrapping Fs UnimplementableObjectMethods []string // List of methods which can't be implemented in this wrapping Fs SkipFsCheckWrap bool // if set skip FsCheckWrap SkipObjectCheckWrap bool // if set skip ObjectCheckWrap SkipInvalidUTF8 bool // if set skip invalid UTF-8 checks } // returns true if x is found in ss func stringsContains(x string, ss []string) bool { for _, s := range ss { if x == s { return true } } return false } // Run runs the basic integration tests for a remote using the options passed in. // // They are structured in a hierarchical way so that dependencies for the tests can be created. // // For example some tests require the directory to be created - these // are inside the "FsMkdir" test. Some tests require some tests files // - these are inside the "FsPutFiles" test. func Run(t *testing.T, opt *Opt) { var ( remote fs.Fs remoteName = opt.RemoteName subRemoteName string subRemoteLeaf string file1 = fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"), Path: "file name.txt", } file1Contents string file2 = fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:10.123123123Z"), Path: `hello? sausage/êé/Hello, 世界/ " ' @ < > & ? 
+ ≠/z.txt`, } isLocalRemote bool purged bool // whether the dir has been purged or not ctx = context.Background() unwrappableFsMethods = []string{"Command"} // these Fs methods don't need to be wrapped ever ) if strings.HasSuffix(os.Getenv("RCLONE_CONFIG"), "/notfound") && *fstest.RemoteName == "" { t.Skip("quicktest only") } // Skip the test if the remote isn't configured skipIfNotOk := func(t *testing.T) { if remote == nil { t.Skipf("WARN: %q not configured", remoteName) } } // Skip if remote is not ListR capable, otherwise set the useListR // flag, returning a function to restore its value skipIfNotListR := func(t *testing.T) func() { skipIfNotOk(t) if remote.Features().ListR == nil { t.Skip("FS has no ListR interface") } previous := fs.Config.UseListR fs.Config.UseListR = true return func() { fs.Config.UseListR = previous } } // Skip if remote is not SetTier and GetTier capable skipIfNotSetTier := func(t *testing.T) { skipIfNotOk(t) if remote.Features().SetTier == false || remote.Features().GetTier == false { t.Skip("FS has no SetTier & GetTier interfaces") } } // Return true if f (or any of the things it wraps) is bucket // based but not at the root. isBucketBasedButNotRoot := func(f fs.Fs) bool { f = fs.UnWrapFs(f) return f.Features().BucketBased && strings.Contains(strings.Trim(f.Root(), "/"), "/") } // Initialise the remote fstest.Initialise() // Set extra config if supplied for _, item := range opt.ExtraConfig { config.FileSet(item.Name, item.Key, item.Value) } if *fstest.RemoteName != "" { remoteName = *fstest.RemoteName } oldFstestRemoteName := fstest.RemoteName fstest.RemoteName = &remoteName defer func() { fstest.RemoteName = oldFstestRemoteName }() t.Logf("Using remote %q", remoteName) var err error if remoteName == "" { remoteName, err = fstest.LocalRemote() require.NoError(t, err) isLocalRemote = true } // Start any test servers if required finish, err := testserver.Start(remoteName) require.NoError(t, err) defer finish() // Make the Fs we are testing with, initialising the local variables // subRemoteName - name of the remote after the TestRemote: // subRemoteLeaf - a subdirectory to use under that // remote - the result of fs.NewFs(TestRemote:subRemoteName) subRemoteName, subRemoteLeaf, err = fstest.RandomRemoteName(remoteName) require.NoError(t, err) remote, err = fs.NewFs(subRemoteName) if err == fs.ErrorNotFoundInConfigFile { t.Logf("Didn't find %q in config file - skipping tests", remoteName) return } require.NoError(t, err, fmt.Sprintf("unexpected error: %v", err)) // Skip the rest if it failed skipIfNotOk(t) // Check to see if Fs that wrap other Fs implement all the optional methods t.Run("FsCheckWrap", func(t *testing.T) { skipIfNotOk(t) if opt.SkipFsCheckWrap { t.Skip("Skipping FsCheckWrap on this Fs") } ft := new(fs.Features).Fill(remote) if ft.UnWrap == nil { t.Skip("Not a wrapping Fs") } v := reflect.ValueOf(ft).Elem() vType := v.Type() for i := 0; i < v.NumField(); i++ { vName := vType.Field(i).Name if stringsContains(vName, opt.UnimplementableFsMethods) { continue } if stringsContains(vName, unwrappableFsMethods) { continue } field := v.Field(i) // skip the bools if field.Type().Kind() == reflect.Bool { continue } if field.IsNil() { t.Errorf("Missing Fs wrapper for %s", vName) } } }) // Check to see if Fs advertises commands and they work and have docs t.Run("FsCommand", func(t *testing.T) { skipIfNotOk(t) doCommand := remote.Features().Command if doCommand == nil { t.Skip("No commands in this remote") } // Check the correct error is generated _, err := 
doCommand(context.Background(), "NOTFOUND", nil, nil) assert.Equal(t, fs.ErrorCommandNotFound, err, "Incorrect error generated on command not found") // Check there are some commands in the fsInfo fsInfo, _, _, _, err := fs.ConfigFs(remoteName) require.NoError(t, err) assert.True(t, len(fsInfo.CommandHelp) > 0, "Command is declared, must return some help in CommandHelp") }) // TestFsRmdirNotFound tests deleting a non existent directory t.Run("FsRmdirNotFound", func(t *testing.T) { skipIfNotOk(t) if isBucketBasedButNotRoot(remote) { t.Skip("Skipping test as non root bucket based remote") } err := remote.Rmdir(ctx, "") assert.Error(t, err, "Expecting error on Rmdir non existent") }) // Make the directory err = remote.Mkdir(ctx, "") require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{}) // TestFsString tests the String method t.Run("FsString", func(t *testing.T) { skipIfNotOk(t) str := remote.String() require.NotEqual(t, "", str) }) // TestFsName tests the Name method t.Run("FsName", func(t *testing.T) { skipIfNotOk(t) got := remote.Name() want := remoteName[:strings.LastIndex(remoteName, ":")+1] if isLocalRemote { want = "local:" } require.Equal(t, want, got+":") }) // TestFsRoot tests the Root method t.Run("FsRoot", func(t *testing.T) { skipIfNotOk(t) name := remote.Name() + ":" root := remote.Root() if isLocalRemote { // only check last path element on local require.Equal(t, filepath.Base(subRemoteName), filepath.Base(root)) } else { require.Equal(t, subRemoteName, name+root) } }) // TestFsRmdirEmpty tests deleting an empty directory t.Run("FsRmdirEmpty", func(t *testing.T) { skipIfNotOk(t) err := remote.Rmdir(ctx, "") require.NoError(t, err) }) // TestFsMkdir tests making a directory // // Tests that require the directory to be made are within this t.Run("FsMkdir", func(t *testing.T) { skipIfNotOk(t) err := remote.Mkdir(ctx, "") require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{}) err = remote.Mkdir(ctx, "") require.NoError(t, err) // TestFsMkdirRmdirSubdir tests making and removing a sub directory t.Run("FsMkdirRmdirSubdir", func(t *testing.T) { skipIfNotOk(t) dir := "dir/subdir" err := operations.Mkdir(ctx, remote, dir) require.NoError(t, err) fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir", "dir/subdir"}, fs.GetModifyWindow(remote)) err = operations.Rmdir(ctx, remote, dir) require.NoError(t, err) fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{"dir"}, fs.GetModifyWindow(remote)) err = operations.Rmdir(ctx, remote, "dir") require.NoError(t, err) fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.GetModifyWindow(remote)) }) // TestFsListEmpty tests listing an empty directory t.Run("FsListEmpty", func(t *testing.T) { skipIfNotOk(t) fstest.CheckListing(t, remote, []fstest.Item{}) }) // TestFsListDirEmpty tests listing the directories from an empty directory TestFsListDirEmpty := func(t *testing.T) { skipIfNotOk(t) objs, dirs, err := walk.GetAll(ctx, remote, "", true, 1) if !remote.Features().CanHaveEmptyDirectories { if err != fs.ErrorDirNotFound { require.NoError(t, err) } } else { require.NoError(t, err) } assert.Equal(t, []string{}, objsToNames(objs)) assert.Equal(t, []string{}, dirsToNames(dirs)) } t.Run("FsListDirEmpty", TestFsListDirEmpty) // TestFsListRDirEmpty tests listing the directories from an empty directory using ListR t.Run("FsListRDirEmpty", func(t *testing.T) { defer skipIfNotListR(t)() TestFsListDirEmpty(t) }) // TestFsListDirNotFound tests listing the 
directories from an empty directory TestFsListDirNotFound := func(t *testing.T) { skipIfNotOk(t) objs, dirs, err := walk.GetAll(ctx, remote, "does not exist", true, 1) if !remote.Features().CanHaveEmptyDirectories { if err != fs.ErrorDirNotFound { assert.NoError(t, err) assert.Equal(t, 0, len(objs)+len(dirs)) } } else { assert.Equal(t, fs.ErrorDirNotFound, err) } } t.Run("FsListDirNotFound", TestFsListDirNotFound) // TestFsListRDirNotFound tests listing the directories from an empty directory using ListR t.Run("FsListRDirNotFound", func(t *testing.T) { defer skipIfNotListR(t)() TestFsListDirNotFound(t) }) // FsEncoding tests that file name encodings are // working by uploading a series of unusual files // Must be run in an empty directory t.Run("FsEncoding", func(t *testing.T) { skipIfNotOk(t) // check no files or dirs as pre-requisite fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.GetModifyWindow(remote)) for _, test := range []struct { name string path string }{ // See lib/encoder/encoder.go for list of things that go here {"control chars", "\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F\x10\x11\x12\x13\x14\x15\x16\x17\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F\x7F"}, {"dot", "."}, {"dot dot", ".."}, {"punctuation", "!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"}, {"leading space", " leading space"}, {"leading tilde", "~leading tilde"}, {"leading CR", "\rleading CR"}, {"leading LF", "\nleading LF"}, {"leading HT", "\tleading HT"}, {"leading VT", "\vleading VT"}, {"leading dot", ".leading dot"}, {"trailing space", "trailing space "}, {"trailing CR", "trailing CR\r"}, {"trailing LF", "trailing LF\n"}, {"trailing HT", "trailing HT\t"}, {"trailing VT", "trailing VT\v"}, {"trailing dot", "trailing dot."}, {"invalid UTF-8", "invalid utf-8\xfe"}, } { t.Run(test.name, func(t *testing.T) { if opt.SkipInvalidUTF8 && test.name == "invalid UTF-8" { t.Skip("Skipping " + test.name) } // turn raw strings into Standard encoding fileName := encoder.Standard.Encode(test.path) dirName := fileName t.Logf("testing %q", fileName) assert.NoError(t, remote.Mkdir(ctx, dirName)) file := fstest.Item{ ModTime: time.Now(), Path: dirName + "/" + fileName, // test creating a file and dir with that name } _, o := testPut(context.Background(), t, remote, &file) fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file}, []string{dirName}, fs.GetModifyWindow(remote)) assert.NoError(t, o.Remove(ctx)) assert.NoError(t, remote.Rmdir(ctx, dirName)) fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, fs.GetModifyWindow(remote)) }) } }) // TestFsNewObjectNotFound tests not finding an object t.Run("FsNewObjectNotFound", func(t *testing.T) { skipIfNotOk(t) // Object in an existing directory o, err := remote.NewObject(ctx, "potato") assert.Nil(t, o) assert.Equal(t, fs.ErrorObjectNotFound, err) // Now try an object in a non existing directory o, err = remote.NewObject(ctx, "directory/not/found/potato") assert.Nil(t, o) assert.Equal(t, fs.ErrorObjectNotFound, err) }) // TestFsPutError tests uploading a file where there is an error // // It makes sure that aborting a file half way through does not create // a file on the remote. 
// // go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutError)$' t.Run("FsPutError", func(t *testing.T) { skipIfNotOk(t) var N int64 = 5 * 1024 if *fstest.SizeLimit > 0 && N > *fstest.SizeLimit { N = *fstest.SizeLimit t.Logf("Reduce file size due to limit %d", N) } // Read N bytes then produce an error contents := random.String(int(N)) buf := bytes.NewBufferString(contents) er := &readers.ErrorReader{Err: errors.New("potato")} in := io.MultiReader(buf, er) obji := object.NewStaticObjectInfo(file2.Path, file2.ModTime, 2*N, true, nil, nil) _, err := remote.Put(ctx, in, obji) // assert.Nil(t, obj) - FIXME some remotes return the object even on nil assert.NotNil(t, err) obj, err := remote.NewObject(ctx, file2.Path) assert.Nil(t, obj) assert.Equal(t, fs.ErrorObjectNotFound, err) }) t.Run("FsPutZeroLength", func(t *testing.T) { skipIfNotOk(t) TestPutLarge(ctx, t, remote, &fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"), Path: fmt.Sprintf("zero-length-file"), Size: int64(0), }) }) t.Run("FsOpenWriterAt", func(t *testing.T) { skipIfNotOk(t) openWriterAt := remote.Features().OpenWriterAt if openWriterAt == nil { t.Skip("FS has no OpenWriterAt interface") } path := "writer-at-subdir/writer-at-file" out, err := openWriterAt(ctx, path, -1) require.NoError(t, err) var n int n, err = out.WriteAt([]byte("def"), 3) assert.NoError(t, err) assert.Equal(t, 3, n) n, err = out.WriteAt([]byte("ghi"), 6) assert.NoError(t, err) assert.Equal(t, 3, n) n, err = out.WriteAt([]byte("abc"), 0) assert.NoError(t, err) assert.Equal(t, 3, n) assert.NoError(t, out.Close()) obj := findObject(ctx, t, remote, path) assert.Equal(t, "abcdefghi", readObject(ctx, t, obj, -1), "contents of file differ") assert.NoError(t, obj.Remove(ctx)) assert.NoError(t, remote.Rmdir(ctx, "writer-at-subdir")) }) // TestFsChangeNotify tests that changes are properly // propagated // // go test -v -remote TestDrive: -run '^Test(Setup|Init|FsChangeNotify)$' -verbose t.Run("FsChangeNotify", func(t *testing.T) { skipIfNotOk(t) // Check have ChangeNotify doChangeNotify := remote.Features().ChangeNotify if doChangeNotify == nil { t.Skip("FS has no ChangeNotify interface") } err := operations.Mkdir(ctx, remote, "dir") require.NoError(t, err) pollInterval := make(chan time.Duration) dirChanges := map[string]struct{}{} objChanges := map[string]struct{}{} doChangeNotify(ctx, func(x string, e fs.EntryType) { fs.Debugf(nil, "doChangeNotify(%q, %+v)", x, e) if strings.HasPrefix(x, file1.Path[:5]) || strings.HasPrefix(x, file2.Path[:5]) { fs.Debugf(nil, "Ignoring notify for file1 or file2: %q, %v", x, e) return } if e == fs.EntryDirectory { dirChanges[x] = struct{}{} } else if e == fs.EntryObject { objChanges[x] = struct{}{} } }, pollInterval) defer func() { close(pollInterval) }() pollInterval <- time.Second var dirs []string for _, idx := range []int{1, 3, 2} { dir := fmt.Sprintf("dir/subdir%d", idx) err = operations.Mkdir(ctx, remote, dir) require.NoError(t, err) dirs = append(dirs, dir) } var objs []fs.Object for _, idx := range []int{2, 4, 3} { file := fstest.Item{ ModTime: time.Now(), Path: fmt.Sprintf("dir/file%d", idx), } _, o := testPut(ctx, t, remote, &file) objs = append(objs, o) } // Looks for each item in wants in changes - // if they are all found it returns true contains := func(changes map[string]struct{}, wants []string) bool { for _, want := range wants { _, ok := changes[want] if !ok { return false } } return true } // Wait a little while for the changes to come in wantDirChanges := []string{"dir/subdir1", 
"dir/subdir3", "dir/subdir2"} wantObjChanges := []string{"dir/file2", "dir/file4", "dir/file3"} ok := false for tries := 1; tries < 10; tries++ { ok = contains(dirChanges, wantDirChanges) && contains(objChanges, wantObjChanges) if ok { break } t.Logf("Try %d/10 waiting for dirChanges and objChanges", tries) time.Sleep(3 * time.Second) } if !ok { t.Errorf("%+v does not contain %+v or \n%+v does not contain %+v", dirChanges, wantDirChanges, objChanges, wantObjChanges) } // tidy up afterwards for _, o := range objs { assert.NoError(t, o.Remove(ctx)) } dirs = append(dirs, "dir") for _, dir := range dirs { assert.NoError(t, remote.Rmdir(ctx, dir)) } }) // TestFsPut files writes file1, file2 and tests an update // // Tests that require file1, file2 are within this t.Run("FsPutFiles", func(t *testing.T) { skipIfNotOk(t) file1Contents, _ = testPut(ctx, t, remote, &file1) /* file2Contents = */ testPut(ctx, t, remote, &file2) file1Contents, _ = testPut(ctx, t, remote, &file1) // Note that the next test will check there are no duplicated file names // TestFsListDirFile2 tests the files are correctly uploaded by doing // Depth 1 directory listings TestFsListDirFile2 := func(t *testing.T) { skipIfNotOk(t) list := func(dir string, expectedDirNames, expectedObjNames []string) { var objNames, dirNames []string for i := 1; i <= *fstest.ListRetries; i++ { objs, dirs, err := walk.GetAll(ctx, remote, dir, true, 1) if errors.Cause(err) == fs.ErrorDirNotFound { objs, dirs, err = walk.GetAll(ctx, remote, dir, true, 1) } require.NoError(t, err) objNames = objsToNames(objs) dirNames = dirsToNames(dirs) if len(objNames) >= len(expectedObjNames) && len(dirNames) >= len(expectedDirNames) { break } t.Logf("Sleeping for 1 second for TestFsListDirFile2 eventual consistency: %d/%d", i, *fstest.ListRetries) time.Sleep(1 * time.Second) } assert.Equal(t, expectedDirNames, dirNames) assert.Equal(t, expectedObjNames, objNames) } dir := file2.Path deepest := true for dir != "" { expectedObjNames := []string{} expectedDirNames := []string{} child := dir dir = path.Dir(dir) if dir == "." { dir = "" expectedObjNames = append(expectedObjNames, file1.Path) } if deepest { expectedObjNames = append(expectedObjNames, file2.Path) deepest = false } else { expectedDirNames = append(expectedDirNames, child) } list(dir, expectedDirNames, expectedObjNames) } } t.Run("FsListDirFile2", TestFsListDirFile2) // TestFsListRDirFile2 tests the files are correctly uploaded by doing // Depth 1 directory listings using ListR t.Run("FsListRDirFile2", func(t *testing.T) { defer skipIfNotListR(t)() TestFsListDirFile2(t) }) // Test the files are all there with walk.ListR recursive listings t.Run("FsListR", func(t *testing.T) { skipIfNotOk(t) objs, dirs, err := walk.GetAll(ctx, remote, "", true, -1) require.NoError(t, err) assert.Equal(t, []string{ "hello? sausage", "hello? sausage/êé", "hello? sausage/êé/Hello, 世界", "hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠", }, dirsToNames(dirs)) assert.Equal(t, []string{ "file name.txt", "hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt", }, objsToNames(objs)) }) // Test the files are all there with // walk.ListR recursive listings on a sub dir t.Run("FsListRSubdir", func(t *testing.T) { skipIfNotOk(t) objs, dirs, err := walk.GetAll(ctx, remote, path.Dir(path.Dir(path.Dir(path.Dir(file2.Path)))), true, -1) require.NoError(t, err) assert.Equal(t, []string{ "hello? sausage/êé", "hello? sausage/êé/Hello, 世界", "hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? 
+ ≠", }, dirsToNames(dirs)) assert.Equal(t, []string{ "hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠/z.txt", }, objsToNames(objs)) }) // TestFsListDirRoot tests that DirList works in the root TestFsListDirRoot := func(t *testing.T) { skipIfNotOk(t) rootRemote, err := fs.NewFs(remoteName) require.NoError(t, err) _, dirs, err := walk.GetAll(ctx, rootRemote, "", true, 1) require.NoError(t, err) assert.Contains(t, dirsToNames(dirs), subRemoteLeaf, "Remote leaf not found") } t.Run("FsListDirRoot", TestFsListDirRoot) // TestFsListRDirRoot tests that DirList works in the root using ListR t.Run("FsListRDirRoot", func(t *testing.T) { defer skipIfNotListR(t)() TestFsListDirRoot(t) }) // TestFsListSubdir tests List works for a subdirectory TestFsListSubdir := func(t *testing.T) { skipIfNotOk(t) fileName := file2.Path var err error var objs []fs.Object var dirs []fs.Directory for i := 0; i < 2; i++ { dir, _ := path.Split(fileName) dir = dir[:len(dir)-1] objs, dirs, err = walk.GetAll(ctx, remote, dir, true, -1) } require.NoError(t, err) require.Len(t, objs, 1) assert.Equal(t, fileName, objs[0].Remote()) require.Len(t, dirs, 0) } t.Run("FsListSubdir", TestFsListSubdir) // TestFsListRSubdir tests List works for a subdirectory using ListR t.Run("FsListRSubdir", func(t *testing.T) { defer skipIfNotListR(t)() TestFsListSubdir(t) }) // TestFsListLevel2 tests List works for 2 levels TestFsListLevel2 := func(t *testing.T) { skipIfNotOk(t) objs, dirs, err := walk.GetAll(ctx, remote, "", true, 2) if err == fs.ErrorLevelNotSupported { return } require.NoError(t, err) assert.Equal(t, []string{file1.Path}, objsToNames(objs)) assert.Equal(t, []string{"hello? sausage", "hello? sausage/êé"}, dirsToNames(dirs)) } t.Run("FsListLevel2", TestFsListLevel2) // TestFsListRLevel2 tests List works for 2 levels using ListR t.Run("FsListRLevel2", func(t *testing.T) { defer skipIfNotListR(t)() TestFsListLevel2(t) }) // TestFsListFile1 tests file present t.Run("FsListFile1", func(t *testing.T) { skipIfNotOk(t) fstest.CheckListing(t, remote, []fstest.Item{file1, file2}) }) // TestFsNewObject tests NewObject t.Run("FsNewObject", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) file1.Check(t, obj, remote.Precision()) }) // TestFsListFile1and2 tests two files present t.Run("FsListFile1and2", func(t *testing.T) { skipIfNotOk(t) fstest.CheckListing(t, remote, []fstest.Item{file1, file2}) }) // TestFsNewObjectDir tests NewObject on a directory which should produce an error t.Run("FsNewObjectDir", func(t *testing.T) { skipIfNotOk(t) dir := path.Dir(file2.Path) obj, err := remote.NewObject(ctx, dir) assert.Nil(t, obj) assert.NotNil(t, err) }) // TestFsPurge tests Purge t.Run("FsPurge", func(t *testing.T) { skipIfNotOk(t) // Check have Purge doPurge := remote.Features().Purge if doPurge == nil { t.Skip("FS has no Purge interface") } // put up a file to purge fileToPurge := fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"), Path: "dirToPurge/fileToPurge.txt", } _, _ = testPut(ctx, t, remote, &fileToPurge) fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file1, file2, fileToPurge}, []string{ "dirToPurge", "hello? sausage", "hello? sausage/êé", "hello? sausage/êé/Hello, 世界", "hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠", }, fs.GetModifyWindow(remote)) // Now purge it err = operations.Purge(ctx, remote, "dirToPurge") require.NoError(t, err) fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file1, file2}, []string{ "hello? sausage", "hello? sausage/êé", "hello? 
sausage/êé/Hello, 世界", "hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠", }, fs.GetModifyWindow(remote)) }) // TestFsCopy tests Copy t.Run("FsCopy", func(t *testing.T) { skipIfNotOk(t) // Check have Copy doCopy := remote.Features().Copy if doCopy == nil { t.Skip("FS has no Copier interface") } // Test with file2 so have + and ' ' in file name var file2Copy = file2 file2Copy.Path += "-copy" // do the copy src := findObject(ctx, t, remote, file2.Path) dst, err := doCopy(ctx, src, file2Copy.Path) if err == fs.ErrorCantCopy { t.Skip("FS can't copy") } require.NoError(t, err, fmt.Sprintf("Error: %#v", err)) // check file exists in new listing fstest.CheckListing(t, remote, []fstest.Item{file1, file2, file2Copy}) // Check dst lightly - list above has checked ModTime/Hashes assert.Equal(t, file2Copy.Path, dst.Remote()) // Delete copy err = dst.Remove(ctx) require.NoError(t, err) }) // TestFsMove tests Move t.Run("FsMove", func(t *testing.T) { skipIfNotOk(t) // Check have Move doMove := remote.Features().Move if doMove == nil { t.Skip("FS has no Mover interface") } // state of files now: // 1: file name.txt // 2: hello sausage?/../z.txt var file1Move = file1 var file2Move = file2 // check happy path, i.e. no naming conflicts when rename and move are two // separate operations file2Move.Path = "other.txt" src := findObject(ctx, t, remote, file2.Path) dst, err := doMove(ctx, src, file2Move.Path) if err == fs.ErrorCantMove { t.Skip("FS can't move") } require.NoError(t, err) // check file exists in new listing fstest.CheckListing(t, remote, []fstest.Item{file1, file2Move}) // Check dst lightly - list above has checked ModTime/Hashes assert.Equal(t, file2Move.Path, dst.Remote()) // 1: file name.txt // 2: other.txt // Check conflict on "rename, then move" file1Move.Path = "moveTest/other.txt" src = findObject(ctx, t, remote, file1.Path) _, err = doMove(ctx, src, file1Move.Path) require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{file1Move, file2Move}) // 1: moveTest/other.txt // 2: other.txt // Check conflict on "move, then rename" src = findObject(ctx, t, remote, file1Move.Path) _, err = doMove(ctx, src, file1.Path) require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{file1, file2Move}) // 1: file name.txt // 2: other.txt src = findObject(ctx, t, remote, file2Move.Path) _, err = doMove(ctx, src, file2.Path) require.NoError(t, err) fstest.CheckListing(t, remote, []fstest.Item{file1, file2}) // 1: file name.txt // 2: hello sausage?/../z.txt // Tidy up moveTest directory require.NoError(t, remote.Rmdir(ctx, "moveTest")) }) // Move src to this remote using server side move operations. 
// // Will only be called if src.Fs().Name() == f.Name() // // If it isn't possible then return fs.ErrorCantDirMove // // If destination exists then return fs.ErrorDirExists // TestFsDirMove tests DirMove // // go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|FsDirMove)$ t.Run("FsDirMove", func(t *testing.T) { skipIfNotOk(t) // Check have DirMove doDirMove := remote.Features().DirMove if doDirMove == nil { t.Skip("FS has no DirMover interface") } // Check it can't move onto itself err := doDirMove(ctx, remote, "", "") require.Equal(t, fs.ErrorDirExists, err) // new remote newRemote, _, removeNewRemote, err := fstest.RandomRemote() require.NoError(t, err) defer removeNewRemote() const newName = "new_name/sub_new_name" // try the move err = newRemote.Features().DirMove(ctx, remote, "", newName) require.NoError(t, err) // check remotes // remote should not exist here _, err = remote.List(ctx, "") assert.Equal(t, fs.ErrorDirNotFound, errors.Cause(err)) //fstest.CheckListingWithPrecision(t, remote, []fstest.Item{}, []string{}, remote.Precision()) file1Copy := file1 file1Copy.Path = path.Join(newName, file1.Path) file2Copy := file2 file2Copy.Path = path.Join(newName, file2.Path) fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{file2Copy, file1Copy}, []string{ "new_name", "new_name/sub_new_name", "new_name/sub_new_name/hello? sausage", "new_name/sub_new_name/hello? sausage/êé", "new_name/sub_new_name/hello? sausage/êé/Hello, 世界", "new_name/sub_new_name/hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? + ≠", }, newRemote.Precision()) // move it back err = doDirMove(ctx, newRemote, newName, "") require.NoError(t, err) // check remotes fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file2, file1}, []string{ "hello? sausage", "hello? sausage/êé", "hello? sausage/êé/Hello, 世界", "hello? sausage/êé/Hello, 世界/ \" ' @ < > & ? 
+ ≠", }, remote.Precision()) fstest.CheckListingWithPrecision(t, newRemote, []fstest.Item{}, []string{ "new_name", }, newRemote.Precision()) }) // TestFsRmdirFull tests removing a non empty directory t.Run("FsRmdirFull", func(t *testing.T) { skipIfNotOk(t) if isBucketBasedButNotRoot(remote) { t.Skip("Skipping test as non root bucket based remote") } err := remote.Rmdir(ctx, "") require.Error(t, err, "Expecting error on RMdir on non empty remote") }) // TestFsPrecision tests the Precision of the Fs t.Run("FsPrecision", func(t *testing.T) { skipIfNotOk(t) precision := remote.Precision() if precision == fs.ModTimeNotSupported { return } if precision > time.Second || precision < 0 { t.Fatalf("Precision out of range %v", precision) } // FIXME check expected precision }) // TestObjectString tests the Object String method t.Run("ObjectString", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) assert.Equal(t, file1.Path, obj.String()) if opt.NilObject != nil { assert.Equal(t, "<nil>", opt.NilObject.String()) } }) // TestObjectFs tests the object can be found t.Run("ObjectFs", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) // If this is set we don't do the direct comparison of // the Fs from the object as it may be different if opt.SkipFsMatch { return } testRemote := remote if obj.Fs() != testRemote { // Check to see if this wraps something else if doUnWrap := testRemote.Features().UnWrap; doUnWrap != nil { testRemote = doUnWrap() } } assert.Equal(t, obj.Fs(), testRemote) }) // TestObjectRemote tests the Remote is correct t.Run("ObjectRemote", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) assert.Equal(t, file1.Path, obj.Remote()) }) // TestObjectHashes checks all the hashes the object supports t.Run("ObjectHashes", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) file1.CheckHashes(t, obj) }) // TestObjectModTime tests the ModTime of the object is correct TestObjectModTime := func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) file1.CheckModTime(t, obj, obj.ModTime(ctx), remote.Precision()) } t.Run("ObjectModTime", TestObjectModTime) // TestObjectMimeType tests the MimeType of the object is correct t.Run("ObjectMimeType", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) do, ok := obj.(fs.MimeTyper) if !ok { t.Skip("MimeType method not supported") } mimeType := do.MimeType(ctx) if strings.ContainsRune(mimeType, ';') { assert.Equal(t, "text/plain; charset=utf-8", mimeType) } else { assert.Equal(t, "text/plain", mimeType) } }) // TestObjectSetModTime tests that SetModTime works t.Run("ObjectSetModTime", func(t *testing.T) { skipIfNotOk(t) newModTime := fstest.Time("2011-12-13T14:15:16.999999999Z") obj := findObject(ctx, t, remote, file1.Path) err := obj.SetModTime(ctx, newModTime) if err == fs.ErrorCantSetModTime || err == fs.ErrorCantSetModTimeWithoutDelete { t.Log(err) return } require.NoError(t, err) file1.ModTime = newModTime file1.CheckModTime(t, obj, obj.ModTime(ctx), remote.Precision()) // And make a new object and read it from there too TestObjectModTime(t) }) // TestObjectSize tests that Size works t.Run("ObjectSize", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) assert.Equal(t, file1.Size, obj.Size()) }) // TestObjectOpen tests that Open works t.Run("ObjectOpen", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) assert.Equal(t, 
file1Contents, readObject(ctx, t, obj, -1), "contents of file1 differ") }) // TestObjectOpenSeek tests that Open works with SeekOption t.Run("ObjectOpenSeek", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) assert.Equal(t, file1Contents[50:], readObject(ctx, t, obj, -1, &fs.SeekOption{Offset: 50}), "contents of file1 differ after seek") }) // TestObjectOpenRange tests that Open works with RangeOption // // go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|ObjectOpenRange)$' t.Run("ObjectOpenRange", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) for _, test := range []struct { ro fs.RangeOption wantStart, wantEnd int }{ {fs.RangeOption{Start: 5, End: 15}, 5, 16}, {fs.RangeOption{Start: 80, End: -1}, 80, 100}, {fs.RangeOption{Start: 81, End: 100000}, 81, 100}, {fs.RangeOption{Start: -1, End: 20}, 80, 100}, // if start is omitted this means get the final bytes // {fs.RangeOption{Start: -1, End: -1}, 0, 100}, - this seems to work but the RFC doesn't define it } { got := readObject(ctx, t, obj, -1, &test.ro) foundAt := strings.Index(file1Contents, got) help := fmt.Sprintf("%#v failed want [%d:%d] got [%d:%d]", test.ro, test.wantStart, test.wantEnd, foundAt, foundAt+len(got)) assert.Equal(t, file1Contents[test.wantStart:test.wantEnd], got, help) } }) // TestObjectPartialRead tests that reading only part of the object does the correct thing t.Run("ObjectPartialRead", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) assert.Equal(t, file1Contents[:50], readObject(ctx, t, obj, 50), "contents of file1 differ after limited read") }) // TestObjectUpdate tests that Update works t.Run("ObjectUpdate", func(t *testing.T) { skipIfNotOk(t) contents := random.String(200) buf := bytes.NewBufferString(contents) hash := hash.NewMultiHasher() in := io.TeeReader(buf, hash) file1.Size = int64(buf.Len()) obj := findObject(ctx, t, remote, file1.Path) obji := object.NewStaticObjectInfo(file1.Path, file1.ModTime, int64(len(contents)), true, nil, obj.Fs()) err := obj.Update(ctx, in, obji) require.NoError(t, err) file1.Hashes = hash.Sums() // check the object has been updated file1.Check(t, obj, remote.Precision()) // Re-read the object and check again obj = findObject(ctx, t, remote, file1.Path) file1.Check(t, obj, remote.Precision()) // check contents correct assert.Equal(t, contents, readObject(ctx, t, obj, -1), "contents of updated file1 differ") file1Contents = contents }) // TestObjectStorable tests that Storable works t.Run("ObjectStorable", func(t *testing.T) { skipIfNotOk(t) obj := findObject(ctx, t, remote, file1.Path) require.NotNil(t, !obj.Storable(), "Expecting object to be storable") }) // TestFsIsFile tests that an error is returned along with a valid fs // which points to the parent directory. 
t.Run("FsIsFile", func(t *testing.T) { skipIfNotOk(t) remoteName := subRemoteName + "/" + file2.Path file2Copy := file2 file2Copy.Path = "z.txt" fileRemote, err := fs.NewFs(remoteName) require.NotNil(t, fileRemote) assert.Equal(t, fs.ErrorIsFile, err) if strings.HasPrefix(remoteName, "TestChunker") && strings.Contains(remoteName, "Nometa") { // TODO fix chunker and remove this bypass t.Logf("Skip listing check -- chunker can't yet handle this tricky case") return } fstest.CheckListing(t, fileRemote, []fstest.Item{file2Copy}) }) // TestFsIsFileNotFound tests that an error is not returned if no object is found t.Run("FsIsFileNotFound", func(t *testing.T) { skipIfNotOk(t) remoteName := subRemoteName + "/not found.txt" fileRemote, err := fs.NewFs(remoteName) require.NoError(t, err) fstest.CheckListing(t, fileRemote, []fstest.Item{}) }) // Test that things work from the root t.Run("FromRoot", func(t *testing.T) { if features := remote.Features(); features.BucketBased && !features.BucketBasedRootOK { t.Skip("Can't list from root on this remote") } configName, configLeaf, err := fspath.Parse(subRemoteName) require.NoError(t, err) if configName == "" { configName, configLeaf = path.Split(subRemoteName) } else { configName += ":" } t.Logf("Opening root remote %q path %q from %q", configName, configLeaf, subRemoteName) rootRemote, err := fs.NewFs(configName) require.NoError(t, err) file1Root := file1 file1Root.Path = path.Join(configLeaf, file1Root.Path) file2Root := file2 file2Root.Path = path.Join(configLeaf, file2Root.Path) var dirs []string dir := file2.Path for { dir = path.Dir(dir) if dir == "" || dir == "." || dir == "/" { break } dirs = append(dirs, path.Join(configLeaf, dir)) } // Check that we can see file1 and file2 from the root t.Run("List", func(t *testing.T) { fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, dirs, rootRemote.Precision()) }) // Check that that listing the entries is OK t.Run("ListEntries", func(t *testing.T) { entries, err := rootRemote.List(context.Background(), configLeaf) require.NoError(t, err) fstest.CompareItems(t, entries, []fstest.Item{file1Root}, dirs[len(dirs)-1:], rootRemote.Precision(), "ListEntries") }) // List the root with ListR t.Run("ListR", func(t *testing.T) { doListR := rootRemote.Features().ListR if doListR == nil { t.Skip("FS has no ListR interface") } file1Found, file2Found := false, false stopTime := time.Now().Add(10 * time.Second) errTooMany := errors.New("too many files") errFound := errors.New("found") err := doListR(context.Background(), "", func(entries fs.DirEntries) error { for _, entry := range entries { remote := entry.Remote() if remote == file1Root.Path { file1Found = true } if remote == file2Root.Path { file2Found = true } if file1Found && file2Found { return errFound } } if time.Now().After(stopTime) { return errTooMany } return nil }) if err != errFound && err != errTooMany { assert.NoError(t, err) } if err != errTooMany { assert.True(t, file1Found, "file1Root not found") assert.True(t, file2Found, "file2Root not found") } else { t.Logf("Too many files to list - giving up") } }) // Create a new file t.Run("Put", func(t *testing.T) { file3Root := fstest.Item{ ModTime: time.Now(), Path: path.Join(configLeaf, "created from root.txt"), } _, file3Obj := testPut(ctx, t, rootRemote, &file3Root) fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root, file3Root}, nil, rootRemote.Precision()) // And then remove it t.Run("Remove", func(t *testing.T) { 
require.NoError(t, file3Obj.Remove(context.Background())) fstest.CheckListingWithRoot(t, rootRemote, configLeaf, []fstest.Item{file1Root, file2Root}, nil, rootRemote.Precision()) }) }) }) // TestPublicLink tests creation of sharable, public links // go test -v -run 'TestIntegration/Test(Setup|Init|FsMkdir|FsPutFile1|FsPutFile2|FsUpdateFile1|PublicLink)$' t.Run("PublicLink", func(t *testing.T) { skipIfNotOk(t) doPublicLink := remote.Features().PublicLink if doPublicLink == nil { t.Skip("FS has no PublicLinker interface") } expiry := fs.Duration(60 * time.Second) // if object not found link, err := doPublicLink(ctx, file1.Path+"_does_not_exist", expiry, false) require.Error(t, err, "Expected to get error when file doesn't exist") require.Equal(t, "", link, "Expected link to be empty on error") // sharing file for the first time link1, err := doPublicLink(ctx, file1.Path, expiry, false) require.NoError(t, err) require.NotEqual(t, "", link1, "Link should not be empty") link2, err := doPublicLink(ctx, file2.Path, expiry, false) require.NoError(t, err) require.NotEqual(t, "", link2, "Link should not be empty") require.NotEqual(t, link1, link2, "Links to different files should differ") // sharing file for the 2nd time link1, err = doPublicLink(ctx, file1.Path, expiry, false) require.NoError(t, err) require.NotEqual(t, "", link1, "Link should not be empty") // sharing directory for the first time path := path.Dir(file2.Path) link3, err := doPublicLink(ctx, path, expiry, false) if err != nil && (errors.Cause(err) == fs.ErrorCantShareDirectories || errors.Cause(err) == fs.ErrorObjectNotFound) { t.Log("skipping directory tests as not supported on this backend") } else { require.NoError(t, err) require.NotEqual(t, "", link3, "Link should not be empty") // sharing directory for the second time link3, err = doPublicLink(ctx, path, expiry, false) require.NoError(t, err) require.NotEqual(t, "", link3, "Link should not be empty") // sharing the "root" directory in a subremote subRemote, _, removeSubRemote, err := fstest.RandomRemote() require.NoError(t, err) defer removeSubRemote() // ensure sub remote isn't empty buf := bytes.NewBufferString("somecontent") obji := object.NewStaticObjectInfo("somefile", time.Now(), int64(buf.Len()), true, nil, nil) _, err = subRemote.Put(ctx, buf, obji) require.NoError(t, err) link4, err := subRemote.Features().PublicLink(ctx, "", expiry, false) require.NoError(t, err, "Sharing root in a sub-remote should work") require.NotEqual(t, "", link4, "Link should not be empty") } }) // TestSetTier tests SetTier and GetTier functionality t.Run("SetTier", func(t *testing.T) { skipIfNotSetTier(t) obj := findObject(ctx, t, remote, file1.Path) setter, ok := obj.(fs.SetTierer) assert.NotNil(t, ok) getter, ok := obj.(fs.GetTierer) assert.NotNil(t, ok) // If interfaces are supported TiersToTest should contain // at least one entry supportedTiers := opt.TiersToTest assert.NotEmpty(t, supportedTiers) // test set tier changes on supported storage classes or tiers for _, tier := range supportedTiers { err := setter.SetTier(tier) assert.Nil(t, err) got := getter.GetTier() assert.Equal(t, tier, got) } }) // Check to see if Fs that wrap other Objects implement all the optional methods t.Run("ObjectCheckWrap", func(t *testing.T) { skipIfNotOk(t) if opt.SkipObjectCheckWrap { t.Skip("Skipping FsCheckWrap on this Fs") } ft := new(fs.Features).Fill(remote) if ft.UnWrap == nil { t.Skip("Not a wrapping Fs") } obj := findObject(ctx, t, remote, file1.Path) _, unsupported := 
fs.ObjectOptionalInterfaces(obj) for _, name := range unsupported { if !stringsContains(name, opt.UnimplementableObjectMethods) { t.Errorf("Missing Object wrapper for %s", name) } } }) // TestObjectRemove tests Remove t.Run("ObjectRemove", func(t *testing.T) { skipIfNotOk(t) // remove file1 obj := findObject(ctx, t, remote, file1.Path) err := obj.Remove(ctx) require.NoError(t, err) // check listing without modtime as TestPublicLink may change the modtime fstest.CheckListingWithPrecision(t, remote, []fstest.Item{file2}, nil, fs.ModTimeNotSupported) }) // TestAbout tests the About optional interface t.Run("ObjectAbout", func(t *testing.T) { skipIfNotOk(t) // Check have About doAbout := remote.Features().About if doAbout == nil { t.Skip("FS does not support About") } // Can't really check the output much! usage, err := doAbout(context.Background()) require.NoError(t, err) require.NotNil(t, usage) assert.NotEqual(t, int64(0), usage.Total) }) // Just file2 remains for Purge to clean up // TestFsPutStream tests uploading files when size isn't known in advance. // This may trigger large buffer allocation in some backends, keep it // close to the end of suite. (See fs/operations/xtra_operations_test.go) t.Run("FsPutStream", func(t *testing.T) { skipIfNotOk(t) if remote.Features().PutStream == nil { t.Skip("FS has no PutStream interface") } for _, contentSize := range []int{0, 100} { t.Run(strconv.Itoa(contentSize), func(t *testing.T) { file := fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"), Path: "piped data.txt", Size: -1, // use unknown size during upload } var ( err error obj fs.Object uploadHash *hash.MultiHasher ) retry(t, "PutStream", func() error { contents := random.String(contentSize) buf := bytes.NewBufferString(contents) uploadHash = hash.NewMultiHasher() in := io.TeeReader(buf, uploadHash) file.Size = -1 obji := object.NewStaticObjectInfo(file.Path, file.ModTime, file.Size, true, nil, nil) obj, err = remote.Features().PutStream(ctx, in, obji) return err }) file.Hashes = uploadHash.Sums() file.Size = int64(contentSize) // use correct size when checking file.Check(t, obj, remote.Precision()) // Re-read the object and check again obj = findObject(ctx, t, remote, file.Path) file.Check(t, obj, remote.Precision()) require.NoError(t, obj.Remove(ctx)) }) } }) // TestInternal calls InternalTest() on the Fs t.Run("Internal", func(t *testing.T) { skipIfNotOk(t) if it, ok := remote.(InternalTester); ok { it.InternalTest(t) } else { t.Skipf("%T does not implement InternalTester", remote) } }) }) // TestFsPutChunked may trigger large buffer allocation with // some backends (see fs/operations/xtra_operations_test.go), // keep it closer to the end of suite. 
t.Run("FsPutChunked", func(t *testing.T) { skipIfNotOk(t) if testing.Short() { t.Skip("not running with -short") } setUploadChunkSizer, _ := remote.(SetUploadChunkSizer) if setUploadChunkSizer == nil { t.Skipf("%T does not implement SetUploadChunkSizer", remote) } setUploadCutoffer, _ := remote.(SetUploadCutoffer) minChunkSize := opt.ChunkedUpload.MinChunkSize if minChunkSize < 100 { minChunkSize = 100 } if opt.ChunkedUpload.CeilChunkSize != nil { minChunkSize = opt.ChunkedUpload.CeilChunkSize(minChunkSize) } maxChunkSize := 2 * fs.MebiByte if maxChunkSize < 2*minChunkSize { maxChunkSize = 2 * minChunkSize } if opt.ChunkedUpload.MaxChunkSize > 0 && maxChunkSize > opt.ChunkedUpload.MaxChunkSize { maxChunkSize = opt.ChunkedUpload.MaxChunkSize } if opt.ChunkedUpload.CeilChunkSize != nil { maxChunkSize = opt.ChunkedUpload.CeilChunkSize(maxChunkSize) } next := func(f func(fs.SizeSuffix) fs.SizeSuffix) fs.SizeSuffix { s := f(minChunkSize) if s > maxChunkSize { s = minChunkSize } return s } chunkSizes := fs.SizeSuffixList{ minChunkSize, minChunkSize + (maxChunkSize-minChunkSize)/3, next(NextPowerOfTwo), next(NextMultipleOf(100000)), next(NextMultipleOf(100001)), maxChunkSize, } chunkSizes.Sort() // Set the minimum chunk size, upload cutoff and reset it at the end oldChunkSize, err := setUploadChunkSizer.SetUploadChunkSize(minChunkSize) require.NoError(t, err) var oldUploadCutoff fs.SizeSuffix if setUploadCutoffer != nil { oldUploadCutoff, err = setUploadCutoffer.SetUploadCutoff(minChunkSize) require.NoError(t, err) } defer func() { _, err := setUploadChunkSizer.SetUploadChunkSize(oldChunkSize) assert.NoError(t, err) if setUploadCutoffer != nil { _, err := setUploadCutoffer.SetUploadCutoff(oldUploadCutoff) assert.NoError(t, err) } }() var lastCs fs.SizeSuffix for _, cs := range chunkSizes { if cs <= lastCs { continue } if opt.ChunkedUpload.CeilChunkSize != nil { cs = opt.ChunkedUpload.CeilChunkSize(cs) } lastCs = cs t.Run(cs.String(), func(t *testing.T) { _, err := setUploadChunkSizer.SetUploadChunkSize(cs) require.NoError(t, err) if setUploadCutoffer != nil { _, err = setUploadCutoffer.SetUploadCutoff(cs) require.NoError(t, err) } var testChunks []fs.SizeSuffix if opt.ChunkedUpload.NeedMultipleChunks { // If NeedMultipleChunks is set then test with > cs testChunks = []fs.SizeSuffix{cs + 1, 2 * cs, 2*cs + 1} } else { testChunks = []fs.SizeSuffix{cs - 1, cs, 2*cs + 1} } for _, fileSize := range testChunks { t.Run(fmt.Sprintf("%d", fileSize), func(t *testing.T) { TestPutLarge(ctx, t, remote, &fstest.Item{ ModTime: fstest.Time("2001-02-03T04:05:06.499999999Z"), Path: fmt.Sprintf("chunked-%s-%s.bin", cs.String(), fileSize.String()), Size: int64(fileSize), }) }) } }) } }) // TestFsUploadUnknownSize ensures Fs.Put() and Object.Update() don't panic when // src.Size() == -1 // // This may trigger large buffer allocation in some backends, keep it // closer to the suite end. 
(See fs/operations/xtra_operations_test.go) t.Run("FsUploadUnknownSize", func(t *testing.T) { skipIfNotOk(t) t.Run("FsPutUnknownSize", func(t *testing.T) { defer func() { assert.Nil(t, recover(), "Fs.Put() should not panic when src.Size() == -1") }() contents := random.String(100) in := bytes.NewBufferString(contents) obji := object.NewStaticObjectInfo("unknown-size-put.txt", fstest.Time("2002-02-03T04:05:06.499999999Z"), -1, true, nil, nil) obj, err := remote.Put(ctx, in, obji) if err == nil { require.NoError(t, obj.Remove(ctx), "successfully uploaded unknown-sized file but failed to remove") } // if err != nil: it's okay as long as no panic }) t.Run("FsUpdateUnknownSize", func(t *testing.T) { unknownSizeUpdateFile := fstest.Item{ ModTime: fstest.Time("2002-02-03T04:05:06.499999999Z"), Path: "unknown-size-update.txt", } testPut(ctx, t, remote, &unknownSizeUpdateFile) defer func() { assert.Nil(t, recover(), "Object.Update() should not panic when src.Size() == -1") }() newContents := random.String(200) in := bytes.NewBufferString(newContents) obj := findObject(ctx, t, remote, unknownSizeUpdateFile.Path) obji := object.NewStaticObjectInfo(unknownSizeUpdateFile.Path, unknownSizeUpdateFile.ModTime, -1, true, nil, obj.Fs()) err := obj.Update(ctx, in, obji) if err == nil { require.NoError(t, obj.Remove(ctx), "successfully updated object with unknown-sized source but failed to remove") } // if err != nil: it's okay as long as no panic }) }) // TestFsRootCollapse tests if the root of an fs "collapses" to the // absolute root. It creates a new fs of the same backend type with its // root set to a *non-existent* folder, and attempts to read the info of // an object in that folder, whose name is taken from a directory that // exists in the absolute root. // This test is added after // https://github.com/rclone/rclone/issues/3164. t.Run("FsRootCollapse", func(t *testing.T) { deepRemoteName := subRemoteName + "/deeper/nonexisting/directory" deepRemote, err := fs.NewFs(deepRemoteName) require.NoError(t, err) colonIndex := strings.IndexRune(deepRemoteName, ':') firstSlashIndex := strings.IndexRune(deepRemoteName, '/') firstDir := deepRemoteName[colonIndex+1 : firstSlashIndex] _, err = deepRemote.NewObject(ctx, firstDir) require.Equal(t, fs.ErrorObjectNotFound, err) // If err is not fs.ErrorObjectNotFound, it means the backend is // somehow confused about root and absolute root. }) // Purge the folder err = operations.Purge(ctx, remote, "") if errors.Cause(err) != fs.ErrorDirNotFound { require.NoError(t, err) } purged = true fstest.CheckListing(t, remote, []fstest.Item{}) // Check purging again if not bucket based if !isBucketBasedButNotRoot(remote) { err = operations.Purge(ctx, remote, "") assert.Error(t, err, "Expecting error after on second purge") if errors.Cause(err) != fs.ErrorDirNotFound { t.Log("Warning: this should produce fs.ErrorDirNotFound") } } }) // Check directory is purged if !purged { _ = operations.Purge(ctx, remote, "") } // Remove the local directory so we don't clutter up /tmp if strings.HasPrefix(remoteName, "/") { t.Log("remoteName", remoteName) // Remove temp directory err := os.Remove(remoteName) require.NoError(t, err) } }
[ "\"RCLONE_CONFIG\"" ]
[]
[ "RCLONE_CONFIG" ]
[]
["RCLONE_CONFIG"]
go
1
0
civilpy/general/math.py
import civilpy

np = civilpy.np
solver = civilpy.sym
inf = civilpy.sym.oo


def equation_solver(eq, variable):
    return civilpy.sym.solve(eq, variable)


def arithmetic_growth(p_0, r, t):
    p_t = p_0 + (t * r)
    return p_t


def geometric_growth(p_0, r, t):
    p_t = p_0 * ((1 + r) ** t)
    return p_t


def find_global_extrema_order_3(f, x1, x2, variable='x'):
    x = civilpy.sym.symbols(variable)
    f_prime = civilpy.sym.diff(f)
    f_prime2 = civilpy.sym.diff(f_prime)
    print(f'The first derivative is \n{f_prime} the second is \n {f_prime2}\n')

    try:
        root1, root2 = civilpy.sym.solvers.solve(f_prime, x)
        print(f'The first root is {root1}, the second root is {root2}\n')

        for root in [root1, root2]:
            if f_prime2.subs(x, root) > 0:
                print(f'{root} is a local minimum\n')
            elif f_prime2.subs(x, root) < 0:
                print(f'{root} is a local maximum\n')

        inf_point = civilpy.sym.solvers.solve(f_prime2, x)
        print(f'The inflection point is {inf_point[0]}\n')

        check_values = {x1: 0, x2: 0, root1: 0, root2: 0}
        for key, value in check_values.items():
            check_values[key] = f.subs(x, key)

        global_min = min(check_values.keys(), key=(lambda k: check_values[k]))
        global_max = max(check_values.keys(), key=(lambda k: check_values[k]))
        print(f'Global min: {global_min}, Global max: {global_max}')

        return global_min, global_max, check_values

    except ValueError:
        print('This function only works on 3rd order equations')
[]
[]
[]
[]
[]
python
null
null
null
github_processor.go
package metadata

import (
	"context"
	"os"
	"strconv"
	"sync"

	"github.com/athenianco/metadata/github"
	"github.com/athenianco/metadata/pubsub"
)

/*
 * Note: By default,
 * Cloud Functions does not support connecting to the Cloud SQL instance using TCP.
 * Your code should not try to access the instance using an IP address (such as 127.0.0.1 or 172.17.0.1)
 * unless you have configured Serverless VPC Access.
 *
 * The PostgreSQL standard requires the Unix socket to have a .s.PGSQL.5432 suffix in the socket path.
 * Some libraries apply this suffix automatically,
 * but others require you to specify the socket path as follows:
 * /cloudsql/INSTANCE_CONNECTION_NAME/.s.PGSQL.5432.
 */

var ghProcessor struct {
	once sync.Once
	fnc  pubsub.Subscriber
}

func initGHProcessor() {
	dbURI := os.Getenv("GITHUB_DATABASE_URI")
	if dbURI == "" {
		panic("GITHUB_DATABASE_URI is not set")
	}

	// When using a connection pool, it is important to set the maximum connections to 1.
	// This may seem counter-intuitive, however, Cloud Functions limits concurrent executions to 1 per instance.
	// This means you never have a situation where two requests are being processed by a single function instance at the same time.
	// This means in most situations only a single database connection is needed.
	maxOpenConns, err := strconv.Atoi(os.Getenv("GITHUB_DATABASE_MAX_OPEN_CONNS"))
	if err != nil {
		maxOpenConns = 1
	}
	maxIdleConns, err := strconv.Atoi(os.Getenv("GITHUB_DATABASE_MAX_IDLE_CONNS"))
	if err != nil {
		maxIdleConns = 1
	}

	db, err := github.OpenDatabase(dbURI, maxOpenConns, maxIdleConns)
	if err != nil {
		panic(err)
	}

	ghProcessor.fnc = func(ctx context.Context, msg pubsub.Message) error {
		event, err := github.UnmarshalEvent(msg.Data)
		if err != nil {
			return err
		}
		return event.Process(ctx, db)
	}
}

// GithubProcess is triggered by Pub/Sub.
func GithubProcess(ctx context.Context, msg pubsub.Message) error {
	ghProcessor.once.Do(initGHProcessor)
	return ghProcessor.fnc(ctx, msg)
}
[ "\"GITHUB_DATABASE_URI\"", "\"GITHUB_DATABASE_MAX_OPEN_CONNS\"", "\"GITHUB_DATABASE_MAX_IDLE_CONNS\"" ]
[]
[ "GITHUB_DATABASE_MAX_OPEN_CONNS", "GITHUB_DATABASE_URI", "GITHUB_DATABASE_MAX_IDLE_CONNS" ]
[]
["GITHUB_DATABASE_MAX_OPEN_CONNS", "GITHUB_DATABASE_URI", "GITHUB_DATABASE_MAX_IDLE_CONNS"]
go
3
0
public/javascripts/fckeditor/fckeditor.py
"""
FCKeditor - The text editor for Internet - http://www.fckeditor.net
Copyright (C) 2003-2007 Frederico Caldeira Knabben

== BEGIN LICENSE ==

Licensed under the terms of any of the following licenses at your choice:

 - GNU General Public License Version 2 or later (the "GPL")
   http://www.gnu.org/licenses/gpl.html

 - GNU Lesser General Public License Version 2.1 or later (the "LGPL")
   http://www.gnu.org/licenses/lgpl.html

 - Mozilla Public License Version 1.1 or later (the "MPL")
   http://www.mozilla.org/MPL/MPL-1.1.html

== END LICENSE ==

This is the integration file for Python.
"""

import cgi
import os
import re
import string


def escape(text, replace=string.replace):
    """Converts the special characters '<', '>', and '&'.

    RFC 1866 specifies that these characters be represented in HTML as
    &lt; &gt; and &amp; respectively. In Python 1.5 we use the new
    string.replace() function for speed.
    """
    text = replace(text, '&', '&amp;')  # must be done 1st
    text = replace(text, '<', '&lt;')
    text = replace(text, '>', '&gt;')
    text = replace(text, '"', '&quot;')
    text = replace(text, "'", '&#39;')
    return text


# The FCKeditor class
class FCKeditor(object):
    def __init__(self, instanceName):
        self.InstanceName = instanceName
        self.BasePath = '/fckeditor/'
        self.Width = '100%'
        self.Height = '200'
        self.ToolbarSet = 'Default'
        self.Value = '';
        self.Config = {}

    def Create(self):
        return self.CreateHtml()

    def CreateHtml(self):
        HtmlValue = escape(self.Value)
        Html = "<div>"
        if (self.IsCompatible()):
            File = "fckeditor.html"
            Link = "%seditor/%s?InstanceName=%s" % (
                self.BasePath,
                File,
                self.InstanceName
            )
            if (self.ToolbarSet is not None):
                Link += "&amp;ToolBar=%s" % self.ToolbarSet

            # Render the linked hidden field
            Html += "<input type=\"hidden\" id=\"%s\" name=\"%s\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.InstanceName,
                HtmlValue
            )

            # Render the configurations hidden field
            Html += "<input type=\"hidden\" id=\"%s___Config\" value=\"%s\" style=\"display:none\" />" % (
                self.InstanceName,
                self.GetConfigFieldString()
            )

            # Render the editor iframe
            Html += "<iframe id=\"%s\__Frame\" src=\"%s\" width=\"%s\" height=\"%s\" frameborder=\"0\" scrolling=\"no\"></iframe>" % (
                self.InstanceName,
                Link,
                self.Width,
                self.Height
            )
        else:
            if (self.Width.find("%%") < 0):
                WidthCSS = "%spx" % self.Width
            else:
                WidthCSS = self.Width
            if (self.Height.find("%%") < 0):
                HeightCSS = "%spx" % self.Height
            else:
                HeightCSS = self.Height
            Html += "<textarea name=\"%s\" rows=\"4\" cols=\"40\" style=\"width: %s; height: %s;\" wrap=\"virtual\">%s</textarea>" % (
                self.InstanceName,
                WidthCSS,
                HeightCSS,
                HtmlValue
            )
        Html += "</div>"
        return Html

    def IsCompatible(self):
        if (os.environ.has_key("HTTP_USER_AGENT")):
            sAgent = os.environ.get("HTTP_USER_AGENT", "")
        else:
            sAgent = ""
        if (sAgent.find("MSIE") >= 0) and (sAgent.find("mac") < 0) and (sAgent.find("Opera") < 0):
            i = sAgent.find("MSIE")
            iVersion = float(sAgent[i+5:i+5+3])
            if (iVersion >= 5.5):
                return True
            return False
        elif (sAgent.find("Gecko/") >= 0):
            i = sAgent.find("Gecko/")
            iVersion = int(sAgent[i+6:i+6+8])
            if (iVersion >= 20030210):
                return True
            return False
        elif (sAgent.find("Opera/") >= 0):
            i = sAgent.find("Opera/")
            iVersion = float(sAgent[i+6:i+6+4])
            if (iVersion >= 9.5):
                return True
            return False
        elif (sAgent.find("AppleWebKit/") >= 0):
            p = re.compile('AppleWebKit\/(\d+)', re.IGNORECASE)
            m = p.search(sAgent)
            if (m.group(1) >= 522):
                return True
            return False
        else:
            return False

    def GetConfigFieldString(self):
        sParams = ""
        bFirst = True
        for sKey in self.Config.keys():
            sValue = self.Config[sKey]
            if (not bFirst):
                sParams += "&amp;"
            else:
                bFirst = False
            if (sValue):
                k = escape(sKey)
                v = escape(sValue)
                if (sValue == "true"):
                    sParams += "%s=true" % k
                elif (sValue == "false"):
                    sParams += "%s=false" % k
                else:
                    sParams += "%s=%s" % (k, v)
        return sParams
[]
[]
[ "HTTP_USER_AGENT" ]
[]
["HTTP_USER_AGENT"]
python
1
0
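Note on the row above: the recorded fckeditor.py content is Python 2 code (`string.replace`, `os.environ.has_key`), so the HTTP_USER_AGENT lookup listed in its constant-argument columns will not run unmodified on Python 3. As a point of comparison only, here is a minimal Python 3 sketch of the same user-agent check; the function name `is_compatible_py3` and its `environ` parameter are illustrative, and the WebKit branch converts the captured version to `int` before comparing (the original compares a string against 522).

import os
import re


def is_compatible_py3(environ=os.environ):
    # Python 3 rendering of the record's IsCompatible(): dict-style
    # .get() replaces the Python 2-only os.environ.has_key().
    agent = environ.get("HTTP_USER_AGENT", "")
    if "MSIE" in agent and "mac" not in agent and "Opera" not in agent:
        i = agent.find("MSIE")
        return float(agent[i + 5:i + 8]) >= 5.5
    if "Gecko/" in agent:
        i = agent.find("Gecko/")
        return int(agent[i + 6:i + 14]) >= 20030210
    if "Opera/" in agent:
        i = agent.find("Opera/")
        return float(agent[i + 6:i + 10]) >= 9.5
    match = re.search(r"AppleWebKit/(\d+)", agent, re.IGNORECASE)
    if match:
        return int(match.group(1)) >= 522
    return False


assert is_compatible_py3({"HTTP_USER_AGENT": "Mozilla/5.0 AppleWebKit/537.36"})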
test/e2e/capi_test.go
// +build e2e /* Copyright 2020 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package e2e import ( "context" "fmt" "os" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" capi_e2e "sigs.k8s.io/cluster-api/test/e2e" "sigs.k8s.io/cluster-api/test/framework" "sigs.k8s.io/cluster-api/util" ) var _ = Describe("Running the Cluster API E2E tests", func() { BeforeEach(func() { Expect(e2eConfig.Variables).To(HaveKey(capi_e2e.CNIPath)) rgName := fmt.Sprintf("capz-e2e-%s", util.RandomString(6)) Expect(os.Setenv(AzureResourceGroup, rgName)).NotTo(HaveOccurred()) Expect(os.Setenv(AzureVNetName, fmt.Sprintf("%s-vnet", rgName))).NotTo(HaveOccurred()) }) AfterEach(func() { redactLogs() Expect(os.Unsetenv(AzureResourceGroup)).NotTo(HaveOccurred()) Expect(os.Unsetenv(AzureVNetName)).NotTo(HaveOccurred()) }) Context("Running the quick-start spec", func() { capi_e2e.QuickStartSpec(context.TODO(), func() capi_e2e.QuickStartSpecInput { return capi_e2e.QuickStartSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, } }) }) Context("Running the KCP upgrade spec in a single control plane cluster", func() { capi_e2e.KCPUpgradeSpec(context.TODO(), func() capi_e2e.KCPUpgradeSpecInput { return capi_e2e.KCPUpgradeSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, ControlPlaneMachineCount: 1, SkipCleanup: skipCleanup, } }) }) Context("Running the KCP upgrade spec in a HA cluster", func() { capi_e2e.KCPUpgradeSpec(context.TODO(), func() capi_e2e.KCPUpgradeSpecInput { return capi_e2e.KCPUpgradeSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, ControlPlaneMachineCount: 3, SkipCleanup: skipCleanup, } }) }) Context("Running the MachineDeployment upgrade spec", func() { capi_e2e.MachineDeploymentUpgradesSpec(context.TODO(), func() capi_e2e.MachineDeploymentUpgradesSpecInput { return capi_e2e.MachineDeploymentUpgradesSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, } }) }) if os.Getenv("LOCAL_ONLY") != "true" { Context("Running the self-hosted spec", func() { capi_e2e.SelfHostedSpec(context.TODO(), func() capi_e2e.SelfHostedSpecInput { return capi_e2e.SelfHostedSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, } }) }) } Context("Should successfully remediate unhealthy machines with MachineHealthCheck", func() { capi_e2e.MachineRemediationSpec(context.TODO(), func() capi_e2e.MachineRemediationSpecInput { return capi_e2e.MachineRemediationSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, BootstrapClusterProxy: 
bootstrapClusterProxy, ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, } }) }) Context("Should adopt up-to-date control plane Machines without modification", func() { capi_e2e.KCPAdoptionSpec(context.TODO(), func() capi_e2e.KCPAdoptionSpecInput { return capi_e2e.KCPAdoptionSpecInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy.(framework.ClusterProxy), ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, } }) }) Context("Should successfully exercise machine pools", func() { capi_e2e.MachinePoolSpec(context.TODO(), func() capi_e2e.MachinePoolInput { return capi_e2e.MachinePoolInput{ E2EConfig: e2eConfig, ClusterctlConfigPath: clusterctlConfigPath, BootstrapClusterProxy: bootstrapClusterProxy.(framework.ClusterProxy), ArtifactFolder: artifactFolder, SkipCleanup: skipCleanup, } }) }) })
[ "\"LOCAL_ONLY\"" ]
[]
[ "LOCAL_ONLY" ]
[]
["LOCAL_ONLY"]
go
1
0
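For Go rows such as test/e2e/capi_test.go, the environment column stores the raw Go string literal with its quotes escaped (`"\"LOCAL_ONLY\""`), while constarg and constargjson hold the bare name; the Python rows in this section leave environment empty. A small sketch of the normalization that would map one form onto the other, assuming that stripping the surrounding quote characters is all that is needed (the helper name is made up):

import json


def normalize_environment_tokens(raw_tokens):
    # Drop the quote characters that Go string literals carry in the
    # environment column, keeping first-seen order and dropping duplicates.
    names = []
    for token in raw_tokens:
        name = token.strip().strip('"')
        if name and name not in names:
            names.append(name)
    return names


raw = json.loads('[ "\\"LOCAL_ONLY\\"" ]')   # the value shown in this row
assert normalize_environment_tokens(raw) == ["LOCAL_ONLY"]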
docs/conf.py
# -*- coding: utf-8 -*- # # webapp2 documentation build configuration file, created by # sphinx-quickstart on Sat Jul 31 10:41:37 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import mock import os import sys from pkg_resources import get_distribution on_rtd = os.environ.get('READTHEDOCS', None) == 'True' # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # See # (http://read-the-docs.readthedocs.org/en/latest/faq.html#\ # i-get-import-errors-on-libraries-that-depend-on-c-modules) class Mock(mock.Mock): @classmethod def __getattr__(cls, name): return Mock() MOCK_MODULES = ( 'google', 'google.appengine', 'google.appengine.api', 'google.appengine.ext', 'google.appengine.ext.ndb', 'google.appengine.ext.ndb.model', 'google.appengine.ext.webapp', 'google.appengine.ext.webapp.util', ) sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES) # Setup the libary import path current_path = os.path.dirname(__file__) webapp2_path = os.path.abspath(os.path.join(current_path, '..')) sys.path.insert(1, webapp2_path) # Insert theme path sys.path.insert(1, os.path.join(current_path, '_themes', 'webapp2')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'webapp2' copyright = u'2011, webapp2 AUTHORS' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. distro = get_distribution('webapp2') version = distro.version # The full version, including alpha/beta/rc tags. release = distro.version # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. #language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). 
#add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. if on_rtd and False: html_theme = 'default' else: html_theme = 'webapp2' # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'pygapp2.pygapp2' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'logo_width': 252, 'logo_height': 60, 'analytics_code': 'UA-7132932-5', } # Add any paths that contain custom themes here, relative to this directory. html_theme_path = ['_themes'] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. html_short_title = 'webapp2' # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = '_static/webapp2_blue_small.png' # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. #html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. #html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. #html_additional_pages = {} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'webapp2doc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. 
List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'webapp2.tex', u'webapp2 Documentation', u'Rodrigo Moraes', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. #latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'webapp2', u'webapp2 Documentation', [u'Rodrigo Moraes'], 1) ] # Example configuration for intersphinx: refer to the Python standard library. intersphinx_mapping = {'python': ('http://docs.python.org/2.7', None)}
[]
[]
[ "READTHEDOCS" ]
[]
["READTHEDOCS"]
python
1
0
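The docs/conf.py row records READTHEDOCS as its single constant argument, taken from `os.environ.get('READTHEDOCS', None)`. A hedged sketch of how that name could be recovered from the content column for Python sources; the pattern only covers direct os.environ.get / os.environ[...] / os.getenv calls with a literal first argument, and the names below are illustrative rather than the dump's actual extraction code.

import re

# Literal first arguments of os.environ.get('NAME', ...),
# os.environ['NAME'] and os.getenv('NAME', ...).
PY_ENV_CONST = re.compile(
    r"os\.(?:environ\.get\(|environ\[|getenv\()\s*['\"]([A-Za-z_][A-Za-z0-9_]*)['\"]"
)


def python_const_env_names(source):
    # Distinct names in order of first appearance.
    names = []
    for name in PY_ENV_CONST.findall(source):
        if name not in names:
            names.append(name)
    return names


assert python_const_env_names(
    "on_rtd = os.environ.get('READTHEDOCS', None) == 'True'"
) == ["READTHEDOCS"]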
fs/example_test.go
// Copyright 2019 the Go-FUSE Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package fs_test import ( "fmt" "io/ioutil" "log" "os" "github.com/hanwen/go-fuse/v2/fs" "github.com/hanwen/go-fuse/v2/fuse" ) // ExampleMount shows how to create a loopback file system, and // mounting it onto a directory func Example_mount() { mntDir, _ := ioutil.TempDir("", "") home := os.Getenv("HOME") // Make $HOME available on a mount dir under /tmp/ . Caution: // write operations are also mirrored. root, err := fs.NewLoopbackRoot(home) if err != nil { log.Fatal(err) } // Mount the file system server, err := fs.Mount(mntDir, root, &fs.Options{ MountOptions: fuse.MountOptions{Debug: true}, }) if err != nil { log.Fatal(err) } fmt.Printf("Mounted %s as loopback on %s\n", home, mntDir) fmt.Printf("\n\nCAUTION:\nwrite operations on %s will also affect $HOME (%s)\n\n", mntDir, home) fmt.Printf("Unmount by calling 'fusermount -u %s'\n", mntDir) // Serve the file system, until unmounted by calling fusermount -u server.Wait() }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
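The fs/example_test.go row is the simplest Go case: a single `os.Getenv("HOME")` call whose literal argument shows up quoted in environment and bare in constarg. A matching sketch for Go sources, with the same caveat that the regex and function name are assumptions for illustration and only direct calls with a string literal are caught.

import re

# Literal first arguments of os.Getenv / os.LookupEnv / os.Setenv / os.Unsetenv.
GO_ENV_CONST = re.compile(
    r'os\.(?:Getenv|LookupEnv|Setenv|Unsetenv)\(\s*"([A-Za-z_][A-Za-z0-9_]*)"'
)


def go_const_env_names(source):
    names = []
    for name in GO_ENV_CONST.findall(source):
        if name not in names:
            names.append(name)
    return names


assert go_const_env_names('home := os.Getenv("HOME")') == ["HOME"]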
backend/backend/settings/deps/org.py
import os ORG = os.environ.get('BACKEND_ORG', 'substra') DEFAULT_PORT = os.environ.get('BACKEND_DEFAULT_PORT', '8000') ORG_NAME = ORG.replace('-', '') ORG_DB_NAME = ORG.replace('-', '_').upper()
[]
[]
[ "BACKEND_ORG", "BACKEND_DEFAULT_PORT" ]
[]
["BACKEND_ORG", "BACKEND_DEFAULT_PORT"]
python
2
0
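backend/backend/settings/deps/org.py reads two constants (BACKEND_ORG, BACKEND_DEFAULT_PORT), which is why constargcount is 2 while variablearg stays empty and variableargcount stays 0. Regex matching is fragile for Python, so here is a hedged AST-based sketch of the constant-versus-variable split for os.environ.get calls; the helper name is invented, ast.unparse needs Python 3.9 or newer, and this is not claimed to be how the dump was produced.

import ast


def classify_python_env_args(source):
    # First arguments of os.environ.get(...): string literals go to
    # const_args, anything else to variable_args.
    const_args, variable_args = [], []
    for node in ast.walk(ast.parse(source)):
        if not isinstance(node, ast.Call):
            continue
        func = node.func
        if not (isinstance(func, ast.Attribute) and func.attr == "get"
                and isinstance(func.value, ast.Attribute)
                and func.value.attr == "environ" and node.args):
            continue
        first = node.args[0]
        if isinstance(first, ast.Constant) and isinstance(first.value, str):
            const_args.append(first.value)
        else:
            variable_args.append(ast.unparse(first))  # Python >= 3.9
    return const_args, variable_args


source = (
    "import os\n"
    "ORG = os.environ.get('BACKEND_ORG', 'substra')\n"
    "DEFAULT_PORT = os.environ.get('BACKEND_DEFAULT_PORT', '8000')\n"
)
consts, variables = classify_python_env_args(source)
assert consts == ["BACKEND_ORG", "BACKEND_DEFAULT_PORT"] and variables == []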
cmd/token_user.go
/* * Copyright © 2015-2018 Aeneas Rekkas <[email protected]> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * @author Aeneas Rekkas <[email protected]> * @copyright 2015-2018 Aeneas Rekkas <[email protected]> * @license Apache-2.0 */ package cmd import ( "context" "crypto/rand" "crypto/rsa" "crypto/tls" "fmt" "html/template" "net/http" "os" "strconv" "strings" "time" "github.com/ory/hydra/cmd/cli" "github.com/julienschmidt/httprouter" "github.com/spf13/cobra" "github.com/toqueteos/webbrowser" "golang.org/x/oauth2" "github.com/ory/x/cmdx" "github.com/ory/x/flagx" "github.com/ory/x/randx" "github.com/ory/x/tlsx" "github.com/ory/x/urlx" ) var tokenUserWelcome = template.Must(template.New("").Parse(`<html> <body> <h1>Welcome to the exemplary OAuth 2.0 Consumer!</h1> <p>This is an example app which emulates an OAuth 2.0 consumer application. Usually, this would be your web or mobile application and would use an <a href="https://oauth.net/code/">OAuth 2.0</a> or <a href="https://oauth.net/code/">OpenID Connect</a> library.</p> <p>This example requests an OAuth 2.0 Access, Refresh, and OpenID Connect ID Token from the OAuth 2.0 Server (ORY Hydra). To initiate the flow, click the "Authorize Application" button.</p> <p><a href="{{ .URL }}">Authorize application</a></p> </body> </html>`)) var tokenUserError = template.Must(template.New("").Parse(`<html> <body> <h1>An error occurred</h1> <h2>{{ .Name }}</h2> <p>{{ .Description }}</p> <p>{{ .Hint }}</p> <p>{{ .Debug }}</p> </body> </html>`)) var tokenUserResult = template.Must(template.New("").Parse(`<html> <head></head> <body> <ul> <li>Access Token: <code>{{ .AccessToken }}</code></li> <li>Refresh Token: <code>{{ .RefreshToken }}</code></li> <li>Expires in: <code>{{ .Expiry }}</code></li> <li>ID Token: <code>{{ .IDToken }}</code></li> </ul> </body> </html>`)) // tokenUserCmd represents the token command var tokenUserCmd = &cobra.Command{ Use: "user", Short: "An exemplary OAuth 2.0 Client performing the OAuth 2.0 Authorize Code Flow", Long: `Starts an exemplary web server that acts as an OAuth 2.0 Client performing the Authorize Code Flow. This command will help you to see if ORY Hydra has been configured properly. This command must not be used for anything else than manual testing or demo purposes. 
The server will terminate on error and success.`, Run: func(cmd *cobra.Command, args []string) { /* #nosec G402 - we want to support dev environments, hence tls trickery */ ctx := context.WithValue(context.Background(), oauth2.HTTPClient, &http.Client{Transport: &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: flagx.MustGetBool(cmd, "skip-tls-verify")}, }}) isSSL := flagx.MustGetBool(cmd, "https") port := flagx.MustGetInt(cmd, "port") scopes := flagx.MustGetStringSlice(cmd, "scope") prompt := flagx.MustGetStringSlice(cmd, "prompt") maxAge := flagx.MustGetInt(cmd, "max-age") redirectUrl := flagx.MustGetString(cmd, "redirect") backend := flagx.MustGetString(cmd, "token-url") frontend := flagx.MustGetString(cmd, "auth-url") audience := flagx.MustGetStringSlice(cmd, "audience") clientID := flagx.MustGetString(cmd, "client-id") clientSecret := flagx.MustGetString(cmd, "client-secret") if clientID == "" || clientSecret == "" { fmt.Print(cmd.UsageString()) fmt.Println("Please provide a Client ID and Client Secret using flags --client-id and --client-secret, or environment variables OAUTH2_CLIENT_ID and OAUTH2_CLIENT_SECRET.") return } proto := "http" if isSSL { proto = "https" } serverLocation := fmt.Sprintf("%s://127.0.0.1:%d/", proto, port) if redirectUrl == "" { redirectUrl = serverLocation + "callback" } if backend == "" { backend = urlx.AppendPaths(cli.RemoteURI(cmd), "/oauth2/token").String() } if frontend == "" { frontend = urlx.AppendPaths(cli.RemoteURI(cmd), "/oauth2/auth").String() } conf := oauth2.Config{ ClientID: clientID, ClientSecret: clientSecret, Endpoint: oauth2.Endpoint{ TokenURL: backend, AuthURL: frontend, }, RedirectURL: redirectUrl, Scopes: scopes, } state, err := randx.RuneSequence(24, randx.AlphaLower) cmdx.Must(err, "Could not generate random state: %s", err) nonce, err := randx.RuneSequence(24, randx.AlphaLower) cmdx.Must(err, "Could not generate random state: %s", err) authCodeURL := conf.AuthCodeURL( string(state), oauth2.SetAuthURLParam("audience", strings.Join(audience, "+")), oauth2.SetAuthURLParam("nonce", string(nonce)), oauth2.SetAuthURLParam("prompt", strings.Join(prompt, "+")), oauth2.SetAuthURLParam("max_age", strconv.Itoa(maxAge)), ) if !flagx.MustGetBool(cmd, "no-open") { _ = webbrowser.Open(serverLocation) // ignore errors } fmt.Println("Setting up home route on " + serverLocation) fmt.Println("Setting up callback listener on " + serverLocation + "callback") fmt.Println("Press ctrl + c on Linux / Windows or cmd + c on OSX to end the process.") fmt.Printf("If your browser does not open automatically, navigate to:\n\n\t%s\n\n", serverLocation) r := httprouter.New() var tlsc *tls.Config if isSSL { key, err := rsa.GenerateKey(rand.Reader, 2048) cmdx.Must(err, "Unable to generate RSA key pair: %s", err) cert, err := tlsx.CreateSelfSignedTLSCertificate(key) cmdx.Must(err, "Unable to generate self-signed TLS Certificate: %s", err) tlsc = &tls.Config{Certificates: []tls.Certificate{*cert}} } server := &http.Server{Addr: fmt.Sprintf(":%d", port), Handler: r, TLSConfig: tlsc} var shutdown = func() { time.Sleep(time.Second * 1) ctx, cancel := context.WithTimeout(context.Background(), time.Second*5) defer cancel() _ = server.Shutdown(ctx) } r.GET("/", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { _ = tokenUserWelcome.Execute(w, &struct{ URL string }{URL: authCodeURL}) }) type ed struct { Name string Description string Hint string Debug string } r.GET("/callback", func(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { 
if len(r.URL.Query().Get("error")) > 0 { fmt.Printf("Got error: %s\n", r.URL.Query().Get("error_description")) w.WriteHeader(http.StatusInternalServerError) _ = tokenUserError.Execute(w, &ed{ Name: r.URL.Query().Get("error"), Description: r.URL.Query().Get("error_description"), Hint: r.URL.Query().Get("error_hint"), Debug: r.URL.Query().Get("error_debug"), }) go shutdown() return } if r.URL.Query().Get("state") != string(state) { fmt.Printf("States do not match. Expected %s, got %s\n", string(state), r.URL.Query().Get("state")) w.WriteHeader(http.StatusInternalServerError) _ = tokenUserError.Execute(w, &ed{ Name: "States do not match", Description: "Expected state " + string(state) + " but got " + r.URL.Query().Get("state"), }) go shutdown() return } code := r.URL.Query().Get("code") token, err := conf.Exchange(ctx, code) if err != nil { fmt.Printf("Unable to exchange code for token: %s\n", err) w.WriteHeader(http.StatusInternalServerError) _ = tokenUserError.Execute(w, &ed{ Name: err.Error(), }) go shutdown() return } idt := token.Extra("id_token") fmt.Printf("Access Token:\n\t%s\n", token.AccessToken) fmt.Printf("Refresh Token:\n\t%s\n", token.RefreshToken) fmt.Printf("Expires in:\n\t%s\n", token.Expiry.Format(time.RFC1123)) fmt.Printf("ID Token:\n\t%v\n\n", idt) _ = tokenUserResult.Execute(w, struct { AccessToken string RefreshToken string Expiry string IDToken string }{ AccessToken: token.AccessToken, RefreshToken: token.RefreshToken, Expiry: token.Expiry.Format(time.RFC1123), IDToken: fmt.Sprintf("%v", idt), }) go shutdown() }) if isSSL { err = server.ListenAndServeTLS("", "") } else { err = server.ListenAndServe() } cmdx.Must(err, "%s", err) }, } func init() { tokenCmd.AddCommand(tokenUserCmd) tokenUserCmd.Flags().Bool("no-open", false, "Do not open the browser window automatically") tokenUserCmd.Flags().IntP("port", "p", 4446, "The port on which the server should run") tokenUserCmd.Flags().StringSlice("scope", []string{"offline", "openid"}, "Request OAuth2 scope") tokenUserCmd.Flags().StringSlice("prompt", []string{}, "Set the OpenID Connect prompt parameter") tokenUserCmd.Flags().Int("max-age", 0, "Set the OpenID Connect max_age parameter") tokenUserCmd.Flags().String("client-id", os.Getenv("OAUTH2_CLIENT_ID"), "Use the provided OAuth 2.0 Client ID, defaults to environment variable OAUTH2_CLIENT_ID") tokenUserCmd.Flags().String("client-secret", os.Getenv("OAUTH2_CLIENT_SECRET"), "Use the provided OAuth 2.0 Client Secret, defaults to environment variable OAUTH2_CLIENT_SECRET") tokenUserCmd.Flags().String("redirect", "", "Force a redirect url") tokenUserCmd.Flags().StringSlice("audience", []string{}, "Request a specific OAuth 2.0 Access Token Audience") tokenUserCmd.Flags().String("auth-url", "", "Usually it is enough to specify the `endpoint` flag, but if you want to force the authorization url, use this flag") tokenUserCmd.Flags().String("token-url", "", "Usually it is enough to specify the `endpoint` flag, but if you want to force the token url, use this flag") tokenUserCmd.Flags().String("endpoint", os.Getenv("HYDRA_URL"), "Set the URL where ORY Hydra is hosted, defaults to environment variable HYDRA_URL") tokenUserCmd.Flags().Bool("https", false, "Sets up HTTPS for the endpoint using a self-signed certificate which is re-generated every time you start this command") }
[ "\"OAUTH2_CLIENT_ID\"", "\"OAUTH2_CLIENT_SECRET\"", "\"HYDRA_URL\"" ]
[]
[ "OAUTH2_CLIENT_ID", "HYDRA_URL", "OAUTH2_CLIENT_SECRET" ]
[]
["OAUTH2_CLIENT_ID", "HYDRA_URL", "OAUTH2_CLIENT_SECRET"]
go
3
0
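In the cmd/token_user.go row the environment column lists the three literals in source order (OAUTH2_CLIENT_ID, OAUTH2_CLIENT_SECRET, HYDRA_URL) while constarg and constargjson use a different order, so any cross-checking of the columns should compare sets rather than sequences. A sketch of such a per-row sanity check; the dict-shaped row is only a stand-in for however the dump is actually loaded.

import json


def check_row_consistency(row):
    # constarg and constargjson must agree as sets; for Go rows the
    # environment tokens (minus their quotes) must agree too; and
    # constargcount must match the length of constarg.
    raw = {token.strip('"') for token in row["environment"]}
    consts = set(row["constarg"])
    from_json = set(json.loads(row["constargjson"]))
    assert consts == from_json, "constarg and constargjson disagree"
    assert not raw or raw == consts, "environment tokens disagree"
    assert row["constargcount"] == len(row["constarg"]), "constargcount is stale"


check_row_consistency({
    "environment": ['"OAUTH2_CLIENT_ID"', '"OAUTH2_CLIENT_SECRET"', '"HYDRA_URL"'],
    "constarg": ["OAUTH2_CLIENT_ID", "HYDRA_URL", "OAUTH2_CLIENT_SECRET"],
    "constargjson": '["OAUTH2_CLIENT_ID", "HYDRA_URL", "OAUTH2_CLIENT_SECRET"]',
    "constargcount": 3,
})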
test/e2e_test.go
// // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // +build e2e package test import ( "bytes" "context" "encoding/base64" "encoding/json" "fmt" "io/ioutil" "net/http/httptest" "net/url" "os" "path" "path/filepath" "testing" "github.com/google/go-cmp/cmp" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" "github.com/google/go-containerregistry/pkg/registry" "github.com/google/go-containerregistry/pkg/v1/random" "github.com/google/go-containerregistry/pkg/v1/remote" "github.com/sigstore/cosign/cmd/cosign/cli" "github.com/sigstore/cosign/cmd/cosign/cli/attach" "github.com/sigstore/cosign/cmd/cosign/cli/download" "github.com/sigstore/cosign/cmd/cosign/cli/upload" sget "github.com/sigstore/cosign/cmd/sget/cli" "github.com/sigstore/cosign/pkg/cosign" "github.com/sigstore/cosign/pkg/cosign/kubernetes" cremote "github.com/sigstore/cosign/pkg/cosign/remote" "github.com/sigstore/sigstore/pkg/signature/payload" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( serverEnv = "REKOR_SERVER" rekorURL = "https://rekor.sigstore.dev" ) var keyPass = []byte("hello") var passFunc = func(_ bool) ([]byte, error) { return keyPass, nil } var verify = func(keyRef, imageRef string, checkClaims bool, annotations map[string]interface{}) error { cmd := cli.VerifyCommand{ KeyRef: keyRef, RekorURL: rekorURL, CheckClaims: checkClaims, Annotations: &annotations, } args := []string{imageRef} return cmd.Exec(context.Background(), args) } func TestSignVerify(t *testing.T) { repo, stop := reg(t) defer stop() td := t.TempDir() imgName := path.Join(repo, "cosign-e2e") _, _, cleanup := mkimage(t, imgName) defer cleanup() _, privKeyPath, pubKeyPath := keypair(t, td) ctx := context.Background() // Verify should fail at first mustErr(verify(pubKeyPath, imgName, true, nil), t) // So should download mustErr(download.SignatureCmd(ctx, imgName), t) // Now sign the image ko := cli.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc} must(cli.SignCmd(ctx, ko, nil, imgName, "", true, "", false, false), t) // Now verify and download should work! must(verify(pubKeyPath, imgName, true, nil), t) must(download.SignatureCmd(ctx, imgName), t) // Look for a specific annotation mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}), t) // Sign the image with an annotation annotations := map[string]interface{}{"foo": "bar"} must(cli.SignCmd(ctx, ko, annotations, imgName, "", true, "", false, false), t) // It should match this time. 
must(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}), t) // But two doesn't work mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar", "baz": "bat"}), t) } func TestSignVerifyClean(t *testing.T) { repo, stop := reg(t) defer stop() td := t.TempDir() imgName := path.Join(repo, "cosign-e2e") _, _, _ = mkimage(t, imgName) _, privKeyPath, pubKeyPath := keypair(t, td) ctx := context.Background() // Now sign the image ko := cli.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc} must(cli.SignCmd(ctx, ko, nil, imgName, "", true, "", false, false), t) // Now verify and download should work! must(verify(pubKeyPath, imgName, true, nil), t) must(download.SignatureCmd(ctx, imgName), t) // Now clean signature from the given image must(cli.CleanCmd(ctx, imgName), t) // It doesn't work mustErr(verify(pubKeyPath, imgName, true, nil), t) } func TestAttestVerify(t *testing.T) { repo, stop := reg(t) defer stop() td := t.TempDir() imgName := path.Join(repo, "cosign-attest-e2e") _, _, cleanup := mkimage(t, imgName) defer cleanup() _, privKeyPath, pubKeyPath := keypair(t, td) ctx := context.Background() // Verify should fail at first verifyAttestation := cli.VerifyAttestationCommand{ KeyRef: pubKeyPath, } attestation := "helloworld" ap := filepath.Join(td, "attestation") if err := ioutil.WriteFile(ap, []byte(attestation), 0600); err != nil { t.Fatal(err) } mustErr(verifyAttestation.Exec(ctx, []string{imgName}), t) // Now attest the image ko := cli.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc} must(cli.AttestCmd(ctx, ko, imgName, "", true, ap, false), t) // Now verify and download should work! must(verifyAttestation.Exec(ctx, []string{imgName}), t) // Look for a specific annotation mustErr(verify(pubKeyPath, imgName, true, map[string]interface{}{"foo": "bar"}), t) } func TestBundle(t *testing.T) { // turn on the tlog defer setenv(t, cli.ExperimentalEnv, "1")() repo, stop := reg(t) defer stop() td := t.TempDir() imgName := path.Join(repo, "cosign-e2e") _, _, cleanup := mkimage(t, imgName) defer cleanup() _, privKeyPath, pubKeyPath := keypair(t, td) ctx := context.Background() ko := cli.KeyOpts{ KeyRef: privKeyPath, PassFunc: passFunc, RekorURL: rekorURL, } // Sign the image must(cli.SignCmd(ctx, ko, nil, imgName, "", true, "", false, false), t) // Make sure verify works must(verify(pubKeyPath, imgName, true, nil), t) // Make sure offline verification works with bundling // use rekor prod since we have hardcoded the public key os.Setenv(serverEnv, "notreal") must(verify(pubKeyPath, imgName, true, nil), t) } func TestDuplicateSign(t *testing.T) { repo, stop := reg(t) defer stop() td := t.TempDir() imgName := path.Join(repo, "cosign-e2e") ref, _, cleanup := mkimage(t, imgName) defer cleanup() _, privKeyPath, pubKeyPath := keypair(t, td) ctx := context.Background() // Verify should fail at first mustErr(verify(pubKeyPath, imgName, true, nil), t) // So should download mustErr(download.SignatureCmd(ctx, imgName), t) // Now sign the image ko := cli.KeyOpts{KeyRef: privKeyPath, PassFunc: passFunc} must(cli.SignCmd(ctx, ko, nil, imgName, "", true, "", false, false), t) // Now verify and download should work! must(verify(pubKeyPath, imgName, true, nil), t) must(download.SignatureCmd(ctx, imgName), t) // Signing again should work just fine... 
must(cli.SignCmd(ctx, ko, nil, imgName, "", true, "", false, false), t) // but a duplicate signature should not be a uploaded sigRepo, err := cli.TargetRepositoryForImage(ref) if err != nil { t.Fatalf("failed to get signature repository: %v", err) } signatures, err := cosign.FetchSignaturesForImage(ctx, ref, sigRepo, cosign.SignatureTagSuffix, registryClientOpts(ctx)...) if err != nil { t.Fatalf("failed to fetch signatures: %v", err) } if len(signatures) > 1 { t.Errorf("expected there to only be one signature, got %v", signatures) } } func TestKeyURLVerify(t *testing.T) { // TODO: re-enable once distroless images are being signed by the new client t.Skip() // Verify that an image can be verified via key url keyRef := "https://raw.githubusercontent.com/GoogleContainerTools/distroless/main/cosign.pub" img := "gcr.io/distroless/base:latest" must(verify(keyRef, img, true, nil), t) } func TestGenerateKeyPairEnvVar(t *testing.T) { defer setenv(t, "COSIGN_PASSWORD", "foo")() keys, err := cosign.GenerateKeyPair(cli.GetPass) if err != nil { t.Fatal(err) } if _, err := cosign.LoadECDSAPrivateKey(keys.PrivateBytes, []byte("foo")); err != nil { t.Fatal(err) } } func TestGenerateKeyPairK8s(t *testing.T) { td := t.TempDir() wd, err := os.Getwd() if err != nil { t.Fatal(err) } if err := os.Chdir(td); err != nil { t.Fatal(err) } defer func() { os.Chdir(wd) }() password := "foo" defer setenv(t, "COSIGN_PASSWORD", password)() ctx := context.Background() name := "cosign-secret" namespace := "default" if err := kubernetes.KeyPairSecret(ctx, fmt.Sprintf("k8s://%s/%s", namespace, name), cli.GetPass); err != nil { t.Fatal(err) } // make sure the secret actually exists client, err := kubernetes.Client() if err != nil { t.Fatal(err) } s, err := client.CoreV1().Secrets(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { t.Fatal(err) } if v, ok := s.Data["cosign.password"]; !ok || string(v) != password { t.Fatalf("password is incorrect, got %v expected %v", v, "foo") } } func TestMultipleSignatures(t *testing.T) { repo, stop := reg(t) defer stop() td1 := t.TempDir() td2 := t.TempDir() imgName := path.Join(repo, "cosign-e2e") _, _, cleanup := mkimage(t, imgName) defer cleanup() _, priv1, pub1 := keypair(t, td1) _, priv2, pub2 := keypair(t, td2) ctx := context.Background() // Verify should fail at first for both keys mustErr(verify(pub1, imgName, true, nil), t) mustErr(verify(pub2, imgName, true, nil), t) // Now sign the image with one key ko := cli.KeyOpts{KeyRef: priv1, PassFunc: passFunc} must(cli.SignCmd(ctx, ko, nil, imgName, "", true, "", false, false), t) // Now verify should work with that one, but not the other must(verify(pub1, imgName, true, nil), t) mustErr(verify(pub2, imgName, true, nil), t) // Now sign with the other key too ko.KeyRef = priv2 must(cli.SignCmd(ctx, ko, nil, imgName, "", true, "", false, false), t) // Now verify should work with both must(verify(pub1, imgName, true, nil), t) must(verify(pub2, imgName, true, nil), t) } func TestSignBlob(t *testing.T) { var blob = "someblob" td1 := t.TempDir() td2 := t.TempDir() t.Cleanup(func() { os.RemoveAll(td1) os.RemoveAll(td2) }) bp := filepath.Join(td1, blob) if err := ioutil.WriteFile(bp, []byte(blob), 0644); err != nil { t.Fatal(err) } _, privKeyPath1, pubKeyPath1 := keypair(t, td1) _, _, pubKeyPath2 := keypair(t, td2) ctx := context.Background() ko1 := cli.KeyOpts{ KeyRef: pubKeyPath1, } ko2 := cli.KeyOpts{ KeyRef: pubKeyPath2, } // Verify should fail on a bad input mustErr(cli.VerifyBlobCmd(ctx, ko1, "", "badsig", blob), t) 
mustErr(cli.VerifyBlobCmd(ctx, ko2, "", "badsig", blob), t) // Now sign the blob with one key ko := cli.KeyOpts{ KeyRef: privKeyPath1, PassFunc: passFunc, } sig, err := cli.SignBlobCmd(ctx, ko, bp, true, "") if err != nil { t.Fatal(err) } // Now verify should work with that one, but not the other must(cli.VerifyBlobCmd(ctx, ko1, "", string(sig), bp), t) mustErr(cli.VerifyBlobCmd(ctx, ko2, "", string(sig), bp), t) } func TestGenerate(t *testing.T) { repo, stop := reg(t) defer stop() imgName := path.Join(repo, "cosign-e2e") _, desc, cleanup := mkimage(t, imgName) defer cleanup() // Generate the payload for the image, and check the digest. b := bytes.Buffer{} must(cli.GenerateCmd(context.Background(), imgName, nil, &b), t) ss := payload.SimpleContainerImage{} must(json.Unmarshal(b.Bytes(), &ss), t) equals(desc.Digest.String(), ss.Critical.Image.DockerManifestDigest, t) // Now try with some annotations. b.Reset() a := map[string]interface{}{"foo": "bar"} must(cli.GenerateCmd(context.Background(), imgName, a, &b), t) must(json.Unmarshal(b.Bytes(), &ss), t) equals(desc.Digest.String(), ss.Critical.Image.DockerManifestDigest, t) equals(ss.Optional["foo"], "bar", t) } func keypair(t *testing.T, td string) (*cosign.Keys, string, string) { wd, err := os.Getwd() if err != nil { t.Fatal(err) } if err := os.Chdir(td); err != nil { t.Fatal(err) } defer func() { os.Chdir(wd) }() keys, err := cosign.GenerateKeyPair(passFunc) if err != nil { t.Fatal(err) } privKeyPath := filepath.Join(td, "cosign.key") if err := ioutil.WriteFile(privKeyPath, keys.PrivateBytes, 0600); err != nil { t.Fatal(err) } pubKeyPath := filepath.Join(td, "cosign.pub") if err := ioutil.WriteFile(pubKeyPath, keys.PublicBytes, 0600); err != nil { t.Fatal(err) } return keys, privKeyPath, pubKeyPath } func TestUploadDownload(t *testing.T) { repo, stop := reg(t) defer stop() td := t.TempDir() ctx := context.Background() testCases := map[string]struct { signature string signatureType attach.SignatureArgType expectedErr bool }{ "file containing signature": { signature: "testsignaturefile", signatureType: attach.FileSignature, expectedErr: false, }, "raw signature as argument": { signature: "testsignatureraw", signatureType: attach.RawSignature, expectedErr: false, }, "empty signature as argument": { signature: "", signatureType: attach.RawSignature, expectedErr: true, }, } imgName := path.Join(repo, "cosign-e2e") for testName, testCase := range testCases { t.Run(testName, func(t *testing.T) { ref, _, cleanup := mkimage(t, imgName) payload := "testpayload" payloadPath := mkfile(payload, td, t) signature := base64.StdEncoding.EncodeToString([]byte(testCase.signature)) var sigRef string if testCase.signatureType == attach.FileSignature { sigRef = mkfile(signature, td, t) } else { sigRef = signature } // Upload it! err := attach.SignatureCmd(ctx, sigRef, payloadPath, imgName) if testCase.expectedErr { mustErr(err, t) } else { must(err, t) } // Now download it! sigRepo, err := cli.TargetRepositoryForImage(ref) if err != nil { t.Fatalf("failed to get signature repository: %v", err) } regClientOpts := registryClientOpts(ctx) signatures, err := cosign.FetchSignaturesForImage(ctx, ref, sigRepo, cosign.SignatureTagSuffix, regClientOpts...) 
if testCase.expectedErr { mustErr(err, t) } else { must(err, t) if len(signatures) != 1 { t.Error("unexpected signatures") } if diff := cmp.Diff(signatures[0].Base64Signature, signature); diff != "" { t.Error(diff) } if diff := cmp.Diff(signatures[0].Payload, []byte(payload)); diff != "" { t.Error(diff) } } // Now delete it! cleanup() }) } } func TestUploadBlob(t *testing.T) { repo, stop := reg(t) defer stop() td := t.TempDir() ctx := context.Background() imgName := path.Join(repo, "/cosign-upload-e2e") payload := "testpayload" payloadPath := mkfile(payload, td, t) // Upload it! files := []cremote.File{{ Path: payloadPath, }} must(upload.BlobCmd(ctx, files, "", imgName), t) // Check it ref, err := name.ParseReference(imgName) if err != nil { t.Fatal(err) } // Now download it with sget (this should fail by tag) if _, err := sget.SgetCmd(ctx, imgName, ""); err == nil { t.Error("expected download to fail") } img, err := remote.Image(ref) if err != nil { t.Fatal(err) } dgst, err := img.Digest() if err != nil { t.Fatal(err) } // But pass by digest rc, err := sget.SgetCmd(ctx, imgName+"@"+dgst.String(), "") if err != nil { t.Fatal(err) } b, err := ioutil.ReadAll(rc) if err != nil { t.Fatal(err) } if string(b) != payload { t.Errorf("expected contents to be %s, got %s", payload, string(b)) } } func TestAttachSBOM(t *testing.T) { repo, stop := reg(t) defer stop() ctx := context.Background() imgName := path.Join(repo, "sbom-image") img, _, cleanup := mkimage(t, imgName) defer cleanup() out := bytes.Buffer{} _, err := download.SBOMCmd(ctx, img.Name(), &out) if err == nil { t.Fatal("Expected error") } t.Log(out) out.Reset() // Upload it! must(attach.SBOMCmd(ctx, "./testdata/bom-go-mod.spdx", "spdx", imgName), t) sboms, err := download.SBOMCmd(ctx, imgName, &out) if err != nil { t.Fatal(err) } t.Log(out) if len(sboms) != 1 { t.Fatalf("Expected one sbom, got %d", len(sboms)) } want, err := ioutil.ReadFile("./testdata/bom-go-mod.spdx") if err != nil { t.Fatal(err) } if diff := cmp.Diff(string(want), sboms[0]); diff != "" { t.Errorf("diff: %s", diff) } } func setenv(t *testing.T, k, v string) func() { if err := os.Setenv(k, v); err != nil { t.Fatalf("error setitng env: %v", err) } return func() { os.Unsetenv(k) } } func TestTlog(t *testing.T) { repo, stop := reg(t) defer stop() td := t.TempDir() imgName := path.Join(repo, "cosign-e2e") _, _, cleanup := mkimage(t, imgName) defer cleanup() _, privKeyPath, pubKeyPath := keypair(t, td) ctx := context.Background() // Verify should fail at first mustErr(verify(pubKeyPath, imgName, true, nil), t) // Now sign the image without the tlog ko := cli.KeyOpts{ KeyRef: privKeyPath, PassFunc: passFunc, RekorURL: rekorURL, } must(cli.SignCmd(ctx, ko, nil, imgName, "", true, "", false, false), t) // Now verify should work! must(verify(pubKeyPath, imgName, true, nil), t) // Now we turn on the tlog! defer setenv(t, cli.ExperimentalEnv, "1")() // Verify shouldn't work since we haven't put anything in it yet. mustErr(verify(pubKeyPath, imgName, true, nil), t) // Sign again with the tlog env var on must(cli.SignCmd(ctx, ko, nil, imgName, "", true, "", false, false), t) // And now verify works! 
must(verify(pubKeyPath, imgName, true, nil), t) } func TestGetPublicKeyCustomOut(t *testing.T) { td := t.TempDir() keys, privKeyPath, _ := keypair(t, td) ctx := context.Background() outFile := "output.pub" outPath := filepath.Join(td, outFile) outWriter, err := os.OpenFile(outPath, os.O_WRONLY|os.O_CREATE, 0600) must(err, t) pk := cli.Pkopts{ KeyRef: privKeyPath, } must(cli.GetPublicKey(ctx, pk, cli.NamedWriter{Name: outPath, Writer: outWriter}, passFunc), t) output, err := ioutil.ReadFile(outPath) must(err, t) equals(keys.PublicBytes, output, t) } func mkfile(contents, td string, t *testing.T) string { f, err := ioutil.TempFile(td, "") if err != nil { t.Fatal(err) } defer f.Close() if _, err := f.Write([]byte(contents)); err != nil { t.Fatal(err) } return f.Name() } func mkimage(t *testing.T, n string) (name.Reference, *remote.Descriptor, func()) { ref, err := name.ParseReference(n, name.WeakValidation) if err != nil { t.Fatal(err) } img, err := random.Image(512, 5) if err != nil { t.Fatal(err) } regClientOpts := registryClientOpts(context.Background()) if err := remote.Write(ref, img, regClientOpts...); err != nil { t.Fatal(err) } remoteImage, err := remote.Get(ref, regClientOpts...) if err != nil { t.Fatal(err) } cleanup := func() { _ = remote.Delete(ref, regClientOpts...) ref := cosign.AttachedImageTag(ref.Context(), remoteImage, cosign.SignatureTagSuffix) _ = remote.Delete(ref, regClientOpts...) } return ref, remoteImage, cleanup } func must(err error, t *testing.T) { t.Helper() if err != nil { t.Fatal(err) } } func mustErr(err error, t *testing.T) { t.Helper() if err == nil { t.Fatal("expected error") } } func equals(v1, v2 interface{}, t *testing.T) { if diff := cmp.Diff(v1, v2); diff != "" { t.Error(diff) } } func reg(t *testing.T) (string, func()) { repo := os.Getenv("COSIGN_TEST_REPO") if repo != "" { return repo, func() {} } t.Log("COSIGN_TEST_REPO unset, using fake registry") r := httptest.NewServer(registry.New()) u, err := url.Parse(r.URL) if err != nil { t.Fatal(err) } return u.Host, r.Close } func registryClientOpts(ctx context.Context) []remote.Option { return []remote.Option{ remote.WithAuthFromKeychain(authn.DefaultKeychain), remote.WithContext(ctx), } }
[ "\"COSIGN_TEST_REPO\"" ]
[]
[ "COSIGN_TEST_REPO" ]
[]
["COSIGN_TEST_REPO"]
go
1
0
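The test/e2e_test.go row records only the literal read `os.Getenv("COSIGN_TEST_REPO")`, even though its content also calls os.Setenv with variable first arguments (serverEnv, k). For Go sources, telling those cases apart could look roughly like the sketch below, a regex-level counterpart of the AST classifier above; the pattern and names are assumptions, not the dump's real tooling, and it says nothing about which calls the dump chooses to record.

import re

GO_ENV_CALL = re.compile(r'os\.(?:Getenv|Setenv|Unsetenv|LookupEnv)\(\s*([^,)]+)')


def classify_go_env_args(source):
    # First argument of each env call: double-quoted literals become
    # constant names, anything else is kept as a variable expression.
    const_args, variable_args = [], []
    for arg in GO_ENV_CALL.findall(source):
        arg = arg.strip()
        if arg.startswith('"') and arg.endswith('"'):
            const_args.append(arg.strip('"'))
        else:
            variable_args.append(arg)
    return const_args, variable_args


consts, variables = classify_go_env_args(
    'repo := os.Getenv("COSIGN_TEST_REPO")\nos.Setenv(serverEnv, "notreal")'
)
assert consts == ["COSIGN_TEST_REPO"] and variables == ["serverEnv"]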
docs/examples/benchmarks/144_0/144_000.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- ''' .. module:: examples.benchmarks.144_000 :platform: Agnostic, Windows :synopsis: Test for IGES type 144 form 0 .. Created on 01/08/2013 .. codeauthor:: Rod Persky <rodney.persky {removethis} AT gmail _DOT_ com> .. Licensed under the Academic Free License ("AFL") v. 3.0 .. Source at https://github.com/Rod-Persky/pyIGES .. figure:: 144-000.png :scale: 70 % :height: 528 px :width: 1114 px :alt: 144 type :align: center 144 Type construction, See table below +------------------+--------------+-----------------+ | Stage 3, | Stage 2, | Stage 1, | | Generate Surface | |BENCH_142|_ | Create Elements | +==================+==============+=================+ | |IGES_144| | |IGES_142| | |IGES_112| | +------------------+--------------+-----------------+ | | | |IGES_100| | +------------------+--------------+-----------------+ | | | |IGES_114| | +------------------+--------------+-----------------+ .. ToDo:: Figure out how this works. .. literalinclude:: 144_000.py :pyobject: iges_144_000 :linenos: :emphasize-lines: 24-26 The resulting IGES file should look like: .. literalinclude:: 144-000.igs :language: py :linenos: :emphasize-lines: 14-15, 128 ''' import os import sys sys.path.append(os.path.abspath('../../../')) import examples.benchmarks # 114 = Parametric Spline Surface Entity Page 98 # 100 = Circular Arc Entity Page 64 # 112 = Parametric Spline Curve Entity Page 94 # 142 = Curve on a Parametric Surface Entity Page 162 # 144 = Trimmed (Parametric) Surface Entity Page 166 #=============================================================================== # Pref, # 0 = Unspecified # 1 = S o B is preferred # 2 = C is preferred # 3 = C and S o B are equally preferred #=============================================================================== def iges_144_000(): import pyiges.IGESGeomLib as IGES from pyiges.IGESGeomLib import IGESPoint from pyiges.IGESCore import IGEStorage system = IGEStorage() examples.benchmarks.standard_iges_setup(system, "144-000-benchmark.igs") para_spline_surface = IGES.IGESTestSplineSurf() system.Commit(para_spline_surface) circle = IGES.IGESGeomCircle(IGESPoint(6, 7.25, 0), IGESPoint(6.25, 7.25)) system.Commit(circle) para_spline_curve = IGES.IGESTestSplineCurve() system.Commit(para_spline_curve) curve_on_parametric_surface = IGES.IGESCurveOnParametricSurface(para_spline_surface, circle, para_spline_curve, 2) system.Commit(curve_on_parametric_surface) trimmed_parametric_surface = IGES.IGESTrimmedParaSurface(para_spline_surface, curve_on_parametric_surface) system.Commit(trimmed_parametric_surface) system.save("144-000-benchmark.igs") if not os.environ.get('READTHEDOCS', None): print(system) os.startfile("144-000-benchmark.igs") if __name__ == "__main__": iges_144_000()
[]
[]
[ "READTHEDOCS" ]
[]
["READTHEDOCS"]
python
1
0
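READTHEDOCS appears twice in this section: docs/conf.py above and this 144_000.py benchmark both gate behaviour on it. If the dump were stored as JSON Lines with the column names used throughout this file (an assumption; adjust the loader to the real storage format), finding every row that reads a given variable could look like this:

import json


def iter_rows(path):
    # One JSON object per line; skip blank lines.
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            if line.strip():
                yield json.loads(line)


def files_reading(path, name):
    # Filenames of rows whose constarg column contains `name`;
    # files_reading(dump, "READTHEDOCS") would match both records mentioned above.
    return [row["filename"] for row in iter_rows(path)
            if name in row.get("constarg", [])]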
devbrowserrefresh.go
package cbutil import ( "fmt" "log" "os" "os/exec" "runtime" "time" ) func DevRefreshBrowserOnLoadAndTemplateChange(domain string) { if os.Getenv("CHROMEREFRESH") == "1" && runtime.GOOS == "darwin" { go func() { Sleep(time.Second) cmd := exec.Command(`/usr/bin/osascript`, `-e`, fmt.Sprintf(`tell application "Google Chrome" set window_list to every window # get the windows repeat with the_window in window_list # for every window set tab_list to every tab in the_window # get the tabs repeat with the_tab in tab_list # for every tab if the URL of the_tab contains "%s" then tell the_tab to reload end if end repeat end repeat end tell`, domain)) e := cmd.Run() if e != nil { log.Println(e.Error()) } e = exec.Command("pkill", "-f", "fswatch").Start() if e != nil { log.Println(e.Error()) } watch := exec.Command(`/bin/bash`, `-c`, fmt.Sprintf(`/usr/local/bin/fswatch -e ".*" -i "\\.gohtml$" -o . | xargs -n1 -I {} osascript -e 'tell application "Google Chrome" set window_list to every window # get the windows repeat with the_window in window_list # for every window set tab_list to every tab in the_window # get the tabs repeat with the_tab in tab_list # for every tab if the URL of the_tab contains "%s" then tell the_tab to reload end if end repeat end repeat end tell'`, domain)) e = watch.Start() if e != nil { log.Println(e.Error()) } }() } }
[ "\"CHROMEREFRESH\"" ]
[]
[ "CHROMEREFRESH" ]
[]
["CHROMEREFRESH"]
go
1
0
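One last aside on the devbrowserrefresh.go row: it treats CHROMEREFRESH as a switch by comparing against "1", while other rows in this section compare READTHEDOCS against 'True' and LOCAL_ONLY against "true". None of the recorded projects share a helper for this, but a tolerant flag reader that normalises those conventions might look like the following (purely illustrative):

import os

_TRUTHY = {"1", "true", "yes", "on"}


def env_flag(name, default=False, environ=os.environ):
    # Treat "1", "true", "yes", "on" (any case) as enabled.
    value = environ.get(name)
    if value is None:
        return default
    return value.strip().lower() in _TRUTHY


assert env_flag("CHROMEREFRESH", environ={"CHROMEREFRESH": "1"}) is True
assert env_flag("LOCAL_ONLY", environ={}) is False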
control/iosys.py
# iosys.py - input/output system module # # RMM, 28 April 2019 # # Additional features to add # * Allow constant inputs for MIMO input_output_response (w/out ones) # * Add support for constants/matrices as part of operators (1 + P) # * Add unit tests (and example?) for time-varying systems # * Allow time vector for discrete time simulations to be multiples of dt # * Check the way initial outputs for discrete time systems are handled # """The :mod:`~control.iosys` module contains the :class:`~control.InputOutputSystem` class that represents (possibly nonlinear) input/output systems. The :class:`~control.InputOutputSystem` class is a general class that defines any continuous or discrete time dynamical system. Input/output systems can be simulated and also used to compute equilibrium points and linearizations. """ __author__ = "Richard Murray" __copyright__ = "Copyright 2019, California Institute of Technology" __credits__ = ["Richard Murray"] __license__ = "BSD" __maintainer__ = "Richard Murray" __email__ = "[email protected]" import numpy as np import scipy as sp import copy from warnings import warn from .statesp import StateSpace, tf2ss, _convert_to_statespace from .timeresp import _check_convert_array, _process_time_response from .lti import isctime, isdtime, common_timebase from . import config __all__ = ['InputOutputSystem', 'LinearIOSystem', 'NonlinearIOSystem', 'InterconnectedSystem', 'LinearICSystem', 'input_output_response', 'find_eqpt', 'linearize', 'ss2io', 'tf2io', 'interconnect', 'summing_junction'] # Define module default parameter values _iosys_defaults = { 'iosys.state_name_delim': '_', 'iosys.duplicate_system_name_prefix': '', 'iosys.duplicate_system_name_suffix': '$copy', 'iosys.linearized_system_name_prefix': '', 'iosys.linearized_system_name_suffix': '$linearized' } class InputOutputSystem(object): """A class for representing input/output systems. The InputOutputSystem class allows (possibly nonlinear) input/output systems to be represented in Python. It is intended as a parent class for a set of subclasses that are used to implement specific structures and operations for different types of input/output dynamical systems. Parameters ---------- inputs : int, list of str, or None Description of the system inputs. This can be given as an integer count or as a list of strings that name the individual signals. If an integer count is specified, the names of the signal will be of the form `s[i]` (where `s` is one of `u`, `y`, or `x`). If this parameter is not given or given as `None`, the relevant quantity will be determined when possible based on other information provided to functions using the system. outputs : int, list of str, or None Description of the system outputs. Same format as `inputs`. states : int, list of str, or None Description of the system states. Same format as `inputs`. dt : None, True or float, optional System timebase. 0 (default) indicates continuous time, True indicates discrete time with unspecified sampling time, positive number is discrete time with specified sampling time, None indicates unspecified timebase (either continuous or discrete time). params : dict, optional Parameter values for the systems. Passed to the evaluation functions for the system as default values, overriding internal defaults. name : string, optional System name (used for specifying signals). If unspecified, a generic name <sys[id]> is generated with a unique integer id. 
Attributes ---------- ninputs, noutputs, nstates : int Number of input, output and state variables input_index, output_index, state_index : dict Dictionary of signal names for the inputs, outputs and states and the index of the corresponding array dt : None, True or float System timebase. 0 (default) indicates continuous time, True indicates discrete time with unspecified sampling time, positive number is discrete time with specified sampling time, None indicates unspecified timebase (either continuous or discrete time). params : dict, optional Parameter values for the systems. Passed to the evaluation functions for the system as default values, overriding internal defaults. name : string, optional System name (used for specifying signals) Notes ----- The :class:`~control.InputOuputSystem` class (and its subclasses) makes use of two special methods for implementing much of the work of the class: * _rhs(t, x, u): compute the right hand side of the differential or difference equation for the system. This must be specified by the subclass for the system. * _out(t, x, u): compute the output for the current state of the system. The default is to return the entire system state. """ _idCounter = 0 def _name_or_default(self, name=None): if name is None: name = "sys[{}]".format(InputOutputSystem._idCounter) InputOutputSystem._idCounter += 1 return name def __init__(self, inputs=None, outputs=None, states=None, params={}, name=None, **kwargs): """Create an input/output system. The InputOutputSystem contructor is used to create an input/output object with the core information required for all input/output systems. Instances of this class are normally created by one of the input/output subclasses: :class:`~control.LinearICSystem`, :class:`~control.LinearIOSystem`, :class:`~control.NonlinearIOSystem`, :class:`~control.InterconnectedSystem`. """ # Store the input arguments # default parameters self.params = params.copy() # timebase self.dt = kwargs.get('dt', config.defaults['control.default_dt']) # system name self.name = self._name_or_default(name) # Parse and store the number of inputs, outputs, and states self.set_inputs(inputs) self.set_outputs(outputs) self.set_states(states) # # Class attributes # # These attributes are defined as class attributes so that they are # documented properly. They are "overwritten" in __init__. # #: Number of system inputs. #: #: :meta hide-value: ninputs = 0 #: Number of system outputs. #: #: :meta hide-value: noutputs = 0 #: Number of system states. 
#: #: :meta hide-value: nstates = 0 def __repr__(self): return self.name if self.name is not None else str(type(self)) def __str__(self): """String representation of an input/output system""" str = "System: " + (self.name if self.name else "(None)") + "\n" str += "Inputs (%s): " % self.ninputs for key in self.input_index: str += key + ", " str += "\nOutputs (%s): " % self.noutputs for key in self.output_index: str += key + ", " str += "\nStates (%s): " % self.nstates for key in self.state_index: str += key + ", " return str def __mul__(sys2, sys1): """Multiply two input/output systems (series interconnection)""" if isinstance(sys1, (int, float, np.number)): # TODO: Scale the output raise NotImplemented("Scalar multiplication not yet implemented") elif isinstance(sys1, np.ndarray): # TODO: Post-multiply by a matrix raise NotImplemented("Matrix multiplication not yet implemented") elif not isinstance(sys1, InputOutputSystem): raise TypeError("Unknown I/O system object ", sys1) # Make sure systems can be interconnected if sys1.noutputs != sys2.ninputs: raise ValueError("Can't multiply systems with incompatible " "inputs and outputs") # Make sure timebase are compatible dt = common_timebase(sys1.dt, sys2.dt) # Create a new system to handle the composition inplist = [(0, i) for i in range(sys1.ninputs)] outlist = [(1, i) for i in range(sys2.noutputs)] newsys = InterconnectedSystem( (sys1, sys2), inplist=inplist, outlist=outlist) # Set up the connection map manually newsys.set_connect_map(np.block( [[np.zeros((sys1.ninputs, sys1.noutputs)), np.zeros((sys1.ninputs, sys2.noutputs))], [np.eye(sys2.ninputs, sys1.noutputs), np.zeros((sys2.ninputs, sys2.noutputs))]] )) # If both systems are linear, create LinearICSystem if isinstance(sys1, StateSpace) and isinstance(sys2, StateSpace): ss_sys = StateSpace.__mul__(sys2, sys1) return LinearICSystem(newsys, ss_sys) # Return the newly created InterconnectedSystem return newsys def __rmul__(sys1, sys2): """Pre-multiply an input/output systems by a scalar/matrix""" if isinstance(sys2, InputOutputSystem): # Both systems are InputOutputSystems => use __mul__ return InputOutputSystem.__mul__(sys2, sys1) elif isinstance(sys2, (int, float, np.number)): # TODO: Scale the output raise NotImplemented("Scalar multiplication not yet implemented") elif isinstance(sys2, np.ndarray): # TODO: Post-multiply by a matrix raise NotImplemented("Matrix multiplication not yet implemented") elif isinstance(sys2, StateSpace): # TODO: Should eventuall preserve LinearIOSystem structure return StateSpace.__mul__(sys2, sys1) else: raise TypeError("Unknown I/O system object ", sys1) def __add__(sys1, sys2): """Add two input/output systems (parallel interconnection)""" # TODO: Allow addition of scalars and matrices if isinstance(sys2, (int, float, np.number)): # TODO: Scale the output raise NotImplemented("Scalar addition not yet implemented") elif isinstance(sys2, np.ndarray): # TODO: Post-multiply by a matrix raise NotImplemented("Matrix addition not yet implemented") elif not isinstance(sys2, InputOutputSystem): raise TypeError("Unknown I/O system object ", sys2) # Make sure number of input and outputs match if sys1.ninputs != sys2.ninputs or sys1.noutputs != sys2.noutputs: raise ValueError("Can't add systems with different numbers of " "inputs or outputs.") ninputs = sys1.ninputs noutputs = sys1.noutputs # Create a new system to handle the composition inplist = [[(0, i), (1, i)] for i in range(ninputs)] outlist = [[(0, i), (1, i)] for i in range(noutputs)] newsys = 
InterconnectedSystem( (sys1, sys2), inplist=inplist, outlist=outlist) # If both systems are linear, create LinearICSystem if isinstance(sys1, StateSpace) and isinstance(sys2, StateSpace): ss_sys = StateSpace.__add__(sys2, sys1) return LinearICSystem(newsys, ss_sys) # Return the newly created InterconnectedSystem return newsys # TODO: add __radd__ to allow postaddition by scalars and matrices def __neg__(sys): """Negate an input/output systems (rescale)""" if sys.ninputs is None or sys.noutputs is None: raise ValueError("Can't determine number of inputs or outputs") inplist = [(0, i) for i in range(sys.ninputs)] outlist = [(0, i, -1) for i in range(sys.noutputs)] # Create a new system to hold the negation newsys = InterconnectedSystem( (sys,), dt=sys.dt, inplist=inplist, outlist=outlist) # If the system is linear, create LinearICSystem if isinstance(sys, StateSpace): ss_sys = StateSpace.__neg__(sys) return LinearICSystem(newsys, ss_sys) # Return the newly created system return newsys def _isstatic(self): """Check to see if a system is a static system (no states)""" return self.nstates == 0 # Utility function to parse a list of signals def _process_signal_list(self, signals, prefix='s'): if signals is None: # No information provided; try and make it up later return None, {} elif isinstance(signals, int): # Number of signals given; make up the names return signals, {'%s[%d]' % (prefix, i): i for i in range(signals)} elif isinstance(signals, str): # Single string given => single signal with given name return 1, {signals: 0} elif all(isinstance(s, str) for s in signals): # Use the list of strings as the signal names return len(signals), {signals[i]: i for i in range(len(signals))} else: raise TypeError("Can't parse signal list %s" % str(signals)) # Find a signal by name def _find_signal(self, name, sigdict): return sigdict.get(name, None) # Update parameters used for _rhs, _out (used by subclasses) def _update_params(self, params, warning=False): if (warning): warn("Parameters passed to InputOutputSystem ignored.") def _rhs(self, t, x, u, params={}): """Evaluate right hand side of a differential or difference equation. Private function used to compute the right hand side of an input/output system model. Intended for fast evaluation; for a more user-friendly interface you may want to use :meth:`dynamics`. """ NotImplemented("Evaluation not implemented for system of type ", type(self)) def dynamics(self, t, x, u): """Compute the dynamics of a differential or difference equation. Given time `t`, input `u` and state `x`, returns the value of the right hand side of the dynamical system. If the system is continuous, returns the time derivative dx/dt = f(t, x, u) where `f` is the system's (possibly nonlinear) dynamics function. If the system is discrete-time, returns the next value of `x`: x[t+dt] = f(t, x[t], u[t]) Where `t` is a scalar. The inputs `x` and `u` must be of the correct length. Parameters ---------- t : float the time at which to evaluate x : array_like current state u : array_like input Returns ------- dx/dt or x[t+dt] : ndarray """ return self._rhs(t, x, u) def _out(self, t, x, u, params={}): """Evaluate the output of a system at a given state, input, and time Private function used to compute the output of of an input/output system model given the state, input, parameters. Intended for fast evaluation; for a more user-friendly interface you may want to use :meth:`output`. 
""" # If no output function was defined in subclass, return state return x def output(self, t, x, u): """Compute the output of the system Given time `t`, input `u` and state `x`, returns the output of the system: y = g(t, x, u) The inputs `x` and `u` must be of the correct length. Parameters ---------- t : float the time at which to evaluate x : array_like current state u : array_like input Returns ------- y : ndarray """ return self._out(t, x, u) def set_inputs(self, inputs, prefix='u'): """Set the number/names of the system inputs. Parameters ---------- inputs : int, list of str, or None Description of the system inputs. This can be given as an integer count or as a list of strings that name the individual signals. If an integer count is specified, the names of the signal will be of the form `u[i]` (where the prefix `u` can be changed using the optional prefix parameter). prefix : string, optional If `inputs` is an integer, create the names of the states using the given prefix (default = 'u'). The names of the input will be of the form `prefix[i]`. """ self.ninputs, self.input_index = \ self._process_signal_list(inputs, prefix=prefix) def set_outputs(self, outputs, prefix='y'): """Set the number/names of the system outputs. Parameters ---------- outputs : int, list of str, or None Description of the system outputs. This can be given as an integer count or as a list of strings that name the individual signals. If an integer count is specified, the names of the signal will be of the form `u[i]` (where the prefix `u` can be changed using the optional prefix parameter). prefix : string, optional If `outputs` is an integer, create the names of the states using the given prefix (default = 'y'). The names of the input will be of the form `prefix[i]`. """ self.noutputs, self.output_index = \ self._process_signal_list(outputs, prefix=prefix) def set_states(self, states, prefix='x'): """Set the number/names of the system states. Parameters ---------- states : int, list of str, or None Description of the system states. This can be given as an integer count or as a list of strings that name the individual signals. If an integer count is specified, the names of the signal will be of the form `u[i]` (where the prefix `u` can be changed using the optional prefix parameter). prefix : string, optional If `states` is an integer, create the names of the states using the given prefix (default = 'x'). The names of the input will be of the form `prefix[i]`. """ self.nstates, self.state_index = \ self._process_signal_list(states, prefix=prefix) def find_input(self, name): """Find the index for an input given its name (`None` if not found)""" return self.input_index.get(name, None) def find_output(self, name): """Find the index for an output given its name (`None` if not found)""" return self.output_index.get(name, None) def find_state(self, name): """Find the index for a state given its name (`None` if not found)""" return self.state_index.get(name, None) def issiso(self): """Check to see if a system is single input, single output""" return self.ninputs == 1 and self.noutputs == 1 def feedback(self, other=1, sign=-1, params={}): """Feedback interconnection between two input/output systems Parameters ---------- sys1: InputOutputSystem The primary process. sys2: InputOutputSystem The feedback process (often a feedback controller). sign: scalar, optional The sign of feedback. `sign` = -1 indicates negative feedback, and `sign` = 1 indicates positive feedback. 
`sign` is an optional argument; it assumes a value of -1 if not specified. Returns ------- out: InputOutputSystem Raises ------ ValueError if the inputs, outputs, or timebases of the systems are incompatible. """ # TODO: add conversion to I/O system when needed if not isinstance(other, InputOutputSystem): # Try converting to a state space system try: other = _convert_to_statespace(other) except TypeError: raise TypeError( "Feedback around I/O system must be an I/O system " "or convertable to an I/O system.") other = LinearIOSystem(other) # Make sure systems can be interconnected if self.noutputs != other.ninputs or other.noutputs != self.ninputs: raise ValueError("Can't connect systems with incompatible " "inputs and outputs") # Make sure timebases are compatible dt = common_timebase(self.dt, other.dt) inplist = [(0, i) for i in range(self.ninputs)] outlist = [(0, i) for i in range(self.noutputs)] # Return the series interconnection between the systems newsys = InterconnectedSystem( (self, other), inplist=inplist, outlist=outlist, params=params, dt=dt) # Set up the connecton map manually newsys.set_connect_map(np.block( [[np.zeros((self.ninputs, self.noutputs)), sign * np.eye(self.ninputs, other.noutputs)], [np.eye(other.ninputs, self.noutputs), np.zeros((other.ninputs, other.noutputs))]] )) if isinstance(self, StateSpace) and isinstance(other, StateSpace): # Special case: maintain linear systems structure ss_sys = StateSpace.feedback(self, other, sign=sign) return LinearICSystem(newsys, ss_sys) # Return the newly created system return newsys def linearize(self, x0, u0, t=0, params={}, eps=1e-6, name=None, copy=False, **kwargs): """Linearize an input/output system at a given state and input. Return the linearization of an input/output system at a given state and input value as a StateSpace system. See :func:`~control.linearize` for complete documentation. """ # # If the linearization is not defined by the subclass, perform a # numerical linearization use the `_rhs()` and `_out()` member # functions. # # Figure out dimensions if they were not specified. 
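        # (If nstates or ninputs were not given when the system was defined,
        # they are inferred below from the shapes of x0 and u0.)  The A, B,
        # C, D matrices are then approximated by one-sided (forward)
        # differences with step size `eps`: column i of A is
        #     (self._rhs(t, x0 + eps * e_i, u0) - self._rhs(t, x0, u0)) / eps
        # with B obtained from input perturbations and C, D obtained the same
        # way from `_out()`.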
nstates = _find_size(self.nstates, x0) ninputs = _find_size(self.ninputs, u0) # Convert x0, u0 to arrays, if needed if np.isscalar(x0): x0 = np.ones((nstates,)) * x0 if np.isscalar(u0): u0 = np.ones((ninputs,)) * u0 # Compute number of outputs by evaluating the output function noutputs = _find_size(self.noutputs, self._out(t, x0, u0)) # Update the current parameters self._update_params(params) # Compute the nominal value of the update law and output F0 = self._rhs(t, x0, u0) H0 = self._out(t, x0, u0) # Create empty matrices that we can fill up with linearizations A = np.zeros((nstates, nstates)) # Dynamics matrix B = np.zeros((nstates, ninputs)) # Input matrix C = np.zeros((noutputs, nstates)) # Output matrix D = np.zeros((noutputs, ninputs)) # Direct term # Perturb each of the state variables and compute linearization for i in range(nstates): dx = np.zeros((nstates,)) dx[i] = eps A[:, i] = (self._rhs(t, x0 + dx, u0) - F0) / eps C[:, i] = (self._out(t, x0 + dx, u0) - H0) / eps # Perturb each of the input variables and compute linearization for i in range(ninputs): du = np.zeros((ninputs,)) du[i] = eps B[:, i] = (self._rhs(t, x0, u0 + du) - F0) / eps D[:, i] = (self._out(t, x0, u0 + du) - H0) / eps # Create the state space system linsys = LinearIOSystem( StateSpace(A, B, C, D, self.dt, remove_useless=False), name=name, **kwargs) # Set the names the system, inputs, outputs, and states if copy: if name is None: linsys.name = \ config.defaults['iosys.linearized_system_name_prefix'] + \ self.name + \ config.defaults['iosys.linearized_system_name_suffix'] linsys.ninputs, linsys.input_index = self.ninputs, \ self.input_index.copy() linsys.noutputs, linsys.output_index = \ self.noutputs, self.output_index.copy() linsys.nstates, linsys.state_index = \ self.nstates, self.state_index.copy() return linsys def copy(self, newname=None): """Make a copy of an input/output system.""" dup_prefix = config.defaults['iosys.duplicate_system_name_prefix'] dup_suffix = config.defaults['iosys.duplicate_system_name_suffix'] newsys = copy.copy(self) newsys.name = self._name_or_default( dup_prefix + self.name + dup_suffix if not newname else newname) return newsys class LinearIOSystem(InputOutputSystem, StateSpace): """Input/output representation of a linear (state space) system. This class is used to implementat a system that is a linear state space system (defined by the StateSpace system object). Parameters ---------- linsys : StateSpace LTI StateSpace system to be converted inputs : int, list of str or None, optional Description of the system inputs. This can be given as an integer count or as a list of strings that name the individual signals. If an integer count is specified, the names of the signal will be of the form `s[i]` (where `s` is one of `u`, `y`, or `x`). If this parameter is not given or given as `None`, the relevant quantity will be determined when possible based on other information provided to functions using the system. outputs : int, list of str or None, optional Description of the system outputs. Same format as `inputs`. states : int, list of str, or None, optional Description of the system states. Same format as `inputs`. dt : None, True or float, optional System timebase. 0 (default) indicates continuous time, True indicates discrete time with unspecified sampling time, positive number is discrete time with specified sampling time, None indicates unspecified timebase (either continuous or discrete time). params : dict, optional Parameter values for the systems. 
Passed to the evaluation functions for the system as default values, overriding internal defaults. name : string, optional System name (used for specifying signals). If unspecified, a generic name <sys[id]> is generated with a unique integer id. Attributes ---------- ninputs, noutputs, nstates, dt, etc See :class:`InputOutputSystem` for inherited attributes. A, B, C, D See :class:`~control.StateSpace` for inherited attributes. """ def __init__(self, linsys, inputs=None, outputs=None, states=None, name=None, **kwargs): """Create an I/O system from a state space linear system. Converts a :class:`~control.StateSpace` system into an :class:`~control.InputOutputSystem` with the same inputs, outputs, and states. The new system can be a continuous or discrete time system. """ if not isinstance(linsys, StateSpace): raise TypeError("Linear I/O system must be a state space object") # Look for 'input' and 'output' parameter name variants inputs = _parse_signal_parameter(inputs, 'input', kwargs) outputs = _parse_signal_parameter(outputs, 'output', kwargs, end=True) # Create the I/O system object super(LinearIOSystem, self).__init__( inputs=linsys.ninputs, outputs=linsys.noutputs, states=linsys.nstates, params={}, dt=linsys.dt, name=name) # Initalize additional state space variables StateSpace.__init__(self, linsys, remove_useless=False) # Process input, output, state lists, if given # Make sure they match the size of the linear system ninputs, self.input_index = self._process_signal_list( inputs if inputs is not None else linsys.ninputs, prefix='u') if ninputs is not None and linsys.ninputs != ninputs: raise ValueError("Wrong number/type of inputs given.") noutputs, self.output_index = self._process_signal_list( outputs if outputs is not None else linsys.noutputs, prefix='y') if noutputs is not None and linsys.noutputs != noutputs: raise ValueError("Wrong number/type of outputs given.") nstates, self.state_index = self._process_signal_list( states if states is not None else linsys.nstates, prefix='x') if nstates is not None and linsys.nstates != nstates: raise ValueError("Wrong number/type of states given.") # The following text needs to be replicated from StateSpace in order for # this entry to show up properly in sphinx doccumentation (not sure why, # but it was the only way to get it to work). # #: Deprecated attribute; use :attr:`nstates` instead. #: #: The ``state`` attribute was used to store the number of states for : a #: state space system. It is no longer used. If you need to access the #: number of states, use :attr:`nstates`. states = property(StateSpace._get_states, StateSpace._set_states) def _update_params(self, params={}, warning=True): # Parameters not supported; issue a warning if params and warning: warn("Parameters passed to LinearIOSystems are ignored.") def _rhs(self, t, x, u): # Convert input to column vector and then change output to 1D array xdot = np.dot(self.A, np.reshape(x, (-1, 1))) \ + np.dot(self.B, np.reshape(u, (-1, 1))) return np.array(xdot).reshape((-1,)) def _out(self, t, x, u): # Convert input to column vector and then change output to 1D array y = np.dot(self.C, np.reshape(x, (-1, 1))) \ + np.dot(self.D, np.reshape(u, (-1, 1))) return np.array(y).reshape((-1,)) class NonlinearIOSystem(InputOutputSystem): """Nonlinear I/O system. Creates an :class:`~control.InputOutputSystem` for a nonlinear system by specifying a state update function and an output function. 
The new system can be a continuous or discrete time system (Note: discrete-time systems are not yet supported by most functions.) Parameters ---------- updfcn : callable Function returning the state update function `updfcn(t, x, u, params) -> array` where `x` is a 1-D array with shape (nstates,), `u` is a 1-D array with shape (ninputs,), `t` is a float representing the currrent time, and `params` is a dict containing the values of parameters used by the function. outfcn : callable Function returning the output at the given state `outfcn(t, x, u, params) -> array` where the arguments are the same as for `upfcn`. inputs : int, list of str or None, optional Description of the system inputs. This can be given as an integer count or as a list of strings that name the individual signals. If an integer count is specified, the names of the signal will be of the form `s[i]` (where `s` is one of `u`, `y`, or `x`). If this parameter is not given or given as `None`, the relevant quantity will be determined when possible based on other information provided to functions using the system. outputs : int, list of str or None, optional Description of the system outputs. Same format as `inputs`. states : int, list of str, or None, optional Description of the system states. Same format as `inputs`. params : dict, optional Parameter values for the systems. Passed to the evaluation functions for the system as default values, overriding internal defaults. dt : timebase, optional The timebase for the system, used to specify whether the system is operating in continuous or discrete time. It can have the following values: * dt = 0: continuous time system (default) * dt > 0: discrete time system with sampling period 'dt' * dt = True: discrete time with unspecified sampling period * dt = None: no timebase specified name : string, optional System name (used for specifying signals). If unspecified, a generic name <sys[id]> is generated with a unique integer id. 
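    Example
    -------
    A minimal sketch of how such a system might be defined; the dynamics,
    parameter name, and numerical values below are purely illustrative:

    >>> def updfcn(t, x, u, params):
    ...     # first-order nonlinear dynamics: dx/dt = -a x + u^3
    ...     a = params.get('a', 1.)
    ...     return -a * x[0] + u[0]**3
    >>> sys = control.NonlinearIOSystem(
    ...     updfcn, None, inputs=1, outputs=1, states=1,
    ...     params={'a': 1.}, name='cubic')

    With no output function given, the system output is the full state.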
""" def __init__(self, updfcn, outfcn=None, inputs=None, outputs=None, states=None, params={}, name=None, **kwargs): """Create a nonlinear I/O system given update and output functions.""" # Look for 'input' and 'output' parameter name variants inputs = _parse_signal_parameter(inputs, 'input', kwargs) outputs = _parse_signal_parameter(outputs, 'output', kwargs) # Store the update and output functions self.updfcn = updfcn self.outfcn = outfcn # Initialize the rest of the structure dt = kwargs.pop('dt', config.defaults['control.default_dt']) super(NonlinearIOSystem, self).__init__( inputs=inputs, outputs=outputs, states=states, params=params, dt=dt, name=name ) # Make sure all input arguments got parsed if kwargs: raise TypeError("unknown parameters %s" % kwargs) # Check to make sure arguments are consistent if updfcn is None: if self.nstates is None: self.nstates = 0 else: raise ValueError("States specified but no update function " "given.") if outfcn is None: # No output function specified => outputs = states if self.noutputs is None and self.nstates is not None: self.noutputs = self.nstates elif self.noutputs is not None and self.noutputs == self.nstates: # Number of outputs = number of states => all is OK pass elif self.noutputs is not None and self.noutputs != 0: raise ValueError("Outputs specified but no output function " "(and nstates not known).") # Initialize current parameters to default parameters self._current_params = params.copy() # Return the value of a static nonlinear system def __call__(sys, u, params=None, squeeze=None): """Evaluate a (static) nonlinearity at a given input value If a nonlinear I/O system has not internal state, then evaluating the system at an input `u` gives the output `y = F(u)`, determined by the output function. Parameters ---------- params : dict, optional Parameter values for the system. Passed to the evaluation function for the system as default values, overriding internal defaults. squeeze : bool, optional If True and if the system has a single output, return the system output as a 1D array rather than a 2D array. If False, return the system output as a 2D array even if the system is SISO. Default value set by config.defaults['control.squeeze_time_response']. """ # Make sure the call makes sense if not sys._isstatic(): raise TypeError( "function evaluation is only supported for static " "input/output systems") # If we received any parameters, update them before calling _out() if params is not None: sys._update_params(params) # Evaluate the function on the argument out = sys._out(0, np.array((0,)), np.asarray(u)) _, out = _process_time_response(sys, None, out, None, squeeze=squeeze) return out def _update_params(self, params, warning=False): # Update the current parameter values self._current_params = self.params.copy() self._current_params.update(params) def _rhs(self, t, x, u): xdot = self.updfcn(t, x, u, self._current_params) \ if self.updfcn is not None else [] return np.array(xdot).reshape((-1,)) def _out(self, t, x, u): y = self.outfcn(t, x, u, self._current_params) \ if self.outfcn is not None else x return np.array(y).reshape((-1,)) class InterconnectedSystem(InputOutputSystem): """Interconnection of a set of input/output systems. This class is used to implement a system that is an interconnection of input/output systems. The sys consists of a collection of subsystems whose inputs and outputs are connected via a connection map. The overall system inputs and outputs are subsets of the subsystem inputs and outputs. 
See :func:`~control.interconnect` for a list of parameters. """ def __init__(self, syslist, connections=[], inplist=[], outlist=[], inputs=None, outputs=None, states=None, params={}, dt=None, name=None, **kwargs): """Create an I/O system from a list of systems + connection info.""" # Look for 'input' and 'output' parameter name variants inputs = _parse_signal_parameter(inputs, 'input', kwargs) outputs = _parse_signal_parameter(outputs, 'output', kwargs, end=True) # Convert input and output names to lists if they aren't already if not isinstance(inplist, (list, tuple)): inplist = [inplist] if not isinstance(outlist, (list, tuple)): outlist = [outlist] # Check to make sure all systems are consistent self.syslist = syslist self.syslist_index = {} nstates = 0 self.state_offset = [] ninputs = 0 self.input_offset = [] noutputs = 0 self.output_offset = [] sysobj_name_dct = {} sysname_count_dct = {} for sysidx, sys in enumerate(syslist): # Make sure time bases are consistent dt = common_timebase(dt, sys.dt) # Make sure number of inputs, outputs, states is given if sys.ninputs is None or sys.noutputs is None or \ sys.nstates is None: raise TypeError("System '%s' must define number of inputs, " "outputs, states in order to be connected" % sys.name) # Keep track of the offsets into the states, inputs, outputs self.input_offset.append(ninputs) self.output_offset.append(noutputs) self.state_offset.append(nstates) # Keep track of the total number of states, inputs, outputs nstates += sys.nstates ninputs += sys.ninputs noutputs += sys.noutputs # Check for duplicate systems or duplicate names # Duplicates are renamed sysname_1, sysname_2, etc. if sys in sysobj_name_dct: sys = sys.copy() warn("Duplicate object found in system list: %s. " "Making a copy" % str(sys.name)) if sys.name is not None and sys.name in sysname_count_dct: count = sysname_count_dct[sys.name] sysname_count_dct[sys.name] += 1 sysname = sys.name + "_" + str(count) sysobj_name_dct[sys] = sysname self.syslist_index[sysname] = sysidx warn("Duplicate name found in system list. " "Renamed to {}".format(sysname)) else: sysname_count_dct[sys.name] = 1 sysobj_name_dct[sys] = sys.name self.syslist_index[sys.name] = sysidx if states is None: states = [] state_name_delim = config.defaults['iosys.state_name_delim'] for sys, sysname in sysobj_name_dct.items(): states += [sysname + state_name_delim + statename for statename in sys.state_index.keys()] # Create the I/O system super(InterconnectedSystem, self).__init__( inputs=len(inplist), outputs=len(outlist), states=states, params=params, dt=dt, name=name) # If input or output list was specified, update it if inputs is not None: nsignals, self.input_index = \ self._process_signal_list(inputs, prefix='u') if nsignals is not None and len(inplist) != nsignals: raise ValueError("Wrong number/type of inputs given.") if outputs is not None: nsignals, self.output_index = \ self._process_signal_list(outputs, prefix='y') if nsignals is not None and len(outlist) != nsignals: raise ValueError("Wrong number/type of outputs given.") # Convert the list of interconnections to a connection map (matrix) self.connect_map = np.zeros((ninputs, noutputs)) for connection in connections: input_index = self._parse_input_spec(connection[0]) for output_spec in connection[1:]: output_index, gain = self._parse_output_spec(output_spec) if self.connect_map[input_index, output_index] != 0: warn("multiple connections given for input %d" % input_index + ". 
Combining with previous entries.") self.connect_map[input_index, output_index] += gain # Convert the input list to a matrix: maps system to subsystems self.input_map = np.zeros((ninputs, self.ninputs)) for index, inpspec in enumerate(inplist): if isinstance(inpspec, (int, str, tuple)): inpspec = [inpspec] if not isinstance(inpspec, list): raise ValueError("specifications in inplist must be of type " "int, str, tuple or list.") for spec in inpspec: ulist_index = self._parse_input_spec(spec) if self.input_map[ulist_index, index] != 0: warn("multiple connections given for input %d" % index + ". Combining with previous entries.") self.input_map[ulist_index, index] += 1 # Convert the output list to a matrix: maps subsystems to system self.output_map = np.zeros((self.noutputs, noutputs + ninputs)) for index, outspec in enumerate(outlist): if isinstance(outspec, (int, str, tuple)): outspec = [outspec] if not isinstance(outspec, list): raise ValueError("specifications in outlist must be of type " "int, str, tuple or list.") for spec in outspec: ylist_index, gain = self._parse_output_spec(spec) if self.output_map[index, ylist_index] != 0: warn("multiple connections given for output %d" % index + ". Combining with previous entries.") self.output_map[index, ylist_index] += gain # Save the parameters for the system self.params = params.copy() def _update_params(self, params, warning=False): for sys in self.syslist: local = sys.params.copy() # start with system parameters local.update(self.params) # update with global params local.update(params) # update with locally passed parameters sys._update_params(local, warning=warning) def _rhs(self, t, x, u): # Make sure state and input are vectors x = np.array(x, ndmin=1) u = np.array(u, ndmin=1) # Compute the input and output vectors ulist, ylist = self._compute_static_io(t, x, u) # Go through each system and update the right hand side for that system xdot = np.zeros((self.nstates,)) # Array to hold results state_index, input_index = 0, 0 # Start at the beginning for sys in self.syslist: # Update the right hand side for this subsystem if sys.nstates != 0: xdot[state_index:state_index + sys.nstates] = sys._rhs( t, x[state_index:state_index + sys.nstates], ulist[input_index:input_index + sys.ninputs]) # Update the state and input index counters state_index += sys.nstates input_index += sys.ninputs return xdot def _out(self, t, x, u): # Make sure state and input are vectors x = np.array(x, ndmin=1) u = np.array(u, ndmin=1) # Compute the input and output vectors ulist, ylist = self._compute_static_io(t, x, u) # Make the full set of subsystem outputs to system output return np.dot(self.output_map, ylist) def _compute_static_io(self, t, x, u): # Figure out the total number of inputs and outputs (ninputs, noutputs) = self.connect_map.shape # # Get the outputs and inputs at the current system state # # Initialize the lists used to keep track of internal signals ulist = np.dot(self.input_map, u) ylist = np.zeros((noutputs + ninputs,)) # To allow for feedthrough terms, iterate multiple times to allow # feedthrough elements to propagate. 
For n systems, we could need to # cycle through n+1 times before reaching steady state # TODO (later): see if there is a more efficient way to compute cycle_count = len(self.syslist) + 1 while cycle_count > 0: state_index, input_index, output_index = 0, 0, 0 for sys in self.syslist: # Compute outputs for each system from current state ysys = sys._out( t, x[state_index:state_index + sys.nstates], ulist[input_index:input_index + sys.ninputs]) # Store the outputs at the start of ylist ylist[output_index:output_index + sys.noutputs] = \ ysys.reshape((-1,)) # Store the input in the second part of ylist ylist[noutputs + input_index: noutputs + input_index + sys.ninputs] = \ ulist[input_index:input_index + sys.ninputs] # Increment the index pointers state_index += sys.nstates input_index += sys.ninputs output_index += sys.noutputs # Compute inputs based on connection map new_ulist = np.dot(self.connect_map, ylist[:noutputs]) \ + np.dot(self.input_map, u) # Check to see if any of the inputs changed if (ulist == new_ulist).all(): break else: ulist = new_ulist # Decrease the cycle counter cycle_count -= 1 # Make sure that we stopped before detecting an algebraic loop if cycle_count == 0: raise RuntimeError("Algebraic loop detected.") return ulist, ylist def _parse_input_spec(self, spec): """Parse an input specification and returns the index This function parses a specification of an input of an interconnected system component and returns the index of that input in the internal input vector. Input specifications are of one of the following forms: i first input for the ith system (i,) first input for the ith system (i, j) jth input for the ith system 'sys.sig' signal 'sig' in subsys 'sys' ('sys', 'sig') signal 'sig' in subsys 'sys' The function returns an index into the input vector array and the gain to use for that input. """ # Parse the signal that we received subsys_index, input_index, gain = self._parse_signal(spec, 'input') if gain != 1: raise ValueError("gain not allowed in spec '%s'." % str(spec)) # Return the index into the input vector list (ylist) return self.input_offset[subsys_index] + input_index def _parse_output_spec(self, spec): """Parse an output specification and returns the index and gain This function parses a specification of an output of an interconnected system component and returns the index of that output in the internal output vector (ylist). Output specifications are of one of the following forms: i first output for the ith system (i,) first output for the ith system (i, j) jth output for the ith system (i, j, gain) jth output for the ith system with gain 'sys.sig' signal 'sig' in subsys 'sys' '-sys.sig' signal 'sig' in subsys 'sys' with gain -1 ('sys', 'sig', gain) signal 'sig' in subsys 'sys' with gain If the gain is not specified, it is taken to be 1. Numbered outputs must be chosen from the list of subsystem outputs, but named outputs can also be contained in the list of subsystem inputs. The function returns an index into the output vector array and the gain to use for that output. 
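        For example (the subsystem and signal names here are purely
        illustrative), the string form '-plant.y[0]' and the tuple form
        ('plant', 'y[0]', -1) both refer to the signal 'y[0]' of a subsystem
        named 'plant' with a gain of -1.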
""" # Parse the rest of the spec with standard signal parsing routine try: # Start by looking in the set of subsystem outputs subsys_index, output_index, gain = \ self._parse_signal(spec, 'output') # Return the index into the input vector list (ylist) return self.output_offset[subsys_index] + output_index, gain except ValueError: # Try looking in the set of subsystem *inputs* subsys_index, input_index, gain = self._parse_signal( spec, 'input or output', dictname='input_index') # Return the index into the input vector list (ylist) noutputs = sum(sys.noutputs for sys in self.syslist) return noutputs + \ self.input_offset[subsys_index] + input_index, gain def _parse_signal(self, spec, signame='input', dictname=None): """Parse a signal specification, returning system and signal index. Signal specifications are of one of the following forms: i system_index = i, signal_index = 0 (i,) system_index = i, signal_index = 0 (i, j) system_index = i, signal_index = j 'sys.sig' signal 'sig' in subsys 'sys' ('sys', 'sig') signal 'sig' in subsys 'sys' ('sys', j) signal_index j in subsys 'sys' The function returns an index into the input vector array and the gain to use for that input. """ import re gain = 1 # Default gain # Check for special forms of the input if isinstance(spec, tuple) and len(spec) == 3: gain = spec[2] spec = spec[:2] elif isinstance(spec, str) and spec[0] == '-': gain = -1 spec = spec[1:] # Process cases where we are given indices as integers if isinstance(spec, int): return spec, 0, gain elif isinstance(spec, tuple) and len(spec) == 1 \ and isinstance(spec[0], int): return spec[0], 0, gain elif isinstance(spec, tuple) and len(spec) == 2 \ and all([isinstance(index, int) for index in spec]): return spec + (gain,) # Figure out the name of the dictionary to use if dictname is None: dictname = signame + '_index' if isinstance(spec, str): # If we got a dotted string, break up into pieces namelist = re.split(r'\.', spec) # For now, only allow signal level of system name # TODO: expand to allow nested signal names if len(namelist) != 2: raise ValueError("Couldn't parse %s signal reference '%s'." % (signame, spec)) system_index = self._find_system(namelist[0]) if system_index is None: raise ValueError("Couldn't find system '%s'." % namelist[0]) signal_index = self.syslist[system_index]._find_signal( namelist[1], getattr(self.syslist[system_index], dictname)) if signal_index is None: raise ValueError("Couldn't find %s signal '%s.%s'." % (signame, namelist[0], namelist[1])) return system_index, signal_index, gain # Handle the ('sys', 'sig'), (i, j), and mixed cases elif isinstance(spec, tuple) and len(spec) == 2 and \ isinstance(spec[0], (str, int)) and \ isinstance(spec[1], (str, int)): if isinstance(spec[0], int): system_index = spec[0] if system_index < 0 or system_index > len(self.syslist): system_index = None else: system_index = self._find_system(spec[0]) if system_index is None: raise ValueError("Couldn't find system '%s'." % spec[0]) if isinstance(spec[1], int): signal_index = spec[1] # TODO (later): check against max length of appropriate list? if signal_index < 0: system_index = None else: signal_index = self.syslist[system_index]._find_signal( spec[1], getattr(self.syslist[system_index], dictname)) if signal_index is None: raise ValueError("Couldn't find signal %s.%s." % tuple(spec)) return system_index, signal_index, gain else: raise ValueError("Couldn't parse signal reference %s." 
% str(spec)) def _find_system(self, name): return self.syslist_index.get(name, None) def set_connect_map(self, connect_map): """Set the connection map for an interconnected I/O system. Parameters ---------- connect_map : 2D array Specify the matrix that will be used to multiply the vector of subsystem outputs to obtain the vector of subsystem inputs. """ # Make sure the connection map is the right size if connect_map.shape != self.connect_map.shape: ValueError("Connection map is not the right shape") self.connect_map = connect_map def set_input_map(self, input_map): """Set the input map for an interconnected I/O system. Parameters ---------- input_map : 2D array Specify the matrix that will be used to multiply the vector of system inputs to obtain the vector of subsystem inputs. These values are added to the inputs specified in the connection map. """ # Figure out the number of internal inputs ninputs = sum(sys.ninputs for sys in self.syslist) # Make sure the input map is the right size if input_map.shape[0] != ninputs: ValueError("Input map is not the right shape") self.input_map = input_map self.ninputs = input_map.shape[1] def set_output_map(self, output_map): """Set the output map for an interconnected I/O system. Parameters ---------- output_map : 2D array Specify the matrix that will be used to multiply the vector of subsystem outputs to obtain the vector of system outputs. """ # Figure out the number of internal inputs and outputs ninputs = sum(sys.ninputs for sys in self.syslist) noutputs = sum(sys.noutputs for sys in self.syslist) # Make sure the output map is the right size if output_map.shape[1] == noutputs: # For backward compatibility, add zeros to the end of the array output_map = np.concatenate( (output_map, np.zeros((output_map.shape[0], ninputs))), axis=1) if output_map.shape[1] != noutputs + ninputs: ValueError("Output map is not the right shape") self.output_map = output_map self.noutputs = output_map.shape[0] class LinearICSystem(InterconnectedSystem, LinearIOSystem): """Interconnection of a set of linear input/output systems. This class is used to implement a system that is an interconnection of linear input/output systems. It has all of the structure of an :class:`~control.InterconnectedSystem`, but also maintains the requirement elements of :class:`~control.LinearIOSystem`, including the :class:`StateSpace` class structure, allowing it to be passed to functions that expect a :class:`StateSpace` system. 
This class is usually generated using :func:`~control.interconnect` and not called directly """ def __init__(self, io_sys, ss_sys=None): if not isinstance(io_sys, InterconnectedSystem): raise TypeError("First argument must be an interconnected system.") # Create the I/O system object InputOutputSystem.__init__( self, name=io_sys.name, params=io_sys.params) # Copy over the I/O systems attributes self.syslist = io_sys.syslist self.ninputs = io_sys.ninputs self.noutputs = io_sys.noutputs self.nstates = io_sys.nstates self.input_index = io_sys.input_index self.output_index = io_sys.output_index self.state_index = io_sys.state_index self.dt = io_sys.dt # Copy over the attributes from the interconnected system self.syslist_index = io_sys.syslist_index self.state_offset = io_sys.state_offset self.input_offset = io_sys.input_offset self.output_offset = io_sys.output_offset self.connect_map = io_sys.connect_map self.input_map = io_sys.input_map self.output_map = io_sys.output_map self.params = io_sys.params # If we didnt' get a state space system, linearize the full system # TODO: this could be replaced with a direct computation (someday) if ss_sys is None: ss_sys = self.linearize(0, 0) # Initialize the state space attributes if isinstance(ss_sys, StateSpace): # Make sure the dimension match if io_sys.ninputs != ss_sys.ninputs or \ io_sys.noutputs != ss_sys.noutputs or \ io_sys.nstates != ss_sys.nstates: raise ValueError("System dimensions for first and second " "arguments must match.") StateSpace.__init__(self, ss_sys, remove_useless=False) else: raise TypeError("Second argument must be a state space system.") # The following text needs to be replicated from StateSpace in order for # this entry to show up properly in sphinx doccumentation (not sure why, # but it was the only way to get it to work). # #: Deprecated attribute; use :attr:`nstates` instead. #: #: The ``state`` attribute was used to store the number of states for : a #: state space system. It is no longer used. If you need to access the #: number of states, use :attr:`nstates`. states = property(StateSpace._get_states, StateSpace._set_states) def input_output_response( sys, T, U=0., X0=0, params={}, transpose=False, return_x=False, squeeze=None, solve_ivp_kwargs={}, **kwargs): """Compute the output response of a system to a given input. Simulate a dynamical system with a given input and return its output and state values. Parameters ---------- sys : InputOutputSystem Input/output system to simulate. T : array-like Time steps at which the input is defined; values must be evenly spaced. U : array-like or number, optional Input array giving input at each time `T` (default = 0). X0 : array-like or number, optional Initial condition (default = 0). return_x : bool, optional If True, return the values of the state at each time (default = False). squeeze : bool, optional If True and if the system has a single output, return the system output as a 1D array rather than a 2D array. If False, return the system output as a 2D array even if the system is SISO. Default value set by config.defaults['control.squeeze_time_response']. Returns ------- T : array Time values of the output. yout : array Response of the system. If the system is SISO and squeeze is not True, the array is 1D (indexed by time). If the system is not SISO or squeeze is False, the array is 2D (indexed by the output number and time). xout : array Time evolution of the state vector (if return_x=True). 
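    A minimal usage sketch (``io_sys`` stands for an assumed
    :class:`InputOutputSystem`; the time grid and input values are
    illustrative only):

    >>> T = np.linspace(0, 10, 101)
    >>> T, yout = control.input_output_response(io_sys, T, U=1., X0=[0., 0.])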
Other parameters ---------------- solve_ivp_method : str, optional Set the method used by :func:`scipy.integrate.solve_ivp`. Defaults to 'RK45'. solve_ivp_kwargs : str, optional Pass additional keywords to :func:`scipy.integrate.solve_ivp`. Raises ------ TypeError If the system is not an input/output system. ValueError If time step does not match sampling time (for discrete time systems). """ # # Process keyword arguments # # Allow method as an alternative to solve_ivp_method if kwargs.get('method', None): solve_ivp_kwargs['method'] = kwargs.pop('method') # Figure out the method to be used if kwargs.get('solve_ivp_method', None): if kwargs.get('method', None): raise ValueError("ivp_method specified more than once") solve_ivp_kwargs['method'] = kwargs['solve_ivp_method'] # Set the default method to 'RK45' if solve_ivp_kwargs.get('method', None) is None: solve_ivp_kwargs['method'] = 'RK45' # Sanity checking on the input if not isinstance(sys, InputOutputSystem): raise TypeError("System of type ", type(sys), " not valid") # Compute the time interval and number of steps T0, Tf = T[0], T[-1] n_steps = len(T) # Check and convert the input, if needed # TODO: improve MIMO ninputs check (choose from U) if sys.ninputs is None or sys.ninputs == 1: legal_shapes = [(n_steps,), (1, n_steps)] else: legal_shapes = [(sys.ninputs, n_steps)] U = _check_convert_array(U, legal_shapes, 'Parameter ``U``: ', squeeze=False) # Check to make sure this is not a static function nstates = _find_size(sys.nstates, X0) if nstates == 0: # No states => map input to output u = U[0] if len(U.shape) == 1 else U[:, 0] y = np.zeros((np.shape(sys._out(T[0], X0, u))[0], len(T))) for i in range(len(T)): u = U[i] if len(U.shape) == 1 else U[:, i] y[:, i] = sys._out(T[i], [], u) return _process_time_response( sys, T, y, np.array((0, 0, np.asarray(T).size)), transpose=transpose, return_x=return_x, squeeze=squeeze) # create X0 if not given, test if X0 has correct shape X0 = _check_convert_array(X0, [(nstates,), (nstates, 1)], 'Parameter ``X0``: ', squeeze=True) # Update the parameter values sys._update_params(params) # # Define a function to evaluate the input at an arbitrary time # # This is equivalent to the function # # ufun = sp.interpolate.interp1d(T, U, fill_value='extrapolate') # # but has a lot less overhead => simulation runs much faster def ufun(t): # Find the value of the index using linear interpolation idx = np.searchsorted(T, t, side='left') if idx == 0: # For consistency in return type, multiple by a float return U[..., 0] * 1. else: dt = (t - T[idx-1]) / (T[idx] - T[idx-1]) return U[..., idx-1] * (1. 
- dt) + U[..., idx] * dt # Create a lambda function for the right hand side def ivp_rhs(t, x): return sys._rhs(t, x, ufun(t)) # Perform the simulation if isctime(sys): if not hasattr(sp.integrate, 'solve_ivp'): raise NameError("scipy.integrate.solve_ivp not found; " "use SciPy 1.0 or greater") soln = sp.integrate.solve_ivp( ivp_rhs, (T0, Tf), X0, t_eval=T, vectorized=False, **solve_ivp_kwargs) # Compute the output associated with the state (and use sys.out to # figure out the number of outputs just in case it wasn't specified) u = U[0] if len(U.shape) == 1 else U[:, 0] y = np.zeros((np.shape(sys._out(T[0], X0, u))[0], len(T))) for i in range(len(T)): u = U[i] if len(U.shape) == 1 else U[:, i] y[:, i] = sys._out(T[i], soln.y[:, i], u) elif isdtime(sys): # Make sure the time vector is uniformly spaced dt = T[1] - T[0] if not np.allclose(T[1:] - T[:-1], dt): raise ValueError("Parameter ``T``: time values must be " "equally spaced.") # Make sure the sample time matches the given time if (sys.dt is not True): # Make sure that the time increment is a multiple of sampling time # TODO: add back functionality for undersampling # TODO: this test is brittle if dt = sys.dt # First make sure that time increment is bigger than sampling time # if dt < sys.dt: # raise ValueError("Time steps ``T`` must match sampling time") # Check to make sure sampling time matches time increments if not np.isclose(dt, sys.dt): raise ValueError("Time steps ``T`` must be equal to " "sampling time") # Compute the solution soln = sp.optimize.OptimizeResult() soln.t = T # Store the time vector directly x = [float(x0) for x0 in X0] # State vector (store as floats) soln.y = [] # Solution, following scipy convention y = [] # System output for i in range(len(T)): # Store the current state and output soln.y.append(x) y.append(sys._out(T[i], x, ufun(T[i]))) # Update the state for the next iteration x = sys._rhs(T[i], x, ufun(T[i])) # Convert output to numpy arrays soln.y = np.transpose(np.array(soln.y)) y = np.transpose(np.array(y)) # Mark solution as successful soln.success = True # No way to fail else: # Neither ctime or dtime?? raise TypeError("Can't determine system type") return _process_time_response(sys, soln.t, y, soln.y, transpose=transpose, return_x=return_x, squeeze=squeeze) def find_eqpt(sys, x0, u0=[], y0=None, t=0, params={}, iu=None, iy=None, ix=None, idx=None, dx0=None, return_y=False, return_result=False, **kw): """Find the equilibrium point for an input/output system. Returns the value of an equlibrium point given the initial state and either input value or desired output value for the equilibrium point. Parameters ---------- x0 : list of initial state values Initial guess for the value of the state near the equilibrium point. u0 : list of input values, optional If `y0` is not specified, sets the equilibrium value of the input. If `y0` is given, provides an initial guess for the value of the input. Can be omitted if the system does not have any inputs. y0 : list of output values, optional If specified, sets the desired values of the outputs at the equilibrium point. t : float, optional Evaluation time, for time-varying systems params : dict, optional Parameter values for the system. Passed to the evaluation functions for the system as default values, overriding internal defaults. iu : list of input indices, optional If specified, only the inputs with the given indices will be fixed at the specified values in solving for an equilibrium point. All other inputs will be varied. 
Input indices can be listed in any order. iy : list of output indices, optional If specified, only the outputs with the given indices will be fixed at the specified values in solving for an equilibrium point. All other outputs will be varied. Output indices can be listed in any order. ix : list of state indices, optional If specified, states with the given indices will be fixed at the specified values in solving for an equilibrium point. All other states will be varied. State indices can be listed in any order. dx0 : list of update values, optional If specified, the value of update map must match the listed value instead of the default value of 0. idx : list of state indices, optional If specified, state updates with the given indices will have their update maps fixed at the values given in `dx0`. All other update values will be ignored in solving for an equilibrium point. State indices can be listed in any order. By default, all updates will be fixed at `dx0` in searching for an equilibrium point. return_y : bool, optional If True, return the value of output at the equilibrium point. return_result : bool, optional If True, return the `result` option from the :func:`scipy.optimize.root` function used to compute the equilibrium point. Returns ------- xeq : array of states Value of the states at the equilibrium point, or `None` if no equilibrium point was found and `return_result` was False. ueq : array of input values Value of the inputs at the equilibrium point, or `None` if no equilibrium point was found and `return_result` was False. yeq : array of output values, optional If `return_y` is True, returns the value of the outputs at the equilibrium point, or `None` if no equilibrium point was found and `return_result` was False. result : :class:`scipy.optimize.OptimizeResult`, optional If `return_result` is True, returns the `result` from the :func:`scipy.optimize.root` function. 
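    Example
    -------
    A minimal usage sketch; ``sys`` and the numerical values below are
    illustrative assumptions:

    >>> xeq, ueq = control.find_eqpt(sys, x0=[0., 0.], u0=[0.])

    The `iu`, `iy`, `ix`, and `idx` keywords can be used to hold selected
    inputs, outputs, states, or state updates fixed at the given values
    while the remaining variables are varied in the search.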
""" from scipy.optimize import root # Figure out the number of states, inputs, and outputs nstates = _find_size(sys.nstates, x0) ninputs = _find_size(sys.ninputs, u0) noutputs = _find_size(sys.noutputs, y0) # Convert x0, u0, y0 to arrays, if needed if np.isscalar(x0): x0 = np.ones((nstates,)) * x0 if np.isscalar(u0): u0 = np.ones((ninputs,)) * u0 if np.isscalar(y0): y0 = np.ones((ninputs,)) * y0 # Discrete-time not yet supported if isdtime(sys, strict=True): raise NotImplementedError( "Discrete time systems are not yet supported.") # Make sure the input arguments match the sizes of the system if len(x0) != nstates or \ (u0 is not None and len(u0) != ninputs) or \ (y0 is not None and len(y0) != noutputs) or \ (dx0 is not None and len(dx0) != nstates): raise ValueError("Length of input arguments does not match system.") # Update the parameter values sys._update_params(params) # Decide what variables to minimize if all([x is None for x in (iu, iy, ix, idx)]): # Special cases: either inputs or outputs are constrained if y0 is None: # Take u0 as fixed and minimize over x # TODO: update to allow discrete time systems def ode_rhs(z): return sys._rhs(t, z, u0) result = root(ode_rhs, x0, **kw) z = (result.x, u0, sys._out(t, result.x, u0)) else: # Take y0 as fixed and minimize over x and u def rootfun(z): # Split z into x and u x, u = np.split(z, [nstates]) # TODO: update to allow discrete time systems return np.concatenate( (sys._rhs(t, x, u), sys._out(t, x, u) - y0), axis=0) z0 = np.concatenate((x0, u0), axis=0) # Put variables together result = root(rootfun, z0, **kw) # Find the eq point x, u = np.split(result.x, [nstates]) # Split result back in two z = (x, u, sys._out(t, x, u)) else: # General case: figure out what variables to constrain # Verify the indices we are using are all in range if iu is not None: iu = np.unique(iu) if any([not isinstance(x, int) for x in iu]) or \ (len(iu) > 0 and (min(iu) < 0 or max(iu) >= ninputs)): assert ValueError("One or more input indices is invalid") else: iu = [] if iy is not None: iy = np.unique(iy) if any([not isinstance(x, int) for x in iy]) or \ min(iy) < 0 or max(iy) >= noutputs: assert ValueError("One or more output indices is invalid") else: iy = list(range(noutputs)) if ix is not None: ix = np.unique(ix) if any([not isinstance(x, int) for x in ix]) or \ min(ix) < 0 or max(ix) >= nstates: assert ValueError("One or more state indices is invalid") else: ix = [] if idx is not None: idx = np.unique(idx) if any([not isinstance(x, int) for x in idx]) or \ min(idx) < 0 or max(idx) >= nstates: assert ValueError("One or more deriv indices is invalid") else: idx = list(range(nstates)) # Construct the index lists for mapping variables and constraints # # The mechanism by which we implement the root finding function is to # map the subset of variables we are searching over into the inputs # and states, and then return a function that represents the equations # we are trying to solve. # # To do this, we need to carry out the following operations: # # 1. Given the current values of the free variables (z), map them into # the portions of the state and input vectors that are not fixed. # # 2. Compute the update and output maps for the input/output system # and extract the subset of equations that should be equal to zero. 
# # We perform these functions by computing four sets of index lists: # # * state_vars: indices of states that are allowed to vary # * input_vars: indices of inputs that are allowed to vary # * deriv_vars: indices of derivatives that must be constrained # * output_vars: indices of outputs that must be constrained # # This index lists can all be precomputed based on the `iu`, `iy`, # `ix`, and `idx` lists that were passed as arguments to `find_eqpt` # and were processed above. # Get the states and inputs that were not listed as fixed state_vars = (range(nstates) if not len(ix) else np.delete(np.array(range(nstates)), ix)) input_vars = (range(ninputs) if not len(iu) else np.delete(np.array(range(ninputs)), iu)) # Set the outputs and derivs that will serve as constraints output_vars = np.array(iy) deriv_vars = np.array(idx) # Verify that the number of degrees of freedom all add up correctly num_freedoms = len(state_vars) + len(input_vars) num_constraints = len(output_vars) + len(deriv_vars) if num_constraints != num_freedoms: warn("Number of constraints (%d) does not match number of degrees " "of freedom (%d). Results may be meaningless." % (num_constraints, num_freedoms)) # Make copies of the state and input variables to avoid overwriting # and convert to floats (in case ints were used for initial conditions) x = np.array(x0, dtype=float) u = np.array(u0, dtype=float) dx0 = np.array(dx0, dtype=float) if dx0 is not None \ else np.zeros(x.shape) # Keep track of the number of states in the set of free variables nstate_vars = len(state_vars) dtime = isdtime(sys, strict=True) def rootfun(z): # Map the vector of values into the states and inputs x[state_vars] = z[:nstate_vars] u[input_vars] = z[nstate_vars:] # Compute the update and output maps dx = sys._rhs(t, x, u) - dx0 if dtime: dx -= x # TODO: check dy = sys._out(t, x, u) - y0 # Map the results into the constrained variables return np.concatenate((dx[deriv_vars], dy[output_vars]), axis=0) # Set the initial condition for the root finding algorithm z0 = np.concatenate((x[state_vars], u[input_vars]), axis=0) # Finally, call the root finding function result = root(rootfun, z0, **kw) # Extract out the results and insert into x and u x[state_vars] = result.x[:nstate_vars] u[input_vars] = result.x[nstate_vars:] z = (x, u, sys._out(t, x, u)) # Return the result based on what the user wants and what we found if not return_y: z = z[0:2] # Strip y from result if not desired if return_result: # Return whatever we got, along with the result dictionary return z + (result,) elif result.success: # Return the result of the optimization return z else: # Something went wrong, don't return anything return (None, None, None) if return_y else (None, None) # Linearize an input/output system def linearize(sys, xeq, ueq=[], t=0, params={}, **kw): """Linearize an input/output system at a given state and input. This function computes the linearization of an input/output system at a given state and input value and returns a :class:`~control.StateSpace` object. The eavaluation point need not be an equilibrium point. Parameters ---------- sys : InputOutputSystem The system to be linearized xeq : array The state at which the linearization will be evaluated (does not need to be an equlibrium state). ueq : array The input at which the linearization will be evaluated (does not need to correspond to an equlibrium state). t : float, optional The time at which the linearization will be computed (for time-varying systems). params : dict, optional Parameter values for the systems. 
Passed to the evaluation functions for the system as default values, overriding internal defaults. copy : bool, Optional If `copy` is True, copy the names of the input signals, output signals, and states to the linearized system. If `name` is not specified, the system name is set to the input system name with the string '_linearized' appended. name : string, optional Set the name of the linearized system. If not specified and if `copy` is `False`, a generic name <sys[id]> is generated with a unique integer id. If `copy` is `True`, the new system name is determined by adding the prefix and suffix strings in config.defaults['iosys.linearized_system_name_prefix'] and config.defaults['iosys.linearized_system_name_suffix'], with the default being to add the suffix '$linearized'. Returns ------- ss_sys : LinearIOSystem The linearization of the system, as a :class:`~control.LinearIOSystem` object (which is also a :class:`~control.StateSpace` object. """ if not isinstance(sys, InputOutputSystem): raise TypeError("Can only linearize InputOutputSystem types") return sys.linearize(xeq, ueq, t=t, params=params, **kw) # Utility function to parse a signal parameter def _parse_signal_parameter(value, name, kwargs, end=False): # Check kwargs for a variant of the parameter name if value is None and name in kwargs: value = kwargs.pop(name) if end and kwargs: raise TypeError("unknown parameters %s" % kwargs) return value def _find_size(sysval, vecval): """Utility function to find the size of a system parameter If both parameters are not None, they must be consistent. """ if hasattr(vecval, '__len__'): if sysval is not None and sysval != len(vecval): raise ValueError("Inconsistend information to determine size " "of system component") return len(vecval) # None or 0, which is a valid value for "a (sysval, ) vector of zeros". if not vecval: return 0 if sysval is None else sysval elif sysval == 1: # (1, scalar) is also a valid combination from legacy code return 1 raise ValueError("Can't determine size of system component.") # Convert a state space system into an input/output system (wrapper) def ss2io(*args, **kwargs): return LinearIOSystem(*args, **kwargs) ss2io.__doc__ = LinearIOSystem.__init__.__doc__ # Convert a transfer function into an input/output system (wrapper) def tf2io(*args, **kwargs): """Convert a transfer function into an I/O system""" # TODO: add remaining documentation # Convert the system to a state space system linsys = tf2ss(*args) # Now convert the state space system to an I/O system return LinearIOSystem(linsys, **kwargs) # Function to create an interconnected system def interconnect(syslist, connections=None, inplist=[], outlist=[], inputs=None, outputs=None, states=None, params={}, dt=None, name=None, **kwargs): """Interconnect a set of input/output systems. This function creates a new system that is an interconnection of a set of input/output systems. If all of the input systems are linear I/O systems (type :class:`~control.LinearIOSystem`) then the resulting system will be a linear interconnected I/O system (type :class:`~control.LinearICSystem`) with the appropriate inputs, outputs, and states. Otherwise, an interconnected I/O system (type :class:`~control.InterconnectedSystem`) will be created. Parameters ---------- syslist : list of InputOutputSystems The list of input/output systems to be connected connections : list of connections, optional Description of the internal connections between the subsystems: [connection1, connection2, ...] 
Each connection is itself a list that describes an input to one of the subsystems. The entries are of the form: [input-spec, output-spec1, output-spec2, ...] The input-spec can be in a number of different forms. The lowest level representation is a tuple of the form `(subsys_i, inp_j)` where `subsys_i` is the index into `syslist` and `inp_j` is the index into the input vector for the subsystem. If `subsys_i` has a single input, then the subsystem index `subsys_i` can be listed as the input-spec. If systems and signals are given names, then the form 'sys.sig' or ('sys', 'sig') are also recognized. Similarly, each output-spec should describe an output signal from one of the susystems. The lowest level representation is a tuple of the form `(subsys_i, out_j, gain)`. The input will be constructed by summing the listed outputs after multiplying by the gain term. If the gain term is omitted, it is assumed to be 1. If the system has a single output, then the subsystem index `subsys_i` can be listed as the input-spec. If systems and signals are given names, then the form 'sys.sig', ('sys', 'sig') or ('sys', 'sig', gain) are also recognized, and the special form '-sys.sig' can be used to specify a signal with gain -1. If omitted, the `interconnect` function will attempt to create the interconnection map by connecting all signals with the same base names (ignoring the system name). Specifically, for each input signal name in the list of systems, if that signal name corresponds to the output signal in any of the systems, it will be connected to that input (with a summation across all signals if the output name occurs in more than one system). The `connections` keyword can also be set to `False`, which will leave the connection map empty and it can be specified instead using the low-level :func:`~control.InterconnectedSystem.set_connect_map` method. inplist : list of input connections, optional List of connections for how the inputs for the overall system are mapped to the subsystem inputs. The input specification is similar to the form defined in the connection specification, except that connections do not specify an input-spec, since these are the system inputs. The entries for a connection are thus of the form: [input-spec1, input-spec2, ...] Each system input is added to the input for the listed subsystem. If the system input connects to only one subsystem input, a single input specification can be given (without the inner list). If omitted, the input map can be specified using the :func:`~control.InterconnectedSystem.set_input_map` method. outlist : list of output connections, optional List of connections for how the outputs from the subsystems are mapped to overall system outputs. The output connection description is the same as the form defined in the inplist specification (including the optional gain term). Numbered outputs must be chosen from the list of subsystem outputs, but named outputs can also be contained in the list of subsystem inputs. If an output connection contains more than one signal specification, then those signals are added together (multiplying by the any gain term) to form the system output. If omitted, the output map can be specified using the :func:`~control.InterconnectedSystem.set_output_map` method. inputs : int, list of str or None, optional Description of the system inputs. This can be given as an integer count or as a list of strings that name the individual signals. 
If an integer count is specified, the names of the signal will be of the form `s[i]` (where `s` is one of `u`, `y`, or `x`). If this parameter is not given or given as `None`, the relevant quantity will be determined when possible based on other information provided to functions using the system. outputs : int, list of str or None, optional Description of the system outputs. Same format as `inputs`. states : int, list of str, or None, optional Description of the system states. Same format as `inputs`. The default is `None`, in which case the states will be given names of the form '<subsys_name>.<state_name>', for each subsys in syslist and each state_name of each subsys. params : dict, optional Parameter values for the systems. Passed to the evaluation functions for the system as default values, overriding internal defaults. dt : timebase, optional The timebase for the system, used to specify whether the system is operating in continuous or discrete time. It can have the following values: * dt = 0: continuous time system (default) * dt > 0: discrete time system with sampling period 'dt' * dt = True: discrete time with unspecified sampling period * dt = None: no timebase specified name : string, optional System name (used for specifying signals). If unspecified, a generic name <sys[id]> is generated with a unique integer id. Example ------- >>> P = control.LinearIOSystem( >>> control.rss(2, 2, 2, strictly_proper=True), name='P') >>> C = control.LinearIOSystem(control.rss(2, 2, 2), name='C') >>> T = control.interconnect( >>> [P, C], >>> connections = [ >>> ['P.u[0]', 'C.y[0]'], ['P.u[1]', 'C.y[1]'], >>> ['C.u[0]', '-P.y[0]'], ['C.u[1]', '-P.y[1]']], >>> inplist = ['C.u[0]', 'C.u[1]'], >>> outlist = ['P.y[0]', 'P.y[1]'], >>> ) For a SISO system, this example can be simplified by using the :func:`~control.summing_block` function and the ability to automatically interconnect signals with the same names: >>> P = control.tf2io(control.tf(1, [1, 0]), inputs='u', outputs='y') >>> C = control.tf2io(control.tf(10, [1, 1]), inputs='e', outputs='u') >>> sumblk = control.summing_junction(inputs=['r', '-y'], output='e') >>> T = control.interconnect([P, C, sumblk], input='r', output='y') Notes ----- If a system is duplicated in the list of systems to be connected, a warning is generated and a copy of the system is created with the name of the new system determined by adding the prefix and suffix strings in config.defaults['iosys.linearized_system_name_prefix'] and config.defaults['iosys.linearized_system_name_suffix'], with the default being to add the suffix '$copy'$ to the system name. It is possible to replace lists in most of arguments with tuples instead, but strictly speaking the only use of tuples should be in the specification of an input- or output-signal via the tuple notation `(subsys_i, signal_j, gain)` (where `gain` is optional). If you get an unexpected error message about a specification being of the wrong type, check your use of tuples. In addition to its use for general nonlinear I/O systems, the :func:`~control.interconnect` function allows linear systems to be interconnected using named signals (compared with the :func:`~control.connect` function, which uses signal indices) and to be treated as both a :class:`~control.StateSpace` system as well as an :class:`~control.InputOutputSystem`. The `input` and `output` keywords can be used instead of `inputs` and `outputs`, for more natural naming of SISO systems. 
""" # Look for 'input' and 'output' parameter name variants inputs = _parse_signal_parameter(inputs, 'input', kwargs) outputs = _parse_signal_parameter(outputs, 'output', kwargs, end=True) # If connections was not specified, set up default connection list if connections is None: # For each system input, look for outputs with the same name connections = [] for input_sys in syslist: for input_name in input_sys.input_index.keys(): connect = [input_sys.name + "." + input_name] for output_sys in syslist: if input_name in output_sys.output_index.keys(): connect.append(output_sys.name + "." + input_name) if len(connect) > 1: connections.append(connect) elif connections is False: # Use an empty connections list connections = [] # If inplist/outlist is not present, try using inputs/outputs instead if not inplist and inputs is not None: inplist = list(inputs) if not outlist and outputs is not None: outlist = list(outputs) # Process input list if not isinstance(inplist, (list, tuple)): inplist = [inplist] new_inplist = [] for signal in inplist: # Create an empty connection and append to inplist connection = [] # Check for signal names without a system name if isinstance(signal, str) and len(signal.split('.')) == 1: # Get the signal name name = signal[1:] if signal[0] == '-' else signal sign = '-' if signal[0] == '-' else "" # Look for the signal name as a system input for sys in syslist: if name in sys.input_index.keys(): connection.append(sign + sys.name + "." + name) # Make sure we found the name if len(connection) == 0: raise ValueError("could not find signal %s" % name) else: new_inplist.append(connection) else: new_inplist.append(signal) inplist = new_inplist # Process output list if not isinstance(outlist, (list, tuple)): outlist = [outlist] new_outlist = [] for signal in outlist: # Create an empty connection and append to inplist connection = [] # Check for signal names without a system name if isinstance(signal, str) and len(signal.split('.')) == 1: # Get the signal name name = signal[1:] if signal[0] == '-' else signal sign = '-' if signal[0] == '-' else "" # Look for the signal name as a system output for sys in syslist: if name in sys.output_index.keys(): connection.append(sign + sys.name + "." + name) # Make sure we found the name if len(connection) == 0: raise ValueError("could not find signal %s" % name) else: new_outlist.append(connection) else: new_outlist.append(signal) outlist = new_outlist newsys = InterconnectedSystem( syslist, connections=connections, inplist=inplist, outlist=outlist, inputs=inputs, outputs=outputs, states=states, params=params, dt=dt, name=name) # If all subsystems are linear systems, maintain linear structure if all([isinstance(sys, LinearIOSystem) for sys in syslist]): return LinearICSystem(newsys, None) return newsys # Summing junction def summing_junction( inputs=None, output=None, dimension=None, name=None, prefix='u', **kwargs): """Create a summing junction as an input/output system. This function creates a static input/output system that outputs the sum of the inputs, potentially with a change in sign for each individual input. The input/output system that is created by this function can be used as a component in the :func:`~control.interconnect` function. Parameters ---------- inputs : int, string or list of strings Description of the inputs to the summing junction. This can be given as an integer count, a string, or a list of strings. If an integer count is specified, the names of the input signals will be of the form `u[i]`. 
output : string, optional Name of the system output. If not specified, the output will be 'y'. dimension : int, optional The dimension of the summing junction. If the dimension is set to a positive integer, a multi-input, multi-output summing junction will be created. The input and output signal names will be of the form `<signal>[i]` where `signal` is the input/output signal name specified by the `inputs` and `output` keywords. Default value is `None`. name : string, optional System name (used for specifying signals). If unspecified, a generic name <sys[id]> is generated with a unique integer id. prefix : string, optional If `inputs` is an integer, create the names of the states using the given prefix (default = 'u'). The names of the input will be of the form `prefix[i]`. Returns ------- sys : static LinearIOSystem Linear input/output system object with no states and only a direct term that implements the summing junction. Example ------- >>> P = control.tf2io(ct.tf(1, [1, 0]), input='u', output='y') >>> C = control.tf2io(ct.tf(10, [1, 1]), input='e', output='u') >>> sumblk = control.summing_junction(inputs=['r', '-y'], output='e') >>> T = control.interconnect((P, C, sumblk), input='r', output='y') """ # Utility function to parse input and output signal lists def _parse_list(signals, signame='input', prefix='u'): # Parse signals, including gains if isinstance(signals, int): nsignals = signals names = ["%s[%d]" % (prefix, i) for i in range(nsignals)] gains = np.ones((nsignals,)) elif isinstance(signals, str): nsignals = 1 gains = [-1 if signals[0] == '-' else 1] names = [signals[1:] if signals[0] == '-' else signals] elif isinstance(signals, list) and \ all([isinstance(x, str) for x in signals]): nsignals = len(signals) gains = np.ones((nsignals,)) names = [] for i in range(nsignals): if signals[i][0] == '-': gains[i] = -1 names.append(signals[i][1:]) else: names.append(signals[i]) else: raise ValueError( "could not parse %s description '%s'" % (signame, str(signals))) # Return the parsed list return nsignals, names, gains # Look for 'input' and 'output' parameter name variants inputs = _parse_signal_parameter(inputs, 'input', kwargs) output = _parse_signal_parameter(output, 'outputs', kwargs, end=True) # Default values for inputs and output if inputs is None: raise TypeError("input specification is required") if output is None: output = 'y' # Read the input list ninputs, input_names, input_gains = _parse_list( inputs, signame="input", prefix=prefix) noutputs, output_names, output_gains = _parse_list( output, signame="output", prefix='y') if noutputs > 1: raise NotImplementedError("vector outputs not yet supported") # If the dimension keyword is present, vectorize inputs and outputs if isinstance(dimension, int) and dimension >= 1: # Create a new list of input/output names and update parameters input_names = ["%s[%d]" % (name, dim) for name in input_names for dim in range(dimension)] ninputs = ninputs * dimension output_names = ["%s[%d]" % (name, dim) for name in output_names for dim in range(dimension)] noutputs = noutputs * dimension elif dimension is not None: raise ValueError( "unrecognized dimension value '%s'" % str(dimension)) else: dimension = 1 # Create the direct term D = np.kron(input_gains * output_gains[0], np.eye(dimension)) # Create a linear system of the appropriate size ss_sys = StateSpace( np.zeros((0, 0)), np.ones((0, ninputs)), np.ones((noutputs, 0)), D) # Create a LinearIOSystem return LinearIOSystem( ss_sys, inputs=input_names, outputs=output_names, name=name)
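
A minimal usage sketch of the functions documented above (tf2io, summing_junction, interconnect, linearize), assuming the package is importable as `control`; the plant and controller transfer functions and the evaluation point are illustrative choices, not values taken from the module itself.

import control

# Illustrative SISO plant and controller (assumed values)
P = control.tf2io(control.tf(1, [1, 0]), inputs='u', outputs='y')
C = control.tf2io(control.tf(10, [1, 1]), inputs='e', outputs='u')

# Summing junction computing the error signal e = r - y
sumblk = control.summing_junction(inputs=['r', '-y'], output='e')

# Closed loop from reference r to output y, connected automatically by signal names
T = control.interconnect([P, C, sumblk], inputs='r', outputs='y')

# Linearize about the origin; since T is already linear this returns an
# equivalent LinearIOSystem (which is also a StateSpace object)
T_lin = control.linearize(T, xeq=[0, 0], ueq=[0])
print(T_lin.A.shape)  # two states: one from P, one from C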
[]
[]
[]
[]
[]
python
null
null
null
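
A minimal sketch of calling the equilibrium-point routine implemented in the Python module above (`find_eqpt`), assuming the package is importable as `control` and that its `NonlinearIOSystem` constructor, defined elsewhere in that module, accepts an update function of the form f(t, x, u, params); the scalar model and initial guesses are purely illustrative.

import control

def updfcn(t, x, u, params):
    # Simple stable scalar dynamics: xdot = -x + u
    return [-x[0] + u[0]]

sys = control.NonlinearIOSystem(updfcn, None, inputs=1, outputs=1, states=1)

# Solve f(x, u0) = 0 starting from x0 = 0 with the input held at u0 = 1;
# for this model the root finder should converge to x close to 1.
xeq, ueq = control.find_eqpt(sys, [0.0], [1.0])
print(xeq, ueq)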
main_test.go
package gorm_test // Run tests // $ docker-compose up // $ ./test_all.sh import ( "context" "database/sql" "database/sql/driver" "errors" "fmt" "os" "path/filepath" "reflect" "regexp" "sort" "strconv" "strings" "sync" "testing" "time" "github.com/erikstmartin/go-testdb" "github.com/jinzhu/now" "github.com/Laisky/gorm" _ "github.com/Laisky/gorm/dialects/mssql" _ "github.com/Laisky/gorm/dialects/mysql" "github.com/Laisky/gorm/dialects/postgres" _ "github.com/Laisky/gorm/dialects/sqlite" ) var ( DB *gorm.DB t1, t2, t3, t4, t5 time.Time ) func init() { var err error if DB, err = OpenTestConnection(); err != nil { panic(fmt.Sprintf("No error should happen when connecting to test database, but got err=%+v", err)) } runMigration() } func OpenTestConnection() (db *gorm.DB, err error) { dbDSN := os.Getenv("GORM_DSN") switch os.Getenv("GORM_DIALECT") { case "mysql": fmt.Println("testing mysql...") if dbDSN == "" { dbDSN = "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True" } db, err = gorm.Open("mysql", dbDSN) case "postgres": fmt.Println("testing postgres...") if dbDSN == "" { dbDSN = "user=gorm password=gorm dbname=gorm port=9920 sslmode=disable" } db, err = gorm.Open("postgres", dbDSN) case "mssql": // CREATE LOGIN gorm WITH PASSWORD = 'LoremIpsum86'; // CREATE DATABASE gorm; // USE gorm; // CREATE USER gorm FROM LOGIN gorm; // sp_changedbowner 'gorm'; fmt.Println("testing mssql...") if dbDSN == "" { dbDSN = "sqlserver://gorm:LoremIpsum86@localhost:9930?database=gorm" } db, err = gorm.Open("mssql", dbDSN) default: fmt.Println("testing sqlite3...") db, err = gorm.Open("sqlite3", filepath.Join(os.TempDir(), "gorm.db")) } // db.SetLogger(Logger{log.New(os.Stdout, "\r\n", 0)}) // db.SetLogger(log.New(os.Stdout, "\r\n", 0)) if debug := os.Getenv("DEBUG"); debug == "true" { db.LogMode(true) } else if debug == "false" { db.LogMode(false) } db.DB().SetMaxIdleConns(10) return } func TestOpen_ReturnsError_WithBadArgs(t *testing.T) { stringRef := "foo" testCases := []interface{}{42, time.Now(), &stringRef} for _, tc := range testCases { t.Run(fmt.Sprintf("%v", tc), func(t *testing.T) { _, err := gorm.Open("postgresql", tc) if err == nil { t.Error("Should got error with invalid database source") } if !strings.HasPrefix(err.Error(), "invalid database source:") { t.Errorf("Should got error starting with \"invalid database source:\", but got %q", err.Error()) } }) } } func TestStringPrimaryKey(t *testing.T) { type UUIDStruct struct { ID string `gorm:"primary_key"` Name string } DB.DropTable(&UUIDStruct{}) DB.AutoMigrate(&UUIDStruct{}) data := UUIDStruct{ID: "uuid", Name: "hello"} if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello" { t.Errorf("string primary key should not be populated") } data = UUIDStruct{ID: "uuid", Name: "hello world"} if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.Name != "hello world" { t.Errorf("string primary key should not be populated") } } func TestExceptionsWithInvalidSql(t *testing.T) { var columns []string if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil { t.Errorf("Should got error with invalid SQL") } if DB.Model(&User{}).Where("sdsd.zaaa = ?", "sd;;;aa").Pluck("aaa", &columns).Error == nil { t.Errorf("Should got error with invalid SQL") } if DB.Where("sdsd.zaaa = ?", "sd;;;aa").Find(&User{}).Error == nil { t.Errorf("Should got error with invalid SQL") } var count1, count2 int64 DB.Model(&User{}).Count(&count1) if count1 <= 0 { t.Errorf("Should find some users") } if 
DB.Where("name = ?", "jinzhu; delete * from users").First(&User{}).Error == nil { t.Errorf("Should got error with invalid SQL") } DB.Model(&User{}).Count(&count2) if count1 != count2 { t.Errorf("No user should not be deleted by invalid SQL") } } func TestSetTable(t *testing.T) { DB.Create(getPreparedUser("pluck_user1", "pluck_user")) DB.Create(getPreparedUser("pluck_user2", "pluck_user")) DB.Create(getPreparedUser("pluck_user3", "pluck_user")) if err := DB.Table("users").Where("role = ?", "pluck_user").Pluck("age", &[]int{}).Error; err != nil { t.Error("No errors should happen if set table for pluck", err) } var users []User if DB.Table("users").Find(&[]User{}).Error != nil { t.Errorf("No errors should happen if set table for find") } if DB.Table("invalid_table").Find(&users).Error == nil { t.Errorf("Should got error when table is set to an invalid table") } DB.Exec("drop table deleted_users;") if DB.Table("deleted_users").CreateTable(&User{}).Error != nil { t.Errorf("Create table with specified table") } DB.Table("deleted_users").Save(&User{Name: "DeletedUser"}) var deletedUsers []User DB.Table("deleted_users").Find(&deletedUsers) if len(deletedUsers) != 1 { t.Errorf("Query from specified table") } var user User DB.Table("deleted_users").First(&user, "name = ?", "DeletedUser") user.Age = 20 DB.Table("deleted_users").Save(&user) if DB.Table("deleted_users").First(&user, "name = ? AND age = ?", "DeletedUser", 20).RecordNotFound() { t.Errorf("Failed to found updated user") } DB.Save(getPreparedUser("normal_user", "reset_table")) DB.Table("deleted_users").Save(getPreparedUser("deleted_user", "reset_table")) var user1, user2, user3 User DB.Where("role = ?", "reset_table").First(&user1).Table("deleted_users").First(&user2).Table("").First(&user3) if (user1.Name != "normal_user") || (user2.Name != "deleted_user") || (user3.Name != "normal_user") { t.Errorf("unset specified table with blank string") } } type Order struct { } type Cart struct { } func (c Cart) TableName() string { return "shopping_cart" } func TestHasTable(t *testing.T) { type Foo struct { Id int Stuff string } DB.DropTable(&Foo{}) // Table should not exist at this point, HasTable should return false if ok := DB.HasTable("foos"); ok { t.Errorf("Table should not exist, but does") } if ok := DB.HasTable(&Foo{}); ok { t.Errorf("Table should not exist, but does") } // We create the table if err := DB.CreateTable(&Foo{}).Error; err != nil { t.Errorf("Table should be created") } // And now it should exits, and HasTable should return true if ok := DB.HasTable("foos"); !ok { t.Errorf("Table should exist, but HasTable informs it does not") } if ok := DB.HasTable(&Foo{}); !ok { t.Errorf("Table should exist, but HasTable informs it does not") } } func TestTableName(t *testing.T) { DB := DB.Model("") if DB.NewScope(Order{}).TableName() != "orders" { t.Errorf("Order's table name should be orders") } if DB.NewScope(&Order{}).TableName() != "orders" { t.Errorf("&Order's table name should be orders") } if DB.NewScope([]Order{}).TableName() != "orders" { t.Errorf("[]Order's table name should be orders") } if DB.NewScope(&[]Order{}).TableName() != "orders" { t.Errorf("&[]Order's table name should be orders") } DB.SingularTable(true) if DB.NewScope(Order{}).TableName() != "order" { t.Errorf("Order's singular table name should be order") } if DB.NewScope(&Order{}).TableName() != "order" { t.Errorf("&Order's singular table name should be order") } if DB.NewScope([]Order{}).TableName() != "order" { t.Errorf("[]Order's singular table name should be 
order") } if DB.NewScope(&[]Order{}).TableName() != "order" { t.Errorf("&[]Order's singular table name should be order") } if DB.NewScope(&Cart{}).TableName() != "shopping_cart" { t.Errorf("&Cart's singular table name should be shopping_cart") } if DB.NewScope(Cart{}).TableName() != "shopping_cart" { t.Errorf("Cart's singular table name should be shopping_cart") } if DB.NewScope(&[]Cart{}).TableName() != "shopping_cart" { t.Errorf("&[]Cart's singular table name should be shopping_cart") } if DB.NewScope([]Cart{}).TableName() != "shopping_cart" { t.Errorf("[]Cart's singular table name should be shopping_cart") } DB.SingularTable(false) } func TestTableNameConcurrently(t *testing.T) { DB := DB.Model("") if DB.NewScope(Order{}).TableName() != "orders" { t.Errorf("Order's table name should be orders") } var wg sync.WaitGroup wg.Add(10) for i := 1; i <= 10; i++ { go func(db *gorm.DB) { DB.SingularTable(true) wg.Done() }(DB) } wg.Wait() if DB.NewScope(Order{}).TableName() != "order" { t.Errorf("Order's singular table name should be order") } DB.SingularTable(false) } func TestNullValues(t *testing.T) { DB.DropTable(&NullValue{}) DB.AutoMigrate(&NullValue{}) if err := DB.Save(&NullValue{ Name: sql.NullString{String: "hello", Valid: true}, Gender: &sql.NullString{String: "M", Valid: true}, Age: sql.NullInt64{Int64: 18, Valid: true}, Male: sql.NullBool{Bool: true, Valid: true}, Height: sql.NullFloat64{Float64: 100.11, Valid: true}, AddedAt: NullTime{Time: time.Now(), Valid: true}, }).Error; err != nil { t.Errorf("Not error should raise when test null value") } var nv NullValue DB.First(&nv, "name = ?", "hello") if nv.Name.String != "hello" || nv.Gender.String != "M" || nv.Age.Int64 != 18 || nv.Male.Bool != true || nv.Height.Float64 != 100.11 || nv.AddedAt.Valid != true { t.Errorf("Should be able to fetch null value") } if err := DB.Save(&NullValue{ Name: sql.NullString{String: "hello-2", Valid: true}, Gender: &sql.NullString{String: "F", Valid: true}, Age: sql.NullInt64{Int64: 18, Valid: false}, Male: sql.NullBool{Bool: true, Valid: true}, Height: sql.NullFloat64{Float64: 100.11, Valid: true}, AddedAt: NullTime{Time: time.Now(), Valid: false}, }).Error; err != nil { t.Errorf("Not error should raise when test null value") } var nv2 NullValue DB.First(&nv2, "name = ?", "hello-2") if nv2.Name.String != "hello-2" || nv2.Gender.String != "F" || nv2.Age.Int64 != 0 || nv2.Male.Bool != true || nv2.Height.Float64 != 100.11 || nv2.AddedAt.Valid != false { t.Errorf("Should be able to fetch null value") } if err := DB.Save(&NullValue{ Name: sql.NullString{String: "hello-3", Valid: false}, Gender: &sql.NullString{String: "M", Valid: true}, Age: sql.NullInt64{Int64: 18, Valid: false}, Male: sql.NullBool{Bool: true, Valid: true}, Height: sql.NullFloat64{Float64: 100.11, Valid: true}, AddedAt: NullTime{Time: time.Now(), Valid: false}, }).Error; err == nil { t.Errorf("Can't save because of name can't be null") } } func TestNullValuesWithFirstOrCreate(t *testing.T) { var nv1 = NullValue{ Name: sql.NullString{String: "first_or_create", Valid: true}, Gender: &sql.NullString{String: "M", Valid: true}, } var nv2 NullValue result := DB.Where(nv1).FirstOrCreate(&nv2) if result.RowsAffected != 1 { t.Errorf("RowsAffected should be 1 after create some record") } if result.Error != nil { t.Errorf("Should not raise any error, but got %v", result.Error) } if nv2.Name.String != "first_or_create" || nv2.Gender.String != "M" { t.Errorf("first or create with nullvalues") } if err := DB.Where(nv1).Assign(NullValue{Age: 
sql.NullInt64{Int64: 18, Valid: true}}).FirstOrCreate(&nv2).Error; err != nil { t.Errorf("Should not raise any error, but got %v", err) } if nv2.Age.Int64 != 18 { t.Errorf("should update age to 18") } } func TestTransaction(t *testing.T) { tx := DB.Begin() u := User{Name: "transcation"} if err := tx.Save(&u).Error; err != nil { t.Errorf("No error should raise") } if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil { t.Errorf("Should find saved record") } if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil { t.Errorf("Should return the underlying sql.Tx") } tx.Rollback() if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil { t.Errorf("Should not find record after rollback") } tx2 := DB.Begin() u2 := User{Name: "transcation-2"} if err := tx2.Save(&u2).Error; err != nil { t.Errorf("No error should raise") } if err := tx2.First(&User{}, "name = ?", "transcation-2").Error; err != nil { t.Errorf("Should find saved record") } tx2.Commit() if err := DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil { t.Errorf("Should be able to find committed record") } tx3 := DB.Begin() u3 := User{Name: "transcation-3"} if err := tx3.Save(&u3).Error; err != nil { t.Errorf("No error should raise") } if err := tx3.First(&User{}, "name = ?", "transcation-3").Error; err != nil { t.Errorf("Should find saved record") } tx3.RollbackUnlessCommitted() if err := tx.First(&User{}, "name = ?", "transcation").Error; err == nil { t.Errorf("Should not find record after rollback") } tx4 := DB.Begin() u4 := User{Name: "transcation-4"} if err := tx4.Save(&u4).Error; err != nil { t.Errorf("No error should raise") } if err := tx4.First(&User{}, "name = ?", "transcation-4").Error; err != nil { t.Errorf("Should find saved record") } tx4.Commit() tx4.RollbackUnlessCommitted() if err := DB.First(&User{}, "name = ?", "transcation-4").Error; err != nil { t.Errorf("Should be able to find committed record") } } func assertPanic(t *testing.T, f func()) { defer func() { if r := recover(); r == nil { t.Errorf("The code did not panic") } }() f() } func TestTransactionWithBlock(t *testing.T) { // rollback err := DB.Transaction(func(tx *gorm.DB) error { u := User{Name: "transcation"} if err := tx.Save(&u).Error; err != nil { t.Errorf("No error should raise") } if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil { t.Errorf("Should find saved record") } return errors.New("the error message") }) if err.Error() != "the error message" { t.Errorf("Transaction return error will equal the block returns error") } if err := DB.First(&User{}, "name = ?", "transcation").Error; err == nil { t.Errorf("Should not find record after rollback") } // commit DB.Transaction(func(tx *gorm.DB) error { u2 := User{Name: "transcation-2"} if err := tx.Save(&u2).Error; err != nil { t.Errorf("No error should raise") } if err := tx.First(&User{}, "name = ?", "transcation-2").Error; err != nil { t.Errorf("Should find saved record") } return nil }) if err := DB.First(&User{}, "name = ?", "transcation-2").Error; err != nil { t.Errorf("Should be able to find committed record") } // panic will rollback assertPanic(t, func() { DB.Transaction(func(tx *gorm.DB) error { u3 := User{Name: "transcation-3"} if err := tx.Save(&u3).Error; err != nil { t.Errorf("No error should raise") } if err := tx.First(&User{}, "name = ?", "transcation-3").Error; err != nil { t.Errorf("Should find saved record") } panic("force panic") }) }) if err := DB.First(&User{}, "name = ?", "transcation-3").Error; err == nil { 
t.Errorf("Should not find record after panic rollback") } } func TestTransaction_NoErrorOnRollbackAfterCommit(t *testing.T) { tx := DB.Begin() u := User{Name: "transcation"} if err := tx.Save(&u).Error; err != nil { t.Errorf("No error should raise") } if err := tx.Commit().Error; err != nil { t.Errorf("Commit should not raise error") } if err := tx.Rollback().Error; err != nil { t.Errorf("Rollback should not raise error") } } func TestTransactionReadonly(t *testing.T) { dialect := os.Getenv("GORM_DIALECT") if dialect == "" { dialect = "sqlite" } switch dialect { case "mssql", "sqlite": t.Skipf("%s does not support readonly transactions\n", dialect) } tx := DB.Begin() u := User{Name: "transcation"} if err := tx.Save(&u).Error; err != nil { t.Errorf("No error should raise") } tx.Commit() tx = DB.BeginTx(context.Background(), &sql.TxOptions{ReadOnly: true}) if err := tx.First(&User{}, "name = ?", "transcation").Error; err != nil { t.Errorf("Should find saved record") } if sqlTx, ok := tx.CommonDB().(*sql.Tx); !ok || sqlTx == nil { t.Errorf("Should return the underlying sql.Tx") } u = User{Name: "transcation-2"} if err := tx.Save(&u).Error; err == nil { t.Errorf("Error should have been raised in a readonly transaction") } tx.Rollback() } func TestRow(t *testing.T) { user1 := User{Name: "RowUser1", Age: 1, Birthday: parseTime("2000-1-1")} user2 := User{Name: "RowUser2", Age: 10, Birthday: parseTime("2010-1-1")} user3 := User{Name: "RowUser3", Age: 20, Birthday: parseTime("2020-1-1")} DB.Save(&user1).Save(&user2).Save(&user3) row := DB.Table("users").Where("name = ?", user2.Name).Select("age").Row() var age int64 row.Scan(&age) if age != 10 { t.Errorf("Scan with Row") } } func TestRows(t *testing.T) { user1 := User{Name: "RowsUser1", Age: 1, Birthday: parseTime("2000-1-1")} user2 := User{Name: "RowsUser2", Age: 10, Birthday: parseTime("2010-1-1")} user3 := User{Name: "RowsUser3", Age: 20, Birthday: parseTime("2020-1-1")} DB.Save(&user1).Save(&user2).Save(&user3) rows, err := DB.Table("users").Where("name = ? or name = ?", user2.Name, user3.Name).Select("name, age").Rows() if err != nil { t.Errorf("Not error should happen, got %v", err) } count := 0 for rows.Next() { var name string var age int64 rows.Scan(&name, &age) count++ } if count != 2 { t.Errorf("Should found two records") } } func TestScanRows(t *testing.T) { user1 := User{Name: "ScanRowsUser1", Age: 1, Birthday: parseTime("2000-1-1")} user2 := User{Name: "ScanRowsUser2", Age: 10, Birthday: parseTime("2010-1-1")} user3 := User{Name: "ScanRowsUser3", Age: 20, Birthday: parseTime("2020-1-1")} DB.Save(&user1).Save(&user2).Save(&user3) rows, err := DB.Table("users").Where("name = ? 
or name = ?", user2.Name, user3.Name).Select("name, age").Rows() if err != nil { t.Errorf("Not error should happen, got %v", err) } type Result struct { Name string Age int } var results []Result for rows.Next() { var result Result if err := DB.ScanRows(rows, &result); err != nil { t.Errorf("should get no error, but got %v", err) } results = append(results, result) } if !reflect.DeepEqual(results, []Result{{Name: "ScanRowsUser2", Age: 10}, {Name: "ScanRowsUser3", Age: 20}}) { t.Errorf("Should find expected results") } } func TestScan(t *testing.T) { user1 := User{Name: "ScanUser1", Age: 1, Birthday: parseTime("2000-1-1")} user2 := User{Name: "ScanUser2", Age: 10, Birthday: parseTime("2010-1-1")} user3 := User{Name: "ScanUser3", Age: 20, Birthday: parseTime("2020-1-1")} DB.Save(&user1).Save(&user2).Save(&user3) type result struct { Name string Age int } var res result DB.Table("users").Select("name, age").Where("name = ?", user3.Name).Scan(&res) if res.Name != user3.Name { t.Errorf("Scan into struct should work") } var doubleAgeRes = &result{} if err := DB.Table("users").Select("age + age as age").Where("name = ?", user3.Name).Scan(&doubleAgeRes).Error; err != nil { t.Errorf("Scan to pointer of pointer") } if doubleAgeRes.Age != res.Age*2 { t.Errorf("Scan double age as age") } var ress []result DB.Table("users").Select("name, age").Where("name in (?)", []string{user2.Name, user3.Name}).Scan(&ress) if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name { t.Errorf("Scan into struct map") } } func TestRaw(t *testing.T) { user1 := User{Name: "ExecRawSqlUser1", Age: 1, Birthday: parseTime("2000-1-1")} user2 := User{Name: "ExecRawSqlUser2", Age: 10, Birthday: parseTime("2010-1-1")} user3 := User{Name: "ExecRawSqlUser3", Age: 20, Birthday: parseTime("2020-1-1")} DB.Save(&user1).Save(&user2).Save(&user3) type result struct { Name string Email string } var ress []result DB.Raw("SELECT name, age FROM users WHERE name = ? or name = ?", user2.Name, user3.Name).Scan(&ress) if len(ress) != 2 || ress[0].Name != user2.Name || ress[1].Name != user3.Name { t.Errorf("Raw with scan") } rows, _ := DB.Raw("select name, age from users where name = ?", user3.Name).Rows() count := 0 for rows.Next() { count++ } if count != 1 { t.Errorf("Raw with Rows should find one record with name 3") } DB.Exec("update users set name=? 
where name in (?)", "jinzhu", []string{user1.Name, user2.Name, user3.Name}) if DB.Where("name in (?)", []string{user1.Name, user2.Name, user3.Name}).First(&User{}).Error != gorm.ErrRecordNotFound { t.Error("Raw sql to update records") } } func TestGroup(t *testing.T) { rows, err := DB.Select("name").Table("users").Group("name").Rows() if err == nil { defer rows.Close() for rows.Next() { var name string rows.Scan(&name) } } else { t.Errorf("Should not raise any error") } } func TestJoins(t *testing.T) { var user = User{ Name: "joins", CreditCard: CreditCard{Number: "411111111111"}, Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}}, } DB.Save(&user) var users1 []User DB.Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins").Find(&users1) if len(users1) != 2 { t.Errorf("should find two users using left join") } var users2 []User DB.Joins("left join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Where("name = ?", "joins").First(&users2) if len(users2) != 1 { t.Errorf("should find one users using left join with conditions") } var users3 []User DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where("name = ?", "joins").First(&users3) if len(users3) != 1 { t.Errorf("should find one users using multiple left join conditions") } var users4 []User DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "422222222222").Where("name = ?", "joins").First(&users4) if len(users4) != 0 { t.Errorf("should find no user when searching with unexisting credit card") } var users5 []User db5 := DB.Joins("join emails on emails.user_id = users.id AND emails.email = ?", "[email protected]").Joins("join credit_cards on credit_cards.user_id = users.id AND credit_cards.number = ?", "411111111111").Where(User{Id: 1}).Where(Email{Id: 1}).Not(Email{Id: 10}).First(&users5) if db5.Error != nil { t.Errorf("Should not raise error for join where identical fields in different tables. Error: %s", db5.Error.Error()) } } type JoinedIds struct { UserID int64 `gorm:"column:id"` BillingAddressID int64 `gorm:"column:id"` EmailID int64 `gorm:"column:id"` } func TestScanIdenticalColumnNames(t *testing.T) { var user = User{ Name: "joinsIds", Email: "[email protected]", BillingAddress: Address{ Address1: "One Park Place", }, Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}}, } DB.Save(&user) var users []JoinedIds DB.Select("users.id, addresses.id, emails.id").Table("users"). Joins("left join addresses on users.billing_address_id = addresses.id"). Joins("left join emails on emails.user_id = users.id"). 
Where("name = ?", "joinsIds").Scan(&users) if len(users) != 2 { t.Fatal("should find two rows using left join") } if user.Id != users[0].UserID { t.Errorf("Expected result row to contain UserID %d, but got %d", user.Id, users[0].UserID) } if user.Id != users[1].UserID { t.Errorf("Expected result row to contain UserID %d, but got %d", user.Id, users[1].UserID) } if user.BillingAddressID.Int64 != users[0].BillingAddressID { t.Errorf("Expected result row to contain BillingAddressID %d, but got %d", user.BillingAddressID.Int64, users[0].BillingAddressID) } if user.BillingAddressID.Int64 != users[1].BillingAddressID { t.Errorf("Expected result row to contain BillingAddressID %d, but got %d", user.BillingAddressID.Int64, users[0].BillingAddressID) } if users[0].EmailID == users[1].EmailID { t.Errorf("Email ids should be unique. Got %d and %d", users[0].EmailID, users[1].EmailID) } if int64(user.Emails[0].Id) != users[0].EmailID && int64(user.Emails[1].Id) != users[0].EmailID { t.Errorf("Expected result row ID to be either %d or %d, but was %d", user.Emails[0].Id, user.Emails[1].Id, users[0].EmailID) } if int64(user.Emails[0].Id) != users[1].EmailID && int64(user.Emails[1].Id) != users[1].EmailID { t.Errorf("Expected result row ID to be either %d or %d, but was %d", user.Emails[0].Id, user.Emails[1].Id, users[1].EmailID) } } func TestJoinsWithSelect(t *testing.T) { type result struct { Name string Email string } user := User{ Name: "joins_with_select", Emails: []Email{{Email: "[email protected]"}, {Email: "[email protected]"}}, } DB.Save(&user) var results []result DB.Table("users").Select("name, emails.email").Joins("left join emails on emails.user_id = users.id").Where("name = ?", "joins_with_select").Scan(&results) sort.Slice(results, func(i, j int) bool { return strings.Compare(results[i].Email, results[j].Email) < 0 }) if len(results) != 2 || results[0].Email != "[email protected]" || results[1].Email != "[email protected]" { t.Errorf("Should find all two emails with Join select") } } func TestHaving(t *testing.T) { rows, err := DB.Select("name, count(*) as total").Table("users").Group("name").Having("name IN (?)", []string{"2", "3"}).Rows() if err == nil { defer rows.Close() for rows.Next() { var name string var total int64 rows.Scan(&name, &total) if name == "2" && total != 1 { t.Errorf("Should have one user having name 2") } if name == "3" && total != 2 { t.Errorf("Should have two users having name 3") } } } else { t.Errorf("Should not raise any error") } } func TestQueryBuilderSubselectInWhere(t *testing.T) { user := User{Name: "query_expr_select_ruser1", Email: "[email protected]", Age: 32} DB.Save(&user) user = User{Name: "query_expr_select_ruser2", Email: "[email protected]", Age: 16} DB.Save(&user) user = User{Name: "query_expr_select_ruser3", Email: "[email protected]", Age: 64} DB.Save(&user) user = User{Name: "query_expr_select_ruser4", Email: "[email protected]", Age: 128} DB.Save(&user) var users []User DB.Select("*").Where("name IN (?)", DB. Select("name").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users) if len(users) != 4 { t.Errorf("Four users should be found, instead found %d", len(users)) } DB.Select("*").Where("name LIKE ?", "query_expr_select%").Where("age >= (?)", DB. 
Select("AVG(age)").Table("users").Where("name LIKE ?", "query_expr_select%").QueryExpr()).Find(&users) if len(users) != 2 { t.Errorf("Two users should be found, instead found %d", len(users)) } } func TestQueryBuilderRawQueryWithSubquery(t *testing.T) { user := User{Name: "subquery_test_user1", Age: 10} DB.Save(&user) user = User{Name: "subquery_test_user2", Age: 11} DB.Save(&user) user = User{Name: "subquery_test_user3", Age: 12} DB.Save(&user) var count int err := DB.Raw("select count(*) from (?) tmp", DB.Table("users"). Select("name"). Where("age >= ? and name in (?)", 10, []string{"subquery_test_user1", "subquery_test_user2"}). Group("name"). QueryExpr(), ).Count(&count).Error if err != nil { t.Errorf("Expected to get no errors, but got %v", err) } if count != 2 { t.Errorf("Row count must be 2, instead got %d", count) } err = DB.Raw("select count(*) from (?) tmp", DB.Table("users"). Select("name"). Where("name LIKE ?", "subquery_test%"). Not("age <= ?", 10).Not("name in (?)", []string{"subquery_test_user1", "subquery_test_user2"}). Group("name"). QueryExpr(), ).Count(&count).Error if err != nil { t.Errorf("Expected to get no errors, but got %v", err) } if count != 1 { t.Errorf("Row count must be 1, instead got %d", count) } } func TestQueryBuilderSubselectInHaving(t *testing.T) { user := User{Name: "query_expr_having_ruser1", Email: "[email protected]", Age: 64} DB.Save(&user) user = User{Name: "query_expr_having_ruser2", Email: "[email protected]", Age: 128} DB.Save(&user) user = User{Name: "query_expr_having_ruser3", Email: "[email protected]", Age: 64} DB.Save(&user) user = User{Name: "query_expr_having_ruser4", Email: "[email protected]", Age: 128} DB.Save(&user) var users []User DB.Select("AVG(age) as avgage").Where("name LIKE ?", "query_expr_having_%").Group("email").Having("AVG(age) > (?)", DB. Select("AVG(age)").Where("name LIKE ?", "query_expr_having_%").Table("users").QueryExpr()).Find(&users) if len(users) != 1 { t.Errorf("Two user group should be found, instead found %d", len(users)) } } func DialectHasTzSupport() bool { // NB: mssql and FoundationDB do not support time zones. if dialect := os.Getenv("GORM_DIALECT"); dialect == "foundation" { return false } return true } func TestTimeWithZone(t *testing.T) { var format = "2006-01-02 15:04:05 -0700" var times []time.Time GMT8, _ := time.LoadLocation("Asia/Shanghai") times = append(times, time.Date(2013, 02, 19, 1, 51, 49, 123456789, GMT8)) times = append(times, time.Date(2013, 02, 18, 17, 51, 49, 123456789, time.UTC)) for index, vtime := range times { name := "time_with_zone_" + strconv.Itoa(index) user := User{Name: name, Birthday: &vtime} if !DialectHasTzSupport() { // If our driver dialect doesn't support TZ's, just use UTC for everything here. utcBirthday := user.Birthday.UTC() user.Birthday = &utcBirthday } DB.Save(&user) expectedBirthday := "2013-02-18 17:51:49 +0000" foundBirthday := user.Birthday.UTC().Format(format) if foundBirthday != expectedBirthday { t.Errorf("User's birthday should not be changed after save for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday) } var findUser, findUser2, findUser3 User DB.First(&findUser, "name = ?", name) foundBirthday = findUser.Birthday.UTC().Format(format) if foundBirthday != expectedBirthday { t.Errorf("User's birthday should not be changed after find for name=%s, expected bday=%+v but actual value=%+v", name, expectedBirthday, foundBirthday) } if DB.Where("id = ? 
AND birthday >= ?", findUser.Id, user.Birthday.Add(-time.Minute)).First(&findUser2).RecordNotFound() { t.Errorf("User should be found") } if !DB.Where("id = ? AND birthday >= ?", findUser.Id, user.Birthday.Add(time.Minute)).First(&findUser3).RecordNotFound() { t.Errorf("User should not be found") } } } func TestHstore(t *testing.T) { type Details struct { Id int64 Bulk postgres.Hstore } if dialect := os.Getenv("GORM_DIALECT"); dialect != "postgres" { t.Skip() } if err := DB.Exec("CREATE EXTENSION IF NOT EXISTS hstore").Error; err != nil { fmt.Println("\033[31mHINT: Must be superuser to create hstore extension (ALTER USER gorm WITH SUPERUSER;)\033[0m") panic(fmt.Sprintf("No error should happen when create hstore extension, but got %+v", err)) } DB.Exec("drop table details") if err := DB.CreateTable(&Details{}).Error; err != nil { panic(fmt.Sprintf("No error should happen when create table, but got %+v", err)) } bankAccountId, phoneNumber, opinion := "123456", "14151321232", "sharkbait" bulk := map[string]*string{ "bankAccountId": &bankAccountId, "phoneNumber": &phoneNumber, "opinion": &opinion, } d := Details{Bulk: bulk} DB.Save(&d) var d2 Details if err := DB.First(&d2).Error; err != nil { t.Errorf("Got error when tried to fetch details: %+v", err) } for k := range bulk { if r, ok := d2.Bulk[k]; ok { if res, _ := bulk[k]; *res != *r { t.Errorf("Details should be equal") } } else { t.Errorf("Details should be existed") } } } func TestSetAndGet(t *testing.T) { if value, ok := DB.Set("hello", "world").Get("hello"); !ok { t.Errorf("Should be able to get setting after set") } else { if value.(string) != "world" { t.Errorf("Setted value should not be changed") } } if _, ok := DB.Get("non_existing"); ok { t.Errorf("Get non existing key should return error") } } func TestCompatibilityMode(t *testing.T) { DB, _ := gorm.Open("testdb", "") testdb.SetQueryFunc(func(query string) (driver.Rows, error) { columns := []string{"id", "name", "age"} result := ` 1,Tim,20 2,Joe,25 3,Bob,30 ` return testdb.RowsFromCSVString(columns, result), nil }) var users []User DB.Find(&users) if (users[0].Name != "Tim") || len(users) != 3 { t.Errorf("Unexcepted result returned") } } func TestOpenExistingDB(t *testing.T) { DB.Save(&User{Name: "jnfeinstein"}) dialect := os.Getenv("GORM_DIALECT") db, err := gorm.Open(dialect, DB.DB()) if err != nil { t.Errorf("Should have wrapped the existing DB connection") } var user User if db.Where("name = ?", "jnfeinstein").First(&user).Error == gorm.ErrRecordNotFound { t.Errorf("Should have found existing record") } } func TestDdlErrors(t *testing.T) { var err error if err = DB.Close(); err != nil { t.Errorf("Closing DDL test db connection err=%s", err) } defer func() { // Reopen DB connection. 
if DB, err = OpenTestConnection(); err != nil { t.Fatalf("Failed re-opening db connection: %s", err) } }() if err := DB.Find(&User{}).Error; err == nil { t.Errorf("Expected operation on closed db to produce an error, but err was nil") } } func TestOpenWithOneParameter(t *testing.T) { db, err := gorm.Open("dialect") if db != nil { t.Error("Open with one parameter returned non nil for db") } if err == nil { t.Error("Open with one parameter returned err as nil") } } func TestSaveAssociations(t *testing.T) { db := DB.New() deltaAddressCount := 0 if err := db.Model(&Address{}).Count(&deltaAddressCount).Error; err != nil { t.Errorf("failed to fetch address count") t.FailNow() } placeAddress := &Address{ Address1: "somewhere on earth", } ownerAddress1 := &Address{ Address1: "near place address", } ownerAddress2 := &Address{ Address1: "address2", } db.Create(placeAddress) addressCountShouldBe := func(t *testing.T, expectedCount int) { countFromDB := 0 t.Helper() err := db.Model(&Address{}).Count(&countFromDB).Error if err != nil { t.Error("failed to fetch address count") } if countFromDB != expectedCount { t.Errorf("address count mismatch: %d", countFromDB) } } addressCountShouldBe(t, deltaAddressCount+1) // owner address should be created, place address should be reused place1 := &Place{ PlaceAddressID: placeAddress.ID, PlaceAddress: placeAddress, OwnerAddress: ownerAddress1, } err := db.Create(place1).Error if err != nil { t.Errorf("failed to store place: %s", err.Error()) } addressCountShouldBe(t, deltaAddressCount+2) // owner address should be created again, place address should be reused place2 := &Place{ PlaceAddressID: placeAddress.ID, PlaceAddress: &Address{ ID: 777, Address1: "address1", }, OwnerAddress: ownerAddress2, OwnerAddressID: 778, } err = db.Create(place2).Error if err != nil { t.Errorf("failed to store place: %s", err.Error()) } addressCountShouldBe(t, deltaAddressCount+3) count := 0 db.Model(&Place{}).Where(&Place{ PlaceAddressID: placeAddress.ID, OwnerAddressID: ownerAddress1.ID, }).Count(&count) if count != 1 { t.Errorf("only one instance of (%d, %d) should be available, found: %d", placeAddress.ID, ownerAddress1.ID, count) } db.Model(&Place{}).Where(&Place{ PlaceAddressID: placeAddress.ID, OwnerAddressID: ownerAddress2.ID, }).Count(&count) if count != 1 { t.Errorf("only one instance of (%d, %d) should be available, found: %d", placeAddress.ID, ownerAddress2.ID, count) } db.Model(&Place{}).Where(&Place{ PlaceAddressID: placeAddress.ID, }).Count(&count) if count != 2 { t.Errorf("two instances of (%d) should be available, found: %d", placeAddress.ID, count) } } func TestBlockGlobalUpdate(t *testing.T) { db := DB.New() db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"}) err := db.Model(&Toy{}).Update("OwnerType", "Human").Error if err != nil { t.Error("Unexpected error on global update") } err = db.Delete(&Toy{}).Error if err != nil { t.Error("Unexpected error on global delete") } db.BlockGlobalUpdate(true) db.Create(&Toy{Name: "Stuffed Animal", OwnerType: "Nobody"}) err = db.Model(&Toy{}).Update("OwnerType", "Human").Error if err == nil { t.Error("Expected error on global update") } err = db.Model(&Toy{}).Where(&Toy{OwnerType: "Martian"}).Update("OwnerType", "Astronaut").Error if err != nil { t.Error("Unxpected error on conditional update") } err = db.Delete(&Toy{}).Error if err == nil { t.Error("Expected error on global delete") } err = db.Where(&Toy{OwnerType: "Martian"}).Delete(&Toy{}).Error if err != nil { t.Error("Unexpected error on conditional delete") } } func 
TestCountWithHaving(t *testing.T) { db := DB.New() db.Delete(User{}) defer db.Delete(User{}) DB.Create(getPreparedUser("user1", "pluck_user")) DB.Create(getPreparedUser("user2", "pluck_user")) user3 := getPreparedUser("user3", "pluck_user") user3.Languages = []Language{} DB.Create(user3) var count int err := db.Model(User{}).Select("users.id"). Joins("LEFT JOIN user_languages ON user_languages.user_id = users.id"). Joins("LEFT JOIN languages ON user_languages.language_id = languages.id"). Group("users.id").Having("COUNT(languages.id) > 1").Count(&count).Error if err != nil { t.Error("Unexpected error on query count with having") } if count != 2 { t.Error("Unexpected result on query count with having") } } func TestPluck(t *testing.T) { db := DB.New() db.Delete(User{}) defer db.Delete(User{}) DB.Create(&User{Id: 1, Name: "user1"}) DB.Create(&User{Id: 2, Name: "user2"}) DB.Create(&User{Id: 3, Name: "user3"}) var ids []int64 err := db.Model(User{}).Order("id").Pluck("id", &ids).Error if err != nil { t.Error("Unexpected error on pluck") } if len(ids) != 3 || ids[0] != 1 || ids[1] != 2 || ids[2] != 3 { t.Error("Unexpected result on pluck") } err = db.Model(User{}).Order("id").Pluck("id", &ids).Error if err != nil { t.Error("Unexpected error on pluck again") } if len(ids) != 3 || ids[0] != 1 || ids[1] != 2 || ids[2] != 3 { t.Error("Unexpected result on pluck again") } } func TestCountWithQueryOption(t *testing.T) { db := DB.New() db.Delete(User{}) defer db.Delete(User{}) DB.Create(&User{Name: "user1"}) DB.Create(&User{Name: "user2"}) DB.Create(&User{Name: "user3"}) var count int err := db.Model(User{}).Select("users.id"). Set("gorm:query_option", "WHERE users.name='user2'"). Count(&count).Error if err != nil { t.Error("Unexpected error on query count with query_option") } if count != 1 { t.Error("Unexpected result on query count with query_option") } } func TestSubQueryWithQueryOption(t *testing.T) { db := DB.New() subQuery := db.Model(User{}).Select("users.id"). Set("gorm:query_option", "WHERE users.name='user2'"). SubQuery() matched, _ := regexp.MatchString( `^&{.+\s+WHERE users\.name='user2'.*\s\[]}$`, fmt.Sprint(subQuery)) if !matched { t.Error("Unexpected result of SubQuery with query_option") } } func TestQueryExprWithQueryOption(t *testing.T) { db := DB.New() queryExpr := db.Model(User{}).Select("users.id"). Set("gorm:query_option", "WHERE users.name='user2'"). 
QueryExpr() matched, _ := regexp.MatchString( `^&{.+\s+WHERE users\.name='user2'.*\s\[]}$`, fmt.Sprint(queryExpr)) if !matched { t.Error("Unexpected result of QueryExpr with query_option") } } func TestQueryHint1(t *testing.T) { db := DB.New() _, err := db.Model(User{}).Raw("select 1").Rows() if err != nil { t.Error("Unexpected error on query count with query_option") } } func TestQueryHint2(t *testing.T) { type TestStruct struct { ID string `gorm:"primary_key"` Name string } DB.DropTable(&TestStruct{}) DB.AutoMigrate(&TestStruct{}) data := TestStruct{ID: "uuid", Name: "hello"} if err := DB.Set("gorm:query_hint", "/*master*/").Save(&data).Error; err != nil { t.Error("Unexpected error on query count with query_option") } } func TestFloatColumnPrecision(t *testing.T) { if dialect := os.Getenv("GORM_DIALECT"); dialect != "mysql" && dialect != "sqlite" { t.Skip() } type FloatTest struct { ID string `gorm:"primary_key"` FloatValue float64 `gorm:"column:float_value" sql:"type:float(255,5);"` } DB.DropTable(&FloatTest{}) DB.AutoMigrate(&FloatTest{}) data := FloatTest{ID: "uuid", FloatValue: 112.57315} if err := DB.Save(&data).Error; err != nil || data.ID != "uuid" || data.FloatValue != 112.57315 { t.Errorf("Float value should not lose precision") } } func TestWhereUpdates(t *testing.T) { type OwnerEntity struct { gorm.Model OwnerID uint OwnerType string } type SomeEntity struct { gorm.Model Name string OwnerEntity OwnerEntity `gorm:"polymorphic:Owner"` } DB.DropTable(&SomeEntity{}) DB.AutoMigrate(&SomeEntity{}) a := SomeEntity{Name: "test"} DB.Model(&a).Where(a).Updates(SomeEntity{Name: "test2"}) } func BenchmarkGorm(b *testing.B) { for x := 0; x < b.N; x++ { e := strconv.Itoa(x) + "[email protected]" now := time.Now() email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now} // Insert DB.Save(&email) // Query DB.First(&EmailWithIdx{}, "email = ?", e) // Update DB.Model(&email).UpdateColumn("email", "new-"+e) // Delete DB.Delete(&email) } } func BenchmarkRawSql(b *testing.B) { DB, _ := sql.Open("postgres", "user=gorm DB.ame=gorm sslmode=disable") DB.SetMaxIdleConns(10) insertSql := "INSERT INTO emails (user_id,email,user_agent,registered_at,created_at,updated_at) VALUES ($1,$2,$3,$4,$5,$6) RETURNING id" querySql := "SELECT * FROM emails WHERE email = $1 ORDER BY id LIMIT 1" updateSql := "UPDATE emails SET email = $1, updated_at = $2 WHERE id = $3" deleteSql := "DELETE FROM orders WHERE id = $1" for x := 0; x < b.N; x++ { var id int64 e := strconv.Itoa(x) + "[email protected]" now := time.Now() email := EmailWithIdx{Email: e, UserAgent: "pc", RegisteredAt: &now} // Insert DB.QueryRow(insertSql, email.UserId, email.Email, email.UserAgent, email.RegisteredAt, time.Now(), time.Now()).Scan(&id) // Query rows, _ := DB.Query(querySql, email.Email) rows.Close() // Update DB.Exec(updateSql, "new-"+e, time.Now(), id) // Delete DB.Exec(deleteSql, id) } } func parseTime(str string) *time.Time { t := now.New(time.Now().UTC()).MustParse(str) return &t }
[ "\"GORM_DSN\"", "\"GORM_DIALECT\"", "\"DEBUG\"", "\"GORM_DIALECT\"", "\"GORM_DIALECT\"", "\"GORM_DIALECT\"", "\"GORM_DIALECT\"", "\"GORM_DIALECT\"" ]
[]
[ "GORM_DIALECT", "GORM_DSN", "DEBUG" ]
[]
["GORM_DIALECT", "GORM_DSN", "DEBUG"]
go
3
0
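
The OpenTestConnection function in the Go test file above selects the database driver and DSN from the GORM_DIALECT and GORM_DSN environment variables, falls back to per-dialect defaults, and toggles query logging via DEBUG. A compact sketch of that selection logic in Python, purely for illustration and not part of the Go test suite; the default DSN strings are copied from the code above, and the sqlite fallback path mirrors the temporary-directory default.

import os
import tempfile

DEFAULT_DSN = {
    "mysql": "gorm:gorm@tcp(localhost:9910)/gorm?charset=utf8&parseTime=True",
    "postgres": "user=gorm password=gorm dbname=gorm port=9920 sslmode=disable",
    "mssql": "sqlserver://gorm:LoremIpsum86@localhost:9930?database=gorm",
}

def test_connection_settings():
    """Mirror the dialect/DSN/debug selection used by the Go tests (sketch)."""
    dialect = os.environ.get("GORM_DIALECT", "") or "sqlite3"
    dsn = os.environ.get("GORM_DSN", "") or DEFAULT_DSN.get(
        dialect, os.path.join(tempfile.gettempdir(), "gorm.db"))
    debug = os.environ.get("DEBUG", "")  # "true" enables logging, "false" disables it
    return dialect, dsn, debug == "true"

print(test_connection_settings())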
vendor/github.com/containers/libpod/libpod/runtime.go
package libpod import ( "context" "fmt" "os" "path/filepath" "strings" "sync" "syscall" "github.com/containers/common/pkg/config" is "github.com/containers/image/v5/storage" "github.com/containers/image/v5/types" "github.com/containers/libpod/libpod/define" "github.com/containers/libpod/libpod/events" "github.com/containers/libpod/libpod/image" "github.com/containers/libpod/libpod/lock" "github.com/containers/libpod/pkg/cgroups" "github.com/containers/libpod/pkg/rootless" "github.com/containers/libpod/pkg/util" "github.com/containers/storage" "github.com/cri-o/ocicni/pkg/ocicni" "github.com/docker/docker/pkg/namesgenerator" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) // A RuntimeOption is a functional option which alters the Runtime created by // NewRuntime type RuntimeOption func(*Runtime) error type storageSet struct { RunRootSet bool GraphRootSet bool StaticDirSet bool VolumePathSet bool GraphDriverNameSet bool TmpDirSet bool } // Runtime is the core libpod runtime type Runtime struct { config *config.Config storageConfig storage.StoreOptions storageSet storageSet state State store storage.Store storageService *storageService imageContext *types.SystemContext defaultOCIRuntime OCIRuntime ociRuntimes map[string]OCIRuntime netPlugin ocicni.CNIPlugin conmonPath string imageRuntime *image.Runtime lockManager lock.Manager // doRenumber indicates that the runtime should perform a lock renumber // during initialization. // Once the runtime has been initialized and returned, this variable is // unused. doRenumber bool doMigrate bool // System migrate can move containers to a new runtime. // We make no promises that these migrated containers work on the new // runtime, though. migrateRuntime string // valid indicates whether the runtime is ready to use. // valid is set to true when a runtime is returned from GetRuntime(), // and remains true until the runtime is shut down (rendering its // storage unusable). When valid is false, the runtime cannot be used. valid bool lock sync.RWMutex // mechanism to read and write even logs eventer events.Eventer // noStore indicates whether we need to interact with a store or not noStore bool } // SetXdgDirs ensures the XDG_RUNTIME_DIR env and XDG_CONFIG_HOME variables are set. // containers/image uses XDG_RUNTIME_DIR to locate the auth file, XDG_CONFIG_HOME is // use for the libpod.conf configuration file. 
func SetXdgDirs() error { if !rootless.IsRootless() { return nil } // Setup XDG_RUNTIME_DIR runtimeDir := os.Getenv("XDG_RUNTIME_DIR") if runtimeDir == "" { var err error runtimeDir, err = util.GetRuntimeDir() if err != nil { return err } } if err := os.Setenv("XDG_RUNTIME_DIR", runtimeDir); err != nil { return errors.Wrapf(err, "cannot set XDG_RUNTIME_DIR") } if rootless.IsRootless() && os.Getenv("DBUS_SESSION_BUS_ADDRESS") == "" { sessionAddr := filepath.Join(runtimeDir, "bus") if _, err := os.Stat(sessionAddr); err == nil { os.Setenv("DBUS_SESSION_BUS_ADDRESS", fmt.Sprintf("unix:path=%s", sessionAddr)) } } // Setup XDG_CONFIG_HOME if cfgHomeDir := os.Getenv("XDG_CONFIG_HOME"); cfgHomeDir == "" { cfgHomeDir, err := util.GetRootlessConfigHomeDir() if err != nil { return err } if err := os.Setenv("XDG_CONFIG_HOME", cfgHomeDir); err != nil { return errors.Wrapf(err, "cannot set XDG_CONFIG_HOME") } } return nil } // NewRuntime creates a new container runtime // Options can be passed to override the default configuration for the runtime func NewRuntime(ctx context.Context, options ...RuntimeOption) (runtime *Runtime, err error) { conf, err := config.NewConfig("") if err != nil { return nil, err } runtime, err = newRuntimeFromConfig(ctx, conf, options...) conf.CheckCgroupsAndAdjustConfig() return runtime, err } // NewRuntimeFromConfig creates a new container runtime using the given // configuration file for its default configuration. Passed RuntimeOption // functions can be used to mutate this configuration further. // An error will be returned if the configuration file at the given path does // not exist or cannot be loaded func NewRuntimeFromConfig(ctx context.Context, userConfig *config.Config, options ...RuntimeOption) (runtime *Runtime, err error) { return newRuntimeFromConfig(ctx, userConfig, options...) } func newRuntimeFromConfig(ctx context.Context, conf *config.Config, options ...RuntimeOption) (runtime *Runtime, err error) { runtime = new(Runtime) if conf.Engine.OCIRuntime == "" { conf.Engine.OCIRuntime = "runc" // If we're running on cgroups v2, default to using crun. 
if onCgroupsv2, _ := cgroups.IsCgroup2UnifiedMode(); onCgroupsv2 { conf.Engine.OCIRuntime = "crun" } } runtime.config = conf storeOpts, err := storage.DefaultStoreOptions(rootless.IsRootless(), rootless.GetRootlessUID()) if err != nil { return nil, err } runtime.storageConfig = storeOpts // Overwrite config with user-given configuration options for _, opt := range options { if err := opt(runtime); err != nil { return nil, errors.Wrapf(err, "error configuring runtime") } } if err := makeRuntime(ctx, runtime); err != nil { return nil, err } return runtime, nil } func getLockManager(runtime *Runtime) (lock.Manager, error) { var err error var manager lock.Manager switch runtime.config.Engine.LockType { case "file": lockPath := filepath.Join(runtime.config.Engine.TmpDir, "locks") manager, err = lock.OpenFileLockManager(lockPath) if err != nil { if os.IsNotExist(errors.Cause(err)) { manager, err = lock.NewFileLockManager(lockPath) if err != nil { return nil, errors.Wrapf(err, "failed to get new file lock manager") } } else { return nil, err } } case "", "shm": lockPath := define.DefaultSHMLockPath if rootless.IsRootless() { lockPath = fmt.Sprintf("%s_%d", define.DefaultRootlessSHMLockPath, rootless.GetRootlessUID()) } // Set up the lock manager manager, err = lock.OpenSHMLockManager(lockPath, runtime.config.Engine.NumLocks) if err != nil { switch { case os.IsNotExist(errors.Cause(err)): manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks) if err != nil { return nil, errors.Wrapf(err, "failed to get new shm lock manager") } case errors.Cause(err) == syscall.ERANGE && runtime.doRenumber: logrus.Debugf("Number of locks does not match - removing old locks") // ERANGE indicates a lock numbering mismatch. // Since we're renumbering, this is not fatal. // Remove the earlier set of locks and recreate. if err := os.Remove(filepath.Join("/dev/shm", lockPath)); err != nil { return nil, errors.Wrapf(err, "error removing libpod locks file %s", lockPath) } manager, err = lock.NewSHMLockManager(lockPath, runtime.config.Engine.NumLocks) if err != nil { return nil, err } default: return nil, err } } default: return nil, errors.Wrapf(define.ErrInvalidArg, "unknown lock type %s", runtime.config.Engine.LockType) } return manager, nil } // Make a new runtime based on the given configuration // Sets up containers/storage, state store, OCI runtime func makeRuntime(ctx context.Context, runtime *Runtime) (err error) { // Find a working conmon binary cPath, err := runtime.config.FindConmon() if err != nil { return err } runtime.conmonPath = cPath // Make the static files directory if it does not exist if err := os.MkdirAll(runtime.config.Engine.StaticDir, 0700); err != nil { // The directory is allowed to exist if !os.IsExist(err) { return errors.Wrapf(err, "error creating runtime static files directory %s", runtime.config.Engine.StaticDir) } } // Set up the state. // // TODO - if we further break out the state implementation into // libpod/state, the config could take care of the code below. It // would further allow to move the types and consts into a coherent // package. 
switch runtime.config.Engine.StateType { case config.InMemoryStateStore: state, err := NewInMemoryState() if err != nil { return err } runtime.state = state case config.SQLiteStateStore: return errors.Wrapf(define.ErrInvalidArg, "SQLite state is currently disabled") case config.BoltDBStateStore: dbPath := filepath.Join(runtime.config.Engine.StaticDir, "bolt_state.db") state, err := NewBoltState(dbPath, runtime) if err != nil { return err } runtime.state = state default: return errors.Wrapf(define.ErrInvalidArg, "unrecognized state type passed (%v)", runtime.config.Engine.StateType) } // Grab config from the database so we can reset some defaults dbConfig, err := runtime.state.GetDBConfig() if err != nil { return errors.Wrapf(err, "error retrieving runtime configuration from database") } if err := runtime.mergeDBConfig(dbConfig); err != nil { return errors.Wrapf(err, "error merging database config into runtime config") } logrus.Debugf("Using graph driver %s", runtime.storageConfig.GraphDriverName) logrus.Debugf("Using graph root %s", runtime.storageConfig.GraphRoot) logrus.Debugf("Using run root %s", runtime.storageConfig.RunRoot) logrus.Debugf("Using static dir %s", runtime.config.Engine.StaticDir) logrus.Debugf("Using tmp dir %s", runtime.config.Engine.TmpDir) logrus.Debugf("Using volume path %s", runtime.config.Engine.VolumePath) // Validate our config against the database, now that we've set our // final storage configuration if err := runtime.state.ValidateDBConfig(runtime); err != nil { return err } if err := runtime.state.SetNamespace(runtime.config.Engine.Namespace); err != nil { return errors.Wrapf(err, "error setting libpod namespace in state") } logrus.Debugf("Set libpod namespace to %q", runtime.config.Engine.Namespace) // Set up containers/storage var store storage.Store if os.Geteuid() != 0 { logrus.Debug("Not configuring container store") } else if runtime.noStore { logrus.Debug("No store required. Not opening container store.") } else if err := runtime.configureStore(); err != nil { return err } defer func() { if err != nil && store != nil { // Don't forcibly shut down // We could be opening a store in use by another libpod _, err2 := store.Shutdown(false) if err2 != nil { logrus.Errorf("Error removing store for partially-created runtime: %s", err2) } } }() // Setup the eventer eventer, err := runtime.newEventer() if err != nil { return err } runtime.eventer = eventer if runtime.imageRuntime != nil { runtime.imageRuntime.Eventer = eventer } // Set up containers/image runtime.imageContext = &types.SystemContext{ SignaturePolicyPath: runtime.config.Engine.SignaturePolicyPath, } // Create the tmpDir if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0751); err != nil { // The directory is allowed to exist if !os.IsExist(err) { return errors.Wrapf(err, "error creating tmpdir %s", runtime.config.Engine.TmpDir) } } // Create events log dir if err := os.MkdirAll(filepath.Dir(runtime.config.Engine.EventsLogFilePath), 0700); err != nil { // The directory is allowed to exist if !os.IsExist(err) { return errors.Wrapf(err, "error creating events dirs %s", filepath.Dir(runtime.config.Engine.EventsLogFilePath)) } } // Make lookup tables for runtime support supportsJSON := make(map[string]bool) supportsNoCgroups := make(map[string]bool) for _, r := range runtime.config.Engine.RuntimeSupportsJSON { supportsJSON[r] = true } for _, r := range runtime.config.Engine.RuntimeSupportsNoCgroups { supportsNoCgroups[r] = true } // Get us at least one working OCI runtime. 
runtime.ociRuntimes = make(map[string]OCIRuntime) // Initialize remaining OCI runtimes for name, paths := range runtime.config.Engine.OCIRuntimes { json := supportsJSON[name] nocgroups := supportsNoCgroups[name] ociRuntime, err := newConmonOCIRuntime(name, paths, runtime.conmonPath, runtime.config, json, nocgroups) if err != nil { // Don't fatally error. // This will allow us to ship configs including optional // runtimes that might not be installed (crun, kata). // Only a warnf so default configs don't spec errors. logrus.Warnf("Error initializing configured OCI runtime %s: %v", name, err) continue } runtime.ociRuntimes[name] = ociRuntime } // Do we have a default OCI runtime? if runtime.config.Engine.OCIRuntime != "" { // If the string starts with / it's a path to a runtime // executable. if strings.HasPrefix(runtime.config.Engine.OCIRuntime, "/") { name := filepath.Base(runtime.config.Engine.OCIRuntime) json := supportsJSON[name] nocgroups := supportsNoCgroups[name] ociRuntime, err := newConmonOCIRuntime(name, []string{runtime.config.Engine.OCIRuntime}, runtime.conmonPath, runtime.config, json, nocgroups) if err != nil { return err } runtime.ociRuntimes[name] = ociRuntime runtime.defaultOCIRuntime = ociRuntime } else { ociRuntime, ok := runtime.ociRuntimes[runtime.config.Engine.OCIRuntime] if !ok { return errors.Wrapf(define.ErrInvalidArg, "default OCI runtime %q not found", runtime.config.Engine.OCIRuntime) } runtime.defaultOCIRuntime = ociRuntime } } // Do we have at least one valid OCI runtime? if len(runtime.ociRuntimes) == 0 { return errors.Wrapf(define.ErrInvalidArg, "no OCI runtime has been configured") } // Do we have a default runtime? if runtime.defaultOCIRuntime == nil { return errors.Wrapf(define.ErrInvalidArg, "no default OCI runtime was configured") } // Make the per-boot files directory if it does not exist if err := os.MkdirAll(runtime.config.Engine.TmpDir, 0755); err != nil { // The directory is allowed to exist if !os.IsExist(err) { return errors.Wrapf(err, "error creating runtime temporary files directory %s", runtime.config.Engine.TmpDir) } } // Set up the CNI net plugin if !rootless.IsRootless() { netPlugin, err := ocicni.InitCNI(runtime.config.Network.DefaultNetwork, runtime.config.Network.NetworkConfigDir, runtime.config.Network.CNIPluginDirs...) if err != nil { return errors.Wrapf(err, "error configuring CNI network plugin") } runtime.netPlugin = netPlugin } // We now need to see if the system has restarted // We check for the presence of a file in our tmp directory to verify this // This check must be locked to prevent races runtimeAliveLock := filepath.Join(runtime.config.Engine.TmpDir, "alive.lck") runtimeAliveFile := filepath.Join(runtime.config.Engine.TmpDir, "alive") aliveLock, err := storage.GetLockfile(runtimeAliveLock) if err != nil { return errors.Wrapf(err, "error acquiring runtime init lock") } // Acquire the lock and hold it until we return // This ensures that no two processes will be in runtime.refresh at once // TODO: we can't close the FD in this lock, so we should keep it around // and use it to lock important operations aliveLock.Lock() doRefresh := false defer func() { if aliveLock.Locked() { aliveLock.Unlock() } }() _, err = os.Stat(runtimeAliveFile) if err != nil { // If we need to refresh, then it is safe to assume there are // no containers running. Create immediately a namespace, as // we will need to access the storage. if os.Geteuid() != 0 { aliveLock.Unlock() // Unlock to avoid deadlock as BecomeRootInUserNS will reexec. 
pausePid, err := util.GetRootlessPauseProcessPidPath() if err != nil { return errors.Wrapf(err, "could not get pause process pid file path") } became, ret, err := rootless.BecomeRootInUserNS(pausePid) if err != nil { return err } if became { os.Exit(ret) } } // If the file doesn't exist, we need to refresh the state // This will trigger on first use as well, but refreshing an // empty state only creates a single file // As such, it's not really a performance concern if os.IsNotExist(err) { doRefresh = true } else { return errors.Wrapf(err, "error reading runtime status file %s", runtimeAliveFile) } } runtime.lockManager, err = getLockManager(runtime) if err != nil { return err } // If we're renumbering locks, do it now. // It breaks out of normal runtime init, and will not return a valid // runtime. if runtime.doRenumber { if err := runtime.renumberLocks(); err != nil { return err } } // If we need to refresh the state, do it now - things are guaranteed to // be set up by now. if doRefresh { // Ensure we have a store before refresh occurs if runtime.store == nil { if err := runtime.configureStore(); err != nil { return err } } if err2 := runtime.refresh(runtimeAliveFile); err2 != nil { return err2 } } // Mark the runtime as valid - ready to be used, cannot be modified // further runtime.valid = true if runtime.doMigrate { if err := runtime.migrate(ctx); err != nil { return err } } return nil } // GetConfig returns a copy of the configuration used by the runtime func (r *Runtime) GetConfig() (*config.Config, error) { r.lock.RLock() defer r.lock.RUnlock() if !r.valid { return nil, define.ErrRuntimeStopped } config := new(config.Config) // Copy so the caller won't be able to modify the actual config if err := JSONDeepCopy(r.config, config); err != nil { return nil, errors.Wrapf(err, "error copying config") } return config, nil } // DeferredShutdown shuts down the runtime without exposing any // errors. 
This is only meant to be used when the runtime is being // shutdown within a defer statement; else use Shutdown func (r *Runtime) DeferredShutdown(force bool) { _ = r.Shutdown(force) } // Shutdown shuts down the runtime and associated containers and storage // If force is true, containers and mounted storage will be shut down before // cleaning up; if force is false, an error will be returned if there are // still containers running or mounted func (r *Runtime) Shutdown(force bool) error { r.lock.Lock() defer r.lock.Unlock() if !r.valid { return define.ErrRuntimeStopped } r.valid = false // Shutdown all containers if --force is given if force { ctrs, err := r.state.AllContainers() if err != nil { logrus.Errorf("Error retrieving containers from database: %v", err) } else { for _, ctr := range ctrs { if err := ctr.StopWithTimeout(r.config.Engine.StopTimeout); err != nil { logrus.Errorf("Error stopping container %s: %v", ctr.ID(), err) } } } } var lastError error // If no store was requested, it can bew nil and there is no need to // attempt to shut it down if r.store != nil { if _, err := r.store.Shutdown(force); err != nil { lastError = errors.Wrapf(err, "Error shutting down container storage") } } if err := r.state.Close(); err != nil { if lastError != nil { logrus.Errorf("%v", lastError) } lastError = err } return lastError } // Reconfigures the runtime after a reboot // Refreshes the state, recreating temporary files // Does not check validity as the runtime is not valid until after this has run func (r *Runtime) refresh(alivePath string) error { logrus.Debugf("Podman detected system restart - performing state refresh") // First clear the state in the database if err := r.state.Refresh(); err != nil { return err } // Next refresh the state of all containers to recreate dirs and // namespaces, and all the pods to recreate cgroups. // Containers, pods, and volumes must also reacquire their locks. ctrs, err := r.state.AllContainers() if err != nil { return errors.Wrapf(err, "error retrieving all containers from state") } pods, err := r.state.AllPods() if err != nil { return errors.Wrapf(err, "error retrieving all pods from state") } vols, err := r.state.AllVolumes() if err != nil { return errors.Wrapf(err, "error retrieving all volumes from state") } // No locks are taken during pod, volume, and container refresh. // Furthermore, the pod/volume/container refresh() functions are not // allowed to take locks themselves. // We cannot assume that any pod/volume/container has a valid lock until // after this function has returned. // The runtime alive lock should suffice to provide mutual exclusion // until this has run. for _, ctr := range ctrs { if err := ctr.refresh(); err != nil { logrus.Errorf("Error refreshing container %s: %v", ctr.ID(), err) } } for _, pod := range pods { if err := pod.refresh(); err != nil { logrus.Errorf("Error refreshing pod %s: %v", pod.ID(), err) } } for _, vol := range vols { if err := vol.refresh(); err != nil { logrus.Errorf("Error refreshing volume %s: %v", vol.Name(), err) } } // Create a file indicating the runtime is alive and ready file, err := os.OpenFile(alivePath, os.O_RDONLY|os.O_CREATE, 0644) if err != nil { return errors.Wrapf(err, "error creating runtime status file %s", alivePath) } defer file.Close() r.newSystemEvent(events.Refresh) return nil } // Info returns the store and host information func (r *Runtime) Info() (*define.Info, error) { return r.info() } // generateName generates a unique name for a container or pod. 
func (r *Runtime) generateName() (string, error) { for { name := namesgenerator.GetRandomName(0) // Make sure container with this name does not exist if _, err := r.state.LookupContainer(name); err == nil { continue } else if errors.Cause(err) != define.ErrNoSuchCtr { return "", err } // Make sure pod with this name does not exist if _, err := r.state.LookupPod(name); err == nil { continue } else if errors.Cause(err) != define.ErrNoSuchPod { return "", err } return name, nil } // The code should never reach here. } // Configure store and image runtime func (r *Runtime) configureStore() error { store, err := storage.GetStore(r.storageConfig) if err != nil { return err } r.store = store is.Transport.SetStore(store) // Set up a storage service for creating container root filesystems from // images storageService, err := getStorageService(r.store) if err != nil { return err } r.storageService = storageService ir := image.NewImageRuntimeFromStore(r.store) ir.SignaturePolicyPath = r.config.Engine.SignaturePolicyPath ir.EventsLogFilePath = r.config.Engine.EventsLogFilePath ir.EventsLogger = r.config.Engine.EventsLogger r.imageRuntime = ir return nil } // ImageRuntime returns the imageruntime for image operations. // If WithNoStore() was used, no image runtime will be available, and this // function will return nil. func (r *Runtime) ImageRuntime() *image.Runtime { return r.imageRuntime } // SystemContext returns the imagecontext func (r *Runtime) SystemContext() *types.SystemContext { return r.imageContext } // GetOCIRuntimePath retrieves the path of the default OCI runtime. func (r *Runtime) GetOCIRuntimePath() string { return r.defaultOCIRuntime.Path() } // StorageConfig retrieves the storage options for the container runtime func (r *Runtime) StorageConfig() storage.StoreOptions { return r.storageConfig } // DBConfig is a set of Libpod runtime configuration settings that are saved in // a State when it is first created, and can subsequently be retrieved. type DBConfig struct { LibpodRoot string LibpodTmp string StorageRoot string StorageTmp string GraphDriver string VolumePath string } // mergeDBConfig merges the configuration from the database. 
func (r *Runtime) mergeDBConfig(dbConfig *DBConfig) error { c := r.config.Engine if !r.storageSet.RunRootSet && dbConfig.StorageTmp != "" { if r.storageConfig.RunRoot != dbConfig.StorageTmp && r.storageConfig.RunRoot != "" { logrus.Debugf("Overriding run root %q with %q from database", r.storageConfig.RunRoot, dbConfig.StorageTmp) } r.storageConfig.RunRoot = dbConfig.StorageTmp } if !r.storageSet.GraphRootSet && dbConfig.StorageRoot != "" { if r.storageConfig.GraphRoot != dbConfig.StorageRoot && r.storageConfig.GraphRoot != "" { logrus.Debugf("Overriding graph root %q with %q from database", r.storageConfig.GraphRoot, dbConfig.StorageRoot) } r.storageConfig.GraphRoot = dbConfig.StorageRoot } if !r.storageSet.GraphDriverNameSet && dbConfig.GraphDriver != "" { if r.storageConfig.GraphDriverName != dbConfig.GraphDriver && r.storageConfig.GraphDriverName != "" { logrus.Errorf("User-selected graph driver %q overwritten by graph driver %q from database - delete libpod local files to resolve", r.storageConfig.GraphDriverName, dbConfig.GraphDriver) } r.storageConfig.GraphDriverName = dbConfig.GraphDriver } if !r.storageSet.StaticDirSet && dbConfig.LibpodRoot != "" { if c.StaticDir != dbConfig.LibpodRoot && c.StaticDir != "" { logrus.Debugf("Overriding static dir %q with %q from database", c.StaticDir, dbConfig.LibpodRoot) } c.StaticDir = dbConfig.LibpodRoot } if !r.storageSet.TmpDirSet && dbConfig.LibpodTmp != "" { if c.TmpDir != dbConfig.LibpodTmp && c.TmpDir != "" { logrus.Debugf("Overriding tmp dir %q with %q from database", c.TmpDir, dbConfig.LibpodTmp) } c.TmpDir = dbConfig.LibpodTmp c.EventsLogFilePath = filepath.Join(dbConfig.LibpodTmp, "events", "events.log") } if !r.storageSet.VolumePathSet && dbConfig.VolumePath != "" { if c.VolumePath != dbConfig.VolumePath && c.VolumePath != "" { logrus.Debugf("Overriding volume path %q with %q from database", c.VolumePath, dbConfig.VolumePath) } c.VolumePath = dbConfig.VolumePath } return nil }
[ "\"XDG_RUNTIME_DIR\"", "\"DBUS_SESSION_BUS_ADDRESS\"", "\"XDG_CONFIG_HOME\"" ]
[]
[ "XDG_RUNTIME_DIR", "DBUS_SESSION_BUS_ADDRESS", "XDG_CONFIG_HOME" ]
[]
["XDG_RUNTIME_DIR", "DBUS_SESSION_BUS_ADDRESS", "XDG_CONFIG_HOME"]
go
3
0
ndp/settings.py
""" Django settings for ndp project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/ # SECURITY WARNING: don't run with debug turned on in production! DEBUG = os.environ.get('DJANGO_DEBUG', 'true') == 'true' # SECURITY WARNING: keep the secret key used in production secret! if DEBUG: SECRET_KEY = '-r&cjf5&l80y&(q_fiidd$-u7&o$=gv)s84=2^a2$o^&9aco0o' else: SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY') # XXX set me GOOGLE_ANALYTICS_ID = 'UA-48399585-28' ALLOWED_HOSTS = ['*'] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'pipeline', 'django_extensions', 'ndp', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'ndp.urls' WSGI_APPLICATION = 'ndp.wsgi.application' SESSION_ENGINE = 'django.contrib.sessions.backends.signed_cookies' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases import dj_database_url db_config = dj_database_url.config(default='postgres://ndp:@localhost/ndp') db_config['ATOMIC_REQUESTS'] = True DATABASES = { 'default': db_config, } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Templates TEMPLATE_DEBUG = DEBUG TEMPLATE_CONTEXT_PROCESSORS = ( "django.contrib.auth.context_processors.auth", "django.core.context_processors.debug", "django.core.context_processors.i18n", "django.core.context_processors.media", "django.core.context_processors.static", "django.core.context_processors.tz", "django.core.context_processors.request", "django.contrib.messages.context_processors.messages", "ndp.context_processors.google_analytics", ) # file uploads if not DEBUG: DEFAULT_FILE_STORAGE = 'ndp.botopatch.S3Storage' AWS_S3_FILE_OVERWRITE = False AWS_ACCESS_KEY_ID = os.environ.get('AWS_ACCESS_KEY_ID') AWS_SECRET_ACCESS_KEY = os.environ.get('AWS_SECRET_ACCESS_KEY') AWS_STORAGE_BUCKET_NAME = "ndp-media" AWS_S3_HOST = "s3-eu-west-1.amazonaws.com" AWS_HEADERS = { 'Cache-Control': 'max-age=86400', } # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ ASSETS_DEBUG = DEBUG ASSETS_URL_EXPIRE = False # assets must be placed in the 'static' dir of your Django app # where the compiled assets go STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles') # the URL for assets STATIC_URL = '/static/' STATICFILES_FINDERS = ( "django.contrib.staticfiles.finders.FileSystemFinder", "django.contrib.staticfiles.finders.AppDirectoriesFinder", "pipeline.finders.PipelineFinder", ) PYSCSS_LOAD_PATHS = [ os.path.join(BASE_DIR, 'ndp', 'static'), os.path.join(BASE_DIR, 'ndp', 'static', 
'bower_components'), ] PIPELINE_CSS = { 'css': { 'source_filenames': ( 'stylesheets/fonts.css', 'bootstrap/css/bootstrap.min.css', 'stylesheets/animate.css', 'tipso/tipso.css', 'stylesheets/ndp.css', 'stylesheets/sectors.css', ), 'output_filename': 'app.css', }, } PIPELINE_JS = { 'js': { 'source_filenames': ( 'bower_components/jquery/dist/jquery.min.js', 'bootstrap/js/bootstrap.min.js', 'javascript/readmore.min.js', 'tipso/tipso.min.js', 'javascript/ndp.js', ), 'output_filename': 'app.js', }, } PIPELINE_CSS_COMPRESSOR = None PIPELINE_JS_COMPRESSOR = None PIPELINE_COMPILERS = ( 'ndp.pipeline.PyScssCompiler', ) # Simplified static file serving. # https://warehouse.python.org/project/whitenoise/ STATICFILES_STORAGE = 'ndp.pipeline.GzipManifestPipelineStorage' # Logging LOGGING = { 'version': 1, 'disable_existing_loggers': True, 'formatters': { 'simple': { 'format': '%(asctime)s %(levelname)s %(module)s %(process)d %(thread)d %(message)s' } }, 'handlers': { 'console': { 'level': 'DEBUG', 'class': 'logging.StreamHandler', 'formatter': 'simple' } }, 'root': { 'handlers': ['console'], 'level': 'ERROR' }, 'loggers': { # put any custom loggers here # 'your_package_name': { # 'level': 'DEBUG' if DEBUG else 'INFO', # }, 'django': { 'level': 'DEBUG' if DEBUG else 'INFO', } } }
[]
[]
[ "DJANGO_SECRET_KEY", "DJANGO_DEBUG", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY" ]
[]
["DJANGO_SECRET_KEY", "DJANGO_DEBUG", "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
python
4
0
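The settings module above defaults DJANGO_DEBUG to 'true' and only relies on DJANGO_SECRET_KEY when debug is off. A Go transliteration of that gate is sketched below; the names and the fail-fast log.Fatal are assumptions (the original simply leaves SECRET_KEY unset if the variable is missing), so treat this as an illustration of the pattern rather than the project's behaviour.

package example

import (
	"log"
	"os"
)

type settings struct {
	Debug     bool
	SecretKey string
}

// loadSettings mirrors the debug/secret gate: a hard-coded development key is
// acceptable while debugging, otherwise the secret must come from the
// environment. Failing fast here is an added hardening, not in the original.
func loadSettings() settings {
	debugVal := os.Getenv("DJANGO_DEBUG")
	if debugVal == "" {
		debugVal = "true" // the settings file defaults DJANGO_DEBUG to "true"
	}
	s := settings{Debug: debugVal == "true"}
	if s.Debug {
		s.SecretKey = "dev-only-not-a-secret"
		return s
	}
	key, ok := os.LookupEnv("DJANGO_SECRET_KEY")
	if !ok || key == "" {
		log.Fatal("DJANGO_SECRET_KEY must be set when DJANGO_DEBUG is not 'true'")
	}
	s.SecretKey = key
	return s
}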
tests/tests_async.py
# -*- coding: utf8 -*-
import boto3
import mock
import os
import unittest

try:
    from mock import patch
except ImportError:
    from unittest.mock import patch

from zappa.asynchronous import AsyncException, LambdaAsyncResponse, SnsAsyncResponse
from zappa.asynchronous import import_and_get_task, \
    get_func_task_path


class TestZappa(unittest.TestCase):
    def setUp(self):
        self.sleep_patch = mock.patch('time.sleep', return_value=None)
        # Tests expect us-east-1.
        # If the user has set a different region in env variables, we set it aside for now and use us-east-1
        self.users_current_region_name = os.environ.get('AWS_DEFAULT_REGION', None)
        os.environ['AWS_DEFAULT_REGION'] = 'us-east-1'
        if not os.environ.get('PLACEBO_MODE') == 'record':
            self.sleep_patch.start()

    def tearDown(self):
        if not os.environ.get('PLACEBO_MODE') == 'record':
            self.sleep_patch.stop()
        del os.environ['AWS_DEFAULT_REGION']
        if self.users_current_region_name is not None:
            # Give the user their AWS region back, we're done testing with us-east-1.
            os.environ['AWS_DEFAULT_REGION'] = self.users_current_region_name

    ##
    # Sanity Tests
    ##

    def test_test(self):
        self.assertTrue(True)
        self.assertFalse(False)

    def test_nofails_classes(self):
        boto_session = boto3.Session(region_name=os.environ['AWS_DEFAULT_REGION'])

        a = AsyncException()
        l = LambdaAsyncResponse(boto_session=boto_session)
        # s = SnsAsyncResponse()
        s = SnsAsyncResponse(arn="arn:abc:def", boto_session=boto_session)

    def test_nofails_funcs(self):
        funk = import_and_get_task("tests.test_app.async_me")
        get_func_task_path(funk)
        self.assertEqual(funk.__name__, 'async_me')

    ##
    # Functional tests
    ##
    def test_sync_call(self):
        funk = import_and_get_task("tests.test_app.async_me")
        self.assertEqual(funk.sync('123'), "run async when on lambda 123")

    def test_async_call_with_defaults(self):
        """Change a task's asynchronousity at runtime."""
        # Import the task first to make sure it is decorated whilst the
        # environment is unpatched.
        async_me = import_and_get_task("tests.test_app.async_me")

        lambda_async_mock = mock.Mock()
        lambda_async_mock.return_value.send.return_value = "Running async!"
        with mock.patch.dict('zappa.asynchronous.ASYNC_CLASSES',
                             {'lambda': lambda_async_mock}):
            # First check that it still runs synchronously by default
            self.assertEqual(async_me("123"), "run async when on lambda 123")

            # Now patch the environment to make it look like we are running on
            # AWS Lambda
            options = {
                'AWS_LAMBDA_FUNCTION_NAME': 'MyLambda',
                'AWS_REGION': 'us-east-1'
            }
            with mock.patch.dict(os.environ, options):
                self.assertEqual(async_me("qux"), "Running async!")

            # And check the dispatching class got called correctly
            lambda_async_mock.assert_called_once()
            lambda_async_mock.assert_called_with(aws_region='us-east-1',
                                                 capture_response=False,
                                                 lambda_function_name="MyLambda")
            lambda_async_mock.return_value.send.assert_called_with(
                get_func_task_path(async_me), ("qux",), {})
[]
[]
[ "PLACEBO_MODE", "AWS_DEFAULT_REGION" ]
[]
["PLACEBO_MODE", "AWS_DEFAULT_REGION"]
python
2
0
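setUp and tearDown above pin AWS_DEFAULT_REGION to us-east-1 and hand the user's original value back afterwards. In Go's testing package the same save-override-restore dance is built in via (*testing.T).Setenv, which registers the restore in Cleanup automatically; the sketch below (test name hypothetical) shows the equivalent.

package example

import (
	"os"
	"testing"
)

func TestRegionPinned(t *testing.T) {
	// t.Setenv saves the previous value and restores it when the test ends,
	// matching what setUp/tearDown do by hand in the Python suite.
	t.Setenv("AWS_DEFAULT_REGION", "us-east-1")

	if got := os.Getenv("AWS_DEFAULT_REGION"); got != "us-east-1" {
		t.Fatalf("expected pinned region, got %q", got)
	}
}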
Jobs/Enrich/merging/update_merging.py
#!/usr/bin/env python3

# this code looks up older jobs in merging state in ES
# find them in ATLAS_PANDAARCH.JOBSARCHIVED and updates state in ES

import os
from elasticsearch import Elasticsearch
from elasticsearch.helpers import scan, bulk
import cx_Oracle

username = os.environ['JOB_ORACLE_USER']
password = os.environ['JOB_ORACLE_PASS']
# JOB_ORACLE_CONNECTION_STRING or JOB_ORACLE_ADG_CONNECTION_STRING
server = os.environ['JOB_ORACLE_CONNECTION_STRING'].replace(
    "jdbc:oracle:thin:@//", "")

connString = username + "/" + password + "@" + server

con = cx_Oracle.connect(connString)
print('Oracle version:', con.version)

es = Elasticsearch([{'host': 'atlas-kibana.mwt2.org', 'port': 9200, 'scheme': 'https'}], timeout=60)

mergin_job_query = {
    "size": 0,
    "version": True,
    "_source": ["_id"],
    "query": {
        "term": {"jobstatus": "merging"}
    }
}

res = scan(client=es, index='jobs', query=mergin_job_query, scroll='5m', timeout="5m", size=10000)

counter = 0
skipped = 0
jobs = {}
jbs = ''
new_statuses = {}
for job in res:
    counter += 1
    if not counter % 1000:
        print("scanned: ", counter, "\tskipped:", skipped, '\tnew statuses:', new_statuses)
    # print(job)
    # if counter > 1000:
    #     break
    if (job['_version']) > 10:
        skipped += 1
        continue
    jobs[int(job['_id'])] = job['_index']
    jbs += str(job['_id'] + ',')

    if len(jobs) > 900:
        jbs = jbs[:-1]

        # geting info from oracle
        cur = con.cursor()
        cur.execute(
            'SELECT pandaid, jobstatus FROM ATLAS_PANDAARCH.JOBSARCHIVED WHERE PANDAID IN(' + jbs + ')')
        lookedup = []
        for result in cur:
            # print(result)
            ns = result[1]
            if ns not in new_statuses:
                new_statuses[ns] = 0
            new_statuses[ns] += 1
            lookedup.append(result)
        cur.close()

        # do ES update
        data = []
        for pid, jstatus in lookedup:
            if jstatus == 'merging':
                continue
            d = {
                '_op_type': 'update',
                '_index': jobs[pid],
                '_type': 'jobs_data',
                '_id': pid,
                'doc': {'jobstatus': jstatus}
            }
            data.append(d)

        status = bulk(client=es, actions=data, stats_only=True, timeout="5m")
        print(status)

        jobs = {}
        jbs = ''

con.close()
print('Done.')
[]
[]
[ "JOB_ORACLE_USER", "JOB_ORACLE_PASS", "JOB_ORACLE_CONNECTION_STRING" ]
[]
["JOB_ORACLE_USER", "JOB_ORACLE_PASS", "JOB_ORACLE_CONNECTION_STRING"]
python
3
0
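The script above reads its Oracle credentials with os.environ[...], so a missing variable surfaces as a bare KeyError at startup. The Go sketch below expresses the same "required variable" contract with a clearer failure message; mustGetenv and oracleConnString are hypothetical helpers for illustration, not code from the original job.

package example

import (
	"fmt"
	"log"
	"os"
	"strings"
)

// mustGetenv aborts with an explicit message when a required variable is
// missing, instead of failing later with a cryptic connection error.
func mustGetenv(key string) string {
	v, ok := os.LookupEnv(key)
	if !ok || v == "" {
		log.Fatalf("required environment variable %s is not set", key)
	}
	return v
}

func oracleConnString() string {
	user := mustGetenv("JOB_ORACLE_USER")
	pass := mustGetenv("JOB_ORACLE_PASS")
	// The job strips the JDBC prefix before handing the string to the driver.
	server := strings.TrimPrefix(mustGetenv("JOB_ORACLE_CONNECTION_STRING"), "jdbc:oracle:thin:@//")
	return fmt.Sprintf("%s/%s@%s", user, pass, server)
}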
actions/syslinux/v1/main.go
package main

import (
	"fmt"
	"io"
	"os"
	"os/exec"

	log "github.com/sirupsen/logrus"
	"golang.org/x/sys/unix"
)

func main() {
	fmt.Printf("SYSLINUX - Boot Loader Installation\n------------------------\n")

	disk := os.Getenv("DEST_DISK")
	partition := os.Getenv("DEST_PARTITION")
	ver := os.Getenv("SYSLINUX_VERSION")

	switch ver {
	case "386", "3.86":
		syslinux386(disk, partition)
	default:
		log.Fatalf("Unknown syslinux version [%s]", ver)
	}
}

func syslinux386(disk, partition string) {
	log.Infof("Writing mbr to [%s] and installing boot loader to [%s]", disk, partition)

	// Open the block device and write the Master boot record
	blockOut, err := os.OpenFile(disk, os.O_CREATE|os.O_WRONLY, 0644)
	if err != nil {
		log.Fatalln(err)
	}
	ReReadPartitionTable(blockOut)
	defer blockOut.Close()

	mbrIn, err := os.OpenFile("/mbr.bin.386", os.O_RDONLY, 0644)
	defer mbrIn.Close()

	_, err = io.Copy(blockOut, mbrIn)
	if err != nil {
		log.Fatalln(err)
	}

	_, err = os.Stat(partition)
	if err != nil {
		log.Fatalln(err)
	}

	cmd := exec.Command("/syslinux.386", partition)
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr

	err = cmd.Start()
	if err != nil {
		log.Fatalf("Error starting [%v]", err)
	}
	err = cmd.Wait()
	if err != nil {
		log.Fatalf("Error running [%v]", err)
	}
}

const (
	BLKRRPART = 0x125f
)

// ReReadPartitionTable forces the kernel to re-read the partition table
// on the disk.
//
// It is done via an ioctl call with request as BLKRRPART.
func ReReadPartitionTable(d *os.File) error {
	fd := d.Fd()
	_, err := unix.IoctlGetInt(int(fd), BLKRRPART)
	if err != nil {
		return fmt.Errorf("Unable to re-read partition table: %v", err)
	}
	return nil
}
[ "\"DEST_DISK\"", "\"DEST_PARTITION\"", "\"SYSLINUX_VERSION\"" ]
[]
[ "DEST_DISK", "DEST_PARTITION", "SYSLINUX_VERSION" ]
[]
["DEST_DISK", "DEST_PARTITION", "SYSLINUX_VERSION"]
go
3
0
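main above reads DEST_DISK, DEST_PARTITION and SYSLINUX_VERSION but only rejects an unknown version; an empty disk or partition value would only fail later inside syslinux386. The sketch below validates all three inputs up front before dispatching. The checkInputs helper is an assumption for illustration, not part of the action.

package example

import (
	"fmt"
	"os"
)

// checkInputs collects the action's required variables and reports every
// missing one at once, so the container fails fast with a usable message.
func checkInputs() (disk, partition, version string, err error) {
	vals := map[string]string{}
	var missing []string
	for _, key := range []string{"DEST_DISK", "DEST_PARTITION", "SYSLINUX_VERSION"} {
		v := os.Getenv(key)
		if v == "" {
			missing = append(missing, key)
		}
		vals[key] = v
	}
	if len(missing) > 0 {
		return "", "", "", fmt.Errorf("missing required environment variables: %v", missing)
	}
	return vals["DEST_DISK"], vals["DEST_PARTITION"], vals["SYSLINUX_VERSION"], nil
}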
go/src/github.com/influxdata/influxdb/tsdb/store_test.go
package tsdb_test import ( "bytes" "context" "errors" "fmt" "io/ioutil" "math" "math/rand" "os" "path/filepath" "reflect" "regexp" "sort" "strings" "testing" "time" "github.com/davecgh/go-spew/spew" "github.com/influxdata/influxdb/internal" "github.com/influxdata/influxdb/logger" "github.com/influxdata/influxdb/models" "github.com/influxdata/influxdb/pkg/deep" "github.com/influxdata/influxdb/pkg/slices" "github.com/influxdata/influxdb/query" "github.com/influxdata/influxdb/tsdb" "github.com/influxdata/influxdb/tsdb/index/inmem" "github.com/influxdata/influxql" ) // Ensure the store can delete a retention policy and all shards under // it. func TestStore_DeleteRetentionPolicy(t *testing.T) { t.Parallel() test := func(index string) { s := MustOpenStore(index) defer s.Close() // Create a new shard and verify that it exists. if err := s.CreateShard("db0", "rp0", 1, true); err != nil { t.Fatal(err) } else if sh := s.Shard(1); sh == nil { t.Fatalf("expected shard") } // Create a new shard under the same retention policy, and verify // that it exists. if err := s.CreateShard("db0", "rp0", 2, true); err != nil { t.Fatal(err) } else if sh := s.Shard(2); sh == nil { t.Fatalf("expected shard") } // Create a new shard under a different retention policy, and // verify that it exists. if err := s.CreateShard("db0", "rp1", 3, true); err != nil { t.Fatal(err) } else if sh := s.Shard(3); sh == nil { t.Fatalf("expected shard") } // Deleting the rp0 retention policy does not return an error. if err := s.DeleteRetentionPolicy("db0", "rp0"); err != nil { t.Fatal(err) } // It deletes the shards under that retention policy. if sh := s.Shard(1); sh != nil { t.Errorf("shard 1 was not deleted") } if sh := s.Shard(2); sh != nil { t.Errorf("shard 2 was not deleted") } // It deletes the retention policy directory. if got, exp := dirExists(filepath.Join(s.Path(), "db0", "rp0")), false; got != exp { t.Error("directory exists, but should have been removed") } // It deletes the WAL retention policy directory. if got, exp := dirExists(filepath.Join(s.EngineOptions.Config.WALDir, "db0", "rp0")), false; got != exp { t.Error("directory exists, but should have been removed") } // Reopen other shard and check it still exists. if err := s.Reopen(); err != nil { t.Error(err) } else if sh := s.Shard(3); sh == nil { t.Errorf("shard 3 does not exist") } // It does not delete other retention policy directories. if got, exp := dirExists(filepath.Join(s.Path(), "db0", "rp1")), true; got != exp { t.Error("directory does not exist, but should") } if got, exp := dirExists(filepath.Join(s.EngineOptions.Config.WALDir, "db0", "rp1")), true; got != exp { t.Error("directory does not exist, but should") } } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } // Ensure the store can create a new shard. func TestStore_CreateShard(t *testing.T) { t.Parallel() test := func(index string) { s := MustOpenStore(index) defer s.Close() // Create a new shard and verify that it exists. if err := s.CreateShard("db0", "rp0", 1, true); err != nil { t.Fatal(err) } else if sh := s.Shard(1); sh == nil { t.Fatalf("expected shard") } // Create another shard and verify that it exists. if err := s.CreateShard("db0", "rp0", 2, true); err != nil { t.Fatal(err) } else if sh := s.Shard(2); sh == nil { t.Fatalf("expected shard") } // Reopen shard and recheck. 
if err := s.Reopen(); err != nil { t.Fatal(err) } else if sh := s.Shard(1); sh == nil { t.Fatalf("expected shard(1)") } else if sh = s.Shard(2); sh == nil { t.Fatalf("expected shard(2)") } } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } // Ensure the store does not return an error when delete from a non-existent db. func TestStore_DeleteSeries_NonExistentDB(t *testing.T) { t.Parallel() test := func(index string) { s := MustOpenStore(index) defer s.Close() if err := s.DeleteSeries("db0", nil, nil); err != nil { t.Fatal(err.Error()) } } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } // Ensure the store can delete an existing shard. func TestStore_DeleteShard(t *testing.T) { t.Parallel() test := func(index string) error { s := MustOpenStore(index) defer s.Close() // Create a new shard and verify that it exists. if err := s.CreateShard("db0", "rp0", 1, true); err != nil { return err } else if sh := s.Shard(1); sh == nil { return fmt.Errorf("expected shard") } // Create another shard. if err := s.CreateShard("db0", "rp0", 2, true); err != nil { return err } else if sh := s.Shard(2); sh == nil { return fmt.Errorf("expected shard") } // and another, but in a different db. if err := s.CreateShard("db1", "rp0", 3, true); err != nil { return err } else if sh := s.Shard(3); sh == nil { return fmt.Errorf("expected shard") } // Write series data to the db0 shards. s.MustWriteToShardString(1, "cpu,servera=a v=1", "cpu,serverb=b v=1", "mem,serverc=a v=1") s.MustWriteToShardString(2, "cpu,servera=a v=1", "mem,serverc=a v=1") // Write similar data to db1 database s.MustWriteToShardString(3, "cpu,serverb=b v=1") // Reopen the store and check all shards still exist if err := s.Reopen(); err != nil { return err } for i := uint64(1); i <= 3; i++ { if sh := s.Shard(i); sh == nil { return fmt.Errorf("shard %d missing", i) } } // Remove the first shard from the store. if err := s.DeleteShard(1); err != nil { return err } // cpu,serverb=b should be removed from the series file for db0 because // shard 1 was the only owner of that series. // Verify by getting all tag keys. keys, err := s.TagKeys(nil, []uint64{2}, nil) if err != nil { return err } expKeys := []tsdb.TagKeys{ {Measurement: "cpu", Keys: []string{"servera"}}, {Measurement: "mem", Keys: []string{"serverc"}}, } if got, exp := keys, expKeys; !reflect.DeepEqual(got, exp) { return fmt.Errorf("got keys %v, expected %v", got, exp) } // Verify that the same series was not removed from other databases' // series files. if keys, err = s.TagKeys(nil, []uint64{3}, nil); err != nil { return err } expKeys = []tsdb.TagKeys{{Measurement: "cpu", Keys: []string{"serverb"}}} if got, exp := keys, expKeys; !reflect.DeepEqual(got, exp) { return fmt.Errorf("got keys %v, expected %v", got, exp) } return nil } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { if err := test(index); err != nil { t.Error(err) } }) } } // Ensure the store can create a snapshot to a shard. func TestStore_CreateShardSnapShot(t *testing.T) { t.Parallel() test := func(index string) { s := MustOpenStore(index) defer s.Close() // Create a new shard and verify that it exists. 
if err := s.CreateShard("db0", "rp0", 1, true); err != nil { t.Fatal(err) } else if sh := s.Shard(1); sh == nil { t.Fatalf("expected shard") } dir, e := s.CreateShardSnapshot(1) if e != nil { t.Fatal(e) } if dir == "" { t.Fatal("empty directory name") } } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } func TestStore_Open(t *testing.T) { t.Parallel() test := func(index string) { s := NewStore(index) defer s.Close() if err := os.MkdirAll(filepath.Join(s.Path(), "db0", "rp0", "2"), 0777); err != nil { t.Fatal(err) } if err := os.MkdirAll(filepath.Join(s.Path(), "db0", "rp2", "4"), 0777); err != nil { t.Fatal(err) } if err := os.MkdirAll(filepath.Join(s.Path(), "db1", "rp0", "1"), 0777); err != nil { t.Fatal(err) } // Store should ignore shard since it does not have a numeric name. if err := s.Open(); err != nil { t.Fatal(err) } else if n := len(s.Databases()); n != 2 { t.Fatalf("unexpected database index count: %d", n) } else if n := s.ShardN(); n != 3 { t.Fatalf("unexpected shard count: %d", n) } expDatabases := []string{"db0", "db1"} gotDatabases := s.Databases() sort.Strings(gotDatabases) if got, exp := gotDatabases, expDatabases; !reflect.DeepEqual(got, exp) { t.Fatalf("got %#v, expected %#v", got, exp) } } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } // Ensure the store reports an error when it can't open a database directory. func TestStore_Open_InvalidDatabaseFile(t *testing.T) { t.Parallel() test := func(index string) { s := NewStore(index) defer s.Close() // Create a file instead of a directory for a database. if _, err := os.Create(filepath.Join(s.Path(), "db0")); err != nil { t.Fatal(err) } // Store should ignore database since it's a file. if err := s.Open(); err != nil { t.Fatal(err) } else if n := len(s.Databases()); n != 0 { t.Fatalf("unexpected database index count: %d", n) } } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } // Ensure the store reports an error when it can't open a retention policy. func TestStore_Open_InvalidRetentionPolicy(t *testing.T) { t.Parallel() test := func(index string) { s := NewStore(index) defer s.Close() // Create an RP file instead of a directory. if err := os.MkdirAll(filepath.Join(s.Path(), "db0"), 0777); err != nil { t.Fatal(err) } else if _, err := os.Create(filepath.Join(s.Path(), "db0", "rp0")); err != nil { t.Fatal(err) } // Store should ignore retention policy since it's a file, and there should // be no indices created. if err := s.Open(); err != nil { t.Fatal(err) } else if n := len(s.Databases()); n != 0 { t.Log(s.Databases()) t.Fatalf("unexpected database index count: %d", n) } } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } // Ensure the store reports an error when it can't open a retention policy. func TestStore_Open_InvalidShard(t *testing.T) { t.Parallel() test := func(index string) { s := NewStore(index) defer s.Close() // Create a non-numeric shard file. if err := os.MkdirAll(filepath.Join(s.Path(), "db0", "rp0"), 0777); err != nil { t.Fatal(err) } else if _, err := os.Create(filepath.Join(s.Path(), "db0", "rp0", "bad_shard")); err != nil { t.Fatal(err) } // Store should ignore shard since it does not have a numeric name. 
if err := s.Open(); err != nil { t.Fatal(err) } else if n := len(s.Databases()); n != 0 { t.Fatalf("unexpected database index count: %d", n) } else if n := s.ShardN(); n != 0 { t.Fatalf("unexpected shard count: %d", n) } } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } // Ensure shards can create iterators. func TestShards_CreateIterator(t *testing.T) { t.Parallel() test := func(index string) { s := MustOpenStore(index) defer s.Close() // Create shard #0 with data. s.MustCreateShardWithData("db0", "rp0", 0, `cpu,host=serverA value=1 0`, `cpu,host=serverA value=2 10`, `cpu,host=serverB value=3 20`, ) // Create shard #1 with data. s.MustCreateShardWithData("db0", "rp0", 1, `cpu,host=serverA value=1 30`, `mem,host=serverA value=2 40`, // skip: wrong source `cpu,host=serverC value=3 60`, ) // Retrieve shard group. shards := s.ShardGroup([]uint64{0, 1}) // Create iterator. m := &influxql.Measurement{Name: "cpu"} itr, err := shards.CreateIterator(context.Background(), m, query.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Dimensions: []string{"host"}, Ascending: true, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, }) if err != nil { t.Fatal(err) } defer itr.Close() fitr := itr.(query.FloatIterator) // Read values from iterator. The host=serverA points should come first. if p, err := fitr.Next(); err != nil { t.Fatalf("unexpected error(0): %s", err) } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(0, 0).UnixNano(), Value: 1}) { t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) } if p, err := fitr.Next(); err != nil { t.Fatalf("unexpected error(1): %s", err) } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(10, 0).UnixNano(), Value: 2}) { t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) } if p, err := fitr.Next(); err != nil { t.Fatalf("unexpected error(2): %s", err) } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverA"), Time: time.Unix(30, 0).UnixNano(), Value: 1}) { t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) } // Next the host=serverB point. if p, err := fitr.Next(); err != nil { t.Fatalf("unexpected error(3): %s", err) } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverB"), Time: time.Unix(20, 0).UnixNano(), Value: 3}) { t.Fatalf("unexpected point(3): %s", spew.Sdump(p)) } // And finally the host=serverC point. if p, err := fitr.Next(); err != nil { t.Fatalf("unexpected error(4): %s", err) } else if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Tags: ParseTags("host=serverC"), Time: time.Unix(60, 0).UnixNano(), Value: 3}) { t.Fatalf("unexpected point(4): %s", spew.Sdump(p)) } // Then an EOF should occur. if p, err := fitr.Next(); err != nil { t.Fatalf("expected eof, got error: %s", err) } else if p != nil { t.Fatalf("expected eof, got: %s", spew.Sdump(p)) } } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } // Ensure the store can backup a shard and another store can restore it. func TestStore_BackupRestoreShard(t *testing.T) { test := func(index string) { s0, s1 := MustOpenStore(index), MustOpenStore(index) defer s0.Close() defer s1.Close() // Create shard with data. s0.MustCreateShardWithData("db0", "rp0", 100, `cpu value=1 0`, `cpu value=2 10`, `cpu value=3 20`, ) if err := s0.Reopen(); err != nil { t.Fatal(err) } // Backup shard to a buffer. 
var buf bytes.Buffer if err := s0.BackupShard(100, time.Time{}, &buf); err != nil { t.Fatal(err) } // Create the shard on the other store and restore from buffer. if err := s1.CreateShard("db0", "rp0", 100, true); err != nil { t.Fatal(err) } if err := s1.RestoreShard(100, &buf); err != nil { t.Fatal(err) } // Read data from m := &influxql.Measurement{Name: "cpu"} itr, err := s0.Shard(100).CreateIterator(context.Background(), m, query.IteratorOptions{ Expr: influxql.MustParseExpr(`value`), Ascending: true, StartTime: influxql.MinTime, EndTime: influxql.MaxTime, }) if err != nil { t.Fatal(err) } defer itr.Close() fitr := itr.(query.FloatIterator) // Read values from iterator. The host=serverA points should come first. p, e := fitr.Next() if e != nil { t.Fatal(e) } if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Time: time.Unix(0, 0).UnixNano(), Value: 1}) { t.Fatalf("unexpected point(0): %s", spew.Sdump(p)) } p, e = fitr.Next() if e != nil { t.Fatal(e) } if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Time: time.Unix(10, 0).UnixNano(), Value: 2}) { t.Fatalf("unexpected point(1): %s", spew.Sdump(p)) } p, e = fitr.Next() if e != nil { t.Fatal(e) } if !deep.Equal(p, &query.FloatPoint{Name: "cpu", Time: time.Unix(20, 0).UnixNano(), Value: 3}) { t.Fatalf("unexpected point(2): %s", spew.Sdump(p)) } } for _, index := range tsdb.RegisteredIndexes() { if index == tsdb.TSI1IndexName { t.Skip("Skipping failing test for tsi1") } t.Run(index, func(t *testing.T) { test(index) }) } } func TestStore_Shard_SeriesN(t *testing.T) { t.Parallel() test := func(index string) error { s := MustOpenStore(index) defer s.Close() // Create shard with data. s.MustCreateShardWithData("db0", "rp0", 1, `cpu value=1 0`, `cpu,host=serverA value=2 10`, ) // Create 2nd shard w/ same measurements. s.MustCreateShardWithData("db0", "rp0", 2, `cpu value=1 0`, `cpu value=2 10`, ) if got, exp := s.Shard(1).SeriesN(), int64(2); got != exp { return fmt.Errorf("[shard %d] got series count of %d, but expected %d", 1, got, exp) } else if got, exp := s.Shard(2).SeriesN(), int64(1); got != exp { return fmt.Errorf("[shard %d] got series count of %d, but expected %d", 2, got, exp) } return nil } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { if err := test(index); err != nil { t.Error(err) } }) } } func TestStore_MeasurementNames_Deduplicate(t *testing.T) { t.Parallel() test := func(index string) { s := MustOpenStore(index) defer s.Close() // Create shard with data. s.MustCreateShardWithData("db0", "rp0", 1, `cpu value=1 0`, `cpu value=2 10`, `cpu value=3 20`, ) // Create 2nd shard w/ same measurements. s.MustCreateShardWithData("db0", "rp0", 2, `cpu value=1 0`, `cpu value=2 10`, `cpu value=3 20`, ) meas, err := s.MeasurementNames(query.OpenAuthorizer, "db0", nil) if err != nil { t.Fatalf("unexpected error with MeasurementNames: %v", err) } if exp, got := 1, len(meas); exp != got { t.Fatalf("measurement len mismatch: exp %v, got %v", exp, got) } if exp, got := "cpu", string(meas[0]); exp != got { t.Fatalf("measurement name mismatch: exp %v, got %v", exp, got) } } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } func testStoreCardinalityTombstoning(t *testing.T, store *Store) { // Generate point data to write to the shards. 
series := genTestSeries(10, 2, 4) // 160 series points := make([]models.Point, 0, len(series)) for _, s := range series { points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) } // Create requested number of shards in the store & write points across // shards such that we never write the same series to multiple shards. for shardID := 0; shardID < 4; shardID++ { if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil { t.Errorf("create shard: %s", err) } if err := store.BatchWrite(shardID, points[shardID*40:(shardID+1)*40]); err != nil { t.Errorf("batch write: %s", err) } } // Delete all the series for each measurement. mnames, err := store.MeasurementNames(nil, "db", nil) if err != nil { t.Fatal(err) } for _, name := range mnames { if err := store.DeleteSeries("db", []influxql.Source{&influxql.Measurement{Name: string(name)}}, nil); err != nil { t.Fatal(err) } } // Estimate the series cardinality... cardinality, err := store.Store.SeriesCardinality("db") if err != nil { t.Fatal(err) } // Estimated cardinality should be well within 10 of the actual cardinality. if got, exp := int(cardinality), 10; got > exp { t.Errorf("series cardinality was %v (expected within %v), expected was: %d", got, exp, 0) } // Since all the series have been deleted, all the measurements should have // been removed from the index too. if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil { t.Fatal(err) } // Estimated cardinality should be well within 2 of the actual cardinality. // TODO(edd): this is totally arbitrary. How can I make it better? if got, exp := int(cardinality), 2; got > exp { t.Errorf("measurement cardinality was %v (expected within %v), expected was: %d", got, exp, 0) } } func TestStore_Cardinality_Tombstoning(t *testing.T) { t.Parallel() if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" { t.Skip("Skipping test in short, race and appveyor mode.") } test := func(index string) { store := NewStore(index) if err := store.Open(); err != nil { panic(err) } defer store.Close() testStoreCardinalityTombstoning(t, store) } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } func testStoreCardinalityUnique(t *testing.T, store *Store) { // Generate point data to write to the shards. series := genTestSeries(64, 5, 5) // 200,000 series expCardinality := len(series) points := make([]models.Point, 0, len(series)) for _, s := range series { points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) } // Create requested number of shards in the store & write points across // shards such that we never write the same series to multiple shards. for shardID := 0; shardID < 10; shardID++ { if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil { t.Fatalf("create shard: %s", err) } if err := store.BatchWrite(shardID, points[shardID*20000:(shardID+1)*20000]); err != nil { t.Fatalf("batch write: %s", err) } } // Estimate the series cardinality... cardinality, err := store.Store.SeriesCardinality("db") if err != nil { t.Fatal(err) } // Estimated cardinality should be well within 1.5% of the actual cardinality. 
if got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp { t.Errorf("got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v", got, cardinality, expCardinality, exp) } // Estimate the measurement cardinality... if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil { t.Fatal(err) } // Estimated cardinality should be well within 2 of the actual cardinality. (arbitrary...) expCardinality = 64 if got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp { t.Errorf("got measurmement cardinality %v, expected upto %v; difference is larger than expected %v", cardinality, expCardinality, exp) } } func TestStore_Cardinality_Unique(t *testing.T) { t.Parallel() if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" { t.Skip("Skipping test in short, race and appveyor mode.") } test := func(index string) { store := NewStore(index) store.EngineOptions.Config.MaxSeriesPerDatabase = 0 if err := store.Open(); err != nil { panic(err) } defer store.Close() testStoreCardinalityUnique(t, store) } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } // This test tests cardinality estimation when series data is duplicated across // multiple shards. func testStoreCardinalityDuplicates(t *testing.T, store *Store) { // Generate point data to write to the shards. series := genTestSeries(64, 5, 5) // 200,000 series. expCardinality := len(series) points := make([]models.Point, 0, len(series)) for _, s := range series { points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) } // Create requested number of shards in the store & write points. for shardID := 0; shardID < 10; shardID++ { if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil { t.Fatalf("create shard: %s", err) } var from, to int if shardID == 0 { // if it's the first shard then write all of the points. from, to = 0, len(points)-1 } else { // For other shards we write a random sub-section of all the points. // which will duplicate the series and shouldn't increase the // cardinality. from, to := rand.Intn(len(points)), rand.Intn(len(points)) if from > to { from, to = to, from } } if err := store.BatchWrite(shardID, points[from:to]); err != nil { t.Fatalf("batch write: %s", err) } } // Estimate the series cardinality... cardinality, err := store.Store.SeriesCardinality("db") if err != nil { t.Fatal(err) } // Estimated cardinality should be well within 1.5% of the actual cardinality. if got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp { t.Errorf("got epsilon of %v for series cardinality %d (expected %d), which is larger than expected %v", got, cardinality, expCardinality, exp) } // Estimate the measurement cardinality... if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil { t.Fatal(err) } // Estimated cardinality should be well within 2 of the actual cardinality. (Arbitrary...) 
expCardinality = 64 if got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp { t.Errorf("got measurement cardinality %v, expected upto %v; difference is larger than expected %v", cardinality, expCardinality, exp) } } func TestStore_Cardinality_Duplicates(t *testing.T) { t.Parallel() if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" { t.Skip("Skipping test in short, race and appveyor mode.") } test := func(index string) { store := NewStore(index) store.EngineOptions.Config.MaxSeriesPerDatabase = 0 if err := store.Open(); err != nil { panic(err) } defer store.Close() testStoreCardinalityDuplicates(t, store) } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { test(index) }) } } // Creates a large number of series in multiple shards, which will force // compactions to occur. func testStoreCardinalityCompactions(store *Store) error { // Generate point data to write to the shards. series := genTestSeries(300, 5, 5) // 937,500 series expCardinality := len(series) points := make([]models.Point, 0, len(series)) for _, s := range series { points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) } // Create requested number of shards in the store & write points across // shards such that we never write the same series to multiple shards. for shardID := 0; shardID < 2; shardID++ { if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil { return fmt.Errorf("create shard: %s", err) } if err := store.BatchWrite(shardID, points[shardID*468750:(shardID+1)*468750]); err != nil { return fmt.Errorf("batch write: %s", err) } } // Estimate the series cardinality... cardinality, err := store.Store.SeriesCardinality("db") if err != nil { return err } // Estimated cardinality should be well within 1.5% of the actual cardinality. if got, exp := math.Abs(float64(cardinality)-float64(expCardinality))/float64(expCardinality), 0.015; got > exp { return fmt.Errorf("got epsilon of %v for series cardinality %v (expected %v), which is larger than expected %v", got, cardinality, expCardinality, exp) } // Estimate the measurement cardinality... if cardinality, err = store.Store.MeasurementsCardinality("db"); err != nil { return err } // Estimated cardinality should be well within 2 of the actual cardinality. (Arbitrary...) expCardinality = 300 if got, exp := math.Abs(float64(cardinality)-float64(expCardinality)), 2.0; got > exp { return fmt.Errorf("got measurement cardinality %v, expected upto %v; difference is larger than expected %v", cardinality, expCardinality, exp) } return nil } func TestStore_Cardinality_Compactions(t *testing.T) { if testing.Short() || os.Getenv("GORACE") != "" || os.Getenv("APPVEYOR") != "" { t.Skip("Skipping test in short, race and appveyor mode.") } test := func(index string) error { store := NewStore(index) store.EngineOptions.Config.MaxSeriesPerDatabase = 0 if err := store.Open(); err != nil { panic(err) } defer store.Close() return testStoreCardinalityCompactions(store) } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { if err := test(index); err != nil { t.Fatal(err) } }) } } func TestStore_Sketches(t *testing.T) { t.Parallel() checkCardinalities := func(store *tsdb.Store, series, tseries, measurements, tmeasurements int) error { // Get sketches and check cardinality... sketch, tsketch, err := store.SeriesSketches("db") if err != nil { return err } // delta calculates a rough 10% delta. 
If i is small then a minimum value // of 2 is used. delta := func(i int) int { v := i / 10 if v == 0 { v = 2 } return v } // series cardinality should be well within 10%. if got, exp := int(sketch.Count()), series; got-exp < -delta(series) || got-exp > delta(series) { return fmt.Errorf("got series cardinality %d, expected ~%d", got, exp) } // check series tombstones if got, exp := int(tsketch.Count()), tseries; got-exp < -delta(tseries) || got-exp > delta(tseries) { return fmt.Errorf("got series tombstone cardinality %d, expected ~%d", got, exp) } // Check measurement cardinality. if sketch, tsketch, err = store.MeasurementsSketches("db"); err != nil { return err } if got, exp := int(sketch.Count()), measurements; got-exp < -delta(measurements) || got-exp > delta(measurements) { return fmt.Errorf("got measurement cardinality %d, expected ~%d", got, exp) } if got, exp := int(tsketch.Count()), tmeasurements; got-exp < -delta(tmeasurements) || got-exp > delta(tmeasurements) { return fmt.Errorf("got measurement tombstone cardinality %d, expected ~%d", got, exp) } return nil } test := func(index string) error { store := MustOpenStore(index) defer store.Close() // Generate point data to write to the shards. series := genTestSeries(10, 2, 4) // 160 series points := make([]models.Point, 0, len(series)) for _, s := range series { points = append(points, models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": 1.0}, time.Now())) } // Create requested number of shards in the store & write points across // shards such that we never write the same series to multiple shards. for shardID := 0; shardID < 4; shardID++ { if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil { return fmt.Errorf("create shard: %s", err) } if err := store.BatchWrite(shardID, points[shardID*40:(shardID+1)*40]); err != nil { return fmt.Errorf("batch write: %s", err) } } // Check cardinalities if err := checkCardinalities(store.Store, 160, 0, 10, 0); err != nil { return fmt.Errorf("[initial] %v", err) } // Reopen the store. if err := store.Reopen(); err != nil { return err } // Check cardinalities if err := checkCardinalities(store.Store, 160, 0, 10, 0); err != nil { return fmt.Errorf("[initial|re-open] %v", err) } // Delete half the the measurements data mnames, err := store.MeasurementNames(nil, "db", nil) if err != nil { return err } for _, name := range mnames[:len(mnames)/2] { if err := store.DeleteSeries("db", []influxql.Source{&influxql.Measurement{Name: string(name)}}, nil); err != nil { return err } } // Check cardinalities. In this case, the indexes behave differently. expS, expTS, expM, expTM := 160, 0, 10, 5 if index == inmem.IndexName { expS, expTS, expM, expTM = 160, 80, 10, 5 } // Check cardinalities - tombstones should be in if err := checkCardinalities(store.Store, expS, expTS, expM, expTM); err != nil { return fmt.Errorf("[initial|re-open|delete] %v", err) } // Reopen the store. if err := store.Reopen(); err != nil { return err } // Check cardinalities. In this case, the indexes behave differently. 
expS, expTS, expM, expTM = 160, 0, 5, 5 if index == inmem.IndexName { expS, expTS, expM, expTM = 80, 0, 5, 0 } if err := checkCardinalities(store.Store, expS, expTS, expM, expTM); err != nil { return fmt.Errorf("[initial|re-open|delete|re-open] %v", err) } return nil } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { if err := test(index); err != nil { t.Fatal(err) } }) } } func TestStore_TagValues(t *testing.T) { t.Parallel() // No WHERE - just get for keys host and shard RHSAll := &influxql.ParenExpr{ Expr: &influxql.BinaryExpr{ Op: influxql.OR, LHS: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "_tagKey"}, RHS: &influxql.StringLiteral{Val: "host"}, }, RHS: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "_tagKey"}, RHS: &influxql.StringLiteral{Val: "shard"}, }, }, } // Get for host and shard, but also WHERE on foo = a RHSWhere := &influxql.ParenExpr{ Expr: &influxql.BinaryExpr{ Op: influxql.AND, LHS: &influxql.ParenExpr{ Expr: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "foo"}, RHS: &influxql.StringLiteral{Val: "a"}, }, }, RHS: RHSAll, }, } // SHOW TAG VALUES FROM /cpu\d/ WITH KEY IN ("host", "shard") // // Switching out RHS for RHSWhere would make the query: // SHOW TAG VALUES FROM /cpu\d/ WITH KEY IN ("host", "shard") WHERE foo = 'a' base := influxql.BinaryExpr{ Op: influxql.AND, LHS: &influxql.ParenExpr{ Expr: &influxql.BinaryExpr{ Op: influxql.EQREGEX, LHS: &influxql.VarRef{Val: "_name"}, RHS: &influxql.RegexLiteral{Val: regexp.MustCompile(`cpu\d`)}, }, }, RHS: RHSAll, } var baseWhere *influxql.BinaryExpr = influxql.CloneExpr(&base).(*influxql.BinaryExpr) baseWhere.RHS = RHSWhere examples := []struct { Name string Expr influxql.Expr Exp []tsdb.TagValues }{ { Name: "No WHERE clause", Expr: &base, Exp: []tsdb.TagValues{ createTagValues("cpu0", map[string][]string{"shard": {"s0"}}), createTagValues("cpu1", map[string][]string{"shard": {"s1"}}), createTagValues("cpu10", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), createTagValues("cpu11", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), createTagValues("cpu12", map[string][]string{"host": {"nofoo", "tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), createTagValues("cpu2", map[string][]string{"shard": {"s2"}}), }, }, { Name: "With WHERE clause", Expr: baseWhere, Exp: []tsdb.TagValues{ createTagValues("cpu0", map[string][]string{"shard": {"s0"}}), createTagValues("cpu1", map[string][]string{"shard": {"s1"}}), createTagValues("cpu10", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), createTagValues("cpu11", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), createTagValues("cpu12", map[string][]string{"host": {"tv0", "tv1", "tv2", "tv3"}, "shard": {"s0", "s1", "s2"}}), createTagValues("cpu2", map[string][]string{"shard": {"s2"}}), }, }, } var s *Store setup := func(index string) []uint64 { // returns shard ids s = MustOpenStore(index) fmtStr := `cpu1%[1]d,foo=a,ignoreme=nope,host=tv%[2]d,shard=s%[3]d value=1 %[4]d cpu1%[1]d,host=nofoo value=1 %[4]d mem,host=nothanks value=1 %[4]d cpu%[3]d,shard=s%[3]d,foo=a value=2 %[4]d ` genPoints := func(sid int) []string { var ts int points := make([]string, 0, 3*4) for m := 0; m < 3; m++ { for tagvid := 0; tagvid < 4; tagvid++ { points = append(points, fmt.Sprintf(fmtStr, m, tagvid, sid, ts)) ts++ } } return points } // 
Create data across 3 shards. var ids []uint64 for i := 0; i < 3; i++ { ids = append(ids, uint64(i)) s.MustCreateShardWithData("db0", "rp0", i, genPoints(i)...) } return ids } for _, example := range examples { for _, index := range tsdb.RegisteredIndexes() { shardIDs := setup(index) t.Run(example.Name+"_"+index, func(t *testing.T) { got, err := s.TagValues(nil, shardIDs, example.Expr) if err != nil { t.Fatal(err) } exp := example.Exp if !reflect.DeepEqual(got, exp) { t.Fatalf("got:\n%#v\n\nexp:\n%#v", got, exp) } }) s.Close() } } } func TestStore_Measurements_Auth(t *testing.T) { t.Parallel() test := func(index string) error { s := MustOpenStore(index) defer s.Close() // Create shard #0 with data. s.MustCreateShardWithData("db0", "rp0", 0, `cpu,host=serverA value=1 0`, `cpu,host=serverA value=2 10`, `cpu,region=west value=3 20`, `cpu,secret=foo value=5 30`, // cpu still readable because it has other series that can be read. `mem,secret=foo value=1 30`, `disk value=4 30`, ) authorizer := &internal.AuthorizerMock{ AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool { if database == "" || tags.GetString("secret") != "" { t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags) return false } return true }, } names, err := s.MeasurementNames(authorizer, "db0", nil) if err != nil { return err } // names should not contain any measurements where none of the associated // series are authorised for reads. expNames := 2 var gotNames int for _, name := range names { if string(name) == "mem" { return fmt.Errorf("got measurement %q but it should be filtered.", name) } gotNames++ } if gotNames != expNames { return fmt.Errorf("got %d measurements, but expected %d", gotNames, expNames) } // Now delete all of the cpu series. cond, err := influxql.ParseExpr("host = 'serverA' OR region = 'west'") if err != nil { return err } if err := s.DeleteSeries("db0", nil, cond); err != nil { return err } if names, err = s.MeasurementNames(authorizer, "db0", nil); err != nil { return err } // names should not contain any measurements where none of the associated // series are authorised for reads. expNames = 1 gotNames = 0 for _, name := range names { if string(name) == "mem" || string(name) == "cpu" { return fmt.Errorf("after delete got measurement %q but it should be filtered.", name) } gotNames++ } if gotNames != expNames { return fmt.Errorf("after delete got %d measurements, but expected %d", gotNames, expNames) } return nil } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { if err := test(index); err != nil { t.Fatal(err) } }) } } func TestStore_TagKeys_Auth(t *testing.T) { t.Parallel() test := func(index string) error { s := MustOpenStore(index) defer s.Close() // Create shard #0 with data. s.MustCreateShardWithData("db0", "rp0", 0, `cpu,host=serverA value=1 0`, `cpu,host=serverA,debug=true value=2 10`, `cpu,region=west value=3 20`, `cpu,secret=foo,machine=a value=1 20`, ) authorizer := &internal.AuthorizerMock{ AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool { if database == "" || !bytes.Equal(measurement, []byte("cpu")) || tags.GetString("secret") != "" { t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags) return false } return true }, } keys, err := s.TagKeys(authorizer, []uint64{0}, nil) if err != nil { return err } // keys should not contain any tag keys associated with a series containing // a secret tag. 
expKeys := 3 var gotKeys int for _, tk := range keys { if got, exp := tk.Measurement, "cpu"; got != exp { return fmt.Errorf("got measurement %q, expected %q", got, exp) } for _, key := range tk.Keys { if key == "secret" || key == "machine" { return fmt.Errorf("got tag key %q but it should be filtered.", key) } gotKeys++ } } if gotKeys != expKeys { return fmt.Errorf("got %d keys, but expected %d", gotKeys, expKeys) } // Delete the series with region = west cond, err := influxql.ParseExpr("region = 'west'") if err != nil { return err } if err := s.DeleteSeries("db0", nil, cond); err != nil { return err } if keys, err = s.TagKeys(authorizer, []uint64{0}, nil); err != nil { return err } // keys should not contain any tag keys associated with a series containing // a secret tag or the deleted series expKeys = 2 gotKeys = 0 for _, tk := range keys { if got, exp := tk.Measurement, "cpu"; got != exp { return fmt.Errorf("got measurement %q, expected %q", got, exp) } for _, key := range tk.Keys { if key == "secret" || key == "machine" || key == "region" { return fmt.Errorf("got tag key %q but it should be filtered.", key) } gotKeys++ } } if gotKeys != expKeys { return fmt.Errorf("got %d keys, but expected %d", gotKeys, expKeys) } return nil } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { if err := test(index); err != nil { t.Fatal(err) } }) } } func TestStore_TagValues_Auth(t *testing.T) { t.Parallel() test := func(index string) error { s := MustOpenStore(index) defer s.Close() // Create shard #0 with data. s.MustCreateShardWithData("db0", "rp0", 0, `cpu,host=serverA value=1 0`, `cpu,host=serverA value=2 10`, `cpu,host=serverB value=3 20`, `cpu,secret=foo,host=serverD value=1 20`, ) authorizer := &internal.AuthorizerMock{ AuthorizeSeriesReadFn: func(database string, measurement []byte, tags models.Tags) bool { if database == "" || !bytes.Equal(measurement, []byte("cpu")) || tags.GetString("secret") != "" { t.Logf("Rejecting series db=%s, m=%s, tags=%v", database, measurement, tags) return false } return true }, } values, err := s.TagValues(authorizer, []uint64{0}, &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "_tagKey"}, RHS: &influxql.StringLiteral{Val: "host"}, }) if err != nil { return err } // values should not contain any tag values associated with a series containing // a secret tag. expValues := 2 var gotValues int for _, tv := range values { if got, exp := tv.Measurement, "cpu"; got != exp { return fmt.Errorf("got measurement %q, expected %q", got, exp) } for _, v := range tv.Values { if got, exp := v.Value, "serverD"; got == exp { return fmt.Errorf("got tag value %q but it should be filtered.", got) } gotValues++ } } if gotValues != expValues { return fmt.Errorf("got %d tags, but expected %d", gotValues, expValues) } // Delete the series with values serverA cond, err := influxql.ParseExpr("host = 'serverA'") if err != nil { return err } if err := s.DeleteSeries("db0", nil, cond); err != nil { return err } values, err = s.TagValues(authorizer, []uint64{0}, &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "_tagKey"}, RHS: &influxql.StringLiteral{Val: "host"}, }) if err != nil { return err } // values should not contain any tag values associated with a series containing // a secret tag. 
expValues = 1 gotValues = 0 for _, tv := range values { if got, exp := tv.Measurement, "cpu"; got != exp { return fmt.Errorf("got measurement %q, expected %q", got, exp) } for _, v := range tv.Values { if got, exp := v.Value, "serverD"; got == exp { return fmt.Errorf("got tag value %q but it should be filtered.", got) } else if got, exp := v.Value, "serverA"; got == exp { return fmt.Errorf("got tag value %q but it should be filtered.", got) } gotValues++ } } if gotValues != expValues { return fmt.Errorf("got %d values, but expected %d", gotValues, expValues) } return nil } for _, index := range tsdb.RegisteredIndexes() { t.Run(index, func(t *testing.T) { if err := test(index); err != nil { t.Fatal(err) } }) } } // Helper to create some tag values func createTagValues(mname string, kvs map[string][]string) tsdb.TagValues { var sz int for _, v := range kvs { sz += len(v) } out := tsdb.TagValues{ Measurement: mname, Values: make([]tsdb.KeyValue, 0, sz), } for tk, tvs := range kvs { for _, tv := range tvs { out.Values = append(out.Values, tsdb.KeyValue{Key: tk, Value: tv}) } // We have to sort the KeyValues since that's how they're provided from // the tsdb.Store. sort.Sort(tsdb.KeyValues(out.Values)) } return out } func TestStore_MeasurementNames_ConcurrentDropShard(t *testing.T) { for _, index := range tsdb.RegisteredIndexes() { s := MustOpenStore(index) defer s.Close() shardN := 10 for i := 0; i < shardN; i++ { // Create new shards with some data s.MustCreateShardWithData("db0", "rp0", i, `cpu,host=serverA value=1 30`, `mem,region=west value=2 40`, // skip: wrong source `cpu,host=serverC value=3 60`, ) } done := make(chan struct{}) errC := make(chan error, 2) // Randomly close and open the shards. go func() { for { select { case <-done: errC <- nil return default: i := uint64(rand.Intn(int(shardN))) if sh := s.Shard(i); sh == nil { errC <- errors.New("shard should not be nil") return } else { if err := sh.Close(); err != nil { errC <- err return } time.Sleep(500 * time.Microsecond) if err := sh.Open(); err != nil { errC <- err return } } } } }() // Attempt to get tag keys from the shards. go func() { for { select { case <-done: errC <- nil return default: names, err := s.MeasurementNames(nil, "db0", nil) if err == tsdb.ErrIndexClosing || err == tsdb.ErrEngineClosed { continue // These errors are expected } if err != nil { errC <- err return } if got, exp := names, slices.StringsToBytes("cpu", "mem"); !reflect.DeepEqual(got, exp) { errC <- fmt.Errorf("got keys %v, expected %v", got, exp) return } } } }() // Run for 500ms time.Sleep(500 * time.Millisecond) close(done) // Check for errors. if err := <-errC; err != nil { t.Fatal(err) } if err := <-errC; err != nil { t.Fatal(err) } } } func TestStore_TagKeys_ConcurrentDropShard(t *testing.T) { for _, index := range tsdb.RegisteredIndexes() { s := MustOpenStore(index) defer s.Close() shardN := 10 for i := 0; i < shardN; i++ { // Create new shards with some data s.MustCreateShardWithData("db0", "rp0", i, `cpu,host=serverA value=1 30`, `mem,region=west value=2 40`, // skip: wrong source `cpu,host=serverC value=3 60`, ) } done := make(chan struct{}) errC := make(chan error, 2) // Randomly close and open the shards. 
go func() { for { select { case <-done: errC <- nil return default: i := uint64(rand.Intn(int(shardN))) if sh := s.Shard(i); sh == nil { errC <- errors.New("shard should not be nil") return } else { if err := sh.Close(); err != nil { errC <- err return } time.Sleep(500 * time.Microsecond) if err := sh.Open(); err != nil { errC <- err return } } } } }() // Attempt to get tag keys from the shards. go func() { for { select { case <-done: errC <- nil return default: keys, err := s.TagKeys(nil, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, nil) if err == tsdb.ErrIndexClosing || err == tsdb.ErrEngineClosed { continue // These errors are expected } if err != nil { errC <- err return } if got, exp := keys[0].Keys, []string{"host"}; !reflect.DeepEqual(got, exp) { errC <- fmt.Errorf("got keys %v, expected %v", got, exp) return } if got, exp := keys[1].Keys, []string{"region"}; !reflect.DeepEqual(got, exp) { errC <- fmt.Errorf("got keys %v, expected %v", got, exp) return } } } }() // Run for 500ms time.Sleep(500 * time.Millisecond) close(done) // Check for errors if err := <-errC; err != nil { t.Fatal(err) } if err := <-errC; err != nil { t.Fatal(err) } } } func TestStore_TagValues_ConcurrentDropShard(t *testing.T) { for _, index := range tsdb.RegisteredIndexes() { s := MustOpenStore(index) defer s.Close() shardN := 10 for i := 0; i < shardN; i++ { // Create new shards with some data s.MustCreateShardWithData("db0", "rp0", i, `cpu,host=serverA value=1 30`, `mem,region=west value=2 40`, // skip: wrong source `cpu,host=serverC value=3 60`, ) } done := make(chan struct{}) errC := make(chan error, 2) // Randomly close and open the shards. go func() { for { select { case <-done: errC <- nil return default: i := uint64(rand.Intn(int(shardN))) if sh := s.Shard(i); sh == nil { errC <- errors.New("shard should not be nil") return } else { if err := sh.Close(); err != nil { errC <- err return } time.Sleep(500 * time.Microsecond) if err := sh.Open(); err != nil { errC <- err return } } } } }() // Attempt to get tag keys from the shards. go func() { for { select { case <-done: errC <- nil return default: stmt, err := influxql.ParseStatement(`SHOW TAG VALUES WITH KEY = "host"`) if err != nil { t.Fatal(err) } rewrite, err := query.RewriteStatement(stmt) if err != nil { t.Fatal(err) } cond := rewrite.(*influxql.ShowTagValuesStatement).Condition values, err := s.TagValues(nil, []uint64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, cond) if err == tsdb.ErrIndexClosing || err == tsdb.ErrEngineClosed { continue // These errors are expected } if err != nil { errC <- err return } exp := tsdb.TagValues{ Measurement: "cpu", Values: []tsdb.KeyValue{ tsdb.KeyValue{Key: "host", Value: "serverA"}, tsdb.KeyValue{Key: "host", Value: "serverC"}, }, } if got := values[0]; !reflect.DeepEqual(got, exp) { errC <- fmt.Errorf("got keys %v, expected %v", got, exp) return } } } }() // Run for 500ms time.Sleep(500 * time.Millisecond) close(done) // Check for errors if err := <-errC; err != nil { t.Fatal(err) } if err := <-errC; err != nil { t.Fatal(err) } } } func BenchmarkStore_SeriesCardinality_100_Shards(b *testing.B) { for _, index := range tsdb.RegisteredIndexes() { store := NewStore(index) if err := store.Open(); err != nil { panic(err) } // Write a point to n shards. 
for shardID := 0; shardID < 100; shardID++ { if err := store.CreateShard("db", "rp", uint64(shardID), true); err != nil { b.Fatalf("create shard: %s", err) } err := store.WriteToShard(uint64(shardID), []models.Point{models.MustNewPoint("cpu", nil, map[string]interface{}{"value": 1.0}, time.Now())}) if err != nil { b.Fatalf("write: %s", err) } } b.Run(store.EngineOptions.IndexVersion, func(b *testing.B) { for i := 0; i < b.N; i++ { _, _ = store.SeriesCardinality("db") } }) store.Close() } } func BenchmarkStoreOpen_200KSeries_100Shards(b *testing.B) { benchmarkStoreOpen(b, 64, 5, 5, 1, 100) } func benchmarkStoreOpen(b *testing.B, mCnt, tkCnt, tvCnt, pntCnt, shardCnt int) { var store *Store setup := func(index string) error { store := MustOpenStore(index) // Generate test series (measurements + unique tag sets). series := genTestSeries(mCnt, tkCnt, tvCnt) // Generate point data to write to the shards. points := []models.Point{} for _, s := range series { for val := 0.0; val < float64(pntCnt); val++ { p := models.MustNewPoint(s.Measurement, s.Tags, map[string]interface{}{"value": val}, time.Now()) points = append(points, p) } } // Create requested number of shards in the store & write points. for shardID := 0; shardID < shardCnt; shardID++ { if err := store.CreateShard("mydb", "myrp", uint64(shardID), true); err != nil { return fmt.Errorf("create shard: %s", err) } if err := store.BatchWrite(shardID, points); err != nil { return fmt.Errorf("batch write: %s", err) } } return nil } for _, index := range tsdb.RegisteredIndexes() { if err := setup(index); err != nil { b.Fatal(err) } b.Run(store.EngineOptions.IndexVersion, func(b *testing.B) { for n := 0; n < b.N; n++ { store := tsdb.NewStore(store.Path()) if err := store.Open(); err != nil { b.Fatalf("open store error: %s", err) } b.StopTimer() store.Close() b.StartTimer() } }) os.RemoveAll(store.Path()) } } // To store result of benchmark (ensure allocated on heap). var tvResult []tsdb.TagValues func BenchmarkStore_TagValues(b *testing.B) { benchmarks := []struct { name string shards int measurements int tagValues int }{ {name: "s=1_m=1_v=100", shards: 1, measurements: 1, tagValues: 100}, {name: "s=1_m=1_v=1000", shards: 1, measurements: 1, tagValues: 1000}, {name: "s=1_m=10_v=100", shards: 1, measurements: 10, tagValues: 100}, {name: "s=1_m=10_v=1000", shards: 1, measurements: 10, tagValues: 1000}, {name: "s=1_m=100_v=100", shards: 1, measurements: 100, tagValues: 100}, {name: "s=1_m=100_v=1000", shards: 1, measurements: 100, tagValues: 1000}, {name: "s=10_m=1_v=100", shards: 10, measurements: 1, tagValues: 100}, {name: "s=10_m=1_v=1000", shards: 10, measurements: 1, tagValues: 1000}, {name: "s=10_m=10_v=100", shards: 10, measurements: 10, tagValues: 100}, {name: "s=10_m=10_v=1000", shards: 10, measurements: 10, tagValues: 1000}, {name: "s=10_m=100_v=100", shards: 10, measurements: 100, tagValues: 100}, {name: "s=10_m=100_v=1000", shards: 10, measurements: 100, tagValues: 1000}, } var s *Store setup := func(shards, measurements, tagValues int, index string, useRandom bool) []uint64 { // returns shard ids s := NewStore(index) if err := s.Open(); err != nil { panic(err) } fmtStr := `cpu%[1]d,host=tv%[2]d,shard=s%[3]d,z1=s%[1]d%[2]d,z2=%[4]s value=1 %[5]d` // genPoints generates some point data. If ran is true then random tag // key values will be generated, meaning more work sorting and merging. 
// If ran is false, then the same set of points will be produced for the // same set of parameters, meaning more de-duplication of points will be // needed. genPoints := func(sid int, ran bool) []string { var v, ts int var half string points := make([]string, 0, measurements*tagValues) for m := 0; m < measurements; m++ { for tagvid := 0; tagvid < tagValues; tagvid++ { v = tagvid if ran { v = rand.Intn(100000) } half = fmt.Sprint(rand.Intn(2) == 0) points = append(points, fmt.Sprintf(fmtStr, m, v, sid, half, ts)) ts++ } } return points } // Create data across chosen number of shards. var shardIDs []uint64 for i := 0; i < shards; i++ { shardIDs = append(shardIDs, uint64(i)) s.MustCreateShardWithData("db0", "rp0", i, genPoints(i, useRandom)...) } return shardIDs } teardown := func() { if err := s.Close(); err != nil { b.Fatal(err) } } // SHOW TAG VALUES WITH KEY IN ("host", "shard") cond1 := &influxql.ParenExpr{ Expr: &influxql.BinaryExpr{ Op: influxql.OR, LHS: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "_tagKey"}, RHS: &influxql.StringLiteral{Val: "host"}, }, RHS: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "_tagKey"}, RHS: &influxql.StringLiteral{Val: "shard"}, }, }, } cond2 := &influxql.ParenExpr{ Expr: &influxql.BinaryExpr{ Op: influxql.AND, LHS: &influxql.ParenExpr{ Expr: &influxql.BinaryExpr{ Op: influxql.EQ, LHS: &influxql.VarRef{Val: "z2"}, RHS: &influxql.StringLiteral{Val: "true"}, }, }, RHS: cond1, }, } var err error for _, index := range tsdb.RegisteredIndexes() { for useRand := 0; useRand < 2; useRand++ { for c, condition := range []influxql.Expr{cond1, cond2} { for _, bm := range benchmarks { shardIDs := setup(bm.shards, bm.measurements, bm.tagValues, index, useRand == 1) cnd := "Unfiltered" if c == 0 { cnd = "Filtered" } b.Run("random_values="+fmt.Sprint(useRand == 1)+"_index="+index+"_"+cnd+"_"+bm.name, func(b *testing.B) { for i := 0; i < b.N; i++ { if tvResult, err = s.TagValues(nil, shardIDs, condition); err != nil { b.Fatal(err) } } }) teardown() } } } } } // Store is a test wrapper for tsdb.Store. type Store struct { *tsdb.Store index string } // NewStore returns a new instance of Store with a temporary path. func NewStore(index string) *Store { path, err := ioutil.TempDir("", "influxdb-tsdb-") if err != nil { panic(err) } s := &Store{Store: tsdb.NewStore(path), index: index} s.EngineOptions.IndexVersion = index s.EngineOptions.Config.WALDir = filepath.Join(path, "wal") s.EngineOptions.Config.TraceLoggingEnabled = true if testing.Verbose() { s.WithLogger(logger.New(os.Stdout)) } return s } // MustOpenStore returns a new, open Store using the specified index, // at a temporary path. func MustOpenStore(index string) *Store { s := NewStore(index) if err := s.Open(); err != nil { panic(err) } return s } // Reopen closes and reopens the store as a new store. func (s *Store) Reopen() error { if err := s.Store.Close(); err != nil { return err } s.Store = tsdb.NewStore(s.Path()) s.EngineOptions.IndexVersion = s.index s.EngineOptions.Config.WALDir = filepath.Join(s.Path(), "wal") s.EngineOptions.Config.TraceLoggingEnabled = true if testing.Verbose() { s.WithLogger(logger.New(os.Stdout)) } return s.Store.Open() } // Close closes the store and removes the underlying data. func (s *Store) Close() error { defer os.RemoveAll(s.Path()) return s.Store.Close() } // MustCreateShardWithData creates a shard and writes line protocol data to it. 
func (s *Store) MustCreateShardWithData(db, rp string, shardID int, data ...string) { if err := s.CreateShard(db, rp, uint64(shardID), true); err != nil { panic(err) } s.MustWriteToShardString(shardID, data...) } // MustWriteToShardString parses the line protocol (with second precision) and // inserts the resulting points into a shard. Panic on error. func (s *Store) MustWriteToShardString(shardID int, data ...string) { var points []models.Point for i := range data { a, err := models.ParsePointsWithPrecision([]byte(strings.TrimSpace(data[i])), time.Time{}, "s") if err != nil { panic(err) } points = append(points, a...) } if err := s.WriteToShard(uint64(shardID), points); err != nil { panic(err) } } // BatchWrite writes points to a shard in chunks. func (s *Store) BatchWrite(shardID int, points []models.Point) error { nPts := len(points) chunkSz := 10000 start := 0 end := chunkSz for { if end > nPts { end = nPts } if end-start == 0 { break } if err := s.WriteToShard(uint64(shardID), points[start:end]); err != nil { return err } start = end end += chunkSz } return nil } // ParseTags returns an instance of Tags for a comma-delimited list of key/values. func ParseTags(s string) query.Tags { m := make(map[string]string) for _, kv := range strings.Split(s, ",") { a := strings.Split(kv, "=") m[a[0]] = a[1] } return query.NewTags(m) } func dirExists(path string) bool { var err error if _, err = os.Stat(path); err == nil { return true } return !os.IsNotExist(err) }
[ "\"GORACE\"", "\"APPVEYOR\"", "\"GORACE\"", "\"APPVEYOR\"", "\"GORACE\"", "\"APPVEYOR\"", "\"GORACE\"", "\"APPVEYOR\"" ]
[]
[ "APPVEYOR", "GORACE" ]
[]
["APPVEYOR", "GORACE"]
go
2
0
Project/train/train.py
import argparse
import json
import os
import pickle
import sys

import sagemaker_containers
import pandas as pd
import torch
import torch.optim as optim
import torch.utils.data

from model import LSTMClassifier

def model_fn(model_dir):
    """Load the PyTorch model from the `model_dir` directory."""
    print("Loading model.")

    # First, load the parameters used to create the model.
    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)

    print("model_info: {}".format(model_info))

    # Determine the device and construct the model.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = LSTMClassifier(model_info['embedding_dim'], model_info['hidden_dim'], model_info['vocab_size'])

    # Load the stored model parameters.
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f))

    # Load the saved word_dict.
    word_dict_path = os.path.join(model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'rb') as f:
        model.word_dict = pickle.load(f)

    model.to(device).eval()

    print("Done loading model.")
    return model

def _get_train_data_loader(batch_size, training_dir):
    print("Get train data loader.")

    train_data = pd.read_csv(os.path.join(training_dir, "train.csv"), header=None, names=None)

    train_y = torch.from_numpy(train_data[[0]].values).float().squeeze()
    train_X = torch.from_numpy(train_data.drop([0], axis=1).values).long()

    train_ds = torch.utils.data.TensorDataset(train_X, train_y)

    return torch.utils.data.DataLoader(train_ds, batch_size=batch_size)

def train(model, train_loader, epochs, optimizer, loss_fn, device):
    """
    This is the training method that is called by the PyTorch training script. The parameters
    passed are as follows:
    model        - The PyTorch model that we wish to train.
    train_loader - The PyTorch DataLoader that should be used during training.
    epochs       - The total number of epochs to train for.
    optimizer    - The optimizer to use during training.
    loss_fn      - The loss function used for training.
    device       - Where the model and data should be loaded (gpu or cpu).
    """

    # TODO: Paste the train() method developed in the notebook here.
    for epoch in range(1, epochs + 1):
        model.train()
        total_loss = 0
        for batch in train_loader:
            batch_X, batch_y = batch

            batch_X = batch_X.to(device)
            batch_y = batch_y.to(device)

            # TODO: Complete this train method to train the model provided.
            optimizer.zero_grad()
            sentence_in = batch_X
            targets = batch_y
            tag_scores = model(sentence_in)
            loss = loss_fn(tag_scores, targets)
            loss.backward()
            optimizer.step()

            total_loss += loss.data.item()
        print("Epoch: {}, BCELoss: {}".format(epoch, total_loss / len(train_loader)))

if __name__ == '__main__':
    # All of the model parameters and training parameters are sent as arguments when the script
    # is executed. Here we set up an argument parser to easily access the parameters.
    parser = argparse.ArgumentParser()

    # Training Parameters
    parser.add_argument('--batch-size', type=int, default=512, metavar='N',
                        help='input batch size for training (default: 512)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')

    # Model Parameters
    parser.add_argument('--embedding_dim', type=int, default=32, metavar='N',
                        help='size of the word embeddings (default: 32)')
    parser.add_argument('--hidden_dim', type=int, default=100, metavar='N',
                        help='size of the hidden dimension (default: 100)')
    parser.add_argument('--vocab_size', type=int, default=5000, metavar='N',
                        help='size of the vocabulary (default: 5000)')

    # SageMaker Parameters
    parser.add_argument('--hosts', type=list, default=json.loads(os.environ['SM_HOSTS']))
    parser.add_argument('--current-host', type=str, default=os.environ['SM_CURRENT_HOST'])
    parser.add_argument('--model-dir', type=str, default=os.environ['SM_MODEL_DIR'])
    parser.add_argument('--data-dir', type=str, default=os.environ['SM_CHANNEL_TRAINING'])
    parser.add_argument('--num-gpus', type=int, default=os.environ['SM_NUM_GPUS'])

    args = parser.parse_args()

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print("Using device {}.".format(device))

    torch.manual_seed(args.seed)

    # Load the training data.
    train_loader = _get_train_data_loader(args.batch_size, args.data_dir)

    # Build the model.
    model = LSTMClassifier(args.embedding_dim, args.hidden_dim, args.vocab_size).to(device)

    with open(os.path.join(args.data_dir, "word_dict.pkl"), "rb") as f:
        model.word_dict = pickle.load(f)

    print("Model loaded with embedding_dim {}, hidden_dim {}, vocab_size {}.".format(
        args.embedding_dim, args.hidden_dim, args.vocab_size
    ))

    # Train the model.
    optimizer = optim.Adam(model.parameters())
    loss_fn = torch.nn.BCELoss()

    train(model, train_loader, args.epochs, optimizer, loss_fn, device)

    # Save the parameters used to construct the model
    model_info_path = os.path.join(args.model_dir, 'model_info.pth')
    with open(model_info_path, 'wb') as f:
        model_info = {
            'embedding_dim': args.embedding_dim,
            'hidden_dim': args.hidden_dim,
            'vocab_size': args.vocab_size,
        }
        torch.save(model_info, f)

    # Save the word_dict
    word_dict_path = os.path.join(args.model_dir, 'word_dict.pkl')
    with open(word_dict_path, 'wb') as f:
        pickle.dump(model.word_dict, f)

    # Save the model parameters
    model_path = os.path.join(args.model_dir, 'model.pth')
    with open(model_path, 'wb') as f:
        torch.save(model.cpu().state_dict(), f)
[]
[]
[ "SM_MODEL_DIR", "SM_NUM_GPUS", "SM_CURRENT_HOST", "SM_CHANNEL_TRAINING", "SM_HOSTS" ]
[]
["SM_MODEL_DIR", "SM_NUM_GPUS", "SM_CURRENT_HOST", "SM_CHANNEL_TRAINING", "SM_HOSTS"]
python
5
0
examples/pyclient_mnist.py
import time
import argparse
import numpy as np
import sys
import os

from tensorflow.examples.tutorials.mnist import input_data

import he_seal_client

FLAGS = None

def test_mnist_cnn(FLAGS):
    mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)

    batch_size = FLAGS.batch_size
    x_test_batch = mnist.test.images[:batch_size]
    y_test_batch = mnist.test.labels[:batch_size]

    data = x_test_batch.flatten('F')
    print('Client batch size from FLAG: ', batch_size)

    complex_scale_factor = 1
    if ('NGRAPH_COMPLEX_PACK' in os.environ):
        complex_scale_factor = 2

    print('complex_scale_factor', complex_scale_factor)

    # TODO: support even batch sizes
    assert (batch_size % complex_scale_factor == 0)

    hostname = 'localhost'
    port = 34000

    new_batch_size = batch_size // complex_scale_factor
    print('new_batch_size', new_batch_size)

    client = he_seal_client.HESealClient(hostname, port, new_batch_size, data)

    print('Sleeping until client is done')
    while not client.is_done():
        time.sleep(1)

    results = client.get_results()
    results = np.round(results, 2)

    y_pred_reshape = np.array(results).reshape(10, batch_size)
    with np.printoptions(precision=3, suppress=True):
        print(y_pred_reshape.T)

    y_pred = y_pred_reshape.argmax(axis=0)
    print('y_pred', y_pred)
    y_true = y_test_batch.argmax(axis=1)

    correct = np.sum(np.equal(y_pred, y_true))
    acc = correct / float(batch_size)
    print('pred size', len(y_pred))
    print('correct', correct)
    print('Accuracy (batch size', batch_size, ') =', acc * 100., '%')

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--data_dir',
        type=str,
        default='/tmp/tensorflow/mnist/input_data',
        help='Directory where input data is stored')
    parser.add_argument('--batch_size', type=int, default=1, help='Batch size')

    FLAGS, unparsed = parser.parse_known_args()

    test_mnist_cnn(FLAGS)
[]
[]
[]
[]
[]
python
0
0
allennlp/common/testing/test_case.py
import logging
import os
import pathlib
import shutil
import tempfile

from allennlp.common.checks import log_pytorch_version_info

TEST_DIR = tempfile.mkdtemp(prefix="allennlp_tests")

class AllenNlpTestCase:
    """
    A custom testing class that disables some of the more verbose AllenNLP
    logging and that creates and destroys a temp directory as a test fixture.
    """

    PROJECT_ROOT = (pathlib.Path(__file__).parent / ".." / ".." / "..").resolve()
    MODULE_ROOT = PROJECT_ROOT / "allennlp"
    TOOLS_ROOT = MODULE_ROOT / "tools"

    # to run test suite with finished package, which does not contain
    # tests & fixtures, we must be able to look them up somewhere else
    PROJECT_ROOT_FALLBACK = (
        # users wanting to run test suite for installed package
        pathlib.Path(os.environ["ALLENNLP_SRC_DIR"])
        if "ALLENNLP_SRC_DIR" in os.environ
        else (
            # fallback for conda packaging
            pathlib.Path(os.environ["SRC_DIR"])
            if "CONDA_BUILD" in os.environ
            # stay in-tree
            else PROJECT_ROOT
        )
    )

    TESTS_ROOT = PROJECT_ROOT_FALLBACK / "tests"
    FIXTURES_ROOT = PROJECT_ROOT_FALLBACK / "test_fixtures"

    def setup_method(self):
        logging.basicConfig(
            format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", level=logging.DEBUG
        )
        # Disabling some of the more verbose logging statements that typically aren't very helpful
        # in tests.
        logging.getLogger("allennlp.common.params").disabled = True
        logging.getLogger("allennlp.nn.initializers").disabled = True
        logging.getLogger("allennlp.modules.token_embedders.embedding").setLevel(logging.INFO)
        logging.getLogger("urllib3.connectionpool").disabled = True
        log_pytorch_version_info()

        self.TEST_DIR = pathlib.Path(TEST_DIR)
        os.makedirs(self.TEST_DIR, exist_ok=True)

    def teardown_method(self):
        shutil.rmtree(self.TEST_DIR)
[]
[]
[ "ALLENNLP_SRC_DIR", "SRC_DIR" ]
[]
["ALLENNLP_SRC_DIR", "SRC_DIR"]
python
2
0
molecule/check-iptables-nat/tests/test_default.py
import os

import testinfra.utils.ansible_runner

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("instance")

def test_iptables_filter(host):
    cmd = host.run_expect(
        expected=[0],
        command="iptables --table filter --list-rules",
    )
    stdout_lines = cmd.stdout.splitlines()
    assert "-N DOCKER" in stdout_lines

def test_iptables_nat(host):
    cmd = host.run_expect(
        expected=[0],
        command="iptables --table nat --list-rules",
    )
    stdout_lines = cmd.stdout.splitlines()
    assert "-N DOCKER" in stdout_lines
[]
[]
[ "MOLECULE_INVENTORY_FILE" ]
[]
["MOLECULE_INVENTORY_FILE"]
python
1
0