Dataset columns:

  filename          string, length 4–198
  content           string, length 25–939k
  environment       list
  variablearg       list
  constarg          list
  variableargjson   string, 1 distinct value
  constargjson      string, length 2–3.9k
  lang              string, 3 distinct values
  constargcount     float64, range 0–129
  variableargcount  float64, range 0–0
  sentence          string, 1 distinct value
filename: config/config.go
package config import ( "bytes" "fmt" "html/template" "io/ioutil" "net/url" "os" "path/filepath" "regexp" "strings" "github.com/k1LoW/tbls/schema" "github.com/pkg/errors" "gopkg.in/yaml.v2" ) const defaultConfigFilePath = ".tbls.yml" const defaultDocPath = "dbdoc" // DefaultERFormat is default ER diagram format const DefaultERFormat = "png" // Config is tbls config type Config struct { DSN string `yaml:"dsn"` DocPath string `yaml:"docPath"` Format Format `yaml:"format"` ER ER `yaml:"er"` Exclude []string `yaml:"exclude"` Lint Lint `yaml:"lint"` LintExclude []string `yaml:"lintExclude"` Relations []AdditionalRelation `yaml:"relations"` Comments []AdditionalComment `yaml:"comments"` } // Format is document format setting type Format struct { Adjust bool `yaml:"adjust"` Sort bool `yaml:"sort"` } // ER is er diagram setting type ER struct { Skip bool `yaml:"skip"` Format string `yaml:"format"` Comment bool `yaml:"comment"` } // AdditionalRelation is the struct for table relation from yaml type AdditionalRelation struct { Table string `yaml:"table"` Columns []string `yaml:"columns"` ParentTable string `yaml:"parentTable"` ParentColumns []string `yaml:"parentColumns"` Def string `yaml:"def"` } // AdditionalComment is the struct for table relation from yaml type AdditionalComment struct { Table string `yaml:"table"` TableComment string `yaml:"tableComment"` ColumnComments map[string]string `yaml:"columnComments"` } // Option function change Config type Option func(*Config) error // DSN return Option set Config.DSN func DSN(dsn string) Option { return func(c *Config) error { c.DSN = dsn return nil } } // DocPath return Option set Config.DocPath func DocPath(docPath string) Option { return func(c *Config) error { c.DocPath = docPath return nil } } // Adjust return Option set Config.Format.Adjust func Adjust(adjust bool) Option { return func(c *Config) error { if adjust { c.Format.Adjust = adjust } return nil } } // Sort return Option set Config.Format.Sort func Sort(sort bool) Option { return func(c *Config) error { if sort { c.Format.Sort = sort } return nil } } // ERSkip return Option set Config.ER.Skip func ERSkip(skip bool) Option { return func(c *Config) error { c.ER.Skip = skip return nil } } // ERFormat return Option set Config.ER.Format func ERFormat(erFormat string) Option { return func(c *Config) error { if erFormat != "" { c.ER.Format = erFormat } return nil } } // NewConfig return Config func NewConfig() (*Config, error) { c := Config{ DSN: "", DocPath: "", } return &c, nil } // Load load config with all method func (c *Config) Load(configPath string, options ...Option) error { err := c.LoadConfigFile(configPath) if err != nil { return err } err = c.LoadEnviron() if err != nil { return err } for _, option := range options { err = option(c) if err != nil { return err } } if c.DocPath == "" { c.DocPath = defaultDocPath } if c.ER.Format == "" { c.ER.Format = DefaultERFormat } return nil } // LoadEnviron load environment variables func (c *Config) LoadEnviron() error { dsn := os.Getenv("TBLS_DSN") if dsn != "" { c.DSN = dsn } docPath := os.Getenv("TBLS_DOC_PATH") if docPath != "" { c.DocPath = docPath } return nil } // LoadConfigFile load config file func (c *Config) LoadConfigFile(path string) error { if path == "" { path = defaultConfigFilePath if _, err := os.Lstat(path); err != nil { return nil } } fullPath, err := filepath.Abs(path) if err != nil { return errors.Wrap(errors.WithStack(err), "failed to load config file") } buf, err := ioutil.ReadFile(filepath.Clean(fullPath)) if err != 
nil { return errors.Wrap(errors.WithStack(err), "failed to load config file") } err = yaml.Unmarshal(buf, c) if err != nil { return errors.Wrap(errors.WithStack(err), "failed to load config file") } c.DSN, err = parseWithEnviron(c.DSN) if err != nil { return errors.Wrap(errors.WithStack(err), "failed to load config file") } c.DocPath, err = parseWithEnviron(c.DocPath) if err != nil { return errors.Wrap(errors.WithStack(err), "failed to load config file") } return nil } // ModifySchema modify schema.Schema by config func (c *Config) ModifySchema(s *schema.Schema) error { err := c.MergeAdditionalData(s) if err != nil { return err } err = c.ExcludeTables(s) if err != nil { return err } if c.Format.Sort { err = s.Sort() if err != nil { return err } } return nil } // MergeAdditionalData merge additional* to schema.Schema func (c *Config) MergeAdditionalData(s *schema.Schema) error { err := mergeAdditionalRelations(s, c.Relations) if err != nil { return err } err = mergeAdditionalComments(s, c.Comments) if err != nil { return err } return nil } // ExcludeTables exclude tables from schema.Schema func (c *Config) ExcludeTables(s *schema.Schema) error { for _, e := range c.Exclude { for _, r := range s.Relations { if r.ParentTable.Name == e { return errors.New(fmt.Sprintf("failed to exclude table '%s': '%s' is related by '%s'", e, e, r.Table.Name)) } } err := excludeTableFromSchema(e, s) if err != nil { return errors.Wrap(errors.WithStack(err), fmt.Sprintf("failed to exclude table '%s'", e)) } } return nil } func excludeTableFromSchema(name string, s *schema.Schema) error { // Tables tables := []*schema.Table{} for _, t := range s.Tables { if t.Name != name { tables = append(tables, t) } for _, c := range t.Columns { // ChildRelations childRelations := []*schema.Relation{} for _, r := range c.ChildRelations { if r.Table.Name != name { childRelations = append(childRelations, r) } } c.ChildRelations = childRelations // ParentRelations parentRelations := []*schema.Relation{} for _, r := range c.ParentRelations { if r.Table.Name != name { parentRelations = append(parentRelations, r) } } c.ParentRelations = parentRelations } } s.Tables = tables // Relations relations := []*schema.Relation{} for _, r := range s.Relations { if r.Table.Name != name { relations = append(relations, r) } } s.Relations = relations return nil } // MaskedDSN return DSN mask password func (c *Config) MaskedDSN() (string, error) { u, err := url.Parse(c.DSN) if err != nil { return c.DSN, errors.WithStack(err) } tmp := "-----tbls-----" u.User = url.UserPassword(u.User.Username(), tmp) return strings.Replace(u.String(), tmp, "*****", 1), nil } func mergeAdditionalRelations(s *schema.Schema, relations []AdditionalRelation) error { for _, r := range relations { relation := &schema.Relation{ Virtual: true, } if r.Def != "" { relation.Def = r.Def } else { relation.Def = "Additional Relation" } var err error relation.Table, err = s.FindTableByName(r.Table) if err != nil { return errors.Wrap(err, "failed to add relation") } for _, c := range r.Columns { column, err := relation.Table.FindColumnByName(c) if err != nil { return errors.Wrap(err, "failed to add relation") } relation.Columns = append(relation.Columns, column) column.ParentRelations = append(column.ParentRelations, relation) } relation.ParentTable, err = s.FindTableByName(r.ParentTable) if err != nil { return errors.Wrap(err, "failed to add relation") } for _, c := range r.ParentColumns { column, err := relation.ParentTable.FindColumnByName(c) if err != nil { return 
errors.Wrap(err, "failed to add relation") } relation.ParentColumns = append(relation.ParentColumns, column) column.ChildRelations = append(column.ChildRelations, relation) } s.Relations = append(s.Relations, relation) } return nil } func mergeAdditionalComments(s *schema.Schema, comments []AdditionalComment) error { for _, c := range comments { table, err := s.FindTableByName(c.Table) if err != nil { return errors.Wrap(err, "failed to add table comment") } if c.TableComment != "" { table.Comment = c.TableComment } for c, comment := range c.ColumnComments { column, err := table.FindColumnByName(c) if err != nil { return errors.Wrap(err, "failed to add column comment") } column.Comment = comment } } return nil } func parseWithEnviron(v string) (string, error) { r := regexp.MustCompile(`\${\s*([^{}]+)\s*}`) r2 := regexp.MustCompile(`{{([^\.])`) r3 := regexp.MustCompile(`__TBLS__(.)`) replaced := r.ReplaceAllString(v, "{{.$1}}") replaced2 := r2.ReplaceAllString(replaced, "__TBLS__$1") tmpl, err := template.New("config").Parse(replaced2) if err != nil { return "", err } buf := &bytes.Buffer{} err = tmpl.Execute(buf, envMap()) if err != nil { return "", err } return r3.ReplaceAllString(buf.String(), "{{$1"), nil } func envMap() map[string]string { m := map[string]string{} for _, kv := range os.Environ() { if !strings.Contains(kv, "=") { continue } parts := strings.SplitN(kv, "=", 2) k := parts[0] if len(parts) < 2 { m[k] = "" continue } m[k] = parts[1] } return m }
[ "\"TBLS_DSN\"", "\"TBLS_DOC_PATH\"" ]
[]
[ "TBLS_DSN", "TBLS_DOC_PATH" ]
[]
["TBLS_DSN", "TBLS_DOC_PATH"]
go
2
0
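Each record pairs a source file (filename, content, lang) with the environment-variable names referenced in it (environment, constarg, variablearg), plus JSON-encoded copies and counts of those lists. Below is a minimal sketch of how one record could be consumed, using the literal values from the config/config.go record above; the dict literal stands in for however the rows are actually loaded, which is not specified here:

```python
import json

# Field values copied from the config/config.go record above; the dict
# is a stand-in for however the dataset rows are actually loaded.
record = {
    "filename": "config/config.go",
    "lang": "go",
    "constargjson": '["TBLS_DSN", "TBLS_DOC_PATH"]',
    "variableargjson": "[]",
    "constargcount": 2.0,   # the schema stores the counts as float64
    "variableargcount": 0.0,
}

# The *json columns are JSON-encoded strings; decode them into lists.
const_args = json.loads(record["constargjson"])
variable_args = json.loads(record["variableargjson"])

# The count columns mirror the list lengths.
assert len(const_args) == int(record["constargcount"])
assert len(variable_args) == int(record["variableargcount"])

print(record["filename"], record["lang"], const_args)
# config/config.go go ['TBLS_DSN', 'TBLS_DOC_PATH']
```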
filename: vendor/github.com/cloudfoundry/dagger/utils/utils.go
package utils import ( "context" "fmt" "math/rand" "net/http" "os" "github.com/google/go-github/github" "golang.org/x/oauth2" ) func RandStringRunes(n int) string { runes := []rune("abcdefghijklmnopqrstuvwxyz1234567890") b := make([]rune, n) for i := range b { b[i] = runes[rand.Intn(len(runes))] } return string(b) } func NewGitClient(ctx context.Context) *github.Client { git_token := os.Getenv("GIT_TOKEN") ts := oauth2.StaticTokenSource( &oauth2.Token{AccessToken: git_token}, ) tc := oauth2.NewClient(ctx, ts) client := github.NewClient(tc) if git_token == "" { fmt.Println("Using unauthorized github api, consider setting the GIT_TOKEN environment variable") fmt.Println("More info on Github tokens here: https://help.github.com/en/articles/creating-a-personal-access-token-for-the-command-line") client = github.NewClient(http.DefaultClient) } return client }
[ "\"GIT_TOKEN\"" ]
[]
[ "GIT_TOKEN" ]
[]
["GIT_TOKEN"]
go
1
0
filename: test/functional/test_framework/test_node.py
#!/usr/bin/env python3 # Copyright (c) 2017-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Class for bitcoind node under test""" import contextlib import decimal import errno from enum import Enum import http.client import json import logging import os import re import subprocess import tempfile import time import urllib.parse import collections import shlex import sys from .authproxy import JSONRPCException from .util import ( MAX_NODES, append_config, delete_cookie_file, get_rpc_proxy, rpc_url, wait_until, p2p_port, ) BITCOIND_PROC_WAIT_TIMEOUT = 60 class FailedToStartError(Exception): """Raised when a node fails to start correctly.""" class ErrorMatch(Enum): FULL_TEXT = 1 FULL_REGEX = 2 PARTIAL_REGEX = 3 class TestNode(): """A class for representing a bitcoind node under test. This class contains: - state about the node (whether it's running, etc) - a Python subprocess.Popen object representing the running process - an RPC connection to the node - one or more P2P connections to the node To make things easier for the test writer, any unrecognised messages will be dispatched to the RPC connection.""" def __init__(self, i, datadir, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False): """ Kwargs: start_perf (bool): If True, begin profiling the node with `perf` as soon as the node starts. """ self.index = i self.datadir = datadir self.bitcoinconf = os.path.join(self.datadir, "particl.conf") self.stdout_dir = os.path.join(self.datadir, "stdout") self.stderr_dir = os.path.join(self.datadir, "stderr") self.chain = chain self.rpchost = rpchost self.rpc_timeout = timewait self.binary = bitcoind self.coverage_dir = coverage_dir self.cwd = cwd if extra_conf is not None: append_config(datadir, extra_conf) # Most callers will just need to add extra args to the standard list below. # For those callers that need more flexibility, they can just set the args property directly. # Note that common args are set in the config file (see initialize_datadir) self.extra_args = extra_args # Configuration for logging is set as command-line args rather than in the bitcoin.conf file. # This means that starting a bitcoind using the temp dir to debug a failed test won't # spam debug.log. self.args = [ self.binary, "-datadir=" + self.datadir, "-logtimemicros", "-logthreadnames", "-debug", "-debugexclude=libevent", "-debugexclude=leveldb", "-uacomment=testnode%d" % i, ] self.cli = TestNodeCLI(bitcoin_cli, self.datadir) self.use_cli = use_cli self.start_perf = start_perf self.running = False self.process = None self.rpc_connected = False self.rpc = None self.url = None self.log = logging.getLogger('TestFramework.node%d' % i) self.cleanup_on_exit = True # Whether to kill the node when this object goes away # Cache perf subprocesses here by their data output filename. 
self.perf_subprocesses = {} self.p2ps = [] AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key']) PRIV_KEYS = [ # address , privkey AddressKeyPair('mjTkW3DjgyZck4KbiRusZsqTgaYTxdSz6z', 'cVpF924EspNh8KjYsfhgY96mmxvT6DgdWiTYMtMjuM74hJaU5psW'), AddressKeyPair('msX6jQXvxiNhx3Q62PKeLPrhrqZQdSimTg', 'cUxsWyKyZ9MAQTaAhUQWJmBbSvHMwSmuv59KgxQV7oZQU3PXN3KE'), AddressKeyPair('mnonCMyH9TmAsSj3M59DsbH8H63U3RKoFP', 'cTrh7dkEAeJd6b3MRX9bZK8eRmNqVCMH3LSUkE3dSFDyzjU38QxK'), AddressKeyPair('mqJupas8Dt2uestQDvV2NH3RU8uZh2dqQR', 'cVuKKa7gbehEQvVq717hYcbE9Dqmq7KEBKqWgWrYBa2CKKrhtRim'), AddressKeyPair('msYac7Rvd5ywm6pEmkjyxhbCDKqWsVeYws', 'cQDCBuKcjanpXDpCqacNSjYfxeQj8G6CAtH1Dsk3cXyqLNC4RPuh'), AddressKeyPair('n2rnuUnwLgXqf9kk2kjvVm8R5BZK1yxQBi', 'cQakmfPSLSqKHyMFGwAqKHgWUiofJCagVGhiB4KCainaeCSxeyYq'), AddressKeyPair('myzuPxRwsf3vvGzEuzPfK9Nf2RfwauwYe6', 'cQMpDLJwA8DBe9NcQbdoSb1BhmFxVjWD5gRyrLZCtpuF9Zi3a9RK'), AddressKeyPair('mumwTaMtbxEPUswmLBBN3vM9oGRtGBrys8', 'cSXmRKXVcoouhNNVpcNKFfxsTsToY5pvB9DVsFksF1ENunTzRKsy'), AddressKeyPair('mpV7aGShMkJCZgbW7F6iZgrvuPHjZjH9qg', 'cSoXt6tm3pqy43UMabY6eUTmR3eSUYFtB2iNQDGgb3VUnRsQys2k'), AddressKeyPair('mq4fBNdckGtvY2mijd9am7DRsbRB4KjUkf', 'cN55daf1HotwBAgAKWVgDcoppmUNDtQSfb7XLutTLeAgVc3u8hik'), AddressKeyPair('mpFAHDjX7KregM3rVotdXzQmkbwtbQEnZ6', 'cT7qK7g1wkYEMvKowd2ZrX1E5f6JQ7TM246UfqbCiyF7kZhorpX3'), AddressKeyPair('mzRe8QZMfGi58KyWCse2exxEFry2sfF2Y7', 'cPiRWE8KMjTRxH1MWkPerhfoHFn5iHPWVK5aPqjW8NxmdwenFinJ'), ] def get_deterministic_priv_key(self): """Return a deterministic priv key in base58, that only depends on the node's index""" assert len(self.PRIV_KEYS) == MAX_NODES return self.PRIV_KEYS[self.index] def get_mem_rss_kilobytes(self): """Get the memory usage (RSS) per `ps`. Returns None if `ps` is unavailable. """ assert self.running try: return int(subprocess.check_output( ["ps", "h", "-o", "rss", "{}".format(self.process.pid)], stderr=subprocess.DEVNULL).split()[-1]) # Avoid failing on platforms where ps isn't installed. # # We could later use something like `psutils` to work across platforms. except (FileNotFoundError, subprocess.SubprocessError): self.log.exception("Unable to get memory usage") return None def _node_msg(self, msg: str) -> str: """Return a modified msg that identifies this node by its index as a debugging aid.""" return "[node %d] %s" % (self.index, msg) def _raise_assertion_error(self, msg: str): """Raise an AssertionError with msg modified to identify this node.""" raise AssertionError(self._node_msg(msg)) def __del__(self): # Ensure that we don't leave any bitcoind processes lying around after # the test ends if self.process and self.cleanup_on_exit: # Should only happen on test failure # Avoid using logger, as that may have already been shutdown when # this destructor is called. 
print(self._node_msg("Cleaning up leftover process")) self.process.kill() def __getattr__(self, name): """Dispatches any unrecognised messages to the RPC connection or a CLI instance.""" if self.use_cli: return getattr(self.cli, name) else: assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection") return getattr(self.rpc, name) def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs): """Start the node.""" if extra_args is None: extra_args = self.extra_args if kwargs.pop('btcmode',True): extra_args.append('-btcmode') extra_args.append('-nosmsg') # Add a new stdout and stderr file each time bitcoind is started if stderr is None: stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) if stdout is None: stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) self.stderr = stderr self.stdout = stdout if cwd is None: cwd = self.cwd # Delete any existing cookie file -- if such a file exists (eg due to # unclean shutdown), it will get overwritten anyway by bitcoind, and # potentially interfere with our attempt to authenticate delete_cookie_file(self.datadir, self.chain) # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1") self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs) self.running = True self.log.debug("bitcoind started, waiting for RPC to come up") if self.start_perf: self._start_perf() def wait_for_rpc_connection(self): """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect.""" # Poll at a rate of four times per second poll_per_s = 4 for _ in range(poll_per_s * self.rpc_timeout): if self.process.poll() is not None: raise FailedToStartError(self._node_msg( 'bitcoind exited with status {} during initialization'.format(self.process.returncode))) try: rpc = get_rpc_proxy(rpc_url(self.datadir, self.index, self.chain, self.rpchost), self.index, timeout=self.rpc_timeout, coveragedir=self.coverage_dir) rpc.getblockcount() # If the call to getblockcount() succeeds then the RPC connection is up self.log.debug("RPC successfully started") if self.use_cli: return self.rpc = rpc self.rpc_connected = True self.url = self.rpc.url return except IOError as e: if e.errno != errno.ECONNREFUSED: # Port not yet open? raise # unknown IO error except JSONRPCException as e: # Initialization phase # -28 RPC in warmup # -342 Service unavailable, RPC server started but is shutting down due to error if e.error['code'] != -28 and e.error['code'] != -342: raise # unknown JSON RPC exception except ValueError as e: # cookie file not found and no rpcuser or rpcassword. 
bitcoind still starting if "No RPC credentials" not in str(e): raise time.sleep(1.0 / poll_per_s) self._raise_assertion_error("Unable to connect to bitcoind") def generate(self, nblocks, maxtries=1000000): self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`") return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries) def get_wallet_rpc(self, wallet_name): if self.use_cli: return self.cli("-rpcwallet={}".format(wallet_name)) else: assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected") wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name)) return self.rpc / wallet_path def stop_node(self, expected_stderr='', wait=0): """Stop the node.""" if not self.running: return self.log.debug("Stopping node") try: self.stop(wait=wait) except http.client.CannotSendRequest: self.log.exception("Unable to stop node.") # If there are any running perf processes, stop them. for profile_name in tuple(self.perf_subprocesses.keys()): self._stop_perf(profile_name) # Check that stderr is as expected self.stderr.seek(0) stderr = self.stderr.read().decode('utf-8').strip() if stderr != expected_stderr: raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr)) self.stdout.close() self.stderr.close() del self.p2ps[:] def is_node_stopped(self): """Checks whether the node has stopped. Returns True if the node has stopped. False otherwise. This method is responsible for freeing resources (self.process).""" if not self.running: return True return_code = self.process.poll() if return_code is None: return False # process has stopped. Assert that it didn't return an error code. assert return_code == 0, self._node_msg( "Node returned non-zero exit code (%d) when stopping" % return_code) self.running = False self.process = None self.rpc_connected = False self.rpc = None self.log.debug("Node stopped") return True def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT): wait_until(self.is_node_stopped, timeout=timeout) @contextlib.contextmanager def assert_debug_log(self, expected_msgs, timeout=2): time_end = time.time() + timeout debug_log = os.path.join(self.datadir, self.chain, 'debug.log') with open(debug_log, encoding='utf-8') as dl: dl.seek(0, 2) prev_size = dl.tell() yield while True: found = True with open(debug_log, encoding='utf-8') as dl: dl.seek(prev_size) log = dl.read() print_log = " - " + "\n - ".join(log.splitlines()) for expected_msg in expected_msgs: if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None: found = False if found: return if time.time() >= time_end: break time.sleep(0.05) self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log)) @contextlib.contextmanager def assert_memory_usage_stable(self, *, increase_allowed=0.03): """Context manager that allows the user to assert that a node's memory usage (RSS) hasn't increased beyond some threshold percentage. Args: increase_allowed (float): the fractional increase in memory allowed until failure; e.g. `0.12` for up to 12% increase allowed. 
""" before_memory_usage = self.get_mem_rss_kilobytes() yield after_memory_usage = self.get_mem_rss_kilobytes() if not (before_memory_usage and after_memory_usage): self.log.warning("Unable to detect memory usage (RSS) - skipping memory check.") return perc_increase_memory_usage = (after_memory_usage / before_memory_usage) - 1 if perc_increase_memory_usage > increase_allowed: self._raise_assertion_error( "Memory usage increased over threshold of {:.3f}% from {} to {} ({:.3f}%)".format( increase_allowed * 100, before_memory_usage, after_memory_usage, perc_increase_memory_usage * 100)) @contextlib.contextmanager def profile_with_perf(self, profile_name): """ Context manager that allows easy profiling of node activity using `perf`. See `test/functional/README.md` for details on perf usage. Args: profile_name (str): This string will be appended to the profile data filename generated by perf. """ subp = self._start_perf(profile_name) yield if subp: self._stop_perf(profile_name) def _start_perf(self, profile_name=None): """Start a perf process to profile this node. Returns the subprocess running perf.""" subp = None def test_success(cmd): return subprocess.call( # shell=True required for pipe use below cmd, shell=True, stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0 if not sys.platform.startswith('linux'): self.log.warning("Can't profile with perf; only available on Linux platforms") return None if not test_success('which perf'): self.log.warning("Can't profile with perf; must install perf-tools") return None if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))): self.log.warning( "perf output won't be very useful without debug symbols compiled into bitcoind") output_path = tempfile.NamedTemporaryFile( dir=self.datadir, prefix="{}.perf.data.".format(profile_name or 'test'), delete=False, ).name cmd = [ 'perf', 'record', '-g', # Record the callgraph. '--call-graph', 'dwarf', # Compatibility for gcc's --fomit-frame-pointer. '-F', '101', # Sampling frequency in Hz. '-p', str(self.process.pid), '-o', output_path, ] subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) self.perf_subprocesses[profile_name] = subp return subp def _stop_perf(self, profile_name): """Stop (and pop) a perf subprocess.""" subp = self.perf_subprocesses.pop(profile_name) output_path = subp.args[subp.args.index('-o') + 1] subp.terminate() subp.wait(timeout=10) stderr = subp.stderr.read().decode() if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr: self.log.warning( "perf couldn't collect data! Try " "'sudo sysctl -w kernel.perf_event_paranoid=-1'") else: report_cmd = "perf report -i {}".format(output_path) self.log.info("See perf output by running '{}'".format(report_cmd)) def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs): """Attempt to start the node and expect it to raise an error. extra_args: extra arguments to pass through to bitcoind expected_msg: regex that stderr should match when bitcoind fails Will throw if bitcoind starts without an error. 
Will throw if an expected_msg is provided and it does not match bitcoind's stdout.""" with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \ tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout: try: self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs) self.wait_for_rpc_connection() self.stop_node() self.wait_until_stopped() except FailedToStartError as e: self.log.debug('bitcoind failed to start: %s', e) self.running = False self.process = None # Check stderr for expected message if expected_msg is not None: log_stderr.seek(0) stderr = log_stderr.read().decode('utf-8').strip() if match == ErrorMatch.PARTIAL_REGEX: if re.search(expected_msg, stderr, flags=re.MULTILINE) is None: self._raise_assertion_error( 'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_REGEX: if re.fullmatch(expected_msg, stderr) is None: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) elif match == ErrorMatch.FULL_TEXT: if expected_msg != stderr: self._raise_assertion_error( 'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr)) else: if expected_msg is None: assert_msg = "bitcoind should have exited with an error" else: assert_msg = "bitcoind should have exited with expected error " + expected_msg self._raise_assertion_error(assert_msg) def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs): """Add a p2p connection to the node. This method adds the p2p connection to the self.p2ps list and also returns the connection to the caller.""" if 'dstport' not in kwargs: kwargs['dstport'] = p2p_port(self.index) if 'dstaddr' not in kwargs: kwargs['dstaddr'] = '127.0.0.1' p2p_conn.peer_connect(**kwargs, net=self.chain)() self.p2ps.append(p2p_conn) if wait_for_verack: p2p_conn.wait_for_verack() return p2p_conn @property def p2p(self): """Return the first p2p connection Convenience property - most tests only use a single p2p connection to each node, so this saves having to write node.p2ps[0] many times.""" assert self.p2ps, self._node_msg("No p2p connection") return self.p2ps[0] def disconnect_p2ps(self): """Close all p2p connections to the node.""" for p in self.p2ps: p.peer_disconnect() del self.p2ps[:] def tx(self, args): binary = self.binary[:-1] + '-tx' p_args = [binary, '-regtest'] + args self.log.debug("Running bitcoin-tx command: %s" % ' '.join(args)) process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate() returncode = process.poll() if returncode: match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) if match: code, message = match.groups() raise JSONRPCException(dict(code=int(code), message=message)) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except json.JSONDecodeError: return cli_stdout.rstrip("\n") class TestNodeCLIAttr: def __init__(self, cli, command): self.cli = cli self.command = command def __call__(self, *args, **kwargs): return self.cli.send_cli(self.command, *args, **kwargs) def get_request(self, *args, **kwargs): return lambda: self(*args, **kwargs) def arg_to_cli(arg): if isinstance(arg, bool): return str(arg).lower() elif isinstance(arg, dict) or 
isinstance(arg, list): return json.dumps(arg) else: return str(arg) class TestNodeCLI(): """Interface to bitcoin-cli for an individual node""" def __init__(self, binary, datadir): self.options = [] self.binary = binary self.datadir = datadir self.input = None self.log = logging.getLogger('TestFramework.bitcoincli') def __call__(self, *options, input=None): # TestNodeCLI is callable with bitcoin-cli command-line options cli = TestNodeCLI(self.binary, self.datadir) cli.options = [str(o) for o in options] cli.input = input return cli def __getattr__(self, command): return TestNodeCLIAttr(self, command) def batch(self, requests): results = [] for request in requests: try: results.append(dict(result=request())) except JSONRPCException as e: results.append(dict(error=e)) return results def send_cli(self, command=None, *args, **kwargs): """Run bitcoin-cli command. Deserializes returned string as python object.""" pos_args = [arg_to_cli(arg) for arg in args] named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()] assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call" p_args = [self.binary, "-datadir=" + self.datadir] + self.options if named_args: p_args += ["-named"] if command is not None: p_args += [command] p_args += pos_args + named_args self.log.debug("Running bitcoin-cli command: %s" % command) process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) cli_stdout, cli_stderr = process.communicate(input=self.input) returncode = process.poll() if returncode: match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr) if match: code, message = match.groups() raise JSONRPCException(dict(code=int(code), message=message)) # Ignore cli_stdout, raise with cli_stderr raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) except json.JSONDecodeError: return cli_stdout.rstrip("\n")
environment: []
variablearg: []
constarg: []
variableargjson: []
constargjson: []
lang: python
constargcount: 0
variableargcount: 0
filename: common/patreon/poller.go
package patreon import ( "context" "github.com/jonas747/yagpdb/common" "github.com/jonas747/yagpdb/common/patreon/patreonapi" "github.com/mediocregopher/radix.v3" "github.com/sirupsen/logrus" "golang.org/x/oauth2" "os" "strconv" // "strconv" "sync" "time" ) type Poller struct { mu sync.RWMutex config *oauth2.Config token *oauth2.Token client *patreonapi.Client activePatrons []*Patron } func Run() { accessToken := os.Getenv("YAGPDB_PATREON_API_ACCESS_TOKEN") refreshToken := os.Getenv("YAGPDB_PATREON_API_REFRESH_TOKEN") clientID := os.Getenv("YAGPDB_PATREON_API_CLIENT_ID") clientSecret := os.Getenv("YAGPDB_PATREON_API_CLIENT_SECRET") if accessToken == "" || clientID == "" || clientSecret == "" { PatreonDisabled(nil, "Missing one of YAGPDB_PATREON_API_ACCESS_TOKEN, YAGPDB_PATREON_API_CLIENT_ID, YAGPDB_PATREON_API_CLIENT_SECRET") return } var storedRefreshToken string common.RedisPool.Do(radix.Cmd(&storedRefreshToken, "GET", "patreon_refresh_token")) config := &oauth2.Config{ ClientID: clientID, ClientSecret: clientSecret, Endpoint: oauth2.Endpoint{ AuthURL: patreonapi.AuthorizationURL, TokenURL: patreonapi.AccessTokenURL, }, Scopes: []string{"identity", "campaigns", "campaigns.members"}, } token := &oauth2.Token{ AccessToken: "", RefreshToken: refreshToken, // Must be non-nil, otherwise token will not be expired Expiry: time.Now().Add(-24 * time.Hour), } tc := oauth2.NewClient(context.Background(), &TokenSourceSaver{inner: config.TokenSource(context.Background(), token)}) // Either use the token provided in the env vars or a cached one in redis pClient := patreonapi.NewClient(tc) user, err := pClient.FetchUser() if err != nil { if storedRefreshToken == "" { PatreonDisabled(err, "Failed fetching current user with env var refresh token, no refresh token stored in redis.") return } logrus.WithError(err).Warn("Patreon: Failed fetching current user with env var refresh token, trying stored token") tCop := *token tCop.RefreshToken = storedRefreshToken tc = oauth2.NewClient(context.Background(), &TokenSourceSaver{inner: config.TokenSource(context.Background(), &tCop)}) pClient = patreonapi.NewClient(tc) user, err = pClient.FetchUser() if err != nil { PatreonDisabled(err, "Unable to fetch user with redis patreon token.") return } } poller := &Poller{ config: config, token: token, client: pClient, } ActivePoller = poller logrus.Info("Patreon integration activated as ", user.Data.ID, ": ", user.Data.Attributes.FullName) go poller.Run() } func PatreonDisabled(err error, reason string) { l := logrus.NewEntry(logrus.StandardLogger()) if err != nil { l = l.WithError(err) } l.Warn("Not starting patreon integration, also means that premium statuses wont update. 
" + reason) } func (p *Poller) Run() { ticker := time.NewTicker(time.Minute) for { p.Poll() <-ticker.C } } func (p *Poller) Poll() { // Get your campaign data campaignResponse, err := p.client.FetchCampaigns() if err != nil || len(campaignResponse.Data) < 1 { logrus.WithError(err).Error("Patreon: Failed fetching campaign") return } campaignId := campaignResponse.Data[0].ID cursor := "" page := 1 patrons := make([]*Patron, 0, 30) for { membersResponse, err := p.client.FetchMembers(campaignId, 0, cursor) // pledgesResponse, err := p.client.FetchPledges(campaignId, // patreon.WithPageSize(30), // patreon.WithCursor(cursor)) if err != nil { logrus.WithError(err).Error("Patreon: Failed fetching pledges") return } // logrus.Println("num results: ", len(membersResponse.Data)) // Get all the users in an easy-to-lookup way users := make(map[string]*patreonapi.UserAttributes) for _, item := range membersResponse.Included { if u, ok := item.Decoded.(*patreonapi.UserAttributes); ok { users[item.ID] = u } } // Loop over the pledges to get e.g. their amount and user name for _, memberData := range membersResponse.Data { attributes := memberData.Attributes user, ok := users[memberData.Relationships.User.Data.ID] if !ok { // logrus.Println("Unknown user: ", memberData.ID) continue } if attributes.LastChargeStatus != patreonapi.ChargeStatusPaid && attributes.LastChargeStatus != patreonapi.ChargeStatusPending { // logrus.Println("Not paid: ", attributes.FullName) continue } if attributes.PatronStatus != "active_patron" { continue } // logrus.Println(attributes.PatronStatus + " --- " + user.FirstName + ":" + user.LastName + ":" + user.Vanity) patron := &Patron{ AmountCents: attributes.CurrentEntitledAmountCents, Avatar: user.ImageURL, } if user.Vanity != "" { patron.Name = user.Vanity } else { patron.Name = user.FirstName } if user.SocialConnections.Discord != nil && user.SocialConnections.Discord.UserID != "" { discordID, _ := strconv.ParseInt(user.SocialConnections.Discord.UserID, 10, 64) patron.DiscordID = discordID } patrons = append(patrons, patron) // logrus.Printf("%s is pledging %d cents, Discord: %d\r\n", patron.Name, patron.AmountCents, patron.DiscordID) } // Get the link to the next page of pledges nextCursor := membersResponse.Meta.Pagination.Cursors.Next if nextCursor == "" { // logrus.Println("No nextlink ", page) break } cursor = nextCursor // logrus.Println("nextlink: ", page, ": ", cursor) page++ } patrons = append(patrons, &Patron{ DiscordID: common.Conf.Owner, Name: "Owner", AmountCents: 10000, }) // Swap the stored ones, this dosent mutate the existing returned slices so we dont have to do any copying on each request woo p.mu.Lock() p.activePatrons = patrons p.mu.Unlock() } func (p *Poller) GetPatrons() (patrons []*Patron) { p.mu.RLock() patrons = p.activePatrons p.mu.RUnlock() return } type TokenSourceSaver struct { inner oauth2.TokenSource lastRefreshToken string } func (t *TokenSourceSaver) Token() (*oauth2.Token, error) { tk, err := t.inner.Token() if err == nil { if t.lastRefreshToken != tk.RefreshToken { logrus.Info("Patreon: New refresh token") common.RedisPool.Do(radix.Cmd(nil, "SET", "patreon_refresh_token", tk.RefreshToken)) t.lastRefreshToken = tk.RefreshToken } } return tk, err }
[ "\"YAGPDB_PATREON_API_ACCESS_TOKEN\"", "\"YAGPDB_PATREON_API_REFRESH_TOKEN\"", "\"YAGPDB_PATREON_API_CLIENT_ID\"", "\"YAGPDB_PATREON_API_CLIENT_SECRET\"" ]
[]
[ "YAGPDB_PATREON_API_CLIENT_ID", "YAGPDB_PATREON_API_ACCESS_TOKEN", "YAGPDB_PATREON_API_REFRESH_TOKEN", "YAGPDB_PATREON_API_CLIENT_SECRET" ]
[]
["YAGPDB_PATREON_API_CLIENT_ID", "YAGPDB_PATREON_API_ACCESS_TOKEN", "YAGPDB_PATREON_API_REFRESH_TOKEN", "YAGPDB_PATREON_API_CLIENT_SECRET"]
go
4
0
filename: test/e2e/identityvalidator/identityvalidator.go
package main import ( "context" "fmt" "os" "github.com/pkg/errors" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2018-04-01/compute" "github.com/Azure/azure-sdk-for-go/services/keyvault/2016-10-01/keyvault" "github.com/Azure/azure-sdk-for-go/services/keyvault/auth" "github.com/Azure/go-autorest/autorest" "github.com/Azure/go-autorest/autorest/adal" "github.com/Azure/go-autorest/autorest/azure" log "github.com/sirupsen/logrus" "github.com/spf13/pflag" ) var ( subscriptionID = pflag.String("subscription-id", "", "subscription id for test") identityClientID = pflag.String("identity-client-id", "", "client id for the msi id") resourceGroup = pflag.String("resource-group", "", "any resource group name with reader permission to the aad object") keyvaultName = pflag.String("keyvault-name", "", "the name of the keyvault to extract the secret from") keyvaultSecretName = pflag.String("keyvault-secret-name", "", "the name of the keyvault secret we are extracting with pod identity") keyvaultSecretVersion = pflag.String("keyvault-secret-version", "", "the version of the keyvault secret we are extracting with pod identity") ) func main() { pflag.Parse() podname := os.Getenv("E2E_TEST_POD_NAME") podnamespace := os.Getenv("E2E_TEST_POD_NAMESPACE") podip := os.Getenv("E2E_TEST_POD_IP") log.Infof("Starting identity validator pod %s/%s %s", podnamespace, podname, podip) logger := log.WithFields(log.Fields{ "podnamespace": podnamespace, "podname": podname, "podip": podip, }) msiEndpoint, err := adal.GetMSIVMEndpoint() if err != nil { logger.Fatalf("Failed to get msiEndpoint: %+v", err) } logger.Infof("Successfully obtain MSIEndpoint: %s\n", msiEndpoint) if *keyvaultName != "" && *keyvaultSecretName != "" { // Test if the pod identity is set up correctly if err := testUserAssignedIdentityOnPod(logger, msiEndpoint, *identityClientID, *keyvaultName, *keyvaultSecretName, *keyvaultSecretVersion); err != nil { logger.Fatalf("testUserAssignedIdentityOnPod failed, %+v", err) } } else { // Test if the cluster-wide user assigned identity is set up correctly if err := testClusterWideUserAssignedIdentity(logger, msiEndpoint, *subscriptionID, *resourceGroup, *identityClientID); err != nil { logger.Fatalf("testClusterWideUserAssignedIdentity failed, %+v", err) } } // Test if a service principal token can be obtained when using a system assigned identity if t1, err := testSystemAssignedIdentity(logger, msiEndpoint); err != nil || t1 == nil { logger.Fatalf("testSystemAssignedIdentity failed, %+v", err) } } // testClusterWideUserAssignedIdentity will verify whether cluster-wide user assigned identity is working properly func testClusterWideUserAssignedIdentity(logger *log.Entry, msiEndpoint, subscriptionID, resourceGroup, identityClientID string) error { os.Setenv("AZURE_CLIENT_ID", identityClientID) defer os.Unsetenv("AZURE_CLIENT_ID") token, err := adal.NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, azure.PublicCloud.ResourceManagerEndpoint, identityClientID) if err != nil { return errors.Wrapf(err, "Failed to get service principal token from user assigned identity") } vmClient := compute.NewVirtualMachinesClient(subscriptionID) vmClient.Authorizer = autorest.NewBearerAuthorizer(token) vmlist, err := vmClient.List(context.Background(), resourceGroup) if err != nil { return errors.Wrapf(err, "Failed to verify cluster-wide user assigned identity") } logger.Infof("Successfully verified cluster-wide user assigned identity. 
VM count: %d", len(vmlist.Values())) return nil } // testUserAssignedIdentityOnPod will verify whether a pod identity is working properly func testUserAssignedIdentityOnPod(logger *log.Entry, msiEndpoint, identityClientID, keyvaultName, keyvaultSecretName, keyvaultSecretVersion string) error { // When new authorizer is created, azure-sdk-for-go tries to create dataplane authorizer using MSI. It checks the AZURE_CLIENT_ID to get the client id // for the user assigned identity. If client id not found, then NewServicePrincipalTokenFromMSI is invoked instead of using the actual // user assigned identity. Setting this env var ensures we validate GetSecret using the desired user assigned identity. os.Setenv("AZURE_CLIENT_ID", identityClientID) defer os.Unsetenv("AZURE_CLIENT_ID") keyClient := keyvault.New() authorizer, err := auth.NewAuthorizerFromEnvironment() if err == nil { keyClient.Authorizer = authorizer } logger.Infof("%s %s %s\n", keyvaultName, keyvaultSecretName, keyvaultSecretVersion) secret, err := keyClient.GetSecret(context.Background(), fmt.Sprintf("https://%s.vault.azure.net", keyvaultName), keyvaultSecretName, keyvaultSecretVersion) if err != nil || *secret.Value == "" { return errors.Wrapf(err, "Failed to verify user assigned identity on pod") } logger.Infof("Successfully verified user assigned identity on pod") return nil } // testMSIEndpoint will return a service principal token obtained through a system assigned identity func testSystemAssignedIdentity(logger *log.Entry, msiEndpoint string) (*adal.Token, error) { spt, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, azure.PublicCloud.ResourceManagerEndpoint) if err != nil { return nil, errors.Wrapf(err, "Failed to acquire a token using the MSI VM extension") } if err := spt.Refresh(); err != nil { return nil, errors.Wrapf(err, "Failed to refresh ServicePrincipalTokenFromMSI using the MSI VM extension, msiEndpoint(%s)", msiEndpoint) } token := spt.Token() if token.IsZero() { return nil, errors.Errorf("No token found, MSI VM extension, msiEndpoint(%s)", msiEndpoint) } logger.Infof("Successfully acquired a token using the MSI, msiEndpoint(%s)", msiEndpoint) return &token, nil }
[ "\"E2E_TEST_POD_NAME\"", "\"E2E_TEST_POD_NAMESPACE\"", "\"E2E_TEST_POD_IP\"" ]
[]
[ "E2E_TEST_POD_IP", "E2E_TEST_POD_NAME", "E2E_TEST_POD_NAMESPACE" ]
[]
["E2E_TEST_POD_IP", "E2E_TEST_POD_NAME", "E2E_TEST_POD_NAMESPACE"]
go
3
0
filename: tree2labels/baselines.py
''' It evaluates some traditional baselines using for regular PoS-tagging or chunking It uses the implementations from the NLTK TRAINING PYTHONPATH=. python baselines/baselines.py \ --train /home/david.vilares/Escritorio/Papers/seq2constree/dataset/gold-tags-ptb-train.seqtrees \ --test /home/david.vilares/Escritorio/Papers/seq2constree/dataset/gold-tags-ptb-dev.seqtrees \ --out /home/david.vilares/Escritorio/Papers/seq2constree/baselines/gold-tags-ptb \ --status train TEST @author: david.vilares ''' from argparse import ArgumentParser from baseline_utils import * from utils import sequence_to_parenthesis, flat_list, get_enriched_labels_for_retagger from sklearn_crfsuite import CRF from sklearn_crfsuite.metrics import flat_f1_score from sklearn.feature_extraction.text import CountVectorizer from sklearn.naive_bayes import MultinomialNB from sklearn.feature_extraction import DictVectorizer from keras.models import load_model from keras.utils import np_utils from keras.models import Sequential, Model from keras.layers import Dense, Dropout, Activation, Embedding, Input, Flatten from keras.wrappers.scikit_learn import KerasClassifier # Fit LabelEncoder with our list of classes from sklearn.preprocessing import LabelEncoder from sklearn.metrics import accuracy_score # Convert integers to dummy variables (one hot encoded) import keras import codecs import functools import os import nltk import pickle import tempfile import time import os import numpy as np import sys import tensorflow as tf import random as rn import uuid #Uncomment/Comment these lines to determine when and which GPU(s) to use #os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" #os.environ["CUDA_VISIBLE_DEVICES"] = "0" reload(sys) # Reload does the trick! sys.setdefaultencoding('UTF8') STATUS_TEST = "test" STATUS_TRAIN = "train" SPLIT_SYMBOL = "~" if __name__ == '__main__': arg_parser = ArgumentParser() arg_parser.add_argument("--train", dest="train", help="Path to the training file", default=None) arg_parser.add_argument("--test", dest="test", help ="Path to the development/test file", default=None) # arg_parser.add_argument("--dir", dest="dir", help="Path to the output directory where to store the models", default=None) arg_parser.add_argument("--model", dest="model", help="Path to the model") # arg_parser.add_argument("--name", dest="name", help="Path to the name of the file") arg_parser.add_argument("--baseline", dest="baseline", help="Path to the baseline directory. 
Options: [emlp|mlp|crf]", default=None) arg_parser.add_argument("--gold", dest="gold", help="Path to the gold file", default=None) arg_parser.add_argument("--status", dest="status", help="") arg_parser.add_argument("--prev_context",dest="prev_context",type=int, default=1) arg_parser.add_argument("--next_context", dest="next_context",type=int,default=1) arg_parser.add_argument("--retagger", dest="retagger", default=False, action="store_true") arg_parser.add_argument("--unary", dest="unary",default=False, action="store_true") arg_parser.add_argument("--output_unary", dest="output_unary", help="Use together with unary to store the output in the desired file") arg_parser.add_argument("--output_decode", dest="output_decode", help="Path to store the predicted trees", default="/tmp/trees.txt") arg_parser.add_argument("--evalb",dest="evalb",help="Path to the script EVALB") arg_parser.add_argument("--gpu",dest="gpu",default="False") args = arg_parser.parse_args() if args.status.lower() == STATUS_TEST: if args.gpu.lower() == "true": os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "0" else: os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "" #TODO: Change for a temporaryfile, but getting problems with the Chinese encoding tmpfile = codecs.open(args.output_decode,"w") with codecs.open(args.test, encoding="utf-8") as f_dev: content = f_dev.read() gold_samples = [[ tuple(l.split("\t")) for l in sentence.split("\n")] for sentence in content.split("\n\n") if sentence != ""] sentences =[[(word,postag) for word, postag, label in sample] for sample in gold_samples] ####################################################################### # EVALUATING A PERCEPTRON WITH EMBEDDINGS ####################################################################### if args.baseline.lower() == "emlp": batch= 128 new_sentences = sentences unary_preds = None init_time = None with codecs.open(args.model+".emlp.labels") as f: label_encoder = pickle.load(f) with codecs.open(args.model+".emlp.features") as f: vocab,postags,all_labels, hidden_size, prev_context, next_context = pickle.load(f) emlp_parser = EmbeddedPerceptronTagger(hidden_size, vocab, postags, len(all_labels)) emlp_parser.model = load_model(args.model+".emlp.hdf5") #Loading and running the retagger, if needed if args.retagger: with codecs.open(args.model+"-unary.emlp.labels") as f: unary_label_encoder = pickle.load(f) with codecs.open(args.model+"-unary.emlp.features") as f: re_vocab,re_postags,re_all_labels, re_hidden_size, re_prev_context, re_next_context = pickle.load(f) emlp_retagger = EmbeddedPerceptronTagger(re_hidden_size, re_vocab, re_postags, len(re_all_labels)) emlp_retagger.model = load_model(args.model+"-unary.emlp.hdf5") #The time starts here, applying the retagging, if needed init_time = time.time() X_test_unary,X_tags_test_unary = emlp_retagger.transform_test(sentences, re_prev_context, re_next_context) X_test_unary = np.array(X_test_unary) X_tags_test_unary = np.array(X_tags_test_unary) unary_preds = emlp_retagger.model.predict_generator(emlp_retagger.samples_test(X_test_unary,X_tags_test_unary,batch), steps= (X_test_unary.shape[0]/batch)+1) unary_preds = list(unary_label_encoder.inverse_transform ( unary_preds.argmax(axis=-1) )) new_sentences, unary_preds = get_samples_retagged(sentences, unary_preds) #If we are not applying the retagging strategy, we start here to measure the time if init_time is None: init_time = time.time() X_test, X_tags_test = 
emlp_parser.transform_test(new_sentences, prev_context, next_context) X_test = np.array(X_test) X_tags_test = np.array(X_tags_test) preds = emlp_parser.model.predict_generator(emlp_parser.samples_test(X_test,X_tags_test,batch), steps= (X_test.shape[0]/batch)+1) preds = process_labels(new_sentences, preds, label_encoder, args.unary) preds, unary_preds = format_output(new_sentences, preds, unary_preds, args.retagger) ####################################################################### # EVALUATING A ONE-HOT VECTOR PERCEPTRON ####################################################################### elif args.baseline.lower() == "mlp": new_sentences = sentences unary_preds = None batch= 128 init_time = None # loading_parsing_time = time.time() with codecs.open(args.model+".mlp.features") as f: dict_vectorizer, hidden_size, prev_context, next_context = pickle.load(f) with codecs.open(args.model+".mlp.labels") as f: label_encoder = pickle.load(f) mlp_parser = PerceptronTagger.builder() mlp_parser.model = load_model(args.model+".mlp.hdf5") # end_loading_parsing_time = time.time() - loading_parsing_time #Running the retagger, if needed if args.retagger: with codecs.open(args.model+"-unary.mlp.features") as f: dict_unary_vectorizer,re_hidden_size, re_prev_context, re_next_context = pickle.load(f) with codecs.open(args.model+"-unary.mlp.labels") as f: unary_label_encoder = pickle.load(f) mlp_retagger = PerceptronTagger.builder() mlp_retagger.model = load_model(args.model+"-unary.mlp.hdf5") init_time = time.time() X_test_unary = mlp_retagger.transform_test(sentences, re_prev_context, re_next_context) unary_preds = mlp_retagger.model.predict_generator(mlp_retagger.samples_test(X_test_unary,batch, dict_unary_vectorizer), steps= (len(X_test_unary)/batch)+1) unary_preds = list(unary_label_encoder.inverse_transform ( unary_preds.argmax(axis=-1) )) new_sentences, unary_preds = get_samples_retagged(sentences, unary_preds) #If we are not applying the retagging strategy, we start here to measure the time if init_time is None: init_time = time.time() X_test = mlp_parser.transform_test(new_sentences, prev_context,next_context) preds = mlp_parser.model.predict_generator(mlp_parser.samples_test(X_test,batch, dict_vectorizer), steps= (len(X_test)/batch)+1) preds = process_labels(sentences, preds, label_encoder, args.unary) preds, unary_preds = format_output(new_sentences, preds, unary_preds, args.retagger) ####################################################################### # EVALUATING A CONDITIONAL RANDOM FIELDS ####################################################################### elif args.baseline.lower() == "crf": new_sentences = sentences unary_preds = None init_time = None with codecs.open(args.model+".crf.pickle","rb") as f: crf_parser, prev_context, next_context = pickle.load(f) #Running the retagger if args.retagger: with codecs.open(args.model+"-unary.crf.pickle","rb") as f: crf_retagger, re_prev_context, re_next_context= pickle.load(f) init_time = time.time() X_test = [sent2features_test(s,re_prev_context, re_next_context) for s in new_sentences] unary_preds = crf_retagger.predict([x for x in X_test]) unary_preds_aux =[] for unary_pred in unary_preds: for element in unary_pred: unary_preds_aux.append(element) unary_preds = unary_preds_aux new_sentences, unary_preds = get_samples_retagged(new_sentences, unary_preds) if init_time is None: init_time = time.time() X_test = [sent2features_test(s,prev_context, next_context) for s in new_sentences] preds = crf_parser.predict(X_test) preds_aux =[] for 
pred in preds: for element in pred: preds_aux.append(element) preds = preds_aux preds, unary_preds = format_output(new_sentences, preds, unary_preds, args.retagger) #Postprocessing the labels for the CRF for j,pred in enumerate(preds): for k,p in enumerate(pred): if (p in ["-EOS-","-BOS-"] or p.startswith("NONE")) and k != 0 and k < len(pred)-1: pred[k] = "ROOT_S" else: raise NotImplementedError ######################################################################### # DECODING AND POSPROCESS ######################################################################### if args.unary: if not os.path.exists(args.output_unary): with codecs.open(args.output_unary,"w") as f: for j,sentence in enumerate(sentences): for (word,postag), retag in zip(sentence,preds[j]): f.write("\t".join([word,postag,retag])+"\n") f.write("\n") else: raise ValueError("File already exist:", args.output_unary) exit() parenthesized_trees = sequence_to_parenthesis(new_sentences,preds) final_time = time.time() tmpfile.write("\n".join(parenthesized_trees)+"\n") os.system(" ".join([args.evalb,args.gold, tmpfile.name])) gold_labels = [e[2] for e in flat_list(gold_samples)] if args.retagger: enriched_preds = get_enriched_labels_for_retagger(preds, unary_preds) flat_preds = flat_list(enriched_preds) else: flat_preds = flat_list(preds) print "Accuracy",round(accuracy_score(gold_labels, flat_preds),4) total_time = final_time - init_time print "Total time:", round(total_time,4) print "Sents/s",round(len(gold_samples) / (total_time),2) ######################################################### # # TRAINING PHASE # # ######################################################### elif args.status.lower() == STATUS_TRAIN: # For reproducibility, if wanted os.environ['PYTHONHASHSEED'] = '17' np.random.seed(17) rn.seed(17) session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1) from keras import backend as K tf.set_random_seed(17) sess = tf.Session(graph=tf.get_default_graph(), config=session_conf) K.set_session(sess) ################################################################### # TRAINING AN EMBEDDED PERCEPTRON ################################################################### if args.baseline.lower() == "emlp": hidden_size = 100 batch = 8 context_len = 1+args.prev_context+args.next_context with codecs.open(args.test, encoding="utf-8") as f_dev: dev_samples = [[ tuple(l.split("\t")) for l in sentence.split("\n")] for sentence in f_dev.read().split("\n\n") if sentence != ""] with codecs.open(args.train, encoding="utf-8") as f_train: train_samples = [[ tuple(l.split("\t")) for l in sentence.split("\n")] for sentence in f_train.read().split("\n\n") if sentence != ""] vocab = set([]) postags = set([]) labels = set([]) for g in train_samples: for word,postag,label in g: vocab.add(word) postags.add(postag) labels.add(label) all_labels = labels for g in dev_samples: for _,_,label in g: all_labels.add(label) emlp_tagger = EmbeddedPerceptronTagger(hidden_size, vocab, postags, len(all_labels), context_len = context_len) X_train, X_tags_train, y_train = emlp_tagger.transform(train_samples, args.prev_context, args.next_context) X_dev, X_tags_dev, y_dev = emlp_tagger.transform(dev_samples, args.prev_context, args.next_context) label_encoder = LabelEncoder() label_encoder.fit(y_train + y_dev) y_train = label_encoder.transform(y_train) y_dev = label_encoder.transform(y_dev) X_train = np.array(X_train) X_tags_train = np.array(X_tags_train) X_dev = np.array(X_dev) X_tags_dev = np.array(X_tags_dev) with 
codecs.open(args.model+".emlp.features","wb") as f: pickle.dump((vocab,postags, all_labels, hidden_size, args.prev_context, args.next_context),f) with codecs.open(args.model+".emlp.labels","wb") as f: pickle.dump(label_encoder,f) checkpoint = keras.callbacks.ModelCheckpoint(args.model+".emlp.hdf5", save_best_only=True) early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=4, verbose=0, mode='auto') emlp_tagger.model.fit_generator(emlp_tagger.samples(X_train,X_tags_train,y_train, batch, label_encoder), validation_data=emlp_tagger.samples(X_dev,X_tags_dev,y_dev,batch, label_encoder), steps_per_epoch=(X_train.shape[0]/batch)+1, epochs=30, verbose=1, validation_steps=(X_dev.shape[0]/batch)+1, callbacks=[checkpoint,early_stopping]) print emlp_tagger.model.evaluate_generator(emlp_tagger.samples(X_dev,X_tags_dev,y_dev,batch, label_encoder), steps= (X_dev.shape[0]/batch)+1) ################################################################### # TRAINING A DISCRETE MLP ################################################################### elif args.baseline.lower() == "mlp": hidden_size = 100 batch = 8 with codecs.open(args.test, encoding="utf-8") as f_dev: dev_samples = [[ tuple(l.split("\t")) for l in sentence.split("\n")] for sentence in f_dev.read().split("\n\n") if sentence != ""] with codecs.open(args.train, encoding="utf-8") as f_train: train_samples = [[ tuple(l.split("\t")) for l in sentence.split("\n")] for sentence in f_train.read().split("\n\n") if sentence != ""] print "Len dev samples", len(dev_samples) print "Len train amples", len(train_samples) X_train, y_train = PerceptronTagger.builder().transform(train_samples, args.prev_context, args.next_context) X_dev, y_dev = PerceptronTagger.builder().transform(dev_samples, args.prev_context, args.next_context) # Fit our DictVectorizer with our set of features dict_vectorizer = DictVectorizer(sparse=True) dict_vectorizer.fit(X_train + X_dev) X_train = dict_vectorizer.transform(X_train) X_dev = dict_vectorizer.transform(X_dev) label_encoder = LabelEncoder() label_encoder.fit(y_train + y_dev) y_train = label_encoder.transform(y_train) y_dev = label_encoder.transform(y_dev) y_train = np_utils.to_categorical(y_train,num_classes=len(label_encoder.classes_)) y_dev = np_utils.to_categorical(y_dev,num_classes=len(label_encoder.classes_)) with codecs.open(args.model+".mlp.features","wb") as f: pickle.dump((dict_vectorizer, hidden_size, args.prev_context, args.next_context),f) with codecs.open(args.model+".mlp.labels","wb") as f: pickle.dump(label_encoder,f) checkpoint = keras.callbacks.ModelCheckpoint(args.model+".mlp.hdf5", save_best_only=True) early_stopping = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=4, verbose=0, mode='auto') mlp_tagger = PerceptronTagger(X_train.shape[1],hidden_size, y_train.shape[1]) mlp_tagger.model.fit_generator(mlp_tagger.samples(X_train,y_train, batch), validation_data=mlp_tagger.samples(X_dev,y_dev,batch), steps_per_epoch=(X_train.shape[0]/batch)+1, epochs=30, verbose=1, validation_steps=(X_dev.shape[0]/batch)+1, callbacks=[checkpoint,early_stopping]) print mlp_tagger.model.evaluate_generator(mlp_tagger.samples(X_dev,y_dev,batch), steps= (X_dev.shape[0]/batch)+1) ################################################################### # TRAINING A CONDITIONAL RANDOM FIELDS MODEL ################################################################### elif args.baseline.lower() == "crf": crf = CRF( algorithm='lbfgs', c1=0.1, c2=0.1, max_iterations=20, 
all_possible_transitions=False, model_filename=args.model+".crf", ) with codecs.open(args.test, encoding="utf-8") as f_dev: dev_samples = [[l.split("\t") for l in sentence.split("\n")] for sentence in f_dev.read().split("\n\n") if sentence != ""] with codecs.open(args.train, encoding="utf-8") as f_train: train_samples = [[l.split("\t") for l in sentence.split("\n")] for sentence in f_train.read().split("\n\n") if sentence != ""] X_train = [sent2features(s,args.prev_context, args.next_context) for s in train_samples] y_train = [sent2labels(s) for s in train_samples] X_dev = [sent2features(s,args.prev_context, args.next_context) for s in dev_samples] y_dev = [sent2labels(s) for s in dev_samples] crf.fit(X_train, y_train) y_pred = crf.predict(X_dev) print "F-score",flat_f1_score(y_dev, y_pred, average='weighted') print "Accuracy:", crf.score(X_dev, y_dev) with codecs.open(args.model+".crf.pickle","wb") as f: pickle.dump((crf, args.prev_context, args.next_context), f) else: raise NotImplementedError else: raise NotImplementedError
[]
[]
[ "CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES", "PYTHONHASHSEED" ]
[]
["CUDA_DEVICE_ORDER", "CUDA_VISIBLE_DEVICES", "PYTHONHASHSEED"]
python
3
0
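The record above lists CUDA_DEVICE_ORDER, CUDA_VISIBLE_DEVICES and PYTHONHASHSEED as its constant environment keys, and its training branch pins every random source to seed 17 on a single-threaded TensorFlow session. A minimal sketch of that reproducibility setup, assuming TensorFlow 1.x/Keras as used in the record; the "PCI_BUS_ID" ordering and the device index "0" are illustrative assumptions, not values taken from the record:

# Reproducibility sketch (TensorFlow 1.x API, as in the record above).
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"   # assumption: conventional value so device ids match nvidia-smi
os.environ["CUDA_VISIBLE_DEVICES"] = "0"         # assumption: pick a single GPU for the run
os.environ["PYTHONHASHSEED"] = "17"              # as in the record; note it only affects str hashing if set before the interpreter starts

import random
import numpy as np
import tensorflow as tf

random.seed(17)
np.random.seed(17)
tf.set_random_seed(17)

# Single-threaded session, mirroring the record's session_conf, so op scheduling stays deterministic.
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1,
                              inter_op_parallelism_threads=1)
sess = tf.Session(config=session_conf)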
pkg/manifests/configfile.go
package manifests import ( "bytes" "context" "fmt" "io" "io/ioutil" "os" "os/exec" "path/filepath" "time" "github.com/pkg/errors" "gopkg.in/yaml.v2" "github.com/fluxcd/flux/pkg/image" "github.com/fluxcd/flux/pkg/resource" ) const ( ConfigFilename = ".flux.yaml" CommandTimeout = time.Minute ) // ConfigFile holds the values necessary for generating and updating // manifests according to a `.flux.yaml` file. It does double duty as // the format for the file (to deserialise into), and the state // necessary for running commands. type ConfigFile struct { Version int // Only one of the following should be set simultaneously CommandUpdated *CommandUpdated `yaml:"commandUpdated"` PatchUpdated *PatchUpdated `yaml:"patchUpdated"` // These are supplied, and can't be calculated from each other configPath string // the absolute path to the .flux.yaml workingDir string // the absolute path to the dir in which to run commands or find a patch file workingDirRelative string // the working dir, given relative to the repo root, to use as a location in errors // This is calculated on creation configPathRelative string // the path to the config file _relative_ to the working directory } // CommandUpdated represents a config in which updates are done by // execing commands as given. type CommandUpdated struct { Generators []Generator Updaters []Updater } // Generator is an individual command for generating manifests. type Generator struct { Command string } // Updater gives a means for updating image refs and a means for // updating policy in a manifest. type Updater struct { ContainerImage ContainerImageUpdater `yaml:"containerImage"` Policy PolicyUpdater } // ContainerImageUpdater is a command for updating the image used by a // container, in a manifest. type ContainerImageUpdater struct { Command string } // PolicyUpdater is a command for updating a policy for a manifest. type PolicyUpdater struct { Command string } // PatchUpdated represents a config in which updates are done by // maintaining a patch, which is calculating from, and applied to, the // generated manifests. type PatchUpdated struct { Generators []Generator PatchFile string `yaml:"patchFile"` } // NewConfigFile constructs a ConfigFile for the relative gitPath, // from the config file at the absolute path configPath, with the absolute // workingDir. 
func NewConfigFile(gitPath, configPath, workingDir string) (*ConfigFile, error) { result := &ConfigFile{ configPath: configPath, workingDir: workingDir, workingDirRelative: gitPath, } relConfigPath, err := filepath.Rel(workingDir, configPath) if err != nil { return nil, fmt.Errorf("config file not relative to working dir: %s", err) } result.configPathRelative = relConfigPath fileBytes, err := ioutil.ReadFile(configPath) if err != nil { return nil, fmt.Errorf("cannot read: %s", err) } if err := yaml.Unmarshal(fileBytes, result); err != nil { return nil, fmt.Errorf("cannot parse: %s", err) } switch { case result.Version != 1: return nil, errors.New("incorrect version, only version 1 is supported for now") case (result.CommandUpdated != nil && result.PatchUpdated != nil) || (result.CommandUpdated == nil && result.PatchUpdated == nil): return nil, errors.New("a single commandUpdated or patchUpdated entry must be defined") case result.PatchUpdated != nil && result.PatchUpdated.PatchFile == "": return nil, errors.New("patchUpdated's patchFile cannot be empty") } return result, nil } // -- entry points for using a config file to generate or update manifests func makeNoCommandsRunErr(field string, cf *ConfigFile) error { return fmt.Errorf("no %s commands to run in %s (from path %s)", field, cf.configPathRelative, cf.workingDirRelative) } // ConfigRelativeToWorkingDir shows the path to the config file taking // the working dir as a starting point; e.g., `staging/../.flux.yaml` func (cf *ConfigFile) ConfigRelativeToWorkingDir() string { // filepath.Join will clean the resulting path, but here I want to // leave parent paths in, e.g., `staging/../.flux.yaml` return fmt.Sprintf("%s%c%s", cf.workingDirRelative, filepath.Separator, cf.configPathRelative) } // GenerateManifests returns the manifests generated (and patched, if // necessary) according to the config file. func (cf *ConfigFile) GenerateManifests(ctx context.Context, manifests Manifests) ([]byte, error) { if cf.PatchUpdated != nil { _, finalBytes, _, err := cf.getGeneratedAndPatchedManifests(ctx, manifests) return finalBytes, err } return cf.getGeneratedManifests(ctx, manifests, cf.CommandUpdated.Generators) } func (cf *ConfigFile) SetWorkloadContainerImage(ctx context.Context, manifests Manifests, r resource.Resource, container string, newImageID image.Ref) error { if cf.PatchUpdated != nil { return cf.updatePatchFile(ctx, manifests, func(previousManifests []byte) ([]byte, error) { return manifests.SetWorkloadContainerImage(previousManifests, r.ResourceID(), container, newImageID) }) } // Command-updated result := cf.execContainerImageUpdaters(ctx, r.ResourceID(), container, newImageID.Name.String(), newImageID.Tag) if len(result) == 0 { return makeNoCommandsRunErr("update.containerImage", cf) } if len(result) > 0 && result[len(result)-1].Error != nil { updaters := cf.CommandUpdated.Updaters return fmt.Errorf("error executing image updater command %q from file %q: %s\noutput:\n%s", updaters[len(result)-1].ContainerImage.Command, result[len(result)-1].Error, r.Source(), result[len(result)-1].Output, ) } return nil } // UpdateWorkloadPolicies updates policies for a workload, using // commands or patching according to the config file. 
func (cf *ConfigFile) UpdateWorkloadPolicies(ctx context.Context, manifests Manifests, r resource.Resource, update resource.PolicyUpdate) (bool, error) { if cf.PatchUpdated != nil { var changed bool err := cf.updatePatchFile(ctx, manifests, func(previousManifests []byte) ([]byte, error) { updatedManifests, err := manifests.UpdateWorkloadPolicies(previousManifests, r.ResourceID(), update) if err == nil { changed = bytes.Compare(previousManifests, updatedManifests) != 0 } return updatedManifests, err }) return changed, err } // Command-updated workload, ok := r.(resource.Workload) if !ok { return false, errors.New("resource " + r.ResourceID().String() + " does not have containers") } changes, err := resource.ChangesForPolicyUpdate(workload, update) if err != nil { return false, err } for key, value := range changes { result := cf.execPolicyUpdaters(ctx, r.ResourceID(), key, value) if len(result) == 0 { return false, makeNoCommandsRunErr("updaters.policy", cf) } if len(result) > 0 && result[len(result)-1].Error != nil { updaters := cf.CommandUpdated.Updaters err := fmt.Errorf("error executing annotation updater command %q from file %q: %s\noutput:\n%s", updaters[len(result)-1].Policy.Command, result[len(result)-1].Error, r.Source(), result[len(result)-1].Output, ) return false, err } } // We assume that the update changed the resource. Alternatively, we could generate the resources // again and compare the output, but that's expensive. return true, nil } type ConfigFileExecResult struct { Error error Stderr []byte Stdout []byte } type ConfigFileCombinedExecResult struct { Error error Output []byte } // -- these are helpers to support the entry points above // getGeneratedAndPatchedManifests is used to generate manifests when // the config is patchUpdated. func (cf *ConfigFile) getGeneratedAndPatchedManifests(ctx context.Context, manifests Manifests) ([]byte, []byte, string, error) { generatedManifests, err := cf.getGeneratedManifests(ctx, manifests, cf.PatchUpdated.Generators) if err != nil { return nil, nil, "", err } // The patch file is given in the config file as a path relative // to the working directory relPatchFilePath := cf.PatchUpdated.PatchFile patchFilePath := filepath.Join(cf.workingDir, relPatchFilePath) patch, err := ioutil.ReadFile(patchFilePath) if err != nil { if !os.IsNotExist(err) { return nil, nil, "", err } // Tolerate a missing patch file, since it may not have been created yet. // However, its base path must exist. patchBaseDir := filepath.Dir(patchFilePath) if stat, err := os.Stat(patchBaseDir); err != nil || !stat.IsDir() { err := fmt.Errorf("base directory (%q) of patchFile (%q) does not exist", filepath.Dir(relPatchFilePath), relPatchFilePath) return nil, nil, "", err } patch = nil } patchedManifests, err := manifests.ApplyManifestPatch(generatedManifests, patch, cf.configPathRelative, relPatchFilePath) if err != nil { return nil, nil, "", fmt.Errorf("processing %q, cannot apply patchFile %q to generated resources: %s", cf.configPathRelative, relPatchFilePath, err) } return generatedManifests, patchedManifests, patchFilePath, nil } // getGeneratedManifests is used to produce the manifests based _only_ // on the generators in the config. This is sufficient for // commandUpdated config, and the first step for patchUpdated config. 
func (cf *ConfigFile) getGeneratedManifests(ctx context.Context, manifests Manifests, generators []Generator) ([]byte, error) { buf := bytes.NewBuffer(nil) for i, cmdResult := range cf.execGenerators(ctx, generators) { if cmdResult.Error != nil { err := fmt.Errorf("error executing generator command %q from file %q: %s\nerror output:\n%s\ngenerated output:\n%s", generators[i].Command, cf.configPathRelative, cmdResult.Error, string(cmdResult.Stderr), string(cmdResult.Stderr), ) return nil, err } if err := manifests.AppendManifestToBuffer(cmdResult.Stdout, buf); err != nil { return nil, err } } return buf.Bytes(), nil } // updatePatchFile calculates the patch given a transformation, and // updates the patch file given in the config. func (cf *ConfigFile) updatePatchFile(ctx context.Context, manifests Manifests, updateFn func(previousManifests []byte) ([]byte, error)) error { generatedManifests, patchedManifests, patchFilePath, err := cf.getGeneratedAndPatchedManifests(ctx, manifests) if err != nil { return fmt.Errorf("error parsing generated, patched output from file %s: %s", cf.configPathRelative, err) } finalManifests, err := updateFn(patchedManifests) if err != nil { return err } newPatch, err := manifests.CreateManifestPatch(generatedManifests, finalManifests, "generated manifests", "patched and updated manifests") if err != nil { return err } return ioutil.WriteFile(patchFilePath, newPatch, 0600) } // execGenerators executes all the generators given and returns the // results; it will stop at the first failing command. func (cf *ConfigFile) execGenerators(ctx context.Context, generators []Generator) []ConfigFileExecResult { result := []ConfigFileExecResult{} for _, g := range generators { stdErr := bytes.NewBuffer(nil) stdOut := bytes.NewBuffer(nil) err := cf.execCommand(ctx, nil, stdOut, stdErr, g.Command) r := ConfigFileExecResult{ Stdout: stdOut.Bytes(), Stderr: stdErr.Bytes(), Error: err, } result = append(result, r) // Stop executing on the first command error if err != nil { break } } return result } // execContainerImageUpdaters executes all the image updates in the configuration file. // It will stop at the first error, in which case the returned error will be non-nil func (cf *ConfigFile) execContainerImageUpdaters(ctx context.Context, workload resource.ID, container string, image, imageTag string) []ConfigFileCombinedExecResult { env := makeEnvFromResourceID(workload) env = append(env, "FLUX_CONTAINER="+container, "FLUX_IMG="+image, "FLUX_TAG="+imageTag, ) commands := []string{} var updaters []Updater if cf.CommandUpdated != nil { updaters = cf.CommandUpdated.Updaters } for _, u := range updaters { commands = append(commands, u.ContainerImage.Command) } return cf.execCommandsWithCombinedOutput(ctx, env, commands) } // execPolicyUpdaters executes all the policy update commands given in // the configuration file. An empty policyValue means remove the // policy. 
It will stop at the first error, in which case the returned // error will be non-nil func (cf *ConfigFile) execPolicyUpdaters(ctx context.Context, workload resource.ID, policyName, policyValue string) []ConfigFileCombinedExecResult { env := makeEnvFromResourceID(workload) env = append(env, "FLUX_POLICY="+policyName) if policyValue != "" { env = append(env, "FLUX_POLICY_VALUE="+policyValue) } commands := []string{} var updaters []Updater if cf.CommandUpdated != nil { updaters = cf.CommandUpdated.Updaters } for _, u := range updaters { commands = append(commands, u.Policy.Command) } return cf.execCommandsWithCombinedOutput(ctx, env, commands) } func (cf *ConfigFile) execCommandsWithCombinedOutput(ctx context.Context, env []string, commands []string) []ConfigFileCombinedExecResult { env = append(env, "PATH="+os.Getenv("PATH")) result := []ConfigFileCombinedExecResult{} for _, c := range commands { stdOutAndErr := bytes.NewBuffer(nil) err := cf.execCommand(ctx, env, stdOutAndErr, stdOutAndErr, c) r := ConfigFileCombinedExecResult{ Output: stdOutAndErr.Bytes(), Error: err, } result = append(result, r) // Stop executing on the first command error if err != nil { break } } return result } func (cf *ConfigFile) execCommand(ctx context.Context, env []string, stdOut, stdErr io.Writer, command string) error { cmdCtx, cancel := context.WithTimeout(ctx, CommandTimeout) defer cancel() cmd := exec.CommandContext(ctx, "/bin/sh", "-c", command) cmd.Env = env cmd.Dir = cf.workingDir cmd.Stdout = stdOut cmd.Stderr = stdErr err := cmd.Run() if cmdCtx.Err() == context.DeadlineExceeded { err = cmdCtx.Err() } else if cmdCtx.Err() == context.Canceled { err = errors.Wrap(ctx.Err(), fmt.Sprintf("context was unexpectedly cancelled")) } return err } func makeEnvFromResourceID(id resource.ID) []string { ns, kind, name := id.Components() return []string{ "FLUX_WORKLOAD=" + id.String(), "FLUX_WL_NS=" + ns, "FLUX_WL_KIND=" + kind, "FLUX_WL_NAME=" + name, } }
[ "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
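The configfile.go record above defines the schema for .flux.yaml (version 1, with exactly one of commandUpdated or patchUpdated) and enforces it in NewConfigFile. Below is a small sketch of a config that passes those checks, written in Python for consistency with the other examples in this section; the command strings are illustrative assumptions, and PyYAML stands in for the Go yaml package:

# Minimal commandUpdated-style .flux.yaml plus the NewConfigFile validation rules, sketched in Python.
import yaml  # PyYAML

example = """
version: 1
commandUpdated:
  generators:
    - command: ./generate-manifests.sh    # assumption: any command that prints manifests on stdout
  updaters:
    - containerImage:
        command: ./update-image.sh        # assumption
      policy:
        command: ./update-policy.sh       # assumption
"""

cfg = yaml.safe_load(example)

# Mirror NewConfigFile: version must be 1; exactly one of commandUpdated / patchUpdated may be set;
# a patchUpdated entry additionally needs a non-empty patchFile.
assert cfg["version"] == 1, "only version 1 is supported"
assert ("commandUpdated" in cfg) != ("patchUpdated" in cfg), \
    "a single commandUpdated or patchUpdated entry must be defined"
if "patchUpdated" in cfg:
    assert cfg["patchUpdated"].get("patchFile"), "patchUpdated's patchFile cannot be empty"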
cubes/slicer/commands.py
# -*- encoding: utf-8 -*- """Slicer – Cubes command-line tool For more information run: slicer --help To enable full user exception debugging set the ``CUBES_ERROR_DEBUG`` environment variable. """ from __future__ import absolute_import from __future__ import print_function import json import os import sys import click from .. import compat from ..datastructures import AttributeDict from ..errors import InconsistencyError, ArgumentError, InternalError, UserError from ..formatters import csv_generator, SlicerJSONEncoder, JSONLinesGenerator from ..metadata import read_model_metadata, write_model_metadata_bundle from ..workspace import Workspace from ..errors import CubesError from ..server import run_server from ..server.base import read_slicer_config from .. import ext from ..query import cuts_from_string, Cell from ..metadata import string_to_dimension_level DEFAULT_CONFIG = "slicer.ini" @click.group() @click.pass_context @click.option('--debug/--no-debug', envvar='CUBES_DEBUG', default=False, help="Enable/disable debugging output") def cli(ctx, debug): ctx.obj = AttributeDict() ctx.obj.debug = debug ################################################################################ # Command: serve @cli.command() @click.argument('config', type=click.Path(exists=True), default=DEFAULT_CONFIG) @click.option('--visualizer', help="Visualizer URL for /visualizer path") @click.pass_context def serve(ctx, config, visualizer): """Run Slicer HTTP server.""" config = read_config(config) # FIXME: "visualizer" shouldn't be in "server" section if visualizer: config.set("server", "visualizer", visualizer) run_server(config, debug=ctx.obj.debug) ################################################################################ # Command: extension @cli.command("ext-info") @click.argument('extension_type', metavar='TYPE', required=False, default='all') @click.argument('extension_name', metavar='NAME', required=False) @click.pass_context def extension_info(ctx, extension_type, extension_name): """Show info about Cubes extensions""" if extension_type == 'all': types = ext.EXTENSION_TYPES.items() else: label = ext.EXTENSION_TYPES[extension_type] types = [(extension_type, label)] if extension_name: # Print detailed extension information manager = getattr(ext, extension_type) extension = manager.get(extension_name) extension.load() desc = extension.description or "No description." 
click.echo("{e.name} - {e.label}\n\n" "{desc}\n" .format(e=extension, desc=desc)) if extension.options: click.echo("Configuration options:\n") for option in extension.options.values(): name = option.get("name") desc = option.get("description", option.get("label")) desc = " - {}".format(desc) if desc else "" type_ = option.get("type_", "string") click.echo(" {name} ({type_}){desc}" .format(name=name, desc=desc, type_=type_)) else: click.echo("No known options.") else: # List extensions click.echo("Available Cubes extensions:\n") for ext_type, _ in types: manager = getattr(ext, ext_type) manager.discover() names = manager.names() click.echo("{}:\n {}\n".format(ext_type, ", ".join(names))) click.echo() ################################################################################ # Command: list @cli.command() @click.option('--verbose/--terse', 'verbose', default=False, help='Display also cube description') @click.argument('config', required=False, default=DEFAULT_CONFIG, type=click.Path(exists=True)) @click.pass_context def list(ctx, config, verbose): """List cubes""" ws = Workspace(config) for cube in ws.list_cubes(): name = cube["name"] label = cube.get("label", name) desc = cube.get("description", "(no description)") if verbose: print("{} - {}\n {}\n".format(name, label, desc)) else: print("{} - {}".format(name, label)) ################################################################################ # Command: valdate_model @cli.group() @click.pass_context def model(ctx): """Model metadata tools.""" pass @model.command() @click.option('--defaults', '-d', 'show_defaults', default=False, help='show defaults') @click.option('--warnings/--no-warnings', 'show_warnings', default=True, help='enable/disable warnings') @click.argument('model_path', metavar='MODEL') def validate(show_defaults, show_warnings, model_path): """Validate model metadata""" click.echo("Reading model %s" % model_path) model = cubes.read_model_metadata(model_path) click.echo("Validating model...") result = cubes.providers.validate_model(model) error_count = 0 warning_count = 0 default_count = 0 for error in result: if error.scope == "model": scope = "model" else: if error.object: scope = "%s '%s'" % (error.scope, error.object) else: scope = "unknown %s" % error.scope if error.property: scope += " property '%s'" % error.property show = True if error.severity == "error": error_count += 1 elif error.severity == "warning": warning_count += 1 show = show_warnings elif error.severity == "default": show = show_defaults default_count += 1 if show: print("%s in %s: %s" % (error.severity.upper(), scope, error.message)) if error_count == 0: if warning_count == 0: if default_count == 0: verdict = "model can be used" else: verdict = "model can be used, " \ "make sure that the defaults reflect reality" else: verdict = "not recommended to use the model, " \ "some issues might emerge" else: verdict = "model can not be used" print("") print("Defaults used %d" % default_count) print("Warning %d" % warning_count) print("Errors %d" % error_count) print("Summary %s" % verdict) if error_count > 0: exit(1) ################################################################################ # Command: test @cli.command() @click.option('--aggregate', is_flag=True, default=False, help="Test aggregate of whole cube") @click.option('--exclude-store', '-E', 'exclude_stores', multiple=True) @click.option('--store', 'include_stores', multiple=True) @click.argument('config', default=DEFAULT_CONFIG) @click.argument('cube', nargs=-1) def test(aggregate, 
exclude_stores, include_stores, config, cube): """Test every cube in the model""" workspace = cubes.Workspace(config) errors = [] if cube: cube_list = cube else: cube_list = [c["name"] for c in workspace.list_cubes()] exclude = exclude_stores or [] include = include_stores or [] tested = 0 for name in cube_list: cube = workspace.cube(name) click.echo("testing {}: ".format(name), nl=False) if cube.store_name in exclude \ or (include and cube.store_name not in include): click.echo("pass") continue try: browser = workspace.browser(name) except Exception as e: errors.append((name, e)) click.echo("BROWSER ERROR") continue tested += 1 try: facts = browser.test(aggregate=aggregate) except NotImplementedError: click.echo("pass - no test") # FIXME XXX CubesError not defined except CubesError as e: errors.append((name, e)) click.echo("ERROR") click.echo() click.echo("tested %d cubes" % tested) if errors: click.echo("%d ERRORS:" % len(errors)) for (cube, e) in errors: if hasattr(e, "error_type"): etype = e.error_type else: etype = str(type(e)) click.echo("%s: %s - %s" % (cube, etype, str(e))) else: click.echo("test passed") @model.command() @click.option('--format', 'model_format', type=click.Choice(["json", "bundle"]), default='json', help='output model format') @click.option('--force', is_flag=True, default=False, help='replace existing model bundle') @click.argument('model_path', metavar='MODEL') @click.argument('target', required=False) @click.pass_context def convert(ctx, model_format, force, model_path, target): """Convert model between model formats.""" metadata = read_model_metadata(model_path) if model_format == "json": if not target: print(json.dumps(metadata, indent=4)) else: with open(target, "w") as f: json.dump(metadata, f, indent=4) elif model_format == "bundle": write_model_metadata_bundle(target, metadata, replace=force) def read_config(cfg): """Read the configuration file.""" return read_slicer_config(cfg) ################################################################################ # Group: sql @cli.group() @click.pass_context @click.option('--store', nargs=1, help="Name of the store to use other than default. 
Must be SQL.") @click.option('--config', nargs=1, default=DEFAULT_CONFIG, help="Name of slicer.ini configuration file") def sql(ctx, store, config): """SQL store commands""" ctx.obj.workspace = cubes.Workspace(config) ctx.obj.store = ctx.obj.workspace.get_store(store) ################################################################################ # Command: sql denormalize @sql.command() @click.option('--force', is_flag=True, default=False, help='replace existing views') @click.option('--materialize', '-m', is_flag=True, default=False, help='create materialized view (table)') @click.option('--index/--no-index', default=True, help='create index for key attributes') @click.option('--schema', '-s', help='target view schema (overrides default fact schema') @click.argument('cube', required=False) @click.argument('target', required=False) @click.pass_context def denormalize(ctx, force, materialize, index, schema, cube, target): """Create denormalized view(s) from cube(s).""" if not materialize and index: raise ArgumentError("Non-materialized views can't be indexed") # Shortcuts workspace = ctx.obj.workspace store = ctx.obj.store if cube: target = target or store.naming.denormalized_table_name(cube) cubes = [(cube, target)] else: names = workspace.cube_names() targets = [store.naming.denormalized_table_name(name) for name in names] cubes = zip(names, targets) for cube_name, target in cubes: cube = workspace.cube(cube_name) store = workspace.get_store(cube.store_name or "default") print("denormalizing cube '%s' into '%s'" % (cube_name, target)) store.create_denormalized_view(cube, target, materialize=materialize, replace=force, create_index=index, keys_only=False, schema=schema) # TODO: Nice to have it back # @sql.command("ddl") # @click.argument('cubes', required=False, nargs=-1) # @click.pass_context # def generate_ddl(ctx, cubes): # # Shortcuts # workspace = ctx.obj.workspace # store = ctx.obj.store # # ddl = store.ddl_for_model(args.url, model, fact_prefix=args.fact_prefix, # dimension_prefix=args.dimension_prefix, # fact_suffix=args.fact_suffix, # dimension_suffix=args.dimension_suffix) # # print(ddl) ################################################################################ # Command: sql aggregate @sql.command("aggregate") @click.option('--force', is_flag=True, default=False, help='replace existing views') @click.option('--index/--no-index', default=True, help='create index for key attributes') @click.option('--schema', '-s', help='target view schema (overrides default fact schema') @click.option('--dimension', '-d', "dimensions", multiple=True, help='dimension to be used for aggregation') @click.argument('cube', required=False) @click.argument('target', required=False) @click.pass_context def sql_aggregate(ctx, force, index, schema, cube, target, dimensions): """Create pre-aggregated table from cube(s). If no cube is specified, then all cubes are aggregated. Target table can be specified only for one cube, for multiple cubes naming convention is used. 
""" workspace = ctx.obj.workspace store = ctx.obj.store if cube: target = target or store.naming.aggregated_table_name(cube) cubes = [(cube, target)] else: names = workspace.cube_names() targets = [store.naming.aggregated_table_name(name) for name in names] cubes = zip(names, targets) for cube_name, target in cubes: cube = workspace.cube(cube_name) store = workspace.get_store(cube.store_name or "default") print("denormalizing cube '%s' into '%s'" % (cube_name, target)) store.create_cube_aggregate(cube, target, replace=force, create_index=index, schema=schema, dimensions=dimensions) ################################################################################ # Command: aggregate @cli.command() @click.option('--config', type=click.Path(exists=True), required=False, default=DEFAULT_CONFIG) @click.option('--aggregate', '-a', 'aggregates', multiple=True, help="List of aggregates to get") @click.option('--cut', '-c', 'cuts', multiple=True, help="Cell cut") @click.option('--split', 'split_str', multiple=False, help="Split cell") @click.option('--drilldown', '-d', 'drilldown', multiple=True, help="Drilldown dimensions") @click.option('--on-row', 'on_rows', multiple=True, help="Attribute to put on row (default is all)") @click.option('--on-column', 'on_columns', multiple=True, help="Attribute to put on column (default is none)") @click.option('--format', "-f", "formatter_name", default="cross_table", help="Output format") @click.argument('cube_name', metavar='CUBE') @click.pass_context def aggregate(ctx, config, cube_name, aggregates, cuts, drilldown, formatter_name, split_str, on_rows, on_columns): """Aggregate a cube""" config = read_config(config) workspace = Workspace(config) browser = workspace.browser(cube_name) cell_cuts = [] for cut_str in cuts: cell_cuts += cuts_from_string(browser.cube, cut_str) cell = Cell(browser.cube, cell_cuts) split_cuts = cuts_from_string(browser.cube, split_str) if split_cuts: split = Cell(browser.cube, split_cuts) else: split = None if not aggregates: aggregates = [agg.name for agg in browser.cube.aggregates] # TODO: paging and ordering result = browser.aggregate(cell, aggregates=aggregates, drilldown=drilldown, split=split, page=None, page_size=None, order=None) if formatter_name: formatter = ext.formatter(formatter_name) output = formatter.format(browser.cube, result, onrows=on_rows, oncolumns=on_columns, aggregates=aggregates, aggregates_on="columns") else: output = result.to_dict() click.echo(output) ################################################################################ # Command: members @cli.command() @click.option('--config', type=click.Path(exists=True), required=False, default=DEFAULT_CONFIG) @click.option('--cut', '-c', 'cuts', multiple=True, help="Cell cut") @click.option('--format', "-f", "output_format", default="json", type=click.Choice(["json", "csv", "json_lines" ]), help="Output format") @click.argument('cube_name', metavar='CUBE') @click.argument('dim_name', metavar='DIMENSION') @click.pass_context def members(ctx, config, cube_name, cuts, dim_name, output_format): """Aggregate a cube""" config = read_config(config) workspace = Workspace(config) browser = workspace.browser(cube_name) cube = browser.cube cell_cuts = [] for cut_str in cuts: cell_cuts += cuts_from_string(browser.cube, cut_str) cell = Cell(browser.cube, cell_cuts) (dim_name, hier_name, level_name) = string_to_dimension_level(dim_name) dimension = cube.dimension(dim_name) hierarchy = dimension.hierarchy(hier_name) if level_name: depth = hierarchy.level_index(level_name) 
+ 1 else: depth = len(hierarchy) # TODO: pagination values = browser.members(cell, dimension, depth=depth, hierarchy=hierarchy, page=None, page_size=None) attributes = [] for level in hierarchy.levels_for_depth(depth): attributes += level.attributes fields = [attr.ref for attr in attributes] labels = [attr.label or attr.name for attr in attributes] if output_format == "json": encoder = SlicerJSONEncoder(indent=4) result = encoder.iterencode(values) elif output_format == "json_lines": result = JSONLinesGenerator(values) elif output_format == "csv": result = csv_generator(values, fields, include_header=True, header=labels) out = click.get_text_stream('stdout') for row in result: out.write(row) def main(*args, **kwargs): try: cli(*args, **kwargs) except InconsistencyError as e: # Internal Error - error caused by some edge case conditio, misbehaved # cubes or wrongly categorized error # # It is very unlikely that the user might fix this error by changing # his/her input. # if os.environ.get("CUBES_ERROR_DEBUG"): raise else: click.echo("\n" \ "Error: Internal error occured.\n" "Reason: {}\n\n" \ "Please report the error and information about what you " \ "were doing to the Cubes development team.\n" .format(e), err=True) sys.exit(1) except (InternalError, UserError) as e: # Error caused by the user – model or data related. # # User can fix the error by altering his/her input. # if os.environ.get("CUBES_ERROR_DEBUG"): raise else: click.echo("\nError: {}".format(e), err=True) sys.exit(1)
[]
[]
[ "CUBES_ERROR_DEBUG" ]
[]
["CUBES_ERROR_DEBUG"]
python
1
0
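The slicer commands.py record above documents CUBES_ERROR_DEBUG in its module docstring and applies it in main(): when the variable is set, exceptions are re-raised with a full traceback, otherwise a short message is printed and the process exits non-zero. A minimal sketch of that pattern; the exception class body and the error message are stand-ins for illustration, not taken from cubes:

# CUBES_ERROR_DEBUG pattern from main() above, sketched with a stand-in exception type.
import os
import sys

class UserError(Exception):
    """Stand-in for the UserError imported from cubes.errors in the record."""

def run():
    raise UserError("model 'sales' could not be loaded")   # illustrative failure

try:
    run()
except UserError as e:
    if os.environ.get("CUBES_ERROR_DEBUG"):
        raise                                    # full traceback for debugging
    print("Error: {}".format(e), file=sys.stderr)  # the record uses click.echo(..., err=True)
    sys.exit(1)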
sdks/python/apache_beam/runners/portability/local_job_service.py
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # pytype: skip-file from __future__ import absolute_import import concurrent.futures import logging import os import queue import shutil import subprocess import tempfile import threading import time import traceback from builtins import object from typing import TYPE_CHECKING from typing import List from typing import Optional import grpc from google.protobuf import text_format # type: ignore # not in typeshed from apache_beam.metrics import monitoring_infos from apache_beam.portability.api import beam_artifact_api_pb2 from apache_beam.portability.api import beam_artifact_api_pb2_grpc from apache_beam.portability.api import beam_fn_api_pb2_grpc from apache_beam.portability.api import beam_job_api_pb2 from apache_beam.portability.api import beam_job_api_pb2_grpc from apache_beam.portability.api import beam_provision_api_pb2 from apache_beam.portability.api import endpoints_pb2 from apache_beam.runners.portability import abstract_job_service from apache_beam.runners.portability import artifact_service from apache_beam.runners.portability.fn_api_runner import fn_runner from apache_beam.runners.portability.fn_api_runner import worker_handlers from apache_beam.utils.thread_pool_executor import UnboundedThreadPoolExecutor if TYPE_CHECKING: from google.protobuf import struct_pb2 # pylint: disable=ungrouped-imports from apache_beam.portability.api import beam_runner_api_pb2 _LOGGER = logging.getLogger(__name__) def _iter_queue(q): while True: yield q.get(block=True) class LocalJobServicer(abstract_job_service.AbstractJobServiceServicer): """Manages one or more pipelines, possibly concurrently. Experimental: No backward compatibility guaranteed. Servicer for the Beam Job API. This JobService uses a basic local implementation of runner to run the job. This JobService is not capable of managing job on remote clusters. By default, this JobService executes the job in process but still uses GRPC to communicate pipeline and worker state. It can also be configured to use inline calls rather than GRPC (for speed) or launch completely separate subprocesses for the runner and worker(s). 
""" def __init__(self, staging_dir=None): super(LocalJobServicer, self).__init__() self._cleanup_staging_dir = staging_dir is None self._staging_dir = staging_dir or tempfile.mkdtemp() self._legacy_artifact_service = ( artifact_service.BeamFilesystemArtifactService(self._staging_dir)) self._artifact_service = artifact_service.ArtifactStagingService( artifact_service.BeamFilesystemHandler(self._staging_dir).file_writer) self._artifact_staging_endpoint = None # type: Optional[endpoints_pb2.ApiServiceDescriptor] def create_beam_job(self, preparation_id, # stype: str job_name, # type: str pipeline, # type: beam_runner_api_pb2.Pipeline options # type: struct_pb2.Struct ): # type: (...) -> BeamJob # TODO(angoenka): Pass an appropriate staging_session_token. The token can # be obtained in PutArtifactResponse from JobService if not self._artifact_staging_endpoint: # The front-end didn't try to stage anything, but the worker may # request what's here so we should at least store an empty manifest. self._legacy_artifact_service.CommitManifest( beam_artifact_api_pb2.CommitManifestRequest( staging_session_token=preparation_id, manifest=beam_artifact_api_pb2.Manifest())) self._artifact_service.register_job( staging_token=preparation_id, dependency_sets={ id: env.dependencies for (id, env) in pipeline.components.environments.items() }) provision_info = fn_runner.ExtendedProvisionInfo( beam_provision_api_pb2.ProvisionInfo( pipeline_options=options, retrieval_token=self._legacy_artifact_service.retrieval_token( preparation_id)), self._staging_dir, job_name=job_name) return BeamJob( preparation_id, pipeline, options, provision_info, self._artifact_staging_endpoint, self._artifact_service) def get_bind_address(self): """Return the address used to open the port on the gRPC server. This is often, but not always the same as the service address. For example, to make the service accessible to external machines, override this to return '[::]' and override `get_service_address()` to return a publicly accessible host name. """ return self.get_service_address() def get_service_address(self): """Return the host name at which this server will be accessible. In particular, this is provided to the client upon connection as the artifact staging endpoint. 
""" return 'localhost' def start_grpc_server(self, port=0): self._server = grpc.server(UnboundedThreadPoolExecutor()) port = self._server.add_insecure_port( '%s:%d' % (self.get_bind_address(), port)) beam_job_api_pb2_grpc.add_JobServiceServicer_to_server(self, self._server) beam_artifact_api_pb2_grpc.add_LegacyArtifactStagingServiceServicer_to_server( self._legacy_artifact_service, self._server) beam_artifact_api_pb2_grpc.add_ArtifactStagingServiceServicer_to_server( self._artifact_service, self._server) hostname = self.get_service_address() self._artifact_staging_endpoint = endpoints_pb2.ApiServiceDescriptor( url='%s:%d' % (hostname, port)) self._server.start() _LOGGER.info('Grpc server started at %s on port %d' % (hostname, port)) return port def stop(self, timeout=1): self._server.stop(timeout) if os.path.exists(self._staging_dir) and self._cleanup_staging_dir: shutil.rmtree(self._staging_dir, ignore_errors=True) def GetJobMetrics(self, request, context=None): if request.job_id not in self._jobs: raise LookupError("Job {} does not exist".format(request.job_id)) result = self._jobs[request.job_id].result monitoring_info_list = [] for mi in result._monitoring_infos_by_stage.values(): monitoring_info_list.extend(mi) # Filter out system metrics user_monitoring_info_list = [ x for x in monitoring_info_list if monitoring_infos.is_user_monitoring_info(x) ] return beam_job_api_pb2.GetJobMetricsResponse( metrics=beam_job_api_pb2.MetricResults( committed=user_monitoring_info_list)) class SubprocessSdkWorker(object): """Manages a SDK worker implemented as a subprocess communicating over grpc. """ def __init__( self, worker_command_line, # type: bytes control_address, worker_id=None): self._worker_command_line = worker_command_line self._control_address = control_address self._worker_id = worker_id def run(self): logging_server = grpc.server(UnboundedThreadPoolExecutor()) logging_port = logging_server.add_insecure_port('[::]:0') logging_server.start() logging_servicer = BeamFnLoggingServicer() beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server( logging_servicer, logging_server) logging_descriptor = text_format.MessageToString( endpoints_pb2.ApiServiceDescriptor(url='localhost:%s' % logging_port)) control_descriptor = text_format.MessageToString( endpoints_pb2.ApiServiceDescriptor(url=self._control_address)) env_dict = dict( os.environ, CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor, LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor) # only add worker_id when it is set. if self._worker_id: env_dict['WORKER_ID'] = self._worker_id with worker_handlers.SUBPROCESS_LOCK: p = subprocess.Popen(self._worker_command_line, shell=True, env=env_dict) try: p.wait() if p.returncode: raise RuntimeError( 'Worker subprocess exited with return code %s' % p.returncode) finally: if p.poll() is None: p.kill() logging_server.stop(0) class BeamJob(abstract_job_service.AbstractBeamJob): """This class handles running and managing a single pipeline. The current state of the pipeline is available as self.state. 
""" def __init__(self, job_id, # type: str pipeline, options, provision_info, # type: fn_runner.ExtendedProvisionInfo artifact_staging_endpoint, # type: Optional[endpoints_pb2.ApiServiceDescriptor] artifact_service, # type: artifact_service.ArtifactStagingService ): super(BeamJob, self).__init__(job_id, provision_info.job_name, pipeline, options) self._provision_info = provision_info self._artifact_staging_endpoint = artifact_staging_endpoint self._artifact_service = artifact_service self._state_queues = [] # type: List[queue.Queue] self._log_queues = [] # type: List[queue.Queue] self.daemon = True self.result = None def set_state(self, new_state): """Set the latest state as an int enum and notify consumers""" timestamp = super(BeamJob, self).set_state(new_state) if timestamp is not None: # Inform consumers of the new state. for queue in self._state_queues: queue.put((new_state, timestamp)) def prepare(self): pass def artifact_staging_endpoint(self): return self._artifact_staging_endpoint def run(self): self.set_state(beam_job_api_pb2.JobState.STARTING) self._run_thread = threading.Thread(target=self._run_job) self._run_thread.start() def _run_job(self): self.set_state(beam_job_api_pb2.JobState.RUNNING) with JobLogHandler(self._log_queues): self._update_dependencies() try: result = fn_runner.FnApiRunner( provision_info=self._provision_info).run_via_runner_api( self._pipeline_proto) _LOGGER.info('Successfully completed job.') self.set_state(beam_job_api_pb2.JobState.DONE) self.result = result except: # pylint: disable=bare-except _LOGGER.exception('Error running pipeline.') _LOGGER.exception(traceback) self.set_state(beam_job_api_pb2.JobState.FAILED) raise def _update_dependencies(self): try: for env_id, deps in self._artifact_service.resolved_deps( self._job_id, timeout=0).items(): # Slice assignment not supported for repeated fields. env = self._pipeline_proto.components.environments[env_id] del env.dependencies[:] env.dependencies.extend(deps) except concurrent.futures.TimeoutError: pass # TODO(BEAM-9577): Require this once all SDKs support it. def cancel(self): if not self.is_terminal_state(self.state): self.set_state(beam_job_api_pb2.JobState.CANCELLING) # TODO(robertwb): Actually cancel... self.set_state(beam_job_api_pb2.JobState.CANCELLED) def get_state_stream(self): # Register for any new state changes. state_queue = queue.Queue() self._state_queues.append(state_queue) for state, timestamp in self.with_state_history(_iter_queue(state_queue)): yield state, timestamp if self.is_terminal_state(state): break def get_message_stream(self): # Register for any new messages. log_queue = queue.Queue() self._log_queues.append(log_queue) self._state_queues.append(log_queue) for msg in self.with_state_history(_iter_queue(log_queue)): if isinstance(msg, tuple): assert len(msg) == 2 and isinstance(msg[0], int) current_state = msg[0] yield msg if self.is_terminal_state(current_state): break else: yield msg class BeamFnLoggingServicer(beam_fn_api_pb2_grpc.BeamFnLoggingServicer): def Logging(self, log_bundles, context=None): for log_bundle in log_bundles: for log_entry in log_bundle.log_entries: _LOGGER.info('Worker: %s', str(log_entry).replace('\n', ' ')) return iter([]) class JobLogHandler(logging.Handler): """Captures logs to be returned via the Beam Job API. Enabled via the with statement.""" # Mapping from logging levels to LogEntry levels. 
LOG_LEVEL_MAP = { logging.FATAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR, logging.CRITICAL: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR, logging.ERROR: beam_job_api_pb2.JobMessage.JOB_MESSAGE_ERROR, logging.WARNING: beam_job_api_pb2.JobMessage.JOB_MESSAGE_WARNING, logging.INFO: beam_job_api_pb2.JobMessage.JOB_MESSAGE_BASIC, logging.DEBUG: beam_job_api_pb2.JobMessage.JOB_MESSAGE_DEBUG, } def __init__(self, log_queues): super(JobLogHandler, self).__init__() self._last_id = 0 self._logged_thread = None self._log_queues = log_queues def __enter__(self): # Remember the current thread to demultiplex the logs of concurrently # running pipelines (as Python log handlers are global). self._logged_thread = threading.current_thread() logging.getLogger().addHandler(self) def __exit__(self, *args): self._logged_thread = None self.close() def _next_id(self): self._last_id += 1 return str(self._last_id) def emit(self, record): if self._logged_thread is threading.current_thread(): msg = beam_job_api_pb2.JobMessage( message_id=self._next_id(), time=time.strftime( '%Y-%m-%d %H:%M:%S.', time.localtime(record.created)), importance=self.LOG_LEVEL_MAP[record.levelno], message_text=self.format(record)) # Inform all message consumers. for queue in self._log_queues: queue.put(msg)
[]
[]
[]
[]
[]
python
0
0
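In the local_job_service.py record above, SubprocessSdkWorker launches the SDK worker with a copy of the parent environment extended with the control and logging service descriptors, adding WORKER_ID only when one is set; the code writes these variables for the child process but never reads one by a fixed name, which matches the empty lists above. A small sketch of that launch pattern; the descriptor values and the placeholder command line are illustrative assumptions:

# Environment-extension pattern from SubprocessSdkWorker.run(), sketched with placeholder values.
import os
import subprocess

control_descriptor = 'url: "localhost:50000"'   # assumption, stands in for the text-format ApiServiceDescriptor
logging_descriptor = 'url: "localhost:50001"'   # assumption
worker_id = None                                # only propagated when set

env_dict = dict(os.environ,
                CONTROL_API_SERVICE_DESCRIPTOR=control_descriptor,
                LOGGING_API_SERVICE_DESCRIPTOR=logging_descriptor)
if worker_id:
    env_dict["WORKER_ID"] = worker_id

p = subprocess.Popen("echo worker command line would run here", shell=True, env=env_dict)
p.wait()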
applications/T2VLAD/train.py
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License" # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. import os import time import copy import socket import paddle import argparse import warnings import numpy as np import model.loss as module_loss import model.model as module_arch import model.metric as module_metric import data_loader.data_loaders as module_data from pathlib import Path from utils import set_seeds from trainer import Trainer from test import evaluation from mergedeep import merge, Strategy from parse_config import ConfigParser from logger.log_parser import log_summary from utils import compute_dims, compute_trn_config def run_exp(config): warnings.filterwarnings('ignore') logger = config.get_logger('train') expert_dims, raw_input_dims = compute_dims(config, logger) trn_config = compute_trn_config(config) if config._args.group_seed: seeds = [int(config._args.group_seed)] else: seeds = [int(x) for x in config._args.seeds.split(",")] # set up local filesystem on the cluster if socket.gethostname().endswith("cluster"): os.system(str(Path.home() / "configure_tmp_data.sh")) for ii, seed in enumerate(seeds): tic = time.time() logger.info(f"{ii + 1}/{len(seeds)} Setting experiment random seed to {seed}") set_seeds(seed) config["seed"] = seed model = config.init( name='arch', module=module_arch, expert_dims=expert_dims, text_dim=config["experts"]["text_dim"], ce_shared_dim=config["experts"].get("ce_shared_dim", None), feat_aggregation=config["data_loader"]["args"]["feat_aggregation"], ) logger.info(model) data_loaders = config.init( name='data_loader', module=module_data, logger=logger, raw_input_dims=raw_input_dims, text_feat=config["experts"]["text_feat"], text_dim=config["experts"]["text_dim"], text_agg=config["experts"]["text_agg"], use_zeros_for_missing=config["experts"].get("use_zeros_for_missing", False), eval_only=False, ) loss = config.init(name="loss", module=module_loss) metrics = [getattr(module_metric, met) for met in config['metrics']] lr_scheduler = paddle.optimizer.lr.StepDecay(learning_rate=0.0001, step_size=5, gamma=0.9) optimizer = paddle.optimizer.AdamW(learning_rate=lr_scheduler, weight_decay=1e-4, parameters=model.parameters(), grad_clip=paddle.nn.ClipGradByGlobalNorm(2)) trainer = Trainer( model, loss, metrics, optimizer, config=config, data_loaders=data_loaders, lr_scheduler=lr_scheduler, mini_train=config._args.mini_train, disable_nan_checks=config["disable_nan_checks"], visualizer=None, val_freq=config["trainer"].get("val_freq", 1), force_cpu_val=config.get("force_cpu_val", False), skip_first_n_saves=config["trainer"].get("skip_first_n_saves", 0), include_optim_in_save_model=config["trainer"].get("include_optim_in_save_model", 1), cache_targets=set(config.get("cache_targets", [])), ) trainer.train() best_model_path = config.save_dir / "trained_model.pdparams" duration = time.strftime('%Hh%Mm%Ss', time.gmtime(time.time() - tic)) logger.info(f"Training took {duration}") # If multiple runs were conducted, report relevant statistics if len(seeds) > 1: log_summary( logger=logger, log_path=config.log_path, eval_mode=config["eval_mode"], 
fixed_num_epochs=config["trainer"]["epochs"], ) print(f"Log file stored at {config.log_path}") # Report the location of the "best" model of the final seeded run (here # "best" corresponds to the model with the highest geometric mean over the # R@1, R@5 and R@10 metrics when a validation set is used, or simply the final # epoch of training for fixed-length schedules). print(f"The best performing model can be found at {str(best_model_path)}") def main(): args = argparse.ArgumentParser(description='Main entry point for training') args.add_argument('--config', help='config file path') args.add_argument('--resume', help='path to latest model (default: None)') args.add_argument('--device', help="indices of GPUs to enable") args.add_argument('--mini_train', action="store_true") args.add_argument('--group_id', help="if supplied, group these experiments") args.add_argument('--disable_workers', action="store_true") args.add_argument('--refresh_lru_cache', action="store_true") args.add_argument('--train_single_epoch', action="store_true") args.add_argument('--purge_exp_dir', action="store_true", help="remove all previous experiments with the given config") args.add_argument("--dbg", default="ipdb.set_trace") args.add_argument("--custom_args", help="qualified key,val pairs") # Seeds can either be passed directly as a comma separated list at the command line, # or individually for separate experiments as a group (used for slurm experiments) seed_args = args.add_mutually_exclusive_group() seed_args.add_argument('--seeds', default="0", help="comma separated list of seeds") seed_args.add_argument('--group_seed', help="seed for group member") args = ConfigParser(args) os.environ["PYTHONBREAKPOINT"] = args._args.dbg args["data_loader"]["args"]["refresh_lru_cache"] = args._args.refresh_lru_cache msg = (f"Expected the number of training epochs ({args['trainer']['epochs']})" f"to exceed the save period ({args['trainer']['save_period']}), otherwise" " no checkpoints will be saved.") assert args["trainer"]["epochs"] >= args["trainer"]["save_period"], msg run_exp(config=args) if __name__ == '__main__': main()
[]
[]
[ "PYTHONBREAKPOINT" ]
[]
["PYTHONBREAKPOINT"]
python
1
0
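The T2VLAD train.py record above wires the --dbg flag (default "ipdb.set_trace") into the PYTHONBREAKPOINT environment variable, the Python 3.7+ hook that decides what the built-in breakpoint() calls. A minimal sketch of that mechanism, using the standard-library pdb instead of ipdb so the example has no extra dependency:

# PYTHONBREAKPOINT sketch: breakpoint() dispatches to the callable named in the variable.
import os

os.environ["PYTHONBREAKPOINT"] = "pdb.set_trace"   # the record defaults to "ipdb.set_trace"

def buggy():
    x = 41
    # breakpoint()   # uncomment to drop into the debugger selected above
    return x + 1

print(buggy())   # setting PYTHONBREAKPOINT to "0" would disable breakpoint() entirely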
src/backend/settings.py
""" Django settings for backend project. Generated by 'django-admin startproject' using Django 3.1.1. For more information on this file, see https://docs.djangoproject.com/en/3.1/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/3.1/ref/settings/ """ import os import dj_database_url from pathlib import Path from chatbot.nlu_engine import NLUEngine # Build paths inside the project like this: BASE_DIR / 'subdir'. BASE_DIR = Path(__file__).resolve().parent.parent # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = os.environ.get("SECRET_KEY", default="foo") # SECURITY WARNING: don't run with debug turned on in production! DEBUG = int(os.environ.get("DEBUG", default=1)) ALLOWED_HOSTS = ["*"] # Application definition INSTALLED_APPS = [ "django.contrib.admin", "django.contrib.auth", "django.contrib.contenttypes", "django.contrib.sessions", "django.contrib.messages", "django.contrib.staticfiles", "backend", ] MIDDLEWARE = [ "django.middleware.security.SecurityMiddleware", "whitenoise.middleware.WhiteNoiseMiddleware", "django.contrib.sessions.middleware.SessionMiddleware", "django.middleware.common.CommonMiddleware", "django.contrib.auth.middleware.AuthenticationMiddleware", "django.contrib.messages.middleware.MessageMiddleware", "django.middleware.clickjacking.XFrameOptionsMiddleware", ] ROOT_URLCONF = "backend.urls" TEMPLATES = [ { "BACKEND": "django.template.backends.django.DjangoTemplates", "DIRS": [os.path.join(BASE_DIR, "frontend/build")], "APP_DIRS": True, "OPTIONS": { "context_processors": [ "django.template.context_processors.debug", "django.template.context_processors.request", "django.contrib.auth.context_processors.auth", "django.contrib.messages.context_processors.messages", ], }, }, ] WSGI_APPLICATION = "backend.wsgi.application" # Database # https://docs.djangoproject.com/en/3.1/ref/settings/#databases DATABASES = { "default": { "ENGINE": "django.db.backends.sqlite3", "NAME": BASE_DIR / "db.sqlite3", } } DATABASE_URL = os.environ.get("DATABASE_URL") db_from_env = dj_database_url.config( default=DATABASE_URL, conn_max_age=500, ssl_require=True ) DATABASES["default"].update(db_from_env) # Password validation # https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { "NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator", }, { "NAME": "django.contrib.auth.password_validation.MinimumLengthValidator", }, { "NAME": "django.contrib.auth.password_validation.CommonPasswordValidator", }, { "NAME": "django.contrib.auth.password_validation.NumericPasswordValidator", }, ] # Internationalization # https://docs.djangoproject.com/en/3.1/topics/i18n/ LANGUAGE_CODE = "en-us" TIME_ZONE = "UTC" USE_I18N = True USE_L10N = True USE_TZ = True DEFAULT_AUTO_FIELD = "django.db.models.AutoField" APPEND_SLASH = False STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles") STATICFILES_STORAGE = "whitenoise.storage.CompressedManifestStaticFilesStorage" STATIC_URL = "/static/" CHATBOT = NLUEngine( engine_path="engine.nlu", dataset_path="dataset.yaml", language="es" )
[]
[]
[ "SECRET_KEY", "DATABASE_URL", "DEBUG" ]
[]
["SECRET_KEY", "DATABASE_URL", "DEBUG"]
python
3
0
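The Django settings record above reads SECRET_KEY, DEBUG and DATABASE_URL from the environment with development defaults, then lets dj-database-url override the sqlite default when DATABASE_URL is present. A condensed sketch of that layering; the postgres URL in the comment is an illustrative assumption:

# Environment-driven settings sketch, mirroring the record's defaults and dj_database_url layering.
import os
import dj_database_url

SECRET_KEY = os.environ.get("SECRET_KEY", default="foo")   # dev fallback only; set it for real deployments
DEBUG = int(os.environ.get("DEBUG", default=1))            # export DEBUG=0 to turn debug off

DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": "db.sqlite3"}}
# e.g. DATABASE_URL=postgres://user:pass@host:5432/dbname   (assumption)
db_from_env = dj_database_url.config(default=os.environ.get("DATABASE_URL"),
                                     conn_max_age=500, ssl_require=True)
DATABASES["default"].update(db_from_env)   # db_from_env is {} when DATABASE_URL is unset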
pgbouncer_test.go
package pgx_test import ( "context" "os" "testing" "github.com/jackc/pgconn" "github.com/jackc/pgconn/stmtcache" "github.com/nappspt/schemapgx/v4" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestPgbouncerStatementCacheDescribe(t *testing.T) { connString := os.Getenv("PGX_TEST_PGBOUNCER_CONN_STRING") if connString == "" { t.Skipf("Skipping due to missing environment variable %v", "PGX_TEST_PGBOUNCER_CONN_STRING") } config := mustParseConfig(t, connString) config.BuildStatementCache = func(conn *pgconn.PgConn) stmtcache.Cache { return stmtcache.New(conn, stmtcache.ModeDescribe, 1024) } testPgbouncer(t, config, 10, 100) } func TestPgbouncerSimpleProtocol(t *testing.T) { connString := os.Getenv("PGX_TEST_PGBOUNCER_CONN_STRING") if connString == "" { t.Skipf("Skipping due to missing environment variable %v", "PGX_TEST_PGBOUNCER_CONN_STRING") } config := mustParseConfig(t, connString) config.BuildStatementCache = nil config.PreferSimpleProtocol = true testPgbouncer(t, config, 10, 100) } func testPgbouncer(t *testing.T, config *pgx.ConnConfig, workers, iterations int) { doneChan := make(chan struct{}) for i := 0; i < workers; i++ { go func() { defer func() { doneChan <- struct{}{} }() conn, err := pgx.ConnectConfig(context.Background(), config) require.Nil(t, err) defer closeConn(t, conn) for i := 0; i < iterations; i++ { var i32 int32 var i64 int64 var f32 float32 var s string var s2 string err = conn.QueryRow(context.Background(), "select 1::int4, 2::int8, 3::float4, 'hi'::text").Scan(&i32, &i64, &f32, &s) require.NoError(t, err) assert.Equal(t, int32(1), i32) assert.Equal(t, int64(2), i64) assert.Equal(t, float32(3), f32) assert.Equal(t, "hi", s) err = conn.QueryRow(context.Background(), "select 1::int8, 2::float4, 'bye'::text, 4::int4, 'whatever'::text").Scan(&i64, &f32, &s, &i32, &s2) require.NoError(t, err) assert.Equal(t, int64(1), i64) assert.Equal(t, float32(2), f32) assert.Equal(t, "bye", s) assert.Equal(t, int32(4), i32) assert.Equal(t, "whatever", s2) } }() } for i := 0; i < workers; i++ { <-doneChan } }
[ "\"PGX_TEST_PGBOUNCER_CONN_STRING\"", "\"PGX_TEST_PGBOUNCER_CONN_STRING\"" ]
[]
[ "PGX_TEST_PGBOUNCER_CONN_STRING" ]
[]
["PGX_TEST_PGBOUNCER_CONN_STRING"]
go
1
0
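The pgbouncer_test.go record above skips each test with t.Skipf when PGX_TEST_PGBOUNCER_CONN_STRING is unset. The same skip-if-unconfigured pattern, sketched here in Python's unittest only to keep a single language across the examples in this section; the Go original is the authoritative form:

# Skip-if-unconfigured pattern, analogous to the Go test's t.Skipf guard.
import os
import unittest

CONN_STRING = os.getenv("PGX_TEST_PGBOUNCER_CONN_STRING")

class PgbouncerSmokeTest(unittest.TestCase):
    @unittest.skipUnless(CONN_STRING, "PGX_TEST_PGBOUNCER_CONN_STRING not set")
    def test_connection_string_present(self):
        # A real port of the Go test would open connections through PgBouncer
        # using CONN_STRING; here we only check the variable is available.
        self.assertTrue(CONN_STRING)

if __name__ == "__main__":
    unittest.main()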
bhagavad_gita_api/data/insert/verses.py
import json import os from rich.progress import track from sqlalchemy.orm import sessionmaker from bhagavad_gita_api.data.helpers import get_file from bhagavad_gita_api.db.session import engine from bhagavad_gita_api.models.gita import GitaVerse Session = sessionmaker(bind=engine) session = Session() content = get_file("verse.json") SANSKRIT_RECITATION_HOST = os.getenv("SANSKRIT_RECITATION_HOST") li = [] data = json.loads(content) for i in track(data, description="Loading verses"): li.append( GitaVerse( verse_number=i["verse_number"], chapter_number=i["chapter_number"], text=i["text"], sanskrit_recitation_url=f'{SANSKRIT_RECITATION_HOST}/{i["chapter_number"]}/{i["verse_number"]}.mp3', id=i["id"], chapter_id=i["chapter_id"], word_meanings=i["word_meanings"], slug=f'chapter-{i["chapter_number"]}-verse-{i["verse_number"]}', ) ) session.add_all(li) session.commit()
[]
[]
[ "SANSKRIT_RECITATION_HOST" ]
[]
["SANSKRIT_RECITATION_HOST"]
python
1
0
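The verses.py record above reads SANSKRIT_RECITATION_HOST once and uses it to build a recitation URL and slug for each verse. A tiny sketch of that construction; the host value and the sample row are illustrative assumptions rather than data from verse.json:

# URL/slug construction sketch for one verse, with a placeholder host.
import os

os.environ.setdefault("SANSKRIT_RECITATION_HOST", "https://cdn.example.com/recitations")  # assumption
host = os.getenv("SANSKRIT_RECITATION_HOST")

row = {"chapter_number": 1, "verse_number": 1}   # illustrative verse.json row
url = f'{host}/{row["chapter_number"]}/{row["verse_number"]}.mp3'
slug = f'chapter-{row["chapter_number"]}-verse-{row["verse_number"]}'
print(url)
print(slug)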
src/test/java/org/verdictdb/commons/DatabaseConnectionHelpers.java
package org.verdictdb.commons; import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStream; import java.sql.Connection; import java.sql.DriverManager; import java.sql.SQLException; import java.util.ArrayList; import java.util.List; import org.apache.commons.lang3.RandomStringUtils; import org.apache.spark.sql.SparkSession; import org.postgresql.copy.CopyManager; import org.postgresql.core.BaseConnection; import org.verdictdb.connection.DbmsConnection; import org.verdictdb.connection.DbmsQueryResult; import org.verdictdb.connection.JdbcConnection; import org.verdictdb.exception.VerdictDBDbmsException; import org.verdictdb.sqlsyntax.ImpalaSyntax; import org.verdictdb.sqlsyntax.PostgresqlSyntax; import org.verdictdb.sqlsyntax.RedshiftSyntax; import com.google.common.base.Charsets; import com.google.common.base.Joiner; import com.google.common.io.Files; public class DatabaseConnectionHelpers { // Default connection variables for databases used in unit tests public static final String MYSQL_HOST; public static final String MYSQL_DATABASE = "verdictdb_test_" + RandomStringUtils.randomAlphanumeric(8).toLowerCase(); public static final String MYSQL_USER = "root"; public static final String MYSQL_PASSWORD = ""; public static final String IMPALA_HOST; public static final String IMPALA_DATABASE = "verdictdb_test_" + RandomStringUtils.randomAlphanumeric(8).toLowerCase(); public static final String IMPALA_USER = ""; public static final String IMPALA_PASSWORD = ""; public static final String REDSHIFT_HOST; public static final String REDSHIFT_DATABASE = "dev"; public static final String REDSHIFT_SCHEMA = "verdictdb_test_" + RandomStringUtils.randomAlphanumeric(8).toLowerCase(); public static final String REDSHIFT_USER; public static final String REDSHIFT_PASSWORD; public static final String POSTGRES_HOST; public static final String POSTGRES_DATABASE = "test"; public static final String POSTGRES_USER = "postgres"; public static final String POSTGRES_PASSWORD = ""; public static final String POSTGRES_SCHEMA = "verdictdb_test_" + RandomStringUtils.randomAlphanumeric(8).toLowerCase(); static { String env = System.getenv("BUILD_ENV"); if (env != null && (env.equals("GitLab") || env.equals("DockerCompose"))) { MYSQL_HOST = "mysql"; } else { MYSQL_HOST = "localhost"; } } static { IMPALA_HOST = System.getenv("VERDICTDB_TEST_IMPALA_HOST"); } static { String env = System.getenv("BUILD_ENV"); if (env != null && (env.equals("GitLab") || env.equals("DockerCompose"))) { POSTGRES_HOST = "postgres"; } else { POSTGRES_HOST = "localhost"; } } static { REDSHIFT_HOST = System.getenv("VERDICTDB_TEST_REDSHIFT_ENDPOINT"); REDSHIFT_USER = System.getenv("VERDICTDB_TEST_REDSHIFT_USER"); REDSHIFT_PASSWORD = System.getenv("VERDICTDB_TEST_REDSHIFT_PASSWORD"); } public static final String COMMON_TABLE_NAME = "mytable"; public static final String COMMON_SCHEMA_NAME = "verdictdb_test_" + RandomStringUtils.randomAlphanumeric(8).toLowerCase(); public static final String TEMPLATE_SCHEMA_NAME = "VERDICTDB_TEST_DBNAME"; public static SparkSession setupSpark(String appname, String schema) { SparkSession spark = SparkSession.builder().appName(appname).master("local").enableHiveSupport().getOrCreate(); spark.conf().set("spark.cores.max", "24"); spark.conf().set("spark.serializer", "org.apache.spark.serializer.KryoSerializer"); spark.conf().set("spark.sql.tungsten.enabled", "true"); spark.conf().set("spark.eventLog.enabled", "true"); spark.conf().set("spark.app.id", "YourApp"); 
spark.conf().set("spark.io.compression.codec", "snappy"); spark.conf().set("spark.rdd.compress", "true"); spark.conf().set("spark.streaming.backpressure.enabled", "true"); spark.conf().set("spark.kryoserializer.buffer.max", "1"); spark.conf().set("spark.default.parallelism", "1"); spark.conf().set("spark.executor.cores", "8"); spark.conf().set("spark.shuffle.sort.bypassMergeThreshold", "50"); spark.conf().set("spark.broadcast.blockSize", "1"); spark.conf().set("spark.sql.parquet.compression.codec", "snappy"); spark.conf().set("spark.sql.parquet.mergeSchema", "true"); spark.conf().set("spark.sql.parquet.binaryAsString", "true"); spark.conf().set("spark.sql.crossJoin.enabled", "true"); // create schema spark.sql(String.format("DROP SCHEMA IF EXISTS `%s` CASCADE", schema)); spark.sql(String.format("CREATE SCHEMA IF NOT EXISTS `%s`", schema)); // create tables String datafilePath = new File("src/test/resources/tpch_test_data/").getAbsolutePath(); spark.sql( String.format( "CREATE EXTERNAL TABLE IF NOT EXISTS `%s`.`nation` (" + " `n_nationkey` INT, " + " `n_name` CHAR(25), " + " `n_regionkey` INT, " + " `n_comment` VARCHAR(152), " + " `n_dummy` VARCHAR(10)) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' " + "STORED AS TEXTFILE " + "LOCATION '%s/nation'", schema, datafilePath)); spark.sql( String.format( "CREATE EXTERNAL TABLE IF NOT EXISTS `%s`.`region` (" + " `r_regionkey` INT, " + " `r_name` CHAR(25), " + " `r_comment` VARCHAR(152), " + " `r_dummy` VARCHAR(10)) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' " + "STORED AS TEXTFILE " + "LOCATION '%s/region'", schema, datafilePath)); spark.sql( String.format( "CREATE EXTERNAL TABLE IF NOT EXISTS `%s`.`part` (" + " `p_partkey` INT, " + " `p_name` VARCHAR(55), " + " `p_mfgr` CHAR(25), " + " `p_brand` CHAR(10), " + " `p_type` VARCHAR(25), " + " `p_size` INT, " + " `p_container` CHAR(10), " + " `p_retailprice` DECIMAL(15,2) , " + " `p_comment` VARCHAR(23) , " + " `p_dummy` VARCHAR(10)) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' " + "STORED AS textfile " + "LOCATION '%s/part'", schema, datafilePath)); spark.sql( String.format( "CREATE EXTERNAL TABLE IF NOT EXISTS `%s`.`supplier` ( " + " `s_suppkey` INT , " + " `s_name` CHAR(25) , " + " `s_address` VARCHAR(40) , " + " `s_nationkey` INT , " + " `s_phone` CHAR(15) , " + " `s_acctbal` DECIMAL(15,2) , " + " `s_comment` VARCHAR(101), " + " `s_dummy` VARCHAR(10)) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' " + "STORED AS textfile " + "LOCATION '%s/supplier'", schema, datafilePath)); spark.sql( String.format( "CREATE EXTERNAL TABLE IF NOT EXISTS `%s`.`partsupp` ( " + " `ps_partkey` INT , " + " `ps_suppkey` INT , " + " `ps_availqty` INT , " + " `ps_supplycost` DECIMAL(15,2) , " + " `ps_comment` VARCHAR(199), " + " `ps_dummy` VARCHAR(10)) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' " + "STORED AS textfile " + "LOCATION '%s/partsupp'", schema, datafilePath)); spark.sql( String.format( "CREATE EXTERNAL TABLE IF NOT EXISTS `%s`.`customer` (" + " `c_custkey` INT , " + " `c_name` VARCHAR(25) , " + " `c_address` VARCHAR(40) , " + " `c_nationkey` INT , " + " `c_phone` CHAR(15) , " + " `c_acctbal` DECIMAL(15,2) , " + " `c_mktsegment` CHAR(10) , " + " `c_comment` VARCHAR(117), " + " `c_dummy` VARCHAR(10)) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' " + "STORED AS textfile " + "LOCATION '%s/customer'", schema, datafilePath)); spark.sql( String.format( "CREATE EXTERNAL TABLE IF NOT EXISTS `%s`.`orders` ( " + " `o_orderkey` INT , " + " `o_custkey` INT , " + " `o_orderstatus` CHAR(1) , 
" + " `o_totalprice` DECIMAL(15,2) , " + " `o_orderdate` DATE , " + " `o_orderpriority` CHAR(15) , " + " `o_clerk` CHAR(15) , " + " `o_shippriority` INT , " + " `o_comment` VARCHAR(79), " + " `o_dummy` VARCHAR(10)) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' " + "STORED AS textfile " + "LOCATION '%s/orders'", schema, datafilePath)); spark.sql( String.format( "CREATE EXTERNAL TABLE IF NOT EXISTS `%s`.`lineitem` (" + " `l_orderkey` INT , " + " `l_partkey` INT , " + " `l_suppkey` INT , " + " `l_linenumber` INT , " + " `l_quantity` DECIMAL(15,2) , " + " `l_extendedprice` DECIMAL(15,2) , " + " `l_discount` DECIMAL(15,2) , " + " `l_tax` DECIMAL(15,2) , " + " `l_returnflag` CHAR(1) , " + " `l_linestatus` CHAR(1) , " + " `l_shipdate` DATE , " + " `l_commitdate` DATE , " + " `l_receiptdate` DATE , " + " `l_shipinstruct` CHAR(25) , " + " `l_shipmode` CHAR(10) , " + " `l_comment` VARCHAR(44), " + " `l_dummy` VARCHAR(10))" + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' " + "STORED AS textfile " + "LOCATION '%s/lineitem'", schema, datafilePath)); return spark; } public static Connection setupImpala( String connectionString, String user, String password, String schema) throws SQLException, VerdictDBDbmsException { Connection conn = DriverManager.getConnection(connectionString, user, password); DbmsConnection dbmsConn = JdbcConnection.create(conn); dbmsConn.execute(String.format("DROP SCHEMA IF EXISTS `%s` CASCADE", schema)); dbmsConn.execute(String.format("CREATE SCHEMA IF NOT EXISTS `%s`", schema)); // Create tables dbmsConn.execute( String.format( "CREATE EXTERNAL TABLE IF NOT EXISTS `%s`.`nation` (" + " `n_nationkey` INT, " + " `n_name` STRING, " + " `n_regionkey` INT, " + " `n_comment` STRING, " + " `n_dummy` STRING) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'" + "LOCATION '/tmp/tpch_test_data/nation'", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`region` (" + " `r_regionkey` INT, " + " `r_name` STRING, " + " `r_comment` STRING, " + " `r_dummy` STRING) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'" + "LOCATION '/tmp/tpch_test_data/region'", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`part` ( " + " `p_partkey` INT, " + " `p_name` STRING, " + " `p_mfgr` STRING, " + " `p_brand` STRING, " + " `p_type` STRING, " + " `p_size` INT, " + " `p_container` STRING, " + " `p_retailprice` DECIMAL(15,2) , " + " `p_comment` STRING, " + " `p_dummy` STRING) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'" + "LOCATION '/tmp/tpch_test_data/part'", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`supplier` ( " + " `s_suppkey` INT , " + " `s_name` STRING , " + " `s_address` STRING, " + " `s_nationkey` INT , " + " `s_phone` STRING , " + " `s_acctbal` DECIMAL(15,2) , " + " `s_comment` STRING, " + " `s_dummy` STRING) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'" + "LOCATION '/tmp/tpch_test_data/supplier'", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`partsupp` ( " + " `ps_partkey` INT , " + " `ps_suppkey` INT , " + " `ps_availqty` INT , " + " `ps_supplycost` DECIMAL(15,2) , " + " `ps_comment` STRING, " + " `ps_dummy` STRING) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'" + "LOCATION '/tmp/tpch_test_data/partsupp'", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`customer` (" + " `c_custkey` INT , " + " `c_name` STRING , " + " `c_address` STRING , " + " `c_nationkey` INT , " + " `c_phone` STRING , " + " `c_acctbal` DECIMAL(15,2) , " + " 
`c_mktsegment` STRING , " + " `c_comment` STRING, " + " `c_dummy` STRING) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'" + "LOCATION '/tmp/tpch_test_data/customer'", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`orders` ( " + " `o_orderkey` INT , " + " `o_custkey` INT , " + " `o_orderstatus` STRING , " + " `o_totalprice` DECIMAL(15,2) , " + " `o_orderdate` TIMESTAMP , " + " `o_orderpriority` STRING , " + " `o_clerk` STRING , " + " `o_shippriority` INT, " + " `o_comment` STRING, " + " `o_dummy` STRING) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'" + "LOCATION '/tmp/tpch_test_data/orders'", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`lineitem` ( " + " `l_orderkey` INT , " + " `l_partkey` INT , " + " `l_suppkey` INT , " + " `l_linenumber` INT , " + " `l_quantity` DECIMAL(15,2) , " + " `l_extendedprice` DECIMAL(15,2) , " + " `l_discount` DECIMAL(15,2) , " + " `l_tax` DECIMAL(15,2) , " + " `l_returnflag` STRING , " + " `l_linestatus` STRING , " + " `l_shipdate` TIMESTAMP , " + " `l_commitdate` TIMESTAMP , " + " `l_receiptdate` TIMESTAMP , " + " `l_shipinstruct` STRING , " + " `l_shipmode` STRING , " + " `l_comment` STRING, " + " `l_dummy` STRING) " + "ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'" + "LOCATION '/tmp/tpch_test_data/lineitem'", schema)); return conn; } static String getQuoted(String value) { return "'" + value + "'"; } static void loadRedshiftData(String schema, String table, DbmsConnection dbmsConn) throws IOException, VerdictDBDbmsException { String concat = ""; File file = new File(String.format("src/test/resources/tpch_test_data/%s/%s.tbl", table, table)); DbmsQueryResult columnMeta = dbmsConn.execute( String.format( "select data_type, ordinal_position from INFORMATION_SCHEMA.COLUMNS where table_name='%s' and table_schema='%s'", table, schema)); List<Boolean> quotedNeeded = new ArrayList<>(); for (int i = 0; i < columnMeta.getRowCount(); i++) { quotedNeeded.add(true); } while (columnMeta.next()) { String columnType = columnMeta.getString(0); int columnIndex = columnMeta.getInt(1); if (columnType.equals("integer") || columnType.equals("numeric")) { quotedNeeded.set(columnIndex - 1, false); } } String content = Files.toString(file, Charsets.UTF_8); for (String row : content.split("\n")) { String[] values = row.split("\\|"); row = ""; for (int i = 0; i < values.length - 1; i++) { if (quotedNeeded.get(i)) { row = row + getQuoted(values[i]) + ","; } else { row = row + values[i] + ","; } } row = row + "''"; if (concat.equals("")) { concat = concat + "(" + row + ")"; } else concat = concat + "," + "(" + row + ")"; } dbmsConn.execute(String.format("insert into \"%s\".\"%s\" values %s", schema, table, concat)); } public static Connection setupRedshift( String connectionString, String user, String password, String schema) throws VerdictDBDbmsException, SQLException, IOException { Connection conn = DriverManager.getConnection(connectionString, user, password); JdbcConnection dbmsConn = new JdbcConnection(conn, new RedshiftSyntax()); // dbmsConn.setOutputDebugMessage(true); dbmsConn.execute(String.format("DROP SCHEMA IF EXISTS \"%s\" CASCADE", schema)); dbmsConn.execute(String.format("CREATE SCHEMA IF NOT EXISTS \"%s\"", schema)); // Create tables dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"nation\" (" + " \"n_nationkey\" INT, " + " \"n_name\" CHAR(25), " + " \"n_regionkey\" INT, " + " \"n_comment\" VARCHAR(152), " + " \"n_dummy\" VARCHAR(10), " + " PRIMARY KEY (\"n_nationkey\"))", 
schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"region\" (" + " \"r_regionkey\" INT, " + " \"r_name\" CHAR(25), " + " \"r_comment\" VARCHAR(152), " + " \"r_dummy\" VARCHAR(10), " + " PRIMARY KEY (\"r_regionkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"part\" ( \"p_partkey\" INT, " + " \"p_name\" VARCHAR(55), " + " \"p_mfgr\" CHAR(25), " + " \"p_brand\" CHAR(10), " + " \"p_type\" VARCHAR(25), " + " \"p_size\" INT, " + " \"p_container\" CHAR(10), " + " \"p_retailprice\" DECIMAL(15,2) , " + " \"p_comment\" VARCHAR(23) , " + " \"p_dummy\" VARCHAR(10), " + " PRIMARY KEY (\"p_partkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"supplier\" ( " + " \"s_suppkey\" INT , " + " \"s_name\" CHAR(25) , " + " \"s_address\" VARCHAR(40) , " + " \"s_nationkey\" INT , " + " \"s_phone\" CHAR(15) , " + " \"s_acctbal\" DECIMAL(15,2) , " + " \"s_comment\" VARCHAR(101), " + " \"s_dummy\" varchar(10), " + " PRIMARY KEY (\"s_suppkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"partsupp\" ( " + " \"ps_partkey\" INT , " + " \"ps_suppkey\" INT , " + " \"ps_availqty\" INT , " + " \"ps_supplycost\" DECIMAL(15,2) , " + " \"ps_comment\" VARCHAR(199), " + " \"ps_dummy\" VARCHAR(10), " + " PRIMARY KEY (\"ps_partkey\", \"ps_suppkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"customer\" (" + " \"c_custkey\" INT , " + " \"c_name\" VARCHAR(25) , " + " \"c_address\" VARCHAR(40) , " + " \"c_nationkey\" INT , " + " \"c_phone\" CHAR(15) , " + " \"c_acctbal\" DECIMAL(15,2) , " + " \"c_mktsegment\" CHAR(10) , " + " \"c_comment\" VARCHAR(117), " + " \"c_dummy\" VARCHAR(10), " + " PRIMARY KEY (\"c_custkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"orders\" ( " + " \"o_orderkey\" INT , " + " \"o_custkey\" INT , " + " \"o_orderstatus\" CHAR(1) , " + " \"o_totalprice\" DECIMAL(15,2) , " + " \"o_orderdate\" DATE , " + " \"o_orderpriority\" CHAR(15) , " + " \"o_clerk\" CHAR(15) , " + " \"o_shippriority\" INT , " + " \"o_comment\" VARCHAR(79), " + " \"o_dummy\" varchar(10), " + " PRIMARY KEY (\"o_orderkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"lineitem\" (" + " \"l_orderkey\" INT , " + " \"l_partkey\" INT , " + " \"l_suppkey\" INT , " + " \"l_linenumber\" INT , " + " \"l_quantity\" DECIMAL(15,2) , " + " \"l_extendedprice\" DECIMAL(15,2) , " + " \"l_discount\" DECIMAL(15,2) , " + " \"l_tax\" DECIMAL(15,2) , " + " \"l_returnflag\" CHAR(1) , " + " \"l_linestatus\" CHAR(1) , " + " \"l_shipdate\" DATE , " + " \"l_commitdate\" DATE , " + " \"l_receiptdate\" DATE , " + " \"l_shipinstruct\" CHAR(25) , " + " \"l_shipmode\" CHAR(10) , " + " \"l_comment\" VARCHAR(44), " + " \"l_dummy\" varchar(10))", schema)); // load data use insert loadRedshiftData(schema, "nation", dbmsConn); loadRedshiftData(schema, "region", dbmsConn); loadRedshiftData(schema, "part", dbmsConn); loadRedshiftData(schema, "supplier", dbmsConn); loadRedshiftData(schema, "customer", dbmsConn); loadRedshiftData(schema, "partsupp", dbmsConn); loadRedshiftData(schema, "orders", dbmsConn); loadRedshiftData(schema, "lineitem", dbmsConn); return conn; } public static Connection setupMySqlForDataTypeTest( String connectionString, String user, String password, String schema, String table) throws SQLException, VerdictDBDbmsException { Connection conn = DriverManager.getConnection(connectionString, user, 
password); DbmsConnection dbmsConn = JdbcConnection.create(conn); dbmsConn.execute(String.format("DROP SCHEMA IF EXISTS `%s`", schema)); dbmsConn.execute(String.format("CREATE SCHEMA IF NOT EXISTS `%s`", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`%s` (" + "bitCol BIT(1), " + "tinyintCol TINYINT(2), " + "boolCol BOOL, " + "smallintCol SMALLINT(3), " + "mediumintCol MEDIUMINT(4), " + "intCol INT(4), " + "integerCol INTEGER(4), " + "bigintCol BIGINT(8), " + "decimalCol DECIMAL(4,2), " + "decCol DEC(4,2), " + "floatCol FLOAT(4,2), " + "doubleCol DOUBLE(8,2), " + "doubleprecisionCol DOUBLE PRECISION(8,2), " + "dateCol DATE, " + "datetimeCol DATETIME, " + "timestampCol TIMESTAMP, " + "timeCol TIME, " + "yearCol YEAR(2), " + "yearCol2 YEAR(4), " + "charCol CHAR(4), " + "varcharCol VARCHAR(4), " + "binaryCol BINARY(4), " + "varbinaryCol VARBINARY(4), " + "tinyblobCol TINYBLOB, " + "tinytextCol TINYTEXT, " + "blobCol BLOB(4), " + "textCol TEXT(100), " + "medimumblobCol MEDIUMBLOB, " + "medimumtextCol MEDIUMTEXT, " + "longblobCol LONGBLOB, " + "longtextCol LONGTEXT, " + "enumCol ENUM('1', '2'), " + "setCol SET('1', '2'))", schema, table)); dbmsConn.execute( String.format( "INSERT INTO `%s`.`%s` VALUES ( " + "1, 2, 1, 1, 1, 1, 1, 1, " + "1.0, 1.0, 1.0, 1.0, 1.0, " + "'2018-12-31', '2018-12-31 01:00:00', '2018-12-31 00:00:01', '10:59:59', " + "18, 2018, 'abc', 'abc', '10', '10', " + "'10', 'a', '10', 'abc', '1110', 'abc', '1110', 'abc', '1', '2')", schema, table)); dbmsConn.execute( String.format( "INSERT INTO `%s`.`%s` VALUES ( " + "NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, " + "NULL, NULL, NULL, NULL, NULL, " + "NULL, NULL, NULL, NULL, " + "NULL, NULL, NULL, NULL, NULL, NULL, " + "NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL)", schema, table)); return conn; } public static Connection setupMySql( String connectionString, String user, String password, String schema) throws VerdictDBDbmsException, SQLException { Connection conn = DriverManager.getConnection(connectionString, user, password); DbmsConnection dbmsConn = JdbcConnection.create(conn); dbmsConn.execute(String.format("DROP SCHEMA IF EXISTS `%s`", schema)); dbmsConn.execute(String.format("CREATE SCHEMA IF NOT EXISTS `%s`", schema)); // Create tables dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`nation` (" + " `n_nationkey` INT, " + " `n_name` CHAR(25), " + " `n_regionkey` INT, " + " `n_comment` VARCHAR(152), " + " `n_dummy` VARCHAR(10), " + " PRIMARY KEY (`n_nationkey`))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`region` (" + " `r_regionkey` INT, " + " `r_name` CHAR(25), " + " `r_comment` VARCHAR(152), " + " `r_dummy` VARCHAR(10), " + " PRIMARY KEY (`r_regionkey`))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`part` ( `p_partkey` INT, " + " `p_name` VARCHAR(55), " + " `p_mfgr` CHAR(25), " + " `p_brand` CHAR(10), " + " `p_type` VARCHAR(25), " + " `p_size` INT, " + " `p_container` CHAR(10), " + " `p_retailprice` DECIMAL(15,2) , " + " `p_comment` VARCHAR(23) , " + " `p_dummy` VARCHAR(10), " + " PRIMARY KEY (`p_partkey`))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`supplier` ( " + " `s_suppkey` INT , " + " `s_name` CHAR(25) , " + " `s_address` VARCHAR(40) , " + " `s_nationkey` INT , " + " `s_phone` CHAR(15) , " + " `s_acctbal` DECIMAL(15,2) , " + " `s_comment` VARCHAR(101), " + " `s_dummy` varchar(10), " + " PRIMARY KEY (`s_suppkey`))", schema)); dbmsConn.execute( 
String.format( "CREATE TABLE IF NOT EXISTS `%s`.`partsupp` ( " + " `ps_partkey` INT , " + " `ps_suppkey` INT , " + " `ps_availqty` INT , " + " `ps_supplycost` DECIMAL(15,2) , " + " `ps_comment` VARCHAR(199), " + " `ps_dummy` VARCHAR(10), " + " PRIMARY KEY (`ps_partkey`))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`customer` (" + " `c_custkey` INT , " + " `c_name` VARCHAR(25) , " + " `c_address` VARCHAR(40) , " + " `c_nationkey` INT , " + " `c_phone` CHAR(15) , " + " `c_acctbal` DECIMAL(15,2) , " + " `c_mktsegment` CHAR(10) , " + " `c_comment` VARCHAR(117), " + " `c_dummy` VARCHAR(10), " + " PRIMARY KEY (`c_custkey`))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`orders` ( " + " `o_orderkey` INT , " + " `o_custkey` INT , " + " `o_orderstatus` CHAR(1) , " + " `o_totalprice` DECIMAL(15,2) , " + " `o_orderdate` DATE , " + " `o_orderpriority` CHAR(15) , " + " `o_clerk` CHAR(15) , " + " `o_shippriority` INT , " + " `o_comment` VARCHAR(79), " + " `o_dummy` varchar(10), " + " PRIMARY KEY (`o_orderkey`))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS `%s`.`lineitem` ( `l_orderkey` INT , " + " `l_partkey` INT , " + " `l_suppkey` INT , " + " `l_linenumber` INT , " + " `l_quantity` DECIMAL(15,2) , " + " `l_extendedprice` DECIMAL(15,2) , " + " `l_discount` DECIMAL(15,2) , " + " `l_tax` DECIMAL(15,2) , " + " `l_returnflag` CHAR(1) , " + " `l_linestatus` CHAR(1) , " + " `l_shipdate` DATE , " + " `l_commitdate` DATE , " + " `l_receiptdate` DATE , " + " `l_shipinstruct` CHAR(25) , " + " `l_shipmode` CHAR(10) , " + " `l_comment` VARCHAR(44), " + " `l_dummy` varchar(10))", schema)); // Load data dbmsConn.execute( String.format( "LOAD DATA LOCAL INFILE 'src/test/resources/tpch_test_data/region/region.tbl' " + "INTO TABLE `%s`.`region` FIELDS TERMINATED BY '|'", schema)); dbmsConn.execute( String.format( "LOAD DATA LOCAL INFILE 'src/test/resources/tpch_test_data/nation/nation.tbl' " + "INTO TABLE `%s`.`nation` FIELDS TERMINATED BY '|'", schema)); dbmsConn.execute( String.format( "LOAD DATA LOCAL INFILE 'src/test/resources/tpch_test_data/supplier/supplier.tbl' " + "INTO TABLE `%s`.`supplier` FIELDS TERMINATED BY '|'", schema)); dbmsConn.execute( String.format( "LOAD DATA LOCAL INFILE 'src/test/resources/tpch_test_data/customer/customer.tbl' " + "INTO TABLE `%s`.`customer` FIELDS TERMINATED BY '|'", schema)); dbmsConn.execute( String.format( "LOAD DATA LOCAL INFILE 'src/test/resources/tpch_test_data/part/part.tbl' " + "INTO TABLE `%s`.`part` FIELDS TERMINATED BY '|'", schema)); dbmsConn.execute( String.format( "LOAD DATA LOCAL INFILE 'src/test/resources/tpch_test_data/partsupp/partsupp.tbl' " + "INTO TABLE `%s`.`partsupp` FIELDS TERMINATED BY '|'", schema)); dbmsConn.execute( String.format( "LOAD DATA LOCAL INFILE 'src/test/resources/tpch_test_data/lineitem/lineitem.tbl' " + "INTO TABLE `%s`.`lineitem` FIELDS TERMINATED BY '|'", schema)); dbmsConn.execute( String.format( "LOAD DATA LOCAL INFILE 'src/test/resources/tpch_test_data/orders/orders.tbl' " + "INTO TABLE `%s`.`orders` FIELDS TERMINATED BY '|'", schema)); return conn; } public static Connection setupPostgresql( String connectionString, String user, String password, String schema) throws VerdictDBDbmsException, SQLException, IOException { Connection conn = DriverManager.getConnection(connectionString, user, password); DbmsConnection dbmsConn = new JdbcConnection(conn, new PostgresqlSyntax()); dbmsConn.execute(String.format("DROP SCHEMA IF EXISTS \"%s\" CASCADE", 
schema)); dbmsConn.execute(String.format("CREATE SCHEMA IF NOT EXISTS \"%s\"", schema)); // Create tables dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"nation\" (" + " \"n_nationkey\" INT, " + " \"n_name\" CHAR(25), " + " \"n_regionkey\" INT, " + " \"n_comment\" VARCHAR(152), " + " \"n_dummy\" VARCHAR(10), " + " PRIMARY KEY (\"n_nationkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"region\" (" + " \"r_regionkey\" INT, " + " \"r_name\" CHAR(25), " + " \"r_comment\" VARCHAR(152), " + " \"r_dummy\" VARCHAR(10), " + " PRIMARY KEY (\"r_regionkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"part\" ( \"p_partkey\" INT, " + " \"p_name\" VARCHAR(55), " + " \"p_mfgr\" CHAR(25), " + " \"p_brand\" CHAR(10), " + " \"p_type\" VARCHAR(25), " + " \"p_size\" INT, " + " \"p_container\" CHAR(10), " + " \"p_retailprice\" DECIMAL(15,2) , " + " \"p_comment\" VARCHAR(23) , " + " \"p_dummy\" VARCHAR(10), " + " PRIMARY KEY (\"p_partkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"supplier\" ( " + " \"s_suppkey\" INT , " + " \"s_name\" CHAR(25) , " + " \"s_address\" VARCHAR(40) , " + " \"s_nationkey\" INT , " + " \"s_phone\" CHAR(15) , " + " \"s_acctbal\" DECIMAL(15,2) , " + " \"s_comment\" VARCHAR(101), " + " \"s_dummy\" varchar(10), " + " PRIMARY KEY (\"s_suppkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"partsupp\" ( " + " \"ps_partkey\" INT , " + " \"ps_suppkey\" INT , " + " \"ps_availqty\" INT , " + " \"ps_supplycost\" DECIMAL(15,2) , " + " \"ps_comment\" VARCHAR(199), " + " \"ps_dummy\" VARCHAR(10), " + " PRIMARY KEY (\"ps_partkey\", \"ps_suppkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"customer\" (" + " \"c_custkey\" INT , " + " \"c_name\" VARCHAR(25) , " + " \"c_address\" VARCHAR(40) , " + " \"c_nationkey\" INT , " + " \"c_phone\" CHAR(15) , " + " \"c_acctbal\" DECIMAL(15,2) , " + " \"c_mktsegment\" CHAR(10) , " + " \"c_comment\" VARCHAR(117), " + " \"c_dummy\" VARCHAR(10), " + " PRIMARY KEY (\"c_custkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"orders\" ( " + " \"o_orderkey\" INT , " + " \"o_custkey\" INT , " + " \"o_orderstatus\" CHAR(1) , " + " \"o_totalprice\" DECIMAL(15,2) , " + " \"o_orderdate\" DATE , " + " \"o_orderpriority\" CHAR(15) , " + " \"o_clerk\" CHAR(15) , " + " \"o_shippriority\" INT , " + " \"o_comment\" VARCHAR(79), " + " \"o_dummy\" varchar(10), " + " PRIMARY KEY (\"o_orderkey\"))", schema)); dbmsConn.execute( String.format( "CREATE TABLE IF NOT EXISTS \"%s\".\"lineitem\" (" + " \"l_orderkey\" INT , " + " \"l_partkey\" INT , " + " \"l_suppkey\" INT , " + " \"l_linenumber\" INT , " + " \"l_quantity\" DECIMAL(15,2) , " + " \"l_extendedprice\" DECIMAL(15,2) , " + " \"l_discount\" DECIMAL(15,2) , " + " \"l_tax\" DECIMAL(15,2) , " + " \"l_returnflag\" CHAR(1) , " + " \"l_linestatus\" CHAR(1) , " + " \"l_shipdate\" DATE , " + " \"l_commitdate\" DATE , " + " \"l_receiptdate\" DATE , " + " \"l_shipinstruct\" CHAR(25) , " + " \"l_shipmode\" CHAR(10) , " + " \"l_comment\" VARCHAR(44), " + " \"l_dummy\" varchar(10))", schema)); // Load data CopyManager copy = new CopyManager((BaseConnection) conn); File region = new File("src/test/resources/tpch_test_data/region/region.tbl"); InputStream in = new FileInputStream(region); copy.copyIn(String.format("COPY \"%s\".\"region\" FROM STDOUT DELIMITER '|'", schema), in); File nation 
= new File("src/test/resources/tpch_test_data/nation/nation.tbl"); in = new FileInputStream(nation); copy.copyIn(String.format("COPY \"%s\".\"nation\" FROM STDOUT DELIMITER '|'", schema), in); File supplier = new File("src/test/resources/tpch_test_data/supplier/supplier.tbl"); in = new FileInputStream(supplier); copy.copyIn(String.format("COPY \"%s\".\"supplier\" FROM STDOUT DELIMITER '|'", schema), in); File customer = new File("src/test/resources/tpch_test_data/customer/customer.tbl"); in = new FileInputStream(customer); copy.copyIn(String.format("COPY \"%s\".\"customer\" FROM STDOUT DELIMITER '|'", schema), in); File part = new File("src/test/resources/tpch_test_data/part/part.tbl"); in = new FileInputStream(part); copy.copyIn(String.format("COPY \"%s\".\"part\" FROM STDOUT DELIMITER '|'", schema), in); File partsupp = new File("src/test/resources/tpch_test_data/partsupp/partsupp.tbl"); in = new FileInputStream(partsupp); copy.copyIn(String.format("COPY \"%s\".\"partsupp\" FROM STDOUT DELIMITER '|'", schema), in); File lineitem = new File("src/test/resources/tpch_test_data/lineitem/lineitem.tbl"); in = new FileInputStream(lineitem); copy.copyIn(String.format("COPY \"%s\".\"lineitem\" FROM STDOUT DELIMITER '|'", schema), in); File orders = new File("src/test/resources/tpch_test_data/orders/orders.tbl"); in = new FileInputStream(orders); copy.copyIn(String.format("COPY \"%s\".\"orders\" FROM STDOUT DELIMITER '|'", schema), in); return conn; } public static Connection setupPostgresqlForDataTypeTest( String connectionString, String user, String password, String schema, String table) throws SQLException, VerdictDBDbmsException { Connection conn = DriverManager.getConnection(connectionString, user, password); DbmsConnection dbmsConn = new JdbcConnection(conn, new PostgresqlSyntax()); dbmsConn.execute(String.format("DROP SCHEMA IF EXISTS \"%s\" CASCADE", schema)); dbmsConn.execute(String.format("CREATE SCHEMA IF NOT EXISTS \"%s\"", schema)); dbmsConn.execute( String.format( "CREATE TABLE \"%s\".\"%s\" (" + "bigintCol bigint, " + "bigserialCol bigserial, " + "bitCol bit(1), " + "varbitCol varbit(4), " + "booleanCol boolean, " + "boxCol box, " + "byteaCol bytea, " + "charCol char(4), " + "varcharCol varchar(4), " + "cidrCol cidr, " + "circleCol circle, " + "dateCol date, " + "float8Col float8, " + "inetCol inet, " + "integerCol integer, " + "jsonCol json, " + "lineCol line, " + "lsegCol lseg, " + "macaddrCol macaddr, " + "macaddr8Col macaddr8, " + "moneyCol money, " + "numericCol numeric(4,2), " + "pathCol path, " + "pointCol point, " + "polygonCol polygon, " + "realCol real, " + "smallintCol smallint, " + "smallserialCol smallserial, " + "serialCol serial, " + "textCol text, " + "timeCol time, " + "timestampCol timestamp, " + "uuidCol uuid, " + "xmlCol xml," + "bitvaryCol bit varying(1)," + "int8Col int8," + "boolCol bool," + "characterCol character(4)," + "charactervCol character varying(4)," + "intCol int," + "int4Col int4," + "doublepCol double precision," + "decimalCol decimal(4,2)," + "float4Col float," + "int2Col int2," + "serial2Col serial2," + "serial4Col serial4," + "timetzCol timetz," + "timestamptzCol timestamptz," + "serial8Col serial8)", schema, table)); dbmsConn.execute( String.format( "INSERT INTO \"%s\".\"%s\" VALUES ( " + "1, 1, '1', '1011', true, '((1,1), (2,2))', '1', '1234', '1234', " + "'10', '((1,1),2)', '2018-12-31', 1.0, '88.99.0.0/16', 1, " + "'{\"2\":1}', '{1,2,3}', '((1,1),(2,2))', " + "'08002b:010203', '08002b:0102030405', '12.34', 1.0, '((1,1))', '(1,1)', " + 
"'((1,1))', 1.0, 1, 1, 1, '1110', '2018-12-31 00:00:01', '2018-12-31 00:00:01', " + "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'," + "'<foo>bar</foo>'," + " '1', 1, true, '1234', '1234', 1, 1, 1.0, 1.0, 1.0" + ", 1, 1, 1, '2018-12-31 00:00:01', '2018-12-31 00:00:01', 1)", schema, table)); dbmsConn.execute( String.format( "INSERT INTO \"%s\".\"%s\" VALUES ( " + "NULL, 1, NULL, NULL, NULL, NULL, NULL, NULL, " + "NULL, NULL, NULL, NULL, NULL, " + "NULL, NULL, NULL, NULL, " + "NULL, NULL, NULL, NULL, NULL, NULL, " + "NULL, NULL, NULL, NULL, 1, 1, NULL, NULL, NULL, NULL," + "NULL," + "NULL, NULL, NULL, NULL, NULL, NULL, " + "NULL, NULL, NULL, NULL, NULL, 1, 1, NULL, NULL, 1)", schema, table)); return conn; } public static Connection setupRedshiftForDataTypeTest( String connectionString, String user, String password, String schema, String table) throws SQLException, VerdictDBDbmsException { Connection conn = DriverManager.getConnection(connectionString, user, password); DbmsConnection dbmsConn = new JdbcConnection(conn, new RedshiftSyntax()); dbmsConn.execute(String.format("DROP SCHEMA IF EXISTS \"%s\" CASCADE", schema)); dbmsConn.execute(String.format("CREATE SCHEMA IF NOT EXISTS \"%s\"", schema)); // These types are gathered from (Jul 2018): // https://docs.aws.amazon.com/redshift/latest/dg/c_Supported_data_types.html dbmsConn.execute( String.format( "CREATE TABLE \"%s\".\"%s\" (" + "smallintCol smallint, " + "int2Col int2, " + "integerCol integer, " + "intCol int, " + "int4Col int4, " + "bigintCol bigint, " + "decimalCol decimal(5,2), " + "numericCol numeric(5,2), " + "realCol real, " + "float4Col float4, " + "doublePrecCol double precision, " + "float8Col float8, " + "floatCol float, " + "booleanCol boolean, " + "boolCol bool, " + "charCol char(10), " + "characterCol character(10), " + "ncharCol nchar(10), " + "bpcharCol bpchar, " + "varcharCol varchar(10), " + "charactervarCol character varying(10), " + "nvarcharCol nvarchar(10), " + "textCol text, " + "dateCol date," + "timestampCol timestamp, " + "timestampwtzCol timestamp without time zone, " + "timestamptzCol timestamptz, " + "timestamptzCol2 timestamp with time zone)", schema, table)); List<String> insertDataList = new ArrayList<>(); insertDataList.add("1"); // smallint insertDataList.add("2"); // int2 insertDataList.add("3"); // integer insertDataList.add("4"); // int insertDataList.add("5"); // int4 insertDataList.add("6"); // bigint insertDataList.add("123.45"); // decimal insertDataList.add("-123.45"); // numeric insertDataList.add("1000.001"); // real insertDataList.add("1000.001"); // float4 insertDataList.add("1000.001"); // double precision insertDataList.add("1000.001"); // float8 insertDataList.add("1000.001"); // float insertDataList.add("true"); // boolean insertDataList.add("false"); // bool insertDataList.add("'john'"); // char insertDataList.add("'kim'"); // character insertDataList.add("'michael'"); // nchar insertDataList.add("'jackson'"); // bpchar insertDataList.add("'yo'"); // varchar insertDataList.add("'hey'"); // character varying insertDataList.add("'sup'"); // nvarchar insertDataList.add("'sometext'"); // text insertDataList.add("'2018-12-31'"); // date insertDataList.add("'2018-12-31 11:22:33'"); // timestamp insertDataList.add("'2018-12-31 11:22:33'"); // timestamp without time zone insertDataList.add("'2018-12-31 11:22:33'"); // timestamptz insertDataList.add("'2018-12-31 11:22:33'"); // timestamp with time zone dbmsConn.execute( String.format( "INSERT INTO \"%s\".\"%s\" VALUES (%s)", schema, table, 
Joiner.on(",").join(insertDataList))); List<String> insertNullDataList = new ArrayList<>(); insertNullDataList.add("NULL"); // smallint insertNullDataList.add("NULL"); // int2 insertNullDataList.add("NULL"); // integer insertNullDataList.add("NULL"); // int insertNullDataList.add("NULL"); // int4 insertNullDataList.add("NULL"); // bigint insertNullDataList.add("NULL"); // decimal insertNullDataList.add("NULL"); // numeric insertNullDataList.add("NULL"); // real insertNullDataList.add("NULL"); // float4 insertNullDataList.add("NULL"); // double precision insertNullDataList.add("NULL"); // float8 insertNullDataList.add("NULL"); // float insertNullDataList.add("NULL"); // boolean insertNullDataList.add("NULL"); // bool insertNullDataList.add("NULL"); // char insertNullDataList.add("NULL"); // character insertNullDataList.add("NULL"); // nchar insertNullDataList.add("NULL"); // bpchar insertNullDataList.add("NULL"); // varchar insertNullDataList.add("NULL"); // character varying insertNullDataList.add("NULL"); // nvarchar insertNullDataList.add("NULL"); // text insertNullDataList.add("NULL"); // date insertNullDataList.add("NULL"); // timestamp insertNullDataList.add("NULL"); // timestamp without time zone insertNullDataList.add("NULL"); // timestamptz insertNullDataList.add("NULL"); // timestamp with time zone dbmsConn.execute( String.format( "INSERT INTO \"%s\".\"%s\" VALUES (%s)", schema, table, Joiner.on(",").join(insertNullDataList))); return conn; } public static Connection setupImpalaForDataTypeTest( String connectionString, String user, String password, String schema, String table) throws SQLException, VerdictDBDbmsException { Connection conn = DriverManager.getConnection(connectionString, user, password); DbmsConnection dbmsConn = new JdbcConnection(conn, new ImpalaSyntax()); dbmsConn.execute(String.format("CREATE SCHEMA IF NOT EXISTS `%s`", schema)); dbmsConn.execute(String.format("DROP TABLE IF EXISTS `%s`.`%s`", schema, table)); // These types are gathered from (Jul 2018): // https://www.cloudera.com/documentation/enterprise/latest/topics/impala_datatypes.html#datatypes dbmsConn.execute( String.format( "CREATE TABLE `%s`.`%s` (" + "bigintCol bigint, " + "booleanCol boolean, " + "charCol char(10), " + "decimalCol decimal(5,2), " + "doubleCol double, " + "floatCol float, " + "realCol real, " + "smallintCol smallint, " + "stringCol string, " + "timestampCol timestamp, " + "tinyintCol tinyint, " + "varcharCol varchar(10))", schema, table)); List<String> insertDataList = new ArrayList<>(); insertDataList.add("6"); // bigint insertDataList.add("true"); // boolean insertDataList.add("cast('john' as char(10))"); // char insertDataList.add("123.45"); // decimal insertDataList.add("1000.001"); // double insertDataList.add("1000.001"); // float insertDataList.add("1000.001"); // real insertDataList.add("1"); // smallint insertDataList.add("'michael'"); // string insertDataList.add("now()"); // timestamp insertDataList.add("2"); // tinyint insertDataList.add("cast('jackson' as varchar(10))"); // varchar dbmsConn.execute( String.format( "INSERT INTO `%s`.`%s` VALUES (%s)", schema, table, Joiner.on(",").join(insertDataList))); List<String> insertNullDataList = new ArrayList<>(); insertNullDataList.add("NULL"); // bigint insertNullDataList.add("NULL"); // boolean insertNullDataList.add("NULL"); // char insertNullDataList.add("NULL"); // decimal insertNullDataList.add("NULL"); // double insertNullDataList.add("NULL"); // float insertNullDataList.add("NULL"); // real insertNullDataList.add("NULL"); 
// smallint insertNullDataList.add("NULL"); // string insertNullDataList.add("NULL"); // timestamp insertNullDataList.add("NULL"); // tinyint insertNullDataList.add("NULL"); // varchar dbmsConn.execute( String.format( "INSERT INTO `%s`.`%s` VALUES (%s)", schema, table, Joiner.on(",").join(insertNullDataList))); return conn; } }
[ "\"BUILD_ENV\"", "\"VERDICTDB_TEST_IMPALA_HOST\"", "\"BUILD_ENV\"", "\"VERDICTDB_TEST_REDSHIFT_ENDPOINT\"", "\"VERDICTDB_TEST_REDSHIFT_USER\"", "\"VERDICTDB_TEST_REDSHIFT_PASSWORD\"" ]
[]
[ "BUILD_ENV", "VERDICTDB_TEST_IMPALA_HOST", "VERDICTDB_TEST_REDSHIFT_ENDPOINT", "VERDICTDB_TEST_REDSHIFT_USER", "VERDICTDB_TEST_REDSHIFT_PASSWORD" ]
[]
["BUILD_ENV", "VERDICTDB_TEST_IMPALA_HOST", "VERDICTDB_TEST_REDSHIFT_ENDPOINT", "VERDICTDB_TEST_REDSHIFT_USER", "VERDICTDB_TEST_REDSHIFT_PASSWORD"]
java
5
0
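The helper class in the record above derives its connection settings from environment variables (BUILD_ENV switches between in-cluster service names and localhost; VERDICTDB_TEST_IMPALA_HOST and the VERDICTDB_TEST_REDSHIFT_* variables supply endpoints and credentials) and appends a random lowercase suffix to every schema name so concurrent test runs cannot collide. A minimal Python sketch of that convention follows; the function names and defaults are illustrative only and are not part of the repository.
import os
import random
import string

def resolve_host(docker_host, fallback="localhost"):
    # Mirrors the static initializers above: GitLab / docker-compose builds
    # reach the database by its service name, everything else uses localhost.
    if os.environ.get("BUILD_ENV") in ("GitLab", "DockerCompose"):
        return docker_host
    return fallback

def random_schema(prefix="verdictdb_test_", length=8):
    # Same idea as RandomStringUtils.randomAlphanumeric(8).toLowerCase():
    # a disposable schema name per test run.
    alphabet = string.ascii_lowercase + string.digits
    return prefix + "".join(random.choices(alphabet, k=length))

if __name__ == "__main__":
    print(resolve_host("mysql"), resolve_host("postgres"), random_schema())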
Network_Search/lib/model/model_search.py
"""RAAR3DNet architecture. The model is introduced in: Regional Attention with Architecture-Rebuilt 3D Network for RGB-D Gesture Recognition Benjia Zhou, Yunan Li, Jun Wan https://arxiv.org/pdf/2102.05348.pdf """ import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable import numpy as np import os import sys from collections import OrderedDict import pdb from .Operations import * from .Genotypes import PRIMITIVES_INCEPTION from .Genotypes import Genotype PRIMITIVES = PRIMITIVES_INCEPTION class MaxPool3dSamePadding(nn.MaxPool3d): def compute_pad(self, dim, s): if s % self.stride[dim] == 0: return max(self.kernel_size[dim] - self.stride[dim], 0) else: return max(self.kernel_size[dim] - (s % self.stride[dim]), 0) def forward(self, x): # compute 'same' padding (batch, channel, t, h, w) = x.size() # print t,h,w out_t = np.ceil(float(t) / float(self.stride[0])) out_h = np.ceil(float(h) / float(self.stride[1])) out_w = np.ceil(float(w) / float(self.stride[2])) # print out_t, out_h, out_w pad_t = self.compute_pad(0, t) pad_h = self.compute_pad(1, h) pad_w = self.compute_pad(2, w) # print pad_t, pad_h, pad_w pad_t_f = pad_t // 2 pad_t_b = pad_t - pad_t_f pad_h_f = pad_h // 2 pad_h_b = pad_h - pad_h_f pad_w_f = pad_w // 2 pad_w_b = pad_w - pad_w_f pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b) # print x.size() # print pad x = F.pad(x, pad) return super(MaxPool3dSamePadding, self).forward(x) class Unit3D(nn.Module): def __init__(self, in_channels, output_channels, kernel_shape=(1, 1, 1), stride=(1, 1, 1), padding=0, activation_fn=F.relu, use_batch_norm=True, use_bias=False, name='unit_3d'): """Initializes Unit3D module.""" super(Unit3D, self).__init__() self._output_channels = output_channels self._kernel_shape = kernel_shape self._stride = stride self._use_batch_norm = use_batch_norm self._activation_fn = activation_fn self._use_bias = use_bias self.name = name self.padding = padding self.conv3d = nn.Conv3d(in_channels=in_channels, out_channels=self._output_channels, kernel_size=self._kernel_shape, stride=self._stride, padding=0, # we always want padding to be 0 here. 
We will dynamically pad based on input size in forward function bias=self._use_bias) if self._use_batch_norm: self.bn = nn.BatchNorm3d(self._output_channels, eps=0.001, momentum=0.01) def compute_pad(self, dim, s): if s % self._stride[dim] == 0: return max(self._kernel_shape[dim] - self._stride[dim], 0) else: return max(self._kernel_shape[dim] - (s % self._stride[dim]), 0) def forward(self, x): # compute 'same' padding (batch, channel, t, h, w) = x.size() # print t,h,w out_t = np.ceil(float(t) / float(self._stride[0])) out_h = np.ceil(float(h) / float(self._stride[1])) out_w = np.ceil(float(w) / float(self._stride[2])) # print out_t, out_h, out_w pad_t = self.compute_pad(0, t) pad_h = self.compute_pad(1, h) pad_w = self.compute_pad(2, w) # print pad_t, pad_h, pad_w pad_t_f = pad_t // 2 pad_t_b = pad_t - pad_t_f pad_h_f = pad_h // 2 pad_h_b = pad_h - pad_h_f pad_w_f = pad_w // 2 pad_w_b = pad_w - pad_w_f pad = (pad_w_f, pad_w_b, pad_h_f, pad_h_b, pad_t_f, pad_t_b) # print x.size() # print pad x = F.pad(x, pad) # print x.size() x = self.conv3d(x) if self._use_batch_norm: x = self.bn(x) if self._activation_fn is not None: x = self._activation_fn(x) return x class InceptionModule(nn.Module): def __init__(self, in_channels, out_channels, name): super(InceptionModule, self).__init__() self.b0 = Unit3D(in_channels=in_channels, output_channels=out_channels[0], kernel_shape=[1, 1, 1], padding=0, name=name + '/Branch_0/Conv3d_0a_1x1') self.b1a = Unit3D(in_channels=in_channels, output_channels=out_channels[1], kernel_shape=[1, 1, 1], padding=0, name=name + '/Branch_1/Conv3d_0a_1x1') self.b1b = Unit3D(in_channels=out_channels[1], output_channels=out_channels[2], kernel_shape=[3, 3, 3], name=name + '/Branch_1/Conv3d_0b_3x3') self.b2a = Unit3D(in_channels=in_channels, output_channels=out_channels[3], kernel_shape=[1, 1, 1], padding=0, name=name + '/Branch_2/Conv3d_0a_1x1') self.b2b = Unit3D(in_channels=out_channels[3], output_channels=out_channels[4], kernel_shape=[3, 3, 3], name=name + '/Branch_2/Conv3d_0b_3x3') self.b3a = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(1, 1, 1), padding=0) self.b3b = Unit3D(in_channels=in_channels, output_channels=out_channels[5], kernel_shape=[1, 1, 1], padding=0, name=name + '/Branch_3/Conv3d_0b_1x1') self.name = name def forward(self, x): b0 = self.b0(x) b1 = self.b1b(self.b1a(x)) b2 = self.b2b(self.b2a(x)) b3 = self.b3b(self.b3a(x)) return torch.cat([b0, b1, b2, b3], dim=1) def channel_shuffle(x, groups): batchsize, num_channels, length, height, width = x.data.size() channels_per_group = num_channels // groups # reshape # ([2, 32, 4, 28, 28])--> ([2, 4, 8, 4, 28, 28]) x = x.view(batchsize, groups, channels_per_group, length, height, width) # -->([2, 8, 4, 4, 28, 28]) x = torch.transpose(x, 1, 2).contiguous() # not change x dim # flatten x = x.view(batchsize, -1, length, height, width) return x class MixedOp(nn.Module): def __init__(self, C, stride): super(MixedOp, self).__init__() self._ops = nn.ModuleList() self._k = 2 for primitive in PRIMITIVES: op = OPS[primitive](C // self._k, stride, False) self._ops.append(op) def forward(self, x, weights): # channel proportion k=4 dim_2 = x.shape[1] xtemp = x[:, : dim_2 // self._k, :, :].cuda() # [2, 8, 4, 28, 28] xtemp2 = x[:, dim_2 // self._k:, :, :].cuda() # [2, 24, 4, 28, 28] temp1 = sum(w.cuda() * op(xtemp).cuda() for w, op in zip(weights, self._ops)) ans = torch.cat([temp1, xtemp2], dim=1) ans = channel_shuffle(ans.cuda(), self._k) # ans = torch.cat([ans[ : , dim_2//4:, :, :],ans[ : , : dim_2//4, :, :]],dim=1) # 
besides channel shuffle, channel shift also works return ans class Cell(nn.Module): def __init__(self, steps, multiplier, C_prev_prev, C_prev, C, reduction_prev): super(Cell, self).__init__() # The cell output is the concatenation of every intermediate node's output, so its channel count is high; preprocess0 and preprocess1 first reduce the two inputs to C channels. if reduction_prev: self.preprocess0 = VaniConv3d(C_prev_prev, C, 1, [1, 2, 2], 0, affine=False) else: self.preprocess0 = VaniConv3d(C_prev_prev, C, 1, 1, 0, affine=False) self.preprocess1 = VaniConv3d(C_prev, C, 1, 1, 0, affine=False) self._steps = steps self._multiplier = multiplier self._ops = nn.ModuleList() self._bns = nn.ModuleList() for i in range(self._steps): for j in range(2 + i): stride = 1 op = MixedOp(C, stride) self._ops.append(op) def forward(self, s0, s1, weights, weights2): s0 = self.preprocess0(s0) s1 = self.preprocess1(s1) states = [s0, s1] offset = 0 for i in range(self._steps): # weights[offset + j]=([0.1429, 0.1426, 0.1429, 0.1428, 0.1429, 0.1429, 0.1429], weights2[offset + j]=0.49999 s_temp = sum(weights2[offset + j].cuda() * self._ops[offset + j](h, weights[offset + j]).cuda() for j, h in enumerate(states)) offset += len(states) states.append(s_temp) return torch.cat(states[-self._multiplier:], dim=1) class InceptionI3d(nn.Module): """Inception-v1 I3D architecture. The model is introduced in: Quo Vadis, Action Recognition? A New Model and the Kinetics Dataset Joao Carreira, Andrew Zisserman https://arxiv.org/pdf/1705.07750v1.pdf. See also the Inception architecture, introduced in: Going deeper with convolutions Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, Andrew Rabinovich. http://arxiv.org/pdf/1409.4842v1.pdf. """ # Endpoints of the model in order. During construction, all the endpoints up # to a designated `final_endpoint` are returned in a dictionary as the # second return value. VALID_ENDPOINTS = ( 'Conv3d_1a_7x7', 'MaxPool3d_2a_3x3', 'Conv3d_2b_1x1', 'Conv3d_2c_3x3', 'MaxPool3d_3a_3x3', 'cell1', 'MaxPool3d_4a_3x3', 'cell2', 'MaxPool3d_5a_2x2', 'cell3', 'Logits', 'Predictions', ) def __init__(self, num_classes, criterion, local_rank, steps=4, multiplier=4, spatial_squeeze=True, final_endpoint='Logits', name='inception_i3d', in_channels=3, dropout_keep_prob=0.5): """Initializes I3D model instance. Args: num_classes: The number of outputs in the logit layer (default 400, which matches the Kinetics dataset). spatial_squeeze: Whether to squeeze the spatial dimensions for the logits before returning (default True). final_endpoint: The model contains many possible endpoints. `final_endpoint` specifies the last endpoint for the model to be built up to. In addition to the output at `final_endpoint`, all the outputs at endpoints up to `final_endpoint` will also be returned, in a dictionary. `final_endpoint` must be one of InceptionI3d.VALID_ENDPOINTS (default 'Logits'). name: A string (optional). The name of this module. Raises: ValueError: if `final_endpoint` is not recognized. 
""" if final_endpoint not in self.VALID_ENDPOINTS: raise ValueError('Unknown final endpoint %s' % final_endpoint) super(InceptionI3d, self).__init__() self._num_classes = num_classes self._spatial_squeeze = spatial_squeeze self._final_endpoint = final_endpoint self.logits = None self._criterion = criterion self._steps = steps self._multiplier = multiplier if self._final_endpoint not in self.VALID_ENDPOINTS: raise ValueError('Unknown final endpoint %s' % self._final_endpoint) self.MaxpoolSpa = nn.MaxPool3d(kernel_size=[1, 3, 3], padding=[0, 1, 1], stride=[1, 2, 2]) self.end_points = {} end_point = 'Conv3d_1a_7x7' self.end_points[end_point] = Unit3D(in_channels=in_channels, output_channels=64, kernel_shape=[7, 7, 7], stride=(2, 2, 2), padding=(3, 3, 3), name=name + end_point) if self._final_endpoint == end_point: return end_point = 'MaxPool3d_2a_3x3' self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0) if self._final_endpoint == end_point: return end_point = 'Conv3d_2b_1x1' self.end_points[end_point] = Unit3D(in_channels=64, output_channels=64, kernel_shape=[1, 1, 1], padding=0, name=name + end_point) if self._final_endpoint == end_point: return end_point = 'Conv3d_2c_3x3' self.end_points[end_point] = Unit3D(in_channels=64, output_channels=192, kernel_shape=[3, 3, 3], padding=1, name=name + end_point) if self._final_endpoint == end_point: return end_point = 'MaxPool3d_3a_3x3' self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[1, 3, 3], stride=(1, 2, 2), padding=0) if self._final_endpoint == end_point: return end_point = 'cell1' self.end_points[end_point] = nn.ModuleList() C_curr = 64 C_prev_prev, C_prev, C_curr = C_curr * 3, C_curr * 3, C_curr reduction_prev = False for i in range(2): cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction_prev) self.end_points[end_point] += [cell] C_prev_prev, C_prev = C_prev, multiplier * C_curr if self._final_endpoint == end_point: return end_point = 'MaxPool3d_4a_3x3' self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[3, 3, 3], stride=(2, 2, 2), padding=0) if self._final_endpoint == end_point: return end_point = 'cell2' self.end_points[end_point] = nn.ModuleList() C_prev_prev, C_prev, C_curr = C_prev, C_prev, C_curr for i in range(5): if i == 2: C_curr *= 2 reduction_prev = False else: reduction_prev = False cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction_prev) self.end_points[end_point] += [cell] C_prev_prev, C_prev = C_prev, multiplier * C_curr end_point = 'MaxPool3d_5a_2x2' self.end_points[end_point] = MaxPool3dSamePadding(kernel_size=[2, 2, 2], stride=(2, 2, 2), padding=0) if self._final_endpoint == end_point: return end_point = 'cell3' self.end_points[end_point] = nn.ModuleList() C_prev_prev, C_prev, C_curr = C_prev, C_prev, C_curr reduction_prev = False for i in range(2): if i == 1: C_curr *= 2 cell = Cell(steps, multiplier, C_prev_prev, C_prev, C_curr, reduction_prev) self.end_points[end_point] += [cell] C_prev_prev, C_prev = C_prev, multiplier * C_curr if self._final_endpoint == end_point: return end_point = 'Logits' # self.avg_pool = nn.AvgPool3d(kernel_size=[2, 7, 7], # stride=(1, 1, 1)) self.avg_pool = nn.AdaptiveAvgPool3d(1) self.dropout = nn.Dropout(dropout_keep_prob) self.logits = Unit3D(in_channels=384 + 384 + 128 + 128, output_channels=self._num_classes, kernel_shape=[1, 1, 1], padding=0, activation_fn=None, use_batch_norm=False, use_bias=True, name='logits') self.build() self._initialize_alphas(local_rank) def replace_logits(self, 
num_classes): self._num_classes = num_classes self.logits = Unit3D(in_channels=384 + 384 + 128 + 128, output_channels=self._num_classes, kernel_shape=[1, 1, 1], padding=0, activation_fn=None, use_batch_norm=False, use_bias=True, name='logits') def build(self): for k in self.end_points.keys(): self.add_module(k, self.end_points[k]) def forward(self, x): for end_point in self.VALID_ENDPOINTS: if end_point in self.end_points: if end_point == 'cell1': s0 = s1 = x for ii, cell in enumerate(self._modules[end_point]): weights = F.softmax(self.alphas_normal1, dim=-1) # 14x7(edge x operations) n = 3 start = 2 weights2 = F.softmax(self.betas_normal1[0:2], dim=-1) # self.betas_normal16: 14x1 for i in range(self._steps - 1): end = start + n tw2 = F.softmax(self.betas_normal1[start:end], dim=-1) # 2-5, 5-9, 9-14 start = end n += 1 weights2 = torch.cat([weights2, tw2], dim=0) s0, s1 = s1, cell(s0, s1, weights, weights2) x = s1 elif end_point == 'cell2': s0 = s1 = x for ii, cell in enumerate(self._modules[end_point]): weights = F.softmax(self.alphas_normal2, dim=-1) # 14x7(edge x operations) n = 3 start = 2 weights2 = F.softmax(self.betas_normal2[0:2], dim=-1) # self.betas_normal16: 14x1 for i in range(self._steps - 1): end = start + n tw2 = F.softmax(self.betas_normal2[start:end], dim=-1) # 2-5, 5-9, 9-14 start = end n += 1 weights2 = torch.cat([weights2, tw2], dim=0) s0, s1 = s1, cell(s0, s1, weights, weights2) x = s1 elif end_point == 'cell3': s0 = s1 = x for ii, cell in enumerate(self._modules[end_point]): weights = F.softmax(self.alphas_normal3, dim=-1) # 14x7(edge x operations) n = 3 start = 2 weights2 = F.softmax(self.betas_normal3[0:2], dim=-1) # self.betas_normal16: 14x1 for i in range(self._steps - 1): end = start + n tw3 = F.softmax(self.betas_normal3[start:end], dim=-1) # 2-5, 5-9, 9-14 start = end n += 1 weights2 = torch.cat([weights2, tw3], dim=0) s0, s1 = s1, cell(s0, s1, weights, weights2) x = s1 else: x = self._modules[end_point](x) # use _modules to work with dataparallel x = self.logits(self.dropout(self.avg_pool(x))) if self._spatial_squeeze: logits = x.squeeze(3).squeeze(3) # logits is batch X time X classes, which is what we want to work with return logits.squeeze() def _loss(self, inputs, target): logits = self(inputs) return self._criterion(logits, target) def _initialize_alphas(self, local_rank): k = sum(1 for i in range(self._steps) for n in range(2 + i)) num_ops = len(PRIMITIVES) self.alphas_normal1 = Variable(1e-3 * torch.randn(k, num_ops).cuda(local_rank), requires_grad=True) # net normal self.betas_normal1 = Variable(1e-3 * torch.randn(k).cuda(local_rank), requires_grad=True) # edge normal self.alphas_normal2 = Variable(1e-3 * torch.randn(k, num_ops).cuda(local_rank), requires_grad=True) # net normal self.betas_normal2 = Variable(1e-3 * torch.randn(k).cuda(local_rank), requires_grad=True) # edge normal self.alphas_normal3 = Variable(1e-3 * torch.randn(k, num_ops).cuda(local_rank), requires_grad=True) # net normal self.betas_normal3 = Variable(1e-3 * torch.randn(k).cuda(local_rank), requires_grad=True) # edge normal self._arch_parameters = [ self.alphas_normal1, self.betas_normal1, self.alphas_normal2, self.betas_normal2, self.alphas_normal3, self.betas_normal3 ] def resume_arch_parameters(self, parames, local_rank): self.alphas_normal1 = Variable(parames[0].cuda(local_rank), requires_grad=True) # net normal self.betas_normal1 = Variable(parames[1].cuda(local_rank), requires_grad=True) # edge normal self.alphas_normal2 = Variable(parames[2].cuda(local_rank), 
requires_grad=True) # net normal self.betas_normal2 = Variable(parames[3].cuda(local_rank), requires_grad=True) # edge normal self.alphas_normal3 = Variable(parames[4].cuda(local_rank), requires_grad=True) # net normal self.betas_normal3 = Variable(parames[5].cuda(local_rank), requires_grad=True) # edge normal def arch_parameters(self): return self._arch_parameters def extract_features(self, x): for end_point in self.VALID_ENDPOINTS: if end_point in self.end_points: x = self._modules[end_point](x) return self.avg_pool(x) def genotype(self): def _parse(weights, weights2): gene = [] n = 2 start = 0 # For each intermediate node, scale every edge's operation weights by the normalized edge weight, keep the two strongest non-'none' edges, then pick the best operation on each kept edge. for i in range(self._steps): end = start + n # W: [[0.14320895 0.14271285 0.14264019 0.14287315 0.14279652 0.14292398, 0.14284438] # [0.1430605 0.14276284 0.14267652 0.14286381 0.1430042 0.14296356, 0.14266858]] W = weights[start:end].copy() W2 = weights2[start:end].copy() # [0.4998488 0.5001512] for j in range(n): W[j, :] = W[j, :] * W2[j] # each operation * weights2 edges = sorted(range(i + 2), key=lambda x: -max(W[x][k] for k in range(len(W[x])) if k != PRIMITIVES.index('none')))[ :2] # edges=1, 0 # edges = sorted(range(i + 2), key=lambda x: -W2[x])[:2] for j in edges: k_best = None for k in range(len(W[j])): if k != PRIMITIVES.index('none'): if k_best is None or W[j][k] > W[j][k_best]: k_best = k gene.append((PRIMITIVES[k_best], j)) start = end n += 1 return gene # start = 2 skips the two input nodes; loop over the remaining 3 intermediate nodes and softmax-normalize the same-level betas_normal weights together n = 3 start = 2 weightsn1 = F.softmax(self.betas_normal1[0:2], dim=-1) weightsn2 = F.softmax(self.betas_normal2[0:2], dim=-1) weightsn3 = F.softmax(self.betas_normal3[0:2], dim=-1) for i in range(self._steps - 1): end = start + n # print(self.betas_reduce[start:end]) tn1 = F.softmax(self.betas_normal1[start:end], dim=-1) tn2 = F.softmax(self.betas_normal2[start:end], dim=-1) tn3 = F.softmax(self.betas_normal3[start:end], dim=-1) start = end n += 1 weightsn1 = torch.cat([weightsn1, tn1], dim=0) weightsn2 = torch.cat([weightsn2, tn2], dim=0) weightsn3 = torch.cat([weightsn3, tn3], dim=0) gene_normal1 = _parse(F.softmax(self.alphas_normal1, dim=-1).data.cpu().numpy(), weightsn1.data.cpu().numpy()) gene_normal2 = _parse(F.softmax(self.alphas_normal2, dim=-1).data.cpu().numpy(), weightsn2.data.cpu().numpy()) gene_normal3 = _parse(F.softmax(self.alphas_normal3, dim=-1).data.cpu().numpy(), weightsn3.data.cpu().numpy()) concat = range(2 + self._steps - self._multiplier, self._steps + 2) genotype = Genotype( normal1=gene_normal1, normal_concat1=concat, normal2=gene_normal2, normal_concat2=concat, normal3=gene_normal3, normal_concat3=concat, ) return genotype if __name__ == '__main__': import os, torch sample_size = 224 sample_duration = 32 num_classes = 40 resnet_shortcut = 'A' os.environ["CUDA_VISIBLE_DEVICES"] = '1' # local_rank 0 is passed explicitly: the constructor requires it, and CUDA_VISIBLE_DEVICES exposes a single device model = InceptionI3d(num_classes, torch.nn.CrossEntropyLoss(), 0, in_channels=3).cuda() inputs = torch.randn(2, 3, 64, 112, 112).cuda() # shape (C x T x H x W) outputs = model(inputs) print(outputs.shape)
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
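MixedOp in the record above implements a partial-channel search step: only 1/k of the channels (k = 2 in this file, despite the "k=4" comment) pass through the weighted candidate operations, and channel_shuffle then rotates which channels get searched on the next pass. The following is a rough, self-contained restatement of that shuffle step for (N, C, T, H, W) video tensors, written for illustration rather than taken from the repository.
import torch

def channel_shuffle_5d(x, groups):
    # Reshape -> transpose -> flatten, the same trick used by channel_shuffle()
    # in the file above, so grouped channels end up interleaved.
    n, c, t, h, w = x.size()
    x = x.view(n, groups, c // groups, t, h, w)
    x = torch.transpose(x, 1, 2).contiguous()
    return x.view(n, -1, t, h, w)

# Toy check: shuffling only permutes channels, so shape and total sum are unchanged.
x = torch.arange(2 * 8 * 1 * 2 * 2, dtype=torch.float32).view(2, 8, 1, 2, 2)
y = channel_shuffle_5d(x, groups=2)
assert y.shape == x.shape
assert torch.allclose(x.sum(), y.sum())
assert not torch.equal(x, y)  # the channel order really did change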
tests/conftest.py
import json import os import sys import pytest from flask.testing import FlaskClient from gae_flask_boilerplate import create_app sys.path.insert(1, os.environ.get('GAEPATH')) sys.path.insert(1, os.environ.get('SRCPATH')) import dev_appserver # noqa:E402 isort:skip dev_appserver.fix_sys_path() # isort:skip from google.appengine.tools.devappserver2 import application_configuration # noqa:E402 isort:skip cfg = application_configuration.ApplicationConfiguration([os.path.join(os.environ.get('SRCPATH'), 'app.yaml')]) # noqa:E501 os.environ['APPLICATION_ID'] = cfg.app_id # simulate same environment as devappserver2 os.environ['CURRENT_VERSION_ID'] = cfg.modules[0].version_id try: import appengine_config # noqa:F401 except ImportError: print('Note: unable to import appengine_config.') from google.appengine.ext import testbed # noqa:E402 isort:skip @pytest.fixture(autouse=True) def tb(): tb = testbed.Testbed() # Then activate the testbed, which will allow you to use # service stubs. tb.activate() # Next, declare which service stubs you want to use. tb.init_datastore_v3_stub() tb.init_memcache_stub() # tb.init_user_stub() yield tb # Don't forget to deactivate the testbed after the tests are # completed. If the testbed is not deactivated, the original # stubs will not be restored. tb.deactivate() class TestClient(FlaskClient): def get_json(self, url, status=200, **kwargs): response = self.get(url, **kwargs) assert response.status_code == status assert response.content_type == 'application/json' return json.loads(response.data.decode('utf8')) def post_json(self, url, data, status=200, **kwargs): response = self.post(url, data=json.dumps(data), headers={'content-type': 'application/json'}, **kwargs) assert response.status_code == status assert response.content_type == 'application/json' return json.loads(response.data.decode('utf8')) def get_specs(self, prefix='', status=200, **kwargs): """Get a Swagger specification for a RestPlus API""" return self.get_json('{0}/swagger.json'.format(prefix), status=status, **kwargs) @pytest.fixture def app(): app = create_app('testing') app.test_client_class = TestClient yield app # @pytest.fixture(autouse=True) # def _push_custom_request_context(request): # app = request.getfuncargvalue('app') # options = request.keywords.get('request_context') # # if options is None: # return # # ctx = app.test_request_context(*options.args, **options.kwargs) # ctx.push() # # def teardown(): # ctx.pop() # # request.addfinalizer(teardown)
[]
[]
[ "SRCPATH", "CURRENT_VERSION_ID", "GAEPATH", "APPLICATION_ID" ]
[]
["SRCPATH", "CURRENT_VERSION_ID", "GAEPATH", "APPLICATION_ID"]
python
4
0
pkg/jx/cmd/step_helm_release.go
package cmd import ( "bufio" "fmt" "io" "io/ioutil" "net/http" "os" "path/filepath" "github.com/jenkins-x/jx/pkg/helm" "github.com/jenkins-x/jx/pkg/jx/cmd/templates" "github.com/jenkins-x/jx/pkg/kube" "github.com/jenkins-x/jx/pkg/log" "github.com/jenkins-x/jx/pkg/util" "github.com/pkg/errors" "github.com/spf13/cobra" "gopkg.in/AlecAivazis/survey.v1/terminal" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) const ( defaultChartRepo = "http://jenkins-x-chartmuseum:8080" ) // StepHelmReleaseOptions contains the command line flags type StepHelmReleaseOptions struct { StepHelmOptions } var ( StepHelmReleaseLong = templates.LongDesc(` This pipeline step releases the Helm chart in the current directory `) StepHelmReleaseExample = templates.Examples(` jx step helm release `) ) func NewCmdStepHelmRelease(f Factory, in terminal.FileReader, out terminal.FileWriter, errOut io.Writer) *cobra.Command { options := StepHelmReleaseOptions{ StepHelmOptions: StepHelmOptions{ StepOptions: StepOptions{ CommonOptions: CommonOptions{ Factory: f, In: in, Out: out, Err: errOut, }, }, }, } cmd := &cobra.Command{ Use: "release", Short: "Releases the helm chart in the current directory", Aliases: []string{""}, Long: StepHelmReleaseLong, Example: StepHelmReleaseExample, Run: func(cmd *cobra.Command, args []string) { options.Cmd = cmd options.Args = args err := options.Run() CheckErr(err) }, } options.addStepHelmFlags(cmd) return cmd } func (o *StepHelmReleaseOptions) Run() error { dir := o.Dir _, err := o.helmInitDependencyBuild(dir, o.defaultReleaseCharts()) if err != nil { return errors.Wrapf(err, "failed to build dependencies for chart from directory '%s'", dir) } o.Helm().SetCWD(dir) err = o.Helm().PackageChart() if err != nil { return errors.Wrapf(err, "failed to package the chart from directory '%s'", dir) } chartFile := filepath.Join(dir, "Chart.yaml") name, version, err := helm.LoadChartNameAndVersion(chartFile) if err != nil { return errors.Wrap(err, "failed to load chart name and version") } if name == "" { return fmt.Errorf("Could not find name in chart %s", chartFile) } if version == "" { return fmt.Errorf("Could not find version in chart %s", chartFile) } tarball := fmt.Sprintf("%s-%s.tgz", name, version) exists, err := util.FileExists(tarball) if err != nil { return errors.Wrapf(err, "don't find the chart archive '%s'", tarball) } if !exists { return fmt.Errorf("Generated helm file %s does not exist!", tarball) } defer os.Remove(tarball) chartRepo := o.releaseChartMuseumUrl() userName := os.Getenv("CHARTMUSEUM_CREDS_USR") password := os.Getenv("CHARTMUSEUM_CREDS_PSW") if userName == "" || password == "" { // lets try load them from the secret directly client, ns, err := o.KubeClient() if err != nil { return errors.Wrap(err, "failed to create the kube client") } secret, err := client.CoreV1().Secrets(ns).Get(kube.SecretJenkinsChartMuseum, metav1.GetOptions{}) if err != nil { log.Warnf("Could not load Secret %s in namespace %s: %s\n", kube.SecretJenkinsChartMuseum, ns, err) } else { if secret != nil && secret.Data != nil { if userName == "" { userName = string(secret.Data["BASIC_AUTH_USER"]) } if password == "" { password = string(secret.Data["BASIC_AUTH_PASS"]) } } } } if userName == "" { return fmt.Errorf("No environment variable $CHARTMUSEUM_CREDS_USR defined") } if password == "" { return fmt.Errorf("No environment variable CHARTMUSEUM_CREDS_PSW defined") } // post the tarball to the chart repository client := http.Client{} u := util.UrlJoin(chartRepo, "/api/charts") file, err := os.Open(tarball) if err 
!= nil { return errors.Wrapf(err, "failed to open the chart archive '%s'", tarball) } log.Infof("Uploading chart file %s to %s\n", util.ColorInfo(tarball), util.ColorInfo(u)) req, err := http.NewRequest(http.MethodPost, u, bufio.NewReader(file)) if err != nil { return errors.Wrapf(err, "failed to build the chart upload request for endpoint '%s'", u) } req.SetBasicAuth(userName, password) req.Header.Set("Content-Type", "application/gzip") res, err := client.Do(req) if err != nil { errRes, _ := ioutil.ReadAll(res.Body) return errors.Wrapf(err, "failed to execute the chart upload HTTP request, response: '%s'", string(errRes)) } body, err := ioutil.ReadAll(res.Body) if err != nil { return errors.Wrap(err, "failed to read the response body of chart upload request") } responseMessage := string(body) statusCode := res.StatusCode log.Infof("Received %d response: %s\n", statusCode, responseMessage) if statusCode >= 300 { return fmt.Errorf("Failed to post chart to %s due to response %d: %s", u, statusCode, responseMessage) } return nil }
[ "\"CHARTMUSEUM_CREDS_USR\"", "\"CHARTMUSEUM_CREDS_PSW\"" ]
[]
[ "CHARTMUSEUM_CREDS_USR", "CHARTMUSEUM_CREDS_PSW" ]
[]
["CHARTMUSEUM_CREDS_USR", "CHARTMUSEUM_CREDS_PSW"]
go
2
0
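
For Go records like the one above, the first list keeps the raw quoted matches (e.g. "\"CHARTMUSEUM_CREDS_USR\"") while the later list holds the bare key names. One way to produce both forms is a regex over os.Getenv calls, sketched below; GO_GETENV and go_constant_env_keys are illustrative names, the pattern is an assumption rather than the dataset's tooling, and the sorted ordering is for the sketch only.

import re

GO_GETENV = re.compile(r'os\.Getenv\(\s*("([A-Za-z_][A-Za-z0-9_]*)")\s*\)')

def go_constant_env_keys(go_source: str):
    raw, names = [], []
    for quoted, name in GO_GETENV.findall(go_source):
        raw.append(quoted)   # keeps the Go quotes, e.g. '"CHARTMUSEUM_CREDS_USR"'
        names.append(name)   # bare key, e.g. 'CHARTMUSEUM_CREDS_USR'
    return raw, sorted(set(names))

raw, names = go_constant_env_keys('userName := os.Getenv("CHARTMUSEUM_CREDS_USR")\n'
                                  'password := os.Getenv("CHARTMUSEUM_CREDS_PSW")\n')
print(raw)    # ['"CHARTMUSEUM_CREDS_USR"', '"CHARTMUSEUM_CREDS_PSW"']
print(names)  # ['CHARTMUSEUM_CREDS_PSW', 'CHARTMUSEUM_CREDS_USR']
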
mypy/config_parser.py
import argparse import configparser import glob as fileglob from io import StringIO import os import re import sys import tomli from typing import (Any, Callable, Dict, List, Mapping, MutableMapping, Optional, Sequence, TextIO, Tuple, Union) from typing_extensions import Final, TypeAlias as _TypeAlias from mypy import defaults from mypy.options import Options, PER_MODULE_OPTIONS _CONFIG_VALUE_TYPES: _TypeAlias = Union[ str, bool, int, float, Dict[str, str], List[str], Tuple[int, int], ] _INI_PARSER_CALLABLE: _TypeAlias = Callable[[Any], _CONFIG_VALUE_TYPES] def parse_version(v: str) -> Tuple[int, int]: m = re.match(r'\A(\d)\.(\d+)\Z', v) if not m: raise argparse.ArgumentTypeError( "Invalid python version '{}' (expected format: 'x.y')".format(v)) major, minor = int(m.group(1)), int(m.group(2)) if major == 2: if minor != 7: raise argparse.ArgumentTypeError( "Python 2.{} is not supported (must be 2.7)".format(minor)) elif major == 3: if minor < defaults.PYTHON3_VERSION_MIN[1]: raise argparse.ArgumentTypeError( "Python 3.{0} is not supported (must be {1}.{2} or higher)".format(minor, *defaults.PYTHON3_VERSION_MIN)) else: raise argparse.ArgumentTypeError( "Python major version '{}' out of range (must be 2 or 3)".format(major)) return major, minor def try_split(v: Union[str, Sequence[str]], split_regex: str = '[,]') -> List[str]: """Split and trim a str or list of str into a list of str""" if isinstance(v, str): return [p.strip() for p in re.split(split_regex, v)] return [p.strip() for p in v] def expand_path(path: str) -> str: """Expand the user home directory and any environment variables contained within the provided path. """ return os.path.expandvars(os.path.expanduser(path)) def str_or_array_as_list(v: Union[str, Sequence[str]]) -> List[str]: if isinstance(v, str): return [v.strip()] if v.strip() else [] return [p.strip() for p in v if p.strip()] def split_and_match_files_list(paths: Sequence[str]) -> List[str]: """Take a list of files/directories (with support for globbing through the glob library). Where a path/glob matches no file, we still include the raw path in the resulting list. Returns a list of file paths """ expanded_paths = [] for path in paths: path = expand_path(path.strip()) globbed_files = fileglob.glob(path, recursive=True) if globbed_files: expanded_paths.extend(globbed_files) else: expanded_paths.append(path) return expanded_paths def split_and_match_files(paths: str) -> List[str]: """Take a string representing a list of files/directories (with support for globbing through the glob library). Where a path/glob matches no file, we still include the raw path in the resulting list. Returns a list of file paths """ return split_and_match_files_list(paths.split(',')) def check_follow_imports(choice: str) -> str: choices = ['normal', 'silent', 'skip', 'error'] if choice not in choices: raise argparse.ArgumentTypeError( "invalid choice '{}' (choose from {})".format( choice, ', '.join("'{}'".format(x) for x in choices))) return choice # For most options, the type of the default value set in options.py is # sufficient, and we don't have to do anything here. This table # exists to specify types for values initialized to None or container # types. 
ini_config_types: Final[Dict[str, _INI_PARSER_CALLABLE]] = { 'python_version': parse_version, 'strict_optional_whitelist': lambda s: s.split(), 'custom_typing_module': str, 'custom_typeshed_dir': expand_path, 'mypy_path': lambda s: [expand_path(p.strip()) for p in re.split('[,:]', s)], 'files': split_and_match_files, 'quickstart_file': expand_path, 'junit_xml': expand_path, # These two are for backwards compatibility 'silent_imports': bool, 'almost_silent': bool, 'follow_imports': check_follow_imports, 'no_site_packages': bool, 'plugins': lambda s: [p.strip() for p in s.split(',')], 'always_true': lambda s: [p.strip() for p in s.split(',')], 'always_false': lambda s: [p.strip() for p in s.split(',')], 'disable_error_code': lambda s: [p.strip() for p in s.split(',')], 'enable_error_code': lambda s: [p.strip() for p in s.split(',')], 'package_root': lambda s: [p.strip() for p in s.split(',')], 'cache_dir': expand_path, 'python_executable': expand_path, 'strict': bool, 'exclude': lambda s: [s.strip()], } # Reuse the ini_config_types and overwrite the diff toml_config_types: Final[Dict[str, _INI_PARSER_CALLABLE]] = ini_config_types.copy() toml_config_types.update({ 'python_version': lambda s: parse_version(str(s)), 'strict_optional_whitelist': try_split, 'mypy_path': lambda s: [expand_path(p) for p in try_split(s, '[,:]')], 'files': lambda s: split_and_match_files_list(try_split(s)), 'follow_imports': lambda s: check_follow_imports(str(s)), 'plugins': try_split, 'always_true': try_split, 'always_false': try_split, 'disable_error_code': try_split, 'enable_error_code': try_split, 'package_root': try_split, 'exclude': str_or_array_as_list, }) def parse_config_file(options: Options, set_strict_flags: Callable[[], None], filename: Optional[str], stdout: Optional[TextIO] = None, stderr: Optional[TextIO] = None) -> None: """Parse a config file into an Options object. Errors are written to stderr but are not fatal. If filename is None, fall back to default config files. """ stdout = stdout or sys.stdout stderr = stderr or sys.stderr if filename is not None: config_files: Tuple[str, ...] 
= (filename,) else: config_files = tuple(map(os.path.expanduser, defaults.CONFIG_FILES)) config_parser = configparser.RawConfigParser() for config_file in config_files: if not os.path.exists(config_file): continue try: if is_toml(config_file): with open(config_file, encoding="utf-8") as f: toml_data = tomli.loads(f.read()) # Filter down to just mypy relevant toml keys toml_data = toml_data.get('tool', {}) if 'mypy' not in toml_data: continue toml_data = {'mypy': toml_data['mypy']} parser: MutableMapping[str, Any] = destructure_overrides(toml_data) config_types = toml_config_types else: config_parser.read(config_file) parser = config_parser config_types = ini_config_types except (tomli.TOMLDecodeError, configparser.Error, ConfigTOMLValueError) as err: print("%s: %s" % (config_file, err), file=stderr) else: if config_file in defaults.SHARED_CONFIG_FILES and 'mypy' not in parser: continue file_read = config_file options.config_file = file_read break else: return os.environ['MYPY_CONFIG_FILE_DIR'] = os.path.dirname( os.path.abspath(config_file)) if 'mypy' not in parser: if filename or file_read not in defaults.SHARED_CONFIG_FILES: print("%s: No [mypy] section in config file" % file_read, file=stderr) else: section = parser['mypy'] prefix = '%s: [%s]: ' % (file_read, 'mypy') updates, report_dirs = parse_section( prefix, options, set_strict_flags, section, config_types, stderr) for k, v in updates.items(): setattr(options, k, v) options.report_dirs.update(report_dirs) for name, section in parser.items(): if name.startswith('mypy-'): prefix = get_prefix(file_read, name) updates, report_dirs = parse_section( prefix, options, set_strict_flags, section, config_types, stderr) if report_dirs: print("%sPer-module sections should not specify reports (%s)" % (prefix, ', '.join(s + '_report' for s in sorted(report_dirs))), file=stderr) if set(updates) - PER_MODULE_OPTIONS: print("%sPer-module sections should only specify per-module flags (%s)" % (prefix, ', '.join(sorted(set(updates) - PER_MODULE_OPTIONS))), file=stderr) updates = {k: v for k, v in updates.items() if k in PER_MODULE_OPTIONS} globs = name[5:] for glob in globs.split(','): # For backwards compatibility, replace (back)slashes with dots. glob = glob.replace(os.sep, '.') if os.altsep: glob = glob.replace(os.altsep, '.') if (any(c in glob for c in '?[]!') or any('*' in x and x != '*' for x in glob.split('.'))): print("%sPatterns must be fully-qualified module names, optionally " "with '*' in some components (e.g spam.*.eggs.*)" % prefix, file=stderr) else: options.per_module_options[glob] = updates def get_prefix(file_read: str, name: str) -> str: if is_toml(file_read): module_name_str = 'module = "%s"' % '-'.join(name.split('-')[1:]) else: module_name_str = name return '%s: [%s]: ' % (file_read, module_name_str) def is_toml(filename: str) -> bool: return filename.lower().endswith('.toml') def destructure_overrides(toml_data: Dict[str, Any]) -> Dict[str, Any]: """Take the new [[tool.mypy.overrides]] section array in the pyproject.toml file, and convert it back to a flatter structure that the existing config_parser can handle. E.g. 
the following pyproject.toml file: [[tool.mypy.overrides]] module = [ "a.b", "b.*" ] disallow_untyped_defs = true [[tool.mypy.overrides]] module = 'c' disallow_untyped_defs = false Would map to the following config dict that it would have gotten from parsing an equivalent ini file: { "mypy-a.b": { disallow_untyped_defs = true, }, "mypy-b.*": { disallow_untyped_defs = true, }, "mypy-c": { disallow_untyped_defs: false, }, } """ if 'overrides' not in toml_data['mypy']: return toml_data if not isinstance(toml_data['mypy']['overrides'], list): raise ConfigTOMLValueError("tool.mypy.overrides sections must be an array. Please make " "sure you are using double brackets like so: [[tool.mypy.overrides]]") result = toml_data.copy() for override in result['mypy']['overrides']: if 'module' not in override: raise ConfigTOMLValueError("toml config file contains a [[tool.mypy.overrides]] " "section, but no module to override was specified.") if isinstance(override['module'], str): modules = [override['module']] elif isinstance(override['module'], list): modules = override['module'] else: raise ConfigTOMLValueError("toml config file contains a [[tool.mypy.overrides]] " "section with a module value that is not a string or a list of " "strings") for module in modules: module_overrides = override.copy() del module_overrides['module'] old_config_name = 'mypy-%s' % module if old_config_name not in result: result[old_config_name] = module_overrides else: for new_key, new_value in module_overrides.items(): if (new_key in result[old_config_name] and result[old_config_name][new_key] != new_value): raise ConfigTOMLValueError("toml config file contains " "[[tool.mypy.overrides]] sections with conflicting " "values. Module '%s' has two different values for '%s'" % (module, new_key)) result[old_config_name][new_key] = new_value del result['mypy']['overrides'] return result def parse_section(prefix: str, template: Options, set_strict_flags: Callable[[], None], section: Mapping[str, Any], config_types: Dict[str, Any], stderr: TextIO = sys.stderr ) -> Tuple[Dict[str, object], Dict[str, str]]: """Parse one section of a config file. Returns a dict of option values encountered, and a dict of report directories. """ results: Dict[str, object] = {} report_dirs: Dict[str, str] = {} for key in section: invert = False options_key = key if key in config_types: ct = config_types[key] else: dv = None # We have to keep new_semantic_analyzer in Options # for plugin compatibility but it is not a valid option anymore. 
assert hasattr(template, 'new_semantic_analyzer') if key != 'new_semantic_analyzer': dv = getattr(template, key, None) if dv is None: if key.endswith('_report'): report_type = key[:-7].replace('_', '-') if report_type in defaults.REPORTER_NAMES: report_dirs[report_type] = str(section[key]) else: print("%sUnrecognized report type: %s" % (prefix, key), file=stderr) continue if key.startswith('x_'): pass # Don't complain about `x_blah` flags elif key.startswith('no_') and hasattr(template, key[3:]): options_key = key[3:] invert = True elif key.startswith('allow') and hasattr(template, 'dis' + key): options_key = 'dis' + key invert = True elif key.startswith('disallow') and hasattr(template, key[3:]): options_key = key[3:] invert = True elif key == 'strict': pass # Special handling below else: print("%sUnrecognized option: %s = %s" % (prefix, key, section[key]), file=stderr) if invert: dv = getattr(template, options_key, None) else: continue ct = type(dv) v: Any = None try: if ct is bool: if isinstance(section, dict): v = convert_to_boolean(section.get(key)) else: v = section.getboolean(key) # type: ignore[attr-defined] # Until better stub if invert: v = not v elif callable(ct): if invert: print("%sCan not invert non-boolean key %s" % (prefix, options_key), file=stderr) continue try: v = ct(section.get(key)) except argparse.ArgumentTypeError as err: print("%s%s: %s" % (prefix, key, err), file=stderr) continue else: print("%sDon't know what type %s should have" % (prefix, key), file=stderr) continue except ValueError as err: print("%s%s: %s" % (prefix, key, err), file=stderr) continue if key == 'strict': if v: set_strict_flags() continue if key == 'silent_imports': print("%ssilent_imports has been replaced by " "ignore_missing_imports=True; follow_imports=skip" % prefix, file=stderr) if v: if 'ignore_missing_imports' not in results: results['ignore_missing_imports'] = True if 'follow_imports' not in results: results['follow_imports'] = 'skip' if key == 'almost_silent': print("%salmost_silent has been replaced by " "follow_imports=error" % prefix, file=stderr) if v: if 'follow_imports' not in results: results['follow_imports'] = 'error' results[options_key] = v return results, report_dirs def convert_to_boolean(value: Optional[Any]) -> bool: """Return a boolean value translating from other types if necessary.""" if isinstance(value, bool): return value if not isinstance(value, str): value = str(value) if value.lower() not in configparser.RawConfigParser.BOOLEAN_STATES: raise ValueError('Not a boolean: %s' % value) return configparser.RawConfigParser.BOOLEAN_STATES[value.lower()] def split_directive(s: str) -> Tuple[List[str], List[str]]: """Split s on commas, except during quoted sections. Returns the parts and a list of error messages.""" parts = [] cur: List[str] = [] errors = [] i = 0 while i < len(s): if s[i] == ',': parts.append(''.join(cur).strip()) cur = [] elif s[i] == '"': i += 1 while i < len(s) and s[i] != '"': cur.append(s[i]) i += 1 if i == len(s): errors.append("Unterminated quote in configuration comment") cur.clear() else: cur.append(s[i]) i += 1 if cur: parts.append(''.join(cur).strip()) return parts, errors def mypy_comments_to_config_map(line: str, template: Options) -> Tuple[Dict[str, str], List[str]]: """Rewrite the mypy comment syntax into ini file syntax. 
Returns """ options = {} entries, errors = split_directive(line) for entry in entries: if '=' not in entry: name = entry value = None else: name, value = [x.strip() for x in entry.split('=', 1)] name = name.replace('-', '_') if value is None: value = 'True' options[name] = value return options, errors def parse_mypy_comments( args: List[Tuple[int, str]], template: Options) -> Tuple[Dict[str, object], List[Tuple[int, str]]]: """Parse a collection of inline mypy: configuration comments. Returns a dictionary of options to be applied and a list of error messages generated. """ errors: List[Tuple[int, str]] = [] sections = {} for lineno, line in args: # In order to easily match the behavior for bools, we abuse configparser. # Oddly, the only way to get the SectionProxy object with the getboolean # method is to create a config parser. parser = configparser.RawConfigParser() options, parse_errors = mypy_comments_to_config_map(line, template) parser['dummy'] = options errors.extend((lineno, x) for x in parse_errors) stderr = StringIO() strict_found = False def set_strict_flags() -> None: nonlocal strict_found strict_found = True new_sections, reports = parse_section( '', template, set_strict_flags, parser['dummy'], ini_config_types, stderr=stderr) errors.extend((lineno, x) for x in stderr.getvalue().strip().split('\n') if x) if reports: errors.append((lineno, "Reports not supported in inline configuration")) if strict_found: errors.append((lineno, 'Setting "strict" not supported in inline configuration: specify it in ' 'a configuration file instead, or set individual inline flags ' '(see "mypy -h" for the list of flags enabled in strict mode)')) sections.update(new_sections) return sections, errors def get_config_module_names(filename: Optional[str], modules: List[str]) -> str: if not filename or not modules: return '' if not is_toml(filename): return ", ".join("[mypy-%s]" % module for module in modules) return "module = ['%s']" % ("', '".join(sorted(modules))) class ConfigTOMLValueError(ValueError): pass
[]
[]
[ "MYPY_CONFIG_FILE_DIR" ]
[]
["MYPY_CONFIG_FILE_DIR"]
python
1
0
app.py
from flask import Flask, render_template, request, make_response, jsonify, redirect, url_for import base64, os from tempfile import NamedTemporaryFile import uuid from copy import copy from EMDMeasurment.ComparisonMethods import produce_visualizations_from_event_logs_paths from flask_cors import CORS import json import traceback app = Flask(__name__) CORS(app, expose_headers=["x-suggested-filename"]) logs_dictio = {} @app.route('/') def empty_path(): return redirect(url_for('upload_page')) @app.route('/index.html') def index(): return redirect(url_for('upload_page')) @app.route("/comparison.html") def comparison_page(): return render_template("comparison.html") @app.route("/upload.html") def upload_page(): return render_template("upload.html") @app.route("/visualizationsService", methods=["GET"]) def visualizationsService(): uid1 = request.args.get("uuid1") uid2 = request.args.get("uuid2") if uid1 is None: uid1 = "log1" if uid2 is None: uid2 = "log2" log_path1 = logs_dictio[uid1] log_path2 = logs_dictio[uid2] resp = produce_visualizations_from_event_logs_paths(log_path1, log_path2) return jsonify(resp) @app.route("/uploadService", methods=["POST"]) def upload(): uuids = [] for file in request.files: tmp_file = NamedTemporaryFile() tmp_file.close() fo = request.files[file] fo.save(tmp_file.name) this_uuid = str(uuid.uuid4()) logs_dictio[this_uuid] = tmp_file.name uuids.append(this_uuid) return {"uuid1": uuids[0], "uuid2": uuids[1]} logs_dictio["log1"] = os.path.join("SampleEventLogs&SimulatedER2021", "running-example.xes") logs_dictio["log2"] = os.path.join("SampleEventLogs&SimulatedER2021", "Running-example-simulated.csv") port = os.environ.get("PORT") if port is None: port = "80" port = int(port) if __name__ == "__main__": if not os.path.exists(os.path.join("static", "temp")): os.mkdir(os.path.join("static", "temp")) app.run(port=port, threaded=True)
[]
[]
[ "PORT" ]
[]
["PORT"]
python
1
0
CosmAna/Ext_C/libfftw/setup.py
#!/usr/bin/env python # coding=utf-8 import os from distutils.core import setup from distutils.extension import Extension from Cython.Distutils import build_ext import numpy os.environ["CC"] = 'mpicc' # set CC compiler os.environ["LDSHARED"] = 'mpicc -shared' # set linker_so #============================ Extension C ======================================= FFTW_INCL = '/home/mtx/local/fftw-3.3.5/include' FFTW_LIBS = '/home/mtx/local/fftw-3.3.5/lib' MPI_INCL = '/home/mtx/local/mpich-3.2/include' INCL = [] INCL.append(FFTW_INCL) INCL.append(MPI_INCL) INCL.append(numpy.get_include()) ext_modules = [] ext_modules.append( Extension("libfftw", sources=["libfftw.pyx", "pyfftwf.c", "pyfftwd.c"], include_dirs=INCL, library_dirs=[FFTW_LIBS], libraries=['fftw3f_mpi', 'fftw3f', 'fftw3_mpi', 'fftw3'], ) ) setup( cmdclass = {'build_ext': build_ext}, ext_modules = ext_modules, )
[]
[]
[ "LDSHARED", "CC" ]
[]
["LDSHARED", "CC"]
python
2
0
heliosburn/django/hbproject/create_db_model.py
import os import dotenv from configurations import importer import pymongo from datetime import datetime dotenv.read_dotenv() os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hbproject.settings') os.environ.setdefault('DJANGO_CONFIGURATION', 'Development') importer.install() from hbproject import settings as s def main(): dbc = pymongo.Connection() for db in s.MONGODB_DATABASE.keys(): print("Dropping database '%s': %s" % (db, s.MONGODB_DATABASE[db])) dbc.drop_database(s.MONGODB_DATABASE[db]) # Initial roles roles = [ {"name": "admin"}, ] # Initial users import hashlib hasher1 = hashlib.sha512() hasher1.update("admin") admin_hash = hasher1.hexdigest() hasher2 = hashlib.sha512() hasher2.update("test1") test1_hash = hasher2.hexdigest() users = [ { "username": "admin", "password": admin_hash, "email": "admin@local", "roles": ["admin"], "createdAt": datetime.isoformat(datetime.now()), "updatedAt": datetime.isoformat(datetime.now()), }, { "username": "test1", "password": test1_hash, "email": "test1@local", "roles": ["standard"], "createdAt": datetime.isoformat(datetime.now()), "updatedAt": datetime.isoformat(datetime.now()), }, ] # Initial roles roles = [ {"name": "admin"}, {"name": "standard"}, ] for db in s.MONGODB_DATABASE.keys(): current_db = dbc[s.MONGODB_DATABASE[db]] print("Creating users in db '%s': %s" % (db, s.MONGODB_DATABASE[db])) for user in users: current_db.hbuser.save(user) print("Creating roles in db '%s': %s" % (db, s.MONGODB_DATABASE[db])) for role in roles: current_db.role.save(role) print("Creating indexes in db '%s': %s" % (db, s.MONGODB_DATABASE[db])) current_db.hbuser.ensure_index('username', unique=True) current_db.testplan.ensure_index('name', unique=True) current_db.session.ensure_index('name', unique=True) current_db.template.ensure_index('name', unique=True) current_db.traffic.ensure_index('recording_id') print("Creating capped collection 'log' in db '%s'" % s.MONGODB_DATABASE[db]) current_db.create_collection('log', capped=True, size=500000) print("Indexing log.timestamp in db '%s'" % s.MONGODB_DATABASE[db]) current_db.log.ensure_index('timestamp') if __name__ == '__main__': main()
[]
[]
[]
[]
[]
python
0
0
yatranepal/migrations/0019_testimonial.py
# Generated by Django 2.2.6 on 2020-01-23 07:09 from django.conf import settings from django.db import migrations, models import django.db.models.deletion import django_currentuser.db.models.fields import django_currentuser.middleware class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('yatranepal', '0018_auto_20200118_1514'), ] operations = [ migrations.CreateModel( name='Testimonial', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=500, verbose_name='Enter the Testimonial Title')), ('review', models.TextField(verbose_name='Your Message')), ('name', django_currentuser.db.models.fields.CurrentUserField(default=django_currentuser.middleware.get_current_authenticated_user, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
[]
[]
[]
[]
[]
python
null
null
null
examples/sync_bench.py
#!/usr/bin/env python from __future__ import print_function import os import randopt as ro import torch as th import torch.distributed as dist from torch.multiprocessing import Process from time import time from drl.utils import get_setup, parse_args from drl.training import train, test def sync(tensors): size = float(dist.get_world_size()) for t in tensors: dist.all_reduce(t.data) t.data /= size def sync_update(args, env, agent, opt): opt.zero_grad() update = agent.get_update() sync(update) agent.set_gradients(update) opt.step() def run(rank, size): is_root = (rank == 0) args, env, agent, opt = get_setup(seed_offset=rank) exp = ro.Experiment(args.env + '-dev-sync', params={}) sync(list(agent.parameters())) train_rewards = train(args, env, agent, opt, sync_update, verbose=is_root) if is_root: test_rewards = test(args, env, agent) data = {p: getattr(args, p) for p in vars(args)} data['train_rewards'] = train_rewards data['test_rewards'] = test_rewards data['timestamp'] = time() exp.add_result(result=sum(test_rewards) / len(test_rewards), data=data) def init_processes(rank, size, fn, backend='tcp'): """ Initialize the distributed environment. """ os.environ['MASTER_ADDR'] = '127.0.0.1' os.environ['MASTER_PORT'] = '29500' th.set_num_threads(1) dist.init_process_group(backend, rank=rank, world_size=size) fn(rank, size) if __name__ == "__main__": args = parse_args() size = args.n_proc processes = [] for rank in range(size): p = Process(target=init_processes, args=(rank, size, run)) p.start() processes.append(p) for p in processes: p.join()
[]
[]
[ "MASTER_ADDR", "MASTER_PORT" ]
[]
["MASTER_ADDR", "MASTER_PORT"]
python
2
0
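
In sync_bench.py above, MASTER_ADDR and MASTER_PORT are assigned through literal os.environ keys, so both names surface in the list with a count of 2, while the second count stays 0. The snippet below illustrates the access pattern that the zero count appears to correspond to: the key travels through a variable rather than a string literal, so no constant key is found. SNIPPET and constant_keys are names invented for this sketch.

import ast

SNIPPET = "import os\nkey = 'MASTER_ADDR'\nvalue = os.environ.get(key)\n"

constant_keys = [
    node.args[0].value
    for node in ast.walk(ast.parse(SNIPPET))
    if isinstance(node, ast.Call)
    and isinstance(node.func, ast.Attribute)
    and node.func.attr == "get"
    and node.args
    and isinstance(node.args[0], ast.Constant)
]
print(constant_keys)  # [] -- the argument is a Name node, not a string constant
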
src/CExpFQC.py
''' Name: CExpFQC Desriptption: Full power with quantum counterpart sRNN Email: [email protected] OpenSource: https://github.com/yesunhuang Msg: Experiment One Author: YesunHuang Date: 2022-04-17 20:40:50 ''' #import all the things we need import os os.environ['KMP_DUPLICATE_LIB_OK']='True' import matplotlib.pyplot as plt import torch def transform(Xs): return [torch.squeeze(x) for x in Xs] #Some constants GENERATE_DATA=False TRAIN_NETWORK=True SAVE_NETWORK=True LOAD_NETWORK=False PREDICTION_TEST=True if __name__=='__main__': from DataGenerator.HenonMapDataGen import HenonMapDataGen from ClassicalModels.ClassicalSRNNs import ClassicalSRNN,SuportFunction from GradientFreeOptimizers.CostFunc import GradFreeMSELoss from GradientFreeOptimizers.Optimizers import MCSOptimizer import GradientFreeOptimizers.Helpers as hp #Save path: if __name__=='__main__': currentPath=os.getcwd() dataSavepath=os.path.join(currentPath,'data','HenonMap','Exp') netSavepath=os.path.join(currentPath,'TrainedNet','Exp') if __name__=='__main__': # Data Iter ## Parameters testSetRatio=0.2 numStep=10 batchSize=16 filename='QExp1.csv' ## Generate Data if GENERATE_DATA and __name__=='__main__': hmap=HenonMapDataGen(savepath=dataSavepath) hmap(1000) hmap.save_to_CSV(filename) if __name__=='__main__': ## Read the data hmap=HenonMapDataGen(savepath=dataSavepath) hmap.read_from_CSV(filename) ## Get the Iter trainIter,testIter=hmap.get_data_iter(testSetRatio,numStep,batchSize,mask=0,shuffle=False) ## Print information if __name__=='__main__': print(hmap) X,Y=next(iter(trainIter)) print('Train Data Size:',len(trainIter)) X,Y=next(iter(testIter)) print('Test Data Size:',len(testIter)) # Load the network if LOAD_NETWORK and __name__=='__main__': filename='CExpFQC.pt' netData=torch.load(os.path.join(netSavepath,filename)) inputSize=netData['inputSize'] outputSize=netData['outputSize'] hiddenSize=netData['hiddenSize'] inputRatio=netData['inputRatio'] outputRatio=netData['outputRatio'] initValue=netData['initValue'] inactive=netData['inactive'] rescale=netData['rescale'] isTypical=netData['isTypical'] elif __name__=='__main__': # Model ## Parameters inputSize=outputSize=1 hiddenSize=2 initValue=1.0 inputRatio=outputRatio=1.0 rescale=1.0 inactive=[] isTypical=False if __name__=='__main__': ## print parameters print('Input Ratio:',inputRatio) print('Output Ratio:',outputRatio) if __name__=='__main__': ## Get neccesary functions srnnTestSup=SuportFunction() #transform=lambda Xs:[torch.squeeze(x) for x in Xs] init_rnn_state=srnnTestSup.get_init_state_fun(initStateValue=initValue) get_params=srnnTestSup.get_get_params_fun(inputRatio=inputRatio,\ outputRatio=outputRatio,\ rescale=rescale,\ inactive=inactive) rnn=srnnTestSup.get_forward_fn_fun(isTypical=isTypical) predict_fun=srnnTestSup.get_predict_fun(outputTransoform=transform) net=ClassicalSRNN(inputSize,hiddenSize,outputSize,get_params,init_rnn_state,rnn) if LOAD_NETWORK and __name__=='__main__': net.params=netData['NetParams'] net.constants=netData['NetConstants'] ## Test prediction if __name__=='__main__': state=net.begin_state(batchSize) Y,newState=net(X,state) print(Y.shape, len(newState), newState[0][0].shape) if not LOAD_NETWORK and not TRAIN_NETWORK: print('The network is not trained, are you sure to move on?') # Train the network if TRAIN_NETWORK and __name__=='__main__': ## Parameters if LOAD_NETWORK: print('Are you sure to train the trained network?') num_epochs=netData['OptimizerConstant']['num_epochs'] maxLevyStepSize=netData['OptimizerConstant']['maxLevyStepSize'] 
regular=netData['OptimizerConstant']['regular'] nestNum=netData['OptimizerConstant']['nestNum'] else: num_epochs= 300 maxLevyStepSize=[0.3]*5 regular=None nestNum=40 step_epochs=5 ## Initial loss if __name__=='__main__': ## Loss function lossFunc=GradFreeMSELoss(net) if LOAD_NETWORK: l_epochs=netData['Loss'] print(f'Saved Train Loss: {l_epochs[-1][0]:f}') print(f'Saved Test Loss: {l_epochs[-1][1]:f}') else: l_epochs=[] timer=hp.Timer() train_l=SuportFunction.evaluate_accuracy(net,trainIter,lossFunc,False) t1=timer.stop() timer.start() test_l=SuportFunction.evaluate_accuracy(net,testIter,lossFunc,False) t2=timer.stop() l_epochs.append([train_l,test_l]) print(f'Initial Train Loss: {train_l:f}, Time Cost: {t1:f}s') print(f'Initial Test Loss: {test_l:f}, Time Cost: {t2:f}s') ## Training if TRAIN_NETWORK and __name__=='__main__': ## Optimizer mcs=MCSOptimizer(net.params,lossFunc,trainIter,nestNum=nestNum,\ maxLevyStepSize=maxLevyStepSize,regular=regular,\ randInit=True,epochToGeneration=lambda x:max(int(x/100),1)) ## prediction predict = lambda prefix: predict_fun(prefix,net, numPreds=9) ## train and predict timer=hp.Timer() for epoch in range(num_epochs): trainLoss, _=mcs.step() testLoss=SuportFunction.evaluate_accuracy(net, testIter, lossFunc, False) if (epoch + 1) % step_epochs == 0: timeEpoch=timer.stop() print(f'Epoch [{epoch+1}/{num_epochs}], Train Loss: {trainLoss:.4f}, Test Loss: {testLoss:.4f},\ Time: {timeEpoch:.4f}s') timer.start() l_epochs.append([trainLoss,testLoss]) #scheduler.step() testLoss=SuportFunction.evaluate_accuracy(net, testIter, lossFunc, False) print(f'TestLoss {testLoss:f}') ## Save the network if SAVE_NETWORK and __name__=='__main__': ## Parameters filename='CExpFQC.pt' OptimizerConstant={'num_epochs':num_epochs,'maxLevyStepSize':maxLevyStepSize,\ 'nestNum':nestNum} netData={'NetParams':net.params,'NetConstants':net.constants,\ 'inputSize':inputSize,'hiddenSize':hiddenSize,'outputSize':outputSize,\ 'inputRatio':inputRatio,'outputRatio':outputRatio,'initValue':initValue,\ 'inactive':inactive,'rescale':rescale,'isTypical':isTypical,\ 'Loss':l_epochs,'OptimizerConstant':OptimizerConstant} torch.save(netData,os.path.join(netSavepath,filename)) if PREDICTION_TEST and __name__=='__main__': # Prediction ## One-step prediction X,Y=next(iter(testIter)) state=net.begin_state(batchSize) Y_hat,newState=net(X,state) Y=Y.transpose(0,1).reshape([-1,Y.shape[-1]]) axes,fig=plt.subplots(1,1,figsize=(4,3)) plt.title('One-Step Prediction') plt.plot(torch.linspace(1,Y.numel(),Y.numel()),torch.squeeze(Y),label='Y') plt.plot(torch.linspace(1,Y.numel(),Y.numel()),torch.squeeze(Y_hat).detach(),label=r'$\hat{Y}$') plt.legend() plt.show() ## Multi Step Prediction prefixSize=10 totalSize=20 testShift=int(len(hmap)*(1-testSetRatio)) preX,preY=hmap.data_as_tensor preX,preY=torch.unsqueeze(preX[testShift:testShift+prefixSize],-1),torch.unsqueeze(preY[testShift:testShift+totalSize-1],-1) preY=[y for y in torch.cat((preX[:2],preY[1:]),dim=0)] preX=torch.unsqueeze(preX,-1) YHat=predict_fun(preX,net,numPreds=totalSize-prefixSize) axes,fig=plt.subplots(1,1,figsize=(4,3)) plt.title('Multi-Step Prediction') fig.set_ylim(-2,2) plt.plot(torch.linspace(1,len(preY),len(preY)),preY,label='Y') plt.plot(torch.linspace(1,len(preY),len(preY)),YHat,label=r'$\hat{Y}$') plt.vlines([prefixSize-1],ymin=-2,ymax=2,linestyles='dashed',label='Prediction') plt.legend() plt.show()
[]
[]
[ "KMP_DUPLICATE_LIB_OK" ]
[]
["KMP_DUPLICATE_LIB_OK"]
python
1
0
tests/testproj/settings.py
""" Settings for django-raster tests. """ from __future__ import unicode_literals import os SECRET_KEY = 'testkey' INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.postgres', 'django.contrib.gis', 'raster', ) DATABASES = { 'default': { 'ENGINE': 'django.contrib.gis.db.backends.postgis', 'USER': os.environ.get('DB_USER', 'postgres'), 'HOST': os.environ.get('DB_HOST', 'localhost'), 'NAME': os.environ.get('DB_NAME', 'postgres'), 'PASSWORD': os.environ.get('DB_PASSWORD', ''), 'PORT': os.environ.get('DB_PORT', '5432') } } MIDDLEWARE = [ 'django.contrib.sessions.middleware.SessionMiddleware', ] TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.contrib.auth.context_processors.auth', ], }, }, ] ROOT_URLCONF = 'raster.urls' RASTER_USE_CELERY = True CELERY_TASK_ALWAYS_EAGER = True CELERY_TASK_EAGER_PROPAGATES = True
[]
[]
[ "DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "DB_USER" ]
[]
["DB_PASSWORD", "DB_HOST", "DB_PORT", "DB_NAME", "DB_USER"]
python
5
0
SmartFoxServer_PRO_1.6.6/Server/lib/Lib/ftplib.py
"""An FTP client class and some helper functions. Based on RFC 959: File Transfer Protocol (FTP), by J. Postel and J. Reynolds Example: >>> from ftplib import FTP >>> ftp = FTP('ftp.python.org') # connect to host, default port >>> ftp.login() # default, i.e.: user anonymous, passwd anonymous@ '230 Guest login ok, access restrictions apply.' >>> ftp.retrlines('LIST') # list directory contents total 9 drwxr-xr-x 8 root wheel 1024 Jan 3 1994 . drwxr-xr-x 8 root wheel 1024 Jan 3 1994 .. drwxr-xr-x 2 root wheel 1024 Jan 3 1994 bin drwxr-xr-x 2 root wheel 1024 Jan 3 1994 etc d-wxrwxr-x 2 ftp wheel 1024 Sep 5 13:43 incoming drwxr-xr-x 2 root wheel 1024 Nov 17 1993 lib drwxr-xr-x 6 1094 wheel 1024 Sep 13 19:07 pub drwxr-xr-x 3 root wheel 1024 Jan 3 1994 usr -rw-r--r-- 1 root root 312 Aug 1 1994 welcome.msg '226 Transfer complete.' >>> ftp.quit() '221 Goodbye.' >>> A nice test that reveals some of the network dialogue would be: python ftplib.py -d localhost -l -p -l """ # # Changes and improvements suggested by Steve Majewski. # Modified by Jack to work on the mac. # Modified by Siebren to support docstrings and PASV. # import os import sys import string # Import SOCKS module if it exists, else standard socket module socket try: import SOCKS; socket = SOCKS; del SOCKS # import SOCKS as socket from socket import getfqdn; socket.getfqdn = getfqdn; del getfqdn except ImportError: import socket __all__ = ["FTP","Netrc"] # Magic number from <socket.h> MSG_OOB = 0x1 # Process data out of band # The standard FTP server control port FTP_PORT = 21 # Exception raised when an error or invalid response is received class Error(Exception): pass class error_reply(Error): pass # unexpected [123]xx reply class error_temp(Error): pass # 4xx errors class error_perm(Error): pass # 5xx errors class error_proto(Error): pass # response does not begin with [1-5] # All exceptions (hopefully) that may be raised here and that aren't # (always) programming errors on our side all_errors = (Error, socket.error, IOError, EOFError) # Line terminators (we always output CRLF, but accept any of CRLF, CR, LF) CRLF = '\r\n' # The class itself class FTP: '''An FTP client class. To create a connection, call the class using these argument: host, user, passwd, acct These are all strings, and have default value ''. Then use self.connect() with optional host and port argument. To download a file, use ftp.retrlines('RETR ' + filename), or ftp.retrbinary() with slightly different arguments. To upload a file, use ftp.storlines() or ftp.storbinary(), which have an open file as argument (see their definitions below for details). The download/upload functions first issue appropriate TYPE and PORT or PASV commands. ''' debugging = 0 host = '' port = FTP_PORT sock = None file = None welcome = None passiveserver = 1 # Initialization method (called by class instantiation). # Initialize host to localhost, port to standard ftp port # Optional arguments are host (for connect()), # and user, passwd, acct (for login()) def __init__(self, host='', user='', passwd='', acct=''): if host: self.connect(host) if user: self.login(user, passwd, acct) def connect(self, host = '', port = 0): '''Connect to host. 
Arguments are: - host: hostname to connect to (string, default previous host) - port: port to connect to (integer, default previous port)''' if host: self.host = host if port: self.port = port msg = "getaddrinfo returns an empty list" for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): af, socktype, proto, canonname, sa = res try: self.sock = socket.socket(af, socktype, proto) self.sock.connect(sa) except socket.error, msg: if self.sock: self.sock.close() self.sock = None continue break if not self.sock: raise socket.error, msg self.af = af self.file = self.sock.makefile('rb') self.welcome = self.getresp() return self.welcome def getwelcome(self): '''Get the welcome message from the server. (this is read and squirreled away by connect())''' if self.debugging: print '*welcome*', self.sanitize(self.welcome) return self.welcome def set_debuglevel(self, level): '''Set the debugging level. The required argument level means: 0: no debugging output (default) 1: print commands and responses but not body text etc. 2: also print raw lines read and sent before stripping CR/LF''' self.debugging = level debug = set_debuglevel def set_pasv(self, val): '''Use passive or active mode for data transfers. With a false argument, use the normal PORT mode, With a true argument, use the PASV command.''' self.passiveserver = val # Internal: "sanitize" a string for printing def sanitize(self, s): if s[:5] == 'pass ' or s[:5] == 'PASS ': i = len(s) while i > 5 and s[i-1] in '\r\n': i = i-1 s = s[:5] + '*'*(i-5) + s[i:] return `s` # Internal: send one line to the server, appending CRLF def putline(self, line): line = line + CRLF if self.debugging > 1: print '*put*', self.sanitize(line) self.sock.sendall(line) # Internal: send one command to the server (through putline()) def putcmd(self, line): if self.debugging: print '*cmd*', self.sanitize(line) self.putline(line) # Internal: return one line from the server, stripping CRLF. # Raise EOFError if the connection is closed def getline(self): line = self.file.readline() if self.debugging > 1: print '*get*', self.sanitize(line) if not line: raise EOFError if line[-2:] == CRLF: line = line[:-2] elif line[-1:] in CRLF: line = line[:-1] return line # Internal: get a response from the server, which may possibly # consist of multiple lines. Return a single string with no # trailing CRLF. If the response consists of multiple lines, # these are separated by '\n' characters in the string def getmultiline(self): line = self.getline() if line[3:4] == '-': code = line[:3] while 1: nextline = self.getline() line = line + ('\n' + nextline) if nextline[:3] == code and \ nextline[3:4] != '-': break return line # Internal: get a response from the server. # Raise various errors if the response indicates an error def getresp(self): resp = self.getmultiline() if self.debugging: print '*resp*', self.sanitize(resp) self.lastresp = resp[:3] c = resp[:1] if c == '4': raise error_temp, resp if c == '5': raise error_perm, resp if c not in '123': raise error_proto, resp return resp def voidresp(self): """Expect a response beginning with '2'.""" resp = self.getresp() if resp[0] != '2': raise error_reply, resp return resp def abort(self): '''Abort a file transfer. Uses out-of-band data. This does not follow the procedure from the RFC to send Telnet IP and Synch; that doesn't seem to work with the servers I've tried. 
Instead, just send the ABOR command as OOB data.''' line = 'ABOR' + CRLF if self.debugging > 1: print '*put urgent*', self.sanitize(line) self.sock.sendall(line, MSG_OOB) resp = self.getmultiline() if resp[:3] not in ('426', '226'): raise error_proto, resp def sendcmd(self, cmd): '''Send a command and return the response.''' self.putcmd(cmd) return self.getresp() def voidcmd(self, cmd): """Send a command and expect a response beginning with '2'.""" self.putcmd(cmd) return self.voidresp() def sendport(self, host, port): '''Send a PORT command with the current host and the given port number. ''' hbytes = host.split('.') pbytes = [`port/256`, `port%256`] bytes = hbytes + pbytes cmd = 'PORT ' + ','.join(bytes) return self.voidcmd(cmd) def sendeprt(self, host, port): '''Send a EPRT command with the current host and the given port number.''' af = 0 if self.af == socket.AF_INET: af = 1 if self.af == socket.AF_INET6: af = 2 if af == 0: raise error_proto, 'unsupported address family' fields = ['', `af`, host, `port`, ''] cmd = 'EPRT ' + string.joinfields(fields, '|') return self.voidcmd(cmd) def makeport(self): '''Create a new socket and send a PORT command for it.''' msg = "getaddrinfo returns an empty list" sock = None for res in socket.getaddrinfo(None, 0, self.af, socket.SOCK_STREAM, 0, socket.AI_PASSIVE): af, socktype, proto, canonname, sa = res try: sock = socket.socket(af, socktype, proto) sock.bind(sa) except socket.error, msg: if sock: sock.close() sock = None continue break if not sock: raise socket.error, msg sock.listen(1) port = sock.getsockname()[1] # Get proper port host = self.sock.getsockname()[0] # Get proper host if self.af == socket.AF_INET: resp = self.sendport(host, port) else: resp = self.sendeprt(host, port) return sock def makepasv(self): if self.af == socket.AF_INET: host, port = parse227(self.sendcmd('PASV')) else: host, port = parse229(self.sendcmd('EPSV'), self.sock.getpeername()) return host, port def ntransfercmd(self, cmd, rest=None): """Initiate a transfer over the data connection. If the transfer is active, send a port command and the transfer command, and accept the connection. If the server is passive, send a pasv command, connect to it, and start the transfer command. Either way, return the socket for the connection and the expected size of the transfer. The expected size may be None if it could not be determined. Optional `rest' argument can be a string that is sent as the argument to a RESTART command. This is essentially a server marker used to tell the server to skip over any data up to the given marker. 
""" size = None if self.passiveserver: host, port = self.makepasv() af, socktype, proto, canon, sa = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM)[0] conn = socket.socket(af, socktype, proto) conn.connect(sa) if rest is not None: self.sendcmd("REST %s" % rest) resp = self.sendcmd(cmd) if resp[0] != '1': raise error_reply, resp else: sock = self.makeport() if rest is not None: self.sendcmd("REST %s" % rest) resp = self.sendcmd(cmd) if resp[0] != '1': raise error_reply, resp conn, sockaddr = sock.accept() if resp[:3] == '150': # this is conditional in case we received a 125 size = parse150(resp) return conn, size def transfercmd(self, cmd, rest=None): """Like ntransfercmd() but returns only the socket.""" return self.ntransfercmd(cmd, rest)[0] def login(self, user = '', passwd = '', acct = ''): '''Login, default anonymous.''' if not user: user = 'anonymous' if not passwd: passwd = '' if not acct: acct = '' if user == 'anonymous' and passwd in ('', '-'): # If there is no anonymous ftp password specified # then we'll just use anonymous@ # We don't send any other thing because: # - We want to remain anonymous # - We want to stop SPAM # - We don't want to let ftp sites to discriminate by the user, # host or country. passwd = passwd + 'anonymous@' resp = self.sendcmd('USER ' + user) if resp[0] == '3': resp = self.sendcmd('PASS ' + passwd) if resp[0] == '3': resp = self.sendcmd('ACCT ' + acct) if resp[0] != '2': raise error_reply, resp return resp def retrbinary(self, cmd, callback, blocksize=8192, rest=None): """Retrieve data in binary mode. `cmd' is a RETR command. `callback' is a callback function is called for each block. No more than `blocksize' number of bytes will be read from the socket. Optional `rest' is passed to transfercmd(). A new port is created for you. Return the response code. """ self.voidcmd('TYPE I') conn = self.transfercmd(cmd, rest) while 1: data = conn.recv(blocksize) if not data: break callback(data) conn.close() return self.voidresp() def retrlines(self, cmd, callback = None): '''Retrieve data in line mode. The argument is a RETR or LIST command. The callback function (2nd argument) is called for each line, with trailing CRLF stripped. This creates a new port for you. print_line() is the default callback.''' if not callback: callback = print_line resp = self.sendcmd('TYPE A') conn = self.transfercmd(cmd) fp = conn.makefile('rb') while 1: line = fp.readline() if self.debugging > 2: print '*retr*', `line` if not line: break if line[-2:] == CRLF: line = line[:-2] elif line[-1:] == '\n': line = line[:-1] callback(line) fp.close() conn.close() return self.voidresp() def storbinary(self, cmd, fp, blocksize=8192): '''Store a file in binary mode.''' self.voidcmd('TYPE I') conn = self.transfercmd(cmd) while 1: buf = fp.read(blocksize) if not buf: break conn.sendall(buf) conn.close() return self.voidresp() def storlines(self, cmd, fp): '''Store a file in line mode.''' self.voidcmd('TYPE A') conn = self.transfercmd(cmd) while 1: buf = fp.readline() if not buf: break if buf[-2:] != CRLF: if buf[-1] in CRLF: buf = buf[:-1] buf = buf + CRLF conn.sendall(buf) conn.close() return self.voidresp() def acct(self, password): '''Send new account name.''' cmd = 'ACCT ' + password return self.voidcmd(cmd) def nlst(self, *args): '''Return a list of files in a given directory (default the current).''' cmd = 'NLST' for arg in args: cmd = cmd + (' ' + arg) files = [] self.retrlines(cmd, files.append) return files def dir(self, *args): '''List a directory in long form. 
By default list current directory to stdout. Optional last argument is callback function; all non-empty arguments before it are concatenated to the LIST command. (This *should* only be used for a pathname.)''' cmd = 'LIST' func = None if args[-1:] and type(args[-1]) != type(''): args, func = args[:-1], args[-1] for arg in args: if arg: cmd = cmd + (' ' + arg) self.retrlines(cmd, func) def rename(self, fromname, toname): '''Rename a file.''' resp = self.sendcmd('RNFR ' + fromname) if resp[0] != '3': raise error_reply, resp return self.voidcmd('RNTO ' + toname) def delete(self, filename): '''Delete a file.''' resp = self.sendcmd('DELE ' + filename) if resp[:3] in ('250', '200'): return resp elif resp[:1] == '5': raise error_perm, resp else: raise error_reply, resp def cwd(self, dirname): '''Change to a directory.''' if dirname == '..': try: return self.voidcmd('CDUP') except error_perm, msg: if msg.args[0][:3] != '500': raise elif dirname == '': dirname = '.' # does nothing, but could return error cmd = 'CWD ' + dirname return self.voidcmd(cmd) def size(self, filename): '''Retrieve the size of a file.''' # Note that the RFC doesn't say anything about 'SIZE' resp = self.sendcmd('SIZE ' + filename) if resp[:3] == '213': s = resp[3:].strip() try: return int(s) except (OverflowError, ValueError): return long(s) def mkd(self, dirname): '''Make a directory, return its full pathname.''' resp = self.sendcmd('MKD ' + dirname) return parse257(resp) def rmd(self, dirname): '''Remove a directory.''' return self.voidcmd('RMD ' + dirname) def pwd(self): '''Return current working directory.''' resp = self.sendcmd('PWD') return parse257(resp) def quit(self): '''Quit, and close the connection.''' resp = self.voidcmd('QUIT') self.close() return resp def close(self): '''Close the connection without assuming anything about it.''' if self.file: self.file.close() self.sock.close() self.file = self.sock = None _150_re = None def parse150(resp): '''Parse the '150' response for a RETR request. Returns the expected transfer size or None; size is not guaranteed to be present in the 150 message. ''' if resp[:3] != '150': raise error_reply, resp global _150_re if _150_re is None: import re _150_re = re.compile("150 .* \((\d+) bytes\)", re.IGNORECASE) m = _150_re.match(resp) if not m: return None s = m.group(1) try: return int(s) except (OverflowError, ValueError): return long(s) _227_re = None def parse227(resp): '''Parse the '227' response for a PASV request. Raises error_proto if it does not contain '(h1,h2,h3,h4,p1,p2)' Return ('host.addr.as.numbers', port#) tuple.''' if resp[:3] != '227': raise error_reply, resp global _227_re if _227_re is None: import re _227_re = re.compile(r'(\d+),(\d+),(\d+),(\d+),(\d+),(\d+)') m = _227_re.search(resp) if not m: raise error_proto, resp numbers = m.groups() host = '.'.join(numbers[:4]) port = (int(numbers[4]) << 8) + int(numbers[5]) return host, port def parse229(resp, peer): '''Parse the '229' response for a EPSV request. 
Raises error_proto if it does not contain '(|||port|)' Return ('host.addr.as.numbers', port#) tuple.''' if resp[:3] <> '229': raise error_reply, resp left = string.find(resp, '(') if left < 0: raise error_proto, resp right = string.find(resp, ')', left + 1) if right < 0: raise error_proto, resp # should contain '(|||port|)' if resp[left + 1] <> resp[right - 1]: raise error_proto, resp parts = string.split(resp[left + 1:right], resp[left+1]) if len(parts) <> 5: raise error_proto, resp host = peer[0] port = string.atoi(parts[3]) return host, port def parse257(resp): '''Parse the '257' response for a MKD or PWD request. This is a response to a MKD or PWD request: a directory name. Returns the directoryname in the 257 reply.''' if resp[:3] != '257': raise error_reply, resp if resp[3:5] != ' "': return '' # Not compliant to RFC 959, but UNIX ftpd does this dirname = '' i = 5 n = len(resp) while i < n: c = resp[i] i = i+1 if c == '"': if i >= n or resp[i] != '"': break i = i+1 dirname = dirname + c return dirname def print_line(line): '''Default retrlines callback to print a line.''' print line def ftpcp(source, sourcename, target, targetname = '', type = 'I'): '''Copy file from one FTP-instance to another.''' if not targetname: targetname = sourcename type = 'TYPE ' + type source.voidcmd(type) target.voidcmd(type) sourcehost, sourceport = parse227(source.sendcmd('PASV')) target.sendport(sourcehost, sourceport) # RFC 959: the user must "listen" [...] BEFORE sending the # transfer request. # So: STOR before RETR, because here the target is a "user". treply = target.sendcmd('STOR ' + targetname) if treply[:3] not in ('125', '150'): raise error_proto # RFC 959 sreply = source.sendcmd('RETR ' + sourcename) if sreply[:3] not in ('125', '150'): raise error_proto # RFC 959 source.voidresp() target.voidresp() class Netrc: """Class to parse & provide access to 'netrc' format files. See the netrc(4) man page for information on the file format. WARNING: This class is obsolete -- use module netrc instead. 
""" __defuser = None __defpasswd = None __defacct = None def __init__(self, filename=None): if not filename: if os.environ.has_key("HOME"): filename = os.path.join(os.environ["HOME"], ".netrc") else: raise IOError, \ "specify file to load or set $HOME" self.__hosts = {} self.__macros = {} fp = open(filename, "r") in_macro = 0 while 1: line = fp.readline() if not line: break if in_macro and line.strip(): macro_lines.append(line) continue elif in_macro: self.__macros[macro_name] = tuple(macro_lines) in_macro = 0 words = line.split() host = user = passwd = acct = None default = 0 i = 0 while i < len(words): w1 = words[i] if i+1 < len(words): w2 = words[i + 1] else: w2 = None if w1 == 'default': default = 1 elif w1 == 'machine' and w2: host = w2.lower() i = i + 1 elif w1 == 'login' and w2: user = w2 i = i + 1 elif w1 == 'password' and w2: passwd = w2 i = i + 1 elif w1 == 'account' and w2: acct = w2 i = i + 1 elif w1 == 'macdef' and w2: macro_name = w2 macro_lines = [] in_macro = 1 break i = i + 1 if default: self.__defuser = user or self.__defuser self.__defpasswd = passwd or self.__defpasswd self.__defacct = acct or self.__defacct if host: if self.__hosts.has_key(host): ouser, opasswd, oacct = \ self.__hosts[host] user = user or ouser passwd = passwd or opasswd acct = acct or oacct self.__hosts[host] = user, passwd, acct fp.close() def get_hosts(self): """Return a list of hosts mentioned in the .netrc file.""" return self.__hosts.keys() def get_account(self, host): """Returns login information for the named host. The return value is a triple containing userid, password, and the accounting field. """ host = host.lower() user = passwd = acct = None if self.__hosts.has_key(host): user, passwd, acct = self.__hosts[host] user = user or self.__defuser passwd = passwd or self.__defpasswd acct = acct or self.__defacct return user, passwd, acct def get_macros(self): """Return a list of all defined macro names.""" return self.__macros.keys() def get_macro(self, macro): """Return a sequence of lines which define a named macro.""" return self.__macros[macro] def test(): '''Test program. Usage: ftp [-d] [-r[file]] host [-l[dir]] [-d[dir]] [-p] [file] ...''' debugging = 0 rcfile = None while sys.argv[1] == '-d': debugging = debugging+1 del sys.argv[1] if sys.argv[1][:2] == '-r': # get name of alternate ~/.netrc file: rcfile = sys.argv[1][2:] del sys.argv[1] host = sys.argv[1] ftp = FTP(host) ftp.set_debuglevel(debugging) userid = passwd = acct = '' try: netrc = Netrc(rcfile) except IOError: if rcfile is not None: sys.stderr.write("Could not open account file" " -- using anonymous login.") else: try: userid, passwd, acct = netrc.get_account(host) except KeyError: # no account for host sys.stderr.write( "No account -- using anonymous login.") ftp.login(userid, passwd, acct) for file in sys.argv[2:]: if file[:2] == '-l': ftp.dir(file[2:]) elif file[:2] == '-d': cmd = 'CWD' if file[2:]: cmd = cmd + ' ' + file[2:] resp = ftp.sendcmd(cmd) elif file == '-p': ftp.set_pasv(not ftp.passiveserver) else: ftp.retrbinary('RETR ' + file, \ sys.stdout.write, 1024) ftp.quit() if __name__ == '__main__': test()
[]
[]
[ "HOME" ]
[]
["HOME"]
python
1
0
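The Netrc class in the ftplib row above falls back to the HOME environment variable to locate ~/.netrc, which is the single constant-argument lookup recorded for this row. A minimal Go sketch of the same fallback, assuming only the standard library; resolveNetrcPath and the error text are illustrative and not part of the row's code.

package main

import (
    "errors"
    "fmt"
    "os"
    "path/filepath"
)

// resolveNetrcPath mirrors the Netrc constructor above: use an explicit
// filename when given, otherwise fall back to $HOME/.netrc, and fail if
// neither is available.
func resolveNetrcPath(filename string) (string, error) {
    if filename != "" {
        return filename, nil
    }
    home := os.Getenv("HOME")
    if home == "" {
        return "", errors.New("specify file to load or set $HOME")
    }
    return filepath.Join(home, ".netrc"), nil
}

func main() {
    path, err := resolveNetrcPath("")
    if err != nil {
        fmt.Println("error:", err)
        return
    }
    fmt.Println("would read credentials from", path)
}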
providers/nextcloud/nextcloud_test.go
package nextcloud_test

import (
    "os"
    "testing"

    "github.com/floatingghost/goth"
    "github.com/floatingghost/goth/providers/nextcloud"
    "github.com/stretchr/testify/assert"
)

func Test_New(t *testing.T) {
    t.Parallel()
    a := assert.New(t)
    p := provider()

    a.Equal(p.ClientKey, os.Getenv("NEXTCLOUD_KEY"))
    a.Equal(p.Secret, os.Getenv("NEXTCLOUD_SECRET"))
    a.Equal(p.CallbackURL, "/foo")
}

func Test_NewCustomisedURL(t *testing.T) {
    t.Parallel()
    a := assert.New(t)
    p := urlCustomisedURLProvider()
    session, err := p.BeginAuth("test_state")
    s := session.(*nextcloud.Session)
    a.NoError(err)
    a.Contains(s.AuthURL, "http://authURL")
}

func Test_Implements_Provider(t *testing.T) {
    t.Parallel()
    a := assert.New(t)
    a.Implements((*goth.Provider)(nil), provider())
}

func Test_BeginAuth(t *testing.T) {
    t.Parallel()
    a := assert.New(t)
    p := provider()
    session, err := p.BeginAuth("test_state")
    s := session.(*nextcloud.Session)
    a.NoError(err)
    a.Contains(s.AuthURL, "/apps/oauth2/authorize?client_id=")
}

func Test_SessionFromJSON(t *testing.T) {
    t.Parallel()
    a := assert.New(t)

    p := provider()
    session, err := p.UnmarshalSession(`{"AuthURL":"https://nextcloud.com/oauth/authorize","AccessToken":"1234567890"}`)
    a.NoError(err)

    s := session.(*nextcloud.Session)
    a.Equal(s.AuthURL, "https://nextcloud.com/oauth/authorize")
    a.Equal(s.AccessToken, "1234567890")
}

func provider() *nextcloud.Provider {
    return nextcloud.NewCustomisedDNS(
        os.Getenv("NEXTCLOUD_KEY"),
        os.Getenv("NEXTCLOUD_SECRET"),
        "/foo",
        os.Getenv("NEXTCLOUD_DNS"),
    )
}

func urlCustomisedURLProvider() *nextcloud.Provider {
    return nextcloud.NewCustomisedURL(os.Getenv("NEXTCLOUD_KEY"),
        os.Getenv("NEXTCLOUD_SECRET"),
        "/foo",
        "http://authURL",
        "http://tokenURL",
        "http://profileURL")
}
[ "\"NEXTCLOUD_KEY\"", "\"NEXTCLOUD_SECRET\"", "\"NEXTCLOUD_KEY\"", "\"NEXTCLOUD_SECRET\"", "\"NEXTCLOUD_DNS\"", "\"NEXTCLOUD_KEY\"", "\"NEXTCLOUD_SECRET\"" ]
[]
[ "NEXTCLOUD_SECRET", "NEXTCLOUD_DNS", "NEXTCLOUD_KEY" ]
[]
["NEXTCLOUD_SECRET", "NEXTCLOUD_DNS", "NEXTCLOUD_KEY"]
go
3
0
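The tests in this row compare provider fields against whatever NEXTCLOUD_KEY, NEXTCLOUD_SECRET, and NEXTCLOUD_DNS happen to hold in the environment. A minimal sketch of pinning those values inside a test that would sit alongside the tests above, assuming Go 1.17+ for t.Setenv; the TestEnvFixture name and the placeholder values are illustrative and not part of the row's test suite.

package nextcloud_test

import (
    "os"
    "testing"
)

// TestEnvFixture shows one way to pin the three variables the provider()
// helper above reads, so assertions compare against known values instead
// of whatever happens to be exported in the shell.
func TestEnvFixture(t *testing.T) {
    t.Setenv("NEXTCLOUD_KEY", "test-key")
    t.Setenv("NEXTCLOUD_SECRET", "test-secret")
    t.Setenv("NEXTCLOUD_DNS", "https://cloud.example.test")

    if os.Getenv("NEXTCLOUD_KEY") != "test-key" {
        t.Fatal("environment fixture was not applied")
    }
}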
magefile.go
// Licensed to Elasticsearch B.V. under one or more contributor // license agreements. See the NOTICE file distributed with // this work for additional information regarding copyright // ownership. Elasticsearch B.V. licenses this file to you under // the Apache License, Version 2.0 (the "License"); you may // not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, // software distributed under the License is distributed on an // "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, either express or implied. See the License for the // specific language governing permissions and limitations // under the License. //go:build mage // +build mage package main import ( "context" "fmt" "log" "os" "path/filepath" "strings" "time" "github.com/magefile/mage/mg" "github.com/elastic/beats/v7/dev-tools/mage" devtools "github.com/elastic/beats/v7/dev-tools/mage" cloudbeat "github.com/elastic/cloudbeat/scripts/mage" // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/pkg" // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/unittest" // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/integtest/notests" // mage:import _ "github.com/elastic/beats/v7/dev-tools/mage/target/test" "github.com/elastic/beats/v7/dev-tools/mage/gotool" ) func init() { repo, err := devtools.GetProjectRepoInfo() if err != nil { panic(err) } devtools.BeatDescription = "Cloudbeat collects cloud compliance data and sends findings to ElasticSearch" devtools.BeatLicense = "Elastic License" devtools.SetBuildVariableSources(&devtools.BuildVariableSources{ BeatVersion: filepath.Join(repo.RootDir, "cmd/version.go"), GoVersion: filepath.Join(repo.RootDir, ".go-version"), DocBranch: filepath.Join(repo.RootDir, "docs/version.asciidoc"), }) } // Check formats code, updates generated content, check for common errors, and // checks for any modified files. func Check() error { return devtools.Check() } // Build builds the Beat binary. func Build() error { return devtools.Build(devtools.DefaultBuildArgs()) } // Clean cleans all generated files and build artifacts. func Clean() error { return devtools.Clean() } // Update updates the generated files (aka make update). // GolangCrossBuild build the Beat binary inside of the golang-builder. // Do not use directly, use crossBuild instead. func GolangCrossBuild() error { return devtools.GolangCrossBuild(devtools.DefaultGolangCrossBuildArgs()) } // BuildGoDaemon builds the go-daemon binary (use crossBuildGoDaemon). func BuildGoDaemon() error { return devtools.BuildGoDaemon() } // CrossBuild cross-builds the beat for all target platforms. func CrossBuild() error { return devtools.CrossBuild() } // CrossBuildGoDaemon cross-builds the go-daemon binary using Docker. func CrossBuildGoDaemon() error { return devtools.CrossBuildGoDaemon() } // Run UnitTests func GoTestUnit(ctx context.Context) error { return devtools.GoTest(ctx, devtools.DefaultGoTestUnitArgs()) } // Package packages the Beat for distribution. // Use SNAPSHOT=true to build snapshots. // Use PLATFORMS to control the target platforms. // Use VERSION_QUALIFIER to control the version qualifier. 
func Package() { start := time.Now() defer func() { fmt.Println("package ran for", time.Since(start)) }() devtools.UseElasticBeatXPackPackaging() cloudbeat.CustomizePackaging() if packageTypes := os.Getenv("TYPES"); packageTypes != "" { filterPackages(packageTypes) } mg.Deps(Update) mg.Deps(CrossBuild, CrossBuildGoDaemon) mg.SerialDeps(devtools.Package) } func keepPackages(types []string) map[devtools.PackageType]struct{} { keep := make(map[devtools.PackageType]struct{}) for _, t := range types { var pt devtools.PackageType if err := pt.UnmarshalText([]byte(t)); err != nil { log.Printf("skipped filtering package type %s", t) continue } keep[pt] = struct{}{} } return keep } func filterPackages(types string) { var packages []devtools.OSPackageArgs keep := keepPackages(strings.Split(types, " ")) for _, p := range devtools.Packages { for _, t := range p.Types { if _, ok := keep[t]; !ok { continue } packages = append(packages, p) break } } devtools.Packages = packages } // TestPackages tests the generated packages (i.e. file modes, owners, groups). func TestPackages() error { return devtools.TestPackages() } // Fmt formats code and adds license headers. func Fmt() { mg.Deps(devtools.GoImports, devtools.PythonAutopep8) mg.Deps(AddLicenseHeaders) } // AddLicenseHeaders adds ASL2 headers to .go files outside of x-pack and // add Elastic headers to .go files in x-pack. func AddLicenseHeaders() error { fmt.Println(">> fmt - go-licenser: Adding missing headers") mg.Deps(devtools.InstallGoLicenser) licenser := gotool.Licenser return licenser( licenser.License("ASL2"), licenser.Exclude("x-pack"), ) } // CheckLicenseHeaders checks ASL2 headers in .go files outside of x-pack and // checks Elastic headers in .go files in x-pack. func CheckLicenseHeaders() error { fmt.Println(">> fmt - go-licenser: Checking for missing headers") mg.Deps(devtools.InstallGoLicenser) licenser := gotool.Licenser return licenser( licenser.Check(), licenser.License("ASL2"), ) } func Update() { mg.Deps(cloudbeat.Update.All) } // Fields generates a fields.yml for the Beat. func Fields() { mg.Deps(cloudbeat.Update.Fields) } // Config generates both the short/reference/docker configs. func Config() { mg.Deps(cloudbeat.Update.Config) } // PythonEnv ensures the Python venv is up-to-date with the beats requrements.txt. func PythonEnv() error { _, err := mage.PythonVirtualenv() return err }
[ "\"TYPES\"" ]
[]
[ "TYPES" ]
[]
["TYPES"]
go
1
0
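Package() in this row filters the build's package list by the space-separated TYPES variable via keepPackages and filterPackages. A simplified standalone sketch of that filtering, using strings.Fields in place of a plain space split; the package-type names and the keep helper are assumptions for illustration only.

package main

import (
    "fmt"
    "os"
    "strings"
)

// keep builds a set from the space-separated TYPES value, mirroring the
// keepPackages/filterPackages pair in the magefile above.
func keep(types string) map[string]struct{} {
    set := make(map[string]struct{})
    for _, t := range strings.Fields(types) {
        set[t] = struct{}{}
    }
    return set
}

func main() {
    all := []string{"deb", "rpm", "tar.gz", "docker"}

    // TYPES is optional: when unset, no filtering happens, as in Package().
    spec := os.Getenv("TYPES")
    if spec == "" {
        fmt.Println("no TYPES set, keeping all:", all)
        return
    }

    want := keep(spec)
    var filtered []string
    for _, p := range all {
        if _, ok := want[p]; ok {
            filtered = append(filtered, p)
        }
    }
    fmt.Println("keeping:", filtered)
}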
main/main.go
package main

import (
    fmt "fmt"
    log "log"
    os "os"
    models "wave-messaging-management-service/models"
    router "wave-messaging-management-service/router"
)

var (
    // TODO: Change these to be fetched automatically with Kubernetes Secrets

    // MongoDBHost : MongoDB Host
    MongoDBHost = "localhost"
    // MongoDBPort : MongoDB Port
    MongoDBPort = 27017
    // MongoDBUsername : MongoDB Username
    MongoDBUsername = "wave-user"
    // MongoDBPassword : MongoDB Password
    MongoDBPassword = "example"
    // MongoDBName : MongoDB Database Name
    MongoDBName = "waveDB"
    // MongoDBURL : MongoDB Connection URL
    MongoDBURL = fmt.Sprintf("mongodb://%s:%s@%s:%d/%s", MongoDBUsername, MongoDBPassword, MongoDBHost, MongoDBPort, MongoDBName)

    // RedisHost : Redis Host
    RedisHost = "localhost"
    // RedisPort : Redis Port
    RedisPort = 6379
    // RedisPassword : Redis Password
    RedisPassword = "example"
    // RedisURL : Redis Connection URL
    RedisURL = fmt.Sprintf("redis://%s:%d", RedisHost, RedisPort)
)

func main() {
    if os.Getenv("WAVE_CONFIG_FILE_PATH") == "" {
        log.Fatalf("WAVE_CONFIG_FILE_PATH environment variable must be set!")
    }

    // Get MongoDB communication interface
    // If an error occurs, program is set to panic
    mongoDB := models.NewMongoDB(MongoDBURL)

    // Get Redis communication interface
    // If an error occurs, program is set to panic
    redis := models.NewRedis(RedisURL, RedisPassword)

    // Add interfaces & blank config to the environment
    env := &models.Env{
        MongoDB: mongoDB,
        Redis:   redis,
        Config:  models.Config{},
    }

    // Dynamically load config
    err := env.RefreshConfig()
    if err != nil {
        log.Fatalf(err.Error())
    }

    router.Listen(env)

    defer func() {
        env.Redis.CloseConnection()
    }()
}
[ "\"WAVE_CONFIG_FILE_PATH\"" ]
[]
[ "WAVE_CONFIG_FILE_PATH" ]
[]
["WAVE_CONFIG_FILE_PATH"]
go
1
0
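The service in this row refuses to start when WAVE_CONFIG_FILE_PATH is unset. A minimal sketch of the same fail-fast check factored into a helper; mustGetenv is an illustrative name, not something the row's code defines.

package main

import (
    "fmt"
    "log"
    "os"
)

// mustGetenv is a tiny helper for required settings such as
// WAVE_CONFIG_FILE_PATH in the service above: fail fast at startup
// rather than discovering a missing value later.
func mustGetenv(key string) string {
    v := os.Getenv(key)
    if v == "" {
        log.Fatalf("%s environment variable must be set", key)
    }
    return v
}

func main() {
    cfgPath := mustGetenv("WAVE_CONFIG_FILE_PATH")
    fmt.Println("loading config from", cfgPath)
}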
cmd/wrgl/utils/pager.go
// SPDX-License-Identifier: Apache-2.0
// Copyright © 2022 Wrangle Ltd

package utils

import (
    "io"
    "os"
    "os/exec"

    "github.com/spf13/cobra"
)

func getPager(cmd *cobra.Command) (*exec.Cmd, io.WriteCloser, error) {
    pager := os.Getenv("PAGER")
    if pager == "" {
        pager = "less"
    }
    p := exec.Command(pager)
    out, err := p.StdinPipe()
    if err != nil {
        return nil, nil, err
    }
    p.Stdout = cmd.OutOrStdout()
    p.Stderr = cmd.ErrOrStderr()
    if err := p.Start(); err != nil {
        return nil, nil, err
    }
    return p, out, nil
}

func PagerOrOut(cmd *cobra.Command) (io.Writer, func(), error) {
    noPager, err := cmd.Flags().GetBool("no-pager")
    if err != nil {
        return nil, nil, err
    }
    if noPager {
        return cmd.OutOrStdout(), func() {}, nil
    }
    pager, writer, err := getPager(cmd)
    if err != nil {
        return nil, nil, err
    }
    return writer, func() {
        writer.Close()
        pager.Wait()
    }, nil
}
[ "\"PAGER\"" ]
[]
[ "PAGER" ]
[]
["PAGER"]
go
1
0
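getPager in this row reads PAGER and falls back to less when it is unset or empty. A minimal sketch of that default-value pattern as a reusable helper; getenvDefault is an illustrative name, not part of the wrgl code.

package main

import (
    "fmt"
    "os"
)

// getenvDefault is the pattern getPager above uses for PAGER:
// read the variable, fall back to a default when it is unset or empty.
func getenvDefault(key, fallback string) string {
    if v := os.Getenv(key); v != "" {
        return v
    }
    return fallback
}

func main() {
    pager := getenvDefault("PAGER", "less")
    fmt.Println("pager command:", pager)
}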
xsrc/nqbp/other/genfsm_for_c/src/genfsm.py
#!/usr/bin/python
"""Invokes NQBP's genfsm_base.py script.

To run 'GENFSM' copy this file to your source directory.  Then edit the local
script to generate one or more Finite State Machines (FSMs)
"""

from __future__ import absolute_import
import os
import sys

# Make sure the environment is properly set
#NQBP_BIN = os.environ.get('NQBP_BIN')
#if ( NQBP_BIN == None ):
#    sys.exit( "ERROR: The environment variable NQBP_BIN is not set!" )
#sys.path.append( NQBP_BIN )
sys.path.append( "C:/bin/testfsm" )

# Find the Package & Workspace root
import genfsm_base

###############################################################
# BEGIN EDITS HERE
###############################################################

# Generate FSM#1, where argv:= Diagram name,
sys.argv.append('bar')
genfsm_base.run( sys.argv )

# Generate FSM#1, where argv:= Diagram name,
sys.argv.append('example')
#genfsm_base.run( sys.argv )
[]
[]
[ "NQBP_BIN" ]
[]
["NQBP_BIN"]
python
1
0
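The commented-out lines in this genfsm.py describe a guard that exits when NQBP_BIN is not set (the lookup recorded in the row's constarg column, even though the script currently hard-codes a path instead). A minimal Go sketch of that guard, kept in Go for consistency with the other sketches here; the messages are illustrative.

package main

import (
    "fmt"
    "os"
)

func main() {
    // The check the commented-out Python lines describe: refuse to run
    // when NQBP_BIN is not set, otherwise use it as the tool location.
    nqbpBin, ok := os.LookupEnv("NQBP_BIN")
    if !ok || nqbpBin == "" {
        fmt.Fprintln(os.Stderr, "ERROR: The environment variable NQBP_BIN is not set!")
        os.Exit(1)
    }
    fmt.Println("NQBP tools located at", nqbpBin)
}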
cmd/fetcher/main.go
/*
Copyright 2020 The Knative Authors

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package main is the main package for the fetcher. The fetcher knows how to
// collect a directory tree of release artifacts given a configuration file
// indicating the desired top-level packages.
package main

import (
    "context"
    "log"
    "net/http"
    "os"
    "path/filepath"

    ghclient "github.com/google/go-github/v33/github"
    "golang.org/x/oauth2"

    "knative.dev/operator/pkg/github"
    "knative.dev/operator/pkg/packages"
)

func main() {
    cfg, err := packages.ReadConfig("cmd/fetcher/kodata/config.yaml")
    if err != nil {
        log.Print("Unable to read config: ", err)
        os.Exit(2)
    }

    ctx := context.Background()
    client := getClient(ctx)
    if client == nil {
        log.Print("GITHUB_TOKEN not set, skipping release fetch from GitHub")
        os.Exit(0)
    }
    ghClient := ghclient.NewClient(client)

    repos := make(map[string][]packages.Release, len(cfg))
    for _, v := range cfg {
        if err := ensureRepo(ctx, repos, ghClient, v.Primary); err != nil {
            log.Printf("Unable to fetch %s: %v", v.Primary, err)
            os.Exit(2)
        }
        for _, s := range v.Additional {
            if err := ensureRepo(ctx, repos, ghClient, s); err != nil {
                log.Printf("Unable to fetch %s: %v", s, err)
                os.Exit(2)
            }
        }

        base := filepath.Join("cmd", "operator", "kodata", v.Name)
        if err := os.RemoveAll(base); err != nil && !os.IsNotExist(err) {
            log.Printf("Unable to remove directory %s: %v", base, err)
            os.Exit(3)
        }

        for _, release := range packages.LastN(4, repos[v.Primary.String()]) {
            if err := packages.HandleRelease(ctx, http.DefaultClient, *v, release, repos); err != nil {
                log.Printf("Unable to fetch %s: %v", release, err)
            }
            log.Printf("Wrote %s ==> %s", v.String(), release.String())
        }
    }
}

func getClient(ctx context.Context) *http.Client {
    if os.Getenv("GITHUB_TOKEN") == "" {
        return nil
    }
    staticToken := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: os.Getenv("GITHUB_TOKEN")})
    return oauth2.NewClient(ctx, staticToken)
}

func ensureRepo(ctx context.Context, known map[string][]packages.Release, client *ghclient.Client, src packages.Source) error {
    if known[src.GitHub.Repo] != nil {
        return nil
    }
    owner, repo := src.OrgRepo()
    releases, err := github.GetReleases(ctx, client, owner, repo)
    if err != nil {
        return err
    }
    known[src.GitHub.Repo] = releases
    return nil
}
[ "\"GITHUB_TOKEN\"", "\"GITHUB_TOKEN\"" ]
[]
[ "GITHUB_TOKEN" ]
[]
["GITHUB_TOKEN"]
go
1
0
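getClient in this row returns nil when GITHUB_TOKEN is unset so the fetcher can skip GitHub entirely, and otherwise builds an oauth2-backed HTTP client. A minimal sketch of that optional-token pattern, assuming the golang.org/x/oauth2 module is available; newGitHubHTTPClient is an illustrative name, not part of the row's code.

package main

import (
    "context"
    "fmt"
    "net/http"
    "os"

    "golang.org/x/oauth2"
)

// newGitHubHTTPClient mirrors getClient in the fetcher above: an
// authenticated client when GITHUB_TOKEN is set, nil otherwise so the
// caller can decide to skip GitHub work entirely.
func newGitHubHTTPClient(ctx context.Context) *http.Client {
    token := os.Getenv("GITHUB_TOKEN")
    if token == "" {
        return nil
    }
    src := oauth2.StaticTokenSource(&oauth2.Token{AccessToken: token})
    return oauth2.NewClient(ctx, src)
}

func main() {
    if c := newGitHubHTTPClient(context.Background()); c == nil {
        fmt.Println("GITHUB_TOKEN not set, skipping authenticated calls")
    } else {
        fmt.Println("authenticated GitHub client ready")
    }
}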
certbot/cli.py
"""Certbot command line argument & config processing.""" # pylint: disable=too-many-lines from __future__ import print_function import argparse import copy import glob import logging import logging.handlers import os import sys import configargparse import six import zope.component import zope.interface from zope.interface import interfaces as zope_interfaces from acme import challenges # pylint: disable=unused-import, no-name-in-module from acme.magic_typing import Any, Dict, Optional # pylint: enable=unused-import, no-name-in-module import certbot from certbot import constants from certbot import crypto_util from certbot import errors from certbot import hooks from certbot import interfaces from certbot import util from certbot.display import util as display_util from certbot.plugins import disco as plugins_disco import certbot.plugins.enhancements as enhancements import certbot.plugins.selection as plugin_selection logger = logging.getLogger(__name__) # Global, to save us from a lot of argument passing within the scope of this module helpful_parser = None # type: Optional[HelpfulArgumentParser] # For help strings, figure out how the user ran us. # When invoked from letsencrypt-auto, sys.argv[0] is something like: # "/home/user/.local/share/certbot/bin/certbot" # Note that this won't work if the user set VENV_PATH or XDG_DATA_HOME before # running letsencrypt-auto (and sudo stops us from seeing if they did), so it # should only be used for purposes where inability to detect letsencrypt-auto # fails safely LEAUTO = "letsencrypt-auto" if "CERTBOT_AUTO" in os.environ: # if we're here, this is probably going to be certbot-auto, unless the # user saved the script under a different name LEAUTO = os.path.basename(os.environ["CERTBOT_AUTO"]) old_path_fragment = os.path.join(".local", "share", "letsencrypt") new_path_prefix = os.path.abspath(os.path.join(os.sep, "opt", "eff.org", "certbot", "venv")) if old_path_fragment in sys.argv[0] or sys.argv[0].startswith(new_path_prefix): cli_command = LEAUTO else: cli_command = "certbot" # Argparse's help formatting has a lot of unhelpful peculiarities, so we want # to replace as much of it as we can... # This is the stub to include in help generated by argparse SHORT_USAGE = """ {0} [SUBCOMMAND] [options] [-d DOMAIN] [-d DOMAIN] ... Certbot can obtain and install HTTPS/TLS/SSL certificates. By default, it will attempt to use a webserver both for obtaining and installing the certificate. 
""".format(cli_command) # This section is used for --help and --help all ; it needs information # about installed plugins to be fully formatted COMMAND_OVERVIEW = """The most common SUBCOMMANDS and flags are: obtain, install, and renew certificates: (default) run Obtain & install a certificate in your current webserver certonly Obtain or renew a certificate, but do not install it renew Renew all previously obtained certificates that are near expiry enhance Add security enhancements to your existing configuration -d DOMAINS Comma-separated list of domains to obtain a certificate for %s --standalone Run a standalone webserver for authentication %s --webroot Place files in a server's webroot folder for authentication --manual Obtain certificates interactively, or using shell script hooks -n Run non-interactively --test-cert Obtain a test certificate from a staging server --dry-run Test "renew" or "certonly" without saving any certificates to disk manage certificates: certificates Display information about certificates you have from Certbot revoke Revoke a certificate (supply --cert-path or --cert-name) delete Delete a certificate manage your account with Let's Encrypt: register Create a Let's Encrypt ACME account update_account Update a Let's Encrypt ACME account --agree-tos Agree to the ACME server's Subscriber Agreement -m EMAIL Email address for important account notifications """ # This is the short help for certbot --help, where we disable argparse # altogether HELP_USAGE = """ More detailed help: -h, --help [TOPIC] print this message, or detailed help on a topic; the available TOPICS are: all, automation, commands, paths, security, testing, or any of the subcommands or plugins (certonly, renew, install, register, nginx, apache, standalone, webroot, etc.) """ # These argparse parameters should be removed when detecting defaults. ARGPARSE_PARAMS_TO_REMOVE = ("const", "nargs", "type",) # These sets are used when to help detect options set by the user. EXIT_ACTIONS = set(("help", "version",)) ZERO_ARG_ACTIONS = set(("store_const", "store_true", "store_false", "append_const", "count",)) # Maps a config option to a set of config options that may have modified it. # This dictionary is used recursively, so if A modifies B and B modifies C, # it is determined that C was modified by the user if A was modified. VAR_MODIFIERS = {"account": set(("server",)), "renew_hook": set(("deploy_hook",)), "server": set(("dry_run", "staging",)), "webroot_map": set(("webroot_path",))} def report_config_interaction(modified, modifiers): """Registers config option interaction to be checked by set_by_cli. This function can be called by during the __init__ or add_parser_arguments methods of plugins to register interactions between config options. :param modified: config options that can be modified by modifiers :type modified: iterable or str (string_types) :param modifiers: config options that modify modified :type modifiers: iterable or str (string_types) """ if isinstance(modified, six.string_types): modified = (modified,) if isinstance(modifiers, six.string_types): modifiers = (modifiers,) for var in modified: VAR_MODIFIERS.setdefault(var, set()).update(modifiers) def possible_deprecation_warning(config): "A deprecation warning for users with the old, not-self-upgrading letsencrypt-auto." 
if cli_command != LEAUTO: return if config.no_self_upgrade: # users setting --no-self-upgrade might be hanging on a client version like 0.3.0 # or 0.5.0 which is the new script, but doesn't set CERTBOT_AUTO; they don't # need warnings return if "CERTBOT_AUTO" not in os.environ: logger.warning("You are running with an old copy of letsencrypt-auto" " that does not receive updates, and is less reliable than more" " recent versions. The letsencrypt client has also been renamed" " to Certbot. We recommend upgrading to the latest certbot-auto" " script, or using native OS packages.") logger.debug("Deprecation warning circumstances: %s / %s", sys.argv[0], os.environ) class _Default(object): """A class to use as a default to detect if a value is set by a user""" def __bool__(self): return False def __eq__(self, other): return isinstance(other, _Default) def __hash__(self): return id(_Default) def __nonzero__(self): return self.__bool__() def set_by_cli(var): """ Return True if a particular config variable has been set by the user (CLI or config file) including if the user explicitly set it to the default. Returns False if the variable was assigned a default value. """ detector = set_by_cli.detector # type: ignore if detector is None and helpful_parser is not None: # Setup on first run: `detector` is a weird version of config in which # the default value of every attribute is wrangled to be boolean-false plugins = plugins_disco.PluginsRegistry.find_all() # reconstructed_args == sys.argv[1:], or whatever was passed to main() reconstructed_args = helpful_parser.args + [helpful_parser.verb] detector = set_by_cli.detector = prepare_and_parse_args( # type: ignore plugins, reconstructed_args, detect_defaults=True) # propagate plugin requests: eg --standalone modifies config.authenticator detector.authenticator, detector.installer = ( # type: ignore plugin_selection.cli_plugin_requests(detector)) if not isinstance(getattr(detector, var), _Default): logger.debug("Var %s=%s (set by user).", var, getattr(detector, var)) return True for modifier in VAR_MODIFIERS.get(var, []): if set_by_cli(modifier): logger.debug("Var %s=%s (set by user).", var, VAR_MODIFIERS.get(var, [])) return True return False # static housekeeping var # functions attributed are not supported by mypy # https://github.com/python/mypy/issues/2087 set_by_cli.detector = None # type: ignore def has_default_value(option, value): """Does option have the default value? If the default value of option is not known, False is returned. :param str option: configuration variable being considered :param value: value of the configuration variable named option :returns: True if option has the default value, otherwise, False :rtype: bool """ if helpful_parser is not None: return (option in helpful_parser.defaults and helpful_parser.defaults[option] == value) return False def option_was_set(option, value): """Was option set by the user or does it differ from the default? 
:param str option: configuration variable being considered :param value: value of the configuration variable named option :returns: True if the option was set, otherwise, False :rtype: bool """ return set_by_cli(option) or not has_default_value(option, value) def argparse_type(variable): """Return our argparse type function for a config variable (default: str)""" # pylint: disable=protected-access if helpful_parser is not None: for action in helpful_parser.parser._actions: if action.type is not None and action.dest == variable: return action.type return str def read_file(filename, mode="rb"): """Returns the given file's contents. :param str filename: path to file :param str mode: open mode (see `open`) :returns: absolute path of filename and its contents :rtype: tuple :raises argparse.ArgumentTypeError: File does not exist or is not readable. """ try: filename = os.path.abspath(filename) with open(filename, mode) as the_file: contents = the_file.read() return filename, contents except IOError as exc: raise argparse.ArgumentTypeError(exc.strerror) def flag_default(name): """Default value for CLI flag.""" # XXX: this is an internal housekeeping notion of defaults before # argparse has been set up; it is not accurate for all flags. Call it # with caution. Plugin defaults are missing, and some things are using # defaults defined in this file, not in constants.py :( return copy.deepcopy(constants.CLI_DEFAULTS[name]) def config_help(name, hidden=False): """Extract the help message for an `.IConfig` attribute.""" # pylint: disable=no-member if hidden: return argparse.SUPPRESS else: field = interfaces.IConfig.__getitem__(name) # type: zope.interface.interface.Attribute return field.__doc__ class HelpfulArgumentGroup(object): """Emulates an argparse group for use with HelpfulArgumentParser. This class is used in the add_group method of HelpfulArgumentParser. Command line arguments can be added to the group, but help suppression and default detection is applied by HelpfulArgumentParser when necessary. """ def __init__(self, helpful_arg_parser, topic): self._parser = helpful_arg_parser self._topic = topic def add_argument(self, *args, **kwargs): """Add a new command line argument to the argument group.""" self._parser.add(self._topic, *args, **kwargs) class CustomHelpFormatter(argparse.HelpFormatter): """This is a clone of ArgumentDefaultsHelpFormatter, with bugfixes. 
In particular we fix https://bugs.python.org/issue28742 """ def _get_help_string(self, action): helpstr = action.help if '%(default)' not in action.help and '(default:' not in action.help: if action.default != argparse.SUPPRESS: defaulting_nargs = [argparse.OPTIONAL, argparse.ZERO_OR_MORE] if action.option_strings or action.nargs in defaulting_nargs: helpstr += ' (default: %(default)s)' return helpstr # The attributes here are: # short: a string that will be displayed by "certbot -h commands" # opts: a string that heads the section of flags with which this command is documented, # both for "certbot -h SUBCOMMAND" and "certbot -h all" # usage: an optional string that overrides the header of "certbot -h SUBCOMMAND" VERB_HELP = [ ("run (default)", { "short": "Obtain/renew a certificate, and install it", "opts": "Options for obtaining & installing certificates", "usage": SHORT_USAGE.replace("[SUBCOMMAND]", ""), "realname": "run" }), ("certonly", { "short": "Obtain or renew a certificate, but do not install it", "opts": "Options for modifying how a certificate is obtained", "usage": ("\n\n certbot certonly [options] [-d DOMAIN] [-d DOMAIN] ...\n\n" "This command obtains a TLS/SSL certificate without installing it anywhere.") }), ("renew", { "short": "Renew all certificates (or one specified with --cert-name)", "opts": ("The 'renew' subcommand will attempt to renew all" " certificates (or more precisely, certificate lineages) you have" " previously obtained if they are close to expiry, and print a" " summary of the results. By default, 'renew' will reuse the options" " used to create obtain or most recently successfully renew each" " certificate lineage. You can try it with `--dry-run` first. For" " more fine-grained control, you can renew individual lineages with" " the `certonly` subcommand. 
Hooks are available to run commands" " before and after renewal; see" " https://certbot.eff.org/docs/using.html#renewal for more" " information on these."), "usage": "\n\n certbot renew [--cert-name CERTNAME] [options]\n\n" }), ("certificates", { "short": "List certificates managed by Certbot", "opts": "List certificates managed by Certbot", "usage": ("\n\n certbot certificates [options] ...\n\n" "Print information about the status of certificates managed by Certbot.") }), ("delete", { "short": "Clean up all files related to a certificate", "opts": "Options for deleting a certificate", "usage": "\n\n certbot delete --cert-name CERTNAME\n\n" }), ("revoke", { "short": "Revoke a certificate specified with --cert-path or --cert-name", "opts": "Options for revocation of certificates", "usage": "\n\n certbot revoke [--cert-path /path/to/fullchain.pem | " "--cert-name example.com] [options]\n\n" }), ("register", { "short": "Register for account with Let's Encrypt / other ACME server", "opts": "Options for account registration", "usage": "\n\n certbot register --email [email protected] [options]\n\n" }), ("update_account", { "short": "Update existing account with Let's Encrypt / other ACME server", "opts": "Options for account modification", "usage": "\n\n certbot update_account --email [email protected] [options]\n\n" }), ("unregister", { "short": "Irrevocably deactivate your account", "opts": "Options for account deactivation.", "usage": "\n\n certbot unregister [options]\n\n" }), ("install", { "short": "Install an arbitrary certificate in a server", "opts": "Options for modifying how a certificate is deployed", "usage": "\n\n certbot install --cert-path /path/to/fullchain.pem " " --key-path /path/to/private-key [options]\n\n" }), ("config_changes", { "short": "Show changes that Certbot has made to server configurations", "opts": "Options for controlling which changes are displayed", "usage": "\n\n certbot config_changes --num NUM [options]\n\n" }), ("rollback", { "short": "Roll back server conf changes made during certificate installation", "opts": "Options for rolling back server configuration changes", "usage": "\n\n certbot rollback --checkpoints 3 [options]\n\n" }), ("plugins", { "short": "List plugins that are installed and available on your system", "opts": 'Options for for the "plugins" subcommand', "usage": "\n\n certbot plugins [options]\n\n" }), ("update_symlinks", { "short": "Recreate symlinks in your /etc/letsencrypt/live/ directory", "opts": ("Recreates certificate and key symlinks in {0}, if you changed them by hand " "or edited a renewal configuration file".format( os.path.join(flag_default("config_dir"), "live"))), "usage": "\n\n certbot update_symlinks [options]\n\n" }), ("enhance", { "short": "Add security enhancements to your existing configuration", "opts": ("Helps to harden the TLS configuration by adding security enhancements " "to already existing configuration."), "usage": "\n\n certbot enhance [options]\n\n" }), ] # VERB_HELP is a list in order to preserve order, but a dict is sometimes useful VERB_HELP_MAP = dict(VERB_HELP) class HelpfulArgumentParser(object): """Argparse Wrapper. This class wraps argparse, adding the ability to make --help less verbose, and request help on specific subcategories at a time, eg 'certbot --help security' for security options. 
""" def __init__(self, args, plugins, detect_defaults=False): from certbot import main self.VERBS = { "auth": main.certonly, "certonly": main.certonly, "config_changes": main.config_changes, "run": main.run, "install": main.install, "plugins": main.plugins_cmd, "register": main.register, "update_account": main.update_account, "unregister": main.unregister, "renew": main.renew, "revoke": main.revoke, "rollback": main.rollback, "everything": main.run, "update_symlinks": main.update_symlinks, "certificates": main.certificates, "delete": main.delete, "enhance": main.enhance, } # Get notification function for printing try: self.notify = zope.component.getUtility( interfaces.IDisplay).notification except zope_interfaces.ComponentLookupError: self.notify = display_util.NoninteractiveDisplay( sys.stdout).notification # List of topics for which additional help can be provided HELP_TOPICS = ["all", "security", "paths", "automation", "testing"] HELP_TOPICS += list(self.VERBS) + self.COMMANDS_TOPICS + ["manage"] plugin_names = list(plugins) self.help_topics = HELP_TOPICS + plugin_names + [None] # type: ignore self.detect_defaults = detect_defaults self.args = args if self.args and self.args[0] == 'help': self.args[0] = '--help' self.determine_verb() help1 = self.prescan_for_flag("-h", self.help_topics) help2 = self.prescan_for_flag("--help", self.help_topics) if isinstance(help1, bool) and isinstance(help2, bool): self.help_arg = help1 or help2 else: self.help_arg = help1 if isinstance(help1, six.string_types) else help2 short_usage = self._usage_string(plugins, self.help_arg) self.visible_topics = self.determine_help_topics(self.help_arg) # elements are added by .add_group() self.groups = {} # type: Dict[str, argparse._ArgumentGroup] # elements are added by .parse_args() self.defaults = {} # type: Dict[str, Any] self.parser = configargparse.ArgParser( prog="certbot", usage=short_usage, formatter_class=CustomHelpFormatter, args_for_setting_config_path=["-c", "--config"], default_config_files=flag_default("config_files"), config_arg_help_message="path to config file (default: {0})".format( " and ".join(flag_default("config_files")))) # This is the only way to turn off overly verbose config flag documentation self.parser._add_config_file_help = False # pylint: disable=protected-access # Help that are synonyms for --help subcommands COMMANDS_TOPICS = ["command", "commands", "subcommand", "subcommands", "verbs"] def _list_subcommands(self): longest = max(len(v) for v in VERB_HELP_MAP.keys()) text = "The full list of available SUBCOMMANDS is:\n\n" for verb, props in sorted(VERB_HELP): doc = props.get("short", "") text += '{0:<{length}} {1}\n'.format(verb, doc, length=longest) text += "\nYou can get more help on a specific subcommand with --help SUBCOMMAND\n" return text def _usage_string(self, plugins, help_arg): """Make usage strings late so that plugins can be initialised late :param plugins: all discovered plugins :param help_arg: False for none; True for --help; "TOPIC" for --help TOPIC :rtype: str :returns: a short usage string for the top of --help TOPIC) """ if "nginx" in plugins: nginx_doc = "--nginx Use the Nginx plugin for authentication & installation" else: nginx_doc = "(the certbot nginx plugin is not installed)" if "apache" in plugins: apache_doc = "--apache Use the Apache plugin for authentication & installation" else: apache_doc = "(the certbot apache plugin is not installed)" usage = SHORT_USAGE if help_arg == True: self.notify(usage + COMMAND_OVERVIEW % (apache_doc, nginx_doc) + 
HELP_USAGE) sys.exit(0) elif help_arg in self.COMMANDS_TOPICS: self.notify(usage + self._list_subcommands()) sys.exit(0) elif help_arg == "all": # if we're doing --help all, the OVERVIEW is part of the SHORT_USAGE at # the top; if we're doing --help someothertopic, it's OT so it's not usage += COMMAND_OVERVIEW % (apache_doc, nginx_doc) else: custom = VERB_HELP_MAP.get(help_arg, {}).get("usage", None) usage = custom if custom else usage return usage def remove_config_file_domains_for_renewal(self, parsed_args): """Make "certbot renew" safe if domains are set in cli.ini.""" # Works around https://github.com/certbot/certbot/issues/4096 if self.verb == "renew": for source, flags in self.parser._source_to_settings.items(): # pylint: disable=protected-access if source.startswith("config_file") and "domains" in flags: parsed_args.domains = _Default() if self.detect_defaults else [] def parse_args(self): """Parses command line arguments and returns the result. :returns: parsed command line arguments :rtype: argparse.Namespace """ parsed_args = self.parser.parse_args(self.args) parsed_args.func = self.VERBS[self.verb] parsed_args.verb = self.verb self.remove_config_file_domains_for_renewal(parsed_args) if self.detect_defaults: return parsed_args self.defaults = dict((key, copy.deepcopy(self.parser.get_default(key))) for key in vars(parsed_args)) # Do any post-parsing homework here if self.verb == "renew": if parsed_args.force_interactive: raise errors.Error( "{0} cannot be used with renew".format( constants.FORCE_INTERACTIVE_FLAG)) parsed_args.noninteractive_mode = True if parsed_args.force_interactive and parsed_args.noninteractive_mode: raise errors.Error( "Flag for non-interactive mode and {0} conflict".format( constants.FORCE_INTERACTIVE_FLAG)) if parsed_args.staging or parsed_args.dry_run: self.set_test_server(parsed_args) if parsed_args.csr: self.handle_csr(parsed_args) if parsed_args.must_staple: parsed_args.staple = True if parsed_args.validate_hooks: hooks.validate_hooks(parsed_args) if parsed_args.allow_subset_of_names: if any(util.is_wildcard_domain(d) for d in parsed_args.domains): raise errors.Error("Using --allow-subset-of-names with a" " wildcard domain is not supported.") if parsed_args.hsts and parsed_args.auto_hsts: raise errors.Error( "Parameters --hsts and --auto-hsts cannot be used simultaneously.") possible_deprecation_warning(parsed_args) return parsed_args def set_test_server(self, parsed_args): """We have --staging/--dry-run; perform sanity check and set config.server""" if parsed_args.server not in (flag_default("server"), constants.STAGING_URI): conflicts = ["--staging"] if parsed_args.staging else [] conflicts += ["--dry-run"] if parsed_args.dry_run else [] raise errors.Error("--server value conflicts with {0}".format( " and ".join(conflicts))) parsed_args.server = constants.STAGING_URI if parsed_args.dry_run: if self.verb not in ["certonly", "renew"]: raise errors.Error("--dry-run currently only works with the " "'certonly' or 'renew' subcommands (%r)" % self.verb) parsed_args.break_my_certs = parsed_args.staging = True if glob.glob(os.path.join(parsed_args.config_dir, constants.ACCOUNTS_DIR, "*")): # The user has a prod account, but might not have a staging # one; we don't want to start trying to perform interactive registration parsed_args.tos = True parsed_args.register_unsafely_without_email = True def handle_csr(self, parsed_args): """Process a --csr flag.""" if parsed_args.verb != "certonly": raise errors.Error("Currently, a CSR file may only be specified " "when 
obtaining a new or replacement " "via the certonly command. Please try the " "certonly command instead.") if parsed_args.allow_subset_of_names: raise errors.Error("--allow-subset-of-names cannot be used with --csr") csrfile, contents = parsed_args.csr[0:2] typ, csr, domains = crypto_util.import_csr_file(csrfile, contents) # This is not necessary for webroot to work, however, # obtain_certificate_from_csr requires parsed_args.domains to be set for domain in domains: add_domains(parsed_args, domain) if not domains: # TODO: add CN to domains instead: raise errors.Error( "Unfortunately, your CSR %s needs to have a SubjectAltName for every domain" % parsed_args.csr[0]) parsed_args.actual_csr = (csr, typ) csr_domains = set([d.lower() for d in domains]) config_domains = set(parsed_args.domains) if csr_domains != config_domains: raise errors.ConfigurationError( "Inconsistent domain requests:\nFrom the CSR: {0}\nFrom command line/config: {1}" .format(", ".join(csr_domains), ", ".join(config_domains))) def determine_verb(self): """Determines the verb/subcommand provided by the user. This function works around some of the limitations of argparse. """ if "-h" in self.args or "--help" in self.args: # all verbs double as help arguments; don't get them confused self.verb = "help" return for i, token in enumerate(self.args): if token in self.VERBS: verb = token if verb == "auth": verb = "certonly" if verb == "everything": verb = "run" self.verb = verb self.args.pop(i) return self.verb = "run" def prescan_for_flag(self, flag, possible_arguments): """Checks cli input for flags. Check for a flag, which accepts a fixed set of possible arguments, in the command line; we will use this information to configure argparse's help correctly. Return the flag's argument, if it has one that matches the sequence @possible_arguments; otherwise return whether the flag is present. """ if flag not in self.args: return False pos = self.args.index(flag) try: nxt = self.args[pos + 1] if nxt in possible_arguments: return nxt except IndexError: pass return True def add(self, topics, *args, **kwargs): """Add a new command line argument. :param topics: str or [str] help topic(s) this should be listed under, or None for "always documented". The first entry determines where the flag lives in the "--help all" output (None -> "optional arguments"). :param list *args: the names of this argument flag :param dict **kwargs: various argparse settings for this argument """ if isinstance(topics, list): # if this flag can be listed in multiple sections, try to pick the one # that the user has asked for help about topic = self.help_arg if self.help_arg in topics else topics[0] else: topic = topics # there's only one if self.detect_defaults: kwargs = self.modify_kwargs_for_default_detection(**kwargs) if self.visible_topics[topic]: if topic in self.groups: group = self.groups[topic] group.add_argument(*args, **kwargs) else: self.parser.add_argument(*args, **kwargs) else: kwargs["help"] = argparse.SUPPRESS self.parser.add_argument(*args, **kwargs) def modify_kwargs_for_default_detection(self, **kwargs): """Modify an arg so we can check if it was set by the user. Changes the parameters given to argparse when adding an argument so we can properly detect if the value was set by the user. 
:param dict kwargs: various argparse settings for this argument :returns: a modified versions of kwargs :rtype: dict """ action = kwargs.get("action", None) if action not in EXIT_ACTIONS: kwargs["action"] = ("store_true" if action in ZERO_ARG_ACTIONS else "store") kwargs["default"] = _Default() for param in ARGPARSE_PARAMS_TO_REMOVE: kwargs.pop(param, None) return kwargs def add_deprecated_argument(self, argument_name, num_args): """Adds a deprecated argument with the name argument_name. Deprecated arguments are not shown in the help. If they are used on the command line, a warning is shown stating that the argument is deprecated and no other action is taken. :param str argument_name: Name of deprecated argument. :param int nargs: Number of arguments the option takes. """ util.add_deprecated_argument( self.parser.add_argument, argument_name, num_args) def add_group(self, topic, verbs=(), **kwargs): """Create a new argument group. This method must be called once for every topic, however, calls to this function are left next to the argument definitions for clarity. :param str topic: Name of the new argument group. :param str verbs: List of subcommands that should be documented as part of this help group / topic :returns: The new argument group. :rtype: `HelpfulArgumentGroup` """ if self.visible_topics[topic]: self.groups[topic] = self.parser.add_argument_group(topic, **kwargs) if self.help_arg: for v in verbs: self.groups[topic].add_argument(v, help=VERB_HELP_MAP[v]["short"]) return HelpfulArgumentGroup(self, topic) def add_plugin_args(self, plugins): """ Let each of the plugins add its own command line arguments, which may or may not be displayed as help topics. """ for name, plugin_ep in six.iteritems(plugins): parser_or_group = self.add_group(name, description=plugin_ep.long_description) plugin_ep.plugin_cls.inject_parser_options(parser_or_group, name) def determine_help_topics(self, chosen_topic): """ The user may have requested help on a topic, return a dict of which topics to display. @chosen_topic has prescan_for_flag's return type :returns: dict """ # topics maps each topic to whether it should be documented by # argparse on the command line if chosen_topic == "auth": chosen_topic = "certonly" if chosen_topic == "everything": chosen_topic = "run" if chosen_topic == "all": # Addition of condition closes #6209 (removal of duplicate route53 option). return dict([(t, True) if t != 'certbot-route53:auth' else (t, False) for t in self.help_topics]) elif not chosen_topic: return dict([(t, False) for t in self.help_topics]) else: return dict([(t, t == chosen_topic) for t in self.help_topics]) def _add_all_groups(helpful): helpful.add_group("automation", description="Flags for automating execution & other tweaks") helpful.add_group("security", description="Security parameters & server settings") helpful.add_group("testing", description="The following flags are meant for testing and integration purposes only.") helpful.add_group("paths", description="Flags for changing execution paths & servers") helpful.add_group("manage", description="Various subcommands and flags are available for managing your certificates:", verbs=["certificates", "delete", "renew", "revoke", "update_symlinks"]) # VERBS for verb, docs in VERB_HELP: name = docs.get("realname", verb) helpful.add_group(name, description=docs["opts"]) def prepare_and_parse_args(plugins, args, detect_defaults=False): # pylint: disable=too-many-statements """Returns parsed command line arguments. 
:param .PluginsRegistry plugins: available plugins :param list args: command line arguments with the program name removed :returns: parsed command line arguments :rtype: argparse.Namespace """ # pylint: disable=too-many-statements helpful = HelpfulArgumentParser(args, plugins, detect_defaults) _add_all_groups(helpful) # --help is automatically provided by argparse helpful.add( None, "-v", "--verbose", dest="verbose_count", action="count", default=flag_default("verbose_count"), help="This flag can be used " "multiple times to incrementally increase the verbosity of output, " "e.g. -vvv.") helpful.add( None, "-t", "--text", dest="text_mode", action="store_true", default=flag_default("text_mode"), help=argparse.SUPPRESS) helpful.add( None, "--max-log-backups", type=nonnegative_int, default=flag_default("max_log_backups"), help="Specifies the maximum number of backup logs that should " "be kept by Certbot's built in log rotation. Setting this " "flag to 0 disables log rotation entirely, causing " "Certbot to always append to the same log file.") helpful.add( [None, "automation", "run", "certonly", "enhance"], "-n", "--non-interactive", "--noninteractive", dest="noninteractive_mode", action="store_true", default=flag_default("noninteractive_mode"), help="Run without ever asking for user input. This may require " "additional command line flags; the client will try to explain " "which ones are required if it finds one missing") helpful.add( [None, "register", "run", "certonly", "enhance"], constants.FORCE_INTERACTIVE_FLAG, action="store_true", default=flag_default("force_interactive"), help="Force Certbot to be interactive even if it detects it's not " "being run in a terminal. This flag cannot be used with the " "renew subcommand.") helpful.add( [None, "run", "certonly", "certificates", "enhance"], "-d", "--domains", "--domain", dest="domains", metavar="DOMAIN", action=_DomainsAction, default=flag_default("domains"), help="Domain names to apply. For multiple domains you can use " "multiple -d flags or enter a comma separated list of domains " "as a parameter. The first domain provided will be the " "subject CN of the certificate, and all domains will be " "Subject Alternative Names on the certificate. " "The first domain will also be used in " "some software user interfaces and as the file paths for the " "certificate and related material unless otherwise " "specified or you already have a certificate with the same " "name. In the case of a name collision it will append a number " "like 0001 to the file path name. (default: Ask)") helpful.add( [None, "run", "certonly", "register"], "--eab-kid", dest="eab_kid", metavar="EAB_KID", help="Key Identifier for External Account Binding" ) helpful.add( [None, "run", "certonly", "register"], "--eab-hmac-key", dest="eab_hmac_key", metavar="EAB_HMAC_KEY", help="HMAC key for External Account Binding" ) helpful.add( [None, "run", "certonly", "manage", "delete", "certificates", "renew", "enhance"], "--cert-name", dest="certname", metavar="CERTNAME", default=flag_default("certname"), help="Certificate name to apply. This name is used by Certbot for housekeeping " "and in file paths; it doesn't affect the content of the certificate itself. " "To see certificate names, run 'certbot certificates'. " "When creating a new certificate, specifies the new certificate's name. 
" "(default: the first provided domain or the name of an existing " "certificate on your system for the same domains)") helpful.add( [None, "testing", "renew", "certonly"], "--dry-run", action="store_true", dest="dry_run", default=flag_default("dry_run"), help="Perform a test run of the client, obtaining test (invalid) certificates" " but not saving them to disk. This can currently only be used" " with the 'certonly' and 'renew' subcommands. \nNote: Although --dry-run" " tries to avoid making any persistent changes on a system, it " " is not completely side-effect free: if used with webserver authenticator plugins" " like apache and nginx, it makes and then reverts temporary config changes" " in order to obtain test certificates, and reloads webservers to deploy and then" " roll back those changes. It also calls --pre-hook and --post-hook commands" " if they are defined because they may be necessary to accurately simulate" " renewal. --deploy-hook commands are not called.") helpful.add( ["register", "automation"], "--register-unsafely-without-email", action="store_true", default=flag_default("register_unsafely_without_email"), help="Specifying this flag enables registering an account with no " "email address. This is strongly discouraged, because in the " "event of key loss or account compromise you will irrevocably " "lose access to your account. You will also be unable to receive " "notice about impending expiration or revocation of your " "certificates. Updates to the Subscriber Agreement will still " "affect you, and will be effective 14 days after posting an " "update to the web site.") # TODO: When `certbot register --update-registration` is fully deprecated, # delete following helpful.add helpful.add( "register", "--update-registration", action="store_true", default=flag_default("update_registration"), dest="update_registration", help=argparse.SUPPRESS) helpful.add( ["register", "update_account", "unregister", "automation"], "-m", "--email", default=flag_default("email"), help=config_help("email")) helpful.add(["register", "update_account", "automation"], "--eff-email", action="store_true", default=flag_default("eff_email"), dest="eff_email", help="Share your e-mail address with EFF") helpful.add(["register", "update_account", "automation"], "--no-eff-email", action="store_false", default=flag_default("eff_email"), dest="eff_email", help="Don't share your e-mail address with EFF") helpful.add( ["automation", "certonly", "run"], "--keep-until-expiring", "--keep", "--reinstall", dest="reinstall", action="store_true", default=flag_default("reinstall"), help="If the requested certificate matches an existing certificate, always keep the " "existing one until it is due for renewal (for the " "'run' subcommand this means reinstall the existing certificate). (default: Ask)") helpful.add( "automation", "--expand", action="store_true", default=flag_default("expand"), help="If an existing certificate is a strict subset of the requested names, " "always expand and replace it with the additional names. (default: Ask)") helpful.add( "automation", "--version", action="version", version="%(prog)s {0}".format(certbot.__version__), help="show program's version number and exit") helpful.add( ["automation", "renew"], "--force-renewal", "--renew-by-default", dest="renew_by_default", action="store_true", default=flag_default("renew_by_default"), help="If a certificate " "already exists for the requested domains, renew it now, " "regardless of whether it is near expiry. 
(Often " "--keep-until-expiring is more appropriate). Also implies " "--expand.") helpful.add( "automation", "--renew-with-new-domains", dest="renew_with_new_domains", action="store_true", default=flag_default("renew_with_new_domains"), help="If a " "certificate already exists for the requested certificate name " "but does not match the requested domains, renew it now, " "regardless of whether it is near expiry.") helpful.add( "automation", "--reuse-key", dest="reuse_key", action="store_true", default=flag_default("reuse_key"), help="When renewing, use the same private key as the existing " "certificate.") helpful.add( ["automation", "renew", "certonly"], "--allow-subset-of-names", action="store_true", default=flag_default("allow_subset_of_names"), help="When performing domain validation, do not consider it a failure " "if authorizations can not be obtained for a strict subset of " "the requested domains. This may be useful for allowing renewals for " "multiple domains to succeed even if some domains no longer point " "at this system. This option cannot be used with --csr.") helpful.add( "automation", "--agree-tos", dest="tos", action="store_true", default=flag_default("tos"), help="Agree to the ACME Subscriber Agreement (default: Ask)") helpful.add( ["unregister", "automation"], "--account", metavar="ACCOUNT_ID", default=flag_default("account"), help="Account ID to use") helpful.add( "automation", "--duplicate", dest="duplicate", action="store_true", default=flag_default("duplicate"), help="Allow making a certificate lineage that duplicates an existing one " "(both can be renewed in parallel)") helpful.add( "automation", "--os-packages-only", action="store_true", default=flag_default("os_packages_only"), help="(certbot-auto only) install OS package dependencies and then stop") helpful.add( "automation", "--no-self-upgrade", action="store_true", default=flag_default("no_self_upgrade"), help="(certbot-auto only) prevent the certbot-auto script from" " upgrading itself to newer released versions (default: Upgrade" " automatically)") helpful.add( "automation", "--no-bootstrap", action="store_true", default=flag_default("no_bootstrap"), help="(certbot-auto only) prevent the certbot-auto script from" " installing OS-level dependencies (default: Prompt to install " " OS-wide dependencies, but exit if the user says 'No')") helpful.add( ["automation", "renew", "certonly", "run"], "-q", "--quiet", dest="quiet", action="store_true", default=flag_default("quiet"), help="Silence all output except errors. Useful for automation via cron." 
" Implies --non-interactive.") # overwrites server, handled in HelpfulArgumentParser.parse_args() helpful.add(["testing", "revoke", "run"], "--test-cert", "--staging", dest="staging", action="store_true", default=flag_default("staging"), help="Use the staging server to obtain or revoke test (invalid) certificates; equivalent" " to --server " + constants.STAGING_URI) helpful.add( "testing", "--debug", action="store_true", default=flag_default("debug"), help="Show tracebacks in case of errors, and allow certbot-auto " "execution on experimental platforms") helpful.add( [None, "certonly", "run"], "--debug-challenges", action="store_true", default=flag_default("debug_challenges"), help="After setting up challenges, wait for user input before " "submitting to CA") helpful.add( "testing", "--no-verify-ssl", action="store_true", help=config_help("no_verify_ssl"), default=flag_default("no_verify_ssl")) helpful.add( ["testing", "standalone", "apache", "nginx"], "--tls-sni-01-port", type=int, default=flag_default("tls_sni_01_port"), help=config_help("tls_sni_01_port")) helpful.add( ["testing", "standalone"], "--tls-sni-01-address", default=flag_default("tls_sni_01_address"), help=config_help("tls_sni_01_address")) helpful.add( ["testing", "standalone", "manual"], "--http-01-port", type=int, dest="http01_port", default=flag_default("http01_port"), help=config_help("http01_port")) helpful.add( ["testing", "standalone"], "--http-01-address", dest="http01_address", default=flag_default("http01_address"), help=config_help("http01_address")) helpful.add( "testing", "--break-my-certs", action="store_true", default=flag_default("break_my_certs"), help="Be willing to replace or renew valid certificates with invalid " "(testing/staging) certificates") helpful.add( "security", "--rsa-key-size", type=int, metavar="N", default=flag_default("rsa_key_size"), help=config_help("rsa_key_size")) helpful.add( "security", "--must-staple", action="store_true", dest="must_staple", default=flag_default("must_staple"), help=config_help("must_staple")) helpful.add( ["security", "enhance"], "--redirect", action="store_true", dest="redirect", default=flag_default("redirect"), help="Automatically redirect all HTTP traffic to HTTPS for the newly " "authenticated vhost. (default: Ask)") helpful.add( "security", "--no-redirect", action="store_false", dest="redirect", default=flag_default("redirect"), help="Do not automatically redirect all HTTP traffic to HTTPS for the newly " "authenticated vhost. (default: Ask)") helpful.add( ["security", "enhance"], "--hsts", action="store_true", dest="hsts", default=flag_default("hsts"), help="Add the Strict-Transport-Security header to every HTTP response." " Forcing browser to always use SSL for the domain." " Defends against SSL Stripping.") helpful.add( "security", "--no-hsts", action="store_false", dest="hsts", default=flag_default("hsts"), help=argparse.SUPPRESS) helpful.add( ["security", "enhance"], "--uir", action="store_true", dest="uir", default=flag_default("uir"), help='Add the "Content-Security-Policy: upgrade-insecure-requests"' ' header to every HTTP response. Forcing the browser to use' ' https:// for every http:// resource.') helpful.add( "security", "--no-uir", action="store_false", dest="uir", default=flag_default("uir"), help=argparse.SUPPRESS) helpful.add( "security", "--staple-ocsp", action="store_true", dest="staple", default=flag_default("staple"), help="Enables OCSP Stapling. 
A valid OCSP response is stapled to" " the certificate that the server offers during TLS.") helpful.add( "security", "--no-staple-ocsp", action="store_false", dest="staple", default=flag_default("staple"), help=argparse.SUPPRESS) helpful.add( "security", "--strict-permissions", action="store_true", default=flag_default("strict_permissions"), help="Require that all configuration files are owned by the current " "user; only needed if your config is somewhere unsafe like /tmp/") helpful.add( ["manual", "standalone", "certonly", "renew"], "--preferred-challenges", dest="pref_challs", action=_PrefChallAction, default=flag_default("pref_challs"), help='A sorted, comma delimited list of the preferred challenge to ' 'use during authorization with the most preferred challenge ' 'listed first (Eg, "dns" or "tls-sni-01,http,dns"). ' 'Not all plugins support all challenges. See ' 'https://certbot.eff.org/docs/using.html#plugins for details. ' 'ACME Challenges are versioned, but if you pick "http" rather ' 'than "http-01", Certbot will select the latest version ' 'automatically.') helpful.add( "renew", "--pre-hook", help="Command to be run in a shell before obtaining any certificates." " Intended primarily for renewal, where it can be used to temporarily" " shut down a webserver that might conflict with the standalone" " plugin. This will only be called if a certificate is actually to be" " obtained/renewed. When renewing several certificates that have" " identical pre-hooks, only the first will be executed.") helpful.add( "renew", "--post-hook", help="Command to be run in a shell after attempting to obtain/renew" " certificates. Can be used to deploy renewed certificates, or to" " restart any servers that were stopped by --pre-hook. This is only" " run if an attempt was made to obtain/renew a certificate. If" " multiple renewed certificates have identical post-hooks, only" " one will be run.") helpful.add("renew", "--renew-hook", action=_RenewHookAction, help=argparse.SUPPRESS) helpful.add( "renew", "--no-random-sleep-on-renew", action="store_false", default=flag_default("random_sleep_on_renew"), dest="random_sleep_on_renew", help=argparse.SUPPRESS) helpful.add( "renew", "--deploy-hook", action=_DeployHookAction, help='Command to be run in a shell once for each successfully' ' issued certificate. For this command, the shell variable' ' $RENEWED_LINEAGE will point to the config live subdirectory' ' (for example, "/etc/letsencrypt/live/example.com") containing' ' the new certificates and keys; the shell variable' ' $RENEWED_DOMAINS will contain a space-delimited list of' ' renewed certificate domains (for example, "example.com' ' www.example.com"') helpful.add( "renew", "--disable-hook-validation", action="store_false", dest="validate_hooks", default=flag_default("validate_hooks"), help="Ordinarily the commands specified for" " --pre-hook/--post-hook/--deploy-hook will be checked for" " validity, to see if the programs being run are in the $PATH," " so that mistakes can be caught early, even when the hooks" " aren't being run just yet. The validation is rather" " simplistic and fails if you use more advanced shell" " constructs, so you can use this switch to disable it." " (default: False)") helpful.add( "renew", "--no-directory-hooks", action="store_false", default=flag_default("directory_hooks"), dest="directory_hooks", help="Disable running executables found in Certbot's hook directories" " during renewal. 
(default: False)") helpful.add( "renew", "--disable-renew-updates", action="store_true", default=flag_default("disable_renew_updates"), dest="disable_renew_updates", help="Disable automatic updates to your server configuration that" " would otherwise be done by the selected installer plugin, and triggered" " when the user executes \"certbot renew\", regardless of if the certificate" " is renewed. This setting does not apply to important TLS configuration" " updates.") helpful.add( "renew", "--no-autorenew", action="store_false", default=flag_default("autorenew"), dest="autorenew", help="Disable auto renewal of certificates.") helpful.add_deprecated_argument("--agree-dev-preview", 0) helpful.add_deprecated_argument("--dialog", 0) # Populate the command line parameters for new style enhancements enhancements.populate_cli(helpful.add) _create_subparsers(helpful) _paths_parser(helpful) # _plugins_parsing should be the last thing to act upon the main # parser (--help should display plugin-specific options last) _plugins_parsing(helpful, plugins) if not detect_defaults: global helpful_parser # pylint: disable=global-statement helpful_parser = helpful return helpful.parse_args() def _create_subparsers(helpful): helpful.add("config_changes", "--num", type=int, default=flag_default("num"), help="How many past revisions you want to be displayed") from certbot.client import sample_user_agent # avoid import loops helpful.add( None, "--user-agent", default=flag_default("user_agent"), help='Set a custom user agent string for the client. User agent strings allow ' 'the CA to collect high level statistics about success rates by OS, ' 'plugin and use case, and to know when to deprecate support for past Python ' "versions and flags. If you wish to hide this information from the Let's " 'Encrypt server, set this to "". ' '(default: {0}). The flags encoded in the user agent are: ' '--duplicate, --force-renew, --allow-subset-of-names, -n, and ' 'whether any hooks are set.'.format(sample_user_agent())) helpful.add( None, "--user-agent-comment", default=flag_default("user_agent_comment"), type=_user_agent_comment_type, help="Add a comment to the default user agent string. May be used when repackaging Certbot " "or calling it from another tool to allow additional statistical data to be collected." " Ignored if --user-agent is set. (Example: Foo-Wrapper/1.0)") helpful.add("certonly", "--csr", default=flag_default("csr"), type=read_file, help="Path to a Certificate Signing Request (CSR) in DER or PEM format." " Currently --csr only works with the 'certonly' subcommand.") helpful.add("revoke", "--reason", dest="reason", choices=CaseInsensitiveList(sorted(constants.REVOCATION_REASONS, key=constants.REVOCATION_REASONS.get)), action=_EncodeReasonAction, default=flag_default("reason"), help="Specify reason for revoking certificate. (default: unspecified)") helpful.add("revoke", "--delete-after-revoke", action="store_true", default=flag_default("delete_after_revoke"), help="Delete certificates after revoking them.") helpful.add("revoke", "--no-delete-after-revoke", action="store_false", dest="delete_after_revoke", default=flag_default("delete_after_revoke"), help="Do not delete certificates after revoking them. 
This " "option should be used with caution because the 'renew' " "subcommand will attempt to renew undeleted revoked " "certificates.") helpful.add("rollback", "--checkpoints", type=int, metavar="N", default=flag_default("rollback_checkpoints"), help="Revert configuration N number of checkpoints.") helpful.add("plugins", "--init", action="store_true", default=flag_default("init"), help="Initialize plugins.") helpful.add("plugins", "--prepare", action="store_true", default=flag_default("prepare"), help="Initialize and prepare plugins.") helpful.add("plugins", "--authenticators", action="append_const", dest="ifaces", default=flag_default("ifaces"), const=interfaces.IAuthenticator, help="Limit to authenticator plugins only.") helpful.add("plugins", "--installers", action="append_const", dest="ifaces", default=flag_default("ifaces"), const=interfaces.IInstaller, help="Limit to installer plugins only.") class CaseInsensitiveList(list): """A list that will ignore case when searching. This class is passed to the `choices` argument of `argparse.add_arguments` through the `helpful` wrapper. It is necessary due to special handling of command line arguments by `set_by_cli` in which the `type_func` is not applied.""" def __contains__(self, element): return super(CaseInsensitiveList, self).__contains__(element.lower()) def _paths_parser(helpful): add = helpful.add verb = helpful.verb if verb == "help": verb = helpful.help_arg cph = "Path to where certificate is saved (with auth --csr), installed from, or revoked." sections = ["paths", "install", "revoke", "certonly", "manage"] if verb == "certonly": add(sections, "--cert-path", type=os.path.abspath, default=flag_default("auth_cert_path"), help=cph) elif verb == "revoke": add(sections, "--cert-path", type=read_file, required=False, help=cph) else: add(sections, "--cert-path", type=os.path.abspath, help=cph) section = "paths" if verb in ("install", "revoke"): section = verb # revoke --key-path reads a file, install --key-path takes a string add(section, "--key-path", type=((verb == "revoke" and read_file) or os.path.abspath), help="Path to private key for certificate installation " "or revocation (if account key is missing)") default_cp = None if verb == "certonly": default_cp = flag_default("auth_chain_path") add(["paths", "install"], "--fullchain-path", default=default_cp, type=os.path.abspath, help="Accompanying path to a full certificate chain (certificate plus chain).") add("paths", "--chain-path", default=default_cp, type=os.path.abspath, help="Accompanying path to a certificate chain.") add("paths", "--config-dir", default=flag_default("config_dir"), help=config_help("config_dir")) add("paths", "--work-dir", default=flag_default("work_dir"), help=config_help("work_dir")) add("paths", "--logs-dir", default=flag_default("logs_dir"), help="Logs directory.") add("paths", "--server", default=flag_default("server"), help=config_help("server")) def _plugins_parsing(helpful, plugins): # It's nuts, but there are two "plugins" topics. Somehow this works helpful.add_group( "plugins", description="Plugin Selection: Certbot client supports an " "extensible plugins architecture. See '%(prog)s plugins' for a " "list of all installed plugins and their names. You can force " "a particular plugin by setting options provided below. Running " "--help <plugin_name> will list flags specific to that plugin.") helpful.add("plugins", "--configurator", default=flag_default("configurator"), help="Name of the plugin that is both an authenticator and an installer." 
" Should not be used together with --authenticator or --installer. " "(default: Ask)") helpful.add("plugins", "-a", "--authenticator", default=flag_default("authenticator"), help="Authenticator plugin name.") helpful.add("plugins", "-i", "--installer", default=flag_default("installer"), help="Installer plugin name (also used to find domains).") helpful.add(["plugins", "certonly", "run", "install", "config_changes"], "--apache", action="store_true", default=flag_default("apache"), help="Obtain and install certificates using Apache") helpful.add(["plugins", "certonly", "run", "install", "config_changes"], "--nginx", action="store_true", default=flag_default("nginx"), help="Obtain and install certificates using Nginx") helpful.add(["plugins", "certonly"], "--standalone", action="store_true", default=flag_default("standalone"), help='Obtain certificates using a "standalone" webserver.') helpful.add(["plugins", "certonly"], "--manual", action="store_true", default=flag_default("manual"), help="Provide laborious manual instructions for obtaining a certificate") helpful.add(["plugins", "certonly"], "--webroot", action="store_true", default=flag_default("webroot"), help="Obtain certificates by placing files in a webroot directory.") helpful.add(["plugins", "certonly"], "--dns-cloudflare", action="store_true", default=flag_default("dns_cloudflare"), help=("Obtain certificates using a DNS TXT record (if you are " "using Cloudflare for DNS).")) helpful.add(["plugins", "certonly"], "--dns-cloudxns", action="store_true", default=flag_default("dns_cloudxns"), help=("Obtain certificates using a DNS TXT record (if you are " "using CloudXNS for DNS).")) helpful.add(["plugins", "certonly"], "--dns-digitalocean", action="store_true", default=flag_default("dns_digitalocean"), help=("Obtain certificates using a DNS TXT record (if you are " "using DigitalOcean for DNS).")) helpful.add(["plugins", "certonly"], "--dns-dnsimple", action="store_true", default=flag_default("dns_dnsimple"), help=("Obtain certificates using a DNS TXT record (if you are " "using DNSimple for DNS).")) helpful.add(["plugins", "certonly"], "--dns-dnsmadeeasy", action="store_true", default=flag_default("dns_dnsmadeeasy"), help=("Obtain certificates using a DNS TXT record (if you are" "using DNS Made Easy for DNS).")) helpful.add(["plugins", "certonly"], "--dns-gehirn", action="store_true", default=flag_default("dns_gehirn"), help=("Obtain certificates using a DNS TXT record " "(if you are using Gehirn Infrastracture Service for DNS).")) helpful.add(["plugins", "certonly"], "--dns-google", action="store_true", default=flag_default("dns_google"), help=("Obtain certificates using a DNS TXT record (if you are " "using Google Cloud DNS).")) helpful.add(["plugins", "certonly"], "--dns-linode", action="store_true", default=flag_default("dns_linode"), help=("Obtain certificates using a DNS TXT record (if you are " "using Linode for DNS).")) helpful.add(["plugins", "certonly"], "--dns-luadns", action="store_true", default=flag_default("dns_luadns"), help=("Obtain certificates using a DNS TXT record (if you are " "using LuaDNS for DNS).")) helpful.add(["plugins", "certonly"], "--dns-nsone", action="store_true", default=flag_default("dns_nsone"), help=("Obtain certificates using a DNS TXT record (if you are " "using NS1 for DNS).")) helpful.add(["plugins", "certonly"], "--dns-ovh", action="store_true", default=flag_default("dns_ovh"), help=("Obtain certificates using a DNS TXT record (if you are " "using OVH for DNS).")) helpful.add(["plugins", 
"certonly"], "--dns-rfc2136", action="store_true", default=flag_default("dns_rfc2136"), help="Obtain certificates using a DNS TXT record (if you are using BIND for DNS).") helpful.add(["plugins", "certonly"], "--dns-route53", action="store_true", default=flag_default("dns_route53"), help=("Obtain certificates using a DNS TXT record (if you are using Route53 for " "DNS).")) helpful.add(["plugins", "certonly"], "--dns-sakuracloud", action="store_true", default=flag_default("dns_sakuracloud"), help=("Obtain certificates using a DNS TXT record " "(if you are using Sakura Cloud for DNS).")) # things should not be reorder past/pre this comment: # plugins_group should be displayed in --help before plugin # specific groups (so that plugins_group.description makes sense) helpful.add_plugin_args(plugins) class _EncodeReasonAction(argparse.Action): """Action class for parsing revocation reason.""" def __call__(self, parser, namespace, reason, option_string=None): """Encodes the reason for certificate revocation.""" code = constants.REVOCATION_REASONS[reason.lower()] setattr(namespace, self.dest, code) class _DomainsAction(argparse.Action): """Action class for parsing domains.""" def __call__(self, parser, namespace, domain, option_string=None): """Just wrap add_domains in argparseese.""" add_domains(namespace, domain) def add_domains(args_or_config, domains): """Registers new domains to be used during the current client run. Domains are not added to the list of requested domains if they have already been registered. :param args_or_config: parsed command line arguments :type args_or_config: argparse.Namespace or configuration.NamespaceConfig :param str domain: one or more comma separated domains :returns: domains after they have been normalized and validated :rtype: `list` of `str` """ validated_domains = [] for domain in domains.split(","): domain = util.enforce_domain_sanity(domain.strip()) validated_domains.append(domain) if domain not in args_or_config.domains: args_or_config.domains.append(domain) return validated_domains class _PrefChallAction(argparse.Action): """Action class for parsing preferred challenges.""" def __call__(self, parser, namespace, pref_challs, option_string=None): try: challs = parse_preferred_challenges(pref_challs.split(",")) except errors.Error as error: raise argparse.ArgumentError(self, str(error)) namespace.pref_challs.extend(challs) def parse_preferred_challenges(pref_challs): """Translate and validate preferred challenges. 
:param pref_challs: list of preferred challenge types :type pref_challs: `list` of `str` :returns: validated list of preferred challenge types :rtype: `list` of `str` :raises errors.Error: if pref_challs is invalid """ aliases = {"dns": "dns-01", "http": "http-01", "tls-sni": "tls-sni-01"} challs = [c.strip() for c in pref_challs] challs = [aliases.get(c, c) for c in challs] unrecognized = ", ".join(name for name in challs if name not in challenges.Challenge.TYPES) if unrecognized: raise errors.Error( "Unrecognized challenges: {0}".format(unrecognized)) return challs def _user_agent_comment_type(value): if "(" in value or ")" in value: raise argparse.ArgumentTypeError("may not contain parentheses") return value class _DeployHookAction(argparse.Action): """Action class for parsing deploy hooks.""" def __call__(self, parser, namespace, values, option_string=None): renew_hook_set = namespace.deploy_hook != namespace.renew_hook if renew_hook_set and namespace.renew_hook != values: raise argparse.ArgumentError( self, "conflicts with --renew-hook value") namespace.deploy_hook = namespace.renew_hook = values class _RenewHookAction(argparse.Action): """Action class for parsing renew hooks.""" def __call__(self, parser, namespace, values, option_string=None): deploy_hook_set = namespace.deploy_hook is not None if deploy_hook_set and namespace.deploy_hook != values: raise argparse.ArgumentError( self, "conflicts with --deploy-hook value") namespace.renew_hook = values def nonnegative_int(value): """Converts value to an int and checks that it is not negative. This function should used as the type parameter for argparse arguments. :param str value: value provided on the command line :returns: integer representation of value :rtype: int :raises argparse.ArgumentTypeError: if value isn't a non-negative integer """ try: int_value = int(value) except ValueError: raise argparse.ArgumentTypeError("value must be an integer") if int_value < 0: raise argparse.ArgumentTypeError("value must be non-negative") return int_value
[]
[]
[ "CERTBOT_AUTO" ]
[]
["CERTBOT_AUTO"]
python
1
0
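The certbot CLI entry above leans on two argparse extension points that are easy to overlook: custom `argparse.Action` subclasses (the preferred-challenges and hook flags) and a list subclass passed as `choices` so membership checks ignore case. Below is a minimal, self-contained Python sketch of that pattern; the flag names and the `CommaListAction` helper are hypothetical, and it deliberately omits Certbot's `helpful` wrapper.

```python
import argparse

class CaseInsensitiveChoices(list):
    """A choices= container whose membership test ignores case."""
    def __contains__(self, element):
        return super().__contains__(element.lower())

class CommaListAction(argparse.Action):
    """Hypothetical action: collect a comma-separated value into a normalized list."""
    def __call__(self, parser, namespace, value, option_string=None):
        items = [v.strip().lower() for v in value.split(",") if v.strip()]
        getattr(namespace, self.dest).extend(items)

parser = argparse.ArgumentParser()
parser.add_argument(
    "--reason",
    choices=CaseInsensitiveChoices(["unspecified", "keycompromise"]),
    default="unspecified")
parser.add_argument(
    "--preferred-challenges", dest="pref_challs",
    action=CommaListAction, default=[])

args = parser.parse_args(
    ["--reason", "KeyCompromise", "--preferred-challenges", "http-01, dns-01"])
print(args.reason)       # KeyCompromise (accepted despite the mixed case)
print(args.pref_challs)  # ['http-01', 'dns-01']
```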
providers/vk/vk_test.go
package vk_test import ( "fmt" "os" "testing" "github.com/a93h/goth" "github.com/a93h/goth/providers/vk" "github.com/stretchr/testify/assert" ) func Test_New(t *testing.T) { t.Parallel() a := assert.New(t) provider := vkProvider() a.Equal(provider.ClientKey, os.Getenv("VK_KEY")) a.Equal(provider.Secret, os.Getenv("VK_SECRET")) a.Equal(provider.CallbackURL, "/foo") } func Test_Name(t *testing.T) { t.Parallel() a := assert.New(t) provider := vkProvider() a.Equal(provider.Name(), "vk") } func Test_SetName(t *testing.T) { t.Parallel() a := assert.New(t) provider := vkProvider() provider.SetName("foo") a.Equal(provider.Name(), "foo") } func Test_Implements_Provider(t *testing.T) { t.Parallel() a := assert.New(t) a.Implements((*goth.Provider)(nil), vkProvider()) } func Test_BeginAuth(t *testing.T) { t.Parallel() a := assert.New(t) provider := vkProvider() session, err := provider.BeginAuth("test_state") s := session.(*vk.Session) a.NoError(err) a.Contains(s.AuthURL, "oauth.vk.com/authorize") a.Contains(s.AuthURL, fmt.Sprintf("client_id=%s", os.Getenv("VK_KEY"))) a.Contains(s.AuthURL, "state=test_state") a.Contains(s.AuthURL, "scope=email") } func Test_SessionFromJSON(t *testing.T) { t.Parallel() a := assert.New(t) provider := vkProvider() s, err := provider.UnmarshalSession(`{"AuthURL":"http://vk.com/auth_url","AccessToken":"1234567890"}`) a.NoError(err) session := s.(*vk.Session) a.Equal(session.AuthURL, "http://vk.com/auth_url") a.Equal(session.AccessToken, "1234567890") } func vkProvider() *vk.Provider { return vk.New(os.Getenv("VK_KEY"), os.Getenv("VK_SECRET"), "/foo", "user") }
[ "\"VK_KEY\"", "\"VK_SECRET\"", "\"VK_KEY\"", "\"VK_KEY\"", "\"VK_SECRET\"" ]
[]
[ "VK_SECRET", "VK_KEY" ]
[]
["VK_SECRET", "VK_KEY"]
go
2
0
cmd/groot.go
/* Copyright © 2019 Amey Deshmukh Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package cmd import ( "fmt" "os" "github.com/spf13/cobra" "os/user" _ "k8s.io/client-go/plugin/pkg/client/auth" "github.com/ameydev/groot/kmap" homedir "github.com/mitchellh/go-homedir" "github.com/spf13/viper" appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" "k8s.io/client-go/kubernetes" "k8s.io/client-go/tools/clientcmd" ) var indentationCount int = 1 var indentation string var pods v1.PodList var deployments appsv1.DeploymentList var services v1.ServiceList type configs struct { Namespace string KubeConfig string } func Execute() error { return groot().Execute() } func groot() *cobra.Command { c := &configs{ Namespace: "default", KubeConfig: getKubeConfig(), } cmd := &cobra.Command{ Use: "groot", Version: "\nI am baby Groot - v0.0.1", Short: "groot is a k8s helper CLI utility tool.", Long: `This tool is used to find k8s resourses and their mappings with other k8s reources. For example: groot -n $namespace.`, PreRunE: func(cobracmd *cobra.Command, _ []string) error { // load current kube-config return initConfig(c) }, RunE: func(_ *cobra.Command, _ []string) error { return getOverView(c) }, } flags := cmd.Flags() flags.StringVar(&c.Namespace, "namespace", c.Namespace, "namespace in which we need to map k8s resources..") flags.StringVar(&c.KubeConfig, "kubeconfig", c.KubeConfig, "Any external kube config we want to use") return cmd } // initConfig reads in config file and ENV variables if set. func initConfig(c *configs) error { if c.KubeConfig != "" { // Use config file from the flag. viper.SetConfigFile(c.KubeConfig) } else { // Find home directory. home, err := homedir.Dir() if err != nil { fmt.Println(err) os.Exit(1) } // Search config in home directory with name ".groot" (without extension). viper.AddConfigPath(home) viper.SetConfigName(".groot") } viper.AutomaticEnv() // read in environment variables that match // If a config file is found, read it in. if err := viper.ReadInConfig(); err == nil { fmt.Println("Using config file:", viper.ConfigFileUsed()) } return nil } func getOverView(c *configs) error { config, err := clientcmd.BuildConfigFromFlags("", c.KubeConfig) if err != nil { return err } clientset, err := kubernetes.NewForConfig(config) if err != nil { return err } kmap.FindThemAll(clientset, &c.Namespace) return nil } func getKubeConfig() string { var kubeconfig string if envVar := os.Getenv("KUBECONFIG"); len(envVar) > 0 { kubeconfig = envVar } else { usr, err := user.Current() if err != nil { fmt.Println(err) } kubeconfig = usr.HomeDir + "/.kube/config" } return kubeconfig }
[ "\"KUBECONFIG\"" ]
[]
[ "KUBECONFIG" ]
[]
["KUBECONFIG"]
go
1
0
benchmarks/distributed/ddp/compare/compare_ddp.py
""" A simple tool to compare the performance of different impls of DistributedDataParallel on resnet50, three flavors: 1. DistributedDataParallel, which has a python wrapper and C++ core to do gradient distribution and reduction. It's current production version. 2. PythonDDP with async gradient reduction. 3. PythonDDP with synchrous gradient reduction. Example:: >>> modify configs in main func >>> python compare_ddp.py >>> Sample out: compare_ddp_sample.md """ import numpy as np import os import pickle import glob import python_ddp import torch import torch.distributed as dist import torch.multiprocessing as mp import torch.nn as nn import torch.optim as optim import torchvision.models as models from collections import OrderedDict from enum import Enum from tabulate import tabulate from torch.nn.parallel import DistributedDataParallel as DDP class DDPOption(Enum): DDP_CPP_CORE = 1 PYTHON_DDP_SYNC_REDUCTION = 2 PYTHON_DDP_ASYNC_REDUCTION = 3 class LatencyData: __slots__ = ["buffer_size_in_M", "ddp_option", "rank", "metrics"] def __init__(self, buffer_size_in_M, ddp_option, rank, metrics): self.buffer_size_in_M = buffer_size_in_M self.ddp_option = ddp_option self.rank = rank self.metrics = metrics def serialize(buffer_size_in_M, ddp_option, rank, metrics, data_dir="./tmp", ext="ddpraw"): if not os.path.exists(data_dir): print(f'{data_dir} not exist, mkdir {data_dir}') os.mkdir(data_dir) file_name = "buffer_size_{}M_rank{}_{}.{}".format( buffer_size_in_M, rank, ddp_option, ext) file_path = os.path.join(data_dir, file_name) print("Writing metrics to file: '{}'".format(file_path)) data = LatencyData(buffer_size_in_M, ddp_option, rank, metrics) with open(file_path, "wb") as f: pickle.dump(data, f, pickle.HIGHEST_PROTOCOL) print(f"Wrote metrics to '{file_path}''") def load_detailed_metrics(data_dir="./tmp", ext="ddpraw"): assert os.path.exists(data_dir) file_pattern = os.path.join(data_dir, f"*.{ext}") files = glob.glob(file_pattern) print("load_detailed_metrics found {} files".format(len(files))) buffer_size_to_metrics = OrderedDict() for file_path in files: with open(file_path, "rb") as f: data = pickle.load(f) # Add data to buffer_size_to_metrics buffer_size = data.buffer_size_in_M if buffer_size not in buffer_size_to_metrics: buffer_size_to_metrics[buffer_size] = {} metrics = buffer_size_to_metrics.get(buffer_size) assert metrics is not None metrics[data.ddp_option] = data.metrics return buffer_size_to_metrics def setup(rank, world_size): os.environ['MASTER_ADDR'] = 'localhost' os.environ['MASTER_PORT'] = '12355' # initialize the process group dist.init_process_group("gloo", rank=rank, world_size=world_size) def create_ddp_model(module, rank, pg, ddp_option, buffer_size_in_M): """Helper to create DDPModel. """ if ddp_option == DDPOption.DDP_CPP_CORE: ddp_model = DDP(module, device_ids=[rank], process_group=pg, bucket_cap_mb=buffer_size_in_M) ddp_model._set_static_graph() return ddp_model elif ddp_option == DDPOption.PYTHON_DDP_SYNC_REDUCTION: M = 2 ** 20 return python_ddp.PythonDDP(module, pg, False, buffer_size=buffer_size_in_M * M) elif ddp_option == DDPOption.PYTHON_DDP_ASYNC_REDUCTION: M = 2 ** 20 return python_ddp.PythonDDP(module, pg, True, buffer_size=buffer_size_in_M * M) else: raise NotImplementedError def run_ddp(rank, world_size, epochs, ddp_option, buffer_size_in_M, warmup_iterations=20): print(f'Invoked run_ddp rank {rank}') assert epochs > warmup_iterations # Setup print("setting up ... 
") setup(rank, world_size) torch.manual_seed(rank) torch.cuda.manual_seed(rank) device = torch.device('cuda:%d' % rank) print('setup done') # Create ResNet50 module and wrap in DDP module. pg = dist.distributed_c10d._get_default_group() model = models.resnet50().to(device) ddp_model = create_ddp_model(model, rank, pg, ddp_option, buffer_size_in_M) assert ddp_model is not None loss_fn = nn.MSELoss() optimizer = optim.SGD(ddp_model.parameters(), lr=0.001) # Container to hold: event -> list of events in milliseconds MODEL_FORWARD = "forward" MODEL_BACKWARD = "backward" metrics = {MODEL_FORWARD: [], MODEL_BACKWARD: []} for epoch in range(epochs): if epoch % 10 == 0: print(f'Epoch {epoch}/{epochs} ...') start = torch.cuda.Event(enable_timing=True) end = torch.cuda.Event(enable_timing=True) # TODO(bowangbj): Switch to real training set from ImageNet. inputs = torch.rand([32, 3, 224, 224], device=device) labels = torch.rand([32, 1000], device=device) # Forward start.record() outputs = ddp_model(inputs) loss = loss_fn(outputs, labels) end.record() torch.cuda.synchronize() if epoch >= warmup_iterations: metrics[MODEL_FORWARD].append(start.elapsed_time(end)) # Backward start.record() loss.backward() # Reduce all grad, this is needed for non-DDP_CPP_CORE since the hook # for all_reduce does not exist yet. if ddp_option != DDPOption.DDP_CPP_CORE: ddp_model.all_reduce_grads() end.record() torch.cuda.synchronize() if epoch >= warmup_iterations: metrics[MODEL_BACKWARD].append(start.elapsed_time(end)) # Optimization optimizer.step() optimizer.zero_grad() if rank == 0: print(f"\nMetrics for GPU {rank}, ddp_option={ddp_option}, buffer_size={buffer_size_in_M}M") print(f"Skipped {warmup_iterations} CUDA warmpup iterations. ") for step, elapsed_milliseconds in metrics.items(): A = np.array(elapsed_milliseconds) print(' {N} iterations, {step}, mean={mean} ms, median={median} ms, p90={p90} ms, p99={p99} ms'.format( N=len(A), step=step, mean=np.mean(A), median=np.percentile(A, 50), p90=np.percentile(A, 90), p99=np.percentile(A, 99))) # Serialize the raw data to be used to compute summary. Didn't choose to # maintain a global object holding the metrics b/c mp.spawn tries to # fork all the arguments before spawning new process thus it's infeasible # save global states in an object. serialize(buffer_size_in_M, ddp_option, rank, metrics) def append_delta(row_list, base, exp): percent = 100 * ((exp - base) / base) row_list.append(percent) def print_summary(buffer_size_to_metrics): # metrics: {ddp_option, Metrics} # Metrics: step -> [latency] for buffer_size, metrics in buffer_size_to_metrics.items(): assert DDPOption.DDP_CPP_CORE in metrics.keys() baseline = metrics.get(DDPOption.DDP_CPP_CORE) print(f"=== Summary for buffer_size: {buffer_size}M === ") for step in baseline.keys(): # step takes value from [forward, backward] # compute latency for each step into a table, each row is looks like # [option, mean, diff, mean, diff, p90, diff, p95, diff, p99, diff] data = [] baseline_latencies = baseline.get(step) assert baseline_latencies is not None A_baseline = np.array(baseline_latencies) for ddp_option, exp_metrics in metrics.items(): exp_latencies = exp_metrics.get(step) assert exp_latencies is not None A_exp = np.array(exp_latencies) # Yield option, mean, p50, p90, p95, p99 and delta. 
row = [ddp_option] row.append(np.mean(A_exp)) append_delta(row, np.mean(A_baseline), np.mean(A_exp)) for px in [50, 90, 95, 99]: base = np.percentile(A_baseline, px) exp = np.percentile(A_exp, px) row.append(exp) append_delta(row, base, exp) data.append(row) # Output buffer_size, step as a table. print(tabulate(data, headers=[f"DDP: [{step}]", "Mean", "delta%", "mean", "delta%", "p90", "delta%", "p95", "delta%%", "p99", "delta%"])) print("\n") def main(): world_size = 2 epochs = 120 # resnet50 model facts: # total_param_count = 161 # total_elements = 25557032 ~= 24.37M # param_max_elements = 2359296 ~= 2.25M # Try different bucket sizes. buffer_size_in_mbs = [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27] print("buffer_size_in_mbs: " + str(buffer_size_in_mbs)) for buffer_size_in_M in buffer_size_in_mbs: print("\n\n=== NEW EXPERIMENT: buffer_size={}M, {} epochs, world_size={} ===".format( buffer_size_in_M, epochs, world_size)) options = [ DDPOption.DDP_CPP_CORE, DDPOption.PYTHON_DDP_ASYNC_REDUCTION, DDPOption.PYTHON_DDP_SYNC_REDUCTION ] for option in options: print("Measuring option: {} ... ".format(option)) mp.spawn(run_ddp, args=(world_size, epochs, option, buffer_size_in_M), nprocs=world_size, join=True) print("\n Generating summaries ... ") buffer_size_to_metrics = load_detailed_metrics(data_dir="./tmp", ext="ddpraw") print_summary(buffer_size_to_metrics) if __name__ == "__main__" : main()
[]
[]
[ "MASTER_ADDR", "MASTER_PORT" ]
[]
["MASTER_ADDR", "MASTER_PORT"]
python
2
0
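The compare_ddp.py entry above reduces its per-iteration timings to mean/percentile rows with a percentage delta against the DDP C++-core baseline. The snippet below is a standalone sketch of just that summary arithmetic with made-up latencies; it assumes only numpy and is not the benchmark itself.

```python
import numpy as np

def delta_percent(base, exp):
    """Relative change of an experiment vs. a baseline, in percent."""
    return 100.0 * (exp - base) / base

# Made-up per-iteration latencies in milliseconds (baseline vs. experiment).
baseline = np.array([10.1, 10.4, 9.9, 10.0, 10.2, 10.3, 9.8, 10.5])
experiment = np.array([11.0, 11.2, 10.8, 11.1, 10.9, 11.3, 10.7, 11.4])

row = ["python_ddp_async", np.mean(experiment),
       delta_percent(np.mean(baseline), np.mean(experiment))]
for px in (50, 90, 95, 99):
    row.append(np.percentile(experiment, px))
    row.append(delta_percent(np.percentile(baseline, px),
                             np.percentile(experiment, px)))

print(row)  # option, mean, delta%, then p50/p90/p95/p99 each followed by its delta%
```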
tests/test_testbench_object_special.py
#!/usr/bin/env python3 # # Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit test for special object operations in the testbench.""" import json import os import unittest from testbench import rest_server class TestTestbenchObjectSpecial(unittest.TestCase): def setUp(self): rest_server.db.clear() self.client = rest_server.server.test_client() # Avoid magic buckets in the test os.environ.pop("GOOGLE_CLOUD_CPP_STORAGE_TEST_BUCKET_NAME", None) def test_object_compose(self): response = self.client.post( "/storage/v1/b", data=json.dumps({"name": "bucket-name"}) ) self.assertEqual(response.status_code, 200) payloads = { "fox": "The quick brown fox jumps over the lazy dog\n", "zebra": "How vexingly quick daft zebras jump!\n", } sources = [] for object_name, payload in payloads.items(): # Use the XML API to insert an object, as the JSON API is not yet ready. response = self.client.put( "/bucket-name/" + object_name, content_type="text/plain", data=payload, ) self.assertEqual(response.status_code, 200) # Get the metadata so we can include the metageneration in the compose request. response = self.client.get("/storage/v1/b/bucket-name/o/" + object_name) self.assertEqual(response.status_code, 200) o = json.loads(response.data) sources.append( { "name": object_name, "generation": o.get("generation"), "objectPreconditions": {"ifGenerationMatch": o.get("generation")}, } ) self.assertEqual(response.status_code, 200) response = self.client.post( "/storage/v1/b/bucket-name/o/both/compose", data=json.dumps({"sourceObjects": sources}), ) self.assertEqual(response.status_code, 200, msg=response.data) self.assertTrue( response.headers.get("content-type").startswith("application/json") ) compose_rest = json.loads(response.data) compose_rest.pop("acl") compose_rest.pop("owner") response = self.client.get("/storage/v1/b/bucket-name/o/both") self.assertEqual(response.status_code, 200) self.assertTrue( response.headers.get("content-type").startswith("application/json") ) get_rest = json.loads(response.data) self.assertEqual(get_rest, compose_rest) response = self.client.get("/bucket-name/both") self.assertEqual(response.status_code, 200) self.assertEqual( response.data.decode("utf-8"), payloads["fox"] + payloads["zebra"] ) def test_object_compose_invalid_requests(self): response = self.client.post( "/storage/v1/b", data=json.dumps({"name": "bucket-name"}) ) self.assertEqual(response.status_code, 200) response = self.client.post( "/storage/v1/b/bucket-name/o/both/compose", data=json.dumps({"invalid-sourceObjects": []}), ) self.assertEqual(response.status_code, 400) sources = [] for i in range(0, 64): sources.extend({"name": "test-only-invalid-object"}) response = self.client.post( "/storage/v1/b/bucket-name/o/both/compose", data=json.dumps({"sourceObjects": sources}), ) self.assertEqual(response.status_code, 400) response = self.client.post( "/storage/v1/b/bucket-name/o/both/compose", data=json.dumps({"sourceObjects": [{"invalid-name": "unused"}]}), ) self.assertEqual(response.status_code, 400) def 
test_object_copy(self): response = self.client.post( "/storage/v1/b", data=json.dumps({"name": "bucket-name"}) ) self.assertEqual(response.status_code, 200) payload = "The quick brown fox jumps over the lazy dog" response = self.client.put( "/bucket-name/fox", content_type="text/plain", data=payload, ) self.assertEqual(response.status_code, 200) response = self.client.post( "/storage/v1/b/bucket-name/o/fox/copyTo/b/bucket-name/o/fox2" ) self.assertEqual(response.status_code, 200, msg=response.data) self.assertTrue( response.headers.get("content-type").startswith("application/json") ) copy_rest = json.loads(response.data) copy_rest.pop("acl") copy_rest.pop("owner") response = self.client.get("/storage/v1/b/bucket-name/o/fox2") self.assertEqual(response.status_code, 200) self.assertTrue( response.headers.get("content-type").startswith("application/json") ) get_rest = json.loads(response.data) self.assertEqual(get_rest, copy_rest) response = self.client.get("/bucket-name/fox") self.assertEqual(response.status_code, 200) self.assertEqual( response.data.decode("utf-8"), "The quick brown fox jumps over the lazy dog" ) def test_object_copy_with_metadata(self): response = self.client.post( "/storage/v1/b", data=json.dumps({"name": "bucket-name"}) ) self.assertEqual(response.status_code, 200) payload = "The quick brown fox jumps over the lazy dog" response = self.client.put( "/bucket-name/fox", content_type="text/plain", data=payload, ) self.assertEqual(response.status_code, 200) metadata = {"key0": "label0"} response = self.client.post( "/storage/v1/b/bucket-name/o/fox/copyTo/b/bucket-name/o/fox2", data=json.dumps({"metadata": metadata}), ) self.assertEqual(response.status_code, 200) self.assertTrue( response.headers.get("content-type").startswith("application/json") ) copy_rest = json.loads(response.data) copy_rest.pop("acl") copy_rest.pop("owner") response = self.client.get("/storage/v1/b/bucket-name/o/fox2") self.assertEqual(response.status_code, 200) self.assertTrue( response.headers.get("content-type").startswith("application/json") ) get_rest = json.loads(response.data) self.assertEqual(get_rest, copy_rest) self.assertEqual(get_rest["metadata"], {**get_rest["metadata"], **metadata}) response = self.client.get("/bucket-name/fox2") self.assertEqual(response.status_code, 200) self.assertEqual( response.data.decode("utf-8"), "The quick brown fox jumps over the lazy dog" ) def test_object_rewrite(self): response = self.client.post( "/storage/v1/b", data=json.dumps({"name": "bucket-name"}) ) self.assertEqual(response.status_code, 200) # We need a large enough payload to make sure the first rewrite does # not complete. 
The minimum is 1 MiB payload = "The quick brown fox jumps over the lazy dog\n" * 1024 * 1024 response = self.client.put( "/bucket-name/fox", content_type="text/plain", data=payload, ) self.assertEqual(response.status_code, 200) metadata = {"key0": "label0"} response = self.client.post( "/storage/v1/b/bucket-name/o/fox/rewriteTo/b/bucket-name/o/fox2", data=json.dumps({"metadata": metadata}), ) self.assertEqual(response.status_code, 200) self.assertTrue( response.headers.get("content-type").startswith("application/json") ) rewrite_rest = json.loads(response.data) expected_fields = { "kind", "totalBytesRewritten", "objectSize", "done", "rewriteToken", } actual_fields = set(rewrite_rest.keys()) self.assertEqual(actual_fields, actual_fields | expected_fields) self.assertEqual(rewrite_rest.get("done"), False) token = rewrite_rest.get("rewriteToken") while not rewrite_rest.get("done"): response = self.client.post( "/storage/v1/b/bucket-name/o/fox/rewriteTo/b/bucket-name/o/fox2", query_string={"maxBytesRewrittenPerCall": 10, "rewriteToken": token}, data=json.dumps({"metadata": metadata}), ) self.assertEqual(response.status_code, 200, msg=response.data) self.assertTrue( response.headers.get("content-type").startswith("application/json") ) rewrite_rest = json.loads(response.data) # Once done, a rewrite returns the new object metadata self.assertIn("resource", rewrite_rest) resource = rewrite_rest.get("resource") # TODO(#27) - I do not understand why the rewrite always returns the full projection resource.pop("acl") resource.pop("owner") response = self.client.get("/storage/v1/b/bucket-name/o/fox2") self.assertEqual(response.status_code, 200) self.assertTrue( response.headers.get("content-type").startswith("application/json") ) get_rest = json.loads(response.data) self.assertEqual(get_rest, resource) self.assertEqual(get_rest["metadata"], {**get_rest["metadata"], **metadata}) response = self.client.get("/bucket-name/fox2") self.assertEqual(response.status_code, 200) self.assertEqual(len(response.data.decode("utf-8")), len(payload)) if __name__ == "__main__": unittest.main()
[]
[]
[]
[]
[]
python
0
0
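The testbench tests above all follow the same Flask pattern: build a `test_client()`, POST/GET JSON payloads, then assert on the status code, the content type, and the decoded body. Here is a toy, self-contained sketch of that pattern against a stand-in app (assuming Flask is installed); it is not the actual testbench server.

```python
import json
import unittest

from flask import Flask, jsonify, request

# Toy stand-in for the REST server under test (not the real testbench).
app = Flask(__name__)
buckets = {}

@app.route("/storage/v1/b", methods=["POST"])
def create_bucket():
    payload = json.loads(request.data)
    buckets[payload["name"]] = payload
    return jsonify(payload)

class TestClientPattern(unittest.TestCase):
    def setUp(self):
        buckets.clear()
        self.client = app.test_client()

    def test_create_bucket(self):
        response = self.client.post(
            "/storage/v1/b", data=json.dumps({"name": "bucket-name"})
        )
        self.assertEqual(response.status_code, 200)
        self.assertTrue(
            response.headers.get("content-type").startswith("application/json")
        )
        self.assertEqual(json.loads(response.data)["name"], "bucket-name")

if __name__ == "__main__":
    unittest.main()
```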
manage.py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'score.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[]
[]
[]
[]
[]
python
0
0
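manage.py's use of `os.environ.setdefault` is worth spelling out: the hard-coded settings module is only a fallback, and an already-exported `DJANGO_SETTINGS_MODULE` always wins. A small sketch of that behavior (the `score.settings_prod` value is hypothetical):

```python
import os

# setdefault only fills the variable in when it is absent; an exported value wins.
os.environ.pop("DJANGO_SETTINGS_MODULE", None)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "score.settings")
print(os.environ["DJANGO_SETTINGS_MODULE"])   # score.settings

os.environ["DJANGO_SETTINGS_MODULE"] = "score.settings_prod"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "score.settings")
print(os.environ["DJANGO_SETTINGS_MODULE"])   # score.settings_prod (left untouched)
```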
ecscale.py
import boto3 import datetime from optparse import OptionParser import os SCALE_IN_CPU_TH = int(os.getenv('SCALE_IN_CPU_TH', 60)) SCALE_IN_MEM_TH = int(os.getenv('SCALE_IN_MEM_TH', 60)) FUTURE_CPU_TH = int(os.getenv('FUTURE_CPU_TH', 70)) FUTURE_MEM_TH = int(os.getenv('FUTURE_MEM_TH', 70)) ASG_PREFIX = os.getenv('ASG_PREFIX', '') ASG_SUFFIX = os.getenv('ASG_SUFFIX', '') DRAIN_ALL_EMPTY_INSTANCES = bool(int(os.getenv('DRAIN_ALL_EMPTY_INSTANCES', 0))) ECS_AVOID_STR = os.getenv('ECS_AVOID_STR', 'awseb') logline = {} def clusters(ecsClient): # Returns an iterable list of cluster names response = ecsClient.list_clusters() if not response['clusterArns']: print 'No ECS cluster found' return return [cluster for cluster in response['clusterArns'] if ECS_AVOID_STR not in cluster] def cluster_metric(cwClient, clusterName, metricName): # Return cluster average per minute cloudwatch metric try: response = cwClient.get_metric_statistics( Namespace='AWS/ECS', MetricName=metricName, Dimensions=[ { 'Name': 'ClusterName', 'Value': clusterName }, ], StartTime=datetime.datetime.utcnow() - datetime.timedelta(seconds=120), EndTime=datetime.datetime.utcnow(), Period=60, Statistics=['Average'] ) return response['Datapoints'][0]['Average'] except Exception: logger({'ClusterMetricError': 'Could not retrieve {} for {}'.format(metricName, clusterName)}) def find_asg(clusterName, asgData): # Returns auto scaling group resourceId based on name for asg in asgData['AutoScalingGroups']: for tag in asg['Tags']: if tag['Key'] == 'Name': if tag['Value'].split(' ')[0] == '{}{}{}'.format(ASG_PREFIX, clusterName, ASG_SUFFIX): return tag['ResourceId'] else: logger({'ASGError': 'Auto scaling group for {} not found'.format(clusterName)}) def ec2_avg_cpu_utilization(clusterName, asgData, cwclient): asg = find_asg(clusterName, asgData) response = cwclient.get_metric_statistics( Namespace='AWS/EC2', MetricName='CPUUtilization', Dimensions=[ { 'Name': 'AutoScalingGroupName', 'Value': asg }, ], StartTime=datetime.datetime.utcnow() - datetime.timedelta(seconds=120), EndTime=datetime.datetime.utcnow(), Period=60, Statistics=['Average'] ) print '*** Average CPU: {}'.format(response['Datapoints'][0]['Average']) return response['Datapoints'][0]['Average'] def asg_scalable(clusterName, asgData, asgClient, activeInstanceCount): asg = find_asg(clusterName, asgData) if asg is None: print '{}: ASG not found'.format(clusterName) return False for sg in asgData['AutoScalingGroups']: if sg['AutoScalingGroupName'] == asg: if activeInstanceCount <= sg['MinSize']: print '{}: ASG is at ({}) or below MinSize ({})'.format(clusterName, activeInstanceCount, sg['MinSize']) return False else: return True print '{}: ASG not found during loop'.format(clusterName) return False def empty_instances(clusterArn, activeContainerDescribed): # returns a object of empty instances in cluster instances = [] empty_instances = {} for inst in activeContainerDescribed['containerInstances']: if inst['runningTasksCount'] == 0 and inst['pendingTasksCount'] == 0: empty_instances.update({inst['ec2InstanceId']: inst['containerInstanceArn']}) return empty_instances def draining_instances(clusterArn, drainingContainerDescribed): # returns an object of draining instances in cluster instances = [] draining_instances = {} for inst in drainingContainerDescribed['containerInstances']: draining_instances.update({inst['ec2InstanceId']: inst['containerInstanceArn']}) return draining_instances def terminate_decrease(instanceId, asgClient): # terminates an instance and decreases the desired 
number in its auto scaling group # [ only if desired > minimum ] try: response = asgClient.terminate_instance_in_auto_scaling_group( InstanceId=instanceId, ShouldDecrementDesiredCapacity=True ) logger({'Action': 'Terminate', 'Message': response['Activity']['Cause']}) except Exception as e: logger({'Error': e}) def scale_in_instance(clusterArn, activeContainerDescribed): # iterates over hosts, finds the least utilized: # The most under-utilized memory and minimum running tasks # return instance obj {instanceId, runningInstances, containerinstanceArn} instanceToScale = {'id': '', 'running': 0, 'freemem': 0} for inst in activeContainerDescribed['containerInstances']: for res in inst['remainingResources']: if res['name'] == 'MEMORY': if res['integerValue'] > instanceToScale['freemem']: instanceToScale['freemem'] = res['integerValue'] instanceToScale['id'] = inst['ec2InstanceId'] instanceToScale['running'] = inst['runningTasksCount'] instanceToScale['containerInstanceArn'] = inst['containerInstanceArn'] elif res['integerValue'] == instanceToScale['freemem']: # Two instances with same free memory level, choose the one with less running tasks if inst['runningTasksCount'] < instanceToScale['running']: instanceToScale['freemem'] = res['integerValue'] instanceToScale['id'] = inst['ec2InstanceId'] instanceToScale['running'] = inst['runningTasksCount'] instanceToScale['containerInstanceArn'] = inst['containerInstanceArn'] break logger({'Scale candidate': '{} with free {}'.format(instanceToScale['id'], instanceToScale['freemem'])}) return instanceToScale def running_tasks(instanceId, containerDescribed): # return a number of running tasks on a given ecs host for inst in containerDescribed['containerInstances']: if inst['ec2InstanceId'] == instanceId: return int(inst['runningTasksCount']) + int(inst['pendingTasksCount']) def drain_instance(containerInstanceId, ecsClient, clusterArn): # put a given ec2 into draining state try: response = ecsClient.update_container_instances_state( cluster=clusterArn, containerInstances=[containerInstanceId], status='DRAINING' ) except Exception as e: logger({'DrainingError': e}) def future_metric(activeInstanceCount, metricValue, metricName): # If the cluster were to scale in an instance, calculate the effect on the given metric value # return metric_value*active_instance_count / active_instance_count-1 if activeInstanceCount > 1: futureValue = (metricValue*activeInstanceCount) / (activeInstanceCount-1) else: return 100 print '*** {}: Current: {} | Future : {}'.format(metricName, metricValue, futureValue) return futureValue def retrieve_cluster_data(ecsClient, cwClient, asgClient, cluster): clusterName = cluster.split('/')[1] print '*** {} ***'.format(clusterName) activeContainerInstances = ecsClient.list_container_instances(cluster=cluster, status='ACTIVE') clusterCpuReservation = cluster_metric(cwClient, clusterName, 'CPUReservation') clusterMemReservation = cluster_metric(cwClient, clusterName, 'MemoryReservation') if activeContainerInstances['containerInstanceArns']: activeContainerDescribed = ecsClient.describe_container_instances(cluster=cluster, containerInstances=activeContainerInstances['containerInstanceArns']) else: print 'No active instances in cluster' return False drainingContainerInstances = ecsClient.list_container_instances(cluster=cluster, status='DRAINING') if drainingContainerInstances['containerInstanceArns']: drainingContainerDescribed = ecsClient.describe_container_instances(cluster=cluster, 
containerInstances=drainingContainerInstances['containerInstanceArns']) drainingInstances = draining_instances(cluster, drainingContainerDescribed) else: drainingInstances = {} drainingContainerDescribed = [] emptyInstances = empty_instances(cluster, activeContainerDescribed) dataObj = { 'clusterName': clusterName, 'clusterCpuReservation': clusterCpuReservation, 'clusterMemReservation': clusterMemReservation, 'activeContainerDescribed': activeContainerDescribed, 'drainingInstances': drainingInstances, 'emptyInstances': emptyInstances, 'drainingContainerDescribed': drainingContainerDescribed } return dataObj def logger(entry, action='log'): # print log as one-line json from cloudwatch integration if action == 'log': global logline logline.update(entry) elif action == 'print': print logline def main(run='normal'): ecsClient = boto3.client('ecs') cwClient = boto3.client('cloudwatch') asgClient = boto3.client('autoscaling') asgData = asgClient.describe_auto_scaling_groups() clusterList = clusters(ecsClient) for cluster in clusterList: ########### Cluster data retrival ########## clusterData = retrieve_cluster_data(ecsClient, cwClient, asgClient, cluster) if not clusterData: continue else: clusterName = clusterData['clusterName'] clusterCpuReservation = clusterData['clusterCpuReservation'] clusterMemReservation = clusterData['clusterMemReservation'] activeContainerDescribed = clusterData['activeContainerDescribed'] activeInstanceCount = len(activeContainerDescribed['containerInstances']) drainingInstances = clusterData['drainingInstances'] emptyInstances = clusterData['emptyInstances'] ########## Cluster scaling rules ########### if drainingInstances.keys(): # There are draining instsnces to terminate for instanceId, containerInstId in drainingInstances.iteritems(): if not running_tasks(instanceId, clusterData['drainingContainerDescribed']): if run == 'dry': print 'Would have terminated {}'.format(instanceId) else: print 'Terminating draining instance with no containers {}'.format(instanceId) terminate_decrease(instanceId, asgClient) else: print 'Draining instance not empty' if not asg_scalable(clusterName, asgData, asgClient, activeInstanceCount): continue if (clusterCpuReservation < FUTURE_CPU_TH and clusterMemReservation < FUTURE_MEM_TH and future_metric(activeInstanceCount, clusterCpuReservation, 'CPU') < FUTURE_CPU_TH and future_metric(activeInstanceCount, clusterMemReservation, 'MEM') < FUTURE_MEM_TH): # Future reservation levels allow scale if DRAIN_ALL_EMPTY_INSTANCES and emptyInstances.keys(): # There are empty instances for instanceId, containerInstId in emptyInstances.iteritems(): if run == 'dry': print 'Would have drained {}'.format(instanceId) else: print 'Draining empty instance {}'.format(instanceId) drain_instance(containerInstId, ecsClient, cluster) if (clusterCpuReservation < SCALE_IN_CPU_TH and clusterMemReservation < SCALE_IN_MEM_TH): # Cluster reservation level requires scale if (ec2_avg_cpu_utilization(clusterName, asgData, cwClient) < SCALE_IN_CPU_TH): instanceToScale = scale_in_instance(cluster, activeContainerDescribed)['containerInstanceArn'] if run == 'dry': print 'Would have scaled {}'.format(instanceToScale) else: print 'Draining least utilized instanced {}'.format(instanceToScale) drain_instance(instanceToScale, ecsClient, cluster) else: print 'CPU higher than TH, cannot scale' print '***' def lambda_handler(event, context): parser = OptionParser() parser.add_option("-a", "--access-key", dest="AWS_ACCESS_KEY_ID", help="Provide AWS access key") 
parser.add_option("-s", "--secret-key", dest="AWS_SECRET_ACCESS_KEY", help="Provide AWS secret key") parser.add_option("-d", "--dry-run", action="store_true", dest="DRY_RUN", default=False, help="Dry run the process") (options, args) = parser.parse_args() if options.AWS_ACCESS_KEY_ID and options.AWS_SECRET_ACCESS_KEY: os.environ['AWS_ACCESS_KEY_ID'] = options.AWS_ACCESS_KEY_ID os.environ['AWS_SECRET_ACCESS_KEY'] = options.AWS_SECRET_ACCESS_KEY elif options.AWS_ACCESS_KEY_ID or options.AWS_SECRET_ACCESS_KEY: print 'AWS key or secret are missing' runType = 'dry' if options.DRY_RUN else 'normal' main(run=runType) if __name__ == '__main__': # lambda_handler({}, '') main()
[]
[]
[ "ASG_SUFFIX", "AWS_SECRET_ACCESS_KEY", "ASG_PREFIX", "FUTURE_MEM_TH", "FUTURE_CPU_TH", "DRAIN_ALL_EMPTY_INSTANCES", "ECS_AVOID_STR", "AWS_ACCESS_KEY_ID", "SCALE_IN_MEM_TH", "SCALE_IN_CPU_TH" ]
[]
["ASG_SUFFIX", "AWS_SECRET_ACCESS_KEY", "ASG_PREFIX", "FUTURE_MEM_TH", "FUTURE_CPU_TH", "DRAIN_ALL_EMPTY_INSTANCES", "ECS_AVOID_STR", "AWS_ACCESS_KEY_ID", "SCALE_IN_MEM_TH", "SCALE_IN_CPU_TH"]
python
10
0
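The core scale-in guard in ecscale.py is the `future_metric` projection: what the reservation level would become if one instance were drained from the cluster. A standalone restatement of that formula with made-up numbers, kept separate from the script itself:

```python
def future_metric(active_instance_count, metric_value):
    """Projected reservation level if one instance were removed from the cluster."""
    if active_instance_count <= 1:
        return 100  # removing the only instance would always 'fill' the cluster
    return (metric_value * active_instance_count) / (active_instance_count - 1)

# 4 instances at 48% memory reservation project to 64% after scaling one in,
# so a 70% future threshold would still allow the scale-in.
print(future_metric(4, 48))   # 64.0
print(future_metric(2, 60))   # 120.0 -> over threshold, keep both instances
```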
contrib/util.go
package contrib import ( "os/exec" "sync" "os" "io" "bufio" "strings" "crypto/sha512" "encoding/hex" "context" "time" "fmt" "log" "path/filepath" "io/ioutil" "github.com/google/zoekt" "github.com/google/zoekt/query" "github.com/google/zoekt/shards" "github.com/google/zoekt/build" "go.uber.org/automaxprocs/maxprocs" ) type execOutputProcessor func (proc *exec.Cmd, stdout, stderr io.ReadCloser) error type execLinesProcessor func (line string) type execBytesProcessor func (stdout io.ReadCloser) var ( DEBUG_ON bool ) func init() { DEBUG_ON = os.Getenv("ZOEKT_DEBUG") != "" } func PrintDebugCommand(cmd string) { if !DEBUG_ON { return } log.Println(cmd) } func File2Lines(filename string, fn execLinesProcessor) error { f, err := os.Open(filename) if err != nil { return err } defer f.Close() scanner := bufio.NewScanner(f) for scanner.Scan() { fn(scanner.Text()) } if err = scanner.Err(); err != nil { return err } return nil } func Exec2Lines(cmd string, fn execLinesProcessor) error { return doExec(cmd, func (proc *exec.Cmd, stdout, stderr io.ReadCloser) error { if err := proc.Start(); err != nil { return err } listener := &sync.WaitGroup{} listener.Add(2) go watchTextOutput(proc, listener, stdout, fn) go watchTextOutput(proc, listener, stderr, fn) listener.Wait() return proc.Wait() }) } func watchTextOutput(proc *exec.Cmd, listener *sync.WaitGroup, stream io.ReadCloser, fn execLinesProcessor) { defer listener.Done() if fn == nil { return } scanner := bufio.NewScanner(stream) scanner.Split(bufio.ScanLines) for scanner.Scan() { m := scanner.Text() fn(m) } } func Exec2Bytes(cmd string, fn execBytesProcessor) error { return doExec(cmd, func (proc *exec.Cmd, stdout, _stderr io.ReadCloser) error { if err := proc.Start(); err != nil { return err } listener := &sync.WaitGroup{} listener.Add(1) go watchByteOutput(proc, listener, stdout, fn) listener.Wait() return proc.Wait() }) } func watchByteOutput(proc *exec.Cmd, listener *sync.WaitGroup, stream io.ReadCloser, fn execBytesProcessor) { defer listener.Done() if fn == nil { return } fn(stream) } func doExec(cmd string, fn execOutputProcessor) error { // TEST=1 AND=2 ls -a -l // ^ ^ ^ ^--^-- args // | | \--> cmd // \------\-----> env argv := strings.Fields(cmd) cmdIndex := 0 for i, value := range argv { if !strings.Contains(value, "=") { break } cmdIndex = i + 1 } bin := argv[cmdIndex] proc := exec.Command(bin, argv[cmdIndex+1:]...) proc.Env = append(os.Environ(), argv[0:cmdIndex]...) 
stdout, err := proc.StdoutPipe() if err != nil { stdout = nil } stderr, err := proc.StderrPipe() if err != nil { stderr = nil } return fn(proc, stdout, stderr) } const BINARY_CHECK_BUF = 4 * 1024 * 1204 func IsBinaryFile(filepath string) (bool, error) { f, err := os.Open(filepath) if err != nil { return true, err } defer f.Close() buf := make([]byte, BINARY_CHECK_BUF /* 4 MB */) n, err := f.Read(buf) if err != nil { return true, err } if n < BINARY_CHECK_BUF { buf = buf[0:n] } text := string(buf) return strings.Contains(text, "\x00"), nil } func IsEmptyFolder(filepath string) (bool, error) { f, err := os.Open(filepath) if err != nil { return true, err } defer f.Close() list, err := f.Readdir(1) if err != nil { return true, err } return len(list) == 0, nil } func IoHash(stream io.ReadCloser) (string, error) { h := sha512.New() if _, err := io.Copy(h, stream); err != nil { return "", err } return hex.EncodeToString(h.Sum(nil)), nil } func FileHash(filepath string) (string, error) { f, err := os.Open(filepath) if err != nil { return "", err } defer f.Close() h := sha512.New() if _, err = io.Copy(h, f); err != nil { return "", err } return hex.EncodeToString(h.Sum(nil)), nil } func IoLen(stream io.ReadCloser) (int64, error) { buf := make([]byte, 1024 * 1204 * 1) var L int64 L = 0 n, err := stream.Read(buf) if err != nil { return -1, err } L += int64(n) for n >= 1024 * 1024 * 1 { n, err = stream.Read(buf) if err != nil { return -1, err } L += int64(n) } return L, nil } func FileLen(filepath string) (int64, error) { info, err := os.Stat(filepath) if err != nil { return -1, err } return info.Size(), nil } func PrepareDirectory(dirpath string) error { fileinfo, err := os.Stat(dirpath) if os.IsNotExist(err) { return os.MkdirAll(dirpath, 0755) } else if err != nil { return err } else if !fileinfo.IsDir() { return fmt.Errorf("%s has been used as a normal file not a directory", dirpath) } return nil } func Search(indexPath string, ctx context.Context, q string, num int) (*zoekt.SearchResult, error) { empty, err := IsEmptyFolder(indexPath) if err != nil { return nil, err } if empty { return nil, fmt.Errorf("invalid index path") } PrintDebugCommand(fmt.Sprintf("search in '%s'", indexPath)) searcher, err := shards.NewDirectorySearcher(indexPath) if err != nil { return nil, err } defer searcher.Close() Q, err := query.Parse(q) sOpts := zoekt.SearchOptions{ MaxWallTime: 10 * time.Second, } sOpts.SetDefaults() // limit doc number in case there are too many // ref: web/server.go if plan, err := searcher.Search(ctx, Q, &zoekt.SearchOptions{EstimateDocCount: true}); err != nil { return nil, err } else if numdocs := plan.ShardFilesConsidered; numdocs > 10000 { // 10k docs, top 50 -> max match/important = 275/4 sOpts.ShardMaxMatchCount = num*5 + (5*num)/(numdocs/1000) sOpts.ShardMaxImportantMatch = num/20 + num/(numdocs/500) } else { n := numdocs + num*100 sOpts.ShardMaxImportantMatch = n sOpts.ShardMaxMatchCount = n sOpts.TotalMaxMatchCount = n } sOpts.MaxDocDisplayCount = num // ref: api.go // sres.Files -> f.LineMatches // f.Language, f.Branches, string(f.Checksum), f.Filename, f.Repository // f.Version // m.LineNumber, m.Line, m.LineFragments -> x // x.LineOffset, x.MatchLength sres, err := searcher.Search(ctx, Q, &sOpts) if err != nil { return nil, err } return sres, nil } // ref: cmd/zoekt-index/main.go type fileInfo struct { name string size int64 } type fileAggregator struct { ignoreDirs map[string]struct{} sizeMax int64 sink chan fileInfo } func (a *fileAggregator) add(path string, info os.FileInfo, err 
error) error { if err != nil { return err } if info.IsDir() { base := filepath.Base(path) if _, ok := a.ignoreDirs[base]; ok { return filepath.SkipDir } } if info.Mode().IsRegular() { a.sink <- fileInfo{path, info.Size()} } return nil } func Index(indexPath, sourcePath string, ignoreDirs []string) error { maxprocs.Set() opts := build.Options{} ignoreDirMap := map[string]struct{}{} for _, d := range ignoreDirs { d = strings.TrimSpace(d) if d != "" { ignoreDirMap[d] = struct{}{} } } opts.SetDefaults() sourcePath, err := filepath.Abs(filepath.Clean(sourcePath)) if err != nil { return err } is, err := IsEmptyFolder(sourcePath) if err != nil { return err } if is { return fmt.Errorf("no file for indexing") } opts.IndexDir = indexPath opts.RepositoryDescription.Source = sourcePath opts.RepositoryDescription.Name = filepath.Base(sourcePath) builder, err := build.NewBuilder(opts) if err != nil { return err } defer builder.Finish() comm := make(chan fileInfo, 100) agg := fileAggregator{ ignoreDirs: ignoreDirMap, sink: comm, sizeMax: int64(opts.SizeMax), } go func() { if err := filepath.Walk(sourcePath, agg.add); err != nil { log.Fatal(err) } close(comm) }() pathPrefix := sourcePath + string(filepath.Separator) for f := range comm { displayName := strings.TrimPrefix(f.name, pathPrefix) if f.size > int64(opts.SizeMax) && !opts.IgnoreSizeMax(displayName) { builder.Add(zoekt.Document{ Name: displayName, SkipReason: fmt.Sprintf("document size %d larger than limit %d", f.size, opts.SizeMax), }) continue } content, err := ioutil.ReadFile(f.name) if err != nil { return err } builder.AddFile(displayName, content) } return builder.Finish() }
[ "\"ZOEKT_DEBUG\"" ]
[]
[ "ZOEKT_DEBUG" ]
[]
["ZOEKT_DEBUG"]
go
1
0
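Note: the bracketed lists that follow each source blob appear to record the environment variables the file reads, first as raw quoted literals and then as a deduplicated name list, followed by the language tag and counts (here ZOEKT_DEBUG for a Go file). The sketch below shows one way such a constant list could be derived from a Go blob; the regex-based rule and the helper name extractGetenvConsts are illustrative assumptions, not the pipeline that actually produced these rows.

package main

import (
	"fmt"
	"regexp"
	"sort"
)

// getenvConst matches os.Getenv calls whose argument is a string literal,
// e.g. os.Getenv("ZOEKT_DEBUG"); dynamically built names are not captured.
var getenvConst = regexp.MustCompile(`os\.Getenv\("([^"]+)"\)`)

// extractGetenvConsts returns the deduplicated, sorted constant environment
// variable names referenced in a Go source blob.
func extractGetenvConsts(src string) []string {
	seen := map[string]struct{}{}
	for _, m := range getenvConst.FindAllStringSubmatch(src, -1) {
		seen[m[1]] = struct{}{}
	}
	names := make([]string, 0, len(seen))
	for n := range seen {
		names = append(names, n)
	}
	sort.Strings(names)
	return names
}

func main() {
	src := `debug := os.Getenv("ZOEKT_DEBUG") != ""`
	fmt.Println(extractGetenvConsts(src)) // [ZOEKT_DEBUG]
}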
vendor/github.com/joyent/triton-go/examples/storage/get_object/main.go
// // Copyright (c) 2018, Joyent, Inc. All rights reserved. // // This Source Code Form is subject to the terms of the Mozilla Public // License, v. 2.0. If a copy of the MPL was not distributed with this // file, You can obtain one at http://mozilla.org/MPL/2.0/. // package main import ( "context" "fmt" "io/ioutil" "log" "os" "encoding/pem" triton "github.com/joyent/triton-go" "github.com/joyent/triton-go/authentication" "github.com/joyent/triton-go/storage" ) // This file stored in Manta is used in the example below. const path = "/stor/books/dracula.txt" func main() { var ( signer authentication.Signer err error keyID = os.Getenv("MANTA_KEY_ID") accountName = os.Getenv("MANTA_USER") keyMaterial = os.Getenv("MANTA_KEY_MATERIAL") userName = os.Getenv("TRITON_USER") ) if keyMaterial == "" { input := authentication.SSHAgentSignerInput{ KeyID: keyID, AccountName: accountName, Username: userName, } signer, err = authentication.NewSSHAgentSigner(input) if err != nil { log.Fatalf("error creating SSH agent signer: %v", err) } } else { var keyBytes []byte if _, err = os.Stat(keyMaterial); err == nil { keyBytes, err = ioutil.ReadFile(keyMaterial) if err != nil { log.Fatalf("error reading key material from %q: %v", keyMaterial, err) } block, _ := pem.Decode(keyBytes) if block == nil { log.Fatalf( "failed to read key material %q: no key found", keyMaterial) } if block.Headers["Proc-Type"] == "4,ENCRYPTED" { log.Fatalf("failed to read key %q: password protected keys are\n"+ "not currently supported, decrypt key prior to use", keyMaterial) } } else { keyBytes = []byte(keyMaterial) } input := authentication.PrivateKeySignerInput{ KeyID: keyID, PrivateKeyMaterial: keyBytes, AccountName: accountName, Username: userName, } signer, err = authentication.NewPrivateKeySigner(input) if err != nil { log.Fatalf("error creating SSH private key signer: %v", err) } } config := &triton.ClientConfig{ MantaURL: os.Getenv("MANTA_URL"), AccountName: accountName, Username: userName, Signers: []authentication.Signer{signer}, } client, err := storage.NewClient(config) if err != nil { log.Fatalf("failed to init storage client: %v", err) } ctx := context.Background() info, err := client.Objects().GetInfo(ctx, &storage.GetInfoInput{ ObjectPath: path, }) if err != nil { fmt.Printf("could not find %q\n", path) return } fmt.Println("--- HEAD ---") fmt.Printf("Content-Length: %d\n", info.ContentLength) fmt.Printf("Content-MD5: %s\n", info.ContentMD5) fmt.Printf("Content-Type: %s\n", info.ContentType) fmt.Printf("ETag: %s\n", info.ETag) fmt.Printf("Date-Modified: %s\n", info.LastModified.String()) ctx = context.Background() isDir, err := client.Objects().IsDir(ctx, path) if err != nil { log.Fatalf("failed to detect directory %q: %v\n", path, err) return } if isDir { fmt.Printf("%q is a directory\n", path) } else { fmt.Printf("%q is a file\n", path) } ctx = context.Background() obj, err := client.Objects().Get(ctx, &storage.GetObjectInput{ ObjectPath: path, }) if err != nil { log.Fatalf("failed to get %q: %v", path, err) } body, err := ioutil.ReadAll(obj.ObjectReader) if err != nil { log.Fatalf("failed to read response body: %v", err) } defer obj.ObjectReader.Close() fmt.Println("--- GET ---") fmt.Printf("Content-Length: %d\n", obj.ContentLength) fmt.Printf("Content-MD5: %s\n", obj.ContentMD5) fmt.Printf("Content-Type: %s\n", obj.ContentType) fmt.Printf("ETag: %s\n", obj.ETag) fmt.Printf("Date-Modified: %s\n", obj.LastModified.String()) fmt.Printf("Length: %d\n", len(body)) }
[ "\"MANTA_KEY_ID\"", "\"MANTA_USER\"", "\"MANTA_KEY_MATERIAL\"", "\"TRITON_USER\"", "\"MANTA_URL\"" ]
[]
[ "TRITON_USER", "MANTA_KEY_ID", "MANTA_URL", "MANTA_KEY_MATERIAL", "MANTA_USER" ]
[]
["TRITON_USER", "MANTA_KEY_ID", "MANTA_URL", "MANTA_KEY_MATERIAL", "MANTA_USER"]
go
5
0
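Note: the Manta example above accepts key material either as a path to a key file or as inline PEM, and it rejects password-protected keys by checking the PEM Proc-Type header before building a signer. A minimal standalone sketch of that header check follows; the helper name isEncryptedPEM and the synthetic key block are ours, for illustration only.

package main

import (
	"encoding/pem"
	"errors"
	"fmt"
)

// isEncryptedPEM reports whether the first PEM block in keyBytes carries the
// legacy "Proc-Type: 4,ENCRYPTED" header used by password-protected keys.
func isEncryptedPEM(keyBytes []byte) (bool, error) {
	block, _ := pem.Decode(keyBytes)
	if block == nil {
		return false, errors.New("no PEM block found in key material")
	}
	return block.Headers["Proc-Type"] == "4,ENCRYPTED", nil
}

func main() {
	key := []byte("-----BEGIN RSA PRIVATE KEY-----\n" +
		"Proc-Type: 4,ENCRYPTED\n" +
		"DEK-Info: AES-128-CBC,00112233445566778899AABBCCDDEEFF\n" +
		"\n" +
		"AAAA\n" +
		"-----END RSA PRIVATE KEY-----\n")
	encrypted, err := isEncryptedPEM(key)
	fmt.Println(encrypted, err) // true <nil>
}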
cmd/byctl/sign_test.go
package main import ( "bytes" "errors" "fmt" "io" "math/rand" "os" "testing" "time" "github.com/google/uuid" "go.beyondstorage.io/v5/pkg/randbytes" "go.beyondstorage.io/v5/services" "go.beyondstorage.io/v5/types" ) func getSignTestService(s string) string { if s != "" { s += "/" } return fmt.Sprintf(os.Getenv("BEYOND_CTL_TEST_SERVICE"), s) } func setupSign(t *testing.T) (base, path string) { store, err := services.NewStoragerFromString(getSignTestService("")) if err != nil { t.Fatal(err) } base = uuid.NewString() path = uuid.NewString() rand.Seed(time.Now().Unix()) // Limit the content under 1MB. size := rand.Intn(1024 * 1024) bs := make([]byte, size) _, err = io.ReadFull(randbytes.NewRand(), bs) if err != nil { t.Fatal(err) } _, err = store.Write(fmt.Sprintf("%s/%s", base, path), bytes.NewReader(bs), int64(size)) if err != nil { t.Fatal(err) } err = os.Setenv( fmt.Sprintf("BEYOND_CTL_PROFILE_%s", base), getMvTestService(base), ) if err != nil { t.Fatal(err) } return base, path } func tearDownSign(t *testing.T, base string) { store, err := services.NewStoragerFromString(getSignTestService("")) if err != nil { t.Fatal(err) } it, err := store.List(base) if err != nil { t.Fatal(err) } for { o, err := it.Next() if err != nil && errors.Is(err, types.IterateDone) { break } if err != nil { t.Fatal(err) } err = store.Delete(o.Path) if err != nil { t.Fatal(err) } } err = os.Unsetenv(fmt.Sprintf("BEYOND_CTL_PROFILE_%s", base)) if err != nil { t.Fatal(err) } } func TestSign(t *testing.T) { if os.Getenv("BEYOND_CTL_INTEGRATION_TEST") != "on" { t.Skipf("BEYOND_CTL_INTEGRATION_TEST is not 'on', skipped") } base, path := setupSign(t) defer tearDownSign(t, base) err := app.Run([]string{ "byctl", "sign", fmt.Sprintf("%s:%s", base, path), }) if err != nil { t.Error(err) } } func TestSignViaExpire(t *testing.T) { if os.Getenv("BEYOND_CTL_INTEGRATION_TEST") != "on" { t.Skipf("BEYOND_CTL_INTEGRATION_TEST is not 'on', skipped") } base, path := setupSign(t) defer tearDownSign(t, base) // Set the expire time to 150 seconds. err := app.Run([]string{ "byctl", "sign", fmt.Sprintf("--expire=%d", 150), fmt.Sprintf("%s:%s", base, path), }) if err != nil { t.Error(err) } }
[ "\"BEYOND_CTL_TEST_SERVICE\"", "\"BEYOND_CTL_INTEGRATION_TEST\"", "\"BEYOND_CTL_INTEGRATION_TEST\"" ]
[]
[ "BEYOND_CTL_INTEGRATION_TEST", "BEYOND_CTL_TEST_SERVICE" ]
[]
["BEYOND_CTL_INTEGRATION_TEST", "BEYOND_CTL_TEST_SERVICE"]
go
2
0
transform/binary-plugin/binary_plugin_test.go
package binary_plugin import ( "encoding/json" "fmt" "os" "os/exec" "reflect" "testing" "github.com/konveyor/crane-lib/transform" "github.com/sirupsen/logrus" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) type fakeCommandRunner struct { stdout, stderr []byte errorRunningMetadata, errorRunningCommand error metadataStdout, metadataStderr []byte } func (f *fakeCommandRunner) Run(_ *unstructured.Unstructured, _ logrus.FieldLogger) ([]byte, []byte, error) { return f.stdout, f.stderr, f.errorRunningCommand } func (f *fakeCommandRunner) Metadata(_ logrus.FieldLogger) ([]byte, []byte, error) { return f.metadataStdout, f.metadataStderr, f.errorRunningMetadata } // TestShellMetadataSuccess is a method that is called as a substitute for a shell command, // the GO_TEST_PROCESS flag ensures that if it is called as part of the test suite, it is // skipped. func TestShellMetadataSuccess(t *testing.T) { if os.Getenv("GO_TEST_PROCESS") != "1" { return } var s string _, err := fmt.Scanln(&s) if err != nil { os.Exit(1) } if s != `{}` { os.Exit(1) } //TODO: Validate stdin is correct. res, err := json.Marshal(transform.PluginMetadata{ Name: "fakeShellMetadata", Version: "v1", RequestVersion: []transform.Version{transform.V1}, ResponseVersion: []transform.Version{transform.V1}, OptionalFields: []string{}, }) if err != nil { fmt.Fprint(os.Stderr, err.Error()) os.Exit(1) } fmt.Fprint(os.Stdout, string(res)) os.Exit(0) } func TestShellMetadataFailure(t *testing.T) { if os.Getenv("GO_TEST_PROCESS") != "1" { return } os.Exit(1) } func TestShellMetadataPluginFailure(t *testing.T) { if os.Getenv("GO_TEST_PROCESS") != "1" { return } fmt.Fprint(os.Stderr, "Testing failure") os.Exit(1) } func TestShellMetadataInvalid(t *testing.T) { if os.Getenv("GO_TEST_PROCESS") != "1" { return } fmt.Fprint(os.Stdout, "invalid json") os.Exit(0) } func TestNewBinaryPlugin(t *testing.T) { tests := []struct { name string want transform.PluginMetadata wantErr bool cliContext execContext }{ { name: "ValidStdoutNoStderr", want: transform.PluginMetadata{ Name: "fakeShellMetadata", Version: "v1", RequestVersion: []transform.Version{transform.V1}, ResponseVersion: []transform.Version{transform.V1}, }, cliContext: func(name string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestShellMetadataSuccess", "--", name} cs = append(cs, args...) cmd := exec.Command(os.Args[0], cs...) cmd.Env = []string{"GO_TEST_PROCESS=1"} return cmd }, wantErr: false, }, { name: "InValidStdoutNoStderr", cliContext: func(name string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestShellMetadataInvalid", "--", name} cs = append(cs, args...) cmd := exec.Command(os.Args[0], cs...) cmd.Env = []string{"GO_TEST_PROCESS=1"} return cmd }, wantErr: true, }, { name: "PluginFailure", cliContext: func(name string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestShellMetadataPluginFailure", "--", name} cs = append(cs, args...) cmd := exec.Command(os.Args[0], cs...) cmd.Env = []string{"GO_TEST_PROCESS=1"} return cmd }, wantErr: true, }, { name: "NoMetadataPlugin", cliContext: func(name string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestShellMetadataFailure", "--", name} cs = append(cs, args...) cmd := exec.Command(os.Args[0], cs...) 
cmd.Env = []string{"GO_TEST_PROCESS=1"} return cmd }, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cliContext = tt.cliContext b, err := NewBinaryPlugin(tt.name) if (err != nil) != tt.wantErr { t.Errorf("Run() error = %v, wantErr %v", err, tt.wantErr) return } if tt.wantErr && err != nil { return } if !reflect.DeepEqual(b.Metadata(), tt.want) { t.Errorf("Metadata() got = %v, want %v", b.Metadata(), tt.want) } }) } } func TestBinaryPlugin_Run(t *testing.T) { tests := []struct { name string stdout, stderr []byte runErr error want transform.PluginResponse wantErr bool }{ { name: "ValidStdoutNoStderr", stdout: []byte(`{"version": "v1", "isWhiteOut": true}`), runErr: nil, want: transform.PluginResponse{ Version: "v1", IsWhiteOut: true, Patches: nil, }, wantErr: false, }, { name: "InValidStdoutNoStderr", stdout: []byte(`{"version": v1", "isWhiteOut": true}`), runErr: nil, want: transform.PluginResponse{}, wantErr: true, }, { name: "NoStdoutSomeStderr", stdout: []byte(`{"version": "v1", "isWhiteOut": true}`), stderr: []byte("panic: invalid reference"), runErr: nil, want: transform.PluginResponse{ Version: "v1", IsWhiteOut: true, Patches: nil, }, wantErr: false, }, { name: "RunError", runErr: fmt.Errorf("error running the plugin"), want: transform.PluginResponse{}, wantErr: true, }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { b := &BinaryPlugin{ commandRunner: &fakeCommandRunner{ stdout: tt.stdout, stderr: tt.stderr, errorRunningCommand: tt.runErr, }, log: logrus.New().WithField("test", tt.name), } got, err := b.Run(&unstructured.Unstructured{}, nil) if (err != nil) != tt.wantErr { t.Errorf("Run() error = %v, wantErr %v", err, tt.wantErr) return } if !reflect.DeepEqual(got, tt.want) { t.Errorf("Run() got = %v, want %v", got, tt.want) } }) } }
[ "\"GO_TEST_PROCESS\"", "\"GO_TEST_PROCESS\"", "\"GO_TEST_PROCESS\"", "\"GO_TEST_PROCESS\"" ]
[]
[ "GO_TEST_PROCESS" ]
[]
["GO_TEST_PROCESS"]
go
1
0
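Note: the binary-plugin tests above stub out the external plugin by re-executing the test binary itself: cliContext builds an exec.Cmd that runs os.Args[0] with -test.run pointed at a helper test function and GO_TEST_PROCESS=1 in the environment, so the helper acts as the child process. A reduced sketch of that wiring follows; fakeExecCommand and TestHelperProcess are illustrative names, not part of the crane-lib code.

package fakeexec

import (
	"os"
	"os/exec"
	"testing"
)

// fakeExecCommand re-runs the current test binary, executing only
// TestHelperProcess; the original command line is forwarded after "--".
func fakeExecCommand(name string, args ...string) *exec.Cmd {
	cs := append([]string{"-test.run=TestHelperProcess", "--", name}, args...)
	cmd := exec.Command(os.Args[0], cs...)
	cmd.Env = append(os.Environ(), "GO_TEST_PROCESS=1")
	return cmd
}

// TestHelperProcess is not a real test: it only acts as the fake child
// process when launched through fakeExecCommand, and returns immediately
// when run as part of the normal test suite.
func TestHelperProcess(t *testing.T) {
	if os.Getenv("GO_TEST_PROCESS") != "1" {
		return
	}
	os.Stdout.WriteString(`{"version": "v1"}`)
	os.Exit(0)
}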
src/sage/repl/attach.py
r""" Keep track of attached files TESTS:: sage: attach('http://wstein.org/loadtest.py') Traceback (most recent call last): ... NotImplementedError: you can't attach a URL Check that no file clutter is produced:: sage: dir = tmp_dir() sage: src = os.path.join(dir, 'foobar.sage') sage: with open(src, 'w') as f: ....: f.write('print("<output from attached file>")\n') sage: attach(src) <output from attached file> sage: os.listdir(dir) ['foobar.sage'] sage: detach(src) In debug mode backtraces contain code snippets. We need to manually print the traceback because the python doctest module has special support for exceptions and does not match them character-by-character:: sage: import traceback sage: with open(src, 'w') as f: ....: f.write('# first line\n') ....: f.write('# second line\n') ....: f.write('raise ValueError("third") # this should appear in the source snippet\n') ....: f.write('# fourth line\n') sage: load_attach_mode(attach_debug=False) sage: try: ....: attach(src) ....: except Exception: ....: traceback.print_exc() Traceback (most recent call last): ... exec(preparse_file(open(fpath).read()) + "\n", globals) File "<string>", line 3, in <module> ValueError: third sage: detach(src) sage: load_attach_mode(attach_debug=True) sage: try: ....: attach(src) ....: except Exception: ....: traceback.print_exc() Traceback (most recent call last): ... exec(code, globals) File ".../foobar.sage....py", line ..., in <module> raise ValueError("third") # this should appear in the source snippet ValueError: third sage: detach(src) """ #***************************************************************************** # Copyright (C) 2013 Volker Braun <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 2 of the License, or # (at your option) any later version. # http://www.gnu.org/licenses/ #***************************************************************************** from __future__ import print_function import os import six import time from sage.repl.load import load, load_wrap import sage.repl.inputhook import sage.env # The attached files as a dict of {filename:mtime} attached = {} load_debug_mode = False attach_debug_mode = True def load_attach_mode(load_debug=None, attach_debug=None): """ Get or modify the current debug mode for the behavior of :func:`load` and :func:`attach` on ``.sage`` files. In debug mode, loaded or attached ``.sage`` files are preparsed through a file to make their tracebacks more informative. If not in debug mode, then ``.sage`` files are preparsed in memory only for performance. At startup, debug mode is ``True`` for attaching and ``False`` for loading. .. NOTE:: This function should really be deprecated and code executed from memory should raise proper tracebacks. INPUT: - ``load_debug`` -- boolean or ``None`` (default); if not ``None``, then set a new value for the debug mode for loading files. - ``attach_debug`` -- boolean or ``None`` (default); same as ``load_debug``, but for attaching files. OUTPUT: If all input values are ``None``, returns a tuple giving the current modes for loading and attaching. 
EXAMPLES:: sage: load_attach_mode() (False, True) sage: load_attach_mode(attach_debug=False) sage: load_attach_mode() (False, False) sage: load_attach_mode(load_debug=True) sage: load_attach_mode() (True, False) sage: load_attach_mode(load_debug=False, attach_debug=True) """ global load_debug_mode, attach_debug_mode if load_debug is None and attach_debug is None: return (load_debug_mode, attach_debug_mode) if not load_debug is None: load_debug_mode = load_debug if not attach_debug is None: attach_debug_mode = attach_debug search_paths = [] def load_attach_path(path=None, replace=False): """ Get or modify the current search path for :func:`load` and :func:`attach`. INPUT: - ``path`` -- string or list of strings (default: ``None``); path(s) to append to or replace the current path. - ``replace`` -- boolean (default: ``False``); if ``path`` is not ``None``, whether to *replace* the search path instead of *appending* to it. OUTPUT: ``None`` or a *reference* to the current search paths. EXAMPLES: First, we extend the example given in :func:`load`'s docstring:: sage: sage.repl.attach.reset(); reset_load_attach_path() sage: load_attach_path() ['.'] sage: t_dir = tmp_dir() sage: fullpath = os.path.join(t_dir, 'test.py') sage: open(fullpath, 'w').write("print(37 * 3)") sage: attach('test.py') Traceback (most recent call last): ... IOError: did not find file 'test.py' to load or attach sage: load_attach_path(t_dir) sage: attach('test.py') 111 sage: attached_files() == [fullpath] True sage: sage.repl.attach.reset(); reset_load_attach_path() sage: load_attach_path() == ['.'] True sage: load('test.py') Traceback (most recent call last): ... IOError: did not find file 'test.py' to load or attach The function returns a reference to the path list:: sage: reset_load_attach_path(); load_attach_path() ['.'] sage: load_attach_path('/path/to/my/sage/scripts'); load_attach_path() ['.', '/path/to/my/sage/scripts'] sage: load_attach_path(['good', 'bad', 'ugly'], replace=True) sage: load_attach_path() ['good', 'bad', 'ugly'] sage: p = load_attach_path(); p.pop() 'ugly' sage: p[0] = 'weird'; load_attach_path() ['weird', 'bad'] sage: reset_load_attach_path(); load_attach_path() ['.'] """ global search_paths if path is None: return search_paths else: if isinstance(path, six.string_types): path = [path] if replace: search_paths = path else: for p in path: if not p: continue if p not in search_paths: search_paths.append(p) def reset_load_attach_path(): """ Resets the current search path for :func:`load` and :func:`attach`. The default path is ``'.'`` plus any paths specified in the environment variable ``SAGE_LOAD_ATTACH_PATH``. 
EXAMPLES:: sage: load_attach_path() ['.'] sage: t_dir = tmp_dir() sage: load_attach_path(t_dir) sage: t_dir in load_attach_path() True sage: reset_load_attach_path(); load_attach_path() ['.'] At startup, Sage adds colon-separated paths in the environment variable ``SAGE_LOAD_ATTACH_PATH``:: sage: reset_load_attach_path(); load_attach_path() ['.'] sage: os.environ['SAGE_LOAD_ATTACH_PATH'] = '/veni/vidi:vici:' sage: import imp sage: imp.reload(sage.repl.attach) # Simulate startup <module 'sage.repl.attach' from '...'> sage: load_attach_path() ['.', '/veni/vidi', 'vici'] sage: del os.environ['SAGE_LOAD_ATTACH_PATH'] sage: imp.reload(sage.repl.preparse) # Simulate startup <module 'sage.repl.preparse' from '...'> sage: reset_load_attach_path(); load_attach_path() ['.'] """ global search_paths search_paths = ['.'] for path in os.environ.get('SAGE_LOAD_ATTACH_PATH', '').split(':'): load_attach_path(path=path) # Set up the initial search path for loading and attaching files. A # user can modify the path with the function load_attach_path. reset_load_attach_path() def attach(*files): """ Attach a file or files to a running instance of Sage and also load that file. .. NOTE:: Attaching files uses the Python inputhook, which will conflict with other inputhook users. This generally includes GUI main loop integrations, for example tkinter. So you can only use tkinter or attach, but not both at the same time. INPUT: - ``files`` -- a list of filenames (strings) to attach. OUTPUT: Each file is read in and added to an internal list of watched files. The meaning of reading in a file depends on the file type: - ``.py`` files are read in with no preparsing (so, e.g., ``2^3`` is 2 bit-xor 3); - ``.sage`` files are preparsed, then the result is read in; - ``.pyx`` files are *not* preparsed, but rather are compiled to a module ``m`` and then ``from m import *`` is executed. The contents of the file are then loaded, which means they are read into the running Sage session. For example, if ``foo.sage`` contains ``x=5``, after attaching ``foo.sage`` the variable ``x`` will be set to 5. Moreover, any time you change ``foo.sage``, before you execute a command, the attached file will be re-read automatically (with no intervention on your part). .. SEEALSO:: :meth:`~sage.repl.load.load` is the same as :func:`attach`, but doesn't automatically reload a file when it changes. EXAMPLES: You attach a file, e.g., ``foo.sage`` or ``foo.py`` or ``foo.pyx``, to a running Sage session by typing:: sage: attach('foo.sage') # not tested Here we test attaching multiple files at once:: sage: sage.repl.attach.reset() sage: t1 = tmp_filename(ext='.py') sage: open(t1,'w').write("print('hello world')") sage: t2 = tmp_filename(ext='.py') sage: open(t2,'w').write("print('hi there xxx')") sage: attach(t1, t2) hello world hi there xxx sage: set(attached_files()) == set([t1,t2]) True .. SEEALSO:: - :meth:`attached_files` returns a list of all currently attached files. - :meth:`detach` instructs Sage to remove a file from the internal list of watched files. - :meth:`load_attach_path` allows you to get or modify the current search path for loading and attaching files. 
""" try: ipy = get_ipython() except NameError: ipy = None global attached for filename in files: if ipy: code = load_wrap(filename, attach=True) ipy.run_cell(code) else: load(filename, globals(), attach=True) def add_attached_file(filename): """ Add to the list of attached files This is a callback to be used from :func:`~sage.repl.load.load` after evaluating the attached file the first time. INPUT: - ``filename`` -- string, the fully qualified file name. EXAMPLES:: sage: import sage.repl.attach as af sage: af.reset() sage: t = tmp_filename(ext='.py') sage: af.add_attached_file(t) sage: af.attached_files() ['/.../tmp_....py'] sage: af.detach(t) sage: af.attached_files() [] """ sage.repl.inputhook.install() fpath = os.path.abspath(filename) attached[fpath] = os.path.getmtime(fpath) def attached_files(): """ Returns a list of all files attached to the current session with :meth:`attach`. OUTPUT: The filenames in a sorted list of strings. EXAMPLES:: sage: sage.repl.attach.reset() sage: t = tmp_filename(ext='.py') sage: open(t,'w').write("print('hello world')") sage: attach(t) hello world sage: attached_files() ['/....py'] sage: attached_files() == [t] True """ global attached return list(sorted(attached.keys())) def detach(filename): """ Detach a file. This is the counterpart to :meth:`attach`. INPUT: - ``filename`` -- a string, or a list of strings, or a tuple of strings. EXAMPLES:: sage: sage.repl.attach.reset() sage: t = tmp_filename(ext='.py') sage: open(t,'w').write("print('hello world')") sage: attach(t) hello world sage: attached_files() == [t] True sage: detach(t) sage: attached_files() [] sage: sage.repl.attach.reset(); reset_load_attach_path() sage: load_attach_path() ['.'] sage: t_dir = tmp_dir() sage: fullpath = os.path.join(t_dir, 'test.py') sage: open(fullpath, 'w').write("print(37 * 3)") sage: load_attach_path(t_dir) sage: attach('test.py') 111 sage: attached_files() == [os.path.normpath(fullpath)] True sage: detach('test.py') sage: attached_files() [] sage: attach('test.py') 111 sage: fullpath = os.path.join(t_dir, 'test2.py') sage: open(fullpath, 'w').write("print(3)") sage: attach('test2.py') 3 sage: detach(attached_files()) sage: attached_files() [] TESTS:: sage: detach('/dev/null/foobar.sage') Traceback (most recent call last): ... ValueError: file '/dev/null/foobar.sage' is not attached, see attached_files() """ if isinstance(filename, six.string_types): filelist = [filename] else: filelist = [str(x) for x in filename] global attached for filename in filelist: fpath = os.path.expanduser(filename) if not os.path.isabs(fpath): for path in load_attach_path(): epath = os.path.expanduser(path) fpath = os.path.join(epath, filename) fpath = os.path.abspath(fpath) if fpath in attached: break if fpath in attached: attached.pop(fpath) else: raise ValueError("file '{0}' is not attached, see attached_files()".format(filename)) if not attached: sage.repl.inputhook.uninstall() def reset(): """ Remove all the attached files from the list of attached files. EXAMPLES:: sage: sage.repl.attach.reset() sage: t = tmp_filename(ext='.py') sage: open(t,'w').write("print('hello world')") sage: attach(t) hello world sage: attached_files() == [t] True sage: sage.repl.attach.reset() sage: attached_files() [] """ global attached attached = {} def modified_file_iterator(): """ Iterate over the changed files As a side effect the stored time stamps are updated with the actual time stamps. 
So if you iterate over the attached files in order to reload them and you hit an error then the subsequent files are not marked as read. Files that are in the process of being saved are excluded. EXAMPLES:: sage: sage.repl.attach.reset() sage: t = tmp_filename(ext='.py') sage: attach(t) sage: from sage.repl.attach import modified_file_iterator sage: list(modified_file_iterator()) [] sage: sleep(1) # filesystem mtime granularity sage: open(t, 'w').write('1') sage: list(modified_file_iterator()) [('/.../tmp_....py', time.struct_time(...))] """ global attached modified = dict() for filename in attached.keys(): old_tm = attached[filename] if not os.path.exists(filename): print('### detaching file {0} because it does not exist (deleted?) ###'.format(filename)) detach(filename) continue new_tm = os.path.getmtime(filename) if new_tm > old_tm: modified[filename] = new_tm if not modified: return time.sleep(0.1) # sleep 100ms to give the editor time to finish saving for filename in modified.keys(): old_tm = modified[filename] new_tm = os.path.getmtime(filename) if new_tm == old_tm: # file was modified but did not change in the last 100ms attached[filename] = new_tm yield filename, time.gmtime(new_tm) def reload_attached_files_if_modified(): r""" Reload attached files that have been modified This is the internal implementation of the attach mechanism. EXAMPLES:: sage: sage.repl.attach.reset() sage: from sage.repl.interpreter import get_test_shell sage: shell = get_test_shell() sage: tmp = tmp_filename(ext='.py') sage: open(tmp, 'w').write('a = 2\n') sage: shell.run_cell('attach({0})'.format(repr(tmp))) sage: shell.run_cell('a') 2 sage: sleep(1) # filesystem mtime granularity sage: open(tmp, 'w').write('a = 3\n') Note that the doctests are never really at the command prompt where the automatic reload is triggered. So we have to do it manually:: sage: shell.run_cell('from sage.repl.attach import reload_attached_files_if_modified') sage: shell.run_cell('reload_attached_files_if_modified()') ### reloading attached file tmp_....py modified at ... ### sage: shell.run_cell('a') 3 sage: shell.run_cell('detach({0})'.format(repr(tmp))) sage: shell.run_cell('attached_files()') [] sage: shell.quit() """ for filename, mtime in modified_file_iterator(): basename = os.path.basename(filename) timestr = time.strftime('%T', mtime) from sage.libs.readline import interleaved_output with interleaved_output(): print('### reloading attached file {0} modified at {1} ###'.format(basename, timestr)) code = load_wrap(filename, attach=True) get_ipython().run_cell(code)
[]
[]
[ "SAGE_LOAD_ATTACH_PATH" ]
[]
["SAGE_LOAD_ATTACH_PATH"]
python
1
0
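Note: attach.py watches files by comparing a stored modification time against the one on disk, dropping deleted files and waiting briefly so files still being saved are not reloaded half-written. The same mtime-polling idea translates to other languages; the Go sketch below is an analogy under that assumption (the function name modifiedSince and its map-based bookkeeping are ours, not Sage's API).

package main

import (
	"fmt"
	"os"
	"time"
)

// modifiedSince reports files whose on-disk mtime is newer than the recorded
// one, updating the record for each file it reports. Files that no longer
// exist are dropped from the watch list, mirroring attach.py's behaviour.
func modifiedSince(seen map[string]time.Time) []string {
	var changed []string
	for path, old := range seen {
		info, err := os.Stat(path)
		if err != nil {
			delete(seen, path)
			continue
		}
		if info.ModTime().After(old) {
			seen[path] = info.ModTime()
			changed = append(changed, path)
		}
	}
	return changed
}

func main() {
	seen := map[string]time.Time{"foobar.sage": time.Unix(0, 0)}
	fmt.Println(modifiedSince(seen)) // [] unless foobar.sage exists and is newer
}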
molecule/resources/tests/test_hugepage.py
# Copyright (c) 2019 Intel Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Test hugepage allocation """ import os import pytest import testinfra.utils.ansible_runner from common import ansible_vars, check_skip_dpdk_tests TESTINFRA_HOSTS = testinfra.utils.ansible_runner.AnsibleRunner( os.environ["MOLECULE_INVENTORY_FILE"] ).get_hosts("all") @pytest.fixture(scope="module") def hugepage_allocation(host): """ Get number of 2M and 1G hugepages allocated from target and return as tuple """ online_nodes_sysfs = "/sys/devices/system/node/online" hugepage_2m_sysfs = "/sys/devices/system/node/node{node_num}/hugepages/" \ "hugepages-2048kB/nr_hugepages" hugepage_1g_sysfs = "/sys/devices/system/node/node{node_num}/hugepages/" \ "hugepages-1048576kB/nr_hugepages" online_list = None with host.sudo(): online_list = host.file(online_nodes_sysfs).content_string.strip() if not online_list: raise Exception("Failed to get online nodes from '{online_nodes}'" .format(online_nodes=online_nodes_sysfs)) # online_list represents a range of online NUMA nodes. It is a list of # comma delimited ranges. E.g 0-1,4-5 or more commonly just 0-1 in a two # socket system. nr_1g_hugepages = 0 nr_2m_hugepages = 0 for block_range in online_list.split(","): low, high = block_range.split("-") if not low.isdigit() or not high.isdigit(): raise Exception("Failed to parse online nodes from '{online}'" .format(online=online_list)) for node_num in range(int(low), int(high) + 1): path_1g = hugepage_1g_sysfs.format(node_num=node_num) path_2m = hugepage_2m_sysfs.format(node_num=node_num) nr_1g_hugepages += get_sysfs_int(host, path_1g) nr_2m_hugepages += get_sysfs_int(host, path_2m) return (nr_2m_hugepages, nr_1g_hugepages) def get_sysfs_int(host, path): """ Get integer from target at location retrieved from argument 'path' """ value = None with host.sudo(): host_dir = host.file(path) if not host_dir.is_file: raise Exception("Failed to detect file at path '{sysfs}'" .format(sysfs=path)) value = host.file(path).content_string.strip() if not value or not value.isdigit(): raise Exception("Failed to get integer from sysfs path '{path}'" .format(path=path)) return int(value) # This test function uses the fixture "check_skip_dpdk_tests" to decide if the # test should be executed. If the Ansible variable "skip_ovs_dpdk_config" is # set to True, ovs-dpdk will not be configured on the target host, making # execution of this test redundant. Hence, it will be skipped. @pytest.mark.usefixtures("check_skip_dpdk_tests") def test_hugepage(ansible_vars, hugepage_allocation): """ Test to ensure the correct number of 1G/2M hugepages have been allocated """ assert int(ansible_vars['ovs_dpdk_nr_2m_pages']) ==\ hugepage_allocation[0], "2M hugepages defined in Ansible var "\ "is different than amount seen on remote host" assert int(ansible_vars['ovs_dpdk_nr_1g_pages']) ==\ hugepage_allocation[1], "1G hugepages defined in Ansible var "\ "is different than amount seen on remote host"
[]
[]
[ "MOLECULE_INVENTORY_FILE" ]
[]
["MOLECULE_INVENTORY_FILE"]
python
1
0
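Note: the hugepage test sums per-node counters over the NUMA nodes listed in /sys/devices/system/node/online, a comma-separated list of ranges such as 0-1,4-5; on single-socket hosts the file may contain just a bare node number like 0. A small range-expansion sketch follows (parseOnlineNodes is our name; it accepts both forms).

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseOnlineNodes expands a sysfs node list such as "0-1,4-5" or "0" into
// the individual NUMA node numbers it covers.
func parseOnlineNodes(s string) ([]int, error) {
	var nodes []int
	for _, part := range strings.Split(strings.TrimSpace(s), ",") {
		bounds := strings.SplitN(part, "-", 2)
		low, err := strconv.Atoi(bounds[0])
		if err != nil {
			return nil, fmt.Errorf("bad node range %q: %w", part, err)
		}
		high := low
		if len(bounds) == 2 {
			if high, err = strconv.Atoi(bounds[1]); err != nil {
				return nil, fmt.Errorf("bad node range %q: %w", part, err)
			}
		}
		for n := low; n <= high; n++ {
			nodes = append(nodes, n)
		}
	}
	return nodes, nil
}

func main() {
	nodes, err := parseOnlineNodes("0-1,4-5")
	fmt.Println(nodes, err) // [0 1 4 5] <nil>
}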
daemon/daemon_windows.go
package daemon import ( "fmt" "os" "syscall" "github.com/Sirupsen/logrus" "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/windows" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/parsers" "github.com/docker/docker/runconfig" "github.com/docker/libnetwork" "github.com/microsoft/hcsshim" ) const DefaultVirtualSwitch = "Virtual Switch" func (daemon *Daemon) Changes(container *Container) ([]archive.Change, error) { return daemon.driver.Changes(container.ID, container.ImageID) } func (daemon *Daemon) Diff(container *Container) (archive.Archive, error) { return daemon.driver.Diff(container.ID, container.ImageID) } func parseSecurityOpt(container *Container, config *runconfig.HostConfig) error { return nil } func (daemon *Daemon) createRootfs(container *Container) error { // Step 1: create the container directory. // This doubles as a barrier to avoid race conditions. if err := os.Mkdir(container.root, 0700); err != nil { return err } if wd, ok := daemon.driver.(*windows.WindowsGraphDriver); ok { if container.ImageID != "" { // Get list of paths to parent layers. logrus.Debugln("createRootfs: Container has parent image:", container.ImageID) img, err := daemon.graph.Get(container.ImageID) if err != nil { return err } ids, err := daemon.graph.ParentLayerIds(img) if err != nil { return err } logrus.Debugf("Got image ids: %d", len(ids)) if err := hcsshim.CreateSandboxLayer(wd.Info(), container.ID, container.ImageID, wd.LayerIdsToPaths(ids)); err != nil { return err } } else { if err := daemon.driver.Create(container.ID, container.ImageID); err != nil { return err } } } else { // Fall-back code path to allow the use of the VFS driver for development if err := daemon.driver.Create(container.ID, container.ImageID); err != nil { return err } } return nil } func checkKernel() error { return nil } func (daemon *Daemon) adaptContainerSettings(hostConfig *runconfig.HostConfig) { // TODO Windows. } func (daemon *Daemon) verifyContainerSettings(hostConfig *runconfig.HostConfig, config *runconfig.Config) ([]string, error) { // TODO Windows. Verifications TBC return nil, nil } // checkConfigOptions checks for mutually incompatible config options func checkConfigOptions(config *Config) error { return nil } // checkSystem validates platform-specific requirements func checkSystem() error { var dwVersion uint32 // TODO Windows. May need at some point to ensure have elevation and // possibly LocalSystem. // Validate the OS version. Note that docker.exe must be manifested for this // call to return the correct version. dwVersion, err := syscall.GetVersion() if err != nil { return fmt.Errorf("Failed to call GetVersion()") } if int(dwVersion&0xFF) < 10 { return fmt.Errorf("This version of Windows does not support the docker daemon") } return nil } // configureKernelSecuritySupport configures and validate security support for the kernel func configureKernelSecuritySupport(config *Config, driverName string) error { return nil } func migrateIfDownlevel(driver graphdriver.Driver, root string) error { return nil } func configureVolumes(config *Config) error { // Windows does not support volumes at this time return nil } func configureSysInit(config *Config) (string, error) { // TODO Windows. 
return os.Getenv("TEMP"), nil } func isBridgeNetworkDisabled(config *Config) bool { return false } func initNetworkController(config *Config) (libnetwork.NetworkController, error) { // Set the name of the virtual switch if not specified by -b on daemon start if config.Bridge.VirtualSwitchName == "" { config.Bridge.VirtualSwitchName = DefaultVirtualSwitch } return nil, nil } func (daemon *Daemon) RegisterLinks(container *Container, hostConfig *runconfig.HostConfig) error { // TODO Windows. Factored out for network modes. There may be more // refactoring required here. if hostConfig == nil || hostConfig.Links == nil { return nil } for _, l := range hostConfig.Links { name, alias, err := parsers.ParseLink(l) if err != nil { return err } child, err := daemon.Get(name) if err != nil { //An error from daemon.Get() means this name could not be found return fmt.Errorf("Could not get container for %s", name) } if err := daemon.RegisterLink(container, child, alias); err != nil { return err } } // After we load all the links into the daemon // set them to nil on the hostconfig hostConfig.Links = nil if err := container.WriteHostConfig(); err != nil { return err } return nil } func (daemon *Daemon) newBaseContainer(id string) Container { return Container{ CommonContainer: CommonContainer{ ID: id, State: NewState(), execCommands: newExecStore(), root: daemon.containerRoot(id), }, } }
[ "\"TEMP\"" ]
[]
[ "TEMP" ]
[]
["TEMP"]
go
1
0
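Note: checkSystem above validates the Windows version by calling the Win32 GetVersion API and testing the low byte of the returned DWORD, which packs the major version in the low byte, the minor version in the next byte, and the build number in the high word. A tiny sketch of that unpacking follows (splitWindowsVersion and the sample value are ours; the real call needs a manifested docker.exe, as the comment in the file notes).

package main

import "fmt"

// splitWindowsVersion unpacks the DWORD returned by Win32 GetVersion:
// low byte = major version, next byte = minor version, high word = build.
func splitWindowsVersion(dw uint32) (major, minor, build uint32) {
	return dw & 0xFF, (dw >> 8) & 0xFF, dw >> 16
}

func main() {
	// 0x2800000A corresponds to Windows 10.0, build 10240 (0x2800).
	major, minor, build := splitWindowsVersion(0x2800000A)
	fmt.Println(major, minor, build) // 10 0 10240
}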
grobber/locals.py
import os from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorCollection, AsyncIOMotorDatabase from pymongo import ASCENDING, IndexModel from quart.local import LocalProxy __all__ = ["mongo_client", "db", "anime_collection", "url_pool_collection", "source_index_collection", "source_index_meta_collection", "before_serving"] _MONGO_URI = os.getenv("MONGO_URI", "mongodb://localhost:27017") _MONGO_DB_NAME = os.getenv("MONGO_DB", "MyAnimeStream") _mongo_client = None def _get_mongo_client(): global _mongo_client if not _mongo_client: _mongo_client = AsyncIOMotorClient(_MONGO_URI) return _mongo_client mongo_client: AsyncIOMotorClient = LocalProxy(_get_mongo_client) db: AsyncIOMotorDatabase = LocalProxy(lambda: mongo_client[_MONGO_DB_NAME]) anime_collection: AsyncIOMotorCollection = LocalProxy(lambda: db["anime"]) url_pool_collection: AsyncIOMotorCollection = LocalProxy(lambda: db["url_pool"]) source_index_collection: AsyncIOMotorCollection = LocalProxy(lambda: db["source_index"]) source_index_meta_collection: AsyncIOMotorCollection = LocalProxy(lambda: db["source_index_meta"]) async def before_serving(): from .index_scraper import add_collection_indexes await anime_collection.create_indexes([ IndexModel([("title", ASCENDING), ("language", ASCENDING), ("is_dub", ASCENDING)], name="Query Index"), IndexModel([("media_id", ASCENDING), ("language", ASCENDING), ("is_dub", ASCENDING)], name="Media ID Index"), ]) await add_collection_indexes(source_index_collection)
[]
[]
[ "MONGO_URI", "MONGO_DB" ]
[]
["MONGO_URI", "MONGO_DB"]
python
2
0
test/test_torch.py
# -*- coding: utf-8 -*- # Owner(s): ["module: tests"] import torch import torch.utils.data import numpy as np import contextlib import gc import io import inspect import itertools import math import random import re import copy import os import tempfile import unittest import warnings import types import pickle import textwrap import subprocess import weakref import sys from torch.utils.dlpack import from_dlpack, to_dlpack from torch._six import inf, nan, string_classes from itertools import product, combinations, permutations from functools import partial from torch import multiprocessing as mp from torch.testing import make_tensor from torch.testing._internal.common_utils import ( TestCase, TEST_WITH_ROCM, run_tests, IS_WINDOWS, IS_FILESYSTEM_UTF8_ENCODING, NO_MULTIPROCESSING_SPAWN, IS_SANDCASTLE, IS_FBCODE, IS_REMOTE_GPU, load_tests, slowTest, TEST_WITH_CROSSREF, skipCUDAMemoryLeakCheckIf, BytesIOContext, skipIfRocm, skipIfNoSciPy, TemporaryFileName, TemporaryDirectoryName, wrapDeterministicFlagAPITest, DeterministicGuard, CudaSyncGuard, skipIfNotRegistered, bytes_to_scalar, parametrize, skipIfMps) from multiprocessing.reduction import ForkingPickler from torch.testing._internal.common_device_type import ( expectedFailureMeta, expectedFailureXLA, instantiate_device_type_tests, onlyCUDA, onlyCPU, dtypes, dtypesIfCUDA, dtypesIfCPU, deviceCountAtLeast, skipMeta, PYTORCH_CUDA_MEMCHECK, largeTensorTest, onlyNativeDeviceTypes, expectedAlertNondeterministic, get_all_device_types, skipXLA) from typing import Tuple import torch.backends.quantized import torch.testing._internal.data from torch.testing._internal.common_cuda import ( tf32_on_and_off, tf32_is_not_fp32, TEST_CUDNN) from torch.testing._internal.common_dtype import ( floating_types_and, get_all_math_dtypes, all_types_and_complex_and, complex_types, all_types_and, floating_types, floating_and_complex_types, integral_types, ) # Protects against includes accidentally setting the default dtype assert torch.get_default_dtype() is torch.float32 # load_tests from torch.testing._internal.common_utils is used to automatically filter tests for # sharding on sandcastle. 
This line silences flake warnings load_tests = load_tests AMPERE_OR_ROCM = TEST_WITH_ROCM or tf32_is_not_fp32() @contextlib.contextmanager def torch_vital_set(value): stash = None if 'TORCH_VITAL' in os.environ: stash = os.environ['TORCH_VITAL'] os.environ['TORCH_VITAL'] = value try: yield finally: if stash: os.environ['TORCH_VITAL'] = stash else: del os.environ['TORCH_VITAL'] # Tests Vital Signs for Torch # FIXME: document or deprecate whatever this is class TestBasicVitalSigns(TestCase): def test_basic_vitals(self): with torch_vital_set(''): self.assertFalse(torch.vitals_enabled()) with torch_vital_set('ON'): self.assertTrue(torch.vitals_enabled()) def test_basic_vitals_read_write(self): with torch_vital_set('ON'): self.assertTrue(torch.vitals_enabled()) # This tests the code path of setting a vital self.assertTrue(torch.set_vital('Dataloader', 'basic_unit_test', 'TEST_VALUE_STRING')) self.assertIn('TEST_VALUE_STRING', torch.read_vitals()) self.assertIn('CUDA.used', torch.read_vitals()) def test_dataloader_vitals(self): with torch_vital_set('ON'): inps = torch.arange(10 * 5, dtype=torch.float32).view(10, 5) tgts = torch.arange(10 * 5, dtype=torch.float32).view(10, 5) dataset = torch.utils.data.TensorDataset(inps, tgts) loader = torch.utils.data.DataLoader(dataset, batch_size=2) self.assertIn('Dataloader.enabled\t\t True', torch.read_vitals()) # FIXME: document or deprecate whatever this is class TestVitalSignsCuda(TestCase): @onlyCUDA def test_cuda_vitals_gpu_only(self, device): with torch_vital_set('ON'): self.assertIn('CUDA.used\t\t true', torch.read_vitals()) class TestTorchDeviceType(TestCase): exact_dtype = True # TODO: move all tensor creation to common ops def _rand_shape(self, dim, min_size, max_size): shape = [] for i in range(dim): shape.append(random.randint(min_size, max_size)) return tuple(shape) # Validates that mathematical constants are defined properly, as required by # the Python Array API (https://data-apis.org/array-api/latest/API_specification/constants.html) @onlyCPU def test_constants(self, device): self.assertIsInstance(torch.e, float) self.assertEqual(torch.e, math.e, atol=0, rtol=0) self.assertIsInstance(torch.pi, float) self.assertEqual(torch.pi, math.pi, atol=0, rtol=0) self.assertIsInstance(torch.nan, float) self.assertEqual(torch.nan, math.nan, equal_nan=True) self.assertIsInstance(torch.inf, float) self.assertEqual(torch.inf, math.inf) @onlyNativeDeviceTypes @dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64, torch.bool, torch.float32, torch.complex64, torch.float64, torch.complex128) def test_bytes_to_scalar(self, device, dtype): def rand_byte(): if dtype == torch.bool: return torch.randint(0, 2, ()).item() else: return torch.randint(0, 256, ()).item() element_size = torch._utils._element_size(dtype) for i in range(10): bytes_list = [rand_byte() for _ in range(element_size)] scalar = bytes_to_scalar(bytes_list, dtype, device) self.assertEqual(scalar.storage()._untyped().tolist(), bytes_list) @dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64, torch.bool, torch.float32, torch.complex64, torch.float64, torch.complex128) def test_storage(self, device, dtype): v = make_tensor((3, 5), dtype=dtype, device=device, low=-9, high=9) self.assertEqual(v.storage()[0], v[0][0]) self.assertEqual(v.storage()[14], v[2][4]) v_s = v.storage() for el_num in range(v.numel()): dim0 = el_num // v.size(1) dim1 = el_num % v.size(1) self.assertEqual( v_s[el_num], v[dim0][dim1]) v_s_byte = v.storage()._untyped() el_size = v.element_size() for 
el_num in range(v.numel()): start = el_num * el_size end = start + el_size dim0 = el_num // v.size(1) dim1 = el_num % v.size(1) self.assertEqual( bytes_to_scalar(v_s_byte[start:end], dtype, device), v[dim0][dim1]) @onlyNativeDeviceTypes @dtypes(torch.int8, torch.uint8, torch.int16, torch.int32, torch.int64, torch.bool, torch.float32, torch.complex64, torch.float64, torch.complex128, torch.quint8, torch.qint8, torch.qint32, torch.quint4x2) def test_storage_setitem(self, device, dtype): # Skip quantized dtypes for CUDA, since they're not supported if torch.device(device).type == 'cuda': if dtype in [torch.quint8, torch.qint8, torch.qint32, torch.quint4x2]: return storage_type_name = torch.storage._dtype_to_storage_type_map()[dtype] if torch.device(device).type == 'cuda': storage_type = eval('torch.cuda.' + storage_type_name) else: storage_type = eval('torch.' + storage_type_name) N = 10 s = storage_type(N) s[:] = 0 l = [0] * N self.assertEqual(s, storage_type(l)) for i in range(N): s[i] = i l[i] = i self.assertEqual(s, storage_type(l)) l[2:7] = [1] * 5 s[2:7] = 1 self.assertEqual(s, storage_type(l)) @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_tensor_storage_type(self, device, dtype): a = make_tensor((10,), dtype=dtype, device=device, low=-9, high=9) module = torch.cuda if (torch.device(device).type == 'cuda') else torch expected_storage_type = getattr(module, torch.storage._dtype_to_storage_type_map()[dtype]) self.assertEqual(a.storage_type(), expected_storage_type) @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_tensor_from_storage(self, device, dtype): a = make_tensor((4, 5, 3), dtype=dtype, device=device, low=-9, high=9) a_s = a.storage() b = torch.tensor(a_s, device=device, dtype=dtype).reshape(a.size()) self.assertEqual(a, b) c = torch.tensor(a_s._untyped(), device=device, dtype=dtype).reshape(a.size()) self.assertEqual(a, c) for error_dtype in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16): if error_dtype == dtype: continue with self.assertRaisesRegex(RuntimeError, r'Expected a Storage of type'): error_storage = a.to(error_dtype).storage() torch.tensor(error_storage, device=device, dtype=dtype) @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_set_storage(self, device, dtype): a = make_tensor((4, 5, 3), dtype=dtype, device=device, low=-9, high=9) a_s = a.storage() b = torch.tensor([], device=device, dtype=dtype).set_(a_s).reshape(a.size()) self.assertEqual(a, b) c = torch.tensor([], device=device, dtype=dtype).set_(a_s._untyped()).reshape(a.size()) self.assertEqual(a, c) for error_dtype in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16): if error_dtype == dtype: continue with self.assertRaisesRegex(RuntimeError, r'Expected a Storage of type'): error_storage = a.to(error_dtype).storage() b = torch.tensor([], device=device, dtype=dtype).set_(error_storage) def _check_storage_meta(self, s, s_check): self.assertTrue( isinstance(s, (torch._UntypedStorage, torch._TypedStorage)) and isinstance(s_check, type(s)), ( 's and s_check must both be one of _UntypedStorage or ' '_TypedStorage, but got' f' {type(s).__name__} and {type(s_check).__name__}')) self.assertEqual(s.device.type, 'meta') self.assertEqual(s.nbytes(), s_check.nbytes()) self.assertEqual(s.size(), s_check.size()) self.assertEqual(s.data_ptr(), 0) with self.assertRaisesRegex(NotImplementedError, r'Not available'): 
s[0] if isinstance(s, torch._TypedStorage): self.assertEqual(s.dtype, s_check.dtype) self._check_storage_meta(s._untyped(), s_check._untyped()) @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_typed_storage_meta(self, device, dtype): args_list = [ [], [0], [100], [[1, 2, 3, 4, 5, 6]], ] for args in args_list: s_check = torch._TypedStorage(*args, dtype=dtype, device=device) s = torch._TypedStorage(*args, dtype=dtype, device='meta') self._check_storage_meta(s, s_check) @onlyNativeDeviceTypes def test_untyped_storage_meta(self, device): args_list = [ [], [0], [100], [[1, 2, 3, 4, 5, 6]], ] for args in args_list: s_check = torch._UntypedStorage(*args, device=device) s = torch._UntypedStorage(*args, device='meta') self._check_storage_meta(s, s_check) @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_storage_meta_from_tensor(self, device, dtype): t_check = make_tensor((4, 5, 3), dtype=dtype, device=device, low=-9, high=9) t = t_check.to('meta') s_check = t_check.storage() s = t.storage() self._check_storage_meta(s, s_check) @onlyCPU @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_storage_meta_errors(self, device, dtype): s0 = torch._TypedStorage([1, 2, 3, 4], device='meta', dtype=dtype) with self.assertRaisesRegex(NotImplementedError, r'Cannot copy out'): s0.cpu() with self.assertRaisesRegex(RuntimeError, r'only available on CPU'): s0._share_fd_cpu_() with self.assertRaisesRegex(RuntimeError, r'only available on CPU'): s0._share_filename_cpu_() if torch.cuda.is_available(): with self.assertRaisesRegex(NotImplementedError, r'Cannot copy out'): s0.cuda() with self.assertRaisesRegex(RuntimeError, r'only available on CUDA'): s0._share_cuda_() with self.assertRaisesRegex(NotImplementedError, r'Cannot copy out'): s0.pin_memory() with self.assertRaisesRegex(RuntimeError, r'got unexpected device type'): s0.resize_(10) with self.assertRaisesRegex(RuntimeError, r'only available on CPU'): s0.share_memory_() with self.assertRaisesRegex(NotImplementedError, r'Not available'): s0.tolist() with tempfile.NamedTemporaryFile() as f: with self.assertRaisesRegex(RuntimeError, r'Device not recognized'): s0._write_file(f, True, True, s0.element_size()) for device in ['cpu', 'cuda'] if torch.cuda.is_available() else ['cpu']: s1 = torch._TypedStorage([1, 2, 3, 4], device=device, dtype=dtype) with self.assertRaisesRegex(NotImplementedError, r'Cannot copy out'): s1.copy_(s0) @dtypes(torch.float32, torch.complex64) def test_deepcopy(self, device, dtype): from copy import deepcopy a = torch.randn(5, 5, dtype=dtype, device=device) b = torch.randn(5, 5, dtype=dtype, device=device) c = a.view(25) q = [a, [a.storage(), b.storage()], b, c] w = deepcopy(q) self.assertEqual(w[0], q[0], atol=0, rtol=0) self.assertEqual(w[1][0], q[1][0], atol=0, rtol=0) self.assertEqual(w[1][1], q[1][1], atol=0, rtol=0) self.assertEqual(w[1], q[1], atol=0, rtol=0) self.assertEqual(w[2], q[2], atol=0, rtol=0) # Check that deepcopy preserves sharing w[0].add_(1) for i in range(a.numel()): self.assertEqual(w[1][0][i], q[1][0][i] + 1) self.assertEqual(w[3], c + 1) w[2].sub_(1) for i in range(a.numel()): self.assertEqual(w[1][1][i], q[1][1][i] - 1) # Check that deepcopy preserves attributes a.foo = 3 self.assertEqual(deepcopy(a).foo, 3) @dtypes(torch.float32, torch.complex64) def test_deepcopy_scalar(self, device, dtype): from copy import deepcopy a = torch.tensor(5, dtype=dtype, device=device) 
self.assertEqual(a.size(), deepcopy(a).size()) self.assertEqual(a, deepcopy(a)) def check_internal_mem_overlap(self, inplace_op, num_inputs, dtype, device, expected_failure=False): if isinstance(inplace_op, str): inplace_op = getattr(torch.Tensor, inplace_op) input = torch.randn(1, dtype=dtype, device=device).expand(3, 3) inputs = [input] + [torch.randn_like(input) for i in range(num_inputs - 1)] if not expected_failure: with self.assertRaisesRegex(RuntimeError, 'single memory location'): inplace_op(*inputs) else: with self.assertRaises(AssertionError): with self.assertRaisesRegex(RuntimeError, 'single memory location'): inplace_op(*inputs) def unary_check_input_output_mem_overlap(self, data, sz, op, expected_failure=False): def _test(op, output, input): output_exp = torch.empty_like(output) op(input, out=output_exp) self.assertEqual(op(input, out=output), output_exp, msg=op.__name__) # output is identical to input: _test(op, output=data[0:sz], input=data[0:sz]) # output and input are independent: _test(op, output=data[0:sz], input=data[sz:2 * sz]) # output partially overlaps with input: if not expected_failure: with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): _test(op, data[0:sz], data[1:sz + 1]) else: with self.assertRaises(AssertionError): with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): _test(op, data[0:sz], data[1:sz + 1]) # output is transpose of input: length = int(math.sqrt(sz)) input = data[:length**2].view([length, length]) out = input.t() if not expected_failure: with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): _test(op, out, input) else: with self.assertRaises(AssertionError): with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): _test(op, out, input) def ternary_check_input_output_mem_overlap(self, op, device, expected_failure=False): sz = 9 data = torch.randn(2 * sz, device=device) other1 = torch.randn(sz, device=device) other2 = torch.randn(sz, device=device) self.unary_check_input_output_mem_overlap( data, sz, lambda input, out: op(input, other1.view(input.shape), other2.view(input.shape), out=out), expected_failure=expected_failure) self.unary_check_input_output_mem_overlap( data, sz, lambda input, out: op(other1.view(input.shape), input, other2.view(input.shape), out=out), expected_failure=expected_failure) self.unary_check_input_output_mem_overlap( data, sz, lambda input, out: op(other1.view(input.shape), other2.view(input.shape), input, out=out), expected_failure=expected_failure) def _select_broadcastable_dims(self, dims_full=None): # select full dimensionality if dims_full is None: dims_full = [] ndims = random.randint(1, 4) dims_full = [random.randint(1, 8) for _ in range(ndims)] else: ndims = len(dims_full) # select actual dimensions for ops: # larger: full ndims, individual sizes may be reduced # smaller: possibly reduced ndims, sizes may be reduced smaller_ndims = random.randint(1, ndims) dims_small = [] dims_large = [] for i in range(ndims - 1, -1, -1): j = random.randint(1, 3) if j == 1: # no reduced singleton dimension ds = dims_full[i] dl = dims_full[i] elif j == 2: # larger may have reduced singleton dimension ds = dims_full[i] dl = 1 if len(dims_small) < smaller_ndims else dims_full[i] elif j == 3: # smaller may have reduced singleton dimension ds = 1 dl = dims_full[i] dims_large = [dl] + dims_large if len(dims_small) < smaller_ndims: dims_small = [ds] + dims_small return (dims_small, dims_large, dims_full) # collected tests of ops that used scalar_check in Declarations.cwrap for # 
correctness def test_scalar_check(self, device): zero_d = torch.randn((), device=device) one_d = torch.randn((1,), device=device) # remainder self.assertEqual((), torch.remainder(zero_d, zero_d).shape) self.assertEqual((), torch.remainder(zero_d, 2).shape) self.assertEqual((1,), torch.remainder(zero_d, one_d).shape) self.assertEqual((1,), torch.remainder(one_d, zero_d).shape) # fmod self.assertEqual((), torch.fmod(zero_d, zero_d).shape) self.assertEqual((), torch.fmod(zero_d, 2).shape) self.assertEqual((1,), torch.fmod(zero_d, one_d).shape) self.assertEqual((1,), torch.fmod(one_d, zero_d).shape) # exp, cos, cosh, tan, atan, tanh, erf, erfc, reciprocal self.assertEqual((), torch.exp(zero_d).shape) self.assertEqual((), torch.cos(zero_d).shape) self.assertEqual((), torch.cosh(zero_d).shape) self.assertEqual((), torch.tan(zero_d).shape) self.assertEqual((), torch.atan(zero_d).shape) self.assertEqual((), torch.acosh(zero_d).shape) self.assertEqual((), torch.asinh(zero_d).shape) self.assertEqual((), torch.atanh(zero_d).shape) self.assertEqual((), torch.tanh(zero_d).shape) self.assertEqual((), torch.erf(zero_d).shape) self.assertEqual((), torch.erfc(zero_d).shape) self.assertEqual((), torch.reciprocal(zero_d).shape) self.assertEqual((1,), torch.exp(one_d).shape) self.assertEqual((1,), torch.cos(one_d).shape) self.assertEqual((1,), torch.cosh(one_d).shape) self.assertEqual((1,), torch.tan(one_d).shape) self.assertEqual((1,), torch.atan(one_d).shape) self.assertEqual((1,), torch.acosh(one_d).shape) self.assertEqual((1,), torch.asinh(one_d).shape) self.assertEqual((1,), torch.atanh(one_d).shape) self.assertEqual((1,), torch.tanh(one_d).shape) self.assertEqual((1,), torch.erf(one_d).shape) self.assertEqual((1,), torch.erfc(one_d).shape) self.assertEqual((1,), torch.reciprocal(one_d).shape) # clamp self.assertEqual((), torch.clamp(zero_d, min=0, max=1).shape) self.assertEqual((), torch.clamp(zero_d, min=0).shape) self.assertEqual((), torch.clamp(zero_d, max=1).shape) self.assertEqual((1,), torch.clamp(one_d, min=0, max=1).shape) self.assertEqual((1,), torch.clamp(one_d, min=0).shape) self.assertEqual((1,), torch.clamp(one_d, max=1).shape) # cumsum, cumprod, cummax, cummin self.assertEqual((), torch.logcumsumexp(zero_d, 0).shape) self.assertEqual((), torch.cumsum(zero_d, 0).shape) self.assertEqual((), torch.cumprod(zero_d, 0).shape) self.assertEqual((), torch.cummax(zero_d, 0)[0].shape) self.assertEqual((), torch.cummin(zero_d, 0)[0].shape) # sort, topk self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, False)]) self.assertEqual([(), ()], [x.shape for x in torch.sort(zero_d, 0, True)]) self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, False)]) self.assertEqual([(), ()], [x.shape for x in torch.topk(zero_d, 1, 0, True)]) # max, min self.assertEqual((), torch.max(zero_d, zero_d).shape) self.assertEqual((1,), torch.max(one_d, zero_d).shape) self.assertEqual((1,), torch.max(zero_d, one_d).shape) self.assertEqual((), torch.min(zero_d, zero_d).shape) self.assertEqual((1,), torch.min(one_d, zero_d).shape) self.assertEqual((1,), torch.min(zero_d, one_d).shape) zero_d_int = torch.tensor(1, device=device) one_d_int = torch.tensor([1], device=device) # lshift, rshift self.assertEqual((), (zero_d_int >> zero_d_int).shape) self.assertEqual((), (zero_d_int >> 1).shape) self.assertEqual((1,), (one_d_int >> zero_d_int).shape) self.assertEqual((1,), (zero_d_int >> one_d_int).shape) self.assertEqual((1,), (one_d_int >> 1).shape) self.assertEqual((), (zero_d_int << zero_d_int).shape) 
self.assertEqual((), (zero_d_int << 1).shape) self.assertEqual((1,), (one_d_int << zero_d_int).shape) self.assertEqual((1,), (zero_d_int << one_d_int).shape) self.assertEqual((1,), (one_d_int << 1).shape) # or self.assertEqual((), (zero_d_int | zero_d_int).shape) self.assertEqual((), (zero_d_int | 1).shape) self.assertEqual((1,), (one_d_int | zero_d_int).shape) self.assertEqual((1,), (zero_d_int | one_d_int).shape) self.assertEqual((1,), (one_d_int | 1).shape) # and self.assertEqual((), (zero_d_int & zero_d_int).shape) self.assertEqual((), (zero_d_int & 1).shape) self.assertEqual((1,), (one_d_int & zero_d_int).shape) self.assertEqual((1,), (zero_d_int & one_d_int).shape) self.assertEqual((1,), (one_d_int & 1).shape) # clone self.assertEqual((), zero_d.clone().shape) zero_d_bool = torch.tensor(True, device=device) one_d_bool = torch.tensor([True], device=device) # masked_select self.assertEqual((1,), torch.masked_select(zero_d_bool, zero_d_bool).shape) self.assertEqual((1,), torch.masked_select(zero_d_bool, one_d_bool).shape) self.assertEqual((1,), torch.masked_select(one_d_bool, zero_d_bool).shape) zero_d_uint8 = torch.tensor(1, dtype=torch.uint8, device=device) one_d_uint8 = torch.tensor([1], dtype=torch.uint8, device=device) with warnings.catch_warnings(): warnings.simplefilter("ignore") self.assertEqual((1,), torch.masked_select(zero_d_uint8, zero_d_uint8).shape) self.assertEqual((1,), torch.masked_select(zero_d_uint8, one_d_uint8).shape) self.assertEqual((1,), torch.masked_select(one_d_uint8, zero_d_uint8).shape) # mode self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=True)]) self.assertEqual([(), ()], [x.shape for x in torch.mode(zero_d, dim=0, keepdim=False)]) self.assertEqual([(1,), (1,)], [x.shape for x in torch.mode(one_d, dim=0, keepdim=True)]) self.assertEqual([(), ()], [x.shape for x in torch.mode(one_d, dim=0, keepdim=False)]) # max self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=True)]) self.assertEqual([(), ()], [x.shape for x in torch.max(zero_d, dim=0, keepdim=False)]) self.assertEqual([(1,), (1,)], [x.shape for x in torch.max(one_d, dim=0, keepdim=True)]) self.assertEqual([(), ()], [x.shape for x in torch.max(one_d, dim=0, keepdim=False)]) # amax self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=True).shape) self.assertEqual((), torch.amax(zero_d, dim=0, keepdim=False).shape) self.assertEqual((1,), torch.amax(one_d, dim=0, keepdim=True).shape) self.assertEqual((), torch.amax(one_d, dim=0, keepdim=False).shape) # min self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=True)]) self.assertEqual([(), ()], [x.shape for x in torch.min(zero_d, dim=0, keepdim=False)]) self.assertEqual([(1,), (1,)], [x.shape for x in torch.min(one_d, dim=0, keepdim=True)]) self.assertEqual([(), ()], [x.shape for x in torch.min(one_d, dim=0, keepdim=False)]) # amin self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=True).shape) self.assertEqual((), torch.amin(zero_d, dim=0, keepdim=False).shape) self.assertEqual((1,), torch.amin(one_d, dim=0, keepdim=True).shape) self.assertEqual((), torch.amin(one_d, dim=0, keepdim=False).shape) # set_ zero_d_clone = zero_d.clone() one_d_clone = one_d.clone() self.assertEqual((), zero_d_clone.set_(one_d.storage(), 0, (), ()).shape) self.assertEqual((1,), zero_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape) self.assertEqual((), one_d_clone.set_(one_d.storage(), 0, (), ()).shape) self.assertEqual((1,), one_d_clone.set_(one_d.storage(), 0, (1,), (1,)).shape) 
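# set_ with a tensor source shares that tensor's storage and takes on its size and stride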
self.assertEqual((), zero_d.clone().set_(zero_d).shape) self.assertEqual((), one_d.clone().set_(zero_d).shape) self.assertEqual((1,), zero_d.clone().set_(one_d).shape) self.assertEqual((1,), one_d.clone().set_(one_d).shape) # take self.assertEqual((), torch.randn((2, 3), device=device).take(zero_d_int).shape) self.assertEqual((1,), torch.randn((2, 3), device=device).take(one_d_int).shape) # gather self.assertEqual((), torch.gather(zero_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape) self.assertEqual((1,), torch.gather(zero_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape) self.assertEqual((), torch.gather(one_d, 0, torch.zeros((), dtype=torch.int64, device=device)).shape) self.assertEqual((1,), torch.gather(one_d, 0, torch.zeros((1,), dtype=torch.int64, device=device)).shape) # normal # std must be >= 0 zero_d_ge_0 = torch.rand((), device=device) # documentation says out shape matches shape of mean self.assertEqual((), torch.normal(zero_d, zero_d_ge_0).shape) self.assertEqual((1,), torch.normal(one_d, zero_d_ge_0).shape) self.assertEqual((), torch.normal(1, zero_d_ge_0).shape) self.assertEqual((), torch.normal(zero_d, 1).shape) self.assertEqual((1,), torch.normal(one_d, 1).shape) # TODO: this behavior differs on CPU and GPU, see https://github.com/pytorch/pytorch/issues/30480. # self.assertEqual((), torch.normal(zero_d, one_d).shape) # self.assertEqual((), torch.normal(1, one_d).shape) # convolutions. Yes, we are testing nn.functional here; seems justified # given its similar to the other tests w = torch.randn(2, 1, 3, 3, device=device).div_(2).requires_grad_() self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=1)) self.assertRaises(RuntimeError, lambda: torch.nn.functional.conv2d(zero_d, w, groups=2)) # nll_loss -- verify input can't be 0-dimensional. 
self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, zero_d, reduction='none')) self.assertRaises(ValueError, lambda: torch.nn.functional.nll_loss(zero_d, one_d, reduction='none')) # verify output is 0-dimensional when reduction != 'none' for (input, target) in ((torch.randn(1, 1, device=device), torch.tensor([0], device=device)), (torch.randn(1, 1, 1, 1, device=device), torch.tensor([[[0]]], device=device))): self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='mean').shape) self.assertEqual((), torch.nn.functional.nll_loss(input, target, reduction='sum').shape) # multilabel_margin_loss for input in (zero_d, one_d, torch.randn(1, 1, device=device)): for target in (torch.tensor(0, device=device), torch.tensor([0], device=device), torch.tensor([[0]], device=device)): if (input.dim() <= 1 and target.dim() <= 1) or (input.dim() == 2 and target.dim() == 2): output_shape = (target.shape[0],) if target.dim() == 2 else () self.assertEqual(output_shape, torch.nn.functional.multilabel_margin_loss(input, target, reduction='none').shape) self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean').shape) self.assertEqual((), torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum').shape) else: self.assertRaises(RuntimeError, lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='none')) self.assertRaises(RuntimeError, lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='mean')) self.assertRaises(RuntimeError, lambda: torch.nn.functional.multilabel_margin_loss(input, target, reduction='sum')) # multi_margin_loss for input in (zero_d, one_d, torch.randn(1, 1, device=device)): for target in (torch.tensor(0, device=device), torch.tensor([0], device=device)): self.assertEqual(target.shape, torch.nn.functional.multi_margin_loss(input, target, reduction='none').shape) self.assertEqual((), torch.nn.functional.multi_margin_loss(input, target, reduction='mean').shape) self.assertEqual((), torch.nn.functional.multi_margin_loss(input, target, reduction='sum').shape) # Uses mismatched arange out size to trigger a warning @unittest.skipIf(TEST_WITH_CROSSREF, "crossref perturbs line numbering") def test_cpp_warnings_have_python_context(self, device): # Creates long string in advance to avoid a too-long Python line s = ".+Triggered internally at.+RangeFactories.+" def cpp_warn_fn(): out = torch.empty((5,)) torch.arange(0, 3, out=out) return out # Checks eager-mode cpp warning with warnings.catch_warnings(record=True) as w: cpp_warn_fn() frameinfo = inspect.getframeinfo(inspect.currentframe()) warning = w[0] # Checks for cpp context in the warning message escaped_warning_message = str(warning.message).encode('unicode_escape') self.assertTrue(re.search(s, str(escaped_warning_message), re.IGNORECASE) is not None) # Checks the Python features of the warning # Note: the eager mode warning refers to the line in the function # that throws the warning. 
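# The warning points at the arange call inside cpp_warn_fn, which sits 6 lines above the getframeinfo call captured above, hence the -6 offset.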
self.assertEqual(frameinfo.lineno - 6, warning.lineno) self.assertEqual(len(w), 1) # Checks jitted cpp warning with warnings.catch_warnings(record=True) as w: scripted_cpp_warn_fn = torch.jit.script(cpp_warn_fn) scripted_cpp_warn_fn() warning = w[0] # Checks for cpp context in the warning message escaped_warning_message = str(warning.message).encode('unicode_escape') self.assertTrue(re.search(s, str(escaped_warning_message), re.IGNORECASE) is not None) # Checks the Python features of the warning # Note: the jitted warning's lineno refers to the call to the jitted # function, which in our test suite has a layer of indirection # that makes checking the Python lineno fragile self.assertEqual(len(w), 1) # Checks jitted Python warning def warn_fn(): warnings.warn("Warning!") # The jit mimics an eager-mode Python warning in this case with warnings.catch_warnings(record=True) as w: scripted_warn_fn = torch.jit.script(warn_fn) scripted_warn_fn() frameinfo = inspect.getframeinfo(inspect.currentframe()) warning = w[0] self.assertTrue(re.search('Warning!', str(warning.message)) is not None) # Checks the Python features of the warning self.assertEqual(frameinfo.lineno - 6, warning.lineno) self.assertEqual(len(w), 1) # FIXME: move to test_testing @onlyCPU def test_warn_always_caught(self, device): # Check that we can catch a TORCH_WARN_ONCE warning twice # since assertWarnsOnceRegex uses set_warn_always(True) which changes # TORCH_WARN_ONCE to TORCH_WARN a = np.arange(10) a.flags.writeable = False with self.assertWarnsOnceRegex(UserWarning, '.*non-writable.*'): torch.from_numpy(a) # OK, got it once, now try again with self.assertWarnsOnceRegex(UserWarning, '.*non-writable.*'): torch.from_numpy(a) # Make sure emitting two warnings will pass the assertWarnsOnceRegex # context manager with self.assertWarnsOnceRegex(UserWarning, '.*non-writable.*'): torch.from_numpy(a) torch.from_numpy(a) # TODO: this test should be in test_nn.py def test_conv_transposed_backward_agnostic_to_memory_format(self, device): in_channels = 64 out_channels = 128 scale_factor = 8 batch_size = 8 length = 16 conv = torch.nn.ConvTranspose1d( in_channels, out_channels, kernel_size=scale_factor * 2, stride=scale_factor).to(device) layer_norm = torch.nn.LayerNorm(out_channels).to(device) input_ = torch.randn(batch_size, in_channels, length).to(device).contiguous() input_ = conv(input_).contiguous() input_ = layer_norm(input_.transpose(1, 2).contiguous()).contiguous() input_.sum().backward() # 3d conv = torch.nn.ConvTranspose3d(3, 3, kernel_size=3).to(device) input = torch.randn(batch_size, 3, length, length, length, device=device) out = conv(input) out.backward(torch.ones_like(out).transpose(-2, -1)) # TODO: this test should be in test_nn.py @onlyCUDA @largeTensorTest('12GB') def test_conv_transposed_large(self, device): # ConvTranspose3d works for large input tensors (gh-32866) in_channels = 64 out_channels = 128 kernel_size = 5 conv = torch.nn.ConvTranspose3d( in_channels, out_channels, kernel_size=kernel_size, stride=2, padding=2, output_padding=1).to(device) x = torch.rand([1, 64, 8, 128, 172]).to(device) y = conv(x) def test_is_set_to(self, device): t1 = torch.empty(3, 4, 9, 10, device=device) t2 = torch.empty(3, 4, 9, 10, device=device) t3 = torch.tensor([], device=device).set_(t1) t4 = t3.clone().resize_(12, 90) self.assertFalse(t1.is_set_to(t2)) self.assertTrue(t1.is_set_to(t3)) self.assertTrue(t3.is_set_to(t1), "is_set_to should be symmetric") self.assertFalse(t1.is_set_to(t4)) 
self.assertFalse(torch.tensor([]).is_set_to(torch.tensor([])), "Tensors with no storages should not appear to be set " "to each other") t1 = torch.tensor([True, True], dtype=torch.bool, device=device) t2 = torch.tensor([0], dtype=torch.bool, device=device).set_(t1) self.assertTrue(t1.is_set_to(t2)) # test that sizes must match t1 = torch.empty([2, 3, 4], device=device) t2 = t1.view(4, 3, 2) self.assertFalse(t1.is_set_to(t2)) self.assertFalse(t2.is_set_to(t1)) # test that legacy empty size behavior used to be respected (i.e. all # empty tensors were logically collapsed to size [0]). t1 = torch.empty([2, 5, 0], device=device) t2 = t1.view([0]) self.assertFalse(t1.is_set_to(t2)) self.assertFalse(t2.is_set_to(t1)) # See https://github.com/pytorch/pytorch/issues/72650 @skipIfMps @skipMeta @parametrize( "fn", [ "dist", "atan2", "pow", "lerp", "add", "sub", "mul", "div", "fmod", "remainder", "eq", "ge", "gt", "le", "lt", "max", "min", "ne", "addcdiv", "addcmul", "masked_scatter", "masked_select", "masked_fill", "map", "map2", "copy", ], ) def test_broadcast(self, fn, device): # functions with three tensor arguments fns_3_args = {"map2"} fns_value_kwarg = {"addcdiv", "addcmul"} (dims_small, dims_large, dims_full) = self._select_broadcastable_dims() full1d = torch.randn(*dims_full, device=device).flatten().float() small = torch.randn(*dims_small, device=device).float() large = torch.randn(*dims_large, device=device).float() small_expanded = small.expand(*dims_full) large_expanded = large.expand(*dims_full) small2 = None small2_expanded = None if fn in fns_3_args or fn in fns_value_kwarg: # create another smaller tensor (dims_small2, _, _) = self._select_broadcastable_dims(dims_full) small2 = torch.randn(*dims_small2, device=device).float() small2_expanded = small2.expand(*dims_full) if small.is_cuda and fn in ['map', 'map2']: # map and map2 are not implementd on CUDA tensors return if hasattr(large_expanded, fn): # run through tensor versions of functions # and verify fully expanded inputs give same results expanded = {large: large_expanded, small: small_expanded, small2: small2_expanded} def tensorfn(myfn, t1, t2): if fn == "lerp": return myfn(t1, 0.5) elif fn == "masked_select": return myfn(t1 < 0) elif fn == "masked_scatter": return myfn(t1 < 0.5, full1d) elif fn == "masked_fill": return myfn(t1 < 0.5, 1.0) elif fn in fns_3_args: return myfn(1, t1, t2) elif fn in fns_value_kwarg: return myfn(t1, t2, value=1) else: return myfn(t1) # test various orders for first, second, third in [(large, small, small2), (small, large, small2), (small2, small, large), (small2, large, small)]: if first is None: break # ignore last iter when small2 is None method_expanded = getattr(expanded[first], fn) method = getattr(first, fn) r1 = tensorfn(method_expanded, expanded[second], expanded[third]) r2 = tensorfn(method, second, third) self.assertEqual(r1, r2) # now for torch. 
versions of functions if hasattr(torch, fn): fntorch = getattr(torch, fn) expanded = {large: large_expanded, small: small_expanded, small2: small2_expanded} def torchfn(t1, t2, t3): if fn == "lerp": return fntorch(t1, t2, 0.5) elif fn == "masked_select": return fntorch(t1, t2 < 0) elif fn == "masked_scatter": return fntorch(t1, t2 < 0.5, full1d) elif fn == "masked_fill": return fntorch(t1, t2 < 0.5, 1.0) elif fn in fns_3_args: return fntorch(t1, 1.0, t2, t3) elif fn in fns_value_kwarg: return fntorch(t1, t2, t3, value=1.0) else: return fntorch(t1, t2) # test various orders for first, second, third in [(large, small, small2), (small, large, small2), (small2, small, large), (small2, large, small)]: if first is None: break # ignore last iter when small2 is None r1 = torchfn(expanded[first], expanded[second], expanded[third]) r2 = torchfn(first, second, third) self.assertEqual(r1, r2) # now for in place functions # in-place tensor is not broadcastable; test only guaranteed # to work by broadcasting other argument(s) if not hasattr(large_expanded, fn + "_"): return # need to clone largeExpanded so we can reuse, since functions are in-place large_expanded_clone = large_expanded.clone() def tensorfn_inplace(t0, t1, t2=None): t0_fn = getattr(t0, fn + "_") if fn == "lerp": return t0_fn(t1, 0.5) elif fn == "masked_scatter": return t0_fn(t1 < 0.5, full1d) elif fn == "masked_fill": return t0_fn(t1 < 0.5, 1.0) elif fn == "map": return t0_fn(t1, lambda x, y: x + y) elif fn == "map2": return t0_fn(t1, t2, lambda x, y, z: x + y + z) elif fn in fns_3_args: return t0_fn(1.0, t1, t2) elif fn in fns_value_kwarg: return t0_fn(t1, t2, value=1.0) else: return t0_fn(t1) # in-place pointwise operations don't actually work if the in-place # tensor is 0-strided (numpy has the same issue) if (0 not in large_expanded.stride() and 0 not in large_expanded_clone.stride()): r1 = tensorfn_inplace(large_expanded, small_expanded, small2_expanded) r2 = tensorfn_inplace(large_expanded_clone, small, small2) self.assertEqual(r1, r2) def broadcastable(t0, t1, t2=None): try: t1.expand_as(t0) if t2 is not None: t2.expand_as(t0) except RuntimeError: return False return True def _test_in_place_broadcastable(t0, t1, t2=None): if not broadcastable(t0, t1, t2): same_size = t0.numel() == t1.numel() and (t0.numel() == t2.numel() if t2 is not None else True) if not same_size: self.assertRaises(RuntimeError, lambda: tensorfn_inplace(t0, t1, t2)) else: tensorfn_inplace(t0, t1, t2) if fn not in fns_3_args and fn not in fns_value_kwarg: _test_in_place_broadcastable(small, large_expanded) _test_in_place_broadcastable(small, large) else: _test_in_place_broadcastable(small2, small_expanded, large_expanded) _test_in_place_broadcastable(small2, small, large) @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "cublas runtime error") @onlyCUDA @wrapDeterministicFlagAPITest def test_cublas_config_nondeterministic_alert(self, device): test_cases = [ # (function, (tensor sizes)) ('mm', ((2, 2), (2, 2),)), ('mv', ((2, 2), (2,),)), ('bmm', ((1, 2, 2), (1, 2, 2),))] test_configs = [ # (CuBLAS workspace config, is deterministic) ('garbage', False), (None, False), (':4096:8', True), (':16:8', True)] cublas_var_name = 'CUBLAS_WORKSPACE_CONFIG' is_cuda10_2_or_higher = ( (torch.version.cuda is not None) and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2])) def test_case_info(fn_name, config): return f'function "{fn_name}" with config "{"" if config is None else config}"' # Create processes to test each combination of test cases and config settings 
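# Run each combination in a fresh subprocess so the CUBLAS_WORKSPACE_CONFIG value is already in the environment before torch initializes CUDA in that process.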
processes = [] for fn_name, arg_sizes in test_cases: for config, is_config_deterministic in test_configs: env = os.environ.copy() if config is None: if env.get(cublas_var_name) is not None: del env[cublas_var_name] else: env[cublas_var_name] = config should_throw_error = is_cuda10_2_or_higher and not is_config_deterministic script = f""" import torch torch.use_deterministic_algorithms(True) fn = torch.{fn_name} arg_sizes = {arg_sizes} device = '{device}' should_throw_error = {should_throw_error} args = [] for arg_size in arg_sizes: args.append(torch.randn(*arg_size, device=device)) try: fn(*args) except RuntimeError as e: if not should_throw_error: raise RuntimeError('Did not expect any error to be raised') elif 'Deterministic behavior was enabled with either' not in str(e): raise RuntimeError('Expected a CuBLAS nondeterministic error, but got a different error') else: if should_throw_error: raise RuntimeError('Expected a CuBLAS nondeterministic error, but it was not raised') """ try: subprocess.check_output( [sys.executable, '-c', script], stderr=subprocess.STDOUT, # On Windows, opening the subprocess with the default CWD makes `import torch` # fail, so just set CWD to this script's directory cwd=os.path.dirname(os.path.realpath(__file__)), env=env) except subprocess.CalledProcessError as e: self.fail(msg=( f'Subprocess exception while attempting to run {test_case_info(fn_name, config)}:\n' + e.output.decode("utf-8"))) # FIXME: update OpInfos to support "nondeterministic samples" and port these tests # to that architecture @skipIfMps def test_nondeterministic_alert_AvgPool3d(self, device): module = torch.nn.AvgPool3d(3) input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('avg_pool3d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_AdaptiveAvgPool2d(self, device): module = torch.nn.AdaptiveAvgPool2d(3) input = torch.randn(2, 3, 3, requires_grad=True, device=device) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('adaptive_avg_pool2d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_AdaptiveAvgPool3d(self, device): module = torch.nn.AdaptiveAvgPool3d(3) input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('adaptive_avg_pool3d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_MaxPool3d(self, device): module = torch.nn.MaxPool3d(3) input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('max_pool3d_with_indices_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_AdaptiveMaxPool2d(self, device): module = torch.nn.AdaptiveMaxPool2d(3) input = torch.randn(2, 3, 3, requires_grad=True, device=device) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('adaptive_max_pool2d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_FractionalMaxPool2d(self, device): module = 
torch.nn.FractionalMaxPool2d(2, output_ratio=0.5) input = torch.randn(2, 3, 3, 3, requires_grad=True, device=device) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('fractional_max_pool2d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_FractionalMaxPool3d(self, device): module = torch.nn.FractionalMaxPool3d(2, output_ratio=0.5) input = torch.randn(2, 3, 3, 3, 3, requires_grad=True, device=device) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('fractional_max_pool3d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_interpolate_linear(self, device): input = torch.randn(1, 2, 4, device=device, requires_grad=True) res = torch.nn.functional.interpolate( input, size=12, mode='linear', align_corners=False) grad = torch.ones_like(res) @expectedAlertNondeterministic('upsample_linear1d_backward_out_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) def test_nondeterministic_alert_interpolate_bilinear(self, device): input = torch.randn(1, 2, 4, 4, device=device, requires_grad=True) res = torch.nn.functional.interpolate( input, size=12, mode='bilinear', align_corners=False) grad = torch.ones_like(res) @expectedAlertNondeterministic('upsample_bilinear2d_backward_out_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_interpolate_bicubic(self, device): input = torch.randn(1, 2, 4, 4, device=device, requires_grad=True) res = torch.nn.functional.interpolate( input, size=12, mode='bicubic', align_corners=False) grad = torch.ones_like(res) @expectedAlertNondeterministic('upsample_bicubic2d_backward_out_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_interpolate_trilinear(self, device): input = torch.randn(1, 2, 4, 4, 4, device=device, requires_grad=True) res = torch.nn.functional.interpolate( input, size=12, mode='trilinear', align_corners=False) grad = torch.ones_like(res) @expectedAlertNondeterministic('upsample_trilinear3d_backward_out_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_ReflectionPad1d(self, device): module = torch.nn.ReflectionPad1d((1, 2)) input = torch.randn(2, 3, 8, device=device, requires_grad=True) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('reflection_pad1d_backward_out_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) def test_nondeterministic_alert_ReflectionPad2d(self, device): module = torch.nn.ReflectionPad2d((1, 2, 3, 4)) input = torch.randn(2, 3, 8, 8, device=device, requires_grad=True) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('reflection_pad2d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_ReflectionPad3d(self, device): module = torch.nn.ReflectionPad3d((1, 2, 3, 4, 5, 6)) input = torch.randn(2, 3, 8, 8, 8, device=device, requires_grad=True) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('reflection_pad3d_backward_out_cuda', ['cuda']) def backward_func(slf, 
device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_ReplicationPad1d(self, device): module = torch.nn.ReplicationPad1d((1, 2)) input = torch.randn(2, 3, 4, device=device, requires_grad=True) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('replication_pad1d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) def test_nondeterministic_alert_ReplicationPad2d(self, device): module = torch.nn.ReplicationPad2d((1, 2, 3, 4)) input = torch.randn(2, 3, 4, 4, device=device, requires_grad=True) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('replication_pad2d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_ReplicationPad3d(self, device): module = torch.nn.ReplicationPad3d((1, 2, 3, 4, 5, 6)) input = torch.randn(2, 3, 4, 4, 4, device=device, requires_grad=True) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('replication_pad3d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) def test_nondeterministic_alert_NLLLoss(self, device): module = torch.nn.NLLLoss() input = torch.randn(2, 3, 5, 5, device=device) target = torch.rand(2, 5, 5, device=device).mul(3).floor().long() @expectedAlertNondeterministic('nll_loss2d_forward_out_cuda_template', ['cuda']) def forward_func(slf, device): module(input, target) forward_func(self, device) def test_nondeterministic_alert_CTCLoss(self, device): module = torch.nn.CTCLoss() input = torch.randn(50, 3, 15, device=device, requires_grad=True) target = torch.randint(0, 14, (3, 30), device=device) input_lengths = [50, 50, 50] target_lengths = [30, 25, 20] res = module(input, target, input_lengths, target_lengths) grad = torch.ones_like(res) @expectedAlertNondeterministic('ctc_loss_backward_gpu', ['cuda']) def backward_func(slf, device): res.backward(grad, retain_graph=True) backward_func(self, device) def test_nondeterministic_alert_EmbeddingBag_max(self, device): module = torch.nn.EmbeddingBag( 4, 3, None, 2., False, 'max', _weight=torch.randn(4, 3, device=device, requires_grad=True)) input = torch.randint(0, 3, (4, 3), device=device) res = module(input) grad = torch.ones_like(res) @expectedAlertNondeterministic('embedding_bag_backward_cuda_max', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) def test_nondeterministic_alert_scatter_add(self, device): def test_func(op_call): input = torch.randn(5, 4, device=device) dim = 0 index = torch.tensor([[3]], device=device) src = torch.tensor([[1.0]], device=device) @expectedAlertNondeterministic('scatter_add_cuda_kernel', ['cuda']) def forward_func(slf, device): op_call(input, dim, index, src) forward_func(self, device) test_func(torch.Tensor.scatter_add_) test_func(torch.Tensor.scatter_add) test_func(torch.scatter_add) @expectedFailureMeta # expected a non-determinitic error, but it was not raised @onlyNativeDeviceTypes def test_nondeterministic_alert_put(self, device): def test_func(op_call): a = torch.randn(10, device=device) indices = torch.tensor([0, 0], device=device) values = torch.tensor([0., 1.], device=device) @expectedAlertNondeterministic('put_') def forward_func(slf, device): op_call(a, indices, values, accumulate=False) forward_func(self, device) test_func(torch.Tensor.put) test_func(torch.Tensor.put_) def 
test_nondeterministic_alert_put_accumulate(self, device): def test_func(op_call): a = torch.randn(10, device=device) indices = torch.tensor([0, 0], device=device) values = torch.tensor([0., 1.], device=device) @expectedAlertNondeterministic('put_', ['cuda']) def forward_func(slf, device): op_call(a, indices, values, accumulate=True) forward_func(self, device) test_func(torch.Tensor.put) test_func(torch.Tensor.put_) @skipIfMps def test_nondeterministic_alert_histc(self, device): def test_func(op_call): a = torch.tensor([], device=device) @expectedAlertNondeterministic('_histc_cuda', ['cuda']) def forward_func(slf, device): res = op_call(a, min=0, max=3) forward_func(self, device) test_func(torch.histc) test_func(torch.Tensor.histc) @skipIfMps def test_nondeterministic_alert_bincount(self, device): def test_func(op_call): a = torch.tensor([], device=device, dtype=torch.long) @expectedAlertNondeterministic('_bincount_cuda', ['cuda']) def forward_func(slf, device): res = op_call(a) forward_func(self, device) test_func(torch.bincount) test_func(torch.Tensor.bincount) # Ensures that kthvalue throws nondeterministic alerts in the correct cases @dtypes(torch.double) def test_nondeterministic_alert_kthvalue(self, device, dtype): @expectedAlertNondeterministic('kthvalue CUDA', ['cuda']) def test_func(slf, device, call_type): S = 10 k = 5 a = torch.randn(S, device=device) if call_type == 'function': torch.kthvalue(a, k) elif call_type == 'method': a.kthvalue(k) elif call_type == 'out': values = torch.empty_like(a) indices = torch.empty((), device=device, dtype=torch.long) torch.kthvalue(a, k, out=(values, indices)) else: self.fail(f"'{call_type}' is not a valid call type") test_func(self, device, 'function') test_func(self, device, 'method') test_func(self, device, 'out') @onlyNativeDeviceTypes def test_nondeterministic_alert_gather(self, device): def test_func(op_call): a = torch.randn(3, 3, device=device, requires_grad=True) dim = 0 index = torch.tensor([[0]], device=device) res = op_call(a, dim, index) grad = torch.ones_like(res) @expectedAlertNondeterministic('scatter_add_cuda_kernel', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) test_func(torch.gather) test_func(torch.Tensor.gather) @skipIfMps def test_nondeterministic_alert_grid_sample_2d(self, device): input = torch.empty(1, 1, 2, 2, device=device, requires_grad=True) grid = torch.empty(1, 1, 1, 2, device=device) res = torch.nn.functional.grid_sample(input, grid, align_corners=False) grad = torch.ones_like(res) @expectedAlertNondeterministic('grid_sampler_2d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) @skipIfMps def test_nondeterministic_alert_grid_sample_3d(self, device): input = torch.empty(1, 1, 2, 2, 2, device=device, requires_grad=True) grid = torch.empty(1, 1, 1, 2, 3, device=device) res = torch.nn.functional.grid_sample(input, grid, align_corners=False) grad = torch.ones_like(res) @expectedAlertNondeterministic('grid_sampler_3d_backward_cuda', ['cuda']) def backward_func(slf, device): res.backward(grad) backward_func(self, device) def test_invalid_shapes_grid_sampler(self, device): make_arg = partial( make_tensor, device=device, dtype=torch.float64, requires_grad=True) inputs = ( # input, grid ((5, 5, 5, 5, 5,), (1, 1, 1, 4, 4,)), # 3d ((5, 5, 5, 5,), (1, 1, 4, 4,)), # 2d ) interpolation_mode = 0 padding_mode = 0 align_corners = True err = "expected grid and input to have same batch size" for input, grid in inputs: input = 
make_arg(input) grid = make_arg(grid, low=-1, high=1) # Wrapper for the 2d, 3d, and cuDNN functions listed below. with self.assertRaisesRegex(RuntimeError, err): torch.grid_sampler( input, grid, interpolation_mode, padding_mode, align_corners) # Expects 2d input. with self.assertRaisesRegex(RuntimeError, err): torch.grid_sampler_2d( input, grid, interpolation_mode, padding_mode, align_corners) # Expects 3d input. with self.assertRaisesRegex(RuntimeError, err): torch.grid_sampler_3d( input, grid, interpolation_mode, padding_mode, align_corners) # Expects 2d input. with self.assertRaisesRegex(RuntimeError, err): torch._grid_sampler_2d_cpu_fallback( input, grid, interpolation_mode, padding_mode, align_corners) # Expects 2d input, on CUDA. # Doesn't work on CPU and ROCm. if device != 'cpu' and TEST_CUDNN and not TEST_WITH_ROCM: with self.assertRaisesRegex(RuntimeError, err): torch.cudnn_grid_sampler(input, grid) def test_dist(self, device): def run_test(x, y): for p in [0, 1, 2, 3, 4, inf, -inf]: dist_xy = torch.dist(x, y, p) dist_xy_norm = torch.norm(x - y, p) self.assertEqual(dist_xy, dist_xy_norm) run_test(torch.randn(5, device=device), torch.randn(5, device=device)) x = torch.zeros(3, device=device) y = torch.zeros(3, device=device) y[1] = 1. run_test(x, y) # Ensures that median throws nondeterministic alerts in the correct cases @dtypes(torch.double) def test_nondeterministic_alert_median(self, device, dtype): def test_func(slf, device, call_type): S = 10 a = torch.randn(S, device=device) if call_type == 'function': torch.median(a) elif call_type == 'function with indices': torch.median(a, 0) elif call_type == 'method': a.median() elif call_type == 'method with indices': a.median(0) elif call_type == 'out with indices': result = torch.empty_like(a) indices = torch.empty((), dtype=torch.long, device=device) torch.median(a, 0, out=(result, indices)) else: self.fail(f"'{call_type}' is not a valid call type") @expectedAlertNondeterministic('median CUDA with indices output', ['cuda']) def test_func_expect_error(slf, device, call_type): test_func(slf, device, call_type) test_func(self, device, 'function') test_func_expect_error(self, device, 'function with indices') test_func(self, device, 'method') test_func_expect_error(self, device, 'method with indices') test_func_expect_error(self, device, 'out with indices') # FIXME: move to test_scatter_gather_ops def _test_gather_backward_one_dim(self, device, deterministic: bool = False) -> None: with DeterministicGuard(deterministic): m = random.randint(2000, 3000) elems = random.randint(10 * m, 20 * m) dim = 0 src = torch.randn(m, device=device, requires_grad=True) idx = torch.randint(m, (elems,), device=device) res = torch.gather(src, dim, idx) weight = torch.rand_like(res, device=device) * 10 ** 6 res.backward(weight) grad = src.grad.detach().clone() if torch.device(device).type == 'cuda': for _ in range(2): src.grad.data.zero_() res = torch.gather(src, dim, idx) res.backward(weight) self.assertEqual(src.grad, grad, atol=0, rtol=0) else: expected = torch.zeros_like(src, device=device) for i in range(elems): expected[idx[i]] += weight[i] self.assertEqual(grad, expected, atol=0, rtol=0) # FIXME: move to test_scatter_gather_ops @onlyNativeDeviceTypes def test_gather_backward_deterministic_path(self, device) -> None: self._test_gather_backward_one_dim(device, True) # FIXME: move to test_scatter_gather_ops @onlyCPU def test_gather_backward_one_dim(self, device) -> None: self._test_gather_backward_one_dim(device, False) # FIXME: move to 
test_scatter_gather_ops @onlyNativeDeviceTypes def test_scatter_add_one_dim_deterministic(self, device) -> None: with DeterministicGuard(True): m = random.randint(20, 30) elems = random.randint(2000 * m, 3000 * m) dim = 0 src = torch.randn(elems, device=device) idx = torch.randint(m, (elems,), device=device) x = torch.zeros(m, device=device) res = x.scatter_add(dim, idx, src) expected = torch.zeros(m, device=device) for i in range(elems): expected[idx[i]] += src[i] self.assertEqual(res, expected, atol=0, rtol=0) # FIXME: move to test_scatter_gather_ops @onlyNativeDeviceTypes def test_scatter_zero_size_index(self, device) -> None: null_index = torch.zeros((0, 4), dtype=torch.int64) null_arr = torch.zeros((0, 4)) original = torch.arange(4, dtype=torch.float32) result = original.scatter(0, null_index, null_arr) self.assertEqual(result, original, atol=0, rtol=0) @onlyCUDA def test_sync_warning(self, device): def _sync_raises_helper(f, level): with CudaSyncGuard(level): if level == 1: with self.assertWarnsRegex(UserWarning, "called a synchronizing "): f() elif level == 2: with self.assertRaisesRegex(RuntimeError, "called a synchronizing "): f() def _no_sync_helper(f, level): with CudaSyncGuard(level): f() def _ind_put_fn(x, ind, val): x[ind] = val return x def _ind_get_fn(x, ind): return x[ind] def _cond_fn(x): if x: # taking boolean value of a tensor synchronizes return x else: return 2 * x # prepare inputs for subsequent ops size = 4 x = torch.rand(size, device=device) y = torch.rand((), device=device) ind = torch.randint(size, (3,), device=device) ind_cpu = ind.cpu() repeats = torch.full((1,), 2, device=device) mask = torch.randint(2, (size,), device=device, dtype=bool) expect_no_sync = (lambda: _ind_put_fn(x, mask, 1.), lambda: _ind_put_fn(x, ind, y), lambda: _ind_get_fn(x, ind), lambda: torch.nn.functional.one_hot(ind, num_classes=size), lambda: torch.randperm(20000, device=device), lambda: torch.repeat_interleave(x, 2, output_size=2 * size), lambda: torch.repeat_interleave(x, repeats, output_size=2 * size)) expect_sync = (lambda: _ind_put_fn(x, mask, y), lambda: _ind_put_fn(x, ind_cpu, y), lambda: _ind_get_fn(x, mask), lambda: _ind_get_fn(x, ind_cpu), lambda: x.nonzero(), lambda: _cond_fn(y), lambda: torch.nn.functional.one_hot(ind), lambda: torch.repeat_interleave(x, 2), lambda: torch.repeat_interleave(x, repeats)) for f, level in product(expect_no_sync, (1, 2)): _no_sync_helper(f, level) for f, level in product(expect_sync, (1, 2)): _sync_raises_helper(f, level) @dtypes(*floating_types_and(torch.half, torch.bfloat16)) @skipIfMps def test_log_normal(self, device, dtype): a = torch.tensor([10], dtype=dtype, device=device).log_normal_() self.assertEqual(a.dtype, dtype) self.assertEqual(a.size(), torch.Size([1])) @dtypes(*all_types_and(torch.half, torch.bfloat16)) @skipIfMps def test_geometric(self, device, dtype): a = torch.tensor([10], dtype=dtype, device=device).geometric_(0.5) self.assertEqual(a.dtype, dtype) self.assertEqual(a.size(), torch.Size([1])) @skipIfMps def test_repeat_interleave(self, device): y = torch.tensor([[1, 2], [3, 4]], device=device) # exercise single argument function signature temp = y.repeat_interleave(2) self.assertEqual(torch.Size([8]), temp.size()) for dtype in [torch.int, torch.long]: lengths = torch.tensor([1, 2], dtype=dtype, device=device) output_size = torch.sum(lengths) a = torch.repeat_interleave( y, lengths, dim=0, ) self.assertEqual(a.dtype, y.dtype) self.assertEqual(a.size(), torch.Size([3, 2])) a_with_output = torch.repeat_interleave( y, lengths, 
dim=0, output_size=output_size, ) self.assertEqual(a_with_output.dtype, y.dtype) self.assertEqual(a_with_output.size(), torch.Size([3, 2])) @dtypes(*floating_types()) @dtypesIfCPU(*floating_types_and(torch.bfloat16)) @dtypesIfCUDA(*floating_types_and(torch.half)) def test_bernoulli_p(self, device, dtype): for trivial_p in ([0, 1], [1, 0, 1, 1, 0, 1]): x = torch.tensor(trivial_p, dtype=dtype, device=device) self.assertEqual(x.bernoulli().tolist(), trivial_p) def isBinary(t): return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum().item() == 0 p = torch.rand(5, 5, dtype=dtype, device=device) self.assertTrue(isBinary(p.bernoulli())) p = torch.rand(5, dtype=dtype, device=device).expand(5, 5) self.assertTrue(isBinary(p.bernoulli())) p = torch.rand(5, 5, dtype=dtype, device=device) torch.bernoulli(torch.rand_like(p), out=p) self.assertTrue(isBinary(p)) # RngUniform not implemented for Integral type in XLA test @dtypes(*floating_types()) @dtypesIfCPU(*all_types_and(torch.bool)) @dtypesIfCUDA(*all_types_and(torch.bool, torch.half)) def test_bernoulli_self(self, device, dtype): def isBinary(t): return torch.ne(t, 0).mul_(torch.ne(t, 1)).sum().item() == 0 t = torch.empty(10, 10, dtype=dtype, device=device) t.fill_(2) t.bernoulli_(0.5) self.assertTrue(isBinary(t)) for p_dtype in floating_types_and(*[torch.half] if device.startswith('cuda') else []): p = torch.rand(10, dtype=p_dtype, device=device).expand(10, 10) t.fill_(2) t.bernoulli_(p) self.assertTrue(isBinary(t)) t.fill_(2) torch.bernoulli(torch.rand_like(t, dtype=p_dtype), out=t) self.assertTrue(isBinary(t)) t.fill_(2) t.bernoulli_(torch.rand_like(t, dtype=p_dtype)) self.assertTrue(isBinary(t)) @slowTest @dtypes(*floating_types()) @dtypesIfCUDA(*floating_types_and(torch.half)) def test_bernoulli_edge_cases(self, device, dtype): # Need to draw a lot of samples to cover every random floating point number. 
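# A 10000 x 10000 tensor gives 10**8 Bernoulli draws per case.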
a = torch.zeros(10000, 10000, dtype=dtype, device=device) # probability of drawing "1" is 0 num_ones = (torch.bernoulli(a) == 1).sum() self.assertEqual(num_ones, 0) b = torch.ones(10000, 10000, dtype=dtype, device=device) # probability of drawing "1" is 1 num_zeros = (torch.bernoulli(b) == 0).sum() self.assertEqual(num_zeros, 0) @dtypes(*floating_types_and(torch.half, torch.bfloat16)) @skipIfMps def test_exponential(self, device, dtype): a = torch.tensor([10], dtype=dtype, device=device).exponential_(0.5) self.assertEqual(a.dtype, dtype) self.assertEqual(a.size(), torch.Size([1])) # Tests extremal behavior tests = ((-0, float('inf')), (0, float('inf')), (float('inf'), 0)) for test in tests: t = torch.empty((1,), device=device, dtype=dtype).exponential_(test[0]) self.assertTrue(t.item() == test[1]) # Tests that negative lambda fails with self.assertRaises(RuntimeError): torch.empty((1,), device=device, dtype=dtype).exponential_(-0.5) @onlyCUDA @dtypes(torch.half, torch.float) def test_exponential_no_zero(self, device, dtype): # naively, 0 in exponential can be generated with probability 2^-24 # so we need more samples to check if it's not generated # instead of doing one # don't test CPU, that would be a long test x = torch.empty(50000000, device=device, dtype=dtype).exponential_() self.assertTrue(x.min() > 0) def _generate_correlation_tensors(self, device, dtype): yield make_tensor((0, 0), dtype=dtype, device=device) yield make_tensor((1, 0), dtype=dtype, device=device) yield make_tensor((0, 1), dtype=dtype, device=device) yield make_tensor((2,), dtype=dtype, device=device) yield make_tensor((2, 1), dtype=dtype, device=device) yield make_tensor((2, 2), dtype=dtype, device=device) yield make_tensor((2, 3), dtype=dtype, device=device) yield make_tensor((5, 10), dtype=dtype, device=device) yield make_tensor((5, 10), dtype=dtype, device=device, noncontiguous=True) if dtype != torch.int: yield torch.tensor([0, -2, nan, 10.2, inf], dtype=dtype, device=device) @onlyNativeDeviceTypes @dtypes(torch.int, torch.float, torch.cfloat) def test_corrcoef(self, device, dtype): for x in self._generate_correlation_tensors(device, dtype): res = torch.corrcoef(x) ref = np.corrcoef(x.cpu().numpy()) self.assertEqual(res, ref, exact_dtype=False) @dtypes(torch.int, torch.float, torch.cfloat) def test_cov(self, device, dtype): def check(t, correction=1, fweights=None, aweights=None): res = torch.cov(t, correction=correction, fweights=fweights, aweights=aweights) t = t.cpu().numpy() fweights = fweights.cpu().numpy() if fweights is not None else None aweights = aweights.cpu().numpy() if aweights is not None else None ref = np.cov(t, ddof=correction, fweights=fweights, aweights=aweights) self.assertEqual(res, ref, atol=1e-05, rtol=1e-05, exact_dtype=False) for x in self._generate_correlation_tensors(device, dtype): check(x) num_observations = x.numel() if x.ndim < 2 else x.size(1) if num_observations > 0: fweights = torch.randint(1, 10, (num_observations,), device=device) aweights = make_tensor((num_observations,), dtype=torch.float, device=device, low=1) for correction, fw, aw in product([0, 1, 2], [None, fweights], [None, aweights]): check(x, correction, fweights, aweights) @skipIfNoSciPy @dtypes(*floating_types_and(torch.half, torch.bfloat16)) def test_uniform_kstest(self, device, dtype): from scipy import stats size = 1000 for from_ in [-42, 0, 4.2]: for to_ in [-4.2, 0, 42]: if to_ > from_: t = torch.empty(size, dtype=dtype, device=device).uniform_(from_, to_) res = stats.kstest(t.cpu().to(torch.double), 
'uniform', args=(from_, (to_ - from_))) self.assertTrue(res.statistic < 0.1) @skipIfNoSciPy @dtypes(*floating_types_and(torch.half)) @dtypesIfCUDA(*floating_types_and(torch.half, torch.bfloat16)) def test_normal_kstest(self, device, dtype): from scipy import stats size = 1000 for mean in [-10, 0, 50]: for std in [1, 5, 10]: t = torch.empty(size, dtype=dtype, device=device).normal_(mean=mean, std=std) res = stats.kstest(t.cpu().to(torch.double), 'norm', args=(mean, std)) self.assertTrue(res.statistic < 0.1) @skipIfMps @skipIfNoSciPy @dtypes(*floating_types_and(torch.half, torch.bfloat16)) def test_lognormal_kstest(self, device, dtype): from scipy import stats size = 1000 for mean in [-3, 0, 7]: for std in [1, 5, 7]: t = torch.empty(size, dtype=dtype, device=device).log_normal_(mean=mean, std=std) res = stats.kstest(t.cpu().to(torch.double), 'lognorm', args=(std, 0, math.exp(mean))) if dtype == torch.half: self.assertTrue(res.statistic < 0.3) else: self.assertTrue(res.statistic < 0.1) @skipIfMps @skipIfNoSciPy @dtypes(*floating_types_and(torch.half, torch.bfloat16)) def test_exponential_kstest(self, device, dtype): from scipy import stats size = 1000 for lambd in [0.5, 1.0, 5.0]: t = torch.empty(size, dtype=dtype, device=device).exponential_(lambd=lambd) res = stats.kstest(t.cpu().to(torch.double), 'expon', args=(0, 1 / lambd,)) self.assertTrue(res.statistic < 0.1) @skipIfMps @skipIfNoSciPy @dtypes(*floating_types_and(torch.half, torch.bfloat16)) def test_cauchy_kstest(self, device, dtype): from scipy import stats size = 1000 for median in [-10, 0, 50]: for sigma in [0.5, 1.0, 10.0]: t = torch.empty(size, dtype=dtype, device=device).cauchy_(median=median, sigma=sigma) res = stats.kstest(t.cpu().to(torch.double), 'cauchy', args=(median, sigma)) self.assertTrue(res.statistic < 0.1) @slowTest @onlyCUDA @dtypes(torch.bfloat16, torch.float32) def test_cauchy_no_inf(self, device, dtype): # torch.float16 will have `inf` because of its smaller range. 
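# 2**17 batches of 2**16 samples each (over 8 billion draws) make an inf, if one could be produced, overwhelmingly likely to appear.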
for _ in range((2**16) * 2): x = torch.empty((2**16), dtype=dtype, device=device) x.cauchy_() self.assertFalse(x.isinf().sum()) @skipIfMps @skipIfNoSciPy @dtypes(*all_types_and(torch.half, torch.bfloat16)) def test_geometric_kstest(self, device, dtype): from scipy import stats size = 1000 for p in [0.2, 0.5, 0.8]: t = torch.empty(size, dtype=dtype, device=device).geometric_(p=p) actual = np.histogram(t.cpu().to(torch.double), np.arange(1, 100))[0] expected = stats.geom(p).pmf(np.arange(1, 99)) * size res = stats.chisquare(actual, expected) self.assertEqual(res.pvalue, 1.0, atol=0.1, rtol=0) # FIXME: find test suite for pdist and cdist def test_pairwise_distance_empty(self, device): shape = (2, 0) x = torch.randn(shape, device=device) y = torch.randn(shape, device=device) self.assertEqual(torch.zeros(2, device=device), torch.pairwise_distance(x, y)) self.assertEqual(torch.zeros((2, 1), device=device), torch.pairwise_distance(x, y, keepdim=True)) shape = (0, 2) x = torch.randn(shape, device=device) y = torch.randn(shape, device=device) self.assertEqual(torch.zeros(0, device=device), torch.pairwise_distance(x, y)) self.assertEqual(torch.zeros((0, 1), device=device), torch.pairwise_distance(x, y, keepdim=True)) def test_pdist_empty(self, device): shape = (0, 2) x = torch.randn(shape, device=device) self.assertEqual(torch.empty(0, device=device), torch.pdist(x)) shape = (1, 2) x = torch.randn(shape, device=device) self.assertEqual(torch.empty(0, device=device), torch.pdist(x)) shape = (3, 0) x = torch.randn(shape, device=device) self.assertEqual(torch.zeros(3, device=device), torch.pdist(x)) def test_cdist_empty(self, device): x = torch.randn((0, 5), device=device) y = torch.randn((4, 5), device=device) self.assertEqual(torch.empty(0, 4, device=device), torch.cdist(x, y)) x = torch.randn((2, 5), device=device) y = torch.randn((0, 5), device=device) self.assertEqual(torch.empty(2, 0, device=device), torch.cdist(x, y)) x = torch.randn((2, 0), device=device) y = torch.randn((3, 0), device=device) self.assertEqual(torch.zeros(2, 3, device=device), torch.cdist(x, y)) x = torch.randn((2, 0), device=device) y = torch.randn((0, 0), device=device) self.assertEqual(torch.empty(2, 0, device=device), torch.cdist(x, y)) def _brute_cdist(self, x, y, p=2): r1 = x.shape[-2] r2 = y.shape[-2] if r1 == 0 or r2 == 0: return torch.empty(r1, r2, device=x.device) return torch.norm(x[..., None, :] - y[..., None, :, :], p=p, dim=-1) @skipIfMps def test_cdist_norm(self, device): for r1 in [3, 4, 5, 6]: for m in [2, 3, 4, 10]: for r2 in [4, 6, 7, 8]: for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]: x = torch.randn(r1, m, device=device) y = torch.randn(r2, m, device=device) if p == 2: for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertEqual(expected, actual, rtol=0, atol=0.02) else: actual = torch.cdist(x, y, p=p) expected = self._brute_cdist(x, y, p=p) self.assertEqual(expected, actual) @skipIfMps def test_cdist_norm_batch(self, device): for r1 in [3, 4, 5, 6]: for m in [2, 3, 4, 10]: for r2 in [4, 6, 7, 8]: for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]: x = torch.randn(2, 3, 6, r1, m, device=device) y = torch.randn(2, 3, 6, r2, m, device=device) if p == 2: for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertEqual(expected, actual, rtol=0, atol=0.02) else: actual = torch.cdist(x, y, p=p) 
expected = self._brute_cdist(x, y, p=p) self.assertEqual(expected, actual) @onlyCUDA def test_cdist_cuda_backward(self, device): for l1 in [1, 511, 513]: for l2 in [1, 511, 513]: for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]: x1 = torch.randn(4, l1, 32, device=device, requires_grad=True) x2 = x1.clone().detach_().requires_grad_() y1 = torch.randn(4, l2, 32, device=device, requires_grad=True) y2 = y1.clone().detach_().requires_grad_() if p == 2: for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: z1 = torch.cdist(x1, y1, p=2, compute_mode=cm).mean() z2 = self._brute_cdist(x2, y2, p=2).mean() z1.backward() z2.backward() self.assertEqual(x1.grad, x2.grad, rtol=0, atol=0.001) self.assertEqual(y1.grad, y2.grad, rtol=0, atol=0.001) else: z1 = torch.cdist(x1, y1, p=p).mean() z2 = self._brute_cdist(x2, y2, p=p).mean() self.assertEqual(x1.grad, x2.grad, rtol=0, atol=0.001) self.assertEqual(y1.grad, y2.grad, rtol=0, atol=0.001) @tf32_on_and_off(0.005) def test_cdist_large(self, device): for cm in ['use_mm_for_euclid_dist_if_necessary', 'use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: x = torch.randn(1000, 10, device=device) y = torch.randn(1000, 10, device=device) actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertEqual(expected, actual) @slowTest @tf32_on_and_off(0.01) def test_cdist_large_batch(self, device): for cm in ['use_mm_for_euclid_dist_if_necessary', 'use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: x = torch.randn(4, 3, 1000, 10, device=device) y = torch.randn(4, 3, 1000, 10, device=device) actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertEqual(expected, actual) @tf32_on_and_off(0.005) def test_cdist_non_contiguous(self, device): for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: x = torch.randn(5, 7, device=device).mT y = torch.randn(5, 3, device=device).mT actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertFalse(x.is_contiguous()) self.assertFalse(y.is_contiguous()) self.assertEqual(expected, actual) x = torch.randn(7, 5, device=device) y = torch.randn(5, 3, device=device).t() actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertTrue(x.is_contiguous()) self.assertFalse(y.is_contiguous()) self.assertEqual(expected, actual) x = torch.randn(5, 7, device=device).t() y = torch.randn(3, 5, device=device) actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertFalse(x.is_contiguous()) self.assertTrue(y.is_contiguous()) self.assertEqual(expected, actual) @tf32_on_and_off() def test_cdist_non_contiguous_batch(self, device): for cm in ['use_mm_for_euclid_dist', 'donot_use_mm_for_euclid_dist']: x = torch.randn(4, 3, 2, 5, 7, device=device).mT y = torch.randn(4, 3, 2, 5, 3, device=device).mT actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertFalse(x.is_contiguous()) self.assertFalse(y.is_contiguous()) self.assertEqual(expected, actual) x = torch.randn(7, 2, 7, 5, device=device) y = torch.randn(7, 2, 5, 3, device=device).mT actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = self._brute_cdist(x, y, p=2) self.assertTrue(x.is_contiguous()) self.assertFalse(y.is_contiguous()) self.assertEqual(expected, actual) x = torch.randn(4, 5, 7, device=device).mT y = torch.randn(4, 3, 5, device=device) actual = torch.cdist(x, y, p=2, compute_mode=cm) expected = 
self._brute_cdist(x, y, p=2) self.assertFalse(x.is_contiguous()) self.assertTrue(y.is_contiguous()) self.assertEqual(expected, actual) # Maybe merge into OpInfo? def test_cdist_euclidean_large(self, device): def _test_euclidean_large_cdist(sizex, sizey=None): if sizey is None: sizey = sizex x = torch.randn(sizex, device=device, dtype=torch.float) y = torch.randn(sizey, device=device, dtype=torch.float) eps = 1e-6 # to avoid extremum x = x - (((x - y) < eps).float() * 2 * eps) x.requires_grad = True y.requires_grad = True dist = torch.cdist(x, y, p=2) # Do a backward pass to check that it is valid for large # matrices loss = dist.sum() loss.backward() _test_euclidean_large_cdist((2000, 5)) # Ensure that cdist backward with p<1 does not produce NaNs @skipIfMps def test_cdist_grad_p_lt_1_no_nan(self, device): for p in [0.99, 0.7, 0.5, 0.1, 0.01]: x = torch.randn(1, 2, device=device) y = x.clone().detach() + torch.tensor([[1., 0.]], device=device) x.requires_grad = True y.requires_grad = True result = torch.cdist(x, y, p=p) result.backward(torch.ones_like(result)) self.assertFalse(torch.isnan(x.grad).any()) self.assertFalse(torch.isnan(y.grad).any()) def test_cdist_same_inputs(self, device): # Test to detect issues in cdist gradient calculation # When the distances are 0 sizex = (1, 27, 32) for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]: x = torch.randn(sizex, device=device, dtype=torch.float) dist_grad = torch.randn((1, 27, 27), device=device, dtype=torch.float) y = x.clone() eps = 1e-6 x.requires_grad = True d = torch.cdist(x, y) d.backward(dist_grad) # Check that the backward pass does not contain invalid # values such as nan or inf assert torch.isfinite(x.grad).all() @skipIfMps def test_cumsum(self, device): x = torch.rand(100, 100, device=device) res1 = torch.cumsum(x, 1) res2 = torch.tensor([]).to(device) torch.cumsum(x, 1, out=res2) self.assertEqual(res1, res2) x.cumsum_(1) self.assertEqual(res1, x) a = torch.tensor([[True, False, True], [False, False, False], [True, True, True]], device=device) b = a.byte() aRes = torch.cumsum(a, 0) bRes = torch.cumsum(b, 0) self.assertEqual(aRes, bRes) self.assertEqual(aRes, torch.tensor([[1, 0, 1], [1, 0, 1], [2, 1, 2]])) aRes = torch.cumsum(a, 1) bRes = torch.cumsum(b, 1) self.assertEqual(aRes, bRes) self.assertEqual(aRes, torch.tensor([[1, 1, 2], [0, 0, 0], [1, 2, 3]])) # Check that cumulative sum over a zero length dimension doesn't crash on backprop. # Also check that cumsum over other dimensions in a tensor with a zero-length # dimension also works # Also include a basic suite of similar tests for other base cases.
shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]] for shape in shapes: for dim in range(len(shape)): raw_tensor = torch.zeros(*shape, requires_grad=True) integrated = raw_tensor.cumsum(dim=dim) # Check that backward does not crash integrated.sum().backward() # Check that output maintained correct shape self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape) # Check a scalar example raw_tensor = torch.tensor(3., requires_grad=True) integrated = raw_tensor.cumsum(dim=-1) self.assertEqual(raw_tensor, integrated) # Check that backward does not crash integrated.sum().backward() # Check that output maintained correct shape self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape) @skipIfMps def test_cumprod(self, device): x = torch.rand(100, 100, device=device) res1 = torch.cumprod(x, 1) res2 = torch.tensor([]).to(device) torch.cumprod(x, 1, out=res2) self.assertEqual(res1, res2) x.cumprod_(1) self.assertEqual(res1, x) a = torch.tensor([[True, False, True], [False, False, False], [True, True, True]], dtype=torch.bool, device=device) b = a.byte() aRes = torch.cumprod(a, 0) bRes = torch.cumprod(b, 0) self.assertEqual(aRes, bRes) self.assertEqual(aRes, torch.tensor([[1, 0, 1], [0, 0, 0], [0, 0, 0]])) aRes = torch.cumprod(a, 1) bRes = torch.cumprod(b, 1) self.assertEqual(aRes, bRes) self.assertEqual(aRes, torch.tensor([[1, 0, 0], [0, 0, 0], [1, 1, 1]])) # Check that cumulative prod over a zero length dimension doesn't crash on backprop. # Also check that cumprod over other dimensions in a tensor with a zero-length # dimension also works # Also include a basic suite of similar tests for other base cases. shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]] for shape in shapes: for dim in range(len(shape)): raw_tensor = torch.zeros(*shape, requires_grad=True) integrated = raw_tensor.cumprod(dim=dim) # Check that backward does not crash integrated.sum().backward() # Check that output maintained correct shape self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape) # Check a scalar example raw_tensor = torch.tensor(3., requires_grad=True) integrated = raw_tensor.cumprod(dim=-1) self.assertEqual(raw_tensor, integrated) # Check that backward does not crash integrated.sum().backward() # Check that output maintained correct shape self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape) @skipIfMps def test_cummax_cummin(self, device): def test_ops(op, string_of_function_name, expected_output1, expected_output2): x = torch.rand(100, 100, device=device) out1 = op(x, 1) res2 = torch.empty(0, device=device) indices2 = torch.empty(0, dtype=torch.int64, device=device) op(x, 1, out=(res2, indices2)) self.assertEqual(out1[0], res2) self.assertEqual(out1[1], indices2) a = torch.tensor([[True, False, True], [False, False, False], [True, True, True]], dtype=torch.bool, device=device) b = a.byte() aRes = op(a, 0) bRes = op(b, 0) self.assertEqual(aRes[0], bRes[0].bool()) self.assertEqual(aRes[0], expected_output1.bool()) # test inf and nan input x = torch.tensor([4, inf, 1.5, -inf, 0, nan, 1]) xRes = op(x, 0)[0] self.assertEqual(xRes, expected_output2) # op shouldn't support values, indices with a dtype, device type or layout # different from that of input tensor t = torch.randn(10) values = torch.empty(0, dtype=torch.int16) indices = torch.empty(0, dtype=torch.int64) with self.assertRaisesRegex( RuntimeError, 'expected scalar_type Float but found Short'): op(t, 0, out=(values, indices)) # Check that op over a zero length dimension doesn't crash on backprop.
# Also check that op over other dimensions in a tensor with a zero-length # dimension also works # Also include a basic suite of similar tests for other bases cases. shapes = [[2, 0], [2, 1, 4], [0, 2, 3], [1], [5]] for shape in shapes: for dim in range(len(shape)): raw_tensor = torch.zeros(*shape, requires_grad=True) integrated = getattr(raw_tensor, string_of_function_name)(dim=dim) # Check that backward does not crash integrated[0].sum().backward() # Check that output maintained correct shape self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape) # Check a scalar example raw_tensor = torch.tensor(3., requires_grad=True) integrated = getattr(raw_tensor, string_of_function_name)(dim=-1) # Check that backward does not crash integrated[0].sum().backward() # Check that output maintained correct shape self.assertEqual(raw_tensor.shape, raw_tensor.grad.shape) expected_out = torch.tensor([4, inf, inf, inf, inf, nan, nan]) test_ops(torch.cummax, "cummax", torch.tensor([[1, 0, 1], [1, 0, 1], [1, 1, 1]]), expected_out) expected_out = torch.tensor([4, 4, 1.5, -inf, -inf, nan, nan]) test_ops(torch.cummin, "cummin", torch.tensor([[1, 0, 1], [0, 0, 0], [0, 0, 0]]), expected_out) @skipIfMps def test_logcumsumexp(self, device): def logcumsumexp(a, axis): return torch.cumsum(a.exp(), axis=axis).log_() axis = -1 a = torch.randn(100, 100, device=device) actual = a.logcumsumexp(axis) expected = logcumsumexp(a, axis) self.assertEqual(a.dtype, actual.dtype) self.assertEqual(expected.shape, actual.shape) self.assertEqual(expected, actual) # check -inf and nan handling x = torch.tensor([-float('inf'), -float('inf'), 1.0, 1.0, float('inf'), float('inf'), float('nan'), 1.0, 1.0], device=device) x2d = x.unsqueeze(0).expand(2, -1) for inp in (x, x2d): actual = inp.logcumsumexp(axis) expected = logcumsumexp(inp, axis) self.assertEqual(expected, actual) # Check that out is actually inplace b = torch.randn(5, 2, device=device) inplace_out = torch.zeros(5, 2, device=device) expected = logcumsumexp(b, axis) torch.logcumsumexp(b, axis=axis, out=inplace_out) self.assertEqual(inplace_out, expected) # Check input and inplace_output type mismatch b = torch.randn(5, 2, device=device, dtype=torch.float64) inplace_out = torch.zeros(5, 2, device=device, dtype=torch.float32) with self.assertRaisesRegex( RuntimeError, 'expected scalar_type Double but found Float'): torch.logcumsumexp(b, axis, out=inplace_out) def _test_diff_numpy(self, t, dims=None): # Helper for test_diff to compare with NumPy reference implementation def to_np(t): if t.dtype == torch.bfloat16: return t.to(dtype=torch.float, device="cpu").numpy() else: return t.cpu().numpy() for dim in dims if dims else range(t.dim()): prepend = t.narrow(dim, 0, 1) append = t.narrow(dim, 0, 1) np_t = to_np(t) # test when no prepend and append for n in range(t.size(dim)): actual = torch.diff(t, dim=dim, n=n) expected = torch.from_numpy(np.diff(np_t, axis=dim, n=n)) self.assertEqual(actual, expected.to(t.dtype)) # test when prepend and append's size along dim is 1 for n in range(1, t.size(dim) + 4): actual = torch.diff(t, dim=dim, n=n, prepend=prepend, append=append) expected = torch.from_numpy(np.diff(np_t, axis=dim, n=n, prepend=to_np(prepend), append=to_np(append))) self.assertEqual(actual, expected.to(t.dtype)) # test when prepend and append's size along dim != 1 for n in range(1, t.size(dim) * 3): actual = torch.diff(t, dim=dim, n=n, prepend=t, append=t) expected = torch.from_numpy(np.diff(np_t, axis=dim, n=n, prepend=np_t, append=np_t)) self.assertEqual(actual, 
expected.to(t.dtype)) # All tensors appear contiguous on XLA @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bool)) def test_diff_noncontig(self, device, dtype): shapes = ( (1,), (1, 5), (3, 5), (1, 5, 1), (2, 3, 5)) for shape in shapes: contig = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9) non_contig = torch.empty(shape + (2, 2), device=device, dtype=dtype)[..., 0] non_contig = non_contig.select(-1, -1) non_contig.copy_(contig) self.assertTrue(not non_contig.is_contiguous() or shape == (1,)) self._test_diff_numpy(non_contig) # RngNormal not implemented for type f16 for XLA @dtypes(*all_types_and_complex_and(torch.bool)) @dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool)) @dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool)) def test_diff(self, device, dtype): shapes = ( (1,), (1, 5), (3, 5), (1, 5, 1), (2, 3, 5)) for shape in shapes: contig = make_tensor(shape, dtype=dtype, device=device, low=-9, high=9) self._test_diff_numpy(contig) t = torch.ones(2, 3) with self.assertRaisesRegex( RuntimeError, 'diff expects prepend or append to be the same dimension as input'): invalid_prepend = torch.tensor([1, 2, 3], device=device, dtype=dtype) t.diff(dim=0, prepend=invalid_prepend) with self.assertRaisesRegex( RuntimeError, 'diff expects the shape of tensor to prepend or append to match that of input'): invalid_prepend = torch.tensor([[0, 1]], device=device, dtype=dtype) t.diff(dim=0, prepend=invalid_prepend) with self.assertRaisesRegex( RuntimeError, 'diff expects input to be at least one-dimensional'): scalar = torch.tensor(2, device=device, dtype=dtype) torch.diff(scalar) # if the given input arg is not a list, it returns a list of single element: [arg] def _wrap_to_list(self, input_array): return input_array if isinstance(input_array, list) else [input_array] # To ensure inf, -inf, and nan values do not cause divergence between Numpy and PyTorch. # There are two types of possible divergence: # 1. When we compute a,b both real numbers and has very small absolute values (i.e. very near to 0.0) # then, result of a/b be inf, -inf and nan, and this cause divergence. # 2. When we are dividing complex numbers by zero. For example, when a = torch.tensor(3+5j) we have # a/0 to be equal to nan + nan*j in PyTorch and inf + inf*j in Numpy. def _inf_nan_preprocess(self, actual, expected): for i in range(len(expected)): expected[i] = np.nan_to_num(expected[i], nan=nan, posinf=nan, neginf=nan) # nan_to_num is not defined for complex tensors in PyTorch. 
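            # A quick, commented-out sketch of the divergence described above
            # (kept as comments so the helper itself does not run extra ops):
            #
            #     torch.nan_to_num(torch.tensor(3 + 5j))   # not supported for complex inputs
            #     torch.tensor(3 + 5j) / 0                 # -> nan+nan*j in PyTorch
            #     np.array(3 + 5j) / 0                     # -> inf+inf*j in NumPy (per the note above)
            #
            # which is why the complex case below applies nan_to_num to the
            # .real and .imag components separately.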
if actual[i].dtype == torch.complex64 : actual[i].real = torch.nan_to_num(actual[i].real, nan=nan, posinf=nan, neginf=nan) actual[i].imag = torch.nan_to_num(actual[i].imag, nan=nan, posinf=nan, neginf=nan) else: actual[i] = torch.nan_to_num(actual[i], nan=nan, posinf=nan, neginf=nan) return actual, expected @onlyNativeDeviceTypes @dtypes(torch.long, torch.float32, torch.complex64) def test_gradient_all(self, device, dtype): def create_scalar(shape): return make_tensor((1,), device='cpu', dtype=dtype, low=1.).item() def create_list(shape): return make_tensor((len(shape),), device='cpu', dtype=dtype, low=1.).tolist() def create_coordinate_tensors(shape): tensor_list = [] for i in range(len(shape)): tensor_list.append(make_tensor((shape[i],), device=device, dtype=dtype)) return tensor_list def filter_shape(shape, dim): filtered_shape = [] for i in range(len(dim)): filtered_shape.append(shape[dim[i]]) return filtered_shape # shape, dims format test_cases = ( ((5,), (0,)), ((4, 4), (0, 1)), ((3, 3, 3), (-1, 0)), ((4, 4, 4), (2,)), ((4, 4, 4), (0, 1)), ((4, 4, 4, 3), (0, 2, 3)), ((4, 5, 3, 4, 3), (1, 2)), ((4, 3, 6, 5, 3), (2, 4)), ((4, 3, 3, 5, 3), (0, 1, 2, 3, 4)), ((1, 3, 3), (1, 2)), ((1, 5), (1,)), ) for case, contig, edge_order, space_fn in product(test_cases, [True, False], [1, 2], (create_scalar, create_list, create_coordinate_tensors)): shape, dims = case # filter shape by dims before passing filtered shape to create_* functions filtered_shape = filter_shape(shape, dims) spacing = space_fn(filtered_shape) t = make_tensor(shape, device=device, dtype=dtype, noncontiguous=not contig) t_np = t.cpu().numpy() actual = torch.gradient(t, spacing=spacing, dim=dims, edge_order=edge_order) if space_fn == create_coordinate_tensors and spacing[0].device != 'cpu': spacing = [space.cpu().detach().numpy() for space in spacing] expected = np.gradient(t_np, *self._wrap_to_list(spacing), axis=dims, edge_order=edge_order) actual, expected = self._inf_nan_preprocess(list(actual), self._wrap_to_list(expected)) self.assertEqual(actual, expected, equal_nan=True, atol=1e-4, rtol=0, exact_dtype=False) @onlyNativeDeviceTypes @dtypes(torch.long, torch.float32, torch.complex64) def test_gradient_extreme_cases(self, device, dtype): # Test behaviour for inf and nan values actual = torch.gradient(torch.tensor([2, -2, inf, inf, -inf, -inf, inf, 3, -inf, 2, nan, nan, 3, inf, nan])) expected = np.gradient(np.array([2, -2, inf, inf, -inf, -inf, inf, 3, -inf, 2, nan, nan, 3, inf, nan])) self.assertEqual(actual, self._wrap_to_list(expected), exact_dtype=False) # Test behaviour in very big tensors large_size = 100000 t = make_tensor((large_size,), dtype=dtype, device=device) t_np = t.cpu().numpy() coordinates_np = list(np.random.randn(large_size)) coordinates = [torch.tensor(coordinates_np, device=device)] actual = torch.gradient(t, spacing=coordinates, dim=0, edge_order=1) expected = [np.gradient(t_np, coordinates_np, axis=0, edge_order=1)] self.assertEqual(actual, expected, exact_dtype=False) actual = torch.gradient(t, spacing=coordinates, dim=0, edge_order=2) expected = [np.gradient(t_np, coordinates_np, axis=0, edge_order=2)] self.assertEqual(actual, expected, exact_dtype=False) @onlyNativeDeviceTypes def test_gradient_type_promotion(self, device): inputs = ( make_tensor((4, 4), device=device, dtype=torch.float32), make_tensor((4, 4), device=device, dtype=torch.complex64), make_tensor((4, 4), device=device, dtype=torch.int64), ) spacing = ( make_tensor((1,), device='cpu', dtype=torch.float32).item(), make_tensor((1,), 
device='cpu', dtype=torch.int64).item(), make_tensor((1,), device='cpu', dtype=torch.complex64).item(), make_tensor((2,), device='cpu', dtype=torch.float32, low=0.1).tolist(), make_tensor((2,), device='cpu', dtype=torch.int64, low=1).tolist(), make_tensor((2,), device='cpu', dtype=torch.complex64).tolist(), [make_tensor((4,), device=device, dtype=torch.float32), make_tensor((4,), device=device, dtype=torch.float32)], [make_tensor((4,), device=device, dtype=torch.int64), make_tensor((4,), device=device, dtype=torch.int64)], [make_tensor((4,), device=device, dtype=torch.complex64), make_tensor((4,), device=device, dtype=torch.complex64)], ) for input, spacing_or_coord, edge_order in product(inputs, spacing, [1, 2]): input_np = input.cpu().numpy() input_np = input.cpu().numpy() actual = torch.gradient(input, spacing=spacing_or_coord, dim=(0, 1), edge_order=edge_order) spacing_or_coord_wrapped = self._wrap_to_list(spacing_or_coord) spacing_or_coord_np = [] if torch.is_tensor(spacing_or_coord_wrapped[0]) and torch.device(spacing_or_coord_wrapped[0].device).type != 'cpu': for i in range(len(spacing_or_coord_wrapped)): spacing_or_coord_np.append(spacing_or_coord_wrapped[i].detach().clone().cpu().numpy()) else: spacing_or_coord_np = spacing_or_coord_wrapped expected = np.gradient(input_np, *spacing_or_coord_np, axis=(0, 1), edge_order=edge_order) if actual[0].dtype == torch.complex64 and input.dtype != torch.complex64: for i in range(len(actual)): self.assertEqual(actual[i].real, expected[i].real, exact_dtype=False) # Type promotion fails on Numpy when spacing is given as complex number and input is given as real. # Result is given just as real number and all the imaginary parts to be equal to zero. self.assertEqual(expected[i].imag, torch.zeros(actual[i].shape), exact_dtype=False) else: actual, expected = self._inf_nan_preprocess(list(actual), expected) self.assertEqual(actual, expected, equal_nan=True, exact_dtype=False) def _test_large_cum_fn_helper(self, x, fn): x_cpu = x.cpu().float() expected = fn(x_cpu) actual = fn(x).cpu().float() self.assertEqual(expected, actual.cpu().float()) @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "sandcastle OOM with current tpx gpu/re configuration") @onlyCUDA @dtypes(torch.half) # only small dtype not to get oom def test_large_cumsum(self, device, dtype): # initialization to avoid overflow and half caveats x = torch.empty(2**30 + 200, device=device, dtype=dtype) x[::3] = -3 x[1::3] = 2 x[2::3] = 1 self._test_large_cum_fn_helper(x, lambda x: torch.cumsum(x, 0)) @onlyCUDA @dtypes(torch.half) # only small dtype not to get oom def test_large_cumprod(self, device, dtype): # initialization to avoid overflow and half caveats x = torch.empty(2**30 + 200, device=device, dtype=dtype) x[::3] = 8 x[1::3] = .25 x[2::3] = .5 self._test_large_cum_fn_helper(x, lambda x: torch.cumprod(x, 0)) @skipIfMps def test_discontiguous_out_cumsum(self, device): x = torch.randn(4, 8, device=device) y = torch.empty(4, 16, device=device)[:, ::2] out = torch.cumsum(x, 0) torch.cumsum(x, 0, out=y) self.assertFalse(y.is_contiguous()) self.assertEqual(out, y, atol=0., rtol=0.) 
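    # Illustrative sketch only (not part of the original suite): the test above
    # depends on `out=` writing through a non-contiguous view. This hypothetical
    # helper (underscore-prefixed so the test runner ignores it) shows the same
    # pattern in isolation on CPU.
    def _sketch_cumsum_into_strided_out(self):
        x = torch.randn(4, 8)
        # Every other column of a wider buffer: a stride-2, non-contiguous view.
        y = torch.empty(4, 16)[:, ::2]
        torch.cumsum(x, 0, out=y)  # writes through the strided view
        assert not y.is_contiguous()
        assert torch.equal(y, torch.cumsum(x, 0))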
def _test_cumminmax_helper(self, x, fn, expected_val, expected_ind): val, ind = fn(x, -1) self.assertEqual(val, expected_val, atol=0, rtol=0) self.assertEqual(ind, expected_ind, atol=0, rtol=0) out_val = torch.empty_like(val).t().contiguous().t() out_ind = torch.empty_like(ind).t().contiguous().t() fn(x, -1, out=(out_val, out_ind)) self.assertFalse(out_val.is_contiguous()) self.assertFalse(out_ind.is_contiguous()) self.assertEqual(out_val, expected_val, atol=0, rtol=0) self.assertEqual(out_ind, expected_ind, atol=0, rtol=0) @skipIfMps def test_cummax_discontiguous(self, device): x = torch.tensor([[0, 1, 2, 3, 2, 1], [4, 5, 6, 5, 6, 7]], device=device, dtype=torch.float).t().contiguous().t() expected_val = torch.tensor([[0, 1, 2, 3, 3, 3], [4, 5, 6, 6, 6, 7]], device=device, dtype=torch.float) expected_ind = torch.tensor([[0, 1, 2, 3, 3, 3], [0, 1, 2, 2, 4, 5]], device=device, dtype=torch.long) self._test_cumminmax_helper(x, torch.cummax, expected_val, expected_ind) @skipIfMps def test_cummin_discontiguous(self, device): x = torch.tensor([[3, 2, 1, 0, 1, 2], [7, 6, 5, 4, 5, 2]], device=device, dtype=torch.float).t().contiguous().t() expected_val = torch.tensor([[3, 2, 1, 0, 0, 0], [7, 6, 5, 4, 4, 2]], device=device, dtype=torch.float) expected_ind = torch.tensor([[0, 1, 2, 3, 3, 3], [0, 1, 2, 3, 3, 5]], device=device, dtype=torch.long) self._test_cumminmax_helper(x, torch.cummin, expected_val, expected_ind) def test_bool_tensor_value_change(self, device): x = torch.tensor([True, False], dtype=torch.bool, device=device) x[0] = False x[1] = True self.assertEqual(x, torch.tensor([False, True], dtype=torch.bool, device=device)) # FIXME: move to shape ops test suite def test_unfold_all_devices_and_dtypes(self, device): for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16): if dt == torch.bool: x = torch.empty((0, 1, 3, 0), dtype=dt, device=device) self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape) else: x = torch.empty((0, 1, 3, 0), dtype=dt, device=device) self.assertEqual((0, 1, 1, 0, 3), x.unfold(2, 3, 2).shape) # FIXME: move to shape ops test suite def test_unfold_scalars(self, device): x = torch.tensor(0.5, device=device) # unfold on a 0-dimensional tensor should always return a 1-d dimensional # tensor of shape [size] (i.e., the second parameter to unfold) self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 1)) self.assertEqual(torch.empty(0, device=device), x.unfold(0, 0, 2)) self.assertEqual(torch.tensor([0.5], device=device), x.unfold(0, 1, 1)) # FIXME: move to data movement test suite def test_copy_all_dtypes_and_devices(self, device): from copy import copy for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16): x = torch.tensor([1, 2, 3, 4], dtype=dt, device=device) x_clone = x.clone() y = copy(x) y.fill_(1) # copy is a shallow copy, only copies the tensor view, # not the data self.assertEqual(x, y) # FIXME: move to data movement test suite @onlyNativeDeviceTypes def test_copy_math_view(self, device): for dst_dtype, src_dtype in [ (torch.float32, torch.float32), (torch.float64, torch.float32), (torch.int64, torch.int32), (torch.complex128, torch.complex64), ]: src = make_tensor((100,), dtype=src_dtype, device=device) dst = torch.empty(100, dtype=dst_dtype, device=device) dst.copy_(src) self.assertEqual(dst, src, exact_dtype=False) dst.copy_(src._neg_view()) self.assertEqual(dst, src.neg(), exact_dtype=False) dst._neg_view().copy_(torch._neg_view(src)) self.assertEqual(dst, src, exact_dtype=False) dst._neg_view().copy_(src) 
self.assertEqual(dst, src.neg(), exact_dtype=False) for dst_dtype, src_dtype in [ (torch.complex64, torch.complex64), (torch.complex128, torch.complex64), ]: src = make_tensor((100,), dtype=src_dtype, device=device) dst = torch.empty(100, dtype=dst_dtype, device=device) dst.conj().copy_(src) self.assertEqual(dst, src.conj_physical(), exact_dtype=False) dst.conj().copy_(src._neg_view()) self.assertEqual(dst, src.neg().conj_physical(), exact_dtype=False) # FIXME: move to data movement test suite @onlyNativeDeviceTypes @dtypes(torch.int64, torch.float32, torch.complex64) def test_copy_transpose_math_view(self, device, dtype): src = make_tensor((100, 100), dtype=dtype, device=device).transpose(0, 1) dst = torch.empty((100, 100), dtype=dtype, device=device) dst._neg_view().copy_(src) self.assertEqual(dst, -src) dst._neg_view().copy_(src._neg_view()) self.assertEqual(dst, src) dst.copy_(src._neg_view()) self.assertEqual(dst, -src) if dtype.is_complex: dst.conj().copy_(src) self.assertEqual(dst, src.conj_physical()) dst.conj().copy_(src.conj()) self.assertEqual(dst, src) dst.copy_(src.conj()) self.assertEqual(dst, src.conj_physical()) def test_clone_all_dtypes_and_devices(self, device): for dt in all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16): x = torch.tensor((1, 1), dtype=dt, device=device) y = x.clone() self.assertEqual(x, y) def test_clone_zero_stride_dim(self, device): # stride zero, size 1 axis, not contiguous x = torch.randn(10) y = x.as_strided([2, 1, 5], [1, 0, 2]) self.assertEqual(y, y.clone()) def test_clone_not_memory_dense(self): # github issue: https://github.com/pytorch/pytorch/issues/64176 x = torch.randn(10, 8).t()[::2, ::2] y = x.clone() # should retain permutation after densification self.assertTrue(y.stride() == (1, 4)) # FIXME: move to elementwise ternary test suite @dtypesIfCUDA(*set(get_all_math_dtypes('cuda'))) @dtypes(*set(get_all_math_dtypes('cpu'))) def test_addcmul(self, device, dtype): # Returns floating or integral scalar corresponding to dtype def _number(floating, integer, dtype): if dtype in [torch.half, torch.float, torch.double, torch.bfloat16]: return floating elif dtype in [torch.cfloat, torch.cdouble]: return floating * (1 + 1j) else: return integer def rand_tensor(size, dtype, device): if dtype.is_floating_point or dtype.is_complex: return torch.rand(size=size, dtype=dtype, device=device) if dtype == torch.uint8: return torch.randint(1, 5, size=size, dtype=dtype, device=device) else: return torch.randint(-5, 5, size=size, dtype=dtype, device=device) a = rand_tensor((2, 2), dtype=dtype, device=device) b = rand_tensor((2, 2), dtype=dtype, device=device) c = rand_tensor((2, 2), dtype=dtype, device=device) alpha = _number(0.5, 3, dtype) actual = torch.addcmul(a, b, c, value=alpha) expected = a + alpha * b * c self.assertEqual(expected, actual) with self.assertWarnsOnceRegex( UserWarning, "This overload of addcmul is deprecated"): self.assertEqual(actual, torch.addcmul(a, alpha, b, c)) if self.device_type == 'cuda' and dtype == torch.half: a = torch.tensor([60000.0], device=device, dtype=dtype) b = torch.tensor([60000.0], device=device, dtype=dtype) c = torch.tensor([2.0], device=device, dtype=dtype) out = torch.addcmul(a, b, c, value=-1) self.assertTrue(not (out.isnan() or out.isinf())) # FIXME: move to shape ops test suite def test_narrow_empty(self, device): x = torch.randn(2, 3, 4, device=device) for d in range(x.dim()): y = x.narrow(d, x.size(d), 0) sz = list(x.size()) sz[d] = 0 self.assertEqual(sz, y.size()) # FIXME: move to indexing test 
suite @parametrize("reduce", ['prod', 'amin', 'amax', 'mean']) @dtypes(*floating_types_and(torch.half, torch.bfloat16)) def test_index_reduce(self, device, dtype, reduce): size = (3, 4, 5) index_dtypes = [torch.int, torch.long] include_selfs = [True, False] reduction_init = {'prod': 1, 'mean': 0, 'amin': float('inf'), 'amax': -float('inf')} for dest_contig, src_contig, index_contig in product([True, False], repeat=3): for idx_dtype, include_self in product(index_dtypes, include_selfs): for dim in range(len(size)): num_src = np.random.randint(10) num_dest = size[dim] dest = torch.randn(size, dtype=dtype, device=device) if not dest_contig: dest = make_tensor(size, device=device, dtype=dtype, noncontiguous=True) src = torch.randn(*size[:dim], num_src, *size[dim + 1:], dtype=dtype, device=device) if not src_contig: # noncontiguous_like fails with RuntimeError: XLA tensors do not have storage src = torch.testing.make_non_contiguous(src) idx = torch.randint(num_dest, (num_src,), dtype=idx_dtype, device=device) if not index_contig: # noncontiguous_like fails with RuntimeError: XLA tensors do not have storage idx = torch.testing.make_non_contiguous(idx) expected = dest.clone() dest.index_reduce_(dim, idx, src, reduce, include_self=include_self) # fill rows in idx with reduction inits if include_self=False if (not include_self): expected.index_fill_(dim, idx.long(), reduction_init[reduce]) expected = expected.transpose(0, dim) src = src.transpose(0, dim) for i in range(num_src): if reduce == 'prod': expected[idx[i]] *= src[i] elif reduce == 'amin': torch.minimum(expected[idx[i]], src[i], out=expected[idx[i]]) elif reduce == 'amax': torch.maximum(expected[idx[i]], src[i], out=expected[idx[i]]) else: expected[idx[i]] += src[i] if reduce == 'mean': counts = torch.ones_like(expected) if include_self else torch.zeros_like(expected) counts.index_add_(0, idx, torch.ones_like(src)) counts.masked_fill_(counts == 0, 1) expected /= counts expected = expected.transpose(0, dim) self.assertEqual(dest, expected) # FIXME: move to test indexing @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_index_copy(self, device, dtype): # We just test for num_copy <= num_dest, as otherwise there are repeated indices # and the behavior is undefined num_copy, num_dest = 3, 5 def make_arg(batch_sizes, n, dim, contig): size_arg = batch_sizes[:dim] + (n,) + batch_sizes[dim:] return make_tensor(size_arg, dtype=dtype, device=device, low=None, high=None, noncontiguous=not contig) def ref_index_copy(tgt, dim, idx, src): for i in range(idx.size(0)): idx_dest = dim * (slice(None),) + (idx[i],) idx_src = dim * (slice(None),) + (i,) tgt[idx_dest] = src[idx_src] # More thorough testing as in index_add for dest_contig, src_contig, index_contig in product([True, False], repeat=3): for other_sizes in ((), (4, 5)): for dim in range(len(other_sizes)): dest = make_arg(other_sizes, num_dest, dim, dest_contig) src = make_arg(other_sizes, num_copy, dim, src_contig) idx = torch.randperm(num_dest, dtype=torch.int64, device=device)[:num_copy] if not index_contig: idx = torch.repeat_interleave(idx, 2, dim=-1) idx = idx[..., ::2] dest2 = dest.clone() dest.index_copy_(dim, idx, src) ref_index_copy(dest2, dim, idx, src) self.assertEqual(dest, dest2) # FIXME: move to test indexing # onlyNativeDeviceTypes due to an XLA error: # https://github.com/pytorch/pytorch/issues/53256 @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_index_copy_scalars(self, device, dtype): # 
Create the 8 possible combinations of scalar sizes for target / index / source scalars = ((make_tensor(size_t, dtype=dtype, device=device, low=None, high=None), make_tensor(size_i, dtype=torch.int64, device=device, low=0, high=1), make_tensor(size_s, dtype=dtype, device=device, low=None, high=None)) for size_t, size_i, size_s in product([(), (1,)], repeat=3)) for target, idx, source in scalars: target.index_copy_(0, idx, source) self.assertEqual(target.item(), source.item()) # FIXME: move to test indexing @onlyCPU def test_errors_index_copy(self, device): # We do not test the GPU as the CUDA_ASSERT would break the CUDA context idx_dim = 8 tgt_dim = 5 batch_dim = 3 # Too large of an index a = torch.randn(batch_dim, tgt_dim, device=device) idx = torch.full((idx_dim,), tgt_dim, device=device) c = torch.zeros(batch_dim, idx_dim, device=device) with self.assertRaises(IndexError): a.index_copy_(1, idx, c) # Too small (negative indices) idx = torch.full((idx_dim,), -1, device=device) with self.assertRaises(IndexError): a.index_copy_(1, idx, c) # Too small (very negative indices) - they should be unsupported even # when support for negative indices is implemented for index_copy_ idx = torch.full((idx_dim,), -tgt_dim - 1, device=device) with self.assertRaises(IndexError): a.index_copy_(1, idx, c) def _prepare_data_for_index_copy_and_add_deterministic( self, dim: int, device: torch.device ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: assert (dim >= 0 and dim < 3) a = [5, 4, 3] a[dim] = 2000 x = torch.zeros(a, device=device) b = a.copy() elems = a[dim] * 20 b[dim] = elems src = torch.rand(b, device=device) index = torch.randint(a[dim], (elems,), device=device) return (x, index, src) # FIXME: move to test indexing @onlyNativeDeviceTypes def test_index_copy_deterministic(self, device: torch.device) -> None: for dim in range(3): x, index, src = self._prepare_data_for_index_copy_and_add_deterministic(dim, device) with DeterministicGuard(True): y0 = torch.index_copy(x, dim, index, src) x0 = x.clone().detach() index_list = index.tolist() for i in range(len(index_list)): if dim == 0: x0[index_list[i], :, :] = src[i, :, :] elif dim == 1: x0[:, index_list[i], :] = src[:, i, :] elif dim == 2: x0[:, :, index_list[i]] = src[:, :, i] self.assertEqual(x0, y0, atol=0, rtol=0) # FIXME: move to test indexing @onlyNativeDeviceTypes def test_index_add_deterministic(self, device: torch.device) -> None: for dim in range(3): x, index, src = self._prepare_data_for_index_copy_and_add_deterministic(dim, device) alpha = random.random() + 1 # on CPU it should be deterministic regardless of the deterministic mode with DeterministicGuard(True): y0 = torch.index_add(x, dim, index, src, alpha=alpha) for _ in range(3): y = torch.index_add(x, dim, index, src, alpha=alpha) self.assertEqual(y, y0, atol=0, rtol=0) with DeterministicGuard(False): for _ in range(3): y_nd = torch.index_add(x, dim, index, src, alpha=alpha) self.assertEqual(y_nd, y0, atol=1e-3, rtol=1e-5) # FIXME: find a test suite for the put operator @onlyNativeDeviceTypes def test_index_put_non_accumulate_deterministic(self, device) -> None: with DeterministicGuard(True): for i in range(3): m = random.randint(10, 20) elems = random.randint(20000, 30000) values = torch.rand(elems, device=device) indices = torch.randint(m, (elems,), device=device) input = torch.rand(m, device=device) output = input.index_put((indices,), values, accumulate=False) input_list = input.tolist() indices_list = indices.tolist() values_list = values.tolist() for i, v in zip(indices_list, 
values_list): input_list[i] = v self.assertEqual(output, input_list) # FIXME: move to test indexing @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) @skipIfMps def test_index_fill(self, device, dtype): x = torch.tensor([[1, 2], [4, 5]], dtype=dtype, device=device) index = torch.tensor([0], device=device) x.index_fill_(1, index, 0) self.assertEqual(x, torch.tensor([[0, 2], [0, 5]], dtype=dtype, device=device)) if not x.is_complex() and not device == "meta": with self.assertRaisesRegex(RuntimeError, r"Scalar"): x.index_fill_(1, index, 1 + 1j) # Make sure that the result stays 0-dim while applied to # a 0-dim input x = torch.tensor(1, dtype=dtype, device=device) self.assertEqual(0, x.index_fill(0, index, -1).dim()) self.assertEqual(0, x.index_fill_(0, index, -1).dim()) # FIXME: move to test indexing # The test fails for zero-dimensional tensors on XLA @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_index_select(self, device, dtype): num_src, num_out = 3, 5 def make_arg(batch_sizes, n, dim, contig): size_arg = batch_sizes[:dim] + (n,) + batch_sizes[dim:] return make_tensor(size_arg, dtype=dtype, device=device, low=None, high=None, noncontiguous=not contig) def ref_index_select(src, dim, idx): # bfloat16 is just used on GPU, so it's not supported on numpy if dtype == torch.bfloat16: src = src.float() out = torch.from_numpy(np.take(src.cpu().numpy(), idx.cpu().numpy(), axis=dim)) if dtype == torch.bfloat16: out = out.to(device=device, dtype=dtype) return out for src_contig, idx_contig in product([True, False], repeat=2): for other_sizes in ((), (4, 5)): for dim in range(len(other_sizes)): src = make_arg(other_sizes, num_src, dim, src_contig) idx = make_tensor( (num_out,), dtype=torch.int64, device=device, low=0, high=num_src, noncontiguous=not idx_contig ) out = torch.index_select(src, dim, idx) out2 = ref_index_select(src, dim, idx) self.assertEqual(out, out2) for idx_type in (torch.int32, torch.int64): other_sizes = (3, 2) dim = 1 src = make_arg(other_sizes, num_src, dim, True) idx = make_tensor((num_out,), dtype=idx_type, device=device, low=0, high=num_src, noncontiguous=False) out = torch.index_select(src, dim, idx) out2 = ref_index_select(src, dim, idx) self.assertEqual(out, out2) # Create the 4 possible combinations of scalar sizes for index / source scalars = ((make_tensor(size_s, dtype=dtype, device=device), torch.zeros(size_i, dtype=torch.int64, device=device)) for size_s, size_i in product([(), (1,)], repeat=2)) for source, idx in scalars: out = source.index_select(0, idx) self.assertEqual(out.item(), source.item()) # FIXME: find a test suite for the take operator @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_take(self, device, dtype): idx_size = (4,) make_arg = partial(make_tensor, device=device, dtype=dtype) make_idx = partial(make_tensor, low=0, device=device, dtype=torch.int64) def ref_take(src, idx): if dtype == torch.bfloat16: src = src.half() src = src.cpu().numpy() idx = idx.cpu().numpy() out = torch.from_numpy(np.take(src, idx)).to(device=device, dtype=dtype) return out for src_contig, idx_contig, idx_reshape in product([True, False], repeat=3): for src_size in ((5,), (4, 5)): src = make_arg(src_size, noncontiguous=not src_contig) idx = make_idx(idx_size, high=src.numel(), noncontiguous=not idx_contig) if idx_reshape: idx = idx.reshape(2, 2) out = torch.take(src, idx) out2 = ref_take(src, idx) self.assertEqual(out, out2) # Create the 4 possible 
combinations of scalar sizes for source / index for size_s, size_i in product([(), (1,)], repeat=2): source = make_arg(size_s) idx = make_idx(size_i, high=1) out = source.take(idx) self.assertEqual(out.item(), source.item()) # FIXME: find a test suite for the put operator # The bool instance does not work on GPU. See # https://github.com/pytorch/pytorch/issues/54317 @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16)) def test_put(self, device, dtype): src_size = (4,) make_arg = partial(make_tensor, device=device, dtype=dtype) make_idx = partial(make_tensor, low=0, device=device, dtype=torch.int64) def ref_put(dst, idx, src, accumulate): new_dst = dst.clone(memory_format=torch.contiguous_format).view(-1) new_idx = idx.contiguous().view(-1) new_src = src.contiguous().view(-1) method = new_dst.index_add_ if accumulate else new_dst.index_copy_ return method(0, new_idx, new_src).view_as(dst) for dst_contig, src_contig, idx_contig, idx_reshape, accumulate in product([True, False], repeat=5): for dst_size in ((5,), (4, 5)): dst = make_arg(dst_size, noncontiguous=not dst_contig) src = make_arg(src_size, noncontiguous=not src_contig) # If accumulate=True, `put_` should be deterministic regardless of the inputs on CPU # On CUDA it may not be, but the test has enough tolerance to account for this if accumulate: idx = make_idx(src_size, high=dst.numel()) else: idx = torch.randperm(dst.numel(), dtype=torch.int64, device=device)[:src_size[0]] if not idx_contig: idx = torch.repeat_interleave(idx, 2, dim=-1)[..., ::2] if idx_reshape: idx = idx.reshape(2, 2) out = torch.put(dst, idx, src, accumulate) # out-place reference = ref_put(dst, idx, src, accumulate) self.assertEqual(out, reference) # in-place dst.put_(idx, src, accumulate) self.assertEqual(dst, reference) # Create the 8 possible combinations of scalar sizes for target / index / source scalars = ((make_arg(size_t), make_idx(size_i, high=1), make_arg(size_s)) for size_t, size_i, size_s in product([(), (1,)], repeat=3)) for (dest, idx, source), accumulate in product(scalars, [True, False]): dest_init = dest.clone() # out-place out = torch.put(dest, idx, source, accumulate=accumulate) # in-place dest1 = dest.clone() dest1.put_(idx, source, accumulate=accumulate) for d in [out, dest1]: if accumulate: self.assertEqual(d.item(), (dest_init + source).item()) else: self.assertEqual(d.item(), source.item()) # Empty case dest = make_arg((3, 2)) reference = dest.clone() idx = make_idx((0,), high=1) source = make_arg((0,)) for accumulate in [True, False]: out = torch.put(dest, idx, source, accumulate=accumulate) self.assertEqual(out, reference) dest.put_(idx, source, accumulate=accumulate) self.assertEqual(dest, reference) # FIXME: find a test suite for the put operator # The bool instance does not work on GPU. 
See # https://github.com/pytorch/pytorch/issues/54317 @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16)) def test_put_accumulate(self, device, dtype): # Test for parallel adds with accumulate == True low_precision = dtype == torch.half or dtype == torch.bfloat16 # Less numbers to avoid overflow with low_precision # Grainsize is 3000 for the for_loop to be parallized on CPU sizes = ((100,)) if low_precision else ((200,), (3002,)) # Bfloat16 has a particularly bad performance here # This operation is nondeterministic on GPU, so we are generous with the rtol rtol, atol = (1e-1, 1e-2) if low_precision else (1e-3, 1e-4) make_arg = partial(make_tensor, low=-2, high=3, device=device, dtype=dtype) # Dump everything into the 0-th position make_idx = partial(torch.zeros, device=device, dtype=torch.int64) args = ((make_idx(size), make_arg(size)) for size in sizes) for idx, source in args: orig = make_arg((1,)) out = orig.put(idx, source, accumulate=True) self.assertEqual(out, orig + source.sum(), rtol=rtol, atol=atol) # FIXME: find a test suite for the take operator @skipIfMps def test_take_empty(self, device): for input_shape in [(0,), (0, 1, 2, 0), (1, 2, 3)]: for indices_shape in [(0,), (0, 1, 2, 0)]: input = torch.empty(input_shape, device=device) indices = torch.empty(indices_shape, dtype=torch.int64, device=device) self.assertEqual(indices, torch.take(input, indices), exact_dtype=False) # FIXME: find a test suite for the put operator def test_put_empty(self, device): for dst_shape in [(0,), (0, 1, 2, 0), (1, 2, 3)]: for indices_shape in [(0,), (0, 1, 2, 0)]: for accumulate in [False, True]: dst = torch.randn(dst_shape, device=device) indices = torch.empty(indices_shape, dtype=torch.int64, device=device) src = torch.randn(indices_shape, device=device) self.assertEqual(dst, dst.put_(indices, src, accumulate=accumulate)) # FIXME: port to test_scatter_gather_ops.py def scatter_allow_reduce(self, device, dtype, reduceop): device_type = torch.device(device).type return device_type != 'cuda' or (reduceop == 'multiply' and dtype.is_floating_point) @dtypes(*floating_and_complex_types()) @dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) @dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_scatter_reduce_operations_to_large_input(self, device, dtype): index = torch.tensor([[1], [2]], device=device, dtype=torch.long) test_data = [ (torch.zeros(4, 4, device=device, dtype=dtype), torch.ones(2, 2, device=device, dtype=dtype), torch.tensor([[0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]], device=device, dtype=dtype), "add"), (torch.tensor([2], device=device, dtype=dtype).repeat(4, 4), torch.tensor([6], device=device, dtype=dtype).repeat(2, 2), torch.tensor([[2, 2, 2, 2], [12, 2, 2, 2], [12, 2, 2, 2], [2, 2, 2, 2]], device=device, dtype=dtype), "multiply"), ] for input, src, result, operation in test_data: if not self.scatter_allow_reduce(device, dtype, operation): continue input.scatter_(0, index, src, reduce=operation) self.assertEqual(input, result) @dtypes(*floating_and_complex_types()) @dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) @dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_scatter_reduce_scalar(self, device, dtype): index = torch.tensor([[1], [2]], device=device, dtype=torch.long) test_data = [ (torch.zeros(4, 4, device=device, dtype=dtype), 1, torch.tensor([[0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]], device=device, 
dtype=dtype), "add"), (torch.tensor([2], device=device, dtype=dtype).repeat(4, 4), 2, torch.tensor([[2, 2, 2, 2], [4, 2, 2, 2], [4, 2, 2, 2], [2, 2, 2, 2]], device=device, dtype=dtype), "multiply"), ] for input, src, result, operation in test_data: if not self.scatter_allow_reduce(device, dtype, operation): continue input.scatter_(0, index, src, reduce=operation) self.assertEqual(input, result) # FIXME: port to test_scatter_gather_ops.py # TODO: remove this after scatter_add_ is deprecated. def test_scatter_add_non_unique_index(self, device): height = 2 width = 65536 input = torch.ones(height, width, device=device) index = torch.zeros(height, width, dtype=torch.long, device=device) src = torch.ones(height, width, device=device) input.scatter_add_(0, index, src) self.assertEqual(input, torch.tensor([[3], [1]], device=device, dtype=torch.float32).repeat(1, width)) @dtypes(*floating_and_complex_types()) @dtypesIfCPU(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) @dtypesIfCUDA(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_scatter_reduce_non_unique_index(self, device, dtype): height = 2 width = 2 index = torch.zeros(height, width, dtype=torch.long, device=device) test_data = [ (torch.ones(height, width, device=device, dtype=dtype), torch.ones(height, width, device=device, dtype=dtype), torch.tensor([[3], [1]], device=device, dtype=dtype).repeat(1, width), "add"), (torch.tensor([2], device=device, dtype=dtype).repeat(height, width), torch.tensor([2], device=device, dtype=dtype).repeat(height, width), torch.tensor([[8], [2]], device=device, dtype=dtype).repeat(1, width), "multiply"), ] for input, src, result, operation in test_data: if not self.scatter_allow_reduce(device, dtype, operation): continue input.scatter_(0, index, src, reduce=operation) self.assertEqual(input, result, msg=f"result: {result} input: {input} method: {str(operation)}") @onlyCUDA @dtypes(*integral_types(), *complex_types()) def test_scatter_reduce_multiply_unsupported_dtypes(self, device, dtype): height = 2 width = 2 index = torch.zeros(height, width, dtype=torch.long, device=device) input = torch.ones(height, width, device=device, dtype=dtype) src = torch.ones(height, width, device=device, dtype=dtype) with self.assertRaises(RuntimeError): input.scatter_(0, index, src, reduce="multiply") # FIXME: port to test_scatter_gather_ops.py def test_scatter_to_large_input(self, device): input = torch.zeros(4, 4, device=device) src = torch.ones(2, 2, device=device) index = torch.tensor([[1], [2]], device=device, dtype=torch.long) input.scatter_(0, index, src) self.assertEqual(input, torch.tensor([[0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]], device=device, dtype=torch.float32)) # FIXME: port to test_scatter_gather_ops.py def test_scatter_add_to_large_input(self, device): input = torch.zeros(4, 4, device=device) src = torch.ones(2, 2, device=device) index = torch.tensor([[1], [2]], device=device, dtype=torch.long) input.scatter_add_(0, index, src) self.assertEqual(input, torch.tensor([[0, 0, 0, 0], [1, 0, 0, 0], [1, 0, 0, 0], [0, 0, 0, 0]], device=device, dtype=torch.float32)) # FIXME: port to test_scatter_gather_ops.py def test_scatter_bool(self, device): x = torch.tensor([[True, True, True], [True, True, True]], device=device) res = torch.zeros(3, 3, dtype=torch.bool, device=device) res = res.scatter_(0, torch.tensor([[0, 1, 2], [0, 1, 2]], device=device), x) self.assertEqual(res, torch.tensor([[True, False, False], [False, True, False], [False, False, True]], 
device=device)) # FIXME: port to test_scatter_gather_ops.py def test_scatter_add_bool(self, device): x = torch.tensor([[True, True, True, True, True], [True, True, True, True, True]], device=device) res = torch.zeros(3, 5, dtype=torch.bool, device=device) res = res.scatter_add_(0, torch.tensor([[0, 1, 2, 0, 0], [2, 0, 0, 1, 2]], device=device), x) self.assertEqual(res, torch.tensor([[True, True, True, True, True], [False, True, False, True, False], [True, False, True, False, True]], device=device)) # FIXME: find a test suite for the masked scatter operator @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_masked_scatter(self, device, dtype): dt = dtype with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") for maskType in [torch.uint8, torch.bool]: num_copy, num_dest = 3, 10 dest = torch.tensor([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], dtype=dt, device=device) dest2 = dest.clone() dest_ones = dest.clone() dest_ones_expected = dest.clone() src = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=dt, device=device) src_ones = torch.tensor([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=dt, device=device) mask = torch.tensor((0, 0, 0, 0, 1, 0, 1, 0, 1, 0), dtype=maskType, device=device) if dt == torch.bool: # torch.bool is a special case and is being tested # in a separate test return dest.masked_scatter_(mask, src) j = 0 for i in range(num_dest): if mask[i]: dest2[i] = src[j] dest_ones_expected[i] = src_ones[j] j += 1 self.assertEqual(dest, dest2, atol=0, rtol=0) dest_ones.masked_scatter_(mask, src_ones) self.assertEqual(dest_ones, dest_ones_expected, atol=0, rtol=0) # Bound checking in CUDA is done inside a kernel # in order to avoid synchronization, but this means # we can not clear the failures. So there is no way # to test it then recover. if self.device_type != 'cuda': # make src smaller. this should fail src = torch.zeros(num_copy - 1, dtype=dt, device=device) with self.assertRaises(RuntimeError): dest.masked_scatter_(mask, src) # empty tensor dest = torch.empty((5, 0, 5), dtype=dt, device=device) mask = torch.ones_like(dest, dtype=maskType, device=device) src = torch.empty((0,), dtype=dt, device=device) dest.masked_scatter_(mask, src) dest = torch.empty((5, 0, 5), dtype=dt, device=device) mask = torch.ones((5, 1, 5), dtype=maskType, device=device) src = torch.empty((0,), dtype=dt, device=device) dest.masked_scatter_(mask, src) if self.device_type != 'cuda': self.assertEqual(len(w), 5) else: self.assertEqual(len(w), 4) warn = 'masked_scatter_ received a mask with dtype torch.uint8,' for wi in w: self.assertEqual(str(wi.message)[0:55], str(warn)) # FIXME: find a test suite for the masked scatter operator @skipIfMps def test_masked_scatter_bool_tensor(self, device): src = torch.tensor([True, True, True], device=device) dst = torch.tensor([False, False, False], device=device) mask = torch.tensor([False, True, False], device=device) dst.masked_scatter_(mask, src) self.assertEqual(dst, torch.tensor([False, True, False], device=device)) mask = torch.tensor([True, False, True], device=device) dst = dst.masked_scatter(mask, src) self.assertEqual(dst, torch.tensor([True, True, True], device=device)) # FIXME: find a test suite for the masked scatter operator # test_scatter_gather_ops or test_masked_ops? 
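    # For reference, a minimal sketch (comments only, not executed) of the
    # semantics the masked_scatter tests above rely on: masked_scatter_ fills
    # the positions where `mask` is True with consecutive elements taken from
    # `source`, in row-major order.
    #
    #     dst = torch.zeros(5, dtype=torch.long)
    #     mask = torch.tensor([True, False, True, False, True])
    #     dst.masked_scatter_(mask, torch.tensor([1, 2, 3]))
    #     # dst is now tensor([1, 0, 2, 0, 3])
    #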
@onlyCUDA @largeTensorTest('30GB') def test_masked_scatter_large_tensor(self, device): t_cpu = torch.empty(2**31 + 1, dtype=torch.bool).random_() t = t_cpu.to(device) result_cpu = t_cpu.masked_scatter(t_cpu, t_cpu) result = t.masked_scatter(t, t) self.assertEqual(result, result_cpu) # FIXME: find a test suite for the masked select operator @dtypes(*all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16)) def test_masked_select(self, device, dtype): if device == 'cpu': warn = 'masked_select received a mask with dtype torch.uint8,' else: warn = 'indexing with dtype torch.uint8 is now deprecated, pl' for maskType in [torch.uint8, torch.bool]: num_src = 10 src = torch.tensor([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=dtype, device=device) mask = torch.randint(2, (num_src,), device=device, dtype=maskType) with warnings.catch_warnings(record=True) as w: dst = src.masked_select(mask) if maskType is torch.uint8: self.assertEqual(len(w), 1) self.assertEqual(str(w[0].message)[0:53], str(warn)) dst2 = [] for i in range(num_src): if mask[i]: dst2 += [src[i]] self.assertEqual(dst, torch.tensor(dst2), atol=0, rtol=0) dst3 = torch.empty(0, device=device, dtype=dtype) torch.masked_select(src, mask, out=dst3) self.assertEqual(dst3, torch.tensor(dst2, dtype=dst3.dtype), atol=0, rtol=0) # Since half on CPU is not supported, need to skip the remaining test cases if dtype == torch.half and torch.device(device).type == 'cpu': return # Ensure that masks are expanded to match tensor properly a = torch.rand(100, 100, device=device).mul(100).to(dtype) mask_first_el_each_row = torch.zeros(100, device=device, dtype=torch.bool) mask_first_el_each_row[0] = True a_masked = a.masked_select(mask_first_el_each_row) self.assertEqual(a_masked, a[:, 0]) mask_first_row = torch.zeros(100, 1, device=device, dtype=torch.bool) mask_first_row[0][0] = True a_masked = a.masked_select(mask_first_row) self.assertEqual(a_masked, a[0, :]) # Ensure that tensor is expanded to match mask properly a = torch.rand(100, device=device).mul(100).to(dtype) mask_copy_3_times = torch.tensor([[True], [True], [False], [True]], device=device) a_masked = a.masked_select(mask_copy_3_times) self.assertEqual(a_masked, a.unsqueeze(0).expand(3, 100).flatten()) # FIXME: find a test suite for the masked select operator def test_masked_select_discontiguous(self, device): for size in (10, 200): vals = torch.rand(size, size, device=device) mask = torch.full((size, size), False, dtype=torch.bool, device=device) mask[:, ::2] = True vals_list = (vals, vals.t()) mask_list = (mask, mask.t()) out_dc = torch.empty(size * size, device=device)[::2] for v, m in product(vals_list, mask_list): if m.is_contiguous(): expected = v[:, ::2].clone().reshape((-1, )) else: expected = v[::2].clone().reshape((-1, )) out = torch.masked_select(v, m) self.assertEqual(out, expected, atol=0, rtol=0) torch.masked_select(v, m, out=out_dc) self.assertEqual(out_dc, expected, atol=0, rtol=0) # FIXME: find a test suite for the masked fill operator @dtypes(*product(all_types_and_complex_and(torch.half, torch.bool, torch.bfloat16), (torch.uint8, torch.bool))) def test_masked_fill(self, device, dtypes): dtype = dtypes[0] mask_dtype = dtypes[1] with warnings.catch_warnings(record=True) as w: warnings.simplefilter("always") num_dest = 10 dst = torch.zeros(num_dest, dtype=dtype) mask = torch.randint(2, (num_dest,), dtype=mask_dtype) val = random.random() dst2 = dst.clone() dst.masked_fill_(mask, val) for i in range(num_dest): if mask[i]: dst2[i] = val self.assertEqual(dst, dst2, atol=0, rtol=0) # 
test non-contiguous case dst = ((torch.randn(num_dest, num_dest, num_dest) * 10).to(dtype)).permute((2, 0, 1)) dst2 = dst.contiguous() if dtype.is_complex: mask = dst.abs() > 0 else: mask = dst > 0 self.assertTrue(not dst.is_contiguous()) self.assertTrue(dst2.is_contiguous()) dst.masked_fill_(mask.to(mask_dtype), val) dst2.masked_fill_(mask.to(mask_dtype), val) self.assertEqual(dst, dst2, atol=0, rtol=0) if mask_dtype == torch.uint8: self.assertEqual(len(w), 3) warn = 'masked_fill_ received a mask with dtype torch.uint8,' for wi in w: self.assertEqual(str(wi.message)[0:52], str(warn)) else: self.assertEqual(len(w), 0) # FIXME: find a test suite for the masked fill operator def test_masked_fill_bool_tensor(self, device): dst = torch.tensor([True, False, True], device=device) mask = torch.tensor([False, True, False], device=device) dst.masked_fill_(mask, True) self.assertEqual(dst, torch.tensor([True, True, True], device=device)) dst = dst.masked_fill(mask, False) self.assertEqual(dst, torch.tensor([True, False, True], device=device)) def test_tensor_shape_empty(self, device): x = torch.randn((0, 1, 3, 0), device=device) # flatten self.assertEqual((0,), torch.flatten(x, 0, 3).shape) self.assertEqual((0, 0), torch.flatten(x, 0, 2).shape) self.assertEqual((0, 3, 0), torch.flatten(x, 1, 2).shape) # squeeze, unsqueeze self.assertEqual((0, 1, 1, 3, 0), torch.unsqueeze(x, 1).shape) self.assertEqual((0, 3, 0), torch.squeeze(x, 1).shape) self.assertEqual((0, 3, 0), torch.squeeze(x).shape) # transpose, t self.assertEqual((0, 0, 3, 1), torch.transpose(x, 1, 3).shape) y = torch.randn((5, 0), device=device) self.assertEqual((0, 5), y.t().shape) # select self.assertEqual((0, 1, 0), torch.select(x, 2, 2).shape) # repeat, permute self.assertEqual((9, 0, 5, 6, 0), x.repeat(9, 7, 5, 2, 3).shape) self.assertEqual((3, 0, 0, 1), x.permute(2, 3, 0, 1).shape) # diagonal, diagflat self.assertEqual((0,), torch.diagonal(torch.randn((5, 0), device=device)).shape) self.assertEqual((0,), torch.diagonal(torch.randn((0, 5), device=device)).shape) # off the end offsets are valid self.assertEqual((0,), torch.diagonal(torch.randn((5, 0), device=device), offset=1).shape) self.assertEqual((0,), torch.diagonal(torch.randn((0, 5), device=device), offset=1).shape) # check non-zero sized offsets off the end self.assertEqual((5, 6, 0), torch.diagonal(torch.randn((3, 4, 5, 6), device=device), offset=45252).shape) self.assertEqual((5, 6, 0), torch.diagonal(torch.randn((3, 4, 5, 6), device=device), offset=-45252).shape) self.assertEqual((0, 0), torch.diagflat(torch.tensor([], device=device)).shape) self.assertEqual(torch.zeros(1, 1), torch.diagflat(torch.tensor([], device=device), offset=1)) self.assertEqual((0, 0), torch.diagflat(torch.tensor([[]], device=device)).shape) self.assertEqual(torch.zeros(1, 1), torch.diagflat(torch.tensor([[]], device=device), offset=1)) # stack, split, chunk self.assertEqual((4, 0, 1, 3, 0), torch.stack((x, x, x, x)).shape) self.assertEqual([(0, 1, 3, 0)], [z.shape for z in torch.chunk(x, 1, dim=0)]) self.assertEqual([(0, 1, 3, 0), ] * 3, [z.shape for z in torch.chunk(x, 3, dim=0)]) self.assertEqual([(0, 1, 1, 0), ] * 3, [z.shape for z in torch.chunk(x, 3, dim=2)]) # NOTE: split_with_sizes behaves differently than NumPy in that it # takes sizes rather than offsets self.assertEqual([(0, 1, 0, 0), (0, 1, 1, 0), (0, 1, 2, 0)], [z.shape for z in torch.split(x, (0, 1, 2), dim=2)]) self.assertRaises(RuntimeError, lambda: torch.split(x, 0, dim=1)) # This is strange because the split size is larger than the dim 
size, but consistent with # how split handles that case generally (when no 0s are involved). self.assertEqual([(0, 1, 3, 0)], [z.shape for z in torch.split(x, 1, dim=0)]) self.assertEqual([(0, 1, 3, 0)], [z.shape for z in torch.split(x, 0, dim=0)]) # functions that operate over a dimension but don't reduce. def test_dim_function_empty(self, device): shape = (0, 1, 2, 0) x = torch.randn(shape, device=device) # size stride self.assertEqual(0, x.size(3)) self.assertEqual(2, x.size(2)) self.assertEqual(2, x.stride(0)) self.assertEqual(1, x.stride(2)) self.assertEqual(x, torch.nn.functional.glu(x, 0)) self.assertEqual((0, 1, 1, 0), torch.nn.functional.glu(x, 2).shape) # softmax, logsoftmax self.assertEqual(x, torch.nn.functional.softmax(x, 0)) self.assertEqual(x, torch.nn.functional.softmax(x, 2)) self.assertEqual(x, torch.nn.functional.softmax(x, 3)) self.assertEqual(x, torch.nn.functional.log_softmax(x, 0)) self.assertEqual(x, torch.nn.functional.log_softmax(x, 2)) self.assertEqual(x, torch.nn.functional.log_softmax(x, 3)) # cumsum, cumprod, cummax, cummin self.assertEqual(shape, torch.cumsum(x, 0).shape) self.assertEqual(shape, torch.cumsum(x, 2).shape) self.assertEqual(shape, torch.cumprod(x, 0).shape) self.assertEqual(shape, torch.cumprod(x, 2).shape) self.assertEqual(shape, torch.cummax(x, 0)[0].shape) self.assertEqual(shape, torch.cummax(x, 2)[0].shape) self.assertEqual(shape, torch.cummin(x, 0)[0].shape) self.assertEqual(shape, torch.cummin(x, 2)[0].shape) self.assertEqual(shape, torch.logcumsumexp(x, 0).shape) self.assertEqual(shape, torch.logcumsumexp(x, 2).shape) # flip self.assertEqual(x, x.flip(0)) self.assertEqual(x, x.flip(2)) # roll self.assertEqual(x, x.roll(0, 1).roll(0, -1)) self.assertEqual(x, x.roll(1, x.size(1))) self.assertEqual(x, x.roll(1)) self.assertEqual(x, x.roll((1, 1), (3, 1))) # unbind self.assertEqual((), x.unbind(0)) self.assertEqual((torch.empty((0, 1, 0), device=device), torch.empty((0, 1, 0), device=device)), x.unbind(2)) # cross y = torch.randn((0, 1, 3, 0), device=device) self.assertEqual(y.shape, torch.cross(y, y).shape) # renorm self.assertEqual(shape, torch.renorm(x, 1, 0, 5).shape) self.assertEqual(shape, torch.renorm(x, 1, 2, 5).shape) # sort self.assertEqual([shape, shape], [z.shape for z in torch.sort(x, dim=0)]) self.assertEqual([shape, shape], [z.shape for z in torch.sort(x, dim=2)]) # topk self.assertEqual([shape, shape], [z.shape for z in torch.topk(x, 0, dim=0)]) self.assertEqual([(0, 1, 1, 0), (0, 1, 1, 0)], [z.shape for z in torch.topk(x, 1, dim=2)]) y = torch.randn((2, 3, 4), device=device) self.assertEqual([(2, 3, 0), (2, 3, 0)], [z.shape for z in torch.topk(y, 0)]) # gather self.assertEqual(shape, torch.gather(x, 0, torch.empty(shape, dtype=torch.int64, device=device)).shape) self.assertEqual(shape, torch.gather(x, 2, torch.empty(shape, dtype=torch.int64, device=device)).shape) larger_shape = torch.empty((0, 1, 3, 0), dtype=torch.int64, device=device) self.assertEqual(larger_shape.shape, torch.gather(x, 2, larger_shape).shape) smaller_shape = torch.empty((0, 1, 0, 0), dtype=torch.int64, device=device) self.assertEqual(smaller_shape.shape, torch.gather(x, 2, smaller_shape).shape) y = torch.randn((2, 3, 4), device=device) self.assertEqual((0, 3, 4), torch.gather(y, 0, torch.empty((0, 3, 4), dtype=torch.int64, device=device)).shape) # scatter, scatter_add for dim in [0, 2]: y = torch.randn(shape, device=device) y_src = torch.randn(shape, device=device) ind = torch.empty(shape, dtype=torch.int64, device=device) self.assertEqual(shape, 
y.scatter_(dim, ind, y_src).shape) self.assertEqual(shape, y.scatter_add_(dim, ind, y_src).shape) z = torch.randn((2, 3, 4), device=device) z_src = torch.randn((2, 3, 4), device=device) self.assertEqual(z, z.scatter_(2, torch.empty((2, 3, 0), dtype=torch.int64, device=device), z_src)) self.assertEqual(z, z.scatter_add_(2, torch.empty((2, 3, 0), dtype=torch.int64, device=device), z_src)) # index_fill, index_copy, index_add c = x.clone() c_clone = c.clone() ind_empty = torch.tensor([], dtype=torch.int64, device=device) ind_01 = torch.tensor([0, 1], dtype=torch.int64, device=device) self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1)) self.assertEqual(c_clone, c.index_fill_(2, ind_empty, -1)) self.assertEqual(c_clone, c.index_fill_(2, ind_01, -1)) self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2, 0), device=device))) self.assertEqual(c_clone, c.index_copy_(2, ind_empty, torch.empty((0, 1, 0, 0), device=device))) self.assertEqual(c_clone, c.index_copy_(2, ind_01, torch.empty((0, 1, 2, 0), device=device))) self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2, 0), device=device))) self.assertEqual(c_clone, c.index_add_(2, ind_empty, torch.empty((0, 1, 0, 0), device=device))) self.assertEqual(c_clone, c.index_add_(2, ind_01, torch.empty((0, 1, 2, 0), device=device))) c = torch.randn((0, 1, 2), device=device) c_clone = c.clone() self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1)) self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2), device=device))) self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2), device=device))) self.assertEqual(c_clone, c.index_fill_(0, ind_empty, -1)) self.assertEqual(c_clone, c.index_copy_(0, ind_empty, torch.empty((0, 1, 2), device=device))) self.assertEqual(c_clone, c.index_add_(0, ind_empty, torch.empty((0, 1, 2), device=device))) # index fill/copy/add non-empty z = torch.randn((2, 3, 4), device=device) self.assertEqual(z, z.index_fill_(0, ind_empty, -1)) z = torch.randn((2, 3, 4), device=device) self.assertEqual(z, z.index_copy_(0, ind_empty, torch.empty((0, 3, 4), device=device))) z = torch.randn((2, 3, 4), device=device) self.assertEqual(z, z.index_add_(0, ind_empty, torch.empty((0, 3, 4), device=device))) # index_select self.assertEqual(x, x.index_select(0, ind_empty)) self.assertEqual((0, 1, 0, 0), x.index_select(2, ind_empty).shape) self.assertEqual(x, x.index_select(2, ind_01)) z = torch.randn((2, 3, 4), device=device) # non-empty self.assertEqual((0, 3, 4), z.index_select(0, ind_empty).shape) c = torch.randn((0, 1, 2), device=device) self.assertEqual(c, c.index_select(0, ind_empty)) c = torch.randn((0, 1, 2), device=device) self.assertEqual(c, c.index_select(0, ind_empty)) w = torch.randn((0, 3), device=device) self.assertEqual((0, 2), w.index_select(1, ind_01).shape) w = torch.randn((3, 0), device=device) self.assertEqual((2, 0), w.index_select(0, ind_01).shape) ind_01_int32 = torch.tensor([0, 1], dtype=torch.int32, device=device) self.assertEqual((2, 0), w.index_select(0, ind_01_int32).shape) if device == 'cpu': w = torch.randn((0, 3), device=device) with self.assertRaisesRegex(RuntimeError, "self indexing axis dim should be positive"): torch.index_select(w, 0, ind_01) ind_05 = torch.tensor([0, 5], dtype=torch.int64, device=device) with self.assertRaisesRegex(RuntimeError, "INDICES element is out of DATA bounds"): torch.index_select(w, 1, ind_05) # FIXME: find a test suite for the pdist operator def _brute_pdist(self, inp, p=2): """Computes the same as 
torch.pdist using primitives"""
        n = inp.shape[-2]
        k = n * (n - 1) // 2
        if k == 0:
            # torch complains about empty indices
            return torch.empty(inp.shape[:-2] + (0,), dtype=inp.dtype, device=inp.device)
        square = torch.norm(inp[..., None, :] - inp[..., None, :, :], p=p, dim=-1)
        unroll = square.view(square.shape[:-2] + (n * n,))
        inds = torch.ones(k, dtype=torch.int)
        inds[torch.arange(n - 1, 1, -1, dtype=torch.int).cumsum(0)] += torch.arange(2, n, dtype=torch.int)
        return unroll[..., inds.cumsum(0)]

    # FIXME: find a test suite for the pdist operator
    def _pdist_single(self, shape, device, p, dtype, trans, grad_check=False):
        x = torch.randn(shape, dtype=dtype, device=device)
        if trans:
            x.transpose_(-2, -1)
        if grad_check:
            x.requires_grad_()
            y = x.detach().clone().requires_grad_()
        else:
            y = x
        actual = torch.pdist(x, p=p)
        expected = self._brute_pdist(y, p=p)
        self.assertEqual(expected.shape, actual.shape)
        self.assertEqual(expected, actual)
        if grad_check and expected.size() != torch.Size([0]):
            g0 = torch.rand_like(actual)
            actual.backward(g0)
            expected.backward(g0)
            self.assertEqual(x.grad, y.grad)

    # FIXME: find a test suite for the pdist operator
    @slowTest
    def test_pdist_norm_forward(self, device):
        for shape in [(4, 5), (3, 2), (2, 1), (1500, 1)]:
            for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                for trans in [False, True]:
                    for dtype in [torch.float32, torch.float64]:
                        self._pdist_single(shape, device, p, dtype, trans, grad_check=False)

        # do a simplified comparison with big inputs, see:
        # https://github.com/pytorch/pytorch/issues/15511
        for dtype in [torch.float32, torch.float64]:
            self._pdist_single((1000, 2), device, 2, dtype, trans=False, grad_check=False)

    # FIXME: find a test suite for the pdist operator
    @slowTest
    def test_pdist_norm_backward(self, device):
        for shape in [(4, 5), (3, 2), (2, 1), (1500, 1)]:
            for p in [0, 1, 2, 3, 1.5, 2.5, float('inf')]:
                for trans in [False, True]:
                    self._pdist_single(shape, device, p, torch.float64, trans, grad_check=True)

    # FIXME: find a test suite for the pdist operator
    @unittest.skipIf(IS_FBCODE and IS_REMOTE_GPU, "sandcastle OOM with current tpx gpu/re configuration")
    @skipIfRocm
    @onlyCUDA
    @largeTensorTest('10GB', device='cpu')
    @largeTensorTest('5GB', device='cuda')
    def test_pdist_norm_large(self, device):
        # use dim0>=46342 for forward, see:
        # https://github.com/pytorch/pytorch/issues/30583
        # Compare output using GPU with the CPU implementation, as brute_pdist uses too much memory
        x = torch.randn(50000, 1, dtype=torch.float32)    # 50k * 4 bytes = 200 KB
        # Will require 1249975000 float32s
        expected_cpu = torch.pdist(x, p=2)                # ~1250M * 4 bytes = 5 GB on CPU
        actual_gpu = torch.pdist(x.to(device), p=2)       # 5 GB on GPU
        self.assertEqual(expected_cpu, actual_gpu.cpu())  # Another 5 GB on CPU

    # FIXME: move to elementwise ternary test suite
    @onlyNativeDeviceTypes
    @dtypesIfCUDA(*set(get_all_math_dtypes('cuda')))
    @dtypes(*set(get_all_math_dtypes('cpu')))
    def test_addcdiv(self, device, dtype):
        # Returns floating or integral scalar corresponding to dtype
        def _number(floating, integer, dtype):
            if dtype in [torch.half, torch.float, torch.double, torch.bfloat16]:
                return floating
            elif dtype in [torch.cfloat, torch.cdouble]:
                return floating * (1 + 1j)
            else:
                return integer

        def non_zero_rand(size, dtype, device):
            if dtype.is_floating_point or dtype.is_complex:
                a = torch.rand(size=size, dtype=dtype, device=device)
            elif dtype == torch.uint8:
                a = torch.randint(1, 5, size=size, dtype=dtype, device=device)
            else:
                a = torch.randint(-5, 5, size=size, dtype=dtype, device=device)
            return a + (a == 0).to(dtype)
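        # torch.addcdiv computes input + value * (tensor1 / tensor2) elementwise; for
        # example, with a = 1, b = 6, c = 3 and alpha = 0.5 the result is
        # 1 + 0.5 * (6 / 3) = 2.0, matching the manual expression a + (alpha * b) / c
        # used in _test_addcdiv below.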
def _test_addcdiv(): a = non_zero_rand((2, 2), dtype=dtype, device=device) b = non_zero_rand((2, 2), dtype=dtype, device=device) c = non_zero_rand((2, 2), dtype=dtype, device=device) alpha = _number(0.5, 3, dtype) expected = a + (alpha * b) / c actual = torch.addcdiv(a, b, c, value=alpha) self.assertEqual(expected, actual) with self.assertWarnsOnceRegex( UserWarning, "This overload of addcdiv is deprecated"): self.assertEqual(actual, torch.addcdiv(a, alpha, b, c)) if not (dtype.is_floating_point or dtype.is_complex): # Integer division with addcdiv is prohibited with self.assertRaises(RuntimeError): _test_addcdiv() else: _test_addcdiv() if self.device_type == 'cuda' and dtype == torch.half: a = torch.tensor([60000.0], device=device, dtype=dtype) b = torch.tensor([60000.0], device=device, dtype=dtype) c = torch.tensor([1.0], device=device, dtype=dtype) out = torch.addcmul(a, b, c, value=-2) self.assertTrue(not (out.isnan() or out.isinf())) def test_nullary_op_mem_overlap(self, device): ops = ( ("random_", ()), ("uniform_", ()), ("cauchy_", ()), ("log_normal_", ()), ("exponential_", ()), ("geometric_", (0.5,)), ("normal_", ()), ) x = torch.rand((1, 3)).expand((3, 3)) for op, args in ops: with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): getattr(x, op)(*args) # FIXME: move to an elementwise ternary test suite and make this an OpInfo test @dtypes(torch.double) def test_ternary_op_mem_overlap(self, device, dtype): ops = [ ("addcmul", True, True, 'cpu'), ("addcmul", True, True, 'cuda'), ("addcdiv", True, True, 'cpu'), ("addcdiv", True, True, 'cuda'), ("lerp", True, True, 'cpu'), ("lerp", True, True, 'cuda') ] for (fn, has_input_output_mem_overlap_check, has_internal_mem_overlap_check, dev) in ops: if dev != device: continue out_op = getattr(torch, fn) inplace_op = getattr(torch.Tensor, fn + '_') self.check_internal_mem_overlap( inplace_op, 3, dtype, device, expected_failure=not has_internal_mem_overlap_check) self.ternary_check_input_output_mem_overlap(out_op, dev, expected_failure=not has_input_output_mem_overlap_check) @expectedFailureMeta # RuntimeError not raised @dtypes(torch.double) @onlyNativeDeviceTypes def test_copy_mem_overlap(self, device, dtype): self.check_internal_mem_overlap( torch.Tensor.copy_, num_inputs=2, dtype=dtype, device=device) sz = 9 doubles = torch.randn(2 * sz, dtype=dtype, device=device) self.unary_check_input_output_mem_overlap( doubles, sz, lambda input, out: out.copy_(input)) # FIXME: convert to ErrorInputs @onlyNativeDeviceTypes def test_index_add_mem_overlap(self, device): x = torch.rand((1,), device=device).expand((6,)) y = torch.rand((6,), device=device) ind = torch.tensor([2, 1, 0], device=device) value = torch.rand((3,), device=device) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): x.index_add_(0, ind, value) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): y.index_add_(0, ind, y[:3]) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.index_add_(0, ind, ind.clone()) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.index_add_(0, ind.clone(), ind) # FIXME: convert to ErrorInputs @onlyNativeDeviceTypes def test_index_copy_mem_overlap(self, device): x = torch.rand((1,), device=device).expand((6,)) y = torch.rand((6,), device=device) ind = torch.tensor([2, 1, 0], device=device) value = torch.rand((3,), device=device) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): x.index_copy_(0, ind, value) with self.assertRaisesRegex(RuntimeError, 
'unsupported operation'): y.index_copy_(0, ind, y[:3]) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.index_copy_(0, ind, ind.clone()) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.index_copy_(0, ind.clone(), ind) # FIXME: convert to ErrorInputs @expectedFailureMeta # Warning not triggered @onlyNativeDeviceTypes def test_index_fill_mem_overlap(self, device): x = torch.rand((1,), device=device).expand((6,)) y = torch.rand((6,), device=device) ind = torch.tensor([2, 1, 0], device=device) value = torch.rand((3,), device=device) with self.assertWarnsRegex(UserWarning, "index_fill_ on expanded tensors"): x.index_fill_(0, ind, 1.0) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.index_fill_(0, ind, 0) # FIXME: convert to ErrorInputs @expectedFailureMeta # RuntimeError not raised @onlyNativeDeviceTypes def test_shift_mem_overlap(self, device): x = torch.rand(3, device=device) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): x[:-1] <<= x[1:] with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): x[:-1] >>= x[1:] # FIXME: convert to ErrorInputs @expectedFailureMeta # RuntimeError not raised @onlyNativeDeviceTypes def test_bernoulli_mem_overlap(self, device): x = torch.rand((1,), device=device).expand((6,)) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): x.bernoulli_() with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): x.bernoulli_(p=0.1) p = torch.rand(6, device=device) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): x.bernoulli_(p=p) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): torch.bernoulli(torch.rand_like(x), out=x) # FIXME: convert to ErrorInputs @expectedFailureMeta # RuntimeError not raised @onlyNativeDeviceTypes def test_put_mem_overlap(self, device): x = torch.rand((1,), device=device).expand((6,)) y = torch.rand((6,), device=device) ind = torch.tensor([2, 1, 0], device=device) value = torch.rand((3,), device=device) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): x.put_(ind, value) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): y.put_(ind[0], y[0]) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.put_(ind, ind) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): y.put_(ind, y[:3]) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.put_(ind, ind.clone()) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.put_(ind.clone(), ind) # FIXME: convert to ErrorInputs @expectedFailureMeta # UserWarning not triggered @onlyNativeDeviceTypes def test_index_put_mem_overlap(self, device): x = torch.rand((1,), device=device).expand((6,)) y = torch.rand((6,), device=device) ind = torch.tensor([2, 1, 0], device=device) value = torch.rand((3,), device=device) with self.assertWarnsRegex(UserWarning, 'expanded tensors'): x.index_put_((ind,), value) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): y.index_put_((ind,), y[0]) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.index_put_((ind,), ind) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): y.index_put_((ind,), y[:3]) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.index_put_((ind,), ind.clone()) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.index_put_((ind.clone(),), ind) # FIXME: convert to ErrorInputs @expectedFailureMeta # UserWarning not 
triggered @onlyNativeDeviceTypes def test_masked_fill_mem_overlap(self, device): x = torch.rand((1,), device=device).expand((6,)) mask = torch.tensor([True, False, True, True, False, False], device=device) with self.assertWarnsRegex(UserWarning, 'expanded tensors'): x.masked_fill_(mask, 0.) fill_val = torch.tensor(0., device=device) with self.assertWarnsRegex(UserWarning, 'expanded tensors'): x.masked_fill_(mask, fill_val) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): mask[1:].masked_fill_(mask[:-1], False) # FIXME: convert to ErrorInputs @expectedFailureMeta # RuntimeError not raised @onlyNativeDeviceTypes def test_masked_scatter_mem_overlap(self, device): x = torch.rand((1,), device=device).expand((6,)) src = torch.rand((3,), device=device) mask = torch.tensor([True, False, True, True, False, False], device=device) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): x.masked_scatter_(mask, src) # FIXME: convert to ErrorInputs @onlyNativeDeviceTypes def test_scatter_mem_overlap(self, device): x = torch.rand((1,), device=device).expand((6,)) src = torch.rand((3,), device=device) ind = torch.tensor([2, 1, 0], device=device, dtype=torch.int64) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): x.scatter_(0, ind, src) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): src.scatter_(0, ind, src) with self.assertRaisesRegex(RuntimeError, 'unsupported operation'): ind.scatter_(0, ind, ind.clone()) # FIXME: move to test distributions @onlyCUDA def test_multinomial_device_constrain(self, device): x = torch.empty(0, device="cpu") y = torch.empty(0, device=device) self.assertRaisesRegex( RuntimeError, "Expected all tensors to be on the same device", lambda: torch.multinomial(x, 2, out=y)) # FIXME: move to test distributions @deviceCountAtLeast(2) @onlyCUDA def test_multinomial_gpu_device_constrain(self, devices): x = torch.empty(0, device=devices[0]) y = torch.empty(0, device=devices[1]) self.assertRaisesRegex( RuntimeError, "Expected all tensors to be on the same device", lambda: torch.multinomial(x, 2, out=y)) # FIXME: convert this to an automated OpInfo test @deviceCountAtLeast(2) @onlyCUDA def test_device_guard(self, devices): # verify that all operators with `device_guard: False` behave properly with multiple devices. # TODO: if we had operator introspection we could figure out this set of operators automatically... 
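        # Everything below runs on tensors allocated on devices[1] while the current
        # device is left at its default, so these device_guard-free ops are exercised
        # in the mismatched-device case; the test only checks that the calls succeed,
        # it does not inspect their outputs.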
x = torch.randn((1, 2, 3), device=devices[1]) y = torch.zeros((1, 3, 2), device=devices[1]) scalar = torch.tensor(5, device=devices[1]) # property ops torch.cudnn_is_acceptable(x) x.is_distributed() x.is_floating_point() x.is_complex() x.is_same_size(y) x.is_signed() x.size(0) x.stride(0) x.numel() x.is_set_to(y) x.data_ptr() scalar.is_nonzero() # sparse property ops y[0][1] = 5 y_sparse = y.to_sparse() y_sparse.sparse_dim() y_sparse._dimI() y_sparse.dense_dim() y_sparse._dimV() y_sparse._nnz() y_sparse.is_coalesced() y_sparse._indices() y_sparse._values() y_sparse.indices() y_sparse.values() # in-place ops def inplace(): return torch.randn((1, 2, 3), device=devices[1]) inplace().as_strided_(y.size(), y.stride()) inplace().resize_(y.size()) inplace().squeeze_() inplace().squeeze_(0) inplace().unsqueeze_(2) inplace().transpose_(1, 2) inplace().squeeze_().t_() inplace().set_(x.storage()) inplace().set_(x.storage(), x.storage_offset(), x.size(), x.stride()) inplace().set_(x) inplace().set_() y_sparse._coalesced_(True) # shape modification x.as_strided(y.size(), y.stride()) x.expand((5, 2, 3)) x.expand_as(x) x.sum_to_size((1,)) torch.broadcast_tensors(x , x) x.reshape((1, 3, 2)) x.reshape_as(y) x.squeeze() x.squeeze(0) x.squeeze().t() x.transpose(1, 2) x.unsqueeze(2) x.view((1, 3, 2)) x.view_as(y) # chunk, split, etc. x.chunk(2, dim=1) x.split(1, dim=2) x.split_with_sizes([1, 2], dim=2) x.unfold(dimension=2, size=1, step=1) x.narrow(1, 1, 1) x.select(1, 1) torch.isnan(x) torch.empty((1, 3, 2), out=y) torch.empty_like(x) torch.empty_like(x, dtype=torch.int64) # to x.to(x) x.to(y) x.to(x, copy=True) def test_is_signed(self, device): self.assertEqual(torch.IntTensor(5).to(device).is_signed(), True) self.assertEqual(torch.ByteTensor(5).to(device).is_signed(), False) self.assertEqual(torch.CharTensor(5).to(device).is_signed(), True) self.assertEqual(torch.FloatTensor(5).to(device).is_signed(), True) self.assertEqual(torch.HalfTensor(10).to(device).is_signed(), True) # Note - reports a leak of 512 bytes on CUDA device 1 @deviceCountAtLeast(2) @skipCUDAMemoryLeakCheckIf(True) @onlyCUDA def test_tensor_set_errors_multigpu(self, devices): f_cuda0 = torch.randn((2, 3), dtype=torch.float32, device=devices[0]) f_cuda1 = torch.randn((2, 3), dtype=torch.float32, device=devices[1]) self.assertRaises(RuntimeError, lambda: f_cuda0.set_(f_cuda1.storage())) self.assertRaises(RuntimeError, lambda: f_cuda0.set_(f_cuda1.storage(), 0, f_cuda1.size(), f_cuda1.stride())) self.assertRaises(RuntimeError, lambda: f_cuda0.set_(f_cuda1)) # FIXME: move to test_serialization @onlyCUDA @deviceCountAtLeast(1) # Note: Tests works with one but prefers more devices def test_serialization(self, devices): def _test_serialization(filecontext_lambda): t0 = torch.cuda.FloatTensor(5).fill_(1) with torch.cuda.device(devices[-1]): tn = torch.cuda.FloatTensor(3).fill_(2) torch.cuda.set_device(devices[0]) b = (t0, tn) with filecontext_lambda() as f: torch.save(b, f) f.seek(0) c = torch.load(f) self.assertEqual(b, c, atol=0, rtol=0) u0, un = c self.assertEqual(str(u0.device), devices[0]) self.assertEqual(str(un.device), devices[-1]) _test_serialization(tempfile.NamedTemporaryFile) _test_serialization(BytesIOContext) # FIXME: move memory format tests to their own test class/suite def test_memory_format_preserved_after_permute(self, device): x = torch.randn(4, 3, 8, 8, device=device) nhwc = x.contiguous(memory_format=torch.channels_last) y = nhwc.permute(0, 1, 3, 2).permute(0, 1, 3, 2) 
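        # Swapping dims 2 and 3 twice is a round trip, so y ends up with the same sizes
        # and strides as the original channels_last tensor and should keep that layout.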
self.assertTrue(y.is_contiguous(memory_format=torch.channels_last)) x = torch.randn(4, 3, 8, 8, 8, device=device) ndhwc = x.contiguous(memory_format=torch.channels_last_3d) y = ndhwc.permute(0, 1, 4, 3, 2).permute(0, 1, 4, 3, 2) self.assertTrue(y.is_contiguous(memory_format=torch.channels_last_3d)) def test_memory_format_propagation_rules(self, device): contiguous = torch.rand(10, 3, 5, 5, device=device) cl = torch.rand(10, 3, 5, 5, device=device).contiguous(memory_format=torch.channels_last) ambiguous = torch.rand(10, 3, 1, 1, device=device).contiguous(memory_format=torch.channels_last) self.assertTrue(ambiguous.is_contiguous(memory_format=torch.channels_last)) self.assertTrue(ambiguous.is_contiguous(memory_format=torch.contiguous_format)) bias = torch.rand(1, 1, 1, 1, device=device).contiguous(memory_format=torch.channels_last) def _test_propagation_rules(self, contiguous, cl, ambiguous, bias): options = ((ambiguous, contiguous, torch.contiguous_format), (ambiguous, cl, torch.channels_last), (contiguous, ambiguous, torch.contiguous_format), (contiguous, cl, torch.contiguous_format), (cl, ambiguous, torch.channels_last), (cl, contiguous, torch.channels_last), (bias, cl, torch.channels_last), (cl, bias, torch.channels_last),) for a, b, mf in options: result = a + b self.assertTrue(result.is_contiguous(memory_format=mf)) _test_propagation_rules(self, contiguous, cl, ambiguous, bias) cl = cl.to(memory_format=torch.channels_last) ambiguous = ambiguous.to(memory_format=torch.channels_last) bias = bias.to(memory_format=torch.channels_last) _test_propagation_rules(self, contiguous, cl, ambiguous, bias) # test cases when strides matter in ambiguous tensors for mf in (torch.channels_last, torch.contiguous_format): ambiguous = torch.rand(10, 3, 1, 1, device=device).to(memory_format=mf) bias = torch.rand(3, 1, 1, device=device) result = ambiguous + bias self.assertEqual(ambiguous.stride(), result.stride()) result = bias + ambiguous self.assertEqual(ambiguous.stride(), result.stride()) result = ambiguous * 5 self.assertEqual(ambiguous.stride(), result.stride()) @skipIfMps def test_memory_format_empty_like(self, device): def test_helper(x, memory_format): xc = x.contiguous(memory_format=memory_format) like = torch.empty_like(xc, memory_format=torch.preserve_format) self.assertFalse(like.is_contiguous()) self.assertTrue(like.is_contiguous(memory_format=memory_format)) like_x = torch.empty_like(x, memory_format=torch.preserve_format) self.assertTrue(like_x.is_contiguous()) self.assertFalse(like_x.is_contiguous(memory_format=memory_format)) like = torch.empty_like(x, memory_format=memory_format) self.assertFalse(like.is_contiguous()) self.assertTrue(like.is_contiguous(memory_format=memory_format)) like = torch.empty_like(xc, memory_format=torch.contiguous_format) self.assertTrue(like.is_contiguous()) self.assertFalse(like.is_contiguous(memory_format=memory_format)) like = torch.empty_like(xc) self.assertFalse(like.is_contiguous()) self.assertTrue(like.is_contiguous(memory_format=memory_format)) sparse = x.to_sparse() with self.assertRaises(RuntimeError): z = torch.empty_like(sparse, memory_format=torch.preserve_format) test_helper(torch.randn(4, 3, 8, 8, device=device), torch.channels_last) test_helper(torch.randn(4, 3, 8, 8, 8, device=device), torch.channels_last_3d) def test_memory_format_consistency(self, device): x = torch.randn(10, 3, 1, 1, device=device) x_rep = x.as_strided(x.size(), x.stride()) self.assertEqual(x.size(), x_rep.size()) self.assertEqual(x.stride(), x_rep.stride()) 
self.assertEqual(x.is_contiguous(), x_rep.is_contiguous()) self.assertEqual(x.is_contiguous(memory_format=torch.channels_last), x_rep.is_contiguous(memory_format=torch.channels_last)) self.assertEqual( x.is_contiguous(memory_format=torch.channels_last_3d), x_rep.is_contiguous(memory_format=torch.channels_last_3d)) # FIXME: make this a elementwise unary and elementwise binary OpInfo test def test_memory_format_operators(self, device): def _chunk_op(x, y): x1, x2 = x.chunk(2, dim=1) return x1 + x2 def _unsqueeze_op_add(x, y): return x[0].unsqueeze(0) + 3 def _unsqueeze_op_clone(x, y): return x[0].unsqueeze(0).clone() def _test_helper(x, y, bias, memory_format): return_contig_fns = [ lambda x, y: y + x, lambda x, y: y * x, lambda x, y: y.addcdiv(x, y, value=2), lambda x, y: y.addcmul(x, y, value=2), ] bias_fns = [ lambda x, b: x + b, lambda x, b: b + x, ] fns = [ lambda x, y: x.clone(), lambda x, y: x + 3, lambda x, y: 3 * x, lambda x, y: x + y, lambda x, y: x * y, lambda x, y: abs(x), lambda x, y: x.abs(), lambda x, y: x.abs_(), lambda x, y: x.acos(), lambda x, y: x.acos_(), lambda x, y: x.add(y, alpha=3), lambda x, y: x.add_(y, alpha=3), lambda x, y: x.addcdiv(y, y, value=2), lambda x, y: x.addcdiv_(y, y, value=2), lambda x, y: x.addcmul(y, y, value=2), lambda x, y: x.addcmul_(y, y, value=2), lambda x, y: x.acosh(), lambda x, y: x.acosh_(), lambda x, y: x.asinh(), lambda x, y: x.asinh_(), lambda x, y: x.atanh(), lambda x, y: x.atanh_(), lambda x, y: x.asin(), lambda x, y: x.asin_(), lambda x, y: x.atan(), lambda x, y: x.atan2(y), lambda x, y: x.atan2_(y), lambda x, y: x.ceil(), lambda x, y: x.ceil_(), lambda x, y: x.clamp(-1, 1), lambda x, y: x.cos(), lambda x, y: x.cosh(), lambda x, y: x.div(0.5), lambda x, y: x.div_(0.5), lambda x, y: x.div(y), lambda x, y: x.div_(y), lambda x, y: x.digamma(), lambda x, y: x.digamma_(), lambda x, y: x.erf(), lambda x, y: x.erfc(), lambda x, y: x.erfinv(), lambda x, y: x.erfinv_(), lambda x, y: x.exp(), lambda x, y: x.expm1(), lambda x, y: x.expm1_(), lambda x, y: x.floor(), lambda x, y: x.floor_(), lambda x, y: x.fmod(2), lambda x, y: x.frac(), lambda x, y: x.hypot(y), lambda x, y: x.hypot_(y), lambda x, y: x.i0(), lambda x, y: x.i0_(), lambda x, y: x.lerp(y, 0.5), lambda x, y: x.log(), lambda x, y: x.log_(), lambda x, y: x.log10(), lambda x, y: x.log10_(), lambda x, y: x.log1p(), lambda x, y: x.log1p_(), lambda x, y: x.log2(), lambda x, y: x.log2_(), lambda x, y: x.mul(3), lambda x, y: x.mul_(3), lambda x, y: x.neg(), lambda x, y: x.neg_(), lambda x, y: x.pow(3), lambda x, y: x.pow_(3), lambda x, y: x.pow(0.0), lambda x, y: x.pow(1.0), lambda x, y: x.reciprocal(), lambda x, y: x.remainder(2), lambda x, y: x.round(), lambda x, y: x.round_(), lambda x, y: x.rsqrt(), lambda x, y: x.rsqrt_(), lambda x, y: x.sigmoid(), lambda x, y: x.sigmoid_(), lambda x, y: x.logit(), lambda x, y: x.logit_(), lambda x, y: x.logit(1e-6), lambda x, y: x.logit_(1e-6), lambda x, y: x.sign(), lambda x, y: x.sign_(), lambda x, y: x.sgn(), lambda x, y: x.sgn_(), lambda x, y: x.sin(), lambda x, y: x.sin_(), lambda x, y: x.sinh(), lambda x, y: x.sinh_(), lambda x, y: x.sqrt(), lambda x, y: x.sqrt_(), lambda x, y: x.tan(), lambda x, y: x.tanh(), lambda x, y: x.trunc(), lambda x, y: x.trunc_(), _chunk_op, _unsqueeze_op_add, _unsqueeze_op_clone, ] for fn in fns: x_c = x.contiguous() y_c = y.contiguous() result_c = fn(x_c, y_c) result = fn(x, y) self.assertEqual(result, result_c) self.assertTrue( result.is_contiguous(memory_format=memory_format), "result of the '{}' is not in '{}' 
format".format(inspect.getsource(fn).strip(), memory_format)) for fn in bias_fns: x_c = x.contiguous() b_c = bias.contiguous() result_c = fn(x_c, b_c) result = fn(x, bias) self.assertEqual(result, result_c) self.assertTrue( result.is_contiguous(memory_format=memory_format), "result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), memory_format)) for fn in return_contig_fns: x_c = x.contiguous() y_c = y.contiguous() result_c = fn(x_c, y_c) result = fn(x, y) self.assertEqual(result, result_c) self.assertTrue( result.is_contiguous(memory_format=torch.contiguous_format), "result of the '{}' is not in '{}' format".format(inspect.getsource(fn).strip(), torch.contiguous_format)) _test_helper( torch.randn((4, 3, 8, 8), device=device).contiguous(memory_format=torch.channels_last), abs(torch.randn((4, 3, 8, 8), device=device)) + 1, torch.randn((1, 3, 1, 1), device=device).contiguous(memory_format=torch.channels_last), torch.channels_last) _test_helper( torch.randn((4, 3, 8, 8, 8), device=device).contiguous(memory_format=torch.channels_last_3d), abs(torch.randn((4, 3, 8, 8, 8), device=device)) + 1, torch.randn((1, 3, 1, 1, 1), device=device).contiguous(memory_format=torch.channels_last_3d), torch.channels_last_3d) # FIXME: make this a elementwise unary and elementwise binary OpInfo test def test_strides_propagation(self, device): def _test_helper(x, op, unary=False): def compare_strides(s1, s2, div): sdiv = [s // div for s in s1] self.assertEqual(sdiv, s2) dim = x.dim() # we produce memory dense outputs, so when input is strided on the last dimension # we need to divide by that dimension stride to compare input and result strides div = x.stride(-1) for p in permutations(range(dim)): xp = x.permute(p) if not unary: y = torch.randn(xp.size(-1), device=x.device, dtype=x.dtype) for inputs in ((xp, xp), (xp, y), (y, xp)): res = op(*inputs) compare_strides(xp.stride(), res.stride(), div) self.assertEqual(xp.size(), res.size()) out = torch.empty(0, device=xp.device, dtype=res.dtype) res = op(*inputs, out=out) compare_strides(xp.stride(), res.stride(), div) self.assertEqual(xp.size(), res.size()) else: res = op(xp) compare_strides(xp.stride(), res.stride(), div) self.assertEqual(xp.size(), res.size()) out = torch.empty(0, device=xp.device, dtype=res.dtype) res = op(xp, out=out) compare_strides(xp.stride(), res.stride(), div) self.assertEqual(xp.size(), res.size()) # torch.eq by default calls TensorIterator with defined output, torch.add with undefined binary_ops = (torch.eq, torch.add) unary_ops = (torch.exp,) # memory dense, sliced and ambiguous sliced (ambiguous dense loses permutation information) xs = (torch.randn(2, 3, 4, device=device), torch.randn(2, 3, 8, device=device)[:, :, ::2], torch.randn(1, 1, 4, 12, device=device)[:, :, :, ::2]) for op in binary_ops: for x in xs: _test_helper(x, op) for op in unary_ops: for x in xs: _test_helper(x, op, unary=True) # FIXME: move dlpack tests to their own test class/suite @skipMeta @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16)) def test_dlpack_capsule_conversion(self, device, dtype): # DLpack does not explicitly support bool (xref dmlc/dlpack#75) x = make_tensor((5,), dtype=dtype, device=device) z = from_dlpack(to_dlpack(x)) self.assertEqual(z, x) @skipMeta @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16)) def test_dlpack_protocol_conversion(self, device, dtype): x = make_tensor((5,), dtype=dtype, device=device) z = from_dlpack(x) self.assertEqual(z, x) @skipMeta 
    @onlyNativeDeviceTypes
    def test_dlpack_shared_storage(self, device):
        x = make_tensor((5,), dtype=torch.float64, device=device)
        z = from_dlpack(to_dlpack(x))
        z[0] = z[0] + 20.0
        self.assertEqual(z, x)

    @skipMeta
    @onlyCUDA
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_dlpack_conversion_with_streams(self, device, dtype):
        # Create a stream where the tensor will reside
        stream = torch.cuda.Stream()
        with torch.cuda.stream(stream):
            # Do an operation in the actual stream
            x = make_tensor((5,), dtype=dtype, device=device) + 1
        # DLPack protocol helps establish a correct stream order
        # (hence data dependency) at the exchange boundary.
        # DLPack manages this synchronization for us, so we don't need to
        # explicitly wait until x is populated
        stream = torch.cuda.Stream()
        with torch.cuda.stream(stream):
            z = from_dlpack(x)
        stream.synchronize()
        self.assertEqual(z, x)

    @skipMeta
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_from_dlpack(self, device, dtype):
        x = make_tensor((5,), dtype=dtype, device=device)
        y = torch.from_dlpack(x)
        self.assertEqual(x, y)

    @skipMeta
    @onlyNativeDeviceTypes
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_from_dlpack_noncontiguous(self, device, dtype):
        x = make_tensor((25,), dtype=dtype, device=device).reshape(5, 5)

        y1 = x[0]
        y1_dl = torch.from_dlpack(y1)
        self.assertEqual(y1, y1_dl)

        y2 = x[:, 0]
        y2_dl = torch.from_dlpack(y2)
        self.assertEqual(y2, y2_dl)

        y3 = x[1, :]
        y3_dl = torch.from_dlpack(y3)
        self.assertEqual(y3, y3_dl)

        y4 = x[1]
        y4_dl = torch.from_dlpack(y4)
        self.assertEqual(y4, y4_dl)

        y5 = x.t()
        y5_dl = torch.from_dlpack(y5)
        self.assertEqual(y5, y5_dl)

    @skipMeta
    @onlyCUDA
    @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16))
    def test_dlpack_conversion_with_diff_streams(self, device, dtype):
        stream_a = torch.cuda.Stream()
        stream_b = torch.cuda.Stream()
        # DLPack protocol helps establish a correct stream order
        # (hence data dependency) at the exchange boundary.
        # The `tensor.__dlpack__` method will insert a synchronization event
        # in the current stream to make sure that it was correctly populated.
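        # x is produced under stream_a below, while stream_b.cuda_stream is passed to
        # __dlpack__ as the consumer stream; the exporter is expected to make the
        # producer's pending work visible to that stream before the capsule is consumed.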
with torch.cuda.stream(stream_a): x = make_tensor((5,), dtype=dtype, device=device) + 1 z = torch.from_dlpack(x.__dlpack__(stream_b.cuda_stream)) stream_a.synchronize() stream_b.synchronize() self.assertEqual(z, x) @skipMeta @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16)) def test_from_dlpack_dtype(self, device, dtype): x = make_tensor((5,), dtype=dtype, device=device) y = torch.from_dlpack(x) assert x.dtype == y.dtype @skipMeta @onlyCUDA def test_dlpack_default_stream(self, device): class DLPackTensor: def __init__(self, tensor): self.tensor = tensor def __dlpack_device__(self): return self.tensor.__dlpack_device__() def __dlpack__(self, stream=None): if torch.version.hip is None: assert stream == 1 else: assert stream == 0 capsule = self.tensor.__dlpack__(stream) converted = True return capsule # CUDA-based tests runs on non-default streams with torch.cuda.stream(torch.cuda.default_stream()): x = DLPackTensor(make_tensor((5,), dtype=torch.float32, device=device)) from_dlpack(x) @skipMeta @onlyNativeDeviceTypes @dtypes(*all_types_and_complex_and(torch.half, torch.bfloat16)) def test_dlpack_tensor_invalid_stream(self, device, dtype): with self.assertRaises(TypeError): x = make_tensor((5,), dtype=dtype, device=device) x.__dlpack__(stream=object()) @skipMeta def test_dlpack_error_on_bool_tensor(self): x = torch.tensor([True], dtype=torch.bool) with self.assertRaises(RuntimeError): to_dlpack(x) # TODO: increase tests once NumPy supports the `__dlpack__` protocol @skipMeta def test_dlpack_export_requires_grad(self): x = torch.zeros(10, dtype=torch.float32, requires_grad=True) with self.assertRaisesRegex(RuntimeError, r"require gradient"): x.__dlpack__() @skipMeta def test_dlpack_export_is_conj(self): x = torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]) y = torch.conj(x) with self.assertRaisesRegex(RuntimeError, r"conjugate bit"): y.__dlpack__() @skipMeta def test_dlpack_export_non_strided(self): x = torch.sparse_coo_tensor([[0]], [1], size=(1,)) y = torch.conj(x) with self.assertRaisesRegex(RuntimeError, r"strided"): y.__dlpack__() @onlyCUDA @unittest.skipIf(PYTORCH_CUDA_MEMCHECK, "is_pinned uses failure to detect pointer property") def test_pin_memory_from_constructor(self, device): def _get_like(t, **kwargs): return [ torch.rand_like(t, **kwargs), torch.randn_like(t, **kwargs), torch.empty_like(t, **kwargs), torch.full_like(t, 4, **kwargs), torch.zeros_like(t, **kwargs), torch.ones_like(t, **kwargs), ] def _get_tensors(**kwargs): return [ torch.tensor([10, 11], **kwargs), torch.randn(3, 5, **kwargs), torch.rand(3, **kwargs), # torch.randint(3, 5, **kwargs), // unsupported torch.zeros(3, **kwargs), torch.randperm(3, **kwargs), torch.empty(6, **kwargs), torch.ones(6, **kwargs), torch.eye(6, **kwargs), torch.arange(3, 5, **kwargs)] pinned_tensors = _get_tensors(pin_memory=True) + _get_like(torch.empty(5, dtype=torch.float64), pin_memory=True) for x in pinned_tensors: self.assertTrue(x.is_pinned()) tensors = _get_tensors() + _get_like(torch.empty(5, dtype=torch.float64, pin_memory=True)) for x in tensors: self.assertFalse(x.is_pinned()) @deviceCountAtLeast(1) @onlyCUDA def test_storage_all_devices(self, devices): for device in devices: t = torch.tensor((), device=device) self.assertEqual(t.dtype, t.storage().dtype) # FIXME: move to test distributions @skipIfMps @dtypesIfCUDA(torch.float, torch.double, torch.half) @dtypes(torch.float, torch.double) def test_multinomial(self, device, dtype): def make_prob_dist(shape, is_contiguous): if is_contiguous: if dtype == 
torch.half: return torch.zeros(shape, device=device).uniform_().to(dtype=torch.half) return torch.zeros(shape, device=device, dtype=dtype).uniform_() elif len(shape) == 1: if dtype == torch.half: return torch.zeros((shape + [5]), device=device).uniform_().to(dtype=torch.half)[:, 2] return torch.zeros((shape + [5]), device=device, dtype=dtype).uniform_()[:, 2] else: # num dim = 2 new_shape = [2, shape[1], 7, 1, shape[0], 1, 10] if dtype == torch.half: prob_dist = torch.zeros(new_shape, device=device).uniform_().to(dtype=torch.half) else: prob_dist = torch.zeros(new_shape, device=device, dtype=dtype).uniform_() prob_dist = prob_dist.transpose(1, 4) prob_dist = prob_dist[1, :, 5, 0, :, 0, 4] assert not prob_dist.is_contiguous() # sanity check return prob_dist for is_contiguous in (True, False): # with replacement n_row = 3 for n_col in range(4, 5 + 1): prob_dist = make_prob_dist([n_row, n_col], is_contiguous) # indices that shouldn't be sampled (<0 means none) zero_prob_indices = torch.LongTensor(n_row).random_(-2, n_col).tolist() for i, j in enumerate(zero_prob_indices): if j >= 0: prob_dist[i, j] = 0 n_sample = n_col * 3 sample_indices = torch.multinomial(prob_dist, n_sample, True) self.assertEqual(prob_dist.dim(), 2) self.assertEqual(sample_indices.size(1), n_sample) for i in range(n_row): zero_prob_idx = zero_prob_indices[i] if zero_prob_idx < 0: continue for j in range(n_sample): self.assertNotEqual(sample_indices[i, j], zero_prob_idx, msg="sampled an index with zero probability") # without replacement n_row = 3 for n_col in range(2, 10 + 1, 2): prob_dist = make_prob_dist([n_row, n_col], is_contiguous) # indices that shouldn't be sampled (<0 means none) zero_prob_indices = torch.LongTensor(n_row).random_(-1, n_col).tolist() for i, j in enumerate(zero_prob_indices): if j >= 0: prob_dist[i, j] = 0 n_sample = max(1, n_col - 2) sample_indices = torch.multinomial(prob_dist, n_sample, False) self.assertEqual(prob_dist.dim(), 2) self.assertEqual(sample_indices.size(1), n_sample) for i in range(n_row): row_samples = {} zero_prob_idx = zero_prob_indices[i] for j in range(n_sample): sample_idx = sample_indices[i, j] if zero_prob_idx >= 0: self.assertNotEqual(sample_idx, zero_prob_idx, msg="sampled an index with zero probability") self.assertNotIn(sample_idx, row_samples, "sampled an index twice") row_samples[sample_idx] = True # vector n_col = 4 prob_dist = make_prob_dist([n_col], is_contiguous).fill_(1) zero_prob_idx = 1 # index that shouldn't be sampled prob_dist[zero_prob_idx] = 0 n_sample = 20 sample_indices = torch.multinomial(prob_dist, n_sample, True) for sample_index in sample_indices: self.assertNotEqual(sample_index, zero_prob_idx, msg="sampled an index with zero probability") s_dim = sample_indices.dim() self.assertEqual(sample_indices.dim(), 1, msg="wrong number of dimensions") self.assertEqual(prob_dist.dim(), 1, msg="wrong number of prob_dist dimensions") self.assertEqual(sample_indices.size(0), n_sample, msg="wrong number of samples") # CUDA misalignment issue (#46702) n_row, n_col = 2, 3 prob_dist = make_prob_dist([n_row, n_col], True) n_sample = 1 sample_indices = torch.multinomial(prob_dist, n_sample, True) self.assertEqual(sample_indices.dim(), 2, msg="wrong number of dimensions") self.assertEqual(sample_indices.size(1), n_sample, msg="wrong number of samples") # FIXME: move to test distributions @onlyCUDA @dtypes(torch.float, torch.double, torch.half) def test_multinomial_deterministic(self, device, dtype): gen = torch.Generator(device=device) trials = 5 seed = 0 prob_dist = 
torch.rand(10000, 1000, device=device, dtype=dtype) n_sample = 1 for i in range(trials): gen.manual_seed(seed) samples_1 = torch.multinomial(prob_dist, n_sample, True, generator=gen) gen.manual_seed(seed) samples_2 = torch.multinomial(prob_dist, n_sample, True, generator=gen) self.assertEqual(samples_1, samples_2) self.assertEqual(samples_1.dim(), 2, msg="wrong number of dimensions") self.assertEqual(samples_1.size(1), n_sample, msg="wrong number of samples") # FIXME: move to test distributions @slowTest @dtypes(torch.float) def test_multinomial_rng_state_advance(self, device, dtype): corpus_size = 100000 freqs = torch.ones(corpus_size, dtype=torch.float, device=device) n_sample = 100 samples1 = torch.multinomial(freqs, n_sample, replacement=True) samples2 = torch.multinomial(freqs, n_sample, replacement=True) samples = torch.cat([samples1, samples2]) # expect no more than 1 repeating elements generated in 2 attempts # the probability of at least element being repeated is surprisingly large, 18% self.assertLessEqual(2 * n_sample - samples.unique().size(0), 2) samples1 = torch.multinomial(freqs, n_sample, replacement=False) samples2 = torch.multinomial(freqs, n_sample, replacement=False) samples = torch.cat([samples1, samples2]) # expect no more than 1 repeating elements generated in 2 attempts self.assertLessEqual(2 * n_sample - samples.unique().size(0), 1) def _test_memory_format_transformations(self, device, input_generator_fn, transformation_fn, memory_format, compare_data=True, default_is_preserve=False): assert(memory_format == torch.channels_last or memory_format == torch.channels_last_3d) # xc is a channels last tensor xc = input_generator_fn(device) # xc is not memory dense, but looks like channels last if memory_format == torch.channels_last: xc = xc[..., ::2, ::2] else: xc = xc[..., ::2, ::2, ::2] clone = transformation_fn(xc, memory_format=torch.preserve_format) self.assertFalse(clone.is_contiguous()) self.assertTrue(clone.is_contiguous(memory_format=memory_format)) self.assertFalse(xc.is_contiguous()) self.assertFalse(xc.is_contiguous(memory_format=memory_format)) if compare_data: self.assertEqual(xc, clone.to(xc)) xc = input_generator_fn(device) clone = transformation_fn(xc, memory_format=torch.contiguous_format) self.assertTrue(clone.is_contiguous()) self.assertFalse(clone.is_contiguous(memory_format=memory_format)) if compare_data: self.assertEqual(xc, clone.to(xc)) xc = input_generator_fn(device) clone = transformation_fn(xc) if default_is_preserve: self.assertFalse(clone.is_contiguous()) self.assertTrue(clone.is_contiguous(memory_format=memory_format)) else: self.assertTrue(clone.is_contiguous()) self.assertFalse(clone.is_contiguous(memory_format=memory_format)) if compare_data: self.assertEqual(xc, clone.to(xc)) x = torch.randn((3, 4, 5, 6, 7, 8, 9), device=device) for _ in range(10): permutation = list(range(len(x.shape))) random.shuffle(permutation) x = x.permute(permutation) self.assertEqual(x.stride(), transformation_fn(x, memory_format=torch.preserve_format).stride()) def test_memory_format_to(self, device): def get_generator(memory_format, shape): def input_generator_fn(device): return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format) return input_generator_fn def transformation_fn(tensor, **kwargs): return tensor.to(dtype=torch.float64, **kwargs) formats_shapes = ( (torch.channels_last, (4, 3, 8, 8)), (torch.channels_last_3d, (4, 3, 8, 8, 8))) for mf, shape in formats_shapes: self._test_memory_format_transformations( 
device, get_generator(mf, shape), transformation_fn, mf, default_is_preserve=True) def test_memory_format_type(self, device): def get_generator(memory_format, shape): def input_generator_fn(device): return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format) return input_generator_fn def transformation_fn(tensor, **kwargs): return tensor.to(torch.float64, **kwargs) formats_shapes = ( (torch.channels_last, (4, 3, 8, 8)), (torch.channels_last_3d, (4, 3, 8, 8, 8))) for mf, shape in formats_shapes: self._test_memory_format_transformations( device, get_generator(mf, shape), transformation_fn, mf, default_is_preserve=True) def test_memory_format_clone(self, device): def get_generator(memory_format, shape): def input_generator_fn(device): return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format) return input_generator_fn def transformation_fn(tensor, **kwargs): return tensor.clone(**kwargs) formats_shapes = ( (torch.channels_last, (4, 3, 8, 8)), (torch.channels_last_3d, (4, 3, 8, 8, 8))) for mf, shape in formats_shapes: self._test_memory_format_transformations( device, get_generator(mf, shape), transformation_fn, mf, True, default_is_preserve=True) def test_memory_format_factory_like_functions_preserve(self, device): def get_generator(memory_format, shape): def input_generator_fn(device): return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format) return input_generator_fn transformation_fns = [ lambda t, **kwargs: torch.zeros_like(t, **kwargs), lambda t, **kwargs: torch.ones_like(t, **kwargs), lambda t, **kwargs: torch.randint_like(t, 10, 100, **kwargs), lambda t, **kwargs: torch.randint_like(t, 100, **kwargs), lambda t, **kwargs: torch.randn_like(t, **kwargs), lambda t, **kwargs: torch.rand_like(t, **kwargs), lambda t, **kwargs: torch.full_like(t, 7, **kwargs), lambda t, **kwargs: torch.empty_like(t, **kwargs)] formats_shapes = ( (torch.channels_last, (4, 3, 8, 8)), (torch.channels_last_3d, (4, 3, 8, 8, 8))) for mf, shape, in formats_shapes: for transformation_fn in transformation_fns: self._test_memory_format_transformations( device, get_generator(mf, shape), transformation_fn, mf, compare_data=False, default_is_preserve=True) def test_memory_format_type_shortcuts(self, device): def get_generator(memory_format, shape, dtype): def input_generator_fn(device): return torch.randn(shape, device=device, dtype=dtype).clamp(0, 1) \ .round().contiguous(memory_format=memory_format) return input_generator_fn def get_fn(fn_name): def transformation_fn(tensor, **kwargs): fn = getattr(tensor, fn_name) return fn(**kwargs) return transformation_fn shortcuts = ['byte', 'char', 'double', 'bool', 'half', 'int', 'long', 'short'] if device == 'cpu': shortcuts += ['bfloat16'] formats_shapes = ( (torch.channels_last, (4, 3, 8, 8)), (torch.channels_last_3d, (4, 3, 8, 8, 8))) for mf, shape in formats_shapes: for fn_name in shortcuts: self._test_memory_format_transformations( device, get_generator(mf, shape, torch.float32), get_fn(fn_name), mf, default_is_preserve=True) # Test 'float' separately to avoid float->float no-op. 
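        # Using float64 inputs here makes the .float() shortcut perform an actual dtype
        # conversion instead of returning the input tensor unchanged.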
for mf, shape in formats_shapes: self._test_memory_format_transformations( device, get_generator(mf, shape, torch.float64), get_fn('float'), mf, default_is_preserve=True) @onlyCUDA def test_memory_format_cpu_and_cuda_ops(self, device): def get_generator(memory_format, shape): def input_generator_fn(device): return torch.randn(shape, device=device, dtype=torch.float32).contiguous(memory_format=memory_format) return input_generator_fn def transformation_cpu_fn(tensor, **kwargs): return tensor.cpu(**kwargs) def transformation_cuda_fn(tensor, **kwargs): return tensor.cuda(**kwargs) formats_shapes = ( (torch.channels_last, (4, 3, 8, 8)), (torch.channels_last_3d, (4, 3, 8, 8, 8))) for mf, shape in formats_shapes: self._test_memory_format_transformations( 'cuda', get_generator(mf, shape), transformation_cpu_fn, mf, default_is_preserve=True) self._test_memory_format_transformations( 'cpu', get_generator(mf, shape), transformation_cuda_fn, mf, default_is_preserve=True) # FIXME: move to test_serialization def test_pickle_gradscaler(self, device): # This test is not in test_cuda.py because it should pass in 3 cases: # 1. cuda is not available. # 2. cuda is available but device is not cuda. # 3. cuda is available and device is cuda. # In case 1, a and b disable themselves on construction and shouldn't try to pickle workhorse attributes. # In case 2, a and b are enabled. Workhorse attributes participate in pickling, but none are lazy-inited # to cuda Tensors, because I don't want to do cuda things if device is not cuda. # In case 3, a and b are enabled and we may also try lazy-initing _scale to a cuda tensor. device = torch.device(device) try_lazy_inits = (True, False) if device.type == "cuda" else (False,) for lazy_init_scale in try_lazy_inits: a = torch.cuda.amp.GradScaler(init_scale=3., growth_factor=4., backoff_factor=.5, growth_interval=2) self.assertTrue(not a.is_enabled() if torch.cuda.amp.common.amp_definitely_not_available() else a.is_enabled()) if lazy_init_scale: # Dummy a.scale() call lazy-inits a._scale Tensor. a.scale(torch.tensor([4.0], dtype=torch.float32, device=device)) self.assertTrue(isinstance(a._scale, torch.cuda.FloatTensor)) # The following three lines should work whether or not cuda is available. serialized = pickle.dumps(a) b = pickle.loads(serialized) self.assertEqual(b.is_enabled(), a.is_enabled()) if a.is_enabled(): self.assertEqual(b.get_scale(), 3.) self.assertEqual(b.get_growth_factor(), 4.) 
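                # b was rebuilt from the pickle of a, so it should report the same
                # constructor arguments a was created with: init_scale=3.,
                # growth_factor=4., backoff_factor=.5, growth_interval=2.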
self.assertEqual(b.get_backoff_factor(), .5) self.assertEqual(b.get_growth_interval(), 2) self.assertEqual(b._init_growth_tracker, 0) # supplies a dummy key to test the defaultdict's default_factory self.assertEqual(b._per_optimizer_states["fdsa"], torch.cuda.amp.grad_scaler._refresh_per_optimizer_state()) if lazy_init_scale: self.assertEqual(b.scale(torch.tensor([4.0], dtype=torch.float32, device=device)), 12.0) # FIXME: convert to ErrorInputs @skipIfMps def test_multinomial_invalid(self, device): def test(probs): with self.assertRaisesRegex(RuntimeError, 'probability tensor contains either `inf`, `nan` or element < 0'): torch.multinomial(probs.to(device), 2) torch.cuda.synchronize() test(torch.tensor([1., -1., 1.])) test(torch.tensor([1., inf, 1.])) test(torch.tensor([1., -inf, 1.])) test(torch.tensor([1., 1., nan])) # FIXME: convert to ErrorInputs @skipIfMps def test_multinomial_invalid_distribution(self, device): def test(probs, replacement): with self.assertRaisesRegex(RuntimeError, r"invalid multinomial distribution \(sum of probabilities <= 0\)"): torch.multinomial(probs, 2, replacement) torch.cuda.synchronize() x = torch.zeros(3, device=device) y = torch.zeros(3, 3, device=device) z = torch.zeros(3, 3, device=device) z[1, :] = 1 test(x, False) test(y, False) test(z, False) # Verify only for CPU as replacement=True # throws device side assert triggered. if self.device_type == 'cpu': test(x, True) test(y, True) test(z, True) # FIXME: move to test distributions def _test_multinomial_empty(self, device, replacement, num_samples): probs = torch.ones(0, 3, device=device) expected = torch.empty(0, num_samples, dtype=torch.int64) out = torch.multinomial(probs, num_samples=num_samples, replacement=replacement) self.assertEqual(out, expected) # FIXME: move to test distributions def test_multinomial_empty_w_replacement(self, device): self._test_multinomial_empty(device, True, 1) self._test_multinomial_empty(device, True, 2) # FIXME: move to test distributions def test_multinomial_empty_wo_replacement(self, device): self._test_multinomial_empty(device, False, 1) self._test_multinomial_empty(device, False, 2) @dtypesIfCUDA(torch.float, torch.double, torch.half) @dtypesIfCPU(torch.float, torch.double, torch.bfloat16) @dtypes(torch.float, torch.double) def test_multinomial_cpu(self, device, dtype): def make_prob_dist(shape, is_contiguous): if is_contiguous: if dtype == torch.half or dtype == torch.bfloat16: return torch.zeros(shape, device=device).uniform_().to(dtype=dtype) return torch.zeros(shape, device=device, dtype=dtype).uniform_() elif len(shape) == 1: if dtype == torch.half or dtype == torch.bfloat16: return torch.zeros((shape + [5]), device=device).uniform_().to(dtype=dtype)[:, 2] return torch.zeros((shape + [5]), device=device, dtype=dtype).uniform_()[:, 2] else: # num dim = 2 new_shape = [2, shape[1], 7, 1, shape[0], 1, 10] if dtype == torch.half or dtype == torch.bfloat16: prob_dist = torch.zeros(new_shape, device=device).uniform_().to(dtype=dtype) else: prob_dist = torch.zeros(new_shape, device=device, dtype=dtype).uniform_() prob_dist = prob_dist.transpose(1, 4) prob_dist = prob_dist[1, :, 5, 0, :, 0, 4] assert not prob_dist.is_contiguous() # sanity check return prob_dist # FIXME: move to elementwise ternary test suite # As the test fails with Runtime Error not raised on XLA @onlyNativeDeviceTypes def test_where_scalar_handcrafted_values(self, device): # Tests ScalarxScalar, ScalarxTensor and TensorxScalar # variant of `where` against NumPy version with # handcrafted values. 
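        # Scalars and tensors built below are paired up and compared against
        # numpy.where, with the output dtype resolved via torch.result_type.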
        condition_shape = (5, 5)
        dtypes = (
            torch.bool, torch.uint8, torch.int8, torch.int16, torch.int64,
            torch.float16, torch.float32, torch.float64,
            torch.complex64, torch.complex128,
        )
        shapes = ((), (5,), (1, 5),)

        with torch.no_grad():
            tensors = (torch.empty(shape, dtype=dtype, device=device).fill_(17)
                       for shape, dtype in product(shapes, dtypes))

        # Use different values for `x` and `y`
        # as they are the output values which are compared.
        x_vals = (True, 3, 7.0, 1 + 0.5j)
        y_vals = itertools.chain((False, 4, 8.0, 2 + 0.5j), tensors)
        for x in x_vals:
            for y in y_vals:
                condition = torch.empty(*condition_shape, dtype=torch.bool, device=device).bernoulli_()
                common_dtype = torch.result_type(x, y)

                def check_equal(condition, x, y):
                    condition_np = condition.cpu().numpy()
                    x_np = x.cpu().numpy() if isinstance(x, torch.Tensor) else x
                    y_np = y.cpu().numpy() if isinstance(y, torch.Tensor) else y

                    # NumPy aggressively promotes to double, hence cast the output to the correct dtype
                    expected = torch.from_numpy(np.where(condition_np, x_np, y_np)).to(common_dtype)
                    result = torch.where(condition, x, y)
                    self.assertEqual(expected, result)

                check_equal(condition, x, y)
                check_equal(condition, y, x)

    def test_hook_remove(self, device):
        # Reference: https://github.com/pytorch/pytorch/issues/58354
        def _test_helper(remove_hook):
            def install_hook(tensor):
                handle = None

                def hook(tensor):
                    if remove_hook:
                        handle.remove()
                    return torch.zeros_like(tensor)
                handle = tensor.register_hook(hook)

            t = torch.ones((1, 5), device=device, requires_grad=True)
            install_hook(t)
            # First call to backward
            t.mean().backward()
            self.assertEqual(t.grad, torch.zeros_like(t))
            # Second call to backward
            t.mean().backward()
            if remove_hook:
                # After removing the hook, make sure the usual gradient is returned
                self.assertEqual(t.grad, 0.2 * torch.ones_like(t))
            else:
                self.assertEqual(t.grad, torch.zeros_like(t))

        _test_helper(remove_hook=True)
        _test_helper(remove_hook=False)

    # FIXME: get PyTorch/XLA to run test_testing
    # This test should ideally be in test_testing.py,
    # but since pytorch/xla runs tests from test_torch.py, we have it here.
    @skipXLA
    def test_skip_xla(self, device):
        if self.device_type == 'xla':
            # Should not reach here!
            self.assertTrue(False)

    # FIXME: get PyTorch/XLA to run test_testing
    # This test should ideally be in test_testing.py,
    # but since pytorch/xla runs tests from test_torch.py, we have it here.
    @expectedFailureXLA
    def test_expected_failure_xla(self, device):
        if self.device_type == 'xla':
            self.assertTrue(False)

    # FIXME: get PyTorch/XLA to run test_testing
    # This test should ideally be in test_testing.py,
    # but since pytorch/xla runs tests from test_torch.py, we have it here.
    def test_assertRaisesRegex_ignore_msg_non_native_device(self, device):
        # Verify that self.assertRaisesRegex only checks the Error and ignores
        # message for non-native devices.
        x = torch.randn((10, 3), device=device)
        t = torch.empty(10, dtype=torch.int64, device=device).random_(0, 3)
        invalid_weight = torch.randn(4, device=device)
        msg = "weight tensor should be defined either for all 3 classes or no classes"
        # XLA raises RuntimeError with a different message.
        with self.assertRaisesRegex(RuntimeError, msg):
            torch.nn.functional.nll_loss(x, t, weight=invalid_weight)

    @dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.complex32))
    def test_copy_(self, device, dtype):
        def can_cast(src_dtype, dst_dtype):
            # torch.can_cast(torch.int16, torch.uint8) returns True
            # which isn't actually safe-cast.
            # This function returns False in this case.
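            # For example, can_cast(torch.int16, torch.uint8) is False here even though
            # torch.can_cast(torch.int16, torch.uint8) returns True.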
def is_unsigned_int(dtype): return dtype is torch.uint8 if is_unsigned_int(dst_dtype): return is_unsigned_int(src_dtype) return torch.can_cast(src_dtype, dst_dtype) def make_tensor_wrapper(shape, dtype): if dtype is not torch.complex32: # Make tensor does not support generating # complex32 tensor return make_tensor(shape, device=device, dtype=dtype) return torch.randn(shape, device=device, dtype=dtype) t = make_tensor_wrapper((50,), dtype) src_dtypes = all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.complex32) for src_dtype in src_dtypes: src = make_tensor_wrapper((50,), dtype=src_dtype) t.copy_(src) dst = make_tensor_wrapper((50, ), dtype=src_dtype) if can_cast(src_dtype, dtype): rtol = None atol = None if dtype in (torch.half, torch.complex32): rtol = 1e-3 atol = 1e-3 if dtype in (torch.bfloat16,): rtol = 1e-2 atol = 1e-2 self.assertEqual(src, dst.copy_(t), rtol=rtol, atol=atol) @dtypes(*all_types_and_complex_and(torch.bool, torch.half, torch.bfloat16, torch.complex32)) def test_item(self, device, dtype): t = torch.ones((), device=device, dtype=dtype) self.assertEqual(1, t.item()) # Tests that compare a device's computation with the (gold-standard) CPU's. class TestDevicePrecision(TestCase): exact_dtype = True # FIXME: move to indexing test suite @onlyCUDA def test_index_add_bfloat16(self, device): inp_tensor = torch.randn(5, 3, device='cpu').bfloat16() t = torch.tensor([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=torch.bfloat16, device='cpu') index = torch.tensor([0, 4, 2], device='cpu') out_cpu = inp_tensor.index_add(0, index, t) inp_tensor = inp_tensor.to(device=device) t = t.to(device=device) index = index.to(device=device) out_gpu = inp_tensor.index_add(0, index, t) self.assertEqual(out_cpu, out_gpu, atol=1e-2, rtol=0) # FIXME: move to serialization test suite def test_device_serialization(self, device): x = torch.randn(4, 4, device=device) with tempfile.NamedTemporaryFile() as f: torch.save(x, f) f.seek(0) x_copy = torch.load(f) self.assertEqual(x_copy, x) self.assertIs(type(x_copy), type(x)) self.assertEqual(x_copy.device, x.device) # FIXME: move to serialization test suite @deviceCountAtLeast(2) def test_multidevice_serialization(self, devices): x = [torch.randn(4, 4, device=devices[0]), torch.randn(4, 4, device=devices[1])] with tempfile.NamedTemporaryFile() as f: torch.save(x, f) f.seek(0) x_copy = torch.load(f) for original, cp in zip(x, x_copy): self.assertEqual(cp, original) self.assertIs(type(cp), type(original)) self.assertEqual(cp.device, original.device) # FIXME: move to data movement test suite @deviceCountAtLeast(1) def test_copy_noncontig(self, devices): def do_test(d0, d1): x = torch.tensor([1.5, 2.5, 3.5, 4.5, 5.5, 6.5], device=d0) y = torch.tensor([0, 0, 0, 0, 0, 0], device=d1) self.assertNotEqual(x.dtype, y.dtype) y[::2].copy_(x[::2]) self.assertEqual(y, [1, 0, 3, 0, 5, 0]) do_test('cpu', devices[0]) do_test(devices[0], 'cpu') if len(devices) > 1: do_test(devices[0], devices[1]) @deviceCountAtLeast(2) def test_type_conversions_same_device(self, devices): x = torch.randn(5, 5, device=devices[1]) self.assertEqual(x.int().device, torch.device(devices[1])) self.assertEqual(x.type(torch.int).device, torch.device(devices[1])) self.assertEqual(x.to(torch.int).device, torch.device(devices[1])) @dtypesIfCUDA(torch.half, torch.float, torch.double, torch.int8, torch.short, torch.int, torch.long, torch.uint8) @dtypes(torch.float, torch.double, torch.int8, torch.short, torch.int, torch.long, torch.uint8) def test_from_sequence(self, device, dtype): seq = 
[list(range(i * 4, i * 4 + 4)) for i in range(5)] reference = torch.arange(0, 20).resize_(5, 4) self.assertEqual(torch.tensor(seq, dtype=dtype, device=device), reference, exact_dtype=False) # FIXME: moved to indexing test suite @deviceCountAtLeast(1) def test_advancedindex_mixed_cpu_devices(self, devices) -> None: def test(x: torch.Tensor, ia: torch.Tensor, ib: torch.Tensor) -> None: # test getitem self.assertEqual(x[:, ia, None, ib, 0].cpu(), x.cpu()[:, ia.cpu(), None, ib.cpu(), 0]) self.assertEqual(x[ia], x.cpu()[ia.cpu()]) # test setitem x_clone1 = x.clone() x_clone2 = x.clone() first_shape = x[:, ia, None, ib, 0].shape second_shape = x[ia].shape x_clone1[:, ia, None, ib, 0] = torch.randn(first_shape).to(x_clone1) x_clone2[ia] = torch.randn(second_shape).to(x_clone2) cpu = torch.device('cpu') for device in devices: # Index cpu tensor with device tensor x = torch.randn(3, 4, 4, 4, 3) ia = torch.tensor([0, 2, 1]).to(device) ib = torch.tensor([0, 2, 1]).to(device) test(x, ia, ib) # Index device tensor with cpu tensor x = x.to(device) ia = ia.to(cpu) ib = ib.to(cpu) test(x, ia, ib) # Index cpu tensor with mixed cpu, device tensors x = x.to(cpu) ia = ia.to(cpu) ib = ib.to(device) test(x, ia, ib) # Index device tensor with mixed cpu, device tensors x = x.to(device) ia = ia.to(cpu) ib = ib.to(device) test(x, ia, ib) if len(devices) > 1: other_device = devices[0] if device == devices[0]: other_device = devices[1] # Index device tensor with mixed cpu, device tensors on different devices x = x.to(device) ia = ia.to(cpu) ib = ib.to(other_device) test(x, ia, ib) # FIXME: move to data movement test suite def test_copy_broadcast(self, device) -> None: x = torch.randn(10, 5) y = torch.randn(5, device=device) x.copy_(y) self.assertEqual(x[3], y) x = torch.randn(10, 5, device=device) y = torch.randn(5) x.copy_(y) self.assertEqual(x[3], y) # FIXME: move to an elementwise ternary test suite @dtypes(torch.int64, torch.float32, torch.float64) def test_clamp(self, device, dtype): test_args = [ *product( [(100, 50), (10, 64), (97,)], # shape (True, False), # non-contiguous ) ] for shape, noncontig in test_args: x = make_tensor(shape, device=device, dtype=dtype, noncontiguous=noncontig) ub = make_tensor(shape, device=device, dtype=dtype, noncontiguous=noncontig) lb = make_tensor(shape, device=device, dtype=dtype, noncontiguous=noncontig) expect = x.max(lb).min(ub) actual = x.clamp(lb, ub) self.assertEqual(expect, actual) expect = np.clip(x.cpu().numpy(), lb.cpu().numpy(), ub.cpu().numpy()) self.assertEqual(expect, actual) expect = x.max(lb) actual = x.clamp(min=lb) self.assertEqual(expect, actual) expect = x.min(ub) actual = x.clamp(max=ub) self.assertEqual(expect, actual) # Test broadcasting min & max expect = x.max(lb[0]).min(ub[..., :1]) actual = x.clamp(lb[0], ub[..., :1]) self.assertEqual(expect, actual) # Test broadcasting x expect = x[..., :1].max(lb).min(ub) actual = x[..., :1].clamp(lb, ub) self.assertEqual(expect, actual) def test_cuda_device_idx(self, device): x = torch.zeros(3, device=device) y = torch._efficientzerotensor(3, device=device) self.assertEqual(x.device, y.device) # we implemented custom deallocation for subclasses, so it behooves # us to make sure all of these bits work. 
We'll use __del__ to # track if objects die or not class Tracker: def __init__(self, marker): self.marker = marker @staticmethod def make(): marker = [False] return marker, Tracker(marker) def __del__(self): self.marker[0] = True @contextlib.contextmanager def disable_gc(): if gc.isenabled(): try: gc.disable() yield finally: gc.enable() else: yield class TestTorch(TestCase): exact_dtype = True def test_dir(self): dir(torch) def test_wildcard_import(self): exec('from torch import *') def test_newaxis_numpy_comparison(self): def run_test(tensor, *idx): npt = tensor.numpy() self.assertEqual(tensor[idx], npt[idx]) # 1D Tensor Tests x = torch.arange(0, 10) cases = [ [None], [None, None], [Ellipsis, None], [None, Ellipsis], [2, None], [None, 2], [Ellipsis, None, 2], [Ellipsis, 2, None], [2, Ellipsis, None], [2, None, Ellipsis], [None, 2, Ellipsis], [None, Ellipsis, 2], ] for case in cases: run_test(x, *case) # 2D Tensor Tests x = torch.arange(0, 12).view(3, 4) cases = [ [None], [None, None], [None, None, None], [Ellipsis, None], [Ellipsis, None, None], [None, Ellipsis], [None, Ellipsis, None], [None, None, Ellipsis], [2, None], [2, None, Ellipsis], [2, Ellipsis, None], [None, 2, Ellipsis], [Ellipsis, 2, None], [Ellipsis, None, 2], [None, Ellipsis, 2], [1, 2, None], [1, 2, Ellipsis, None], [1, Ellipsis, 2, None], [Ellipsis, 1, None, 2], [Ellipsis, 1, 2, None], [1, None, 2, Ellipsis], [None, 1, Ellipsis, 2], [None, 1, 2, Ellipsis], ] for case in cases: run_test(x, *case) def _consecutive(self, size, start=1): sequence = torch.ones(torch.tensor(size).prod(0)).cumsum(0) sequence.add_(start - 1) return sequence.resize_(*size) def test_newindex(self): reference = self._consecutive((3, 3, 3)) # This relies on __index__() being correct - but we have separate tests for that def checkPartialAssign(index): reference = torch.zeros(3, 3, 3) reference[index] = self._consecutive((3, 3, 3))[index] self.assertEqual(reference[index], self._consecutive((3, 3, 3))[index], atol=0, rtol=0) reference[index] = 0 self.assertEqual(reference, torch.zeros(3, 3, 3), atol=0, rtol=0) checkPartialAssign(0) checkPartialAssign(1) checkPartialAssign(2) checkPartialAssign((0, 1)) checkPartialAssign((1, 2)) checkPartialAssign((0, 2)) checkPartialAssign(torch.LongTensor((0, 2))) with self.assertRaises(IndexError): reference[1, 1, 1, 1] = 1 with self.assertRaises(IndexError): reference[1, 1, 1, (1, 1)] = 1 with self.assertRaises(IndexError): reference[3, 3, 3, 3, 3, 3, 3, 3] = 1 with self.assertRaises(IndexError): reference[0.0] = 1 with self.assertRaises(TypeError): reference[0.0:2.0] = 1 with self.assertRaises(IndexError): reference[0.0, 0.0:2.0] = 1 with self.assertRaises(IndexError): reference[0.0, :, 0.0:2.0] = 1 with self.assertRaises(IndexError): reference[0.0, ..., 0.0:2.0] = 1 with self.assertRaises(IndexError): reference[0.0, :, 0.0] = 1 # FIXME: move to indexing test suite def test_index_add(self): for device in get_all_device_types(): for dest_contig, src_contig, index_contig in product([True, False], repeat=3): for other_sizes in ((), (4, 5)): for dtype in [torch.int, torch.long]: num_copy, num_dest = 3, 3 dest = torch.randn(num_dest, *other_sizes, device=device) if not dest_contig: dest = make_tensor(dest.shape, device=device, dtype=dest.dtype, noncontiguous=True) src = torch.randn(num_copy, *other_sizes, device=device) if not src_contig: src = torch.testing.make_non_contiguous(src) idx = torch.randperm(num_dest, dtype=dtype, device=device).narrow(0, 0, num_copy) if not index_contig: idx = 
torch.testing.make_non_contiguous(idx) # index_add_ without alpha argument dest2 = dest.clone() dest.index_add_(0, idx, src) for i in range(idx.size(0)): dest2[idx[i]] += src[i] self.assertEqual(dest, dest2) # index_add_ with alpha argument dest2 = dest.clone() dest.index_add_(0, idx, src, alpha=2) for i in range(idx.size(0)): dest2[idx[i]] += src[i] * 2 self.assertEqual(dest, dest2) # FIXME: resolve comment below and move this to indexing test suite # add coverage for issue with atomic add that appeared only for # specific dtypes on cuda: # https://github.com/pytorch/pytorch/issues/29153 def test_index_add_all_dtypes(self): for device in get_all_device_types(): for dtype in get_all_math_dtypes(device): for idx_dtype in [torch.int, torch.long]: size = [5, 5] if dtype.is_floating_point or dtype.is_complex: tensor = torch.rand(size, dtype=dtype, device=device) elif dtype.is_signed: tensor = torch.randint(-5, 15, size, dtype=dtype, device=device) else: tensor = torch.randint(0, 10, size, dtype=dtype, device=device) # index_add calls atomicAdd on cuda. zeros = torch.zeros(size, dtype=dtype, device=device) added = zeros.index_add(0, torch.arange(0, size[0], dtype=idx_dtype, device=device), tensor) self.assertEqual(added, tensor) added = zeros.index_add(0, torch.arange(0, size[0], dtype=idx_dtype, device=device), tensor, alpha=-1) self.assertEqual(added, -tensor) # FIXME: move to shape ops test suite def test_unflatten(self): # test args: tensor, int, sizes self.assertEqual(torch.tensor([]).unflatten(0, (0, 1)), torch.empty(0, 1)) self.assertEqual(torch.tensor([1]).unflatten(0, (1, 1)), torch.tensor([[1]])) self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, (2, 2)), torch.tensor([[1, 2], [3, 4]])) self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, [2, 2]), torch.tensor([[1, 2], [3, 4]])) self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, torch.Size([2, 2])), torch.tensor([[1, 2], [3, 4]])) self.assertEqual(torch.ones(2, 10).unflatten(1, (5, 2)), torch.ones(2, 5, 2)) self.assertEqual(torch.tensor([1, 2, 3, 4]).unflatten(0, (-1, 2)), torch.tensor([[1, 2], [3, 4]])) self.assertEqual(torch.ones(2, 10).unflatten(1, (5, -1)), torch.ones(2, 5, 2)) self.assertEqual(torch.ones(2, 10).unflatten(1, (-1,)), torch.ones(2, 10)) self.assertEqual(torch.ones(2, 3 * 4 * 5 * 6).unflatten(1, (3, 4, -1, 6)), torch.ones(2, 3, 4, 5, 6)) self.assertEqual(torch.ones(2, 0, 2).unflatten(1, (3, -1, 4, 5)), torch.ones(2, 3, 0, 4, 5, 2)) # test invalid args: tensor, str, sizes with self.assertRaisesRegex(TypeError, r"received an invalid combination of arguments"): torch.tensor([1]).unflatten('A', (1, 1)) # test invalid args: tensor, str, namedshape with self.assertRaisesRegex(RuntimeError, r"Name 'A' not found in Tensor\[None\]."): torch.ones(4).unflatten('A', (('A', 2), ('B', 2))) # test other invalid arguments with self.assertRaisesRegex(RuntimeError, r"sizes must be non-empty"): torch.tensor([1]).unflatten(0, []) with self.assertRaisesRegex(RuntimeError, r"Provided sizes \[2, 2\] don't multiply up to the size of dim 0 \(1\)"): torch.tensor([1]).unflatten(0, [2, 2]) with self.assertRaisesRegex(IndexError, r"dimension specified as 0 but tensor has no dimensions"): torch.tensor(1).unflatten(0, [0]) with self.assertRaisesRegex(RuntimeError, r"only one dimension can be inferred"): torch.randn(5, 10).unflatten(1, (-1, -1)) with self.assertRaisesRegex(RuntimeError, r"Provided sizes \[-1, 4\] don't multiply up to the size of dim 1 \(10\)"): torch.randn(5, 10).unflatten(1, (-1, 4)) with 
self.assertRaisesRegex(RuntimeError, r"the unspecified dimension size -1 can be any value and is ambiguous"): torch.randn(2, 0).unflatten(1, (2, -1, 0)) def test_structseq_repr(self): a = torch.arange(250).reshape(5, 5, 10) expected = """ torch.return_types.max( values=tensor([[ 40, 41, 42, 43, 44, 45, 46, 47, 48, 49], [ 90, 91, 92, 93, 94, 95, 96, 97, 98, 99], [140, 141, 142, 143, 144, 145, 146, 147, 148, 149], [190, 191, 192, 193, 194, 195, 196, 197, 198, 199], [240, 241, 242, 243, 244, 245, 246, 247, 248, 249]]), indices=tensor([[4, 4, 4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4, 4, 4], [4, 4, 4, 4, 4, 4, 4, 4, 4, 4]]))""" self.assertEqual(repr(a.max(1)), textwrap.dedent(expected).strip()) def test_is_same_size(self): t1 = torch.empty(3, 4, 9, 10) t2 = torch.empty(3, 4) t3 = torch.empty(1, 9, 3, 3) t4 = torch.empty(3, 4, 9, 10) self.assertFalse(t1.is_same_size(t2)) self.assertFalse(t1.is_same_size(t3)) self.assertTrue(t1.is_same_size(t4)) def test_tensor_set(self): t1 = torch.tensor([]) t2 = torch.empty(3, 4, 9, 10).uniform_() t1.set_(t2) self.assertEqual(t1.storage()._cdata, t2.storage()._cdata) size = torch.Size([9, 3, 4, 10]) t1.set_(t2.storage(), 0, size) self.assertEqual(t1.size(), size) t1.set_(t2.storage(), 0, tuple(size)) self.assertEqual(t1.size(), size) self.assertEqual(t1.stride(), (120, 40, 10, 1)) stride = (10, 360, 90, 1) t1.set_(t2.storage(), 0, size, stride) self.assertEqual(t1.stride(), stride) t1.set_(t2.storage(), 0, size=size, stride=stride) self.assertEqual(t1.size(), size) self.assertEqual(t1.stride(), stride) # test argument names t1 = torch.tensor([]) # 1. case when source is tensor t1.set_(source=t2) self.assertEqual(t1.storage()._cdata, t2.storage()._cdata) # 2. case when source is storage t1.set_(source=t2.storage()) self.assertEqual(t1.storage()._cdata, t2.storage()._cdata) # 3. 
case when source is storage, and other args also specified t1.set_(source=t2.storage(), storage_offset=0, size=size, stride=stride) self.assertEqual(t1.size(), size) self.assertEqual(t1.stride(), stride) t1 = torch.tensor([True, True], dtype=torch.bool) t2 = torch.tensor([False, False], dtype=torch.bool) t1.set_(t2) self.assertEqual(t1.storage()._cdata, t2.storage()._cdata) def test_tensor_set_errors(self): f_cpu = torch.randn((2, 3), dtype=torch.float32) d_cpu = torch.randn((2, 3), dtype=torch.float64) # change dtype self.assertRaises(RuntimeError, lambda: f_cpu.set_(d_cpu.storage())) self.assertRaises(RuntimeError, lambda: f_cpu.set_(d_cpu.storage(), 0, d_cpu.size(), d_cpu.stride())) self.assertRaises(RuntimeError, lambda: f_cpu.set_(d_cpu)) # change device if torch.cuda.is_available(): f_cuda = torch.randn((2, 3), dtype=torch.float32, device='cuda') # cpu -> cuda self.assertRaises(RuntimeError, lambda: f_cpu.set_(f_cuda.storage())) self.assertRaises(RuntimeError, lambda: f_cpu.set_(f_cuda.storage(), 0, f_cuda.size(), f_cuda.stride())) self.assertRaises(RuntimeError, lambda: f_cpu.set_(f_cuda)) # cuda -> cpu self.assertRaises(RuntimeError, lambda: f_cuda.set_(f_cpu.storage())) self.assertRaises(RuntimeError, lambda: f_cuda.set_(f_cpu.storage(), 0, f_cpu.size(), f_cpu.stride())) self.assertRaises(RuntimeError, lambda: f_cuda.set_(f_cpu)) # FIXME: move this test test_testing.py (along with allclose testing) # NOTE: test_equal will be deprecated in favor of torch.testing.assert_close # once torch.testing is out of beta def test_equal(self): # Contiguous, 1D t1 = torch.tensor((3., 4., 9., 10.)) t2 = t1.contiguous() t3 = torch.tensor((1., 9., 3., 10.)) t4 = torch.tensor((3., 4., 9.)) t5 = torch.tensor([]) self.assertTrue(t1.equal(t2)) self.assertFalse(t1.equal(t3)) self.assertFalse(t1.equal(t4)) self.assertFalse(t1.equal(t5)) self.assertTrue(torch.equal(t1, t2)) self.assertFalse(torch.equal(t1, t3)) self.assertFalse(torch.equal(t1, t4)) self.assertFalse(torch.equal(t1, t5)) # Non contiguous, 2D s = torch.tensor(((1, 2, 3, 4), (5, 6, 7, 8))) s1 = s[:, 1:3] s2 = s1.clone() s3 = torch.tensor(((2, 3), (6, 7))) s4 = torch.tensor(((0, 0), (0, 0))) self.assertFalse(s1.is_contiguous()) self.assertTrue(s1.equal(s2)) self.assertTrue(s1.equal(s3)) self.assertFalse(s1.equal(s4)) self.assertTrue(torch.equal(s1, s2)) self.assertTrue(torch.equal(s1, s3)) self.assertFalse(torch.equal(s1, s4)) def test_element_size(self): byte = torch.ByteStorage().element_size() char = torch.CharStorage().element_size() short = torch.ShortStorage().element_size() int = torch.IntStorage().element_size() long = torch.LongStorage().element_size() float = torch.FloatStorage().element_size() double = torch.DoubleStorage().element_size() bool = torch.BoolStorage().element_size() bfloat16 = torch.BFloat16Storage().element_size() complexfloat = torch.ComplexFloatStorage().element_size() complexdouble = torch.ComplexDoubleStorage().element_size() self.assertEqual(byte, torch.ByteTensor().element_size()) self.assertEqual(char, torch.CharTensor().element_size()) self.assertEqual(short, torch.ShortTensor().element_size()) self.assertEqual(int, torch.IntTensor().element_size()) self.assertEqual(long, torch.LongTensor().element_size()) self.assertEqual(float, torch.FloatTensor().element_size()) self.assertEqual(double, torch.DoubleTensor().element_size()) self.assertEqual(bool, torch.BoolTensor().element_size()) self.assertEqual(bfloat16, torch.tensor([], dtype=torch.bfloat16).element_size()) self.assertEqual(complexfloat, 
torch.tensor([], dtype=torch.complex64).element_size()) self.assertEqual(complexdouble, torch.tensor([], dtype=torch.complex128).element_size()) self.assertGreater(byte, 0) self.assertGreater(char, 0) self.assertGreater(short, 0) self.assertGreater(int, 0) self.assertGreater(long, 0) self.assertGreater(float, 0) self.assertGreater(double, 0) self.assertGreater(bool, 0) self.assertGreater(bfloat16, 0) self.assertGreater(complexfloat, 0) self.assertGreater(complexdouble, 0) # These tests are portable, not necessarily strict for your system. self.assertEqual(byte, 1) self.assertEqual(char, 1) self.assertEqual(bool, 1) self.assertGreaterEqual(short, 2) self.assertGreaterEqual(int, 2) self.assertGreaterEqual(int, short) self.assertGreaterEqual(long, 4) self.assertGreaterEqual(long, int) self.assertGreaterEqual(double, float) def test_permute(self): orig = [1, 2, 3, 4, 5, 6, 7] perm = torch.randperm(7).tolist() x = torch.empty(*orig).fill_(0) new = [i - 1 for i in x.permute(*perm).size()] self.assertEqual(perm, new) self.assertEqual(x.size(), orig) def test_reversed(self): val = torch.arange(0, 10) self.assertEqual(reversed(val), torch.arange(9, -1, -1)) val = torch.arange(1, 10).view(3, 3) self.assertEqual(reversed(val), torch.tensor([[7, 8, 9], [4, 5, 6], [1, 2, 3]])) val = torch.tensor(42) self.assertEqual(reversed(val), torch.tensor(42)) def test_contains(self): x = torch.arange(0, 10) self.assertEqual(4 in x, True) self.assertEqual(12 in x, False) x = torch.arange(1, 10).view(3, 3) val = torch.arange(1, 4) self.assertEqual(val in x, True) val += 10 self.assertEqual(val in x, False) self.assertRaisesRegex( RuntimeError, "Tensor.__contains__ only supports Tensor or scalar, but you passed in a {}.".format(type("foo")), lambda: "foo" in x) self.assertRaisesRegex( RuntimeError, "Tensor.__contains__ only supports Tensor or scalar, but you passed in a {}.".format(type([1, 2])), lambda: [1, 2] in x) def test_deepcopy_parameter(self): from copy import deepcopy l = torch.nn.Linear(10, 1) s = l.state_dict(keep_vars=True) self.assertEqual(torch.nn.Parameter, type(s['weight'])) self.assertEqual(torch.nn.Parameter, type(s['bias'])) s2 = deepcopy(s) self.assertEqual(torch.nn.Parameter, type(s2['weight'])) self.assertEqual(torch.nn.Parameter, type(s2['bias'])) def test_pickle(self): import pickle a = torch.randn(5, 5) serialized = pickle.dumps(a) b = pickle.loads(serialized) self.assertEqual(a, b) def test_pickle_parameter(self): import pickle a = torch.nn.Parameter(torch.randn(5, 5)) serialized = pickle.dumps(a) b = pickle.loads(serialized) self.assertTrue(isinstance(b, torch.nn.Parameter)) self.assertEqual(a.requires_grad, b.requires_grad) self.assertEqual(a, b) def test_pickle_parameter_no_requires_grad(self): import pickle a = torch.nn.Parameter(torch.randn(5, 5), requires_grad=False) serialized = pickle.dumps(a) b = pickle.loads(serialized) self.assertTrue(isinstance(b, torch.nn.Parameter)) self.assertEqual(a.requires_grad, b.requires_grad) self.assertEqual(a, b) def test_pickle_dtype(self): t = torch.float32 serialized = pickle.dumps(t) b = pickle.loads(serialized) self.assertTrue(isinstance(b, torch.dtype)) self.assertEqual(id(b), id(t)) def test_pickle_size(self): a = torch.rand(10).size() serialized = pickle.dumps(a) b = pickle.loads(serialized) self.assertTrue(isinstance(b, torch.Size)) self.assertEqual(a, b) def test_pickle_function(self): # https://github.com/pytorch/pytorch/issues/37703 a = torch.tanh serialized = pickle.dumps(a) b = pickle.loads(serialized) self.assertEqual(a, b) def 
test_generator_cpu(self): # test default generators are equal self.assertEqual(torch.default_generator, torch.default_generator) # tests Generator API # manual_seed, seed, initial_seed, get_state, set_state g1 = torch.Generator() g2 = torch.Generator() g1.manual_seed(12345) g2.manual_seed(12345) self.assertEqual(g1.initial_seed(), g2.initial_seed()) g1.seed() g2.seed() self.assertNotEqual(g1.initial_seed(), g2.initial_seed()) g1 = torch.Generator() g2_state = g2.get_state() g2_randn = torch.randn(1, generator=g2) g1.set_state(g2_state) g1_randn = torch.randn(1, generator=g1) self.assertEqual(g1_randn, g2_randn) default_state = torch.default_generator.get_state() q = torch.empty(100) g1_normal = q.normal_() g2 = torch.Generator() g2.set_state(default_state) g2_normal = q.normal_(generator=g2) self.assertEqual(g1_normal, g2_normal) def test_invalid_generator_raises(self): self.assertRaises(RuntimeError, lambda: torch.Generator('opengl')) def _sobol_reference_samples(self, scramble: bool) -> torch.Tensor: if not scramble: # theoretical values from Joe Kuo 2010 return torch.tensor( [ [0., 0.], [0.5, 0.5], [0.75, 0.25], [0.25, 0.75], [0.375, 0.375], [0.875, 0.875], [0.625, 0.125], [0.125, 0.625], ], ) else: # theoretical values unknown: convergence properties checked return torch.tensor( [ [0.50860737, 0.29320504], [0.07116939, 0.89594537], [0.49354145, 0.11524881], [0.93097717, 0.70244044], [0.87266153, 0.23887917], [0.31021884, 0.57600391], [0.13687253, 0.42054182], [0.69931293, 0.77336788], ], ) def test_sobolengine_bounds(self, scramble: bool = False): engine = torch.quasirandom.SobolEngine(100, scramble=scramble, seed=123456) sample = engine.draw(512) self.assertTrue(torch.all(sample >= 0)) self.assertTrue(torch.all(sample <= 1)) def test_sobolengine_bounds_scrambled(self): self.test_sobolengine_bounds(scramble=True) def test_sobolengine_draw(self, scramble: bool = False): ref_sample = self._sobol_reference_samples(scramble=scramble) engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456) sample = engine.draw(n=len(ref_sample)) self.assertEqual(sample, ref_sample) self.assertEqual(engine.num_generated, len(ref_sample)) def test_sobolengine_draw_scrambled(self): self.test_sobolengine_draw(scramble=True) def test_sobolengine_first_point(self): for dtype in (torch.float, torch.double): engine = torch.quasirandom.SobolEngine(2, scramble=False) sample = engine.draw(1, dtype=dtype) self.assertTrue(torch.all(sample == 0)) self.assertEqual(sample.dtype, dtype) for dtype in (torch.float, torch.double): engine = torch.quasirandom.SobolEngine(2, scramble=True, seed=123456) sample = engine.draw(1, dtype=dtype) self.assertTrue(torch.all(sample != 0)) self.assertEqual(sample.dtype, dtype) def test_sobolengine_continuing(self, scramble: bool = False): ref_sample = self._sobol_reference_samples(scramble=scramble) engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456) n_half = len(ref_sample) // 2 _ = engine.draw(n=n_half) sample = engine.draw(n=n_half) torch.testing.assert_close(sample, ref_sample[n_half:]) def test_sobolengine_continuing_scrambled(self): self.test_sobolengine_continuing(scramble=True) def test_sobolengine_reset(self, scramble: bool = False): ref_sample = self._sobol_reference_samples(scramble=scramble) engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456) _ = engine.draw(n=len(ref_sample) // 2) engine.reset() self.assertEqual(engine.num_generated, 0) sample = engine.draw(n=len(ref_sample)) torch.testing.assert_close(sample, 
ref_sample)

    def test_sobolengine_reset_scrambled(self):
        self.test_sobolengine_reset(scramble=True)

    def test_sobolengine_fast_forward(self, scramble: bool = False):
        ref_sample = self._sobol_reference_samples(scramble=scramble)
        engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
        engine.fast_forward(4)
        sample = engine.draw(n=4)
        torch.testing.assert_close(sample, ref_sample[4:])
        # alternate fast forwarding with sampling
        engine.reset()
        even_draws = []
        for i in range(8):
            if i % 2 == 0:
                even_draws.append(engine.draw())
            else:
                engine.fast_forward(1)
        torch.testing.assert_close(
            ref_sample[[i for i in range(8) if i % 2 == 0]],
            torch.from_numpy(np.concatenate(even_draws)),
        )

    def test_sobolengine_fast_forward_scrambled(self):
        self.test_sobolengine_fast_forward(scramble=True)

    def test_sobolengine_distribution(self, scramble=False):
        d = 50
        engine = torch.quasirandom.SobolEngine(d, scramble=scramble, seed=123456)
        sample = engine.draw(1024)
        torch.testing.assert_close(
            torch.mean(sample, dim=0), torch.full((d,), 0.5), atol=2, rtol=2
        )
        torch.testing.assert_close(
            np.percentile(sample, 25, axis=0), np.repeat(0.25, d), atol=2, rtol=2
        )
        torch.testing.assert_close(
            np.percentile(sample, 75, axis=0), np.repeat(0.75, d), atol=2, rtol=2
        )

    def test_sobolengine_distribution_scrambled(self):
        self.test_sobolengine_distribution(scramble=True)

    def test_sobolengine_draw_base2(self, scramble=False):
        ref_sample = self._sobol_reference_samples(scramble=scramble)
        engine = torch.quasirandom.SobolEngine(2, scramble=scramble, seed=123456)
        sample = engine.draw_base2(2)
        self.assertEqual(ref_sample[:4], sample)
        # resampling still having N=2**n
        sample = engine.draw_base2(2)
        self.assertEqual(ref_sample[4:8], sample)

    def test_sobolengine_draw_base2_scrambled(self):
        self.test_sobolengine_draw_base2(scramble=True)

    def test_sobolengine_raise(self):
        maxdim = torch.quasirandom.SobolEngine.MAXDIM
        with self.assertRaises(ValueError):
            torch.quasirandom.SobolEngine(maxdim + 1)

    def test_sobolengine_high_dim(self):
        engine = torch.quasirandom.SobolEngine(1111, scramble=False, seed=123456)
        samples1 = engine.draw()
        vals1, counts1 = torch.unique(samples1, return_counts=True)
        samples2 = engine.draw()
        vals2, counts2 = torch.unique(samples2, return_counts=True)
        self.assertEqual(vals1.item(), 0.0)
        self.assertEqual(counts1.item(), 1111)
        self.assertEqual(vals2.item(), 0.5)
        self.assertEqual(counts2.item(), 1111)

    def test_parsing_int64(self):
        # accepts integer arguments
        x = torch.cumsum(torch.ones(5, 5), 0)
        self.assertEqual(x, torch.cumsum(torch.ones(5, 5), torch.tensor(0)))
        # doesn't accept floating point variables
        self.assertRaises(TypeError, lambda: torch.cumsum(torch.ones(5, 5), torch.tensor(0.)))

    def test_parsing_double(self):
        # accepts floating point and integer arguments
        x = torch.randn(2, 3)
        torch.isclose(x, x, 1, 1)
        self.assertTrue(torch.isclose(x, x, 1, 1).all())
        self.assertTrue(torch.isclose(x, x, 1.5, 1.).all())
        # accepts floating point and integer tensors
        self.assertTrue(torch.isclose(x, x, torch.tensor(1), torch.tensor(1)).all())
        self.assertTrue(torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1.)).all())
        # doesn't accept variables with requires_grad
        self.assertRaises(TypeError, lambda: torch.isclose(x, x, torch.tensor(1.5), torch.tensor(1., requires_grad=True)).all())

    def test_parsing_intlist(self):
        # parse with integer variables
        self.assertEqual(torch.Size([3, 4]), torch.ones((torch.tensor(3), torch.tensor(4))).shape)
        self.assertEqual(torch.Size([3, 4]), torch.ones(torch.tensor(3), torch.tensor(4)).shape)
        # parse with
numpy integers self.assertEqual(torch.Size([3, 4]), torch.ones((np.array(3), np.int64(4))).shape) self.assertEqual(torch.Size([3, 4]), torch.ones(np.array(3), np.int64(4)).shape) self.assertEqual(torch.Size([3, 4]), torch.ones((np.int64(3), np.array(4))).shape) self.assertEqual(torch.Size([3, 4]), torch.ones(np.int64(3), np.array(4)).shape) # fail parse with float variables self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3.), torch.tensor(4)))) # fail parse with numpy floats self.assertRaises(TypeError, lambda: torch.ones((np.float(3.), torch.tensor(4)))) self.assertRaises(TypeError, lambda: torch.ones((np.array(3.), torch.tensor(4)))) # fail parse with > 1 element variables self.assertRaises(TypeError, lambda: torch.ones(torch.tensor(3, 3))) self.assertRaises(TypeError, lambda: torch.ones((torch.tensor(3, 3)))) self.assertRaises(TypeError, lambda: torch.ones(np.array(3, 3))) self.assertRaises(TypeError, lambda: torch.ones((np.array(3, 3)))) # fail parse with additional positional args after intlist arg self.assertRaisesRegex(TypeError, "received an invalid combination of arguments", lambda: torch.LongTensor((6, 0), 1, 1, 0)) self.assertRaisesRegex(TypeError, "missing 1 required positional arguments", lambda: torch.tensor().new_zeros((5, 5), 0)) def test_from_buffer(self): a = bytearray([1, 2, 3, 4]) self.assertEqual(torch.ByteStorage.from_buffer(a).tolist(), [1, 2, 3, 4]) shorts = torch.ShortStorage.from_buffer(a, 'big') self.assertEqual(shorts.size(), 2) self.assertEqual(shorts.tolist(), [258, 772]) ints = torch.IntStorage.from_buffer(a, 'little') self.assertEqual(ints.size(), 1) self.assertEqual(ints[0], 67305985) f = bytearray([0x40, 0x10, 0x00, 0x00]) floats = torch.FloatStorage.from_buffer(f, 'big') self.assertEqual(floats.size(), 1) self.assertEqual(floats[0], 2.25) f = bytearray([0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x40]) bools = torch.BoolStorage.from_buffer(f, 'big') self.assertEqual(bools.size(), 8) self.assertEqual(bools.tolist(), [False, True, True, True, True, True, True, True]) self.assertEqual(bools.type(), 'torch.BoolStorage') self.assertTrue(isinstance(bools, torch.BoolStorage)) f = bytearray(b'\x80\x02\x8a\nl\xfc\x9cF\xf9 j\xa8P\x19.\x80\x02M\xe9') bools = torch.BoolStorage.from_buffer(f, 'big') self.assertEqual(bools.size(), 19) f = bytearray(b'\0x4A') bools = torch.BoolStorage.from_buffer(f, 'big') self.assertEqual(bools.size(), 4) self.assertEqual(bools.tolist(), [False, True, True, True]) bytes = torch.ByteStorage.from_buffer(a) self.assertEqual(bytes.nbytes(), 4) self.assertEqual(bytes.tolist(), [1, 2, 3, 4]) self.assertTrue(isinstance(bytes, torch.ByteStorage)) def test_storage_error(self): quantized_storages = [ torch.QInt32Storage, torch.QInt8Storage, torch.QUInt2x4Storage, torch.QUInt4x2Storage, torch.QUInt8Storage, ] with self.assertRaisesRegex(RuntimeError, r"Only child classes of _LegacyStorage can be instantiated"): torch.storage._LegacyStorage() for storage_class in torch._storage_classes: if storage_class in [torch._UntypedStorage, torch._TypedStorage]: continue device = 'cuda' if storage_class.__module__ == 'torch.cuda' else 'cpu' dtype = storage_class.dtype if device == 'cuda' and not torch.cuda.is_available(): continue # Legacy <type>Storage constructor errors with self.assertRaisesRegex(RuntimeError, r"'device' cannot be specified"): storage_class(device='cpu') with self.assertRaisesRegex(RuntimeError, r"'dtype' cannot be specified"): storage_class(dtype=torch.float) with self.assertRaisesRegex(TypeError, r"got an unexpected 
keyword"): storage_class(sdlkjf=torch.float) with self.assertRaisesRegex(RuntimeError, r"Too many positional arguments"): storage_class(0, 0) with self.assertRaisesRegex(TypeError, r"invalid data type"): storage_class('string') with self.assertRaisesRegex(TypeError, r"Argument type not recognized"): storage_class(torch.tensor([])) s = storage_class() with self.assertRaisesRegex(RuntimeError, r"No positional arguments"): storage_class(0, wrap_storage=s._untyped()) with self.assertRaisesRegex(TypeError, r"must be _UntypedStorage"): storage_class(wrap_storage=s) if torch.cuda.is_available(): if storage_class in quantized_storages: with self.assertRaisesRegex(RuntimeError, r"Cannot create CUDA storage with quantized dtype"): s.cuda() else: if s.is_cuda: s_other_device = s.cpu() else: s_other_device = s.cuda() with self.assertRaisesRegex(RuntimeError, r"Device of 'wrap_storage' must be"): storage_class(wrap_storage=s_other_device._untyped()) # _TypedStorage constructor errors with self.assertRaisesRegex(RuntimeError, r"No positional arguments"): torch._TypedStorage(0, wrap_storage=s._untyped(), dtype=dtype) with self.assertRaisesRegex(RuntimeError, r"Argument 'dtype' must be specified"): torch._TypedStorage(wrap_storage=s._untyped()) with self.assertRaisesRegex(TypeError, r"Argument 'dtype' must be torch.dtype"): torch._TypedStorage(wrap_storage=s._untyped(), dtype=0) with self.assertRaisesRegex(RuntimeError, r"Argument 'device' should not be specified"): torch._TypedStorage(wrap_storage=s._untyped(), dtype=dtype, device=device) with self.assertRaisesRegex(TypeError, r"Argument 'wrap_storage' must be _UntypedStorage"): torch._TypedStorage(wrap_storage=s, dtype=dtype) with self.assertRaisesRegex(RuntimeError, r"Storage device not recognized"): torch._TypedStorage(dtype=dtype, device='xla') if torch.cuda.is_available(): if storage_class in quantized_storages: with self.assertRaisesRegex(RuntimeError, r"Cannot create CUDA storage with quantized dtype"): torch._TypedStorage(dtype=dtype, device='cuda') with self.assertRaisesRegex(TypeError, r"Argument type not recognized"): torch._TypedStorage(torch.tensor([]), dtype=dtype, device=device) with self.assertRaisesRegex(RuntimeError, r"Too many positional arguments"): torch._TypedStorage(0, 0, dtype=dtype, device=device) if isinstance(s, torch._TypedStorage): s_other = torch._TypedStorage([1, 2, 3, 4], device=device, dtype=dtype) with self.assertRaisesRegex(RuntimeError, r'cannot set item'): s.fill_(s_other) def test_storage_error_no_attribute(self): storage_classes = [ torch.cuda.ByteStorage, torch.cuda.FloatStorage, ] for storage_class in storage_classes: with self.assertRaisesRegex(RuntimeError, r'Not available for CUDA storage'): storage_class.from_buffer() with self.assertRaisesRegex(RuntimeError, r'Not available for CUDA storage'): storage_class._new_with_weak_ptr() with self.assertRaisesRegex(RuntimeError, r'Not available for CUDA storage'): storage_class._new_shared_filename(0, 0, 0) def test_storage_casts(self): storage = torch.IntStorage([-1, 0, 1, 2, 3, 4]) self.assertEqual(storage.size(), 6) self.assertEqual(storage.tolist(), [-1, 0, 1, 2, 3, 4]) self.assertEqual(storage.type(), 'torch.IntStorage') self.assertIs(storage.dtype, torch.int32) floatStorage = storage.float() self.assertEqual(floatStorage.size(), 6) self.assertEqual(floatStorage.tolist(), [-1, 0, 1, 2, 3, 4]) self.assertEqual(floatStorage.type(), 'torch.FloatStorage') self.assertEqual(floatStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(floatStorage.dtype, 
torch.float32) halfStorage = storage.half() self.assertEqual(halfStorage.size(), 6) self.assertEqual(halfStorage.tolist(), [-1, 0, 1, 2, 3, 4]) self.assertEqual(halfStorage.type(), 'torch.HalfStorage') self.assertEqual(halfStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(halfStorage.dtype, torch.float16) bfloat16Storage = storage.bfloat16() self.assertEqual(bfloat16Storage.size(), 6) self.assertEqual(bfloat16Storage.tolist(), [-1, 0, 1, 2, 3, 4]) self.assertEqual(bfloat16Storage.type(), 'torch.BFloat16Storage') self.assertEqual(bfloat16Storage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(bfloat16Storage.dtype, torch.bfloat16) longStorage = storage.long() self.assertEqual(longStorage.size(), 6) self.assertEqual(longStorage.tolist(), [-1, 0, 1, 2, 3, 4]) self.assertEqual(longStorage.type(), 'torch.LongStorage') self.assertEqual(longStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(longStorage.dtype, torch.int64) shortStorage = storage.short() self.assertEqual(shortStorage.size(), 6) self.assertEqual(shortStorage.tolist(), [-1, 0, 1, 2, 3, 4]) self.assertEqual(shortStorage.type(), 'torch.ShortStorage') self.assertEqual(shortStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(shortStorage.dtype, torch.int16) doubleStorage = storage.double() self.assertEqual(doubleStorage.size(), 6) self.assertEqual(doubleStorage.tolist(), [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0]) self.assertEqual(doubleStorage.type(), 'torch.DoubleStorage') self.assertEqual(doubleStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(doubleStorage.dtype, torch.float64) charStorage = storage.char() self.assertEqual(charStorage.size(), 6) self.assertEqual(charStorage.tolist(), [-1.0, 0.0, 1.0, 2.0, 3.0, 4.0]) self.assertEqual(charStorage.type(), 'torch.CharStorage') self.assertEqual(charStorage.int().tolist(), [-1, 0, 1, 2, 3, 4]) self.assertIs(charStorage.dtype, torch.int8) byteStorage = storage.byte() self.assertEqual(byteStorage.size(), 6) self.assertEqual(byteStorage.tolist(), [255, 0, 1, 2, 3, 4]) self.assertEqual(byteStorage.type(), 'torch.ByteStorage') self.assertEqual(byteStorage.int().tolist(), [255, 0, 1, 2, 3, 4]) self.assertIs(byteStorage.dtype, torch.uint8) boolStorage = storage.bool() self.assertEqual(boolStorage.size(), 6) self.assertEqual(boolStorage.tolist(), [True, False, True, True, True, True]) self.assertEqual(boolStorage.type(), 'torch.BoolStorage') self.assertEqual(boolStorage.int().tolist(), [1, 0, 1, 1, 1, 1]) self.assertIs(boolStorage.dtype, torch.bool) complexfloat_storage = torch.ComplexFloatStorage([-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j]) self.assertEqual(complexfloat_storage.size(), 6) self.assertEqual(complexfloat_storage.tolist(), [-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j]) self.assertEqual(complexfloat_storage.type(), 'torch.ComplexFloatStorage') self.assertIs(complexfloat_storage.dtype, torch.complex64) complexdouble_storage = complexfloat_storage.complex_double() self.assertEqual(complexdouble_storage.size(), 6) self.assertEqual(complexdouble_storage.tolist(), [-1, 0, 1 + 2j, 2.5j, 3.5, 4 - 2j]) self.assertEqual(complexdouble_storage.type(), 'torch.ComplexDoubleStorage') self.assertIs(complexdouble_storage.dtype, torch.complex128) def test_from_file(self): def assert_with_filename(filename): size = 10000 s1 = torch.FloatStorage.from_file(filename, True, size) t1 = torch.FloatTensor(s1).copy_(torch.randn(size)) self.assertEqual(s1.data_ptr(), torch.FloatTensor(s1).data_ptr()) # check mapping s2 = torch.FloatStorage.from_file(filename, True, size) t2 = torch.FloatTensor(s2) 
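        # Note (added for clarity): both s1 and s2 were created with shared=True over the
        # same file, so the two tensors below are views of one memory-mapped buffer and
        # should observe each other's writes, which the fill_/assertEqual pairs verify.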
self.assertEqual(t1, t2, atol=0, rtol=0) # check changes to t1 from t2 rnum = random.uniform(-1, 1) t1.fill_(rnum) self.assertEqual(t1, t2, atol=0, rtol=0) # check changes to t2 from t1 rnum = random.uniform(-1, 1) t2.fill_(rnum) self.assertEqual(t1, t2, atol=0, rtol=0) # release the tensors del s1, t1, s2, t2 with TemporaryFileName() as fname: assert_with_filename(fname) if IS_FILESYSTEM_UTF8_ENCODING: with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname: assert_with_filename(fname) def test_torch_from_file(self): def assert_with_filename(filename): size = 10000 s1 = torch.from_file(filename, True, size, dtype=torch.float) t1 = torch.FloatTensor(s1).copy_(torch.randn(size)) # check mapping s2 = torch.from_file(filename, True, size, dtype=torch.float) t2 = torch.FloatTensor(s2) self.assertEqual(t1, t2, atol=0, rtol=0) # check changes to t1 from t2 rnum = random.uniform(-1, 1) t1.fill_(rnum) self.assertEqual(t1, t2, atol=0, rtol=0) # check changes to t2 from t1 rnum = random.uniform(-1, 1) t2.fill_(rnum) self.assertEqual(t1, t2, atol=0, rtol=0) # release the tensors del s1, t1, s2, t2 with TemporaryFileName() as fname: assert_with_filename(fname) if IS_FILESYSTEM_UTF8_ENCODING: with TemporaryDirectoryName(suffix='中文') as dname, TemporaryFileName(dir=dname) as fname: assert_with_filename(fname) def test_print(self): default_type = torch.tensor([]).type() for t in torch._tensor_classes: if t == torch.HalfTensor: continue # HalfTensor does not support fill if t.is_sparse: continue if t.is_cuda and not torch.cuda.is_available(): continue obj = t(100, 100).fill_(1) obj.__repr__() str(obj) # test half tensor obj = torch.rand(100, 100, device='cpu').half() obj.__repr__() str(obj) for t in torch._storage_classes: if t == torch.BFloat16Storage: continue # Fix once fill is enabled for bfloat16 if t.is_cuda and not torch.cuda.is_available(): continue if t == torch.BoolStorage or t == torch.cuda.BoolStorage: obj = t(100).fill_(True) else: obj = t(100).fill_(1) obj.__repr__() str(obj) # test complex tensor # complex tensor print uses two formatters, one for real values # and the other for imag values. this is consistent with numpy x = torch.tensor([2.3 + 4j, 7 + 6j]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([2.3000+4.j, 7.0000+6.j])''') # test complex half tensor x = torch.tensor([1.25 + 4j, -7. 
+ 6j], dtype=torch.chalf) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([ 1.2500+4.j, -7.0000+6.j], dtype=torch.complex32)''') # test scientific notation for complex tensors x = torch.tensor([1e28 + 2j , -1e-28j]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.0000e+28+2.0000e+00j, -0.0000e+00-1.0000e-28j])''') # test big integer x = torch.tensor(2341234123412341) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor(2341234123412341)''') # test scientific notation x = torch.tensor([1e28, 1e-28]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.0000e+28, 1.0000e-28])''') # test scientific notation using set_printoptions x = torch.tensor([1e2, 1e-2]) torch.set_printoptions(sci_mode=True) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.0000e+02, 1.0000e-02])''') torch.set_printoptions(sci_mode=False) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([ 100.0000, 0.0100])''') torch.set_printoptions(sci_mode=None) # reset to the default value # test no leading space if all elements positive x = torch.tensor([1, 2]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1, 2])''') # test for leading space if there are negative elements x = torch.tensor([1, -2]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([ 1, -2])''') # test inf and nan x = torch.tensor([4, inf, 1.5, -inf, 0, nan, 1]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([4.0000, inf, 1.5000, -inf, 0.0000, nan, 1.0000])''') y = torch.tensor([4, inf, complex(1.5, inf), complex(-inf, 4), 0, complex(nan, inf), complex(3, nan)]) self.assertEqual(y.__repr__(), str(y)) expected_str = '''\ tensor([4.0000+0.j, inf+0.j, 1.5000+infj, -inf+4.j, 0.0000+0.j, nan+infj, 3.0000+nanj])''' self.assertExpectedInline(str(y), expected_str) # test dtype torch.set_default_dtype(torch.float) x = torch.tensor([1e-324, 1e-323, 1e-322, 1e307, 1e308, 1e309], dtype=torch.float64) self.assertEqual(x.__repr__(), str(x)) expected_str = '''\ tensor([ 0.0000e+00, 9.8813e-324, 9.8813e-323, 1.0000e+307, 1.0000e+308, inf], dtype=torch.float64)''' self.assertExpectedInline(str(x), expected_str) # test changing default dtype torch.set_default_dtype(torch.float64) self.assertEqual(x.__repr__(), str(x)) expected_str = '''\ tensor([ 0.0000e+00, 9.8813e-324, 9.8813e-323, 1.0000e+307, 1.0000e+308, inf])''' self.assertExpectedInline(str(x), expected_str) # test summary x = torch.zeros(10000) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([0., 0., 0., ..., 0., 0., 0.])''') # test internal summary function x = torch.rand(1, 20, 5, 30) summary = torch._tensor_str.get_summarized_data(x) self.assertEqual(summary.shape, (1, 6, 5, 6)) first_and_last = [0, 1, 2, -3, -2, -1] self.assertEqual(summary, x[:, first_and_last][..., first_and_last]) # test device if torch.cuda.is_available(): x = torch.tensor([123], device='cuda:0') self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([123], device='cuda:0')''') # test changing default to cuda torch.set_default_tensor_type(torch.cuda.FloatTensor) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([123])''') # test printing a tensor on a different gpu than current one. 
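            # (clarifying note: the repr is expected to keep reporting device='cuda:0'
            # even while a different CUDA device is made current below)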
if torch.cuda.device_count() >= 2: with torch.cuda.device(1): self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([123], device='cuda:0')''') # test printing cpu tensor when default device is cuda y = torch.tensor([123], device='cpu') self.assertEqual(y.__repr__(), str(y)) self.assertExpectedInline(str(y), '''tensor([123], device='cpu')''') torch.set_default_tensor_type(default_type) # test integral floats and requires_grad x = torch.tensor([123.], requires_grad=True) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([123.], requires_grad=True)''') # test non-contiguous print # sliced tensor should have > PRINT_OPTS.threshold elements x = torch.ones(100, 2, 2, 10) y = x.as_strided(size=(100, 2, 10), stride=(2 * 2 * 10, 2 * 10, 1)) self.assertEqual(str(y), y.__repr__()) expected_str = '''\ tensor([[[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]], [[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]], [[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]], ..., [[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]], [[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]], [[1., 1., 1., ..., 1., 1., 1.], [1., 1., 1., ..., 1., 1., 1.]]])\ ''' self.assertExpectedInline(str(y), expected_str) x = torch.ones(100, 2, 2, 10) * (1 + 1j) y = x.as_strided(size=(100, 2, 10), stride=(2 * 2 * 10, 2 * 10, 1)) self.assertEqual(str(y), y.__repr__()) expected_str = '''\ tensor([[[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]], [[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]], [[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]], ..., [[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]], [[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]], [[1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j], [1.+1.j, 1.+1.j, 1.+1.j, ..., 1.+1.j, 1.+1.j, 1.+1.j]]])\ ''' self.assertExpectedInline(str(y), expected_str) # test print 0-dim tensor: there's no 0-dim in Numpy, we match arrayprint style x = torch.tensor(0.00002) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor(2.0000e-05)''') # test print boolean tensor x = torch.tensor([True]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([True])''') x = torch.tensor(True) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor(True)''') # [Numpy] test print float in sci_mode when min < 0.0001. x = torch.tensor([0.00002]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([2.0000e-05])''') # [Numpy] test print complex in sci_mode when real_min < 0.0001 and (or) imag_min < 0.0001. x = torch.tensor([0.00002]) * (1 + 1j) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([2.0000e-05+2.0000e-05j])''') # [Numpy] test print float in sci_mode when max > 1e8. # TODO: Pytorch uses fixed precision to print, while Numpy uses dragon4_scientific # to do automatic trimming and padding. x = torch.tensor([123456789.]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.2346e+08])''') # [Numpy] test print float in sci_mode when max / min > 1000. 
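        # (clarifying note: for the tensor below, max / min = 11 / 0.01 = 1100 > 1000,
        # so both entries are expected to print in scientific notation)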
x = torch.tensor([0.01, 11]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.0000e-02, 1.1000e+01])''') # [Numpy] test print int max / min > 1000, no sci_mode x = torch.tensor([1, 1010]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([ 1, 1010])''') # [Numpy] test print int > 1e8, no sci_mode x = torch.tensor([1000000000]) # 1e9 self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1000000000])''') # [Numpy] test printing float in int_mode x = torch.tensor([1., 1000.]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([ 1., 1000.])''') # [Numpy] test printing float in int_mode in sci format when max / min > 1000. x = torch.tensor([1., 1010.]) self.assertEqual(x.__repr__(), str(x)) self.assertExpectedInline(str(x), '''tensor([1.0000e+00, 1.0100e+03])''') def test_sizeof(self) -> None: sizeof_empty = torch.randn(0).storage().__sizeof__() sizeof_10 = torch.randn(10).storage().__sizeof__() sizeof_100 = torch.randn(100).storage().__sizeof__() self.assertEqual((sizeof_100 - sizeof_empty) // (sizeof_10 - sizeof_empty), 10) self.assertEqual((sizeof_100 - sizeof_empty) % (sizeof_10 - sizeof_empty), 0) sizeof_empty = torch.randn(0).to(torch.uint8).storage().__sizeof__() sizeof_10 = torch.randn(10).to(torch.uint8).storage().__sizeof__() sizeof_100 = torch.randn(100).to(torch.uint8).storage().__sizeof__() self.assertEqual((sizeof_100 - sizeof_empty) // (sizeof_10 - sizeof_empty), 10) self.assertEqual((sizeof_100 - sizeof_empty) % (sizeof_10 - sizeof_empty), 0) def test_iter(self) -> None: x = torch.randn(5, 5) for i, sub in enumerate(x): self.assertEqual(sub, x[i]) x = torch.tensor([]) self.assertEqual(list(x), []) def test_new(self) -> None: x = torch.autograd.Variable(torch.tensor([])) y = torch.autograd.Variable(torch.randn(4, 4)) z = torch.autograd.Variable(torch.IntTensor([1, 2, 3])) self.assertEqual(x.new().shape, [0]) self.assertEqual(x.new(), x) self.assertEqual(x.new(1, 2).shape, [1, 2]) self.assertEqual(x.new(torch.Size([3, 4])).shape, [3, 4]) self.assertEqual(x.new([3, 4]).shape, [2]) self.assertEqual(x.new([3, 4]).tolist(), [3, 4]) self.assertEqual(x.new((3, 4)).tolist(), [3, 4]) self.assertEqual(x.new([np.int32(3), np.float64(4)]).tolist(), [3, 4]) self.assertEqual(x.new(np.array((3, 4))).tolist(), [3, 4]) self.assertEqual(x.new([z[2], z[0] + 3]).tolist(), [3, 4]) self.assertEqual(x.new(size=(3, 4)).shape, [3, 4]) self.assertEqual(x.new(()).shape, [0]) self.assertEqual(x.new(y.storage()).data_ptr(), y.data_ptr()) self.assertEqual(x.new(y).data_ptr(), y.data_ptr()) self.assertIsNot(x.new(y), y) self.assertRaises(TypeError, lambda: x.new(z)) # TypeError would be better self.assertRaises(RuntimeError, lambda: x.new(z.storage())) @unittest.skipIf(PYTORCH_CUDA_MEMCHECK, "is_pinned uses failure to detect pointer property") def test_pin_memory(self): x = torch.randn(3, 5) self.assertFalse(x.is_pinned()) if not torch.cuda.is_available(): self.assertRaises(RuntimeError, lambda: x.pin_memory()) else: pinned = x.pin_memory() self.assertTrue(pinned.is_pinned()) self.assertEqual(pinned, x) self.assertNotEqual(pinned.data_ptr(), x.data_ptr()) # test that pin_memory on already pinned tensor has no effect self.assertIs(pinned, pinned.pin_memory()) self.assertEqual(pinned.data_ptr(), pinned.pin_memory().data_ptr()) def test_error_msg_type_translation(self): with self.assertRaisesRegex( RuntimeError, # message includes both Double and Long '(?=.*Double)(?=.*Long)'): # 
Calls model with a LongTensor input but DoubleTensor weights input = torch.zeros(1, 1, 1, 6, dtype=torch.long) weight = torch.nn.Parameter(torch.zeros(1, 1, 1, 3, dtype=torch.double)) model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False) model.weight = weight out = model(input) def test_apply(self): x = torch.arange(1, 6) res = x.clone().apply_(lambda k: k + k) self.assertEqual(res, x * 2) self.assertRaises(TypeError, lambda: x.apply_(lambda k: "str")) def test_map(self): x = torch.autograd.Variable(torch.randn(3, 3)) y = torch.autograd.Variable(torch.randn(3)) res = x.clone() res.map_(y, lambda a, b: a + b) self.assertEqual(res, x + y) self.assertRaisesRegex(TypeError, "not callable", lambda: res.map_(y, "str")) def test_map2(self): x = torch.autograd.Variable(torch.randn(3, 3)) y = torch.autograd.Variable(torch.randn(3)) z = torch.autograd.Variable(torch.randn(1, 3)) res = x.clone() res.map2_(y, z, lambda a, b, c: a + b * c) self.assertEqual(res, x + y * z) z.requires_grad = True self.assertRaisesRegex( RuntimeError, "requires grad", lambda: res.map2_(y, z, lambda a, b, c: a + b * c)) def test_Size(self): x = torch.Size([1, 2, 3]) self.assertIsInstance(x, tuple) self.assertEqual(x[0], 1) self.assertEqual(x[1], 2) self.assertEqual(x[2], 3) self.assertEqual(len(x), 3) self.assertRaises(TypeError, lambda: torch.Size(torch.ones(3))) self.assertIsInstance(x * 2, torch.Size) self.assertIsInstance(x[:-1], torch.Size) self.assertIsInstance(x + x, torch.Size) def test_Size_scalar(self): three = torch.tensor(3) two = torch.tensor(2) x = torch.Size([0, 1, two, three, 4]) for i in range(1, 5): self.assertEqual(x[i], i) def test_Size_iter(self): for sizes in [iter([1, 2, 3, 4, 5]), range(1, 6)]: x = torch.Size(sizes) for i in range(0, 5): self.assertEqual(x[i], i + 1) def test_t_not_2d_error(self): self.assertRaises(RuntimeError, lambda: torch.randn(2, 3, 4).t()) self.assertRaises(RuntimeError, lambda: torch.randn(2, 3, 4).t_()) # skip this test for now as it affects all tests @unittest.skipIf(True, "flush_denormal not supported") def test_set_flush_denormal(self): tiny_float = 1e-42 tiny_double = 1e-320 float_tensor = torch.FloatTensor([1.0, tiny_float]) double_tensor = torch.DoubleTensor([1.0, tiny_float, tiny_double]) self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0) self.assertEqual(float_tensor[1], tiny_float, atol=tiny_float / 16, rtol=0) self.assertEqual(double_tensor[0], 1.0, atol=0.0, rtol=0) self.assertEqual(double_tensor[1], tiny_float, atol=0.0, rtol=0) self.assertEqual(double_tensor[2], tiny_double, atol=0.0, rtol=0) torch.set_flush_denormal(True) self.assertEqual(float_tensor[0], 1.0, atol=0.0, rtol=0) self.assertEqual(float_tensor[1], 0.0, atol=0.0, rtol=0) # tiny_float to zero self.assertEqual(double_tensor[0], 1.0, atol=0.0, rtol=0) # tiny_float is not converted to zero in double type self.assertEqual(double_tensor[1], tiny_float, atol=0.0, rtol=0) self.assertEqual(double_tensor[2], 0.0, atol=0.0, rtol=0) # tiny_double to zero torch.set_flush_denormal(False) def test_show_config(self): # We can't usefully test the output; just make sure this doesn't crash torch.__config__.show() @unittest.skipIf(IS_FBCODE, "CXX_FLAGS is only for OSS build.") def test_cxx_flags(self): torch.__config__._cxx_flags() def test_parallel_info(self): torch.__config__.parallel_info() @slowTest def test_slow_test(self): # Just a smoketest to make sure our slowTest decorator works. 
        pass

    def test_is_nonzero(self):
        with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
            torch.tensor([]).is_nonzero()
        with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
            torch.tensor([0, 0]).is_nonzero()
        self.assertFalse(torch.tensor(0).is_nonzero())
        self.assertTrue(torch.tensor(1).is_nonzero())
        self.assertFalse(torch.tensor([0]).is_nonzero())
        self.assertTrue(torch.tensor([1]).is_nonzero())
        self.assertFalse(torch.tensor([[0]]).is_nonzero())
        self.assertTrue(torch.tensor([[1]]).is_nonzero())
        self.assertTrue(torch.tensor(0.1).is_nonzero())
        self.assertTrue(torch.tensor(-0.1).is_nonzero())
        self.assertFalse(torch.tensor(0.0).is_nonzero())
        self.assertTrue(torch.tensor(True).is_nonzero())
        self.assertFalse(torch.tensor(False).is_nonzero())
        self.assertFalse(torch.tensor(0 + 0j).is_nonzero())
        self.assertTrue(torch.tensor(0 + 0.1j).is_nonzero())

    def test_assert_async(self):
        with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with no values is ambiguous"):
            torch._assert_async(torch.tensor([]))
        with self.assertRaisesRegex(RuntimeError, "Boolean value of Tensor with more than one value is ambiguous"):
            torch._assert_async(torch.tensor([0, 0]))
        with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
            torch._assert_async(torch.tensor(0))
        torch._assert_async(torch.tensor(1))
        torch._assert_async(torch.tensor(0.1))
        torch._assert_async(torch.tensor(-0.1))
        with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
            torch._assert_async(torch.tensor(0.0))
        torch._assert_async(torch.tensor(True))
        with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
            torch._assert_async(torch.tensor(False))
        torch._assert_async(torch.tensor(0 + 0.1j))
        with self.assertRaisesRegex(RuntimeError, "Expected Tensor with single nonzero value, but got zero"):
            torch._assert_async(torch.tensor(0 + 0j))

    # NB: we must not be built with CUDA; if we are built with CUDA but no CUDA
    # is available, we get a different error.
@unittest.skipIf(torch.backends.cuda.is_built() or IS_SANDCASTLE, "CUDA is built, can't test CUDA not built error") def test_cuda_not_built(self): msg = "Torch not compiled with CUDA enabled" self.assertRaisesRegex(AssertionError, msg, lambda: torch.cuda.current_device()) self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1], device="cuda")) self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1]).cuda()) self.assertRaisesRegex(TypeError, msg, lambda: torch.cuda.FloatTensor()) self.assertRaisesRegex(TypeError, msg, lambda: torch.set_default_tensor_type(torch.cuda.FloatTensor)) self.assertRaisesRegex(AssertionError, msg, lambda: torch.tensor([1]).to(device="cuda")) def test_has_internal_overlap(self): OVERLAP_NO = 0 OVERLAP_YES = 1 OVERLAP_TOO_HARD = 2 # Check for contiguous tensors a = torch.randn(3, 3) self.assertEqual(torch._debug_has_internal_overlap(a), OVERLAP_NO) # Checks for zero strides b = torch.randn(1, 3) b_expanded = b.expand(4, 3) self.assertEqual(torch._debug_has_internal_overlap(b_expanded), OVERLAP_YES) # Check for zero strided, size 1 axis, in non-contiguous storage (gh-33812) c = torch.randn(10).as_strided([2, 1, 5], [1, 0, 2]) self.assertEqual(torch._debug_has_internal_overlap(c), OVERLAP_NO) c = torch.randn(2, 1, 10)[::2].as_strided((2, 1, 5), (10, 0, 2)) self.assertEqual(torch._debug_has_internal_overlap(c), OVERLAP_TOO_HARD) def test_allow_tensor_metadata_change(self): def do_test(t): with self.assertRaisesRegex( RuntimeError, "set_sizes_contiguous is not allowed on a Tensor created from .data or .detach()"): t.resize_((2, 1)) with self.assertRaisesRegex( RuntimeError, "set_storage is not allowed on a Tensor created from .data or .detach()"): t.set_() with self.assertRaisesRegex( RuntimeError, "set_storage_offset is not allowed on a Tensor created from .data or .detach()"): t.set_(t.storage(), 0, t.size(), list(t.stride())) do_test(torch.tensor([[1, 2]]).data) do_test(torch.tensor([[1, 2]]).detach()) @skipIfNotRegistered("LayerNorm", "Skipping as LayerNorm is not registered") def test_c10_layer_norm(self): # test that we can call c10 ops and they return a reasonable result X = torch.rand(5, 5, dtype=torch.float) weight = torch.rand(*X.size()[1:], dtype=torch.float) bias = torch.rand(*X.size()[1:], dtype=torch.float) epsilon = 1e-4 expected_norm = torch.nn.functional.layer_norm( X, X.size()[1:], weight=weight, bias=bias, eps=epsilon) actual_norm, actual_mean, actual_stdev = \ torch.ops._caffe2.LayerNorm(torch.tensor(X), torch.tensor( weight), torch.tensor(bias), 1, epsilon, True) torch.testing.assert_close(expected_norm, actual_norm) def test_memory_format(self): def test_helper(x, memory_format): y = x.contiguous(memory_format=memory_format) self.assertFalse(y.is_contiguous()) self.assertTrue(y.is_contiguous(memory_format=memory_format)) self.assertEqual(y, x) test_helper(torch.randn(4, 3, 8, 8), torch.channels_last) test_helper(torch.randn(4, 3, 8, 8, 8), torch.channels_last_3d) def test_memory_format_contiguous_returns_same_tensor_if_already_satisfies(self): def test_helper(x, memory_format): alias = x.contiguous(memory_format=memory_format) alias.fill_(7) self.assertEqual(x, alias) test_helper(torch.randn(4, 8, 8, 3).permute(0, 3, 1, 2), torch.channels_last) test_helper(torch.randn(4, 8, 8, 8, 3).permute(0, 4, 1, 2, 3), torch.channels_last_3d) def test_memory_format_empty(self): def test_helper(dim1, dim2, memory_format): with self.assertRaises(RuntimeError): x = torch.empty(dim1, memory_format=memory_format) x = torch.empty(dim2, 
memory_format=memory_format) self.assertTrue(x.is_contiguous(memory_format=memory_format)) test_helper((3, 3), (3, 3, 3, 3), torch.channels_last) test_helper((3, 3, 3), (3, 3, 3, 3, 3), torch.channels_last_3d) def test_subclass_tensors(self): # raise an error when trying to subclass FloatTensor with self.assertRaisesRegex(TypeError, "type 'torch.FloatTensor' is not an acceptable base type"): class Foo1(torch.FloatTensor): pass # but allow subclassing Tensor: class Foo2(torch.Tensor): def foo(self): return 5 f = Foo2() self.assertEqual(f.foo(), 5) def test_ndim(self): a = torch.randn(1, 2, 3) self.assertEqual(3, a.ndim) b = torch.randn(()) self.assertEqual(0, b.ndim) c = torch.randn(1, 0) self.assertEqual(2, c.ndim) def test_fill_diagonal(self): a1 = torch.randn(7, 3) a2 = a1.clone() v = 1 for i in range(3): a2[i][i] = v a1.fill_diagonal_(v) self.assertEqual(a1, a2) b1 = torch.randn(7, 3) b2 = b1.clone() for i in range(3): b2[i][i] = v b2[i + 4][i] = v b1.fill_diagonal_(v, wrap=True) self.assertEqual(b1, b2) c1 = torch.rand(3, 3, 3) c2 = c1.clone() for i in range(3): c2[i][i][i] = v c1.fill_diagonal_(v) self.assertEqual(c1, c2) # non-contiguous tensor d1 = torch.rand(3, 3, 3)[:, 1, ...] d2 = d1.clone() for i in range(3): d2[i][i] = v d1.fill_diagonal_(v) self.assertEqual(d1, d2) e1 = torch.rand(7, 3, 3)[:, 1, ...] e2 = e1.clone() for i in range(3): e2[i][i] = v e2[i + 4][i] = v e1.fill_diagonal_(v, wrap=True) self.assertEqual(e1, e2) def test_setting_real_imag_to_a_number(self): x = torch.randn(4, dtype=torch.cfloat) x.real = 0 x.imag = 0 zeros = torch.zeros(4) self.assertEqual(x.real, zeros) self.assertEqual(x.imag, zeros) def test_batch_norm_cpu_inference(self): # input nchw in (2,1,1,1), (2,2,2,2) inputs = [ torch.tensor([[[[-0.5000]]], [[[0.5000]]]]), torch.tensor([ [ [[-0.5000, 0.5000], [-1.0000, 1.0000]], [[-0.2500, -0.5000], [0.2500, 0.5000]] ], [ [[0.1000, 1.0000], [1.0000, 0.1000]], [[1.0000, 0.5000], [1.5000, -1.5000]] ]])] # output nchw in (2,1,1,1), (2,2,2,2) outputs = [ torch.tensor([ [[[-0.499997496604919433593750000]]], [[[0.499997496604919433593750000]]]]), torch.tensor([ [[[-0.499997496604919433593750000, 0.499997496604919433593750000], [-0.999994993209838867187500000, 0.999994993209838867187500000]], [[-0.249998748302459716796875000, -0.499997496604919433593750000], [0.249998748302459716796875000, 0.499997496604919433593750000]]], [[[0.099999502301216125488281250, 0.999994993209838867187500000], [0.999994993209838867187500000, 0.099999502301216125488281250]], [[0.999994993209838867187500000, 0.499997496604919433593750000], [1.499992489814758300781250000, -1.499992489814758300781250000]]]])] for i in range(len(inputs)): for affine in [False, True]: m = torch.nn.BatchNorm2d(inputs[i].size()[1], 1e-05, 0.1, affine=affine) m.eval() # contiguous case input1 = inputs[i].contiguous() output1 = m(input1) # non-contiguous case input2 = input1.permute(0, 1, 3, 2) output2 = m(input2).permute(0, 1, 3, 2) # channels last case input3 = input1.contiguous(memory_format=torch.channels_last) output3 = m(input3) self.assertEqual(output3, outputs[i]) self.assertEqual(output3, output1) self.assertEqual(output3, output2) # FIXME: move these meta tests to their own test suite/class or # distribute them among the appropriate test suites for their ops def test_empty_meta(self): x = torch.empty(2 ** 20, 2 ** 20, device='meta') y = torch.empty(2 ** 20, device='meta') z = x + y self.assertEqual(z.size(), (2 ** 20, 2 ** 20)) self.assertRaises(RuntimeError, lambda: z[0][0].item()) def 
test_format_scalar_meta(self): x = torch.empty((), device='meta') self.assertEqual(format(x), repr(x)) def test_upsample_nearest1d_meta(self): # TODO: this test should be triggered by test_nn.py but right # now meta is not enabled (and even if it was, we are probably # missing too many meta functions to get through the test unmolested) # NB: Can't make the exponent too big, or it will overflow # signed 64-bit integer x = torch.empty(2 * 10 ** 8, 3, 2 * 10 ** 8, device='meta') z = torch.nn.functional.interpolate(x, scale_factor=2) self.assertEqual(z.size(), (2 * 10 ** 8, 3, 4 * 10 ** 8)) self.assertRaises(RuntimeError, lambda: z[0][0][0].item()) # TODO: the out tests cannot be triggered by test_nn.py because # we don't actually do out= arguments for nn functions, so there # is no public API by which to get the out version # interpolate doesn't seem to support out= # (not sure why passing None here doesn't work? How strange...) z = torch.empty(0, device='meta') torch._C._nn.upsample_nearest1d(x, (4 * 10 ** 8,), 2, out=z) self.assertEqual(z.size(), (2 * 10 ** 8, 3, 4 * 10 ** 8)) self.assertRaises(RuntimeError, lambda: z[0][0][0].item()) def test_upsample_nearest2d_meta(self): # TODO: the out tests cannot be triggered by test_nn.py because # we don't actually do out= arguments for nn functions, so there # is no public API by which to get the out version # Make sure we don't clobber strides of out tensor. NB: this # test must be done on 2d/3d, because 1d doesn't have any meaningful # layout support x = torch.empty(4, 3, 8, 8, device='meta') out = torch.empty(4, 3, 16, 16, device='meta', memory_format=torch.channels_last) torch._C._nn.upsample_nearest2d(x, (16, 16), out=out) self.assertTrue(out.is_contiguous(memory_format=torch.channels_last)) x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last) out = torch.empty(4, 3, 16, 16, device='meta') torch._C._nn.upsample_nearest2d(x, (16, 16), out=out) self.assertTrue(out.is_contiguous()) # But if resize occurs, do clobber x = torch.empty(4, 3, 8, 8, device='meta', memory_format=torch.channels_last) out = torch.empty(0, device='meta') torch._C._nn.upsample_nearest2d(x, (16, 16), out=out) self.assertTrue(out.is_contiguous(memory_format=torch.channels_last)) # Complain if out dtype mismatch x = torch.empty(4, 3, 8, 8, device='meta', dtype=torch.float) out = torch.empty(4, 3, 16, 16, device='meta', dtype=torch.double) self.assertExpectedRaisesInline( RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out), """Expected out tensor to have dtype float, but got double instead""" ) # Complain if out device mismatch x = torch.empty(0, 3, 8, 8, device='meta') out = torch.empty(0, 3, 16, 16, device='cpu') self.assertExpectedRaisesInline( RuntimeError, lambda: torch._C._nn.upsample_nearest2d(x, (16, 16), out=out), """Expected out tensor to have device meta, but got cpu instead""" ) def test_add_meta_scalar(self): # From https://github.com/pytorch/pytorch/issues/53815 x = torch.empty(2, device='meta') y = x + 2 self.assertEqual(y.size(), x.size()) def test_normal_shape(self): warned = False for device in get_all_device_types(): tensor1 = torch.rand(1, device=device) tensor4 = torch.rand(4, device=device) tensor120 = torch.rand(120, device=device) tensor2145 = torch.rand(2, 1, 4, 5, device=device) tensor2345 = torch.rand(2, 3, 4, 5, device=device) tensor2345_non_contiguous = torch.rand(2, 4, 3, 5, device=device).permute(0, 2, 1, 3) tensor2345_channels_last = tensor2345.contiguous(memory_format=torch.channels_last) 
output2345 = torch.zeros(2, 3, 4, 5, device=device) output345 = torch.zeros(3, 4, 5, device=device) # inputs have same size self.assertEqual(torch.normal(tensor2345, tensor2345).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(tensor2345_non_contiguous, tensor2345).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(tensor2345, tensor2345_channels_last).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(tensor2345_non_contiguous, tensor2345_channels_last).size(), (2, 3, 4, 5)) # scalar case self.assertEqual(torch.normal(tensor2345, 2).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(2, tensor2345).size(), (2, 3, 4, 5)) # inputs are expandable tensors self.assertEqual(torch.normal(tensor2345, tensor1).size(), (2, 3, 4, 5)) self.assertEqual(torch.normal(tensor2145, tensor2345).size(), (2, 3, 4, 5)) # inputs are non-expandable tensors, but they have same number of elements with self.assertRaisesRegex( RuntimeError, r"The size of tensor a \(120\) must match the size of " r"tensor b \(5\) at non-singleton dimension 3"): self.assertEqual(torch.normal(tensor120, tensor2345).size(), (120,)) with self.assertRaisesRegex( RuntimeError, r"The size of tensor a \(5\) must match the size of " r"tensor b \(120\) at non-singleton dimension 3"): self.assertEqual(torch.normal(tensor2345, tensor120).size(), (2, 3, 4, 5)) # inputs are non-expandable tensors and they don't have same number of elements with self.assertRaisesRegex( RuntimeError, r"The size of tensor a \(5\) must match the size of " r"tensor b \(4\) at non-singleton dimension 3"): torch.normal(tensor2345, tensor4) # output and inputs are size compatible self.assertEqual(torch.normal(tensor2345, tensor2345, out=output2345).size(), (2, 3, 4, 5)) # output and inputs are not size compatible with self.assertWarnsRegex( UserWarning, "This behavior is deprecated, and in a future PyTorch " "release outputs will not be resized unless they have " "zero elements"): self.assertEqual(torch.normal(tensor2345, tensor2145, out=output345).size(), (2, 3, 4, 5)) with self.assertRaisesRegex( RuntimeError, r"The size of tensor a \(5\) must match the size of " r"tensor b \(120\) at non-singleton dimension 3"): # inputs are not expandable, output size is not the same as mean torch.normal(tensor2345, tensor120, out=output345) def test_tensoriterator_output_setup(self): # Test whether the output's memory layout is correct def test_memory_layout(x, y, scale, zero_point, out): self.assertEqual(x.dim(), 4) self.assertEqual(x.size(), y.size()) self.assertEqual(y.size(), out.size()) shape = x.size() for n in range(shape[0]): for c in range(shape[1]): for h in range(shape[2]): for w in range(shape[3]): if scale is not None and zero_point is not None: self.assertEqual( out[n][c][h][w], torch.ops.quantized.add(x[n][c][h][w], y[n][c][h][w], scale, zero_point)) else: self.assertEqual(out[n][c][h][w], x[n][c][h][w] + y[n][c][h][w]) xraw = torch.rand(2, 3, 4, 4) yraw = torch.rand(2, 3, 4, 4) qxraw = torch.quantize_per_tensor(xraw, 0.1, 5, torch.quint8) qyraw = torch.quantize_per_tensor(yraw, 0.1, 5, torch.quint8) # contiguous case fast setup test_memory_layout(xraw, yraw, None, None, xraw + yraw) test_memory_layout(qxraw, qyraw, 0.1, 5, torch.ops.quantized.add(qxraw, qyraw, 0.1, 5)) # channels last case fast setup x = xraw.contiguous(memory_format=torch.channels_last) y = yraw.contiguous(memory_format=torch.channels_last) test_memory_layout(x, y, None, None, x + y) qx = qxraw.contiguous(memory_format=torch.channels_last) qy = qyraw.contiguous(memory_format=torch.channels_last) 
test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5)) # non contiguous case fast setup (dense, non-overlapping, same shape and strides) x = xraw.permute(0, 2, 3, 1) y = yraw.permute(0, 2, 3, 1) test_memory_layout(x, y, None, None, x + y) qx = qxraw.permute(0, 2, 3, 1) qy = qyraw.permute(0, 2, 3, 1) test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5)) # non contiguous case fast setup (dense, non-overlapping) # input tensors have same shape and strides # output tensor have same shape as input tensors but different stride # output tensor should preserve its strides in this case x = xraw.permute(0, 2, 3, 1) y = yraw.permute(0, 2, 3, 1) out = torch.empty_like(xraw) out = out.permute(0, 3, 2, 1) expected_stride = out.stride() test_memory_layout(x, y, None, None, torch.add(x, y, out=out)) self.assertEqual(expected_stride, out.stride()) # non contiguous case non fast setup x = xraw.permute(0, 2, 3, 1) y = yraw.permute(0, 3, 2, 1) test_memory_layout(x, y, None, None, x + y) qx = qxraw.permute(0, 2, 3, 1) qy = qyraw.permute(0, 3, 2, 1) test_memory_layout(qx, qy, 0.1, 5, torch.ops.quantized.add(qx, qy, 0.1, 5)) # Tests to make sure we still handle .data properly until it is removed def test_dot_data_use(self): # .data allows to change the Tensors types inplace, check that we still # raise a nice error. with self.assertRaisesRegex( RuntimeError, # message includes both Double and Long '(?=.*Double)(?=.*Long)'): # Calls model with a LongTensor input but DoubleTensor weights input = torch.randn(1, 1, 1, 6, dtype=torch.double) weight = torch.zeros(1, 1, 1, 3, dtype=torch.long) model = torch.nn.Conv2d(1, 1, (1, 3), stride=1, padding=0, bias=False) model.weight.data = weight out = model(input) def test_empty_storage_view(self): # we should be able to "modify" slices of a 0-element # array without an error being raised due to # trying to resize its storage t = torch.from_numpy(np.empty((0, 4))) t[:, 1::2] *= 1 def test_has_storage(self): self.assertIsNotNone(torch.tensor([]).storage()) self.assertIsNotNone(torch.empty(0).storage()) self.assertIsNotNone(torch.tensor([]).clone().storage()) self.assertIsNotNone(torch.tensor([0, 0, 0]).nonzero().storage()) self.assertIsNotNone(torch.tensor([]).new().storage()) # FIXME: Extend this test and put in a TensorProperties test class def test_numel(self): b = torch.ByteTensor(3, 100, 100) self.assertEqual(b.nelement(), 3 * 100 * 100) self.assertEqual(b.numel(), 3 * 100 * 100) # Verifies that (deep)copies of dtypes are the same objects def test_copy_dtypes(self): for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.bool): copied_dtype = copy.deepcopy(dtype) self.assertIs(dtype, copied_dtype) def test_dtype_is_signed(self): for dtype in all_types_and_complex_and(torch.half, torch.bfloat16, torch.half): self.assertEqual(dtype.is_signed, torch.is_signed(torch.tensor(0, dtype=dtype))) self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.quint8.is_signed) self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.qint8.is_signed) self.assertRaisesRegex(RuntimeError, 'not supported for quantized', lambda: torch.qint32.is_signed) # FIXME: Put the following random tests into their own test class or test suite def test_RNGState(self): state = torch.get_rng_state() stateCloned = state.clone() before = torch.rand(1000) self.assertEqual(state.ne(stateCloned).long().sum(), 0, atol=0, rtol=0) torch.set_rng_state(state) after = torch.rand(1000) self.assertEqual(before, 
after, atol=0, rtol=0) def test_RNGStateAliasing(self): # Fork the random number stream at this point gen = torch.Generator() gen.set_state(torch.get_rng_state()) self.assertEqual(gen.get_state(), torch.get_rng_state()) target_value = torch.rand(1000) # Dramatically alter the internal state of the main generator _ = torch.rand(100000) forked_value = torch.rand(1000, generator=gen) self.assertEqual(target_value, forked_value, atol=0, rtol=0, msg="RNG has not forked correctly.") def test_RNG_after_pickle(self): torch.random.manual_seed(100) before = torch.rand(10) torch.random.manual_seed(100) buf = io.BytesIO() tensor = torch.tensor([1, 2, 3]) ForkingPickler(buf, pickle.HIGHEST_PROTOCOL).dump(tensor) after = torch.rand(10) self.assertEqual(before, after, atol=0, rtol=0) def test_boxMullerState(self): torch.manual_seed(123) odd_number = 101 seeded = torch.randn(odd_number) state = torch.get_rng_state() midstream = torch.randn(odd_number) torch.set_rng_state(state) repeat_midstream = torch.randn(odd_number) torch.manual_seed(123) reseeded = torch.randn(odd_number) self.assertEqual(midstream, repeat_midstream, atol=0, rtol=0, msg='get_rng_state/set_rng_state not generating same sequence of normally distributed numbers') self.assertEqual(seeded, reseeded, atol=0, rtol=0, msg='repeated calls to manual_seed not generating same sequence of normally distributed numbers') def test_manual_seed(self): rng_state = torch.get_rng_state() torch.manual_seed(2) x = torch.randn(100) self.assertEqual(torch.initial_seed(), 2) torch.manual_seed(2) y = torch.randn(100) self.assertEqual(x, y) max_int64 = 0x7fff_ffff_ffff_ffff min_int64 = -max_int64 - 1 max_uint64 = 0xffff_ffff_ffff_ffff # Check all boundary cases of valid seed value inputs test_cases = [ # (seed, expected_initial_seed) # Positive seeds should be unchanged (max_int64, max_int64), (max_int64 + 1, max_int64 + 1), (max_uint64, max_uint64), (0, 0), # Negative seeds wrap around starting from the largest seed value (-1, max_uint64), (min_int64, max_int64 + 1) ] for seed, expected_initial_seed in test_cases: torch.manual_seed(seed) actual_initial_seed = torch.initial_seed() msg = "expected initial_seed() = %x after calling manual_seed(%x), but got %x instead" % ( expected_initial_seed, seed, actual_initial_seed) self.assertEqual(expected_initial_seed, actual_initial_seed, msg=msg) for invalid_seed in [min_int64 - 1, max_uint64 + 1]: with self.assertRaisesRegex(RuntimeError, r'Overflow when unpacking long'): torch.manual_seed(invalid_seed) torch.set_rng_state(rng_state) # FIXME: Describe this test and port to the generic device framework in a more # appropriate test suite for the copy operation def test_copy_transpose(self): x = torch.arange(100 * 100, dtype=torch.float).reshape(100, 100).t() y = torch.empty(100, 100, dtype=torch.float) y.copy_(x) self.assertEqual(y[:, 0], range(100)) self.assertEqual(y[:, 40], range(4000, 4100)) y = torch.empty(100, 100, dtype=torch.double) y.copy_(x) self.assertEqual(y[:, 0], range(100)) self.assertEqual(y[:, 40], range(4000, 4100)) # Validates regression reported in https://github.com/pytorch/pytorch/issues/45269 x = torch.arange(100 * 100).reshape(100, 100).to(dtype=torch.cfloat).t() y = torch.empty(100, 100, dtype=torch.cfloat) y.copy_(x) self.assertEqual(y[:, 0], range(100)) self.assertEqual(y[:, 40], range(4000, 4100)) x = torch.arange(100 * 100).reshape(100, 100).to(dtype=torch.complex32).t() y = torch.empty(100, 100, dtype=torch.complex32) y.copy_(x) self.assertEqual(y[:, 0], range(100)) self.assertEqual(y[:, 
40], range(4000, 4100)) # FIXME: Port to a more appropriate test suite def test_copy_broadcast(self): torch.zeros(5, 6).copy_(torch.zeros(6)) self.assertRaises(RuntimeError, lambda: torch.zeros(5, 6).copy_(torch.zeros(30))) # FIXME: Port to a more appropriate test suite def test_copy_many_to_one(self): # Testing in-place copy where it attempt to write from many memory # storage to a single storage would cause RuntimeError to be thrown self.assertRaises(RuntimeError, lambda: torch.zeros(1, 6).expand(5, 6).copy_(torch.zeros(5, 6))) # FIXME: Port to a more appropriate test suite def _test_to_with_layout(self, layout): def test_copy_behavior(t, non_blocking=False): self.assertIs(t, t.to(t, non_blocking=non_blocking)) self.assertIs(t, t.to(t.dtype, non_blocking=non_blocking)) self.assertIs(t, t.to(torch.empty_like(t), non_blocking=non_blocking)) self.assertIsNot(t, t.to(t, non_blocking=non_blocking, copy=True)) self.assertIsNot(t, t.to(t.dtype, non_blocking=non_blocking, copy=True)) self.assertIsNot(t, t.to(torch.empty_like(t), non_blocking=non_blocking, copy=True)) devices = [t.device] if t.device.type == 'cuda': if t.device.index == -1: devices.append('cuda:{}'.format(torch.cuda.current_device())) elif t.device.index == torch.cuda.current_device(): devices.append('cuda') for device in devices: self.assertIs(t, t.to(device, non_blocking=non_blocking)) self.assertIs(t, t.to(device, t.dtype, non_blocking=non_blocking)) self.assertIsNot(t, t.to(device, non_blocking=non_blocking, copy=True)) self.assertIsNot(t, t.to(device, t.dtype, non_blocking=non_blocking, copy=True)) a = torch.tensor(5) if layout == torch.sparse_csr: a = torch.tensor([[0, 1, 2], [2, 0, 3]]).to_sparse_csr() test_copy_behavior(a) self.assertEqual(a.device, a.to('cpu').device) self.assertEqual(a.device, a.to('cpu', dtype=torch.float32).device) self.assertIs(torch.float32, a.to('cpu', dtype=torch.float32).dtype) self.assertEqual(a.device, a.to(torch.float32).device) self.assertIs(torch.float32, a.to(dtype=torch.float32).dtype) def test_data_ptr(getter): self.assertEqual(getter(a), getter(a.to('cpu'))) self.assertEqual(getter(a), getter(a.to(dtype=a.dtype, device=a.device, copy=False))) self.assertEqual(getter(a), getter(a.to('cpu', copy=False))) self.assertNotEqual(getter(a), getter(a.to('cpu', copy=True))) if layout == torch.sparse_csr: # TODO: compressed sparse tensors currently don't support data_ptr. # Exercising failure will allow us to widen coverage of this test once it does. with self.assertRaisesRegex(RuntimeError, "Cannot access data pointer of Tensor that doesn't have storage"): a.data_ptr() # While compressed sparse tensors don't have a concept of data_ptr # the underlying tensors do. The implementation of to appropriately forwards # the call to the components, which is what we're test here. 
test_data_ptr(lambda a: a.values().data_ptr()) test_data_ptr(lambda a: a.crow_indices().data_ptr()) test_data_ptr(lambda a: a.col_indices().data_ptr()) else: test_data_ptr(lambda a: a.data_ptr()) if torch.cuda.is_available(): for non_blocking in [True, False]: for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']: b = torch.tensor(5., device=cuda) test_copy_behavior(b, non_blocking) self.assertEqual(b.device, b.to(cuda, non_blocking=non_blocking).device) self.assertEqual(a.device, b.to('cpu', non_blocking=non_blocking).device) self.assertEqual(b.device, a.to(cuda, non_blocking=non_blocking).device) self.assertIs(torch.int32, b.to('cpu', dtype=torch.int32, non_blocking=non_blocking).dtype) self.assertEqual(a.device, b.to('cpu', dtype=torch.int32, non_blocking=non_blocking).device) self.assertIs(torch.int32, b.to(dtype=torch.int32).dtype) self.assertEqual(b.device, b.to(dtype=torch.int32).device) def test_to(self): self._test_to_with_layout(torch.strided) is_cuda10_2_or_higher = ( (torch.version.cuda is not None) and ([int(x) for x in torch.version.cuda.split(".")] >= [10, 2])) if is_cuda10_2_or_higher: # in cuda10_1 sparse_csr is beta self._test_to_with_layout(torch.sparse_csr) # FIXME: describe this test def test_as_subclass(self): class SubTensor(torch.Tensor): member_var = object() t0 = torch.tensor(0) t1 = torch.tensor([1, 2]) t2 = torch.tensor([[3, 4], [5, 6]]) s0 = t0.as_subclass(SubTensor) s1 = t1.as_subclass(SubTensor) s2 = t2.as_subclass(SubTensor) # Check that the correct type is returned. self.assertTrue(type(s0) is SubTensor) self.assertTrue(type(s1) is SubTensor) self.assertTrue(type(s2) is SubTensor) # Check that the data is equal. self.assertEqual(t0, s0) self.assertEqual(t1, s1) self.assertEqual(t2, s2) t0[()] = 1 t1[1] = 3 t2[1, 1] = 7 # Check that the data is equal even after modification. self.assertEqual(t0, s0) self.assertEqual(t1, s1) self.assertEqual(t2, s2) # Check that member variables are passed through. self.assertTrue(s0.member_var is SubTensor.member_var) self.assertTrue(s1.member_var is SubTensor.member_var) self.assertTrue(s2.member_var is SubTensor.member_var) # Test that autograd is propagated. t = torch.tensor(5, dtype=torch.float32, requires_grad=True) # Run a calculation on the tensor. exp_t = torch.exp(t) # Cast exp_t to a subclass. exp_s = exp_t.as_subclass(SubTensor) # Make sure that t.grad was initially None self.assertTrue(t.grad is None) # Run the autograd calculation. exp_s.backward() # Make sure autograd was propagated to the original tensor # declared with requires_grad. 
self.assertTrue(t.grad is not None) # Make sure invalid subclasses raise nice errors class BadSubTensor(): member_var = object() err_msg = "Creating a Tensor subclass from a class that does not inherit from Tensor" with self.assertRaisesRegex(RuntimeError, err_msg): s0 = t0.as_subclass(BadSubTensor) # FIXME: Port to a test suite that better fits slicing def test_slice(self): empty = torch.empty(0, 4) x = torch.arange(0., 16).view(4, 4) self.assertEqual(x[:], x) self.assertEqual(x[:4], x) # start and stop are clamped to the size of dim self.assertEqual(x[:5], x) # if start >= stop then the result is empty self.assertEqual(x[2:1], empty) self.assertEqual(x[2:2], empty) # out of bounds is also empty self.assertEqual(x[10:12], empty) # additional correctness checks self.assertEqual(x[:1].tolist(), [[0, 1, 2, 3]]) self.assertEqual(x[:-3].tolist(), [[0, 1, 2, 3]]) self.assertEqual(x[:, -2:3].tolist(), [[2], [6], [10], [14]]) self.assertEqual(x[0:-1:2].tolist(), [[0, 1, 2, 3], [8, 9, 10, 11]]) def test_type(self): x = torch.randn(3, 3).double() self.assertEqual(x.type('torch.FloatTensor').dtype, torch.float32) self.assertEqual(x.type(torch.FloatTensor).dtype, torch.float32) self.assertEqual(x.int().type(torch.Tensor).dtype, torch.get_default_dtype()) self.assertEqual(x.type(torch.int32).dtype, torch.int32) # FIXME: port to a quantization test suite def test_qengine(self): qengines = torch.backends.quantized.supported_engines original_qe = torch.backends.quantized.engine for qe in qengines: torch.backends.quantized.engine = qe assert torch.backends.quantized.engine == qe, 'qengine not set successfully' torch.backends.quantized.engine = original_qe # FIXME: port to a distributed test suite -- also... how could this be OOMing on Windows CUDA? @slowTest @unittest.skipIf(NO_MULTIPROCESSING_SPAWN, "Disabled for environments that \ don't support multiprocessing with spawn start method") @unittest.skipIf(IS_WINDOWS, 'FIXME: CUDA OOM error on Windows') def test_multinomial_invalid_probs(self): def _spawn_method(self, method, arg): try: mp.set_start_method('spawn') except RuntimeError: pass with mp.Pool(1) as pool: out: list = pool.map(method, [arg]) self.assertTrue(out[0]) def _test_multinomial_invalid_probs(probs): try: # n_sample = 1 is a special case, test n_sample=2 which is more general torch.multinomial(probs.to('cpu'), 2) return False # Should not be reached except RuntimeError as e: return 'probability tensor contains either `inf`, `nan` or element < 0' in str(e) _spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., -1., 1.])) _spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., inf, 1.])) _spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., -inf, 1.])) _spawn_method(_test_multinomial_invalid_probs, torch.tensor([1., 1., nan])) # FIXME: port to more appropriate test suite def test_to_with_tensor(self): a = torch.tensor(5) self.assertEqual(a.device, a.to(a).device) if torch.cuda.is_available(): for non_blocking in [True, False]: for cuda in ['cuda', 'cuda:0' if torch.cuda.device_count() == 1 else 'cuda:1']: b = torch.tensor(5., device=cuda) self.assertEqual(b.device, b.to(b, non_blocking=non_blocking).device) self.assertEqual(a.device, b.to(a, non_blocking=non_blocking).device) self.assertEqual(b.device, a.to(b, non_blocking=non_blocking).device) def test_device(self): cpu = torch.device('cpu') self.assertEqual('cpu', str(cpu)) self.assertEqual('cpu', cpu.type) self.assertEqual(None, cpu.index) cpu0 = torch.device('cpu:0') self.assertEqual('cpu:0', str(cpu0)) 
self.assertEqual('cpu', cpu0.type) self.assertEqual(0, cpu0.index) cpu0 = torch.device('cpu', 0) self.assertEqual('cpu:0', str(cpu0)) self.assertEqual('cpu', cpu0.type) self.assertEqual(0, cpu0.index) cuda = torch.device('cuda') self.assertEqual('cuda', str(cuda)) self.assertEqual('cuda', cuda.type) self.assertEqual(None, cuda.index) cuda1 = torch.device('cuda:1') self.assertEqual('cuda:1', str(cuda1)) self.assertEqual('cuda', cuda1.type) self.assertEqual(1, cuda1.index) cuda1 = torch.device('cuda', 1) self.assertEqual('cuda:1', str(cuda1)) self.assertEqual('cuda', cuda1.type) self.assertEqual(1, cuda1.index) cuda90 = torch.device('cuda', 90) self.assertEqual('cuda:90', str(cuda90)) self.assertEqual('cuda', cuda90.type) self.assertEqual(90, cuda90.index) self.assertRaises(RuntimeError, lambda: torch.device('cpu:-1')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:-1')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 ')) self.assertRaises(RuntimeError, lambda: torch.device('cuda: 2')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 2')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2.')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2?')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:?2')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2.232')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2 cuda:3')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2+cuda:3')) self.assertRaises(RuntimeError, lambda: torch.device('cuda:2cuda:3')) self.assertRaises(RuntimeError, lambda: torch.device(-1)) self.assertRaises(RuntimeError, lambda: torch.device('other')) self.assertRaises(RuntimeError, lambda: torch.device('other:0')) device_set = {'cpu', 'cpu:0', 'cuda', 'cuda:0', 'cuda:1', 'cuda:10', 'cuda:100'} device_hash_set = set() for device in list(device_set): device_hash_set.add(hash(torch.device(device))) self.assertEqual(len(device_set), len(device_hash_set)) def get_expected_device_repr(device): if device.index is not None: return "device(type='{type}', index={index})".format( type=device.type, index=device.index) return "device(type='{type}')".format(type=device.type) for device in device_set: dev = torch.device(device) self.assertEqual(repr(dev), get_expected_device_repr(dev)) # Tests that the use_deterministic_flag can be set as expected @wrapDeterministicFlagAPITest def test_deterministic_flag(self): for deterministic, warn_only in product([True, False], [True, False]): torch.use_deterministic_algorithms(deterministic, warn_only=warn_only) self.assertEqual(deterministic, torch.are_deterministic_algorithms_enabled()) self.assertEqual(warn_only, torch.is_deterministic_algorithms_warn_only_enabled()) if deterministic: if warn_only: debug_mode = 1 else: debug_mode = 2 else: debug_mode = 0 self.assertEqual(debug_mode, torch.get_deterministic_debug_mode()) for debug_mode in [0, 1, 2]: torch.set_deterministic_debug_mode(debug_mode) self.assertEqual(debug_mode, torch.get_deterministic_debug_mode()) deterministic = debug_mode in [1, 2] warn_only = debug_mode == 1 self.assertEqual(deterministic, torch.are_deterministic_algorithms_enabled()) self.assertEqual(warn_only, torch.is_deterministic_algorithms_warn_only_enabled()) for debug_mode, debug_mode_str in [(0, 'default'), (1, 'warn'), (2, 'error')]: torch.set_deterministic_debug_mode(debug_mode_str) self.assertEqual(debug_mode, torch.get_deterministic_debug_mode()) with 
self.assertRaisesRegex( TypeError, r"_set_deterministic_algorithms\(\): argument 'mode' \(position 1\) must be bool, not int"): torch.use_deterministic_algorithms(1) with self.assertRaisesRegex( TypeError, r"_set_deterministic_algorithms\(\): argument 'warn_only' must be bool, not int"): torch.use_deterministic_algorithms(False, warn_only=1) def test_type_conversion_via_dtype_name(self): x = torch.tensor([1]) self.assertEqual(x.byte().dtype, torch.uint8) self.assertEqual(x.bool().dtype, torch.bool) self.assertEqual(x.char().dtype, torch.int8) self.assertEqual(x.double().dtype, torch.float64) self.assertEqual(x.float().dtype, torch.float32) self.assertEqual(x.half().dtype, torch.float16) self.assertEqual(x.int().dtype, torch.int32) self.assertEqual(x.bfloat16().dtype, torch.bfloat16) cfloat = x.cfloat() self.assertEqual(cfloat.dtype, torch.complex64) self.assertEqual(cfloat.real, x.float()) self.assertEqual(cfloat.imag, torch.zeros_like(cfloat.imag)) cdouble = x.cdouble() self.assertEqual(cdouble.dtype, torch.complex128) self.assertEqual(cdouble.real, x.double()) self.assertEqual(cdouble.imag, torch.zeros_like(cdouble.imag)) chalf = x.chalf() self.assertEqual(chalf.dtype, torch.complex32) self.assertEqual(chalf.real, x.half()) self.assertEqual(chalf.imag, torch.zeros_like(chalf.imag)) def test_type_alias(self): type_alias_map = {torch.float64: torch.double, torch.float32: torch.float, torch.int32: torch.int, torch.int64: torch.long, torch.int16: torch.short, torch.float16: torch.half, torch.complex32: torch.chalf, torch.complex64: torch.cfloat} for dtype, alias in type_alias_map.items(): self.assertIs(alias, dtype) # FIXME: Describe this test def test_doc_template(self) -> None: from torch._torch_docs import __file__ as doc_file from torch._torch_docs import multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args with open(doc_file, "r", encoding="utf-8") as f: doc_strs = f.read() for doc_str in re.findall(r'add_docstr\((.*?),.*?("""|\'\'\')(.*?)("""|\'\'\')\)', doc_strs, re.MULTILINE | re.DOTALL): for common_args in [multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args]: for k, v in common_args.items(): self.assertNotIn(v, doc_str[2], 'The argument description "{}" in {} can be ' 'replaced by {{{}}}'.format(v, doc_str[0], k)) def test_doc(self): checked_types = (types.MethodType, types.FunctionType, types.BuiltinFunctionType, types.BuiltinMethodType) def _test_namespace(ns, *skips): if isinstance(ns, object): ns_name = ns.__class__.__name__ else: ns_name = ns.__name__ skip_regexes = [] for r in skips: if isinstance(r, string_classes): skip_regexes.append(re.compile('^{}$'.format(re.escape(r)))) else: skip_regexes.append(r) for name in dir(ns): if name.startswith('_'): continue if name in ['real', 'imag']: y = torch.randn(1, dtype=torch.cfloat) var = getattr(y, name) elif name in ["H", "mT", "mH"]: y = torch.randn(1, 1) var = getattr(y, name) else: var = getattr(ns, name) if not isinstance(var, checked_types): continue doc = var.__doc__ has_doc = doc is not None and len(doc.strip()) > 0 full_name = ns_name + '.' + name if any(r.match(name) for r in skip_regexes): self.assertFalse(has_doc, 'New docs have been added for {}, please remove ' 'it from the skipped list in TestTorch.test_doc'.format(full_name)) else: self.assertTrue(has_doc, '{} is missing documentation'.format(full_name)) # FIXME: All of the following should be marked as expected failures # so that it is easier to tell when missing has been added. 
# FIXME: fix all the skipped ones below! test_namespace(torch.randn(1), 'as_strided_', re.compile('^clamp_(min|max)_?$'), 'is_distributed', 'is_nonzero', 'is_same_size', 'log_softmax', 'map2_', 'new', 'reinforce', 'relu', 'relu_', 'prelu', 'resize', 'resize_as', 'softmax', 'split_with_sizes', 'unsafe_split_with_sizes', '_autocast_to_fp16', '_autocast_to_fp32', ) test_namespace(torch.nn) test_namespace(torch.nn.functional, 'assert_int_or_pair') # TODO: add torch.* tests when we have proper namespacing on ATen functions # test_namespace(torch) # FIXME: deprecate torch.Tensor constructor def test_tensor_ctor_scalar(self): x = torch.Tensor(torch.tensor(1.0)) self.assertEqual(x, torch.tensor(1.0)) def test_deepcopy_gradient(self): from copy import deepcopy a = torch.zeros(10) a.grad = torch.ones(10) self.assertEqual(a.grad, deepcopy(a).grad) s = torch.zeros(10).to_sparse() s.grad = torch.ones(10).to_sparse() self.assertEqual(s.grad, deepcopy(s).grad) # ensure sharing is not broken c = deepcopy([a, a.grad]) self.assertTrue(c[0].grad is c[1]) def test_tensor_base_init(self): # Direct construction not OK self.assertRaises(RuntimeError, lambda: torch._C._TensorBase()) # But construction of subclass is OK class T(torch._C._TensorBase): pass T() def test_tensor_base_new(self): # OK to call super().__new__, see # https://github.com/pytorch/pytorch/issues/57421 class TestTensor(torch._C._TensorBase): @staticmethod def __new__(cls, x, *args, **kwargs): return super().__new__(cls, x, *args, **kwargs) x = torch.ones(5) test_tensor = TestTensor(x) def test_pyobj_preserved(self): x = torch.empty(2) x.foo = 2 # put something on __dict__ y = torch.empty(2) y.grad = x del x # x is dead in Python self.assertEqual(y.grad.foo, 2) z = y.grad # it's live del z # it's dead again self.assertEqual(y.grad.foo, 2) def test_subclass_preserved(self): class MyTensor(torch.Tensor): pass x = MyTensor(torch.empty(2)) y = torch.empty(2) y.grad = x del x # x is dead in Python self.assertEqual(type(y.grad), MyTensor) z = y.grad # it's live del z # it's dead again self.assertEqual(type(y.grad), MyTensor) def test_tensor_slot_dealloc(self): class SlotTensor1(torch._C._TensorBase): __slots__ = ['slot1'] class SlotTensor2(SlotTensor1): __slots__ = ['slot2'] m1, t1 = Tracker.make() m2, t2 = Tracker.make() slot_tensor = SlotTensor2(torch.empty(2)) slot_tensor.slot1 = t1 slot_tensor.slot2 = t2 del t1 del t2 self.assertFalse(m1[0]) self.assertFalse(m2[0]) del slot_tensor self.assertTrue(m1[0]) self.assertTrue(m2[0]) def test_tensor_dict_dealloc(self): m, t = Tracker.make() x = torch.empty(2) x.arf = t del t self.assertFalse(m[0]) del x self.assertTrue(m[0]) def test_tensor_finalizer_dealloc(self): m = [False] class FinalizerTensor(torch._C._TensorBase): def __del__(self): m[0] = True fin_tensor = FinalizerTensor(torch.empty(2)) self.assertFalse(m[0]) del fin_tensor self.assertTrue(m[0]) def test_tensor_weakref_dealloc(self): x = torch.empty(2) m = [False] def cb(r): m[0] = True wref = weakref.ref(x, cb) del x self.assertTrue(m[0]) self.assertEqual(wref(), None) def test_tensor_cycle_via_dict(self): m1, t1 = Tracker.make() x = torch.empty(2) x._tracker = t1 del t1 m2, t2 = Tracker.make() y = torch.empty(2) y._tracker = t2 del t2 x._loop = y y._loop = x # C++ reference should keep the cycle live! 
# This exercise THPVariable_subtype_traverse # NB: Because z.grad is a reference done entirely in C++, cycles # involving it directly are NOT broken by Python GC; you've # set up a good old C++ reference cycle which we cannot safely # break (because C++ references are allowed to be accessed # multithreaded-ly) (TODO: except maybe if you can prove that # only Python has access to the C++ object, in which case you can # also prove that no multithreaded access occurs) z = torch.empty(2) z.grad = x del x del y gc.collect() self.assertFalse(m1[0]) self.assertFalse(m2[0]) with disable_gc(): del z self.assertFalse(m1[0]) self.assertFalse(m2[0]) gc.collect() self.assertTrue(m1[0]) self.assertTrue(m2[0]) def test_tensor_cycle_via_slots(self): m1 = [False] m2 = [False] class SlotTensor1(torch._C._TensorBase): __slots__ = ['slot1'] def __del__(self): m1[0] = True class SlotTensor2(SlotTensor1): __slots__ = ['slot2'] def __del__(self): m2[0] = True x = SlotTensor1(torch.empty(2)) y = SlotTensor2(torch.empty(2)) x.slot1 = y y.slot2 = x del x with disable_gc(): del y self.assertFalse(m1[0]) self.assertFalse(m2[0]) gc.collect() self.assertTrue(m1[0]) self.assertTrue(m2[0]) # FIXME: move to test_autograd? def test_backward_hooks_traverse(self): m1, t1 = Tracker.make() m2, t2 = Tracker.make() x = torch.empty(2, requires_grad=True) x._tracker = t1 y = torch.empty(2, requires_grad=True) y._tracker = t2 del t1 del t2 # this hits a special setter, it's not just a __dict__ entry x._backward_hooks = y y._backward_hooks = x del x with disable_gc(): del y self.assertFalse(m1[0]) self.assertFalse(m2[0]) gc.collect() self.assertTrue(m1[0]) self.assertTrue(m2[0]) def test_dead_weak_ref(self): x = torch.empty(2) w_x = weakref.ref(x) y = torch.empty(2) y.grad = x del x x = w_x() # Ideally, x would keep the tensor live. But CPython doesn't # provide enough hooks to do this. So it will go dead and x # will transmute into an undefined tensor. Not great, but the # best we can do. 
del y self.assertRaises(RuntimeError, lambda: x.sigmoid()) def test_resurrected_weak_ref(self): x = torch.empty(2) w_x = weakref.ref(x) y = torch.empty(2) y.grad = x del x x = w_x() # Use this to manually fix weak references after dereferencing them x._fix_weakref() del y x.sigmoid() # FIXME: move to test_linalg @torch.inference_mode() def test_bmm_multithreaded(self): device = 'cpu' num_threads = torch.get_num_threads() torch.set_num_threads(4) batch_sizes = [1, 10] M, N, O = 23, 8, 12 dtype = torch.float32 numpy_dtype = dtype def invert_perm(p): d = {x: i for i, x in enumerate(p)} return (d[0], d[1], d[2]) def generate_inputs(num_batches): # transposed tensors for perm1, perm2 in itertools.product(itertools.permutations((0, 1, 2)), repeat=2): b1 = make_tensor((num_batches, M, N), dtype=dtype, device=device, low=-1, high=1) b2 = make_tensor((num_batches, N, O), dtype=dtype, device=device, low=-1, high=1) b1 = b1.permute(perm1).contiguous().permute(invert_perm(perm1)) b2 = b2.permute(perm2).contiguous().permute(invert_perm(perm2)) yield b1, b2 # broadcasting tensors for b1, b2, b3, b4, b5, b6 in itertools.product((True, False), repeat=6): shape1 = (num_batches if b1 else 1, M if b2 else 1, N if b3 else 1) shape2 = (num_batches if b4 else 1, N if b5 else 1, O if b6 else 1) b1 = make_tensor(shape1, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, M, N) b2 = make_tensor(shape2, dtype=dtype, device=device, low=-1, high=1).expand(num_batches, N, O) yield b1, b2 # zero-sized tensors for z1, z2, z3, z4 in itertools.product((True, False), repeat=4): shape1 = (num_batches if z1 else 0, M if z2 else 0, N if z3 else 0) shape2 = (num_batches if z1 else 0, N if z3 else 0, O if z4 else 0) b1 = torch.randn(shape1, dtype=dtype, device=device) b2 = torch.randn(shape2, dtype=dtype, device=device) yield b1, b2 try: for num_batches in batch_sizes: for (b1, b2), perm3 in itertools.product(generate_inputs(num_batches), itertools.permutations((0, 1, 2))): res1 = torch.bmm(b1, b2) res2 = torch.full((num_batches, M, O), math.nan, dtype=dtype, device=device) \ .permute(perm3).contiguous().permute(invert_perm(perm3)) torch.bmm(b1, b2, out=res2) expect = torch.from_numpy( b1.to(numpy_dtype).cpu().numpy() @ b2.to(numpy_dtype).cpu().numpy()).to(device=device, dtype=dtype) self.assertEqual(expect, res1) self.assertEqual(expect, res2) finally: torch.set_num_threads(num_threads) def test_conj_neg_tolist(self): x = torch.randn(2, dtype=torch.cfloat) y1 = x.conj() y1_expect = x.conj_physical() y2 = y1.imag self.assertEqual(y1, y1_expect.tolist()) self.assertEqual(y2, y1_expect.imag.tolist()) # The following block extends TestTorch with negative dim wrapping tests # FIXME: replace these with OpInfo sample inputs or systemic OpInfo tests # Functions to test negative dimension wrapping METHOD = 1 INPLACE_METHOD = 2 FUNCTIONAL = 4 DIM_ARG = None def make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim=0): def neg_dim_test(self): if isinstance(tensor_arg, list): assert METHOD not in types and INPLACE_METHOD not in types x = [torch.randn(arg) for arg in tensor_arg] ndim = len(tensor_arg[-1]) else: x = torch.randn(*tensor_arg) ndim = len(tensor_arg) ndim += extra_dim n_dim_to_test = sum(e is DIM_ARG for e in arg_constr()) for dims_val in combinations(range(ndim), n_dim_to_test): arg = arg_constr() arg_neg = copy.deepcopy(arg) idx = 0 for i, v in enumerate(arg): if v is DIM_ARG: arg[i] = dims_val[idx] arg_neg[i] = dims_val[idx] - ndim idx += 1 if METHOD in types: a = getattr(x, name)(*arg) b = getattr(x, 
name)(*arg_neg) self.assertEqual(a, b) if INPLACE_METHOD in types: a = x.clone() getattr(a, name + '_')(*arg) b = x.clone() getattr(b, name + '_')(*arg_neg) self.assertEqual(a, b) if FUNCTIONAL in types: a = getattr(torch, name)(x, *arg) b = getattr(torch, name)(x, *arg_neg) self.assertEqual(a, b) return neg_dim_test def idx_tensor(size, max_val): return torch.LongTensor(*size).random_(0, max_val - 1) def add_neg_dim_tests(): neg_dim_tests = [ ('narrow', (10, 20, 30), lambda: [DIM_ARG, 0, 5], [METHOD]), ('transpose', (10, 20, 30), lambda: [DIM_ARG, DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]), ('size', (10, 20, 30), lambda: [DIM_ARG], [METHOD]), ('cat', [(2, 3, 4), (2, 3, 4)], lambda: [DIM_ARG], [FUNCTIONAL]), ('chunk', (10, 20, 30), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]), ('gather', (10, 20), lambda: [DIM_ARG, idx_tensor((10, 20), 10)], [METHOD, FUNCTIONAL]), ('index_select', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10)], [METHOD, FUNCTIONAL]), ('split', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]), ('squeeze', (10, 1, 20, 1), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL]), ('unbind', (2, 3, 4), lambda: [DIM_ARG], [FUNCTIONAL]), ('unsqueeze', (10, 20), lambda: [DIM_ARG], [METHOD, INPLACE_METHOD, FUNCTIONAL], 1), ('logcumsumexp', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('cumprod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('cumsum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('cummax', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('cummin', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('mean', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('median', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('nanmedian', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('mode', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('norm', (10, 20), lambda: [2, DIM_ARG], [METHOD, FUNCTIONAL]), ('prod', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('std', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('sum', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('var', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('kthvalue', (10, 20), lambda: [3, DIM_ARG], [METHOD, FUNCTIONAL]), ('max', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('min', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('sort', (10, 20), lambda: [DIM_ARG], [METHOD, FUNCTIONAL]), ('topk', (10, 20), lambda: [5, DIM_ARG], [METHOD, FUNCTIONAL]), ('renorm', (10, 20), lambda: [2, DIM_ARG, 1], [METHOD, INPLACE_METHOD, FUNCTIONAL]), ('index_add', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), torch.randn(10, 10)], [INPLACE_METHOD]), ('index_copy', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), torch.randn(10, 10)], [INPLACE_METHOD]), ('index_fill', (10, 10), lambda: [DIM_ARG, idx_tensor((10,), 10), 12], [INPLACE_METHOD]), ('scatter', (10, 10), lambda: [DIM_ARG, idx_tensor((10, 10), 10), torch.randn(10, 10)], [INPLACE_METHOD]), ('select', (10, 20), lambda: [DIM_ARG, 3], [METHOD]), ('unfold', (10, 20), lambda: [DIM_ARG, 5, 2], [METHOD]), ] for decl in neg_dim_tests: if len(decl) == 4: name, tensor_arg, arg_constr, types = decl extra_dim = 0 elif len(decl) == 5: name, tensor_arg, arg_constr, types, extra_dim = decl test_name = 'test_' + name + '_neg_dim' assert not hasattr(TestTorch, test_name), "Duplicated test name: " + test_name setattr(TestTorch, test_name, make_neg_dim_test(name, tensor_arg, arg_constr, types, extra_dim)) # TODO: these empy classes are temporarily instantiated for XLA compatibility # once XLA updates 
# their test suite it should be removed
class TestViewOps(TestCase):
    pass

class TestTensorDeviceOps(TestCase):
    pass

# Generates tests
# Note: test generation must be done at file scope, not within main, or
# pytest will fail.
add_neg_dim_tests()
instantiate_device_type_tests(TestViewOps, globals())
instantiate_device_type_tests(TestVitalSignsCuda, globals())
instantiate_device_type_tests(TestTensorDeviceOps, globals())
instantiate_device_type_tests(TestTorchDeviceType, globals())
instantiate_device_type_tests(TestDevicePrecision, globals(), except_for='cpu')

if __name__ == '__main__':
    run_tests()
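The generated *_neg_dim tests registered above all exercise the same invariant: a negative dim argument addresses the same axis as its wrapped positive counterpart (dim + ndim). A minimal standalone sketch of that property follows; it is an illustration only and not part of the test file above.

import torch

# For a 3-D tensor, dim=-1 wraps to dim=2 and dim=-2 wraps to dim=1,
# so the negative and wrapped-positive forms produce identical results.
x = torch.randn(2, 3, 4)
assert torch.equal(x.sum(dim=-1), x.sum(dim=x.dim() - 1))
assert torch.equal(x.transpose(-1, -2), x.transpose(2, 1))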
[]
[]
[ "TORCH_VITAL" ]
[]
["TORCH_VITAL"]
python
1
0
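The annotation fields above record one constant-string environment-variable access for this row (constarg = ["TORCH_VITAL"], constargcount = 1) and no variable-name accesses (variablearg = [], variableargcount = 0). Assuming the constant/variable split refers to whether the lookup key is a string literal at the call site, the following is a minimal sketch of the two access patterns; the read_flag helper is hypothetical and not taken from the file above.

import os

# Constant-string access: the key "TORCH_VITAL" is a literal, so a static
# analyzer can record it under constarg.
torch_vital = os.environ.get("TORCH_VITAL", "")

def read_flag(name):
    # Variable-name access: the key is a parameter rather than a literal,
    # which would be counted under variablearg instead (this row has none).
    return os.environ.get(name, "")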
vips.go
package bimg /* #cgo pkg-config: vips #include "vips.h" */ import "C" import ( "errors" "fmt" "math" "os" "runtime" "strings" "sync" "unsafe" ) // VipsVersion exposes the current libvips semantic version const VipsVersion = string(C.VIPS_VERSION) // VipsMajorVersion exposes the current libvips major version number const VipsMajorVersion = int(C.VIPS_MAJOR_VERSION) // VipsMinorVersion exposes the current libvips minor version number const VipsMinorVersion = int(C.VIPS_MINOR_VERSION) const ( maxCacheMem = 100 * 1024 * 1024 maxCacheSize = 500 ) var ( m sync.Mutex initialized bool ) // VipsMemoryInfo represents the memory stats provided by libvips. type VipsMemoryInfo struct { Memory int64 MemoryHighwater int64 Allocations int64 } // vipsSaveOptions represents the internal option used to talk with libvips. type vipsSaveOptions struct { Speed int Quality int Compression int Type ImageType Interlace bool NoProfile bool StripMetadata bool Lossless bool InputICC string // Absolute path to the input ICC profile OutputICC string // Absolute path to the output ICC profile Interpretation Interpretation Palette bool } type vipsWatermarkOptions struct { Width C.int DPI C.int Margin C.int NoReplicate C.int Opacity C.float Background [3]C.double } type vipsWatermarkImageOptions struct { Left C.int Top C.int Opacity C.float } type vipsWatermarkTextOptions struct { Text *C.char Font *C.char } func init() { Initialize() } // Initialize is used to explicitly start libvips in thread-safe way. // Only call this function if you have previously turned off libvips. func Initialize() { if C.VIPS_MAJOR_VERSION <= 7 && C.VIPS_MINOR_VERSION < 40 { panic("unsupported libvips version!") } m.Lock() runtime.LockOSThread() defer m.Unlock() defer runtime.UnlockOSThread() err := C.vips_init(C.CString("bimg")) if err != 0 { panic("unable to start vips!") } // Set libvips cache params C.vips_cache_set_max_mem(maxCacheMem) C.vips_cache_set_max(maxCacheSize) // Define a custom thread concurrency limit in libvips (this may generate thread-unsafe issues) // See: https://github.com/jcupitt/libvips/issues/261#issuecomment-92850414 if os.Getenv("VIPS_CONCURRENCY") == "" { C.vips_concurrency_set(1) } // Enable libvips cache tracing if os.Getenv("VIPS_TRACE") != "" { C.vips_enable_cache_set_trace() } initialized = true } // Shutdown is used to shutdown libvips in a thread-safe way. // You can call this to drop caches as well. // If libvips was already initialized, the function is no-op func Shutdown() { m.Lock() defer m.Unlock() if initialized { C.vips_shutdown() initialized = false } } // VipsCacheSetMaxMem Sets the maximum amount of tracked memory allowed before the vips operation cache // begins to drop entries. func VipsCacheSetMaxMem(maxCacheMem int) { C.vips_cache_set_max_mem(C.size_t(maxCacheMem)) } // VipsCacheSetMax sets the maximum number of operations to keep in the vips operation cache. func VipsCacheSetMax(maxCacheSize int) { C.vips_cache_set_max(C.int(maxCacheSize)) } // VipsCacheDropAll drops the vips operation cache, freeing the allocated memory. func VipsCacheDropAll() { C.vips_cache_drop_all() } // VipsDebugInfo outputs to stdout libvips collected data. Useful for debugging. func VipsDebugInfo() { C.im__print_all() } // VipsMemory gets memory info stats from libvips (cache size, memory allocs...) 
func VipsMemory() VipsMemoryInfo { return VipsMemoryInfo{ Memory: int64(C.vips_tracked_get_mem()), MemoryHighwater: int64(C.vips_tracked_get_mem_highwater()), Allocations: int64(C.vips_tracked_get_allocs()), } } // VipsIsTypeSupported returns true if the given image type // is supported by the current libvips compilation. func VipsIsTypeSupported(t ImageType) bool { if t == JPEG { return int(C.vips_type_find_bridge(C.JPEG)) != 0 } if t == WEBP { return int(C.vips_type_find_bridge(C.WEBP)) != 0 } if t == PNG { return int(C.vips_type_find_bridge(C.PNG)) != 0 } if t == GIF { return int(C.vips_type_find_bridge(C.GIF)) != 0 } if t == PDF { return int(C.vips_type_find_bridge(C.PDF)) != 0 } if t == SVG { return int(C.vips_type_find_bridge(C.SVG)) != 0 } if t == TIFF { return int(C.vips_type_find_bridge(C.TIFF)) != 0 } if t == MAGICK { return int(C.vips_type_find_bridge(C.MAGICK)) != 0 } if t == HEIF { return int(C.vips_type_find_bridge(C.HEIF)) != 0 } if t == AVIF { return int(C.vips_type_find_bridge(C.HEIF)) != 0 } return false } // VipsIsTypeSupportedSave returns true if the given image type // is supported by the current libvips compilation for the // save operation. func VipsIsTypeSupportedSave(t ImageType) bool { if t == JPEG { return int(C.vips_type_find_save_bridge(C.JPEG)) != 0 } if t == WEBP { return int(C.vips_type_find_save_bridge(C.WEBP)) != 0 } if t == PNG { return int(C.vips_type_find_save_bridge(C.PNG)) != 0 } if t == GIF { return int(C.vips_type_find_save_bridge(C.GIF)) != 0 } if t == TIFF { return int(C.vips_type_find_save_bridge(C.TIFF)) != 0 } if t == HEIF { return int(C.vips_type_find_save_bridge(C.HEIF)) != 0 } if t == AVIF { return int(C.vips_type_find_save_bridge(C.HEIF)) != 0 } return false } func vipsExifStringTag(image *C.VipsImage, tag string) string { return vipsExifShort(C.GoString(C.vips_exif_tag(image, C.CString(tag)))) } func vipsExifIntTag(image *C.VipsImage, tag string) int { return int(C.vips_exif_tag_to_int(image, C.CString(tag))) } func vipsExifOrientation(image *C.VipsImage) int { return int(C.vips_exif_orientation(image)) } func vipsExifShort(s string) string { if strings.Contains(s, " (") { return s[:strings.Index(s, "(")-1] } return s } func vipsHasAlpha(image *C.VipsImage) bool { return int(C.has_alpha_channel(image)) > 0 } func vipsHasProfile(image *C.VipsImage) bool { return int(C.has_profile_embed(image)) > 0 } func vipsWindowSize(name string) float64 { cname := C.CString(name) defer C.free(unsafe.Pointer(cname)) return float64(C.interpolator_window_size(cname)) } func vipsSpace(image *C.VipsImage) string { return C.GoString(C.vips_enum_nick_bridge(image)) } func vipsRotate(image *C.VipsImage, angle Angle) (*C.VipsImage, error) { var out *C.VipsImage defer C.g_object_unref(C.gpointer(image)) err := C.vips_rotate_bridge(image, &out, C.int(angle)) if err != 0 { return nil, catchVipsError() } return out, nil } func vipsAutoRotate(image *C.VipsImage) (*C.VipsImage, error) { var out *C.VipsImage defer C.g_object_unref(C.gpointer(image)) err := C.vips_autorot_bridge(image, &out) if err != 0 { return nil, catchVipsError() } return out, nil } func vipsTransformICC(image *C.VipsImage, inputICC string, outputICC string) (*C.VipsImage, error) { var out *C.VipsImage defer C.g_object_unref(C.gpointer(image)) outputIccPath := C.CString(outputICC) defer C.free(unsafe.Pointer(outputIccPath)) inputIccPath := C.CString(inputICC) defer C.free(unsafe.Pointer(inputIccPath)) err := C.vips_icc_transform_with_default_bridge(image, &out, outputIccPath, inputIccPath) //err := 
C.vips_icc_transform_bridge2(image, &outImage, outputIccPath, inputIccPath) if int(err) != 0 { return nil, catchVipsError() } return out, nil } func vipsFlip(image *C.VipsImage, direction Direction) (*C.VipsImage, error) { var out *C.VipsImage defer C.g_object_unref(C.gpointer(image)) err := C.vips_flip_bridge(image, &out, C.int(direction)) if err != 0 { return nil, catchVipsError() } return out, nil } func vipsZoom(image *C.VipsImage, zoom int) (*C.VipsImage, error) { var out *C.VipsImage defer C.g_object_unref(C.gpointer(image)) err := C.vips_zoom_bridge(image, &out, C.int(zoom), C.int(zoom)) if err != 0 { return nil, catchVipsError() } return out, nil } func vipsWatermark(image *C.VipsImage, w Watermark) (*C.VipsImage, error) { var out *C.VipsImage // Defaults noReplicate := 0 if w.NoReplicate { noReplicate = 1 } text := C.CString(w.Text) font := C.CString(w.Font) background := [3]C.double{C.double(w.Background.R), C.double(w.Background.G), C.double(w.Background.B)} textOpts := vipsWatermarkTextOptions{text, font} opts := vipsWatermarkOptions{C.int(w.Width), C.int(w.DPI), C.int(w.Margin), C.int(noReplicate), C.float(w.Opacity), background} defer C.free(unsafe.Pointer(text)) defer C.free(unsafe.Pointer(font)) err := C.vips_watermark(image, &out, (*C.WatermarkTextOptions)(unsafe.Pointer(&textOpts)), (*C.WatermarkOptions)(unsafe.Pointer(&opts))) if err != 0 { return nil, catchVipsError() } return out, nil } func vipsRead(buf []byte) (*C.VipsImage, ImageType, error) { var image *C.VipsImage imageType := vipsImageType(buf) if imageType == UNKNOWN { return nil, UNKNOWN, errors.New("Unsupported image format") } length := C.size_t(len(buf)) imageBuf := unsafe.Pointer(&buf[0]) err := C.vips_init_image(imageBuf, length, C.int(imageType), &image) if err != 0 { return nil, UNKNOWN, catchVipsError() } return image, imageType, nil } func vipsColourspaceIsSupportedBuffer(buf []byte) (bool, error) { image, _, err := vipsRead(buf) if err != nil { return false, err } C.g_object_unref(C.gpointer(image)) return vipsColourspaceIsSupported(image), nil } func vipsColourspaceIsSupported(image *C.VipsImage) bool { return int(C.vips_colourspace_issupported_bridge(image)) == 1 } func vipsInterpretationBuffer(buf []byte) (Interpretation, error) { image, _, err := vipsRead(buf) if err != nil { return InterpretationError, err } interp := vipsInterpretation(image) C.g_object_unref(C.gpointer(image)) return interp, nil } func vipsInterpretation(image *C.VipsImage) Interpretation { return Interpretation(C.vips_image_guess_interpretation_bridge(image)) } func vipsFlattenBackground(image *C.VipsImage, background Color) (*C.VipsImage, error) { var outImage *C.VipsImage backgroundC := [3]C.double{ C.double(background.R), C.double(background.G), C.double(background.B), } if vipsHasAlpha(image) { err := C.vips_flatten_background_brigde(image, &outImage, backgroundC[0], backgroundC[1], backgroundC[2]) if int(err) != 0 { return nil, catchVipsError() } C.g_object_unref(C.gpointer(image)) image = outImage } return image, nil } func vipsPreSave(image *C.VipsImage, o *vipsSaveOptions) (*C.VipsImage, error) { var outImage *C.VipsImage // Remove ICC profile metadata if o.NoProfile { C.remove_profile(image) } // Use a default interpretation and cast it to C type if o.Interpretation == 0 { o.Interpretation = InterpretationSRGB } interpretation := C.VipsInterpretation(o.Interpretation) // Apply the proper colour space if vipsColourspaceIsSupported(image) { err := C.vips_colourspace_bridge(image, &outImage, interpretation) if int(err) 
!= 0 { return nil, catchVipsError() } image = outImage } if o.OutputICC != "" && o.InputICC != "" { outputIccPath := C.CString(o.OutputICC) defer C.free(unsafe.Pointer(outputIccPath)) inputIccPath := C.CString(o.InputICC) defer C.free(unsafe.Pointer(inputIccPath)) err := C.vips_icc_transform_with_default_bridge(image, &outImage, outputIccPath, inputIccPath) if int(err) != 0 { return nil, catchVipsError() } C.g_object_unref(C.gpointer(image)) return outImage, nil } if o.OutputICC != "" && vipsHasProfile(image) { outputIccPath := C.CString(o.OutputICC) defer C.free(unsafe.Pointer(outputIccPath)) err := C.vips_icc_transform_bridge(image, &outImage, outputIccPath) if int(err) != 0 { return nil, catchVipsError() } C.g_object_unref(C.gpointer(image)) image = outImage } return image, nil } func vipsSave(image *C.VipsImage, o vipsSaveOptions) ([]byte, error) { defer C.g_object_unref(C.gpointer(image)) tmpImage, err := vipsPreSave(image, &o) if err != nil { return nil, err } // When an image has an unsupported color space, vipsPreSave // returns the pointer of the image passed to it unmodified. // When this occurs, we must take care to not dereference the // original image a second time; we may otherwise erroneously // free the object twice. if tmpImage != image { defer C.g_object_unref(C.gpointer(tmpImage)) } length := C.size_t(0) saveErr := C.int(0) interlace := C.int(boolToInt(o.Interlace)) quality := C.int(o.Quality) strip := C.int(boolToInt(o.StripMetadata)) lossless := C.int(boolToInt(o.Lossless)) palette := C.int(boolToInt(o.Palette)) speed := C.int(o.Speed) if o.Type != 0 && !IsTypeSupportedSave(o.Type) { return nil, fmt.Errorf("VIPS cannot save to %#v", ImageTypes[o.Type]) } var ptr unsafe.Pointer switch o.Type { case WEBP: saveErr = C.vips_webpsave_bridge(tmpImage, &ptr, &length, strip, quality, lossless) case PNG: saveErr = C.vips_pngsave_bridge(tmpImage, &ptr, &length, strip, C.int(o.Compression), quality, interlace, palette) case TIFF: saveErr = C.vips_tiffsave_bridge(tmpImage, &ptr, &length) case HEIF: saveErr = C.vips_heifsave_bridge(tmpImage, &ptr, &length, strip, quality, lossless) case AVIF: saveErr = C.vips_avifsave_bridge(tmpImage, &ptr, &length, strip, quality, lossless, speed) default: saveErr = C.vips_jpegsave_bridge(tmpImage, &ptr, &length, strip, quality, interlace) } if int(saveErr) != 0 { return nil, catchVipsError() } buf := C.GoBytes(ptr, C.int(length)) // Clean up C.g_free(C.gpointer(ptr)) C.vips_error_clear() return buf, nil } func getImageBuffer(image *C.VipsImage) ([]byte, error) { var ptr unsafe.Pointer length := C.size_t(0) interlace := C.int(0) quality := C.int(100) err := C.int(0) err = C.vips_jpegsave_bridge(image, &ptr, &length, 1, quality, interlace) if int(err) != 0 { return nil, catchVipsError() } defer C.g_free(C.gpointer(ptr)) defer C.vips_error_clear() return C.GoBytes(ptr, C.int(length)), nil } func vipsExtract(image *C.VipsImage, left, top, width, height int) (*C.VipsImage, error) { var buf *C.VipsImage defer C.g_object_unref(C.gpointer(image)) if width > MaxSize || height > MaxSize { return nil, errors.New("Maximum image size exceeded") } top, left = max(top), max(left) err := C.vips_extract_area_bridge(image, &buf, C.int(left), C.int(top), C.int(width), C.int(height)) if err != 0 { return nil, catchVipsError() } return buf, nil } func vipsSmartCrop(image *C.VipsImage, width, height int) (*C.VipsImage, error) { var buf *C.VipsImage defer C.g_object_unref(C.gpointer(image)) if width > MaxSize || height > MaxSize { return nil, errors.New("Maximum image 
size exceeded") } err := C.vips_smartcrop_bridge(image, &buf, C.int(width), C.int(height)) if err != 0 { return nil, catchVipsError() } return buf, nil } func vipsTrim(image *C.VipsImage, background Color, threshold float64) (int, int, int, int, error) { defer C.g_object_unref(C.gpointer(image)) top := C.int(0) left := C.int(0) width := C.int(0) height := C.int(0) err := C.vips_find_trim_bridge(image, &top, &left, &width, &height, C.double(background.R), C.double(background.G), C.double(background.B), C.double(threshold)) if err != 0 { return 0, 0, 0, 0, catchVipsError() } return int(top), int(left), int(width), int(height), nil } func vipsShrinkJpeg(buf []byte, input *C.VipsImage, shrink int) (*C.VipsImage, error) { var image *C.VipsImage var ptr = unsafe.Pointer(&buf[0]) defer C.g_object_unref(C.gpointer(input)) err := C.vips_jpegload_buffer_shrink(ptr, C.size_t(len(buf)), &image, C.int(shrink)) if err != 0 { return nil, catchVipsError() } return image, nil } func vipsShrinkWebp(buf []byte, input *C.VipsImage, shrink int) (*C.VipsImage, error) { var image *C.VipsImage var ptr = unsafe.Pointer(&buf[0]) defer C.g_object_unref(C.gpointer(input)) err := C.vips_webpload_buffer_shrink(ptr, C.size_t(len(buf)), &image, C.int(shrink)) if err != 0 { return nil, catchVipsError() } return image, nil } func vipsShrink(input *C.VipsImage, shrink int) (*C.VipsImage, error) { var image *C.VipsImage defer C.g_object_unref(C.gpointer(input)) err := C.vips_shrink_bridge(input, &image, C.double(float64(shrink)), C.double(float64(shrink))) if err != 0 { return nil, catchVipsError() } return image, nil } func vipsReduce(input *C.VipsImage, xshrink float64, yshrink float64) (*C.VipsImage, error) { var image *C.VipsImage defer C.g_object_unref(C.gpointer(input)) err := C.vips_reduce_bridge(input, &image, C.double(xshrink), C.double(yshrink)) if err != 0 { return nil, catchVipsError() } return image, nil } func vipsEmbed(input *C.VipsImage, left, top, width, height int, extend Extend, background Color) (*C.VipsImage, error) { var image *C.VipsImage // Max extend value, see: https://libvips.github.io/libvips/API/current/libvips-conversion.html#VipsExtend if extend > 5 { extend = ExtendBackground } defer C.g_object_unref(C.gpointer(input)) err := C.vips_embed_bridge(input, &image, C.int(left), C.int(top), C.int(width), C.int(height), C.int(extend), C.double(background.R), C.double(background.G), C.double(background.B)) if err != 0 { return nil, catchVipsError() } return image, nil } func vipsAffine(input *C.VipsImage, residualx, residualy float64, i Interpolator, extend Extend) (*C.VipsImage, error) { if extend > 5 { extend = ExtendBackground } var image *C.VipsImage cstring := C.CString(i.String()) interpolator := C.vips_interpolate_new(cstring) defer C.free(unsafe.Pointer(cstring)) defer C.g_object_unref(C.gpointer(input)) defer C.g_object_unref(C.gpointer(interpolator)) err := C.vips_affine_interpolator(input, &image, C.double(residualx), 0, 0, C.double(residualy), interpolator, C.int(extend)) if err != 0 { return nil, catchVipsError() } return image, nil } func vipsImageType(buf []byte) ImageType { if len(buf) < 12 { return UNKNOWN } if buf[0] == 0xFF && buf[1] == 0xD8 && buf[2] == 0xFF { return JPEG } if IsTypeSupported(GIF) && buf[0] == 0x47 && buf[1] == 0x49 && buf[2] == 0x46 { return GIF } if buf[0] == 0x89 && buf[1] == 0x50 && buf[2] == 0x4E && buf[3] == 0x47 { return PNG } if IsTypeSupported(TIFF) && ((buf[0] == 0x49 && buf[1] == 0x49 && buf[2] == 0x2A && buf[3] == 0x0) || (buf[0] == 0x4D && buf[1] == 
0x4D && buf[2] == 0x0 && buf[3] == 0x2A)) { return TIFF } if IsTypeSupported(PDF) && buf[0] == 0x25 && buf[1] == 0x50 && buf[2] == 0x44 && buf[3] == 0x46 { return PDF } if IsTypeSupported(WEBP) && buf[8] == 0x57 && buf[9] == 0x45 && buf[10] == 0x42 && buf[11] == 0x50 { return WEBP } if IsTypeSupported(SVG) && IsSVGImage(buf) { return SVG } if IsTypeSupported(MAGICK) && strings.HasSuffix(readImageType(buf), "MagickBuffer") { return MAGICK } // NOTE: libheif currently only supports heic sub types; see: // https://github.com/strukturag/libheif/issues/83#issuecomment-421427091 if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 && buf[8] == 0x68 && buf[9] == 0x65 && buf[10] == 0x69 && buf[11] == 0x63 { // This is a HEIC file, ftypheic return HEIF } if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 && buf[8] == 0x6d && buf[9] == 0x69 && buf[10] == 0x66 && buf[11] == 0x31 { // This is a HEIF file, ftypmif1 return HEIF } if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 && buf[8] == 0x6d && buf[9] == 0x73 && buf[10] == 0x66 && buf[11] == 0x31 { // This is a HEIFS file, ftypmsf1 return HEIF } if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 && buf[8] == 0x68 && buf[9] == 0x65 && buf[10] == 0x69 && buf[11] == 0x73 { // This is a HEIFS file, ftypheis return HEIF } if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 && buf[8] == 0x68 && buf[9] == 0x65 && buf[10] == 0x76 && buf[11] == 0x63 { // This is a HEIFS file, ftyphevc return HEIF } if IsTypeSupported(HEIF) && buf[4] == 0x66 && buf[5] == 0x74 && buf[6] == 0x79 && buf[7] == 0x70 && buf[8] == 0x61 && buf[9] == 0x76 && buf[10] == 0x69 && buf[11] == 0x66 { return AVIF } return UNKNOWN } func readImageType(buf []byte) string { length := C.size_t(len(buf)) imageBuf := unsafe.Pointer(&buf[0]) load := C.vips_foreign_find_load_buffer(imageBuf, length) return C.GoString(load) } func catchVipsError() error { s := C.GoString(C.vips_error_buffer()) C.vips_error_clear() C.vips_thread_shutdown() return errors.New(s) } func boolToInt(b bool) int { if b { return 1 } return 0 } func vipsGaussianBlur(image *C.VipsImage, o GaussianBlur) (*C.VipsImage, error) { var out *C.VipsImage defer C.g_object_unref(C.gpointer(image)) err := C.vips_gaussblur_bridge(image, &out, C.double(o.Sigma), C.double(o.MinAmpl)) if err != 0 { return nil, catchVipsError() } return out, nil } func vipsSharpen(image *C.VipsImage, o Sharpen) (*C.VipsImage, error) { var out *C.VipsImage defer C.g_object_unref(C.gpointer(image)) err := C.vips_sharpen_bridge(image, &out, C.int(o.Radius), C.double(o.X1), C.double(o.Y2), C.double(o.Y3), C.double(o.M1), C.double(o.M2)) if err != 0 { return nil, catchVipsError() } return out, nil } func max(x int) int { return int(math.Max(float64(x), 0)) } func vipsDrawWatermark(image *C.VipsImage, o WatermarkImage) (*C.VipsImage, error) { var out *C.VipsImage watermark, _, e := vipsRead(o.Buf) if e != nil { return nil, e } opts := vipsWatermarkImageOptions{C.int(o.Left), C.int(o.Top), C.float(o.Opacity)} err := C.vips_watermark_image(image, watermark, &out, (*C.WatermarkImageOptions)(unsafe.Pointer(&opts))) if err != 0 { return nil, catchVipsError() } return out, nil } func vipsGamma(image *C.VipsImage, Gamma float64) (*C.VipsImage, error) { var out *C.VipsImage defer C.g_object_unref(C.gpointer(image)) err := C.vips_gamma_bridge(image, 
&out, C.double(Gamma)) if err != 0 { return nil, catchVipsError() } return out, nil }
[ "\"VIPS_CONCURRENCY\"", "\"VIPS_TRACE\"" ]
[]
[ "VIPS_TRACE", "VIPS_CONCURRENCY" ]
[]
["VIPS_TRACE", "VIPS_CONCURRENCY"]
go
2
0
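The `vipsImageType` function in the file above classifies an input purely from its leading bytes: the JPEG SOI marker, the PNG and GIF signatures, the TIFF byte-order marks, the `WEBP` tag at offset 8, and the ISO-BMFF `ftyp` brands that separate HEIC/HEIF from AVIF. A minimal Python sketch of that detection idea follows; the function name and the returned strings are illustrative only and are not part of bimg, and the per-type `IsTypeSupported` guards are omitted.

```python
def sniff_image_type(buf: bytes) -> str:
    """Classify an image buffer by magic bytes, mirroring the checks in vipsImageType."""
    if len(buf) < 12:
        return "unknown"
    if buf[0:3] == b"\xff\xd8\xff":
        return "jpeg"
    if buf[0:3] == b"GIF":
        return "gif"
    if buf[0:4] == b"\x89PNG":
        return "png"
    if buf[0:4] in (b"II*\x00", b"MM\x00*"):
        return "tiff"
    if buf[0:4] == b"%PDF":
        return "pdf"
    if buf[8:12] == b"WEBP":
        return "webp"
    if buf[4:8] == b"ftyp":
        brand = buf[8:12]
        if brand in (b"heic", b"mif1", b"msf1", b"heis", b"hevc"):
            return "heif"
        if brand == b"avif":
            return "avif"
    return "unknown"

# Example: b"\x89PNG\r\n\x1a\n" + b"\x00" * 8 sniffs as "png".
```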
work/Clashes/submit_overlap_all_pdb_to_queue.py
from __future__ import division from nonbonded_pdb_data_collect import get_pdb_id from libtbx.command_line import easy_qsub import os """ Collect non-bonded overlap information from all PDB files that: - have a single model - have no unknown atom pairs - have good cryst1 records """ class run_queue_tests(object): def __init__(self, clean_source=False,size_of_chunks=1): """ Test using compare_clashscores_CCTBX_vs_PROBE.py """ # set working environment to "chevy" or "clean" phenix copy if clean_source: paths = "/net/chevy/raid1/youval/Clean_copy/build/setpaths.csh" else: paths = "/net/chevy/raid1/youval/Work_chevy/build/setpaths.csh" self.phenix_source = paths command_path = '/net/cci/youval/work/work/Clashes' command = 'nonbonded_pdb_data_collect.py' self.com_path = os.path.join(command_path,command) # where all queue output will be deposited queue_job = "/net/cci/youval/work/work/Clashes/queue_clash_compare" self.where_to_run_dir = queue_job self.pdb_dir = os.environ["PDB_MIRROR_PDB"] # list of PDB id codes self.pdb_code = [] # file name and path for each pdb structure self.pdb_file_with_path = [] # The commands list is a list that will be sent to the queue for processing self.commands = [] # the number of commands in each "chunk" sent to the queue self.size_of_chunks = size_of_chunks def get_pdb_files(self): """ Get all pdb files from the LBL mirror index """ files = open(os.path.join(self.pdb_dir, "INDEX"), "r").read().splitlines() self.pdb_file_with_path = files def get_commands(self): """ get_commands processes the command we want to run, with the options we want to use, on all the files we want the command to run on It produces a list of commands: list of strings, containing the command, options and file, in the same format that you would use to run it from the command prompt """ # Build the command list for fn in self.pdb_file_with_path: pdb_id = get_pdb_id(fn) # fn = os.path.join(self.pdb_dir,fn) # "-c" option leaves only the macro molecule outString = '{0} {1} >& log_{1}'.format(self.com_path,pdb_id) self.commands.append("python {}".format(outString)) def send_to_queue(self): """ Send the job to the queue """ easy_qsub.run( phenix_source = self.phenix_source, where = self.where_to_run_dir, # Optional, when you want all jobs to run on machine_name # list of newer hosts: beck, morse, gently, rebus qsub_cmd = 'qsub -q all.q@rebus', commands = self.commands, # set the number of commands to send together to the queue. size_of_chunks= self.size_of_chunks) if (__name__ == "__main__"): queue_job = run_queue_tests(size_of_chunks=10) queue_job.get_pdb_files() queue_job.get_commands() queue_job.send_to_queue() print 'Done'
[]
[]
[ "PDB_MIRROR_PDB" ]
[]
["PDB_MIRROR_PDB"]
python
1
0
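This script resolves the local PDB mirror through the `PDB_MIRROR_PDB` environment variable, builds one command per INDEX entry, and hands the list to the queue in chunks of `size_of_chunks`. The sketch below isolates those two steps; the explicit error message for a missing variable is an addition of the sketch (the script itself would raise a bare `KeyError`).

```python
import os

# Fail early with a readable message instead of the bare KeyError that
# os.environ["PDB_MIRROR_PDB"] would raise (sketch only, not the original script).
pdb_dir = os.environ.get("PDB_MIRROR_PDB")
if pdb_dir is None:
    raise SystemExit("PDB_MIRROR_PDB is not set; cannot locate the PDB mirror INDEX")

with open(os.path.join(pdb_dir, "INDEX")) as index_file:
    pdb_files = index_file.read().splitlines()

def chunk(items, size):
    """Yield successive chunks, the way commands are grouped via size_of_chunks."""
    for i in range(0, len(items), size):
        yield items[i:i + size]

batches = list(chunk(pdb_files, 10))  # the __main__ block uses size_of_chunks=10
```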
examples/storagefrombackup.go
package main import ( "bufio" "context" "os" log "github.com/sirupsen/logrus" "github.com/gridscale/gsclient-go/v3" ) var ctx = context.Background() const backupID = "this-should-be-a-backup-UUID" func main() { uuid := os.Getenv("GRIDSCALE_UUID") token := os.Getenv("GRIDSCALE_TOKEN") config := gsclient.DefaultConfiguration(uuid, token) client := gsclient.NewClient(config) log.Info("gridscale client configured") log.Info("Create storage from backup: Press 'Enter' to continue...") bufio.NewReader(os.Stdin).ReadBytes('\n') cStorage, err := client.CreateStorageFromBackup(ctx, backupID, "My new storage") if err != nil { log.Error("CreateStorageFromBackup failed with", err) return } log.WithFields(log.Fields{ "storage_uuid": cStorage.ObjectUUID, }).Info("Storage successfully created") defer func() { err := client.DeleteStorage(ctx, cStorage.ObjectUUID) if err != nil { log.Error("Delete storage failed with", err) return } log.Info("Storage successfully deleted") }() log.Info("Delete storage: press 'Enter' to continue...") bufio.NewReader(os.Stdin).ReadBytes('\n') }
[ "\"GRIDSCALE_UUID\"", "\"GRIDSCALE_TOKEN\"" ]
[]
[ "GRIDSCALE_UUID", "GRIDSCALE_TOKEN" ]
[]
["GRIDSCALE_UUID", "GRIDSCALE_TOKEN"]
go
2
0
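The gridscale example reads its API credentials with `os.Getenv("GRIDSCALE_UUID")` and `os.Getenv("GRIDSCALE_TOKEN")`, which silently yield empty strings when the variables are unset. Since the records in this listing mix Go and Python, here is a small Python sketch of the same credentials-from-environment pattern with an explicit presence check added; the helper name is made up for illustration.

```python
import os

def load_gridscale_credentials():
    """Read the two variables the example expects and fail loudly if either is missing
    (the presence check is an addition of this sketch, not of the Go example)."""
    uuid = os.environ.get("GRIDSCALE_UUID", "")
    token = os.environ.get("GRIDSCALE_TOKEN", "")
    missing = [name for name, value in
               [("GRIDSCALE_UUID", uuid), ("GRIDSCALE_TOKEN", token)] if not value]
    if missing:
        raise SystemExit("missing environment variables: " + ", ".join(missing))
    return uuid, token
```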
tests/__init__.py
import contextlib from contextlib import contextmanager import inspect import os import sys from typing import List import pytest import ddtrace from ddtrace import Span from ddtrace import Tracer from ddtrace.compat import httplib from ddtrace.compat import parse from ddtrace.compat import to_unicode from ddtrace.constants import SPAN_MEASURED_KEY from ddtrace.encoding import JSONEncoder from ddtrace.ext import http from ddtrace.internal._encoding import MsgpackEncoder from ddtrace.internal.dogstatsd import get_dogstatsd_client from ddtrace.internal.writer import AgentWriter from ddtrace.vendor import wrapt from tests.subprocesstest import SubprocessTestCase NO_CHILDREN = object() def assert_is_measured(span): """Assert that the span has the proper _dd.measured tag set""" assert SPAN_MEASURED_KEY in span.metrics assert SPAN_MEASURED_KEY not in span.meta assert span.get_metric(SPAN_MEASURED_KEY) == 1 def assert_is_not_measured(span): """Assert that the span does not set _dd.measured""" assert SPAN_MEASURED_KEY not in span.meta if SPAN_MEASURED_KEY in span.metrics: assert span.get_metric(SPAN_MEASURED_KEY) == 0 else: assert SPAN_MEASURED_KEY not in span.metrics def assert_span_http_status_code(span, code): """Assert on the span's 'http.status_code' tag""" tag = span.get_tag(http.STATUS_CODE) code = str(code) assert tag == code, "%r != %r" % (tag, code) @contextlib.contextmanager def override_env(env): """ Temporarily override ``os.environ`` with provided values:: >>> with self.override_env(dict(DATADOG_TRACE_DEBUG=True)): # Your test """ # Copy the full original environment original = dict(os.environ) # Update based on the passed in arguments os.environ.update(env) try: yield finally: # Full clear the environment out and reset back to the original os.environ.clear() os.environ.update(original) @contextlib.contextmanager def override_global_config(values): """ Temporarily override an global configuration:: >>> with self.override_global_config(dict(name=value,...)): # Your test """ # List of global variables we allow overriding # DEV: We do not do `ddtrace.config.keys()` because we have all of our integrations global_config_keys = [ "analytics_enabled", "report_hostname", "health_metrics_enabled", "env", "version", "service", ] # Grab the current values of all keys originals = dict((key, getattr(ddtrace.config, key)) for key in global_config_keys) # Override from the passed in keys for key, value in values.items(): if key in global_config_keys: setattr(ddtrace.config, key, value) try: yield finally: # Reset all to their original values for key, value in originals.items(): setattr(ddtrace.config, key, value) @contextlib.contextmanager def override_config(integration, values): """ Temporarily override an integration configuration value:: >>> with self.override_config('flask', dict(service_name='test-service')): # Your test """ options = getattr(ddtrace.config, integration) original = dict((key, options.get(key)) for key in values.keys()) options.update(values) try: yield finally: options.update(original) @contextlib.contextmanager def override_http_config(integration, values): """ Temporarily override an integration configuration for HTTP value:: >>> with self.override_http_config('flask', dict(trace_query_string=True)): # Your test """ options = getattr(ddtrace.config, integration).http original = {} for key, value in values.items(): original[key] = getattr(options, key) setattr(options, key, value) try: yield finally: for key, value in original.items(): setattr(options, key, value) 
@contextlib.contextmanager def override_sys_modules(modules): """ Temporarily override ``sys.modules`` with provided dictionary of modules:: >>> mock_module = mock.MagicMock() >>> mock_module.fn.side_effect = lambda: 'test' >>> with self.override_sys_modules(dict(A=mock_module)): # Your test """ original = dict(sys.modules) sys.modules.update(modules) try: yield finally: sys.modules.clear() sys.modules.update(original) class BaseTestCase(SubprocessTestCase): """ BaseTestCase extends ``unittest.TestCase`` to provide some useful helpers/assertions Example:: from tests import BaseTestCase class MyTestCase(BaseTestCase): def test_case(self): with self.override_config('flask', dict(distributed_tracing_enabled=True): pass """ override_env = staticmethod(override_env) override_global_config = staticmethod(override_global_config) override_config = staticmethod(override_config) override_http_config = staticmethod(override_http_config) override_sys_modules = staticmethod(override_sys_modules) assert_is_measured = staticmethod(assert_is_measured) assert_is_not_measured = staticmethod(assert_is_not_measured) class TestSpanContainer(object): """ Helper class for a container of Spans. Subclasses of this class must implement a `get_spans` method:: def get_spans(self): return [] This class provides methods and assertions over a list of spans:: class TestCases(BaseTracerTestCase): def test_spans(self): # TODO: Create spans self.assert_has_spans() self.assert_span_count(3) self.assert_structure( ... ) # Grab only the `requests.request` spans spans = self.filter_spans(name='requests.request') """ def _ensure_test_spans(self, spans): """ internal helper to ensure the list of spans are all :class:`tests.utils.span.TestSpan` :param spans: List of :class:`ddtrace.span.Span` or :class:`tests.utils.span.TestSpan` :type spans: list :returns: A list og :class:`tests.utils.span.TestSpan` :rtype: list """ return [span if isinstance(span, TestSpan) else TestSpan(span) for span in spans] @property def spans(self): return self._ensure_test_spans(self.get_spans()) def get_spans(self): """subclass required property""" raise NotImplementedError def _build_tree(self, root): """helper to build a tree structure for the provided root span""" children = [] for span in self.spans: if span.parent_id == root.span_id: children.append(self._build_tree(span)) return TestSpanNode(root, children) def get_root_span(self): """ Helper to get the root span from the list of spans in this container :returns: The root span if one was found, None if not, and AssertionError if multiple roots were found :rtype: :class:`tests.utils.span.TestSpanNode`, None :raises: AssertionError """ root = None for span in self.spans: if span.parent_id is None: if root is not None: raise AssertionError("Multiple root spans found {0!r} {1!r}".format(root, span)) root = span assert root, "No root span found in {0!r}".format(self.spans) return self._build_tree(root) def get_root_spans(self): """ Helper to get all root spans from the list of spans in this container :returns: The root spans if any were found, None if not :rtype: list of :class:`tests.utils.span.TestSpanNode`, None """ roots = [] for span in self.spans: if span.parent_id is None: roots.append(self._build_tree(span)) return sorted(roots, key=lambda s: s.start) def assert_trace_count(self, count): """Assert the number of unique trace ids this container has""" trace_count = len(self.get_root_spans()) assert trace_count == count, "Trace count {0} != {1}".format(trace_count, count) def 
assert_span_count(self, count): """Assert this container has the expected number of spans""" assert len(self.spans) == count, "Span count {0} != {1}".format(len(self.spans), count) def assert_has_spans(self): """Assert this container has spans""" assert len(self.spans), "No spans found" def assert_has_no_spans(self): """Assert this container does not have any spans""" assert len(self.spans) == 0, "Span count {0}".format(len(self.spans)) def filter_spans(self, *args, **kwargs): """ Helper to filter current spans by provided parameters. This function will yield all spans whose `TestSpan.matches` function return `True`. :param args: Positional arguments to pass to :meth:`tests.utils.span.TestSpan.matches` :type args: list :param kwargs: Keyword arguments to pass to :meth:`tests.utils.span.TestSpan.matches` :type kwargs: dict :returns: generator for the matched :class:`tests.utils.span.TestSpan` :rtype: generator """ for span in self.spans: # ensure we have a TestSpan if not isinstance(span, TestSpan): span = TestSpan(span) if span.matches(*args, **kwargs): yield span def find_span(self, *args, **kwargs): """ Find a single span matches the provided filter parameters. This function will find the first span whose `TestSpan.matches` function return `True`. :param args: Positional arguments to pass to :meth:`tests.utils.span.TestSpan.matches` :type args: list :param kwargs: Keyword arguments to pass to :meth:`tests.utils.span.TestSpan.matches` :type kwargs: dict :returns: The first matching span :rtype: :class:`tests.TestSpan` """ span = next(self.filter_spans(*args, **kwargs), None) assert span is not None, "No span found for filter {0!r} {1!r}, have {2} spans".format( args, kwargs, len(self.spans) ) return span class TracerTestCase(TestSpanContainer, BaseTestCase): """ BaseTracerTestCase is a base test case for when you need access to a dummy tracer and span assertions """ def setUp(self): """Before each test case, setup a dummy tracer to use""" self.tracer = DummyTracer() super(TracerTestCase, self).setUp() def tearDown(self): """After each test case, reset and remove the dummy tracer""" super(TracerTestCase, self).tearDown() self.reset() delattr(self, "tracer") def get_spans(self): """Required subclass method for TestSpanContainer""" return self.tracer.writer.spans def pop_spans(self): # type: () -> List[Span] return self.tracer.pop() def pop_traces(self): # type: () -> List[List[Span]] return self.tracer.pop_traces() def reset(self): """Helper to reset the existing list of spans created""" self.tracer.writer.pop() def trace(self, *args, **kwargs): """Wrapper for self.tracer.trace that returns a TestSpan""" return TestSpan(self.tracer.trace(*args, **kwargs)) def start_span(self, *args, **kwargs): """Helper for self.tracer.start_span that returns a TestSpan""" return TestSpan(self.tracer.start_span(*args, **kwargs)) def assert_structure(self, root, children=NO_CHILDREN): """Helper to call TestSpanNode.assert_structure on the current root span""" root_span = self.get_root_span() root_span.assert_structure(root, children) @contextlib.contextmanager def override_global_tracer(self, tracer=None): original = ddtrace.tracer tracer = tracer or self.tracer setattr(ddtrace, "tracer", tracer) try: yield finally: setattr(ddtrace, "tracer", original) class DummyWriter(AgentWriter): """DummyWriter is a small fake writer used for tests. 
not thread-safe.""" def __init__(self, *args, **kwargs): # original call super(DummyWriter, self).__init__(*args, **kwargs) # dummy components self.spans = [] self.traces = [] self.json_encoder = JSONEncoder() self.msgpack_encoder = MsgpackEncoder() def write(self, spans=None): if spans: # the traces encoding expect a list of traces so we # put spans in a list like we do in the real execution path # with both encoders trace = [spans] self.json_encoder.encode_traces(trace) self.msgpack_encoder.encode_traces(trace) self.spans += spans self.traces += trace def pop(self): # type: () -> List[Span] s = self.spans self.spans = [] return s def pop_traces(self): # type: () -> List[List[Span]] traces = self.traces self.traces = [] return traces class DummyTracer(Tracer): """ DummyTracer is a tracer which uses the DummyWriter by default """ def __init__(self): super(DummyTracer, self).__init__() self._update_writer() def _update_writer(self): # Track which writer the DummyWriter was created with, used # some tests if not isinstance(self.writer, DummyWriter): self.original_writer = self.writer if isinstance(self.writer, AgentWriter): self.writer = DummyWriter( agent_url=self.writer.agent_url, priority_sampler=self.writer._priority_sampler, dogstatsd=get_dogstatsd_client(self._dogstatsd_url), ) else: self.writer = DummyWriter( priority_sampler=self.writer._priority_sampler, ) def pop(self): # type: () -> List[Span] return self.writer.pop() def pop_traces(self): # type: () -> List[List[Span]] return self.writer.pop_traces() def configure(self, *args, **kwargs): super(DummyTracer, self).configure(*args, **kwargs) # `.configure()` may reset the writer self._update_writer() class TestSpan(Span): """ Test wrapper for a :class:`ddtrace.span.Span` that provides additional functions and assertions Example:: span = tracer.trace('my.span') span = TestSpan(span) if span.matches(name='my.span'): print('matches') # Raises an AssertionError span.assert_matches(name='not.my.span', meta={'system.pid': getpid()}) """ def __init__(self, span): """ Constructor for TestSpan :param span: The :class:`ddtrace.span.Span` to wrap :type span: :class:`ddtrace.span.Span` """ if isinstance(span, TestSpan): span = span._span # DEV: Use `object.__setattr__` to by-pass this class's `__setattr__` object.__setattr__(self, "_span", span) def __getattr__(self, key): """ First look for property on the base :class:`ddtrace.span.Span` otherwise return this object's attribute """ if hasattr(self._span, key): return getattr(self._span, key) return self.__getattribute__(key) def __setattr__(self, key, value): """Pass through all assignment to the base :class:`ddtrace.span.Span`""" return setattr(self._span, key, value) def __eq__(self, other): """ Custom equality code to ensure we are using the base :class:`ddtrace.span.Span.__eq__` :param other: The object to check equality with :type other: object :returns: True if equal, False otherwise :rtype: bool """ if isinstance(other, TestSpan): return other._span == self._span elif isinstance(other, Span): return other == self._span return other == self def matches(self, **kwargs): """ Helper function to check if this span's properties matches the expected. 
Example:: span = TestSpan(span) span.matches(name='my.span', resource='GET /') :param kwargs: Property/Value pairs to evaluate on this span :type kwargs: dict :returns: True if the arguments passed match, False otherwise :rtype: bool """ for name, value in kwargs.items(): # Special case for `meta` if name == "meta" and not self.meta_matches(value): return False # Ensure it has the property first if not hasattr(self, name): return False # Ensure the values match if getattr(self, name) != value: return False return True def meta_matches(self, meta, exact=False): """ Helper function to check if this span's meta matches the expected Example:: span = TestSpan(span) span.meta_matches({'system.pid': getpid()}) :param meta: Property/Value pairs to evaluate on this span :type meta: dict :param exact: Whether to do an exact match on the meta values or not, default: False :type exact: bool :returns: True if the arguments passed match, False otherwise :rtype: bool """ if exact: return self.meta == meta for key, value in meta.items(): if key not in self.meta: return False if self.meta[key] != value: return False return True def assert_matches(self, **kwargs): """ Assertion method to ensure this span's properties match as expected Example:: span = TestSpan(span) span.assert_matches(name='my.span') :param kwargs: Property/Value pairs to evaluate on this span :type kwargs: dict :raises: AssertionError """ for name, value in kwargs.items(): # Special case for `meta` if name == "meta": self.assert_meta(value) elif name == "metrics": self.assert_metrics(value) else: assert hasattr(self, name), "{0!r} does not have property {1!r}".format(self, name) assert getattr(self, name) == value, "{0!r} property {1}: {2!r} != {3!r}".format( self, name, getattr(self, name), value ) def assert_meta(self, meta, exact=False): """ Assertion method to ensure this span's meta match as expected Example:: span = TestSpan(span) span.assert_meta({'system.pid': getpid()}) :param meta: Property/Value pairs to evaluate on this span :type meta: dict :param exact: Whether to do an exact match on the meta values or not, default: False :type exact: bool :raises: AssertionError """ if exact: assert self.meta == meta else: for key, value in meta.items(): assert key in self.meta, "{0} meta does not have property {1!r}".format(self, key) assert self.meta[key] == value, "{0} meta property {1!r}: {2!r} != {3!r}".format( self, key, self.meta[key], value ) def assert_metrics(self, metrics, exact=False): """ Assertion method to ensure this span's metrics match as expected Example:: span = TestSpan(span) span.assert_metrics({'_dd1.sr.eausr': 1}) :param metrics: Property/Value pairs to evaluate on this span :type metrics: dict :param exact: Whether to do an exact match on the metrics values or not, default: False :type exact: bool :raises: AssertionError """ if exact: assert self.metrics == metrics else: for key, value in metrics.items(): assert key in self.metrics, "{0} metrics does not have property {1!r}".format(self, key) assert self.metrics[key] == value, "{0} metrics property {1!r}: {2!r} != {3!r}".format( self, key, self.metrics[key], value ) class TracerSpanContainer(TestSpanContainer): """ A class to wrap a :class:`tests.utils.tracer.DummyTracer` with a :class:`tests.utils.span.TestSpanContainer` to use in tests """ def __init__(self, tracer): self.tracer = tracer super(TracerSpanContainer, self).__init__() def get_spans(self): """ Overridden method to return all spans attached to this tracer :returns: List of spans attached to this tracer 
:rtype: list """ return self.tracer.writer.spans def pop(self): return self.tracer.pop() def pop_traces(self): return self.tracer.pop_traces() def reset(self): """Helper to reset the existing list of spans created""" self.tracer.pop() class TestSpanNode(TestSpan, TestSpanContainer): """ A :class:`tests.utils.span.TestSpan` which is used as part of a span tree. Each :class:`tests.utils.span.TestSpanNode` represents the current :class:`ddtrace.span.Span` along with any children who have that span as it's parent. This class can be used to assert on the parent/child relationships between spans. Example:: class TestCase(BaseTestCase): def test_case(self): # TODO: Create spans self.assert_structure( ... ) tree = self.get_root_span() # Find the first child of the root span with the matching name request = tree.find_span(name='requests.request') # Assert the parent/child relationship of this `request` span request.assert_structure( ... ) """ def __init__(self, root, children=None): super(TestSpanNode, self).__init__(root) object.__setattr__(self, "_children", children or []) def get_spans(self): """required subclass property, returns this spans children""" return self._children def assert_structure(self, root, children=NO_CHILDREN): """ Assertion to assert on the structure of this node and it's children. This assertion takes a dictionary of properties to assert for this node along with a list of assertions to make for it's children. Example:: def test_case(self): # Assert the following structure # # One root_span, with two child_spans, one with a requests.request span # # | root_span | # | child_span | | child_span | # | requests.request | self.assert_structure( # Root span with two child_span spans dict(name='root_span'), ( # Child span with one child of it's own ( dict(name='child_span'), # One requests.request span with no children ( dict(name='requests.request'), ), ), # Child span with no children dict(name='child_span'), ), ) :param root: Properties to assert for this root span, these are passed to :meth:`tests.utils.span.TestSpan.assert_matches` :type root: dict :param children: List of child assertions to make, if children is None then do not make any assertions about this nodes children. Each list element must be a list with 2 items the first is a ``dict`` of property assertions on that child, and the second is a ``list`` of child assertions to make. :type children: list, None :raises: """ self.assert_matches(**root) # Give them a way to ignore asserting on children if children is None: return elif children is NO_CHILDREN: children = () spans = self.spans self.assert_span_count(len(children)) for i, child in enumerate(children): if not isinstance(child, (list, tuple)): child = (child, NO_CHILDREN) root, _children = child spans[i].assert_matches(parent_id=self.span_id, trace_id=self.trace_id, _parent=self) spans[i].assert_structure(root, _children) def pprint(self): parts = [super(TestSpanNode, self).pprint()] for child in self._children: parts.append("-" * 20) parts.append(child.pprint()) return "\r\n".join(parts) def assert_dict_issuperset(a, b): assert set(a.items()).issuperset(set(b.items())), "{a} is not a superset of {b}".format(a=a, b=b) @contextmanager def override_global_tracer(tracer): """Helper functions that overrides the global tracer available in the `ddtrace` package. This is required because in some `httplib` tests we can't get easily the PIN object attached to the `HTTPConnection` to replace the used tracer with a dummy tracer. 
""" original_tracer = ddtrace.tracer ddtrace.tracer = tracer yield ddtrace.tracer = original_tracer class SnapshotFailed(Exception): pass def snapshot(ignores=None, include_tracer=False, variants=None, async_mode=True): """Performs a snapshot integration test with the testing agent. All traces sent to the agent will be recorded and compared to a snapshot created for the test case. :param ignores: A list of keys to ignore when comparing snapshots. To refer to keys in the meta or metrics maps use "meta.key" and "metrics.key" :param tracer: A tracer providing the agent connection information to use. """ ignores = ignores or [] if include_tracer: tracer = Tracer() else: tracer = ddtrace.tracer @wrapt.decorator def wrapper(wrapped, instance, args, kwargs): if len(args) > 1: self = args[0] clsname = self.__class__.__name__ else: clsname = "" module = inspect.getmodule(wrapped) # Use the fully qualified function name as a unique test token to # identify the snapshot. token = "{}{}{}.{}".format(module.__name__, "." if clsname else "", clsname, wrapped.__name__) # Use variant that applies to update test token. One must apply. If none # apply, the test should have been marked as skipped. if variants: applicable_variant_ids = [k for (k, v) in variants.items() if v] assert len(applicable_variant_ids) == 1 variant_id = applicable_variant_ids[0] token = "{}_{}".format(token, variant_id) if variant_id else token parsed = parse.urlparse(tracer.writer.agent_url) conn = httplib.HTTPConnection(parsed.hostname, parsed.port) try: # clear queue in case traces have been generated before test case is # itself run try: tracer.writer.flush_queue() except Exception as e: pytest.fail("Could not flush the queue before test case: %s" % str(e), pytrace=True) if async_mode: # Patch the tracer writer to include the test token header for all requests. tracer.writer._headers["X-Datadog-Test-Token"] = token else: # Signal the start of this test case to the test agent. try: conn.request("GET", "/test/start?token=%s" % token) except Exception as e: pytest.fail("Could not connect to test agent: %s" % str(e), pytrace=False) else: r = conn.getresponse() if r.status != 200: # The test agent returns nice error messages we can forward to the user. raise SnapshotFailed(r.read()) # Run the test. try: if include_tracer: kwargs["tracer"] = tracer ret = wrapped(*args, **kwargs) # Force a flush so all traces are submitted. tracer.writer.flush_queue() finally: if async_mode: del tracer.writer._headers["X-Datadog-Test-Token"] # Query for the results of the test. conn = httplib.HTTPConnection(parsed.hostname, parsed.port) conn.request("GET", "/test/snapshot?ignores=%s&token=%s" % (",".join(ignores), token)) r = conn.getresponse() if r.status != 200: raise SnapshotFailed(r.read()) return ret except SnapshotFailed as e: # Fail the test if a failure has occurred and print out the # message we got from the test agent. pytest.fail(to_unicode(e.args[0]), pytrace=False) except Exception as e: # Even though it's unlikely any traces have been sent, make the # final request to the test agent so that the test case is finished. 
conn = httplib.HTTPConnection(parsed.hostname, parsed.port) conn.request("GET", "/test/snapshot?ignores=%s&token=%s" % (",".join(ignores), token)) conn.getresponse() pytest.fail("Unexpected test failure during snapshot test: %s" % str(e), pytrace=True) finally: conn.close() return wrapper class AnyStr(object): def __eq__(self, other): return isinstance(other, str) class AnyInt(object): def __eq__(self, other): return isinstance(other, int) class AnyFloat(object): def __eq__(self, other): return isinstance(other, float)
[]
[]
[]
[]
[]
python
0
0
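The test helpers in this record revolve around snapshotting and restoring global state; `override_env` in particular copies `os.environ`, applies the overrides, and restores the snapshot in a `finally` block so a failing test cannot leak variables. A self-contained sketch of that helper with a usage check follows; the variable name in the example is arbitrary.

```python
import os
from contextlib import contextmanager

@contextmanager
def override_env(env):
    """Apply `env` on top of os.environ, then restore the original snapshot on exit."""
    original = dict(os.environ)
    os.environ.update(env)
    try:
        yield
    finally:
        os.environ.clear()
        os.environ.update(original)

# Usage: assuming DATADOG_TRACE_DEBUG is not already set in the parent environment,
# nothing set inside the block survives it.
with override_env({"DATADOG_TRACE_DEBUG": "true"}):
    assert os.environ["DATADOG_TRACE_DEBUG"] == "true"
assert "DATADOG_TRACE_DEBUG" not in os.environ
```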
experimentator/wandb_experiment.py
import os import pandas from functools import cached_property from experimentator import StateLogger import wandb os.environ["WANDB_SILENT"] = "true" os.environ["WANDB_START_METHOD"] = "thread" class LogStateWandB(StateLogger): best_report = {} def __init__(self, criterion_metric=None, mode="online"): self.criterion_metric = criterion_metric self.mode = mode self.initialized = False @cached_property def wandb_run(self): run = wandb.init( project=self.project_name, reinit=True, config=self.config, settings=wandb.Settings(show_emoji=False, _save_requirements=False), mode=self.mode, ) run.name = self.run_name self.initialized = True return run def __del__(self): if self.initialized: self.wandb_run.finish() def on_epoch_end(self, state, **_): report = {} for key, data in state.items(): if key not in self.excluded_keys: if isinstance(data, pandas.DataFrame): report[key] = wandb.Table(dataframe=data) else: report[key] = data self.wandb_run.log(report) # log *once* per epoch if self.criterion_metric and self.criterion_metric in report: if not self.best_report or report[self.criterion_metric] > self.best_report[self.criterion_metric]: self.best_report = report self.wandb_run.summary.update(self.best_report)
[]
[]
[ "WANDB_SILENT", "WANDB_START_METHOD" ]
[]
["WANDB_SILENT", "WANDB_START_METHOD"]
python
2
0
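Two details of this module are worth calling out: `WANDB_SILENT` and `WANDB_START_METHOD` are written into `os.environ` at import time so that wandb picks them up later, and the run itself is created lazily through `functools.cached_property`, so nothing talks to the service until `wandb_run` is first accessed. Below is a small sketch of the lazy-initialization half (Python 3.8+ for `cached_property`); the class and the dictionary standing in for `wandb.init` are illustrative.

```python
import os
from functools import cached_property

os.environ["WANDB_SILENT"] = "true"  # set once at import time, as the module does

class LazyRun:
    """The expensive initializer runs on first attribute access, and only once."""
    def __init__(self, name):
        self.name = name
        self.initialized = False

    @cached_property
    def run(self):
        self.initialized = True        # side effect happens exactly once
        return {"name": self.name}     # stands in for the real wandb.init(...) call

r = LazyRun("demo")
assert not r.initialized                 # nothing created yet
first = r.run
assert r.initialized and r.run is first  # second access returns the cached object
```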
pkg/issuer/acme/dns/cis/cis_test.go
// // +skip_license_check // // /* // This file contains portions of code directly taken from the 'xenolf/lego' project. // A copy of the license for this code can be found in the file named LICENSE in // this directory. // */ // // package clouddns // // import ( // "os" // "testing" // "time" // // "golang.org/x/net/context" // "golang.org/x/oauth2/google" // "google.golang.org/api/dns/v1" // // "github.com/jetstack/cert-manager/pkg/issuer/acme/dns/util" // "github.com/stretchr/testify/assert" // ) // // var ( // gcloudLiveTest bool // gcloudProject string // gcloudDomain string // ) // // func init() { // gcloudProject = os.Getenv("GCE_PROJECT") // gcloudDomain = os.Getenv("GCE_DOMAIN") // _, err := google.DefaultClient(context.Background(), dns.NdevClouddnsReadwriteScope) // if err == nil && len(gcloudProject) > 0 && len(gcloudDomain) > 0 { // gcloudLiveTest = true // } // } // // func restoreGCloudEnv() { // os.Setenv("GCE_PROJECT", gcloudProject) // } // // func TestNewDNSProviderValid(t *testing.T) { // if !gcloudLiveTest { // t.Skip("skipping live test (requires credentials)") // } // os.Setenv("GCE_PROJECT", "") // _, err := NewDNSProviderCredentials("my-project", util.RecursiveNameservers) // assert.NoError(t, err) // restoreGCloudEnv() // } // // func TestNewDNSProviderValidEnv(t *testing.T) { // if !gcloudLiveTest { // t.Skip("skipping live test (requires credentials)") // } // os.Setenv("GCE_PROJECT", "my-project") // _, err := NewDNSProviderEnvironment(util.RecursiveNameservers) // assert.NoError(t, err) // restoreGCloudEnv() // } // // func TestNewDNSProviderMissingCredErr(t *testing.T) { // os.Setenv("GCE_PROJECT", "") // _, err := NewDNSProviderEnvironment(util.RecursiveNameservers) // assert.EqualError(t, err, "Google Cloud project name missing") // restoreGCloudEnv() // } // // func TestLiveGoogleCloudPresent(t *testing.T) { // if !gcloudLiveTest { // t.Skip("skipping live test") // } // // provider, err := NewDNSProviderCredentials(gcloudProject, util.RecursiveNameservers) // assert.NoError(t, err) // // err = provider.Present(gcloudDomain, "_acme-challenge."+gcloudDomain+".", "123d==") // assert.NoError(t, err) // } // // func TestLiveGoogleCloudPresentMultiple(t *testing.T) { // if !gcloudLiveTest { // t.Skip("skipping live test") // } // // provider, err := NewDNSProviderCredentials(gcloudProject, util.RecursiveNameservers) // assert.NoError(t, err) // // // Check that we're able to create multiple entries // err = provider.Present(gcloudDomain, "_acme-challenge."+gcloudDomain+".", "123d==") // assert.NoError(t, err) // err = provider.Present(gcloudDomain, "_acme-challenge."+gcloudDomain+".", "1123d==") // assert.NoError(t, err) // } // // func TestLiveGoogleCloudCleanUp(t *testing.T) { // if !gcloudLiveTest { // t.Skip("skipping live test") // } // // time.Sleep(time.Second * 1) // // provider, err := NewDNSProviderCredentials(gcloudProject, util.RecursiveNameservers) // assert.NoError(t, err) // // err = provider.CleanUp(gcloudDomain, "_acme-challenge."+gcloudDomain+".", "123d==") // assert.NoError(t, err) // }
[ "\"GCE_PROJECT\"", "\"GCE_DOMAIN\"" ]
[]
[ "GCE_DOMAIN", "GCE_PROJECT" ]
[]
["GCE_DOMAIN", "GCE_PROJECT"]
go
2
0
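Even though the whole file is commented out, it documents a common pattern: live tests are gated on a flag (`gcloudLiveTest`) that is only set when `GCE_PROJECT`, `GCE_DOMAIN`, and default Google credentials are all available, and `restoreGCloudEnv` puts `GCE_PROJECT` back after tests mutate it. A rough Python equivalent of the gating idea using pytest is sketched below; it omits the credential lookup, and the test body is only a placeholder.

```python
import os
import pytest

GCE_PROJECT = os.getenv("GCE_PROJECT", "")
GCE_DOMAIN = os.getenv("GCE_DOMAIN", "")
live_test = bool(GCE_PROJECT and GCE_DOMAIN)  # stands in for the gcloudLiveTest flag

@pytest.mark.skipif(not live_test, reason="skipping live test (requires credentials)")
def test_live_present():
    # A real live test would create and verify a DNS record here.
    assert GCE_PROJECT and GCE_DOMAIN
```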
app.py
import os import discord from discord.ext import commands from dotenv import load_dotenv import random import re import logging # TTS import pyttsx3 # This one is to get current time import time from musicbot.music import Music from datetime import date today = date.today() # Declaring intents intents = discord.Intents.all() d1 = str(today.strftime("%Y-%m-%d")) log_name = 'logs/' + d1 + '.log' logging.basicConfig(filename=log_name, format='%(asctime)s - %(name)s \ - %(levelname)s - %(message)s', level=logging.DEBUG) # create logger logger = logging.getLogger('Watchdog') logger.setLevel(logging.INFO) # create console handler and set level to debug ch = logging.StreamHandler() ch.setLevel(logging.DEBUG) # create formatter formatter = logging.Formatter('%(asctime)s' '- %(name)s' '- %(levelname)s' '- %(message)s') # add formatter to ch ch.setFormatter(formatter) # add ch to logger logger.addHandler(ch) """ Available ways to log files """ # logger.debug('debug message') # logger.info('info message') # logger.warning('warn message') # logger.error('error message') # logger.critical('critical message') # This gets the pid of the bots process and stores it into a file so it read and then be killed on the automation script pid = os.getpid() with open('logs/pid/pid.txt', 'w') as pidFile: pidFile.write(str(pid)) # Necessário para o código funcionar no Spyder e noutros IDE's # import nest_asyncio # nest_asyncio.apply() load_dotenv() TOKEN = os.getenv('DISCORD_TOKEN') # Gets user id's sheep = int(os.getenv('DISCORD_SHEEP')) march = int(os.getenv('DISCORD_MARCH')) gordo = int(os.getenv('DISCORD_GORDO')) mata = int(os.getenv('DISCORD_MATA')) # Gets the image path img_path = 'tsm.jpeg' pfp = open(img_path, 'rb') img = pfp.read() pl_id = 'spotify:playlist:1Qhy7QA5Gfgc1Ugwpk5iXl' songList = [] playlistName = "" confirmed = 0 max_players_pummel = 8 voice_client = '' # Getting current time in miliseconds def current_milli_time(): return round(time.time() * 1000) # Created the bot with a prefix bot = commands.Bot(command_prefix='!', description="Discord bot created by March & Sheep", intents=intents) # Removes the default help command so we can create a new one bot.remove_command('help') @bot.command() async def test(ctx): logger.info("%s -> %s", ctx.author.name, ctx.message.content) await ctx.send("123") # Commands to invite people for games ------------------------------------- # Universal One @bot.command(name='invite', aliases=['inv']) async def invite(ctx, role): logger.info("%s invited to %s", ctx.author.name, role) await ctx.channel.purge(limit=1) # print(str(role)) role = int(re.sub(r'\D', '', role)) # print(str(role)) role = ctx.guild.get_role(role) logger.info("Invitation to play %s by ctx.author.name", role.name) global max_players_pummel # confirmed = 0 # Create embed embed_var = discord.Embed(title="Sessão de " + role.name + " hoje?", description=" ", color=role.colour) members = role.members # Next 2 lines are to tag the members of that role for x in members: logger.info('Invited %s', x.name) embed_var.add_field(name=x.name, value=x.mention, inline=False) # After the embeb is created this reacts with the 2 emojis of yay or nay mess = await ctx.channel.send(embed=embed_var) await mess.add_reaction("✅") await mess.add_reaction("❎") # Command to kick bifes - change to be able to kick @someone @bot.command() async def bifes(ctx): logger.info("%s -> %s", ctx.author.name, ctx.message.content) print('bifes kicked by: ', ctx.author) await ctx.channel.purge(limit=1) for member in ctx.guild.members: if 
member.id == int("307621482186670082"): # bifes id bifes_m = member await bifes_m.kick(reason='You were being annoying dude, ' 'pls take it easy, thank you!') # New help command @bot.command() async def help(ctx): logger.info("%s -> %s", ctx.author.name, ctx.message.content) print('help by: ', ctx.author) await ctx.channel.send(bot.get_user(march).mention + ' ' + bot.get_user(sheep).mention + '\nEsta aqui um nabo a pedir ajuda...' '\nPergunta a um destes dois se eles nao responderem!' '\nPara a musica é so !play song name/spotify link' '\n Para jogos é so !invite @role') # Deletes Gordo's messages @bot.event async def on_message(message): if not message.guild: # Hidden feature, send !r {sentence} in a DM for the bot to read out the {sentence} # Only in DM's if message.content.startswith('!r'): global voice_client guild = bot.guilds[0] if not voice_client: for vc in guild.voice_channels: for member in vc.members: if member.id == message.author.id: voice_client = await vc.connect() engine = pyttsx3.init() engine.save_to_file(message.content[3:], 'test.mp3') engine.runAndWait() audio_source = discord.FFmpegPCMAudio("test.mp3") voice_client.play(audio_source, after=None) else: engine = pyttsx3.init() engine.save_to_file(message.content[3:], 'test.mp3') engine.runAndWait() audio_source = discord.FFmpegPCMAudio("test.mp3") voice_client.play(audio_source, after=None) # Message Deleter------- if message.author != bot.user: logger.info("%s said -> %s", message.author.name, message.content) if message.author == bot.get_user(gordo): logger.info("Deleted %s's message -> [%s]", message.author.name, message.content) await message.channel.purge(limit=1) await bot.process_commands(message) # Annoy mata everytime he writes something if message.author == bot.get_user(mata): num = random.random() * 100 # print(num) if num >= 50: await message.add_reaction('🖕') elif num < 10: choice = await message.channel.send(message.author.mention + " " + random.choice(mensagem)) logger.info("said [%s] to mata", str(choice)) mensagem = ["You're a bitch", "No you", "Já estou farto de te ouvir bitch", "Vai estudar!", "A tua mãe chamou-te", "Celtics suck!", "Ouvi dizer que o Sheep te insultou", "Ouvi dizer que o March te insultou", "U gay", "Roses are red, violets are blue, I've got five fingers and the middle one is for you ;)", "If you were a vegetable you'd be a cabbitch", "So if i typed 'idiot' into Google would your picture come up?"] # Functions that get the user reations (yay or nay) and changes the emebeb to display their answers async def embed_yes(payload): global max_players_pummel channel = bot.get_channel(payload.channel_id) msg = await channel.fetch_message(payload.message_id) embed = msg.embeds[0] embed_dic = embed.to_dict() fields = embed_dic.get('fields') id_user = "" index = 0 # print(payload.user_id) for ind, x in enumerate(fields): id_field = re.sub(r'\D', '', x['value']) # print(id_field) if int(id_field) == payload.user_id: id_user = int(id_field) index = ind user = bot.get_user(id_user) nome = user.name print(nome) print(id_user) print(index) nome += " ✅" # print(user.name) embed.set_field_at(index, name=nome, value=user.mention, inline=False) embed_dic = embed.to_dict() fields = embed_dic.get('fields') confirmed_1 = 0 for x in fields: if "✅" in str(x['name']): confirmed_1 += + 1 await msg.edit(embed=embed) print("check marked") # Stuff about among us async def embed_no(payload): channel = bot.get_channel(payload.channel_id) msg = await channel.fetch_message(payload.message_id) embed = msg.embeds[0] 
embed_dic = embed.to_dict() fields = embed_dic.get('fields') id_user = "" index = 0 # print(payload.user_id) for ind, x in enumerate(fields): id_field = re.sub(r'\D', '', x['value']) # print(id_field) if int(id_field) == payload.user_id: id_user = int(id_field) index = ind user = bot.get_user(id_user) nome = user.name print(nome) print(id_user) print(index) nome += " ❎" # print(user.name) embed.set_field_at(index, name=nome, value=user.mention, inline=False) embed_dic = embed.to_dict() fields = embed_dic.get('fields') confirmed_1 = 0 for x in fields: if "✅" in str(x['name']): confirmed_1 += + 1 await msg.edit(embed=embed) print("cross marked") # Stuff about among us nr of players maybe? @bot.event async def on_raw_reaction_add(payload): if str(payload.emoji) == "✅" and payload.user_id != bot.user.id: logger.info("User %s said Yes", bot.get_user(payload.user_id).name) await embed_yes(payload) elif str(payload.emoji) == "❎" and payload.user_id != bot.user.id: logger.info("User %s said No", bot.get_user(payload.user_id).name) await embed_no(payload) elif payload.user_id != bot.get_user(gordo) and payload.user_id != bot.user.id: channel = bot.get_channel(payload.channel_id) msg = await channel.fetch_message(payload.message_id) logger.info("Copied %s's reaction", bot.get_user(payload.user_id).name) await msg.add_reaction(payload.emoji) # Stuff for the among us nr of players @bot.event async def on_raw_reaction_remove(payload): global max_players_pummel if str(payload.emoji) == "✅" and payload.user_id != bot.user.id: logger.info("User %s removed Yes", bot.get_user(payload.user_id).name) await embed_yes(payload) elif str(payload.emoji) == "❎" and payload.user_id != bot.user.id: logger.info("User %s removed No", bot.get_user(payload.user_id).name) await embed_no(payload) last_time = current_milli_time() mute_count = 0 # Disconnectes Gordo from voice channels @bot.event async def on_voice_state_update(member, before, after): global last_time # This is global so i can use it to check the time between mutes global mute_count # Simple channel movements log if before.self_mute != after.self_mute: current_time = current_milli_time() print(last_time) print(current_time) print(current_time - last_time) if current_time - last_time < 500: mute_count += 1 else: mute_count = 0 if mute_count > 3: voice_client = await after.channel.connect() mute_count = 0 engine = pyttsx3.init() engine.save_to_file("march is a bitch", 'test.mp3') engine.runAndWait() audio_source = discord.FFmpegPCMAudio("test.mp3") await voice_client.play(audio_source, after=None) last_time = current_time elif before.channel is None: logger.info("%s joined %s", member, after.channel) elif after.channel is None: logger.info("%s left %s", member, before.channel) else: logger.info("%s left %s and joined %s", member, before.channel, after.channel) # Disconnecting on specific user joining voice channels if member == bot.get_user(gordo): logger.info('member disconnected: ', member) await member.edit(voice_channel=None) # Removes Gordo's Professor chaos role- needs administrator role @bot.event async def on_member_update(before, after): if str(before.activity) != str(after.activity): logger.info("%s current activity changed to: '%s'", before.name, str(after.activity)) """Isto deteta se alguém mudou de status""" if str(before.status) != str(after.status): logger.info("%s current status changed to:'%s'", before.name, str(after.status)) """Isto deteta se alguém mudou o nickname""" if before.nick != after.nick: # Aqui é para ver se a pessoa já tinha um 
nickname \ # para não dar None quando se tenta escrever o nome na mensagem de info if before.nick: name_1 = before.nick else: name_1 = before.name if after.nick: name_2 = after.nick else: name_2 = after.name logger.info("%s changed nickname to:'%s'", name_1, name_2) """Isto deteta se alguém mudou de roles""" if before.roles != after.roles: # This is to check if someone on the hitlist changed roles logger.info("%s changed roles", after.name) if after == bot.get_user(gordo) and str(after.top_role) == "Professor Chaos": list_roles = after.roles.copy() # This is to check if professor chaos aka bitch is one of the roles and if it is, deletes it from the user for index, x in enumerate(list_roles, start=0): # Getting index of "bitch" if str(x) == "Professor Chaos": index_role = index del list_roles[index_role] await after.edit(roles=list_roles) # print(after.roles) # Music bot bot.add_cog(Music(bot)) @bot.event async def on_ready(): # Changes bot status await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name='You !help if you dumb enough\n' 'Created by March & Sheep')) # await bot.user.edit(avatar=img) print(f'{bot.user} is connected!') print('Logged in as: {0.user.name}'.format(bot)) print('Connected on the following servers:') # Gets servers that the bot is connected to for i in range(len(bot.guilds)): print(' ', bot.guilds[i].name) logger.info('Bot started') bot.run(TOKEN)
[]
[]
[ "DISCORD_TOKEN", "DISCORD_SHEEP", "DISCORD_MATA", "DISCORD_GORDO", "DISCORD_MARCH" ]
[]
["DISCORD_TOKEN", "DISCORD_SHEEP", "DISCORD_MATA", "DISCORD_GORDO", "DISCORD_MARCH"]
python
5
0
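The bot pulls its token and several numeric user IDs from a `.env` file via `load_dotenv()` and `os.getenv`, then wraps the IDs in `int(...)`. If one of those variables is missing, `int(None)` fails with a `TypeError` that does not say which variable was absent. The sketch below adds a named check; the helper is hypothetical and not part of the bot.

```python
import os
from dotenv import load_dotenv

load_dotenv()  # same mechanism the bot uses to populate os.environ from .env

def require_int_env(name):
    """Return int(os.getenv(name)), but report the variable name when it is missing."""
    value = os.getenv(name)
    if value is None:
        raise RuntimeError("required environment variable {} is not set".format(name))
    return int(value)

# e.g. sheep = require_int_env("DISCORD_SHEEP")
```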
example/bmap_example.py
import json import os from pyecharts import options as opts from pyecharts.charts import BMap, Page from pyecharts.faker import Collector, Faker from pyecharts.globals import BMapType C = Collector() BAIDU_MAP_AK = os.environ.get("BAIDU_MAP_AK", "FAKE_AK") @C.funcs def bmap_base() -> BMap: c = ( BMap() .add_schema(baidu_ak=BAIDU_MAP_AK, center=[120.13066322374, 30.240018034923]) .add( "bmap", [list(z) for z in zip(Faker.provinces, Faker.values())], label_opts=opts.LabelOpts(formatter="{b}"), ) .set_global_opts(title_opts=opts.TitleOpts(title="BMap-基本示例")) ) return c @C.funcs def bmap_heatmap() -> BMap: c = ( BMap() .add_schema(baidu_ak=BAIDU_MAP_AK, center=[120.13066322374, 30.240018034923]) .add( "bmap", [list(z) for z in zip(Faker.provinces, Faker.values())], type_="heatmap", label_opts=opts.LabelOpts(formatter="{b}"), ) .set_global_opts( title_opts=opts.TitleOpts(title="BMap-热力图"), visualmap_opts=opts.VisualMapOpts(), ) ) return c @C.funcs def bmap_lines() -> BMap: with open( os.path.join("fixtures", "hangzhou-tracks.json"), "r", encoding="utf-8" ) as f: j = json.load(f) c = ( BMap() .add_schema( baidu_ak=BAIDU_MAP_AK, center=[120.13066322374, 30.240018034923], zoom=14, is_roam=True, map_style={ "styleJson": [ { "featureType": "water", "elementType": "all", "stylers": {"color": "#d1d1d1"}, }, { "featureType": "land", "elementType": "all", "stylers": {"color": "#f3f3f3"}, }, { "featureType": "railway", "elementType": "all", "stylers": {"visibility": "off"}, }, { "featureType": "highway", "elementType": "all", "stylers": {"color": "#fdfdfd"}, }, { "featureType": "highway", "elementType": "labels", "stylers": {"visibility": "off"}, }, { "featureType": "arterial", "elementType": "geometry", "stylers": {"color": "#fefefe"}, }, { "featureType": "arterial", "elementType": "geometry.fill", "stylers": {"color": "#fefefe"}, }, { "featureType": "poi", "elementType": "all", "stylers": {"visibility": "off"}, }, { "featureType": "green", "elementType": "all", "stylers": {"visibility": "off"}, }, { "featureType": "subway", "elementType": "all", "stylers": {"visibility": "off"}, }, { "featureType": "manmade", "elementType": "all", "stylers": {"color": "#d1d1d1"}, }, { "featureType": "local", "elementType": "all", "stylers": {"color": "#d1d1d1"}, }, { "featureType": "arterial", "elementType": "labels", "stylers": {"visibility": "off"}, }, { "featureType": "boundary", "elementType": "all", "stylers": {"color": "#fefefe"}, }, { "featureType": "building", "elementType": "all", "stylers": {"color": "#d1d1d1"}, }, { "featureType": "label", "elementType": "labels.text.fill", "stylers": {"color": "#999999"}, }, ] }, ) .add( "", type_="lines", data_pair=j, is_polyline=True, is_large=True, linestyle_opts=opts.LineStyleOpts(color="purple", opacity=0.6, width=1), ) .add_control_panel( maptype_control_opts=opts.BMapTypeControlOpts( type_=BMapType.MAPTYPE_CONTROL_DROPDOWN ), scale_control_opts=opts.BMapScaleControlOpts(), overview_map_opts=opts.BMapOverviewMapControlOpts(is_open=True), ) .set_global_opts(title_opts=opts.TitleOpts(title="BMap-杭州热门步行路线")) ) return c Page().add(*[fn() for fn, _ in C.charts]).render()
[]
[]
[ "BAIDU_MAP_AK" ]
[]
["BAIDU_MAP_AK"]
python
1
0
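The pyecharts example reads the Baidu Maps key with `os.environ.get("BAIDU_MAP_AK", "FAKE_AK")`, so the chart code still runs without a real key, but the rendered map cannot load tiles. A small variation that surfaces the fallback is sketched below; the warning is an addition of the sketch, not pyecharts behaviour.

```python
import os
import warnings

def get_baidu_ak(default="FAKE_AK"):
    """Fetch the key the way the example does, but warn when the placeholder is used."""
    ak = os.environ.get("BAIDU_MAP_AK", default)
    if ak == default:
        warnings.warn("BAIDU_MAP_AK is not set; rendered maps will not load tiles")
    return ak

BAIDU_MAP_AK = get_baidu_ak()
```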
api/v1/app.py
#!/usr/bin/python3 """ Flask App that integrates with AirBnB static HTML Template """ from api.v1.views import app_views from flask import Flask, jsonify, make_response, render_template, url_for from flask_cors import CORS, cross_origin from flasgger import Swagger from models import storage import os from werkzeug.exceptions import HTTPException # Global Flask Application Variable: app app = Flask(__name__) swagger = Swagger(app) # global strict slashes app.url_map.strict_slashes = False # flask server environmental setup host = os.getenv('HBNB_API_HOST', '0.0.0.0') port = os.getenv('HBNB_API_PORT', 5000) # Cross-Origin Resource Sharing cors = CORS(app, resources={r"/api/v1/*": {"origins": "*"}}) # app_views BluePrint defined in api.v1.views app.register_blueprint(app_views) # begin flask page rendering @app.teardown_appcontext def teardown_db(exception): """ after each request, this method calls .close() (i.e. .remove()) on the current SQLAlchemy Session """ storage.close() @app.errorhandler(404) def handle_404(exception): """ handles 404 errors, in the event that global error handler fails """ code = exception.__str__().split()[0] description = exception.description message = {'error': description} return make_response(jsonify(message), code) @app.errorhandler(400) def handle_400(exception): """ handles 400 errors, in the event that global error handler fails """ code = exception.__str__().split()[0] description = exception.description message = {'error': description} return make_response(jsonify(message), code) @app.errorhandler(Exception) def global_error_handler(err): """ Global Route to handle All Error Status Codes """ if isinstance(err, HTTPException): if type(err).__name__ == 'NotFound': err.description = "Not found" message = {'error': err.description} code = err.code else: message = {'error': err} code = 500 return make_response(jsonify(message), code) def setup_global_errors(): """ This updates HTTPException Class with custom error function """ for cls in HTTPException.__subclasses__(): app.register_error_handler(cls, global_error_handler) if __name__ == "__main__": """ MAIN Flask App """ # initializes global error handling setup_global_errors() # start Flask app app.run(host=host, port=port)
[]
[]
[ "HBNB_API_HOST", "HBNB_API_PORT" ]
[]
["HBNB_API_HOST", "HBNB_API_PORT"]
python
2
0
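api/v1/app.py above resolves its bind address with os.getenv('HBNB_API_HOST', '0.0.0.0') and os.getenv('HBNB_API_PORT', 5000). Because os.getenv returns a string whenever the variable is set, the port ends up as an int only when the default is used. A small hedged sketch of the same lookup with an explicit cast follows; the helper name api_bind_address is illustrative and not part of the project.

import os


def api_bind_address(default_host: str = "0.0.0.0", default_port: int = 5000):
    """Resolve host/port the way api/v1/app.py does, but cast the port to int.

    os.getenv returns a str whenever the variable is set, so without the cast
    the port is sometimes an int (the default) and sometimes a str. Flask
    tolerates both, but an explicit int keeps the type predictable.
    """
    host = os.getenv("HBNB_API_HOST", default_host)
    port = int(os.getenv("HBNB_API_PORT", default_port))
    return host, port


if __name__ == "__main__":
    host, port = api_bind_address()
    print(f"would serve on {host}:{port}")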
src/backend/api/utils/rclone_connection.py
from collections import defaultdict import functools import json import logging import subprocess import threading import time import os from .abstract_connection import AbstractConnection, RcloneException from .hashsum_job_queue import HashsumJobQueue from .copy_job_queue import CopyJobQueue class RcloneConnection(AbstractConnection): def __init__(self): self._hashsum_job_queue = HashsumJobQueue() self._copy_job_queue = CopyJobQueue() self._job_status = defaultdict(functools.partial(defaultdict, str)) # Mapping from id to status dict self._job_text = defaultdict(str) self._job_error_text = defaultdict(str) self._job_percent = defaultdict(int) self._job_exitstatus = {} self._stop_events = {} # Mapping from id to threading.Event self._latest_job_id = 0 def verify(self, data): credentials = self._formatCredentials(data, name='current') user = data.owner bucket = getattr(data, 'bucket', None) if bucket is None: bucket = '' command = [ 'sudo', '-E', '-u', user, '/usr/local/bin/rclone', '--config=/dev/null', 'lsjson', 'current:{}'.format(bucket), ] self._log_command(command, credentials) try: result = self._execute(command, credentials) return { 'result': True, 'message': 'Success', } except subprocess.CalledProcessError as e: returncode = e.returncode return { 'result': False, 'message': 'Exit status {}'.format(returncode), } def ls(self, data, path): credentials = self._formatCredentials(data, name='current') user = data.owner command = [ 'sudo', '-E', '-u', user, '/usr/local/bin/rclone', '--config=/dev/null', 'lsjson', 'current:{}'.format(path), ] self._log_command(command, credentials) try: result = self._execute(command, credentials) files = json.loads(result) return { 'files': files, 'path': path, } except subprocess.CalledProcessError as e: raise RcloneException(str(e)) def mkdir(self, data, path): credentials = self._formatCredentials(data, name='current') user = data.owner command = [ 'sudo', '-E', '-u', user, '/usr/local/bin/rclone', '--config=/dev/null', 'touch', 'current:{}/.motuz_keep'.format(path), ] self._log_command(command, credentials) try: result = self._execute(command, credentials) return { 'message': 'Success', } except subprocess.CalledProcessError as e: raise RcloneException(str(e)) def copy(self, src_data, src_resource_path, dst_data, dst_resource_path, user, copy_links, job_id ): credentials = {} option_exclude_dot_snapshot = '' # HACKHACK: remove once https://github.com/rclone/rclone/issues/2425 is addressed if src_data is None: # Local src = src_resource_path if os.path.isdir(src): option_exclude_dot_snapshot = '--exclude=\\.snapshot/' else: credentials.update(self._formatCredentials(src_data, name='src')) src = 'src:{}'.format(src_resource_path) if dst_data is None: # Local dst = dst_resource_path else: credentials.update(self._formatCredentials(dst_data, name='dst')) dst = 'dst:{}'.format(dst_resource_path) if copy_links: option_copy_links = '--copy-links' else: option_copy_links = '' command = [ 'sudo', '-E', '-u', user, '/usr/local/bin/rclone', '--config=/dev/null', '--s3-acl', 'bucket-owner-full-control', option_exclude_dot_snapshot, '--contimeout=5m', 'copyto', src, dst, option_copy_links, '--progress', '--stats', '2s', ] command = [cmd for cmd in command if len(cmd) > 0] self._log_command(command, credentials) try: self._copy_job_queue.push(command, credentials, job_id) except RcloneException as e: raise RcloneException(str(e)) return job_id def copy_text(self, job_id): return self._copy_job_queue.copy_text(job_id) def copy_error_text(self, job_id): return 
self._copy_job_queue.copy_error_text(job_id) def copy_percent(self, job_id): return self._copy_job_queue.copy_percent(job_id) def copy_stop(self, job_id): self._copy_job_queue.copy_stop(job_id) def copy_finished(self, job_id): return self._copy_job_queue.copy_finished(job_id) def copy_exitstatus(self, job_id): return self._copy_job_queue.copy_exitstatus(job_id) def md5sum(self, data, resource_path, user, job_id, download=False, ): credentials = {} option_exclude_dot_snapshot = '' # HACKHACK: remove once https://github.com/rclone/rclone/issues/2425 is addressed if data is None: # Local src = resource_path download = False if os.path.isdir(src): option_exclude_dot_snapshot = '--exclude=\\.snapshot/' else: credentials.update(self._formatCredentials(data, name='src')) src = 'src:{}'.format(resource_path) command = [ 'sudo', '-E', '-u', user, '/usr/local/bin/rclone', '--config=/dev/null', 'md5sum', src, option_exclude_dot_snapshot, ] command = [cmd for cmd in command if len(cmd) > 0] self._log_command(command, credentials) try: self._hashsum_job_queue.push(command, credentials, job_id, download) except RcloneException as e: raise RcloneException(str(e)) return job_id def hashsum_text(self, job_id): return self._hashsum_job_queue.hashsum_text(job_id) def hashsum_error_text(self, job_id): return self._hashsum_job_queue.hashsum_error_text(job_id) def hashsum_percent(self, job_id): return self._hashsum_job_queue.hashsum_percent(job_id) def hashsum_stop(self, job_id): self._hashsum_job_queue.hashsum_stop(job_id) def hashsum_finished(self, job_id): return self._hashsum_job_queue.hashsum_finished(job_id) def hashsum_exitstatus(self, job_id): return self._hashsum_job_queue.hashsum_exitstatus(job_id) def hashsum_delete(self, job_id): return self._hashsum_job_queue.hashsum_delete(job_id) def _log_command(self, command, credentials): sanitized_credentials = {} for key, value in credentials.items(): if should_log_full_credential(key): sanitized_credentials[key] = value elif should_log_partial_credential(key): sanitized_credentials[key] = '***' + value[-4:] else: sanitized_credentials[key] = '***' bash_command = "{} {}".format( ' '.join("{}='{}'".format(key, value) for key, value in sanitized_credentials.items()), ' '.join(command), ) logging.info(bash_command) return bash_command def _formatCredentials(self, data, name): """ Credentials are of the form RCLONE_CONFIG_CURRENT_TYPE=s3 ^ ^ ^ ^ [mandatory ][name ][key][value] """ prefix = "RCLONE_CONFIG_{}".format(name.upper()) credentials = {} credentials['{}_TYPE'.format(prefix)] = data.type def _addCredential(env_key, data_key, *, value_functor=None): value = getattr(data, data_key, None) if value is not None: if value_functor is not None: value = value_functor(value) credentials[env_key] = value if data.type == 's3': _addCredential( '{}_REGION'.format(prefix), 's3_region' ) _addCredential( '{}_ACCESS_KEY_ID'.format(prefix), 's3_access_key_id' ) _addCredential( '{}_SECRET_ACCESS_KEY'.format(prefix), 's3_secret_access_key' ) _addCredential( '{}_ENDPOINT'.format(prefix), 's3_endpoint' ) _addCredential( '{}_V2_AUTH'.format(prefix), 's3_v2_auth' ) elif data.type == 'azureblob': _addCredential( '{}_ACCOUNT'.format(prefix), 'azure_account' ) _addCredential( '{}_KEY'.format(prefix), 'azure_key' ) _addCredential( '{}_SAS_URL'.format(prefix), 'azure_sas_url' ) elif data.type == 'swift': _addCredential( '{}_USER'.format(prefix), 'swift_user' ) _addCredential( '{}_KEY'.format(prefix), 'swift_key' ) _addCredential( '{}_AUTH'.format(prefix), 'swift_auth' ) 
_addCredential( '{}_TENANT'.format(prefix), 'swift_tenant' ) elif data.type == 'google cloud storage': _addCredential( '{}_CLIENT_ID'.format(prefix), 'gcp_client_id' ) _addCredential( '{}_SERVICE_ACCOUNT_CREDENTIALS'.format(prefix), 'gcp_service_account_credentials' ) _addCredential( '{}_PROJECT_NUMBER'.format(prefix), 'gcp_project_number' ) _addCredential( '{}_OBJECT_ACL'.format(prefix), 'gcp_object_acl' ) _addCredential( '{}_BUCKET_ACL'.format(prefix), 'gcp_bucket_acl' ) elif data.type == 'sftp': _addCredential( '{}_HOST'.format(prefix), 'sftp_host', ) _addCredential( '{}_PORT'.format(prefix), 'sftp_port', ) _addCredential( '{}_USER'.format(prefix), 'sftp_user', ) _addCredential( '{}_PASS'.format(prefix), 'sftp_pass', value_functor=self._obscure, ) _addCredential( '{}_KEY_FILE'.format(prefix), 'sftp_key_file', ) elif data.type == 'dropbox': _addCredential( '{}_TOKEN'.format(prefix), 'dropbox_token', ) elif data.type == 'onedrive': _addCredential( '{}_TOKEN'.format(prefix), 'onedrive_token', ) _addCredential( '{}_DRIVE_ID'.format(prefix), 'onedrive_drive_id', ) _addCredential( '{}_DRIVE_TYPE'.format(prefix), 'onedrive_drive_type', ) elif data.type == 'webdav': _addCredential( '{}_URL'.format(prefix), 'webdav_url', ) _addCredential( '{}_USER'.format(prefix), 'webdav_user', ) _addCredential( '{}_PASS'.format(prefix), 'webdav_pass', value_functor=self._obscure, ) else: logging.error("Connection type unknown: {}".format(data.type)) return credentials def _job_id_exists(self, job_id): return job_id in self._job_status def _obscure(self, password): """ Calls `rclone obscure password` and returns the result """ return self._execute(["rclone", "obscure", password]) def _execute(self, command, env=None): if env is None: env = {} full_env = os.environ.copy() full_env.update(env) try: byteOutput = subprocess.check_output( command, stderr=subprocess.PIPE, env=full_env ) output = byteOutput.decode('UTF-8').rstrip() return output except subprocess.CalledProcessError as err: if (err.stderr is None): raise stderr = err.stderr.decode('UTF-8').strip() if len(stderr) == 0: raise raise RcloneException(stderr) def should_log_full_credential(key): """ Returns true if we should log the value of the credential given the key (name) of the credential For robustness, prefer an allowlist over a blocklist """ suffix_allowlist = [ # generic '_TYPE', # s3 '_REGION', '_ENDPOINT', '_V2_AUTH', # azureblob '_ACCOUNT', # swift '_USER', '_AUTH', '_TENANT', # google cloud storage '_PROJECT_NUMBER', '_OBJECT_ACL', '_BUCKET_ACL', # sftp '_HOST', '_PORT', '_USER', '_KEY_FILE', # dropbox # onedrive '_DRIVE_ID', '_DRIVE_TYPE', # webdav '_URL', '_USER', ] return any(key.endswith(suffix) for suffix in suffix_allowlist) def should_log_partial_credential(key): """ Returns true if we should log the last 4 characters the value of the credential given the key (name) of the credential. 
For robustness, prefer an allowlist over a blocklist """ suffix_allowlist = [ # s3 '_ACCESS_KEY_ID', # google cloud storage '_CLIENT_ID', ] return any(key.endswith(suffix) for suffix in suffix_allowlist) def main(): """ Can run as export MOTUZ_REGION='<add-here>' export MOTUZ_ACCESS_KEY_ID='<add-here>' export MOTUZ_SECRET_ACCESS_KEY='<add-here>' python -m utils.rclone_connection """ import time import os class CloudConnection: pass data = CloudConnection() data.__dict__ = { 'type': 's3', 'owner': 'aicioara', 's3_region': os.environ['MOTUZ_REGION'], 's3_access_key_id': os.environ['MOTUZ_ACCESS_KEY_ID'], 's3_secret_access_key': os.environ['MOTUZ_SECRET_ACCESS_KEY'], } connection = RcloneConnection() # result = connection.ls(data, '/motuz-test/') # print(result) # return import random import json id = connection.md5sum( data, 'motuz-test/test/', 'aicioara', random.randint(1, 10000000), download=True, ) while not connection.hashsum_finished(id): print(json.dumps(connection.hashsum_text(id))) time.sleep(1) print(json.dumps(connection.hashsum_text(id))) # connection.copy( # src_data=None, # Local # src_resource_path='/tmp/motuz/mb_blob.bin', # dst_data=data, # dst_resource_path='/fh-ctr-mofuz-test/hello/world/{}'.format(random.randint(10, 10000)), # ) # while not connection.copy_finished(job_id): # print(connection.copy_percent(job_id)) # time.sleep(0.1) if __name__ == '__main__': main()
[]
[]
[ "MOTUZ_REGION", "MOTUZ_SECRET_ACCESS_KEY", "MOTUZ_ACCESS_KEY_ID" ]
[]
["MOTUZ_REGION", "MOTUZ_SECRET_ACCESS_KEY", "MOTUZ_ACCESS_KEY_ID"]
python
3
0
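rclone_connection.py above never writes an rclone config file: it passes --config=/dev/null and injects per-remote settings as RCLONE_CONFIG_<NAME>_* environment variables, which _execute layers on top of os.environ before spawning the subprocess. The sketch below reproduces that pattern in isolation (no sudo, no job queues, no error wrapping); it assumes rclone is on PATH and uses a secret-free local remote purely for illustration.

import os
import subprocess


def run_rclone_with_env(command, credentials=None):
    """Run rclone with extra RCLONE_CONFIG_* variables layered on the current
    environment, mirroring a simplified RcloneConnection._execute."""
    env = os.environ.copy()
    env.update(credentials or {})
    out = subprocess.check_output(command, stderr=subprocess.PIPE, env=env)
    return out.decode("utf-8").rstrip()


if __name__ == "__main__":
    # A 'local' remote needs no secrets; a real S3 remote would instead set
    # RCLONE_CONFIG_CURRENT_ACCESS_KEY_ID / RCLONE_CONFIG_CURRENT_SECRET_ACCESS_KEY, etc.
    creds = {"RCLONE_CONFIG_CURRENT_TYPE": "local"}
    print(run_rclone_with_env(
        ["rclone", "--config=/dev/null", "lsjson", "current:/tmp"],
        creds,
    ))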
ansible/config/manager.py
# Copyright (c) 2017 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import sys import tempfile import yaml from collections import namedtuple from ansible.config.data import ConfigData from ansible.errors import AnsibleOptionsError, AnsibleError from ansible.module_utils.six import string_types from ansible.module_utils.six.moves import configparser from ansible.module_utils._text import to_text, to_bytes, to_native from ansible.module_utils.parsing.convert_bool import boolean from ansible.parsing.quoting import unquote from ansible.utils.path import unfrackpath from ansible.utils.path import makedirs_safe Plugin = namedtuple('Plugin', 'name type') Setting = namedtuple('Setting', 'name value origin type') # FIXME: see if we can unify in module_utils with similar function used by argspec def ensure_type(value, value_type, origin=None): ''' return a configuration variable with casting :arg value: The value to ensure correct typing of :kwarg value_type: The type of the value. This can be any of the following strings: :boolean: sets the value to a True or False value :integer: Sets the value to an integer or raises a ValueType error :float: Sets the value to a float or raises a ValueType error :list: Treats the value as a comma separated list. Split the value and return it as a python list. :none: Sets the value to None :path: Expands any environment variables and tilde's in the value. :tmp_path: Create a unique temporary directory inside of the directory specified by value and return its path. :pathlist: Treat the value as a typical PATH string. (On POSIX, this means colon separated strings.) Split the value and then expand each part for environment variables and tildes. ''' basedir = None if origin and os.path.isabs(origin) and os.path.exists(origin): basedir = origin if value_type: value_type = value_type.lower() if value_type in ('boolean', 'bool'): value = boolean(value, strict=False) elif value: if value_type in ('integer', 'int'): value = int(value) elif value_type == 'float': value = float(value) elif value_type == 'list': if isinstance(value, string_types): value = [x.strip() for x in value.split(',')] elif value_type == 'none': if value == "None": value = None elif value_type == 'path': value = resolve_path(value, basedir=basedir) elif value_type in ('tmp', 'temppath', 'tmppath'): value = resolve_path(value, basedir=basedir) if not os.path.exists(value): makedirs_safe(value, 0o700) prefix = 'ansible-local-%s' % os.getpid() value = tempfile.mkdtemp(prefix=prefix, dir=value) elif value_type == 'pathspec': if isinstance(value, string_types): value = value.split(os.pathsep) value = [resolve_path(x, basedir=basedir) for x in value] elif value_type == 'pathlist': if isinstance(value, string_types): value = value.split(',') value = [resolve_path(x, basedir=basedir) for x in value] # defaults to string types elif isinstance(value, string_types): value = unquote(value) return to_text(value, errors='surrogate_or_strict', nonstring='passthru') # FIXME: see if this can live in utils/path def resolve_path(path, basedir=None): ''' resolve relative or 'varaible' paths ''' if '{{CWD}}' in path: # allow users to force CWD using 'magic' {{CWD}} path = path.replace('{{CWD}}', os.getcwd()) return unfrackpath(path, follow=False, basedir=basedir) # FIXME: generic file type? 
def get_config_type(cfile): ftype = None if cfile is not None: ext = os.path.splitext(cfile)[-1] if ext in ('.ini', '.cfg'): ftype = 'ini' elif ext in ('.yaml', '.yml'): ftype = 'yaml' else: raise AnsibleOptionsError("Unsupported configuration file extension for %s: %s" % (cfile, to_native(ext))) return ftype # FIXME: can move to module_utils for use for ini plugins also? def get_ini_config_value(p, entry): ''' returns the value of last ini entry found ''' value = None if p is not None: try: value = p.get(entry.get('section', 'defaults'), entry.get('key', ''), raw=True) except: # FIXME: actually report issues here pass return value def find_ini_config_file(): ''' Load INI Config File order(first found is used): ENV, CWD, HOME, /etc/ansible ''' # FIXME: eventually deprecate ini configs path0 = os.getenv("ANSIBLE_CONFIG", None) if path0 is not None: path0 = unfrackpath(path0, follow=False) if os.path.isdir(path0): path0 += "/ansible.cfg" try: path1 = os.getcwd() + "/ansible.cfg" except OSError: path1 = None path2 = unfrackpath("~/.ansible.cfg", follow=False) path3 = "/etc/ansible/ansible.cfg" for path in [path0, path1, path2, path3]: if path is not None and os.path.exists(path): break else: path = None return path class ConfigManager(object): UNABLE = [] DEPRECATED = [] def __init__(self, conf_file=None): self._base_defs = {} self._plugins = {} self._parser = None self._config_file = conf_file self.data = ConfigData() # FIXME: make dynamic? scan for more? make it's own method? # Create configuration definitions from source bconfig_def = to_bytes('%s/base.yml' % os.path.dirname(__file__)) if os.path.exists(bconfig_def): with open(bconfig_def, 'rb') as config_def: self._base_defs = yaml.safe_load(config_def) else: raise AnsibleError("Missing base configuration definition file (bad install?): %s" % to_native(bconfig_def)) if self._config_file is None: # set config using ini self._config_file = find_ini_config_file() if self._config_file: if os.path.exists(self._config_file): # initialize parser and read config self._parse_config_file() # update constants self.update_config_data() def _parse_config_file(self, cfile=None): ''' return flat configuration settings from file(s) ''' # TODO: take list of files with merge/nomerge if cfile is None: cfile = self._config_file ftype = get_config_type(cfile) if cfile is not None: if ftype == 'ini': self._parser = configparser.ConfigParser() try: self._parser.read(cfile) except configparser.Error as e: raise AnsibleOptionsError("Error reading config file (%s): %s" % (cfile, to_native(e))) # FIXME: this should eventually handle yaml config files #elif ftype == 'yaml': # with open(cfile, 'rb') as config_stream: # self._parser = yaml.safe_load(config_stream) else: raise AnsibleOptionsError("Unsupported configuration file type: %s" % to_native(ftype)) def _find_yaml_config_files(self): ''' Load YAML Config Files in order, check merge flags, keep origin of settings''' pass def get_plugin_options(self, plugin_type, name, variables=None): options = {} defs = self.get_configuration_definitions(plugin_type, name) for option in defs: options[option] = self.get_config_value(option, plugin_type=plugin_type, plugin_name=name, variables=variables) return options def get_configuration_definitions(self, plugin_type=None, name=None): ''' just list the possible settings, either base or for specific plugins or plugin ''' ret = {} if plugin_type is None: ret = self._base_defs elif name is None: ret = self._plugins.get(plugin_type, {}) else: ret = self._plugins.get(plugin_type, 
{}).get(name, {}) return ret def _loop_entries(self, container, entry_list): ''' repeat code for value entry assignment ''' value = None origin = None for entry in entry_list: name = entry.get('name') temp_value = container.get(name, None) if temp_value is not None: # only set if env var is defined value = temp_value origin = name # deal with deprecation of setting source, if used if 'deprecated' in entry: self.DEPRECATED.append((entry['name'], entry['deprecated'])) return value, origin def get_config_value(self, config, cfile=None, plugin_type=None, plugin_name=None, variables=None): ''' wrapper ''' value, _drop = self.get_config_value_and_origin(config, cfile=cfile, plugin_type=plugin_type, plugin_name=plugin_name, variables=variables) return value def get_config_value_and_origin(self, config, cfile=None, plugin_type=None, plugin_name=None, variables=None): ''' Given a config key figure out the actual value and report on the origin of the settings ''' if cfile is None: cfile = self._config_file # Note: sources that are lists listed in low to high precedence (last one wins) value = None defs = {} if plugin_type is None: defs = self._base_defs elif plugin_name is None: defs = self._plugins[plugin_type] else: defs = self._plugins[plugin_type][plugin_name] # Use 'variable overrides' if present, highest precedence, but only present when querying running play if variables: value, origin = self._loop_entries(variables, defs[config]['vars']) origin = 'var: %s' % origin # env vars are next precedence if value is None and defs[config].get('env'): value, origin = self._loop_entries(os.environ, defs[config]['env']) origin = 'env: %s' % origin # try config file entries next, if we have one if value is None and cfile is not None: ftype = get_config_type(cfile) if ftype and defs[config].get(ftype): if ftype == 'ini': # load from ini config try: # FIXME: generaelize _loop_entries to allow for files also, most of this code is dupe for ini_entry in defs[config]['ini']: temp_value = get_ini_config_value(self._parser, ini_entry) if temp_value is not None: value = temp_value origin = cfile if 'deprecated' in ini_entry: self.DEPRECATED.append(('[%s]%s' % (ini_entry['section'], ini_entry['key']), ini_entry['deprecated'])) except Exception as e: sys.stderr.write("Error while loading ini config %s: %s" % (cfile, to_native(e))) elif ftype == 'yaml': pass # FIXME: implement, also , break down key from defs (. notation???) 
origin = cfile ''' # for plugins, try using existing constants, this is for backwards compatiblity if plugin_name and defs[config].get('constants'): value, origin = self._loop_entries(self.data, defs[config]['constants']) origin = 'constant: %s' % origin ''' # set default if we got here w/o a value if value is None: value = defs[config].get('default') origin = 'default' # skip typing as this is a temlated default that will be resolved later in constants, which has needed vars if plugin_type is None and isinstance(value, string_types) and (value.startswith('{{') and value.endswith('}}')): return value, origin # ensure correct type try: value = ensure_type(value, defs[config].get('type'), origin=origin) except Exception as e: self.UNABLE.append(config) # deal with deprecation of the setting if 'deprecated' in defs[config] and origin != 'default': self.DEPRECATED.append((config, defs[config].get('deprecated'))) return value, origin def initialize_plugin_configuration_definitions(self, plugin_type, name, defs): if plugin_type not in self._plugins: self._plugins[plugin_type] = {} self._plugins[plugin_type][name] = defs def update_config_data(self, defs=None, configfile=None): ''' really: update constants ''' if defs is None: defs = self._base_defs if configfile is None: configfile = self._config_file if not isinstance(defs, dict): raise AnsibleOptionsError("Invalid configuration definition type: %s for %s" % (type(defs), defs)) # update the constant for config file self.data.update_setting(Setting('CONFIG_FILE', configfile, '', 'string')) origin = None # env and config defs can have several entries, ordered in list from lowest to highest precedence for config in defs: if not isinstance(defs[config], dict): raise AnsibleOptionsError("Invalid configuration definition '%s': type is %s" % (to_native(config), type(defs[config]))) # get value and origin value, origin = self.get_config_value_and_origin(config, configfile) # set the constant self.data.update_setting(Setting(config, value, origin, defs[config].get('type', 'string')))
[]
[]
[ "ANSIBLE_CONFIG" ]
[]
["ANSIBLE_CONFIG"]
python
1
0
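find_ini_config_file in ansible/config/manager.py above resolves the ini configuration in a fixed order: the ANSIBLE_CONFIG environment variable (a file, or a directory containing ansible.cfg), then ansible.cfg in the current directory, then ~/.ansible.cfg, then /etc/ansible/ansible.cfg. The standalone sketch below mirrors that search order using plain os.path instead of Ansible's unfrackpath helper; the function name locate_ansible_cfg is illustrative.

import os


def locate_ansible_cfg():
    """Return the first existing config path in the order used above:
    ANSIBLE_CONFIG, ./ansible.cfg, ~/.ansible.cfg, /etc/ansible/ansible.cfg."""
    candidates = []

    env_path = os.getenv("ANSIBLE_CONFIG")
    if env_path:
        env_path = os.path.expanduser(env_path)
        # If the variable points at a directory, look for ansible.cfg inside it.
        if os.path.isdir(env_path):
            env_path = os.path.join(env_path, "ansible.cfg")
        candidates.append(env_path)

    candidates.append(os.path.join(os.getcwd(), "ansible.cfg"))
    candidates.append(os.path.expanduser("~/.ansible.cfg"))
    candidates.append("/etc/ansible/ansible.cfg")

    for path in candidates:
        if os.path.exists(path):
            return path
    return None


if __name__ == "__main__":
    print(locate_ansible_cfg())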
SigProfilerExtractor/sigpro.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Mon Aug 27 13:39:29 2018 @author: S M Ashiqul Islam (Mishu) ########################################## SigProfilerExtractor (``sigproextractor``) ########################################## SigProfilerExtractor allows de novo extraction of mutational signatures from data generated in a matrix format. The tool identifies the number of operative mutational signatures, their activities in each sample, and the probability for each signature to cause a specific mutation type in a cancer sample. The tool makes use of SigProfilerMatrixGenerator and SigProfilerPlotting. """ import os os.environ["MKL_NUM_THREADS"] = "1" os.environ["NUMEXPR_NUM_THREADS"] = "1" os.environ["OMP_NUM_THREADS"] = "1" import matplotlib.pyplot as plt plt.switch_backend('agg') import scipy import scipy.io import sklearn import numpy as np import pandas as pd import time from SigProfilerExtractor import subroutines as sub import SigProfilerMatrixGenerator from SigProfilerMatrixGenerator.scripts import SigProfilerMatrixGeneratorFunc as datadump import shutil import multiprocessing as mp import SigProfilerExtractor as cosmic import platform import datetime import psutil import sigProfilerPlotting import multiprocessing from SigProfilerExtractor import single_sample as ss def memory_usage(): pid = os.getpid() py = psutil.Process(pid) memoryUse1 = py.memory_info()[0]/2.**30 # memory use in GB...I think print('\n************** Reported Current Memory Use: '+ str(round(memoryUse1,2))+" GB *****************\n") #print('\n************** Reported Current Memory Use: '+ str(round(memoryUse2,2))+" GB *****************\n") def importdata(datatype="matrix"): """ Imports the path of example data. parameters ---------- datatype: A string. Type of data. The type of data should be one of the following: - "vcf": used for vcf format data. - "matrix": used for text format data. This format represents the catalog of mutations seperated by tab. - "matobj": used for matlab object format data. Returns: ------- The path of the example data. 
Example: ------- >>> from SigProfilerExtractor import sigpro as sig >>> data = sig.importdata("table") This "data" variable can be used as a parameter of the "project" argument of the sigProfilerExtractor function """ paths = cosmic.__path__[0] if datatype=="matobj": data = paths+"/data/21_breast_WGS_substitutions.mat" elif datatype=="text" or datatype=="table" or datatype=="matrix": data = paths+"/data/Samples.txt" elif datatype=="csv": data = paths+"/data/csvexample.csv" elif datatype=="vcf": directory = os.getcwd() dataold = paths+"/data/vcftest" datanew = directory+"/vcftest" if not os.path.exists(datanew): shutil.copytree(dataold , datanew) data="vcftest" return data def record_parameters(sysdata, excecution_parameters, start_time): #genomes = sub.normalize_samples(genomes, normalize=False, all_samples=False, number=30000) sysdata.write("\n--------------EXECUTION PARAMETERS--------------\n") sysdata.write("INPUT DATA\n") sysdata.write("\tinput_type: {}\n".format(excecution_parameters["input_type"])) sysdata.write("\toutput: {}\n".format(excecution_parameters["output"])) sysdata.write("\tinput_data: {}\n".format(excecution_parameters["input_data"])) sysdata.write("\treference_genome: {}\n".format(excecution_parameters["reference_genome"])) sysdata.write("\tcontext_types: {}\n".format(excecution_parameters["context_type"])) sysdata.write("\texome: {}\n".format(excecution_parameters["exome"])) sysdata.write("NMF REPLICATES\n") sysdata.write("\tminimum_signatures: {}\n".format(excecution_parameters["minimum_signatures"])) sysdata.write("\tmaximum_signatures: {}\n".format(excecution_parameters["maximum_signatures"])) sysdata.write("\tNMF_replicates: {}\n".format(excecution_parameters["NMF_replicates"])) sysdata.write("NMF ENGINE\n") sysdata.write("\tNMF_init: {}\n".format(excecution_parameters["NMF_init"])) sysdata.write("\tprecision: {}\n".format(excecution_parameters["precision"])) sysdata.write("\tmatrix_normalization: {}\n".format(excecution_parameters["matrix_normalization"])) sysdata.write("\tresample: {}\n".format(excecution_parameters["resample"])) sysdata.write("\tseeds: {}\n".format(excecution_parameters["seeds"])) sysdata.write("\tmin_NMF_iterations: {}\n".format(format(excecution_parameters["min_NMF_iterations"],',d'))) sysdata.write("\tmax_NMF_iterations: {}\n".format(format(excecution_parameters["max_NMF_iterations"], ',d'))) sysdata.write("\tNMF_test_conv: {}\n".format(format(excecution_parameters["NMF_test_conv"],',d'))) sysdata.write("\tNMF_tolerance: {}\n".format(excecution_parameters["NMF_tolerance"])) sysdata.write("CLUSTERING\n") sysdata.write("\tclustering_distance: {}\n".format(excecution_parameters["dist"])) sysdata.write("EXECUTION\n") if excecution_parameters["cpu"]==-1: sysdata.write("\tcpu: {}; Maximum number of CPU is {}\n".format(multiprocessing.cpu_count(), multiprocessing.cpu_count())) else: sysdata.write("\tcpu: {}; Maximum number of CPU is {}\n".format(excecution_parameters["cpu"], multiprocessing.cpu_count())) sysdata.write("\tgpu: {}\n".format(excecution_parameters["gpu"])) sysdata.write("Solution Estimation\n") sysdata.write("\tstability: {}\n".format(excecution_parameters["stability"])) sysdata.write("\tmin_stability: {}\n".format(excecution_parameters["min_stability"])) sysdata.write("\tcombined_stability: {}\n".format(excecution_parameters["combined_stability"])) sysdata.write("COSMIC MATCH\n") sysdata.write("\topportunity_genome: {}\n".format(excecution_parameters["opportunity_genome"])) sysdata.write("\cosmic_version: 
{}\n".format(excecution_parameters["cosmic_version"])) sysdata.write("\tnnls_add_penalty: {}\n".format(excecution_parameters["nnls_add_penalty"])) sysdata.write("\tnnls_remove_penalty: {}\n".format(excecution_parameters["nnls_remove_penalty"])) sysdata.write("\tinitial_remove_penalty: {}\n".format(excecution_parameters["initial_remove_penalty"])) sysdata.write("\tde_novo_fit_penalty: {}\n".format(excecution_parameters["de_novo_fit_penalty"])) sysdata.write("\trefit_denovo_signatures: {}\n".format(excecution_parameters["refit_denovo_signatures"])) sysdata.write("\n-------Analysis Progress------- \n") sysdata.write("[{}] Analysis started: \n".format(str(start_time).split(".")[0])) def sigProfilerExtractor(input_type, output, input_data, reference_genome="GRCh37", opportunity_genome = "GRCh37", cosmic_version=3.1, context_type = "default", exome = False, minimum_signatures=1, maximum_signatures=25, nmf_replicates=500, resample = True, batch_size=1, cpu=-1, gpu=False, nmf_init="random", precision= "single", matrix_normalization= "gmm", seeds= "random", min_nmf_iterations= 10000, max_nmf_iterations=1000000, nmf_test_conv= 10000, nmf_tolerance= 1e-15, nnls_add_penalty=0.05, nnls_remove_penalty=0.01, de_novo_fit_penalty=0.02, initial_remove_penalty=0.05, refit_denovo_signatures=True, clustering_distance="cosine", export_probabilities=True, make_decomposition_plots=True, stability=0.8, min_stability=0.2, combined_stability=1.0, get_all_signature_matrices= False, after_parallel=0): memory_usage() """ Extracts mutational signatures from an array of samples. Parameters ---------- INPUT DATA:- input_type: A string. Type of input. The type of input should be one of the following: - "vcf": used for vcf format inputs. - "matrix": used for table format inputs using a tab seperated file. output: A string. The name of the output folder. The output folder will be generated in the current working directory. input_data: A string. Name of the input folder (in case of "vcf" type input) or the input file (in case of "table" type input). The project file or folder should be inside the current working directory. For the "vcf" type input,the project has to be a folder which will contain the vcf files in vcf format or text formats. The "text"type projects have to be a file. reference_genome: A string, optional. The name of the reference genome. The default reference genome is "GRCh37". This parameter is applicable only if the input_type is "vcf". opportunity_genome: The build or version of the reference signatures for the reference genome. The default opportunity genome is GRCh37. If the input_type is "vcf", the genome_build automatically matches the input reference genome value. context_type: A list of strings, optional. The items in the list defines the mutational contexts to be considered to extract the signatures. The default value is "SBS96,DBS78,ID83". exome: Boolean, optional. Defines if the exomes will be extracted. The default value is "False". NMF RUNS:- minimum_signature: A positive integer, optional. The minimum number of signatures to be extracted. The default value is 1 maximum_signatures: A positive integer, optional. The maximum number of signatures to be extracted. The default value is 10 nmf_replicates: A positive integer, optional. The number of iteration to be performed to extract each number signature. The default value is 100 resample: Boolean, optional. Default is True. If True, add poisson noise to samples by resampling. seeds: Boolean. Default is "random". 
If random, then the seeds for resampling will be random for different analysis. If not random, then seeds will be obtained from a given path of a .txt file that contains a list of seed. NMF RUNS:- matrix_normalization: A string. Method of normalizing the genome matrix before it is analyzed by NMF. Default is "log2". Other options are "gmm", "100X" or "no_normalization". nmf_init: A String. The initialization algorithm for W and H matrix of NMF. Options are 'random', 'nndsvd', 'nndsvda', 'nndsvdar' and 'nndsvd_min' Default is 'nndsvd_min'. precision: A string. Values should be single or double. Default is single. min_nmf_iterations: An integer. Value defines the minimum number of iterations to be completed before NMF converges. Default is 2000. max_nmf_iterations: An integer. Value defines the maximum number of iterations to be completed before NMF converges. Default is 200000 nmf_test_conv: An integer. Value definer the number number of iterations to done between checking next convergence. nmf_tolerance: A float. Value defines the tolerance to achieve to converge. EXECUTION:- cpu: An integer, optional. The number of processors to be used to extract the signatures. The default value is -1 which will use all available processors. gpu:Boolean, optional. Defines if the GPU resource will used if available. Default is False. If True, the GPU resource will be used in the computation. batch_size: An integer. Will be effective only if the GPU is used. Defines the number of NMF replicates to be performed by each CPU during the parallel processing. Default is 1. SOLUTION ESTIMATION THRESH-HOLDS:- stability: A float. Default is 0.8. The cutoff thresh-hold of the average stability. Solutions with average stabilities below this thresh-hold will not be considered. min_stability: A float. Default is 0.2. The cutoff thresh-hold of the minimum stability. Solutions with minimum stabilities below this thresh-hold will not be considered. combined_stability: A float. Default is 1.0. The cutoff thresh-hold of the combined stability (sum of average and minimum stability). Solutions with combined stabilities below this thresh-hold will not be considered. DECOMPOSITION:- de_novo_fit_penalty: Float, optional. Takes any positive float. Default is 0.02. Defines the weak (remove) thresh-hold cutoff to be assigned denovo signatures to a sample. nnls_add_penalty: Float, optional. Takes any positive float. Default is 0.05. Defines the strong (add) thresh-hold cutoff to be assigned COSMIC signatures to a sample. nnls_remove_penalty: Float, optional. Takes any positive float. Default is 0.01. Defines the weak (remove) thresh-hold cutoff to be assigned COSMIC signatures to a sample. initial_remove_penalty: Float, optional. Takes any positive float. Default is 0.05. Defines the initial weak (remove) thresh-hold cutoff to be COSMIC assigned signatures to a sample. refit_denovo_signatures: Boolean, optional. Default is False. If True, then refit the denovo signatures with nnls. make_decomposition_plots: Boolean, optional. Defualt is True. If True, Denovo to Cosmic sigantures decompostion plots will be created as a part the results. OTHERS:- get_all_signature_matrices: A Boolean. If true, the Ws and Hs from all the NMF iterations are generated in the output. export_probabilities: A Boolean. Defualt is True. If False, then doesn't create the probability matrix. 
Returns ------- To learn about the output, please visit https://osf.io/t6j7u/wiki/home/ Examples -------- Examples -------- >>> from SigProfilerExtractor import sigpro as sig # to get input from vcf files >>> path_to_example_folder_containing_vcf_files = sig.importdata("vcf") >>> data = path_to_example_folder_containing_vcf_files # you can put the path to your folder containing the vcf samples >>> sig.sigProfilerExtractor("vcf", "example_output", data, minimum_signatures=1, maximum_signatures=3) Wait untill the excecution is finished. The process may a couple of hours based on the size of the data. Check the current working directory for the "example_output" folder. # to get input from table format (mutation catalog matrix) >>> path_to_example_table = sig.importdata("matrix") >>> data = path_to_example_table # you can put the path to your tab delimited file containing the mutational catalog matrix/table >>> sig.sigProfilerExtractor("matrix", "example_output", data, opportunity_genome="GRCh38", minimum_signatures=1, maximum_signatures=3) Wait untill the excecution is finished. The process may a couple of hours based on the size of the data. Check the results in the "example_output" folder. """ #record the start time start_time = datetime.datetime.now() #set the output variable out_put = output; if gpu == True: import torch if gpu and (torch.cuda.device_count() == 0): raise RuntimeError("GPU not available!") #################################### At first create the system data file #################################### if not os.path.exists(out_put): os.makedirs(out_put) sysdata = open(out_put+"/JOB_METADATA.txt", "w") sysdata.write("THIS FILE CONTAINS THE METADATA ABOUT SYSTEM AND RUNTIME\n\n\n") sysdata.write("-------System Info-------\n") sysdata.write("Operating System Name: "+ platform.uname()[0]+"\n"+"Nodename: "+platform.uname()[1]+"\n"+"Release: "+platform.uname()[2]+"\n"+"Version: "+platform.uname()[3]+"\n") sysdata.write("\n-------Python and Package Versions------- \n") sysdata.write("Python Version: "+str(platform.sys.version_info.major)+"."+str(platform.sys.version_info.minor)+"."+str(platform.sys.version_info.micro)+"\n") sysdata.write("Sigproextractor Version: "+cosmic.__version__+"\n") sysdata.write("SigprofilerPlotting Version: "+sigProfilerPlotting.__version__+"\n") sysdata.write("SigprofilerMatrixGenerator Version: "+SigProfilerMatrixGenerator.__version__+"\n") sysdata.write("Pandas version: "+pd.__version__+"\n") sysdata.write("Numpy version: "+np.__version__+"\n") sysdata.write("Scipy version: "+scipy.__version__+"\n") sysdata.write("Scikit-learn version: "+sklearn.__version__+"\n") #sysdata.write("Nimfa version: "+nimfa.__version__+"\n") #format the project_name first: project = input_data #will use this variable as the parameter for project argument in SigprofilerMatrixGenerator try: if project[-1] != "/": project_name = project.split("/")[-1] #will use this variable as the parameter for project_name argument in SigprofilerMatrixGenerator else: project_name = project.split("/")[-2] except: project_name = "Input from DataFrame" excecution_parameters= {"input_type":input_type, "output":output, "input_data":input_data, "reference_genome":reference_genome, "opportunity_genome":opportunity_genome, "cosmic_version":cosmic_version, "context_type":context_type, "exome":exome, "minimum_signatures":minimum_signatures, "maximum_signatures":maximum_signatures, "NMF_replicates":nmf_replicates, "cpu":cpu, "gpu":gpu, "batch_size":batch_size, "NMF_init":nmf_init, "precision":precision, 
"matrix_normalization":matrix_normalization, "resample":resample, "seeds":seeds, "min_NMF_iterations":min_nmf_iterations, "max_NMF_iterations":max_nmf_iterations, "NMF_test_conv": nmf_test_conv, "NMF_tolerance": nmf_tolerance, "nnls_add_penalty":nnls_add_penalty, "nnls_remove_penalty":nnls_remove_penalty, "initial_remove_penalty":initial_remove_penalty, "de_novo_fit_penalty":de_novo_fit_penalty, "refit_denovo_signatures":refit_denovo_signatures, "dist":clustering_distance, "export_probabilities":export_probabilities, "make_decompostion_plots":make_decomposition_plots, "stability":stability, "min_stability":min_stability, "combined_stability":combined_stability, "get_all_signature_matrices":get_all_signature_matrices} ################################ take the inputs from the mandatory arguments #################################### input_type = input_type; #project = input_data #the variable was already set above ################################ take the inputs from the general optional arguments #################################### startProcess=minimum_signatures ; endProcess=maximum_signatures; #totalIterations=nmf_replicates cpu = cpu hierarchy = False #No use mtype=context_type #init=nmf_init wall=get_all_signature_matrices add_penalty=nnls_add_penalty remove_penalty=nnls_remove_penalty genome_build=opportunity_genome refgen=reference_genome refit_denovo_signatures #set the squence type ("genome" or "exome") for the tmb plot inside the make_final_solution function if exome==False: sequence="genome" if exome==True: sequence="exome" #setting seeds if seeds=="random": excecution_parameters["seeds"]=seeds replicates=list(range(1,nmf_replicates+1)) seed=np.random.randint(0, 10000000, size=nmf_replicates) seeds=pd.DataFrame(list(zip(replicates, seed)), columns=["Replicates","Seeds"]) seeds=seeds.set_index("Replicates") seeds.to_csv(out_put+"/Seeds.txt", sep="\t") else: try: excecution_parameters["seeds"]=seeds seeds=pd.read_csv(seeds,sep="\t", index_col=0) seeds.to_csv(out_put+"/Seeds.txt", sep="\t") seed=np.array(seeds["Seeds"]) except: "Please set valid seeds" if input_type=="text" or input_type =="table" or input_type=="matrix": ################################### For text input files ###################################################### text_file = project title = "" # set the title for plotting if type(text_file)!=str: data=text_file excecution_parameters["input_data"]="Matrix["+str(data.shape[0])+" rows X "+str(data.shape[1])+ " columns]" else: data = pd.read_csv(text_file, sep="\t").iloc[:,:] if data.shape[0]==48: paths = cosmic.__path__[0] feature_map=pd.read_csv(paths+"/data/CN_classes_dictionary.txt", sep="\t", header=None) feature_order=pd.read_csv(paths+"/data/CNV_features.tsv", sep="\t", header=None) if list(data.iloc[:,0])==list(feature_order[0]): pass else: orderlist1=list(feature_map[0]) orderlist2=list(feature_order[0]) #sort the mutation types first step data["Mutation Types"]= pd.Categorical(data["Mutation Types"], orderlist1) data = data.sort_values("Mutation Types") data=data.reset_index() data=data.drop(columns='index') #sort the mutation types second step data["Mutation Types"]=feature_map[1] data["Mutation Types"]= pd.Categorical(data["Mutation Types"], orderlist2) data = data.sort_values("Mutation Types") data=data.dropna(axis=1, inplace=False) data = data.loc[:, (data != 0).any(axis=0)] genomes = data.iloc[:,1:] genomes = np.array(genomes) allgenomes = genomes.copy() # save the allgenomes for the final results #Contruct the indeces of the matrix #setting index and 
columns names of processAvg and exposureAvg index = data.iloc[:,0] colnames = data.columns[1:] allcolnames = colnames.copy() # save the allcolnames for the final results #creating list of mutational type to sync with the vcf type input mtypes = [str(genomes.shape[0])] if mtypes[0] == "78": mtypes = ["DBS78"] elif mtypes[0] == "83": mtypes = ["ID83"] elif mtypes[0] == "48": mtypes = ["CNV48"] elif mtypes[0]=="32": mtypes = ["SV32"] elif mtypes[0]=="96" or "288" or "384" or "1536": mtypes = ["SBS"+mtypes[0]] else: mtypes = ["CH"+mtypes[0]] ############################################################################################################### ########################################################################################################################################################################################### elif input_type=="csv": ################################# For matlab input files ####################################################### filename = project title = "" # set the title for plotting genomes, index, colnames, mtypes = sub.read_csv(filename) allgenomes = genomes.copy() allcolnames = colnames.copy() # Define the mtypes mtypes = [str(genomes.shape[0])] if mtypes[0] == "78": mtypes = ["DINUC"] elif mtypes[0] == "83": mtypes = ["ID"] ################################################################################################################# ########################################################################################################################################################################################### elif input_type=="matobj": ################################# For matlab input files ####################################################### mat_file = project title = "" # set the title for plotting mat = scipy.io.loadmat(mat_file) mat = sub.extract_input(mat) genomes = mat[1] allgenomes = genomes.copy() # save the allgenomes for the final results #Contruct the indeces of the matrix #setting index and columns names of processAvg and exposureAvg index1 = mat[3] index2 = mat[4] index = [] for i, j in zip(index1, index2): index.append(i[0]+"["+j+"]"+i[2]) colnames = np.array(pd.Series(mat[2])) allcolnames = colnames.copy() # save the allcolnames for the final results index = np.array(pd.Series(index)) #creating list of mutational type to sync with the vcf type input mtypes = [str(genomes.shape[0])] if mtypes[0] == "78": mtypes = ["DINUC"] elif mtypes[0] == "83": mtypes = ["ID"] ################################################################################################################# elif input_type=="vcf": ################################# For vcf input files ####################################################### project = project title = project # set the title for plotting refgen = refgen exome = exome #project_name = project.split("/")[-1] data = datadump.SigProfilerMatrixGeneratorFunc(project_name, refgen, project, exome=exome, bed_file=None, chrom_based=False, plot=False, gs=False) # Selecting the mutation types if mtype == ["default"]: mtypes = ["SBS96", "DBS78", "ID83"] elif mtype == "default": mtypes = ["SBS96", "DBS78", "ID83"] else: #mkeys = data.keys() mtype = mtype.upper() mtype = mtype.replace(" ", "") mtypes = mtype.split(",") # ============================================================================= # if any(x not in mkeys for x in mtypes): # raise Exception("Please pass valid mutation types seperated by comma with no space. 
Carefully check (using SigProfilerMatrixGenerator)"\ # "what mutation contexts should be generated by your VCF files. Also please use the uppercase characters") # ============================================================================= #change working directory #set the genome_build genome_build=refgen else: raise ValueError("Please provide a correct input_type. Check help for more details") #recording context types excecution_parameters["context_type"]=",".join(mtypes) record_parameters(sysdata, excecution_parameters, start_time) sysdata.close() ########################################################################################################################################################################################### for m in mtypes: mutation_context = m # we need to rename the m because users input could be SBS96, SBS1536, DBS78, ID83 etc if m.startswith("SBS"): m = m[3:] #removing "SBS" elif m.startswith("DBS"): m = "DINUC" elif m.startswith("ID"): m = "ID" elif m.startswith("CNV"): m="CNV" elif m.startswith("SV"): m="SV" # Determine the types of mutation which will be needed for exporting and copying the files if not (m=="DINUC" or m.startswith("DBS") or m.startswith("ID") or m.startswith("CNV") or m.startswith("SV")): if m.startswith("SBS"): mutation_type = m elif m in ["96","288","384","1536"]: mutation_type="SBS"+m elif m.startswith("78"): mutation_type="DBS78" elif m.startswith("83"): mutation_type="ID83" elif m.startswith("48"): mutation_type="CNV48" elif m.startswith("32"): mutation_type="SV32" else: mutation_type = "CH"+m else: if m == "DINUC" or m.startswith("DBS"): mutation_type = "DBS78" elif m== "ID" or m.startswith("ID"): mutation_type = "ID83" elif m== "CNV" or m.startswith("CNV"): mutation_type = "CNV48" elif m== "SV" or m.startswith("SV"): mutation_type = "SV32" if input_type=="vcf": try: genomes = pd.DataFrame(data[m]) except KeyError: sysdata = open(out_put+"/JOB_METADATA.txt", "a") sysdata.write("Context {} is not available in the current vcf files".format(m)+"\n") print("Context {} is not available in the current vcf files".format(m)) sysdata.close() continue #check if the genome is a nonzero matrix shape= genomes.shape if shape==(0,0): sysdata = open(out_put+"/JOB_METADATA.txt", "a") sysdata.write("Sample is not a nonzero matrix for the mutation context "+ m+"\n") print("Sample is not a nozero matrix for the mutation context "+ m) sysdata.close() continue genomes = genomes.loc[:, (genomes != 0).any(axis=0)] allgenomes = genomes.copy() # save the allgenomes for the final results index = genomes.index.values colnames = genomes.columns allcolnames = colnames.copy() # save the allcolnames for the final results #check if start and end processes are bigger than the number of samples startProcess = min(startProcess, genomes.shape[1]) endProcess = min(endProcess, genomes.shape[1]) #in the plotting funciton "ID" is used as "INDEL" if m=="ID": m="INDEL" #for plotting #create output directories to store all the results output = out_put+"/"+mutation_type est_genomes = np.zeros([1,1]) H_iteration = 1 genomes = np.array(genomes) information =[] layer_directory = output try: if not os.path.exists(layer_directory): os.makedirs(layer_directory) #os.makedirs(output+"/pickle_objects") #os.makedirs(output+"/All solutions") except: print ("The {} folder could not be created".format("output")) fh = open(layer_directory+"/All_solutions_stat.csv", "w") fh.write("Total Signatures,Stability,Matrix Frobenius%,avgStability\n") fh.close() # The following for loop operates to 
extract data from each number of signature all_similirities_list = [] #this list is going to store the dataframes of different similirieties as items minimum_stabilities = [] #similarity_dataframe = pd.DataFrame({"Sample Name": list(colnames)}) # get the cutoff for normatization to handle the hypermutators normalization_cutoff = sub.get_normalization_cutoff(genomes, manual_cutoff=100*genomes.shape[0]) #print("Normalization Cutoff is :", normalization_cutoff) excecution_parameters["normalization_cutoff"]= normalization_cutoff #pass the seed values to inner funtions: excecution_parameters["seeds"]= seed if genomes.shape[1]<endProcess: endProcess=genomes.shape[1] #report the notmatlization criteria sysdata = open(out_put+"/JOB_METADATA.txt", "a") context_start_time=datetime.datetime.now() sysdata.write("\n##################################\n") sysdata.write("\n[{}] Analysis started for {}. Matrix size [{} rows x {} columns]\n".format(str(context_start_time).split(".")[0],mutation_type,genomes.shape[0],genomes.shape[1])) if excecution_parameters["matrix_normalization"]=="gmm": sysdata.write("\n[{}] Normalization GMM with cutoff value set at {}\n". \ format(str(datetime.datetime.now()).split(".")[0], normalization_cutoff)) elif excecution_parameters["matrix_normalization"]=="100X": sysdata.write("\n[{}] Normalization 100X with cutoff value set at {}\n". \ format(str(datetime.datetime.now()).split(".")[0],(genomes.shape[0]*100))) elif excecution_parameters["matrix_normalization"]=="log2": sysdata.write("\n[{}] Normalization Log2\n". \ format(str(datetime.datetime.now()).split(".")[0])) elif excecution_parameters["matrix_normalization"]=="none": sysdata.write("\n[{}] Analysis is proceeding without normalization\n". \ format(str(datetime.datetime.now()).split(".")[0])) else: sysdata.write("\n[{}] Normalization Custom with cutoff value set at {}\n". 
\ format(str(datetime.datetime.now()).split(".")[0],excecution_parameters["matrix_normalization"])) sysdata.close() for i in range(startProcess,endProcess+1): current_time_start = datetime.datetime.now() #memory_usage() processAvg, \ exposureAvg, \ processStd, \ exposureStd, \ avgSilhouetteCoefficients, \ clusterSilhouetteCoefficients, \ finalgenomeErrors, \ finalgenomesReconstructed, \ finalWall, \ finalHall, \ converge_information, \ reconstruction_error, \ processes = sub.decipher_signatures(excecution_parameters, genomes= genomes, mut_context=m, i = i,after_parallel) #denormalize the genomes and exposures #genomes = sub.denormalize_samples(genomes, totalMutations, normalization_value=100000) #exposureStd = sub.denormalize_samples(exposureStd, totalMutations, normalization_value=100000) ####################################################################### add sparsity in the exposureAvg ################################################################# # remove signatures only if the process stability is above a thresh-hold of 0.85 if avgSilhouetteCoefficients> -1.0: stic = time.time() #removing signatures: # ============================================================================= # pool = mp.Pool() # results = [pool.apply_async(sub.remove_all_single_signatures_pool, args=(x,processAvg,exposureAvg,genomes,)) for x in range(genomes.shape[1])] # pooloutput = [p.get() for p in results] # # #print(results) # pool.close() # # for i in range(len(pooloutput)): # #print(results[i]) # exposureAvg[:,i]=pooloutput[i] # ============================================================================= #refitting signatures: #removing signatures: pool = mp.Pool() results = [pool.apply_async(ss.fit_signatures_pool, args=(genomes,processAvg,x,)) for x in range(genomes.shape[1])] pooloutput = [p.get() for p in results] pool.close() for i in range(len(pooloutput)): exposureAvg[:,i]=pooloutput[i][0] stoc = time.time() print ("Optimization time is {} seconds".format(stoc-stic)) #sysdata.write("\nAnalysis of context type {} is ended successfully\n".format(m)) #report progress to the system file: #Get total mutationation for each signature in reverse order and order the signatures from high to low mutation barden signature_total_mutations = np.sum(exposureAvg, axis =1).astype(int) sorted_idx = np.argsort(-signature_total_mutations) processAvg = np.take(processAvg, sorted_idx, axis=1) exposureAvg = np.take(exposureAvg, sorted_idx, axis=0) signature_total_mutations = np.sum(exposureAvg, axis =1).astype(int) processStd=np.take(processStd, sorted_idx, axis=1) exposureStd=np.take(exposureStd, sorted_idx, axis=0) clusterSilhouetteCoefficients=np.take(clusterSilhouetteCoefficients, sorted_idx, axis=0) signature_stats = pd.DataFrame({"Stability": clusterSilhouetteCoefficients, "Total Mutations": signature_total_mutations}) minimum_stabilities.append(round(np.mean(clusterSilhouetteCoefficients),2)) #here minimum stability is the average stability !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! # Compute the estimated genome from the processAvg and exposureAvg est_genomes = np.dot(processAvg, exposureAvg) #check the similarities between the original and estimated genome for each number of signatures all_similarities, cosine_similarities = sub.calculate_similarities(genomes, est_genomes, colnames) #print(totalMutations) ########################################################################################################################################################################## # store the resutls of the loop. 
Here, processStd and exposureStd are standard Errors, NOT STANDARD DEVIATIONS. loopResults = [genomes, processAvg, exposureAvg, processStd, exposureStd, avgSilhouetteCoefficients, clusterSilhouetteCoefficients, signature_total_mutations, all_similarities, signature_stats, reconstruction_error, finalgenomeErrors, finalgenomesReconstructed, converge_information, finalWall, finalHall, processes] information.append([processAvg, exposureAvg, processStd, exposureStd, clusterSilhouetteCoefficients, signature_total_mutations, signature_stats, all_similarities]) #Will be used during hierarchycal approach ################################# Export the results ########################################################### sub.export_information(loopResults, m, layer_directory, index, colnames, wall=wall, sequence=sequence) all_similirities_list.append(all_similarities) # #similarity_dataframe["Total Signatures "+str(processes)] = cosine_similarities current_time_end = datetime.datetime.now() sysdata = open(out_put+"/JOB_METADATA.txt", "a") sysdata.write("\n[{}] {} de novo extraction completed for a total of {} signatures! \nExecution time:{}\n". \ format(str(datetime.datetime.now()).split(".")[0],mutation_type,processes,str(current_time_end-current_time_start).split(".")[0], current_time_end)) sysdata.close() ################################################################################################################ ########################################## Plot Stabiltity vs Reconstruction Error ############################# ################################################################################################################ # Print the Stabiltity vs Reconstruction Error as get the solution as well solution, all_stats = sub.stabVsRError(layer_directory+"/All_solutions_stat.csv", layer_directory, title, all_similirities_list, mtype=mutation_type, stability=stability, min_stability=min_stability, combined_stability=combined_stability) all_stats.insert(1, 'Stability (Avg Silhouette)', minimum_stabilities) #!!!!!!!!!!!!!!!!1 here minimum stability is avg stability all_stats=all_stats.set_index(["Signatures"]) all_stats.to_csv(layer_directory+"/All_solutions_stat.csv", sep = ",") # add more information to results_stat.csv #Set index for the the Similarity Dataframe #similarity_dataframe = similarity_dataframe.set_index("Sample Name") #Add the total mutations of each sample #sample_total_mutations = list(np.sum(genomes, axis =0)) #similarity_dataframe.insert(loc=0, column = "Total Mutations", value = sample_total_mutations) # write the name of Samples and Matrix participating in each Layer. 
layer_genome = pd.DataFrame(genomes) layer_genome = layer_genome.set_index(index) layer_genome.columns = colnames layer_genome = layer_genome.rename_axis("Mutation Types", axis="columns") # ============================================================================= # data_stat_folder = output+"/Data_Stats" # try: # if not os.path.exists(data_stat_folder): # os.makedirs(data_stat_folder) # except: # print ("The {} folder could not be created".format("Data_Stats")) # # layer_genome.to_csv(data_stat_folder+"/Samples.text", sep = "\t", index_label=[layer_genome.columns.name]) # similarity_dataframe.to_csv(data_stat_folder+"/Similatiry_Data_All_Sigs.text", sep = "\t") # del layer_genome # for i in range(startProcess,endProcess+1): # all_similirities_list[i-startProcess].to_csv(data_stat_folder+"/Similatiry_Data_Sig_"+str(i)+".text", sep="\t") # ============================================================================= # record the samples layer_genome.to_csv(output+"/Samples.txt", sep = "\t", index_label=[layer_genome.columns.name]) #similarity_dataframe.to_csv(data_stat_folder+"/Similatiry_Data_All_Sigs"+str(H_iteration)+".text", sep = "\t") del layer_genome ################################### Decompose the new signatures into global signatures ######################### processAvg = information[solution-startProcess][0] exposureAvg = information[solution-startProcess][1] processSTE = information[solution-startProcess][2] signature_stabilities = information[solution-startProcess][4] signature_total_mutations = information[solution-startProcess][5] signature_stats = information[solution-startProcess][6] all_similarities = information[solution-startProcess][7] # create the folder for the final solution/ De Novo Solution layer_directory1 = output+"/Suggested_Solution/"+mutation_type+"_De-Novo_Solution" try: if not os.path.exists(layer_directory1): os.makedirs(layer_directory1) except: print ("The {} folder could not be created".format("output")) # make the texts for signature plotting signature_stabilities = sub.signature_plotting_text(signature_stabilities, "Stability", "float") signature_total_mutations = sub.signature_plotting_text(signature_total_mutations, "Total Mutations", "integer") # make de novo solution(processAvg, allgenomes, layer_directory1) listOfSignatures = sub.make_letter_ids(idlenth = processAvg.shape[1], mtype=mutation_context) allgenomes = pd.DataFrame(allgenomes) exposureAvg = sub.make_final_solution(processAvg, allgenomes, listOfSignatures, layer_directory1, m, index, \ allcolnames, process_std_error = processSTE, signature_stabilities = signature_stabilities, \ signature_total_mutations = signature_total_mutations,denovo_exposureAvg = exposureAvg, \ signature_stats = signature_stats, add_penalty=add_penalty, remove_penalty=remove_penalty, \ initial_remove_penalty=initial_remove_penalty, refit_denovo_signatures=refit_denovo_signatures, de_novo_fit_penalty=de_novo_fit_penalty, sequence=sequence) #try: # create the folder for the final solution/ Decomposed Solution layer_directory2 = output+"/Suggested_Solution/COSMIC_"+mutation_type+"_Decomposed_Solution" try: if not os.path.exists(layer_directory2): os.makedirs(layer_directory2) except: print ("The {} folder could not be created".format("output")) originalProcessAvg=pd.DataFrame(processAvg, index=index) if processAvg.shape[0]==1536: #collapse the 1596 context into 96 only for the deocmposition processAvg = pd.DataFrame(processAvg, index=index) processAvg = processAvg.groupby(processAvg.index.str[1:8]).sum() genomes = 
pd.DataFrame(genomes, index=index) genomes = genomes.groupby(genomes.index.str[1:8]).sum() index = genomes.index processAvg = np.array(processAvg) genomes = np.array(genomes) if processAvg.shape[0]==288: #collapse the 288 context into 96 only for the deocmposition processAvg = pd.DataFrame(processAvg, index=index) processAvg = processAvg.groupby(processAvg.index.str[2:9]).sum() genomes = pd.DataFrame(genomes, index=index) genomes = genomes.groupby(genomes.index.str[2:9]).sum() index = genomes.index processAvg = np.array(processAvg) genomes = np.array(genomes) originalProcessAvg.columns = listOfSignatures final_signatures = sub.signature_decomposition(processAvg, m, layer_directory2, genome_build=genome_build, cosmic_version=cosmic_version, add_penalty=0.05, remove_penalty=0.01, mutation_context=mutation_context, make_decomposition_plots=make_decomposition_plots, originalProcessAvg=originalProcessAvg) # extract the global signatures and new signatures from the final_signatures dictionary globalsigs = final_signatures["globalsigs"] globalsigs = np.array(globalsigs) newsigs = final_signatures["newsigs"] try: processAvg = np.hstack([globalsigs, newsigs]) allsigids = final_signatures["globalsigids"]+final_signatures["newsigids"] except: processAvg=newsigs allsigids=final_signatures["newsigids"] attribution = final_signatures["dictionary"] background_sigs= final_signatures["background_sigs"] genomes = pd.DataFrame(genomes) exposureAvg = sub.make_final_solution(processAvg, genomes, allsigids, layer_directory2, m, index, colnames, \ cosmic_sigs=True, attribution = attribution, denovo_exposureAvg = exposureAvg , background_sigs=background_sigs, add_penalty=add_penalty, remove_penalty=remove_penalty, initial_remove_penalty=initial_remove_penalty, genome_build=genome_build, sequence=sequence,export_probabilities=export_probabilities) sysdata = open(out_put+"/JOB_METADATA.txt", "a") end_time = datetime.datetime.now() sysdata.write("\n[{}] Analysis ended: \n".format(str(end_time).split(".")[0])) sysdata.write("\n-------Job Status------- \n") sysdata.write("Analysis of mutational signatures completed successfully! \nTotal execution time: "+str(end_time-start_time).split(".")[0]+" \nResults can be found in: "+" "+out_put+ " " +" folder") sysdata.close() print("\n\n \nYour Job Is Successfully Completed! Thank You For Using SigProfiler Extractor.\n ")
[]
[]
[ "MKL_NUM_THREADS", "OMP_NUM_THREADS", "NUMEXPR_NUM_THREADS" ]
[]
["MKL_NUM_THREADS", "OMP_NUM_THREADS", "NUMEXPR_NUM_THREADS"]
python
3
0
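The eight metadata fields above follow the column layout given at the top of the dump: environment keeps the argument literals as they appear in source (sometimes still quoted, as in the next record's "\"RELEASE\""), constarg and constargjson keep the bare names, and constargcount / variableargcount count them. The Go sketch below illustrates one plausible normalization step from quoted literal to bare name plus count; the type and field names are assumptions for illustration only, not code taken from any record in this dump.

package main

import (
	"fmt"
	"strings"
)

// envRecord mirrors a subset of the metadata columns seen in this dump
// (field names are assumptions based on the column header).
type envRecord struct {
	Environment []string // literals as found in source, possibly still quoted
	ConstArg    []string // normalized, unquoted names
	ConstCount  int
}

// normalize strips the surrounding quotes that some "environment" entries
// keep (e.g. "\"RELEASE\"" in environment vs "RELEASE" in constarg).
func normalize(lit string) string {
	return strings.Trim(lit, `"`)
}

func main() {
	rec := envRecord{Environment: []string{`"RELEASE"`}}
	for _, lit := range rec.Environment {
		rec.ConstArg = append(rec.ConstArg, normalize(lit))
	}
	rec.ConstCount = len(rec.ConstArg)
	fmt.Println(rec.ConstArg, rec.ConstCount) // [RELEASE] 1
}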
lib/honeycomb/daemon.go
package honeycomb import ( "os" "github.com/gliderlabs/comlab/pkg/com" "github.com/gliderlabs/comlab/pkg/log" "github.com/honeycombio/libhoney-go" ) func (c *Component) AppPreStart() error { libhoney.Init(libhoney.Config{ WriteKey: com.GetString("key"), Dataset: com.GetString("dataset"), SampleRate: 1, }) hostname, _ := os.Hostname() libhoney.AddField("servername", hostname) libhoney.AddField("release", os.Getenv("RELEASE")) log.RegisterObserver(new(honeylogger)) return nil }
[ "\"RELEASE\"" ]
[]
[ "RELEASE" ]
[]
["RELEASE"]
go
1
0
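lib/honeycomb/daemon.go reads RELEASE with os.Getenv and attaches it to every event as the "release" field. os.Getenv returns an empty string both when the variable is unset and when it is set to "", so a caller that needs to tell those cases apart can use os.LookupEnv instead. A minimal sketch of that distinction; the "dev" fallback is an assumption of this sketch and is not part of the component above.

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.LookupEnv reports whether RELEASE was present at all, which
	// os.Getenv (used in the component above) cannot distinguish.
	release, ok := os.LookupEnv("RELEASE")
	if !ok {
		release = "dev" // hypothetical fallback for this sketch
	}
	fmt.Println("release:", release)
}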
kubetest/main.go
/* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "encoding/json" "errors" "flag" "fmt" "io/ioutil" "log" "math/rand" "os" "os/exec" "os/signal" "path/filepath" "regexp" "strconv" "strings" "time" "github.com/spf13/pflag" "k8s.io/test-infra/boskos/client" "k8s.io/test-infra/kubetest/conformance" "k8s.io/test-infra/kubetest/eks" "k8s.io/test-infra/kubetest/kubeadmdind" "k8s.io/test-infra/kubetest/process" "k8s.io/test-infra/kubetest/util" ) // Hardcoded in ginkgo-e2e.sh const defaultGinkgoParallel = 25 var ( artifacts = filepath.Join(os.Getenv("WORKSPACE"), "_artifacts") interrupt = time.NewTimer(time.Duration(0)) // interrupt testing at this time. terminate = time.NewTimer(time.Duration(0)) // terminate testing at this time. verbose = false timeout = time.Duration(0) boskos = client.NewClient(os.Getenv("JOB_NAME"), "http://boskos.test-pods.svc.cluster.local.") control = process.NewControl(timeout, interrupt, terminate, verbose) ) type options struct { build buildStrategy buildFederation buildFederationStrategy charts bool checkLeaks bool checkSkew bool cluster string clusterIPRange string deployment string down bool dump string dumpPreTestLogs string extract extractStrategies extractFederation extractFederationStrategies extractSource bool federation bool flushMemAfterBuild bool focusRegex string gcpCloudSdk string gcpMasterImage string gcpMasterSize string gcpNetwork string gcpNodeImage string gcpImageFamily string gcpImageProject string gcpNodes string gcpNodeSize string gcpProject string gcpProjectType string gcpServiceAccount string gcpRegion string gcpZone string ginkgoParallel ginkgoParallelValue kubecfg string kubemark bool kubemarkMasterSize string kubemarkNodes string // TODO(fejta): switch to int after migration logexporterGCSPath string metadataSources string multiClusters multiClusterDeployment multipleFederations bool noAllowDup bool nodeArgs string nodeTestArgs string nodeTests bool provider string publish string runtimeConfig string save string skew bool skipRegex string soak bool soakDuration time.Duration sshUser string stage stageStrategy stageFederation stageFederationStrategy test bool testArgs string testCmd string testCmdName string testCmdArgs []string up bool upgradeArgs string } func defineFlags() *options { o := options{} flag.Var(&o.build, "build", "Rebuild k8s binaries, optionally forcing (release|quick|bazel) strategy") flag.Var(&o.buildFederation, "build-federation", "Rebuild federation binaries, optionally forcing (release|quick|bazel) strategy") flag.BoolVar(&o.charts, "charts", false, "If true, run charts tests") flag.BoolVar(&o.checkSkew, "check-version-skew", true, "Verify client and server versions match") flag.BoolVar(&o.checkLeaks, "check-leaked-resources", false, "Ensure project ends with the same resources") flag.StringVar(&o.cluster, "cluster", "", "Cluster name. 
Must be set for --deployment=gke (TODO: other deployments).") flag.StringVar(&o.clusterIPRange, "cluster-ip-range", "", "Specifies CLUSTER_IP_RANGE value during --up and --test (only relevant for --deployment=bash). Auto-calculated if empty.") flag.StringVar(&o.deployment, "deployment", "bash", "Choices: none/bash/conformance/gke/eks/kops/kubernetes-anywhere/node/local") flag.BoolVar(&o.down, "down", false, "If true, tear down the cluster before exiting.") flag.StringVar(&o.dump, "dump", "", "If set, dump bring-up and cluster logs to this location on test or cluster-up failure") flag.StringVar(&o.dumpPreTestLogs, "dump-pre-test-logs", "", "If set, dump cluster logs to this location before running tests") flag.Var(&o.extract, "extract", "Extract k8s binaries from the specified release location") flag.Var(&o.extractFederation, "extract-federation", "Extract federation binaries from the specified release location") flag.BoolVar(&o.extractSource, "extract-source", false, "Extract k8s src together with other tarballs") flag.BoolVar(&o.federation, "federation", false, "If true, start/tear down the federation control plane along with the clusters. To only start/tear down the federation control plane, specify --deployment=none") flag.BoolVar(&o.flushMemAfterBuild, "flush-mem-after-build", false, "If true, try to flush container memory after building") flag.Var(&o.ginkgoParallel, "ginkgo-parallel", fmt.Sprintf("Run Ginkgo tests in parallel, default %d runners. Use --ginkgo-parallel=N to specify an exact count.", defaultGinkgoParallel)) flag.StringVar(&o.gcpCloudSdk, "gcp-cloud-sdk", "", "Install/upgrade google-cloud-sdk to the gs:// path if set") flag.StringVar(&o.gcpProject, "gcp-project", "", "For use with gcloud commands") flag.StringVar(&o.gcpProjectType, "gcp-project-type", "", "Explicitly indicate which project type to select from boskos") flag.StringVar(&o.gcpServiceAccount, "gcp-service-account", "", "Service account to activate before using gcloud") flag.StringVar(&o.gcpZone, "gcp-zone", "", "For use with gcloud commands") flag.StringVar(&o.gcpRegion, "gcp-region", "", "For use with gcloud commands") flag.StringVar(&o.gcpNetwork, "gcp-network", "", "Cluster network. Must be set for --deployment=gke (TODO: other deployments).") flag.StringVar(&o.gcpMasterImage, "gcp-master-image", "", "Master image type (cos|debian on GCE, n/a on GKE)") flag.StringVar(&o.gcpMasterSize, "gcp-master-size", "", "(--provider=gce only) Size of master to create (e.g n1-standard-1). Auto-calculated if left empty.") flag.StringVar(&o.gcpNodeImage, "gcp-node-image", "", "Node image type (cos|container_vm on GKE, cos|debian on GCE)") flag.StringVar(&o.gcpImageFamily, "image-family", "", "Node image family from which to use the latest image, required when --gcp-node-image=CUSTOM") flag.StringVar(&o.gcpImageProject, "image-project", "", "Project containing node image family, required when --gcp-node-image=CUSTOM") flag.StringVar(&o.gcpNodes, "gcp-nodes", "", "(--provider=gce only) Number of nodes to create.") flag.StringVar(&o.gcpNodeSize, "gcp-node-size", "", "(--provider=gce only) Size of nodes to create (e.g n1-standard-1).") flag.StringVar(&o.kubecfg, "kubeconfig", "", "The location of a kubeconfig file.") flag.StringVar(&o.focusRegex, "ginkgo-focus", "", "The ginkgo regex to focus. Currently only respected for (dind).") flag.StringVar(&o.skipRegex, "ginkgo-skip", "", "The ginkgo regex to skip. 
Currently only respected for (dind).") flag.BoolVar(&o.kubemark, "kubemark", false, "If true, run kubemark tests.") flag.StringVar(&o.kubemarkMasterSize, "kubemark-master-size", "", "Kubemark master size (only relevant if --kubemark=true). Auto-calculated based on '--kubemark-nodes' if left empty.") flag.StringVar(&o.kubemarkNodes, "kubemark-nodes", "5", "Number of kubemark nodes to start (only relevant if --kubemark=true).") flag.StringVar(&o.logexporterGCSPath, "logexporter-gcs-path", "", "Path to the GCS artifacts directory to dump logs from nodes. Logexporter gets enabled if this is non-empty") flag.StringVar(&o.metadataSources, "metadata-sources", "images.json", "Comma-separated list of files inside ./artifacts to merge into metadata.json") flag.Var(&o.multiClusters, "multi-clusters", "If set, bring up/down multiple clusters specified. Format is [Zone1:]Cluster1[,[ZoneN:]ClusterN]]*. Zone is optional and default zone is used if zone is not specified") flag.BoolVar(&o.multipleFederations, "multiple-federations", false, "If true, enable running multiple federation control planes in parallel") flag.StringVar(&o.nodeArgs, "node-args", "", "Args for node e2e tests.") flag.StringVar(&o.nodeTestArgs, "node-test-args", "", "Test args specifically for node e2e tests.") flag.BoolVar(&o.noAllowDup, "no-allow-dup", false, "if set --allow-dup will not be passed to push-build and --stage will error if the build already exists on the gcs path") flag.BoolVar(&o.nodeTests, "node-tests", false, "If true, run node-e2e tests.") flag.StringVar(&o.provider, "provider", "", "Kubernetes provider such as gce, gke, aws, eks, etc") flag.StringVar(&o.publish, "publish", "", "Publish version to the specified gs:// path on success") flag.StringVar(&o.runtimeConfig, "runtime-config", "batch/v2alpha1=true", "If set, API versions can be turned on or off while bringing up the API server.") flag.StringVar(&o.stage.dockerRegistry, "registry", "", "Push images to the specified docker registry (e.g. gcr.io/a-test-project)") flag.StringVar(&o.save, "save", "", "Save credentials to gs:// path on --up if set (or load from there if not --up)") flag.BoolVar(&o.skew, "skew", false, "If true, run tests in another version at ../kubernetes/hack/e2e.go") flag.BoolVar(&o.soak, "soak", false, "If true, job runs in soak mode") flag.DurationVar(&o.soakDuration, "soak-duration", 7*24*time.Hour, "Maximum age of a soak cluster before it gets recycled") flag.Var(&o.stage, "stage", "Upload binaries to gs://bucket/devel/job-suffix if set") flag.Var(&o.stageFederation, "stage-federation", "Upload federation binaries to gs://bucket/devel/job-suffix if set") flag.StringVar(&o.stage.versionSuffix, "stage-suffix", "", "Append suffix to staged version when set") flag.BoolVar(&o.test, "test", false, "Run Ginkgo tests.") flag.StringVar(&o.testArgs, "test_args", "", "Space-separated list of arguments to pass to Ginkgo test runner.") flag.StringVar(&o.testCmd, "test-cmd", "", "command to run against the cluster instead of Ginkgo e2e tests") flag.StringVar(&o.testCmdName, "test-cmd-name", "", "name to log the test command as in xml results") flag.DurationVar(&timeout, "timeout", time.Duration(0), "Terminate testing after the timeout duration (s/m/h)") flag.BoolVar(&o.up, "up", false, "If true, start the e2e cluster. If cluster is already up, recreate it.") flag.StringVar(&o.upgradeArgs, "upgrade_args", "", "If set, run upgrade tests before other tests") // The "-v" flag was also used by glog, which is used by k8s.io/client-go. 
Duplicate flags cause panics. // 1. Even if we could convince glog to change, they have too many consumers to ever do so. // 2. The glog lib parses flags during init. It is impossible to dynamically rewrite the args before they're parsed by glog. // 3. The glog lib takes an int value, so "-v false" is an error. // 4. It's possible, but unlikely, we could convince k8s.io/client-go to use a logging shim, because a library shouldn't force a logging implementation. This would take a major version release for the lib. // // The most reasonable solution is to accept that we shouldn't have made a single-letter global, and rename all references to this variable. flag.BoolVar(&verbose, "verbose-commands", true, "If true, print all command output.") // go flag does not support StringArrayVar pflag.StringArrayVar(&o.testCmdArgs, "test-cmd-args", []string{}, "args for test-cmd") return &o } var suite util.TestSuite func validWorkingDirectory() error { cwd, err := os.Getwd() if err != nil { return fmt.Errorf("could not get pwd: %v", err) } acwd, err := filepath.Abs(cwd) if err != nil { return fmt.Errorf("failed to convert %s to an absolute path: %v", cwd, err) } // This also matches "kubernetes_skew" for upgrades. if !strings.Contains(filepath.Base(acwd), "kubernetes") { return fmt.Errorf("must run from kubernetes directory root: %v", acwd) } return nil } type deployer interface { Up() error IsUp() error DumpClusterLogs(localPath, gcsPath string) error TestSetup() error Down() error GetClusterCreated(gcpProject string) (time.Time, error) KubectlCommand() (*exec.Cmd, error) } // publisher is implemented by deployers that want to publish status on success type publisher interface { // Publish is called when the tests were successful; the deployer should publish a success file Publish() error } func getDeployer(o *options) (deployer, error) { switch o.deployment { case "bash": return newBash(&o.clusterIPRange), nil case "conformance": return conformance.NewDeployer(o.kubecfg) case "gke": return newGKE(o.provider, o.gcpProject, o.gcpZone, o.gcpRegion, o.gcpNetwork, o.gcpNodeImage, o.gcpImageFamily, o.gcpImageProject, o.cluster, &o.testArgs, &o.upgradeArgs) case "eks": return eks.NewDeployer(timeout, verbose) case "kops": return newKops(o.provider, o.gcpProject, o.cluster) case "kubeadm-dind": return kubeadmdind.NewDeployer(control) case "kubernetes-anywhere": if o.multiClusters.Enabled() { return newKubernetesAnywhereMultiCluster(o.gcpProject, o.gcpZone, o.multiClusters) } return newKubernetesAnywhere(o.gcpProject, o.gcpZone) case "node": return nodeDeploy{}, nil case "none": return noneDeploy{}, nil case "local": return newLocalCluster(), nil case "acsengine": return newAcsEngine() default: return nil, fmt.Errorf("unknown deployment strategy %q", o.deployment) } } func validateFlags(o *options) error { if o.multiClusters.Enabled() && o.deployment != "kubernetes-anywhere" { return errors.New("--multi-clusters flag cannot be passed with deployments other than 'kubernetes-anywhere'") } if !o.extract.Enabled() && o.extractSource { return errors.New("--extract-source flag cannot be passed without --extract") } return nil } func main() { log.SetFlags(log.LstdFlags | log.Lshortfile) // Initialize global pseudo random generator. Initializing it to select random AWS Zones. 
rand.Seed(time.Now().UnixNano()) pflag.CommandLine = pflag.NewFlagSet(os.Args[0], pflag.ContinueOnError) o := defineFlags() pflag.CommandLine.AddGoFlagSet(flag.CommandLine) if err := pflag.CommandLine.Parse(os.Args[1:]); err != nil { log.Fatalf("Flag parse failed: %v", err) } if err := validateFlags(o); err != nil { log.Fatalf("Flags validation failed. err: %v", err) } control = process.NewControl(timeout, interrupt, terminate, verbose) // do things when we know we are running in the kubetest image if os.Getenv("KUBETEST_IN_DOCKER") == "true" { o.flushMemAfterBuild = true } err := complete(o) if boskos.HasResource() { if berr := boskos.ReleaseAll("dirty"); berr != nil { log.Fatalf("[Boskos] Fail To Release: %v, kubetest err: %v", berr, err) } } if err != nil { log.Fatalf("Something went wrong: %v", err) } } func complete(o *options) error { if !terminate.Stop() { <-terminate.C // Drain the value if necessary. } if !interrupt.Stop() { <-interrupt.C // Drain value } if timeout > 0 { log.Printf("Limiting testing to %s", timeout) interrupt.Reset(timeout) } if o.dump != "" { defer writeMetadata(o.dump, o.metadataSources) defer control.WriteXML(&suite, o.dump, time.Now()) } if o.logexporterGCSPath != "" { o.testArgs += fmt.Sprintf(" --logexporter-gcs-path=%s", o.logexporterGCSPath) } if err := prepare(o); err != nil { return fmt.Errorf("failed to prepare test environment: %v", err) } if err := prepareFederation(o); err != nil { return fmt.Errorf("failed to prepare federation test environment: %v", err) } // Get the deployer before we acquire k8s so any additional flag // verifications happen early. deploy, err := getDeployer(o) if err != nil { return fmt.Errorf("error creating deployer: %v", err) } // Check soaking before run tests if o.soak { if created, err := deploy.GetClusterCreated(o.gcpProject); err != nil { // continue, but log the error log.Printf("deploy %v, GetClusterCreated failed: %v", o.deployment, err) } else { if time.Now().After(created.Add(o.soakDuration)) { // flip up on - which will tear down previous cluster and start a new one log.Printf("Previous soak cluster created at %v, will recreate the cluster", created) o.up = true } } } if err := acquireKubernetes(o); err != nil { return fmt.Errorf("failed to acquire k8s binaries: %v", err) } if err := acquireFederation(o); err != nil { return fmt.Errorf("failed to acquire federation binaries: %v", err) } if o.extract.Enabled() { // If we specified `--extract-source` we will already be in the correct directory if !o.extractSource { if err := os.Chdir("kubernetes"); err != nil { return fmt.Errorf("failed to chdir to kubernetes dir: %v", err) } } } if err := validWorkingDirectory(); err != nil { return fmt.Errorf("called from invalid working directory: %v", err) } if o.down { // listen for signals such as ^C and gracefully attempt to clean up c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt) go func() { for range c { log.Print("Captured ^C, gracefully attempting to cleanup resources..") var fedErr, err error if o.federation { if fedErr = fedDown(); fedErr != nil { log.Printf("Tearing down federation failed: %v", fedErr) } } if err = deploy.Down(); err != nil { log.Printf("Tearing down deployment failed: %v", err) } if fedErr != nil || err != nil { os.Exit(1) } os.Exit(2) } }() } if err := run(deploy, *o); err != nil { return err } // Publish the successfully tested version when requested if o.publish != "" { if err := publish(o.publish); err != nil { return err } } return nil } func acquireKubernetes(o *options) error { // 
Potentially build kubernetes if o.build.Enabled() { err := control.XMLWrap(&suite, "Build", o.build.Build) if o.flushMemAfterBuild { util.FlushMem() } if err != nil { return err } } // Potentially stage build binaries somewhere on GCS if o.stage.Enabled() { if err := control.XMLWrap(&suite, "Stage", func() error { return o.stage.Stage(o.federation, o.noAllowDup) }); err != nil { return err } } // Potentially download existing binaries and extract them. if o.extract.Enabled() { err := control.XMLWrap(&suite, "Extract", func() error { // Should we restore a previous state? // Restore if we are not upping the cluster or we are bringing up // a federation control plane without the federated clusters. if o.save != "" { if !o.up { // Restore version and .kube/config from --up log.Printf("Overwriting extract strategy to load kubeconfig and version from %s", o.save) o.extract = extractStrategies{ extractStrategy{ mode: load, option: o.save, }, } } else if o.federation && o.up && o.deployment == "none" { // Only restore .kube/config from previous --up, use the regular // extraction strategy to restore version. log.Printf("Load kubeconfig from %s", o.save) loadKubeconfig(o.save) } } // New deployment, extract new version return o.extract.Extract(o.gcpProject, o.gcpZone, o.gcpRegion, o.extractSource) }) if err != nil { return err } } return nil } func acquireFederation(o *options) error { // Potentially build federation if o.buildFederation.Enabled() { err := control.XMLWrap(&suite, "BuildFederation", o.buildFederation.Build) if o.flushMemAfterBuild { util.FlushMem() } if err != nil { return err } } // Potentially stage federation binaries somewhere on GCS if o.stageFederation.Enabled() { if err := control.XMLWrap(&suite, "StageFederation", func() error { return o.stageFederation.Stage() }); err != nil { return err } } // Potentially download existing federation binaries and extract them. if o.extractFederation.Enabled() { err := control.XMLWrap(&suite, "ExtractFederation", func() error { return o.extractFederation.Extract(o.gcpProject, o.gcpZone) }) return err } return nil } // Returns the k8s version name func findVersion() string { // The version may be in a version file if _, err := os.Stat("version"); err == nil { b, err := ioutil.ReadFile("version") if err == nil { return strings.TrimSpace(string(b)) } log.Printf("Failed to read version: %v", err) } // We can also get it from the git repo. if _, err := os.Stat("hack/lib/version.sh"); err == nil { // TODO(fejta): do this in go. At least we removed the upload-to-gcs.sh dep. gross := `. hack/lib/version.sh && KUBE_ROOT=. kube::version::get_version_vars && echo "${KUBE_GIT_VERSION-}"` b, err := control.Output(exec.Command("bash", "-c", gross)) if err == nil { return strings.TrimSpace(string(b)) } log.Printf("Failed to get_version_vars: %v", err) } return "unknown" // Sad trombone } // maybeMergeMetadata will add new keyvals into the map; quietly eats errors. func maybeMergeJSON(meta map[string]string, path string) { if data, err := ioutil.ReadFile(path); err == nil { json.Unmarshal(data, &meta) } } // Write metadata.json, including version and env arg data. 
func writeMetadata(path, metadataSources string) error { m := make(map[string]string) // Look for any sources of metadata and load 'em for _, f := range strings.Split(metadataSources, ",") { maybeMergeJSON(m, filepath.Join(path, f)) } ver := findVersion() m["job-version"] = ver // TODO(krzyzacy): retire m["revision"] = ver re := regexp.MustCompile(`^BUILD_METADATA_(.+)$`) for _, e := range os.Environ() { p := strings.SplitN(e, "=", 2) r := re.FindStringSubmatch(p[0]) if r == nil { continue } k, v := strings.ToLower(r[1]), p[1] m[k] = v } f, err := os.Create(filepath.Join(path, "metadata.json")) if err != nil { return err } defer f.Close() e := json.NewEncoder(f) return e.Encode(m) } // Install cloudsdk tarball to location, updating PATH func installGcloud(tarball string, location string) error { if err := os.MkdirAll(location, 0775); err != nil { return err } if err := control.FinishRunning(exec.Command("tar", "xzf", tarball, "-C", location)); err != nil { return err } if err := control.FinishRunning(exec.Command(filepath.Join(location, "google-cloud-sdk", "install.sh"), "--disable-installation-options", "--bash-completion=false", "--path-update=false", "--usage-reporting=false")); err != nil { return err } if err := util.InsertPath(filepath.Join(location, "google-cloud-sdk", "bin")); err != nil { return err } if err := control.FinishRunning(exec.Command("gcloud", "components", "install", "alpha")); err != nil { return err } if err := control.FinishRunning(exec.Command("gcloud", "components", "install", "beta")); err != nil { return err } if err := control.FinishRunning(exec.Command("gcloud", "info")); err != nil { return err } return nil } func migrateGcpEnvAndOptions(o *options) error { var network string var zone string switch o.provider { case "gke": network = "KUBE_GKE_NETWORK" zone = "ZONE" default: network = "KUBE_GCE_NETWORK" zone = "KUBE_GCE_ZONE" } return util.MigrateOptions([]util.MigratedOption{ { Env: "PROJECT", Option: &o.gcpProject, Name: "--gcp-project", }, { Env: zone, Option: &o.gcpZone, Name: "--gcp-zone", }, { Env: "REGION", Option: &o.gcpRegion, Name: "--gcp-region", }, { Env: "GOOGLE_APPLICATION_CREDENTIALS", Option: &o.gcpServiceAccount, Name: "--gcp-service-account", }, { Env: network, Option: &o.gcpNetwork, Name: "--gcp-network", }, { Env: "KUBE_NODE_OS_DISTRIBUTION", Option: &o.gcpNodeImage, Name: "--gcp-node-image", }, { Env: "KUBE_MASTER_OS_DISTRIBUTION", Option: &o.gcpMasterImage, Name: "--gcp-master-image", }, { Env: "NUM_NODES", Option: &o.gcpNodes, Name: "--gcp-nodes", }, { Env: "NODE_SIZE", Option: &o.gcpNodeSize, Name: "--gcp-node-size", }, { Env: "MASTER_SIZE", Option: &o.gcpMasterSize, Name: "--gcp-master-size", }, { Env: "CLOUDSDK_BUCKET", Option: &o.gcpCloudSdk, Name: "--gcp-cloud-sdk", SkipPush: true, }, }) } func prepareGcp(o *options) error { if err := migrateGcpEnvAndOptions(o); err != nil { return err } if o.provider == "gce" { if distro := os.Getenv("KUBE_OS_DISTRIBUTION"); distro != "" { log.Printf("Please use --gcp-master-image=%s --gcp-node-image=%s (instead of deprecated KUBE_OS_DISTRIBUTION)", distro, distro) // Note: KUBE_OS_DISTRIBUTION takes precedence over // KUBE_{MASTER,NODE}_OS_DISTRIBUTION, so override here // after the migration above. 
o.gcpNodeImage = distro o.gcpMasterImage = distro if err := os.Setenv("KUBE_NODE_OS_DISTRIBUTION", distro); err != nil { return fmt.Errorf("could not set KUBE_NODE_OS_DISTRIBUTION=%s: %v", distro, err) } if err := os.Setenv("KUBE_MASTER_OS_DISTRIBUTION", distro); err != nil { return fmt.Errorf("could not set KUBE_MASTER_OS_DISTRIBUTION=%s: %v", distro, err) } } hasGCPImageFamily, hasGCPImageProject := len(o.gcpImageFamily) != 0, len(o.gcpImageProject) != 0 if hasGCPImageFamily != hasGCPImageProject { return fmt.Errorf("--image-family and --image-project must be both set or unset") } if hasGCPImageFamily && hasGCPImageProject { out, err := control.Output(exec.Command("gcloud", "compute", "images", "describe-from-family", o.gcpImageFamily, "--project", o.gcpImageProject)) if err != nil { return fmt.Errorf("failed to get latest image from family %q in project %q: %s", o.gcpImageFamily, o.gcpImageProject, err) } latestImage := "" latestImageRegexp := regexp.MustCompile("^name: *(\\S+)") for _, line := range strings.Split(string(out), "\n") { matches := latestImageRegexp.FindStringSubmatch(line) if len(matches) == 2 { latestImage = matches[1] break } } if len(latestImage) == 0 { return fmt.Errorf("failed to get latest image from family %q in project %q", o.gcpImageFamily, o.gcpImageProject) } if o.deployment == "node" { o.nodeArgs += fmt.Sprintf(" --images=%s --image-project=%s", latestImage, o.gcpImageProject) } else { os.Setenv("KUBE_GCE_NODE_IMAGE", latestImage) os.Setenv("KUBE_GCE_NODE_PROJECT", o.gcpImageProject) } } } else if o.provider == "gke" { if o.deployment == "" { o.deployment = "gke" } if o.deployment != "gke" { return fmt.Errorf("expected --deployment=gke for --provider=gke, found --deployment=%s", o.deployment) } if o.gcpNodeImage == "" { return fmt.Errorf("--gcp-node-image must be set for GKE") } if o.gcpMasterImage != "" { return fmt.Errorf("expected --gcp-master-image to be empty for --provider=gke, found --gcp-master-image=%s", o.gcpMasterImage) } if o.gcpNodes != "" { return fmt.Errorf("--gcp-nodes cannot be set on GKE, use --gke-shape instead") } if o.gcpNodeSize != "" { return fmt.Errorf("--gcp-node-size cannot be set on GKE, use --gke-shape instead") } if o.gcpMasterSize != "" { return fmt.Errorf("--gcp-master-size cannot be set on GKE, where it's auto-computed") } // TODO(kubernetes/test-infra#3536): This is used by the // ginkgo-e2e.sh wrapper. nod := o.gcpNodeImage if nod == "container_vm" { // gcloud container clusters create understands // "container_vm", e2es understand "debian". 
nod = "debian" } if nod == "cos_containerd" { // gcloud container clusters create understands // "cos_containerd", e2es only understand // "gci"/"cos", nod = "gci" } os.Setenv("NODE_OS_DISTRIBUTION", nod) } if o.gcpProject == "" { log.Print("--gcp-project is missing, trying to fetch a project from boskos.\n" + "(for local runs please set --gcp-project to your dev project)") var resType string if o.gcpProjectType != "" { resType = o.gcpProjectType } else if o.provider == "gke" { resType = "gke-project" } else { resType = "gce-project" } log.Printf("provider %v, will acquire project type %v from boskos", o.provider, resType) p, err := boskos.Acquire(resType, "free", "busy") if err != nil { return fmt.Errorf("--provider=%s boskos failed to acquire project: %v", o.provider, err) } if p == nil { return fmt.Errorf("boskos does not have a free %s at the moment", resType) } go func(c *client.Client, proj string) { for range time.Tick(time.Minute * 5) { if err := c.UpdateOne(p.Name, "busy", nil); err != nil { log.Printf("[Boskos] Update of %s failed with %v", p.Name, err) } } }(boskos, p.Name) o.gcpProject = p.Name } if err := os.Setenv("CLOUDSDK_CORE_PRINT_UNHANDLED_TRACEBACKS", "1"); err != nil { return fmt.Errorf("could not set CLOUDSDK_CORE_PRINT_UNHANDLED_TRACEBACKS=1: %v", err) } if err := control.FinishRunning(exec.Command("gcloud", "config", "set", "project", o.gcpProject)); err != nil { return fmt.Errorf("fail to set project %s : err %v", o.gcpProject, err) } // TODO(krzyzacy):Remove this when we retire migrateGcpEnvAndOptions // Note that a lot of scripts are still depend on this env in k/k repo. if err := os.Setenv("PROJECT", o.gcpProject); err != nil { return fmt.Errorf("fail to set env var PROJECT %s : err %v", o.gcpProject, err) } // gcloud creds may have changed if err := activateServiceAccount(o.gcpServiceAccount); err != nil { return err } // Ensure ssh keys exist log.Print("Checking existing of GCP ssh keys...") k := filepath.Join(util.Home(".ssh"), "google_compute_engine") if _, err := os.Stat(k); err != nil { return err } pk := k + ".pub" if _, err := os.Stat(pk); err != nil { return err } log.Printf("Checking presence of public key in %s", o.gcpProject) if out, err := control.Output(exec.Command("gcloud", "compute", "--project="+o.gcpProject, "project-info", "describe")); err != nil { return err } else if b, err := ioutil.ReadFile(pk); err != nil { return err } else if !strings.Contains(string(out), string(b)) { log.Print("Uploading public ssh key to project metadata...") if err = control.FinishRunning(exec.Command("gcloud", "compute", "--project="+o.gcpProject, "config-ssh")); err != nil { return err } } // Install custom gcloud version if necessary if o.gcpCloudSdk != "" { for i := 0; i < 3; i++ { if err := control.FinishRunning(exec.Command("gsutil", "-mq", "cp", "-r", o.gcpCloudSdk, util.Home())); err == nil { break // Success! } time.Sleep(1 << uint(i) * time.Second) } for _, f := range []string{util.Home(".gsutil"), util.Home("repo"), util.Home("cloudsdk")} { if _, err := os.Stat(f); err == nil || !os.IsNotExist(err) { if err = os.RemoveAll(f); err != nil { return err } } } install := util.Home("repo", "google-cloud-sdk.tar.gz") if strings.HasSuffix(o.gcpCloudSdk, ".tar.gz") { install = util.Home(filepath.Base(o.gcpCloudSdk)) } else { if err := os.Rename(util.Home(filepath.Base(o.gcpCloudSdk)), util.Home("repo")); err != nil { return err } // Controls which gcloud components to install. 
pop, err := util.PushEnv("CLOUDSDK_COMPONENT_MANAGER_SNAPSHOT_URL", "file://"+util.Home("repo", "components-2.json")) if err != nil { return err } defer pop() } if err := installGcloud(install, util.Home("cloudsdk")); err != nil { return err } // gcloud creds may have changed if err := activateServiceAccount(o.gcpServiceAccount); err != nil { return err } } if o.kubemark { if p := os.Getenv("KUBEMARK_BAZEL_BUILD"); strings.ToLower(p) == "y" { // we need docker-credential-gcr to get authed properly // https://github.com/bazelbuild/rules_docker#authorization if err := control.FinishRunning(exec.Command("gcloud", "components", "install", "docker-credential-gcr")); err != nil { return err } if err := control.FinishRunning(exec.Command("docker-credential-gcr", "configure-docker")); err != nil { return err } } } return nil } func prepareAws(o *options) error { // gcloud creds may have changed if err := activateServiceAccount(o.gcpServiceAccount); err != nil { return err } return control.FinishRunning(exec.Command("pip", "install", "awscli")) } // Activate GOOGLE_APPLICATION_CREDENTIALS if set or do nothing. func activateServiceAccount(path string) error { if path == "" { return nil } return control.FinishRunning(exec.Command("gcloud", "auth", "activate-service-account", "--key-file="+path)) } // Make all artifacts world readable. // The root user winds up owning the files when the container exists. // Ensure that other users can read these files at that time. func chmodArtifacts() error { return control.FinishRunning(exec.Command("chmod", "-R", "o+r", artifacts)) } func prepare(o *options) error { if err := util.MigrateOptions([]util.MigratedOption{ { Env: "KUBERNETES_PROVIDER", Option: &o.provider, Name: "--provider", }, { Env: "CLUSTER_NAME", Option: &o.cluster, Name: "--cluster", }, }); err != nil { return err } if err := prepareGinkgoParallel(&o.ginkgoParallel); err != nil { return err } switch o.provider { case "gce", "gke", "node": if err := prepareGcp(o); err != nil { return err } case "aws": if err := prepareAws(o); err != nil { return err } } // For kubernetes-anywhere as the deployer, call prepareGcp() // independent of the specified provider. if o.deployment == "kubernetes-anywhere" { if err := prepareGcp(o); err != nil { return err } } if o.kubemark { if err := util.MigrateOptions([]util.MigratedOption{ { Env: "KUBEMARK_NUM_NODES", Option: &o.kubemarkNodes, Name: "--kubemark-nodes", }, { Env: "KUBEMARK_MASTER_SIZE", Option: &o.kubemarkMasterSize, Name: "--kubemark-master-size", }, }); err != nil { return err } } if err := os.MkdirAll(artifacts, 0777); err != nil { // Create artifacts return err } return nil } func prepareFederation(o *options) error { if o.multipleFederations { // TODO(fejta): use boskos to grab a federation cluster // Note: EXECUTOR_NUMBER and NODE_NAME are Jenkins // specific environment variables. So this doesn't work // when we move away from Jenkins. 
execNum := os.Getenv("EXECUTOR_NUMBER") if execNum == "" { execNum = "0" } suffix := fmt.Sprintf("%s-%s", os.Getenv("NODE_NAME"), execNum) federationName := fmt.Sprintf("e2e-f8n-%s", suffix) federationSystemNamespace := fmt.Sprintf("f8n-system-%s", suffix) err := os.Setenv("FEDERATION_NAME", federationName) if err != nil { return err } return os.Setenv("FEDERATION_NAMESPACE", federationSystemNamespace) } return nil } type ginkgoParallelValue struct { v int // 0 == not set (defaults to 1) } func (v *ginkgoParallelValue) IsBoolFlag() bool { return true } func (v *ginkgoParallelValue) String() string { if v.v == 0 { return "1" } return strconv.Itoa(v.v) } func (v *ginkgoParallelValue) Set(s string) error { if s == "" { v.v = 0 return nil } if s == "true" { v.v = defaultGinkgoParallel return nil } p, err := strconv.Atoi(s) if err != nil { return fmt.Errorf("--ginkgo-parallel must be an integer, found %q", s) } if p < 1 { return fmt.Errorf("--ginkgo-parallel must be >= 1, found %d", p) } v.v = p return nil } func (v *ginkgoParallelValue) Type() string { return "ginkgoParallelValue" } func (v *ginkgoParallelValue) Get() int { if v.v == 0 { return 1 } return v.v } var _ flag.Value = &ginkgoParallelValue{} // Hand migrate this option. GINKGO_PARALLEL => GINKGO_PARALLEL_NODES=25 func prepareGinkgoParallel(v *ginkgoParallelValue) error { if p := os.Getenv("GINKGO_PARALLEL"); strings.ToLower(p) == "y" { log.Printf("Please use kubetest --ginkgo-parallel (instead of deprecated GINKGO_PARALLEL=y)") if err := v.Set("true"); err != nil { return err } os.Unsetenv("GINKGO_PARALLEL") } if p := os.Getenv("GINKGO_PARALLEL_NODES"); p != "" { log.Printf("Please use kubetest --ginkgo-parallel=%s (instead of deprecated GINKGO_PARALLEL_NODES=%s)", p, p) if err := v.Set(p); err != nil { return err } } os.Setenv("GINKGO_PARALLEL_NODES", v.String()) return nil } func publish(pub string) error { v, err := ioutil.ReadFile("version") if err != nil { return err } log.Printf("Set %s version to %s", pub, string(v)) return gcsWrite(pub, v) }
[ "\"WORKSPACE\"", "\"JOB_NAME\"", "\"KUBETEST_IN_DOCKER\"", "\"KUBE_OS_DISTRIBUTION\"", "\"KUBEMARK_BAZEL_BUILD\"", "\"EXECUTOR_NUMBER\"", "\"NODE_NAME\"", "\"GINKGO_PARALLEL\"", "\"GINKGO_PARALLEL_NODES\"" ]
[]
[ "KUBEMARK_BAZEL_BUILD", "JOB_NAME", "KUBETEST_IN_DOCKER", "NODE_NAME", "GINKGO_PARALLEL", "EXECUTOR_NUMBER", "GINKGO_PARALLEL_NODES", "WORKSPACE", "KUBE_OS_DISTRIBUTION" ]
[]
["KUBEMARK_BAZEL_BUILD", "JOB_NAME", "KUBETEST_IN_DOCKER", "NODE_NAME", "GINKGO_PARALLEL", "EXECUTOR_NUMBER", "GINKGO_PARALLEL_NODES", "WORKSPACE", "KUBE_OS_DISTRIBUTION"]
go
9
0
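kubetest/main.go applies the same pattern in several places: a deprecated environment variable (PROJECT, CLUSTER_NAME, GINKGO_PARALLEL_NODES, and so on) is copied into the corresponding flag via util.MigrateOptions or prepareGinkgoParallel, with a deprecation notice logged. The sketch below shows that pattern in isolation; migratedOption is a simplified stand-in for the real k8s.io/test-infra type, and the Unsetenv step is an assumption of this sketch rather than the library's documented behaviour.

package main

import (
	"fmt"
	"log"
	"os"
)

// migratedOption is a simplified stand-in for the MigratedOption values
// passed to util.MigrateOptions in main.go above.
type migratedOption struct {
	Env    string  // deprecated environment variable
	Option *string // destination flag value
	Name   string  // flag name, used in the deprecation notice
}

// migrate copies each set environment variable into its flag value when the
// flag was not set explicitly, and logs a deprecation hint.
func migrate(opts []migratedOption) {
	for _, m := range opts {
		v, ok := os.LookupEnv(m.Env)
		if !ok {
			continue
		}
		if *m.Option == "" {
			log.Printf("Please use %s=%s (instead of deprecated %s=%s)", m.Name, v, m.Env, v)
			*m.Option = v
		}
		os.Unsetenv(m.Env) // assumption: clear the deprecated variable after migration
	}
}

func main() {
	os.Setenv("CLUSTER_NAME", "test-cluster")
	var cluster string
	migrate([]migratedOption{{Env: "CLUSTER_NAME", Option: &cluster, Name: "--cluster"}})
	fmt.Println(cluster) // test-cluster
}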
pygments/cmdline.py
# -*- coding: utf-8 -*- """ pygments.cmdline ~~~~~~~~~~~~~~~~ Command line interface. :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ from __future__ import print_function import os import sys import getopt from textwrap import dedent from pygments import __version__, highlight from pygments.util import ClassNotFound, OptionError, docstring_headline, \ guess_decode, guess_decode_from_terminal, terminal_encoding from pygments.lexers import get_all_lexers, get_lexer_by_name, guess_lexer, \ load_lexer_from_file, get_lexer_for_filename, find_lexer_class_for_filename from pygments.lexers.special import TextLexer from pygments.formatters.latex import LatexEmbeddedLexer, LatexFormatter from pygments.formatters import get_all_formatters, get_formatter_by_name, \ load_formatter_from_file, get_formatter_for_filename, find_formatter_class from pygments.formatters.terminal import TerminalFormatter from pygments.formatters.terminal256 import Terminal256Formatter from pygments.filters import get_all_filters, find_filter_class from pygments.styles import get_all_styles, get_style_by_name USAGE = """\ Usage: %s [-l <lexer> | -g] [-F <filter>[:<options>]] [-f <formatter>] [-O <options>] [-P <option=value>] [-s] [-v] [-x] [-o <outfile>] [<infile>] %s -S <style> -f <formatter> [-a <arg>] [-O <options>] [-P <option=value>] %s -L [<which> ...] %s -N <filename> %s -H <type> <name> %s -h | -V Highlight the input file and write the result to <outfile>. If no input file is given, use stdin, if -o is not given, use stdout. If -s is passed, lexing will be done in "streaming" mode, reading and highlighting one line at a time. This will only work properly with lexers that have no constructs spanning multiple lines! <lexer> is a lexer name (query all lexer names with -L). If -l is not given, the lexer is guessed from the extension of the input file name (this obviously doesn't work if the input is stdin). If -g is passed, attempt to guess the lexer from the file contents, or pass through as plain text if this fails (this can work for stdin). Likewise, <formatter> is a formatter name, and will be guessed from the extension of the output file name. If no output file is given, the terminal formatter will be used by default. The additional option -x allows custom lexers and formatters to be loaded from a .py file relative to the current working directory. For example, ``-l ./customlexer.py -x``. By default, this option expects a file with a class named CustomLexer or CustomFormatter; you can also specify your own class name with a colon (``-l ./lexer.py:MyLexer``). Users should be very careful not to use this option with untrusted files, because it will import and run them. With the -O option, you can give the lexer and formatter a comma- separated list of options, e.g. ``-O bg=light,python=cool``. The -P option adds lexer and formatter options like the -O option, but you can only give one option per -P. That way, the option value may contain commas and equals signs, which it can't with -O, e.g. ``-P "heading=Pygments, the Python highlighter". With the -F option, you can add filters to the token stream, you can give options in the same way as for -O after a colon (note: there must not be spaces around the colon). The -O, -P and -F options can be given multiple times. With the -S option, print out style definitions for style <style> for formatter <formatter>. The argument given by -a is formatter dependent. 
The -L option lists lexers, formatters, styles or filters -- set `which` to the thing you want to list (e.g. "styles"), or omit it to list everything. The -N option guesses and prints out a lexer name based solely on the given filename. It does not take input or highlight anything. If no specific lexer can be determined "text" is returned. The -H option prints detailed help for the object <name> of type <type>, where <type> is one of "lexer", "formatter" or "filter". The -s option processes lines one at a time until EOF, rather than waiting to process the entire file. This only works for stdin, and is intended for streaming input such as you get from 'tail -f'. Example usage: "tail -f sql.log | pygmentize -s -l sql" The -v option prints a detailed traceback on unhandled exceptions, which is useful for debugging and bug reports. The -h option prints this help. The -V option prints the package version. """ def _parse_options(o_strs): opts = {} if not o_strs: return opts for o_str in o_strs: if not o_str.strip(): continue o_args = o_str.split(',') for o_arg in o_args: o_arg = o_arg.strip() try: o_key, o_val = o_arg.split('=', 1) o_key = o_key.strip() o_val = o_val.strip() except ValueError: opts[o_arg] = True else: opts[o_key] = o_val return opts def _parse_filters(f_strs): filters = [] if not f_strs: return filters for f_str in f_strs: if ':' in f_str: fname, fopts = f_str.split(':', 1) filters.append((fname, _parse_options([fopts]))) else: filters.append((f_str, {})) return filters def _print_help(what, name): try: if what == 'lexer': cls = get_lexer_by_name(name) print("Help on the %s lexer:" % cls.name) print(dedent(cls.__doc__)) elif what == 'formatter': cls = find_formatter_class(name) print("Help on the %s formatter:" % cls.name) print(dedent(cls.__doc__)) elif what == 'filter': cls = find_filter_class(name) print("Help on the %s filter:" % name) print(dedent(cls.__doc__)) return 0 except (AttributeError, ValueError): print("%s not found!" % what, file=sys.stderr) return 1 def _print_list(what): if what == 'lexer': print() print("Lexers:") print("~~~~~~~") info = [] for fullname, names, exts, _ in get_all_lexers(): tup = (', '.join(names)+':', fullname, exts and '(filenames ' + ', '.join(exts) + ')' or '') info.append(tup) info.sort() for i in info: print(('* %s\n %s %s') % i) elif what == 'formatter': print() print("Formatters:") print("~~~~~~~~~~~") info = [] for cls in get_all_formatters(): doc = docstring_headline(cls) tup = (', '.join(cls.aliases) + ':', doc, cls.filenames and '(filenames ' + ', '.join(cls.filenames) + ')' or '') info.append(tup) info.sort() for i in info: print(('* %s\n %s %s') % i) elif what == 'filter': print() print("Filters:") print("~~~~~~~~") for name in get_all_filters(): cls = find_filter_class(name) print("* " + name + ':') print(" %s" % docstring_headline(cls)) elif what == 'style': print() print("Styles:") print("~~~~~~~") for name in get_all_styles(): cls = get_style_by_name(name) print("* " + name + ':') print(" %s" % docstring_headline(cls)) def main_inner(popts, args, usage): opts = {} O_opts = [] P_opts = [] F_opts = [] for opt, arg in popts: if opt == '-O': O_opts.append(arg) elif opt == '-P': P_opts.append(arg) elif opt == '-F': F_opts.append(arg) opts[opt] = arg if opts.pop('-h', None) is not None: print(usage) return 0 if opts.pop('-V', None) is not None: print('Pygments version %s, (c) 2006-2019 by Georg Brandl.' 
% __version__) return 0 # handle ``pygmentize -L`` L_opt = opts.pop('-L', None) if L_opt is not None: if opts: print(usage, file=sys.stderr) return 2 # print version main(['', '-V']) if not args: args = ['lexer', 'formatter', 'filter', 'style'] for arg in args: _print_list(arg.rstrip('s')) return 0 # handle ``pygmentize -H`` H_opt = opts.pop('-H', None) if H_opt is not None: if opts or len(args) != 2: print(usage, file=sys.stderr) return 2 what, name = args # pylint: disable=unbalanced-tuple-unpacking if what not in ('lexer', 'formatter', 'filter'): print(usage, file=sys.stderr) return 2 return _print_help(what, name) # parse -O options parsed_opts = _parse_options(O_opts) opts.pop('-O', None) # parse -P options for p_opt in P_opts: try: name, value = p_opt.split('=', 1) except ValueError: parsed_opts[p_opt] = True else: parsed_opts[name] = value opts.pop('-P', None) # encodings inencoding = parsed_opts.get('inencoding', parsed_opts.get('encoding')) outencoding = parsed_opts.get('outencoding', parsed_opts.get('encoding')) # handle ``pygmentize -N`` infn = opts.pop('-N', None) if infn is not None: lexer = find_lexer_class_for_filename(infn) if lexer is None: lexer = TextLexer print(lexer.aliases[0]) return 0 # handle ``pygmentize -S`` S_opt = opts.pop('-S', None) a_opt = opts.pop('-a', None) if S_opt is not None: f_opt = opts.pop('-f', None) if not f_opt: print(usage, file=sys.stderr) return 2 if opts or args: print(usage, file=sys.stderr) return 2 try: parsed_opts['style'] = S_opt fmter = get_formatter_by_name(f_opt, **parsed_opts) except ClassNotFound as err: print(err, file=sys.stderr) return 1 print(fmter.get_style_defs(a_opt or '')) return 0 # if no -S is given, -a is not allowed if a_opt is not None: print(usage, file=sys.stderr) return 2 # parse -F options F_opts = _parse_filters(F_opts) opts.pop('-F', None) allow_custom_lexer_formatter = False # -x: allow custom (eXternal) lexers and formatters if opts.pop('-x', None) is not None: allow_custom_lexer_formatter = True # select lexer lexer = None # given by name? lexername = opts.pop('-l', None) if lexername: # custom lexer, located relative to user's cwd if allow_custom_lexer_formatter and '.py' in lexername: try: if ':' in lexername: filename, name = lexername.rsplit(':', 1) lexer = load_lexer_from_file(filename, name, **parsed_opts) else: lexer = load_lexer_from_file(lexername, **parsed_opts) except ClassNotFound as err: print('Error:', err, file=sys.stderr) return 1 else: try: lexer = get_lexer_by_name(lexername, **parsed_opts) except (OptionError, ClassNotFound) as err: print('Error:', err, file=sys.stderr) return 1 # read input code code = None if args: if len(args) > 1: print(usage, file=sys.stderr) return 2 if '-s' in opts: print('Error: -s option not usable when input file specified', file=sys.stderr) return 2 infn = args[0] try: with open(infn, 'rb') as infp: code = infp.read() except Exception as err: print('Error: cannot read infile:', err, file=sys.stderr) return 1 if not inencoding: code, inencoding = guess_decode(code) # do we have to guess the lexer? 
if not lexer: try: lexer = get_lexer_for_filename(infn, code, **parsed_opts) except ClassNotFound as err: if '-g' in opts: try: lexer = guess_lexer(code, **parsed_opts) except ClassNotFound: lexer = TextLexer(**parsed_opts) else: print('Error:', err, file=sys.stderr) return 1 except OptionError as err: print('Error:', err, file=sys.stderr) return 1 elif '-s' not in opts: # treat stdin as full file (-s support is later) # read code from terminal, always in binary mode since we want to # decode ourselves and be tolerant with it if sys.version_info > (3,): # Python 3: we have to use .buffer to get a binary stream code = sys.stdin.buffer.read() else: code = sys.stdin.read() if not inencoding: code, inencoding = guess_decode_from_terminal(code, sys.stdin) # else the lexer will do the decoding if not lexer: try: lexer = guess_lexer(code, **parsed_opts) except ClassNotFound: lexer = TextLexer(**parsed_opts) else: # -s option needs a lexer with -l if not lexer: print('Error: when using -s a lexer has to be selected with -l', file=sys.stderr) return 2 # process filters for fname, fopts in F_opts: try: lexer.add_filter(fname, **fopts) except ClassNotFound as err: print('Error:', err, file=sys.stderr) return 1 # select formatter outfn = opts.pop('-o', None) fmter = opts.pop('-f', None) if fmter: # custom formatter, located relative to user's cwd if allow_custom_lexer_formatter and '.py' in fmter: try: if ':' in fmter: file, fmtername = fmter.rsplit(':', 1) fmter = load_formatter_from_file(file, fmtername, **parsed_opts) else: fmter = load_formatter_from_file(fmter, **parsed_opts) except ClassNotFound as err: print('Error:', err, file=sys.stderr) return 1 else: try: fmter = get_formatter_by_name(fmter, **parsed_opts) except (OptionError, ClassNotFound) as err: print('Error:', err, file=sys.stderr) return 1 if outfn: if not fmter: try: fmter = get_formatter_for_filename(outfn, **parsed_opts) except (OptionError, ClassNotFound) as err: print('Error:', err, file=sys.stderr) return 1 try: outfile = open(outfn, 'wb') except Exception as err: print('Error: cannot open outfile:', err, file=sys.stderr) return 1 else: if not fmter: if '256' in os.environ.get('TERM', ''): fmter = Terminal256Formatter(**parsed_opts) else: fmter = TerminalFormatter(**parsed_opts) if sys.version_info > (3,): # Python 3: we have to use .buffer to get a binary stream outfile = sys.stdout.buffer else: outfile = sys.stdout # determine output encoding if not explicitly selected if not outencoding: if outfn: # output file? use lexer encoding for now (can still be None) fmter.encoding = inencoding else: # else use terminal encoding fmter.encoding = terminal_encoding(sys.stdout) # provide coloring under Windows, if possible if not outfn and sys.platform in ('win32', 'cygwin') and \ fmter.name in ('Terminal', 'Terminal256'): # pragma: no cover # unfortunately colorama doesn't support binary streams on Py3 if sys.version_info > (3,): from pygments.util import UnclosingTextIOWrapper outfile = UnclosingTextIOWrapper(outfile, encoding=fmter.encoding) fmter.encoding = None try: import colorama.initialise except ImportError: pass else: outfile = colorama.initialise.wrap_stream( outfile, convert=None, strip=None, autoreset=False, wrap=True) # When using the LaTeX formatter and the option `escapeinside` is # specified, we need a special lexer which collects escaped text # before running the chosen language lexer. 
escapeinside = parsed_opts.get('escapeinside', '') if len(escapeinside) == 2 and isinstance(fmter, LatexFormatter): left = escapeinside[0] right = escapeinside[1] lexer = LatexEmbeddedLexer(left, right, lexer) # ... and do it! if '-s' not in opts: # process whole input as per normal... highlight(code, lexer, fmter, outfile) return 0 else: # line by line processing of stdin (eg: for 'tail -f')... try: while 1: if sys.version_info > (3,): # Python 3: we have to use .buffer to get a binary stream line = sys.stdin.buffer.readline() else: line = sys.stdin.readline() if not line: break if not inencoding: line = guess_decode_from_terminal(line, sys.stdin)[0] highlight(line, lexer, fmter, outfile) if hasattr(outfile, 'flush'): outfile.flush() return 0 except KeyboardInterrupt: # pragma: no cover return 0 def main(args=sys.argv): """ Main command line entry point. """ usage = USAGE % ((args[0],) * 6) try: popts, args = getopt.getopt(args[1:], "l:f:F:o:O:P:LS:a:N:vhVHgsx") except getopt.GetoptError: print(usage, file=sys.stderr) return 2 try: return main_inner(popts, args, usage) except Exception: if '-v' in dict(popts): print(file=sys.stderr) print('*' * 65, file=sys.stderr) print('An unhandled exception occurred while highlighting.', file=sys.stderr) print('Please report the whole traceback to the issue tracker at', file=sys.stderr) print('<https://github.com/pygments/pygments/issues>.', file=sys.stderr) print('*' * 65, file=sys.stderr) print(file=sys.stderr) raise import traceback info = traceback.format_exception(*sys.exc_info()) msg = info[-1].strip() if len(info) >= 3: # extract relevant file and position info msg += '\n (f%s)' % info[-2].split('\n')[0].strip()[1:] print(file=sys.stderr) print('*** Error while highlighting:', file=sys.stderr) print(msg, file=sys.stderr) print('*** If this is a bug you want to report, please rerun with -v.', file=sys.stderr) return 1 if __name__ == "__main__": main()
[]
[]
[ "TERM" ]
[]
["TERM"]
python
1
0
integration/marathon_test.go
package integration import ( "fmt" "net/http" "os" "time" "github.com/containous/traefik/v2/integration/try" "github.com/gambol99/go-marathon" "github.com/go-check/check" checker "github.com/vdemeester/shakers" ) const ( containerNameMesosSlave = "mesos-slave" containerNameMarathon = "marathon" ) // Marathon test suites (using libcompose). type MarathonSuite struct { BaseSuite marathonURL string } func (s *MarathonSuite) SetUpSuite(c *check.C) { s.createComposeProject(c, "marathon") s.composeProject.Start(c) marathonIPAddr := s.composeProject.Container(c, containerNameMarathon).NetworkSettings.IPAddress c.Assert(marathonIPAddr, checker.Not(checker.HasLen), 0) s.marathonURL = "http://" + marathonIPAddr + ":8080" // Wait for Marathon readiness prior to creating the client so that we // don't run into the "all cluster members down" state right from the // start. err := try.GetRequest(s.marathonURL+"/v2/leader", 1*time.Minute, try.StatusCodeIs(http.StatusOK)) c.Assert(err, checker.IsNil) // Add entry for Mesos slave container IP address in the hosts file so // that Traefik can properly forward traffic. // This is necessary as long as we are still using the docker-compose v1 // spec. Once we switch to v2 or higher, we can have both the test/builder // container and the Mesos slave container join the same custom network and // enjoy DNS-discoverable container host names. mesosSlaveIPAddr := s.composeProject.Container(c, containerNameMesosSlave).NetworkSettings.IPAddress c.Assert(mesosSlaveIPAddr, checker.Not(checker.HasLen), 0) err = s.extendDockerHostsFile(containerNameMesosSlave, mesosSlaveIPAddr) c.Assert(err, checker.IsNil) } // extendDockerHostsFile extends the hosts file (/etc/hosts) by the given // host/IP address mapping if we are running inside a container. func (s *MarathonSuite) extendDockerHostsFile(host, ipAddr string) error { const hostsFile = "/etc/hosts" // Determine if the run inside a container. The most reliable way to // do this is to inject an indicator, which we do in terms of an // environment variable. // (See also https://groups.google.com/d/topic/docker-user/JOGE7AnJ3Gw/discussion.) if os.Getenv("CONTAINER") == "DOCKER" { // We are running inside a container -- extend the hosts file. file, err := os.OpenFile(hostsFile, os.O_APPEND|os.O_WRONLY, 0600) if err != nil { return err } defer file.Close() if _, err = file.WriteString(fmt.Sprintf("%s\t%s\n", ipAddr, host)); err != nil { return err } } return nil } func deployApplication(c *check.C, client marathon.Marathon, application *marathon.Application) { deploy, err := client.UpdateApplication(application, false) c.Assert(err, checker.IsNil) // Wait for deployment to complete. c.Assert(client.WaitOnDeployment(deploy.DeploymentID, 1*time.Minute), checker.IsNil) } func (s *MarathonSuite) TestConfigurationUpdate(c *check.C) { // Start Traefik. file := s.adaptFile(c, "fixtures/marathon/simple.toml", struct { MarathonURL string }{s.marathonURL}) defer os.Remove(file) cmd, display := s.traefikCmd(withConfigFile(file)) defer display(c) err := cmd.Start() c.Assert(err, checker.IsNil) defer cmd.Process.Kill() // Wait for Traefik to turn ready. err = try.GetRequest("http://127.0.0.1:8000/", 2*time.Second, try.StatusCodeIs(http.StatusNotFound)) c.Assert(err, checker.IsNil) // Prepare Marathon client. config := marathon.NewDefaultConfig() config.URL = s.marathonURL client, err := marathon.NewClient(config) c.Assert(err, checker.IsNil) // Create test application to be deployed. app := marathon.NewDockerApplication(). Name("/whoami"). 
CPU(0.1). Memory(32). AddLabel("traefik.http.Routers.rt.Rule", "PathPrefix(`/service`)") app.Container.Docker.Bridged(). Expose(80). Container("containous/whoami") // Deploy the test application. deployApplication(c, client, app) // Query application via Traefik. err = try.GetRequest("http://127.0.0.1:8000/service", 30*time.Second, try.StatusCodeIs(http.StatusOK)) c.Assert(err, checker.IsNil) // Create test application with services to be deployed. app = marathon.NewDockerApplication(). Name("/whoami"). CPU(0.1). Memory(32). AddLabel("traefik.http.Routers.app.Rule", "PathPrefix(`/app`)") app.Container.Docker.Bridged(). Expose(80). Container("containous/whoami") // Deploy the test application. deployApplication(c, client, app) // Query application via Traefik. err = try.GetRequest("http://127.0.0.1:8000/app", 30*time.Second, try.StatusCodeIs(http.StatusOK)) c.Assert(err, checker.IsNil) }
[ "\"CONTAINER\"" ]
[]
[ "CONTAINER" ]
[]
["CONTAINER"]
go
1
0
wip/pulumi/helpers.py
import os import re import secrets import string import pulumi from pulumi import ResourceOptions from pulumi_kubernetes.apps.v1 import Deployment from pulumi_kubernetes.core.v1 import Service from azure.keyvault import KeyVaultClient, KeyVaultAuthentication, KeyVaultId from azure.common.credentials import ServicePrincipalCredentials def normalize_name(name): regex = re.compile('[^a-zA-Z0-9]') replaced = regex.sub('', name) normalized = replaced[:23] if len(replaced) > 23 else replaced return normalized def _get_kvclient(): def auth_callback(server, resource, scope): credentials = ServicePrincipalCredentials( client_id = os.getenv('ARM_CLIENT_ID'), secret = os.getenv('ARM_CLIENT_SECRET'), tenant = os.getenv('ARM_TENANT_ID'), resource = "https://vault.azure.net" ) token = credentials.token return token['token_type'], token['access_token'] kv_client = KeyVaultClient(KeyVaultAuthentication(auth_callback)) return kv_client def get_kv_secret(name): kv_client = _get_kvclient() secret = kv_client.get_secret("https://placeholder.vault.azure.net/", name, KeyVaultId.version_none).value return secret def _get_password(): alphabet = string.ascii_letters + string.digits password = ''.join(secrets.choice(alphabet) for i in range(20)) return password config = pulumi.Config('aks') PREFIX = pulumi.get_stack() PASSWORD = config.get('password') or _get_password() SSHKEY = config.get('sshkey') or 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCxinIAIDDCradZPAgX5GzBLv00u4rigOLUbU00E44FrfMTqu5wXiejJ4ycSb1bI+//ZNgaB2UYRbPL7A9OUKY+K4sX5O84Q6DPMjo/90IANHVTLf3xTaSc7hpvXOtIjJTJeiamxClgnTAcR55RV/j9/Wptxa8GGcRmRCcSmJUkx5AZTFI+s8aF0W3aeHHRw7TxNKBuwrX7FDcHyGKvdkFg4OP863Xe5hp5ql1C3XibmCOp1CMPIU2hCmGOy1LGbOf/Pa+QKAdtUSrPNK/jBWvPWo0k02Ii0JtMAdlpVqnJc3czNIp5gEqZCRCGEdkb/kZnJiMRZhmLBYnC8tiMxvZj core@k8s' LOCATION = config.get('location') or 'westeurope' NAMESPACE = config.get('namespace') or 'flux' args_flux = [ "--ssh-keygen-dir=/var/fluxd/keygen", "--k8s-secret-name=flux-ssh", "--memcached-hostname=memcached", "--memcached-service=", "[email protected]:v3/xxxxxx", "--git-branch=master", "--git-path=flux/cluster-setup,flux/{}".format(PREFIX), "--git-user=Weave Flux", "[email protected]", "--git-set-author=false", "--git-poll-interval=5m", "--git-label={}".format(PREFIX), "--git-timeout=20s", "--sync-interval=5m", "--git-ci-skip=false", "--registry-exclude-image=*", "--registry-poll-interval=5m", "--registry-rps=200", "--registry-burst=125", "--registry-trace=false" ] args_memcached = ["-m 64","-p 11211","-I 1m"] volumeMounts_flux = [ { "name": "kubedir", "mountPath": "/root/.kubectl" }, { "name": "git-key", "mountPath": "/etc/fluxd/ssh", "readOnly": True }, { "name": "git-keygen", "mountPath": "/var/fluxd/keygen" } ] volumes_flux = [ { "name": "kubedir", "configmap": { "name": "flux-configmap" } }, { "name": "git-key", "secret": { "secretName": "flux-ssh", "defaultMode": 0o400 # has to be in octal } }, { "name": "git-keygen", "emptyDir": { "medium": "Memory" } } ] def _gen_service(name, ports, custom_provider, dependencies=[], service_type="ClusterIP"): ports = [{"port": port, "target_port": port, "name": str(port)} for port in ports] labels = { "app": name, "purpose": "flux" } Service(name, metadata={ "name": name, "labels": labels, "namespace": NAMESPACE }, spec={ "ports": ports, "selector": labels, "type": service_type, "sessionAffinity": "ClientIP" }, __opts__=ResourceOptions( provider=custom_provider, depends_on=dependencies) ) def _gen_deployment(name, ports, image, custom_provider, serviceAccount, args=[], dependencies=[], 
replicas=1, resources={}, env={}, volumes=[], volume_mounts=[]): keys = ['container_port'] ports = [dict.fromkeys(keys, port) for port in ports] labels = { "app": name, "purpose": "flux" } container = { "name": name, "image": image, "imagePullPolicy": "Always", "resources": resources, "ports": ports, "args": args, "env": [ { "name": "KUBECONFIG", "value": "/root/.kubectl/config" } ], "volumeMounts": volume_mounts } Deployment(name, metadata={ "name": name, "labels": labels, "namespace": NAMESPACE }, spec={ "selector": { "match_labels": labels }, "replicas": replicas, "template": { "metadata": { "labels": labels }, "spec": { "containers": [ container ], "serviceAccount": serviceAccount, "volumes": volumes } } }, __opts__=ResourceOptions( provider=custom_provider, depends_on=dependencies) ) def gen_application(name, ports, image, customProvider, dependencies=[], serviceAccount="default", volumes=False, volumeMounts=False): args = globals()["args_{}".format(name)] if volumes: volumes = globals()["volumes_{}".format(name)] else: volumes = [] if volumeMounts: volumeMounts = globals()["volumeMounts_{}".format(name)] else: volumeMounts = [] _gen_service(name, ports, customProvider) _gen_deployment(name, ports, image, customProvider, serviceAccount, args=args, dependencies=dependencies, volumes=volumes, volume_mounts=volumeMounts)
[]
[]
[ "ARM_CLIENT_ID", "ARM_CLIENT_SECRET", "ARM_TENANT_ID" ]
[]
["ARM_CLIENT_ID", "ARM_CLIENT_SECRET", "ARM_TENANT_ID"]
python
3
0
python/ray/tune/trial.py
from collections import deque import copy import json import logging from numbers import Number import os import platform import re import shutil import time from typing import Dict, Optional, Sequence, Union, Callable, List import uuid import ray import ray.cloudpickle as cloudpickle from ray.exceptions import RayActorError, RayTaskError from ray.tune import TuneError from ray.tune.checkpoint_manager import _TuneCheckpoint, CheckpointManager # NOTE(rkn): We import ray.tune.registry here instead of importing the names we # need because there are cyclic imports that may cause specific names to not # have been defined yet. See https://github.com/ray-project/ray/issues/1716. from ray.tune.registry import get_trainable_cls, validate_trainable from ray.tune.result import ( DEFAULT_RESULTS_DIR, DONE, NODE_IP, PID, TRAINING_ITERATION, TRIAL_ID, DEBUG_METRICS, ) from ray.tune.resources import Resources from ray.tune.utils.placement_groups import ( PlacementGroupFactory, resource_dict_to_pg_factory, ) from ray.tune.utils.serialization import TuneFunctionEncoder from ray.tune.utils.trainable import TrainableUtil from ray.tune.utils import date_str, flatten_dict from ray.util.annotations import DeveloperAPI from ray._private.utils import binary_to_hex, hex_to_binary DEBUG_PRINT_INTERVAL = 5 logger = logging.getLogger(__name__) class Location: """Describes the location at which Trial is placed to run.""" def __init__(self, hostname=None, pid=None): self.hostname = hostname self.pid = pid def __str__(self): if not self.pid: return "" elif self.hostname == platform.node(): return "pid={}".format(self.pid) else: return "{}:{}".format(self.hostname, self.pid) @DeveloperAPI class ExportFormat: """Describes the format to import/export the trial Trainable. This may correspond to different file formats based on the Trainable implementation. """ CHECKPOINT = "checkpoint" MODEL = "model" ONNX = "onnx" H5 = "h5" @staticmethod def validate(formats): """Validates formats. Raises: ValueError if the format is unknown. """ for i in range(len(formats)): formats[i] = formats[i].strip().lower() if formats[i] not in [ ExportFormat.CHECKPOINT, ExportFormat.MODEL, ExportFormat.ONNX, ExportFormat.H5, ]: raise TuneError("Unsupported import/export format: " + formats[i]) class CheckpointDeleter: """Checkpoint deleter callback for a runner.""" def __init__(self, trial_id, runner): self.trial_id = trial_id self.runner = runner def __call__(self, checkpoint: _TuneCheckpoint): """Requests checkpoint deletion asynchronously. Args: checkpoint: Checkpoint to delete. """ if not self.runner: return if checkpoint.storage == _TuneCheckpoint.PERSISTENT and checkpoint.value: checkpoint_path = checkpoint.value logger.debug( "Trial %s: Deleting checkpoint %s", self.trial_id, checkpoint_path ) # TODO(ujvl): Batch remote deletes. # We first delete the remote checkpoint. If it is on the same # node as the driver, it will also remove the local copy. ray.get(self.runner.delete_checkpoint.remote(checkpoint_path)) # Delete local copy, if any exists. if os.path.exists(checkpoint_path): try: checkpoint_dir = TrainableUtil.find_checkpoint_dir(checkpoint_path) shutil.rmtree(checkpoint_dir) except FileNotFoundError: logger.debug("Local checkpoint dir not found during deletion.") class TrialInfo: """Serializable struct for holding information for a Trial. Attributes: trial_name: String name of the current trial. trial_id: trial_id of the trial trial_resources: resources used by trial. 
""" def __init__(self, trial: "Trial"): self._trial_name = str(trial) self._trial_id = trial.trial_id self._trial_resources = trial.placement_group_factory @property def trial_name(self): return self._trial_name @property def trial_id(self): return self._trial_id @property def trial_resources(self) -> Union[Resources, PlacementGroupFactory]: return self._trial_resources @trial_resources.setter def trial_resources(self, new_resources: Union[Resources, PlacementGroupFactory]): self._trial_resources = new_resources def create_logdir(dirname, local_dir): local_dir = os.path.expanduser(local_dir) logdir = os.path.join(local_dir, dirname) if os.path.exists(logdir): old_dirname = dirname dirname += "_" + uuid.uuid4().hex[:4] logger.info( f"Creating a new dirname {dirname} because " f"trial dirname '{old_dirname}' already exists." ) logdir = os.path.join(local_dir, dirname) os.makedirs(logdir, exist_ok=True) return logdir def _to_pg_factory( resources: Optional[Resources], placement_group_factory: Optional[PlacementGroupFactory], ) -> PlacementGroupFactory: """Outputs resources requirement in the form of PGF. In case that `placement_group_factory` is None, `resources` will be converted to PGF. If this is unsuccessful, an error will be raised. """ if not placement_group_factory: if not resources: resources = Resources(cpu=1, gpu=0) placement_group_factory = resource_dict_to_pg_factory(resources) return placement_group_factory @DeveloperAPI class Trial: """A trial object holds the state for one model training run. Trials are themselves managed by the TrialRunner class, which implements the event loop for submitting trial runs to a Ray cluster. Trials start in the PENDING state, and transition to RUNNING once started. On error it transitions to ERROR, otherwise TERMINATED on success. There are resources allocated to each trial. These should be specified using ``PlacementGroupFactory``. Attributes: trainable_name: Name of the trainable object to be executed. config: Provided configuration dictionary with evaluated params. trial_id: Unique identifier for the trial. local_dir: Local_dir as passed to tune.run. logdir: Directory where the trial logs are saved. evaluated_params: Evaluated parameters by search algorithm, experiment_tag: Identifying trial name to show in the console status: One of PENDING, RUNNING, PAUSED, TERMINATED, ERROR/ error_file: Path to the errors that this trial has raised. 
""" _nonjson_fields = [ "results", "best_result", "param_config", "extra_arg", "placement_group_factory", ] PENDING = "PENDING" RUNNING = "RUNNING" PAUSED = "PAUSED" TERMINATED = "TERMINATED" ERROR = "ERROR" def __init__( self, trainable_name: str, config: Optional[Dict] = None, trial_id: Optional[str] = None, local_dir: Optional[str] = DEFAULT_RESULTS_DIR, evaluated_params: Optional[Dict] = None, experiment_tag: str = "", resources: Optional[Resources] = None, placement_group_factory: Optional[PlacementGroupFactory] = None, stopping_criterion: Optional[Dict[str, float]] = None, remote_checkpoint_dir: Optional[str] = None, sync_function_tpl: Optional[str] = None, checkpoint_freq: int = 0, checkpoint_at_end: bool = False, sync_on_checkpoint: bool = True, keep_checkpoints_num: Optional[int] = None, checkpoint_score_attr: str = TRAINING_ITERATION, export_formats: Optional[List[str]] = None, restore_path: Optional[str] = None, trial_name_creator: Optional[Callable[["Trial"], str]] = None, trial_dirname_creator: Optional[Callable[["Trial"], str]] = None, log_to_file: Optional[str] = None, max_failures: int = 0, stub: bool = False, _setup_default_resource: bool = True, ): """Initialize a new trial. The args here take the same meaning as the command line flags defined in ray.tune.config_parser. Args: _setup_default_resource: Whether to set up default resources. When initializing trials from checkpoints, this field is set to false, so that setting up default resources can be delayed till after ``trial.config`` is loaded from checkpoints. """ # If this is set, trainables are not validated or looked up. # This can be used e.g. to initialize Trial objects from checkpoints # without loading the trainable first. self.stub = stub if not self.stub: validate_trainable(trainable_name) # Trial config self.trainable_name = trainable_name self.trial_id = Trial.generate_id() if trial_id is None else trial_id self.config = config or {} self.local_dir = local_dir # This remains unexpanded for syncing. # Parameters that Tune varies across searches. self.evaluated_params = evaluated_params or {} self.experiment_tag = experiment_tag self.location = Location() trainable_cls = self.get_trainable_cls() if trainable_cls and _setup_default_resource: default_resources = trainable_cls.default_resource_request(self.config) # If Trainable returns resources, do not allow manual override via # `resources_per_trial` by the user. if default_resources: if resources or placement_group_factory: raise ValueError( "Resources for {} have been automatically set to {} " "by its `default_resource_request()` method. 
Please " "clear the `resources_per_trial` option.".format( trainable_cls, default_resources ) ) if isinstance(default_resources, PlacementGroupFactory): placement_group_factory = default_resources resources = None else: placement_group_factory = None resources = default_resources self.placement_group_factory = _to_pg_factory( resources, placement_group_factory ) self.stopping_criterion = stopping_criterion or {} self.log_to_file = log_to_file # Make sure `stdout_file, stderr_file = Trial.log_to_file` works if ( not self.log_to_file or not isinstance(self.log_to_file, Sequence) or not len(self.log_to_file) == 2 ): self.log_to_file = (None, None) self.max_failures = max_failures # Local trial state that is updated during the run self._last_result = {} self._default_result_or_future: Union[ray.ObjectRef, dict, None] = None self.last_update_time = -float("inf") # stores in memory max/min/avg/last-n-avg/last result for each # metric by trial self.metric_analysis = {} # keep a moving average over these last n steps self.n_steps = [5, 10] self.metric_n_steps = {} self.export_formats = export_formats self.status = Trial.PENDING self.start_time = None self.logdir = None self.runner = None self.last_debug = 0 self.error_file = None self.pickled_error_file = None self.trial_name_creator = trial_name_creator self.trial_dirname_creator = trial_dirname_creator self.custom_trial_name = None self.custom_dirname = None # Checkpointing fields self.saving_to = None if remote_checkpoint_dir: self.remote_checkpoint_dir_prefix = remote_checkpoint_dir else: self.remote_checkpoint_dir_prefix = None if sync_function_tpl == "auto" or not isinstance(sync_function_tpl, str): sync_function_tpl = None self.sync_function_tpl = sync_function_tpl self.checkpoint_freq = checkpoint_freq self.checkpoint_at_end = checkpoint_at_end self.keep_checkpoints_num = keep_checkpoints_num self.checkpoint_score_attr = checkpoint_score_attr self.sync_on_checkpoint = sync_on_checkpoint self.checkpoint_manager = CheckpointManager( keep_checkpoints_num, checkpoint_score_attr, CheckpointDeleter(self._trainable_name(), self.runner), ) # Restoration fields self.restore_path = restore_path self.restoring_from = None self.num_failures = 0 # AutoML fields self.results = None self.best_result = None self.param_config = None self.extra_arg = None if trial_name_creator: self.custom_trial_name = trial_name_creator(self) if trial_dirname_creator: self.custom_dirname = trial_dirname_creator(self) if os.path.sep in self.custom_dirname: raise ValueError( f"Trial dirname must not contain '/'. Got {self.custom_dirname}" ) self._state_json = None self._state_valid = False def _get_default_result_or_future(self) -> Optional[dict]: """Calls ray.get on self._default_result_or_future and assigns back. Returns None in case of exceptions. Will also set the trial location if runner is set. """ if self._default_result_or_future and isinstance( self._default_result_or_future, ray.ObjectRef ): try: self._default_result_or_future = ray.get(self._default_result_or_future) except RayActorError: # error during initialization self._default_result_or_future = None if self._default_result_or_future and self.runner: self.set_location( Location( self._default_result_or_future.get(NODE_IP), self._default_result_or_future.get(PID), ) ) return self._default_result_or_future @property def last_result(self) -> dict: # The logic in here is as follows: # 1. If the trial has reported at least once, last_result would have # been set and therefore would not be empty. 
We can just return it. # 2. If the trial has not reported at least once but we have the # future for the default results dict, (obtained through # Trainable.get_auto_filled_metrics), we get that future # and return it. # 3. In the worst case where we have nothing, we just set the # trial_id and return that. result = self._last_result if not {k for k in result if k != TRIAL_ID}: self._get_default_result_or_future() result = self._default_result_or_future or result result.setdefault(TRIAL_ID, self.trial_id) return result @last_result.setter def last_result(self, val: dict): self._last_result = val @property def has_reported_at_least_once(self) -> bool: return bool(self._last_result) @property def node_ip(self): return self.location.hostname @property def checkpoint(self): """Returns the most recent checkpoint. If the trial is in ERROR state, the most recent PERSISTENT checkpoint is returned. """ if self.status == Trial.ERROR: checkpoint = self.checkpoint_manager.newest_persistent_checkpoint else: checkpoint = self.checkpoint_manager.newest_checkpoint if checkpoint.value is None: checkpoint = _TuneCheckpoint(_TuneCheckpoint.PERSISTENT, self.restore_path) return checkpoint @classmethod def generate_id(cls): return str(uuid.uuid1().hex)[:8] @property def remote_checkpoint_dir(self): """This is the **per trial** remote checkpoint dir. This is different from **per experiment** remote checkpoint dir. """ assert self.logdir, "Trial {}: logdir not initialized.".format(self) if not self.remote_checkpoint_dir_prefix: return None logdir_name = os.path.basename(self.logdir) return os.path.join(self.remote_checkpoint_dir_prefix, logdir_name) @property def uses_cloud_checkpointing(self): return bool(self.remote_checkpoint_dir) def reset(self): # If there is `default_resource_request` associated with the trainable, # clear `resources` and `placement_group_factory`. # This is mainly relevant for RLlib tuning jobs, where we save users # of the trouble to specify the resources themselves by having some # default resources for popular RLlib algorithms. trainable_cls = self.get_trainable_cls() clear_resources = trainable_cls and trainable_cls.default_resource_request( self.config ) placement_group_factory = ( self.placement_group_factory if not clear_resources else None ) return Trial( self.trainable_name, config=self.config, trial_id=None, local_dir=self.local_dir, evaluated_params=self.evaluated_params, experiment_tag=self.experiment_tag, resources=None, placement_group_factory=placement_group_factory, stopping_criterion=self.stopping_criterion, remote_checkpoint_dir=self.remote_checkpoint_dir, checkpoint_freq=self.checkpoint_freq, checkpoint_at_end=self.checkpoint_at_end, sync_on_checkpoint=self.sync_on_checkpoint, keep_checkpoints_num=self.keep_checkpoints_num, checkpoint_score_attr=self.checkpoint_score_attr, export_formats=self.export_formats, restore_path=self.restore_path, trial_name_creator=self.trial_name_creator, trial_dirname_creator=self.trial_dirname_creator, log_to_file=self.log_to_file, max_failures=self.max_failures, ) def init_logdir(self): """Init logdir.""" if not self.logdir: self.logdir = create_logdir(self._generate_dirname(), self.local_dir) else: os.makedirs(self.logdir, exist_ok=True) self.invalidate_json_state() def update_resources(self, resources: Union[Dict, PlacementGroupFactory]): """EXPERIMENTAL: Updates the resource requirements. Should only be called when the trial is not running. Raises: ValueError if trial status is running. 
""" if self.status is Trial.RUNNING: raise ValueError("Cannot update resources while Trial is running.") placement_group_factory = None if isinstance(resources, PlacementGroupFactory): placement_group_factory = resources else: resources = Resources(**resources) self.placement_group_factory = _to_pg_factory( resources, placement_group_factory ) self.invalidate_json_state() def set_runner(self, runner): self.runner = runner if runner: # Do not block here, the result will be gotten when last_result # property is accessed self._default_result_or_future = runner.get_auto_filled_metrics.remote( debug_metrics_only=True ) self.checkpoint_manager.delete = CheckpointDeleter( self._trainable_name(), runner ) # No need to invalidate state cache: runner is not stored in json # self.invalidate_json_state() def set_location(self, location): """Sets the location of the trial.""" self.location = location # No need to invalidate state cache: location is not stored in json # self.invalidate_json_state() def set_status(self, status): """Sets the status of the trial.""" self.status = status if status == Trial.RUNNING: if self.start_time is None: self.start_time = time.time() self.invalidate_json_state() def set_config(self, config): self.config = config self.invalidate_json_state() def set_experiment_tag(self, experiment_tag): self.experiment_tag = experiment_tag self.invalidate_json_state() def write_error_log(self, exc: Optional[Union[TuneError, RayTaskError]] = None): if exc and self.logdir: self.num_failures += 1 self.error_file = os.path.join(self.logdir, "error.txt") if exc and isinstance(exc, RayTaskError): # Piping through the actual error to result grid. self.pickled_error_file = os.path.join(self.logdir, "error.pkl") with open(self.pickled_error_file, "wb") as f: cloudpickle.dump(exc, f) with open(self.error_file, "a+") as f: f.write( "Failure # {} (occurred at {})\n".format( self.num_failures, date_str() ) ) f.write(str(exc) + "\n") self.invalidate_json_state() def should_stop(self, result): """Whether the given result meets this trial's stopping criteria.""" if result.get(DONE): return True for criteria, stop_value in self.stopping_criterion.items(): if criteria not in result: raise TuneError( "Stopping criteria {} not provided in result dict. Keys " "are {}.".format(criteria, list(result.keys())) ) elif isinstance(criteria, dict): raise ValueError( "Stopping criteria is now flattened by default. " "Use forward slashes to nest values `key1/key2/key3`." ) elif result[criteria] >= stop_value: return True return False def should_checkpoint(self): """Whether this trial is due for checkpointing.""" result = self.last_result or {} if result.get(DONE) and self.checkpoint_at_end: return True return ( self.checkpoint_freq and result.get(TRAINING_ITERATION, 0) % self.checkpoint_freq == 0 ) def has_checkpoint(self): return self.checkpoint.value is not None def clear_checkpoint(self): self.checkpoint.value = None self.restoring_from = None self.invalidate_json_state() def on_checkpoint(self, checkpoint: _TuneCheckpoint): """Hook for handling checkpoints taken by the Trainable. Args: checkpoint: Checkpoint taken. """ self.checkpoint_manager.on_checkpoint(checkpoint) self.invalidate_json_state() def on_restore(self): """Handles restoration completion.""" assert self.is_restoring self.last_result = self.restoring_from.result self.restoring_from = None self.invalidate_json_state() def should_recover(self): """Returns whether the trial qualifies for retrying. 
This is if the trial has not failed more than max_failures. Note this may return true even when there is no checkpoint, either because `self.checkpoint_freq` is `0` or because the trial failed before a checkpoint has been made. """ return self.num_failures < self.max_failures or self.max_failures < 0 def update_last_result(self, result): if self.experiment_tag: result.update(experiment_tag=self.experiment_tag) self.set_location(Location(result.get(NODE_IP), result.get(PID))) self.last_result = result self.last_update_time = time.time() metric_result = self.last_result.copy() for remove_metric in DEBUG_METRICS: metric_result.pop(remove_metric, None) for metric, value in flatten_dict(metric_result).items(): if isinstance(value, Number): if metric not in self.metric_analysis: self.metric_analysis[metric] = { "max": value, "min": value, "avg": value, "last": value, } self.metric_n_steps[metric] = {} for n in self.n_steps: key = "last-{:d}-avg".format(n) self.metric_analysis[metric][key] = value # Store n as string for correct restore. self.metric_n_steps[metric][str(n)] = deque([value], maxlen=n) else: step = result["training_iteration"] or 1 self.metric_analysis[metric]["max"] = max( value, self.metric_analysis[metric]["max"] ) self.metric_analysis[metric]["min"] = min( value, self.metric_analysis[metric]["min"] ) self.metric_analysis[metric]["avg"] = ( 1 / step * (value + (step - 1) * self.metric_analysis[metric]["avg"]) ) self.metric_analysis[metric]["last"] = value for n in self.n_steps: key = "last-{:d}-avg".format(n) self.metric_n_steps[metric][str(n)].append(value) self.metric_analysis[metric][key] = sum( self.metric_n_steps[metric][str(n)] ) / len(self.metric_n_steps[metric][str(n)]) self.invalidate_json_state() def get_trainable_cls(self): if self.stub: return None return get_trainable_cls(self.trainable_name) def is_finished(self): return self.status in [Trial.ERROR, Trial.TERMINATED] @property def is_restoring(self): return self.restoring_from is not None @property def is_saving(self): return self.saving_to is not None def __repr__(self): return self._trainable_name(include_trial_id=True) def __str__(self): return self._trainable_name(include_trial_id=True) def _trainable_name(self, include_trial_id=False): """Combines ``env`` with ``trainable_name`` and ``trial_id``. Can be overridden with a custom string creator. """ if self.custom_trial_name: return self.custom_trial_name if "env" in self.config: env = self.config["env"] if isinstance(env, type): env = env.__name__ identifier = "{}_{}".format(self.trainable_name, env) else: identifier = self.trainable_name if include_trial_id: identifier += "_" + self.trial_id return identifier.replace("/", "_") def _generate_dirname(self): if self.custom_dirname: generated_dirname = self.custom_dirname else: MAX_LEN_IDENTIFIER = int(os.environ.get("TUNE_MAX_LEN_IDENTIFIER", "130")) generated_dirname = f"{str(self)}_{self.experiment_tag}" generated_dirname = generated_dirname[:MAX_LEN_IDENTIFIER] generated_dirname += f"_{date_str()}" # This is the file path used by rsync. ['/', '(', ')'] are not allowed. return re.sub("[/()]", "_", generated_dirname) def invalidate_json_state(self): self._state_valid = False def get_json_state(self) -> str: if not self._state_json or not self._state_valid: json_state = json.dumps( self.__getstate__(), indent=2, cls=TuneFunctionEncoder ) self._state_json = json_state self._state_valid = True return self._state_json def __getstate__(self): """Memento generator for Trial. Sets RUNNING trials to PENDING. 
Note this can only occur if the trial holds a PERSISTENT checkpoint. """ state = self.__dict__.copy() for key in self._nonjson_fields: state[key] = binary_to_hex(cloudpickle.dumps(state.get(key))) state["runner"] = None state["location"] = Location() # Avoid waiting for events that will never occur on resume. state["restoring_from"] = None state["saving_to"] = None state["_state_json"] = None state["_state_valid"] = False state["_default_result_or_future"] = None return copy.deepcopy(state) def __setstate__(self, state): if state["status"] == Trial.RUNNING: state["status"] = Trial.PENDING for key in self._nonjson_fields: state[key] = cloudpickle.loads(hex_to_binary(state[key])) # Ensure that stub doesn't get overriden stub = state.pop("stub", True) self.__dict__.update(state) self.stub = stub or getattr(self, "stub", False) if not self.stub: validate_trainable(self.trainable_name) assert self.placement_group_factory # Avoid creating logdir in client mode for returned trial results, # since the dir might not be creatable locally. # TODO(ekl) this is kind of a hack. if not ray.util.client.ray.is_connected(): self.init_logdir() # Create logdir if it does not exist
[]
[]
[ "TUNE_MAX_LEN_IDENTIFIER" ]
[]
["TUNE_MAX_LEN_IDENTIFIER"]
python
1
0
internal/services/auth/token.go
package auth import ( "context" "log" "net/http" "time" "github.com/dgrijalva/jwt-go" "github.com/manabie-com/togo/internal/app/config" "github.com/manabie-com/togo/internal/utils" ) var secretkey = config.LoadConfigs().App.SecretKey func CreateToken(id int) (string, error) { atClaims := jwt.MapClaims{} atClaims["user_id"] = id atClaims["exp"] = time.Now().Add(time.Minute * 150).Unix() at := jwt.NewWithClaims(jwt.SigningMethodHS256, atClaims) token, err := at.SignedString([]byte(secretkey)) if err != nil { return "", err } return token, nil } func ValidToken(req *http.Request) (*http.Request, bool) { token := req.Header.Get("Authorization") claims := make(jwt.MapClaims) t, err := jwt.ParseWithClaims(token, claims, func(*jwt.Token) (interface{}, error) { return []byte(secretkey), nil }) if err != nil { log.Println(err) return req, false } if !t.Valid { return req, false } id, ok := claims["user_id"].(float64) if !ok { return req, false } req = req.WithContext(context.WithValue(req.Context(), utils.UserAuthKey(0), uint64(id))) return req, true }
[]
[]
[]
[]
[]
go
null
null
null
roadscene2vec/scripts/4_test_model.py
import os import sys #import check_gpu as cg #os.environ['CUDA_VISIBLE_DEVICES'] = cg.get_free_gpu() sys.path.append(os.path.dirname(sys.path[0])) from learning.util.image_trainer import Image_Trainer from learning.util.scenegraph_trainer import Scenegraph_Trainer from util.config_parser import configuration import wandb import torch.nn as nn #python 4_test_model.py --yaml_path C:\users\harsi\research\roadscene2vec\roadscene2vec\config\learning_config.yaml def test_Trainer(learning_config): ''' Training the dynamic kg algorithm with different attention layer choice.''' #wandb setup wandb_arg= wandb.init(project=learning_config.wandb_configuration['project'], entity=learning_config.wandb_configuration['entity']) outputs = [] labels = [] metrics = [] categories_train_list = [] categories_test_list = [] if learning_config.training_configuration["dataset_type"] == "real": trainer = Image_Trainer(learning_config, wandb_arg) trainer.split_dataset() trainer.load_model() #set load model to true in config, and specify load path trainer.loss_func = nn.CrossEntropyLoss() #set loss function categories_train, categories_test, metric = trainer.eval_model() categories_train_list.append(categories_train) categories_test_list.append(categories_test) metrics.append(metric) elif learning_config.training_configuration["dataset_type"] == "scenegraph": trainer = Scenegraph_Trainer(learning_config, wandb_arg) trainer.split_dataset() trainer.load_model() #set load model to true in config, and specify load path trainer.loss_func = nn.CrossEntropyLoss() #set loss function outputs_train, labels_train, outputs_test, labels_test, metric = trainer.evaluate() outputs += outputs_test labels += labels_test metrics.append(metric) else: raise ValueError("Type unrecognized") if __name__ == "__main__": # the entry of dynkg pipeline training learning_config = configuration(sys.argv[1:]) test_Trainer(learning_config)
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
ptvs_virtualenv_proxy.py
# ############################################################################ # # Copyright (c) Microsoft Corporation. # # This source code is subject to terms and conditions of the Apache License, Version 2.0. A # copy of the license can be found in the License.html file at the root of this distribution. If # you cannot locate the Apache License, Version 2.0, please send an email to # [email protected]. By using this source code in any fashion, you are agreeing to be bound # by the terms of the Apache License, Version 2.0. # # You must not remove this notice, or any other, from this software. # # ########################################################################### import datetime import os import sys if sys.version_info[0] == 3: def to_str(value): return value.decode(sys.getfilesystemencoding()) def execfile(path, global_dict): """Execute a file""" with open(path, 'r') as f: code = f.read() code = code.replace('\r\n', '\n') + '\n' exec(code, global_dict) else: def to_str(value): return value.encode(sys.getfilesystemencoding()) def log(txt): """Logs fatal errors to a log file if WSGI_LOG env var is defined""" log_file = os.environ.get('WSGI_LOG') if log_file: f = open(log_file, 'a+') try: f.write('%s: %s' % (datetime.datetime.now(), txt)) finally: f.close() ptvsd_secret = os.getenv('WSGI_PTVSD_SECRET') if ptvsd_secret: log('Enabling ptvsd ...\n') try: import ptvsd try: ptvsd.enable_attach(ptvsd_secret) log('ptvsd enabled.\n') except: log('ptvsd.enable_attach failed\n') except ImportError: log('error importing ptvsd.\n'); def get_wsgi_handler(handler_name): if not handler_name: raise Exception('WSGI_HANDLER env var must be set') if not isinstance(handler_name, str): handler_name = to_str(handler_name) module_name, _, callable_name = handler_name.rpartition('.') should_call = callable_name.endswith('()') callable_name = callable_name[:-2] if should_call else callable_name name_list = [(callable_name, should_call)] handler = None while module_name: try: handler = __import__(module_name, fromlist=[name_list[0][0]]) for name, should_call in name_list: handler = getattr(handler, name) if should_call: handler = handler() break except ImportError: module_name, _, callable_name = module_name.rpartition('.') should_call = callable_name.endswith('()') callable_name = callable_name[:-2] if should_call else callable_name name_list.insert(0, (callable_name, should_call)) handler = None if handler is None: raise ValueError('"%s" could not be imported' % handler_name) return handler activate_this = os.getenv('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS') if not activate_this: raise Exception('WSGI_ALT_VIRTUALENV_ACTIVATE_THIS is not set') def get_virtualenv_handler(): log('Activating virtualenv with %s\n' % activate_this) execfile(activate_this, dict(__file__=activate_this)) log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER')) handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER')) log('Got handler: %r\n' % handler) return handler def get_venv_handler(): log('Activating venv with executable at %s\n' % activate_this) import site sys.executable = activate_this old_sys_path, sys.path = sys.path, [] site.main() sys.path.insert(0, '') for item in old_sys_path: if item not in sys.path: sys.path.append(item) log('Getting handler %s\n' % os.getenv('WSGI_ALT_VIRTUALENV_HANDLER')) handler = get_wsgi_handler(os.getenv('WSGI_ALT_VIRTUALENV_HANDLER')) log('Got handler: %r\n' % handler) return handler
[]
[]
[ "WSGI_ALT_VIRTUALENV_HANDLER", "WSGI_LOG", "WSGI_ALT_VIRTUALENV_ACTIVATE_THIS", "WSGI_PTVSD_SECRET" ]
[]
["WSGI_ALT_VIRTUALENV_HANDLER", "WSGI_LOG", "WSGI_ALT_VIRTUALENV_ACTIVATE_THIS", "WSGI_PTVSD_SECRET"]
python
4
0
go/test/endtoend/reparent/utils/utils.go
/* Copyright 2019 The Vitess Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package utils import ( "context" "encoding/json" "fmt" "os" "os/exec" "path" "reflect" "strings" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "vitess.io/vitess/go/json2" "vitess.io/vitess/go/vt/log" querypb "vitess.io/vitess/go/vt/proto/query" topodatapb "vitess.io/vitess/go/vt/proto/topodata" "vitess.io/vitess/go/mysql" "vitess.io/vitess/go/sqltypes" "vitess.io/vitess/go/test/endtoend/cluster" ) var ( KeyspaceName = "ks" dbName = "vt_" + KeyspaceName username = "vt_dba" Hostname = "localhost" insertVal = 1 insertSQL = "insert into vt_insert_test(id, msg) values (%d, 'test %d')" sqlSchema = ` create table vt_insert_test ( id bigint, msg varchar(64), primary key (id) ) Engine=InnoDB ` cell1 = "zone1" cell2 = "zone2" ShardName = "0" KeyspaceShard = KeyspaceName + "/" + ShardName ) //region cluster setup/teardown // SetupReparentClusterLegacy is used to setup the reparent cluster func SetupReparentClusterLegacy(t *testing.T, enableSemiSync bool) *cluster.LocalProcessCluster { return setupClusterLegacy(context.Background(), t, ShardName, []string{cell1, cell2}, []int{3, 1}, enableSemiSync) } // SetupReparentCluster is used to setup the reparent cluster func SetupReparentCluster(t *testing.T, enableSemiSync bool) *cluster.LocalProcessCluster { return setupCluster(context.Background(), t, ShardName, []string{cell1, cell2}, []int{3, 1}, enableSemiSync) } // SetupRangeBasedCluster sets up the range based cluster func SetupRangeBasedCluster(ctx context.Context, t *testing.T) *cluster.LocalProcessCluster { return setupClusterLegacy(ctx, t, ShardName, []string{cell1}, []int{2}, true) } // TeardownCluster is used to teardown the reparent cluster func TeardownCluster(clusterInstance *cluster.LocalProcessCluster) { clusterInstance.Teardown() } func setupCluster(ctx context.Context, t *testing.T, shardName string, cells []string, numTablets []int, enableSemiSync bool) *cluster.LocalProcessCluster { var tablets []*cluster.Vttablet clusterInstance := cluster.NewCluster(cells[0], Hostname) keyspace := &cluster.Keyspace{Name: KeyspaceName} if enableSemiSync { clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--enable_semi_sync") if clusterInstance.VtctlMajorVersion >= 13 { clusterInstance.VtctldExtraArgs = append(clusterInstance.VtctldExtraArgs, "--durability_policy=semi_sync") } } // Start topo server err := clusterInstance.StartTopo() require.NoError(t, err, "Error starting topo") err = clusterInstance.TopoProcess.ManageTopoDir("mkdir", "/vitess/"+cells[0]) require.NoError(t, err, "Error managing topo") numCell := 1 for numCell < len(cells) { err = clusterInstance.VtctlProcess.AddCellInfo(cells[numCell]) require.NoError(t, err, "Error managing topo") numCell++ } // Adding another cell in the same cluster numCell = 0 for numCell < len(cells) { i := 0 for i < numTablets[numCell] { i++ tablet := clusterInstance.NewVttabletInstance("replica", 100*(numCell+1)+i, cells[numCell]) tablets = 
append(tablets, tablet) } numCell++ } shard := &cluster.Shard{Name: shardName} shard.Vttablets = tablets clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--lock_tables_timeout", "5s", "--init_populate_metadata", "--track_schema_versions=true", // disabling online-ddl for reparent tests. This is done to reduce flakiness. // All the tests in this package reparent frequently between different tablets // This means that Promoting a tablet to primary is sometimes immediately followed by a DemotePrimary call. // In this case, the close method and initSchema method of the onlineDDL executor race. // If the initSchema acquires the lock, then it takes about 30 seconds for it to run during which time the // DemotePrimary rpc is stalled! "--queryserver_enable_online_ddl=false") if clusterInstance.VtTabletMajorVersion >= 13 && clusterInstance.VtctlMajorVersion >= 13 { // disabling active reparents on the tablet since we don't want the replication manager // to fix replication if it is stopped. Some tests deliberately do that. Also, we don't want // the replication manager to silently fix the replication in case ERS or PRS mess up. All the // tests in this test suite should work irrespective of this flag. Each run of ERS, PRS should be // setting up the replication correctly. // However, due to the bugs in old vitess components we can only do this for version >= 13. clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--disable_active_reparents") } // Initialize Cluster err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard}) require.NoError(t, err, "Cannot launch cluster") //Start MySql var mysqlCtlProcessList []*exec.Cmd for _, shard := range clusterInstance.Keyspaces[0].Shards { for _, tablet := range shard.Vttablets { log.Infof("Starting MySql for tablet %v", tablet.Alias) proc, err := tablet.MysqlctlProcess.StartProcess() require.NoError(t, err, "Error starting start mysql") mysqlCtlProcessList = append(mysqlCtlProcessList, proc) } } // Wait for mysql processes to start for _, proc := range mysqlCtlProcessList { if err := proc.Wait(); err != nil { clusterInstance.PrintMysqlctlLogFiles() require.FailNow(t, "Error starting mysql: %s", err.Error()) } } setupShard(ctx, t, clusterInstance, shardName, tablets) return clusterInstance } func setupShard(ctx context.Context, t *testing.T, clusterInstance *cluster.LocalProcessCluster, shardName string, tablets []*cluster.Vttablet) { for _, tablet := range tablets { // Start the tablet err := tablet.VttabletProcess.Setup() require.NoError(t, err) } for _, tablet := range tablets { err := tablet.VttabletProcess.WaitForTabletStatuses([]string{"SERVING", "NOT_SERVING"}) require.NoError(t, err) } // Initialize shard err := clusterInstance.VtctlclientProcess.InitializeShard(KeyspaceName, shardName, tablets[0].Cell, tablets[0].TabletUID) require.NoError(t, err) ValidateTopology(t, clusterInstance, true) // create Tables RunSQL(ctx, t, sqlSchema, tablets[0]) CheckPrimaryTablet(t, clusterInstance, tablets[0]) ValidateTopology(t, clusterInstance, false) time.Sleep(100 * time.Millisecond) // wait for replication to catchup strArray := GetShardReplicationPositions(t, clusterInstance, KeyspaceName, shardName, true) assert.Equal(t, len(tablets), len(strArray)) assert.Contains(t, strArray[0], "primary") // primary first } func setupClusterLegacy(ctx context.Context, t *testing.T, shardName string, cells []string, numTablets []int, enableSemiSync bool) *cluster.LocalProcessCluster { var tablets 
[]*cluster.Vttablet clusterInstance := cluster.NewCluster(cells[0], Hostname) keyspace := &cluster.Keyspace{Name: KeyspaceName} if enableSemiSync { clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--enable_semi_sync") if clusterInstance.VtctlMajorVersion >= 13 { clusterInstance.VtctldExtraArgs = append(clusterInstance.VtctldExtraArgs, "--durability_policy=semi_sync") } } // Start topo server err := clusterInstance.StartTopo() require.NoError(t, err, "Error starting topo") err = clusterInstance.TopoProcess.ManageTopoDir("mkdir", "/vitess/"+cells[0]) require.NoError(t, err, "Error managing topo") numCell := 1 for numCell < len(cells) { err = clusterInstance.VtctlProcess.AddCellInfo(cells[numCell]) require.NoError(t, err, "Error managing topo") numCell++ } // Adding another cell in the same cluster numCell = 0 for numCell < len(cells) { i := 0 for i < numTablets[numCell] { i++ tablet := clusterInstance.NewVttabletInstance("replica", 100*(numCell+1)+i, cells[numCell]) tablets = append(tablets, tablet) } numCell++ } shard := &cluster.Shard{Name: shardName} shard.Vttablets = tablets clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--lock_tables_timeout", "5s", "--init_populate_metadata", "--track_schema_versions=true", // disabling online-ddl for reparent tests. This is done to reduce flakiness. // All the tests in this package reparent frequently between different tablets // This means that Promoting a tablet to primary is sometimes immediately followed by a DemotePrimary call. // In this case, the close method and initSchema method of the onlineDDL executor race. // If the initSchema acquires the lock, then it takes about 30 seconds for it to run during which time the // DemotePrimary rpc is stalled! "--queryserver_enable_online_ddl=false") if clusterInstance.VtTabletMajorVersion >= 13 && clusterInstance.VtctlMajorVersion >= 13 { // disabling active reparents on the tablet since we don't want the replication manager // to fix replication if it is stopped. Some tests deliberately do that. Also, we don't want // the replication manager to silently fix the replication in case ERS or PRS mess up. All the // tests in this test suite should work irrespective of this flag. Each run of ERS, PRS should be // setting up the replication correctly. // However, due to the bugs in old vitess components we can only do this for version >= 13. 
clusterInstance.VtTabletExtraArgs = append(clusterInstance.VtTabletExtraArgs, "--disable_active_reparents") } // Initialize Cluster err = clusterInstance.SetupCluster(keyspace, []cluster.Shard{*shard}) require.NoError(t, err, "Cannot launch cluster") //Start MySql var mysqlCtlProcessList []*exec.Cmd for _, shard := range clusterInstance.Keyspaces[0].Shards { for _, tablet := range shard.Vttablets { log.Infof("Starting MySql for tablet %v", tablet.Alias) proc, err := tablet.MysqlctlProcess.StartProcess() require.NoError(t, err, "Error starting start mysql") mysqlCtlProcessList = append(mysqlCtlProcessList, proc) } } // Wait for mysql processes to start for _, proc := range mysqlCtlProcessList { if err := proc.Wait(); err != nil { clusterInstance.PrintMysqlctlLogFiles() require.FailNow(t, "Error starting mysql: %s", err.Error()) } } setupShardLegacy(ctx, t, clusterInstance, shardName, tablets) return clusterInstance } func setupShardLegacy(ctx context.Context, t *testing.T, clusterInstance *cluster.LocalProcessCluster, shardName string, tablets []*cluster.Vttablet) { for _, tablet := range tablets { // create database err := tablet.VttabletProcess.CreateDB(KeyspaceName) require.NoError(t, err) // Start the tablet err = tablet.VttabletProcess.Setup() require.NoError(t, err) } for _, tablet := range tablets { err := tablet.VttabletProcess.WaitForTabletStatuses([]string{"SERVING", "NOT_SERVING"}) require.NoError(t, err) } // Force the replica to reparent assuming that all the datasets are identical. err := clusterInstance.VtctlclientProcess.ExecuteCommand("InitShardPrimary", "--", "--force", fmt.Sprintf("%s/%s", KeyspaceName, shardName), tablets[0].Alias) require.NoError(t, err) ValidateTopology(t, clusterInstance, true) // create Tables RunSQL(ctx, t, sqlSchema, tablets[0]) CheckPrimaryTablet(t, clusterInstance, tablets[0]) ValidateTopology(t, clusterInstance, false) time.Sleep(100 * time.Millisecond) // wait for replication to catchup strArray := GetShardReplicationPositions(t, clusterInstance, KeyspaceName, shardName, true) assert.Equal(t, len(tablets), len(strArray)) assert.Contains(t, strArray[0], "primary") // primary first } //endregion //region database queries func getMysqlConnParam(tablet *cluster.Vttablet) mysql.ConnParams { connParams := mysql.ConnParams{ Uname: username, DbName: dbName, UnixSocket: path.Join(os.Getenv("VTDATAROOT"), fmt.Sprintf("/vt_%010d/mysql.sock", tablet.TabletUID)), } return connParams } // RunSQL is used to run a SQL command directly on the MySQL instance of a vttablet func RunSQL(ctx context.Context, t *testing.T, sql string, tablet *cluster.Vttablet) *sqltypes.Result { tabletParams := getMysqlConnParam(tablet) conn, err := mysql.Connect(ctx, &tabletParams) require.Nil(t, err) defer conn.Close() return execute(t, conn, sql) } func execute(t *testing.T, conn *mysql.Conn, query string) *sqltypes.Result { t.Helper() qr, err := conn.ExecuteFetch(query, 1000, true) require.Nil(t, err) return qr } //endregion // region ers, prs // Prs runs PRS func Prs(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet) (string, error) { return PrsWithTimeout(t, clusterInstance, tab, false, "", "") } // PrsAvoid runs PRS func PrsAvoid(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet) (string, error) { return PrsWithTimeout(t, clusterInstance, tab, true, "", "") } // PrsWithTimeout runs PRS func PrsWithTimeout(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet, avoid bool, actionTimeout, 
waitTimeout string) (string, error) { args := []string{ "PlannedReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)} if actionTimeout != "" { args = append(args, "--action_timeout", actionTimeout) } if waitTimeout != "" { args = append(args, "--wait_replicas_timeout", waitTimeout) } if avoid { args = append(args, "--avoid_tablet") } else { args = append(args, "--new_primary") } args = append(args, tab.Alias) out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) return out, err } // Ers runs the ERS func Ers(clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet, totalTimeout, waitReplicasTimeout string) (string, error) { return ErsIgnoreTablet(clusterInstance, tab, totalTimeout, waitReplicasTimeout, nil, false) } // ErsIgnoreTablet is used to run ERS func ErsIgnoreTablet(clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet, timeout, waitReplicasTimeout string, tabletsToIgnore []*cluster.Vttablet, preventCrossCellPromotion bool) (string, error) { var args []string if timeout != "" { args = append(args, "--action_timeout", timeout) } args = append(args, "EmergencyReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)) if tab != nil { args = append(args, "--new_primary", tab.Alias) } if waitReplicasTimeout != "" { args = append(args, "--wait_replicas_timeout", waitReplicasTimeout) } if preventCrossCellPromotion { args = append(args, "--prevent_cross_cell_promotion=true") } if len(tabletsToIgnore) != 0 { tabsString := "" for _, vttablet := range tabletsToIgnore { if tabsString == "" { tabsString = vttablet.Alias } else { tabsString = tabsString + "," + vttablet.Alias } } args = append(args, "--ignore_replicas", tabsString) } return clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) } // ErsWithVtctl runs ERS via vtctl binary func ErsWithVtctl(clusterInstance *cluster.LocalProcessCluster) (string, error) { args := []string{"EmergencyReparentShard", "--", "--keyspace_shard", fmt.Sprintf("%s/%s", KeyspaceName, ShardName)} if clusterInstance.VtctlMajorVersion >= 13 { args = append([]string{"--durability_policy=semi_sync"}, args...) } return clusterInstance.VtctlProcess.ExecuteCommandWithOutput(args...) } // endregion // region validations // ValidateTopology is used to validate the topology func ValidateTopology(t *testing.T, clusterInstance *cluster.LocalProcessCluster, pingTablets bool) { args := []string{"Validate"} if pingTablets { args = append(args, "--", "--ping-tablets=true") } out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput(args...) require.Empty(t, out) require.NoError(t, err) } // ConfirmReplication confirms that the replication is working properly func ConfirmReplication(t *testing.T, primary *cluster.Vttablet, replicas []*cluster.Vttablet) int { ctx := context.Background() insertVal++ n := insertVal // unique value ... 
// insert data into the new primary, check the connected replica work insertSQL := fmt.Sprintf(insertSQL, n, n) RunSQL(ctx, t, insertSQL, primary) time.Sleep(100 * time.Millisecond) for _, tab := range replicas { err := CheckInsertedValues(ctx, t, tab, n) require.NoError(t, err) } return n } // ConfirmOldPrimaryIsHangingAround confirms that the old primary is hanging around func ConfirmOldPrimaryIsHangingAround(t *testing.T, clusterInstance *cluster.LocalProcessCluster) { out, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("Validate") require.Error(t, err) require.Contains(t, out, "already has primary") } // CheckPrimaryTablet makes sure the tablet type is primary, and its health check agrees. func CheckPrimaryTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet) { result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias) require.NoError(t, err) var tabletInfo topodatapb.Tablet err = json2.Unmarshal([]byte(result), &tabletInfo) require.NoError(t, err) assert.Equal(t, topodatapb.TabletType_PRIMARY, tabletInfo.GetType()) // make sure the health stream is updated result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("VtTabletStreamHealth", "--", "--count", "1", tablet.Alias) require.NoError(t, err) var streamHealthResponse querypb.StreamHealthResponse err = json2.Unmarshal([]byte(result), &streamHealthResponse) require.NoError(t, err) assert.True(t, streamHealthResponse.GetServing()) tabletType := streamHealthResponse.GetTarget().GetTabletType() assert.Equal(t, topodatapb.TabletType_PRIMARY, tabletType) } // isHealthyPrimaryTablet will return if tablet is primary AND healthy. func isHealthyPrimaryTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet) bool { result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetTablet", tablet.Alias) require.Nil(t, err) var tabletInfo topodatapb.Tablet err = json2.Unmarshal([]byte(result), &tabletInfo) require.Nil(t, err) if tabletInfo.GetType() != topodatapb.TabletType_PRIMARY { return false } // make sure the health stream is updated result, err = clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("VtTabletStreamHealth", "--", "--count", "1", tablet.Alias) require.Nil(t, err) var streamHealthResponse querypb.StreamHealthResponse err = json2.Unmarshal([]byte(result), &streamHealthResponse) require.Nil(t, err) assert.True(t, streamHealthResponse.GetServing()) tabletType := streamHealthResponse.GetTarget().GetTabletType() return tabletType == topodatapb.TabletType_PRIMARY } // CheckInsertedValues checks that the given value is present in the given tablet func CheckInsertedValues(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, index int) error { // wait until it gets the data timeout := time.Now().Add(15 * time.Second) i := 0 for time.Now().Before(timeout) { selectSQL := fmt.Sprintf("select msg from vt_insert_test where id=%d", index) qr := RunSQL(ctx, t, selectSQL, tablet) if len(qr.Rows) == 1 { return nil } t := time.Duration(300 * i) time.Sleep(t * time.Millisecond) i++ } return fmt.Errorf("data is not yet replicated on tablet %s", tablet.Alias) } func CheckSemiSyncSetupCorrectly(t *testing.T, tablet *cluster.Vttablet, semiSyncVal string) { dbVar, err := tablet.VttabletProcess.GetDBVar("rpl_semi_sync_slave_enabled", "") require.NoError(t, err) require.Equal(t, semiSyncVal, dbVar) } // CheckCountOfInsertedValues checks that the number of inserted values 
matches the given count on the given tablet func CheckCountOfInsertedValues(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, count int) error { selectSQL := "select * from vt_insert_test" qr := RunSQL(ctx, t, selectSQL, tablet) if len(qr.Rows) == count { return nil } return fmt.Errorf("count does not match on the tablet %s", tablet.Alias) } // endregion // region tablet operations // StopTablet stops the tablet func StopTablet(t *testing.T, tab *cluster.Vttablet, stopDatabase bool) { err := tab.VttabletProcess.TearDownWithTimeout(30 * time.Second) require.NoError(t, err) if stopDatabase { err = tab.MysqlctlProcess.Stop() require.NoError(t, err) } } // RestartTablet restarts the tablet func RestartTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet) { tab.MysqlctlProcess.InitMysql = false err := tab.MysqlctlProcess.Start() require.NoError(t, err) err = clusterInstance.VtctlclientProcess.InitTablet(tab, tab.Cell, KeyspaceName, Hostname, ShardName) require.NoError(t, err) } // ResurrectTablet is used to resurrect the given tablet func ResurrectTablet(ctx context.Context, t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet) { tab.MysqlctlProcess.InitMysql = false err := tab.MysqlctlProcess.Start() require.NoError(t, err) err = clusterInstance.VtctlclientProcess.InitTablet(tab, tab.Cell, KeyspaceName, Hostname, ShardName) require.NoError(t, err) // As there is already a primary the new replica will come directly in SERVING state tab.VttabletProcess.ServingStatus = "SERVING" // Start the tablet err = tab.VttabletProcess.Setup() require.NoError(t, err) err = CheckInsertedValues(ctx, t, tab, insertVal) require.NoError(t, err) } // DeleteTablet is used to delete the given tablet func DeleteTablet(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tab *cluster.Vttablet) { err := clusterInstance.VtctlclientProcess.ExecuteCommand( "DeleteTablet", "--", "--allow_primary", tab.Alias) require.NoError(t, err) } // endregion // region get info // GetNewPrimary is used to find the new primary of the cluster. func GetNewPrimary(t *testing.T, clusterInstance *cluster.LocalProcessCluster) *cluster.Vttablet { var newPrimary *cluster.Vttablet for _, tablet := range clusterInstance.Keyspaces[0].Shards[0].Vttablets[1:] { if isHealthyPrimaryTablet(t, clusterInstance, tablet) { newPrimary = tablet break } } require.NotNil(t, newPrimary) return newPrimary } // GetShardReplicationPositions gets the shards replication positions. 
func GetShardReplicationPositions(t *testing.T, clusterInstance *cluster.LocalProcessCluster, keyspaceName, shardName string, doPrint bool) []string { output, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( "ShardReplicationPositions", fmt.Sprintf("%s/%s", keyspaceName, shardName)) require.NoError(t, err) strArray := strings.Split(output, "\n") if strArray[len(strArray)-1] == "" { strArray = strArray[:len(strArray)-1] // Truncate slice, remove empty line } if doPrint { log.Infof("Positions:") for _, pos := range strArray { log.Infof("\t%s", pos) } } return strArray } // endregion // CheckReplicaStatus checks the replication status and asserts that the replication is stopped func CheckReplicaStatus(ctx context.Context, t *testing.T, tablet *cluster.Vttablet) { qr := RunSQL(ctx, t, "show slave status", tablet) IOThreadRunning := fmt.Sprintf("%v", qr.Rows[0][10]) SQLThreadRunning := fmt.Sprintf("%v", qr.Rows[0][10]) assert.Equal(t, IOThreadRunning, "VARCHAR(\"No\")") assert.Equal(t, SQLThreadRunning, "VARCHAR(\"No\")") } // CheckReparentFromOutside checks that cluster was reparented from outside func CheckReparentFromOutside(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablet *cluster.Vttablet, downPrimary bool, baseTime int64) { result, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput("GetShardReplication", cell1, KeyspaceShard) require.Nil(t, err, "error should be Nil") if !downPrimary { assertNodeCount(t, result, int(3)) } else { assertNodeCount(t, result, int(2)) } // make sure the primary status page says it's the primary status := tablet.VttabletProcess.GetStatus() assert.Contains(t, status, "Tablet Type: PRIMARY") // make sure the primary health stream says it's the primary too // (health check is disabled on these servers, force it first) err = clusterInstance.VtctlclientProcess.ExecuteCommand("RunHealthCheck", tablet.Alias) require.NoError(t, err) streamHealth, err := clusterInstance.VtctlclientProcess.ExecuteCommandWithOutput( "VtTabletStreamHealth", "--", "--count", "1", tablet.Alias) require.NoError(t, err) var streamHealthResponse querypb.StreamHealthResponse err = json.Unmarshal([]byte(streamHealth), &streamHealthResponse) require.NoError(t, err) assert.Equal(t, streamHealthResponse.Target.TabletType, topodatapb.TabletType_PRIMARY) assert.True(t, streamHealthResponse.TabletExternallyReparentedTimestamp >= baseTime) } // WaitForReplicationPosition waits for tablet B to catch up to the replication position of tablet A. 
func WaitForReplicationPosition(t *testing.T, tabletA *cluster.Vttablet, tabletB *cluster.Vttablet) error { posA, _ := cluster.GetPrimaryPosition(t, *tabletA, Hostname) timeout := time.Now().Add(5 * time.Second) for time.Now().Before(timeout) { posB, _ := cluster.GetPrimaryPosition(t, *tabletB, Hostname) if positionAtLeast(t, tabletB, posA, posB) { return nil } time.Sleep(100 * time.Millisecond) } return fmt.Errorf("failed to catch up on replication position") } // positionAtLeast executes the command position at_least func positionAtLeast(t *testing.T, tablet *cluster.Vttablet, a string, b string) bool { isAtleast := false val, err := tablet.MysqlctlProcess.ExecuteCommandWithOutput("position", "at_least", a, b) require.NoError(t, err) if strings.Contains(val, "true") { isAtleast = true } return isAtleast } func assertNodeCount(t *testing.T, result string, want int) { resultMap := make(map[string]interface{}) err := json.Unmarshal([]byte(result), &resultMap) require.NoError(t, err) nodes := reflect.ValueOf(resultMap["nodes"]) got := nodes.Len() assert.Equal(t, want, got) } // CheckDBvar checks the db var func CheckDBvar(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, variable string, status string) { tabletParams := getMysqlConnParam(tablet) conn, err := mysql.Connect(ctx, &tabletParams) require.NoError(t, err) defer conn.Close() qr := execute(t, conn, fmt.Sprintf("show variables like '%s'", variable)) got := fmt.Sprintf("%v", qr.Rows) want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", variable, status) assert.Equal(t, want, got) } // CheckDBstatus checks the db status func CheckDBstatus(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, variable string, status string) { tabletParams := getMysqlConnParam(tablet) conn, err := mysql.Connect(ctx, &tabletParams) require.NoError(t, err) defer conn.Close() qr := execute(t, conn, fmt.Sprintf("show status like '%s'", variable)) got := fmt.Sprintf("%v", qr.Rows) want := fmt.Sprintf("[[VARCHAR(\"%s\") VARCHAR(\"%s\")]]", variable, status) assert.Equal(t, want, got) } // SetReplicationSourceFailed returns true if the given output from PRS had failed because the given tablet was // unable to setReplicationSource. Since some tests are used in upgrade-downgrade testing, we need this function to // work with different versions of vtctl. func SetReplicationSourceFailed(tablet *cluster.Vttablet, prsOut string) bool { if strings.Contains(prsOut, fmt.Sprintf("tablet %s failed to SetReplicationSource", tablet.Alias)) { return true } return strings.Contains(prsOut, fmt.Sprintf("tablet %s failed to SetMaster", tablet.Alias)) } // CheckReplicationStatus checks that the replication for sql and io threads is setup as expected func CheckReplicationStatus(ctx context.Context, t *testing.T, tablet *cluster.Vttablet, sqlThreadRunning bool, ioThreadRunning bool) { res := RunSQL(ctx, t, "show slave status;", tablet) if ioThreadRunning { require.Equal(t, "Yes", res.Rows[0][10].ToString()) } else { require.Equal(t, "No", res.Rows[0][10].ToString()) } if sqlThreadRunning { require.Equal(t, "Yes", res.Rows[0][11].ToString()) } else { require.Equal(t, "No", res.Rows[0][11].ToString()) } }
[ "\"VTDATAROOT\"" ]
[]
[ "VTDATAROOT" ]
[]
["VTDATAROOT"]
go
1
0
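The reparent helpers in the Go record above are meant to be chained inside end-to-end tests: trigger a reparent through vtctlclient, then assert on topology, health streams, and replication. Below is a minimal sketch of that flow, assuming it lives in the same package as the helpers; the package name, the cluster import path, and the timeout values are assumptions, and the caller is expected to supply an already running cluster and its tablets.

package utils // assumed package name; the record above does not show the package clause

import (
	"testing"

	// Assumed import path for the cluster types (LocalProcessCluster, Vttablet) used above.
	"vitess.io/vitess/go/test/endtoend/cluster"
)

// ersSketch shows the typical chain: run EmergencyReparentShard onto tablets[1],
// check that it is now the primary, then confirm replication to the other tablets.
// The timeouts ("60s", "30s") are illustrative placeholders.
func ersSketch(t *testing.T, clusterInstance *cluster.LocalProcessCluster, tablets []*cluster.Vttablet) {
	out, err := Ers(clusterInstance, tablets[1], "60s", "30s")
	if err != nil {
		t.Fatalf("ERS failed: %v, output: %s", err, out)
	}

	// The promoted tablet must report PRIMARY both in the topology and in its health stream.
	CheckPrimaryTablet(t, clusterInstance, tablets[1])

	// A row inserted on the new primary should become visible on the remaining tablets.
	ConfirmReplication(t, tablets[1], []*cluster.Vttablet{tablets[0], tablets[2]})
}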
main.go
package lambdahttp // import "marxus.github.io/go/lambdahttp" import ( "bufio" "bytes" "context" "encoding/base64" "fmt" "io" "net/http" "net/http/httptest" "net/url" "os" "strconv" "strings" ) var ( _f = fmt.Sprintf _setCookie = strings.Split(""+ "set-cookie Set-cookie sEt-cookie SEt-cookie seT-cookie SeT-cookie sET-cookie SET-cookie "+ "set-Cookie Set-Cookie sEt-Cookie SEt-Cookie seT-Cookie SeT-Cookie sET-Cookie SET-Cookie "+ "set-cOokie Set-cOokie sEt-cOokie SEt-cOokie seT-cOokie SeT-cOokie sET-cOokie SET-cOokie "+ "set-COokie Set-COokie sEt-COokie SEt-COokie seT-COokie SeT-COokie sET-COokie SET-COokie "+ "set-coOkie Set-coOkie sEt-coOkie SEt-coOkie seT-coOkie SeT-coOkie sET-coOkie SET-coOkie "+ "set-CoOkie Set-CoOkie sEt-CoOkie SEt-CoOkie seT-CoOkie SeT-CoOkie sET-CoOkie SET-CoOkie "+ "set-cOOkie Set-cOOkie sEt-cOOkie SEt-cOOkie seT-cOOkie SeT-cOOkie sET-cOOkie SET-cOOkie "+ "set-COOkie Set-COOkie sEt-COOkie SEt-COOkie seT-COOkie SeT-COOkie sET-COOkie SET-COOkie", " ") ) type ( _o = map[string]interface{} ResponseRecorder struct{ *httptest.ResponseRecorder } ) func (*ResponseRecorder) CloseNotify() <-chan bool { return nil } /* request parsing related */ func parseBody(_body string, decode bool) []byte { body := []byte(_body) if decode { n, _ := base64.StdEncoding.Decode(body, body) body = body[:n] } return body } func parseHeaders(_headers _o) map[string]string { headers := make(map[string]string) for k, v := range _headers { headers[k] = v.(string) } return headers } func parseHeadersMV(_headers _o) map[string]string { headers := make(map[string]string) for k, vs := range _headers { for _, v := range vs.([]interface{}) { headers[k] += _f("%s,", v) } headers[k] = headers[k][:len(headers[k])-1] } return headers } func extractHeaderV(v string) string { return strings.TrimSpace(v[strings.LastIndex(v, ",")+1:]) } func parseQS(_qs _o) string { var qs []string for k, v := range _qs { qs = append(qs, _f("%s=%s", k, v)) } return strings.Join(qs, "&") } func parseQSMV(_qs _o, escape bool) string { var qs []string for k, vs := range _qs { if escape { k = url.QueryEscape(k) } for _, v := range vs.([]interface{}) { if escape { v = url.QueryEscape(v.(string)) } qs = append(qs, _f("%s=%s", k, v)) } } return strings.Join(qs, "&") } /* meta */ type meta struct { _mv bool RemoteAddr string Method, Scheme, Host string Port int Prefix, Path, QS string Headers map[string]string Body []byte } func (m *meta) api(ev _o) { ctx := ev["requestContext"].(_o) if ctx["stage"] != "$default" { m.Prefix = _f("/%s", ctx["stage"]) } if ctx["httpMethod"] != nil { // v1 m.Method = ctx["httpMethod"].(string) } else { // v2 ctx = ctx["http"].(_o) m.Method = ctx["method"].(string) } m.Path = ctx["path"].(string)[len(m.Prefix):] } func (m *meta) alb(ev _o) { m.Prefix = os.Getenv("PATH_PREFIX") m.Method = ev["httpMethod"].(string) m.Path = ev["path"].(string)[len(m.Prefix):] } func getMetaFor(ev _o) *meta { m := &meta{Body: parseBody(ev["body"].(string), ev["isBase64Encoded"].(bool))} if ev["version"] == "1.0" { // api v1 m.api(ev) m._mv = true m.Headers = parseHeadersMV(ev["multiValueHeaders"].(_o)) m.QS = parseQSMV(ev["multiValueQueryStringParameters"].(_o), true) if !strings.HasSuffix(m.Headers["host"], ".amazonaws.com") { evPath, metaPath := strings.Split(ev["path"].(string), "/"), strings.Split(m.Path, "/") if len(evPath) > len(metaPath) { m.Prefix = _f("/%s", evPath[1]) } } } else if ev["version"] == "2.0" { // api v2 m.api(ev) m._mv = false m.Headers = parseHeaders(ev["headers"].(_o)) if cookies, ok := 
ev["cookies"].([]string); ok { m.Headers["cookie"] = strings.Join(cookies, ";") } m.QS = ev["rawQueryString"].(string) if !strings.HasSuffix(m.Headers["host"], ".amazonaws.com") { m.Prefix = os.Getenv("PATH_PREFIX") } } else if ev["headers"] != nil { // alb m.alb(ev) m._mv = false m.Headers = parseHeaders(ev["headers"].(_o)) m.QS = parseQS(ev["queryStringParameters"].(_o)) } else { // alb mv m.alb(ev) m._mv = true m.Headers = parseHeadersMV(ev["multiValueHeaders"].(_o)) m.QS = parseQSMV(ev["multiValueQueryStringParameters"].(_o), false) } m.QS = strings.ReplaceAll(m.QS, " ", "%20") m.RemoteAddr = extractHeaderV(m.Headers["x-forwarded-for"]) m.Scheme = extractHeaderV(m.Headers["x-forwarded-proto"]) m.Host = m.Headers["host"] m.Port, _ = strconv.Atoi(extractHeaderV(m.Headers["x-forwarded-port"])) return m } /* request */ func getReqFor(m *meta) *http.Request { var headers []string for k, v := range m.Headers { headers = append(headers, _f("%s: %s", k, v)) } req, _ := http.ReadRequest(bufio.NewReader(bytes.NewReader(append( []byte(_f( "%s %s?%s HTTP/1.1\r\n%s\r\n\r\n", m.Method, m.Path, m.QS, strings.Join(headers, "\r\n"), )), m.Body..., )))) req.RemoteAddr = m.RemoteAddr req.URL.Scheme = m.Scheme req.URL.Host = m.Host return req } /* response */ func returnRsp(m *meta, _rsp *http.Response) _o { body, _ := io.ReadAll(_rsp.Body) rsp := _o{ "statusCode": _rsp.StatusCode, "statusDescription": _rsp.Status, "body": base64.StdEncoding.EncodeToString(body), "isBase64Encoded": true, } if !m._mv { headers := make(_o) for k, vs := range _rsp.Header { if k == "Set-Cookie" { for i, v := range vs { headers[_setCookie[i]] = v } continue } headers[k] = strings.Join(vs, ",") } rsp["headers"] = headers } else { rsp["multiValueHeaders"] = _rsp.Header } return rsp } /* found */ func returnFound(m *meta) _o { rsp, location := _o{ "statusCode": 302, "statusDescription": "302 Found", }, _f("%s/?%s", m.Prefix, m.QS) if !m._mv { rsp["headers"] = _o{"Location": location} } else { rsp["multiValueHeaders"] = _o{"Location": []string{location}} } return rsp } /* handler */ // lambda.Start(lambdahttp.MakeHandler(http.DefaultServeMux)) func MakeHandler(handler http.Handler) func(ctx context.Context, ev _o) (_o, error) { return func(ctx context.Context, ev _o) (_o, error) { debugEv(ev) m := getMetaFor(ev) debugMeta(m) if m.Path == "" { found := returnFound(m) debugFound(found) return found, nil } w, req := &ResponseRecorder{httptest.NewRecorder()}, getReqFor(m) these[req] = &this{ctx, ev, m} handler.ServeHTTP(w, req) delete(these, req) rsp := returnRsp(m, w.Result()) debugRsp(rsp) return rsp, nil } } /* this related */ var these = make(map[*http.Request]*this) type this struct { Context context.Context Event map[string]interface{} Meta *meta } // lambdahttp.GetThis(r).Event["httpMethod"] func GetThis(r *http.Request) *this { return these[r] }
[ "\"PATH_PREFIX\"", "\"PATH_PREFIX\"" ]
[]
[ "PATH_PREFIX" ]
[]
["PATH_PREFIX"]
go
1
0
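The main.go record above already documents its intended wiring in two comments: lambda.Start(lambdahttp.MakeHandler(http.DefaultServeMux)) and lambdahttp.GetThis(r). A minimal Lambda entry point built from those comments could look like the sketch below; the /hello route and the aws-lambda-go dependency are assumptions, and the httpMethod lookup only applies to API Gateway v1 and ALB events.

package main

import (
	"fmt"
	"net/http"

	"github.com/aws/aws-lambda-go/lambda" // assumed runtime dependency; lambda.Start drives the handler
	"marxus.github.io/go/lambdahttp"
)

func main() {
	// Ordinary net/http handlers; lambdahttp converts the Lambda event
	// (API Gateway v1/v2 or ALB) into an *http.Request and back again.
	http.HandleFunc("/hello", func(w http.ResponseWriter, r *http.Request) {
		// GetThis exposes the raw event and parsed metadata for this request.
		info := lambdahttp.GetThis(r)
		fmt.Fprintf(w, "hello, raw method: %v\n", info.Event["httpMethod"])
	})

	lambda.Start(lambdahttp.MakeHandler(http.DefaultServeMux))
}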
cloud/google/client/client.go
//go:generate mockery -name=Client package client import ( "context" "encoding/json" "fmt" "os" "path/filepath" "golang.org/x/oauth2/google" compute "google.golang.org/api/compute/v1" "github.com/kelda/kelda/counter" "github.com/kelda/kelda/util" ) // A Client for Google's API. Used for unit testing. type Client interface { GetInstance(zone, id string) (*compute.Instance, error) ListInstances(zone, description string) (*compute.InstanceList, error) InsertInstance(zone string, instance *compute.Instance) ( *compute.Operation, error) DeleteInstance(zone, operation string) (*compute.Operation, error) AddAccessConfig(zone, instance, networkInterface string, accessConfig *compute.AccessConfig) (*compute.Operation, error) DeleteAccessConfig(zone, instance, accessConfig, networkInterface string) (*compute.Operation, error) GetZone(zone string) (*compute.Zone, error) GetZoneOperation(zone, operation string) (*compute.Operation, error) GetGlobalOperation(operation string) (*compute.Operation, error) ListFloatingIPs(region string) (*compute.AddressList, error) ListFirewalls(description string) (*compute.FirewallList, error) InsertFirewall(firewall *compute.Firewall) (*compute.Operation, error) DeleteFirewall(firewall string) (*compute.Operation, error) ListNetworks(name string) (*compute.NetworkList, error) InsertNetwork(network *compute.Network) (*compute.Operation, error) DeleteNetwork(name string) (*compute.Operation, error) } type client struct { gce *compute.Service projID string } var c = counter.New("Google") // New creates a new Google client. func New() (Client, error) { c.Inc("New Client") configPath := filepath.Join(os.Getenv("HOME"), ".gce", "kelda.json") configStr, err := util.ReadFile(configPath) if err != nil { return nil, err } service, err := newComputeService(configStr) if err != nil { return nil, err } projID, err := getProjectID(configStr) if err != nil { return nil, fmt.Errorf("failed to get project ID: %s", err) } return &client{gce: service, projID: projID}, nil } func newComputeService(configStr string) (*compute.Service, error) { jwtConfig, err := google.JWTConfigFromJSON( []byte(configStr), compute.ComputeScope) if err != nil { return nil, err } return compute.New(jwtConfig.Client(context.Background())) } const projectIDKey = "project_id" func getProjectID(configStr string) (string, error) { configFields := map[string]string{} if err := json.Unmarshal([]byte(configStr), &configFields); err != nil { return "", err } projID, ok := configFields[projectIDKey] if !ok { return "", fmt.Errorf("missing field: %s", projectIDKey) } return projID, nil } func (ci *client) GetInstance(zone, id string) (*compute.Instance, error) { c.Inc("Get Instance") return ci.gce.Instances.Get(ci.projID, zone, id).Do() } func (ci *client) ListInstances(zone, desc string) (*compute.InstanceList, error) { c.Inc("List Instances") return ci.gce.Instances.List(ci.projID, zone).Filter(descFilter(desc)).Do() } func (ci *client) InsertInstance(zone string, instance *compute.Instance) ( *compute.Operation, error) { c.Inc("Insert Instance") return ci.gce.Instances.Insert(ci.projID, zone, instance).Do() } func (ci *client) DeleteInstance(zone, instance string) (*compute.Operation, error) { return ci.gce.Instances.Delete(ci.projID, zone, instance).Do() } func (ci *client) AddAccessConfig(zone, instance, networkInterface string, accessConfig *compute.AccessConfig) (*compute.Operation, error) { c.Inc("Add Access Config") return ci.gce.Instances.AddAccessConfig(ci.projID, zone, instance, networkInterface, 
accessConfig).Do() } func (ci *client) DeleteAccessConfig(zone, instance, accessConfig, networkInterface string) (*compute.Operation, error) { c.Inc("Delete Access Config") return ci.gce.Instances.DeleteAccessConfig(ci.projID, zone, instance, accessConfig, networkInterface).Do() } func (ci *client) GetZone(zone string) (*compute.Zone, error) { c.Inc("Get Zone") return ci.gce.Zones.Get(ci.projID, zone).Do() } func (ci *client) GetZoneOperation(zone, operation string) ( *compute.Operation, error) { c.Inc("Get Zone Op") return ci.gce.ZoneOperations.Get(ci.projID, zone, operation).Do() } func (ci *client) GetGlobalOperation(operation string) (*compute.Operation, error) { c.Inc("Get Global Op") return ci.gce.GlobalOperations.Get(ci.projID, operation).Do() } func (ci *client) ListFloatingIPs(region string) (*compute.AddressList, error) { c.Inc("List Floating IPs") return ci.gce.Addresses.List(ci.projID, region).Do() } func (ci *client) ListFirewalls(description string) (*compute.FirewallList, error) { c.Inc("List Firewalls") return ci.gce.Firewalls.List(ci.projID).Filter(descFilter(description)).Do() } func (ci *client) InsertFirewall(firewall *compute.Firewall) ( *compute.Operation, error) { c.Inc("Insert Firewall") return ci.gce.Firewalls.Insert(ci.projID, firewall).Do() } func (ci *client) DeleteFirewall(firewall string) ( *compute.Operation, error) { c.Inc("Delete Firewall") return ci.gce.Firewalls.Delete(ci.projID, firewall).Do() } func (ci *client) ListNetworks(name string) (*compute.NetworkList, error) { c.Inc("List Networks") return ci.gce.Networks.List(ci.projID).Filter( fmt.Sprintf("name eq %s", name)).Do() } func (ci *client) InsertNetwork(network *compute.Network) (*compute.Operation, error) { c.Inc("Insert Network") return ci.gce.Networks.Insert(ci.projID, network).Do() } func (ci *client) DeleteNetwork(network string) (*compute.Operation, error) { c.Inc("Delete Network") return ci.gce.Networks.Delete(ci.projID, network).Do() } func descFilter(desc string) string { return fmt.Sprintf("description eq %s", desc) }
[ "\"HOME\"" ]
[]
[ "HOME" ]
[]
["HOME"]
go
1
0
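A short consumer of the Client interface above, assuming the package resolves at github.com/kelda/kelda/cloud/google/client (inferred from the file path and the kelda imports) and using placeholder zone and description values. New() reads service-account credentials from $HOME/.gce/kelda.json, as the record shows.

package main

import (
	"fmt"
	"log"

	"github.com/kelda/kelda/cloud/google/client" // inferred import path; treat as an assumption
)

func main() {
	// New() builds a compute.Service from $HOME/.gce/kelda.json and extracts the project ID.
	c, err := client.New()
	if err != nil {
		log.Fatalf("failed to create Google client: %v", err)
	}

	// List instances in one zone whose description matches the deployment tag.
	// Both values here are placeholders.
	instances, err := c.ListInstances("us-central1-a", "my-deployment")
	if err != nil {
		log.Fatalf("failed to list instances: %v", err)
	}
	for _, inst := range instances.Items {
		fmt.Println(inst.Name, inst.Status)
	}
}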
bert_ner.py
#! usr/bin/env python3 # -*- coding:utf-8 -*- """ Copyright 2018 The Google AI Language Team Authors. BASED ON Google_BERT. @Author:zhoukaiyin """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import os from bert import modeling from bert import optimization from bert import tokenization import tensorflow as tf from sklearn.metrics import f1_score,precision_score,recall_score from tensorflow.python.ops import math_ops import tf_metrics flags = tf.flags FLAGS = flags.FLAGS flags.DEFINE_string( "data_dir", './drive/My Drive/ai/NERdata', "The input datadir.", ) flags.DEFINE_string( "bert_config_file", './drive/My Drive/ai/checkpoint/bert_config.json', "The config json file corresponding to the pre-trained BERT model." ) flags.DEFINE_string( "task_name", 'NER', "The name of the task to train." ) flags.DEFINE_string( "output_dir", './drive/My Drive/ai/output/result_dir/', "The output directory where the model checkpoints will be written." ) flags.DEFINE_string( "tpu_name", 'gcp_tpu', "Use Google Cloud Colaborator TPU to train" ) ## Other parameters flags.DEFINE_string( "init_checkpoint", './drive/My Drive/ai/checkpoint/bert_model.ckpt', "Initial checkpoint (usually from a pre-trained BERT model)." ) flags.DEFINE_bool( "do_lower_case", True, "Whether to lower case the input text." ) flags.DEFINE_integer( "max_seq_length", 128, "The maximum total input sequence length after WordPiece tokenization." ) flags.DEFINE_bool( "do_train", True, "Whether to run training." ) flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.") flags.DEFINE_bool("do_eval", False, "Whether to run eval on the dev set.") flags.DEFINE_integer("train_batch_size", 32, "Total batch size for training.") flags.DEFINE_integer("eval_batch_size", 8, "Total batch size for eval.") flags.DEFINE_float("learning_rate", 5e-5, "The initial learning rate for Adam.") flags.DEFINE_float("num_train_epochs", 3.0, "Total number of training epochs to perform.") flags.DEFINE_float( "warmup_proportion", 0.1, "Proportion of training to perform linear learning rate warmup for. " "E.g., 0.1 = 10% of training.") flags.DEFINE_integer("save_checkpoints_steps", 1000, "How often to save the model checkpoint.") flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.") flags.DEFINE_string("vocab_file", './drive/My Drive/ai/checkpoint/vocab.txt', "The vocabulary file that the BERT model was trained on.") tf.flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.") flags.DEFINE_integer( "num_tpu_cores", 8, "Only used if `use_tpu` is True. Total number of TPU cores to use.") class InputExample(object): """A single training/test example for simple sequence classification.""" def __init__(self, guid, text, label=None): """Constructs a InputExample. Args: guid: Unique id for the example. text_a: string. The untokenized text of the first sequence. For single sequence tasks, only this sequence must be specified. label: (Optional) string. The label of the example. This should be specified for train and dev examples, but not for test examples. 
""" self.guid = guid self.text = text self.label = label class InputFeatures(object): """A single set of features of data.""" def __init__(self, input_ids, input_mask, segment_ids, label_ids): self.input_ids = input_ids self.input_mask = input_mask self.segment_ids = segment_ids self.label_ids = label_ids class DataProcessor(object): """Base class for data converters for sequence classification data sets.""" def get_train_examples(self, data_dir): """Gets a collection of `InputExample`s for the train set.""" raise NotImplementedError() def get_dev_examples(self, data_dir): """Gets a collection of `InputExample`s for the dev set.""" raise NotImplementedError() def get_labels(self): """Gets the list of labels for this data set.""" raise NotImplementedError() @classmethod def _read_data(cls, input_file): """Reads a BIO data.""" with open(input_file) as f: lines = [] words = [] labels = [] for line in f: contends = line.strip() word = line.strip().split(' ')[0] label = line.strip().split(' ')[-1] if contends.startswith("-DOCSTART-"): words.append('') continue if len(contends) == 0 and words[-1] == '.': l = ' '.join([label for label in labels if len(label) > 0]) w = ' '.join([word for word in words if len(word) > 0]) lines.append([l, w]) words = [] labels = [] continue words.append(word) labels.append(label) return lines class NerProcessor(DataProcessor): def get_train_examples(self, data_dir): return self._create_example( self._read_data(os.path.join(data_dir, "train.txt")), "train" ) def get_dev_examples(self, data_dir): return self._create_example( self._read_data(os.path.join(data_dir, "dev.txt")), "dev" ) def get_labels(self): return ["B-MISC", "I-MISC", "O", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC", "X"] def _create_example(self, lines, set_type): examples = [] for (i, line) in enumerate(lines): guid = "%s-%s" % (set_type, i) text = tokenization.convert_to_unicode(line[1]) label = tokenization.convert_to_unicode(line[0]) examples.append(InputExample(guid=guid, text=text, label=label)) return examples def convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer): label_map = {} for (i, label) in enumerate(label_list, 1): label_map[label] = i textlist = example.text.split(' ') labellist = example.label.split(' ') tokens = [] labels = [] for i, word in enumerate(textlist): token = tokenizer.tokenize(word) tokens.extend(token) label_1 = labellist[i] for m in range(len(token)): if m == 0: labels.append(label_1) else: labels.append("X") # tokens = tokenizer.tokenize(example.text) if len(tokens) >= max_seq_length - 1: tokens = tokens[0:(max_seq_length - 2)] labels = labels[0:(max_seq_length - 2)] ntokens = [] segment_ids = [] label_ids = [] ntokens.append("[CLS]") segment_ids.append(0) label_ids.append(0) for i, token in enumerate(tokens): ntokens.append(token) segment_ids.append(0) label_ids.append(label_map[labels[i]]) ntokens.append("[SEP]") segment_ids.append(0) label_ids.append(0) input_ids = tokenizer.convert_tokens_to_ids(ntokens) input_mask = [1] * len(input_ids) while len(input_ids) < max_seq_length: input_ids.append(0) input_mask.append(0) segment_ids.append(0) label_ids.append(0) # print(len(input_ids)) assert len(input_ids) == max_seq_length assert len(input_mask) == max_seq_length assert len(segment_ids) == max_seq_length assert len(label_ids) == max_seq_length if ex_index < 5: tf.logging.info("*** Example ***") tf.logging.info("guid: %s" % (example.guid)) tf.logging.info("tokens: %s" % " ".join( [tokenization.printable_text(x) for x in tokens])) 
tf.logging.info("input_ids: %s" % " ".join([str(x) for x in input_ids])) tf.logging.info("input_mask: %s" % " ".join([str(x) for x in input_mask])) tf.logging.info("segment_ids: %s" % " ".join([str(x) for x in segment_ids])) tf.logging.info("label_ids: %s" % " ".join([str(x) for x in label_ids])) feature = InputFeatures( input_ids=input_ids, input_mask=input_mask, segment_ids=segment_ids, label_ids=label_ids ) return feature def filed_based_convert_examples_to_features( examples, label_list, max_seq_length, tokenizer, output_file ): writer = tf.python_io.TFRecordWriter(output_file) for (ex_index, example) in enumerate(examples): if ex_index % 5000 == 0: tf.logging.info("Writing example %d of %d" % (ex_index, len(examples))) feature = convert_single_example(ex_index, example, label_list, max_seq_length, tokenizer) def create_int_feature(values): f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values))) return f features = collections.OrderedDict() features["input_ids"] = create_int_feature(feature.input_ids) features["input_mask"] = create_int_feature(feature.input_mask) features["segment_ids"] = create_int_feature(feature.segment_ids) features["label_ids"] = create_int_feature(feature.label_ids) tf_example = tf.train.Example(features=tf.train.Features(feature=features)) writer.write(tf_example.SerializeToString()) def file_based_input_fn_builder(input_file, seq_length, is_training, drop_remainder): name_to_features = { "input_ids": tf.FixedLenFeature([seq_length], tf.int64), "input_mask": tf.FixedLenFeature([seq_length], tf.int64), "segment_ids": tf.FixedLenFeature([seq_length], tf.int64), "label_ids": tf.FixedLenFeature([seq_length], tf.int64), } def _decode_record(record, name_to_features): example = tf.parse_single_example(record, name_to_features) for name in list(example.keys()): t = example[name] if t.dtype == tf.int64: t = tf.to_int32(t) example[name] = t return example def input_fn(params): batch_size = params["batch_size"] d = tf.data.TFRecordDataset(input_file) if is_training: d = d.repeat() d = d.shuffle(buffer_size=100) d = d.apply(tf.contrib.data.map_and_batch( lambda record: _decode_record(record, name_to_features), batch_size=batch_size, drop_remainder=drop_remainder )) return d return input_fn def create_model(bert_config, is_training, input_ids, input_mask, segment_ids, labels, num_labels, use_one_hot_embeddings): model = modeling.BertModel( config=bert_config, is_training=is_training, input_ids=input_ids, input_mask=input_mask, token_type_ids=segment_ids, use_one_hot_embeddings=use_one_hot_embeddings ) output_layer = model.get_sequence_output() hidden_size = output_layer.shape[-1].value output_weight = tf.get_variable( "output_weights", [num_labels, hidden_size], initializer=tf.truncated_normal_initializer(stddev=0.02) ) output_bias = tf.get_variable( "output_bias", [num_labels], initializer=tf.zeros_initializer() ) with tf.variable_scope("loss"): if is_training: output_layer = tf.nn.dropout(output_layer, keep_prob=0.9) output_layer = tf.reshape(output_layer, [-1, hidden_size]) logits = tf.matmul(output_layer, output_weight, transpose_b=True) logits = tf.nn.bias_add(logits, output_bias) logits = tf.reshape(logits, [-1, FLAGS.max_seq_length, 11]) log_probs = tf.nn.log_softmax(logits, axis=-1) # labels = tf.cast(labels,dtype=tf.float32) one_hot_labels = tf.one_hot(labels, depth=num_labels, dtype=tf.float32) per_example_loss = -tf.reduce_sum(one_hot_labels * log_probs, axis=-1) loss = tf.reduce_sum(per_example_loss) return (loss, per_example_loss, logits) def 
model_fn_builder(bert_config, num_labels, init_checkpoint, learning_rate, num_train_steps, num_warmup_steps, use_tpu, use_one_hot_embeddings): def model_fn(features, labels, mode, params): tf.logging.info("*** Features ***") for name in sorted(features.keys()): tf.logging.info(" name = %s, shape = %s" % (name, features[name].shape)) input_ids = features["input_ids"] input_mask = features["input_mask"] segment_ids = features["segment_ids"] label_ids = features["label_ids"] is_training = (mode == tf.estimator.ModeKeys.TRAIN) (total_loss, per_example_loss, logits) = create_model( bert_config, is_training, input_ids, input_mask, segment_ids, label_ids, num_labels, use_one_hot_embeddings) tvars = tf.trainable_variables() scaffold_fn = None if init_checkpoint: (assignment_map, initialized_variable_names) = modeling.get_assignment_map_from_checkpoint(tvars,init_checkpoint) tf.train.init_from_checkpoint(init_checkpoint, assignment_map) if use_tpu: def tpu_scaffold(): tf.train.init_from_checkpoint(init_checkpoint, assignment_map) return tf.train.Scaffold() scaffold_fn = tpu_scaffold else: tf.train.init_from_checkpoint(init_checkpoint, assignment_map) tf.logging.info("**** Trainable Variables ****") for var in tvars: init_string = "" if var.name in initialized_variable_names: init_string = ", *INIT_FROM_CKPT*" tf.logging.info(" name = %s, shape = %s%s", var.name, var.shape, init_string) output_spec = None if mode == tf.estimator.ModeKeys.TRAIN: train_op = optimization.create_optimizer( total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, train_op=train_op, scaffold_fn=scaffold_fn) elif mode == tf.estimator.ModeKeys.EVAL: def metric_fn(per_example_loss, label_ids, logits): predictions = tf.argmax(logits, axis=-1, output_type=tf.int32) precision = tf_metrics.precision(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro") recall = tf_metrics.recall(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro") f = tf_metrics.f1(label_ids,predictions,11,[1,2,4,5,6,7,8,9],average="macro") loss = tf.metrics.mean(per_example_loss) return { "eval_precision":precision, "eval_recall":recall, "eval_f": f, "eval_loss": loss, } eval_metrics = (metric_fn, [per_example_loss, label_ids, logits]) output_spec = tf.contrib.tpu.TPUEstimatorSpec( mode=mode, loss=total_loss, eval_metrics=eval_metrics, scaffold_fn=scaffold_fn) else: raise ValueError("Only TRAIN and EVAL modes are supported: %s" % (mode)) return output_spec return model_fn def main(_): tf.logging.set_verbosity(tf.logging.INFO) processors = { "ner": NerProcessor } if not FLAGS.do_train and not FLAGS.do_eval: raise ValueError("At least one of `do_train` or `do_eval` must be True.") bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file) if FLAGS.max_seq_length > bert_config.max_position_embeddings: raise ValueError( "Cannot use sequence length %d because the BERT model " "was only trained up to sequence length %d" % (FLAGS.max_seq_length, bert_config.max_position_embeddings)) task_name = FLAGS.task_name.lower() if task_name not in processors: raise ValueError("Task not found: %s" % (task_name)) processor = processors[task_name]() label_list = processor.get_labels() tokenizer = tokenization.FullTokenizer( vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case) tpu_cluster_resolver = None if FLAGS.use_tpu and FLAGS.tpu_name: tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver('grpc://' + os.environ['COLAB_TPU_ADDR']) 
is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2 run_config = tf.contrib.tpu.RunConfig( cluster=tpu_cluster_resolver, master=FLAGS.master, model_dir=FLAGS.output_dir, save_checkpoints_steps=FLAGS.save_checkpoints_steps, tpu_config=tf.contrib.tpu.TPUConfig( iterations_per_loop=FLAGS.iterations_per_loop, num_shards=FLAGS.num_tpu_cores, per_host_input_for_training=is_per_host)) train_examples = None num_train_steps = None num_warmup_steps = None if FLAGS.do_train: train_examples = processor.get_train_examples(FLAGS.data_dir) num_train_steps = int( len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs) num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion) model_fn = model_fn_builder( bert_config=bert_config, num_labels=len(label_list)+1, init_checkpoint=FLAGS.init_checkpoint, learning_rate=FLAGS.learning_rate, num_train_steps=num_train_steps, num_warmup_steps=num_warmup_steps, use_tpu=FLAGS.use_tpu, use_one_hot_embeddings=FLAGS.use_tpu) estimator = tf.contrib.tpu.TPUEstimator( use_tpu=FLAGS.use_tpu, model_fn=model_fn, config=run_config, train_batch_size=FLAGS.train_batch_size, eval_batch_size=FLAGS.eval_batch_size) if FLAGS.do_train: train_file = os.path.join(FLAGS.output_dir, "train.tf_record") filed_based_convert_examples_to_features( train_examples, label_list, FLAGS.max_seq_length, tokenizer, train_file) tf.logging.info("***** Running training *****") tf.logging.info(" Num examples = %d", len(train_examples)) tf.logging.info(" Batch size = %d", FLAGS.train_batch_size) tf.logging.info(" Num steps = %d", num_train_steps) train_input_fn = file_based_input_fn_builder( input_file=train_file, seq_length=FLAGS.max_seq_length, is_training=True, drop_remainder=True) estimator.train(input_fn=train_input_fn, max_steps=num_train_steps) if FLAGS.do_eval: eval_examples = processor.get_dev_examples(FLAGS.data_dir) eval_file = os.path.join(FLAGS.output_dir, "eval.tf_record") filed_based_convert_examples_to_features( eval_examples, label_list, FLAGS.max_seq_length, tokenizer, eval_file) tf.logging.info("***** Running evaluation *****") tf.logging.info(" Num examples = %d", len(eval_examples)) tf.logging.info(" Batch size = %d", FLAGS.eval_batch_size) eval_steps = None if FLAGS.use_tpu: eval_steps = int(len(eval_examples) / FLAGS.eval_batch_size) eval_drop_remainder = True if FLAGS.use_tpu else False eval_input_fn = file_based_input_fn_builder( input_file=eval_file, seq_length=FLAGS.max_seq_length, is_training=False, drop_remainder=eval_drop_remainder) result = estimator.evaluate(input_fn=eval_input_fn, steps=eval_steps) output_eval_file = os.path.join(FLAGS.output_dir, "eval_results.txt") with open(output_eval_file, "w") as writer: tf.logging.info("***** Eval results *****") for key in sorted(result.keys()): tf.logging.info(" %s = %s", key, str(result[key])) writer.write("%s = %s\n" % (key, str(result[key]))) if __name__ == "__main__": tf.app.run()
[]
[]
[ "COLAB_TPU_ADDR" ]
[]
["COLAB_TPU_ADDR"]
python
1
0
tests/unit/test_process_handler.py
import asyncio import getpass import logging import os import sys from asyncio.subprocess import Process import pytest from aiofiles.threadpool.binary import AsyncFileIO from mock import AsyncMock from further_link.runner.process_handler import ProcessHandler logging.basicConfig( stream=sys.stdout, level=(logging.DEBUG if os.environ.get("FURTHER_LINK_DEBUG") else logging.INFO), ) user = getpass.getuser() @pytest.mark.asyncio async def test_basic(): p = ProcessHandler(user) p.on_start = AsyncMock() p.on_stop = AsyncMock() p.on_output = AsyncMock() # is this too fast to even lookup the pgid?! # await p.start("echo 'hello\nworld'") await p.start("bash -c \"echo 'hello\nworld'\"") assert type(p.process) == Process p.on_start.assert_called() await p.process.wait() # takes some time to complete - there's a 0.1 sleep in there await asyncio.sleep(0.2) p.on_output.assert_called_with("stdout", "hello\nworld\n") p.on_stop.assert_called_with(0) @pytest.mark.asyncio async def test_input(): p = ProcessHandler(user) p.on_start = AsyncMock() p.on_stop = AsyncMock() p.on_output = AsyncMock() await p.start('python3 -c "print(input())"') p.on_start.assert_called() await p.send_input("hello\n") await p.process.wait() # takes some time to complete - there's a 0.1 sleep in there await asyncio.sleep(0.2) p.on_output.assert_called_with("stdout", "hello\n") p.on_stop.assert_called_with(0) @pytest.mark.asyncio async def test_pty(): p = ProcessHandler(user, pty=True) p.on_start = AsyncMock() p.on_stop = AsyncMock() p.on_output = AsyncMock() await p.start('python3 -c "print(input())"') assert type(p.process) == Process assert p.pty assert type(p.pty_master) == AsyncFileIO assert type(p.pty_slave) == AsyncFileIO p.on_start.assert_called() await p.send_input("hello\n") await p.process.wait() # takes some time to complete - there's a 0.1 sleep in there await asyncio.sleep(0.2) p.on_output.assert_called_with("stdout", "hello\r\nhello\r\n") p.on_stop.assert_called_with(0)
[]
[]
[ "FURTHER_LINK_DEBUG" ]
[]
["FURTHER_LINK_DEBUG"]
python
1
0
pkg/surveyext/editor.go
package surveyext // This file extends survey.Editor to give it more flexible behavior. For more context, read // https://github.com/cli/cli/issues/70 // To see what we extended, search through for EXTENDED comments. import ( "os" "path/filepath" "runtime" "github.com/AlecAivazis/survey/v2" "github.com/AlecAivazis/survey/v2/terminal" ) var ( bom = []byte{0xef, 0xbb, 0xbf} defaultEditor = "nano" // EXTENDED to switch from vim as a default editor ) func init() { if runtime.GOOS == "windows" { defaultEditor = "notepad" } else if g := os.Getenv("GIT_EDITOR"); g != "" { defaultEditor = g } else if v := os.Getenv("VISUAL"); v != "" { defaultEditor = v } else if e := os.Getenv("EDITOR"); e != "" { defaultEditor = e } } // EXTENDED to enable different prompting behavior type GhEditor struct { *survey.Editor EditorCommand string BlankAllowed bool } func (e *GhEditor) editorCommand() string { if e.EditorCommand == "" { return defaultEditor } return e.EditorCommand } // EXTENDED to change prompt text var EditorQuestionTemplate = ` {{- if .ShowHelp }}{{- color .Config.Icons.Help.Format }}{{ .Config.Icons.Help.Text }} {{ .Help }}{{color "reset"}}{{"\n"}}{{end}} {{- color .Config.Icons.Question.Format }}{{ .Config.Icons.Question.Text }} {{color "reset"}} {{- color "default+hb"}}{{ .Message }} {{color "reset"}} {{- if .ShowAnswer}} {{- color "cyan"}}{{.Answer}}{{color "reset"}}{{"\n"}} {{- else }} {{- if and .Help (not .ShowHelp)}}{{color "cyan"}}[{{ .Config.HelpInput }} for help]{{color "reset"}} {{end}} {{- if and .Default (not .HideDefault)}}{{color "white"}}({{.Default}}) {{color "reset"}}{{end}} {{- color "cyan"}}[(e) to launch {{ .EditorCommand }}{{- if .BlankAllowed }}, enter to skip{{ end }}] {{color "reset"}} {{- end}}` // EXTENDED to pass editor name (to use in prompt) type EditorTemplateData struct { survey.Editor EditorCommand string BlankAllowed bool Answer string ShowAnswer bool ShowHelp bool Config *survey.PromptConfig } // EXTENDED to augment prompt text and keypress handling func (e *GhEditor) prompt(initialValue string, config *survey.PromptConfig) (interface{}, error) { err := e.Render( EditorQuestionTemplate, // EXTENDED to support printing editor in prompt and BlankAllowed EditorTemplateData{ Editor: *e.Editor, BlankAllowed: e.BlankAllowed, EditorCommand: filepath.Base(e.editorCommand()), Config: config, }, ) if err != nil { return "", err } // start reading runes from the standard in rr := e.NewRuneReader() _ = rr.SetTermMode() defer func() { _ = rr.RestoreTermMode() }() cursor := e.NewCursor() cursor.Hide() defer cursor.Show() for { // EXTENDED to handle the e to edit / enter to skip behavior + BlankAllowed r, _, err := rr.ReadRune() if err != nil { return "", err } if r == 'e' { break } if r == '\r' || r == '\n' { if e.BlankAllowed { return "", nil } else { continue } } if r == terminal.KeyInterrupt { return "", terminal.InterruptErr } if r == terminal.KeyEndTransmission { break } if string(r) == config.HelpInput && e.Help != "" { err = e.Render( EditorQuestionTemplate, EditorTemplateData{ // EXTENDED to support printing editor in prompt, BlankAllowed Editor: *e.Editor, BlankAllowed: e.BlankAllowed, EditorCommand: filepath.Base(e.editorCommand()), ShowHelp: true, Config: config, }, ) if err != nil { return "", err } } continue } stdio := e.Stdio() text, err := Edit(e.editorCommand(), e.FileName, initialValue, stdio.In, stdio.Out, stdio.Err, cursor) if err != nil { return "", err } // check length, return default value on empty if len(text) == 0 && !e.AppendDefault { return 
e.Default, nil } return text, nil } // EXTENDED This is straight copypasta from survey to get our overridden prompt called.; func (e *GhEditor) Prompt(config *survey.PromptConfig) (interface{}, error) { initialValue := "" if e.Default != "" && e.AppendDefault { initialValue = e.Default } return e.prompt(initialValue, config) }
[ "\"GIT_EDITOR\"", "\"VISUAL\"", "\"EDITOR\"" ]
[]
[ "VISUAL", "EDITOR", "GIT_EDITOR" ]
[]
["VISUAL", "EDITOR", "GIT_EDITOR"]
go
3
0
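The GhEditor type above plugs into the regular survey prompt flow because it embeds *survey.Editor and overrides Prompt. A sketch of how a caller might use it follows; the import path for the surveyext package is inferred from the pkg/surveyext path and the cli/cli issue linked in the file, and the prompt text is a placeholder.

package main

import (
	"fmt"
	"log"

	"github.com/AlecAivazis/survey/v2"
	"github.com/cli/cli/pkg/surveyext" // inferred import path; treat as an assumption
)

func main() {
	prompt := &surveyext.GhEditor{
		BlankAllowed:  true, // pressing enter skips the editor instead of forcing a launch
		EditorCommand: "",   // empty falls back to GIT_EDITOR, VISUAL, EDITOR, then nano/notepad
		Editor: &survey.Editor{
			Message:  "Body", // placeholder prompt text
			FileName: "*.md",
		},
	}

	var body string
	if err := survey.AskOne(prompt, &body); err != nil {
		log.Fatal(err)
	}
	fmt.Println("got:", body)
}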
statick_tool/discovery_plugin.py
"""Discovery plugin.""" import logging import os import subprocess import sys from typing import Any, List, Optional, Union from yapsy.IPlugin import IPlugin from statick_tool.exceptions import Exceptions from statick_tool.package import Package from statick_tool.plugin_context import PluginContext class DiscoveryPlugin(IPlugin): # type: ignore """Default implementation of discovery plugin.""" plugin_context = None def get_name(self) -> Optional[str]: """Get name of plugin.""" @classmethod def get_discovery_dependencies(cls) -> List[str]: """Get a list of discovery plugins that must run before this one.""" return [] def gather_args(self, args: Any) -> None: """Gather arguments for plugin.""" def scan( self, package: Package, level: str, exceptions: Optional[Exceptions] = None ) -> None: """Scan package to discover files for analysis. If exceptions is passed, then the plugin should (if practical) use it to filter which files the plugin detects. """ def find_files(self, package: Package) -> None: """Walk the package path exactly once to discover files for analysis.""" if package._walked: # pylint: disable=protected-access return for root, _, files in os.walk(package.path): for fname in files: full_path = os.path.join(root, fname) abs_path = os.path.abspath(full_path) file_output = self.get_file_cmd_output(full_path) file_dict = { "name": fname.lower(), "path": abs_path, "file_cmd_out": file_output, } package.files[abs_path] = file_dict package._walked = True # pylint: disable=protected-access def get_file_cmd_output(self, full_path: str) -> str: """Run the file command (if it exists) on the supplied path. The output from the file command is converted to lowercase. There are two recommended ways to check it: 1. When searching for a single string just use the python "in" operator: if "search string" in fild_dict["file_cmd_out"]: 2. When searching for multiple different strings, use the `any()` function: expected_output = ("output_1", "output_2") if any(item in file_dict["file_cmd_out"] for item in expected_output): """ if not self.file_command_exists(): return "" try: output: str = subprocess.check_output( ["file", full_path], universal_newlines=True ) return output.lower() except subprocess.CalledProcessError as ex: logging.warning( "Failed to run 'file' command. Returncode = %d", ex.returncode ) logging.warning("Exception output: %s", ex.output) return "" def set_plugin_context(self, plugin_context: Union[None, PluginContext]) -> None: """Set the plugin context.""" self.plugin_context = plugin_context @staticmethod def file_command_exists() -> bool: """Return whether the 'file' command is available on $PATH.""" if sys.platform == "win32": command_name = "file.exe" else: command_name = "file" for path in os.environ["PATH"].split(os.pathsep): exe_path = os.path.join(path, command_name) if os.path.isfile(exe_path) and os.access(exe_path, os.X_OK): return True return False
[]
[]
[ "PATH" ]
[]
["PATH"]
python
1
0
plugins/inputs/sysstat/sysstat_test.go
// +build linux package sysstat import ( "fmt" "os" "os/exec" "path" "testing" "github.com/orlando-signer/telegraf/testutil" ) var s = Sysstat{ interval: 10, Sadc: "/usr/lib/sa/sadc", Sadf: "/usr/bin/sadf", Group: false, Activities: []string{"DISK", "SNMP"}, Options: map[string]string{ "C": "cpu", "d": "disk", }, DeviceTags: map[string][]map[string]string{ "sda": { { "vg": "rootvg", }, }, }, } func TestGather(t *testing.T) { // overwriting exec commands with mock commands execCommand = fakeExecCommand defer func() { execCommand = exec.Command }() var acc testutil.Accumulator err := acc.GatherError(s.Gather) if err != nil { t.Fatal(err) } cpuTags := map[string]string{"device": "all"} diskTags := map[string]string{"device": "sda", "vg": "rootvg"} tests := []struct { measurement string fields map[string]interface{} tags map[string]string }{ { "cpu_pct_user", map[string]interface{}{ "value": 0.65, }, cpuTags, }, { "cpu_pct_nice", map[string]interface{}{ "value": 0.0, }, cpuTags, }, { "cpu_pct_system", map[string]interface{}{ "value": 0.10, }, cpuTags, }, { "cpu_pct_iowait", map[string]interface{}{ "value": 0.15, }, cpuTags, }, { "cpu_pct_steal", map[string]interface{}{ "value": 0.0, }, cpuTags, }, { "cpu_pct_idle", map[string]interface{}{ "value": 99.1, }, cpuTags, }, { "disk_tps", map[string]interface{}{ "value": 0.00, }, diskTags, }, { "disk_rd_sec_per_s", map[string]interface{}{ "value": 0.00, }, diskTags, }, { "disk_wr_sec_per_s", map[string]interface{}{ "value": 0.00, }, diskTags, }, { "disk_avgrq-sz", map[string]interface{}{ "value": 0.00, }, diskTags, }, { "disk_avgqu-sz", map[string]interface{}{ "value": 0.00, }, diskTags, }, { "disk_await", map[string]interface{}{ "value": 0.00, }, diskTags, }, { "disk_svctm", map[string]interface{}{ "value": 0.00, }, diskTags, }, { "disk_pct_util", map[string]interface{}{ "value": 0.00, }, diskTags, }, } for _, test := range tests { acc.AssertContainsTaggedFields(t, test.measurement, test.fields, test.tags) } } func TestGatherGrouped(t *testing.T) { s.Group = true // overwriting exec commands with mock commands execCommand = fakeExecCommand defer func() { execCommand = exec.Command }() var acc testutil.Accumulator err := acc.GatherError(s.Gather) if err != nil { t.Fatal(err) } var tests = []struct { measurement string fields map[string]interface{} tags map[string]string }{ { "cpu", map[string]interface{}{ "pct_user": 0.65, "pct_nice": 0.0, "pct_system": 0.10, "pct_iowait": 0.15, "pct_steal": 0.0, "pct_idle": 99.1, }, map[string]string{"device": "all"}, }, { "disk", map[string]interface{}{ "tps": 0.00, "rd_sec_per_s": 0.00, "wr_sec_per_s": 0.00, "avgrq-sz": 0.00, "avgqu-sz": 0.00, "await": 0.00, "svctm": 0.00, "pct_util": 0.00, }, map[string]string{"device": "sda", "vg": "rootvg"}, }, { "disk", map[string]interface{}{ "tps": 2.01, "rd_sec_per_s": 1.0, "wr_sec_per_s": 0.00, "avgrq-sz": 0.30, "avgqu-sz": 0.60, "await": 0.70, "svctm": 0.20, "pct_util": 0.30, }, map[string]string{"device": "sdb"}, }, } for _, test := range tests { acc.AssertContainsTaggedFields(t, test.measurement, test.fields, test.tags) } } func TestEscape(t *testing.T) { var tests = []struct { input string escaped string }{ { "%util", "pct_util", }, { "%%util", "pct_util", }, { "bread/s", "bread_per_s", }, { "%nice", "pct_nice", }, } for _, test := range tests { if test.escaped != escape(test.input) { t.Errorf("wrong escape, got %s, wanted %s", escape(test.input), test.escaped) } } } // Helper function that mock the exec.Command call (and call the test binary) func 
fakeExecCommand(command string, args ...string) *exec.Cmd { cs := []string{"-test.run=TestHelperProcess", "--", command} cs = append(cs, args...) cmd := exec.Command(os.Args[0], cs...) cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"} return cmd } // TestHelperProcess isn't a real test. It's used to mock exec.Command // For example, if you run: // GO_WANT_HELPER_PROCESS=1 go test -test.run=TestHelperProcess -- sadf -p -- -p -C tmpFile // it returns mockData["C"] output. func TestHelperProcess(t *testing.T) { if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" { return } mockData := map[string]string{ "C": `dell-xps 5 2016-03-25 16:18:10 UTC all %user 0.65 dell-xps 5 2016-03-25 16:18:10 UTC all %nice 0.00 dell-xps 5 2016-03-25 16:18:10 UTC all %system 0.10 dell-xps 5 2016-03-25 16:18:10 UTC all %iowait 0.15 dell-xps 5 2016-03-25 16:18:10 UTC all %steal 0.00 dell-xps 5 2016-03-25 16:18:10 UTC all %idle 99.10 `, "d": `dell-xps 5 2016-03-25 16:18:10 UTC sda tps 0.00 dell-xps 5 2016-03-25 16:18:10 UTC sda rd_sec/s 0.00 dell-xps 5 2016-03-25 16:18:10 UTC sda wr_sec/s 0.00 dell-xps 5 2016-03-25 16:18:10 UTC sda avgrq-sz 0.00 dell-xps 5 2016-03-25 16:18:10 UTC sda avgqu-sz 0.00 dell-xps 5 2016-03-25 16:18:10 UTC sda await 0.00 dell-xps 5 2016-03-25 16:18:10 UTC sda svctm 0.00 dell-xps 5 2016-03-25 16:18:10 UTC sda %util 0.00 dell-xps 5 2016-03-25 16:18:10 UTC sdb tps 2.01 dell-xps 5 2016-03-25 16:18:10 UTC sdb rd_sec/s 1.00 dell-xps 5 2016-03-25 16:18:10 UTC sdb wr_sec/s 0.00 dell-xps 5 2016-03-25 16:18:10 UTC sdb avgrq-sz 0.30 dell-xps 5 2016-03-25 16:18:10 UTC sdb avgqu-sz 0.60 dell-xps 5 2016-03-25 16:18:10 UTC sdb await 0.70 dell-xps 5 2016-03-25 16:18:10 UTC sdb svctm 0.20 dell-xps 5 2016-03-25 16:18:10 UTC sdb %util 0.30 `, } args := os.Args // Previous arguments are tests stuff, that looks like : // /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess -- cmd, args := args[3], args[4:] // Handle the case where args[0] is dir:... switch path.Base(cmd) { case "sadf": fmt.Fprint(os.Stdout, mockData[args[3]]) default: } // some code here to check arguments perhaps? os.Exit(0) }
[ "\"GO_WANT_HELPER_PROCESS\"" ]
[]
[ "GO_WANT_HELPER_PROCESS" ]
[]
["GO_WANT_HELPER_PROCESS"]
go
1
0
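The test above relies on the standard Go helper-process trick: execCommand is a package-level variable that tests swap for fakeExecCommand, which re-invokes the test binary so TestHelperProcess can print canned sadf output instead of running the real tool. Stripped of the sysstat specifics, the pattern looks roughly like this; the package name and canned output are illustrative only.

package mypkg

import (
	"fmt"
	"os"
	"os/exec"
	"testing"
)

// execCommand is what production code calls; tests replace it with fakeExecCommand.
var execCommand = exec.Command

// fakeExecCommand re-runs the current test binary, restricted to TestHelperProcess,
// and marks the child via GO_WANT_HELPER_PROCESS so it knows to emit canned output.
func fakeExecCommand(command string, args ...string) *exec.Cmd {
	cs := append([]string{"-test.run=TestHelperProcess", "--", command}, args...)
	cmd := exec.Command(os.Args[0], cs...)
	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
	return cmd
}

// TestHelperProcess is not a real test: it only acts when spawned by fakeExecCommand.
func TestHelperProcess(t *testing.T) {
	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
		return
	}
	fmt.Fprint(os.Stdout, "canned output") // placeholder for mocked command output
	os.Exit(0)
}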
client/config.go
package client import ( "fmt" "io/ioutil" "net" "net/url" "os" "os/user" "path" "regexp" "strconv" "strings" "zach-rock/log" yaml "gopkg.in/yaml.v2" ) type Configuration struct { HttpProxy string `yaml:"http_proxy,omitempty"` ServerAddr string `yaml:"server_addr,omitempty"` InspectAddr string `yaml:"inspect_addr,omitempty"` TrustHostRootCerts bool `yaml:"trust_host_root_certs,omitempty"` AuthToken string `yaml:"auth_token,omitempty"` Tunnels map[string]*TunnelConfiguration `yaml:"tunnels,omitempty"` LogTo string `yaml:"-"` Path string `yaml:"-"` } type TunnelConfiguration struct { Subdomain string `yaml:"subdomain,omitempty"` Hostname string `yaml:"hostname,omitempty"` Protocols map[string]string `yaml:"proto,omitempty"` HttpAuth string `yaml:"auth,omitempty"` RemotePort uint16 `yaml:"remote_port,omitempty"` } func LoadConfiguration(opts *Options) (config *Configuration, err error) { configPath := opts.config if configPath == "" { configPath = defaultPath() } log.Info("Reading configuration file %s", configPath) configBuf, err := ioutil.ReadFile(configPath) if err != nil { // failure to read a configuration file is only a fatal error if // the user specified one explicitly if opts.config != "" { err = fmt.Errorf("failed to read configuration file %s: %v", configPath, err) return } } // deserialize/parse the config config = new(Configuration) if err = yaml.Unmarshal(configBuf, &config); err != nil { err = fmt.Errorf("error parsing configuration file %s: %v", configPath, err) return } matched := false content := strings.TrimSpace(string(configBuf)) if matched, err = regexp.MatchString("^[0-9a-zA-Z_\\-!]+$", content); err != nil { return } else if matched { config = &Configuration{AuthToken: content} } // set configuration defaults if config.ServerAddr == "" { config.ServerAddr = defaultServerAddr } if config.InspectAddr == "" { config.InspectAddr = defaultInspectAddr } if config.HttpProxy == "" { config.HttpProxy = os.Getenv("http_proxy") } // validate and normalize configuration if config.InspectAddr != "disabled" { if config.InspectAddr, err = normalizeAddress(config.InspectAddr, "inspect_addr"); err != nil { return } } if config.ServerAddr, err = normalizeAddress(config.ServerAddr, "server_addr"); err != nil { return } if config.HttpProxy != "" { var proxyUrl *url.URL if proxyUrl, err = url.Parse(config.HttpProxy); err != nil { return } else { if proxyUrl.Scheme != "http" && proxyUrl.Scheme != "https" { err = fmt.Errorf("proxy url scheme must be 'http' or 'https', got %v", proxyUrl.Scheme) return } } } for name, t := range config.Tunnels { if t == nil || t.Protocols == nil || len(t.Protocols) == 0 { err = fmt.Errorf("tunnel %s does not specify any protocols to tunnel", name) return } for k, addr := range t.Protocols { tunnelName := fmt.Sprintf("for tunnel %s[%s]", name, k) if t.Protocols[k], err = normalizeAddress(addr, tunnelName); err != nil { return } if err = validateProtocol(k, tunnelName); err != nil { return } } // use the name of the tunnel as the subdomain if none is specified if t.Hostname == "" && t.Subdomain == "" { // XXX: a crude heuristic, really we should be checking if the last part // is a TLD if len(strings.Split(name, ".")) > 1 { t.Hostname = name } else { t.Subdomain = name } } } // override configuration with command-line options config.LogTo = opts.logto config.Path = configPath if opts.authtoken != "" { config.AuthToken = opts.authtoken } switch opts.command { // start a single tunnel, the default, simple behavior case "default": config.Tunnels = 
make(map[string]*TunnelConfiguration) config.Tunnels["default"] = &TunnelConfiguration{ Subdomain: opts.subdomain, Hostname: opts.hostname, HttpAuth: opts.httpauth, Protocols: make(map[string]string), } for _, proto := range strings.Split(opts.protocol, "+") { if err = validateProtocol(proto, "default"); err != nil { return } if config.Tunnels["default"].Protocols[proto], err = normalizeAddress(opts.args[0], ""); err != nil { return } } // list tunnels case "list": for name := range config.Tunnels { fmt.Println(name) } os.Exit(0) // start tunnels case "start": if len(opts.args) == 0 { err = fmt.Errorf("you must specify at least one tunnel to start") return } requestedTunnels := make(map[string]bool) for _, arg := range opts.args { requestedTunnels[arg] = true if _, ok := config.Tunnels[arg]; !ok { err = fmt.Errorf("requested to start tunnel %s which is not defined in the config file", arg) return } } for name := range config.Tunnels { if !requestedTunnels[name] { delete(config.Tunnels, name) } } case "start-all": return default: err = fmt.Errorf("unknown command: %s", opts.command) return } return } func defaultPath() string { user, err := user.Current() // user.Current() does not work on linux when cross compiling because // it requires CGO; use os.Getenv("HOME") hack until we compile natively homeDir := os.Getenv("HOME") if err != nil { log.Warn("Failed to get user's home directory: %s. Using $HOME: %s", err.Error(), homeDir) } else { homeDir = user.HomeDir } return path.Join(homeDir, "zach-rock") } func normalizeAddress(addr string, propName string) (string, error) { // normalize port to address if _, err := strconv.Atoi(addr); err == nil { addr = ":" + addr } host, port, err := net.SplitHostPort(addr) if err != nil { return "", fmt.Errorf("invalid address %s '%s': %s", propName, addr, err.Error()) } if host == "" { host = "127.0.0.1" } return fmt.Sprintf("%s:%s", host, port), nil } func validateProtocol(proto, propName string) (err error) { switch proto { case "http", "https", "http+https", "tcp": default: err = fmt.Errorf("invalid protocol for %s: %s", propName, proto) } return } func SaveAuthToken(configPath, authtoken string) (err error) { // empty configuration by default for the case that we can't read it c := new(Configuration) // read the configuration oldConfigBytes, err := ioutil.ReadFile(configPath) if err == nil { // unmarshal if we successfully read the configuration file if err = yaml.Unmarshal(oldConfigBytes, c); err != nil { return } } // no need to save, the authtoken is already the correct value if c.AuthToken == authtoken { return } // update auth token c.AuthToken = authtoken // rewrite configuration newConfigBytes, err := yaml.Marshal(c) if err != nil { return } err = ioutil.WriteFile(configPath, newConfigBytes, 0600) return }
[ "\"http_proxy\"", "\"HOME\"", "\"HOME\"" ]
[]
[ "HOME", "http_proxy" ]
[]
["HOME", "http_proxy"]
go
2
0
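The row above annotates two environment lookups in the tunnel client: http_proxy as the default outbound proxy, and HOME as the home-directory fallback used when user.Current() fails (e.g. CGO-less cross compiles). Below is a minimal, self-contained Go sketch of just those two patterns; the helper names proxyFromEnv and defaultConfigDir are illustrative and not part of the original file.

package main

import (
	"fmt"
	"os"
	"os/user"
	"path"
)

// proxyFromEnv mirrors the row's http_proxy fallback: an explicit value wins,
// otherwise the http_proxy environment variable is used.
func proxyFromEnv(explicit string) string {
	if explicit != "" {
		return explicit
	}
	return os.Getenv("http_proxy")
}

// defaultConfigDir mirrors the row's HOME fallback: prefer user.Current(),
// fall back to $HOME when that lookup fails.
func defaultConfigDir() string {
	home := os.Getenv("HOME")
	if u, err := user.Current(); err == nil {
		home = u.HomeDir
	}
	return path.Join(home, "zach-rock")
}

func main() {
	fmt.Println(proxyFromEnv(""), defaultConfigDir())
}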
aoc_fun/aoc_fun.go
package aoc_fun import ( "log" "os" "path" "runtime" "runtime/pprof" "sort" "strings" "time" ) func Abs(a int) int { if a < 0 { return -a } return a } func Runningtime() time.Time { return time.Now() } func Track(startTime time.Time) { endTime := time.Now() log.Println("Took", endTime.Sub(startTime)) } func GetDefaultInputFilePath() string { _, filename, _, _ := runtime.Caller(2) filename = string(path.Dir(filename)) + "/input.txt" return filename } func SortString(input string) string { runeArray := []rune(input) sort.Sort(sortRuneString(runeArray)) return string(runeArray) } type sortRuneString []rune func (s sortRuneString) Swap(i, j int) { s[i], s[j] = s[j], s[i] } func (s sortRuneString) Less(i, j int) bool { return s[i] < s[j] } func (s sortRuneString) Len() int { return len(s) } func ProfileCPU() *os.File { // Example run: // ```ENABLE_PROFILING=TRUE go run d04/d04.go && go tool pprof -ignore 'syscall' -ignore 'aoc_fun' -dot cpu.prof | dot -Tpng -o call_profile_graph.png``` do_profile := strings.ToUpper(os.Getenv("ENABLE_PROFILING")) == "TRUE" if !do_profile { return nil } log.Println("*** PROFILING ENABLED ***") cpu_profile_file_handler, err := os.Create("cpu.prof") if err != nil { log.Fatal("could not create CPU profile: ", err) } if err := pprof.StartCPUProfile(cpu_profile_file_handler); err != nil { log.Fatal("could not start CPU profile: ", err) } return cpu_profile_file_handler } func Unprofile(cpu_profile_file *os.File) { if cpu_profile_file != nil { pprof.StopCPUProfile() cpu_profile_file.Close() } }
[ "\"ENABLE_PROFILING\"" ]
[]
[ "ENABLE_PROFILING" ]
[]
["ENABLE_PROFILING"]
go
1
0
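This row's only environment dependency is ENABLE_PROFILING, which gates pprof CPU profiling. A minimal sketch of that gate, assuming the same TRUE/anything-else convention as the source; profileIfEnabled and the cpu.prof file name are illustrative, not taken verbatim from the file.

package main

import (
	"log"
	"os"
	"runtime/pprof"
	"strings"
)

// profileIfEnabled starts CPU profiling only when ENABLE_PROFILING is set to
// TRUE (case-insensitive) and returns the profile file, or nil when disabled.
func profileIfEnabled() *os.File {
	if strings.ToUpper(os.Getenv("ENABLE_PROFILING")) != "TRUE" {
		return nil
	}
	f, err := os.Create("cpu.prof")
	if err != nil {
		log.Fatal("could not create CPU profile: ", err)
	}
	if err := pprof.StartCPUProfile(f); err != nil {
		log.Fatal("could not start CPU profile: ", err)
	}
	return f
}

func main() {
	if f := profileIfEnabled(); f != nil {
		defer func() { pprof.StopCPUProfile(); f.Close() }()
	}
	// ... workload to profile would run here ...
}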
orc8r/cloud/go/services/configurator/storage/sql.go
/* * Copyright 2020 The Magma Authors. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package storage import ( "context" "database/sql" "fmt" "os" "sort" "magma/orc8r/cloud/go/sqorc" "magma/orc8r/cloud/go/storage" sq "github.com/Masterminds/squirrel" "github.com/pkg/errors" "github.com/thoas/go-funk" ) const ( networksTable = "cfg_networks" networkConfigTable = "cfg_network_configs" entityTable = "cfg_entities" entityAssocTable = "cfg_assocs" ) const ( nwIDCol = "id" nwTypeCol = "type" nwNameCol = "name" nwDescCol = "description" nwVerCol = "version" nwcIDCol = "network_id" nwcTypeCol = "type" nwcValCol = "value" entPkCol = "pk" entNidCol = "network_id" entTypeCol = "type" entKeyCol = "\"key\"" entGidCol = "graph_id" entNameCol = "name" entDescCol = "description" entPidCol = "physical_id" entConfCol = "config" entVerCol = "version" aFrCol = "from_pk" aToCol = "to_pk" ) // NewSQLConfiguratorStorageFactory returns a ConfiguratorStorageFactory // implementation backed by a SQL database. func NewSQLConfiguratorStorageFactory(db *sql.DB, generator storage.IDGenerator, sqlBuilder sqorc.StatementBuilder, maxEntityLoadSize uint32) ConfiguratorStorageFactory { return &sqlConfiguratorStorageFactory{db: db, idGenerator: generator, builder: sqlBuilder, maxEntityLoadSize: maxEntityLoadSize} } type sqlConfiguratorStorageFactory struct { db *sql.DB idGenerator storage.IDGenerator builder sqorc.StatementBuilder maxEntityLoadSize uint32 } func (fact *sqlConfiguratorStorageFactory) InitializeServiceStorage() (err error) { tx, err := fact.db.BeginTx(context.Background(), &sql.TxOptions{ Isolation: sql.LevelSerializable, }) if err != nil { return } defer func() { if err == nil { err = tx.Commit() } else { rollbackErr := tx.Rollback() if rollbackErr != nil { err = fmt.Errorf("%s; rollback error: %s", err, rollbackErr) } } }() // Named return values below so we can automatically decide tx commit/ // rollback in deferred function _, err = fact.builder.CreateTable(networksTable). IfNotExists(). Column(nwIDCol).Type(sqorc.ColumnTypeText).PrimaryKey().EndColumn(). Column(nwTypeCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(nwNameCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(nwDescCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(nwVerCol).Type(sqorc.ColumnTypeInt).NotNull().Default(0).EndColumn(). RunWith(tx). Exec() if err != nil { err = errors.Wrap(err, "failed to create networks table") return } // Adding a type column if it doesn't exist already. This will ensure network // tables that are already created will also have the type column. // TODO Remove after 1-2 months to ensure service isn't disrupted _, err = tx.Exec(fmt.Sprintf("ALTER TABLE %s ADD COLUMN IF NOT EXISTS %s text", networksTable, nwTypeCol)) // special case sqlite3 because ADD COLUMN IF NOT EXISTS is not supported // and we only run sqlite3 for unit tests if err != nil && os.Getenv("SQL_DRIVER") != "sqlite3" { err = errors.Wrap(err, "failed to add 'type' field to networks table") } _, err = fact.builder.CreateIndex("type_idx"). IfNotExists(). On(networksTable). Columns(nwTypeCol). RunWith(tx). 
Exec() if err != nil { err = errors.Wrap(err, "failed to create network type index") return } _, err = fact.builder.CreateTable(networkConfigTable). IfNotExists(). Column(nwcIDCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(nwcTypeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(nwcValCol).Type(sqorc.ColumnTypeBytes).EndColumn(). PrimaryKey(nwcIDCol, nwcTypeCol). ForeignKey(networksTable, map[string]string{nwcIDCol: nwIDCol}, sqorc.ColumnOnDeleteCascade). RunWith(tx). Exec() if err != nil { err = errors.Wrap(err, "failed to create network configs table") return } // Create an internal-only primary key (UUID) for entities. // This keeps index size in control and supporting table schemas simpler. _, err = fact.builder.CreateTable(entityTable). IfNotExists(). Column(entPkCol).Type(sqorc.ColumnTypeText).PrimaryKey().EndColumn(). Column(entNidCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(entTypeCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(entKeyCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(entGidCol).Type(sqorc.ColumnTypeText).NotNull().EndColumn(). Column(entNameCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(entDescCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(entPidCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(entConfCol).Type(sqorc.ColumnTypeBytes).EndColumn(). Column(entVerCol).Type(sqorc.ColumnTypeInt).NotNull().Default(0).EndColumn(). Unique(entNidCol, entKeyCol, entTypeCol). Unique(entPidCol). ForeignKey(networksTable, map[string]string{entNidCol: nwIDCol}, sqorc.ColumnOnDeleteCascade). RunWith(tx). Exec() if err != nil { err = errors.Wrap(err, "failed to create entities table") return } _, err = fact.builder.CreateTable(entityAssocTable). IfNotExists(). Column(aFrCol).Type(sqorc.ColumnTypeText).EndColumn(). Column(aToCol).Type(sqorc.ColumnTypeText).EndColumn(). PrimaryKey(aFrCol, aToCol). ForeignKey(entityTable, map[string]string{aFrCol: entPkCol}, sqorc.ColumnOnDeleteCascade). ForeignKey(entityTable, map[string]string{aToCol: entPkCol}, sqorc.ColumnOnDeleteCascade). RunWith(tx). Exec() if err != nil { err = errors.Wrap(err, "failed to create entity assoc table") return } // Create indexes (index is not implicitly created on a referencing FK) _, err = fact.builder.CreateIndex("graph_id_idx"). IfNotExists(). On(entityTable). Columns(entGidCol). RunWith(tx). Exec() if err != nil { err = errors.Wrap(err, "failed to create graph ID index") return } // Create internal network(s) _, err = fact.builder.Insert(networksTable). Columns(nwIDCol, nwTypeCol, nwNameCol, nwDescCol). Values(InternalNetworkID, internalNetworkType, internalNetworkName, internalNetworkDescription). OnConflict(nil, nwIDCol). RunWith(tx). 
Exec() if err != nil { err = errors.Wrap(err, "error creating internal networks") return } return } func (fact *sqlConfiguratorStorageFactory) StartTransaction(ctx context.Context, opts *storage.TxOptions) (ConfiguratorStorage, error) { tx, err := fact.db.BeginTx(ctx, getSqlOpts(opts)) if err != nil { return nil, err } return &sqlConfiguratorStorage{tx: tx, idGenerator: fact.idGenerator, builder: fact.builder, maxEntityLoadSize: fact.maxEntityLoadSize}, nil } func getSqlOpts(opts *storage.TxOptions) *sql.TxOptions { if opts == nil { return nil } if opts.Isolation == 0 { return &sql.TxOptions{ReadOnly: opts.ReadOnly} } return &sql.TxOptions{ReadOnly: opts.ReadOnly, Isolation: sql.IsolationLevel(opts.Isolation)} } type sqlConfiguratorStorage struct { tx *sql.Tx idGenerator storage.IDGenerator builder sqorc.StatementBuilder maxEntityLoadSize uint32 } func (store *sqlConfiguratorStorage) Commit() error { return store.tx.Commit() } func (store *sqlConfiguratorStorage) Rollback() error { return store.tx.Rollback() } func (store *sqlConfiguratorStorage) LoadNetworks(filter NetworkLoadFilter, loadCriteria NetworkLoadCriteria) (NetworkLoadResult, error) { emptyRet := NetworkLoadResult{NetworkIDsNotFound: []string{}, Networks: []*Network{}} if funk.IsEmpty(filter.Ids) && funk.IsEmpty(filter.TypeFilter) { return emptyRet, nil } selectBuilder := store.getLoadNetworksSelectBuilder(filter, loadCriteria) if loadCriteria.LoadConfigs { selectBuilder = selectBuilder.LeftJoin( fmt.Sprintf( "%s ON %s.%s = %s.%s", networkConfigTable, networkConfigTable, nwcIDCol, networksTable, nwIDCol, ), ) } rows, err := selectBuilder.RunWith(store.tx).Query() if err != nil { return emptyRet, fmt.Errorf("error querying for networks: %s", err) } defer sqorc.CloseRowsLogOnError(rows, "LoadNetworks") loadedNetworksByID, loadedNetworkIDs, err := scanNetworkRows(rows, loadCriteria) if err != nil { return emptyRet, err } ret := NetworkLoadResult{ NetworkIDsNotFound: getNetworkIDsNotFound(loadedNetworksByID, filter.Ids), Networks: make([]*Network, 0, len(loadedNetworksByID)), } for _, nid := range loadedNetworkIDs { ret.Networks = append(ret.Networks, loadedNetworksByID[nid]) } return ret, nil } func (store *sqlConfiguratorStorage) LoadAllNetworks(loadCriteria NetworkLoadCriteria) ([]Network, error) { emptyNetworks := []Network{} idsToExclude := []string{InternalNetworkID} selectBuilder := store.builder.Select(getNetworkQueryColumns(loadCriteria)...). From(networksTable). 
Where(sq.NotEq{ fmt.Sprintf("%s.%s", networksTable, nwIDCol): idsToExclude, }) if loadCriteria.LoadConfigs { selectBuilder = selectBuilder.LeftJoin( fmt.Sprintf( "%s ON %s.%s = %s.%s", networkConfigTable, networkConfigTable, nwcIDCol, networksTable, nwIDCol, ), ) } rows, err := selectBuilder.RunWith(store.tx).Query() if err != nil { return emptyNetworks, fmt.Errorf("error querying for networks: %s", err) } defer sqorc.CloseRowsLogOnError(rows, "LoadAllNetworks") loadedNetworksByID, loadedNetworkIDs, err := scanNetworkRows(rows, loadCriteria) if err != nil { return emptyNetworks, err } networks := make([]Network, 0, len(loadedNetworksByID)) for _, nid := range loadedNetworkIDs { networks = append(networks, *loadedNetworksByID[nid]) } return networks, nil } func (store *sqlConfiguratorStorage) CreateNetwork(network Network) (Network, error) { exists, err := store.doesNetworkExist(network.ID) if err != nil { return Network{}, err } if exists { return Network{}, fmt.Errorf("a network with ID %s already exists", network.ID) } _, err = store.builder.Insert(networksTable). Columns(nwIDCol, nwTypeCol, nwNameCol, nwDescCol). Values(network.ID, network.Type, network.Name, network.Description). RunWith(store.tx). Exec() if err != nil { return Network{}, fmt.Errorf("error inserting network: %s", err) } if funk.IsEmpty(network.Configs) { return network, nil } // Sort config keys for deterministic behavior configKeys := funk.Keys(network.Configs).([]string) sort.Strings(configKeys) insertBuilder := store.builder.Insert(networkConfigTable). Columns(nwcIDCol, nwcTypeCol, nwcValCol) for _, configKey := range configKeys { insertBuilder = insertBuilder.Values(network.ID, configKey, network.Configs[configKey]) } _, err = insertBuilder.RunWith(store.tx).Exec() if err != nil { return Network{}, errors.Wrap(err, "error inserting network configs") } return network, nil } func (store *sqlConfiguratorStorage) UpdateNetworks(updates []NetworkUpdateCriteria) error { if err := validateNetworkUpdates(updates); err != nil { return err } networksToDelete := []string{} networksToUpdate := []NetworkUpdateCriteria{} for _, update := range updates { if update.DeleteNetwork { networksToDelete = append(networksToDelete, update.ID) } else { networksToUpdate = append(networksToUpdate, update) } } stmtCache := sq.NewStmtCache(store.tx) defer sqorc.ClearStatementCacheLogOnError(stmtCache, "UpdateNetworks") // Update networks first for _, update := range networksToUpdate { err := store.updateNetwork(update, stmtCache) if err != nil { return errors.WithStack(err) } } _, err := store.builder.Delete(networkConfigTable).Where(sq.Eq{nwcIDCol: networksToDelete}). RunWith(store.tx). Exec() if err != nil { return errors.Wrap(err, "failed to delete configs associated with networks") } _, err = store.builder.Delete(networksTable).Where(sq.Eq{nwIDCol: networksToDelete}). RunWith(store.tx). 
Exec() if err != nil { return errors.Wrap(err, "failed to delete networks") } return nil } func (store *sqlConfiguratorStorage) CountEntities(networkID string, filter EntityLoadFilter) (EntityCountResult, error) { ret := EntityCountResult{Count: 0} count, err := store.countEntities(networkID, filter) if err != nil { return ret, err } ret.Count = count return ret, nil } func (store *sqlConfiguratorStorage) LoadEntities(networkID string, filter EntityLoadFilter, criteria EntityLoadCriteria) (EntityLoadResult, error) { if err := validatePaginatedLoadParameters(filter, criteria); err != nil { return EntityLoadResult{}, err } entsByTK, err := store.loadEntities(networkID, filter, criteria) if err != nil { return EntityLoadResult{}, err } if criteria.LoadAssocsFromThis { assocs, err := store.loadAssocs(networkID, filter, criteria, loadChildren) if err != nil { return EntityLoadResult{}, err } for _, assoc := range assocs { // Assoc may be from a not-loaded ent e, ok := entsByTK[assoc.fromTK] if ok { e.Associations = append(e.Associations, assoc.getToID()) } } for _, ent := range entsByTK { SortIDs(ent.Associations) // for deterministic return } } if criteria.LoadAssocsToThis { parentAssocs, err := store.loadAssocs(networkID, filter, criteria, loadParents) if err != nil { return EntityLoadResult{}, err } for _, parentAssoc := range parentAssocs { // Assoc may be to a not-loaded ent e, ok := entsByTK[parentAssoc.toTK] if ok { e.ParentAssociations = append(e.ParentAssociations, parentAssoc.getFromID()) } } for _, ent := range entsByTK { SortIDs(ent.ParentAssociations) // for deterministic return } } res := EntityLoadResult{} for _, ent := range entsByTK { res.Entities = append(res.Entities, ent) } SortEntities(res.Entities) // for deterministic return res.EntitiesNotFound = calculateIDsNotFound(entsByTK, filter.IDs) // Set next page token when there may be more pages to return if len(res.Entities) == store.getEntityLoadPageSize(criteria) { res.NextPageToken, err = getNextPageToken(res.Entities) if err != nil { return EntityLoadResult{}, err } } return res, nil } func (store *sqlConfiguratorStorage) CreateEntity(networkID string, entity NetworkEntity) (NetworkEntity, error) { exists, err := store.doesEntExist(networkID, entity.GetTypeAndKey()) if err != nil { return NetworkEntity{}, err } if exists { return NetworkEntity{}, errors.Errorf("an entity '%s' already exists", entity.GetTypeAndKey()) } // Physical ID must be unique across all networks, since we use a gateway's // physical ID to search for its network (and ent) physicalIDExists, err := store.doesPhysicalIDExist(entity.GetPhysicalID()) if err != nil { return NetworkEntity{}, err } if physicalIDExists { return NetworkEntity{}, errors.Errorf("an entity with physical ID '%s' already exists", entity.GetPhysicalID()) } // First insert the associations as graph edges. This step involves a // lookup of the associated entities to retrieve their PKs (since we don't // trust the provided PKs). // Finally, if the created entity "bridges" 1 or more graphs, we merge // those graphs into a single graph. // For simplicity, we don't do any cycle detection at the moment. This // shouldn't be a problem on the load side because we load graphs via // graph ID, not by traversing edges. 
createdEnt, err := store.insertIntoEntityTable(networkID, entity) if err != nil { return NetworkEntity{}, err } allAssociatedEntsByTk, err := store.createEdges(networkID, createdEnt) if err != nil { return NetworkEntity{}, err } newGraphID, err := store.mergeGraphs(createdEnt, allAssociatedEntsByTk) if err != nil { return NetworkEntity{}, err } createdEnt.GraphID = newGraphID // If we were given duplicate edges, get rid of those if funk.NotEmpty(createdEnt.Associations) { createdEnt.Associations = funk.Chain(createdEnt.Associations). Map(func(id *EntityID) storage.TypeAndKey { return id.ToTypeAndKey() }). Uniq(). Map(func(tk storage.TypeAndKey) *EntityID { return (&EntityID{}).FromTypeAndKey(tk) }). Value().([]*EntityID) } createdEnt.NetworkID = networkID return createdEnt, nil } func (store *sqlConfiguratorStorage) UpdateEntity(networkID string, update EntityUpdateCriteria) (NetworkEntity, error) { emptyRet := NetworkEntity{Type: update.Type, Key: update.Key} entToUpdate, err := store.loadEntToUpdate(networkID, update) if err != nil && !update.DeleteEntity { return emptyRet, errors.Wrap(err, "failed to load entity being updated") } if entToUpdate == nil { return emptyRet, nil } if update.DeleteEntity { // Cascading FK relations in the schema will handle the other tables _, err := store.builder.Delete(entityTable). Where(sq.And{ sq.Eq{entNidCol: networkID}, sq.Eq{entTypeCol: update.Type}, sq.Eq{entKeyCol: update.Key}, }). RunWith(store.tx). Exec() if err != nil { return emptyRet, errors.Wrapf(err, "failed to delete entity (%s, %s)", update.Type, update.Key) } // Deleting a node could partition its graph err = store.fixGraph(networkID, entToUpdate.GraphID, entToUpdate) if err != nil { return emptyRet, errors.Wrap(err, "failed to fix entity graph after deletion") } return emptyRet, nil } // Then, update the fields on the entity table entToUpdate.NetworkID = networkID err = store.processEntityFieldsUpdate(entToUpdate.Pk, update, entToUpdate) if err != nil { return *entToUpdate, errors.WithStack(err) } // Finally, process edge updates for the graph err = store.processEdgeUpdates(networkID, update, entToUpdate) if err != nil { return *entToUpdate, errors.WithStack(err) } return *entToUpdate, nil } func (store *sqlConfiguratorStorage) LoadGraphForEntity(networkID string, entityID EntityID, loadCriteria EntityLoadCriteria) (EntityGraph, error) { // We just care about getting the graph ID off this entity so use an empty // load criteria singleEnt, err := store.loadEntities(networkID, EntityLoadFilter{IDs: []*EntityID{&entityID}}, EntityLoadCriteria{}) if err != nil { return EntityGraph{}, errors.Wrap(err, "failed to load entity for graph query") } var ent *NetworkEntity for _, e := range singleEnt { ent = e } if ent == nil { return EntityGraph{}, errors.Errorf("could not find requested entity (%s) for graph query", entityID.String()) } internalGraph, err := store.loadGraphInternal(networkID, ent.GraphID, loadCriteria) if err != nil { return EntityGraph{}, errors.WithStack(err) } rootPKs := findRootNodes(internalGraph) if funk.IsEmpty(rootPKs) { return EntityGraph{}, errors.Errorf("graph does not have root nodes") } edges, err := updateEntitiesWithAssocs(internalGraph.entsByTK, internalGraph.edges) if err != nil { return EntityGraph{}, errors.Wrap(err, "failed to construct graph after loading") } // To make testing easier, we'll order the returned entities by TK entsByPK := internalGraph.entsByTK.ByPK() retEnts := internalGraph.entsByTK.Ents() retRoots := funk.Map(rootPKs, func(pk string) *EntityID 
{ return &EntityID{Type: entsByPK[pk].Type, Key: entsByPK[pk].Key} }).([]*EntityID) SortEntities(retEnts) SortIDs(retRoots) return EntityGraph{ Entities: retEnts, RootEntities: retRoots, Edges: edges, }, nil }
[ "\"SQL_DRIVER\"" ]
[]
[ "SQL_DRIVER" ]
[]
["SQL_DRIVER"]
go
1
0
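The SQL_DRIVER variable in this row is read once, to decide whether a failed "ALTER TABLE ... ADD COLUMN IF NOT EXISTS" is fatal: sqlite3 (used only in unit tests) does not support that syntax, so the error is tolerated there. A hedged sketch of just that decision; tolerateOnSqlite is an invented helper name, not the original method.

package main

import (
	"fmt"
	"os"
)

// tolerateOnSqlite treats an ALTER TABLE failure as fatal only when the
// configured driver is not sqlite3, mirroring the special case in the row.
func tolerateOnSqlite(alterErr error) error {
	if alterErr != nil && os.Getenv("SQL_DRIVER") != "sqlite3" {
		return fmt.Errorf("failed to add 'type' field to networks table: %w", alterErr)
	}
	return nil
}

func main() {
	fmt.Println(tolerateOnSqlite(nil))
}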
sample-main.go
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package main import ( "flag" "os" "runtime" "github.com/kubernetes-incubator/custom-metrics-apiserver/pkg/sample-cmd/server" "k8s.io/apimachinery/pkg/util/wait" "k8s.io/apiserver/pkg/util/logs" ) func main() { logs.InitLogs() defer logs.FlushLogs() if len(os.Getenv("GOMAXPROCS")) == 0 { runtime.GOMAXPROCS(runtime.NumCPU()) } cmd := server.NewCommandStartSampleAdapterServer(os.Stdout, os.Stderr, wait.NeverStop) cmd.Flags().AddGoFlagSet(flag.CommandLine) if err := cmd.Execute(); err != nil { panic(err) } }
[ "\"GOMAXPROCS\"" ]
[]
[ "GOMAXPROCS" ]
[]
["GOMAXPROCS"]
go
1
0
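The GOMAXPROCS check in this row only overrides the Go runtime setting when the variable is unset, so an explicit value from the environment always wins. A minimal sketch of that pattern under the same assumption; setDefaultGOMAXPROCS is an illustrative name.

package main

import (
	"fmt"
	"os"
	"runtime"
)

// setDefaultGOMAXPROCS sets GOMAXPROCS to the CPU count only when the
// GOMAXPROCS environment variable has not been set by the user.
func setDefaultGOMAXPROCS() {
	if len(os.Getenv("GOMAXPROCS")) == 0 {
		runtime.GOMAXPROCS(runtime.NumCPU())
	}
}

func main() {
	setDefaultGOMAXPROCS()
	fmt.Println("GOMAXPROCS =", runtime.GOMAXPROCS(0))
}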
instagram_scraper/app.py
#!/usr/bin/python # -*- coding: utf-8 -*- import argparse import codecs import configparser import errno import glob from operator import itemgetter import json import logging.config import hashlib import os import pickle import re import sys import textwrap import time try: from urllib.parse import urlparse except ImportError: from urlparse import urlparse import warnings import threading import concurrent.futures import requests import tqdm from instagram_scraper.constants import * try: reload(sys) # Python 2.7 sys.setdefaultencoding("UTF8") except NameError: pass warnings.filterwarnings('ignore') input_lock = threading.RLock() class LockedStream(object): file = None def __init__(self, file): self.file = file def write(self, x): with input_lock: self.file.write(x) def flush(self): return getattr(self.file, 'flush', lambda: None)() original_stdout, original_stderr = sys.stdout, sys.stderr sys.stdout, sys.stderr = map(LockedStream, (sys.stdout, sys.stderr)) def threaded_input(prompt): with input_lock: try: with tqdm.external_write_mode(): original_stdout.write(prompt) original_stdout.flush() return sys.stdin.readline() except AttributeError: original_stdout.write('\n') original_stdout.write(prompt) original_stdout.flush() return sys.stdin.readline() input = threaded_input class PartialContentException(Exception): pass class InstagramScraper(object): """InstagramScraper scrapes and downloads an instagram user's photos and videos""" def __init__(self, **kwargs): default_attr = dict(username='', usernames=[], filename=None, login_user=None, login_pass=None, destination='./', retain_username=False, interactive=False, quiet=False, maximum=0, media_metadata=False, latest=False, latest_stamps=False, cookiejar=None, media_types=['image', 'video', 'story-image', 'story-video'], tag=False, location=False, search_location=False, comments=False, verbose=0, include_location=False, filter=None, template='{urlname}') allowed_attr = list(default_attr.keys()) default_attr.update(kwargs) for key in default_attr: if key in allowed_attr: self.__dict__[key] = default_attr.get(key) # story media type means story-image & story-video if 'story' in self.media_types: self.media_types.remove('story') if 'story-image' not in self.media_types: self.media_types.append('story-image') if 'story-video' not in self.media_types: self.media_types.append('story-video') # Read latest_stamps file with ConfigParser self.latest_stamps_parser = None if self.latest_stamps: parser = configparser.ConfigParser() parser.read(self.latest_stamps) self.latest_stamps_parser = parser # If we have a latest_stamps file, latest must be true as it's the common flag self.latest = True # Set up a logger self.logger = InstagramScraper.get_logger(level=logging.DEBUG, verbose=default_attr.get('verbose')) self.posts = [] self.session = requests.Session() self.session.headers = {'user-agent': CHROME_WIN_UA} if self.cookiejar and os.path.exists(self.cookiejar): with open(self.cookiejar, 'rb') as f: self.session.cookies.update(pickle.load(f)) self.session.cookies.set('ig_pr', '1') self.rhx_gis = None self.cookies = None self.logged_in = False self.last_scraped_filemtime = 0 if default_attr['filter']: self.filter = list(self.filter) self.quit = False def sleep(self, secs): min_delay = 1 for _ in range(secs // min_delay): time.sleep(min_delay) if self.quit: return time.sleep(secs % min_delay) def _retry_prompt(self, url, exception_message): """Show prompt and return True: retry, False: ignore, None: abort""" answer = input( 'Repeated error {0}\n(A)bort, 
(I)gnore, (R)etry or retry (F)orever?'.format(exception_message) ) if answer: answer = answer[0].upper() if answer == 'I': self.logger.info( 'The user has chosen to ignore {0}'.format(url) ) return False elif answer == 'R': return True elif answer == 'F': self.logger.info( 'The user has chosen to retry forever' ) global MAX_RETRIES MAX_RETRIES = sys.maxsize return True else: self.logger.info( 'The user has chosen to abort' ) return None def safe_get(self, *args, **kwargs): # out of the box solution # session.mount('https://', HTTPAdapter(max_retries=...)) # only covers failed DNS lookups, socket connections and connection timeouts # It doesnt work when server terminate connection while response is downloaded retry = 0 retry_delay = RETRY_DELAY while True: if self.quit: return try: response = self.session.get(timeout=CONNECT_TIMEOUT, cookies=self.cookies, *args, **kwargs) if response.status_code == 404: return response.raise_for_status() content_length = response.headers.get('Content-Length') if content_length is not None and len(response.content) != int(content_length): #if content_length is None we repeat anyway to get size and be confident raise PartialContentException('Partial response') return response except (KeyboardInterrupt): raise except (requests.exceptions.RequestException, PartialContentException) as e: if 'url' in kwargs: url = kwargs['url'] elif len(args) > 0: url = args[0] if retry < MAX_RETRIES: self.logger.warning('Retry after exception {0} on {1}'.format(repr(e), url)) self.sleep(retry_delay) retry_delay = min( 2 * retry_delay, MAX_RETRY_DELAY ) retry = retry + 1 continue else: keep_trying = self._retry_prompt(url, repr(e)) if keep_trying == True: retry = 0 continue elif keep_trying == False: return raise def get_json(self, *args, **kwargs): """Retrieve text from url. 
Return text as string or None if no data present """ resp = self.safe_get(*args, **kwargs) if resp is not None: return resp.text def login(self): """Logs in to instagram.""" self.session.headers.update({'Referer': BASE_URL, 'user-agent': STORIES_UA}) req = self.session.get(BASE_URL) self.session.headers.update({'X-CSRFToken': req.cookies['csrftoken']}) login_data = {'username': self.login_user, 'password': self.login_pass} login = self.session.post(LOGIN_URL, data=login_data, allow_redirects=True) self.session.headers.update({'X-CSRFToken': login.cookies['csrftoken']}) self.cookies = login.cookies login_text = json.loads(login.text) if login_text.get('authenticated') and login.status_code == 200: self.logged_in = True self.session.headers = {'user-agent': CHROME_WIN_UA} self.rhx_gis = self.get_shared_data()['rhx_gis'] else: self.logger.error('Login failed for ' + self.login_user) if 'checkpoint_url' in login_text: checkpoint_url = login_text.get('checkpoint_url') self.logger.error('Please verify your account at ' + BASE_URL[0:-1] + checkpoint_url) if self.interactive is True: self.login_challenge(checkpoint_url) elif 'errors' in login_text: for count, error in enumerate(login_text['errors'].get('error')): count += 1 self.logger.debug('Session error %(count)s: "%(error)s"' % locals()) else: self.logger.error(json.dumps(login_text)) def login_challenge(self, checkpoint_url): self.session.headers.update({'Referer': BASE_URL}) req = self.session.get(BASE_URL[:-1] + checkpoint_url) self.session.headers.update({'X-CSRFToken': req.cookies['csrftoken'], 'X-Instagram-AJAX': '1'}) self.session.headers.update({'Referer': BASE_URL[:-1] + checkpoint_url}) mode = int(input('Choose a challenge mode (0 - SMS, 1 - Email): ')) challenge_data = {'choice': mode} challenge = self.session.post(BASE_URL[:-1] + checkpoint_url, data=challenge_data, allow_redirects=True) self.session.headers.update({'X-CSRFToken': challenge.cookies['csrftoken'], 'X-Instagram-AJAX': '1'}) code = int(input('Enter code received: ')) code_data = {'security_code': code} code = self.session.post(BASE_URL[:-1] + checkpoint_url, data=code_data, allow_redirects=True) self.session.headers.update({'X-CSRFToken': code.cookies['csrftoken']}) self.cookies = code.cookies code_text = json.loads(code.text) if code_text.get('status') == 'ok': self.logged_in = True elif 'errors' in code.text: for count, error in enumerate(code_text['challenge']['errors']): count += 1 self.logger.error('Session error %(count)s: "%(error)s"' % locals()) else: self.logger.error(json.dumps(code_text)) def logout(self): """Logs out of instagram.""" if self.logged_in: try: logout_data = {'csrfmiddlewaretoken': self.cookies['csrftoken']} self.session.post(LOGOUT_URL, data=logout_data) self.logged_in = False except requests.exceptions.RequestException: self.logger.warning('Failed to log out ' + self.login_user) def get_dst_dir(self, username): """Gets the destination directory and last scraped file time.""" if self.destination == './': dst = './' + username else: if self.retain_username: dst = self.destination + '/' + username else: dst = self.destination # Resolve last scraped filetime if self.latest_stamps_parser: self.last_scraped_filemtime = self.get_last_scraped_timestamp(username) elif os.path.isdir(dst): self.last_scraped_filemtime = self.get_last_scraped_filemtime(dst) return dst def make_dir(self, dst): try: os.makedirs(dst) except OSError as err: if err.errno == errno.EEXIST and os.path.isdir(dst): # Directory already exists pass else: # Target dir exists as a 
file, or a different error raise def get_last_scraped_timestamp(self, username): if self.latest_stamps_parser: try: return self.latest_stamps_parser.getint(LATEST_STAMPS_USER_SECTION, username) except configparser.Error: pass return 0 def set_last_scraped_timestamp(self, username, timestamp): if self.latest_stamps_parser: if not self.latest_stamps_parser.has_section(LATEST_STAMPS_USER_SECTION): self.latest_stamps_parser.add_section(LATEST_STAMPS_USER_SECTION) self.latest_stamps_parser.set(LATEST_STAMPS_USER_SECTION, username, str(timestamp)) with open(self.latest_stamps, 'w') as f: self.latest_stamps_parser.write(f) def get_last_scraped_filemtime(self, dst): """Stores the last modified time of newest file in a directory.""" list_of_files = [] file_types = ('*.jpg', '*.mp4') for type in file_types: list_of_files.extend(glob.glob(dst + '/' + type)) if list_of_files: latest_file = max(list_of_files, key=os.path.getmtime) return int(os.path.getmtime(latest_file)) return 0 def query_comments_gen(self, shortcode, end_cursor=''): """Generator for comments.""" comments, end_cursor = self.__query_comments(shortcode, end_cursor) if comments: try: while True: for item in comments: yield item if end_cursor: comments, end_cursor = self.__query_comments(shortcode, end_cursor) else: return except ValueError: self.logger.exception('Failed to query comments for shortcode ' + shortcode) def __query_comments(self, shortcode, end_cursor=''): params = QUERY_COMMENTS_VARS.format(shortcode, end_cursor) self.update_ig_gis_header(params) resp = self.get_json(QUERY_COMMENTS.format(params)) if resp is not None: payload = json.loads(resp)['data']['shortcode_media'] if payload: container = payload['edge_media_to_comment'] comments = [node['node'] for node in container['edges']] end_cursor = container['page_info']['end_cursor'] return comments, end_cursor return None, None def scrape_hashtag(self): self.__scrape_query(self.query_hashtag_gen) def scrape_location(self): self.__scrape_query(self.query_location_gen) def worker_wrapper(self, fn, *args, **kwargs): try: if self.quit: return return fn(*args, **kwargs) except: self.logger.debug("Exception in worker thread", exc_info=sys.exc_info()) raise def __scrape_query(self, media_generator, executor=concurrent.futures.ThreadPoolExecutor(max_workers=MAX_CONCURRENT_DOWNLOADS)): """Scrapes the specified value for posted media.""" self.quit = False try: for value in self.usernames: self.posts = [] self.last_scraped_filemtime = 0 greatest_timestamp = 0 future_to_item = {} dst = self.get_dst_dir(value) if self.include_location: media_exec = concurrent.futures.ThreadPoolExecutor(max_workers=5) iter = 0 for item in tqdm.tqdm(media_generator(value), desc='Searching {0} for posts'.format(value), unit=" media", disable=self.quiet): if ((item['is_video'] is False and 'image' in self.media_types) or \ (item['is_video'] is True and 'video' in self.media_types) ) and self.is_new_media(item): future = executor.submit(self.worker_wrapper, self.download, item, dst) future_to_item[future] = item if self.include_location and 'location' not in item: media_exec.submit(self.worker_wrapper, self.__get_location, item) if self.comments: item['edge_media_to_comment']['data'] = list(self.query_comments_gen(item['shortcode'])) if self.media_metadata or self.comments or self.include_location: self.posts.append(item) iter = iter + 1 if self.maximum != 0 and iter >= self.maximum: break if future_to_item: for future in tqdm.tqdm(concurrent.futures.as_completed(future_to_item), total=len(future_to_item), 
desc='Downloading', disable=self.quiet): item = future_to_item[future] if future.exception() is not None: self.logger.warning( 'Media for {0} at {1} generated an exception: {2}'.format(value, item['urls'], future.exception())) else: timestamp = self.__get_timestamp(item) if timestamp > greatest_timestamp: greatest_timestamp = timestamp # Even bother saving it? if greatest_timestamp > self.last_scraped_filemtime: self.set_last_scraped_timestamp(value, greatest_timestamp) if (self.media_metadata or self.comments or self.include_location) and self.posts: self.save_json(self.posts, '{0}/{1}.json'.format(dst, value)) finally: self.quit = True def query_hashtag_gen(self, hashtag): return self.__query_gen(QUERY_HASHTAG, QUERY_HASHTAG_VARS, 'hashtag', hashtag) def query_location_gen(self, location): return self.__query_gen(QUERY_LOCATION, QUERY_LOCATION_VARS, 'location', location) def __query_gen(self, url, variables, entity_name, query, end_cursor=''): """Generator for hashtag and location.""" nodes, end_cursor = self.__query(url, variables, entity_name, query, end_cursor) if nodes: try: while True: for node in nodes: yield node if end_cursor: nodes, end_cursor = self.__query(url, variables, entity_name, query, end_cursor) else: return except ValueError: self.logger.exception('Failed to query ' + query) def __query(self, url, variables, entity_name, query, end_cursor): params = variables.format(query, end_cursor) self.update_ig_gis_header(params) resp = self.get_json(url.format(params)) if resp is not None: payload = json.loads(resp)['data'][entity_name] if payload: nodes = [] if end_cursor == '': top_posts = payload['edge_' + entity_name + '_to_top_posts'] nodes.extend(self._get_nodes(top_posts)) posts = payload['edge_' + entity_name + '_to_media'] nodes.extend(self._get_nodes(posts)) end_cursor = posts['page_info']['end_cursor'] return nodes, end_cursor return None, None def _get_nodes(self, container): return [self.augment_node(node['node']) for node in container['edges']] def augment_node(self, node): self.extract_tags(node) details = None if self.include_location and 'location' not in node: details = self.__get_media_details(node['shortcode']) node['location'] = details.get('location') if details else None if 'urls' not in node: node['urls'] = [] if node['is_video'] and 'video_url' in node: node['urls'] = [node['video_url']] elif '__typename' in node and node['__typename'] == 'GraphImage': node['urls'] = [self.get_original_image(node['display_url'])] else: if details is None: details = self.__get_media_details(node['shortcode']) if details: if '__typename' in details and details['__typename'] == 'GraphVideo': node['urls'] = [details['video_url']] elif '__typename' in details and details['__typename'] == 'GraphSidecar': urls = [] for carousel_item in details['edge_sidecar_to_children']['edges']: urls += self.augment_node(carousel_item['node'])['urls'] node['urls'] = urls else: node['urls'] = [self.get_original_image(details['display_url'])] return node def __get_media_details(self, shortcode): resp = self.get_json(VIEW_MEDIA_URL.format(shortcode)) if resp is not None: try: return json.loads(resp)['graphql']['shortcode_media'] except ValueError: self.logger.warning('Failed to get media details for ' + shortcode) else: self.logger.warning('Failed to get media details for ' + shortcode) def __get_location(self, item): code = item.get('shortcode', item.get('code')) if code: details = self.__get_media_details(code) item['location'] = details.get('location') def scrape(self, 
executor=concurrent.futures.ThreadPoolExecutor(max_workers=MAX_CONCURRENT_DOWNLOADS)): """Crawls through and downloads user's media""" self.session.headers = {'user-agent': STORIES_UA} try: for username in self.usernames: self.posts = [] self.last_scraped_filemtime = 0 greatest_timestamp = 0 future_to_item = {} dst = self.get_dst_dir(username) # Get the user metadata. shared_data = self.get_shared_data(username) user = self.deep_get(shared_data, 'entry_data.ProfilePage[0].graphql.user') if not user: self.logger.error( 'Error getting user details for {0}. Please verify that the user exists.'.format(username)) continue elif user and user['is_private'] and user['edge_owner_to_timeline_media']['count'] > 0 and not \ user['edge_owner_to_timeline_media']['edges']: self.logger.error('User {0} is private'.format(username)) self.rhx_gis = shared_data['rhx_gis'] self.get_profile_pic(dst, executor, future_to_item, user, username) self.get_stories(dst, executor, future_to_item, user, username) # Crawls the media and sends it to the executor. try: self.get_media(dst, executor, future_to_item, user) # Displays the progress bar of completed downloads. Might not even pop up if all media is downloaded while # the above loop finishes. if future_to_item: for future in tqdm.tqdm(concurrent.futures.as_completed(future_to_item), total=len(future_to_item), desc='Downloading', disable=self.quiet): item = future_to_item[future] if future.exception() is not None: self.logger.error( 'Media at {0} generated an exception: {1}'.format(item['urls'], future.exception())) else: timestamp = self.__get_timestamp(item) if timestamp > greatest_timestamp: greatest_timestamp = timestamp # Even bother saving it? if greatest_timestamp > self.last_scraped_filemtime: self.set_last_scraped_timestamp(username, greatest_timestamp) if (self.media_metadata or self.comments or self.include_location) and self.posts: self.save_json(self.posts, '{0}/{1}.json'.format(dst, username)) except ValueError: self.logger.error("Unable to scrape user - %s" % username) finally: self.quit = True self.logout() def get_profile_pic(self, dst, executor, future_to_item, user, username): if 'image' not in self.media_types: return url = USER_INFO.format(user['id']) resp = self.get_json(url) if resp is None: self.logger.error('Error getting user info for {0}'.format(username)) return user_info = json.loads(resp)['user'] if user_info['has_anonymous_profile_picture']: return try: profile_pic_urls = [ user_info['hd_profile_pic_url_info']['url'], user_info['hd_profile_pic_versions'][-1]['url'], ] profile_pic_url = next(url for url in profile_pic_urls if url is not None) except (KeyError, IndexError, StopIteration): self.logger.warning('Failed to get high resolution profile picture for {0}'.format(username)) profile_pic_url = user['profile_pic_url_hd'] item = {'urls': [profile_pic_url], 'username': username, 'shortcode':'', 'created_time': 1286323200, '__typename': 'GraphProfilePic'} if self.latest is False or os.path.isfile(dst + '/' + item['urls'][0].split('/')[-1]) is False: for item in tqdm.tqdm([item], desc='Searching {0} for profile pic'.format(username), unit=" images", ncols=0, disable=self.quiet): future = executor.submit(self.worker_wrapper, self.download, item, dst) future_to_item[future] = item def get_stories(self, dst, executor, future_to_item, user, username): """Scrapes the user's stories.""" if self.logged_in and \ ('story-image' in self.media_types or 'story-video' in self.media_types): # Get the user's stories. 
stories = self.fetch_stories(user['id']) # Downloads the user's stories and sends it to the executor. iter = 0 for item in tqdm.tqdm(stories, desc='Searching {0} for stories'.format(username), unit=" media", disable=self.quiet): if self.story_has_selected_media_types(item) and self.is_new_media(item): item['username'] = username item['shortcode'] = '' future = executor.submit(self.worker_wrapper, self.download, item, dst) future_to_item[future] = item iter = iter + 1 if self.maximum != 0 and iter >= self.maximum: break def get_media(self, dst, executor, future_to_item, user): """Scrapes the user's posts for media.""" if 'image' not in self.media_types and 'video' not in self.media_types and 'none' not in self.media_types: return username = user['username'] if self.include_location: media_exec = concurrent.futures.ThreadPoolExecutor(max_workers=5) iter = 0 for item in tqdm.tqdm(self.query_media_gen(user), desc='Searching {0} for posts'.format(username), unit=' media', disable=self.quiet): # -Filter command line if self.filter: if 'tags' in item: filtered = any(x in item['tags'] for x in self.filter) if self.has_selected_media_types(item) and self.is_new_media(item) and filtered: item['username']=username future = executor.submit(self.worker_wrapper, self.download, item, dst) future_to_item[future] = item else: # For when filter is on but media doesnt contain tags pass # --------------# else: if self.has_selected_media_types(item) and self.is_new_media(item): item['username']=username future = executor.submit(self.worker_wrapper, self.download, item, dst) future_to_item[future] = item if self.include_location: item['username']=username media_exec.submit(self.worker_wrapper, self.__get_location, item) if self.comments: item['username']=username item['comments'] = {'data': list(self.query_comments_gen(item['shortcode']))} if self.media_metadata or self.comments or self.include_location: item['username']=username self.posts.append(item) iter = iter + 1 if self.maximum != 0 and iter >= self.maximum: break def get_shared_data(self, username=''): """Fetches the user's metadata.""" resp = self.get_json(BASE_URL + username) if resp is not None and '_sharedData' in resp: try: shared_data = resp.split("window._sharedData = ")[1].split(";</script>")[0] return json.loads(shared_data) except (TypeError, KeyError, IndexError): pass def fetch_stories(self, user_id): """Fetches the user's stories.""" resp = self.get_json(STORIES_URL.format(user_id)) if resp is not None: retval = json.loads(resp) if retval['data'] and 'reels_media' in retval['data'] and len(retval['data']['reels_media']) > 0 and len(retval['data']['reels_media'][0]['items']) > 0: return [self.set_story_url(item) for item in retval['data']['reels_media'][0]['items']] return [] def query_media_gen(self, user, end_cursor=''): """Generator for media.""" media, end_cursor = self.__query_media(user['id'], end_cursor) if media: try: while True: for item in media: if not self.is_new_media(item): return yield item if end_cursor: media, end_cursor = self.__query_media(user['id'], end_cursor) else: return except ValueError: self.logger.exception('Failed to query media for user ' + user['username']) def __query_media(self, id, end_cursor=''): params = QUERY_MEDIA_VARS.format(id, end_cursor) self.update_ig_gis_header(params) resp = self.get_json(QUERY_MEDIA.format(params)) if resp is not None: payload = json.loads(resp)['data']['user'] if payload: container = payload['edge_owner_to_timeline_media'] nodes = self._get_nodes(container) end_cursor = 
container['page_info']['end_cursor'] return nodes, end_cursor return None, None def get_ig_gis(self, rhx_gis, params): data = rhx_gis + ":" + params if sys.version_info.major >= 3: return hashlib.md5(data.encode('utf-8')).hexdigest() else: return hashlib.md5(data).hexdigest() def update_ig_gis_header(self, params): self.session.headers.update({ 'x-instagram-gis': self.get_ig_gis( self.rhx_gis, params ) }) def has_selected_media_types(self, item): filetypes = {'jpg': 0, 'mp4': 0} for url in item['urls']: ext = self.__get_file_ext(url) if ext not in filetypes: filetypes[ext] = 0 filetypes[ext] += 1 if ('image' in self.media_types and filetypes['jpg'] > 0) or \ ('video' in self.media_types and filetypes['mp4'] > 0): return True return False def story_has_selected_media_types(self, item): # media_type 1 is image, 2 is video if item['__typename'] == 'GraphStoryImage' and 'story-image' in self.media_types: return True if item['__typename'] == 'GraphStoryVideo' and 'story-video' in self.media_types: return True return False def extract_tags(self, item): """Extracts the hashtags from the caption text.""" caption_text = '' if 'caption' in item and item['caption']: if isinstance(item['caption'], dict): caption_text = item['caption']['text'] else: caption_text = item['caption'] elif 'edge_media_to_caption' in item and item['edge_media_to_caption'] and item['edge_media_to_caption'][ 'edges']: caption_text = item['edge_media_to_caption']['edges'][0]['node']['text'] if caption_text: # include words and emojis item['tags'] = re.findall( r"(?<!&)#(\w+|(?:[\xA9\xAE\u203C\u2049\u2122\u2139\u2194-\u2199\u21A9\u21AA\u231A\u231B\u2328\u2388\u23CF\u23E9-\u23F3\u23F8-\u23FA\u24C2\u25AA\u25AB\u25B6\u25C0\u25FB-\u25FE\u2600-\u2604\u260E\u2611\u2614\u2615\u2618\u261D\u2620\u2622\u2623\u2626\u262A\u262E\u262F\u2638-\u263A\u2648-\u2653\u2660\u2663\u2665\u2666\u2668\u267B\u267F\u2692-\u2694\u2696\u2697\u2699\u269B\u269C\u26A0\u26A1\u26AA\u26AB\u26B0\u26B1\u26BD\u26BE\u26C4\u26C5\u26C8\u26CE\u26CF\u26D1\u26D3\u26D4\u26E9\u26EA\u26F0-\u26F5\u26F7-\u26FA\u26FD\u2702\u2705\u2708-\u270D\u270F\u2712\u2714\u2716\u271D\u2721\u2728\u2733\u2734\u2744\u2747\u274C\u274E\u2753-\u2755\u2757\u2763\u2764\u2795-\u2797\u27A1\u27B0\u27BF\u2934\u2935\u2B05-\u2B07\u2B1B\u2B1C\u2B50\u2B55\u3030\u303D\u3297\u3299]|\uD83C[\uDC04\uDCCF\uDD70\uDD71\uDD7E\uDD7F\uDD8E\uDD91-\uDD9A\uDE01\uDE02\uDE1A\uDE2F\uDE32-\uDE3A\uDE50\uDE51\uDF00-\uDF21\uDF24-\uDF93\uDF96\uDF97\uDF99-\uDF9B\uDF9E-\uDFF0\uDFF3-\uDFF5\uDFF7-\uDFFF]|\uD83D[\uDC00-\uDCFD\uDCFF-\uDD3D\uDD49-\uDD4E\uDD50-\uDD67\uDD6F\uDD70\uDD73-\uDD79\uDD87\uDD8A-\uDD8D\uDD90\uDD95\uDD96\uDDA5\uDDA8\uDDB1\uDDB2\uDDBC\uDDC2-\uDDC4\uDDD1-\uDDD3\uDDDC-\uDDDE\uDDE1\uDDE3\uDDEF\uDDF3\uDDFA-\uDE4F\uDE80-\uDEC5\uDECB-\uDED0\uDEE0-\uDEE5\uDEE9\uDEEB\uDEEC\uDEF0\uDEF3]|\uD83E[\uDD10-\uDD18\uDD80-\uDD84\uDDC0]|(?:0\u20E3|1\u20E3|2\u20E3|3\u20E3|4\u20E3|5\u20E3|6\u20E3|7\u20E3|8\u20E3|9\u20E3|#\u20E3|\\*\u20E3|\uD83C(?:\uDDE6\uD83C(?:\uDDEB|\uDDFD|\uDDF1|\uDDF8|\uDDE9|\uDDF4|\uDDEE|\uDDF6|\uDDEC|\uDDF7|\uDDF2|\uDDFC|\uDDE8|\uDDFA|\uDDF9|\uDDFF|\uDDEA)|\uDDE7\uD83C(?:\uDDF8|\uDDED|\uDDE9|\uDDE7|\uDDFE|\uDDEA|\uDDFF|\uDDEF|\uDDF2|\uDDF9|\uDDF4|\uDDE6|\uDDFC|\uDDFB|\uDDF7|\uDDF3|\uDDEC|\uDDEB|\uDDEE|\uDDF6|\uDDF1)|\uDDE8\uD83C(?:\uDDF2|\uDDE6|\uDDFB|\uDDEB|\uDDF1|\uDDF3|\uDDFD|\uDDF5|\uDDE8|\uDDF4|\uDDEC|\uDDE9|\uDDF0|\uDDF7|\uDDEE|\uDDFA|\uDDFC|\uDDFE|\uDDFF|\uDDED)|\uDDE9\uD83C(?:\uDDFF|\uDDF0|\uDDEC|\uDDEF|\uDDF2|\uDDF4|\uDDEA)|\uDDEA\uD83C(?:\uDDE6|\uDDE8|\uDDEC|\uDDF7|\uDDEA|\uDDF9|\uDDFA|\uDDF8|\uDDED)|\
uDDEB\uD83C(?:\uDDF0|\uDDF4|\uDDEF|\uDDEE|\uDDF7|\uDDF2)|\uDDEC\uD83C(?:\uDDF6|\uDDEB|\uDDE6|\uDDF2|\uDDEA|\uDDED|\uDDEE|\uDDF7|\uDDF1|\uDDE9|\uDDF5|\uDDFA|\uDDF9|\uDDEC|\uDDF3|\uDDFC|\uDDFE|\uDDF8|\uDDE7)|\uDDED\uD83C(?:\uDDF7|\uDDF9|\uDDF2|\uDDF3|\uDDF0|\uDDFA)|\uDDEE\uD83C(?:\uDDF4|\uDDE8|\uDDF8|\uDDF3|\uDDE9|\uDDF7|\uDDF6|\uDDEA|\uDDF2|\uDDF1|\uDDF9)|\uDDEF\uD83C(?:\uDDF2|\uDDF5|\uDDEA|\uDDF4)|\uDDF0\uD83C(?:\uDDED|\uDDFE|\uDDF2|\uDDFF|\uDDEA|\uDDEE|\uDDFC|\uDDEC|\uDDF5|\uDDF7|\uDDF3)|\uDDF1\uD83C(?:\uDDE6|\uDDFB|\uDDE7|\uDDF8|\uDDF7|\uDDFE|\uDDEE|\uDDF9|\uDDFA|\uDDF0|\uDDE8)|\uDDF2\uD83C(?:\uDDF4|\uDDF0|\uDDEC|\uDDFC|\uDDFE|\uDDFB|\uDDF1|\uDDF9|\uDDED|\uDDF6|\uDDF7|\uDDFA|\uDDFD|\uDDE9|\uDDE8|\uDDF3|\uDDEA|\uDDF8|\uDDE6|\uDDFF|\uDDF2|\uDDF5|\uDDEB)|\uDDF3\uD83C(?:\uDDE6|\uDDF7|\uDDF5|\uDDF1|\uDDE8|\uDDFF|\uDDEE|\uDDEA|\uDDEC|\uDDFA|\uDDEB|\uDDF4)|\uDDF4\uD83C\uDDF2|\uDDF5\uD83C(?:\uDDEB|\uDDF0|\uDDFC|\uDDF8|\uDDE6|\uDDEC|\uDDFE|\uDDEA|\uDDED|\uDDF3|\uDDF1|\uDDF9|\uDDF7|\uDDF2)|\uDDF6\uD83C\uDDE6|\uDDF7\uD83C(?:\uDDEA|\uDDF4|\uDDFA|\uDDFC|\uDDF8)|\uDDF8\uD83C(?:\uDDFB|\uDDF2|\uDDF9|\uDDE6|\uDDF3|\uDDE8|\uDDF1|\uDDEC|\uDDFD|\uDDF0|\uDDEE|\uDDE7|\uDDF4|\uDDF8|\uDDED|\uDDE9|\uDDF7|\uDDEF|\uDDFF|\uDDEA|\uDDFE)|\uDDF9\uD83C(?:\uDDE9|\uDDEB|\uDDFC|\uDDEF|\uDDFF|\uDDED|\uDDF1|\uDDEC|\uDDF0|\uDDF4|\uDDF9|\uDDE6|\uDDF3|\uDDF7|\uDDF2|\uDDE8|\uDDFB)|\uDDFA\uD83C(?:\uDDEC|\uDDE6|\uDDF8|\uDDFE|\uDDF2|\uDDFF)|\uDDFB\uD83C(?:\uDDEC|\uDDE8|\uDDEE|\uDDFA|\uDDE6|\uDDEA|\uDDF3)|\uDDFC\uD83C(?:\uDDF8|\uDDEB)|\uDDFD\uD83C\uDDF0|\uDDFE\uD83C(?:\uDDF9|\uDDEA)|\uDDFF\uD83C(?:\uDDE6|\uDDF2|\uDDFC))))[\ufe00-\ufe0f\u200d]?)+", caption_text, re.UNICODE) item['tags'] = list(set(item['tags'])) return item def get_original_image(self, url): """Gets the full-size image from the specified url.""" # these path parts somehow prevent us from changing the rest of media url #url = re.sub(r'/vp/[0-9A-Fa-f]{32}/[0-9A-Fa-f]{8}/', '/', url) # remove dimensions to get largest image #url = re.sub(r'/[sp]\d{3,}x\d{3,}/', '/', url) # get non-square image if one exists #url = re.sub(r'/c\d{1,}.\d{1,}.\d{1,}.\d{1,}/', '/', url) return url def set_story_url(self, item): """Sets the story url.""" urls = [] if 'video_resources' in item: urls.append(item['video_resources'][-1]['src']) if 'display_resources' in item: urls.append(item['display_resources'][-1]['src']) item['urls'] = urls return item def download(self, item, save_dir='./'): """Downloads the media file.""" for full_url, base_name in self.templatefilename(item): url = full_url.split('?')[0] #try the static url first, stripping parameters file_path = os.path.join(save_dir, base_name) if not os.path.exists(os.path.dirname(file_path)): self.make_dir(os.path.dirname(file_path)) if not os.path.isfile(file_path): headers = {'Host': urlparse(url).hostname} part_file = file_path + '.part' downloaded = 0 total_length = None with open(part_file, 'wb') as media_file: try: retry = 0 retry_delay = RETRY_DELAY while(True): if self.quit: return try: downloaded_before = downloaded headers['Range'] = 'bytes={0}-'.format(downloaded_before) with self.session.get(url, cookies=self.cookies, headers=headers, stream=True, timeout=CONNECT_TIMEOUT) as response: if response.status_code == 404: #instagram don't lie on this break if response.status_code == 403 and url != full_url: #see issue #254 url = full_url continue response.raise_for_status() if response.status_code == 206: try: match = re.match(r'bytes (?P<first>\d+)-(?P<last>\d+)/(?P<size>\d+)', response.headers['Content-Range']) 
range_file_position = int(match.group('first')) if range_file_position != downloaded_before: raise Exception() total_length = int(match.group('size')) media_file.truncate(total_length) except: raise requests.exceptions.InvalidHeader('Invalid range response "{0}" for requested "{1}"'.format( response.headers.get('Content-Range'), headers.get('Range'))) elif response.status_code == 200: if downloaded_before != 0: downloaded_before = 0 downloaded = 0 media_file.seek(0) content_length = response.headers.get('Content-Length') if content_length is None: self.logger.warning('No Content-Length in response, the file {0} may be partially downloaded'.format(base_name)) else: total_length = int(content_length) media_file.truncate(total_length) else: raise PartialContentException('Wrong status code {0}', response.status_code) for chunk in response.iter_content(chunk_size=64*1024): if chunk: downloaded += len(chunk) media_file.write(chunk) if self.quit: return if downloaded != total_length and total_length is not None: raise PartialContentException('Got first {0} bytes from {1}'.format(downloaded, total_length)) break # In case of exception part_file is not removed on purpose, # it is easier to exemine it later when analising logs. # Please do not add os.remove here. except (KeyboardInterrupt): raise except (requests.exceptions.RequestException, PartialContentException) as e: if downloaded - downloaded_before > 0: # if we got some data on this iteration do not count it as a failure self.logger.warning('Continue after exception {0} on {1}'.format(repr(e), url)) retry = 0 # the next fail will be first in a row with no data continue if retry < MAX_RETRIES: self.logger.warning('Retry after exception {0} on {1}'.format(repr(e), url)) self.sleep(retry_delay) retry_delay = min( 2 * retry_delay, MAX_RETRY_DELAY ) retry = retry + 1 continue else: keep_trying = self._retry_prompt(url, repr(e)) if keep_trying == True: retry = 0 continue elif keep_trying == False: break raise finally: media_file.truncate(downloaded) if downloaded == total_length or total_length is None: os.rename(part_file, file_path) timestamp = self.__get_timestamp(item) file_time = int(timestamp if timestamp else time.time()) os.utime(file_path, (file_time, file_time)) def templatefilename(self, item): for url in item['urls']: filename, extension = os.path.splitext(os.path.split(url.split('?')[0])[1]) try: template = self.template template_values = { 'username' : item['username'], 'urlname': filename, 'shortcode': str(item['shortcode']), 'mediatype' : item['__typename'][5:], 'datetime': time.strftime('%Y%m%d %Hh%Mm%Ss', time.localtime(self.__get_timestamp(item))), 'date': time.strftime('%Y%m%d', time.localtime(self.__get_timestamp(item))), 'year': time.strftime('%Y', time.localtime(self.__get_timestamp(item))), 'month': time.strftime('%m', time.localtime(self.__get_timestamp(item))), 'day': time.strftime('%d', time.localtime(self.__get_timestamp(item))), 'h': time.strftime('%Hh', time.localtime(self.__get_timestamp(item))), 'm': time.strftime('%Mm', time.localtime(self.__get_timestamp(item))), 's': time.strftime('%Ss', time.localtime(self.__get_timestamp(item)))} customfilename = str(template.format(**template_values) + extension) yield url, customfilename except KeyError: customfilename = str(filename + extension) yield url, customfilename def is_new_media(self, item): """Returns True if the media is new.""" if self.latest is False or self.last_scraped_filemtime == 0: return True current_timestamp = self.__get_timestamp(item) return 
current_timestamp > 0 and current_timestamp > self.last_scraped_filemtime @staticmethod def __get_timestamp(item): if item: for key in ['taken_at_timestamp', 'created_time', 'taken_at', 'date']: found = item.get(key, 0) try: found = int(found) if found > 1: # >1 to ignore any boolean casts return found except ValueError: pass return 0 @staticmethod def __get_file_ext(url): return os.path.splitext(urlparse(url).path)[1][1:].strip().lower() @staticmethod def __search(query): resp = requests.get(SEARCH_URL.format(query)) return json.loads(resp.text) def search_locations(self): query = ' '.join(self.usernames) result = self.__search(query) if len(result['places']) == 0: raise ValueError("No locations found for query '{0}'".format(query)) sorted_places = sorted(result['places'], key=itemgetter('position')) for item in sorted_places[0:5]: place = item['place'] print('location-id: {0}, title: {1}, subtitle: {2}, city: {3}, lat: {4}, lng: {5}'.format( place['location']['pk'], place['title'], place['subtitle'], place['location']['city'], place['location']['lat'], place['location']['lng'] )) @staticmethod def save_json(data, dst='./'): """Saves the data to a json file.""" if not os.path.exists(os.path.dirname(dst)): os.makedirs(os.path.dirname(dst)) if data: with open(dst, 'wb') as f: json.dump(data, codecs.getwriter('utf-8')(f), indent=4, sort_keys=True, ensure_ascii=False) @staticmethod def get_logger(level=logging.DEBUG, verbose=0): """Returns a logger.""" logger = logging.getLogger(__name__) fh = logging.FileHandler('instagram-scraper.log', 'w') fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')) fh.setLevel(level) logger.addHandler(fh) sh = logging.StreamHandler(sys.stdout) sh.setFormatter(logging.Formatter('%(levelname)s: %(message)s')) sh_lvls = [logging.ERROR, logging.WARNING, logging.INFO] sh.setLevel(sh_lvls[verbose]) logger.addHandler(sh) logger.setLevel(level) return logger @staticmethod def parse_file_usernames(usernames_file): """Parses a file containing a list of usernames.""" users = [] try: with open(usernames_file) as user_file: for line in user_file.readlines(): # Find all usernames delimited by ,; or whitespace users += re.findall(r'[^,;\s]+', line.split("#")[0]) except IOError as err: raise ValueError('File not found ' + err) return users @staticmethod def parse_delimited_str(input): """Parse the string input as a list of delimited tokens.""" return re.findall(r'[^,;\s]+', input) def deep_get(self, dict, path): def _split_indexes(key): split_array_index = re.compile(r'[.\[\]]+') # ['foo', '0'] return filter(None, split_array_index.split(key)) ends_with_index = re.compile(r'\[(.*?)\]$') # foo[0] keylist = path.split('.') val = dict for key in keylist: try: if ends_with_index.search(key): for prop in _split_indexes(key): if prop.isdigit(): val = val[int(prop)] else: val = val[prop] else: val = val[key] except (KeyError, IndexError, TypeError): return None return val def save_cookies(self): if self.cookiejar: with open(self.cookiejar, 'wb') as f: pickle.dump(self.session.cookies, f) def main(): parser = argparse.ArgumentParser( description="instagram-scraper scrapes and downloads an instagram user's photos and videos.", epilog=textwrap.dedent(""" You can hide your credentials from the history, by reading your username from a local file: $ instagram-scraper @insta_args.txt user_to_scrape with insta_args.txt looking like this: -u=my_username -p=my_password You can add all arguments you want to that file, just remember to have one argument per 
line. Customize filename: by adding option --template or -T Default is: {urlname} And there are some option: {username}: Instagram user(s) to scrape. {shortcode}: post shortcode, but profile_pic and story are none. {urlname}: filename form url. {mediatype}: type of media. {datetime}: date and time that photo/video post on, format is: 20180101 01h01m01s {date}: date that photo/video post on, format is: 20180101 {year}: format is: 2018 {month}: format is: 01-12 {day}: format is: 01-31 {h}: hour, format is: 00-23h {m}: minute, format is 00-59m {s}: second, format is 00-59s """), formatter_class=argparse.RawDescriptionHelpFormatter, fromfile_prefix_chars='@') parser.add_argument('username', help='Instagram user(s) to scrape', nargs='*') parser.add_argument('--destination', '-d', default='./', help='Download destination') parser.add_argument('--login-user', '--login_user', '-u', default=None, help='Instagram login user', required=True) parser.add_argument('--login-pass', '--login_pass', '-p', default=None, help='Instagram login password', required=True) parser.add_argument('--filename', '-f', help='Path to a file containing a list of users to scrape') parser.add_argument('--quiet', '-q', default=False, action='store_true', help='Be quiet while scraping') parser.add_argument('--maximum', '-m', type=int, default=0, help='Maximum number of items to scrape') parser.add_argument('--retain-username', '--retain_username', '-n', action='store_true', default=False, help='Creates username subdirectory when destination flag is set') parser.add_argument('--media-metadata', '--media_metadata', action='store_true', default=False, help='Save media metadata to json file') parser.add_argument('--include-location', '--include_location', action='store_true', default=False, help='Include location data when saving media metadata') parser.add_argument('--media-types', '--media_types', '-t', nargs='+', default=['image', 'video', 'story'], help='Specify media types to scrape') parser.add_argument('--latest', action='store_true', default=False, help='Scrape new media since the last scrape') parser.add_argument('--latest-stamps', '--latest_stamps', default=None, help='Scrape new media since timestamps by user in specified file') parser.add_argument('--cookiejar', '--cookierjar', default=None, help='File in which to store cookies so that they can be reused between runs.') parser.add_argument('--tag', action='store_true', default=False, help='Scrape media using a hashtag') parser.add_argument('--filter', default=None, help='Filter by tags in user posts', nargs='*') parser.add_argument('--location', action='store_true', default=False, help='Scrape media using a location-id') parser.add_argument('--search-location', action='store_true', default=False, help='Search for locations by name') parser.add_argument('--comments', action='store_true', default=False, help='Save post comments to json file') parser.add_argument('--interactive', '-i', action='store_true', default=False, help='Enable interactive login challenge solving') parser.add_argument('--retry-forever', action='store_true', default=False, help='Retry download attempts endlessly when errors are received') parser.add_argument('--verbose', '-v', type=int, default=0, help='Logging verbosity level') parser.add_argument('--template', '-T', type=str, default='{urlname}', help='Customize filename template') args = parser.parse_args() if (args.login_user and args.login_pass is None) or (args.login_user is None and args.login_pass): parser.print_help() raise ValueError('Must 
provide login user AND password') if not args.username and args.filename is None: parser.print_help() raise ValueError('Must provide username(s) OR a file containing a list of username(s)') elif args.username and args.filename: parser.print_help() raise ValueError('Must provide only one of the following: username(s) OR a filename containing username(s)') if args.tag and args.location: parser.print_help() raise ValueError('Must provide only one of the following: hashtag OR location') if args.tag and args.filter: parser.print_help() raise ValueError('Filters apply to user posts') if args.filename: args.usernames = InstagramScraper.parse_file_usernames(args.filename) else: args.usernames = InstagramScraper.parse_delimited_str(','.join(args.username)) if args.media_types and len(args.media_types) == 1 and re.compile(r'[,;\s]+').findall(args.media_types[0]): args.media_types = InstagramScraper.parse_delimited_str(args.media_types[0]) if args.retry_forever: global MAX_RETRIES MAX_RETRIES = sys.maxsize scraper = InstagramScraper(**vars(args)) scraper.login() if args.tag: scraper.scrape_hashtag() elif args.location: scraper.scrape_location() elif args.search_location: scraper.search_locations() else: scraper.scrape() scraper.save_cookies() if __name__ == '__main__': main()
[]
[]
[]
[]
[]
python
null
null
null
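The download loop in the scraper record above retries failed requests with a delay that doubles after each consecutive failure, capped at MAX_RETRY_DELAY, and resets the failure counter whenever some data arrived. Below is a minimal Go sketch of that doubling-and-cap backoff policy only; the constants and the flaky work function are hypothetical stand-ins, not part of the original scraper.

package main

import (
    "errors"
    "fmt"
    "time"
)

const (
    maxRetries    = 5               // assumed stand-in for MAX_RETRIES
    initialDelay  = 1 * time.Second // assumed stand-in for the initial retry delay
    maxRetryDelay = 60 * time.Second
)

// retryWithBackoff keeps calling work until it succeeds or maxRetries
// consecutive failures occur. The delay doubles after every failure and
// is capped at maxRetryDelay, mirroring the delay handling in the record above.
func retryWithBackoff(work func() error) error {
    delay := initialDelay
    for attempt := 0; attempt < maxRetries; attempt++ {
        if err := work(); err == nil {
            return nil
        } else {
            fmt.Printf("retry %d after error: %v (sleeping %s)\n", attempt+1, err, delay)
            time.Sleep(delay)
            if delay*2 < maxRetryDelay {
                delay *= 2
            } else {
                delay = maxRetryDelay
            }
        }
    }
    return errors.New("giving up after max retries")
}

func main() {
    calls := 0
    // Hypothetical flaky operation: fails twice, then succeeds.
    err := retryWithBackoff(func() error {
        calls++
        if calls < 3 {
            return errors.New("temporary failure")
        }
        return nil
    })
    fmt.Println("result:", err)
}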
azure-storage-queue/src/test/java/akka/stream/alpakka/azure/storagequeue/javadsl/JavaDslTest.java
/* * Copyright (C) 2016-2017 Lightbend Inc. <http://www.lightbend.com> */ package akka.stream.alpakka.azure.storagequeue.javadsl; import akka.Done; import akka.NotUsed; import akka.actor.ActorSystem; import akka.stream.ActorMaterializer; import akka.stream.alpakka.azure.storagequeue.*; import akka.stream.javadsl.Sink; import akka.stream.javadsl.Source; import akka.testkit.javadsl.TestKit; import com.microsoft.azure.storage.*; import com.microsoft.azure.storage.queue.*; import java.util.List; import java.util.concurrent.CompletionStage; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; import java.util.concurrent.TimeoutException; import java.util.function.Supplier; import org.junit.AfterClass; import org.junit.Assert; import org.junit.Assume; import org.junit.Before; import org.junit.BeforeClass; import org.junit.Test; import org.scalatest.junit.JUnitSuite; public class JavaDslTest extends JUnitSuite { private static ActorSystem system; private static ActorMaterializer materializer; private static final String storageConnectionString = System.getenv("AZURE_CONNECTION_STRING"); private static final Supplier<CloudQueue> queueSupplier = () -> { try { if (storageConnectionString == null) { return null; } CloudStorageAccount storageAccount = CloudStorageAccount.parse(storageConnectionString); CloudQueueClient queueClient = storageAccount.createCloudQueueClient(); return queueClient.getQueueReference("testqueue"); } catch (Exception ex) { throw new RuntimeException("Could not create CloudQueue", ex); } }; private static final CloudQueue queue = queueSupplier.get(); @BeforeClass public static void setup() throws StorageException, java.net.URISyntaxException, java.security.InvalidKeyException { system = ActorSystem.create(); materializer = ActorMaterializer.create(system); if (queue != null) { queue.createIfNotExists(); } } @AfterClass public static void teardown() throws StorageException { TestKit.shutdownActorSystem(system); if (queue != null) { queue.deleteIfExists(); } } @Before public void clearQueue() throws StorageException { if (queue != null) { queue.clear(); } } @Test public void testAzureQueueSink() throws StorageException, InterruptedException, ExecutionException, TimeoutException { Assume.assumeNotNull(queue); final Source<Integer, NotUsed> sourceInt = Source.range(1, 10); final Source<CloudQueueMessage, NotUsed> source = sourceInt.map(i -> new CloudQueueMessage("Java Azure Cloud Test " + i.toString())); final Sink<CloudQueueMessage, CompletionStage<Done>> sink = AzureQueueSink.create(queueSupplier); source.runWith(sink, materializer).toCompletableFuture().get(10, TimeUnit.SECONDS); Assert.assertNotNull(queue.retrieveMessage()); } @Test public void testAzureQueueWithTimeoutsSink() throws StorageException, InterruptedException, ExecutionException, TimeoutException { Assume.assumeNotNull(queue); final Source<Integer, NotUsed> sourceInt = Source.range(1, 10); final Source<MessageWithTimeouts, NotUsed> source = sourceInt.map( i -> new MessageWithTimeouts( new CloudQueueMessage("Java Azure Cloud Test " + i.toString()), 0, 600)); final Sink<MessageWithTimeouts, CompletionStage<Done>> sink = AzureQueueWithTimeoutsSink.create(queueSupplier); source.runWith(sink, materializer).toCompletableFuture().get(10, TimeUnit.SECONDS); Assert.assertNull( queue.retrieveMessage()); // There should be no message because of inital visibility timeout } @Test public void testAzureQueueSource() throws StorageException, InterruptedException, ExecutionException, 
TimeoutException { Assume.assumeNotNull(queue); // Queue 10 Messages for (int i = 0; i < 10; i++) { queue.addMessage(new CloudQueueMessage("Java Test " + Integer.toString(i))); } final Source<CloudQueueMessage, NotUsed> source = AzureQueueSource.create(queueSupplier); final CompletionStage<List<CloudQueueMessage>> msgs = source.take(10).runWith(Sink.seq(), materializer); msgs.toCompletableFuture().get(10, TimeUnit.SECONDS); } @Test public void testAzureQueueDeleteSink() throws StorageException, InterruptedException, ExecutionException, TimeoutException { Assume.assumeNotNull(queue); // Queue 10 Messages for (int i = 0; i < 10; i++) { queue.addMessage(new CloudQueueMessage("Java Test " + Integer.toString(i))); } // We limit us to buffers of size 1 here, so that there are no stale message in the buffer final Source<CloudQueueMessage, NotUsed> source = AzureQueueSource.create(queueSupplier, AzureQueueSourceSettings.create(20, 1, 0)); final Sink<CloudQueueMessage, CompletionStage<Done>> deleteSink = AzureQueueDeleteSink.create(queueSupplier); final CompletionStage<Done> done = source.take(10).runWith(deleteSink, materializer); done.toCompletableFuture().get(10, TimeUnit.SECONDS); Assert.assertNull(queue.retrieveMessage()); } @Test public void testAzureQueueDeleteOrUpdateSink() throws StorageException, InterruptedException, ExecutionException, TimeoutException { Assume.assumeNotNull(queue); // Queue 10 Messages for (int i = 0; i < 10; i++) { queue.addMessage(new CloudQueueMessage("Java Test " + Integer.toString(i))); } // We limit us to buffers of size 1 here, so that there are no stale message in the buffer final Source<CloudQueueMessage, NotUsed> source = AzureQueueSource.create(queueSupplier, AzureQueueSourceSettings.create(20, 1, 0)); final Sink<MessageAndDeleteOrUpdate, CompletionStage<Done>> deleteOrUpdateSink = AzureQueueDeleteOrUpdateSink.create(queueSupplier); final CompletionStage<Done> done = source .take(10) .map(msg -> new MessageAndDeleteOrUpdate(msg, MessageAndDeleteOrUpdate.delete())) .runWith(deleteOrUpdateSink, materializer); done.toCompletableFuture().get(10, TimeUnit.SECONDS); Assert.assertNull(queue.retrieveMessage()); } }
[ "\"AZURE_CONNECTION_STRING\"" ]
[]
[ "AZURE_CONNECTION_STRING" ]
[]
["AZURE_CONNECTION_STRING"]
java
1
0
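The Java test above only exercises a real queue when AZURE_CONNECTION_STRING is set, and otherwise skips via Assume.assumeNotNull. The same guard expressed with Go's testing package might look like the sketch below; the test name and the queue round-trip are hypothetical, only the environment-variable gate mirrors the record.

package storagequeue_test

import (
    "os"
    "testing"
)

// TestQueueRoundTrip is a hypothetical integration test that is skipped
// unless the connection string is provided via the environment, mirroring
// the Assume.assumeNotNull guard in the Java test above.
func TestQueueRoundTrip(t *testing.T) {
    connStr := os.Getenv("AZURE_CONNECTION_STRING")
    if connStr == "" {
        t.Skip("AZURE_CONNECTION_STRING not set; skipping integration test")
    }
    // Here a real test would connect to the queue, enqueue a message,
    // and assert it can be read back.
}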
cmd/root.go
// Copyright © 2019 Alexey Stolpovskikh <[email protected]> // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cmd import ( "fmt" "os" "path/filepath" "strings" "github.com/alexstov/sling/conf" "github.com/alexstov/sling/cui" "github.com/alexstov/sling/slog" "github.com/cloudflare/cfssl/log" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" ) var logger slog.Logger var sconf *conf.SlingConfig // Con Consoler interface implementation. var Con cui.Consoler // RootCmd sling root command var RootCmd = &cobra.Command{ Use: "sling", DisableFlagsInUseLine: true, Short: "sling network traffic emulator", Long: ` sling is a tool to send files to the endpoint(s), setting trottling and delays between concurrent requests.`, Example: ` # View current sling config sling config view # Send a single file to specified endpoint sling request send -f myfile.dat -a myhost.com -p 9080`, } // RootFlags command flags var RootFlags Flagmapper func init() { var err error // Create logger with default settings. if logger, err = slog.NewLogger(); err != nil { fmt.Println("Failed to create logger", err) return } logger.Out(logrus.TraceLevel, nil, "Calling root command initializer") if err = initSlingFromConfig(); err != nil { fmt.Println("Failed to initialize sling from config.", err) return } } func initSlingFromConfig() (err error) { if sconf, err = LoadConfig(os.Getenv("SLINGCONFIG"), logger); err != nil { return } // Set root command flags. RootFlags, err = NewCmdFlags(RootCmd, CmdRoot, sconf) // Init console if Con, err = cui.NewConsole(&sconf.Console, logger); err != nil { log.Error("Cannot initialize console.", err) return } flagmap := RootFlags.GetFlagmap() // The events are used to set flag values passed explicitly from command line. flagmap[ConFlat].AddEvent(Events[ConsoleSetFlat], Con.SetFlat) flagmap[LogLvl].AddEvent(Events[LogSetLevel], logger.SetLevel) flagmap[ConLvl].AddEvent(Events[ConsoleSetLevel], Con.SetLevel) if err = logger.Configure(&sconf.Log); err != nil { log.Error("Cannot initialize logger.", err) } return } // Execute adds all child commands to the root command and sets flags appropriately. func Execute() { if err := RootCmd.Execute(); err != nil { Con.OutLogAndConsole(logrus.FatalLevel, logrus.Fields{"error": err}, "Root command execution failed.") os.Exit(1) } else { logger.Out(logrus.InfoLevel, nil, "Root command executed.") } } // LoadConfig loads configuration from the file. 
func LoadConfig(path string, logger slog.Logger) (sconf *conf.SlingConfig, err error) { logger.Out(logrus.DebugLevel, nil, "Creating new sling config.") sconf = conf.NewConfig() logger.Out(logrus.DebugLevel, logrus.Fields{"SLINGCONFIG": sconf.Slingconfig}, "Reading sling config.") sconf.Slingconfig = path viper.SetConfigType("yml") basename := filepath.Base(sconf.Slingconfig) viper.SetConfigName(strings.TrimSuffix(basename, filepath.Ext(basename))) viper.AddConfigPath(filepath.Dir(sconf.Slingconfig)) if err := viper.ReadInConfig(); err != nil { logger.Out(logrus.FatalLevel, logrus.Fields{"SLINGCONFIG": sconf.Slingconfig, "error": err}, "Error reading config file.") } if err := viper.Unmarshal(sconf); err != nil { logger.Out(logrus.FatalLevel, logrus.Fields{"SLINGCONFIG": sconf.Slingconfig, "error": err}, "Error parsing config file.") } logger.Out(logrus.TraceLevel, logrus.Fields{"SLINGCONFIG": sconf.Slingconfig}, "Successfully created sling config.") return sconf, nil }
[ "\"SLINGCONFIG\"" ]
[]
[ "SLINGCONFIG" ]
[]
["SLINGCONFIG"]
go
1
0
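cmd/root.go above resolves its configuration file path from the SLINGCONFIG environment variable and hands it to LoadConfig. A stdlib-only Go sketch of that lookup is shown below; the fallback default path is an assumption added for illustration, the original passes the environment value through as-is.

package main

import (
    "fmt"
    "os"
)

// configPath returns the value of SLINGCONFIG, falling back to a
// hypothetical default location when the variable is unset or empty.
func configPath() string {
    if p := os.Getenv("SLINGCONFIG"); p != "" {
        return p
    }
    return "/etc/sling/sling.yml" // assumed default, not taken from the original code
}

func main() {
    path := configPath()
    if _, err := os.Stat(path); err != nil {
        fmt.Fprintf(os.Stderr, "config file %q not readable: %v\n", path, err)
        os.Exit(1)
    }
    fmt.Println("loading config from", path)
}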
orc8r/cloud/docker/nginx/generate_nginx_configs.py
#!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.

This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from typing import Any, Dict

import jinja2
import yaml

CONFIGS_DIR = '/etc/magma/configs'
TEMPLATES_DIR = '/etc/magma/templates'
OUTPUT_DIR = '/etc/nginx'


def _load_services() -> Dict[Any, Any]:
    services = {}  # type: Dict[Any, Any]
    modules = os.listdir(CONFIGS_DIR)
    for module in modules:
        print("Loading registry for module: %s..." % module)
        filename = os.path.join(CONFIGS_DIR, module, "service_registry.yml")
        with open(filename) as file:
            registry = yaml.safe_load(file)
            if registry and "services" in registry:
                services.update(registry["services"])
    return services


def _generate_config(context: Dict[str, Any]) -> str:
    loader = jinja2.FileSystemLoader(TEMPLATES_DIR)
    env = jinja2.Environment(loader=loader)
    template = env.get_template("nginx.conf.j2")
    output = template.render(context)
    outfile = os.path.join(OUTPUT_DIR, "nginx.conf")
    with open(outfile, "w") as file:
        file.write(output)
    return outfile


def main():
    context = {
        'service_registry': _load_services(),
        'controller_hostname': os.environ['CONTROLLER_HOSTNAME'],
        'backend': os.environ['PROXY_BACKENDS'],
        'resolver': os.environ['RESOLVER'],
        'service_registry_mode': os.environ.get('SERVICE_REGISTRY_MODE', 'yaml'),
        'ssl_certificate': os.environ['SSL_CERTIFICATE'],
        'ssl_certificate_key': os.environ['SSL_CERTIFICATE_KEY'],
        'ssl_client_certificate': os.environ['SSL_CLIENT_CERTIFICATE'],
    }
    _generate_config(context)


if __name__ == '__main__':
    main()
[]
[]
[ "SERVICE_REGISTRY_MODE", "PROXY_BACKENDS", "SSL_CLIENT_CERTIFICATE", "RESOLVER", "CONTROLLER_HOSTNAME", "SSL_CERTIFICATE", "SSL_CERTIFICATE_KEY" ]
[]
["SERVICE_REGISTRY_MODE", "PROXY_BACKENDS", "SSL_CLIENT_CERTIFICATE", "RESOLVER", "CONTROLLER_HOSTNAME", "SSL_CERTIFICATE", "SSL_CERTIFICATE_KEY"]
python
7
0
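The nginx generator above treats most of its environment variables as required (a missing os.environ[...] lookup raises KeyError) and only SERVICE_REGISTRY_MODE as optional with a 'yaml' default. A Go sketch of the same required/optional split using os.LookupEnv follows; the variable names come from the record, while the exit-on-missing error handling is an assumption.

package main

import (
    "fmt"
    "os"
)

// requireEnv returns the value of a mandatory variable or exits,
// mirroring the KeyError a missing os.environ[...] lookup would raise.
func requireEnv(name string) string {
    v, ok := os.LookupEnv(name)
    if !ok || v == "" {
        fmt.Fprintf(os.Stderr, "required environment variable %s is not set\n", name)
        os.Exit(1)
    }
    return v
}

// envOrDefault mirrors os.environ.get(name, fallback).
func envOrDefault(name, fallback string) string {
    if v, ok := os.LookupEnv(name); ok {
        return v
    }
    return fallback
}

func main() {
    context := map[string]string{
        "controller_hostname":   requireEnv("CONTROLLER_HOSTNAME"),
        "backend":               requireEnv("PROXY_BACKENDS"),
        "resolver":              requireEnv("RESOLVER"),
        "service_registry_mode": envOrDefault("SERVICE_REGISTRY_MODE", "yaml"),
    }
    fmt.Println(context)
}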
app/export_v10.go
package app import ( "encoding/csv" "encoding/json" "fmt" "io" "os" "sort" "strings" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/cosmos/cosmos-sdk/x/distribution" "github.com/cosmos/cosmos-sdk/x/gov" "github.com/cosmos/cosmos-sdk/x/mint" "github.com/cosmos/cosmos-sdk/x/staking" "github.com/cosmos/cosmos-sdk/x/staking/exported" "github.com/cosmos/cosmos-sdk/x/supply" "github.com/dfinance/dnode/cmd/config/genesis/defaults" "github.com/dfinance/dnode/helpers" "github.com/dfinance/dnode/x/ccstorage" "github.com/dfinance/dnode/x/vmauth" ) // setMainnetZeroHeightOptionsV10 updates options map per module for Testnet v0.7 -> Mainnet v1.0 migration. // Options removes all XFI tokens and renames SXFI -> XFI. func setMainnetZeroHeightOptionsV10(optsMap map[string]interface{}) (map[string]interface{}, error) { const ( oldStakingDenom = "sxfi" newStakingDenom = "xfi" ) var ( denomsToRemove = []string{"xfi", "usdt", "btc"} ) // Supply { moduleName := supply.ModuleName optsObj, found := optsMap[moduleName] if !found { return nil, fmt.Errorf("module %s: options not found", moduleName) } opts, ok := optsObj.(supply.SquashOptions) if !ok { return nil, fmt.Errorf("module %s: options type assert failed: %T", moduleName, optsObj) } for _, denom := range denomsToRemove { if err := opts.SetDenomOp(denom, true, "", "0"); err != nil { return nil, fmt.Errorf("module %s: %w", moduleName, err) } } if err := opts.SetDenomOp(oldStakingDenom, false, newStakingDenom, "0"); err != nil { return nil, fmt.Errorf("module %s: %w", moduleName, err) } optsMap[moduleName] = opts } // VMAuth { moduleName := vmauth.ModuleName optsObj, found := optsMap[moduleName] if !found { return nil, fmt.Errorf("module %s: options not found", moduleName) } opts, ok := optsObj.(vmauth.SquashOptions) if !ok { return nil, fmt.Errorf("module %s: options type assert failed: %T", moduleName, optsObj) } for _, denom := range denomsToRemove { if err := opts.SetAccountBalanceOp(denom, true, ""); err != nil { return nil, fmt.Errorf("module %s: %w", moduleName, err) } } if err := opts.SetAccountBalanceOp(oldStakingDenom, false, newStakingDenom); err != nil { return nil, fmt.Errorf("module %s: %w", moduleName, err) } optsMap[moduleName] = opts } // Staking { moduleName := staking.ModuleName optsObj, found := optsMap[moduleName] if !found { return nil, fmt.Errorf("module %s: options not found", moduleName) } opts, ok := optsObj.(staking.SquashOptions) if !ok { return nil, fmt.Errorf("module %s: options type assert failed: %T", moduleName, optsObj) } if err := opts.SetParamsOp(newStakingDenom); err != nil { return nil, fmt.Errorf("module %s: %w", moduleName, err) } optsMap[moduleName] = opts } // Distribution { moduleName := distribution.ModuleName optsObj, found := optsMap[moduleName] if !found { return nil, fmt.Errorf("module %s: options not found", moduleName) } opts, ok := optsObj.(distribution.SquashOptions) if !ok { return nil, fmt.Errorf("module %s: options type assert failed: %T", moduleName, optsObj) } if err := opts.SetDecCoinOp(newStakingDenom, true, ""); err != nil { return nil, fmt.Errorf("module %s: %w", moduleName, err) } if err := opts.SetDecCoinOp(oldStakingDenom, false, newStakingDenom); err != nil { return nil, fmt.Errorf("module %s: %w", moduleName, err) } optsMap[moduleName] = opts } // Mint { moduleName := mint.ModuleName optsObj, found := optsMap[moduleName] if !found { return nil, fmt.Errorf("module %s: options not found", moduleName) } opts, ok := optsObj.(mint.SquashOptions) if !ok { return nil, fmt.Errorf("module %s: 
options type assert failed: %T", moduleName, optsObj) } if err := opts.SetParamsOp(newStakingDenom); err != nil { return nil, fmt.Errorf("module %s: %w", moduleName, err) } optsMap[moduleName] = opts } // Gov { moduleName := gov.ModuleName optsObj, found := optsMap[moduleName] if !found { return nil, fmt.Errorf("module %s: options not found", moduleName) } opts, ok := optsObj.(gov.SquashOptions) if !ok { return nil, fmt.Errorf("module %s: options type assert failed: %T", moduleName, optsObj) } if err := opts.SetParamsOp(defaults.GovMinDepositAmount + newStakingDenom); err != nil { return nil, fmt.Errorf("module %s: %w", moduleName, err) } optsMap[moduleName] = opts } // CCStorage { moduleName := ccstorage.ModuleName optsObj, found := optsMap[moduleName] if !found { return nil, fmt.Errorf("module %s: options not found", moduleName) } opts, ok := optsObj.(ccstorage.SquashOptions) if !ok { return nil, fmt.Errorf("module %s: options type assert failed: %T", moduleName, optsObj) } if err := opts.SetSupplyOperation(true); err != nil { return nil, fmt.Errorf("module %s: %w", moduleName, err) } optsMap[moduleName] = opts } return optsMap, nil } // SXFIBalanceReportItem keeps initial, staked and reward balances per account. type SXFIBalanceReportItem struct { AccAddress sdk.AccAddress AccBalance sdk.Coins IssueCoins sdk.Coins RewardCoins sdk.Coins DelBondingShares sdk.Dec DelLPShares sdk.Dec DelBondingTokens sdk.Dec DelLPTokens sdk.Dec GenCoins sdk.Coins StakerReport *SXFIStakerReportItem IssueBondingDenom string IssueLPDenom string BondingDenom string LPDenom string } // SXFIStakerReportItem is a parsed Staker CSV-report. type SXFIStakerReportItem struct { TxHash string AccAddress sdk.AccAddress EthAddress string BondingAmount sdk.Int LPAmount sdk.Int } // GetInitialBondingBalance returns initial amount for BondingDenom (gen balance + issues). func (i SXFIBalanceReportItem) GetInitialBondingBalance() sdk.Int { genAmt := i.GenCoins.AmountOf(i.BondingDenom) issuedAmt := i.IssueCoins.AmountOf(i.IssueBondingDenom) if i.StakerReport == nil { issuedAmt = sdk.ZeroInt() } return genAmt.Add(issuedAmt) } // GetIssueBondingBalance returns initial amount for LPDenom (gen balance + issues). func (i SXFIBalanceReportItem) GetInitialLPBalance() sdk.Int { genAmt := i.GenCoins.AmountOf(i.LPDenom) issuedAmt := i.IssueCoins.AmountOf(i.IssueLPDenom) if i.StakerReport == nil { issuedAmt = sdk.ZeroInt() } return genAmt.Add(issuedAmt) } // GetCurrentBondingBalance returns final amount for BondingDenom (current balance + rewards + delegations). func (i SXFIBalanceReportItem) GetCurrentBondingBalance() sdk.Int { accBalanceAmt := i.AccBalance.AmountOf(i.BondingDenom) rewardAmt := i.RewardCoins.AmountOf(i.BondingDenom) delAmt := i.DelBondingTokens.TruncateInt() return accBalanceAmt.Add(rewardAmt).Add(delAmt) } // GetCurrentLPBalance returns final amount for LPDenom (current balance + rewards + delegations). 
func (i SXFIBalanceReportItem) GetCurrentLPBalance() sdk.Int { accBalanceAmt := i.AccBalance.AmountOf(i.LPDenom) rewardAmt := i.RewardCoins.AmountOf(i.LPDenom) delAmt := i.DelLPTokens.TruncateInt() return accBalanceAmt.Add(rewardAmt).Add(delAmt) } func NewSXFIBalanceReportItem(accAddr sdk.AccAddress, accCoins sdk.Coins, issueBondingDenom, issueLPDenom, bondingDenom, lpDenom string) *SXFIBalanceReportItem { return &SXFIBalanceReportItem{ AccAddress: accAddr, AccBalance: accCoins, IssueCoins: sdk.NewCoins(), RewardCoins: sdk.NewCoins(), DelBondingShares: sdk.ZeroDec(), DelLPShares: sdk.ZeroDec(), DelBondingTokens: sdk.ZeroDec(), DelLPTokens: sdk.ZeroDec(), GenCoins: sdk.NewCoins(), StakerReport: nil, IssueBondingDenom: issueBondingDenom, IssueLPDenom: issueLPDenom, BondingDenom: bondingDenom, LPDenom: lpDenom, } } type SXFIBalanceReportResult struct { ReportItem SXFIBalanceReportItem BondingDiff sdk.Int LPDiff sdk.Int } type SXFIBalanceReportResults []SXFIBalanceReportResult func (results SXFIBalanceReportResults) SaveToCSV(path string) error { f, err := os.Create(path) if err != nil { return fmt.Errorf("creating file: %w", err) } defer f.Close() csvWriter := csv.NewWriter(f) // Header err = csvWriter.Write([]string{ "AccAddress", "GenCoins", "IssueCoins", "WalletCoins", "RewardCoins", "DelBondingTokens", "DelLPTokens", "BondingDiff", "LPDiff", }) if err != nil { return fmt.Errorf("header write: %w", err) } // Entries for i, result := range results { err := csvWriter.Write([]string{ result.ReportItem.AccAddress.String(), result.ReportItem.GenCoins.String(), result.ReportItem.IssueCoins.String(), result.ReportItem.AccBalance.String(), result.ReportItem.RewardCoins.String(), result.ReportItem.DelBondingTokens.String(), result.ReportItem.DelLPTokens.String(), result.BondingDiff.String(), result.LPDiff.String(), }) if err != nil { return fmt.Errorf("entry %d: write: %w", i+1, err) } } csvWriter.Flush() return nil } func (results SXFIBalanceReportResults) String() string { decimalDec := sdk.NewDecWithPrec(1, 18) str := strings.Builder{} str.WriteString("Mainnet SXFI-XFI relation report:\n") for _, result := range results { diffBondingDec, diffLPDec := result.BondingDiff.ToDec().Mul(decimalDec), result.LPDiff.ToDec().Mul(decimalDec) str.WriteString(fmt.Sprintf(" - %s\n", result.ReportItem.AccAddress)) str.WriteString(fmt.Sprintf(" BondingDiff: %s (%s)\n", diffBondingDec, result.BondingDiff)) str.WriteString(fmt.Sprintf(" LPDiff: %s (%s)\n", diffLPDec, result.LPDiff)) str.WriteString(fmt.Sprintf(" GenBalance: %s\n", result.ReportItem.GenCoins)) str.WriteString(fmt.Sprintf(" AccBalance: %s\n", result.ReportItem.AccBalance)) str.WriteString(fmt.Sprintf(" Issues: %s\n", result.ReportItem.IssueCoins)) str.WriteString(fmt.Sprintf(" Rewards: %s\n", result.ReportItem.RewardCoins)) str.WriteString(fmt.Sprintf(" BDel: %s (%s)\n", result.ReportItem.DelBondingTokens, result.ReportItem.DelBondingShares)) str.WriteString(fmt.Sprintf(" LPDel: %s (%s)\n", result.ReportItem.DelLPTokens, result.ReportItem.DelLPShares)) } return str.String() } // SXFIBalanceReport contains initial and final Testnet (v0.7) sxfi balance for accounts. // Key - account address. type SXFIBalanceReport map[string]*SXFIBalanceReportItem // AppendGenesisBalances modifies a SXFIBalanceReport with genesis account balances. 
func (r SXFIBalanceReport) AppendGenesisBalances( ctx sdk.Context, app *DnServiceApp, issueBondingDenom, issueLPDenom, bondingDenom, lpDenom string, ) error { genBalances := []struct { AccAddress string BondingBalance string LPBalance string }{ { AccAddress: "wallet1wwmenr38hhrem2v3ue3gwdhj03ynzcvlxgc92u", BondingBalance: "3400000000000000000000000", LPBalance: "0", }, { AccAddress: "wallet1a6sd0y8l0ma0gnytacrnwlmnupm7ftnwxngalr", BondingBalance: "2500000000000000000000", LPBalance: "0", }, { AccAddress: "wallet1whpkntyj549f7euftgpng24k2we8legght4rzg", BondingBalance: "2500000000000000000000", LPBalance: "0", }, { AccAddress: "wallet1zwkqfm2sdgyx0g6h2dj9em4z4kjgy5lmtnmgjd", BondingBalance: "2500000000000000000000", LPBalance: "0", }, { AccAddress: "wallet10a24shxzjtutj637rr8shwkwaxx8paplu4vc6f", BondingBalance: "2500000000000000000000", LPBalance: "0", }, { AccAddress: "wallet19xshddf5ww7fhd53fumly2r7lqsszz63fxca9x", BondingBalance: "2500000000000000000000", LPBalance: "0", }, { AccAddress: "wallet1l9mukqvh0etam66dvgw99w9awv3jjv6tyh2hpc", BondingBalance: "50000000000000000000000", LPBalance: "0", }, } for i, genBalance := range genBalances { accAddress, err := sdk.AccAddressFromBech32(genBalance.AccAddress) if err != nil { return fmt.Errorf("genBalance (%d): AccAddress (%s): invalid: %w", i, genBalance.AccAddress, err) } bondingAmt, ok := sdk.NewIntFromString(genBalance.BondingBalance) if !ok { return fmt.Errorf("genBalance (%d): BondingBalance (%s): invalid sdk.Int", i, genBalance.BondingBalance) } lpAmt, ok := sdk.NewIntFromString(genBalance.LPBalance) if !ok { return fmt.Errorf("genBalance (%d): LPBalance (%s): invalid sdk.Int", i, genBalance.LPBalance) } acc := app.accountKeeper.GetAccount(ctx, accAddress) if acc == nil { return fmt.Errorf("genBalance (%d): account (%s): not found", i, accAddress) } reportItem := NewSXFIBalanceReportItem(accAddress, acc.GetCoins(), issueBondingDenom, issueLPDenom, bondingDenom, lpDenom) reportItem.GenCoins = sdk.NewCoins( sdk.NewCoin(bondingDenom, bondingAmt), sdk.NewCoin(lpDenom, lpAmt), ) r[accAddress.String()] = reportItem } return nil } // AppendStakerCSVReport modifies a SXFIBalanceReport with staker CSV report data. 
func (r SXFIBalanceReport) AppendStakerCSVReport(filePath string) error { const ( csvEntryColumns = 5 ) f, err := os.Open(filePath) if err != nil { return fmt.Errorf("CSV staker report open: %w", err) } defer f.Close() csvReader := csv.NewReader(f) entryIdx := 0 for { entryIdx++ csvEntry, err := csvReader.Read() if err != nil { if err == io.EOF { break } return fmt.Errorf("entry (%d): read failed: %w", entryIdx, err) } if entryIdx == 1 { // skip the header continue } // parse if len(csvEntry) != csvEntryColumns { return fmt.Errorf("entry (%d): invalid number of columns: %d / %d", entryIdx, len(csvEntry), csvEntryColumns) } stakerTxHash := csvEntry[0] if stakerTxHash == "" { return fmt.Errorf("entry (%d): TxHash: emtpy", entryIdx) } stakerBondingAmt := sdk.ZeroInt() if amtRaw := csvEntry[1]; amtRaw != "" { amt, ok := sdk.NewIntFromString(amtRaw) if !ok { return fmt.Errorf("entry (%d): BondingAmount (%s): invalid sdk.Int", entryIdx, amtRaw) } stakerBondingAmt = amt } stakerAccAddress, err := sdk.AccAddressFromBech32(csvEntry[2]) if err != nil { return fmt.Errorf("entry (%d): AccAddress (%s): invalid sdk.AccAddress: %w", entryIdx, csvEntry[2], err) } stakerEthAddress := csvEntry[3] if !helpers.IsEthereumAddress(stakerEthAddress) { return fmt.Errorf("entry (%d): EthAddress (%s): invalid", entryIdx, stakerEthAddress) } stakerLPAmt := sdk.ZeroInt() if amtRaw := csvEntry[4]; amtRaw != "" { amt, ok := sdk.NewIntFromString(amtRaw) if !ok { return fmt.Errorf("entry (%d): LPAmount (%s): invalid sdk.Int", entryIdx, amtRaw) } stakerLPAmt = amt } stakerReport := &SXFIStakerReportItem{ TxHash: stakerTxHash, AccAddress: stakerAccAddress, EthAddress: stakerEthAddress, BondingAmount: stakerBondingAmt, LPAmount: stakerLPAmt, } reportItem, found := r[stakerReport.AccAddress.String()] if !found { return fmt.Errorf("entry (%d): reportEntry for AccAddress %s: not found", entryIdx, stakerReport.AccAddress) } if reportItem.StakerReport != nil { return fmt.Errorf("entry (%d): reportEntry for AccAddress %s: StakerReport already exists", entryIdx, stakerReport.AccAddress) } reportItem.StakerReport = stakerReport } return nil } // Verify compares issues data with Staker report data. func (r SXFIBalanceReport) Verify() error { for accAddr, reportItem := range r { if reportItem.StakerReport == nil { continue } issuedBondingAmt := reportItem.IssueCoins.AmountOf(reportItem.IssueBondingDenom) stakerBondingAmt := reportItem.StakerReport.BondingAmount if !issuedBondingAmt.Equal(stakerBondingAmt) { return fmt.Errorf("account %s: issued / staker Bonding amount mismatch: %s / %s", accAddr, issuedBondingAmt, stakerBondingAmt) } } return nil } func (r SXFIBalanceReport) GetResults() SXFIBalanceReportResults { results := make(SXFIBalanceReportResults, 0, len(r)) for _, reportItem := range r { diffBonding := reportItem.GetCurrentBondingBalance().Sub(reportItem.GetInitialBondingBalance()) diffLP := reportItem.GetCurrentLPBalance().Sub(reportItem.GetInitialLPBalance()) results = append(results, SXFIBalanceReportResult{ ReportItem: *reportItem, BondingDiff: diffBonding, LPDiff: diffLP, }) } sort.Slice(results, func(i, j int) bool { return results[i].BondingDiff.LT(results[j].BondingDiff) }) return results } // getMainnetSXFIBalanceReport returns a SXFIBalanceReport report. 
func (app *DnServiceApp) getMainnetSXFIBalanceReport(ctx sdk.Context, issueBondingDenom, issueLPDenom, bondingDenom, lpDenom string, stakerCSVReportPath string, ) (SXFIBalanceReport, error) { cacheCtx, _ := ctx.CacheContext() // initialize report with genesis data report := make(SXFIBalanceReport) if err := report.AppendGenesisBalances(ctx, app, issueBondingDenom, issueLPDenom, bondingDenom, lpDenom); err != nil { return nil, fmt.Errorf("append genesis balances: %w", err) } // iterate all issues and combine duplicate payees for _, issue := range app.ccKeeper.GetGenesisIssues(cacheCtx) { accAddr := issue.Payee reportItem, found := report[accAddr.String()] if !found { acc := app.accountKeeper.GetAccount(cacheCtx, accAddr) if acc == nil { return nil, fmt.Errorf("issue %s: getAccount for %s: not found", issue.ID, accAddr) } reportItem = NewSXFIBalanceReportItem(accAddr, acc.GetCoins(), issueBondingDenom, issueLPDenom, bondingDenom, lpDenom) } reportItem.IssueCoins = reportItem.IssueCoins.Add(issue.Coin) report[accAddr.String()] = reportItem } // withdraw all rewards // as all rewards were transferred to rewards bank before, we only query the bank coins for each validator for _, reportItem := range report { accAddr := reportItem.AccAddress app.distrKeeper.IterateDelegatorRewardsBankCoins(ctx, accAddr, func(_ sdk.ValAddress, coins sdk.Coins) (stop bool) { reportItem.RewardCoins = reportItem.RewardCoins.Add(coins...) return false }) } // unbond all delegations // no actual undelegation is done, we just calculate delegator tokens based on shares and validator tokens { for _, reportItem := range report { accAddr := reportItem.AccAddress var iterationErr error app.stakingKeeper.IterateDelegations( cacheCtx, accAddr, func(_ int64, del exported.DelegationI) (stop bool) { val, found := app.stakingKeeper.GetValidator(cacheCtx, del.GetValidatorAddr()) if !found { iterationErr = fmt.Errorf("account %s: get delegation validator %s: not found", accAddr, del.GetValidatorAddr()) return true } reportItem.DelBondingShares = reportItem.DelBondingShares.Add(del.GetBondingShares()) if !del.GetBondingShares().IsZero() { reportItem.DelBondingTokens = reportItem.DelBondingTokens.Add(val.BondingTokensFromSharesTruncated(del.GetBondingShares())) } reportItem.DelLPShares = reportItem.DelLPShares.Add(del.GetLPShares()) if !del.GetLPShares().IsZero() { reportItem.DelLPTokens = reportItem.DelLPTokens.Add(val.LPTokensFromSharesTruncated(del.GetLPShares())) } return false }, ) if iterationErr != nil { return nil, iterationErr } } } // update report with Staker CSV-report if stakerCSVReportPath != "" { if err := report.AppendStakerCSVReport(stakerCSVReportPath); err != nil { return nil, fmt.Errorf("append append StakerCSVReport: %w", err) } } return report, nil } type SXFIBalanceReportStats struct { TotalNegativeBondingDiffs sdk.Dec TotalPositiveBondingDiffs sdk.Dec AccMints map[string]sdk.DecCoin } // processMainnetSXFIBalance builds getMainnetSXFIBalanceReport and mints and transfers negative diffs. 
func (app *DnServiceApp) processMainnetSXFIBalance(ctx sdk.Context) error { const ( issueDenom = "sxfi" bondingDenom = "xfi" lpDenom = "lpt" ) decimalDec := sdk.NewDecWithPrec(1, 18) stakerReportPath := os.Getenv("DN_ZHP_STAKERREPORT_PATH") reportOutputPrefix := os.Getenv("DN_ZHP_REPORTOUTPUT_PREFIX") if stakerReportPath == "" { return fmt.Errorf("envVar %q: not set", "DN_ZHP_STAKERREPORT_PATH") } if reportOutputPrefix == "" { return fmt.Errorf("envVar %q: not set", "DN_ZHP_REPORTOUTPUT_PREFIX") } // build report report, err := app.getMainnetSXFIBalanceReport( ctx, issueDenom, lpDenom, bondingDenom, lpDenom, stakerReportPath, ) if err != nil { return fmt.Errorf("getMainnetSXFIBalanceReport: %w", err) } if err := report.Verify(); err != nil { return fmt.Errorf("report verification: %w", err) } // save results results := report.GetResults() if err := results.SaveToCSV(reportOutputPrefix + "data.csv"); err != nil { return fmt.Errorf("saving report results to CSV: %w", err) } // calculate the mint amount positiveDiffs, negativeDiffs := sdk.ZeroInt(), sdk.ZeroInt() stats := SXFIBalanceReportStats{ TotalNegativeBondingDiffs: sdk.ZeroDec(), TotalPositiveBondingDiffs: sdk.ZeroDec(), AccMints: make(map[string]sdk.DecCoin, len(report)), } for _, result := range results { if !result.BondingDiff.IsNegative() { positiveDiffs = positiveDiffs.Add(result.BondingDiff) continue } negativeDiffs = negativeDiffs.Add(result.BondingDiff) } negativeDiffs = negativeDiffs.MulRaw(-1) bondingMintCoin := sdk.NewCoin(bondingDenom, negativeDiffs) // stats.TotalPositiveBondingDiffs = positiveDiffs.ToDec().Mul(decimalDec) stats.TotalNegativeBondingDiffs = negativeDiffs.ToDec().Mul(decimalDec) // mint if err := app.mintKeeper.MintCoins(ctx, sdk.NewCoins(bondingMintCoin)); err != nil { return fmt.Errorf("minting bonding coins: %w", err) } if err := app.ccsKeeper.IncreaseCurrencySupply(ctx, bondingMintCoin); err != nil { return fmt.Errorf("increasing ccStorage supply: %w", err) } // distribute minted coins for _, result := range results { diff := result.BondingDiff if !diff.IsNegative() { continue } coin := sdk.NewCoin(bondingDenom, diff.MulRaw(-1)) if err := app.supplyKeeper.SendCoinsFromModuleToAccount(ctx, mint.ModuleName, result.ReportItem.AccAddress, sdk.NewCoins(coin)); err != nil { return fmt.Errorf("sending minted coins to %s: %w", result.ReportItem.AccAddress, err) } // stats.AccMints[result.ReportItem.AccAddress.String()] = sdk.NewDecCoinFromDec( coin.Denom, coin.Amount.ToDec().Mul(decimalDec), ) } // save stats statsBz, err := json.Marshal(stats) if err != nil { return fmt.Errorf("stats: JSON marshal: %w", err) } f, err := os.Create(reportOutputPrefix + "stats.json") if err != nil { return fmt.Errorf("stats: creating file: %w", err) } defer f.Close() if _, err := f.Write(statsBz); err != nil { return fmt.Errorf("stats: write to file: %w", err) } // check the invariants if err := app.checkInvariants(ctx); err != nil { return fmt.Errorf("post invariants check: %w", err) } return nil }
[ "\"DN_ZHP_STAKERREPORT_PATH\"", "\"DN_ZHP_REPORTOUTPUT_PREFIX\"" ]
[]
[ "DN_ZHP_REPORTOUTPUT_PREFIX", "DN_ZHP_STAKERREPORT_PATH" ]
[]
["DN_ZHP_REPORTOUTPUT_PREFIX", "DN_ZHP_STAKERREPORT_PATH"]
go
2
0
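SaveToCSV in app/export_v10.go above writes a header row, then one row per result, flushes the writer, and wraps every failure with context. A stripped-down Go sketch of that pattern is below; the row type, file name, and sample data are hypothetical placeholders, not the real report schema.

package main

import (
    "encoding/csv"
    "fmt"
    "os"
)

// row is a hypothetical report entry; the real report above carries
// account addresses, balances, and bonding/LP diffs.
type row struct {
    Account string
    Diff    string
}

func saveToCSV(path string, rows []row) error {
    f, err := os.Create(path)
    if err != nil {
        return fmt.Errorf("creating file: %w", err)
    }
    defer f.Close()

    w := csv.NewWriter(f)
    // Header first, then one record per entry, as in SaveToCSV above.
    if err := w.Write([]string{"Account", "Diff"}); err != nil {
        return fmt.Errorf("header write: %w", err)
    }
    for i, r := range rows {
        if err := w.Write([]string{r.Account, r.Diff}); err != nil {
            return fmt.Errorf("entry %d: write: %w", i+1, err)
        }
    }
    w.Flush()
    return w.Error()
}

func main() {
    rows := []row{{"acc1", "-100"}, {"acc2", "250"}}
    if err := saveToCSV("report.csv", rows); err != nil {
        fmt.Fprintln(os.Stderr, "report:", err)
        os.Exit(1)
    }
}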
backend/www/test/auth_google_test.py
#!/usr/bin/env python # # Copyright 2012 Viewfinder Inc. All Rights Reserved. """Account authorization tests for Google accounts. """ __authors__ = ['[email protected] (Spencer Kimball)', '[email protected] (Andrew Kimball)'] import mock import os import unittest from viewfinder.backend.base import util from viewfinder.backend.base.testing import MockAsyncHTTPClient from viewfinder.backend.db.identity import Identity from viewfinder.backend.www.test import auth_test, service_base_test @unittest.skip("needs google credentials") @unittest.skipIf('NO_NETWORK' in os.environ, 'no network') class AuthGoogleTestCase(service_base_test.ServiceBaseTestCase): """Tests authentication via the Google OAuth service.""" def setUp(self): super(AuthGoogleTestCase, self).setUp() self._google_user_dict = {'family_name': 'Kimball', 'name': 'Andrew Kimball', 'locale': 'en', 'gender': 'male', 'email': '[email protected]', 'link': 'https://plus.google.com/id', 'given_name': 'Andrew', 'id': 'id', 'verified_email': True} self._google_user2_dict = {'name': 'Spencer Kimball', 'email': '[email protected]', 'verified_email': True} self._mobile_device_dict = {'name': 'Andy\'s IPhone', 'version': '1.0', 'platform': 'IPhone 4S', 'os': 'iOS 5.0.1', 'push_token': 'push_token'} def testRegisterWebUser(self): """Test successful register of web user.""" # Register as web user, register as mobile user self._tester.RegisterGoogleUser(self._google_user_dict) self.assertRaisesHttpError(403, self._tester.RegisterGoogleUser, self._google_user_dict, self._mobile_device_dict) def testRegisterMobileUser(self): """Test successful register of mobile user.""" # Register as mobile user, register as web user. self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict) self.assertRaisesHttpError(403, self._tester.RegisterGoogleUser, self._google_user_dict) def testLoginWebUser(self): """Test successful login of web user.""" # Register as web user, login as web user. user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict) user2, device_id2 = self._tester.LoginGoogleUser(self._google_user_dict) self.assertEqual(user.user_id, user2.user_id) self.assertEqual(device_id, device_id2) # And login again as mobile user. self._tester.LoginGoogleUser(self._google_user_dict, self._mobile_device_dict) def testLoginMobileUser(self): """Test successful login of mobile user.""" # Register as web user, login as mobile user. user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict) user2, device_id2 = self._tester.LoginGoogleUser(self._google_user_dict, self._mobile_device_dict) self.assertEqual(user.user_id, user2.user_id) self.assertNotEqual(device_id, device_id2) # And login again as web user. self._tester.LoginGoogleUser(self._google_user_dict) def testLinkWebUser(self): """Test successful link of web user.""" # Register as mobile user, link as web user user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict) cookie = self._GetSecureUserCookie(user, device_id) user2, device_id2 = self._tester.LinkGoogleUser(self._google_user2_dict, user_cookie=cookie) self.assertEqual(user.user_id, user2.user_id) self.assertNotEqual(device_id, device_id2) # And link again as mobile user. self._tester.LinkGoogleUser(self._google_user2_dict, self._mobile_device_dict, user_cookie=cookie) self.assertEqual(len(self._tester.ListIdentities(cookie)), 2) def testLinkMobileUser(self): """Test successful link of mobile user.""" # Register as web user, link as mobile user. 
user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict) cookie = self._GetSecureUserCookie(user, device_id) self._tester.LinkGoogleUser(self._google_user2_dict, self._mobile_device_dict, user_cookie=cookie) # And link again as web user. self._tester.LinkGoogleUser(self._google_user2_dict, user_cookie=cookie) self.assertEqual(len(self._tester.ListIdentities(cookie)), 2) def testNonCanonicalId(self): """Test that identity key is canonicalized during import from Google.""" user, device_id = self._tester.RegisterGoogleUser(self._google_user_dict, self._mobile_device_dict) self._google_user_dict['email'] = self._google_user_dict['email'].upper() user2, device_id2 = self._tester.LoginGoogleUser(self._google_user_dict, self._mobile_device_dict) self.assertEqual(user.user_id, user2.user_id) def testLoginNoExist(self): """ERROR: Try to login with Google identity that is not linked to a Viewfinder account.""" self.assertRaisesHttpError(403, self._tester.LoginGoogleUser, self._google_user_dict) self.assertRaisesHttpError(403, self._tester.LoginGoogleUser, self._google_user_dict, self._mobile_device_dict) def testUnverifiedEmail(self): """ERROR: Try to register an unverified email address.""" self._google_user_dict['verified_email'] = False self.assertRaisesHttpError(403, self._tester.RegisterGoogleUser, self._google_user_dict, self._mobile_device_dict) def testMissingRefreshToken(self): """ERROR: Test error on missing Google refresh token.""" self.assertRaisesHttpError(400, auth_test._SendAuthRequest, self._tester, self.get_url('/register/google'), 'POST', request_dict=auth_test._CreateRegisterRequest(self._mobile_device_dict)) def testGoogleRegistration(self): # TODO(spencer): implement something here; a cursory look around # the internets didn't turn up anything provided by Google analogous # to Facebook's test accounts. pass def _TestAuthGoogleUser(action, tester, user_dict, device_dict=None, user_cookie=None): """Called by the ServiceTester in order to test login/google, link/google, and register/google calls. """ ident_dict = {'key': 'Email:%s' % Identity.CanonicalizeEmail(user_dict['email']), 'authority': 'Google', 'refresh_token': 'refresh_token', 'access_token': 'access_token', 'expires': util._TEST_TIME + 3600} if device_dict: device_dict.pop('device_uuid', None) device_dict.pop('test_udid', None) # Mock responses from Google. with mock.patch('tornado.httpclient.AsyncHTTPClient', MockAsyncHTTPClient()) as mock_client: # Response to request for access token. auth_test._AddMockJSONResponse(mock_client, r'https://accounts.google.com/o/oauth2/token', {'access_token': ident_dict['access_token'], 'token_type': 'Bearer', 'expires_in': ident_dict['expires'] - util._TEST_TIME, 'id_token': 'id_token', 'refresh_token': ident_dict['refresh_token']}) # Response to request for user info. auth_test._AddMockJSONResponse(mock_client, r'https://www.googleapis.com/oauth2/v1/userinfo\?', user_dict) # Response to request for people (i.e. contacts). auth_test._AddMockJSONResponse(mock_client, r'https://www.google.com/m8/feeds/contacts/default/full', {'feed': {'entry': [], 'openSearch$startIndex': {'$t': '1'}, 'openSearch$totalResults': {'$t': '0'}}}) response = auth_test._AuthFacebookOrGoogleUser(tester, action, user_dict, ident_dict, device_dict, user_cookie) return auth_test._ValidateAuthUser(tester, action, user_dict, ident_dict, device_dict, user_cookie, response)
[]
[]
[]
[]
[]
python
0
0
govcd/api.go
/* * Copyright 2019 VMware, Inc. All rights reserved. Licensed under the Apache v2 License. */ // Package govcd provides a simple binding for vCloud Director REST APIs. package govcd import ( "bytes" "encoding/xml" "fmt" "io" "io/ioutil" "net/http" "net/url" "os" "reflect" "strings" "github.com/vmware/go-vcloud-director/v2/types/v56" "github.com/vmware/go-vcloud-director/v2/util" ) // Client provides a client to vCloud Director, values can be populated automatically using the Authenticate method. type Client struct { APIVersion string // The API version required VCDToken string // Access Token (authorization header) VCDAuthHeader string // Authorization header VCDHREF url.URL // VCD API ENDPOINT Http http.Client // HttpClient is the client to use. Default will be used if not provided. IsSysAdmin bool // flag if client is connected as system administrator // MaxRetryTimeout specifies a time limit (in seconds) for retrying requests made by the SDK // where vCloud director may take time to respond and retry mechanism is needed. // This must be >0 to avoid instant timeout errors. MaxRetryTimeout int } // General purpose error to be used whenever an entity is not found from a "GET" request // Allows a simpler checking of the call result // such as // if err == ErrorEntityNotFound { // // do what is needed in case of not found // } var errorEntityNotFoundMessage = "[ENF] entity not found" var ErrorEntityNotFound = fmt.Errorf(errorEntityNotFoundMessage) // Triggers for debugging functions that show requests and responses var debugShowRequestEnabled = os.Getenv("GOVCD_SHOW_REQ") != "" var debugShowResponseEnabled = os.Getenv("GOVCD_SHOW_RESP") != "" // Enables the debugging hook to show requests as they are processed. func enableDebugShowRequest() { debugShowRequestEnabled = true } // Disables the debugging hook to show requests as they are processed. func disableDebugShowRequest() { debugShowRequestEnabled = false _ = os.Setenv("GOVCD_SHOW_REQ", "") } // Enables the debugging hook to show responses as they are processed. func enableDebugShowResponse() { debugShowResponseEnabled = true } // Disables the debugging hook to show responses as they are processed. func disableDebugShowResponse() { debugShowResponseEnabled = false _ = os.Setenv("GOVCD_SHOW_RESP", "") } // On-the-fly debug hook. If either debugShowRequestEnabled or the environment // variable "GOVCD_SHOW_REQ" are enabled, this function will show the contents // of the request as it is being processed. func debugShowRequest(req *http.Request, payload string) { if debugShowRequestEnabled { header := "[\n" for key, value := range req.Header { header += fmt.Sprintf("\t%s => %s\n", key, value) } header += "]\n" fmt.Printf("method: %s\n", req.Method) fmt.Printf("host: %s\n", req.Host) fmt.Printf("length: %d\n", req.ContentLength) fmt.Printf("URL: %s\n", req.URL.String()) fmt.Printf("header: %s\n", header) fmt.Printf("payload: %s\n", payload) } } // On-the-fly debug hook. If either debugShowResponseEnabled or the environment // variable "GOVCD_SHOW_RESP" are enabled, this function will show the contents // of the response as it is being processed. 
func debugShowResponse(resp *http.Response, body []byte) { if debugShowResponseEnabled { fmt.Printf("status: %d - %s \n", resp.StatusCode, resp.Status) fmt.Printf("length: %d\n", resp.ContentLength) fmt.Printf("header: %v\n", resp.Header) fmt.Printf("body: %s\n", body) } } // Convenience function, similar to os.IsNotExist that checks whether a given error // is a "Not found" error, such as // if isNotFound(err) { // // do what is needed in case of not found // } func IsNotFound(err error) bool { return err != nil && err == ErrorEntityNotFound } // ContainsNotFound is a convenience function, similar to os.IsNotExist that checks whether a given error // contains a "Not found" error. It is almost the same as `IsNotFound` but checks if an error contains substring // ErrorEntityNotFound func ContainsNotFound(err error) bool { return err != nil && strings.Contains(err.Error(), ErrorEntityNotFound.Error()) } // Function allow to pass complex values params which shouldn't be encoded like for queries. e.g. /query?filter=(name=foo) func (cli *Client) NewRequestWitNotEncodedParams(params map[string]string, notEncodedParams map[string]string, method string, reqUrl url.URL, body io.Reader) *http.Request { reqValues := url.Values{} // Build up our request parameters for key, value := range params { reqValues.Add(key, value) } // Add the params to our URL reqUrl.RawQuery = reqValues.Encode() for key, value := range notEncodedParams { if key != "" && value != "" { reqUrl.RawQuery += "&" + key + "=" + value } } // Build the request, no point in checking for errors here as we're just // passing a string version of an url.URL struct and http.NewRequest returns // error only if can't process an url.ParseRequestURI(). req, _ := http.NewRequest(method, reqUrl.String(), body) if cli.VCDAuthHeader != "" && cli.VCDToken != "" { // Add the authorization header req.Header.Add(cli.VCDAuthHeader, cli.VCDToken) // Add the Accept header for VCD req.Header.Add("Accept", "application/*+xml;version="+cli.APIVersion) } // Avoids passing data if the logging of requests is disabled if util.LogHttpRequest { // Makes a safe copy of the request body, and passes it // to the processing function. payload := "" if req.ContentLength > 0 { // We try to convert body to a *bytes.Buffer var ibody interface{} = body bbody, ok := ibody.(*bytes.Buffer) // If the inner object is a bytes.Buffer, we get a safe copy of the data. // If it is really just an io.Reader, we don't, as the copy would empty the reader if ok { payload = bbody.String() } else { // With this content, we'll know that the payload is not really empty, but // it was unavailable due to the body type. payload = fmt.Sprintf("<Not retrieved from type %s>", reflect.TypeOf(body)) } } util.ProcessRequestOutput(util.FuncNameCallStack(), method, reqUrl.String(), payload, req) debugShowRequest(req, payload) } return req } // NewRequest creates a new HTTP request and applies necessary auth headers if // set. func (cli *Client) NewRequest(params map[string]string, method string, reqUrl url.URL, body io.Reader) *http.Request { return cli.NewRequestWitNotEncodedParams(params, nil, method, reqUrl, body) } // ParseErr takes an error XML resp, error interface for unmarshaling and returns a single string for // use in error messages. 
func ParseErr(resp *http.Response, errType error) error { // if there was an error decoding the body, just return that if err := decodeBody(resp, errType); err != nil { util.Logger.Printf("[ParseErr]: unhandled response <--\n%+v\n-->\n", resp) return fmt.Errorf("[ParseErr]: error parsing error body for non-200 request: %s (%+v)", err, resp) } return errType } // decodeBody is used to XML decode a response body func decodeBody(resp *http.Response, out interface{}) error { body, err := ioutil.ReadAll(resp.Body) util.ProcessResponseOutput(util.FuncNameCallStack(), resp, fmt.Sprintf("%s", body)) if err != nil { return err } debugShowResponse(resp, body) // Unmarshal the XML. if err = xml.Unmarshal(body, &out); err != nil { return err } return nil } // checkResp wraps http.Client.Do() and verifies the request, if status code // is 2XX it passes back the response, if it's a known invalid status code it // parses the resultant XML error and returns a descriptive error, if the // status code is not handled it returns a generic error with the status code. func checkResp(resp *http.Response, err error) (*http.Response, error) { return checkRespWithErrType(resp, err, &types.Error{}) } // checkRespWithErrType allows to specify custom error errType for checkResp unmarshaling // the error. func checkRespWithErrType(resp *http.Response, err, errType error) (*http.Response, error) { if err != nil { return resp, err } switch resp.StatusCode { // Valid request, return the response. case http.StatusOK, // 200 http.StatusCreated, // 201 http.StatusAccepted, // 202 http.StatusNoContent: // 204 return resp, nil // Invalid request, parse the XML error returned and return it. case http.StatusBadRequest, // 400 http.StatusUnauthorized, // 401 http.StatusForbidden, // 403 http.StatusNotFound, // 404 http.StatusMethodNotAllowed, // 405 http.StatusNotAcceptable, // 406 http.StatusProxyAuthRequired, // 407 http.StatusRequestTimeout, // 408 http.StatusConflict, // 409 http.StatusGone, // 410 http.StatusLengthRequired, // 411 http.StatusPreconditionFailed, // 412 http.StatusRequestEntityTooLarge, // 413 http.StatusRequestURITooLong, // 414 http.StatusUnsupportedMediaType, // 415 http.StatusLocked, // 423 http.StatusFailedDependency, // 424 http.StatusUpgradeRequired, // 426 http.StatusPreconditionRequired, // 428 http.StatusTooManyRequests, // 429 http.StatusRequestHeaderFieldsTooLarge, // 431 http.StatusUnavailableForLegalReasons, // 451 http.StatusInternalServerError, // 500 http.StatusServiceUnavailable, // 503 http.StatusGatewayTimeout: // 504 return nil, ParseErr(resp, errType) // Unhandled response. default: return nil, fmt.Errorf("unhandled API response, please report this issue, status code: %s", resp.Status) } } // Helper function creates request, runs it, checks response and parses task from response. // pathURL - request URL // requestType - HTTP method type // contentType - value to set for "Content-Type" // errorMessage - error message to return when error happens // payload - XML struct which will be marshalled and added as body/payload // E.g. 
client.ExecuteTaskRequest(updateDiskLink.HREF, http.MethodPut, updateDiskLink.Type, "error updating disk: %s", xmlPayload) func (client *Client) ExecuteTaskRequest(pathURL, requestType, contentType, errorMessage string, payload interface{}) (Task, error) { if !isMessageWithPlaceHolder(errorMessage) { return Task{}, fmt.Errorf("error message has to include place holder for error") } resp, err := executeRequest(pathURL, requestType, contentType, payload, client) if err != nil { return Task{}, fmt.Errorf(errorMessage, err) } task := NewTask(client) if err = decodeBody(resp, task.Task); err != nil { return Task{}, fmt.Errorf("error decoding Task response: %s", err) } err = resp.Body.Close() if err != nil { return Task{}, fmt.Errorf(errorMessage, err) } // The request was successful return *task, nil } // Helper function creates request, runs it, checks response and do not expect any values from it. // pathURL - request URL // requestType - HTTP method type // contentType - value to set for "Content-Type" // errorMessage - error message to return when error happens // payload - XML struct which will be marshalled and added as body/payload // E.g. client.ExecuteRequestWithoutResponse(catalogItemHREF.String(), http.MethodDelete, "", "error deleting Catalog item: %s", nil) func (client *Client) ExecuteRequestWithoutResponse(pathURL, requestType, contentType, errorMessage string, payload interface{}) error { if !isMessageWithPlaceHolder(errorMessage) { return fmt.Errorf("error message has to include place holder for error") } resp, err := executeRequest(pathURL, requestType, contentType, payload, client) if err != nil { return fmt.Errorf(errorMessage, err) } // log response explicitly because decodeBody() was not triggered util.ProcessResponseOutput(util.FuncNameCallStack(), resp, fmt.Sprintf("%s", resp.Body)) debugShowResponse(resp, []byte("SKIPPED RESPONSE")) err = resp.Body.Close() if err != nil { return fmt.Errorf("error closing response body: %s", err) } // The request was successful return nil } // Helper function creates request, runs it, check responses and parses out interface from response. // pathURL - request URL // requestType - HTTP method type // contentType - value to set for "Content-Type" // errorMessage - error message to return when error happens // payload - XML struct which will be marshalled and added as body/payload // out - structure to be used for unmarshalling xml // E.g. unmarshalledAdminOrg := &types.AdminOrg{} // client.ExecuteRequest(adminOrg.AdminOrg.HREF, http.MethodGet, "", "error refreshing organization: %s", nil, unmarshalledAdminOrg) func (client *Client) ExecuteRequest(pathURL, requestType, contentType, errorMessage string, payload, out interface{}) (*http.Response, error) { if !isMessageWithPlaceHolder(errorMessage) { return &http.Response{}, fmt.Errorf("error message has to include place holder for error") } resp, err := executeRequest(pathURL, requestType, contentType, payload, client) if err != nil { return resp, fmt.Errorf(errorMessage, err) } if err = decodeBody(resp, out); err != nil { return resp, fmt.Errorf("error decoding response: %s", err) } err = resp.Body.Close() if err != nil { return resp, fmt.Errorf("error closing response body: %s", err) } // The request was successful return resp, nil } // ExecuteRequestWithCustomError sends the request and checks for 2xx response. If the returned status code // was not as expected - the returned error will be unmarshaled to `errType` which implements Go's standard `error` // interface. 
func (client *Client) ExecuteRequestWithCustomError(pathURL, requestType, contentType, errorMessage string, payload interface{}, errType error) (*http.Response, error) { if !isMessageWithPlaceHolder(errorMessage) { return &http.Response{}, fmt.Errorf("error message has to include place holder for error") } resp, err := executeRequestCustomErr(pathURL, requestType, contentType, payload, client, errType) if err != nil { return &http.Response{}, fmt.Errorf(errorMessage, err) } // read from resp.Body io.Reader for debug output if it has body var bodyBytes []byte if resp.Body != nil { bodyBytes, err = ioutil.ReadAll(resp.Body) if err != nil { return &http.Response{}, fmt.Errorf("could not read response body: %s", err) } // Restore the io.ReadCloser to its original state with no-op closer resp.Body = ioutil.NopCloser(bytes.NewBuffer(bodyBytes)) } util.ProcessResponseOutput(util.FuncNameCallStack(), resp, string(bodyBytes)) debugShowResponse(resp, bodyBytes) return resp, nil } // executeRequest does executeRequestCustomErr and checks for vCD errors in API response func executeRequest(pathURL, requestType, contentType string, payload interface{}, client *Client) (*http.Response, error) { return executeRequestCustomErr(pathURL, requestType, contentType, payload, client, &types.Error{}) } // executeRequestCustomErr performs request and unmarshals API error to errType if not 2xx status was returned func executeRequestCustomErr(pathURL, requestType, contentType string, payload interface{}, client *Client, errType error) (*http.Response, error) { url, _ := url.ParseRequestURI(pathURL) var req *http.Request switch requestType { case http.MethodPost, http.MethodPut: marshaledXml, err := xml.MarshalIndent(payload, " ", " ") if err != nil { return &http.Response{}, fmt.Errorf("error marshalling xml data %v", err) } body := bytes.NewBufferString(xml.Header + string(marshaledXml)) req = client.NewRequest(map[string]string{}, requestType, *url, body) default: req = client.NewRequest(map[string]string{}, requestType, *url, nil) } if contentType != "" { req.Header.Add("Content-Type", contentType) } resp, err := client.Http.Do(req) if err != nil { return resp, err } return checkRespWithErrType(resp, err, errType) } func isMessageWithPlaceHolder(message string) bool { err := fmt.Errorf(message, "test error") return !strings.Contains(err.Error(), "%!(EXTRA") } // combinedTaskErrorMessage is a general purpose function // that returns the contents of the operation error and, if found, the error // returned by the associated task func combinedTaskErrorMessage(task *types.Task, err error) string { extendedError := err.Error() if task.Error != nil { extendedError = fmt.Sprintf("operation error: %s - task error: [%d - %s] %s", err, task.Error.MajorErrorCode, task.Error.MinorErrorCode, task.Error.Message) } return extendedError }
[ "\"GOVCD_SHOW_REQ\"", "\"GOVCD_SHOW_RESP\"" ]
[]
[ "GOVCD_SHOW_REQ", "GOVCD_SHOW_RESP" ]
[]
["GOVCD_SHOW_REQ", "GOVCD_SHOW_RESP"]
go
2
0
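The environment entries extracted for this row (GOVCD_SHOW_REQ, GOVCD_SHOW_RESP) appear to be debug switches that gate the client's request/response logging. As a rough illustration only, with a hypothetical package and helper name that are not part of go-vcloud-director, this is the kind of env-var-gated dump such a flag typically controls:

    package sketch

    import (
        "fmt"
        "net/http"
        "net/http/httputil"
        "os"
    )

    // dumpResponseIfEnabled prints the full HTTP response only when the
    // GOVCD_SHOW_RESP environment variable is set (hypothetical helper name).
    func dumpResponseIfEnabled(resp *http.Response) {
        if os.Getenv("GOVCD_SHOW_RESP") == "" {
            return
        }
        dump, err := httputil.DumpResponse(resp, true)
        if err != nil {
            fmt.Printf("error dumping response: %s\n", err)
            return
        }
        fmt.Printf("%s\n", dump)
    }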
cmd/vulcan-aws-alerts/main.go
/* Copyright 2020 Adevinta */ package main import ( "bytes" "context" "encoding/json" "fmt" "io/ioutil" "net/http" "os" "github.com/aws/aws-sdk-go/aws/credentials" check "github.com/adevinta/vulcan-check-sdk" "github.com/adevinta/vulcan-check-sdk/helpers" checkstate "github.com/adevinta/vulcan-check-sdk/state" "github.com/aws/aws-sdk-go/aws/arn" ) var ( checkName = "vulcan-aws-alerts" logger = check.NewCheckLog(checkName) ) func main() { run := func(ctx context.Context, target, assetType, optJSON string, state checkstate.State) error { if target == "" { return fmt.Errorf("check target missing") } vulcanAssumeRoleEndpoint := os.Getenv("VULCAN_ASSUME_ROLE_ENDPOINT") if vulcanAssumeRoleEndpoint == "" { return fmt.Errorf("VULCAN_ASSUME_ROLE_ENDPOINT option is missing") } roleName := os.Getenv("ROLE_NAME") isReachable, err := helpers.IsReachable(target, assetType, helpers.NewAWSCreds(vulcanAssumeRoleEndpoint, roleName)) if err != nil { logger.Warnf("Can not check asset reachability: %v", err) } if !isReachable { return checkstate.ErrAssetUnreachable } parsedARN, err := arn.Parse(target) if err != nil { return err } return caCertificateRotation(parsedARN.AccountID, vulcanAssumeRoleEndpoint, roleName, state) } c := check.NewCheckFromHandler(checkName, run) c.RunAndServe() } // AssumeRoleResponse represent a response from vulcan-assume-role type AssumeRoleResponse struct { AccessKey string `json:"access_key"` SecretAccessKey string `json:"secret_access_key"` SessionToken string `json:"session_token"` } func getCredentials(url string, accountID, role string) (*credentials.Credentials, error) { m := map[string]string{"account_id": accountID} if role != "" { m["role"] = role } jsonBody, err := json.Marshal(m) if err != nil { return nil, err } req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonBody)) if err != nil { return nil, err } req.Header.Set("Content-Type", "application/json") client := &http.Client{} resp, err := client.Do(req) if err != nil { logger.Errorf("cannot do request: %s", err.Error()) return nil, err } defer resp.Body.Close() assumeRoleResponse := AssumeRoleResponse{} buf, err := ioutil.ReadAll(resp.Body) if err != nil { logger.Errorf("Cannot read request body %s", err.Error()) return nil, err } err = json.Unmarshal(buf, &assumeRoleResponse) if err != nil { logger.Errorf("Cannot decode request %s", err.Error()) logger.Errorf("RequestBody: %s", string(buf)) return nil, err } return credentials.NewStaticCredentials( assumeRoleResponse.AccessKey, assumeRoleResponse.SecretAccessKey, assumeRoleResponse.SessionToken), nil }
[ "\"VULCAN_ASSUME_ROLE_ENDPOINT\"", "\"ROLE_NAME\"" ]
[]
[ "ROLE_NAME", "VULCAN_ASSUME_ROLE_ENDPOINT" ]
[]
["ROLE_NAME", "VULCAN_ASSUME_ROLE_ENDPOINT"]
go
2
0
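The check in the row above treats VULCAN_ASSUME_ROLE_ENDPOINT as required and ROLE_NAME as optional. A minimal sketch of that fail-fast configuration pattern, with a function name that is illustrative and not part of the original check:

    package sketch

    import (
        "fmt"
        "os"
    )

    // loadConfig mirrors the fail-fast pattern in the check above: a missing
    // required endpoint aborts the run, while ROLE_NAME may legitimately be empty.
    func loadConfig() (endpoint, role string, err error) {
        endpoint = os.Getenv("VULCAN_ASSUME_ROLE_ENDPOINT")
        if endpoint == "" {
            return "", "", fmt.Errorf("VULCAN_ASSUME_ROLE_ENDPOINT option is missing")
        }
        role = os.Getenv("ROLE_NAME") // optional; empty selects the default role
        return endpoint, role, nil
    }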
manage.py
#!/usr/bin/env python """Django's command-line utility for administrative tasks.""" import os import sys def main(): """Run administrative tasks.""" os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'fullapp_project.settings') try: from django.core.management import execute_from_command_line except ImportError as exc: raise ImportError( "Couldn't import Django. Are you sure it's installed and " "available on your PYTHONPATH environment variable? Did you " "forget to activate a virtual environment?" ) from exc execute_from_command_line(sys.argv) if __name__ == '__main__': main()
[]
[]
[]
[]
[]
python
0
0
oshi-core/src/main/java/oshi/software/os/windows/WindowsOperatingSystem.java
/** * MIT License * * Copyright (c) 2010-2019 The OSHI project team * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package oshi.software.os.windows; import static oshi.software.os.OSService.State.OTHER; import static oshi.software.os.OSService.State.RUNNING; import static oshi.software.os.OSService.State.STOPPED; import static oshi.util.Memoizer.memoize; import java.io.File; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.concurrent.TimeUnit; import java.util.function.Supplier; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.sun.jna.Memory; // NOSONAR squid:S1191 import com.sun.jna.Native; import com.sun.jna.Pointer; import com.sun.jna.platform.win32.Advapi32; import com.sun.jna.platform.win32.Advapi32Util; import com.sun.jna.platform.win32.Advapi32Util.Account; import com.sun.jna.platform.win32.Advapi32Util.EventLogIterator; import com.sun.jna.platform.win32.Advapi32Util.EventLogRecord; import com.sun.jna.platform.win32.BaseTSD.ULONG_PTRByReference; import com.sun.jna.platform.win32.Kernel32Util; import com.sun.jna.platform.win32.Psapi; import com.sun.jna.platform.win32.Psapi.PERFORMANCE_INFORMATION; import com.sun.jna.platform.win32.Tlhelp32; import com.sun.jna.platform.win32.User32; import com.sun.jna.platform.win32.VersionHelpers; import com.sun.jna.platform.win32.W32ServiceManager; import com.sun.jna.platform.win32.Win32Exception; import com.sun.jna.platform.win32.WinBase; import com.sun.jna.platform.win32.WinDef.DWORD; import com.sun.jna.platform.win32.WinError; import com.sun.jna.platform.win32.WinNT; import com.sun.jna.platform.win32.WinNT.HANDLE; import com.sun.jna.platform.win32.WinNT.HANDLEByReference; import com.sun.jna.platform.win32.WinPerf.PERF_COUNTER_BLOCK; import com.sun.jna.platform.win32.WinPerf.PERF_COUNTER_DEFINITION; import com.sun.jna.platform.win32.WinPerf.PERF_DATA_BLOCK; import com.sun.jna.platform.win32.WinPerf.PERF_INSTANCE_DEFINITION; import com.sun.jna.platform.win32.WinPerf.PERF_OBJECT_TYPE; import com.sun.jna.platform.win32.WinReg; import com.sun.jna.platform.win32.WinUser; import com.sun.jna.platform.win32.Winsvc; import com.sun.jna.platform.win32.Wtsapi32; import com.sun.jna.platform.win32.Wtsapi32.WTS_PROCESS_INFO_EX; import com.sun.jna.platform.win32.COM.WbemcliUtil.WmiQuery; import com.sun.jna.platform.win32.COM.WbemcliUtil.WmiResult; import com.sun.jna.ptr.IntByReference; 
import com.sun.jna.ptr.PointerByReference; import oshi.jna.platform.windows.Kernel32; import oshi.software.common.AbstractOperatingSystem; import oshi.software.os.FileSystem; import oshi.software.os.NetworkParams; import oshi.software.os.OSProcess; import oshi.software.os.OSService; import oshi.software.os.OSService.State; import oshi.software.os.OperatingSystem; import oshi.util.GlobalConfig; import oshi.util.ParseUtil; import oshi.util.platform.windows.PerfCounterQuery; import oshi.util.platform.windows.PerfCounterWildcardQuery; import oshi.util.platform.windows.PerfCounterWildcardQuery.PdhCounterWildcardProperty; import oshi.util.platform.windows.WmiQueryHandler; import oshi.util.platform.windows.WmiUtil; public class WindowsOperatingSystem extends AbstractOperatingSystem { private static final Logger LOG = LoggerFactory.getLogger(WindowsOperatingSystem.class); private static final boolean IS_VISTA_OR_GREATER = VersionHelpers.IsWindowsVistaOrGreater(); private static final boolean IS_WINDOWS7_OR_GREATER = VersionHelpers.IsWindows7OrGreater(); /** * Windows event log name */ private static Supplier<String> systemLog = memoize(WindowsOperatingSystem::querySystemLog, TimeUnit.HOURS.toNanos(1)); private static final long BOOTTIME = querySystemBootTime(); private static final HkeyPerformanceData HKEY_PERFORMANCE_DATA; static { HkeyPerformanceData data = null; try { data = new HkeyPerformanceData(); } catch (InstantiationException e) { LOG.warn("{} Process statistics will be read from PDH or WMI.", e.getMessage()); } HKEY_PERFORMANCE_DATA = data; } private final PerfCounterWildcardQuery<ProcessPerformanceProperty> processPerformancePerfCounters = new PerfCounterWildcardQuery<>( ProcessPerformanceProperty.class, "Process", "Win32_Process WHERE NOT Name LIKE\"%_Total\"", "Process Information"); static { enableDebugPrivilege(); } @SuppressWarnings("deprecation") public WindowsOperatingSystem() { this.version = new WindowsOSVersionInfoEx(); } @Override public String queryManufacturer() { return "Microsoft"; } @Override public FamilyVersionInfo queryFamilyVersionInfo() { WmiQuery<OSVersionProperty> osVersionQuery = new WmiQuery<>("Win32_OperatingSystem", OSVersionProperty.class); WmiResult<OSVersionProperty> versionInfo = WmiQueryHandler.createInstance().queryWMI(osVersionQuery); if (versionInfo.getResultCount() < 1) { return new FamilyVersionInfo("Windows", new OSVersionInfo(System.getProperty("os.version"), null, null)); } // Guaranteed that versionInfo is not null and lists non-empty // before calling the parse*() methods int suiteMask = WmiUtil.getUint32(versionInfo, OSVersionProperty.SuiteMask, 0); String buildNumber = WmiUtil.getString(versionInfo, OSVersionProperty.BuildNumber, 0); String version = parseVersion(versionInfo, suiteMask, buildNumber); String codeName = parseCodeName(suiteMask); return new FamilyVersionInfo("Windows", new OSVersionInfo(version, codeName, buildNumber)); } private static String parseVersion(WmiResult<OSVersionProperty> versionInfo, int suiteMask, String buildNumber) { // Initialize a default, sane value String version = System.getProperty("os.version"); // Version is major.minor.build. Parse the version string for // major/minor and get the build number separately String[] verSplit = WmiUtil.getString(versionInfo, OSVersionProperty.Version, 0).split("\\D"); int major = verSplit.length > 0 ? ParseUtil.parseIntOrDefault(verSplit[0], 0) : 0; int minor = verSplit.length > 1 ? 
ParseUtil.parseIntOrDefault(verSplit[1], 0) : 0; // see // http://msdn.microsoft.com/en-us/library/windows/desktop/ms724833%28v=vs.85%29.aspx boolean ntWorkstation = WmiUtil.getUint32(versionInfo, OSVersionProperty.ProductType, 0) == WinNT.VER_NT_WORKSTATION; switch (major) { case 10: if (minor == 0) { if (ntWorkstation) { version = "10"; } else { // Build numbers greater than 17762 is Server 2019 for OS // Version 10.0 version = (ParseUtil.parseLongOrDefault(buildNumber, 0L) > 17762) ? "Server 2019" : "Server 2016"; } } break; case 6: if (minor == 3) { version = ntWorkstation ? "8.1" : "Server 2012 R2"; } else if (minor == 2) { version = ntWorkstation ? "8" : "Server 2012"; } else if (minor == 1) { version = ntWorkstation ? "7" : "Server 2008 R2"; } else if (minor == 0) { version = ntWorkstation ? "Vista" : "Server 2008"; } break; case 5: if (minor == 2) { if ((suiteMask & 0x00008000) != 0) {// VER_SUITE_WH_SERVER version = "Home Server"; } else if (ntWorkstation) { version = "XP"; // 64 bits } else { version = User32.INSTANCE.GetSystemMetrics(WinUser.SM_SERVERR2) != 0 ? "Server 2003" : "Server 2003 R2"; } } else if (minor == 1) { version = "XP"; // 32 bits } else if (minor == 0) { version = "2000"; } break; default: break; } String sp = WmiUtil.getString(versionInfo, OSVersionProperty.CSDVersion, 0); if (!sp.isEmpty() && !"unknown".equals(sp)) { version = version + " " + sp.replace("Service Pack ", "SP"); } return version; } /** * Gets suites available on the system and return as a codename * * @param suiteMask * * @return Suites */ private static String parseCodeName(int suiteMask) { List<String> suites = new ArrayList<>(); if ((suiteMask & 0x00000002) != 0) { suites.add("Enterprise"); } if ((suiteMask & 0x00000004) != 0) { suites.add("BackOffice"); } if ((suiteMask & 0x00000008) != 0) { suites.add("Communication Server"); } if ((suiteMask & 0x00000080) != 0) { suites.add("Datacenter"); } if ((suiteMask & 0x00000200) != 0) { suites.add("Home"); } if ((suiteMask & 0x00000400) != 0) { suites.add("Web Server"); } if ((suiteMask & 0x00002000) != 0) { suites.add("Storage Server"); } if ((suiteMask & 0x00004000) != 0) { suites.add("Compute Cluster"); } // 0x8000, Home Server, is included in main version name return String.join(",", suites); } @Override protected int queryBitness(int jvmBitness) { WmiQueryHandler wmiQueryHandler = WmiQueryHandler.createInstance(); if (jvmBitness < 64 && System.getenv("ProgramFiles(x86)") != null && IS_VISTA_OR_GREATER) { WmiQuery<BitnessProperty> bitnessQuery = new WmiQuery<>("Win32_Processor", BitnessProperty.class); WmiResult<BitnessProperty> bitnessMap = wmiQueryHandler.queryWMI(bitnessQuery); if (bitnessMap.getResultCount() > 0) { return WmiUtil.getUint16(bitnessMap, BitnessProperty.AddressWidth, 0); } } return jvmBitness; } @Override public boolean queryElevated() { try { File dir = new File(System.getenv("windir") + "\\system32\\config\\systemprofile"); return dir.isDirectory(); } catch (SecurityException e) { return false; } } @Override public FileSystem getFileSystem() { return new WindowsFileSystem(); } @Override public OSProcess[] getProcesses(int limit, ProcessSort sort, boolean slowFields) { List<OSProcess> procList = processMapToList(null, slowFields); List<OSProcess> sorted = processSort(procList, limit, sort); return sorted.toArray(new OSProcess[0]); } @Override public List<OSProcess> getProcesses(Collection<Integer> pids) { return processMapToList(pids, true); } @Override public OSProcess[] getChildProcesses(int parentPid, int limit, 
ProcessSort sort) { Set<Integer> childPids = new HashSet<>(); // Get processes from ToolHelp API for parent PID Tlhelp32.PROCESSENTRY32.ByReference processEntry = new Tlhelp32.PROCESSENTRY32.ByReference(); WinNT.HANDLE snapshot = Kernel32.INSTANCE.CreateToolhelp32Snapshot(Tlhelp32.TH32CS_SNAPPROCESS, new DWORD(0)); try { while (Kernel32.INSTANCE.Process32Next(snapshot, processEntry)) { if (processEntry.th32ParentProcessID.intValue() == parentPid) { childPids.add(processEntry.th32ProcessID.intValue()); } } } finally { Kernel32.INSTANCE.CloseHandle(snapshot); } List<OSProcess> procList = getProcesses(childPids); List<OSProcess> sorted = processSort(procList, limit, sort); return sorted.toArray(new OSProcess[0]); } @Override public OSProcess getProcess(int pid, boolean slowFields) { List<OSProcess> procList = processMapToList(Arrays.asList(pid), slowFields); return procList.isEmpty() ? null : procList.get(0); } /** * Private method to do the heavy lifting for all the getProcess functions. * * @param pids * A collection of pids to query. If null, the entire process list * will be queried. * @param slowFields * Whether to include fields that incur processor latency * @return A corresponding list of processes */ private List<OSProcess> processMapToList(Collection<Integer> pids, boolean slowFields) { WmiQueryHandler wmiQueryHandler = WmiQueryHandler.createInstance(); // Get data from the registry if possible, otherwise performance counters with // WMI backup Map<Integer, OSProcess> processMap = (HKEY_PERFORMANCE_DATA != null) ? HKEY_PERFORMANCE_DATA.buildProcessMapFromRegistry(this, pids) : buildProcessMapFromPerfCounters(pids); // define here to avoid object repeated creation overhead later List<String> groupList = new ArrayList<>(); List<String> groupIDList = new ArrayList<>(); int myPid = getProcessId(); // Structure we'll fill from native memory pointer for Vista+ Pointer pProcessInfo = null; WTS_PROCESS_INFO_EX[] processInfo = null; IntByReference pCount = new IntByReference(0); // WMI result we'll use for pre-Vista WmiResult<ProcessXPProperty> processWmiResult = null; // Get processes from WTS (post-XP) if (IS_WINDOWS7_OR_GREATER) { final PointerByReference ppProcessInfo = new PointerByReference(); if (!Wtsapi32.INSTANCE.WTSEnumerateProcessesEx(Wtsapi32.WTS_CURRENT_SERVER_HANDLE, new IntByReference(Wtsapi32.WTS_PROCESS_INFO_LEVEL_1), Wtsapi32.WTS_ANY_SESSION, ppProcessInfo, pCount)) { LOG.error("Failed to enumerate Processes. Error code: {}", Kernel32.INSTANCE.GetLastError()); return new ArrayList<>(0); } // extract the pointed-to pointer and create array pProcessInfo = ppProcessInfo.getValue(); final WTS_PROCESS_INFO_EX processInfoRef = new WTS_PROCESS_INFO_EX(pProcessInfo); processInfo = (WTS_PROCESS_INFO_EX[]) processInfoRef.toArray(pCount.getValue()); } else { // Pre-Vista we can't use WTSEnumerateProcessesEx so we'll grab the // same info from WMI and fake the array StringBuilder sb = new StringBuilder(PROCESS_BASE_CLASS); if (pids != null) { boolean first = true; for (Integer pid : pids) { if (first) { sb.append(" WHERE ProcessID="); first = false; } else { sb.append(" OR ProcessID="); } sb.append(pid); } } WmiQuery<ProcessXPProperty> processQueryXP = new WmiQuery<>(sb.toString(), ProcessXPProperty.class); processWmiResult = wmiQueryHandler.queryWMI(processQueryXP); } // Store a subset of processes in a list to later return. List<OSProcess> processList = new ArrayList<>(); int procCount = IS_WINDOWS7_OR_GREATER ? 
processInfo.length : processWmiResult.getResultCount(); for (int i = 0; i < procCount; i++) { int pid = IS_WINDOWS7_OR_GREATER ? processInfo[i].ProcessId : WmiUtil.getUint32(processWmiResult, ProcessXPProperty.ProcessId, i); OSProcess proc = null; // If the cache is empty, there was a problem with // filling the cache using performance information. if (processMap.isEmpty()) { if (pids != null && !pids.contains(pid)) { continue; } proc = new OSProcess(this); proc.setProcessID(pid); proc.setName(IS_WINDOWS7_OR_GREATER ? processInfo[i].pProcessName : WmiUtil.getString(processWmiResult, ProcessXPProperty.Name, i)); } else { proc = processMap.get(pid); if (proc == null || pids != null && !pids.contains(pid)) { continue; } } // For my own process, set CWD if (pid == myPid) { String cwd = new File(".").getAbsolutePath(); // trim off trailing "." proc.setCurrentWorkingDirectory(cwd.isEmpty() ? "" : cwd.substring(0, cwd.length() - 1)); } if (IS_WINDOWS7_OR_GREATER) { WTS_PROCESS_INFO_EX procInfo = processInfo[i]; proc.setKernelTime(procInfo.KernelTime.getValue() / 10000L); proc.setUserTime(procInfo.UserTime.getValue() / 10000L); proc.setThreadCount(procInfo.NumberOfThreads); proc.setVirtualSize(procInfo.PagefileUsage & 0xffff_ffffL); proc.setOpenFiles(procInfo.HandleCount); } else { proc.setKernelTime(WmiUtil.getUint64(processWmiResult, ProcessXPProperty.KernelModeTime, i) / 10000L); proc.setUserTime(WmiUtil.getUint64(processWmiResult, ProcessXPProperty.UserModeTime, i) / 10000L); proc.setThreadCount(WmiUtil.getUint32(processWmiResult, ProcessXPProperty.ThreadCount, i)); // WMI Pagefile usage is in KB proc.setVirtualSize(1024 * (WmiUtil.getUint32(processWmiResult, ProcessXPProperty.PageFileUsage, i) & 0xffff_ffffL)); proc.setOpenFiles(WmiUtil.getUint32(processWmiResult, ProcessXPProperty.HandleCount, i)); } // Get a handle to the process for various extended info. Only gets // current user unless running as administrator final HANDLE pHandle = Kernel32.INSTANCE.OpenProcess(WinNT.PROCESS_QUERY_INFORMATION, false, proc.getProcessID()); if (pHandle != null) { proc.setBitness(this.getBitness()); // Only test for 32-bit process on 64-bit windows if (IS_VISTA_OR_GREATER && this.getBitness() == 64) { IntByReference wow64 = new IntByReference(0); if (Kernel32.INSTANCE.IsWow64Process(pHandle, wow64)) { proc.setBitness(wow64.getValue() > 0 ? 32 : 64); } } // Full path final HANDLEByReference phToken = new HANDLEByReference(); try {// EXECUTABLEPATH proc.setPath(IS_WINDOWS7_OR_GREATER ? Kernel32Util.QueryFullProcessImageName(pHandle, 0) : WmiUtil.getString(processWmiResult, ProcessXPProperty.ExecutablePath, i)); if (Advapi32.INSTANCE.OpenProcessToken(pHandle, WinNT.TOKEN_DUPLICATE | WinNT.TOKEN_QUERY, phToken)) { Account account = Advapi32Util.getTokenAccount(phToken.getValue()); proc.setUser(account.name); proc.setUserID(account.sidString); // Fetching group information incurs ~10ms per process. if (slowFields) { Account[] accounts = Advapi32Util.getTokenGroups(phToken.getValue()); // get groups groupList.clear(); groupIDList.clear(); for (Account a : accounts) { groupList.add(a.name); groupIDList.add(a.sidString); } proc.setGroup(String.join(",", groupList)); proc.setGroupID(String.join(",", groupIDList)); } } else { int error = Kernel32.INSTANCE.GetLastError(); // Access denied errors are common. Fail silently. 
if (error != WinError.ERROR_ACCESS_DENIED) { LOG.error("Failed to get process token for process {}: {}", proc.getProcessID(), Kernel32.INSTANCE.GetLastError()); } } } catch (Win32Exception e) { handleWin32ExceptionOnGetProcessInfo(proc, e); } finally { final HANDLE token = phToken.getValue(); if (token != null) { Kernel32.INSTANCE.CloseHandle(token); } } Kernel32.INSTANCE.CloseHandle(pHandle); } // There is no easy way to get ExecutuionState for a process. // The WMI value is null. It's possible to get thread Execution // State and possibly roll up. proc.setState(OSProcess.State.RUNNING); // Initialize default proc.setCommandLine(""); processList.add(proc); } // Clean up memory allocated in C (only Vista+ but null pointer // effectively tests) if (pProcessInfo != null && !Wtsapi32.INSTANCE.WTSFreeMemoryEx(Wtsapi32.WTS_PROCESS_INFO_LEVEL_1, pProcessInfo, pCount.getValue())) { LOG.error("Failed to Free Memory for Processes. Error code: {}", Kernel32.INSTANCE.GetLastError()); return new ArrayList<>(0); } // Command Line only accessible via WMI. if (slowFields) { StringBuilder sb = new StringBuilder(PROCESS_BASE_CLASS); if (pids != null) { Set<Integer> pidsToQuery = new HashSet<>(); for (OSProcess process : processList) { pidsToQuery.add(process.getProcessID()); } boolean first = true; for (Integer pid : pidsToQuery) { if (first) { sb.append(" WHERE ProcessID="); first = false; } else { sb.append(" OR ProcessID="); } sb.append(pid); } } WmiQuery<ProcessProperty> processQuery = new WmiQuery<>(sb.toString(), ProcessProperty.class); WmiResult<ProcessProperty> commandLineProcs = wmiQueryHandler.queryWMI(processQuery); for (int p = 0; p < commandLineProcs.getResultCount(); p++) { int pid = WmiUtil.getUint32(commandLineProcs, ProcessProperty.ProcessId, p); if (processMap.containsKey(pid)) { OSProcess proc = processMap.get(pid); proc.setCommandLine(WmiUtil.getString(commandLineProcs, ProcessProperty.CommandLine, p)); } } } return processList; } protected void handleWin32ExceptionOnGetProcessInfo(OSProcess proc, Win32Exception ex) { LOG.warn("Failed to set path or get user/group on PID {}. It may have terminated. 
{}", proc.getProcessID(), ex.getMessage()); } private Map<Integer, OSProcess> buildProcessMapFromPerfCounters(Collection<Integer> pids) { Map<Integer, OSProcess> processMap = new HashMap<>(); Map<ProcessPerformanceProperty, List<Long>> valueMap = this.processPerformancePerfCounters .queryValuesWildcard(); long now = System.currentTimeMillis(); // 1970 epoch List<String> instances = this.processPerformancePerfCounters.getInstancesFromLastQuery(); List<Long> pidList = valueMap.get(ProcessPerformanceProperty.ProcessId); List<Long> ppidList = valueMap.get(ProcessPerformanceProperty.ParentProcessId); List<Long> priorityList = valueMap.get(ProcessPerformanceProperty.Priority); List<Long> ioReadList = valueMap.get(ProcessPerformanceProperty.ReadTransferCount); List<Long> ioWriteList = valueMap.get(ProcessPerformanceProperty.WriteTransferCount); List<Long> workingSetSizeList = valueMap.get(ProcessPerformanceProperty.PrivatePageCount); List<Long> creationTimeList = valueMap.get(ProcessPerformanceProperty.CreationDate); for (int inst = 0; inst < instances.size(); inst++) { int pid = pidList.get(inst).intValue(); if (pids == null || pids.contains(pid)) { OSProcess proc = new OSProcess(this); processMap.put(pid, proc); proc.setProcessID(pid); proc.setName(instances.get(inst)); proc.setParentProcessID(ppidList.get(inst).intValue()); proc.setPriority(priorityList.get(inst).intValue()); // if creation time value is less than current millis, it's in 1970 epoch, // otherwise it's 1601 epoch and we must convert long ctime = creationTimeList.get(inst); if (ctime > now) { ctime = WinBase.FILETIME.filetimeToDate((int) (ctime >> 32), (int) (ctime & 0xffffffffL)).getTime(); } proc.setUpTime(now - ctime); proc.setStartTime(ctime); proc.setBytesRead(ioReadList.get(inst)); proc.setBytesWritten(ioWriteList.get(inst)); proc.setResidentSetSize(workingSetSizeList.get(inst)); } } return processMap; } @Override public long getProcessAffinityMask(int processId) { final HANDLE pHandle = Kernel32.INSTANCE.OpenProcess(WinNT.PROCESS_QUERY_INFORMATION, false, processId); if (pHandle != null) { ULONG_PTRByReference processAffinity = new ULONG_PTRByReference(); ULONG_PTRByReference systemAffinity = new ULONG_PTRByReference(); if (Kernel32.INSTANCE.GetProcessAffinityMask(pHandle, processAffinity, systemAffinity)) { return Pointer.nativeValue(processAffinity.getValue().toPointer()); } } return 0L; } @Override public int getProcessId() { return Kernel32.INSTANCE.GetCurrentProcessId(); } @Override public int getProcessCount() { PERFORMANCE_INFORMATION perfInfo = new PERFORMANCE_INFORMATION(); if (!Psapi.INSTANCE.GetPerformanceInfo(perfInfo, perfInfo.size())) { LOG.error("Failed to get Performance Info. Error code: {}", Kernel32.INSTANCE.GetLastError()); return 0; } return perfInfo.ProcessCount.intValue(); } @Override public int getThreadCount() { PERFORMANCE_INFORMATION perfInfo = new PERFORMANCE_INFORMATION(); if (!Psapi.INSTANCE.GetPerformanceInfo(perfInfo, perfInfo.size())) { LOG.error("Failed to get Performance Info. 
Error code: {}", Kernel32.INSTANCE.GetLastError()); return 0; } return perfInfo.ThreadCount.intValue(); } @Override public long getSystemUptime() { return querySystemUptime(); } private static long querySystemUptime() { // Uptime is in seconds so divide milliseconds // GetTickCount64 requires Vista (6.0) or later if (IS_VISTA_OR_GREATER) { return Kernel32.INSTANCE.GetTickCount64() / 1000L; } else { // 32 bit rolls over at ~ 49 days return Kernel32.INSTANCE.GetTickCount() / 1000L; } } @Override public long getSystemBootTime() { return BOOTTIME; } private static long querySystemBootTime() { String eventLog = systemLog.get(); if (eventLog != null) { try { EventLogIterator iter = new EventLogIterator(null, eventLog, WinNT.EVENTLOG_BACKWARDS_READ); // Get the most recent boot event (ID 12) from the Event log. If Windows "Fast // Startup" is enabled we may not see event 12, so also check for most recent ID // 6005 (Event log startup) as a reasonably close backup. long event6005Time = 0L; while (iter.hasNext()) { EventLogRecord record = iter.next(); if (record.getStatusCode() == 12) { // Event 12 is system boot. We want this value unless we find two 6005 events // first (may occur with Fast Boot) return record.getRecord().TimeGenerated.longValue(); } else if (record.getStatusCode() == 6005) { // If we already found one, this means we've found a second one without finding // an event 12. Return the latest one. if (event6005Time > 0) { return event6005Time; } // First 6005; tentatively assign event6005Time = record.getRecord().TimeGenerated.longValue(); } } // Only one 6005 found, return if (event6005Time > 0) { return event6005Time; } } catch (Win32Exception e) { LOG.warn("Can't open event log \"{}\".", eventLog); } } // If we get this far, event log reading has failed, either from no log or no // startup times. Subtract up time from current time as a reasonable proxy. return System.currentTimeMillis() / 1000L - querySystemUptime(); } @Override public NetworkParams getNetworkParams() { return new WindowsNetworkParams(); } /** * Enables debug privileges for this process, required for OpenProcess() to get * processes other than the current user */ private static void enableDebugPrivilege() { HANDLEByReference hToken = new HANDLEByReference(); boolean success = Advapi32.INSTANCE.OpenProcessToken(Kernel32.INSTANCE.GetCurrentProcess(), WinNT.TOKEN_QUERY | WinNT.TOKEN_ADJUST_PRIVILEGES, hToken); if (!success) { LOG.error("OpenProcessToken failed. Error: {}", Native.getLastError()); return; } WinNT.LUID luid = new WinNT.LUID(); success = Advapi32.INSTANCE.LookupPrivilegeValue(null, WinNT.SE_DEBUG_NAME, luid); if (!success) { LOG.error("LookupprivilegeValue failed. Error: {}", Native.getLastError()); Kernel32.INSTANCE.CloseHandle(hToken.getValue()); return; } WinNT.TOKEN_PRIVILEGES tkp = new WinNT.TOKEN_PRIVILEGES(1); tkp.Privileges[0] = new WinNT.LUID_AND_ATTRIBUTES(luid, new DWORD(WinNT.SE_PRIVILEGE_ENABLED)); success = Advapi32.INSTANCE.AdjustTokenPrivileges(hToken.getValue(), false, tkp, 0, null, null); if (!success) { LOG.error("AdjustTokenPrivileges failed. 
Error: {}", Native.getLastError()); } Kernel32.INSTANCE.CloseHandle(hToken.getValue()); } @Override public OSService[] getServices() { try (W32ServiceManager sm = new W32ServiceManager()) { sm.open(Winsvc.SC_MANAGER_ENUMERATE_SERVICE); Winsvc.ENUM_SERVICE_STATUS_PROCESS[] services = sm.enumServicesStatusExProcess(WinNT.SERVICE_WIN32, Winsvc.SERVICE_STATE_ALL, null); OSService[] svcArray = new OSService[services.length]; for (int i = 0; i < services.length; i++) { State state; switch (services[i].ServiceStatusProcess.dwCurrentState) { case 1: state = STOPPED; break; case 4: state = RUNNING; break; default: state = OTHER; break; } svcArray[i] = new OSService(services[i].lpDisplayName, services[i].ServiceStatusProcess.dwProcessId, state); } return svcArray; } catch (com.sun.jna.platform.win32.Win32Exception ex) { LOG.error("Win32Exception: {}", ex.getMessage()); return new OSService[0]; } } private static String querySystemLog() { String systemLog = GlobalConfig.get("oshi.os.windows.eventlog", "System"); if (systemLog.isEmpty()) { // Use faster boot time approximation return null; } // Check whether it works HANDLE h = Advapi32.INSTANCE.OpenEventLog(null, systemLog); if (h == null) { LOG.warn("Unable to open configured system Event log \"{}\". Calculating boot time from uptime.", systemLog); return null; } return systemLog; } enum OSVersionProperty { Version, ProductType, BuildNumber, CSDVersion, SuiteMask; } enum BitnessProperty { AddressWidth; } enum ProcessProperty { ProcessId, CommandLine; } private static final String PROCESS_BASE_CLASS = "Win32_Process"; // Properties to get from WMI if WTSEnumerateProcesses doesn't work enum ProcessXPProperty { ProcessId, Name, KernelModeTime, UserModeTime, ThreadCount, PageFileUsage, HandleCount, ExecutablePath; } enum ProcessPerformanceProperty implements PdhCounterWildcardProperty { // First element defines WMI instance name field and PDH instance filter Name(PerfCounterQuery.NOT_TOTAL_INSTANCES), // Remaining elements define counters Priority("Priority Base"), // CreationDate("Elapsed Time"), // ProcessId("ID Process"), // ParentProcessId("Creating Process ID"), // ReadTransferCount("IO Read Bytes/sec"), // WriteTransferCount("IO Write Bytes/sec"), // PrivatePageCount("Working Set - Private"); private final String counter; ProcessPerformanceProperty(String counter) { this.counter = counter; } @Override public String getCounter() { return counter; } } private static class HkeyPerformanceData { /* * Grow as needed but persist */ private int perfDataBufferSize = 8192; /* * Process counter index in integer and string form */ private int processIndex; // 6 private String processIndexStr; // "6" /* * Registry counter data byte offsets */ private int priorityBaseOffset; // 92 private int elapsedTimeOffset; // 96 private int idProcessOffset; // 104 private int creatingProcessIdOffset; // 108 private int ioReadOffset; // 160 private int ioWriteOffset; // 168 private int workingSetPrivateOffset; // 192 private HkeyPerformanceData() throws InstantiationException { // Get the title indices int priorityBaseIndex = 0; int elapsedTimeIndex = 0; int idProcessIndex = 0; int creatingProcessIdIndex = 0; int ioReadIndex = 0; int ioWriteIndex = 0; int workingSetPrivateIndex = 0; try { final String ENGLISH_COUNTER_KEY = "SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Perflib\\009"; final String ENGLISH_COUNTER_VALUE = "Counter"; // Look up list of english names and ids String[] counters = Advapi32Util.registryGetStringArray(WinReg.HKEY_LOCAL_MACHINE, 
ENGLISH_COUNTER_KEY, ENGLISH_COUNTER_VALUE); // Array contains alternating index/name pairs // {"1", "1847", "2", "System", "4", "Memory", ... } // Get position of name in the array (odd index), return parsed value of // previous even index for (int i = 1; i < counters.length; i += 2) { if (counters[i].equals("Process")) { this.processIndex = Integer.parseInt(counters[i - 1]); } else if (counters[i].equals("Priority Base")) { priorityBaseIndex = Integer.parseInt(counters[i - 1]); } else if (counters[i].equals("Elapsed Time")) { elapsedTimeIndex = Integer.parseInt(counters[i - 1]); } else if (counters[i].equals("ID Process")) { idProcessIndex = Integer.parseInt(counters[i - 1]); } else if (counters[i].equals("Creating Process ID")) { creatingProcessIdIndex = Integer.parseInt(counters[i - 1]); } else if (counters[i].equals("IO Read Bytes/sec")) { ioReadIndex = Integer.parseInt(counters[i - 1]); } else if (counters[i].equals("IO Write Bytes/sec")) { ioWriteIndex = Integer.parseInt(counters[i - 1]); } else if (counters[i].equals("Working Set - Private")) { workingSetPrivateIndex = Integer.parseInt(counters[i - 1]); } } } catch (NumberFormatException e) { // Unexpected but handle anyway throw new InstantiationException("Failed to parse counter index/name array."); } catch (Win32Exception e) { throw new InstantiationException("Unable to locate English counter names in registry Perflib 009."); } // If any of the indices are 0, we failed if (this.processIndex == 0 || priorityBaseIndex == 0 || elapsedTimeIndex == 0 || idProcessIndex == 0 || creatingProcessIdIndex == 0 || ioReadIndex == 0 || ioWriteIndex == 0 || workingSetPrivateIndex == 0) { throw new InstantiationException("Failed to parse counter index/name array."); } this.processIndexStr = Integer.toString(this.processIndex); // now load the Process registry to match up the offsets // Sequentially increase the buffer until everything fits. // Save this buffer size for later use IntByReference lpcbData = new IntByReference(this.perfDataBufferSize); Pointer pPerfData = new Memory(this.perfDataBufferSize); int ret = Advapi32.INSTANCE.RegQueryValueEx(WinReg.HKEY_PERFORMANCE_DATA, this.processIndexStr, 0, null, pPerfData, lpcbData); if (ret != WinError.ERROR_SUCCESS && ret != WinError.ERROR_MORE_DATA) { throw new InstantiationException("Error " + ret + " reading HKEY_PERFORMANCE_DATA from the registry."); } while (ret == WinError.ERROR_MORE_DATA) { this.perfDataBufferSize += 4096; lpcbData.setValue(this.perfDataBufferSize); pPerfData = new Memory(this.perfDataBufferSize); ret = Advapi32.INSTANCE.RegQueryValueEx(WinReg.HKEY_PERFORMANCE_DATA, this.processIndexStr, 0, null, pPerfData, lpcbData); } PERF_DATA_BLOCK perfData = new PERF_DATA_BLOCK(pPerfData.share(0)); // See format at // https://msdn.microsoft.com/en-us/library/windows/desktop/aa373105(v=vs.85).aspx // [ ] Object Type // [ ][ ][ ] Multiple counter definitions // Then multiple: // [ ] Instance Definition // [ ] Instance name // [ ] Counter Block // [ ][ ][ ] Counter data for each definition above long perfObjectOffset = perfData.HeaderLength; // Iterate object types. 
For Process should only be one here for (int obj = 0; obj < perfData.NumObjectTypes; obj++) { PERF_OBJECT_TYPE perfObject = new PERF_OBJECT_TYPE(pPerfData.share(perfObjectOffset)); // Identify where counter definitions start long perfCounterOffset = perfObjectOffset + perfObject.HeaderLength; // If this isn't the Process object, ignore if (perfObject.ObjectNameTitleIndex == this.processIndex) { for (int counter = 0; counter < perfObject.NumCounters; counter++) { PERF_COUNTER_DEFINITION perfCounter = new PERF_COUNTER_DEFINITION( pPerfData.share(perfCounterOffset)); if (perfCounter.CounterNameTitleIndex == priorityBaseIndex) { this.priorityBaseOffset = perfCounter.CounterOffset; } else if (perfCounter.CounterNameTitleIndex == elapsedTimeIndex) { this.elapsedTimeOffset = perfCounter.CounterOffset; } else if (perfCounter.CounterNameTitleIndex == creatingProcessIdIndex) { this.creatingProcessIdOffset = perfCounter.CounterOffset; } else if (perfCounter.CounterNameTitleIndex == idProcessIndex) { this.idProcessOffset = perfCounter.CounterOffset; } else if (perfCounter.CounterNameTitleIndex == ioReadIndex) { this.ioReadOffset = perfCounter.CounterOffset; } else if (perfCounter.CounterNameTitleIndex == ioWriteIndex) { this.ioWriteOffset = perfCounter.CounterOffset; } else if (perfCounter.CounterNameTitleIndex == workingSetPrivateIndex) { this.workingSetPrivateOffset = perfCounter.CounterOffset; } // Increment for next Counter perfCounterOffset += perfCounter.ByteLength; } // We're done, break the loop break; } // Increment for next object (should never need this) perfObjectOffset += perfObject.TotalByteLength; } } private Map<Integer, OSProcess> buildProcessMapFromRegistry(OperatingSystem os, Collection<Integer> pids) { Map<Integer, OSProcess> processMap = new HashMap<>(); // Grab the PERF_DATA_BLOCK from the registry. // Sequentially increase the buffer until everything fits. IntByReference lpcbData = new IntByReference(this.perfDataBufferSize); Pointer pPerfData = new Memory(this.perfDataBufferSize); int ret = Advapi32.INSTANCE.RegQueryValueEx(WinReg.HKEY_PERFORMANCE_DATA, this.processIndexStr, 0, null, pPerfData, lpcbData); if (ret != WinError.ERROR_SUCCESS && ret != WinError.ERROR_MORE_DATA) { LOG.error("Error {} reading HKEY_PERFORMANCE_DATA from the registry.", ret); return processMap; } while (ret == WinError.ERROR_MORE_DATA) { this.perfDataBufferSize += 4096; lpcbData.setValue(this.perfDataBufferSize); pPerfData = new Memory(this.perfDataBufferSize); ret = Advapi32.INSTANCE.RegQueryValueEx(WinReg.HKEY_PERFORMANCE_DATA, this.processIndexStr, 0, null, pPerfData, lpcbData); } PERF_DATA_BLOCK perfData = new PERF_DATA_BLOCK(pPerfData.share(0)); long perfTime100nSec = perfData.PerfTime100nSec.getValue(); // 1601 long now = System.currentTimeMillis(); // 1970 epoch // See format at // https://msdn.microsoft.com/en-us/library/windows/desktop/aa373105(v=vs.85).aspx // [ ] Object Type // [ ][ ][ ] Multiple counter definitions // Then multiple: // [ ] Instance Definition // [ ] Instance name // [ ] Counter Block // [ ][ ][ ] Counter data for each definition above long perfObjectOffset = perfData.HeaderLength; // Iterate object types. 
For Process should only be one here for (int obj = 0; obj < perfData.NumObjectTypes; obj++) { PERF_OBJECT_TYPE perfObject = new PERF_OBJECT_TYPE(pPerfData.share(perfObjectOffset)); // If this isn't the Process object, ignore if (perfObject.ObjectNameTitleIndex == this.processIndex) { // Skip over counter definitions // There will be many of these, this points to the first one long perfInstanceOffset = perfObjectOffset + perfObject.DefinitionLength; // We need this for every process, initialize outside loop to // save overhead PERF_COUNTER_BLOCK perfCounterBlock = null; // Iterate instances. // The last instance is _Total so subtract 1 from max for (int inst = 0; inst < perfObject.NumInstances - 1; inst++) { PERF_INSTANCE_DEFINITION perfInstance = new PERF_INSTANCE_DEFINITION( pPerfData.share(perfInstanceOffset)); long perfCounterBlockOffset = perfInstanceOffset + perfInstance.ByteLength; int pid = pPerfData.getInt(perfCounterBlockOffset + this.idProcessOffset); if (pids == null || pids.contains(pid)) { OSProcess proc = new OSProcess(os); processMap.put(pid, proc); proc.setProcessID(pid); proc.setName(pPerfData.getWideString(perfInstanceOffset + perfInstance.NameOffset)); long upTime = (perfTime100nSec - pPerfData.getLong(perfCounterBlockOffset + this.elapsedTimeOffset)) / 10_000L; proc.setUpTime(upTime < 1L ? 1L : upTime); proc.setStartTime(now - upTime); proc.setBytesRead(pPerfData.getLong(perfCounterBlockOffset + this.ioReadOffset)); proc.setBytesWritten(pPerfData.getLong(perfCounterBlockOffset + this.ioWriteOffset)); proc.setResidentSetSize( pPerfData.getLong(perfCounterBlockOffset + this.workingSetPrivateOffset)); proc.setParentProcessID( pPerfData.getInt(perfCounterBlockOffset + this.creatingProcessIdOffset)); proc.setPriority(pPerfData.getInt(perfCounterBlockOffset + this.priorityBaseOffset)); } // Increment to next instance perfCounterBlock = new PERF_COUNTER_BLOCK(pPerfData.share(perfCounterBlockOffset)); perfInstanceOffset = perfCounterBlockOffset + perfCounterBlock.ByteLength; } // We've found the process object and are done, no need to look at any other // objects (shouldn't be any). Break the loop break; } // Increment for next object (should never need this) perfObjectOffset += perfObject.TotalByteLength; } return processMap; } } }
[ "\"ProgramFiles(x86", "\"windir\"" ]
[]
[ "ProgramFiles(x8", "windir" ]
[]
["ProgramFiles(x8", "windir"]
java
2
0
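The two environment variables annotated for this row come from queryBitness and queryElevated in the Java source. As an illustrative sketch only (function name ours), the ProgramFiles(x86) check works because that variable exists only on 64-bit Windows, so its presence hints that a 32-bit process is running on a 64-bit OS; the original code then confirms the width via WMI:

    package sketch

    import "os"

    // probably64BitWindows relies on the same hint queryBitness above uses:
    // ProgramFiles(x86) is only defined on 64-bit Windows hosts.
    func probably64BitWindows() bool {
        return os.Getenv("ProgramFiles(x86)") != ""
    }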
cmd/bot/main.go
package main import ( "github.com/BasLangenberg/discord-norris/internal/giphy" "github.com/BasLangenberg/discord-norris/internal/icndb" mcsrvstat_us "github.com/BasLangenberg/discord-norris/internal/mcsrvstat-us" "github.com/bwmarrin/discordgo" "log" "os" "os/signal" "strings" "syscall" "time" ) const ( version = "v0.1.0" ) func main(){ log.Printf("starting discord-norris %v\n", version) signalchan := make(chan os.Signal, 1) signal.Notify(signalchan, syscall.SIGINT, syscall.SIGTERM) dg, err := discordgo.New("Bot " + os.Getenv("DISCORD_BOT_KEY")) if err != nil { log.Printf("Unable to connect to discord: %v\n", err) os.Exit(1) } dg.AddHandler(responseWithQuote) err = dg.Open() if err != nil { log.Printf("unable to start bot: %v", err) } go checkForMySon(dg) log.Println("Bot initialized and running, press CTRL+C to stop") for { select { case <- signalchan: log.Println("Terminating...") dg.Close() os.Exit(0) } } } func responseWithQuote(s *discordgo.Session, m *discordgo.MessageCreate) { if m.Author.ID == s.State.User.ID { return } if strings.Contains(strings.ToLower(m.Content), "!chuck") { quote, qerr := icndb.GetRandomQuote() gif, gerr := giphy.GetRandomChuckGifDownSizedLarge() if qerr != nil { s.ChannelMessageSend(m.ChannelID, "Can't get a quote, please message @commissarbas who is supposed to maintain this bot") } if gerr != nil { s.ChannelMessageSend(m.ChannelID, "Can't get a gif, please message @commissarbas who is supposed to maintain this bot") } embed := &discordgo.MessageEmbed{ Author: &discordgo.MessageEmbedAuthor{}, Color: 0x00ff00, // Green Description: quote, Image: &discordgo.MessageEmbedImage{ URL: gif, }, Timestamp: time.Now().Format(time.RFC3339), // Discord wants ISO8601; RFC3339 is an extension of ISO8601 and should be completely compatible. Title: "Chuck Norris Quote", } _, err := s.ChannelMessageSendEmbed(m.ChannelID, embed) if err != nil { log.Printf("error publishing message: %v", err) } } } func checkForMySon(s *discordgo.Session) { var cache []string for { time.Sleep(1 * time.Minute) log.Println("Checking for specific username") online, err := mcsrvstat_us.GetOnlinePlayers() if err != nil { log.Printf("Unable to get online players: %v\n", err) } // Check for _, gamer := range online { if strings.Contains(gamer, "Sebe") { // Discord stuff if !isStringInStringSlice(cache, gamer) { s.ChannelMessageSend("692063079483047989", "Sébe just came online") } } if strings.Contains(gamer, "Bas") { if !isStringInStringSlice(cache, gamer) { s.ChannelMessageSend("692063079483047989", "Bas just came online") } } } cache = online log.Printf("online users: %v", cache) } } func isStringInStringSlice(cache []string, gamer string) bool { for _, user := range cache { if user == gamer { return true } } return false }
[ "\"DISCORD_BOT_KEY\"" ]
[]
[ "DISCORD_BOT_KEY" ]
[]
["DISCORD_BOT_KEY"]
go
1
0
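checkForMySon in the row above announces a player only when that name was absent from the previously cached online list. A generic sketch of that cache-diff step, assuming string slices as in the bot and a function name of our own choosing:

    package sketch

    // newlyOnline returns the names present in current but absent from previous,
    // i.e. the players worth announcing on this poll.
    func newlyOnline(previous, current []string) []string {
        seen := make(map[string]bool, len(previous))
        for _, name := range previous {
            seen[name] = true
        }
        var fresh []string
        for _, name := range current {
            if !seen[name] {
                fresh = append(fresh, name)
            }
        }
        return fresh
    }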
cs-config/cs_config/helpers.py
""" Functions used to help tax-brain configure to COMP """ import os import inspect import time import copy import hashlib import gzip import copy from pathlib import Path import warnings import pandas as pd import numpy as np from collections import defaultdict from taxbrain.report_utils import convert_params from taxcalc import (Policy, DIFF_TABLE_COLUMNS, DIFF_TABLE_LABELS, DIST_TABLE_COLUMNS, DIST_TABLE_LABELS, add_income_table_row_variable, add_quantile_table_row_variable, STANDARD_INCOME_BINS) from operator import itemgetter from .constants import (POLICY_SCHEMA, RESULTS_TABLE_TAGS, RESULTS_TABLE_TITLES, RESULTS_TOTAL_ROW_KEY_LABELS, MONEY_VARS) from .tables import (summary_aggregate, summary_diff_xbin, summary_diff_xdec, summary_dist_xbin, summary_dist_xdec) try: from s3fs import S3FileSystem except ImportError as ie: S3FileSystem = None TCPATH = inspect.getfile(Policy) TCDIR = os.path.dirname(TCPATH) AWS_ACCESS_KEY_ID = os.environ.get("AWS_ACCESS_KEY_ID", None) AWS_SECRET_ACCESS_KEY = os.environ.get("AWS_SECRET_ACCESS_KEY", None) def random_seed(user_mods, year): """ Compute random seed based on specified user_mods, which is a dictionary returned by Calculator.read_json_parameter_files(). """ def random_seed_from_subdict(subdict): """ Compute random seed from one user_mods subdictionary. """ assert isinstance(subdict, dict) all_vals = [] for year in sorted(subdict.keys()): all_vals.append(str(year)) params = subdict[year] for param in sorted(params.keys()): try: tple = tuple(params[param]) except TypeError: # params[param] is not an iterable value; make it so tple = tuple((params[param],)) all_vals.append(str((param, tple))) txt = u''.join(all_vals).encode('utf-8') hsh = hashlib.sha512(txt) seed = int(hsh.hexdigest(), 16) return seed % np.iinfo(np.uint32).max # start of random_seed function # modify the user mods to work in the random_seed_from_subdict function # TODO: Change all of this to work with new adjustments user_mods_copy = copy.deepcopy(user_mods) beh_mods_dict = {year: {}} for param, value in user_mods_copy["behavior"].items(): beh_mods_dict[year][param] = [value] user_mods_copy["behavior"] = beh_mods_dict ans = 0 for subdict_name in user_mods_copy: subdict = user_mods_copy[subdict_name] if subdict_name == "policy": subdict = convert_params(subdict) ans += random_seed_from_subdict(subdict) return ans % np.iinfo(np.uint32).max NUM_TO_FUZZ = 3 # when using dropq algorithm on puf.csv results def fuzzed(df1, df2, reform_affected, table_row_type): """ Create fuzzed df2 dataframe and corresponding unfuzzed df1 dataframe. 
Parameters ---------- df1: Pandas DataFrame contains results variables for the baseline policy, which are not changed by this function df2: Pandas DataFrame contains results variables for the reform policy, which are not changed by this function reform_affected: boolean numpy array (not changed by this function) True for filing units with a reform-induced combined tax difference; otherwise False table_row_type: string valid values are 'aggr', 'xbin', and 'xdec' Returns ------- df1, df2: Pandas DataFrames where copied df2 is fuzzed to maintain data privacy and where copied df1 has same filing unit order as has the fuzzed df2 """ assert table_row_type in ('aggr', 'xbin', 'xdec') assert len(df1.index) == len(df2.index) assert reform_affected.size == len(df1.index) df1 = copy.deepcopy(df1) df2 = copy.deepcopy(df2) # add copy of reform_affected to df2 df2['reform_affected'] = copy.deepcopy(reform_affected) # construct table rows, for which filing units in each row must be fuzzed if table_row_type == 'xbin': df1 = add_income_table_row_variable(df1, 'expanded_income', STANDARD_INCOME_BINS) df2['expanded_income_baseline'] = df1['expanded_income'] df2 = add_income_table_row_variable(df2, 'expanded_income_baseline', STANDARD_INCOME_BINS) del df2['expanded_income_baseline'] elif table_row_type == 'xdec': df1 = add_quantile_table_row_variable(df1, 'expanded_income', 10, decile_details=True) df2['expanded_income_baseline'] = df1['expanded_income'] df2 = add_quantile_table_row_variable(df2, 'expanded_income_baseline', 10, decile_details=True) del df2['expanded_income_baseline'] elif table_row_type == 'aggr': df1['table_row'] = np.ones(reform_affected.shape, dtype=int) df2['table_row'] = df1['table_row'] gdf1 = df1.groupby('table_row', sort=False) gdf2 = df2.groupby('table_row', sort=False) del df1['table_row'] del df2['table_row'] # fuzz up to NUM_TO_FUZZ filing units randomly chosen in each group # (or table row), where fuzz means to replace the reform (2) results # with the baseline (1) results for each chosen filing unit pd.options.mode.chained_assignment = None group_list = list() for name, group2 in gdf2: group2 = copy.deepcopy(group2) indices = np.where(group2['reform_affected']) num = min(len(indices[0]), NUM_TO_FUZZ) if num > 0: choices = np.random.choice(indices[0], size=num, replace=False) group1 = gdf1.get_group(name) for idx in choices: group2.iloc[idx] = group1.iloc[idx] group_list.append(group2) del group2 df2 = pd.concat(group_list) del df2['reform_affected'] pd.options.mode.chained_assignment = 'warn' # reinstate index order of df1 and df2 and return df1.sort_index(inplace=True) df2.sort_index(inplace=True) return (df1, df2) def nth_year_results(tb, year, user_mods, fuzz, return_html=True): """ Function to process taxbrain results for a given year """ start_time = time.time() dv1 = tb.base_data[year] dv2 = tb.reform_data[year] sres = {} if fuzz: # seed random number generator with a seed value based on user_mods # (reform-specific seed is used to choose whose results are fuzzed) seed = random_seed(user_mods, year) np.random.seed(seed) # make bool array marking which filing units are affected by the reform reform_affected = np.logical_not( np.isclose(dv1['combined'], dv2['combined'], atol=0.01, rtol=0.0) ) agg1, agg2 = fuzzed(dv1, dv2, reform_affected, 'aggr') sres = summary_aggregate(sres, tb) del agg1 del agg2 dv1b, dv2b = fuzzed(dv1, dv2, reform_affected, 'xbin') sres = summary_dist_xbin(sres, tb, year) sres = summary_diff_xbin(sres, tb, year) del dv1b del dv2b dv1d, dv2d = fuzzed(dv1, 
dv2, reform_affected, 'xdec') sres = summary_dist_xdec(sres, tb, year) sres = summary_diff_xdec(sres, tb, year) del dv1d del dv2d del reform_affected else: sres = summary_aggregate(sres, tb) sres = summary_dist_xbin(sres, tb, year) sres = summary_diff_xbin(sres, tb, year) sres = summary_dist_xdec(sres, tb, year) sres = summary_diff_xdec(sres, tb, year) # optionally return non-JSON-like results # it would be nice to allow the user to download the full CSV instead # of a CSV for each year # what if we allowed an aggregate format call? # - presents project with all data proeduced in a run? if return_html: res = {} for id in sres: res[id] = [{ 'dimension': year, 'raw': sres[id] }] elapsed_time = time.time() - start_time print('elapsed time for this run: {:.1f}'.format(elapsed_time)) return res else: elapsed_time = time.time() - start_time print('elapsed time for this run: {:.1f}'.format(elapsed_time)) return sres def postprocess(data_to_process): """ Receives results from run_nth_year_taxcalc_model over N years, formats the results, and combines the aggregate results """ labels = {x: DIFF_TABLE_LABELS[i] for i, x in enumerate(DIFF_TABLE_COLUMNS)} labels.update({x: DIST_TABLE_LABELS[i] for i, x in enumerate(DIST_TABLE_COLUMNS)}) # nested functions used below def label_columns(pdf): pdf.columns = [(labels[str(col)] if str(col) in labels else str(col)) for col in pdf.columns] return pdf def append_year(pdf, year): """ append_year embedded function revises all column names in dframe """ pdf.columns = ['{}_{}'.format(col, year) for col in pdf.columns] return pdf def year_columns(pdf, year): pdf.columns = [str(year)] return pdf def arbitrary_defaultdict(): """ Return an arbitrary number of defaultdicts. This is used to store all of the distribution and differences tables """ return defaultdict(arbitrary_defaultdict) formatted = {"tbl_outputs": arbitrary_defaultdict(), "aggr_outputs": defaultdict(dict)} downloadable = [] year_getter = itemgetter('dimension') for id, pdfs in data_to_process.items(): if id.startswith('aggr'): pdfs.sort(key=year_getter) tbl = pdfs[0]["raw"] tbl.index = pd.Index(RESULTS_TOTAL_ROW_KEY_LABELS[i] for i in tbl.index) # format table for col in tbl.columns: tbl.update(tbl[col].apply("${:,.2f}".format)) title = RESULTS_TABLE_TITLES[id] tags = RESULTS_TABLE_TAGS[id] formatted["aggr_outputs"][tags["law"]] = { "title": title, "renderable": pdf_to_clean_html(tbl) } # append a downloadable version of the results downloadable.append( { "media_type": "CSV", "title": title + ".csv", "data": tbl.to_csv() } ) else: for i in pdfs: year = i["dimension"] tbl = label_columns(i["raw"]) title = '{} ({})'.format(RESULTS_TABLE_TITLES[id], year) # format table for col in tbl.columns: if col in MONEY_VARS: tbl.update(tbl[col].apply("${:,.2f}".format)) tags = RESULTS_TABLE_TAGS[id] tbl_type = tags["table_type"] group = tags["grouping"] if id.startswith("dist"): law = tags["law"] formatted["tbl_outputs"][tbl_type][law][group][year] = { "title": title, "renderable": pdf_to_clean_html(tbl) } else: tax = tags["tax_type"] formatted["tbl_outputs"][tbl_type][tax][group][year] = { "title": title, "renderable": pdf_to_clean_html(tbl) } # add downloadable information downloadable.append( { "media_type": "CSV", "title": title + ".csv", "data": tbl.to_csv() } ) return formatted, downloadable def pdf_to_clean_html(pdf): """Takes a PDF and returns an HTML table without any deprecated tags or irrelevant styling""" tb_replace = ('<table class="table table-striped"') return (pdf.to_html() .replace('<table ', 
tb_replace) .replace(' border="1"', '') .replace('class="dataframe"', '')) def retrieve_puf( aws_access_key_id=AWS_ACCESS_KEY_ID, aws_secret_access_key=AWS_SECRET_ACCESS_KEY ): """ Function for retrieving the PUF from the OSPC S3 bucket """ s3_reader_installed = S3FileSystem is not None has_credentials = ( aws_access_key_id is not None and aws_secret_access_key is not None ) if has_credentials and s3_reader_installed: print("Reading puf from S3 bucket.") fs = S3FileSystem(key=AWS_ACCESS_KEY_ID, secret=AWS_SECRET_ACCESS_KEY,) with fs.open("s3://ospc-data-files/puf.csv.gz") as f: # Skips over header from top of file. puf_df = pd.read_csv(f, compression="gzip") return puf_df elif Path("puf.csv.gz").exists(): print("Reading puf from puf.csv.gz.") return pd.read_csv("puf.csv.gz", compression="gzip") elif Path("puf.csv").exists(): print("Reading puf from puf.csv.") return pd.read_csv("puf.csv") else: warnings.warn( f"PUF file not available (has_credentials={has_credentials}, " f"s3_reader_installed={s3_reader_installed})" ) return None
[]
[]
[ "AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY" ]
[]
["AWS_ACCESS_KEY_ID", "AWS_SECRET_ACCESS_KEY"]
python
2
0
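The fuzzed() routine in the file above preserves privacy by overwriting the reform results of up to NUM_TO_FUZZ randomly chosen reform-affected filing units in each table row with their baseline results. A minimal sketch of that idea for a single group, assuming two pandas DataFrames and a hypothetical NUM_TO_FUZZ value (the real constant and column names live in the module above):

import numpy as np
import pandas as pd

NUM_TO_FUZZ = 3  # assumed value for illustration only


def fuzz_group(base: pd.DataFrame, reform: pd.DataFrame,
               affected: np.ndarray, seed: int = 0) -> pd.DataFrame:
    """Return a copy of `reform` with up to NUM_TO_FUZZ affected rows
    replaced positionally by the corresponding `base` rows."""
    rng = np.random.default_rng(seed)
    fuzzed = reform.copy()
    idx = np.flatnonzero(affected)
    num = min(len(idx), NUM_TO_FUZZ)
    if num > 0:
        chosen = rng.choice(idx, size=num, replace=False)
        # positional assignment, mirroring group2.iloc[idx] = group1.iloc[idx] above
        fuzzed.iloc[chosen] = base.iloc[chosen].to_numpy()
    return fuzzed


if __name__ == "__main__":
    base = pd.DataFrame({"combined": [10.0, 20.0, 30.0, 40.0]})
    reform = pd.DataFrame({"combined": [12.0, 20.0, 33.0, 44.0]})
    affected = ~np.isclose(base["combined"].to_numpy(),
                           reform["combined"].to_numpy(), atol=0.01, rtol=0.0)
    print(fuzz_group(base, reform, affected))

The marking of "affected" units mirrors the np.isclose test used before seeding the random number generator in nth_year_results.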
Experiments/BostonHousing/mc_dropout_hetero.py
import json from sklearn.linear_model import LinearRegression,Lasso,Ridge from sklearn.datasets import load_boston import os import sys curPath = os.path.abspath(os.path.dirname(__file__)) rootPath = curPath for i in range(2): rootPath = os.path.split(rootPath)[0] sys.path.append(rootPath) import numpy as np import torch import time import math from pandas import Series,DataFrame import argparse from src.utils import mkdir from src.MC_dropout.model import * from Experiments.BostonHousing.utils import * os.environ["CUDA_VISIBLE_DEVICES"] = "1" if __name__ == '__main__': # Load data X, Y = load_data() inputs = 13 outputs = 1 # Hyper-parameters # pdrops = [0.005, 0.01, 0.05, 0.1] # taus = [0.1, 0.15, 0.2] # lengthscales = [1e-2, 1e-1, 1, 10] # lrs = [1e-3, 1e-4] # momentums = [0.9] # Ts = [1000] pdrops = [0.2, 0.1] taus = [0.1, 0.15] lengthscales = [1e-1, 1] lrs = [1e-3] momentums = [0.9] Ts = [1000] NTrainPoints = 364 batch_size = 128 nb_epochs = 40 log_interval = 1 n_splits = 15 # Paths base_dir = './results_hetero/mc_dropout_results' # Grid search results = {} for pdrop in pdrops : for tau in taus: for lengthscale in lengthscales: for T in Ts: for lr in lrs: for momentum in momentums: Hps = 'Pdrop_' + str(pdrop) + '_Tau_' + str(tau) + '_Lengthscale_' + str(lengthscale) \ + '_Lr_' + str(lr) + '_Momentum_' + str(momentum) + '_T_' + str(T) print('Grid search step:' + Hps ) results_dir = base_dir + '/' + Hps results_file = results_dir + '_results.txt' mkdir(results_dir) rmses = [] picps = [] mpiws = [] for split in range(int(n_splits)): results_dir_split = results_dir + '/split_' + str(split) mkdir(results_dir_split) # get splited data\dataset\dataloder X_train, y_train, X_val, y_val, X_test, y_test, y_stds = get_data_splited(split, X, Y) trainset, valset, testset = get_dataset(X_train, y_train, X_val, y_val, X_test, y_test) use_cuda = torch.cuda.is_available() trainloader, valloader, testloader = get_dataloader(trainset, valset, testset, use_cuda, batch_size) results_val = base_dir + '/results_val_split_' + str(split) + '.txt' results_test = base_dir + '/results_test_split_' + str(split) + '.txt' # net dims N = X_train.shape[0] reg = lengthscale ** 2 * (1 - pdrop) / (2. 
* N * tau) cprint('c', '\nNetwork:') net = MC_drop_net_BH_hetero(lr=lr, input_dim=inputs, output_dim=outputs, cuda=use_cuda, batch_size=batch_size, weight_decay=reg,n_hid=50, momentum=momentum, pdrop=pdrop) # ---- train epoch = 0 cprint('c', '\nTrain:') print(' init cost variables:') pred_cost_train = np.zeros(nb_epochs) rmse_train = np.zeros(nb_epochs) cost_dev = np.zeros(nb_epochs) rmse_dev = np.zeros(nb_epochs) best_rmse = np.inf nb_its_dev = 1 tic0 = time.time() for i in range(epoch, nb_epochs): net.set_mode_train(True) tic = time.time() nb_samples = 0 for x, y in trainloader: cost_pred = net.fit(x, y) pred_cost_train[i] += cost_pred rmse_train[i] += cost_pred nb_samples += len(x) pred_cost_train[i] /= nb_samples rmse_train[i] = (rmse_train[i] / nb_samples)**0.5 toc = time.time() net.epoch = i # ---- print print("it %d/%d, Jtr_pred = %f, rmse = %f" % ( i, nb_epochs, pred_cost_train[i], rmse_train[i]), end="") cprint('r', ' time: %f seconds\n' % (toc - tic)) # ---- dev if i % nb_its_dev == 0: net.set_mode_train(False) nb_samples = 0 for j, (x, y) in enumerate(valloader): cost, mse, _, _, _ = net.eval(x, y, samples=T) cost_dev[i] += cost rmse_dev[i] += mse nb_samples += len(x) cost_dev[i] /= nb_samples rmse_dev[i] = (rmse_dev[i] / nb_samples)**0.5 cprint('g', ' Jdev = %f, rmse = %f\n' % (cost_dev[i], rmse_dev[i])) if rmse_dev[i] < best_rmse: best_rmse = rmse_dev[i] cprint('b', 'best val rmse') net.save(results_dir_split + '/theta_best_val.dat') toc0 = time.time() runtime_per_it = (toc0 - tic0) / float(nb_epochs) cprint('r', ' average time: %f seconds\n' % runtime_per_it) ## --------------------------------------------------------------------------------------------------------------------- # results net.load(results_dir_split + '/theta_best_val.dat') cprint('c', '\nRESULTS:') nb_parameters = net.get_nb_parameters() net.set_mode_train(False) nb_samples = 0 cost_test = 0 rmse_test = 0 means = np.zeros((X_test.shape[0], outputs)) stds = np.zeros((X_test.shape[0], outputs)) noises = np.zeros((X_test.shape[0], outputs)) # ---- test start = 0 for j, (x, y) in enumerate(testloader): end = len(x) + start cost, mse, mean, std, noise = net.eval(x, y, samples=T) if use_cuda: mean = mean.cpu() std = std.cpu() noise = std.cpu() means[start:end, :] = mean stds[start:end, :] = std noises[start:end, :] = noise start = end cost_test += cost rmse_test += mse nb_samples += len(x) # compute PICP MPIW total_unc_1 = (noises ** 2 + stds ** 2) ** 0.5 total_unc_2 = (noises ** 2 + (2 * stds) ** 2) ** 0.5 total_unc_3 = (noises ** 2 + (3 * stds) ** 2) ** 0.5 y_L = means - total_unc_2 y_U = means + total_unc_2 u = np.maximum(0, np.sign(y_U - y_test)) l = np.maximum(0, np.sign(y_test - y_L)) PICP = np.mean(np.multiply(u, l)) MPIW = np.mean(y_U - y_L) cost_test /= nb_samples rmse_test = (rmse_test / nb_samples)**0.5 cost_test = cost_test.cpu().data.numpy() rmse_test = rmse_test.cpu().data.numpy() rmses.append(rmse_test*y_stds) picps.append(PICP) mpiws.append(MPIW) best_cost_dev = np.min(cost_dev) best_cost_train = np.min(pred_cost_train) rmse_dev_min = rmse_dev[::nb_its_dev].min() print(' cost_test: %f ' % (cost_test)) print(' rmse_test: %f' % (rmse_test)) print(' cost_dev: %f (cost_train %f)' % (best_cost_dev, best_cost_train)) print(' rmse_dev: %f' % (rmse_dev_min)) print(' nb_parameters: %d (%s)' % (nb_parameters, humansize(nb_parameters))) print(' time_per_it: %fs\n' % (runtime_per_it)) ## Save results for plots np.save(results_dir_split + '/pred_cost_train.npy', pred_cost_train) np.save(results_dir_split + 
'/cost_dev.npy', cost_dev) np.save(results_dir_split + '/rmse_train.npy', rmse_train) np.save(results_dir_split + '/rmse_dev.npy', rmse_dev) np.save(results_dir_split + '/means.npy', means) np.save(results_dir_split + '/stds.npy', stds) # Storing validation results store_results(results_val, [Hps + ' :: ', 'rmse %f ' % (rmse_dev_min * y_stds) + '\n']) # Storing testing results store_results(results_test, [Hps + ' :: ', 'rmse %f PICP %f MPIW %f' % (rmse_test * y_stds, PICP, MPIW) + '\n']) # storing testing results for this split store_results(results_file, ['rmse %f PICP %f MPIW %f' % (rmse_test * y_stds, PICP, MPIW) + '\n']) ## --------------------------------------------------------------------------------------------------------------------- ## plot figures plot_pred_cost(pred_cost_train, nb_epochs, nb_its_dev, cost_dev, results_dir_split) plot_rmse(nb_epochs, nb_its_dev, rmse_train, rmse_dev, results_dir_split) plot_uncertainty_noise(means, noises, [total_unc_1, total_unc_2, total_unc_3], y_test, results_dir_split) rmses = np.array(rmses) picps = np.array(picps) mpiws = np.array(mpiws) store_results(results_file,['Overall: \n rmses %f +- %f (stddev) +- %f (std error) PICP %f MPIW %f\n' % ( np.mean(rmses), np.std(rmses), np.std(rmses) / math.sqrt(n_splits), np.mean(picps), np.mean(mpiws))]) s = 'Pdrop: ' + str(pdrop) + ' Tau: ' + str(tau) + \ ' Lengthscale: ' + str(lengthscale) + ' Lr: ' + str(lr) + ' Momentum: ' + str(momentum) + ' T: ' + str(T) results[s] = [np.mean(rmses), np.std(rmses), np.std(rmses)/math.sqrt(n_splits), np.mean(picps), np.mean(mpiws)] # sort all the results store_all_results(results, base_dir)
[]
[]
[ "CUDA_VISIBLE_DEVICES" ]
[]
["CUDA_VISIBLE_DEVICES"]
python
1
0
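The grid search above derives the L2 weight decay for MC dropout from the dropout probability, the precision tau, and the prior lengthscale via reg = lengthscale^2 * (1 - pdrop) / (2 * N * tau), and scores each split with PICP/MPIW interval metrics. A small self-contained sketch of those formulas; the numeric inputs below are illustrative assumptions, not results from the experiment:

import numpy as np


def mc_dropout_weight_decay(pdrop: float, tau: float,
                            lengthscale: float, n_train: int) -> float:
    # reg = l^2 * (1 - p) / (2 * N * tau), as used when constructing the network above
    return lengthscale ** 2 * (1.0 - pdrop) / (2.0 * n_train * tau)


def picp_mpiw(y_true: np.ndarray, means: np.ndarray, total_unc: np.ndarray):
    # Prediction Interval Coverage Probability and Mean Prediction Interval Width
    y_low, y_high = means - total_unc, means + total_unc
    covered = (y_true >= y_low) & (y_true <= y_high)
    return covered.mean(), (y_high - y_low).mean()


if __name__ == "__main__":
    print(mc_dropout_weight_decay(pdrop=0.1, tau=0.15, lengthscale=1e-1, n_train=364))
    y = np.array([1.0, 2.0, 3.0])
    mu = np.array([1.1, 1.8, 3.5])
    unc = np.array([0.3, 0.4, 0.2])
    print(picp_mpiw(y, mu, unc))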
aiven/resource_service_kafka_connect_test.go
package aiven import ( "fmt" "github.com/hashicorp/terraform/helper/acctest" "github.com/hashicorp/terraform/helper/resource" "github.com/hashicorp/terraform/terraform" "os" "testing" ) // Kafka Connect service tests func TestAccAivenService_kafkaconnect(t *testing.T) { t.Parallel() resourceName := "aiven_service.bar" rName := acctest.RandStringFromCharSet(10, acctest.CharSetAlphaNum) resource.Test(t, resource.TestCase{ PreCheck: func() { testAccPreCheck(t) }, Providers: testAccProviders, CheckDestroy: testAccCheckAivenServiceResourceDestroy, Steps: []resource.TestStep{ { Config: testAccKafkaConnectServiceResource(rName), Check: resource.ComposeTestCheckFunc( testAccCheckAivenServiceCommonAttributes("data.aiven_service.service"), testAccCheckAivenServiceKafkaConnectAttributes("data.aiven_service.service"), resource.TestCheckResourceAttr(resourceName, "service_name", fmt.Sprintf("test-acc-sr-%s", rName)), resource.TestCheckResourceAttr(resourceName, "state", "RUNNING"), resource.TestCheckResourceAttr(resourceName, "project", fmt.Sprintf("test-acc-pr-%s", rName)), resource.TestCheckResourceAttr(resourceName, "service_type", "kafka_connect"), resource.TestCheckResourceAttr(resourceName, "cloud_name", "google-europe-west1"), resource.TestCheckResourceAttr(resourceName, "maintenance_window_dow", "monday"), resource.TestCheckResourceAttr(resourceName, "maintenance_window_time", "10:00:00"), resource.TestCheckResourceAttr(resourceName, "state", "RUNNING"), resource.TestCheckResourceAttr(resourceName, "termination_protection", "false"), ), }, }, }) } func testAccKafkaConnectServiceResource(name string) string { return fmt.Sprintf(` resource "aiven_project" "foo" { project = "test-acc-pr-%s" card_id="%s" } resource "aiven_service" "bar" { project = aiven_project.foo.project cloud_name = "google-europe-west1" plan = "startup-4" service_name = "test-acc-sr-%s" service_type = "kafka_connect" maintenance_window_dow = "monday" maintenance_window_time = "10:00:00" kafka_connect_user_config { kafka_connect { consumer_isolation_level = "read_committed" } public_access { kafka_connect = true } } } data "aiven_service" "service" { service_name = aiven_service.bar.service_name project = aiven_project.foo.project } `, name, os.Getenv("AIVEN_CARD_ID"), name) } func testAccCheckAivenServiceKafkaConnectAttributes(n string) resource.TestCheckFunc { return func(s *terraform.State) error { r := s.RootModule().Resources[n] a := r.Primary.Attributes if a["service_type"] != "kafka_connect" { return fmt.Errorf("expected to get a correct service type from Aiven, got :%s", a["service_type"]) } if a["kafka_connect_user_config.0.kafka_connect.0.consumer_isolation_level"] != "read_committed" { return fmt.Errorf("expected to get a correct consumer_isolation_level from Aiven") } if a["kafka_connect_user_config.0.kafka_connect.0.consumer_max_poll_records"] != "-1" { return fmt.Errorf("expected to get a correct consumer_max_poll_records from Aiven") } if a["kafka_connect_user_config.0.kafka_connect.0.offset_flush_interval_ms"] != "-1" { return fmt.Errorf("expected to get a correct offset_flush_interval_ms from Aiven") } if a["kafka_connect_user_config.0.public_access.0.kafka_connect"] != "true" { return fmt.Errorf("expected to get a correct public_access.kafka_connect from Aiven") } if a["kafka_connect_user_config.0.public_access.0.prometheus"] != "<<value not set>>" { return fmt.Errorf("expected to get a correct public_access.prometheus from Aiven") } return nil } }
[ "\"AIVEN_CARD_ID\"" ]
[]
[ "AIVEN_CARD_ID" ]
[]
["AIVEN_CARD_ID"]
go
1
0
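The acceptance test above injects AIVEN_CARD_ID from the environment into the generated Terraform configuration. A hedged Python sketch of the same pattern of pulling a required test credential from the environment and skipping when it is absent; the variable name comes from the test above, everything else is illustrative:

import os
import unittest


class KafkaConnectAcceptanceTest(unittest.TestCase):
    def setUp(self):
        # Skip rather than fail when the credential is not configured locally.
        self.card_id = os.environ.get("AIVEN_CARD_ID")
        if not self.card_id:
            self.skipTest("AIVEN_CARD_ID is not set")

    def test_config_rendering(self):
        # Render the credential into a config template, as the Go test does with fmt.Sprintf.
        config = 'resource "aiven_project" "foo" {{ card_id = "{}" }}'.format(self.card_id)
        self.assertIn(self.card_id, config)


if __name__ == "__main__":
    unittest.main()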
tests/flytekit/unit/engines/flyte/test_engine.py
from __future__ import absolute_import import os import pytest from flyteidl.core import errors_pb2 from mock import MagicMock, patch, PropertyMock from flytekit.common import constants, utils from flytekit.common.exceptions import scopes from flytekit.configuration import TemporaryConfiguration from flytekit.engines.flyte import engine from flytekit.models import literals, execution as _execution_models, common as _common_models, launch_plan as \ _launch_plan_models from flytekit.models.core import errors, identifier from flytekit.sdk import test_utils _INPUT_MAP = literals.LiteralMap( { 'a': literals.Literal(scalar=literals.Scalar(primitive=literals.Primitive(integer=1))) } ) _OUTPUT_MAP = literals.LiteralMap( { 'b': literals.Literal(scalar=literals.Scalar(primitive=literals.Primitive(integer=2))) } ) @pytest.fixture(scope="function", autouse=True) def temp_config(): with TemporaryConfiguration( os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../common/configs/local.config'), internal_overrides={ 'image': 'myflyteimage:{}'.format( os.environ.get('IMAGE_VERSION', 'sha') ), 'project': 'myflyteproject', 'domain': 'development' } ): yield @pytest.fixture(scope="function", autouse=True) def execution_data_locations(): with test_utils.LocalTestFileSystem() as fs: input_filename = fs.get_named_tempfile("inputs.pb") output_filename = fs.get_named_tempfile("outputs.pb") utils.write_proto_to_file(_INPUT_MAP.to_flyte_idl(), input_filename) utils.write_proto_to_file(_OUTPUT_MAP.to_flyte_idl(), output_filename) yield ( _common_models.UrlBlob(input_filename, 100), _common_models.UrlBlob(output_filename, 100) ) @scopes.system_entry_point def _raise_system_exception(*args, **kwargs): raise ValueError("errorERRORerror") @scopes.user_entry_point def _raise_user_exception(*args, **kwargs): raise ValueError("userUSERuser") @scopes.system_entry_point def test_task_system_failure(): m = MagicMock() m.execute = _raise_system_exception with utils.AutoDeletingTempDir("test") as tmp: engine.FlyteTask(m).execute(None, {'output_prefix': tmp.name}) doc = errors.ErrorDocument.from_flyte_idl( utils.load_proto_from_file(errors_pb2.ErrorDocument, os.path.join(tmp.name, constants.ERROR_FILE_NAME)) ) assert doc.error.code == "SYSTEM:Unknown" assert doc.error.kind == errors.ContainerError.Kind.RECOVERABLE assert "errorERRORerror" in doc.error.message @scopes.system_entry_point def test_task_user_failure(): m = MagicMock() m.execute = _raise_user_exception with utils.AutoDeletingTempDir("test") as tmp: engine.FlyteTask(m).execute(None, {'output_prefix': tmp.name}) doc = errors.ErrorDocument.from_flyte_idl( utils.load_proto_from_file(errors_pb2.ErrorDocument, os.path.join(tmp.name, constants.ERROR_FILE_NAME)) ) assert doc.error.code == "USER:Unknown" assert doc.error.kind == errors.ContainerError.Kind.NON_RECOVERABLE assert "userUSERuser" in doc.error.message @patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_execution_notification_overrides(mock_client_factory): mock_client = MagicMock() mock_client.create_execution = MagicMock(return_value=identifier.WorkflowExecutionIdentifier('xp', 'xd', 'xn')) mock_client_factory.return_value = mock_client m = MagicMock() type(m).id = PropertyMock( return_value=identifier.Identifier( identifier.ResourceType.LAUNCH_PLAN, "project", "domain", "name", "version" ) ) engine.FlyteLaunchPlan(m).execute( 'xp', 'xd', 'xn', literals.LiteralMap({}), notification_overrides=[] ) mock_client.create_execution.assert_called_once_with( 'xp', 'xd', 
'xn', _execution_models.ExecutionSpec( identifier.Identifier( identifier.ResourceType.LAUNCH_PLAN, "project", "domain", "name", "version" ), _execution_models.ExecutionMetadata( _execution_models.ExecutionMetadata.ExecutionMode.MANUAL, 'sdk', 0 ), disable_all=True, ), literals.LiteralMap({}), ) @patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_execution_notification_soft_overrides(mock_client_factory): mock_client = MagicMock() mock_client.create_execution = MagicMock(return_value=identifier.WorkflowExecutionIdentifier('xp', 'xd', 'xn')) mock_client_factory.return_value = mock_client m = MagicMock() type(m).id = PropertyMock( return_value=identifier.Identifier( identifier.ResourceType.LAUNCH_PLAN, "project", "domain", "name", "version" ) ) notification = _common_models.Notification([0, 1, 2], email=_common_models.EmailNotification(["[email protected]"])) engine.FlyteLaunchPlan(m).execute( 'xp', 'xd', 'xn', literals.LiteralMap({}), notification_overrides=[notification] ) mock_client.create_execution.assert_called_once_with( 'xp', 'xd', 'xn', _execution_models.ExecutionSpec( identifier.Identifier( identifier.ResourceType.LAUNCH_PLAN, "project", "domain", "name", "version" ), _execution_models.ExecutionMetadata( _execution_models.ExecutionMetadata.ExecutionMode.MANUAL, 'sdk', 0 ), notifications=_execution_models.NotificationList([notification]), ), literals.LiteralMap({}), ) @patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_execution_label_overrides(mock_client_factory): mock_client = MagicMock() mock_client.create_execution = MagicMock(return_value=identifier.WorkflowExecutionIdentifier('xp', 'xd', 'xn')) mock_client_factory.return_value = mock_client m = MagicMock() type(m).id = PropertyMock( return_value=identifier.Identifier( identifier.ResourceType.LAUNCH_PLAN, "project", "domain", "name", "version" ) ) labels = _common_models.Labels({"my": "label"}) engine.FlyteLaunchPlan(m).execute( 'xp', 'xd', 'xn', literals.LiteralMap({}), notification_overrides=[], label_overrides=labels ) mock_client.create_execution.assert_called_once_with( 'xp', 'xd', 'xn', _execution_models.ExecutionSpec( identifier.Identifier( identifier.ResourceType.LAUNCH_PLAN, "project", "domain", "name", "version" ), _execution_models.ExecutionMetadata( _execution_models.ExecutionMetadata.ExecutionMode.MANUAL, 'sdk', 0 ), disable_all=True, labels=labels, ), literals.LiteralMap({}), ) @patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_execution_annotation_overrides(mock_client_factory): mock_client = MagicMock() mock_client.create_execution = MagicMock(return_value=identifier.WorkflowExecutionIdentifier('xp', 'xd', 'xn')) mock_client_factory.return_value = mock_client m = MagicMock() type(m).id = PropertyMock( return_value=identifier.Identifier( identifier.ResourceType.LAUNCH_PLAN, "project", "domain", "name", "version" ) ) annotations = _common_models.Annotations({"my": "annotation"}) engine.FlyteLaunchPlan(m).execute( 'xp', 'xd', 'xn', literals.LiteralMap({}), notification_overrides=[], annotation_overrides=annotations ) mock_client.create_execution.assert_called_once_with( 'xp', 'xd', 'xn', _execution_models.ExecutionSpec( identifier.Identifier( identifier.ResourceType.LAUNCH_PLAN, "project", "domain", "name", "version" ), _execution_models.ExecutionMetadata( _execution_models.ExecutionMetadata.ExecutionMode.MANUAL, 'sdk', 0 ), disable_all=True, annotations=annotations, ), literals.LiteralMap({}), ) 
@patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_fetch_launch_plan(mock_client_factory): mock_client = MagicMock() mock_client.get_launch_plan = MagicMock( return_value=_launch_plan_models.LaunchPlan( identifier.Identifier(identifier.ResourceType.LAUNCH_PLAN, "p1", "d1", "n1", "v1"), MagicMock(), MagicMock(), ) ) mock_client_factory.return_value = mock_client lp = engine.FlyteEngineFactory().fetch_launch_plan( identifier.Identifier(identifier.ResourceType.LAUNCH_PLAN, "p", "d", "n", "v") ) assert lp.id == identifier.Identifier(identifier.ResourceType.LAUNCH_PLAN, "p1", "d1", "n1", "v1") mock_client.get_launch_plan.assert_called_once_with( identifier.Identifier(identifier.ResourceType.LAUNCH_PLAN, "p", "d", "n", "v") ) @patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_fetch_active_launch_plan(mock_client_factory): mock_client = MagicMock() mock_client.get_active_launch_plan = MagicMock( return_value=_launch_plan_models.LaunchPlan( identifier.Identifier(identifier.ResourceType.LAUNCH_PLAN, "p1", "d1", "n1", "v1"), MagicMock(), MagicMock(), ) ) mock_client_factory.return_value = mock_client lp = engine.FlyteEngineFactory().fetch_launch_plan( identifier.Identifier(identifier.ResourceType.LAUNCH_PLAN, "p", "d", "n", "") ) assert lp.id == identifier.Identifier(identifier.ResourceType.LAUNCH_PLAN, "p1", "d1", "n1", "v1") mock_client.get_active_launch_plan.assert_called_once_with( _common_models.NamedEntityIdentifier("p", "d", "n") ) @patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_get_execution_inputs(mock_client_factory, execution_data_locations): mock_client = MagicMock() mock_client.get_execution_data = MagicMock( return_value=_execution_models.WorkflowExecutionGetDataResponse( execution_data_locations[0], execution_data_locations[1] ) ) mock_client_factory.return_value = mock_client m = MagicMock() type(m).id = PropertyMock( return_value=identifier.WorkflowExecutionIdentifier( "project", "domain", "name", ) ) inputs = engine.FlyteWorkflowExecution(m).get_inputs() assert len(inputs.literals) == 1 assert inputs.literals['a'].scalar.primitive.integer == 1 mock_client.get_execution_data.assert_called_once_with( identifier.WorkflowExecutionIdentifier("project", "domain", "name") ) @patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_get_execution_outputs(mock_client_factory, execution_data_locations): mock_client = MagicMock() mock_client.get_execution_data = MagicMock( return_value=_execution_models.WorkflowExecutionGetDataResponse( execution_data_locations[0], execution_data_locations[1] ) ) mock_client_factory.return_value = mock_client m = MagicMock() type(m).id = PropertyMock( return_value=identifier.WorkflowExecutionIdentifier( "project", "domain", "name", ) ) inputs = engine.FlyteWorkflowExecution(m).get_outputs() assert len(inputs.literals) == 1 assert inputs.literals['b'].scalar.primitive.integer == 2 mock_client.get_execution_data.assert_called_once_with( identifier.WorkflowExecutionIdentifier("project", "domain", "name") ) @patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_get_node_execution_inputs(mock_client_factory, execution_data_locations): mock_client = MagicMock() mock_client.get_node_execution_data = MagicMock( return_value=_execution_models.NodeExecutionGetDataResponse( execution_data_locations[0], execution_data_locations[1] ) ) mock_client_factory.return_value = mock_client m = 
MagicMock() type(m).id = PropertyMock( return_value=identifier.NodeExecutionIdentifier( "node-a", identifier.WorkflowExecutionIdentifier( "project", "domain", "name", ) ) ) inputs = engine.FlyteNodeExecution(m).get_inputs() assert len(inputs.literals) == 1 assert inputs.literals['a'].scalar.primitive.integer == 1 mock_client.get_node_execution_data.assert_called_once_with( identifier.NodeExecutionIdentifier( "node-a", identifier.WorkflowExecutionIdentifier( "project", "domain", "name", ) ) ) @patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_get_node_execution_outputs(mock_client_factory, execution_data_locations): mock_client = MagicMock() mock_client.get_node_execution_data = MagicMock( return_value=_execution_models.NodeExecutionGetDataResponse( execution_data_locations[0], execution_data_locations[1] ) ) mock_client_factory.return_value = mock_client m = MagicMock() type(m).id = PropertyMock( return_value=identifier.NodeExecutionIdentifier( "node-a", identifier.WorkflowExecutionIdentifier( "project", "domain", "name", ) ) ) inputs = engine.FlyteNodeExecution(m).get_outputs() assert len(inputs.literals) == 1 assert inputs.literals['b'].scalar.primitive.integer == 2 mock_client.get_node_execution_data.assert_called_once_with( identifier.NodeExecutionIdentifier( "node-a", identifier.WorkflowExecutionIdentifier( "project", "domain", "name", ) ) ) @patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_get_task_execution_inputs(mock_client_factory, execution_data_locations): mock_client = MagicMock() mock_client.get_task_execution_data = MagicMock( return_value=_execution_models.TaskExecutionGetDataResponse( execution_data_locations[0], execution_data_locations[1] ) ) mock_client_factory.return_value = mock_client m = MagicMock() type(m).id = PropertyMock( return_value=identifier.TaskExecutionIdentifier( identifier.Identifier( identifier.ResourceType.TASK, 'project', 'domain', 'task-name', 'version' ), identifier.NodeExecutionIdentifier( "node-a", identifier.WorkflowExecutionIdentifier( "project", "domain", "name", ) ), 0 ) ) inputs = engine.FlyteTaskExecution(m).get_inputs() assert len(inputs.literals) == 1 assert inputs.literals['a'].scalar.primitive.integer == 1 mock_client.get_task_execution_data.assert_called_once_with( identifier.TaskExecutionIdentifier( identifier.Identifier( identifier.ResourceType.TASK, 'project', 'domain', 'task-name', 'version' ), identifier.NodeExecutionIdentifier( "node-a", identifier.WorkflowExecutionIdentifier( "project", "domain", "name", ) ), 0 ) ) @patch.object(engine._FlyteClientManager, '_CLIENT', new_callable=PropertyMock) def test_get_task_execution_outputs(mock_client_factory, execution_data_locations): mock_client = MagicMock() mock_client.get_task_execution_data = MagicMock( return_value=_execution_models.TaskExecutionGetDataResponse( execution_data_locations[0], execution_data_locations[1] ) ) mock_client_factory.return_value = mock_client m = MagicMock() type(m).id = PropertyMock( return_value=identifier.TaskExecutionIdentifier( identifier.Identifier( identifier.ResourceType.TASK, 'project', 'domain', 'task-name', 'version' ), identifier.NodeExecutionIdentifier( "node-a", identifier.WorkflowExecutionIdentifier( "project", "domain", "name", ) ), 0 ) ) inputs = engine.FlyteTaskExecution(m).get_outputs() assert len(inputs.literals) == 1 assert inputs.literals['b'].scalar.primitive.integer == 2 mock_client.get_task_execution_data.assert_called_once_with( 
identifier.TaskExecutionIdentifier( identifier.Identifier( identifier.ResourceType.TASK, 'project', 'domain', 'task-name', 'version' ), identifier.NodeExecutionIdentifier( "node-a", identifier.WorkflowExecutionIdentifier( "project", "domain", "name", ) ), 0 ) )
[]
[]
[ "IMAGE_VERSION" ]
[]
["IMAGE_VERSION"]
python
1
0
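The tests above repeatedly patch engine._FlyteClientManager._CLIENT with new_callable=PropertyMock so that a MagicMock client is returned wherever the property is read. A minimal sketch of that mocking pattern against a hypothetical class; the names below are illustrative and not part of flytekit:

from unittest.mock import MagicMock, PropertyMock, patch


class ClientManager:
    @property
    def client(self):
        raise RuntimeError("would talk to a real service")


def fetch_name(manager: ClientManager) -> str:
    return manager.client.get_name()


def test_fetch_name_uses_mocked_client():
    mock_client = MagicMock()
    mock_client.get_name.return_value = "mocked"
    # Patching the property on the class makes every instance return the mock client.
    with patch.object(ClientManager, "client", new_callable=PropertyMock) as prop:
        prop.return_value = mock_client
        assert fetch_name(ClientManager()) == "mocked"
        mock_client.get_name.assert_called_once_with()


if __name__ == "__main__":
    test_fetch_name_uses_mocked_client()
    print("ok")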
components/automate-cs-nginx/cmd/chef-server-ctl/ctl.go
// chef-server-ctl: A minimal wrapper for chef-server-ctl // // This command provides users with a subset of the commands available // in the upstream chef-server-ctl command. Not all of the upstream // commands are appropriate in the context of Chef Automate. Further, // in some cases we need to call the chef-server-ctl command with // alternate configuration. // package main import ( "fmt" "io" "os" "os/exec" "path/filepath" "sort" "strings" "github.com/sirupsen/logrus" "github.com/chef/automate/lib/secrets" ) // TODO(ssd) 2018-08-09: Mad about something that seems hard-coded? // // It probably lives in one of the following constants or the // build-time variables below. These values are passed to the // underlying chef-server-ctl and knife commands via environment // variables and options. const ( knifeConfigFile = "/hab/svc/automate-cs-nginx/config/knife_superuser.rb" erchefReindexScript = "hab pkg exec chef/oc_erchef reindex-opc-organization" erchefDBURI = "postgresql://[email protected]:5432/automate-cs-oc-erchef?sslmode=verify-ca&sslcert=/hab/svc/automate-postgresql/config/server.crt&sslkey=/hab/svc/automate-postgresql/config/server.key&sslrootcert=/hab/svc/automate-postgresql/config/root.crt" // nolint: lll bifrostDBURI = "postgresql://[email protected]:5432/automate-cs-oc-bifrost?sslmode=verify-ca&sslcert=/hab/svc/automate-postgresql/config/server.crt&sslkey=/hab/svc/automate-postgresql/config/server.key&sslrootcert=/hab/svc/automate-postgresql/config/root.crt" // nolint: lll bifrostURL = "https://127.0.0.1:10202" lbURL = "https://127.0.0.1:10200" tlsCrt = "/hab/svc/automate-cs-nginx/config/service.crt" tlsKey = "/hab/svc/automate-cs-nginx/config/service.key" tlsCA = "/hab/svc/automate-cs-nginx/config/root_ca.crt" ) // These paths are injected at BUILD time based on our dependencies to // save us a few calls to hab and to try to keep Habitat-like // dependency isolation. var ( Version = "UNKNOWN" RubyPath = "UNKNOWN" BundlePath = "UNKNOWN" KnifePath = "UNKNOWN" ChefServerCtlPath = "UNKNOWN" ) // A subCommand is a chef-server-ctl command. type subCommand interface { // Run executes the subcommand Run(args []string) error // Hiddent returns true if the command should be excluded from // the default help output. Hidden() bool // Help summary that is displayed by chef-server-ctl help HelpLine() string } // unsupported subcommands are not supported in the Automate 2 // integrated Chef server. type unsupported struct { name string alternative string } const unsupportedNoAltFmt = "The command %q is not supported by the Chef Automate Chef server.\n" const unsupportedWithAltFmt = "The command %q is not supported by the Chef Automate Chef server. Instead, try\n %s\n" func (c unsupported) Run([]string) error { if c.alternative == "" { fmt.Printf(unsupportedNoAltFmt, c.name) } else { fmt.Printf(unsupportedWithAltFmt, c.name, c.alternative) } return nil } func (c unsupported) Hidden() bool { return true } func (c unsupported) HelpLine() string { return "" } // wrapKnife commands replace the wrap-knife-opc commands in the // upstream chef-server-ctl. While we could pass through to the // underlying chef-server-ctl, it is straightforward to call our knife // wrapper directly. type wrapKnife struct { cmdVerb string cmdNoun string helpText string } func (c wrapKnife) Run(args []string) error { args = append([]string{"opc", c.cmdNoun, c.cmdVerb}, args...) cmd := exec.Command(KnifePath, args...) 
cmd.Env = os.Environ() cmd.Stdout = os.Stdout cmd.Stdin = os.Stdin cmd.Stderr = os.Stderr return cmd.Run() } func (c wrapKnife) Hidden() bool { return false } func (c wrapKnife) HelpLine() string { return c.helpText } // native subcommands are subcommands that we've reimplemented // completely in Go. We should keep these to a minimum. type native struct { runFunc func([]string) error helpText string } func (c native) Run(args []string) error { return c.runFunc(args) } func (c native) Hidden() bool { return false } func (c native) HelpLine() string { return c.helpText } //passthrough subcommands are commands where we call the underlying chef-server-ctl command. type passthrough struct { name string helpText string } func (c passthrough) Run(args []string) error { // The wrapper in core/chef-server-ctl sets up an environment // that assumes the chef/chef-server-ctl service is running. // Since we don't want to deploy that service we can't `hab // pkg exec` that helper and rather have to call the binstub // directly. // // The binstub we are going to call has an interpreter line // that uses `/usr/bin/env ruby`. We don't necessarily have // ruby in our path because if we are called from a binlink // habitat doesn't set our path. Thus, we need to inject the // path to both ruby and bundler. existingPath := os.Getenv("PATH") var pathEnv string if existingPath != "" { pathEnv = fmt.Sprintf("PATH=%s:%s/bin:%s/bin", existingPath, RubyPath, BundlePath) } else { pathEnv = fmt.Sprintf("PATH=%s/bin:%s/bin", RubyPath, BundlePath) } // Get bifrostSuperuserID from the secrets store secretsStore := secrets.NewDiskStoreReader(secrets.DefaultDiskStoreDataDir) var bifrostSuperuserID string bifrostSecData, err := secretsStore.GetSecret(secrets.BifrostSuperuserIDName) if err != nil { // We are silently dropping this error for now so that // you can run commands that don't require the // bifrostSuperuserID, in case bifrost hasn't come up. logrus.WithError(err).Debugf("failed to get the bifrost superuser id") bifrostSuperuserID = "" } else { bifrostSuperuserID = string(bifrostSecData) } // chef-server-ctl has been modified to take all necessary // config via environment variables. All CSC_ variables are // chef-server-ctl specific configuration. env := []string{ pathEnv, fmt.Sprintf("CSC_LB_URL=%s", lbURL), fmt.Sprintf("CSC_BIFROST_SUPERUSER_ID=%s", bifrostSuperuserID), fmt.Sprintf("CSC_BIFROST_URL=%s", bifrostURL), fmt.Sprintf("CSC_BIFROST_DB_URI=%s", bifrostDBURI), fmt.Sprintf("CSC_ERCHEF_DB_URI=%s", erchefDBURI), fmt.Sprintf("CSC_ERCHEF_REINDEX_SCRIPT=%s", erchefReindexScript), fmt.Sprintf("CSC_KNIFE_CONFIG_FILE=%s", knifeConfigFile), fmt.Sprintf("CSC_TLS_KEY=%s", tlsKey), fmt.Sprintf("CSC_TLS_CRT=%s", tlsCrt), fmt.Sprintf("CSC_TLS_CA=%s", tlsCA), fmt.Sprintf("CSC_KNIFE_BIN=%s exec %s", filepath.Join(BundlePath, "bin", "bundle"), filepath.Join(ChefServerCtlPath, "chef", "bin", "knife")), "CSC_FIPS_ENABLED=false", "CSC_HABITAT_MODE=true", fmt.Sprintf("BUNDLE_GEMFILE=%s", filepath.Join(ChefServerCtlPath, "omnibus-ctl", "Gemfile")), } // remove possibly bad ruby Env settings that could break us. // ignore errors because the underlying syscall only fails if: // * EINVAL: name is NULL, points to a string of length 0, or contains an '=' character. 
// * ENOMEM os.Unsetenv("RUBYOPT") os.Unsetenv("GEM_HOME") os.Unsetenv("GEM_PATH") os.Unsetenv("BUNDLE_GEMFILE") os.Unsetenv("BUNDLE_PATH") bundlerCmd := filepath.Join(BundlePath, "bin", "bundle") chefServerCtlCmd := filepath.Join(ChefServerCtlPath, "omnibus-ctl", "binstubs", "chef-server-ctl") cmd := exec.Command(bundlerCmd, append([]string{"exec", chefServerCtlCmd, c.name}, args...)...) cmd.Env = append(os.Environ(), env...) cmd.Stdout = os.Stdout cmd.Stdin = os.Stdin cmd.Stderr = os.Stderr return cmd.Run() } func (c passthrough) Hidden() bool { return false } func (c passthrough) HelpLine() string { return c.helpText } const ( topLevelKey = "__TOP_LEVEL__" unsupportedKey = "__UNSUPPORTED__" ) // SubCommands is a map of subcommand name to the SubCommand // interfaces that implement them. // // nolint: govet var subCommands = map[string]map[string]subCommand{ "Organization and User Management": { "org-create": wrapKnife{"create", "org", "Create an organization in the Chef server."}, "org-delete": wrapKnife{"delete", "org", "Delete an organization in the Chef server."}, "org-list": wrapKnife{"list", "org", "List all organizations in the Chef server."}, "org-show": wrapKnife{"show", "org", "Show an organization in the Chef server."}, "org-user-add": wrapKnife{"add", "org user", "Associate a user with an organization."}, "org-user-remove": wrapKnife{"remove", "org user", "Dissociate a user with an organization."}, "user-create": wrapKnife{"create", "user", "Create a user in the Chef server."}, "user-delete": wrapKnife{"delete", "user", "Delete a user in the Chef server."}, "user-edit": wrapKnife{"edit", "user", "Edit a user in the Chef server."}, "user-list": wrapKnife{"list", "user", "List all users in the Chef server."}, "user-show": wrapKnife{"show", "user", "Show a user in the Chef server."}, "password": passthrough{"password", "Reset a user's password."}, }, "Debugging and Maintenance": { "cleanup-bifrost": passthrough{"cleanup-bifrost", "Cleanup orphaned authorization objects."}, "filtered-dump": passthrough{"filtered-dump", "Generate a filtered dump of all indexable Chef Objects."}, "reindex": passthrough{"reindex", "Destroy and recreate the Chef server search index."}, "test": native{func(args []string) error { env := []string{ fmt.Sprintf("SUPERUSER_KEY=/hab/svc/automate-cs-oc-erchef/data/pivotal.pem"), fmt.Sprintf("WEBUI_KEY=/hab/svc/automate-cs-oc-erchef/data/webui_priv.pem"), fmt.Sprintf("BUNDLE_GEMFILE=%s", filepath.Join(ChefServerCtlPath, "oc-chef-pedant/Gemfile")), } bundleCmd := filepath.Join(BundlePath, "bin/bundle") cmdArgs := []string{ "exec", filepath.Join(ChefServerCtlPath, "oc-chef-pedant/bin/oc-chef-pedant"), "--log-file", "/dev/null", "-c", "/hab/svc/automate-cs-oc-erchef/config/pedant_config.rb", } // If no arguments are passed default to smoke tests if len(args) < 1 { cmdArgs = append(cmdArgs, []string{"--focus", "smoke"}...) } cmd := exec.Command(bundleCmd, append(cmdArgs, args...)...) cmd.Env = append(os.Environ(), env...) 
cmd.Stdout = os.Stdout cmd.Stdin = os.Stdin cmd.Stderr = os.Stderr return cmd.Run() }, "Run the Chef server test suite"}, }, "Key Rotation": { "add-client-key": passthrough{"add-client-key", "Add a client key."}, "add-user-key": passthrough{"add-user-key", "Add a user key."}, "delete-client-key": passthrough{"delete-client-key", "Delete a client key."}, "delete-user-key": passthrough{"delete-user-key", "Delete a user key."}, "list-client-keys": passthrough{"list-client-keys", "List keys for a client."}, "list-user-keys": passthrough{"list-user-keys", "List keys for a user."}, }, "Server Administrators": { "grant-server-admin-permissions": passthrough{"grant-server-admin-permissions", "Make a user a server admin."}, "list-server-admins": passthrough{"list-server-admins", "List server admins."}, "remove-server-admin-permissions": passthrough{"remove-server-admin-permissions", "Remove a server admin."}, }, topLevelKey: { // TODO(ssd) 2018-08-09: The version commands are now // inconsistent across the three different install types we // have. "version": native{func(args []string) error { fmt.Printf("Chef Automate Chef server %s\n", Version) fmt.Printf("For more information run \n chef-automate version.\n") return nil }, "Display the Chef server version."}, }, unsupportedKey: { // Unsupported Commands: // - Secrets Management Command // - High Availability // - Service Management // - Most "general" commands // // NOTE(ssd) 2018-08-07: We don't really know what is // happening with partybus/mover yet but let's avoid needing "rebuild-migration-state" if we can. "oc-id-show-app": unsupported{name: "oc-id-show-app"}, "remove-secret": unsupported{name: "remove-secret"}, "require-credential-rotation": unsupported{name: "require-credential-rotation"}, "rotate-all-credentials": unsupported{name: "rotate-all-credentials"}, "rotate-credentials": unsupported{name: "rotate-credentials"}, "rotate-shared-secrets": unsupported{name: "rotate-shared-secrets"}, "set-actions-password": unsupported{name: "set-actions-password"}, "set-db-superuser-password": unsupported{name: "set-db-superuser-password"}, "set-secret": unsupported{name: "set-secret"}, "show-secret": unsupported{name: "show-secret"}, "show-service-credentials": unsupported{name: "show-service-credentials"}, "backup-recover": unsupported{name: "backup-recover"}, "ha-status": unsupported{name: "ha-status"}, "master-recover": unsupported{name: "master-recover"}, "graceful-kill": unsupported{name: "graceful-kill"}, "hup": unsupported{name: "hup"}, "int": unsupported{name: "int"}, "kill": unsupported{name: "kill"}, "once": unsupported{name: "once"}, "restart": unsupported{name: "restart", alternative: "chef-automate restart-services"}, "service-list": unsupported{name: "service-list", alternative: "chef-automate service-versions"}, "start": unsupported{name: "start", alternative: "systemctl start chef-automate.service"}, "stop": unsupported{name: "stop", alternative: "chef-automate stop"}, "status": unsupported{name: "status", alternative: "chef-automate status"}, "tail": unsupported{name: "tail", alternative: "journalctl -f -u chef-automate"}, "term": unsupported{name: "term"}, "usr1": unsupported{name: "usr1"}, "usr2": unsupported{name: "usr2"}, "backup": unsupported{name: "backup", alternative: "chef-automate backup create"}, "cleanse": unsupported{name: "cleanse", alternative: "chef-automate unistall"}, "cleanup": unsupported{name: "cleanup"}, "rebuild-migration-state": unsupported{name: "rebuild-migration-state"}, "restore": unsupported{name: 
"restore", alternative: "chef-automate backup restore"}, "show-config": unsupported{name: "restore", alternative: "chef-automate config show"}, "gather-logs": unsupported{name: "gather-logs", alternative: "chef-automate gather-logs"}, "install": unsupported{name: "install"}, "psql": unsupported{name: "psql", alternative: "chef-automate dev psql"}, "uninstall": unsupported{name: "uninstall", alternative: "chef-automate uninstall"}, "upgrade": unsupported{name: "upgrade", alternative: "chef-automate upgrade"}, }, } func main() { if len(os.Args) < 2 { printHelp() os.Exit(1) } subcommand := os.Args[1] if isHelpCommand(subcommand) { printHelp() os.Exit(0) } c, ok := findSubcommand(subcommand) if !ok { fmt.Fprintf(os.Stderr, "Unknown command %q. See %s --help for available commands\n", subcommand, os.Args[0]) // nolint errcheck os.Exit(1) } err := c.Run(os.Args[2:]) if err != nil { os.Exit(1) } } func findSubcommand(commandName string) (subCommand, bool) { for _, categorySubCommands := range subCommands { for name, c := range categorySubCommands { if name == commandName { return c, true } } } return nil, false } // Note this is iterates over all of the commands a few times, but // since there aren't many I don't think it will be much of a problem. func printHelp() { // Calculate longest lines of all names across all categories maxLen := 0 for _, categorySubCommands := range subCommands { for name := range categorySubCommands { if len(name) > maxLen { maxLen = len(name) } } } helpLineFmt := fmt.Sprintf(" %%-%ds %%s\n", maxLen) buf := new(strings.Builder) fmt.Fprintf(buf, "Chef Automate chef-server-ctl %s\n\n", Version) // nolint errcheck for category, categoryCommands := range subCommands { switch category { case topLevelKey: continue case unsupportedKey: continue default: fmt.Fprintf(buf, "%s\n", category) // nolint errcheck printHelpForCategory(buf, helpLineFmt, categoryCommands) fmt.Fprintf(buf, "\n") // nolint errcheck } } // Print top level commands at the end printHelpForCategory(buf, helpLineFmt, subCommands[topLevelKey]) // Add the help "command" fmt.Fprintf(buf, helpLineFmt, "help", "Print this help message.") // nolint errcheck fmt.Print(buf.String()) } func printHelpForCategory(buf io.Writer, helpLineFmt string, commands map[string]subCommand) { commandNames := make([]string, 0, len(commands)) for name := range commands { commandNames = append(commandNames, name) } sort.Strings(commandNames) for _, name := range commandNames { c := commands[name] if !c.Hidden() { fmt.Fprintf(buf, helpLineFmt, name, c.HelpLine()) // nolint errcheck } } } func isHelpCommand(cmd string) bool { return cmd == "-h" || cmd == "--help" || cmd == "help" }
[ "\"PATH\"" ]
[]
[ "PATH" ]
[]
["PATH"]
go
1
0
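The passthrough commands above build an environment for the wrapped chef-server-ctl binstub by prepending the Ruby and Bundler bin directories to PATH, adding CSC_* configuration variables, and dropping Ruby-related variables that could break the call. A small sketch of that environment-preparation pattern in Python; apart from PATH, the paths and variable values are illustrative assumptions:

import os
import subprocess
import sys


def passthrough_env(ruby_bin: str, bundler_bin: str, extra: dict) -> dict:
    """Copy the current environment, prepend tool bin dirs to PATH,
    drop variables that could leak a conflicting Ruby setup, add config."""
    env = dict(os.environ)
    existing = env.get("PATH", "")
    prefix = os.pathsep.join([ruby_bin, bundler_bin])
    env["PATH"] = prefix + (os.pathsep + existing if existing else "")
    for name in ("RUBYOPT", "GEM_HOME", "GEM_PATH", "BUNDLE_GEMFILE", "BUNDLE_PATH"):
        env.pop(name, None)  # ignore missing names, as os.Unsetenv does above
    env.update(extra)
    return env


if __name__ == "__main__":
    env = passthrough_env("/opt/ruby/bin", "/opt/bundler/bin",
                          {"CSC_HABITAT_MODE": "true", "CSC_FIPS_ENABLED": "false"})
    # Show the prepared PATH from a child process, standing in for the wrapped command.
    subprocess.run([sys.executable, "-c", "import os; print(os.environ['PATH'])"],
                   env=env, check=True)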
regr-tests/test3d-bcns/test.py
# --- Revised 3-Clause BSD License --- # Copyright Semtech Corporation 2022. All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # * Neither the name of the Semtech corporation nor the names of its # contributors may be used to endorse or promote products derived from this # software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL SEMTECH CORPORATION. BE LIABLE FOR ANY DIRECT, # INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE # OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import os import sys import time import json import asyncio from asyncio import subprocess import logging logger = logging.getLogger('test3d-bcns') import tcutils as tu import simutils as su import testutils as tstu station = None infos = None muxs = None sim = None REGION = os.environ.get('REGION','KR920') PPM = 1000000 BINTV = 2 PPSTHRES = 10 # measure for timing accuracy of test/sim platform class TestLgwSimServer(su.LgwSimServer): fcnt = 0 updf_task = None txcnt = 0 seen_bcnfreqs = set() mono2utc = 0.0 last_secs = 0 test_muxs = None last_chnl = -1 async def on_connected(self, lgwsim:su.LgwSim) -> None: self.mono2utc = int((time.time() - time.monotonic()) * 1e6) async def on_close(self): self.updf_task.cancel() self.updf_task = None logger.debug('LGWSIM - close') async def on_tx(self, lgwsim, pkt): try: if pkt['tx_mode'] != lgwsim.hal.ON_GPS: logger.debug('LGWSIM: tx_mode!=ON_GPS (ignored): %r', pkt) return xticks = pkt['count_us'] mono = lgwsim.xticks2mono(xticks) utc = mono + self.mono2utc logger.info('LGWSIM: ON_GPS xticks=%d/%X mono=%d/%X utc=%d/%X', xticks, xticks, mono, mono, utc, utc) logger.debug('LGWSIM: ON_GPS %r', pkt) d = utc % PPM if d >= PPM//2: d -= PPM assert abs(d) < PPSTHRES, f'Failed abs(d)={abs(d)} < PPSTHRES={PPSTHRES}' assert (utc - d) % PPM == 0 secs = (utc - d) // PPM assert self.last_secs==0 or self.last_secs+BINTV == secs self.last_secs = secs if REGION == 'KR920': assert pkt['freq_hz'] == 923100000 assert pkt['size'] == 17 if REGION == 'US915': chnl = (pkt['freq_hz'] - 923300000) / 600000 assert self.last_chnl < 0 or (self.last_chnl + 1) % 8 == chnl self.last_chnl = chnl assert pkt['size'] == 23 self.txcnt += 1 if self.txcnt >= 10: await self.test_muxs.testDone(0) except Exception as exc: logger.error('FAILED: %s', exc, exc_info=True) await self.test_muxs.testDone(1) class TestMuxs(tu.Muxs): exp_seqno = [] seqno = 0 ws = None send_task = None ev = None def get_router_config(self): if REGION == 'US915': conf = tu.router_config_US902_8ch 
conf['bcning'] = { 'DR': 8, 'layout': [5,11,23], 'freqs': [923300000 + chx * 600000 for chx in range(8)] } else: conf = tu.router_config_KR920 conf['bcning'] = { 'DR': 3, 'layout': [2,8,17], 'freqs': [923100000] } return conf async def handle_connection(self, ws): self.ws = ws self.ev = asyncio.Event() #XXX:old: self.send_task = asyncio.ensure_future(self.send_classC()) await super().handle_connection(ws) async def testDone(self, status): global station if station: try: station.terminate() except Exception as exc: logger.error('Shutting down station: %s', exc, exc_info=True) try: await station.wait() logger.error('Exit code station: %d', station.returncode) station = None except Exception as exc: logger.error('Failed to get exit code of station: %s', exc, exc_info=True) os._exit(status) async def handle_dntxed(self, ws, msg): if [msg['seqno']] != self.exp_seqno[0:1]: logger.debug('DNTXED: %r\nbut expected seqno=%r' % (msg, self.exp_seqno)) await self.testDone(2) del self.exp_seqno[0] self.ev.set() #XXX:old: # airtime: dr=4 (SF8) plen=12 <83ms #XXX:old: def make_dnmsgC(self, rx2dr=4, rx2freq=FREQ1, plen=12): #XXX:old: dnmsg = { #XXX:old: 'msgtype' : 'dnmsg', #XXX:old: 'dC' : 2, # device class C #XXX:old: 'dnmode' : 'dn', #XXX:old: 'priority': 0, #XXX:old: 'RX2DR' : rx2dr, #XXX:old: 'RX2Freq' : int(rx2freq*1e6), #XXX:old: 'DevEui' : '00-00-00-00-11-00-00-01', #XXX:old: #'xtime' : 0, # not required #XXX:old: 'seqno' : self.seqno, #XXX:old: 'MuxTime' : time.time(), #XXX:old: 'rctx' : 0, # antenna#0 #XXX:old: 'pdu' : bytes(range(plen)).hex(), #XXX:old: } #XXX:old: self.seqno += 1 #XXX:old: return dnmsg #XXX:old: #XXX:old: async def send_classC(self): #XXX:old: try: #XXX:old: await asyncio.sleep(1.0) #XXX:old: assert self.seqno & 1 == 0 #XXX:old: #XXX:old: for f in (FREQ1,FREQ2,FREQ3,FREQ2): #XXX:old: dnmsg = self.make_dnmsgC(rx2freq=f) #XXX:old: if f != FREQ2: #XXX:old: self.exp_seqno.append(dnmsg['seqno']) #XXX:old: sim.exp_txfreq.append(dnmsg['RX2Freq']) #XXX:old: await self.ws.send(json.dumps(dnmsg)) #XXX:old: #XXX:old: while self.exp_seqno: #XXX:old: self.ev.clear() #XXX:old: await asyncio.wait_for(self.ev.wait(), 5.0) #XXX:old: #XXX:old: await asyncio.sleep(2.0) #XXX:old: assert sim.exp_txfreq == [] #XXX:old: await self.testDone(0) #XXX:old: except asyncio.CancelledError: #XXX:old: logger.debug('send_classC canceled.') #XXX:old: except Exception as exc: #XXX:old: logger.error('send_classC failed: %s', exc, exc_info=True) #XXX:old: await self.testDone(1) if 'PPSTHRES' in os.environ: PPSTHRES = int(os.environ['PPSTHRES']) with open("tc.uri","w") as f: f.write('ws://localhost:6038') async def test_start(): global station, infos, muxs, sim infos = tu.Infos() muxs = TestMuxs() sim = TestLgwSimServer() sim.test_muxs = muxs await infos.start_server() await muxs.start_server() await sim.start_server() # 'valgrind', '--leak-check=full', station_args = ['station','-p', '--temp', '.'] station = await subprocess.create_subprocess_exec(*station_args) tstu.setup_logging() asyncio.ensure_future(test_start()) asyncio.get_event_loop().run_forever()
[]
[]
[ "REGION", "PPSTHRES" ]
[]
["REGION", "PPSTHRES"]
python
2
0
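The on_tx handler above checks that each beacon's UTC timestamp lands on a whole second within PPSTHRES microseconds and that consecutive beacons are exactly BINTV seconds apart. A standalone sketch of that alignment check; the constants mirror the test above, while the example timestamps are made up:

PPM = 1_000_000   # microseconds per second
BINTV = 2         # beacon interval in seconds
PPSTHRES = 10     # allowed drift from the PPS edge, in microseconds


def check_beacon(utc_us: int, last_secs: int) -> int:
    """Return the beacon's whole-second timestamp if it is PPS-aligned
    and BINTV seconds after the previous beacon; raise otherwise."""
    d = utc_us % PPM
    if d >= PPM // 2:
        d -= PPM                       # signed offset from the nearest second boundary
    assert abs(d) < PPSTHRES, f"beacon off PPS edge by {d}us"
    secs = (utc_us - d) // PPM
    assert last_secs == 0 or last_secs + BINTV == secs, "beacon interval mismatch"
    return secs


if __name__ == "__main__":
    last = 0
    for utc in (1_000_000_000_004, 1_000_001_999_997):
        last = check_beacon(utc, last)
        print("beacon at", last, "s")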
metrics/registry_test.go
// Copyright (c) 2018 Palantir Technologies. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package metrics_test import ( "os" "os/exec" "reflect" "sync" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/palantir/pkg/metrics" ) func TestRegistryRegistration(t *testing.T) { // register root metrics root := metrics.NewRootMetricsRegistry() // register metric _ = root.Counter("my-counter") // create subregistry and register metric on it sub := root.Subregistry("subregistry") _ = sub.Gauge("sub-gauge") wantNames := []string{ "my-counter", "subregistry.sub-gauge", } var gotNames []string root.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) { gotNames = append(gotNames, name) assert.NotNil(t, metric) })) assert.Equal(t, wantNames, gotNames) } func TestMetricsWithTags(t *testing.T) { root := metrics.NewRootMetricsRegistry() // register metric with tags _ = root.Counter("my-counter", metrics.MustNewTag("region", "nw")) _ = root.Counter("my-counter", metrics.MustNewTag("region", "ne")) _ = root.Counter("my-counter", metrics.MustNewTag("region", "se"), metrics.MustNewTag("application", "database")) var gotNames []string var gotTags [][]metrics.Tag root.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) { gotNames = append(gotNames, name) gotTags = append(gotTags, tags) assert.NotNil(t, metric) })) // output is sorted by metric name and then by tag names (which themselves are sorted alphabetically) wantNames := []string{ "my-counter", "my-counter", "my-counter", } wantTags := [][]metrics.Tag{ {metrics.MustNewTag("application", "database"), metrics.MustNewTag("region", "se")}, {metrics.MustNewTag("region", "ne")}, {metrics.MustNewTag("region", "nw")}, } assert.Equal(t, wantNames, gotNames) assert.Equal(t, wantTags, gotTags) } func TestMetricDoesNotMutateInputTagSlice(t *testing.T) { root := metrics.NewRootMetricsRegistry() unsortedTags := metrics.Tags{metrics.MustNewTag("b", "b"), metrics.MustNewTag("a", "a")} root.Counter("my-counter", unsortedTags...).Inc(1) assert.Equal(t, metrics.Tags{metrics.MustNewTag("b", "b"), metrics.MustNewTag("a", "a")}, unsortedTags) } // Prefix should be used as provided (no case conversion/normalization), while tags should always be converted to // lowercase. 
func TestMetricsCasing(t *testing.T) { root := metrics.NewRootMetricsRegistry() // register metric with tags _ = root.Counter("my-COUNTER", metrics.MustNewTag("REGION", "nW")) _ = root.Counter("my-counter", metrics.MustNewTag("region", "NE")) var gotNames []string var gotTags [][]metrics.Tag root.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) { gotNames = append(gotNames, name) gotTags = append(gotTags, tags) assert.NotNil(t, metric) })) // output is sorted by metric name and then by tag names (which themselves are sorted alphabetically) wantNames := []string{ "my-COUNTER", "my-counter", } wantTags := [][]metrics.Tag{ {metrics.MustNewTag("region", "nw")}, {metrics.MustNewTag("region", "ne")}, } assert.Equal(t, wantNames, gotNames) assert.Equal(t, wantTags, gotTags) } func TestRegistryRegistrationWithMemStats(t *testing.T) { // register root metrics root := metrics.NewRootMetricsRegistry() metrics.CaptureRuntimeMemStats(root, time.Hour) // register metric _ = root.Counter("my-counter") // create subregistry and register metric on it sub := root.Subregistry("subregistry") _ = sub.Gauge("sub-gauge") wantNames := []string{ "go.runtime.MemStats.Alloc", "go.runtime.MemStats.GCCPUFraction", "go.runtime.MemStats.HeapAlloc", "go.runtime.MemStats.HeapIdle", "go.runtime.MemStats.HeapInuse", "go.runtime.MemStats.HeapObjects", "go.runtime.MemStats.HeapReleased", "go.runtime.MemStats.HeapSys", "go.runtime.MemStats.NumGC", "go.runtime.MemStats.PauseNs", "go.runtime.MemStats.StackInuse", "go.runtime.NumGoroutine", "go.runtime.NumThread", "go.runtime.ReadMemStats", "my-counter", "subregistry.sub-gauge", } var gotNames []string root.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) { gotNames = append(gotNames, name) assert.NotNil(t, metric) })) assert.Equal(t, wantNames, gotNames) } func concurrentMetricTest(t *testing.T) { root := metrics.NewRootMetricsRegistry() commonMetric := "test-counter" increments := 100 var waitGroup sync.WaitGroup waitGroup.Add(2) go func() { for i := 0; i < increments; i++ { root.Counter(commonMetric).Inc(1) } waitGroup.Done() }() go func() { for i := 0; i < increments; i++ { root.Counter(commonMetric).Inc(1) } waitGroup.Done() }() waitGroup.Wait() require.Equal(t, int64(2*increments), root.Counter(commonMetric).Count()) } // It is hard to catch the goroutine exits and have them impact actual test reporting. We end up having // to simulate the testing ourselves, but it also means that if this test fails, it takes a bit of work to figure out why. 
func TestManyConcurrentMetrics(t *testing.T) { if os.Getenv("CRASH_IF_FAILS") == "1" { concurrentMetricTest(t) return } cmd := exec.Command(os.Args[0], "-test.run=TestManyConcurrentMetrics") cmd.Env = append(os.Environ(), "CRASH_IF_FAILS=1") err := cmd.Run() require.NoError(t, err, "Error while checking for concurrent metric handling!") } func TestSubregistry_Each(t *testing.T) { rootRegistry := metrics.NewRootMetricsRegistry() subRegistry := rootRegistry.Subregistry("prefix.") subRegistry.Gauge("gauge1").Update(0) subRegistry.Gauge("gauge2").Update(1) gauge1Count := 0 gauge2Count := 0 subRegistry.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) { assert.NotNil(t, metric) assert.Empty(t, tags) switch name { case "gauge1": gauge1Count++ case "gauge2": gauge2Count++ default: assert.Fail(t, "unexpected metric %s", name) } })) assert.Equal(t, 1, gauge1Count) assert.Equal(t, 1, gauge2Count) } func TestSubregistry_Unregister(t *testing.T) { registry := metrics.NewRootMetricsRegistry().Subregistry("prefix.") registry.Gauge("gauge1", metrics.MustNewTag("tagKey", "tagValue1")).Update(0) registry.Gauge("gauge1", metrics.MustNewTag("tagKey", "tagValue2")).Update(0) registry.Gauge("gauge2").Update(0) assert.True(t, registryContains(registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue1")})) assert.True(t, registryContains(registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue2")})) assert.True(t, registryContains(registry, "gauge2", nil)) assert.Equal(t, 3, registrySize(registry)) registry.Unregister("gauge1", metrics.MustNewTag("tagKey", "tagValue1")) assert.True(t, registryContains(registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue2")})) assert.True(t, registryContains(registry, "gauge2", nil)) assert.Equal(t, 2, registrySize(registry)) registry.Unregister("gauge1", metrics.MustNewTag("tagKey", "tagValue2")) assert.True(t, registryContains(registry, "gauge2", nil)) assert.Equal(t, 1, registrySize(registry)) registry.Unregister("gauge2") assert.Equal(t, 0, registrySize(registry)) } func TestRootRegistry_Unregister(t *testing.T) { registry := metrics.NewRootMetricsRegistry() registry.Gauge("gauge1", metrics.MustNewTag("tagKey", "tagValue1")).Update(0) registry.Gauge("gauge1", metrics.MustNewTag("tagKey", "tagValue2")).Update(0) registry.Gauge("gauge2").Update(0) assert.True(t, registryContains(registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue1")})) assert.True(t, registryContains(registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue2")})) assert.True(t, registryContains(registry, "gauge2", nil)) assert.Equal(t, 3, registrySize(registry)) registry.Unregister("gauge1", metrics.MustNewTag("tagKey", "tagValue1")) assert.True(t, registryContains(registry, "gauge1", []metrics.Tag{metrics.MustNewTag("tagKey", "tagValue2")})) assert.True(t, registryContains(registry, "gauge2", nil)) assert.Equal(t, 2, registrySize(registry)) registry.Unregister("gauge1", metrics.MustNewTag("tagKey", "tagValue2")) assert.True(t, registryContains(registry, "gauge2", nil)) assert.Equal(t, 1, registrySize(registry)) registry.Unregister("gauge2") assert.Equal(t, 0, registrySize(registry)) } func TestRootRegistry_ConcurrentUnregisterAndEachDoesNotPanic(t *testing.T) { registry := metrics.NewRootMetricsRegistry() registry.Gauge("gauge1").Update(0) registry.Gauge("gauge2").Update(0) var firstMetricVisited, metricUnregistered, goRoutineFinished sync.WaitGroup firstMetricVisited.Add(1) 
metricUnregistered.Add(1) goRoutineFinished.Add(1) go func() { registry.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) { if name == "gauge1" { firstMetricVisited.Done() metricUnregistered.Wait() } })) goRoutineFinished.Done() }() firstMetricVisited.Wait() registry.Unregister("gauge2") metricUnregistered.Done() goRoutineFinished.Wait() } func TestRootRegistry_SubregistryWithTags(t *testing.T) { rootRegistry := metrics.NewRootMetricsRegistry() permanentTag := metrics.MustNewTag("permanentKey", "permanentValue") subregistry := rootRegistry.Subregistry("subregistry", permanentTag) runtimeTag := metrics.MustNewTag("key", "value") subregistry.Counter("counter", runtimeTag).Count() subregistry.Gauge("gauge", runtimeTag).Update(0) subregistry.GaugeFloat64("gaugeFloat64", runtimeTag).Update(0) subregistry.Meter("meter", runtimeTag).Mark(0) subregistry.Timer("timer", runtimeTag).Update(0) subregistry.Histogram("histogram", runtimeTag).Update(0) subregistry.HistogramWithSample("histogramWithSample", metrics.DefaultSample(), runtimeTag).Update(0) registered := map[string]map[string]string{} subregistry.Each(func(name string, tags metrics.Tags, metric metrics.MetricVal) { registered[name] = tags.ToMap() }) assert.Equal(t, map[string]map[string]string{ "counter": metrics.Tags{permanentTag, runtimeTag}.ToMap(), "gauge": metrics.Tags{permanentTag, runtimeTag}.ToMap(), "gaugeFloat64": metrics.Tags{permanentTag, runtimeTag}.ToMap(), "meter": metrics.Tags{permanentTag, runtimeTag}.ToMap(), "timer": metrics.Tags{permanentTag, runtimeTag}.ToMap(), "histogram": metrics.Tags{permanentTag, runtimeTag}.ToMap(), "histogramWithSample": metrics.Tags{permanentTag, runtimeTag}.ToMap(), }, registered, ) subregistry.Unregister("counter", runtimeTag) subregistry.Unregister("gauge", runtimeTag) subregistry.Unregister("gaugeFloat64", runtimeTag) subregistry.Unregister("meter", runtimeTag) subregistry.Unregister("timer", runtimeTag) subregistry.Unregister("histogram", runtimeTag) subregistry.Unregister("histogramWithSample", runtimeTag) subregistry.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) { assert.Fail(t, "there should be no metrics registered") })) } func registrySize(registry metrics.Registry) int { count := 0 registry.Each(metrics.MetricVisitor(func(name string, tags metrics.Tags, metric metrics.MetricVal) { count++ })) return count } func registryContains(registry metrics.Registry, name string, tags metrics.Tags) bool { contains := false var tagStrings []string for _, tag := range tags { tagStrings = append(tagStrings, tag.String()) } registry.Each(metrics.MetricVisitor(func(eachName string, eachTags metrics.Tags, metric metrics.MetricVal) { var eachTagStrings []string for _, eachTag := range eachTags { eachTagStrings = append(eachTagStrings, eachTag.String()) } if eachName == name && reflect.DeepEqual(eachTagStrings, tagStrings) { contains = true } })) return contains }
[ "\"CRASH_IF_FAILS\"" ]
[]
[ "CRASH_IF_FAILS" ]
[]
["CRASH_IF_FAILS"]
go
1
0
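The record above is keyed on the CRASH_IF_FAILS variable that TestManyConcurrentMetrics reads: because a concurrent-map fault inside a goroutine would take down the whole test process, the test re-executes its own binary as a subprocess with the variable set and judges success by the child's exit status. Below is a minimal, self-contained sketch of that re-exec pattern, assuming a hypothetical package crashdemo (in a file such as crashdemo_test.go), test name TestSubprocessCrash, and gate variable GO_TEST_SUBPROCESS; none of these names come from the record itself.

package crashdemo

import (
	"os"
	"os/exec"
	"testing"
)

// doRiskyWork stands in for code that can crash the whole process
// (for example, an unsynchronized concurrent map write). Placeholder only.
func doRiskyWork() {}

// TestSubprocessCrash re-runs the test binary as a child process. The child
// sees GO_TEST_SUBPROCESS=1, performs the risky work, and returns; the parent
// only inspects the child's exit status, so a hard crash becomes a normal
// test failure instead of killing the test run.
func TestSubprocessCrash(t *testing.T) {
	if os.Getenv("GO_TEST_SUBPROCESS") == "1" {
		doRiskyWork()
		return
	}
	cmd := exec.Command(os.Args[0], "-test.run=TestSubprocessCrash")
	cmd.Env = append(os.Environ(), "GO_TEST_SUBPROCESS=1")
	if err := cmd.Run(); err != nil {
		t.Fatalf("subprocess crashed or failed: %v", err)
	}
}

The record's constargjson value, ["CRASH_IF_FAILS"], reflects exactly this kind of gate: the one constant string passed to os.Getenv in the file.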
src/cmd/go/internal/work/gc.go
// Copyright 2011 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. package work import ( "bufio" "bytes" "fmt" "internal/buildcfg" "io" "log" "os" "path/filepath" "runtime" "strings" "cmd/go/internal/base" "cmd/go/internal/cfg" "cmd/go/internal/fsys" "cmd/go/internal/load" "cmd/internal/objabi" "cmd/internal/str" "cmd/internal/sys" "crypto/sha1" ) // The 'path' used for GOROOT_FINAL when -trimpath is specified const trimPathGoRootFinal = "go" var runtimePackages = map[string]struct{}{ "internal/abi": struct{}{}, "internal/bytealg": struct{}{}, "internal/cpu": struct{}{}, "internal/goarch": struct{}{}, "internal/goos": struct{}{}, "runtime": struct{}{}, "runtime/internal/atomic": struct{}{}, "runtime/internal/math": struct{}{}, "runtime/internal/sys": struct{}{}, } // The Go toolchain. type gcToolchain struct{} func (gcToolchain) compiler() string { return base.Tool("compile") } func (gcToolchain) linker() string { return base.Tool("link") } func pkgPath(a *Action) string { p := a.Package ppath := p.ImportPath if cfg.BuildBuildmode == "plugin" { ppath = pluginPath(a) } else if p.Name == "main" && !p.Internal.ForceLibrary { ppath = "main" } return ppath } func (gcToolchain) gc(b *Builder, a *Action, archive string, importcfg, embedcfg []byte, symabis string, asmhdr bool, gofiles []string) (ofile string, output []byte, err error) { p := a.Package objdir := a.Objdir if archive != "" { ofile = archive } else { out := "_go_.o" ofile = objdir + out } pkgpath := pkgPath(a) gcflags := []string{"-p", pkgpath} if p.Module != nil { v := p.Module.GoVersion if v == "" { // We started adding a 'go' directive to the go.mod file unconditionally // as of Go 1.12, so any module that still lacks such a directive must // either have been authored before then, or have a hand-edited go.mod // file that hasn't been updated by cmd/go since that edit. // // Unfortunately, through at least Go 1.16 we didn't add versions to // vendor/modules.txt. So this could also be a vendored 1.16 dependency. // // Fortunately, there were no breaking changes to the language between Go // 1.11 and 1.16, so if we assume Go 1.16 semantics we will not introduce // any spurious errors — we will only mask errors, and not particularly // important ones at that. v = "1.16" } if allowedVersion(v) { gcflags = append(gcflags, "-lang=go"+v) } } if p.Standard { gcflags = append(gcflags, "-std") } _, compilingRuntime := runtimePackages[p.ImportPath] compilingRuntime = compilingRuntime && p.Standard if compilingRuntime { // runtime compiles with a special gc flag to check for // memory allocations that are invalid in the runtime package, // and to implement some special compiler pragmas. gcflags = append(gcflags, "-+") } // If we're giving the compiler the entire package (no C etc files), tell it that, // so that it can give good error messages about forward declarations. // Exceptions: a few standard packages have forward declarations for // pieces supplied behind-the-scenes by package runtime. 
extFiles := len(p.CgoFiles) + len(p.CFiles) + len(p.CXXFiles) + len(p.MFiles) + len(p.FFiles) + len(p.SFiles) + len(p.SysoFiles) + len(p.SwigFiles) + len(p.SwigCXXFiles) if p.Standard { switch p.ImportPath { case "bytes", "internal/poll", "net", "os": fallthrough case "runtime/metrics", "runtime/pprof", "runtime/trace": fallthrough case "sync", "syscall", "time": extFiles++ } } if extFiles == 0 { gcflags = append(gcflags, "-complete") } if cfg.BuildContext.InstallSuffix != "" { gcflags = append(gcflags, "-installsuffix", cfg.BuildContext.InstallSuffix) } if a.buildID != "" { gcflags = append(gcflags, "-buildid", a.buildID) } if p.Internal.OmitDebug || cfg.Goos == "plan9" || cfg.Goarch == "wasm" { gcflags = append(gcflags, "-dwarf=false") } if strings.HasPrefix(runtimeVersion, "go1") && !strings.Contains(os.Args[0], "go_bootstrap") { gcflags = append(gcflags, "-goversion", runtimeVersion) } if symabis != "" { gcflags = append(gcflags, "-symabis", symabis) } gcflags = append(gcflags, str.StringList(forcedGcflags, p.Internal.Gcflags)...) if compilingRuntime { // Remove -N, if present. // It is not possible to build the runtime with no optimizations, // because the compiler cannot eliminate enough write barriers. for i := 0; i < len(gcflags); i++ { if gcflags[i] == "-N" { copy(gcflags[i:], gcflags[i+1:]) gcflags = gcflags[:len(gcflags)-1] i-- } } } args := []interface{}{cfg.BuildToolexec, base.Tool("compile"), "-o", ofile, "-trimpath", a.trimpath(), gcflags} if p.Internal.LocalPrefix != "" { // Workaround #43883. args = append(args, "-D", p.Internal.LocalPrefix) } if importcfg != nil { if err := b.writeFile(objdir+"importcfg", importcfg); err != nil { return "", nil, err } args = append(args, "-importcfg", objdir+"importcfg") } if embedcfg != nil { if err := b.writeFile(objdir+"embedcfg", embedcfg); err != nil { return "", nil, err } args = append(args, "-embedcfg", objdir+"embedcfg") } if ofile == archive { args = append(args, "-pack") } if asmhdr { args = append(args, "-asmhdr", objdir+"go_asm.h") } // Add -c=N to use concurrent backend compilation, if possible. if c := gcBackendConcurrency(gcflags); c > 1 { args = append(args, fmt.Sprintf("-c=%d", c)) } for _, f := range gofiles { f := mkAbs(p.Dir, f) // Handle overlays. Convert path names using OverlayPath // so these paths can be handed directly to tools. // Deleted files won't show up in when scanning directories earlier, // so OverlayPath will never return "" (meaning a deleted file) here. // TODO(#39958): Handle cases where the package directory // doesn't exist on disk (this can happen when all the package's // files are in an overlay): the code expects the package directory // to exist and runs some tools in that directory. // TODO(#39958): Process the overlays when the // gofiles, cgofiles, cfiles, sfiles, and cxxfiles variables are // created in (*Builder).build. Doing that requires rewriting the // code that uses those values to expect absolute paths. f, _ = fsys.OverlayPath(f) args = append(args, f) } output, err = b.runOut(a, base.Cwd(), nil, args...) return ofile, output, err } // gcBackendConcurrency returns the backend compiler concurrency level for a package compilation. func gcBackendConcurrency(gcflags []string) int { // First, check whether we can use -c at all for this compilation. canDashC := concurrentGCBackendCompilationEnabledByDefault switch e := os.Getenv("GO19CONCURRENTCOMPILATION"); e { case "0": canDashC = false case "1": canDashC = true case "": // Not set. Use default. 
default: log.Fatalf("GO19CONCURRENTCOMPILATION must be 0, 1, or unset, got %q", e) } CheckFlags: for _, flag := range gcflags { // Concurrent compilation is presumed incompatible with any gcflags, // except for known commonly used flags. // If the user knows better, they can manually add their own -c to the gcflags. switch flag { case "-N", "-l", "-S", "-B", "-C", "-I": // OK default: canDashC = false break CheckFlags } } // TODO: Test and delete these conditions. if buildcfg.Experiment.FieldTrack || buildcfg.Experiment.PreemptibleLoops { canDashC = false } if !canDashC { return 1 } // Decide how many concurrent backend compilations to allow. // // If we allow too many, in theory we might end up with p concurrent processes, // each with c concurrent backend compiles, all fighting over the same resources. // However, in practice, that seems not to happen too much. // Most build graphs are surprisingly serial, so p==1 for much of the build. // Furthermore, concurrent backend compilation is only enabled for a part // of the overall compiler execution, so c==1 for much of the build. // So don't worry too much about that interaction for now. // // However, in practice, setting c above 4 tends not to help very much. // See the analysis in CL 41192. // // TODO(josharian): attempt to detect whether this particular compilation // is likely to be a bottleneck, e.g. when: // - it has no successor packages to compile (usually package main) // - all paths through the build graph pass through it // - critical path scheduling says it is high priority // and in such a case, set c to runtime.GOMAXPROCS(0). // By default this is the same as runtime.NumCPU. // We do this now when p==1. // To limit parallelism, set GOMAXPROCS below numCPU; this may be useful // on a low-memory builder, or if a deterministic build order is required. c := runtime.GOMAXPROCS(0) if cfg.BuildP == 1 { // No process parallelism, do not cap compiler parallelism. return c } // Some process parallelism. Set c to min(4, maxprocs). if c > 4 { c = 4 } return c } // trimpath returns the -trimpath argument to use // when compiling the action. func (a *Action) trimpath() string { // Keep in sync with Builder.ccompile // The trimmed paths are a little different, but we need to trim in the // same situations. // Strip the object directory entirely. objdir := a.Objdir if len(objdir) > 1 && objdir[len(objdir)-1] == filepath.Separator { objdir = objdir[:len(objdir)-1] } rewrite := "" rewriteDir := a.Package.Dir if cfg.BuildTrimpath { importPath := a.Package.Internal.OrigImportPath if m := a.Package.Module; m != nil && m.Version != "" { rewriteDir = m.Path + "@" + m.Version + strings.TrimPrefix(importPath, m.Path) } else { rewriteDir = importPath } rewrite += a.Package.Dir + "=>" + rewriteDir + ";" } // Add rewrites for overlays. The 'from' and 'to' paths in overlays don't need to have // same basename, so go from the overlay contents file path (passed to the compiler) // to the path the disk path would be rewritten to. cgoFiles := make(map[string]bool) for _, f := range a.Package.CgoFiles { cgoFiles[f] = true } // TODO(matloob): Higher up in the stack, when the logic for deciding when to make copies // of c/c++/m/f/hfiles is consolidated, use the same logic that Build uses to determine // whether to create the copies in objdir to decide whether to rewrite objdir to the // package directory here. 
var overlayNonGoRewrites string // rewrites for non-go files hasCgoOverlay := false if fsys.OverlayFile != "" { for _, filename := range a.Package.AllFiles() { path := filename if !filepath.IsAbs(path) { path = filepath.Join(a.Package.Dir, path) } base := filepath.Base(path) isGo := strings.HasSuffix(filename, ".go") || strings.HasSuffix(filename, ".s") isCgo := cgoFiles[filename] || !isGo overlayPath, isOverlay := fsys.OverlayPath(path) if isCgo && isOverlay { hasCgoOverlay = true } if !isCgo && isOverlay { rewrite += overlayPath + "=>" + filepath.Join(rewriteDir, base) + ";" } else if isCgo { // Generate rewrites for non-Go files copied to files in objdir. if filepath.Dir(path) == a.Package.Dir { // This is a file copied to objdir. overlayNonGoRewrites += filepath.Join(objdir, base) + "=>" + filepath.Join(rewriteDir, base) + ";" } } else { // Non-overlay Go files are covered by the a.Package.Dir rewrite rule above. } } } if hasCgoOverlay { rewrite += overlayNonGoRewrites } rewrite += objdir + "=>" return rewrite } func asmArgs(a *Action, p *load.Package) []interface{} { // Add -I pkg/GOOS_GOARCH so #include "textflag.h" works in .s files. inc := filepath.Join(cfg.GOROOT, "pkg", "include") pkgpath := pkgPath(a) args := []interface{}{cfg.BuildToolexec, base.Tool("asm"), "-p", pkgpath, "-trimpath", a.trimpath(), "-I", a.Objdir, "-I", inc, "-D", "GOOS_" + cfg.Goos, "-D", "GOARCH_" + cfg.Goarch, forcedAsmflags, p.Internal.Asmflags} if p.ImportPath == "runtime" && cfg.Goarch == "386" { for _, arg := range forcedAsmflags { if arg == "-dynlink" { args = append(args, "-D=GOBUILDMODE_shared=1") } } } if objabi.IsRuntimePackagePath(pkgpath) { args = append(args, "-compiling-runtime") } if cfg.Goarch == "386" { // Define GO386_value from cfg.GO386. args = append(args, "-D", "GO386_"+cfg.GO386) } if cfg.Goarch == "mips" || cfg.Goarch == "mipsle" { // Define GOMIPS_value from cfg.GOMIPS. args = append(args, "-D", "GOMIPS_"+cfg.GOMIPS) } if cfg.Goarch == "mips64" || cfg.Goarch == "mips64le" { // Define GOMIPS64_value from cfg.GOMIPS64. args = append(args, "-D", "GOMIPS64_"+cfg.GOMIPS64) } return args } func (gcToolchain) asm(b *Builder, a *Action, sfiles []string) ([]string, error) { p := a.Package args := asmArgs(a, p) var ofiles []string for _, sfile := range sfiles { overlayPath, _ := fsys.OverlayPath(mkAbs(p.Dir, sfile)) ofile := a.Objdir + sfile[:len(sfile)-len(".s")] + ".o" ofiles = append(ofiles, ofile) args1 := append(args, "-o", ofile, overlayPath) if err := b.run(a, p.Dir, p.ImportPath, nil, args1...); err != nil { return nil, err } } return ofiles, nil } func (gcToolchain) symabis(b *Builder, a *Action, sfiles []string) (string, error) { mkSymabis := func(p *load.Package, sfiles []string, path string) error { args := asmArgs(a, p) args = append(args, "-gensymabis", "-o", path) for _, sfile := range sfiles { if p.ImportPath == "runtime/cgo" && strings.HasPrefix(sfile, "gcc_") { continue } op, _ := fsys.OverlayPath(mkAbs(p.Dir, sfile)) args = append(args, op) } // Supply an empty go_asm.h as if the compiler had been run. // -gensymabis parsing is lax enough that we don't need the // actual definitions that would appear in go_asm.h. if err := b.writeFile(a.Objdir+"go_asm.h", nil); err != nil { return err } return b.run(a, p.Dir, p.ImportPath, nil, args...) 
} var symabis string // Only set if we actually create the file p := a.Package if len(sfiles) != 0 { symabis = a.Objdir + "symabis" if err := mkSymabis(p, sfiles, symabis); err != nil { return "", err } } return symabis, nil } // toolVerify checks that the command line args writes the same output file // if run using newTool instead. // Unused now but kept around for future use. func toolVerify(a *Action, b *Builder, p *load.Package, newTool string, ofile string, args []interface{}) error { newArgs := make([]interface{}, len(args)) copy(newArgs, args) newArgs[1] = base.Tool(newTool) newArgs[3] = ofile + ".new" // x.6 becomes x.6.new if err := b.run(a, p.Dir, p.ImportPath, nil, newArgs...); err != nil { return err } data1, err := os.ReadFile(ofile) if err != nil { return err } data2, err := os.ReadFile(ofile + ".new") if err != nil { return err } if !bytes.Equal(data1, data2) { return fmt.Errorf("%s and %s produced different output files:\n%s\n%s", filepath.Base(args[1].(string)), newTool, strings.Join(str.StringList(args...), " "), strings.Join(str.StringList(newArgs...), " ")) } os.Remove(ofile + ".new") return nil } func (gcToolchain) pack(b *Builder, a *Action, afile string, ofiles []string) error { var absOfiles []string for _, f := range ofiles { absOfiles = append(absOfiles, mkAbs(a.Objdir, f)) } absAfile := mkAbs(a.Objdir, afile) // The archive file should have been created by the compiler. // Since it used to not work that way, verify. if !cfg.BuildN { if _, err := os.Stat(absAfile); err != nil { base.Fatalf("os.Stat of archive file failed: %v", err) } } p := a.Package if cfg.BuildN || cfg.BuildX { cmdline := str.StringList(base.Tool("pack"), "r", absAfile, absOfiles) b.Showcmd(p.Dir, "%s # internal", joinUnambiguously(cmdline)) } if cfg.BuildN { return nil } if err := packInternal(absAfile, absOfiles); err != nil { b.showOutput(a, p.Dir, p.Desc(), err.Error()+"\n") return errPrintedOutput } return nil } func packInternal(afile string, ofiles []string) error { dst, err := os.OpenFile(afile, os.O_WRONLY|os.O_APPEND, 0) if err != nil { return err } defer dst.Close() // only for error returns or panics w := bufio.NewWriter(dst) for _, ofile := range ofiles { src, err := os.Open(ofile) if err != nil { return err } fi, err := src.Stat() if err != nil { src.Close() return err } // Note: Not using %-16.16s format because we care // about bytes, not runes. name := fi.Name() if len(name) > 16 { name = name[:16] } else { name += strings.Repeat(" ", 16-len(name)) } size := fi.Size() fmt.Fprintf(w, "%s%-12d%-6d%-6d%-8o%-10d`\n", name, 0, 0, 0, 0644, size) n, err := io.Copy(w, src) src.Close() if err == nil && n < size { err = io.ErrUnexpectedEOF } else if err == nil && n > size { err = fmt.Errorf("file larger than size reported by stat") } if err != nil { return fmt.Errorf("copying %s to %s: %v", ofile, afile, err) } if size&1 != 0 { w.WriteByte(0) } } if err := w.Flush(); err != nil { return err } return dst.Close() } // setextld sets the appropriate linker flags for the specified compiler. func setextld(ldflags []string, compiler []string) ([]string, error) { for _, f := range ldflags { if f == "-extld" || strings.HasPrefix(f, "-extld=") { // don't override -extld if supplied return ldflags, nil } } joined, err := str.JoinAndQuoteFields(compiler) if err != nil { return nil, err } return append(ldflags, "-extld="+joined), nil } // pluginPath computes the package path for a plugin main package. 
// // This is typically the import path of the main package p, unless the // plugin is being built directly from source files. In that case we // combine the package build ID with the contents of the main package // source files. This allows us to identify two different plugins // built from two source files with the same name. func pluginPath(a *Action) string { p := a.Package if p.ImportPath != "command-line-arguments" { return p.ImportPath } h := sha1.New() buildID := a.buildID if a.Mode == "link" { // For linking, use the main package's build ID instead of // the binary's build ID, so it is the same hash used in // compiling and linking. // When compiling, we use actionID/actionID (instead of // actionID/contentID) as a temporary build ID to compute // the hash. Do the same here. (See buildid.go:useCache) // The build ID matters because it affects the overall hash // in the plugin's pseudo-import path returned below. // We need to use the same import path when compiling and linking. id := strings.Split(buildID, buildIDSeparator) buildID = id[1] + buildIDSeparator + id[1] } fmt.Fprintf(h, "build ID: %s\n", buildID) for _, file := range str.StringList(p.GoFiles, p.CgoFiles, p.SFiles) { data, err := os.ReadFile(filepath.Join(p.Dir, file)) if err != nil { base.Fatalf("go: %s", err) } h.Write(data) } return fmt.Sprintf("plugin/unnamed-%x", h.Sum(nil)) } func (gcToolchain) ld(b *Builder, root *Action, out, importcfg, mainpkg string) error { cxx := len(root.Package.CXXFiles) > 0 || len(root.Package.SwigCXXFiles) > 0 for _, a := range root.Deps { if a.Package != nil && (len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0) { cxx = true } } var ldflags []string if cfg.BuildContext.InstallSuffix != "" { ldflags = append(ldflags, "-installsuffix", cfg.BuildContext.InstallSuffix) } if root.Package.Internal.OmitDebug { ldflags = append(ldflags, "-s", "-w") } if cfg.BuildBuildmode == "plugin" { ldflags = append(ldflags, "-pluginpath", pluginPath(root)) } // Store BuildID inside toolchain binaries as a unique identifier of the // tool being run, for use by content-based staleness determination. if root.Package.Goroot && strings.HasPrefix(root.Package.ImportPath, "cmd/") { // External linking will include our build id in the external // linker's build id, which will cause our build id to not // match the next time the tool is built. // Rely on the external build id instead. if !sys.MustLinkExternal(cfg.Goos, cfg.Goarch) { ldflags = append(ldflags, "-X=cmd/internal/objabi.buildID="+root.buildID) } } // If the user has not specified the -extld option, then specify the // appropriate linker. In case of C++ code, use the compiler named // by the CXX environment variable or defaultCXX if CXX is not set. // Else, use the CC environment variable and defaultCC as fallback. var compiler []string if cxx { compiler = envList("CXX", cfg.DefaultCXX(cfg.Goos, cfg.Goarch)) } else { compiler = envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch)) } ldflags = append(ldflags, "-buildmode="+ldBuildmode) if root.buildID != "" { ldflags = append(ldflags, "-buildid="+root.buildID) } ldflags = append(ldflags, forcedLdflags...) ldflags = append(ldflags, root.Package.Internal.Ldflags...) ldflags, err := setextld(ldflags, compiler) if err != nil { return err } // On OS X when using external linking to build a shared library, // the argument passed here to -o ends up recorded in the final // shared library in the LC_ID_DYLIB load command. 
// To avoid putting the temporary output directory name there // (and making the resulting shared library useless), // run the link in the output directory so that -o can name // just the final path element. // On Windows, DLL file name is recorded in PE file // export section, so do like on OS X. dir := "." if (cfg.Goos == "darwin" || cfg.Goos == "windows") && cfg.BuildBuildmode == "c-shared" { dir, out = filepath.Split(out) } env := []string{} if cfg.BuildTrimpath { env = append(env, "GOROOT_FINAL="+trimPathGoRootFinal) } return b.run(root, dir, root.Package.ImportPath, env, cfg.BuildToolexec, base.Tool("link"), "-o", out, "-importcfg", importcfg, ldflags, mainpkg) } func (gcToolchain) ldShared(b *Builder, root *Action, toplevelactions []*Action, out, importcfg string, allactions []*Action) error { ldflags := []string{"-installsuffix", cfg.BuildContext.InstallSuffix} ldflags = append(ldflags, "-buildmode=shared") ldflags = append(ldflags, forcedLdflags...) ldflags = append(ldflags, root.Package.Internal.Ldflags...) cxx := false for _, a := range allactions { if a.Package != nil && (len(a.Package.CXXFiles) > 0 || len(a.Package.SwigCXXFiles) > 0) { cxx = true } } // If the user has not specified the -extld option, then specify the // appropriate linker. In case of C++ code, use the compiler named // by the CXX environment variable or defaultCXX if CXX is not set. // Else, use the CC environment variable and defaultCC as fallback. var compiler []string if cxx { compiler = envList("CXX", cfg.DefaultCXX(cfg.Goos, cfg.Goarch)) } else { compiler = envList("CC", cfg.DefaultCC(cfg.Goos, cfg.Goarch)) } ldflags, err := setextld(ldflags, compiler) if err != nil { return err } for _, d := range toplevelactions { if !strings.HasSuffix(d.Target, ".a") { // omit unsafe etc and actions for other shared libraries continue } ldflags = append(ldflags, d.Package.ImportPath+"="+d.Target) } return b.run(root, ".", out, nil, cfg.BuildToolexec, base.Tool("link"), "-o", out, "-importcfg", importcfg, ldflags) } func (gcToolchain) cc(b *Builder, a *Action, ofile, cfile string) error { return fmt.Errorf("%s: C source files not supported without cgo", mkAbs(a.Package.Dir, cfile)) }
[ "\"GO19CONCURRENTCOMPILATION\"" ]
[]
[ "GO19CONCURRENTCOMPILATION" ]
[]
["GO19CONCURRENTCOMPILATION"]
go
1
0
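The gc.go record is keyed on GO19CONCURRENTCOMPILATION, which gcBackendConcurrency consults before it will pass -c=N to the compiler: "0" and "1" force concurrent backend compilation off or on, an empty value keeps the built-in default, any other value is a fatal error, and the final concurrency is derived from GOMAXPROCS and capped at 4 when multiple packages build in parallel. The standalone sketch below reproduces only that decision shape; the default of true, the simplified allowed-flag list, and the buildP parameter are assumptions for illustration, not the cmd/go implementation.

package main

import (
	"fmt"
	"log"
	"os"
	"runtime"
)

// backendConcurrency mirrors the shape of the decision in gc.go: an
// environment variable gates the feature, unrecognized gcflags disable it,
// and the result is capped once process-level parallelism is in play.
func backendConcurrency(gcflags []string, buildP int) int {
	canDashC := true // assumed default for this sketch
	switch e := os.Getenv("GO19CONCURRENTCOMPILATION"); e {
	case "0":
		canDashC = false
	case "1":
		canDashC = true
	case "":
		// Not set: keep the default.
	default:
		log.Fatalf("GO19CONCURRENTCOMPILATION must be 0, 1, or unset, got %q", e)
	}
	for _, flag := range gcflags {
		switch flag {
		case "-N", "-l", "-S", "-B", "-C", "-I":
			// Flags known to be compatible with concurrent compilation.
		default:
			canDashC = false
		}
	}
	if !canDashC {
		return 1
	}
	c := runtime.GOMAXPROCS(0)
	if buildP == 1 {
		// No process parallelism: let the compiler backend use every CPU.
		return c
	}
	if c > 4 {
		c = 4
	}
	return c
}

func main() {
	// e.g. building with -p 4 and only optimization-related gcflags.
	fmt.Printf("-c=%d\n", backendConcurrency([]string{"-l"}, 4))
}

As in the record, any flag outside the short allow-list falls back to -c=1, which is why user-supplied -gcflags generally disable concurrent backend compilation unless -c is added explicitly.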