// Copyright 2017 The Cockroach Authors.
//
// Use of this software is governed by the Business Source License
// included in the file licenses/BSL.txt.
//
// As of the Change Date specified in that file, in accordance with
// the Business Source License, use of this software will be governed
// by the Apache License, Version 2.0, included in the file
// licenses/APL.txt.
package sql
import (
"context"
"fmt"
"io"
"math"
"strings"
"sync/atomic"
"time"
"unicode/utf8"
"github.com/cockroachdb/cockroach/pkg/config"
"github.com/cockroachdb/cockroach/pkg/kv"
"github.com/cockroachdb/cockroach/pkg/roachpb"
"github.com/cockroachdb/cockroach/pkg/server/serverpb"
"github.com/cockroachdb/cockroach/pkg/server/telemetry"
"github.com/cockroachdb/cockroach/pkg/settings"
"github.com/cockroachdb/cockroach/pkg/sql/parser"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgcode"
"github.com/cockroachdb/cockroach/pkg/sql/pgwire/pgerror"
"github.com/cockroachdb/cockroach/pkg/sql/sem/tree"
"github.com/cockroachdb/cockroach/pkg/sql/sessiondata"
"github.com/cockroachdb/cockroach/pkg/sql/sqlbase"
"github.com/cockroachdb/cockroach/pkg/sql/types"
"github.com/cockroachdb/cockroach/pkg/util"
"github.com/cockroachdb/cockroach/pkg/util/envutil"
"github.com/cockroachdb/cockroach/pkg/util/errorutil"
"github.com/cockroachdb/cockroach/pkg/util/fsm"
"github.com/cockroachdb/cockroach/pkg/util/hlc"
"github.com/cockroachdb/cockroach/pkg/util/log"
"github.com/cockroachdb/cockroach/pkg/util/metric"
"github.com/cockroachdb/cockroach/pkg/util/mon"
"github.com/cockroachdb/cockroach/pkg/util/stop"
"github.com/cockroachdb/cockroach/pkg/util/syncutil"
"github.com/cockroachdb/cockroach/pkg/util/timeutil"
"github.com/cockroachdb/cockroach/pkg/util/tracing"
"github.com/cockroachdb/cockroach/pkg/util/uuid"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/logtags"
"golang.org/x/net/trace"
)
// noteworthyMemoryUsageBytes is the minimum size tracked by a
// transaction or session monitor before the monitor starts explicitly
// logging overall usage growth in the log.
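// For example, an operator could raise the threshold to 10 MiB by setting the
// environment variable (value in bytes) before starting the process:
//
//	COCKROACH_NOTEWORTHY_SESSION_MEMORY_USAGE=10485760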
var noteworthyMemoryUsageBytes = envutil.EnvOrDefaultInt64("COCKROACH_NOTEWORTHY_SESSION_MEMORY_USAGE", 1024*1024)
// A connExecutor is in charge of executing queries received on a given client
// connection. The connExecutor implements a state machine (dictated by the
// Postgres/pgwire session semantics). The state machine is supposed to run
// asynchronously wrt the client connection: it receives input statements
// through a stmtBuf and produces results through a clientComm interface. The
// connExecutor maintains a cursor over the stmtBuf and executes
// statements / produces results for one statement at a time. The cursor points
// at all times to the statement that the connExecutor is currently executing.
// Results for statements before the cursor have already been produced (but not
// necessarily delivered to the client). Statements after the cursor are queued
// for future execution. Keeping already executed statements in the buffer is
// useful in case of automatic retries (in which case statements from the
// retried transaction have to be executed again); the connExecutor is in charge
// of removing old statements that are no longer needed for retries from the
// (head of the) buffer. Separately, the implementer of the clientComm interface
// (e.g. the pgwire module) is in charge of keeping track of what results have
// been delivered to the client and what results haven't (yet).
//
// The connExecutor has two main responsibilities: to dispatch queries to the
// execution engine(s) and relay their results to the clientComm, and to
// implement the state machine maintaining the various aspects of a connection's
// state. The state machine implementation is further divided into two aspects:
// maintaining the transaction status of the connection (outside of a txn,
// inside a txn, in an aborted txn, in a txn awaiting client restart, etc.) and
// maintaining the cursor position (i.e. correctly jumping to whatever the
// "next" statement to execute is in various situations).
//
// The cursor normally advances one statement at a time, but it can also skip
// some statements (remaining statements in a query string are skipped once an
// error is encountered) and it can sometimes be rewound when performing
// automatic retries. Rewinding can only be done if results for the rewound
// statements have not actually been delivered to the client; see below.
//
// +---------------------+
// |connExecutor |
// | |
// +->execution+--------------+
// || + | |
// || |fsm.Event | |
// || | | |
// || v | |
// || fsm.Machine(TxnStateTransitions)
// || + +--------+ | |
// +--------------------+ || | |txnState| | |
// |stmtBuf | || | +--------+ | |
// | | statements are read || | | |
// | +-+-+ +-+-+ +-+-+ +------------------------+ | | |
// | | | | | | | | | | | | | +-------------+ |
// +---> +-+-+ +++-+ +-+-+ | | | |session data | |
// | | ^ | | | +-------------+ |
// | | | +-----------------------------------+ | |
// | | + v | cursor is advanced | advanceInfo | |
// | | cursor | | | |
// | +--------------------+ +---------------------+ |
// | |
// | |
// +-------------+ |
// +--------+ |
// | parser | |
// +--------+ |
// | |
// | |
// | +----------------+ |
// +-------+------+ |execution engine<--------+
// | pgwire conn | +------------+(local/DistSQL) |
// | | | +----------------+
// | +----------+ |
// | |clientComm<---------------+
// | +----------+ results are produced
// | |
// +-------^------+
// |
// |
// +-------+------+
// | SQL client |
// +--------------+
//
// The connExecutor is disconnected from client communication (i.e. generally
// network communication, i.e. pgwire.conn); the module doing client
// communication is responsible for pushing statements into the buffer and for
// providing an implementation of the clientComm interface (and thus sending
// results to the client). The connExecutor does not control when
// results are delivered to the client, but it does have some influence over
// it: the possibility of performing automatic retries goes away the moment
// results for the transaction in question are delivered to the client. The
// communication module has full freedom in sending results whenever it sees
// fit; however, the connExecutor influences communication in the following
// ways:
//
// a) When deciding whether an automatic retry can be performed for a
// transaction, the connExecutor needs to:
//
// 1) query the communication status to check that no results for the txn have
// been delivered to the client and, if this check passes:
// 2) lock the communication so that no further results are delivered to the
// client, and, eventually:
// 3) rewind the clientComm to a certain position corresponding to the start
// of the transaction, thereby discarding all the results that had been
// accumulated for the previous attempt to run the transaction in question.
//
// These steps are all orchestrated through clientComm.lockCommunication() and
// rewindCapability{}.
//
// b) The connExecutor sometimes asks the clientComm to deliver everything
// (most commonly in response to a Sync command).
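//
// To make (a) concrete, here is a rough sketch of the orchestration (hedged
// pseudo-Go; the exact names live on clientComm and rewindCapability and may
// differ slightly from what is shown here):
//
//	cl := clientComm.lockCommunication()
//	if cl.ClientPos() >= txnStartPos {
//		// Results for the txn were already delivered; no retry possible.
//		cl.Close()
//		// ... give up on the automatic retry ...
//	}
//	rc := rewindCapability{cl: cl /* ... */}
//	// Later, when a retriable error comes in:
//	rc.rewindAndUnlock(ctx) // discard buffered results, rewind the stmtBuf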
//
// As of Feb 2018, the pgwire.conn delivers results synchronously to the client
// when its internal buffer overflows. In principle, delivery of results could
// be done asynchronously wrt the processing of commands (e.g. we could have a
// timing policy in addition to the buffer size). A first implementation of
// that showed a performance impact from involving channel communication in
// the Sync processing path.
//
//
// Implementation notes:
//
// --- Error handling ---
//
// The key to understanding how the connExecutor handles errors is
// understanding the fact that there are two distinct categories of errors to
// speak of. There are "query execution errors" and there is the rest. Most
// things fall in the former category: invalid queries, queries that fail
// constraints at runtime, data unavailability errors, retriable errors (i.e.
// serializability violations) and "internal errors" (e.g. connection problems
// in the cluster). This category of errors doesn't represent dramatic events
// as far as the connExecutor is concerned: they produce "results" for the
// query to be passed to the client just like successful queries do, and they
// produce Events for the state machine just like successful queries do (the
// events in question are generally event{non}RetriableErr and they generally
// cause the state machine to move to the Aborted state, but the connExecutor
// doesn't concern itself with this). The way the connExecutor reacts to these
// errors is the same as how it reacts to a successful query completing: it
// moves the cursor over the incoming statements as instructed by the state
// machine and continues running statements.
//
// And then there are the other errors, which don't have anything to do with a
// particular query, but rather with the connExecutor itself. In other
// languages, these would perhaps be modeled as exceptions: we want them to
// unwind the stack significantly. These errors cause connExecutor.run() to
// break out of its loop and return an error. Examples of such errors include
// errors in communication with the client (e.g. the network connection is
// broken) or the connection's context being canceled.
//
// All of connExecutor's methods only return errors of the second category. Query
// execution errors are written to a CommandResult. Low-level methods don't
// operate on a CommandResult directly; instead they operate on a wrapper
// (resultWithStoredErr), which provides access to the query error for purposes
// of building the correct state machine event.
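//
// A minimal sketch of the resulting calling convention (mirroring what
// execCmd() below actually does):
//
//	ev, payload, err := ex.execStmt(ctx, curStmt, stmtRes, nil /* pinfo */)
//	if err != nil {
//		return err // second category: unwind and terminate the session
//	}
//	// First category: any query error lives in stmtRes/payload; feed ev
//	// to the state machine and keep consuming statements.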
//
// --- Context management ---
//
// At the highest level, there's connExecutor.run() that takes a context. That
// context is supposed to represent "the connection's context": its lifetime is
// the client connection's lifetime and it is assigned to
// connEx.ctxHolder.connCtx. Below that, every SQL transaction has its own
// derived context because that's the level at which we trace operations. The
// lifetime of SQL transactions is determined by the txnState: the state machine
// decides when transactions start and end in txnState.performStateTransition().
// When we're inside a SQL transaction, most operations are considered to happen
// in the context of that txn. When there's no SQL transaction (i.e.
// stateNoTxn), everything happens in the connection's context.
//
// High-level code in connExecutor is agnostic of whether it currently is inside
// a txn or not. To deal with both cases, such methods don't explicitly take a
// context; instead they use connEx.Ctx(), which returns the appropriate ctx
// based on the current state.
// Lower-level code (everything from connEx.execStmt() and below, which runs
// in between state transitions) knows what state it's running in, and so the
// usual pattern of explicitly taking a context as an argument is used.
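//
// For illustration, the two patterns look like this (a sketch; see Ctx() and
// execStmt() below for the real code):
//
//	// High-level: no explicit ctx; works inside and outside a txn alike.
//	ex.sessionEventf(ex.Ctx(), "...")
//
//	// Low-level: the ctx for the current state is passed in explicitly.
//	ev, payload, err := ex.execStmt(stmtCtx, curStmt, stmtRes, nil /* pinfo */)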
// Server is the top level singleton for handling SQL connections. It creates
// connExecutors to serve every incoming connection.
type Server struct {
_ util.NoCopy
cfg *ExecutorConfig
// sqlStats tracks per-application statistics for all applications on each
// node.
sqlStats sqlStats
reportedStats sqlStats
reCache *tree.RegexpCache
// pool is the parent monitor for all session monitors except "internal" ones.
pool *mon.BytesMonitor
// Metrics is used to account for normal queries.
Metrics Metrics
// InternalMetrics is used to account for internal queries.
InternalMetrics Metrics
// dbCache is a cache for database descriptors, maintained through Gossip
// updates.
dbCache *databaseCacheHolder
}
// Metrics collects timeseries data about SQL activity.
type Metrics struct {
// EngineMetrics is exported as required by the metrics.Struct magic we use
// for metrics registration.
EngineMetrics EngineMetrics
// StartedStatementCounters contains metrics for statements initiated by
// users. These metrics count user-initiated operations, regardless of
// success (in particular, TxnCommitCount is the number of COMMIT statements
// attempted, not the number of transactions that successfully commit).
StartedStatementCounters StatementCounters
// ExecutedStatementCounters contains metrics for successfully executed
// statements.
ExecutedStatementCounters StatementCounters
}
// NewServer creates a new Server. Start() needs to be called before the Server
// is used.
func NewServer(cfg *ExecutorConfig, pool *mon.BytesMonitor) *Server {
systemCfg := config.NewSystemConfig(cfg.DefaultZoneConfig)
return &Server{
cfg: cfg,
Metrics: makeMetrics(false /*internal*/),
InternalMetrics: makeMetrics(true /*internal*/),
// dbCache will be updated on Start().
dbCache: newDatabaseCacheHolder(newDatabaseCache(systemCfg)),
pool: pool,
sqlStats: sqlStats{st: cfg.Settings, apps: make(map[string]*appStats)},
reportedStats: sqlStats{st: cfg.Settings, apps: make(map[string]*appStats)},
reCache: tree.NewRegexpCache(512),
}
}
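// For reference, a caller would typically wire a Server up roughly like this
// (a sketch with assumed variable names; the real call sites live in the
// server package):
//
//	sqlServer := NewServer(execCfg, rootSQLMemoryMonitor)
//	sqlServer.Start(ctx, stopper)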
func makeMetrics(internal bool) Metrics {
return Metrics{
EngineMetrics: EngineMetrics{
DistSQLSelectCount: metric.NewCounter(getMetricMeta(MetaDistSQLSelect, internal)),
SQLOptFallbackCount: metric.NewCounter(getMetricMeta(MetaSQLOptFallback, internal)),
SQLOptPlanCacheHits: metric.NewCounter(getMetricMeta(MetaSQLOptPlanCacheHits, internal)),
SQLOptPlanCacheMisses: metric.NewCounter(getMetricMeta(MetaSQLOptPlanCacheMisses, internal)),
// TODO(mrtracy): See HistogramWindowInterval in server/config.go for the 6x factor.
DistSQLExecLatency: metric.NewLatency(getMetricMeta(MetaDistSQLExecLatency, internal),
6*metricsSampleInterval),
SQLExecLatency: metric.NewLatency(getMetricMeta(MetaSQLExecLatency, internal),
6*metricsSampleInterval),
DistSQLServiceLatency: metric.NewLatency(getMetricMeta(MetaDistSQLServiceLatency, internal),
6*metricsSampleInterval),
SQLServiceLatency: metric.NewLatency(getMetricMeta(MetaSQLServiceLatency, internal),
6*metricsSampleInterval),
SQLTxnLatency: metric.NewLatency(getMetricMeta(MetaSQLTxnLatency, internal),
6*metricsSampleInterval),
TxnAbortCount: metric.NewCounter(getMetricMeta(MetaTxnAbort, internal)),
FailureCount: metric.NewCounter(getMetricMeta(MetaFailure, internal)),
},
StartedStatementCounters: makeStartedStatementCounters(internal),
ExecutedStatementCounters: makeExecutedStatementCounters(internal),
}
}
// Start starts the Server's background processing.
func (s *Server) Start(ctx context.Context, stopper *stop.Stopper) {
gossipUpdateC := s.cfg.Gossip.RegisterSystemConfigChannel()
stopper.RunWorker(ctx, func(ctx context.Context) {
for {
select {
case <-gossipUpdateC:
sysCfg := s.cfg.Gossip.GetSystemConfig()
s.dbCache.updateSystemConfig(sysCfg)
case <-stopper.ShouldStop():
return
}
}
})
// Start a loop to clear SQL stats at the max reset interval. This is
// to ensure that we always have some worker clearing SQL stats to avoid
// continually allocating space for the SQL stats. Additionally, spawn
// a loop to clear the reported stats at the same large interval just
// in case the telemetry worker fails.
s.PeriodicallyClearSQLStats(ctx, stopper, maxSQLStatReset, &s.sqlStats)
s.PeriodicallyClearSQLStats(ctx, stopper, maxSQLStatReset, &s.reportedStats)
// Start a second loop to clear SQL stats at the requested interval.
s.PeriodicallyClearSQLStats(ctx, stopper, sqlStatReset, &s.sqlStats)
s.PeriodicallyPollForStatementInfoRequests(ctx, stopper)
}
// ResetSQLStats resets the executor's collected SQL statistics.
func (s *Server) ResetSQLStats(ctx context.Context) {
// Dump the SQL stats into the reported stats before clearing the SQL stats.
s.reportedStats.Add(&s.sqlStats)
s.sqlStats.resetStats(ctx)
}
// ResetReportedStats resets the executor's collected reported stats.
func (s *Server) ResetReportedStats(ctx context.Context) {
s.reportedStats.resetStats(ctx)
}
// GetScrubbedStmtStats returns the statement statistics by app, with the
// queries scrubbed of their identifiers. Any statements which cannot be
// scrubbed will be omitted from the returned map.
func (s *Server) GetScrubbedStmtStats() []roachpb.CollectedStatementStatistics {
return s.sqlStats.getScrubbedStmtStats(s.cfg.VirtualSchemas)
}
// Avoid lint errors.
var _ = (*Server).GetScrubbedStmtStats
// GetUnscrubbedStmtStats returns the same thing as GetScrubbedStmtStats, except
// identifiers (e.g. table and column names) aren't scrubbed from the statements.
func (s *Server) GetUnscrubbedStmtStats() []roachpb.CollectedStatementStatistics {
return s.sqlStats.getUnscrubbedStmtStats(s.cfg.VirtualSchemas)
}
// GetScrubbedReportingStats does the same thing as GetScrubbedStmtStats but
// returns statistics from the reported stats pool.
func (s *Server) GetScrubbedReportingStats() []roachpb.CollectedStatementStatistics {
return s.reportedStats.getScrubbedStmtStats(s.cfg.VirtualSchemas)
}
// GetUnscrubbedReportingStats does the same thing as GetUnscrubbedStmtStats but
// returns statistics from the reported stats pool.
func (s *Server) GetUnscrubbedReportingStats() []roachpb.CollectedStatementStatistics {
return s.reportedStats.getUnscrubbedStmtStats(s.cfg.VirtualSchemas)
}
// GetStmtStatsLastReset returns the time at which the statement statistics were
// last cleared.
func (s *Server) GetStmtStatsLastReset() time.Time {
return s.sqlStats.getLastReset()
}
// GetExecutorConfig returns this server's executor config.
func (s *Server) GetExecutorConfig() *ExecutorConfig {
return s.cfg
}
// SetupConn creates a connExecutor for the client connection.
//
// When this method returns there are no resources allocated yet that
// need to be close()d.
//
// Args:
// args: The initial session parameters. They are validated by SetupConn
// and an error is returned if this validation fails.
// stmtBuf: The incoming statement buffer for the new connExecutor.
// clientComm: The interface through which the new connExecutor is going to
// produce results for the client.
// memMetrics: The metrics that statements executed on this connection will
// contribute to.
func (s *Server) SetupConn(
ctx context.Context,
args SessionArgs,
stmtBuf *StmtBuf,
clientComm ClientComm,
memMetrics MemoryMetrics,
) (ConnectionHandler, error) {
sd := s.newSessionData(args)
sdMut := s.makeSessionDataMutator(sd, args.SessionDefaults)
ex, err := s.newConnExecutor(
ctx, sd, &sdMut, stmtBuf, clientComm, memMetrics, &s.Metrics, resetSessionDataToDefaults)
return ConnectionHandler{ex}, err
}
// ConnectionHandler is the interface between the result of SetupConn
// and the ServeConn below. It encapsulates the connExecutor and hides
// it away from other packages.
type ConnectionHandler struct {
ex *connExecutor
}
// GetUnqualifiedIntSize implements pgwire.sessionDataProvider and returns
// the type that INT should be parsed as.
func (h ConnectionHandler) GetUnqualifiedIntSize() *types.T {
var size int
if h.ex != nil {
// The executor will be nil in certain testing situations where
// no server is actually present.
size = h.ex.sessionData.DefaultIntSize
}
switch size {
case 4, 32:
return types.Int4
default:
return types.Int
}
}
// GetParamStatus retrieves the configured value of the session
// variable identified by varName. This is used for the initial
// message sent to a client during a session set-up.
func (h ConnectionHandler) GetParamStatus(ctx context.Context, varName string) string {
name := strings.ToLower(varName)
v, ok := varGen[name]
if !ok {
log.Fatalf(ctx, "programming error: status param %q must be defined session var", varName)
return ""
}
hasDefault, defVal := getSessionVarDefaultString(name, v, h.ex.dataMutator)
if !hasDefault {
log.Fatalf(ctx, "programming error: status param %q must have a default value", varName)
return ""
}
return defVal
}
// ServeConn serves a client connection by reading commands from the stmtBuf
// embedded in the ConnHandler.
//
// If not nil, reserved represents memory reserved for the connection. The
// connExecutor takes ownership of this memory.
func (s *Server) ServeConn(
ctx context.Context, h ConnectionHandler, reserved mon.BoundAccount, cancel context.CancelFunc,
) error {
defer func() {
r := recover()
h.ex.closeWrapper(ctx, r)
}()
return h.ex.run(ctx, s.pool, reserved, cancel)
}
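// Taken together, SetupConn and ServeConn are used by the connection module
// roughly as follows (a simplified sketch; the real pgwire plumbing also runs
// a reader goroutine that feeds the stmtBuf):
//
//	h, err := sqlServer.SetupConn(ctx, args, stmtBuf, clientComm, memMetrics)
//	if err != nil {
//		return err
//	}
//	// Blocks until stmtBuf is closed or a session-fatal error occurs.
//	return sqlServer.ServeConn(ctx, h, reserved, cancelConn)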
// newSessionData creates a SessionData that can be passed to newConnExecutor.
func (s *Server) newSessionData(args SessionArgs) *sessiondata.SessionData {
sd := &sessiondata.SessionData{
User: args.User,
RemoteAddr: args.RemoteAddr,
ResultsBufferSize: args.ConnResultsBufferSize,
}
s.populateMinimalSessionData(sd)
return sd
}
func (s *Server) makeSessionDataMutator(
sd *sessiondata.SessionData, defaults SessionDefaults,
) sessionDataMutator {
return sessionDataMutator{
data: sd,
defaults: defaults,
settings: s.cfg.Settings,
paramStatusUpdater: &noopParamStatusUpdater{},
}
}
// populateMinimalSessionData populates sd with some minimal values needed for
// not crashing. Fields of sd that are already set are not overwritten.
func (s *Server) populateMinimalSessionData(sd *sessiondata.SessionData) {
if sd.SequenceState == nil {
sd.SequenceState = sessiondata.NewSequenceState()
}
if sd.DataConversion == (sessiondata.DataConversionConfig{}) {
sd.DataConversion = sessiondata.DataConversionConfig{
Location: time.UTC,
}
}
if len(sd.SearchPath.GetPathArray()) == 0 {
sd.SearchPath = sqlbase.DefaultSearchPath
}
}
type sdResetOption bool
const (
	resetSessionDataToDefaults     sdResetOption = true
	dontResetSessionDataToDefaults sdResetOption = false
)
// newConnExecutor creates a new connExecutor.
//
// resetOpt controls whether sd is to be reset to the default values.
// TODO(andrei): resetOpt is a hack needed by the InternalExecutor, which
// doesn't want this resetting. Figure out a better API where the responsibility
// of assigning default values is either entirely inside or outside of this
// ctor.
func (s *Server) newConnExecutor(
ctx context.Context,
sd *sessiondata.SessionData,
sdMutator *sessionDataMutator,
stmtBuf *StmtBuf,
clientComm ClientComm,
memMetrics MemoryMetrics,
srvMetrics *Metrics,
resetOpt sdResetOption,
) (*connExecutor, error) {
// Create the various monitors.
// The session monitors are started in activate().
sessionRootMon := mon.MakeMonitor(
"session root",
mon.MemoryResource,
memMetrics.CurBytesCount,
memMetrics.MaxBytesHist,
-1 /* increment */, math.MaxInt64, s.cfg.Settings,
)
sessionMon := mon.MakeMonitor(
"session",
mon.MemoryResource,
memMetrics.SessionCurBytesCount,
memMetrics.SessionMaxBytesHist,
-1 /* increment */, noteworthyMemoryUsageBytes, s.cfg.Settings,
)
// The txn monitor is started in txnState.resetForNewSQLTxn().
txnMon := mon.MakeMonitor(
"txn",
mon.MemoryResource,
memMetrics.TxnCurBytesCount,
memMetrics.TxnMaxBytesHist,
-1 /* increment */, noteworthyMemoryUsageBytes, s.cfg.Settings,
)
ex := &connExecutor{
server: s,
metrics: srvMetrics,
stmtBuf: stmtBuf,
clientComm: clientComm,
mon: &sessionRootMon,
sessionMon: &sessionMon,
sessionData: sd,
dataMutator: sdMutator,
state: txnState{
mon: &txnMon,
connCtx: ctx,
},
transitionCtx: transitionCtx{
db: s.cfg.DB,
nodeID: s.cfg.NodeID.Get(),
clock: s.cfg.Clock,
			// Future transactions' monitors will inherit from sessionRootMon.
connMon: &sessionRootMon,
tracer: s.cfg.AmbientCtx.Tracer,
settings: s.cfg.Settings,
},
memMetrics: memMetrics,
planner: planner{execCfg: s.cfg},
// ctxHolder will be reset at the start of run(). We only define
// it here so that an early call to close() doesn't panic.
ctxHolder: ctxHolder{connCtx: ctx},
executorType: executorTypeExec,
hasCreatedTemporarySchema: false,
stmtInfoRegistry: s.cfg.stmtInfoRequestRegistry,
}
ex.state.txnAbortCount = ex.metrics.EngineMetrics.TxnAbortCount
sdMutator.setCurTxnReadOnly = func(val bool) {
ex.state.readOnly = val
}
sdMutator.onTempSchemaCreation = func() {
ex.hasCreatedTemporarySchema = true
}
sdMutator.RegisterOnSessionDataChange("application_name", func(newName string) {
ex.appStats = ex.server.sqlStats.getStatsForApplication(newName)
ex.applicationName.Store(newName)
})
// Initialize the session data from provided defaults. We need to do this early
// because other initializations below use the configured values.
if resetOpt == resetSessionDataToDefaults {
if err := resetSessionVars(ctx, sdMutator); err != nil {
log.Errorf(ctx, "error setting up client session: %v", err)
return nil, err
}
} else {
// We have set the ex.sessionData without using the dataMutator.
// So we need to update the application name manually.
ex.applicationName.Store(ex.sessionData.ApplicationName)
// When the connEx is serving an internal executor, it can inherit
// the application name from an outer session. This happens
// e.g. during ::regproc casts and built-in functions that use SQL internally.
// In that case, we do not want to record statistics against
// the outer application name directly; instead we want
// to use a separate bucket. However we will still
// want to have separate buckets for different applications so that
// we can measure their respective "pressure" on internal queries.
// Hence the choice here to add the delegate prefix
// to the current app name.
var appStatsBucketName string
if !strings.HasPrefix(ex.sessionData.ApplicationName, sqlbase.InternalAppNamePrefix) {
appStatsBucketName = sqlbase.DelegatedAppNamePrefix + ex.sessionData.ApplicationName
} else {
// If this is already an "internal app", don't put more prefix.
appStatsBucketName = ex.sessionData.ApplicationName
}
ex.appStats = s.sqlStats.getStatsForApplication(appStatsBucketName)
}
ex.phaseTimes[sessionInit] = timeutil.Now()
ex.extraTxnState.prepStmtsNamespace = prepStmtNamespace{
prepStmts: make(map[string]*PreparedStatement),
portals: make(map[string]*PreparedPortal),
}
ex.extraTxnState.prepStmtsNamespaceAtTxnRewindPos = prepStmtNamespace{
prepStmts: make(map[string]*PreparedStatement),
portals: make(map[string]*PreparedPortal),
}
ex.extraTxnState.tables = TableCollection{
leaseMgr: s.cfg.LeaseManager,
databaseCache: s.dbCache.getDatabaseCache(),
dbCacheSubscriber: s.dbCache,
settings: s.cfg.Settings,
}
ex.extraTxnState.txnRewindPos = -1
ex.mu.ActiveQueries = make(map[ClusterWideID]*queryMeta)
ex.machine = fsm.MakeMachine(TxnStateTransitions, stateNoTxn{}, &ex.state)
ex.sessionTracing.ex = ex
ex.transitionCtx.sessionTracing = &ex.sessionTracing
ex.statsCollector = ex.newStatsCollector()
ex.initPlanner(ctx, &ex.planner)
return ex, nil
}
// newConnExecutorWithTxn creates a connExecutor that will execute statements
// under a higher-level txn. This connExecutor runs with a different state
// machine, much reduced from the regular one. It cannot initiate or end
// transactions (so, no BEGIN, COMMIT, ROLLBACK, no auto-commit, no automatic
// retries).
//
// If there is no error, this function also activate()s the returned
// executor, so the caller does not need to run the
// activation. However this means that run() or close() must be called
// to release resources.
func (s *Server) newConnExecutorWithTxn(
ctx context.Context,
sd *sessiondata.SessionData,
sdMutator *sessionDataMutator,
stmtBuf *StmtBuf,
clientComm ClientComm,
parentMon *mon.BytesMonitor,
memMetrics MemoryMetrics,
srvMetrics *Metrics,
txn *kv.Txn,
tcModifier tableCollectionModifier,
resetOpt sdResetOption,
) (*connExecutor, error) {
ex, err := s.newConnExecutor(
ctx, sd, sdMutator, stmtBuf, clientComm, memMetrics, srvMetrics, resetOpt)
if err != nil {
return nil, err
}
// The new transaction stuff below requires active monitors and traces, so
// we need to activate the executor now.
ex.activate(ctx, parentMon, mon.BoundAccount{})
// Perform some surgery on the executor - replace its state machine and
// initialize the state.
ex.machine = fsm.MakeMachine(
BoundTxnStateTransitions,
stateOpen{ImplicitTxn: fsm.False},
&ex.state,
)
ex.state.resetForNewSQLTxn(
ctx,
explicitTxn,
txn.ReadTimestamp().GoTime(),
nil, /* historicalTimestamp */
txn.UserPriority(),
tree.ReadWrite,
txn,
ex.transitionCtx)
// Modify the TableCollection to match the parent executor's TableCollection.
// This allows the InternalExecutor to see schema changes made by the
// parent executor.
if tcModifier != nil {
tcModifier.copyModifiedSchema(&ex.extraTxnState.tables)
}
return ex, nil
}
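// For context, the InternalExecutor is the main consumer of this constructor.
// A sketch of that path (method names approximate, not a verbatim call site):
//
//	ie := MakeInternalExecutor(ctx, s, MemoryMetrics{}, s.cfg.Settings)
//	// Statements executed via ie inside `txn` run on a connExecutor
//	// created by newConnExecutorWithTxn above.
//	_, err := ie.Exec(ctx, "my-op", txn, "SELECT 1")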
var sqlStatReset = settings.RegisterPublicNonNegativeDurationSettingWithMaximum(
"diagnostics.sql_stat_reset.interval",
"interval controlling how often SQL statement statistics should "+
"be reset (should be less than diagnostics.forced_sql_stat_reset.interval). It has a max value of 24H.",
time.Hour,
time.Hour*24,
)
var maxSQLStatReset = settings.RegisterPublicNonNegativeDurationSettingWithMaximum(
"diagnostics.forced_sql_stat_reset.interval",
"interval after which SQL statement statistics are refreshed even "+
"if not collected (should be more than diagnostics.sql_stat_reset.interval). It has a max value of 24H.",
time.Hour*2, // 2 x diagnostics.sql_stat_reset.interval
time.Hour*24,
)
// PeriodicallyClearSQLStats spawns a loop to reset stats based on the value
// of a given duration setting.
func (s *Server) PeriodicallyClearSQLStats(
ctx context.Context, stopper *stop.Stopper, setting *settings.DurationSetting, stats *sqlStats,
) {
stopper.RunWorker(ctx, func(ctx context.Context) {
var timer timeutil.Timer
for {
			stats.Lock()
			last := stats.lastReset
			stats.Unlock()
next := last.Add(setting.Get(&s.cfg.Settings.SV))
wait := next.Sub(timeutil.Now())
if wait < 0 {
stats.resetStats(ctx)
} else {
timer.Reset(wait)
select {
case <-stopper.ShouldQuiesce():
return
case <-timer.C:
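					// Per the timeutil.Timer contract, mark the channel as
					// read so that the next Reset does not try to drain it.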
timer.Read = true
}
}
}
})
}
// PeriodicallyPollForStatementInfoRequests runs a worker that periodically
// polls system.statement_diagnostics_requests.
func (s *Server) PeriodicallyPollForStatementInfoRequests(
ctx context.Context, stopper *stop.Stopper,
) {
pollingInterval := 10 * time.Second
stopper.RunWorker(ctx, func(ctx context.Context) {
ctx, _ = stopper.WithCancelOnQuiesce(ctx)
var timer timeutil.Timer
for {
if err := s.cfg.stmtInfoRequestRegistry.pollRequests(ctx); err != nil {
log.Warningf(ctx, "error polling for statement diagnostics requests: %s", err)
}
timer.Reset(pollingInterval)
select {
case <-stopper.ShouldQuiesce():
return
case <-timer.C:
timer.Read = true
}
}
})
}
type closeType int
const (
normalClose closeType = iota
panicClose
// externalTxnClose means that the connExecutor has been used within a
// higher-level txn (through the InternalExecutor).
externalTxnClose
)
func (ex *connExecutor) closeWrapper(ctx context.Context, recovered interface{}) {
if recovered != nil {
// A warning header guaranteed to go to stderr. This is unanonymized.
var cutStmt string
var stmt string
if ex.curStmt != nil {
stmt = ex.curStmt.String()
cutStmt = stmt
}
if len(cutStmt) > panicLogOutputCutoffChars {
cutStmt = cutStmt[:panicLogOutputCutoffChars] + " [...]"
}
log.Shout(ctx, log.Severity_ERROR,
fmt.Sprintf("a SQL panic has occurred while executing %q: %s", cutStmt, recovered))
ex.close(ctx, panicClose)
safeErr := AnonymizeStatementsForReporting("executing", stmt, recovered)
log.ReportPanic(ctx, &ex.server.cfg.Settings.SV, safeErr, 1 /* depth */)
// Propagate the (sanitized) panic further.
// NOTE(andrei): It used to be that we sanitized the panic and then a higher
// layer was in charge of doing the log.ReportPanic() call. Now that the
// call is above, it's unclear whether we should propagate the original
// panic or safeErr. I'm propagating safeErr to be on the safe side.
panic(safeErr)
}
// Closing is not cancelable.
closeCtx := logtags.WithTags(context.Background(), logtags.FromContext(ctx))
ex.close(closeCtx, normalClose)
}
func (ex *connExecutor) close(ctx context.Context, closeType closeType) {
ex.sessionEventf(ctx, "finishing connExecutor")
if ex.hasCreatedTemporarySchema && !ex.server.cfg.TestingKnobs.DisableTempObjectsCleanupOnSessionExit {
ie := MakeInternalExecutor(ctx, ex.server, MemoryMetrics{}, ex.server.cfg.Settings)
err := cleanupSessionTempObjects(
ctx,
ex.server.cfg.Settings,
ex.server.cfg.DB,
&ie,
ex.sessionID,
)
if err != nil {
log.Errorf(
ctx,
"error deleting temporary objects at session close, "+
"the temp tables deletion job will retry periodically: %s",
err,
)
}
}
ev := noEvent
if _, noTxn := ex.machine.CurState().(stateNoTxn); !noTxn {
ev = txnRollback
}
if closeType == normalClose {
		// We'll clean up the SQL txn by creating a non-retriable (commit:true) event.
// This event is guaranteed to be accepted in every state.
ev := eventNonRetriableErr{IsCommit: fsm.True}
payload := eventNonRetriableErrPayload{err: pgerror.Newf(pgcode.AdminShutdown,
"connExecutor closing")}
if err := ex.machine.ApplyWithPayload(ctx, ev, payload); err != nil {
log.Warningf(ctx, "error while cleaning up connExecutor: %s", err)
}
} else if closeType == externalTxnClose {
ex.state.finishExternalTxn()
}
if err := ex.resetExtraTxnState(ctx, ex.server.dbCache, ev); err != nil {
log.Warningf(ctx, "error while cleaning up connExecutor: %s", err)
}
if closeType != panicClose {
// Close all statements and prepared portals.
ex.extraTxnState.prepStmtsNamespace.resetTo(ctx, prepStmtNamespace{})
ex.extraTxnState.prepStmtsNamespaceAtTxnRewindPos.resetTo(ctx, prepStmtNamespace{})
}
if ex.sessionTracing.Enabled() {
if err := ex.sessionTracing.StopTracing(); err != nil {
log.Warningf(ctx, "error stopping tracing: %s", err)
}
}
if ex.eventLog != nil {
ex.eventLog.Finish()
ex.eventLog = nil
}
if closeType != panicClose {
ex.state.mon.Stop(ctx)
ex.sessionMon.Stop(ctx)
ex.mon.Stop(ctx)
} else {
ex.state.mon.EmergencyStop(ctx)
ex.sessionMon.EmergencyStop(ctx)
ex.mon.EmergencyStop(ctx)
}
}
type connExecutor struct {
_ util.NoCopy
// The server to which this connExecutor is attached. The reference is used
// for getting access to configuration settings.
// Note: do not use server.Metrics directly. Use metrics below instead.
server *Server
// The metrics to which the statement metrics should be accounted.
// This is different whether the executor is for regular client
// queries or for "internal" queries.
metrics *Metrics
// mon tracks memory usage for SQL activity within this session. It
// is not directly used, but rather indirectly used via sessionMon
// and state.mon. sessionMon tracks session-bound objects like prepared
// statements and result sets.
//
// The reason why state.mon and mon are split is to enable
// separate reporting of statistics per transaction and per
// session. This is because the "interesting" behavior w.r.t memory
// is typically caused by transactions, not sessions. The reason why
// sessionMon and mon are split is to enable separate reporting of
// statistics for result sets (which escape transactions).
mon *mon.BytesMonitor
sessionMon *mon.BytesMonitor
// memMetrics contains the metrics that statements executed on this connection
// will contribute to.
memMetrics MemoryMetrics
// The buffer with incoming statements to execute.
stmtBuf *StmtBuf
// The interface for communicating statement results to the client.
clientComm ClientComm
// Finity "the machine" Automaton is the state machine controlling the state
// below.
machine fsm.Machine
// state encapsulates fields related to the ongoing SQL txn. It is mutated as
// the machine's ExtendedState.
state txnState
transitionCtx transitionCtx
sessionTracing SessionTracing
// eventLog for SQL statements and other important session events. Will be set
// if traceSessionEventLogEnabled; it is used by ex.sessionEventf()
eventLog trace.EventLog
// extraTxnState groups fields scoped to a SQL txn that are not handled by
// ex.state, above. The rule of thumb is that, if the state influences state
// transitions, it should live in state, otherwise it can live here.
// This is only used in the Open state. extraTxnState is reset whenever a
// transaction finishes or gets retried.
extraTxnState struct {
// tables collects descriptors used by the current transaction.
tables TableCollection
// jobs accumulates jobs staged for execution inside the transaction.
// Staging happens when executing statements that are implemented with a
// job. The jobs are staged via the function QueueJob in
// pkg/sql/planner.go. The staged jobs are executed once the transaction
// that staged them commits.
jobs jobsCollection
		// autoRetryCounter keeps track of which iteration of a transaction
		// auto-retry we're currently in. It's 0 whenever the transaction state is not
		// stateOpen.
autoRetryCounter int
// numDDL keeps track of how many DDL statements have been
// executed so far.
numDDL int
// txnRewindPos is the position within stmtBuf to which we'll rewind when
// performing automatic retries. This is more or less the position where the
// current transaction started.
// This field is only defined while in stateOpen.
//
// Set via setTxnRewindPos().
txnRewindPos CmdPos
// prepStmtNamespace contains the prepared statements and portals that the
// session currently has access to.
// Portals are bound to a transaction and they're all destroyed once the
// transaction finishes.
// Prepared statements are not transactional and so it's a bit weird that
// they're part of extraTxnState, but it's convenient to put them here
// because they need the same kind of "snapshoting" as the portals (see
// prepStmtsNamespaceAtTxnRewindPos).
prepStmtsNamespace prepStmtNamespace
// prepStmtsNamespaceAtTxnRewindPos is a snapshot of the prep stmts/portals
// (ex.prepStmtsNamespace) before processing the command at position
// txnRewindPos.
// Here's the deal: prepared statements are not transactional, but they do
// need to interact properly with automatic retries (i.e. rewinding the
// command buffer). When doing a rewind, we need to be able to restore the
// prep stmts as they were. We do this by taking a snapshot every time
// txnRewindPos is advanced. Prepared statements are shared between the two
// collections, but these collections are periodically reconciled.
prepStmtsNamespaceAtTxnRewindPos prepStmtNamespace
// onTxnFinish (if non-nil) will be called when txn is finished (either
// committed or aborted). It is set when txn is started but can remain
// unset when txn is executed within another higher-level txn.
onTxnFinish func(txnEvent)
// savepoints maintains the stack of savepoints currently open.
savepoints savepointStack
// savepointsAtTxnRewindPos is a snapshot of the savepoints stack before
// processing the command at position txnRewindPos. When rewinding, we're
// going to restore this snapshot.
savepointsAtTxnRewindPos savepointStack
}
// sessionData contains the user-configurable connection variables.
sessionData *sessiondata.SessionData
// dataMutator is nil for session-bound internal executors; we shouldn't issue
// statements that manipulate session state to an internal executor.
dataMutator *sessionDataMutator
// appStats tracks per-application SQL usage statistics. It is maintained to
	// represent statistics for the application currently identified by
// sessiondata.ApplicationName.
appStats *appStats
// applicationName is the same as sessionData.ApplicationName. It's copied
// here as an atomic so that it can be read concurrently by serialize().
applicationName atomic.Value
	// ctxHolder contains the connection's context, in which all commands
	// executed on the connection are running. This generally should not be used
	// directly,
// but through the Ctx() method; if we're inside a transaction, Ctx() is going
// to return a derived context. See the Context Management comments at the top
// of the file.
ctxHolder ctxHolder
	// onCancelSession is called when the SessionRegistry cancels this session.
// For pgwire connections, this is hooked up to canceling the connection's
// context.
// If nil, canceling this session will be a no-op.
onCancelSession context.CancelFunc
// planner is the "default planner" on a session, to save planner allocations
// during serial execution. Since planners are not threadsafe, this is only
// safe to use when a statement is not being parallelized. It must be reset
// before using.
planner planner
// phaseTimes tracks session- and transaction-level phase times. It is
// copied-by-value when resetting statsCollector before executing each
// statement.
phaseTimes phaseTimes
// statsCollector is used to collect statistics about SQL statements and
// transactions.
statsCollector *sqlStatsCollector
	// mu contains all the elements of the struct that can be changed
	// after initialization and may be accessed from another thread.
mu struct {
syncutil.RWMutex
// ActiveQueries contains all queries in flight.
ActiveQueries map[ClusterWideID]*queryMeta
// LastActiveQuery contains a reference to the AST of the last
// query that ran on this session.
LastActiveQuery tree.Statement
}
// curStmt is the statement that's currently being prepared or executed, if
// any. This is printed by high-level panic recovery.
curStmt tree.Statement
sessionID ClusterWideID
// activated determines whether activate() was called already.
// When this is set, close() must be called to release resources.
activated bool
// draining is set if we've received a DrainRequest. Once this is set, we're
// going to find a suitable time to close the connection.
draining bool
// executorType is set to whether this executor is an ordinary executor which
// responds to user queries or an internal one.
executorType executorType
// hasCreatedTemporarySchema is set if the executor has created a
// temporary schema, which requires special cleanup on close.
hasCreatedTemporarySchema bool
// stmtInfoRequestRegistry is used to track which queries need to have
// information collected.
stmtInfoRegistry *stmtDiagnosticsRequestRegistry
}
// ctxHolder contains a connection's context and, while session tracing is
// enabled, a derived context with a recording span. The connExecutor should use
// the latter while session tracing is active, or the former otherwise; that's
// what the ctx() method returns.
type ctxHolder struct {
connCtx context.Context
sessionTracingCtx context.Context
}
func (ch *ctxHolder) ctx() context.Context {
if ch.sessionTracingCtx != nil {
return ch.sessionTracingCtx
}
return ch.connCtx
}
func (ch *ctxHolder) hijack(sessionTracingCtx context.Context) {
if ch.sessionTracingCtx != nil {
panic("hijack already in effect")
}
ch.sessionTracingCtx = sessionTracingCtx
}
func (ch *ctxHolder) unhijack() {
if ch.sessionTracingCtx == nil {
panic("hijack not in effect")
}
ch.sessionTracingCtx = nil
}
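// For illustration, session tracing uses the holder roughly like this (a
// sketch; see SessionTracing for the real call sites):
//
//	ch.hijack(recordingCtx) // ctx() now returns the recording context
//	// ... all commands now trace into recordingCtx ...
//	ch.unhijack() // ctx() returns the plain connection context again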
type prepStmtNamespace struct {
// prepStmts contains the prepared statements currently available on the
// session.
prepStmts map[string]*PreparedStatement
// portals contains the portals currently available on the session.
portals map[string]*PreparedPortal
}
func (ns prepStmtNamespace) String() string {
var sb strings.Builder
sb.WriteString("Prep stmts: ")
for name := range ns.prepStmts {
sb.WriteString(name + " ")
}
sb.WriteString("Portals: ")
for name := range ns.portals {
sb.WriteString(name + " ")
}
return sb.String()
}
// resetTo resets a namespace to equal another one (`to`). All the receiver's
// references are released and all of to's references are duplicated.
//
// An empty `to` can be passed in to deallocate everything.
func (ns *prepStmtNamespace) resetTo(ctx context.Context, to prepStmtNamespace) {
for name, p := range ns.prepStmts {
p.decRef(ctx)
delete(ns.prepStmts, name)
}
for name, p := range ns.portals {
p.decRef(ctx)
delete(ns.portals, name)
}
for name, ps := range to.prepStmts {
ps.incRef(ctx)
ns.prepStmts[name] = ps
}
for name, p := range to.portals {
p.incRef(ctx)
ns.portals[name] = p
}
}
// resetExtraTxnState resets the fields of ex.extraTxnState when a transaction
// commits, rolls back or restarts.
func (ex *connExecutor) resetExtraTxnState(
ctx context.Context, dbCacheHolder *databaseCacheHolder, ev txnEvent,
) error {
ex.extraTxnState.jobs = nil
ex.extraTxnState.tables.releaseTables(ctx)
ex.extraTxnState.tables.databaseCache = dbCacheHolder.getDatabaseCache()
// Close all portals.
for name, p := range ex.extraTxnState.prepStmtsNamespace.portals {
p.decRef(ctx)
delete(ex.extraTxnState.prepStmtsNamespace.portals, name)
}
switch ev {
case txnCommit, txnRollback:
ex.extraTxnState.savepoints.clear()
// After txn is finished, we need to call onTxnFinish (if it's non-nil).
if ex.extraTxnState.onTxnFinish != nil {
ex.extraTxnState.onTxnFinish(ev)
ex.extraTxnState.onTxnFinish = nil
}
}
	// NOTE: on txnRestart we don't need to muck with the savepoints stack. It's
	// either a ROLLBACK TO SAVEPOINT that generated the event, and that statement
	// deals with the savepoints, or it's a rewind, which also deals with them.
return nil
}
// Ctx returns the transaction's ctx, if we're inside a transaction, or the
// session's context otherwise.
func (ex *connExecutor) Ctx() context.Context {
if _, ok := ex.machine.CurState().(stateNoTxn); ok {
return ex.ctxHolder.ctx()
}
// stateInternalError is used by the InternalExecutor.
if _, ok := ex.machine.CurState().(stateInternalError); ok {
return ex.ctxHolder.ctx()
}
return ex.state.Ctx
}
// activate engages the use of resources that must be cleaned up
// afterwards. After activate() completes, the close() method must be
// called.
//
// Args:
// parentMon: The root monitor.
// reserved: Memory reserved for the connection. The connExecutor takes
// ownership of this memory.
func (ex *connExecutor) activate(
ctx context.Context, parentMon *mon.BytesMonitor, reserved mon.BoundAccount,
) {
// Note: we pass `reserved` to sessionRootMon where it causes it to act as a
// buffer. This is not done for sessionMon nor state.mon: these monitors don't
// start with any buffer, so they'll need to ask their "parent" for memory as
// soon as the first allocation. This is acceptable because the session is
// single threaded, and the point of buffering is just to avoid contention.
ex.mon.Start(ctx, parentMon, reserved)
ex.sessionMon.Start(ctx, ex.mon, mon.BoundAccount{})
// Enable the trace if configured.
if traceSessionEventLogEnabled.Get(&ex.server.cfg.Settings.SV) {
remoteStr := "<admin>"
if ex.sessionData.RemoteAddr != nil {
remoteStr = ex.sessionData.RemoteAddr.String()
}
ex.eventLog = trace.NewEventLog(
fmt.Sprintf("sql session [%s]", ex.sessionData.User), remoteStr)
}
ex.activated = true
}
// run implements the run loop for a connExecutor. Commands are read one by one
// from the input buffer; they are executed and the resulting state transitions
// are performed.
//
// run returns when either the stmtBuf is closed by someone else or when an
// error is propagated from query execution. Note that query errors are not
// propagated as errors to this layer; only things that are supposed to
// terminate the session are (e.g. client communication errors and ctx
// cancelations).
// run() is expected to react to ctx cancelation, but the caller needs to also
// close the stmtBuf at the same time as canceling the ctx. If cancelation
// happens in the middle of a query execution, that's expected to interrupt the
// execution and generate an error. run() is then supposed to return because the
// buffer is closed and no further commands can be read.
//
// When this returns, ex.close() needs to be called and the connection to the
// client needs to be terminated. If it returns with an error, that error may
// represent a communication error (in which case the connection might already
// also have an error from the reading side), or some other unexpected failure.
// Returned errors have not been communicated to the client: it's up to the
// caller to do that if it wants.
//
// If not nil, reserved represents memory reserved for the connection. The
// connExecutor takes ownership of this memory.
//
// onCancel, if not nil, will be called when the SessionRegistry cancels the
// session. TODO(andrei): This is hooked up to canceling the pgwire connection's
// context (of which ctx is also a child). It seems uncouth for the connExecutor
// to cancel a higher-level task. A better design would probably be for pgwire
// to own the SessionRegistry, instead of it being owned by the sql.Server -
// then pgwire would directly cancel its own tasks; the sessions also more
// naturally belong there. There is a problem, however, as query cancelation (as
// opposed to session cancelation) is done through the SessionRegistry and that
// does belong with the connExecutor. Introducing a query registry, separate
// from the session registry, might be too costly - the way query cancelation
// works is that every session is asked to cancel a given query until the right
// one is found. That seems like a good performance trade-off.
func (ex *connExecutor) run(
ctx context.Context,
parentMon *mon.BytesMonitor,
reserved mon.BoundAccount,
onCancel context.CancelFunc,
) error {
if !ex.activated {
ex.activate(ctx, parentMon, reserved)
}
ex.ctxHolder.connCtx = ctx
ex.onCancelSession = onCancel
ex.sessionID = ex.generateID()
ex.server.cfg.SessionRegistry.register(ex.sessionID, ex)
ex.planner.extendedEvalCtx.setSessionID(ex.sessionID)
defer ex.server.cfg.SessionRegistry.deregister(ex.sessionID)
for {
ex.curStmt = nil
if err := ctx.Err(); err != nil {
return err
}
var err error
if err = ex.execCmd(ex.Ctx()); err != nil {
if err == io.EOF || err == errDrainingComplete {
return nil
}
return err
}
}
}
// errDrainingComplete is returned by execCmd when the connExecutor previously got
// a DrainRequest and the time is ripe to finish this session (i.e. we're no
// longer in a transaction).
var errDrainingComplete = fmt.Errorf("draining done. this is a good time to finish this session")
// execCmd reads the current command from the stmtBuf and executes it. The
// transaction state is modified accordingly, and the stmtBuf is advanced or
// rewound accordingly.
//
// Returns an error if communication of results to the client has failed and the
// session should be terminated. Returns io.EOF if the stmtBuf has been closed.
// Returns errDrainingComplete if the session should finish because draining is
// complete (i.e. we received a DrainRequest - possibly previously - and the
// connection is found to be idle).
func (ex *connExecutor) execCmd(ctx context.Context) error {
cmd, pos, err := ex.stmtBuf.CurCmd()
if err != nil {
return err // err could be io.EOF
}
ctx, sp := tracing.EnsureChildSpan(
ctx, ex.server.cfg.AmbientCtx.Tracer,
// We print the type of command, not the String() which includes long
// statements.
cmd.command())
defer sp.Finish()
if log.ExpensiveLogEnabled(ctx, 2) || ex.eventLog != nil {
ex.sessionEventf(ctx, "[%s pos:%d] executing %s",
ex.machine.CurState(), pos, cmd)
}
var ev fsm.Event
var payload fsm.EventPayload
var res ResultBase
switch tcmd := cmd.(type) {
case ExecStmt:
if tcmd.AST == nil {
res = ex.clientComm.CreateEmptyQueryResult(pos)
break
}
ex.curStmt = tcmd.AST
stmtRes := ex.clientComm.CreateStatementResult(
tcmd.AST,
NeedRowDesc,
pos,
nil, /* formatCodes */
ex.sessionData.DataConversion,
0, /* limit */
"", /* portalName */
ex.implicitTxn(),
)
res = stmtRes
curStmt := Statement{Statement: tcmd.Statement}
ex.phaseTimes[sessionQueryReceived] = tcmd.TimeReceived
ex.phaseTimes[sessionStartParse] = tcmd.ParseStart
ex.phaseTimes[sessionEndParse] = tcmd.ParseEnd
stmtCtx := withStatement(ctx, ex.curStmt)
ev, payload, err = ex.execStmt(stmtCtx, curStmt, stmtRes, nil /* pinfo */)
if err != nil {
return err
}
case ExecPortal:
// ExecPortal is handled like ExecStmt, except that the placeholder info
// is taken from the portal.
portal, ok := ex.extraTxnState.prepStmtsNamespace.portals[tcmd.Name]
if !ok {
err := pgerror.Newf(
pgcode.InvalidCursorName, "unknown portal %q", tcmd.Name)
ev = eventNonRetriableErr{IsCommit: fsm.False}
payload = eventNonRetriableErrPayload{err: err}
res = ex.clientComm.CreateErrorResult(pos)
break
}
if portal.Stmt.AST == nil {
res = ex.clientComm.CreateEmptyQueryResult(pos)
break
}
if log.ExpensiveLogEnabled(ctx, 2) {
log.VEventf(ctx, 2, "portal resolved to: %s", portal.Stmt.AST.String())
}
ex.curStmt = portal.Stmt.AST
pinfo := &tree.PlaceholderInfo{
PlaceholderTypesInfo: tree.PlaceholderTypesInfo{
TypeHints: portal.Stmt.TypeHints,
Types: portal.Stmt.Types,
},
Values: portal.Qargs,
}
ex.phaseTimes[sessionQueryReceived] = tcmd.TimeReceived
		// When parsing has been done earlier, via a separate parse
		// message, it is no longer part of the statistics collected
		// for this execution. In that case, we simply report that
		// parsing took no time.
ex.phaseTimes[sessionStartParse] = time.Time{}
ex.phaseTimes[sessionEndParse] = time.Time{}
stmtRes := ex.clientComm.CreateStatementResult(
portal.Stmt.AST,
// The client is using the extended protocol, so no row description is
// needed.
DontNeedRowDesc,
pos, portal.OutFormats,
ex.sessionData.DataConversion,
tcmd.Limit,
tcmd.Name,
ex.implicitTxn(),
)
res = stmtRes
curStmt := Statement{
Statement: portal.Stmt.Statement,
Prepared: portal.Stmt,
ExpectedTypes: portal.Stmt.Columns,
AnonymizedStr: portal.Stmt.AnonymizedStr,
}
stmtCtx := withStatement(ctx, ex.curStmt)
ev, payload, err = ex.execStmt(stmtCtx, curStmt, stmtRes, pinfo)
if err != nil {
return err
}
case PrepareStmt:
ex.curStmt = tcmd.AST
res = ex.clientComm.CreatePrepareResult(pos)
stmtCtx := withStatement(ctx, ex.curStmt)
ev, payload = ex.execPrepare(stmtCtx, tcmd)
case DescribeStmt:
descRes := ex.clientComm.CreateDescribeResult(pos)
res = descRes
ev, payload = ex.execDescribe(ctx, tcmd, descRes)
case BindStmt:
res = ex.clientComm.CreateBindResult(pos)
ev, payload = ex.execBind(ctx, tcmd)
case DeletePreparedStmt:
res = ex.clientComm.CreateDeleteResult(pos)
ev, payload = ex.execDelPrepStmt(ctx, tcmd)
case SendError:
res = ex.clientComm.CreateErrorResult(pos)
ev = eventNonRetriableErr{IsCommit: fsm.False}
payload = eventNonRetriableErrPayload{err: tcmd.Err}
case Sync:
// Note that the Sync result will flush results to the network connection.
res = ex.clientComm.CreateSyncResult(pos)
if ex.draining {
// If we're draining, check whether this is a good time to finish the
// connection. If we're not inside a transaction, we stop processing
// now. If we are inside a transaction, we'll check again the next time
// a Sync is processed.
if ex.idleConn() {
// If we're about to close the connection, close res in order to flush
// now, as we won't have an opportunity to do it later.
res.Close(ctx, stateToTxnStatusIndicator(ex.machine.CurState()))
return errDrainingComplete
}
}
case CopyIn:
res = ex.clientComm.CreateCopyInResult(pos)
var err error
ev, payload, err = ex.execCopyIn(ctx, tcmd)
if err != nil {
return err
}
case DrainRequest:
// We received a drain request. We terminate immediately if we're not in a
// transaction. If we are in a transaction, we'll finish as soon as a Sync
// command (i.e. the end of a batch) is processed outside of a
// transaction.
ex.draining = true
res = ex.clientComm.CreateDrainResult(pos)
if ex.idleConn() {
return errDrainingComplete
}
case Flush:
// Closing the res will flush the connection's buffer.
res = ex.clientComm.CreateFlushResult(pos)
default:
panic(fmt.Sprintf("unsupported command type: %T", cmd))
}
var advInfo advanceInfo
// If an event was generated, feed it to the state machine.
if ev != nil {
var err error
advInfo, err = ex.txnStateTransitionsApplyWrapper(ev, payload, res, pos)
if err != nil {
return err
}
} else {
		// If no event was generated, synthesize an advance code.
advInfo = advanceInfo{
code: advanceOne,
}
}
// Decide if we need to close the result or not. We don't need to do it if
// we're staying in place or rewinding - the statement will be executed
// again.
if advInfo.code != stayInPlace && advInfo.code != rewind {
// Close the result. In case of an execution error, the result might have
// its error set already or it might not.
resErr := res.Err()
pe, ok := payload.(payloadWithError)
if ok {
ex.sessionEventf(ctx, "execution error: %s", pe.errorCause())
if resErr == nil {
res.SetError(pe.errorCause())
}
}
res.Close(ctx, stateToTxnStatusIndicator(ex.machine.CurState()))
} else {
res.Discard()
}
// Move the cursor according to what the state transition told us to do.
switch advInfo.code {
case advanceOne:
ex.stmtBuf.AdvanceOne()
case skipBatch:
		// We'll flush whatever results we have to the network. The last one must
		// be an error. This flush may seem unnecessary, as we generally only
		// flush when the client requests it through a Sync or a Flush command,
		// but without it the Node.js driver isn't happy. That driver likes to
		// send "flush" commands and only sends Syncs once it has received some
		// data. But we ignore flush commands (just like we ignore any other
		// commands) when skipping to the next batch.
if err := ex.clientComm.Flush(pos); err != nil {
return err
}
if err := ex.stmtBuf.seekToNextBatch(); err != nil {
return err
}
case rewind:
ex.rewindPrepStmtNamespace(ctx)
ex.extraTxnState.savepoints = ex.extraTxnState.savepointsAtTxnRewindPos
advInfo.rewCap.rewindAndUnlock(ctx)
case stayInPlace:
// Nothing to do. The same statement will be executed again.
default:
panic(fmt.Sprintf("unexpected advance code: %s", advInfo.code))
}
return ex.updateTxnRewindPosMaybe(ctx, cmd, pos, advInfo)
}
func (ex *connExecutor) idleConn() bool {
switch ex.machine.CurState().(type) {
case stateNoTxn:
return true
case stateInternalError:
return true
default:
return false
}
}
// updateTxnRewindPosMaybe checks whether the ex.extraTxnState.txnRewindPos
// should be advanced, based on the advInfo produced by running cmd at position
// pos.
func (ex *connExecutor) updateTxnRewindPosMaybe(
ctx context.Context, cmd Command, pos CmdPos, advInfo advanceInfo,
) error {
// txnRewindPos is only maintained while in stateOpen.
if _, ok := ex.machine.CurState().(stateOpen); !ok {
return nil
}
if advInfo.txnEvent == txnStart || advInfo.txnEvent == txnRestart {
var nextPos CmdPos
switch advInfo.code {
case stayInPlace:
nextPos = pos
case advanceOne:
// Future rewinds will refer to the next position; the statement that
// started the transaction (i.e. BEGIN) will not itself be executed
// again.
nextPos = pos + 1
case rewind:
if advInfo.rewCap.rewindPos != ex.extraTxnState.txnRewindPos {
return errors.AssertionFailedf(
"unexpected rewind position: %d when txn start is: %d",
errors.Safe(advInfo.rewCap.rewindPos),
errors.Safe(ex.extraTxnState.txnRewindPos))
}
// txnRewindPos stays unchanged.
return nil
default:
return errors.AssertionFailedf(
"unexpected advance code when starting a txn: %s",
errors.Safe(advInfo.code))
}
ex.setTxnRewindPos(ctx, nextPos)
} else {
// See if we can advance the rewind point even if this is not the point
// where the transaction started. We can do that after running a special
// statement (e.g. SET TRANSACTION or SAVEPOINT) or after most commands that
// don't execute statements.
// The idea is that, for example, we don't want the following sequence to
// disable retries for what comes after the sequence:
// 1: PrepareStmt BEGIN
// 2: BindStmt
// 3: ExecutePortal
// 4: Sync
// Note that the current command cannot influence the rewind point if
// the rewind point is not currently set to the command's position
// (i.e. we don't do anything if txnRewindPos != pos).
if advInfo.code != advanceOne {
panic(fmt.Sprintf("unexpected advanceCode: %s", advInfo.code))
}
var canAdvance bool
_, inOpen := ex.machine.CurState().(stateOpen)
if inOpen && (ex.extraTxnState.txnRewindPos == pos) {
switch tcmd := cmd.(type) {
case ExecStmt:
canAdvance = ex.stmtDoesntNeedRetry(tcmd.AST)
case ExecPortal:
portal := ex.extraTxnState.prepStmtsNamespace.portals[tcmd.Name]
canAdvance = ex.stmtDoesntNeedRetry(portal.Stmt.AST)
case PrepareStmt:
canAdvance = true
case DescribeStmt:
canAdvance = true
case BindStmt:
canAdvance = true
case DeletePreparedStmt:
canAdvance = true
case SendError:
canAdvance = true
case Sync:
canAdvance = true
case CopyIn:
// Can't advance.
case DrainRequest:
canAdvance = true
case Flush:
canAdvance = true
default:
panic(fmt.Sprintf("unsupported cmd: %T", cmd))
}
if canAdvance {
ex.setTxnRewindPos(ctx, pos+1)
}
}
}
return nil
}
// setTxnRewindPos updates the position to which future rewinds will refer.
//
// All statements with lower position in stmtBuf (if any) are removed, as we
// won't ever need them again.
func (ex *connExecutor) setTxnRewindPos(ctx context.Context, pos CmdPos) {
if pos <= ex.extraTxnState.txnRewindPos {
panic(fmt.Sprintf("can only move the txnRewindPos forward. "+
"Was: %d; new value: %d", ex.extraTxnState.txnRewindPos, pos))
}
ex.extraTxnState.txnRewindPos = pos
ex.stmtBuf.ltrim(ctx, pos)
ex.commitPrepStmtNamespace(ctx)
ex.extraTxnState.savepointsAtTxnRewindPos = ex.extraTxnState.savepoints.clone()
}
// stmtDoesntNeedRetry returns true if the given statement does not need to be
// retried when performing automatic retries. This means that the results of the
// statement do not change with retries.
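// For example, SAVEPOINT and SET TRANSACTION produce the same results when the
// transaction is retried, so the rewind position can safely advance past them.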
func (ex *connExecutor) stmtDoesntNeedRetry(stmt tree.Statement) bool {
wrap := Statement{Statement: parser.Statement{AST: stmt}}
return isSavepoint(wrap) || isSetTransaction(wrap)
}
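// stateToTxnStatusIndicator maps a session state to the transaction status
// indicator that is reported to the client in pgwire's ReadyForQuery message
// ('I' when idle, 'T' inside a transaction, 'E' inside a failed transaction).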
func stateToTxnStatusIndicator(s fsm.State) TransactionStatusIndicator {
switch s.(type) {
case stateOpen:
return InTxnBlock
case stateAborted:
return InFailedTxnBlock
case stateNoTxn:
return IdleTxnBlock
case stateCommitWait:
return InTxnBlock
case stateInternalError:
return InTxnBlock
default:
panic(fmt.Sprintf("unknown state: %T", s))
}
}
// We handle the CopyFrom statement by creating a copyMachine and handing it
// control over the connection until the copying is done. The contract is that,
// when this is called, the pgwire.conn is not reading from the network
// connection any more until this returns. The copyMachine will do the reading
// and writing up to the CommandComplete message.
func (ex *connExecutor) execCopyIn(
ctx context.Context, cmd CopyIn,
) (fsm.Event, fsm.EventPayload, error) {
// When we're done, unblock the network connection.
defer cmd.CopyDone.Done()
state := ex.machine.CurState()
_, isNoTxn := state.(stateNoTxn)
_, isOpen := state.(stateOpen)
if !isNoTxn && !isOpen {
ev := eventNonRetriableErr{IsCommit: fsm.False}
payload := eventNonRetriableErrPayload{
err: sqlbase.NewTransactionAbortedError("" /* customMsg */)}
return ev, payload, nil
}
// If we're in an explicit txn, then the copying will be done within that
// txn. Otherwise, we tell the copyMachine to manage its own transactions.
var txnOpt copyTxnOpt
if isOpen {
txnOpt = copyTxnOpt{
txn: ex.state.mu.txn,
txnTimestamp: ex.state.sqlTimestamp,
stmtTimestamp: ex.server.cfg.Clock.PhysicalTime(),
}
}
var monToStop *mon.BytesMonitor
defer func() {
if monToStop != nil {
monToStop.Stop(ctx)
}
}()
if isNoTxn {
// HACK: We're reaching inside ex.state and starting the monitor. Normally
// that's driven by the state machine, but we're bypassing the state machine
// here.
ex.state.mon.Start(ctx, ex.sessionMon, mon.BoundAccount{} /* reserved */)
monToStop = ex.state.mon
}
var cm copyMachineInterface
var err error
resetPlanner := func(p *planner, txn *kv.Txn, txnTS time.Time, stmtTS time.Time) {
// HACK: We're reaching inside ex.state and changing sqlTimestamp by hand.
// It is used by resetPlanner. Normally sqlTimestamp is updated by the
// state machine, but the copyMachine manages its own transactions without
// going through the state machine.
ex.state.sqlTimestamp = txnTS
ex.statsCollector = ex.newStatsCollector()
ex.statsCollector.reset(&ex.server.sqlStats, ex.appStats, &ex.phaseTimes)
ex.initPlanner(ctx, p)
ex.resetPlanner(ctx, p, txn, stmtTS, 0 /* numAnnotations */)
}
if table := cmd.Stmt.Table; table.Table() == fileUploadTable && table.Schema() == crdbInternalName {
cm, err = newFileUploadMachine(cmd.Conn, cmd.Stmt, ex.server.cfg, resetPlanner)
} else {
cm, err = newCopyMachine(
ctx, cmd.Conn, cmd.Stmt, txnOpt, ex.server.cfg, resetPlanner,
// execInsertPlan
func(ctx context.Context, p *planner, res RestrictedCommandResult) error {
_, _, err := ex.execWithDistSQLEngine(ctx, p, tree.RowsAffected, res, false /* distribute */, nil /* progressAtomic */)
return err
},
)
}
if err != nil {
ev := eventNonRetriableErr{IsCommit: fsm.False}
payload := eventNonRetriableErrPayload{err: err}
return ev, payload, nil
}
if err := cm.run(ctx); err != nil {
// TODO(andrei): We don't have a retriable error story for the copy machine.
// When running outside of a txn, the copyMachine should probably do retries
// internally. When inside one, it's unclear what we should do. For now, we abort
// the txn (if any).
// We also don't have a story for distinguishing communication errors (which
// should terminate the connection) from query errors. For now, we treat all
// errors as query errors.
ev := eventNonRetriableErr{IsCommit: fsm.False}
payload := eventNonRetriableErrPayload{err: err}
return ev, payload, nil
}
return nil, nil, nil
}
// stmtHasNoData returns true if describing a result of the input statement
// type should return NoData.
func stmtHasNoData(stmt tree.Statement) bool {
return stmt == nil || stmt.StatementType() != tree.Rows
}
// generateID generates a unique ID based on the node's ID and its current HLC
// timestamp. These IDs are either scoped at the query level or at the session
// level.
func (ex *connExecutor) generateID() ClusterWideID {
return GenerateClusterWideID(ex.server.cfg.Clock.Now(), ex.server.cfg.NodeID.Get())
}
// commitPrepStmtNamespace deallocates everything in
// prepStmtsNamespaceAtTxnRewindPos that's not part of prepStmtsNamespace.
func (ex *connExecutor) commitPrepStmtNamespace(ctx context.Context) {
ex.extraTxnState.prepStmtsNamespaceAtTxnRewindPos.resetTo(
ctx, ex.extraTxnState.prepStmtsNamespace)
}
// rewindPrepStmtNamespace deallocates everything in prepStmtsNamespace that's
// not part of prepStmtsNamespaceAtTxnRewindPos.
func (ex *connExecutor) rewindPrepStmtNamespace(ctx context.Context) {
ex.extraTxnState.prepStmtsNamespace.resetTo(
ctx, ex.extraTxnState.prepStmtsNamespaceAtTxnRewindPos)
}
// getRewindTxnCapability checks whether rewinding to the position previously
// set through setTxnRewindPos() is possible and, if it is, returns a
// rewindCapability bound to that position. The returned bool is true if the
// rewind is possible. If it is, client communication is blocked until the
// rewindCapability is exercised.
func (ex *connExecutor) getRewindTxnCapability() (rewindCapability, bool) {
cl := ex.clientComm.LockCommunication()
// If we already delivered results at or past the start position, we can't
// rewind.
if cl.ClientPos() >= ex.extraTxnState.txnRewindPos {
cl.Close()
return rewindCapability{}, false
}
return rewindCapability{
cl: cl,
buf: ex.stmtBuf,
rewindPos: ex.extraTxnState.txnRewindPos,
}, true
}
// isCommit returns true if stmt is a "COMMIT" statement.
func isCommit(stmt tree.Statement) bool {
_, ok := stmt.(*tree.CommitTransaction)
return ok
}
func errIsRetriable(err error) bool {
err = errors.UnwrapAll(err)
_, retriable := err.(*roachpb.TransactionRetryWithProtoRefreshError)
return retriable
}
// makeErrEvent takes an error and returns either an eventRetriableErr or an
// eventNonRetriableErr, depending on the error type.
func (ex *connExecutor) makeErrEvent(err error, stmt tree.Statement) (fsm.Event, fsm.EventPayload) {
retriable := errIsRetriable(err)
if retriable {
rc, canAutoRetry := ex.getRewindTxnCapability()
ev := eventRetriableErr{
IsCommit: fsm.FromBool(isCommit(stmt)),
CanAutoRetry: fsm.FromBool(canAutoRetry),
}
payload := eventRetriableErrPayload{
err: err,
rewCap: rc,
}
return ev, payload
}
ev := eventNonRetriableErr{
IsCommit: fsm.FromBool(isCommit(stmt)),
}
payload := eventNonRetriableErrPayload{err: err}
return ev, payload
}
// setTransactionModes implements the txnModesSetter interface.
func (ex *connExecutor) setTransactionModes(
modes tree.TransactionModes, asOfTs hlc.Timestamp,
) error {
// This method cheats and manipulates ex.state directly, not through an event.
// The alternative would be to create a special event, but it's unclear how
// that'd work given that this method is called while executing a statement.
// Transform the transaction options into the types needed by the state
// machine.
if modes.UserPriority != tree.UnspecifiedUserPriority {
pri, err := priorityToProto(modes.UserPriority)
if err != nil {
return err
}
if err := ex.state.setPriority(pri); err != nil {
return err
}
}
if modes.Isolation != tree.UnspecifiedIsolation && modes.Isolation != tree.SerializableIsolation {
return errors.AssertionFailedf(
"unknown isolation level: %s", errors.Safe(modes.Isolation))
}
rwMode := modes.ReadWriteMode
if modes.AsOf.Expr != nil && (asOfTs == hlc.Timestamp{}) {
return errors.AssertionFailedf("expected an evaluated AS OF timestamp")
}
if (asOfTs != hlc.Timestamp{}) {
ex.state.setHistoricalTimestamp(ex.Ctx(), asOfTs)
ex.state.sqlTimestamp = asOfTs.GoTime()
if rwMode == tree.UnspecifiedReadWriteMode {
rwMode = tree.ReadOnly
}
}
return ex.state.setReadOnlyMode(rwMode)
}
func priorityToProto(mode tree.UserPriority) (roachpb.UserPriority, error) {
var pri roachpb.UserPriority
switch mode {
case tree.UnspecifiedUserPriority:
pri = roachpb.NormalUserPriority
case tree.Low:
pri = roachpb.MinUserPriority
case tree.Normal:
pri = roachpb.NormalUserPriority
case tree.High:
pri = roachpb.MaxUserPriority
default:
return roachpb.UserPriority(0), errors.AssertionFailedf("unknown user priority: %s", errors.Safe(mode))
}
return pri, nil
}
func (ex *connExecutor) readWriteModeWithSessionDefault(
mode tree.ReadWriteMode,
) tree.ReadWriteMode {
if mode == tree.UnspecifiedReadWriteMode {
if ex.sessionData.DefaultReadOnly {
return tree.ReadOnly
}
return tree.ReadWrite
}
return mode
}
// initEvalCtx initializes the fields of an extendedEvalContext that stay the
// same across multiple statements. resetEvalCtx must also be called before each
// statement, to reinitialize other fields.
func (ex *connExecutor) initEvalCtx(ctx context.Context, evalCtx *extendedEvalContext, p *planner) {
scInterface := newSchemaInterface(&ex.extraTxnState.tables, ex.server.cfg.VirtualSchemas)
ie := MakeInternalExecutor(
ctx,
ex.server,
ex.memMetrics,
ex.server.cfg.Settings,
)
ie.SetSessionData(ex.sessionData)
*evalCtx = extendedEvalContext{
EvalContext: tree.EvalContext{
Planner: p,
Sequence: p,
SessionData: ex.sessionData,
SessionAccessor: p,
PrivilegedAccessor: p,
Settings: ex.server.cfg.Settings,
TestingKnobs: ex.server.cfg.EvalContextTestingKnobs,
ClusterID: ex.server.cfg.ClusterID(),
ClusterName: ex.server.cfg.RPCContext.ClusterName(),
NodeID: ex.server.cfg.NodeID.Get(),
Locality: ex.server.cfg.Locality,
ReCache: ex.server.reCache,
InternalExecutor: &ie,
DB: ex.server.cfg.DB,
},
SessionMutator: ex.dataMutator,
VirtualSchemas: ex.server.cfg.VirtualSchemas,
Tracing: &ex.sessionTracing,
StatusServer: ex.server.cfg.StatusServer,
MemMetrics: &ex.memMetrics,
Tables: &ex.extraTxnState.tables,
ExecCfg: ex.server.cfg,
DistSQLPlanner: ex.server.cfg.DistSQLPlanner,
TxnModesSetter: ex,
Jobs: &ex.extraTxnState.jobs,
schemaAccessors: scInterface,
sqlStatsCollector: ex.statsCollector,
}
}
// resetEvalCtx initializes the fields of evalCtx that can change
// during a session (i.e. the fields not set by initEvalCtx).
//
// stmtTS is the timestamp that the statement_timestamp() SQL builtin will
// return for statements executed with this evalCtx. Since generally each
// statement is supposed to have a different timestamp, the evalCtx generally
// shouldn't be reused across statements.
func (ex *connExecutor) resetEvalCtx(evalCtx *extendedEvalContext, txn *kv.Txn, stmtTS time.Time) {
evalCtx.TxnState = ex.getTransactionState()
evalCtx.TxnReadOnly = ex.state.readOnly
evalCtx.TxnImplicit = ex.implicitTxn()
evalCtx.StmtTimestamp = stmtTS
evalCtx.TxnTimestamp = ex.state.sqlTimestamp
evalCtx.Placeholders = nil
evalCtx.Annotations = nil
evalCtx.IVarContainer = nil
evalCtx.Context = ex.Ctx()
evalCtx.Txn = txn
evalCtx.Mon = ex.state.mon
evalCtx.PrepareOnly = false
evalCtx.SkipNormalize = false
}
// getTransactionState retrieves a text representation of the current
// transaction state.
func (ex *connExecutor) getTransactionState() string {
state := ex.machine.CurState()
if ex.implicitTxn() {
// If the statement reading the state is in an implicit transaction, then we
// want to tell NoTxn to the client.
state = stateNoTxn{}
}
return state.(fmt.Stringer).String()
}
func (ex *connExecutor) implicitTxn() bool {
state := ex.machine.CurState()
os, ok := state.(stateOpen)
return ok && os.ImplicitTxn.Get()
}
// initPlanner initializes a planner so it can be used for planning a
// query in the context of this session.
func (ex *connExecutor) initPlanner(ctx context.Context, p *planner) {
p.cancelChecker = sqlbase.NewCancelChecker(ctx)
ex.initEvalCtx(ctx, &p.extendedEvalCtx, p)
p.sessionDataMutator = ex.dataMutator
p.noticeSender = noopNoticeSender
p.preparedStatements = ex.getPrepStmtsAccessor()
p.queryCacheSession.Init()
p.optPlanningCtx.init(p)
}
func (ex *connExecutor) resetPlanner(
ctx context.Context, p *planner, txn *kv.Txn, stmtTS time.Time, numAnnotations tree.AnnotationIdx,
) {
p.txn = txn
p.stmt = nil
p.cancelChecker.Reset(ctx)
p.semaCtx = tree.MakeSemaContext()
p.semaCtx.Location = &ex.sessionData.DataConversion.Location
p.semaCtx.SearchPath = ex.sessionData.SearchPath
p.semaCtx.AsOfTimestamp = nil
p.semaCtx.Annotations = tree.MakeAnnotations(numAnnotations)
ex.resetEvalCtx(&p.extendedEvalCtx, txn, stmtTS)
p.autoCommit = false
p.isPreparing = false
p.avoidCachedDescriptors = false
}
// txnStateTransitionsApplyWrapper is a wrapper on top of Machine built with the
// TxnStateTransitions above. Its point is to detect when we go in and out of
// transactions and update some state.
//
// Any returned error indicates an unrecoverable error for the session;
// execution on this connection should be interrupted.
func (ex *connExecutor) txnStateTransitionsApplyWrapper(
ev fsm.Event, payload fsm.EventPayload, res ResultBase, pos CmdPos,
) (advanceInfo, error) {
var implicitTxn bool
if os, ok := ex.machine.CurState().(stateOpen); ok {
implicitTxn = os.ImplicitTxn.Get()
}
err := ex.machine.ApplyWithPayload(withStatement(ex.Ctx(), ex.curStmt), ev, payload)
if err != nil {
if _, ok := err.(fsm.TransitionNotFoundError); ok {
panic(err)
}
return advanceInfo{}, err
}
advInfo := ex.state.consumeAdvanceInfo()
if advInfo.code == rewind {
ex.extraTxnState.autoRetryCounter++
}
// Handle transaction events which cause updates to txnState.
switch advInfo.txnEvent {
case noEvent:
case txnStart:
ex.extraTxnState.autoRetryCounter = 0
ex.extraTxnState.onTxnFinish = ex.recordTransactionStart()
case txnCommit:
if res.Err() != nil {
err := errorutil.UnexpectedWithIssueErrorf(
26687,
"programming error: non-error event "+
advInfo.txnEvent.String()+ // the event is included like this so that it doesn't get sanitized
" generated even though res.Err() has been set to: %s",
res.Err())
log.Error(ex.Ctx(), err)
errorutil.SendReport(ex.Ctx(), &ex.server.cfg.Settings.SV, err)
return advanceInfo{}, err
}
handleErr := func(err error) {
if implicitTxn {
// The schema change/job failed but it was also the only
// operation in the transaction. In this case, the transaction's
// error is the schema change error.
// TODO (lucy): I'm not sure the above is true. What about DROP TABLE
// with multiple tables?
res.SetError(err)
} else {
// The schema change/job failed but everything else in the
// transaction was actually committed successfully already. At
// this point, it is too late to cancel the transaction. In
// effect, we have violated the "A" of ACID.
//
// This situation is sufficiently serious that we cannot let the
// error that caused the schema change to fail flow back to the
// client as-is. We replace it by a custom code dedicated to
// this situation. Replacement occurs because this error code is
// a "serious error" and the code computation logic will give it
// a higher priority.
//
// We also print out the original error code as prefix of the
// error message, in case it was a serious error.
newErr := pgerror.Wrapf(err,
pgcode.TransactionCommittedWithSchemaChangeFailure,
"transaction committed but schema change aborted with error: (%s)",
pgerror.GetPGCode(err))
newErr = errors.WithHint(newErr,
"Some of the non-DDL statements may have committed successfully, "+
"but some of the DDL statement(s) failed.\nManual inspection may be "+
"required to determine the actual state of the database.")
newErr = errors.WithIssueLink(newErr,
errors.IssueLink{IssueURL: "https://github.com/cockroachdb/cockroach/issues/42061"})
res.SetError(newErr)
}
}
if err := ex.server.cfg.JobRegistry.Run(
ex.ctxHolder.connCtx,
ex.server.cfg.InternalExecutor,
ex.extraTxnState.jobs); err != nil {
handleErr(err)
}
// Wait for the cache to reflect the dropped databases if any.
ex.extraTxnState.tables.waitForCacheToDropDatabases(ex.Ctx())
fallthrough
case txnRestart, txnRollback:
if err := ex.resetExtraTxnState(ex.Ctx(), ex.server.dbCache, advInfo.txnEvent); err != nil {
return advanceInfo{}, err
}
default:
return advanceInfo{}, errors.AssertionFailedf(
"unexpected event: %v", errors.Safe(advInfo.txnEvent))
}
return advInfo, nil
}
// initStatementResult initializes res according to a query.
//
// cols represents the columns of the result rows. Should be nil if
// stmt.AST.StatementType() != tree.Rows.
//
// If an error is returned, it is to be considered a query execution error.
func (ex *connExecutor) initStatementResult(
ctx context.Context, res RestrictedCommandResult, stmt *Statement, cols sqlbase.ResultColumns,
) error {
for _, c := range cols {
if err := checkResultType(c.Typ); err != nil {
return err
}
}
if stmt.AST.StatementType() == tree.Rows {
// Note that this call is necessary even if cols is nil.
res.SetColumns(ctx, cols)
}
return nil
}
// newStatsCollector returns a sqlStatsCollector that will record stats in the
// session's stats containers.
func (ex *connExecutor) newStatsCollector() *sqlStatsCollector {
return newSQLStatsCollector(&ex.server.sqlStats, ex.appStats, &ex.phaseTimes)
}
// cancelQuery is part of the registrySession interface.
func (ex *connExecutor) cancelQuery(queryID ClusterWideID) bool {
ex.mu.Lock()
defer ex.mu.Unlock()
if queryMeta, exists := ex.mu.ActiveQueries[queryID]; exists {
queryMeta.cancel()
return true
}
return false
}
// cancelSession is part of the registrySession interface.
func (ex *connExecutor) cancelSession() {
if ex.onCancelSession == nil {
return
}
// TODO(abhimadan): figure out how to send a nice error message to the client.
ex.onCancelSession()
}
// user is part of the registrySession interface.
func (ex *connExecutor) user() string {
return ex.sessionData.User
}
// serialize is part of the registrySession interface.
func (ex *connExecutor) serialize() serverpb.Session {
ex.mu.RLock()
defer ex.mu.RUnlock()
ex.state.mu.RLock()
defer ex.state.mu.RUnlock()
var kvTxnID *uuid.UUID
txn := ex.state.mu.txn
if txn != nil {
id := txn.ID()
kvTxnID = &id
}
activeQueries := make([]serverpb.ActiveQuery, 0, len(ex.mu.ActiveQueries))
truncateSQL := func(sql string) string {
if len(sql) > MaxSQLBytes {
sql = sql[:MaxSQLBytes-utf8.RuneLen('…')]
// Ensure the resulting string is valid utf8.
for {
if r, _ := utf8.DecodeLastRuneInString(sql); r != utf8.RuneError {
break
}
sql = sql[:len(sql)-1]
}
sql += "…"
}
return sql
}
for id, query := range ex.mu.ActiveQueries {
if query.hidden {
continue
}
sql := truncateSQL(query.stmt.String())
progress := math.Float64frombits(atomic.LoadUint64(&query.progressAtomic))
activeQueries = append(activeQueries, serverpb.ActiveQuery{
ID: id.String(),
Start: query.start.UTC(),
Sql: sql,
IsDistributed: query.isDistributed,
Phase: (serverpb.ActiveQuery_Phase)(query.phase),
Progress: float32(progress),
})
}
lastActiveQuery := ""
if ex.mu.LastActiveQuery != nil {
lastActiveQuery = truncateSQL(ex.mu.LastActiveQuery.String())
}
remoteStr := "<admin>"
if ex.sessionData.RemoteAddr != nil {
remoteStr = ex.sessionData.RemoteAddr.String()
}
return serverpb.Session{
Username: ex.sessionData.User,
ClientAddress: remoteStr,
ApplicationName: ex.applicationName.Load().(string),
Start: ex.phaseTimes[sessionInit].UTC(),
ActiveQueries: activeQueries,
KvTxnID: kvTxnID,
LastActiveQuery: lastActiveQuery,
ID: ex.sessionID.GetBytes(),
AllocBytes: ex.mon.AllocBytes(),
MaxAllocBytes: ex.mon.MaximumBytes(),
}
}
func (ex *connExecutor) getPrepStmtsAccessor() preparedStatementsAccessor {
return connExPrepStmtsAccessor{
ex: ex,
}
}
// sessionEventf logs a message to the session event log (if any).
func (ex *connExecutor) sessionEventf(ctx context.Context, format string, args ...interface{}) {
if log.ExpensiveLogEnabled(ctx, 2) {
log.VEventfDepth(ctx, 1 /* depth */, 2 /* level */, format, args...)
}
if ex.eventLog != nil {
ex.eventLog.Printf(format, args...)
}
}
// StatementCounters groups metrics for counting different types of
// statements.
type StatementCounters struct {
// QueryCount includes all statements and is therefore the sum of
// all the other metrics below.
QueryCount telemetry.CounterWithMetric
// Basic CRUD statements.
SelectCount telemetry.CounterWithMetric
UpdateCount telemetry.CounterWithMetric
InsertCount telemetry.CounterWithMetric
DeleteCount telemetry.CounterWithMetric
// Transaction operations.
TxnBeginCount telemetry.CounterWithMetric
TxnCommitCount telemetry.CounterWithMetric
TxnRollbackCount telemetry.CounterWithMetric
// Savepoint operations. SavepointCount is for real SQL savepoints;
// the RestartSavepoint variants are for the
// cockroach-specific client-side retry protocol.
SavepointCount telemetry.CounterWithMetric
ReleaseSavepointCount telemetry.CounterWithMetric
RollbackToSavepointCount telemetry.CounterWithMetric
RestartSavepointCount telemetry.CounterWithMetric
ReleaseRestartSavepointCount telemetry.CounterWithMetric
RollbackToRestartSavepointCount telemetry.CounterWithMetric
// DdlCount counts all statements whose StatementType is DDL.
DdlCount telemetry.CounterWithMetric
// MiscCount counts all statements not covered by a more specific stat above.
MiscCount telemetry.CounterWithMetric
}
func makeStartedStatementCounters(internal bool) StatementCounters {
return StatementCounters{
TxnBeginCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaTxnBeginStarted, internal)),
TxnCommitCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaTxnCommitStarted, internal)),
TxnRollbackCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaTxnRollbackStarted, internal)),
RestartSavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaRestartSavepointStarted, internal)),
ReleaseRestartSavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaReleaseRestartSavepointStarted, internal)),
RollbackToRestartSavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaRollbackToRestartSavepointStarted, internal)),
SavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaSavepointStarted, internal)),
ReleaseSavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaReleaseSavepointStarted, internal)),
RollbackToSavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaRollbackToSavepointStarted, internal)),
SelectCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaSelectStarted, internal)),
UpdateCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaUpdateStarted, internal)),
InsertCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaInsertStarted, internal)),
DeleteCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaDeleteStarted, internal)),
DdlCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaDdlStarted, internal)),
MiscCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaMiscStarted, internal)),
QueryCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaQueryStarted, internal)),
}
}
func makeExecutedStatementCounters(internal bool) StatementCounters {
return StatementCounters{
TxnBeginCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaTxnBeginExecuted, internal)),
TxnCommitCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaTxnCommitExecuted, internal)),
TxnRollbackCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaTxnRollbackExecuted, internal)),
RestartSavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaRestartSavepointExecuted, internal)),
ReleaseRestartSavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaReleaseRestartSavepointExecuted, internal)),
RollbackToRestartSavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaRollbackToRestartSavepointExecuted, internal)),
SavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaSavepointExecuted, internal)),
ReleaseSavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaReleaseSavepointExecuted, internal)),
RollbackToSavepointCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaRollbackToSavepointExecuted, internal)),
SelectCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaSelectExecuted, internal)),
UpdateCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaUpdateExecuted, internal)),
InsertCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaInsertExecuted, internal)),
DeleteCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaDeleteExecuted, internal)),
DdlCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaDdlExecuted, internal)),
MiscCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaMiscExecuted, internal)),
QueryCount: telemetry.NewCounterWithMetric(
getMetricMeta(MetaQueryExecuted, internal)),
}
}
func (sc *StatementCounters) incrementCount(ex *connExecutor, stmt tree.Statement) {
sc.QueryCount.Inc()
switch t := stmt.(type) {
case *tree.BeginTransaction:
sc.TxnBeginCount.Inc()
case *tree.Select:
sc.SelectCount.Inc()
case *tree.Update:
sc.UpdateCount.Inc()
case *tree.Insert:
sc.InsertCount.Inc()
case *tree.Delete:
sc.DeleteCount.Inc()
case *tree.CommitTransaction:
sc.TxnCommitCount.Inc()
case *tree.RollbackTransaction:
sc.TxnRollbackCount.Inc()
case *tree.Savepoint:
if ex.isCommitOnReleaseSavepoint(t.Name) {
sc.RestartSavepointCount.Inc()
} else {
sc.SavepointCount.Inc()
}
case *tree.ReleaseSavepoint:
if ex.isCommitOnReleaseSavepoint(t.Savepoint) {
sc.ReleaseRestartSavepointCount.Inc()
} else {
sc.ReleaseSavepointCount.Inc()
}
case *tree.RollbackToSavepoint:
if ex.isCommitOnReleaseSavepoint(t.Savepoint) {
sc.RollbackToRestartSavepointCount.Inc()
} else {
sc.RollbackToSavepointCount.Inc()
}
default:
if tree.CanModifySchema(stmt) {
sc.DdlCount.Inc()
} else {
sc.MiscCount.Inc()
}
}
}
// connExPrepStmtsAccessor is an implementation of preparedStatementsAccessor
// that gives access to a connExecutor's prepared statements.
type connExPrepStmtsAccessor struct {
ex *connExecutor
}
var _ preparedStatementsAccessor = connExPrepStmtsAccessor{}
// List is part of the preparedStatementsAccessor interface.
func (ps connExPrepStmtsAccessor) List() map[string]*PreparedStatement {
// Return a copy of the data, to prevent modification of the map.
stmts := ps.ex.extraTxnState.prepStmtsNamespace.prepStmts
ret := make(map[string]*PreparedStatement, len(stmts))
for key, stmt := range stmts {
ret[key] = stmt
}
return ret
}
// Get is part of the preparedStatementsAccessor interface.
func (ps connExPrepStmtsAccessor) Get(name string) (*PreparedStatement, bool) {
s, ok := ps.ex.extraTxnState.prepStmtsNamespace.prepStmts[name]
return s, ok
}
// Delete is part of the preparedStatementsAccessor interface.
func (ps connExPrepStmtsAccessor) Delete(ctx context.Context, name string) bool {
_, ok := ps.Get(name)
if !ok {
return false
}
ps.ex.deletePreparedStmt(ctx, name)
return true
}
// DeleteAll is part of the preparedStatementsAccessor interface.
func (ps connExPrepStmtsAccessor) DeleteAll(ctx context.Context) {
ps.ex.extraTxnState.prepStmtsNamespace.resetTo(ctx, prepStmtNamespace{})
}
// contextStatementKey is an empty type for the handle associated with the
// statement value (see context.Value).
type contextStatementKey struct{}
// withStatement adds a SQL statement to the provided context. The statement
// will then be included in crash reports which use that context.
func withStatement(ctx context.Context, stmt tree.Statement) context.Context {
return context.WithValue(ctx, contextStatementKey{}, stmt)
}
// statementFromCtx returns the statement value from a context, or nil if unset.
func statementFromCtx(ctx context.Context) tree.Statement {
stmt := ctx.Value(contextStatementKey{})
if stmt == nil {
return nil
}
return stmt.(tree.Statement)
}
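// For example, the executor tags its context before applying a state
// transition (see txnStateTransitionsApplyWrapper):
//
//	ctx := withStatement(ex.Ctx(), ex.curStmt)
//	_ = statementFromCtx(ctx) // returns ex.curStmt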
func init() {
// Register a function to include the anonymized statement in crash reports.
log.RegisterTagFn("statement", func(ctx context.Context) string {
stmt := statementFromCtx(ctx)
if stmt == nil {
return ""
}
// Anonymize the statement for reporting.
return anonymizeStmtAndConstants(stmt)
})
}
|
generate.rs
|
use jormungandr_automation::jcli::JCli;
#[test]
pub fn test_ed25519_key_generation() {
let jcli: JCli = Default::default();
let generated_key = jcli.key().generate("ed25519");
assert_ne!(generated_key, "", "generated key is empty");
}
#[test]
pub fn test_ed25519_uppercase_key_generation() {
let jcli: JCli = Default::default();
let generated_key = jcli.key().generate("ED25519EXTENDED");
assert_ne!(generated_key, "", "generated key is empty");
}
#[test]
pub fn test_ed25519bip32_key_generation() {
let jcli: JCli = Default::default();
let generated_key = jcli.key().generate("Ed25519Bip32");
assert_ne!(generated_key, "", "generated key is empty");
}
#[test]
pub fn test_ed25519extended_key_generation() {
let jcli: JCli = Default::default();
let generated_key = jcli.key().generate("Ed25519Extended");
assert_ne!(generated_key, "", "generated key is empty");
}
#[test]
pub fn test_curve25519_2hashdh_key_generation() {
let jcli: JCli = Default::default();
let generated_key = jcli.key().generate("RistrettoGroup2HashDh");
assert_ne!(generated_key, "", "generated key is empty");
}
#[test]
pub fn test_sumed25519_12_key_generation() {
let jcli: JCli = Default::default();
let generated_key = jcli.key().generate("SumEd25519_12");
assert_ne!(generated_key, "", "generated key is empty");
}
#[test]
pub fn test_unknown_key_type_generation() {
let jcli: JCli = Default::default();
jcli.key()
.generate_expect_fail("unknown", "Invalid value for '--type <key-type>':");
}
#[test]
pub fn test_key_with_seed_generation() {
let jcli: JCli = Default::default();
let correct_seed = "73855612722627931e20c850f8ad53eb04c615c7601a95747be073dcada3e135";
let generated_key = jcli
.key()
.generate_with_seed("Ed25519Extended", correct_seed);
assert_ne!(generated_key, "", "generated key is empty");
}
#[test]
pub fn test_key_with_too_short_seed_generation() {
let too_short_seed = "73855612722627931e20c850f8ad53eb04c615c7601a95747be073dcadaa";
test_key_invalid_seed_length(too_short_seed);
}
#[test]
pub fn test_key_with_too_long_seed_generation() {
let too_long_seed = "73855612722627931e20c850f8ad53eb04c615c7601a95747be073dcada0234212";
test_key_invalid_seed_length(too_long_seed);
}
fn test_key_invalid_seed_length(seed: &str) {
let jcli: JCli = Default::default();
jcli.key().generate_with_seed_expect_fail(
"Ed25519Extended",
seed,
"invalid seed length, expected 32 bytes but received",
);
}
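// Note: a valid seed here is exactly 32 bytes, i.e. 64 hexadecimal characters;
// the seeds above are respectively shorter and longer than that, so jcli is
// expected to reject both.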
#[test]
pub fn test_key_with_seed_with_unknown_symbol_generation() {
let jcli: JCli = Default::default();
let incorrect_seed = "73855612722627931e20c850f8ad53eb04c615c7601a95747be073dcay";
jcli.key().generate_with_seed_expect_fail(
"Ed25519Extended",
incorrect_seed,
"invalid Hexadecimal",
);
}
|
lib.rs
|
//! # Oracle
//! A module to allow oracle operators to feed external data.
//!
//! - [`Trait`](./trait.Trait.html)
//! - [`Call`](./enum.Call.html)
//! - [`Module`](./struct.Module.html)
//!
//! ## Overview
//!
//! This module exposes capabilities for oracle operators to feed external
//! offchain data. The raw values can be combined to provide an aggregated
//! value.
//!
//! The data is valid only if fed by an authorized operator. This module
//! implements `frame_support::traits::InitializeMembers` and `frame_support::
//! traits::ChangeMembers`, to provide a way to manage operator membership.
//! Typically this can be delegated to `pallet_membership` in FRAME.
#![cfg_attr(not(feature = "std"), no_std)]
// Disable the following lint since it originates from an external macro (namely decl_storage)
#![allow(clippy::string_lit_as_bytes)]
mod default_combine_data;
mod default_weight;
mod mock;
mod tests;
pub trait WeightInfo {
fn feed_values(c: u32) -> Weight;
fn on_finalize() -> Weight;
}
use codec::{Decode, Encode};
pub use default_combine_data::DefaultCombineData;
use frame_support::{
decl_error, decl_event, decl_module, decl_storage,
dispatch::DispatchResultWithPostInfo,
ensure,
traits::{ChangeMembers, Get, InitializeMembers, Time},
weights::{DispatchClass, Pays, Weight},
IterableStorageMap, Parameter,
};
use frame_system::{ensure_root, ensure_signed};
pub use orml_traits::{CombineData, DataFeeder, DataProvider, DataProviderExtended, OnNewData};
use orml_utilities::OrderedSet;
#[cfg(feature = "std")]
use serde::{Deserialize, Serialize};
use sp_runtime::{traits::Member, DispatchResult, RuntimeDebug};
use sp_std::{prelude::*, vec};
type MomentOf<T, I = DefaultInstance> = <<T as Trait<I>>::Time as Time>::Moment;
pub type TimestampedValueOf<T, I = DefaultInstance> = TimestampedValue<<T as Trait<I>>::OracleValue, MomentOf<T, I>>;
#[derive(Encode, Decode, RuntimeDebug, Eq, PartialEq, Clone, Copy, Ord, PartialOrd)]
#[cfg_attr(feature = "std", derive(Serialize, Deserialize))]
pub struct TimestampedValue<Value, Moment> {
pub value: Value,
pub timestamp: Moment,
}
pub trait Trait<I: Instance = DefaultInstance>: frame_system::Trait {
type Event: From<Event<Self, I>> + Into<<Self as frame_system::Trait>::Event>;
/// Hook on new data received
type OnNewData: OnNewData<Self::AccountId, Self::OracleKey, Self::OracleValue>;
/// Provide the implementation to combine raw values to produce an
/// aggregated value
type CombineData: CombineData<Self::OracleKey, TimestampedValueOf<Self, I>>;
/// Time provider
type Time: Time;
/// The data key type
type OracleKey: Parameter + Member;
/// The data value type
type OracleValue: Parameter + Member + Ord;
/// The root operator account id; all sudo feeds are recorded on this account.
type RootOperatorAccountId: Get<Self::AccountId>;
/// Weight information for extrinsics in this module.
type WeightInfo: WeightInfo;
}
decl_error! {
pub enum Error for Module<T: Trait<I>, I: Instance> {
/// Sender does not have permission
NoPermission,
/// Feeder has already fed a value this block
AlreadyFeeded,
}
}
decl_event!(
pub enum Event<T, I=DefaultInstance> where
<T as frame_system::Trait>::AccountId,
<T as Trait<I>>::OracleKey,
<T as Trait<I>>::OracleValue,
{
/// New feed data is submitted. [sender, values]
NewFeedData(AccountId, Vec<(OracleKey, OracleValue)>),
}
);
decl_storage! {
trait Store for Module<T: Trait<I>, I: Instance=DefaultInstance> as Oracle {
/// Raw values for each oracle operator
pub RawValues get(fn raw_values): double_map hasher(twox_64_concat) T::AccountId, hasher(twox_64_concat) T::OracleKey => Option<TimestampedValueOf<T, I>>;
/// True if Self::values(key) is up to date, otherwise the value is stale
pub IsUpdated get(fn is_updated): map hasher(twox_64_concat) <T as Trait<I>>::OracleKey => bool;
/// Combined value, may not be up to date
pub Values get(fn values): map hasher(twox_64_concat) <T as Trait<I>>::OracleKey => Option<TimestampedValueOf<T, I>>;
/// If an oracle operator has fed a value in this block
HasDispatched: OrderedSet<T::AccountId>;
// TODO: this shouldn't be required https://github.com/paritytech/substrate/issues/6041
/// The current members of the collective. This is stored sorted (just by value).
pub Members get(fn members) config(): OrderedSet<T::AccountId>;
pub Nonces get(fn nonces): map hasher(twox_64_concat) T::AccountId => u32;
}
add_extra_genesis {
config(phantom): sp_std::marker::PhantomData<I>;
}
}
decl_module! {
pub struct Module<T: Trait<I>, I: Instance=DefaultInstance> for enum Call where origin: T::Origin {
type Error = Error<T, I>;
fn deposit_event() = default;
/// Feed the external value.
///
/// Require authorized operator.
#[weight = (T::WeightInfo::feed_values(values.len() as u32), DispatchClass::Operational)]
pub fn feed_values(
origin,
values: Vec<(T::OracleKey, T::OracleValue)>,
) -> DispatchResultWithPostInfo {
let feeder = ensure_signed(origin.clone()).or_else(|_| ensure_root(origin).map(|_| T::RootOperatorAccountId::get()))?;
Self::do_feed_values(feeder, values)?;
Ok(Pays::No.into())
}
/// `on_initialize` to return the weight used in `on_finalize`.
fn on_initialize() -> Weight {
T::WeightInfo::on_finalize()
}
fn on_finalize(_n: T::BlockNumber) {
// cleanup for next block
<HasDispatched<T, I>>::kill();
}
}
}
impl<T: Trait<I>, I: Instance> Module<T, I> {
pub fn read_raw_values(key: &T::OracleKey) -> Vec<TimestampedValueOf<T, I>> {
Self::members()
.0
.iter()
.chain(vec![T::RootOperatorAccountId::get()].iter())
.filter_map(|x| Self::raw_values(x, key))
.collect()
}
/// Returns a fresh combined value if there has been an update, or the
/// latest combined value otherwise.
///
/// Note this will update the Values storage if there has been an update.
pub fn get(key: &T::OracleKey) -> Option<TimestampedValueOf<T, I>> {
if Self::is_updated(key) {
<Values<T, I>>::get(key)
} else {
let timestamped = Self::combined(key)?;
<Values<T, I>>::insert(key, timestamped.clone());
IsUpdated::<T, I>::insert(key, true);
Some(timestamped)
}
}
/// Returns a fresh combined value if there has been an update, or the
/// latest combined value otherwise.
///
/// This is a no-op function which does not change storage.
pub fn get_no_op(key: &T::OracleKey) -> Option<TimestampedValueOf<T, I>> {
if Self::is_updated(key) {
Self::values(key)
} else {
Self::combined(key)
}
}
#[allow(clippy::complexity)]
pub fn get_all_values() -> Vec<(T::OracleKey, Option<TimestampedValueOf<T, I>>)> {
<Values<T, I>>::iter()
.map(|(key, _)| key)
.map(|key| {
let v = Self::get_no_op(&key);
(key, v)
})
.collect()
}
fn combined(key: &T::OracleKey) -> Option<TimestampedValueOf<T, I>> {
let values = Self::read_raw_values(key);
T::CombineData::combine_data(key, values, Self::values(key))
}
fn do_feed_values(who: T::AccountId, values: Vec<(T::OracleKey, T::OracleValue)>) -> DispatchResult {
// ensure feeder is authorized
ensure!(
Self::members().contains(&who) || who == T::RootOperatorAccountId::get(),
Error::<T, I>::NoPermission
);
// ensure account hasn't dispatched an update yet
ensure!(
HasDispatched::<T, I>::mutate(|set| set.insert(who.clone())),
Error::<T, I>::AlreadyFeeded
);
let now = T::Time::now();
for (key, value) in &values {
let timestamped = TimestampedValue {
value: value.clone(),
timestamp: now,
};
RawValues::<T, I>::insert(&who, &key, timestamped);
IsUpdated::<T, I>::remove(&key);
T::OnNewData::on_new_data(&who, &key, &value);
}
Self::deposit_event(RawEvent::NewFeedData(who, values));
Ok(())
}
}
impl<T: Trait<I>, I: Instance> InitializeMembers<T::AccountId> for Module<T, I> {
fn initialize_members(members: &[T::AccountId]) {
if !members.is_empty() {
assert!(Members::<T, I>::get().0.is_empty(), "Members are already initialized!");
Members::<T, I>::put(OrderedSet::from_sorted_set(members.into()));
}
}
}
impl<T: Trait<I>, I: Instance> ChangeMembers<T::AccountId> for Module<T, I> {
fn change_members_sorted(_incoming: &[T::AccountId], outgoing: &[T::AccountId], new: &[T::AccountId]) {
// remove the outgoing members' raw values
for removed in outgoing {
RawValues::<T, I>::remove_prefix(removed);
}
Members::<T, I>::put(OrderedSet::from_sorted_set(new.into()));
// not bothering to track which key needs recompute, just update all
IsUpdated::<T, I>::remove_all();
}
fn set_prime(_prime: Option<T::AccountId>) {
// nothing
}
}
impl<T: Trait<I>, I: Instance> DataProvider<T::OracleKey, T::OracleValue> for Module<T, I> {
fn get(key: &T::OracleKey) -> Option<T::OracleValue> {
Self::get(key).map(|timestamped_value| timestamped_value.value)
}
}
impl<T: Trait<I>, I: Instance> DataProviderExtended<T::OracleKey, TimestampedValueOf<T, I>> for Module<T, I> {
fn get_no_op(key: &T::OracleKey) -> Option<TimestampedValueOf<T, I>> {
Self::get_no_op(key)
}
#[allow(clippy::complexity)]
fn get_all_values() -> Vec<(T::OracleKey, Option<TimestampedValueOf<T, I>>)> {
Self::get_all_values()
}
}
impl<T: Trait<I>, I: Instance> DataFeeder<T::OracleKey, T::OracleValue, T::AccountId> for Module<T, I> {
fn feed_value(who: T::AccountId, key: T::OracleKey, value: T::OracleValue) -> DispatchResult {
Self::do_feed_values(who, vec![(key, value)])?;
Ok(())
}
}
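// Illustrative runtime wiring (a sketch under assumed type names: Runtime,
// CurrencyId, Price, MinimumCount and ExpiresIn are placeholders, not part
// of this module):
//
// impl Trait for Runtime {
//     type Event = Event;
//     type OnNewData = ();
//     type CombineData = DefaultCombineData<Runtime, MinimumCount, ExpiresIn>;
//     type Time = pallet_timestamp::Module<Runtime>;
//     type OracleKey = CurrencyId;
//     type OracleValue = Price;
//     type RootOperatorAccountId = RootOperatorAccountId;
//     type WeightInfo = ();
// }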
|
yaml.go
|
package utils
import (
"bytes"
"fmt"
"text/template"
"github.com/davecgh/go-spew/spew"
"github.com/mitchellh/mapstructure"
"gopkg.in/yaml.v3"
)
func UnmarshalMapIntoStructWithTemplate(input map[string]string, output interface{}, contextValues map[string]interface{}, debug bool) error {
if debug {
fmt.Println("default conf:")
spew.Dump(output)
}
inputTemplatedString, err := MarshalMapWithTemplate(input, contextValues, debug)
if err != nil {
return fmt.Errorf("failed to marshal input with template values: %w", err)
}
inputTemplatedMap, err := UnmarshalStringIntoMap(inputTemplatedString)
if err != nil {
return fmt.Errorf("failed to unmarshal templated input map into generic map: %w", err)
}
return UnmarshalMapIntoStruct(inputTemplatedMap, output, debug)
}
func UnmarshalStringIntoMap(input string) (map[string]interface{}, error) {
output := map[string]interface{}{}
err := yaml.Unmarshal([]byte(input), &output)
if err != nil {
return nil, err
}
return output, nil
}
func MarshalMapWithTemplate(input map[string]string, values map[string]interface{}, debug bool) (string, error) {
inputBytes, err := yaml.Marshal(input)
if err != nil {
return "", fmt.Errorf("failed to marshal input into bytes: %w", err)
}
if debug {
fmt.Printf("marshalled input: \n%s\n", string(inputBytes))
}
tmpl, err := template.New("conf").Parse(string(inputBytes))
if err != nil {
return "", fmt.Errorf("failed to parse template: %w", err)
}
tmplOutput := bytes.NewBuffer([]byte(""))
err = tmpl.Execute(tmplOutput, values)
if err != nil {
return "", fmt.Errorf("failed to execute template: %w", err)
}
if debug {
fmt.Printf("marshalled input (templated): \n%s\n", tmplOutput.String())
}
return tmplOutput.String(), nil
}
func UnmarshalMapIntoStruct(input map[string]interface{}, output interface{}, debug bool) error {
config := &mapstructure.DecoderConfig{
Metadata: nil,
Result: output,
TagName: "yaml",
}
decoder, err := mapstructure.NewDecoder(config)
if err != nil {
return fmt.Errorf("failed to create decoder: %w", err)
}
err = decoder.Decode(input)
if err != nil {
return fmt.Errorf("failed to unmarshal map into struct: %w", err)
}
if debug {
fmt.Println("unmarshaled conf:")
spew.Dump(output)
}
return nil
}
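// Illustrative usage of the template pipeline above (the Conf type, field
// and context names here are hypothetical, not part of this package):
//
//	type Conf struct {
//		Host string `yaml:"host"`
//	}
//
//	input := map[string]string{"host": "{{ .Env }}.example.com"}
//	var conf Conf
//	err := UnmarshalMapIntoStructWithTemplate(input, &conf, map[string]interface{}{"Env": "prod"}, false)
//	// conf.Host == "prod.example.com"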
|
test165.js
|
var callbackArguments = [];
var argument1 = function (err, ids) {
callbackArguments.push(arguments)
if (err) {
console.dir(err);
}
t.same(ids.sort(), [
users[0].id,
users[1].id
].sort(), 'The found id did not match the id of the saved object.');
t.done();
};
var argument2 = true;
var argument3 = function (doc) {
callbackArguments.push(arguments)
return doc.a === 5;
};
var argument4 = true;
var argument5 = function (ext) {
callbackArguments.push(arguments)
return !!v.canPlayType(ext).replace(/no/, '');
};
var argument6 = function (doc) {
callbackArguments.push(arguments)
return doc._id === doc1._id;
};
var argument7 = false;
var base_0 = ["`<5;",1.7976931348623157e+308,823]
var r_0= undefined
try {
r_0 = base_0.find(argument1,argument2)
}
catch(e) {
r_0= "Error"
}
var base_1 = ["`<5;",1.7976931348623157e+308,823]
var r_1= undefined
try {
r_1 = base_1.find(argument3,argument4)
}
catch(e) {
r_1= "Error"
}
var base_2 = ["`<5;",1.7976931348623157e+308,823]
var r_2= undefined
try {
r_2 = base_2.find(argument5)
}
catch(e) {
r_2= "Error"
}
var base_3 = ["`<5;",1.7976931348623157e+308,823]
var r_3= undefined
try {
r_3 = base_3.find(argument6,argument7)
}
catch(e) {
r_3= "Error"
}
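// serialize maps plain data values (objects, booleans, arrays, numbers,
// strings) to their JSON form and everything else to its constructor name,
// so the results can be written out and compared structurally.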
function serialize(array){
return array.map(function(a){
if (a === null || a === undefined) return a;
var name = a.constructor.name;
if (name === 'Object' || name === 'Boolean' || name === 'Array' || name === 'Number' || name === 'String')
return JSON.stringify(a);
return name;
});
}
setTimeout(function(){
require("fs").writeFileSync("./experiments/find/findMined/test165.json",JSON.stringify({"baseObjects":serialize([base_0,base_1,base_2,base_3]),"returnObjects":serialize([r_0,r_1,r_2,r_3]),"callbackArgs":callbackArguments}))
},300)
|
AnimatedBezier.js
|
import { val } from '../utils';
import AnimatedNode from './AnimatedNode';
// These values are established by empiricism with tests (tradeoff: performance VS precision)
var NEWTON_ITERATIONS = 4;
var NEWTON_MIN_SLOPE = 0.001;
var SUBDIVISION_PRECISION = 0.0000001;
var SUBDIVISION_MAX_ITERATIONS = 10;
var kSplineTableSize = 11;
var kSampleStepSize = 1.0 / (kSplineTableSize - 1.0);
var float32ArraySupported = typeof Float32Array === 'function';
function A(aA1, aA2) {
return 1.0 - 3.0 * aA2 + 3.0 * aA1;
}
function B(aA1, aA2) {
return 3.0 * aA2 - 6.0 * aA1;
}
function C(aA1) {
return 3.0 * aA1;
}
// Returns x(t) given t, x1, and x2, or y(t) given t, y1, and y2.
function calcBezier(aT, aA1, aA2) {
return ((A(aA1, aA2) * aT + B(aA1, aA2)) * aT + C(aA1)) * aT;
}
// Returns dx/dt given t, x1, and x2, or dy/dt given t, y1, and y2.
function getSlope(aT, aA1, aA2) {
return 3.0 * A(aA1, aA2) * aT * aT + 2.0 * B(aA1, aA2) * aT + C(aA1);
}
function binarySubdivide(aX, aA, aB, mX1, mX2) {
var currentX,
currentT,
i = 0;
do {
currentT = aA + (aB - aA) / 2.0;
currentX = calcBezier(currentT, mX1, mX2) - aX;
if (currentX > 0.0) {
aB = currentT;
} else {
aA = currentT;
}
} while (
Math.abs(currentX) > SUBDIVISION_PRECISION &&
++i < SUBDIVISION_MAX_ITERATIONS
);
return currentT;
}
function newtonRaphsonIterate(aX, aGuessT, mX1, mX2) {
for (var i = 0; i < NEWTON_ITERATIONS; ++i) {
var currentSlope = getSlope(aGuessT, mX1, mX2);
if (currentSlope === 0.0) {
return aGuessT;
}
var currentX = calcBezier(aGuessT, mX1, mX2) - aX;
aGuessT -= currentX / currentSlope;
}
return aGuessT;
}
function bezier(mX1, mY1, mX2, mY2) {
if (!(0 <= mX1 && mX1 <= 1 && 0 <= mX2 && mX2 <= 1)) {
// eslint-disable-line yoda
throw new Error('bezier x values must be in [0, 1] range');
}
// Precompute samples table
var sampleValues = float32ArraySupported
? new Float32Array(kSplineTableSize)
: new Array(kSplineTableSize);
if (mX1 !== mY1 || mX2 !== mY2) {
for (var i = 0; i < kSplineTableSize; ++i) {
sampleValues[i] = calcBezier(i * kSampleStepSize, mX1, mX2);
}
}
function getTForX(aX) {
var intervalStart = 0.0;
var currentSample = 1;
var lastSample = kSplineTableSize - 1;
for (
;
currentSample !== lastSample && sampleValues[currentSample] <= aX;
++currentSample
) {
intervalStart += kSampleStepSize;
}
--currentSample;
// Interpolate to provide an initial guess for t
var dist =
(aX - sampleValues[currentSample]) /
(sampleValues[currentSample + 1] - sampleValues[currentSample]);
var guessForT = intervalStart + dist * kSampleStepSize;
var initialSlope = getSlope(guessForT, mX1, mX2);
if (initialSlope >= NEWTON_MIN_SLOPE) {
return newtonRaphsonIterate(aX, guessForT, mX1, mX2);
} else if (initialSlope === 0.0) {
return guessForT;
} else {
return binarySubdivide(
aX,
intervalStart,
intervalStart + kSampleStepSize,
mX1,
mX2
);
}
}
return function BezierEasing(x) {
if (mX1 === mY1 && mX2 === mY2) {
return x; // linear
}
// Because JavaScript numbers are imprecise, we should guarantee the extremes are right.
if (x === 0) {
return 0;
}
if (x === 1) {
return 1;
}
return calcBezier(getTForX(x), mY1, mY2);
};
}
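// Illustrative use (assumed values): CSS's "ease" timing function is
// cubic-bezier(0.25, 0.1, 0.25, 1.0).
//   const ease = bezier(0.25, 0.1, 0.25, 1.0);
//   ease(0);   // 0 -- extremes are exact by construction
//   ease(1);   // 1
//   ease(0.5); // ~0.8, most of the distance covered at the halfway point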
export default class AnimatedBezier extends AnimatedNode {
_value;
_bezier;
constructor(value, mX1, mY1, mX2, mY2) {
super({ type: 'bezier', mX1, mY1, mX2, mY2, input: value.__nodeID }, [
value,
]);
this._value = value;
this._bezier = bezier(mX1, mY1, mX2, mY2);
}
__onEvaluate() {
return this._bezier(val(this._value));
}
}
|
foreign_rpc.rs
|
// Copyright 2019 The Grin Developers
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! JSON-RPC Stub generation for the Foreign API
use crate::keychain::Keychain;
use crate::libwallet::{
BlockFees, CbData, ErrorKind, InitTxArgs, IssueInvoiceTxArgs, NodeClient, Slate, VersionInfo,
VersionedSlate, WalletBackend,
};
use crate::Foreign;
use easy_jsonrpc;
/// Public definition used to generate Foreign jsonrpc api.
/// * When running `grin-wallet listen` with defaults, the V2 api is available at
/// `localhost:3415/v2/foreign`
/// * The endpoint only supports POST operations, with the json-rpc request as the body
#[easy_jsonrpc::rpc]
pub trait ForeignRpc {
/**
Networked version of [Foreign::check_version](struct.Foreign.html#method.check_version).
# Json rpc example
```
# grin_wallet_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "check_version",
"id": 1,
"params": []
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": {
"foreign_api_version": 2,
"supported_slate_versions": [
"V0",
"V1",
"V2"
]
}
}
}
# "#
# , 0, false, false);
```
*/
fn check_version(&self) -> Result<VersionInfo, ErrorKind>;
/**
Networked version of [Foreign::build_coinbase](struct.Foreign.html#method.build_coinbase).
# Json rpc example
```
# grin_wallet_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "build_coinbase",
"id": 1,
"params": [
{
"fees": 0,
"height": 0,
"key_id": null
}
]
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": {
"kernel": {
"excess": "08dfe86d732f2dd24bac36aa7502685221369514197c26d33fac03041d47e4b490",
"excess_sig": "8f07ddd5e9f5179cff19486034181ed76505baaad53e5d994064127b56c5841be02fa098c54c9bf638e0ee1ad5eb896caa11565f632be7b9cd65643ba371044f",
"features": "Coinbase",
"fee": "0",
"lock_height": "0"
},
"key_id": "0300000000000000000000000400000000",
"output": {
"commit": "08fe198e525a5937d0c5d01fa354394d2679be6df5d42064a0f7550c332fce3d9d",
"features": "Coinbase",
"proof": "9166dc13a374a50d99f16ddfb228ce6010ea22d1676de755c34123402b5a8e68871b37d716c14e07be14ceb0771cca62a302358aa82922fa87f1387cff3a4507027f04f3fcf54ed16bd97e40a06c6f969139188daca366bb78ccbc7ff0203de62e30077f8b4a8b314901666205d24ca93d54581aa082e37c370e178dea267ff11fa4669756a31c026348255108c4de4b7abe3636ebdd67f25387c9c2868d16fab9209ebee6d19c6395eaf313da67f164d8e997ed97de9478ddb24c34d8a0dcedc24c5d0a9d1c9f15de3264323fc768271d7981b1e2ae1e59675537115fdcd1ea7d60a7bd276865698d1c1598b7c22a1a6e212db4d0a0ba98706a746f63f2d8460a9d28b4e8a7d2ad1f531b32046e2285a034c2d49f7896026fa186f9665766ae158435157f94bd31b8ebf5c0637a9d72036348c1d1fb70659b6ca5e64427a9eb51569074311e970316fd370373149067a0781cd49cc450e80e14a84f9818ae8caf6c02877f15ab11397d60309249658e5a03f49354dce3873118be6f43ca436aa81165ca44d624fd6f504b8d186bca2ef7e3c5ff2b85db86b29ddd0fb58173960caf2b437c8190511685303ab0eb1b5a757e1509529063a145f5242350edb8e1a1807f505866fdb5689fd39d4595cf5084d30a1ba2af882969bf64aecad342926b16930a3d93781dcebc839b7bf5762146e0016c502aad33d24c9e708c810505bd9c6648bd8303ddbbe5c5cf82eb420784223182e1b59286249e38458c885f089e9211b3aafe7c6f85097878679775287423ebca7557cd3be9e44bb454c6b1914b9012e100d601d7a2ecb0c2a07b5e6f0c293b671e45a425d97169eb793834a40a0a64277e68b2809ca4556eed7d130c2ea973021fda08a01c771111b1cc12b647029fe19f1018486a0ef82bbe5ca7ff484c71d52f3238766d771eaf4204793809dc27"
}
}
}
}
# "#
# , 4, false, false);
```
*/
fn build_coinbase(&self, block_fees: &BlockFees) -> Result<CbData, ErrorKind>;
/**
Networked version of [Foreign::verify_slate_messages](struct.Foreign.html#method.verify_slate_messages).
# Json rpc example
```
# grin_wallet_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "verify_slate_messages",
"id": 1,
"params": [ {
"amount": "6000000000",
"fee": "8000000",
"height": "4",
"id": "0436430c-2b02-624c-2032-570501212b00",
"lock_height": "4",
"num_participants": 2,
"participant_data": [
{
"id": "0",
"message": "my message",
"message_sig": "8f07ddd5e9f5179cff19486034181ed76505baaad53e5d994064127b56c5841b1d4c1358be398f801eb90d933774b5218fa7e769b11c4c640402253353656f75",
"part_sig": null,
"public_blind_excess": "034b4df2f0558b73ea72a1ca5c4ab20217c66bbe0829056fca7abe76888e9349ee",
"public_nonce": "031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f"
}
],
"tx": {
"body": {
"inputs": [
{
"commit": "08e1da9e6dc4d6e808a718b2f110a991dd775d65ce5ae408a4e1f002a4961aa9e7",
"features": "Coinbase"
}
],
"kernels": [
{
"excess": "000000000000000000000000000000000000000000000000000000000000000000",
"excess_sig": "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"features": "HeightLocked",
"fee": "8000000",
"lock_height": "4"
}
],
"outputs": [
{
"commit": "094be57c91787fc2033d5d97fae099f1a6ddb37ea48370f1a138f09524c767fdd3",
"features": "Plain",
"proof": "2a42e9e902b70ce44e1fccb14de87ee0a97100bddf12c6bead1b9c5f4eb60300f29c13094fa12ffeee238fb4532b18f6b61cf51b23c1c7e1ad2e41560dc27edc0a2b9e647a0b3e4e806fced5b65e61d0f1f5197d3e2285c632d359e27b6b9206b2caffea4f67e0c7a2812e7a22c134b98cf89bd43d9f28b8bec25cce037a0ac5b1ae8f667e54e1250813a5263004486b4465ad4e641ab2b535736ea26535a11013564f08f483b7dab1c2bcc3ee38eadf2f7850eff7e3459a4bbabf9f0cf6c50d0c0a4120565cd4a2ce3e354c11721cd695760a24c70e0d5a0dfc3c5dcd51dfad6de2c237a682f36dc0b271f21bb3655e5333016aaa42c2efa1446e5f3c0a79ec417c4d30f77556951cb0f05dbfafb82d9f95951a9ea241fda2a6388f73ace036b98acce079f0e4feebccc96290a86dcc89118a901210b245f2d114cf94396e4dbb461e82aa26a0581389707957968c7cdc466213bb1cd417db207ef40c05842ab67a01a9b96eb1430ebc26e795bb491258d326d5174ad549401059e41782121e506744af8af9d8e493644a87d613600888541cbbe538c625883f3eb4aa3102c5cfcc25de8e97af8927619ce6a731b3b8462d51d993066b935b0648d2344ad72e4fd70f347fbd81041042e5ea31cc7b2e3156a920b80ecba487b950ca32ca95fae85b759c936246ecf441a9fdd95e8fee932d6782cdec686064018c857efc47fb4b2a122600d5fdd79af2486f44df7e629184e1c573bc0a9b3feb40b190ef2861a1ab45e2ac2201b9cd42e495deea247269820ed32389a2810ad6c0f9a296d2a2d9c54089fed50b7f5ecfcd33ab9954360e1d7f5598c32128cfcf2a1d8bf14616818da8a5343bfa88f0eedf392e9d4ab1ace1b60324129cd4852c2e27813a9cf71a6ae6229a4fcecc1a756b3e664c5f50af333082616815a3bec8fc0b75b8e4e767d719"
}
]
},
"offset": "d202964900000000d302964900000000d402964900000000d502964900000000"
},
"version_info": {
"orig_version": 2,
"version": 2,
"block_header_version": 1
}
}
]
}
# "#
# ,
# r#"
{
"jsonrpc": "2.0",
"id": 1,
"result": {
"Ok": null
}
}
# "#
# , 1, false, false);
```
*/
fn verify_slate_messages(&self, slate: &Slate) -> Result<(), ErrorKind>;
/**
Networked version of [Foreign::receive_tx](struct.Foreign.html#method.receive_tx).
# Json rpc example
```
# grin_wallet_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "receive_tx",
"id": 1,
"params": [
{
"version_info": {
"version": 2,
"orig_version": 2,
"block_header_version": 1
},
"num_participants": 2,
"id": "0436430c-2b02-624c-2032-570501212b00",
"tx": {
"offset": "d202964900000000d302964900000000d402964900000000d502964900000000",
"body": {
"inputs": [
{
"features": "Coinbase",
"commit": "087df32304c5d4ae8b2af0bc31e700019d722910ef87dd4eec3197b80b207e3045"
},
{
"features": "Coinbase",
"commit": "08e1da9e6dc4d6e808a718b2f110a991dd775d65ce5ae408a4e1f002a4961aa9e7"
}
],
"outputs": [
{
"features": "Plain",
"commit": "0812276cc788e6870612296d926cba9f0e7b9810670710b5a6e6f1ba006d395774",
"proof": "dcff6175390c602bfa92c2ffd1a9b2d84dcc9ea941f6f317bdd0f875244ef23e696fd17c71df79760ce5ce1a96aab1d15dd057358dc835e972febeb86d50ccec0dad7cfe0246d742eb753cf7b88c045d15bc7123f8cf7155647ccf663fca92a83c9a65d0ed756ea7ebffd2cac90c380a102ed9caaa355d175ed0bf58d3ac2f5e909d6c447dfc6b605e04925c2b17c33ebd1908c965a5541ea5d2ed45a0958e6402f89d7a56df1992e036d836e74017e73ccad5cb3a82b8e139e309792a31b15f3ffd72ed033253428c156c2b9799458a25c1da65b719780a22de7fe7f437ae2fccd22cf7ea357ab5aa66a5ef7d71fb0dc64aa0b5761f68278062bb39bb296c787e4cabc5e2a2933a416ce1c9a9696160386449c437e9120f7bb26e5b0e74d1f2e7d5bcd7aafb2a92b87d1548f1f911fb06af7bd6cc13cee29f7c9cb79021aed18186272af0e9d189ec107c81a8a3aeb4782b0d950e4881aa51b776bb6844b25bce97035b48a9bdb2aea3608687bcdd479d4fa998b5a839ff88558e4a29dff0ed13b55900abb5d439b70793d902ae9ad34587b18c919f6b875c91d14deeb1c373f5e76570d59a6549758f655f1128a54f162dfe8868e1587028e26ad91e528c5ae7ee9335fa58fb59022b5de29d80f0764a9917390d46db899acc6a5b416e25ecc9dccb7153646addcc81cadb5f0078febc7e05d7735aba494f39ef05697bbcc9b47b2ccc79595d75fc13c80678b5e237edce58d731f34c05b1ddcaa649acf2d865bbbc3ceda10508bcdd29d0496744644bf1c3516f6687dfeef5649c7dff90627d642739a59d91a8d1d0c4dc55d74a949e1074427664b467992c9e0f7d3af9d6ea79513e8946ddc0d356bac49878e64e6a95b0a30214214faf2ce317fa622ff3266b32a816e10a18e6d789a5da1f23e67b4f970a68a7bcd9e18825ee274b0483896a40"
}
],
"kernels": [
{
"features": "Plain",
"fee": "7000000",
"lock_height": "0",
"excess": "000000000000000000000000000000000000000000000000000000000000000000",
"excess_sig": "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
}
]
}
},
"amount": "60000000000",
"fee": "7000000",
"height": "5",
"lock_height": "0",
"participant_data": [
{
"id": "0",
"public_blind_excess": "033ac2158fa0077f087de60c19d8e431753baa5b63b6e1477f05a2a6e7190d4592",
"public_nonce": "031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f",
"part_sig": null,
"message": null,
"message_sig": null
}
]
},
null,
"Thanks, Yeastplume"
]
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": {
"amount": "60000000000",
"fee": "7000000",
"height": "5",
"id": "0436430c-2b02-624c-2032-570501212b00",
"lock_height": "0",
"num_participants": 2,
"participant_data": [
{
"id": "0",
"message": null,
"message_sig": null,
"part_sig": null,
"public_blind_excess": "033ac2158fa0077f087de60c19d8e431753baa5b63b6e1477f05a2a6e7190d4592",
"public_nonce": "031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f"
},
{
"id": "1",
"message": "Thanks, Yeastplume",
"message_sig": "8f07ddd5e9f5179cff19486034181ed76505baaad53e5d994064127b56c5841b30a1f1b21eade1b4bd211e1f137fbdbca1b78dc43da21b1695f6a0edf2437ff9",
"part_sig": "8f07ddd5e9f5179cff19486034181ed76505baaad53e5d994064127b56c5841b2b35bd28dfd2269e0670e0cf9270bd6df2d03fbd64523ee4ae622396055b96fc",
"public_blind_excess": "038fe0443243dab173c068ef5fa891b242d2b5eb890ea09475e6e381170442ee16",
"public_nonce": "031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f"
}
],
"tx": {
"body": {
"inputs": [
{
"commit": "087df32304c5d4ae8b2af0bc31e700019d722910ef87dd4eec3197b80b207e3045",
"features": "Coinbase"
},
{
"commit": "08e1da9e6dc4d6e808a718b2f110a991dd775d65ce5ae408a4e1f002a4961aa9e7",
"features": "Coinbase"
}
],
"kernels": [
{
"excess": "000000000000000000000000000000000000000000000000000000000000000000",
"excess_sig": "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
"features": "Plain",
"fee": "7000000",
"lock_height": "0"
}
],
"outputs": [
{
"commit": "084ee97defa8c37124d4c69baa753e2532535faa81f79ea5e0489db25297d5beb8",
"features": "Plain",
"proof": "bffb26e7df4bf753f4d8e810c67fb5106b1746c1870f5cb96585537eb8e2f66b372ed05fd35ae18c6e8515cd9f2aaae85d5a7655361c6a8573e20fbdfdda6e0a0b25817fc0db23dc25297382af379659d846bd8044f807c467722708d3a3797b84fceb09eb29f11c77b79c7c93c578d06d95b58d845930531e5cac6346d1373ee1c5db69c14d0aa1a9c22e187dc346156c468540ad166a04902d3faf357ed31a50775d274913ccc9ba976ca3977e18f383b20f0cd02a0866b7b44847bfbba35c099f5eba9c9747cad961033321925f3e0ad43e357aaecc50989bbbcb5b44ead58fe359c59903530c58bf1c9a6f9fb120a3492e835fabc01bb8b31b52b15ace4785a08c3ea9a82bd15c41c744544286b114b1be733fa6237300cf2dc99e8af6f8557bd9a083ba59cc1a500bdfba228b53785a7fdbf576f7dce035769058bc7644041ec5731485e5641eac5c75a6eb57e4abc287b0be8eab77c7e8a5122ee8d49f02f103a3af6fe38b8fcecd1aa9bb342b3e110f4003ee6c771ed93401ca3438dcf0d751a36dbb7a7a45d32709525686f3d2e5f542c747c9c745fe50cd789a0aa55419934afff363044d3c3f5f7669ebb9f2245b449bfdc4e09dfb1661552485107afbd9a2b571a0647b1fc330089a65e4b5df07f58f1a9c11c3da51d56cd854f227c5111d25ca8c4bec4bb0fbcb4a23fc3288418423dd0649d731b6a6c08851954ea920046ce67a4114d35c3876c25361e7a99474aa04354a4ed0555f9bef527d902fbb0d1d5c2b42f5eea5ced359005121167f9908729939dba610cdabca41f714e144ab148faec77f4d70566287671e6786459bd7d16787a24e12f2328b9faab1c7ac80a916d2f83f12a7351a2bedff610d33dfb2df7d8e57b68fb4a5dcc0d8e4fa807b2077877aa96ba7bc22e627a4f6a308d3abc091f56d518258f073cc1b70ef81"
},
{
"commit": "0812276cc788e6870612296d926cba9f0e7b9810670710b5a6e6f1ba006d395774",
"features": "Plain",
"proof": "dcff6175390c602bfa92c2ffd1a9b2d84dcc9ea941f6f317bdd0f875244ef23e696fd17c71df79760ce5ce1a96aab1d15dd057358dc835e972febeb86d50ccec0dad7cfe0246d742eb753cf7b88c045d15bc7123f8cf7155647ccf663fca92a83c9a65d0ed756ea7ebffd2cac90c380a102ed9caaa355d175ed0bf58d3ac2f5e909d6c447dfc6b605e04925c2b17c33ebd1908c965a5541ea5d2ed45a0958e6402f89d7a56df1992e036d836e74017e73ccad5cb3a82b8e139e309792a31b15f3ffd72ed033253428c156c2b9799458a25c1da65b719780a22de7fe7f437ae2fccd22cf7ea357ab5aa66a5ef7d71fb0dc64aa0b5761f68278062bb39bb296c787e4cabc5e2a2933a416ce1c9a9696160386449c437e9120f7bb26e5b0e74d1f2e7d5bcd7aafb2a92b87d1548f1f911fb06af7bd6cc13cee29f7c9cb79021aed18186272af0e9d189ec107c81a8a3aeb4782b0d950e4881aa51b776bb6844b25bce97035b48a9bdb2aea3608687bcdd479d4fa998b5a839ff88558e4a29dff0ed13b55900abb5d439b70793d902ae9ad34587b18c919f6b875c91d14deeb1c373f5e76570d59a6549758f655f1128a54f162dfe8868e1587028e26ad91e528c5ae7ee9335fa58fb59022b5de29d80f0764a9917390d46db899acc6a5b416e25ecc9dccb7153646addcc81cadb5f0078febc7e05d7735aba494f39ef05697bbcc9b47b2ccc79595d75fc13c80678b5e237edce58d731f34c05b1ddcaa649acf2d865bbbc3ceda10508bcdd29d0496744644bf1c3516f6687dfeef5649c7dff90627d642739a59d91a8d1d0c4dc55d74a949e1074427664b467992c9e0f7d3af9d6ea79513e8946ddc0d356bac49878e64e6a95b0a30214214faf2ce317fa622ff3266b32a816e10a18e6d789a5da1f23e67b4f970a68a7bcd9e18825ee274b0483896a40"
}
]
},
"offset": "d202964900000000d302964900000000d402964900000000d502964900000000"
},
"version_info": {
"orig_version": 2,
"version": 2,
"block_header_version": 1
}
}
}
}
# "#
# , 5, true, false);
```
*/
fn receive_tx(
&self,
slate: VersionedSlate,
dest_acct_name: Option<String>,
message: Option<String>,
) -> Result<VersionedSlate, ErrorKind>;
/**
Networked version of [Foreign::finalize_invoice_tx](struct.Foreign.html#method.finalize_invoice_tx).
# Json rpc example
```
# grin_wallet_api::doctest_helper_json_rpc_foreign_assert_response!(
# r#"
{
"jsonrpc": "2.0",
"method": "finalize_invoice_tx",
"id": 1,
"params": [{
"version_info": {
"version": 2,
"orig_version": 2,
"block_header_version": 1
},
"num_participants": 2,
"id": "0436430c-2b02-624c-2032-570501212b00",
"tx": {
"offset": "d202964900000000d302964900000000d402964900000000d502964900000000",
"body": {
"inputs": [
{
"features": "Coinbase",
"commit": "087df32304c5d4ae8b2af0bc31e700019d722910ef87dd4eec3197b80b207e3045"
},
{
"features": "Coinbase",
"commit": "08e1da9e6dc4d6e808a718b2f110a991dd775d65ce5ae408a4e1f002a4961aa9e7"
}
],
"outputs": [
{
"features": "Plain",
"commit": "099b48cfb1f80a2347dc89818449e68e76a3c6817a532a8e9ef2b4a5ccf4363850",
"proof": "7ebcd2ed9bf5fb29854033ba3d0e720613bdf7dfacc586d2f6084c1cde0a2b72e955d4ce625916701dc7c347132f40d0f102a34e801d745ee54b49b765d08aae0bb801c60403e57cafade3b4b174e795b633ab9e402b5b1b6e1243fd10bbcf9368a75cb6a6c375c7bdf02da9e03b7f210df45d942e6fba2729cd512a372e6ed91a1b5c9c22831febea843e3f85adcf198f39ac9f7b73b70c60bfb474aa69878ea8d1d32fef30166b59caacaec3fd024de29a90f1587e08d2c36b3d5c560cabf658e212e0a40a4129b3e5c35557058def5551f4eb395759597ba808b3c34eac3bfb9716e4480d7931c5789c538463ec75be0eb807c894047fda6cbcd22682d3c6d3823cb330f090a2099e3510a3706b57d46c95224394d7f1c0a20d99cc314b8f1d9d02668e2e435f62e1194de0be6a1f50f72ed777ed51c8819f527a94918d1aa8df6461e98ed4c2b18210de50fbcf8c3df210bfe326d41f1dc0ad748cb0320ae28401c85ab4f7dcb99d88a052e95dc85b76d22b36cabd60e06ab84bb7e4ddfdab9c9730c8a986583237ed1ecbb323ee8e79b8cadca4b438b7c09531670b471dda6a2eb3e747916c88ce7d9d8e1b7f61660eeb9e5a13c60e4dfe89d1177d81d6f6570fda85158e646a15f1e8b9e977494dc19a339aab2e0e478670d80092d6ba37646e60714ef64eb4a3d37fe15f8f38b59114af34b235489eed3f69b7781c5fe496eb43ffe245c14bd740f745844a38cf0d904347aaa2b64f51add18822dac009d8b63fa3e4c9b1fa72187f9a4acba1ab315daa1b04c9a41f3be846ac420b37990e6c947a16cc9d5c0671b292bf77d7d8b8974d2ad3afae95ba7772c37432840f53a007f31e0195f3abdf100c4477723cc6c6d5da14894a73dfac342833731036487488fdade7b9d556c06f26173b6b67598d3769447ce2828d71dd45ac5af436c6b0"
},
{
"features": "Plain",
"commit": "0812276cc788e6870612296d926cba9f0e7b9810670710b5a6e6f1ba006d395774",
"proof": "dcff6175390c602bfa92c2ffd1a9b2d84dcc9ea941f6f317bdd0f875244ef23e696fd17c71df79760ce5ce1a96aab1d15dd057358dc835e972febeb86d50ccec0dad7cfe0246d742eb753cf7b88c045d15bc7123f8cf7155647ccf663fca92a83c9a65d0ed756ea7ebffd2cac90c380a102ed9caaa355d175ed0bf58d3ac2f5e909d6c447dfc6b605e04925c2b17c33ebd1908c965a5541ea5d2ed45a0958e6402f89d7a56df1992e036d836e74017e73ccad5cb3a82b8e139e309792a31b15f3ffd72ed033253428c156c2b9799458a25c1da65b719780a22de7fe7f437ae2fccd22cf7ea357ab5aa66a5ef7d71fb0dc64aa0b5761f68278062bb39bb296c787e4cabc5e2a2933a416ce1c9a9696160386449c437e9120f7bb26e5b0e74d1f2e7d5bcd7aafb2a92b87d1548f1f911fb06af7bd6cc13cee29f7c9cb79021aed18186272af0e9d189ec107c81a8a3aeb4782b0d950e4881aa51b776bb6844b25bce97035b48a9bdb2aea3608687bcdd479d4fa998b5a839ff88558e4a29dff0ed13b55900abb5d439b70793d902ae9ad34587b18c919f6b875c91d14deeb1c373f5e76570d59a6549758f655f1128a54f162dfe8868e1587028e26ad91e528c5ae7ee9335fa58fb59022b5de29d80f0764a9917390d46db899acc6a5b416e25ecc9dccb7153646addcc81cadb5f0078febc7e05d7735aba494f39ef05697bbcc9b47b2ccc79595d75fc13c80678b5e237edce58d731f34c05b1ddcaa649acf2d865bbbc3ceda10508bcdd29d0496744644bf1c3516f6687dfeef5649c7dff90627d642739a59d91a8d1d0c4dc55d74a949e1074427664b467992c9e0f7d3af9d6ea79513e8946ddc0d356bac49878e64e6a95b0a30214214faf2ce317fa622ff3266b32a816e10a18e6d789a5da1f23e67b4f970a68a7bcd9e18825ee274b0483896a40"
}
],
"kernels": [
{
"features": "Plain",
"fee": "7000000",
"lock_height": "0",
"excess": "000000000000000000000000000000000000000000000000000000000000000000",
"excess_sig": "00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"
}
]
}
},
"amount": "60000000000",
"fee": "7000000",
"height": "5",
"lock_height": "0",
"participant_data": [
{
"id": "1",
"public_blind_excess": "033bbe2a419ea2e9d6810a8d66552e709d1783ca50759a44dbaf63fc79c0164c4c",
"public_nonce": "031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f",
"part_sig": null,
"message": null,
"message_sig": null
},
{
"id": "0",
"public_blind_excess": "029f12f9f8c5489a18904de7cd46dc3384b79369d4cbc17cd74b299da8c2cf7445",
"public_nonce": "031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f",
"part_sig": "8f07ddd5e9f5179cff19486034181ed76505baaad53e5d994064127b56c5841b1840d69ed5f33bc9b424422903d5d1d3e9b914143bcbe3b7ed32d8f15fcccce3",
"message": null,
"message_sig": null
}
]
}]
}
# "#
# ,
# r#"
{
"id": 1,
"jsonrpc": "2.0",
"result": {
"Ok": {
"amount": "60000000000",
"fee": "7000000",
"height": "5",
"id": "0436430c-2b02-624c-2032-570501212b00",
"lock_height": "0",
"num_participants": 2,
"participant_data": [
{
"id": "1",
"message": null,
"message_sig": null,
"part_sig": "8f07ddd5e9f5179cff19486034181ed76505baaad53e5d994064127b56c5841bc9ea21b259d61e4de177d9ef8ab475dfab0ec7299009a7fea61010f963f2e6c0",
"public_blind_excess": "033bbe2a419ea2e9d6810a8d66552e709d1783ca50759a44dbaf63fc79c0164c4c",
"public_nonce": "031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f"
},
{
"id": "0",
"message": null,
"message_sig": null,
"part_sig": "8f07ddd5e9f5179cff19486034181ed76505baaad53e5d994064127b56c5841b1840d69ed5f33bc9b424422903d5d1d3e9b914143bcbe3b7ed32d8f15fcccce3",
"public_blind_excess": "029f12f9f8c5489a18904de7cd46dc3384b79369d4cbc17cd74b299da8c2cf7445",
"public_nonce": "031b84c5567b126440995d3ed5aaba0565d71e1834604819ff9c17f5e9d5dd078f"
}
],
"tx": {
"body": {
"inputs": [
{
"commit": "087df32304c5d4ae8b2af0bc31e700019d722910ef87dd4eec3197b80b207e3045",
"features": "Coinbase"
},
{
"commit": "08e1da9e6dc4d6e808a718b2f110a991dd775d65ce5ae408a4e1f002a4961aa9e7",
"features": "Coinbase"
}
],
"kernels": [
{
"excess": "09bac6083b05a32a9d9b37710c70dd0a1ef9329fde0848558976b6f1b81d80ceed",
"excess_sig": "66074d25a751c4743342c90ad8ead9454daa00d9b9aed29bca321036d16c4b4da0e9c180a26b88565afcd269a7ac98f896c8db3dcbd48ab69443e8eac3beb3a4",
"features": "Plain",
"fee": "7000000",
"lock_height": "0"
}
],
"outputs": [
{
"commit": "099b48cfb1f80a2347dc89818449e68e76a3c6817a532a8e9ef2b4a5ccf4363850",
"features": "Plain",
"proof": "7ebcd2ed9bf5fb29854033ba3d0e720613bdf7dfacc586d2f6084c1cde0a2b72e955d4ce625916701dc7c347132f40d0f102a34e801d745ee54b49b765d08aae0bb801c60403e57cafade3b4b174e795b633ab9e402b5b1b6e1243fd10bbcf9368a75cb6a6c375c7bdf02da9e03b7f210df45d942e6fba2729cd512a372e6ed91a1b5c9c22831febea843e3f85adcf198f39ac9f7b73b70c60bfb474aa69878ea8d1d32fef30166b59caacaec3fd024de29a90f1587e08d2c36b3d5c560cabf658e212e0a40a4129b3e5c35557058def5551f4eb395759597ba808b3c34eac3bfb9716e4480d7931c5789c538463ec75be0eb807c894047fda6cbcd22682d3c6d3823cb330f090a2099e3510a3706b57d46c95224394d7f1c0a20d99cc314b8f1d9d02668e2e435f62e1194de0be6a1f50f72ed777ed51c8819f527a94918d1aa8df6461e98ed4c2b18210de50fbcf8c3df210bfe326d41f1dc0ad748cb0320ae28401c85ab4f7dcb99d88a052e95dc85b76d22b36cabd60e06ab84bb7e4ddfdab9c9730c8a986583237ed1ecbb323ee8e79b8cadca4b438b7c09531670b471dda6a2eb3e747916c88ce7d9d8e1b7f61660eeb9e5a13c60e4dfe89d1177d81d6f6570fda85158e646a15f1e8b9e977494dc19a339aab2e0e478670d80092d6ba37646e60714ef64eb4a3d37fe15f8f38b59114af34b235489eed3f69b7781c5fe496eb43ffe245c14bd740f745844a38cf0d904347aaa2b64f51add18822dac009d8b63fa3e4c9b1fa72187f9a4acba1ab315daa1b04c9a41f3be846ac420b37990e6c947a16cc9d5c0671b292bf77d7d8b8974d2ad3afae95ba7772c37432840f53a007f31e0195f3abdf100c4477723cc6c6d5da14894a73dfac342833731036487488fdade7b9d556c06f26173b6b67598d3769447ce2828d71dd45ac5af436c6b0"
},
{
"commit": "0812276cc788e6870612296d926cba9f0e7b9810670710b5a6e6f1ba006d395774",
"features": "Plain",
"proof": "dcff6175390c602bfa92c2ffd1a9b2d84dcc9ea941f6f317bdd0f875244ef23e696fd17c71df79760ce5ce1a96aab1d15dd057358dc835e972febeb86d50ccec0dad7cfe0246d742eb753cf7b88c045d15bc7123f8cf7155647ccf663fca92a83c9a65d0ed756ea7ebffd2cac90c380a102ed9caaa355d175ed0bf58d3ac2f5e909d6c447dfc6b605e04925c2b17c33ebd1908c965a5541ea5d2ed45a0958e6402f89d7a56df1992e036d836e74017e73ccad5cb3a82b8e139e309792a31b15f3ffd72ed033253428c156c2b9799458a25c1da65b719780a22de7fe7f437ae2fccd22cf7ea357ab5aa66a5ef7d71fb0dc64aa0b5761f68278062bb39bb296c787e4cabc5e2a2933a416ce1c9a9696160386449c437e9120f7bb26e5b0e74d1f2e7d5bcd7aafb2a92b87d1548f1f911fb06af7bd6cc13cee29f7c9cb79021aed18186272af0e9d189ec107c81a8a3aeb4782b0d950e4881aa51b776bb6844b25bce97035b48a9bdb2aea3608687bcdd479d4fa998b5a839ff88558e4a29dff0ed13b55900abb5d439b70793d902ae9ad34587b18c919f6b875c91d14deeb1c373f5e76570d59a6549758f655f1128a54f162dfe8868e1587028e26ad91e528c5ae7ee9335fa58fb59022b5de29d80f0764a9917390d46db899acc6a5b416e25ecc9dccb7153646addcc81cadb5f0078febc7e05d7735aba494f39ef05697bbcc9b47b2ccc79595d75fc13c80678b5e237edce58d731f34c05b1ddcaa649acf2d865bbbc3ceda10508bcdd29d0496744644bf1c3516f6687dfeef5649c7dff90627d642739a59d91a8d1d0c4dc55d74a949e1074427664b467992c9e0f7d3af9d6ea79513e8946ddc0d356bac49878e64e6a95b0a30214214faf2ce317fa622ff3266b32a816e10a18e6d789a5da1f23e67b4f970a68a7bcd9e18825ee274b0483896a40"
}
]
},
"offset": "d202964900000000d302964900000000d402964900000000d502964900000000"
},
"version_info": {
"orig_version": 2,
"version": 2,
"block_header_version": 1
}
}
}
}
# "#
# , 5, false, true);
```
*/
fn finalize_invoice_tx(&self, slate: &Slate) -> Result<Slate, ErrorKind>;
}
impl<W: ?Sized, C, K> ForeignRpc for Foreign<W, C, K>
where
W: WalletBackend<C, K>,
C: NodeClient,
K: Keychain,
{
fn check_version(&self) -> Result<VersionInfo, ErrorKind> {
Foreign::check_version(self).map_err(|e| e.kind())
}
	fn build_coinbase(&self, block_fees: &BlockFees) -> Result<CbData, ErrorKind> {
Foreign::build_coinbase(self, block_fees).map_err(|e| e.kind())
}
fn verify_slate_messages(&self, slate: &Slate) -> Result<(), ErrorKind> {
Foreign::verify_slate_messages(self, slate).map_err(|e| e.kind())
}
fn receive_tx(
&self,
slate: VersionedSlate,
dest_acct_name: Option<String>,
message: Option<String>,
) -> Result<VersionedSlate, ErrorKind> {
let version = slate.version();
let slate: Slate = slate.into();
let slate = Foreign::receive_tx(
self,
&slate,
dest_acct_name.as_ref().map(String::as_str),
message,
)
.map_err(|e| e.kind())?;
Ok(VersionedSlate::into_version(slate, version))
}
fn finalize_invoice_tx(&self, slate: &Slate) -> Result<Slate, ErrorKind> {
Foreign::finalize_invoice_tx(self, slate).map_err(|e| e.kind())
}
}
/// Helper to set up a real environment to run integrated doctests.
pub fn run_doctest_foreign(
request: serde_json::Value,
test_dir: &str,
blocks_to_mine: u64,
init_tx: bool,
init_invoice_tx: bool,
) -> Result<Option<serde_json::Value>, String> {
use easy_jsonrpc::Handler;
use grin_wallet_impls::test_framework::{self, LocalWalletClient, WalletProxy};
use grin_wallet_libwallet::api_impl;
use grin_wallet_util::grin_keychain::ExtKeychain;
use crate::core::global;
use crate::core::global::ChainTypes;
use grin_wallet_util::grin_util as util;
use std::fs;
use std::thread;
util::init_test_logger();
let _ = fs::remove_dir_all(test_dir);
global::set_mining_mode(ChainTypes::AutomatedTesting);
let mut wallet_proxy: WalletProxy<LocalWalletClient, ExtKeychain> = WalletProxy::new(test_dir);
let chain = wallet_proxy.chain.clone();
let rec_phrase_1 =
"fat twenty mean degree forget shell check candy immense awful \
flame next during february bulb bike sun wink theory day kiwi embrace peace lunch";
let client1 = LocalWalletClient::new("wallet1", wallet_proxy.tx.clone());
let wallet1 = test_framework::create_wallet(
&format!("{}/wallet1", test_dir),
client1.clone(),
Some(rec_phrase_1),
);
wallet_proxy.add_wallet("wallet1", client1.get_send_instance(), wallet1.clone());
let rec_phrase_2 =
"hour kingdom ripple lunch razor inquiry coyote clay stamp mean \
sell finish magic kid tiny wage stand panther inside settle feed song hole exile";
let client2 = LocalWalletClient::new("wallet2", wallet_proxy.tx.clone());
let wallet2 = test_framework::create_wallet(
&format!("{}/wallet2", test_dir),
client2.clone(),
Some(rec_phrase_2),
);
wallet_proxy.add_wallet("wallet2", client2.get_send_instance(), wallet2.clone());
// Set the wallet proxy listener running
thread::spawn(move || {
if let Err(e) = wallet_proxy.run() {
error!("Wallet Proxy error: {}", e);
}
});
// Mine a few blocks to wallet 1 so there's something to send
for _ in 0..blocks_to_mine {
let _ = test_framework::award_blocks_to_wallet(&chain, wallet1.clone(), 1 as usize, false);
//update local outputs after each block, so transaction IDs stay consistent
let mut w = wallet1.lock();
w.open_with_credentials().unwrap();
let (wallet_refreshed, _) =
api_impl::owner::retrieve_summary_info(&mut *w, true, 1).unwrap();
assert!(wallet_refreshed);
w.close().unwrap();
}
if init_invoice_tx {
let amount = 60_000_000_000;
let mut slate = {
let mut w = wallet2.lock();
w.open_with_credentials().unwrap();
let args = IssueInvoiceTxArgs {
amount,
..Default::default()
};
api_impl::owner::issue_invoice_tx(&mut *w, args, true).unwrap()
};
slate = {
let mut w = wallet1.lock();
w.open_with_credentials().unwrap();
let args = InitTxArgs {
src_acct_name: None,
amount: slate.amount,
minimum_confirmations: 2,
max_outputs: 500,
num_change_outputs: 1,
selection_strategy_is_use_all: true,
..Default::default()
};
api_impl::owner::process_invoice_tx(&mut *w, &slate, args, true).unwrap()
};
println!("INIT INVOICE SLATE");
// Spit out slate for input to finalize_invoice_tx
println!("{}", serde_json::to_string_pretty(&slate).unwrap());
}
if init_tx {
let amount = 60_000_000_000;
let mut w = wallet1.lock();
w.open_with_credentials().unwrap();
let args = InitTxArgs {
src_acct_name: None,
amount,
minimum_confirmations: 2,
max_outputs: 500,
num_change_outputs: 1,
selection_strategy_is_use_all: true,
..Default::default()
};
let slate = api_impl::owner::init_send_tx(&mut *w, args, true).unwrap();
println!("INIT SLATE");
// Spit out slate for input to finalize_tx
println!("{}", serde_json::to_string_pretty(&slate).unwrap());
}
let mut api_foreign = match init_invoice_tx {
false => Foreign::new(wallet1.clone()),
true => Foreign::new(wallet2.clone()),
};
api_foreign.doctest_mode = true;
let foreign_api = &api_foreign as &dyn ForeignRpc;
Ok(foreign_api.handle_request(request).as_option())
}
#[doc(hidden)]
#[macro_export]
macro_rules! doctest_helper_json_rpc_foreign_assert_response {
($request:expr, $expected_response:expr, $blocks_to_mine:expr, $init_tx:expr, $init_invoice_tx:expr) => {
		// Create a temporary wallet, run the jsonrpc request against the wallet's
		// foreign api, delete the wallet, and return the json response.
		// In order to prevent leaking tempdirs, this code should not panic.
use grin_wallet_api::run_doctest_foreign;
use serde_json;
use serde_json::Value;
use tempfile::tempdir;
let dir = tempdir().map_err(|e| format!("{:#?}", e)).unwrap();
let dir = dir
.path()
.to_str()
.ok_or("Failed to convert tmpdir path to string.".to_owned())
.unwrap();
let request_val: Value = serde_json::from_str($request).unwrap();
let expected_response: Value = serde_json::from_str($expected_response).unwrap();
let response = run_doctest_foreign(
request_val,
dir,
$blocks_to_mine,
$init_tx,
$init_invoice_tx,
)
.unwrap()
.unwrap();
if response != expected_response {
panic!(
"(left != right) \nleft: {}\nright: {}",
serde_json::to_string_pretty(&response).unwrap(),
serde_json::to_string_pretty(&expected_response).unwrap()
);
}
};
}
|
kafka.go
|
package trace
import (
"context"
clusterSarama "github.com/Shopify/sarama"
"github.com/opentracing/opentracing-go"
"google.golang.org/grpc/metadata"
"gopkg.in/Shopify/sarama.v1"
)
func InjectMQHeader(tracer opentracing.Tracer, sm opentracing.SpanContext, ctx context.Context, msg *sarama.ProducerMessage) {
md, ok := metadata.FromIncomingContext(ctx)
if !ok {
md = metadata.New(nil)
} else {
md = md.Copy()
}
carrier := TextMapRW{md}
err := tracer.Inject(sm, opentracing.TextMap, carrier)
	if err != nil {
		// The RPC client may not have carried a trace instance; keep going and
		// produce the message without trace headers.
	}
msg.Headers = NewOutgoingHeaders(msg.Headers, md)
}
func ExtractMQHeader(tracer opentracing.Tracer, msg *clusterSarama.ConsumerMessage) (opentracing.SpanContext, error) {
md := FromIncomingHeaders(msg.Headers)
carrier := TextMapRW{md}
return tracer.Extract(opentracing.TextMap, carrier)
}
// NewOutgoingHeaders converts metadata in map[string][]string form into
// []sarama.RecordHeader ([]byte key/value pairs).
func NewOutgoingHeaders(h []sarama.RecordHeader, md metadata.MD) []sarama.RecordHeader {
	// Pre-size the destination so copy actually preserves the existing headers
	// (copying into a nil slice would copy nothing).
	headers := make([]sarama.RecordHeader, len(h))
	copy(headers, h)
for key, val := range md {
for _, v := range val {
header := sarama.RecordHeader{
Key: []byte(key),
Value: []byte(v),
}
headers = append(headers, header)
}
}
return headers
}
func FromIncomingHeaders(headers []*clusterSarama.RecordHeader) metadata.MD {
md := metadata.MD{}
for _, header := range headers {
md.Append(string(header.Key), string(header.Value))
}
return md
}
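// Hedged usage sketch (not part of the original file): propagating a trace
// across Kafka end to end. The tracer, topic, payload, and operation names
// are illustrative assumptions; only InjectMQHeader/ExtractMQHeader come
// from this package.
func ExampleTracePropagation(tracer opentracing.Tracer, ctx context.Context, consumed *clusterSarama.ConsumerMessage) {
	// Producer side: start a span and copy its context into record headers.
	span := tracer.StartSpan("publish-event")
	defer span.Finish()
	msg := &sarama.ProducerMessage{Topic: "events", Value: sarama.StringEncoder("payload")}
	InjectMQHeader(tracer, span.Context(), ctx, msg)
	// msg can now be sent with a sarama producer; its headers carry the trace.

	// Consumer side: rebuild the parent span context from received headers.
	if sc, err := ExtractMQHeader(tracer, consumed); err == nil {
		child := tracer.StartSpan("consume-event", opentracing.ChildOf(sc))
		defer child.Finish()
	}
}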
|
detuning-checkpoint.py
|
'''
Define functions for the detuning of an atomic system
'''
from LASED.state import *
from LASED.constants import *
def delta(e, g):
"""Detunings between substates.
Parameters:
e (State): State object
g (State): State object
Returns:
float: Difference in angular frequency of states (Grad/s).
"""
return e.w - g.w
def angularFreq(wavelength):
"""Calculates the angular frequency in Grad/s from a given wavelength.
Parameters:
        wavelength (float): A wavelength in m (consistent with the 1e-9 Grad/s conversion below and with lambda_q in dopplerDelta)
Returns:
float: The angular frequency in Grad/s
"""
return 2*PI*C/wavelength*1e-9
def dopplerDelta(e, g, w_q, lambda_q, v_z):
""" The detuning between excited and ground states.
Accounts for a fixed motion of the atoms. Used between excited and ground states.
Parameters:
e (State): State object for excited state.
g (State): State object for ground state.
w_q (float): Angular frequency of exciting laser in rad/s.
lambda_q (float): Wavelength of exciting laser in m.
v_z (float): Velocity component of atoms in direction of laser in m/s.
Returns:
float: The detuning between ground and excited states including the doppler detuning due to a given atomic velocity.
"""
return w_q - v_z/lambda_q - e.w + g.w
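if __name__ == "__main__":
    # Hedged usage sketch (not part of the original module). delta() only
    # reads the `.w` attribute, so a SimpleNamespace stands in for a full
    # LASED State object here.
    from types import SimpleNamespace
    lambda_q = 780e-9                  # laser wavelength in m (Rb D2 line)
    w_q = angularFreq(lambda_q)        # ~2.41e6 Grad/s
    g = SimpleNamespace(w=0.0)         # ground state taken as zero reference
    e = SimpleNamespace(w=w_q)         # excited state resonant with the laser
    print(delta(e, g))                 # detuning between the two substates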
|
tag.go
|
package entity
import (
"strings"
"time"
"github.com/raismaulana/blogP/application/apperror"
)
type Tag struct {
ID int64 `gorm:"primary_key:auto_increment;column:id_tag"` //
Tag string `gorm:"type: varchar(200) not null unique"` //
CreatedAt time.Time `gorm:"not null;default:CURRENT_TIMESTAMP"` //
UpdatedAt time.Time `gorm:"not null;default:CURRENT_TIMESTAMP"` //
}
type TagRequest struct {
Tag string `` //
}
func NewTag(req TagRequest) (*Tag, error) {
	// validate: tag must be non-blank and at most 200 characters
	if strings.TrimSpace(req.Tag) == "" || len(strings.TrimSpace(req.Tag)) > 200 {
		return nil, apperror.TagMustNotEmpty
	}
obj := Tag{
Tag: req.Tag,
}
return &obj, nil
}
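// Hedged usage sketch (illustrative, not from the original file): build a Tag
// from request input; NewTag rejects blank or over-200-character tags.
func ExampleNewTag() {
	tag, err := NewTag(TagRequest{Tag: "golang"})
	if err != nil {
		return // e.g. apperror.TagMustNotEmpty for blank input
	}
	_ = tag.Tag // stored as provided; trimming is applied only during validation
}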
|
Profile.tsx
|
import { useContext } from 'react';
import { ChallengesContext } from '../contexts/ChanllengeContext';
import styles from '../styles/components/Profile.module.css'
export function Profile() {
const { level } = useContext(ChallengesContext)
return(
<div className={styles.profileContainer}>
<img src="https://github.com/raphaom35.png" alt="Raphael"/>
<div>
<strong>Raphael</strong>
<p>
<img src="icons/level.svg" alt="Level"/>
Level {level}</p>
</div>
</div>
);
}
|
Rodape.js
|
import React, { Component } from 'react';
import { Link } from 'react-router-dom';
class Rodape extends Component {
  render() {
return (
<div className="row mt-1 pagamento__rodape">
<div className="col">
<Link to="/">
<small>Retornar à Loja Demo</small>
</Link>
</div>
<div className="col text-right">
<a href="#">
<small>Ajuda</small>
</a>
</div>
</div>
);
}
}
export default Rodape;
|
cloud_service.py
|
from schematics.types import ModelType, StringType, PolyModelType
from spaceone.inventory.libs.schema.metadata.dynamic_field import TextDyField, DateTimeDyField, EnumDyField, \
ListDyField, SizeField, StateItemDyField
from spaceone.inventory.libs.schema.metadata.dynamic_layout import ItemDynamicLayout, TableDynamicLayout, \
ListDynamicLayout, SimpleTableDynamicLayout
from spaceone.inventory.libs.schema.cloud_service import CloudServiceResource, CloudServiceResponse, CloudServiceMeta
from spaceone.inventory.model.networksecuritygroup.data import NetworkSecurityGroup
'''
NETWORK_SECURITY_GROUP
'''
# TAB - Default
network_security_group_info_meta = ItemDynamicLayout.set_fields('Network Security Group', fields=[
TextDyField.data_source('Name', 'data.name'),
TextDyField.data_source('Resource ID', 'data.id'),
TextDyField.data_source('Resource Group', 'data.resource_group'),
TextDyField.data_source('Location', 'data.location'),
TextDyField.data_source('Subscription', 'data.subscription_name'),
TextDyField.data_source('Subscription ID', 'data.subscription_id'),
])
# TAB - Inbound Security Rules
network_security_group_inbound_security_rules = TableDynamicLayout.set_fields('Inbound Security Rules', 'data.inbound_security_rules', fields=[
TextDyField.data_source('Priority', 'priority'),
TextDyField.data_source('Name', 'name'),
TextDyField.data_source('Port', 'destination_port_range'),
TextDyField.data_source('Protocol', 'protocol'),
TextDyField.data_source('Source', 'source_address_prefix'),
TextDyField.data_source('Destination', 'destination_address_prefix'),
TextDyField.data_source('Action', 'access')
])
# TAB - Outbound Security Rules
network_security_group_outbound_security_rules = TableDynamicLayout.set_fields('Outbound Security Rules', 'data.outbound_security_rules', fields=[
TextDyField.data_source('Priority', 'priority'),
TextDyField.data_source('Name', 'name'),
TextDyField.data_source('Port', 'destination_port_range'),
TextDyField.data_source('Protocol', 'protocol'),
TextDyField.data_source('Source', 'source_address_prefix'),
TextDyField.data_source('Destination', 'destination_address_prefix'),
TextDyField.data_source('Action', 'access')
])
# TAB - Network Interfaces
network_security_group_network_interfaces = TableDynamicLayout.set_fields('Network Interfaces', 'data.network_interfaces', fields=[
TextDyField.data_source('Name', 'name'),
TextDyField.data_source('Public IP Address', 'public_ip_address'),
TextDyField.data_source('Private IP Address', 'private_ip_address'),
TextDyField.data_source('Virtual Machine', 'virtual_machine_display')
])
# TAB - Subnets
network_subnets = TableDynamicLayout.set_fields('Subnets', 'data.subnets', fields=[
TextDyField.data_source('Name', 'name'),
TextDyField.data_source('Address Range', 'address_prefix'),
TextDyField.data_source('Virtual Network', 'virtual_network')
])
# TAB - tags
network_security_group_tags = TableDynamicLayout.set_fields('Tags', 'data.tags', fields=[
TextDyField.data_source('Key', 'key'),
TextDyField.data_source('Value', 'value')
])
network_security_group_meta = CloudServiceMeta.set_layouts(
[network_security_group_info_meta, network_security_group_inbound_security_rules, network_security_group_outbound_security_rules,
network_security_group_network_interfaces, network_subnets, network_security_group_tags])
class ComputeResource(CloudServiceResource):
    cloud_service_group = StringType(default='Network')
class NetworkSecurityGroupResource(ComputeResource):
cloud_service_type = StringType(default='NetworkSecurityGroup')
data = ModelType(NetworkSecurityGroup)
_metadata = ModelType(CloudServiceMeta, default=network_security_group_meta, serialized_name='metadata')
name = StringType()
class NetworkSecurityGroupResponse(CloudServiceResponse):
resource = PolyModelType(NetworkSecurityGroupResource)
|
Cell.tsx
|
import styled, { css } from "react-emotion";
import { innerCellCss, cellAlignmentCss } from "../style";
export type TextAlign = "left" | "right" | "center";
export interface CellProps {
textAlign?: TextAlign;
children: React.ReactElement<HTMLElement> | string;
className?: string;
}
const alignmentStyle = (props: CellProps) => css`
${cellAlignmentCss(props.textAlign || "left")};
`;
export default styled("div")`
${innerCellCss};
${alignmentStyle};
`;
|
user_score_test.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Test for the user_score example."""
# pytype: skip-file
from __future__ import absolute_import
import logging
import unittest
import apache_beam as beam
from apache_beam.examples.complete.game import user_score
from apache_beam.testing.test_pipeline import TestPipeline
from apache_beam.testing.util import assert_that
from apache_beam.testing.util import equal_to
class UserScoreTest(unittest.TestCase):
SAMPLE_DATA = [
'user1_team1,team1,18,1447686663000,2015-11-16 15:11:03.921',
'user1_team1,team1,18,1447690263000,2015-11-16 16:11:03.921',
'user2_team2,team2,2,1447690263000,2015-11-16 16:11:03.955',
'user3_team3,team3,8,1447690263000,2015-11-16 16:11:03.955',
'user4_team3,team3,5,1447690263000,2015-11-16 16:11:03.959',
'user1_team1,team1,14,1447697463000,2015-11-16 18:11:03.955',
]
def test_user_score(self):
with TestPipeline() as p:
result = (
p | beam.Create(UserScoreTest.SAMPLE_DATA) | user_score.UserScore())
assert_that(
result,
equal_to([('user1_team1', 50), ('user2_team2', 2), ('user3_team3', 8),
('user4_team3', 5)]))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
|
room.go
|
package templates
import (
"fmt"
"html/template"
"os"
"scummatlas"
l "scummatlas/condlog"
"strings"
)
type roomData struct {
Background string
Boxes [][4]scummatlas.Point
scummatlas.Room
}
func WriteRoom(room scummatlas.Room, outputdir string) {
t := template.Must(template.ParseFiles(
htmlPath+"room.html",
htmlPath+"partials.html"))
bgPath := fmt.Sprintf("./img_bg/room%02d_bg.png", room.Id)
htmlPath := fmt.Sprintf("%v/room%02d.html", outputdir, room.Id)
file, err := os.Create(htmlPath)
l.Log("template", "Create "+htmlPath)
	if err != nil {
		panic("Can't create room file, " + err.Error())
	}
var boxes [][4]scummatlas.Point
for _, v := range room.Boxes {
boxes = append(boxes, v.Corners())
}
data := roomData{
bgPath,
boxes,
room,
}
data.Name = strings.Title(data.Name)
t.Execute(file, data)
}
/* Helper functions */
func (room roomData) ZplanesURL() (urls []string) {
for i := len(room.Zplanes); i > 0; i-- {
urls = append(urls, fmt.Sprintf("img_bg/room%02d_bg-zplane%d.png", room.Id, i))
}
return
}
func (room roomData) PaletteHex() []string {
var hexes []string
hexes = make([]string, len(room.Palette))
for i, color := range room.Palette {
r, g, b, _ := color.RGBA()
hexes[i] = fmt.Sprintf("%02x%02x%02x", uint8(r), uint8(g), uint8(b))
}
return hexes
}
func (room roomData) DoubleHeight() int {
return room.Height * 2
}
func (room roomData) ViewBox() string {
return fmt.Sprintf("0 0 %v %v", room.Width, room.Height)
}
func (room roomData) DoubleWidth() int {
return room.Width * 2
}
|
servicenetwork.go
|
/*
* Copyright 2018 Insolar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package configuration
// Service is configuration struct for servicenetwork.Service.
type Service struct {
}
// ServiceNetwork is configuration for ServiceNetwork.
type ServiceNetwork struct {
Service Service
}
// NewServiceNetwork creates a new ServiceNetwork configuration.
func NewServiceNetwork() ServiceNetwork {
return ServiceNetwork{}
}
|
0001_initial.py
|
# Generated by Django 3.0.5 on 2020-05-05 15:14
from django.db import migrations, models
class Migration(migrations.Migration):
    initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=64)),
('price', models.DecimalField(decimal_places=3, max_digits=20)),
],
),
]
|
main.js
|
(function(){var require = function (file, cwd) {
var resolved = require.resolve(file, cwd || '/');
var mod = require.modules[resolved];
if (!mod) throw new Error(
'Failed to resolve module ' + file + ', tried ' + resolved
);
var cached = require.cache[resolved];
var res = cached? cached.exports : mod();
return res;
};
require.paths = [];
require.modules = {};
require.cache = {};
require.extensions = [".js",".coffee",".json"];
require._core = {
'assert': true,
'events': true,
'fs': true,
'path': true,
'vm': true
};
require.resolve = (function () {
return function (x, cwd) {
if (!cwd) cwd = '/';
if (require._core[x]) return x;
var path = require.modules.path();
cwd = path.resolve('/', cwd);
var y = cwd || '/';
if (x.match(/^(?:\.\.?\/|\/)/)) {
var m = loadAsFileSync(path.resolve(y, x))
|| loadAsDirectorySync(path.resolve(y, x));
if (m) return m;
}
var n = loadNodeModulesSync(x, y);
if (n) return n;
throw new Error("Cannot find module '" + x + "'");
function loadAsFileSync (x) {
x = path.normalize(x);
if (require.modules[x]) {
return x;
}
for (var i = 0; i < require.extensions.length; i++) {
var ext = require.extensions[i];
if (require.modules[x + ext]) return x + ext;
}
}
function loadAsDirectorySync (x) {
x = x.replace(/\/+$/, '');
var pkgfile = path.normalize(x + '/package.json');
if (require.modules[pkgfile]) {
var pkg = require.modules[pkgfile]();
var b = pkg.browserify;
if (typeof b === 'object' && b.main) {
var m = loadAsFileSync(path.resolve(x, b.main));
if (m) return m;
}
else if (typeof b === 'string') {
var m = loadAsFileSync(path.resolve(x, b));
if (m) return m;
}
else if (pkg.main) {
var m = loadAsFileSync(path.resolve(x, pkg.main));
if (m) return m;
}
}
return loadAsFileSync(x + '/index');
}
function loadNodeModulesSync (x, start) {
var dirs = nodeModulesPathsSync(start);
for (var i = 0; i < dirs.length; i++) {
var dir = dirs[i];
var m = loadAsFileSync(dir + '/' + x);
if (m) return m;
var n = loadAsDirectorySync(dir + '/' + x);
if (n) return n;
}
var m = loadAsFileSync(x);
if (m) return m;
}
function nodeModulesPathsSync (start) {
var parts;
if (start === '/') parts = [ '' ];
else parts = path.normalize(start).split('/');
var dirs = [];
for (var i = parts.length - 1; i >= 0; i--) {
if (parts[i] === 'node_modules') continue;
var dir = parts.slice(0, i + 1).join('/') + '/node_modules';
dirs.push(dir);
}
return dirs;
}
};
})();
require.alias = function (from, to) {
var path = require.modules.path();
var res = null;
try {
res = require.resolve(from + '/package.json', '/');
}
catch (err) {
res = require.resolve(from, '/');
}
var basedir = path.dirname(res);
var keys = (Object.keys || function (obj) {
var res = [];
for (var key in obj) res.push(key);
return res;
})(require.modules);
for (var i = 0; i < keys.length; i++) {
var key = keys[i];
if (key.slice(0, basedir.length + 1) === basedir + '/') {
var f = key.slice(basedir.length);
require.modules[to + f] = require.modules[basedir + f];
}
else if (key === basedir) {
require.modules[to] = require.modules[basedir];
}
}
};
(function () {
var process = {};
var global = typeof window !== 'undefined' ? window : {};
var definedProcess = false;
require.define = function (filename, fn) {
if (!definedProcess && require.modules.__browserify_process) {
process = require.modules.__browserify_process();
definedProcess = true;
}
var dirname = require._core[filename]
? ''
: require.modules.path().dirname(filename)
;
var require_ = function (file) {
var requiredModule = require(file, dirname);
var cached = require.cache[require.resolve(file, dirname)];
if (cached && cached.parent === null) {
cached.parent = module_;
}
return requiredModule;
};
require_.resolve = function (name) {
return require.resolve(name, dirname);
};
require_.modules = require.modules;
require_.define = require.define;
require_.cache = require.cache;
var module_ = {
id : filename,
filename: filename,
exports : {},
loaded : false,
parent: null
};
require.modules[filename] = function () {
require.cache[filename] = module_;
fn.call(
module_.exports,
require_,
module_,
module_.exports,
dirname,
filename,
process,
global
);
module_.loaded = true;
return module_.exports;
};
};
})();
require.define("path",function(require,module,exports,__dirname,__filename,process,global){function filter (xs, fn) {
var res = [];
for (var i = 0; i < xs.length; i++) {
if (fn(xs[i], i, xs)) res.push(xs[i]);
}
return res;
}
// resolves . and .. elements in a path array with directory names there
// must be no slashes, empty elements, or device names (c:\) in the array
// (so also no leading and trailing slashes - it does not distinguish
// relative and absolute paths)
function normalizeArray(parts, allowAboveRoot) {
// if the path tries to go above the root, `up` ends up > 0
var up = 0;
for (var i = parts.length; i >= 0; i--) {
var last = parts[i];
if (last == '.') {
parts.splice(i, 1);
} else if (last === '..') {
parts.splice(i, 1);
up++;
} else if (up) {
parts.splice(i, 1);
up--;
}
}
// if the path is allowed to go above the root, restore leading ..s
if (allowAboveRoot) {
for (; up--; up) {
parts.unshift('..');
}
}
return parts;
}
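// e.g. normalizeArray(['a','b','..','c','.'], false) -> ['a','c'];
// with allowAboveRoot=true, '..' segments that climb past the root are kept.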
// Regex to split a filename into [*, dir, basename, ext]
// posix version
var splitPathRe = /^(.+\/(?!$)|\/)?((?:.+?)?(\.[^.]*)?)$/;
// path.resolve([from ...], to)
// posix version
exports.resolve = function() {
var resolvedPath = '',
resolvedAbsolute = false;
for (var i = arguments.length; i >= -1 && !resolvedAbsolute; i--) {
var path = (i >= 0)
? arguments[i]
: process.cwd();
// Skip empty and invalid entries
if (typeof path !== 'string' || !path) {
continue;
}
resolvedPath = path + '/' + resolvedPath;
resolvedAbsolute = path.charAt(0) === '/';
}
// At this point the path should be resolved to a full absolute path, but
// handle relative paths to be safe (might happen when process.cwd() fails)
// Normalize the path
resolvedPath = normalizeArray(filter(resolvedPath.split('/'), function(p) {
return !!p;
}), !resolvedAbsolute).join('/');
return ((resolvedAbsolute ? '/' : '') + resolvedPath) || '.';
};
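// e.g. exports.resolve('/foo/bar', './baz') -> '/foo/bar/baz'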
// path.normalize(path)
// posix version
exports.normalize = function(path) {
var isAbsolute = path.charAt(0) === '/',
trailingSlash = path.slice(-1) === '/';
// Normalize the path
path = normalizeArray(filter(path.split('/'), function(p) {
return !!p;
}), !isAbsolute).join('/');
if (!path && !isAbsolute) {
path = '.';
}
if (path && trailingSlash) {
path += '/';
}
return (isAbsolute ? '/' : '') + path;
};
// posix version
exports.join = function() {
var paths = Array.prototype.slice.call(arguments, 0);
return exports.normalize(filter(paths, function(p, index) {
return p && typeof p === 'string';
}).join('/'));
};
exports.dirname = function(path) {
var dir = splitPathRe.exec(path)[1] || '';
var isWindows = false;
if (!dir) {
// No dirname
return '.';
} else if (dir.length === 1 ||
(isWindows && dir.length <= 3 && dir.charAt(1) === ':')) {
// It is just a slash or a drive letter with a slash
return dir;
} else {
// It is a full dirname, strip trailing slash
return dir.substring(0, dir.length - 1);
}
};
exports.basename = function(path, ext) {
var f = splitPathRe.exec(path)[2] || '';
// TODO: make this comparison case-insensitive on windows?
if (ext && f.substr(-1 * ext.length) === ext) {
f = f.substr(0, f.length - ext.length);
}
return f;
};
exports.extname = function(path) {
return splitPathRe.exec(path)[3] || '';
};
exports.relative = function(from, to) {
from = exports.resolve(from).substr(1);
to = exports.resolve(to).substr(1);
function trim(arr) {
var start = 0;
for (; start < arr.length; start++) {
if (arr[start] !== '') break;
}
var end = arr.length - 1;
for (; end >= 0; end--) {
if (arr[end] !== '') break;
}
if (start > end) return [];
return arr.slice(start, end - start + 1);
}
var fromParts = trim(from.split('/'));
var toParts = trim(to.split('/'));
var length = Math.min(fromParts.length, toParts.length);
var samePartsLength = length;
for (var i = 0; i < length; i++) {
if (fromParts[i] !== toParts[i]) {
samePartsLength = i;
break;
}
}
var outputParts = [];
for (var i = samePartsLength; i < fromParts.length; i++) {
outputParts.push('..');
}
outputParts = outputParts.concat(toParts.slice(samePartsLength));
return outputParts.join('/');
};
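// e.g. exports.relative('/data/orandea/test/aaa', '/data/orandea/impl/bbb')
// -> '../../impl/bbb'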
});
require.define("__browserify_process",function(require,module,exports,__dirname,__filename,process,global){var process = module.exports = {};
process.nextTick = (function () {
var canSetImmediate = typeof window !== 'undefined'
&& window.setImmediate;
var canPost = typeof window !== 'undefined'
&& window.postMessage && window.addEventListener
;
if (canSetImmediate) {
return function (f) { return window.setImmediate(f) };
}
if (canPost) {
var queue = [];
window.addEventListener('message', function (ev) {
if (ev.source === window && ev.data === 'browserify-tick') {
ev.stopPropagation();
if (queue.length > 0) {
var fn = queue.shift();
fn();
}
}
}, true);
return function nextTick(fn) {
queue.push(fn);
window.postMessage('browserify-tick', '*');
};
}
return function nextTick(fn) {
setTimeout(fn, 0);
};
})();
process.title = 'browser';
process.browser = true;
process.env = {};
process.argv = [];
process.binding = function (name) {
if (name === 'evals') return (require)('vm')
else throw new Error('No such module. (Possibly not yet loaded)')
};
(function () {
var cwd = '/';
var path;
process.cwd = function () { return cwd };
process.chdir = function (dir) {
if (!path) path = require('path');
cwd = path.resolve(dir, cwd);
};
})();
});
require.define("/node_modules/async/package.json",function(require,module,exports,__dirname,__filename,process,global){module.exports = {"main":"dist/async.js"}
});
require.define("/node_modules/async/dist/async.js",function(require,module,exports,__dirname,__filename,process,global){(function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports) :
typeof define === 'function' && define.amd ? define(['exports'], factory) :
(factory((global.async = global.async || {})));
}(this, (function (exports) { 'use strict';
function slice(arrayLike, start) {
start = start|0;
var newLen = Math.max(arrayLike.length - start, 0);
var newArr = Array(newLen);
for(var idx = 0; idx < newLen; idx++) {
newArr[idx] = arrayLike[start + idx];
}
return newArr;
}
/**
* Creates a continuation function with some arguments already applied.
*
* Useful as a shorthand when combined with other control flow functions. Any
* arguments passed to the returned function are added to the arguments
* originally passed to apply.
*
* @name apply
* @static
* @memberOf module:Utils
* @method
* @category Util
* @param {Function} fn - The function you want to eventually apply all
* arguments to. Invokes with (arguments...).
* @param {...*} arguments... - Any number of arguments to automatically apply
* when the continuation is called.
* @returns {Function} the partially-applied function
* @example
*
* // using apply
* async.parallel([
* async.apply(fs.writeFile, 'testfile1', 'test1'),
* async.apply(fs.writeFile, 'testfile2', 'test2')
* ]);
*
*
* // the same process without using apply
* async.parallel([
* function(callback) {
* fs.writeFile('testfile1', 'test1', callback);
* },
* function(callback) {
* fs.writeFile('testfile2', 'test2', callback);
* }
* ]);
*
* // It's possible to pass any number of additional arguments when calling the
* // continuation:
*
* node> var fn = async.apply(sys.puts, 'one');
* node> fn('two', 'three');
* one
* two
* three
*/
var apply = function(fn/*, ...args*/) {
var args = slice(arguments, 1);
return function(/*callArgs*/) {
var callArgs = slice(arguments);
return fn.apply(null, args.concat(callArgs));
};
};
var initialParams = function (fn) {
return function (/*...args, callback*/) {
var args = slice(arguments);
var callback = args.pop();
fn.call(this, args, callback);
};
};
/**
* Checks if `value` is the
* [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types)
* of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`)
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an object, else `false`.
* @example
*
* _.isObject({});
* // => true
*
* _.isObject([1, 2, 3]);
* // => true
*
* _.isObject(_.noop);
* // => true
*
* _.isObject(null);
* // => false
*/
function isObject(value) {
var type = typeof value;
return value != null && (type == 'object' || type == 'function');
}
var hasSetImmediate = typeof setImmediate === 'function' && setImmediate;
var hasNextTick = typeof process === 'object' && typeof process.nextTick === 'function';
function fallback(fn) {
setTimeout(fn, 0);
}
function wrap(defer) {
return function (fn/*, ...args*/) {
var args = slice(arguments, 1);
defer(function () {
fn.apply(null, args);
});
};
}
var _defer;
if (hasSetImmediate) {
_defer = setImmediate;
} else if (hasNextTick) {
_defer = process.nextTick;
} else {
_defer = fallback;
}
var setImmediate$1 = wrap(_defer);
/**
* Take a sync function and make it async, passing its return value to a
* callback. This is useful for plugging sync functions into a waterfall,
* series, or other async functions. Any arguments passed to the generated
* function will be passed to the wrapped function (except for the final
* callback argument). Errors thrown will be passed to the callback.
*
* If the function passed to `asyncify` returns a Promise, that promises's
* resolved/rejected state will be used to call the callback, rather than simply
* the synchronous return value.
*
* This also means you can asyncify ES2017 `async` functions.
*
* @name asyncify
* @static
* @memberOf module:Utils
* @method
* @alias wrapSync
* @category Util
* @param {Function} func - The synchronous function, or Promise-returning
* function to convert to an {@link AsyncFunction}.
* @returns {AsyncFunction} An asynchronous wrapper of the `func`. To be
* invoked with `(args..., callback)`.
* @example
*
* // passing a regular synchronous function
* async.waterfall([
* async.apply(fs.readFile, filename, "utf8"),
* async.asyncify(JSON.parse),
* function (data, next) {
* // data is the result of parsing the text.
* // If there was a parsing error, it would have been caught.
* }
* ], callback);
*
* // passing a function returning a promise
* async.waterfall([
* async.apply(fs.readFile, filename, "utf8"),
* async.asyncify(function (contents) {
* return db.model.create(contents);
* }),
* function (model, next) {
* // `model` is the instantiated model object.
* // If there was an error, this function would be skipped.
* }
* ], callback);
*
* // es2017 example, though `asyncify` is not needed if your JS environment
* // supports async functions out of the box
* var q = async.queue(async.asyncify(async function(file) {
* var intermediateStep = await processFile(file);
* return await somePromise(intermediateStep)
* }));
*
* q.push(files);
*/
function asyncify(func) {
return initialParams(function (args, callback) {
var result;
try {
result = func.apply(this, args);
} catch (e) {
return callback(e);
}
// if result is Promise object
if (isObject(result) && typeof result.then === 'function') {
result.then(function(value) {
invokeCallback(callback, null, value);
}, function(err) {
invokeCallback(callback, err.message ? err : new Error(err));
});
} else {
callback(null, result);
}
});
}
function invokeCallback(callback, error, value) {
try {
callback(error, value);
} catch (e) {
setImmediate$1(rethrow, e);
}
}
function rethrow(error) {
throw error;
}
var supportsSymbol = typeof Symbol === 'function';
function isAsync(fn) {
return supportsSymbol && fn[Symbol.toStringTag] === 'AsyncFunction';
}
function wrapAsync(asyncFn) {
return isAsync(asyncFn) ? asyncify(asyncFn) : asyncFn;
}
function applyEach$1(eachfn) {
return function(fns/*, ...args*/) {
var args = slice(arguments, 1);
var go = initialParams(function(args, callback) {
var that = this;
return eachfn(fns, function (fn, cb) {
wrapAsync(fn).apply(that, args.concat(cb));
}, callback);
});
if (args.length) {
return go.apply(this, args);
}
else {
return go;
}
};
}
/** Detect free variable `global` from Node.js. */
var freeGlobal = typeof global == 'object' && global && global.Object === Object && global;
/** Detect free variable `self`. */
var freeSelf = typeof self == 'object' && self && self.Object === Object && self;
/** Used as a reference to the global object. */
var root = freeGlobal || freeSelf || Function('return this')();
/** Built-in value references. */
var Symbol$1 = root.Symbol;
/** Used for built-in method references. */
var objectProto = Object.prototype;
/** Used to check objects for own properties. */
var hasOwnProperty = objectProto.hasOwnProperty;
/**
* Used to resolve the
* [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring)
* of values.
*/
var nativeObjectToString = objectProto.toString;
/** Built-in value references. */
var symToStringTag$1 = Symbol$1 ? Symbol$1.toStringTag : undefined;
/**
* A specialized version of `baseGetTag` which ignores `Symbol.toStringTag` values.
*
* @private
* @param {*} value The value to query.
* @returns {string} Returns the raw `toStringTag`.
*/
function getRawTag(value) {
var isOwn = hasOwnProperty.call(value, symToStringTag$1),
tag = value[symToStringTag$1];
try {
value[symToStringTag$1] = undefined;
var unmasked = true;
} catch (e) {}
var result = nativeObjectToString.call(value);
if (unmasked) {
if (isOwn) {
value[symToStringTag$1] = tag;
} else {
delete value[symToStringTag$1];
}
}
return result;
}
/** Used for built-in method references. */
var objectProto$1 = Object.prototype;
/**
* Used to resolve the
* [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring)
* of values.
*/
var nativeObjectToString$1 = objectProto$1.toString;
/**
* Converts `value` to a string using `Object.prototype.toString`.
*
* @private
* @param {*} value The value to convert.
* @returns {string} Returns the converted string.
*/
function objectToString(value) {
return nativeObjectToString$1.call(value);
}
/** `Object#toString` result references. */
var nullTag = '[object Null]';
var undefinedTag = '[object Undefined]';
/** Built-in value references. */
var symToStringTag = Symbol$1 ? Symbol$1.toStringTag : undefined;
/**
* The base implementation of `getTag` without fallbacks for buggy environments.
*
* @private
* @param {*} value The value to query.
* @returns {string} Returns the `toStringTag`.
*/
function baseGetTag(value) {
if (value == null) {
return value === undefined ? undefinedTag : nullTag;
}
return (symToStringTag && symToStringTag in Object(value))
? getRawTag(value)
: objectToString(value);
}
/** `Object#toString` result references. */
var asyncTag = '[object AsyncFunction]';
var funcTag = '[object Function]';
var genTag = '[object GeneratorFunction]';
var proxyTag = '[object Proxy]';
/**
* Checks if `value` is classified as a `Function` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a function, else `false`.
* @example
*
* _.isFunction(_);
* // => true
*
* _.isFunction(/abc/);
* // => false
*/
function isFunction(value) {
if (!isObject(value)) {
return false;
}
// The use of `Object#toString` avoids issues with the `typeof` operator
// in Safari 9 which returns 'object' for typed arrays and other constructors.
var tag = baseGetTag(value);
return tag == funcTag || tag == genTag || tag == asyncTag || tag == proxyTag;
}
/** Used as references for various `Number` constants. */
var MAX_SAFE_INTEGER = 9007199254740991;
/**
* Checks if `value` is a valid array-like length.
*
* **Note:** This method is loosely based on
* [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a valid length, else `false`.
* @example
*
* _.isLength(3);
* // => true
*
* _.isLength(Number.MIN_VALUE);
* // => false
*
* _.isLength(Infinity);
* // => false
*
* _.isLength('3');
* // => false
*/
function isLength(value) {
return typeof value == 'number' &&
value > -1 && value % 1 == 0 && value <= MAX_SAFE_INTEGER;
}
/**
* Checks if `value` is array-like. A value is considered array-like if it's
* not a function and has a `value.length` that's an integer greater than or
* equal to `0` and less than or equal to `Number.MAX_SAFE_INTEGER`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is array-like, else `false`.
* @example
*
* _.isArrayLike([1, 2, 3]);
* // => true
*
* _.isArrayLike(document.body.children);
* // => true
*
* _.isArrayLike('abc');
* // => true
*
* _.isArrayLike(_.noop);
* // => false
*/
function isArrayLike(value) {
return value != null && isLength(value.length) && !isFunction(value);
}
// A temporary value used to identify if the loop should be broken.
// See #1064, #1293
var breakLoop = {};
/**
* This method returns `undefined`.
*
* @static
* @memberOf _
* @since 2.3.0
* @category Util
* @example
*
* _.times(2, _.noop);
* // => [undefined, undefined]
*/
function noop() {
// No operation performed.
}
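// Wraps `fn` so that it can be called at most once; any further
// invocations are silently ignored.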
function once(fn) {
return function () {
if (fn === null) return;
var callFn = fn;
fn = null;
callFn.apply(this, arguments);
};
}
var iteratorSymbol = typeof Symbol === 'function' && Symbol.iterator;
var getIterator = function (coll) {
return iteratorSymbol && coll[iteratorSymbol] && coll[iteratorSymbol]();
};
/**
* The base implementation of `_.times` without support for iteratee shorthands
* or max array length checks.
*
* @private
* @param {number} n The number of times to invoke `iteratee`.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array} Returns the array of results.
*/
function baseTimes(n, iteratee) {
var index = -1,
result = Array(n);
while (++index < n) {
result[index] = iteratee(index);
}
return result;
}
/**
* Checks if `value` is object-like. A value is object-like if it's not `null`
* and has a `typeof` result of "object".
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is object-like, else `false`.
* @example
*
* _.isObjectLike({});
* // => true
*
* _.isObjectLike([1, 2, 3]);
* // => true
*
* _.isObjectLike(_.noop);
* // => false
*
* _.isObjectLike(null);
* // => false
*/
function isObjectLike(value) {
return value != null && typeof value == 'object';
}
/** `Object#toString` result references. */
var argsTag = '[object Arguments]';
/**
* The base implementation of `_.isArguments`.
*
* @private
* @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is an `arguments` object,
 *  else `false`.
 */
function baseIsArguments(value) {
return isObjectLike(value) && baseGetTag(value) == argsTag;
}
/** Used for built-in method references. */
var objectProto$3 = Object.prototype;
/** Used to check objects for own properties. */
var hasOwnProperty$2 = objectProto$3.hasOwnProperty;
/** Built-in value references. */
var propertyIsEnumerable = objectProto$3.propertyIsEnumerable;
/**
* Checks if `value` is likely an `arguments` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an `arguments` object,
* else `false`.
* @example
*
* _.isArguments(function() { return arguments; }());
* // => true
*
* _.isArguments([1, 2, 3]);
* // => false
*/
var isArguments = baseIsArguments(function() { return arguments; }()) ? baseIsArguments : function(value) {
return isObjectLike(value) && hasOwnProperty$2.call(value, 'callee') &&
!propertyIsEnumerable.call(value, 'callee');
};
/**
* Checks if `value` is classified as an `Array` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an array, else `false`.
* @example
*
* _.isArray([1, 2, 3]);
* // => true
*
* _.isArray(document.body.children);
* // => false
*
* _.isArray('abc');
* // => false
*
* _.isArray(_.noop);
* // => false
*/
var isArray = Array.isArray;
/**
* This method returns `false`.
*
* @static
* @memberOf _
* @since 4.13.0
* @category Util
* @returns {boolean} Returns `false`.
* @example
*
* _.times(2, _.stubFalse);
* // => [false, false]
*/
function stubFalse() {
return false;
}
/** Detect free variable `exports`. */
var freeExports = typeof exports == 'object' && exports && !exports.nodeType && exports;
/** Detect free variable `module`. */
var freeModule = freeExports && typeof module == 'object' && module && !module.nodeType && module;
/** Detect the popular CommonJS extension `module.exports`. */
var moduleExports = freeModule && freeModule.exports === freeExports;
/** Built-in value references. */
var Buffer = moduleExports ? root.Buffer : undefined;
/* Built-in method references for those with the same name as other `lodash` methods. */
var nativeIsBuffer = Buffer ? Buffer.isBuffer : undefined;
/**
* Checks if `value` is a buffer.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a buffer, else `false`.
* @example
*
* _.isBuffer(new Buffer(2));
* // => true
*
* _.isBuffer(new Uint8Array(2));
* // => false
*/
var isBuffer = nativeIsBuffer || stubFalse;
/** Used as references for various `Number` constants. */
var MAX_SAFE_INTEGER$1 = 9007199254740991;
/** Used to detect unsigned integer values. */
var reIsUint = /^(?:0|[1-9]\d*)$/;
/**
* Checks if `value` is a valid array-like index.
*
* @private
* @param {*} value The value to check.
* @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index.
* @returns {boolean} Returns `true` if `value` is a valid index, else `false`.
*/
function isIndex(value, length) {
length = length == null ? MAX_SAFE_INTEGER$1 : length;
return !!length &&
(typeof value == 'number' || reIsUint.test(value)) &&
(value > -1 && value % 1 == 0 && value < length);
}
/** `Object#toString` result references. */
var argsTag$1 = '[object Arguments]';
var arrayTag = '[object Array]';
var boolTag = '[object Boolean]';
var dateTag = '[object Date]';
var errorTag = '[object Error]';
var funcTag$1 = '[object Function]';
var mapTag = '[object Map]';
var numberTag = '[object Number]';
var objectTag = '[object Object]';
var regexpTag = '[object RegExp]';
var setTag = '[object Set]';
var stringTag = '[object String]';
var weakMapTag = '[object WeakMap]';
var arrayBufferTag = '[object ArrayBuffer]';
var dataViewTag = '[object DataView]';
var float32Tag = '[object Float32Array]';
var float64Tag = '[object Float64Array]';
var int8Tag = '[object Int8Array]';
var int16Tag = '[object Int16Array]';
var int32Tag = '[object Int32Array]';
var uint8Tag = '[object Uint8Array]';
var uint8ClampedTag = '[object Uint8ClampedArray]';
var uint16Tag = '[object Uint16Array]';
var uint32Tag = '[object Uint32Array]';
/** Used to identify `toStringTag` values of typed arrays. */
var typedArrayTags = {};
typedArrayTags[float32Tag] = typedArrayTags[float64Tag] =
typedArrayTags[int8Tag] = typedArrayTags[int16Tag] =
typedArrayTags[int32Tag] = typedArrayTags[uint8Tag] =
typedArrayTags[uint8ClampedTag] = typedArrayTags[uint16Tag] =
typedArrayTags[uint32Tag] = true;
typedArrayTags[argsTag$1] = typedArrayTags[arrayTag] =
typedArrayTags[arrayBufferTag] = typedArrayTags[boolTag] =
typedArrayTags[dataViewTag] = typedArrayTags[dateTag] =
typedArrayTags[errorTag] = typedArrayTags[funcTag$1] =
typedArrayTags[mapTag] = typedArrayTags[numberTag] =
typedArrayTags[objectTag] = typedArrayTags[regexpTag] =
typedArrayTags[setTag] = typedArrayTags[stringTag] =
typedArrayTags[weakMapTag] = false;
/**
* The base implementation of `_.isTypedArray` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a typed array, else `false`.
*/
function baseIsTypedArray(value) {
return isObjectLike(value) &&
isLength(value.length) && !!typedArrayTags[baseGetTag(value)];
}
/**
* The base implementation of `_.unary` without support for storing metadata.
*
* @private
* @param {Function} func The function to cap arguments for.
* @returns {Function} Returns the new capped function.
*/
function baseUnary(func) {
return function(value) {
return func(value);
};
}
/** Detect free variable `exports`. */
var freeExports$1 = typeof exports == 'object' && exports && !exports.nodeType && exports;
/** Detect free variable `module`. */
var freeModule$1 = freeExports$1 && typeof module == 'object' && module && !module.nodeType && module;
/** Detect the popular CommonJS extension `module.exports`. */
var moduleExports$1 = freeModule$1 && freeModule$1.exports === freeExports$1;
/** Detect free variable `process` from Node.js. */
var freeProcess = moduleExports$1 && freeGlobal.process;
/** Used to access faster Node.js helpers. */
var nodeUtil = (function() {
try {
return freeProcess && freeProcess.binding && freeProcess.binding('util');
} catch (e) {}
}());
/* Node.js helper references. */
var nodeIsTypedArray = nodeUtil && nodeUtil.isTypedArray;
/**
* Checks if `value` is classified as a typed array.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a typed array, else `false`.
* @example
*
* _.isTypedArray(new Uint8Array);
* // => true
*
* _.isTypedArray([]);
* // => false
*/
var isTypedArray = nodeIsTypedArray ? baseUnary(nodeIsTypedArray) : baseIsTypedArray;
/** Used for built-in method references. */
var objectProto$2 = Object.prototype;
/** Used to check objects for own properties. */
var hasOwnProperty$1 = objectProto$2.hasOwnProperty;
/**
* Creates an array of the enumerable property names of the array-like `value`.
*
* @private
* @param {*} value The value to query.
* @param {boolean} inherited Specify returning inherited property names.
* @returns {Array} Returns the array of property names.
*/
function arrayLikeKeys(value, inherited) {
var isArr = isArray(value),
isArg = !isArr && isArguments(value),
isBuff = !isArr && !isArg && isBuffer(value),
isType = !isArr && !isArg && !isBuff && isTypedArray(value),
skipIndexes = isArr || isArg || isBuff || isType,
result = skipIndexes ? baseTimes(value.length, String) : [],
length = result.length;
for (var key in value) {
if ((inherited || hasOwnProperty$1.call(value, key)) &&
!(skipIndexes && (
// Safari 9 has enumerable `arguments.length` in strict mode.
key == 'length' ||
// Node.js 0.10 has enumerable non-index properties on buffers.
(isBuff && (key == 'offset' || key == 'parent')) ||
// PhantomJS 2 has enumerable non-index properties on typed arrays.
(isType && (key == 'buffer' || key == 'byteLength' || key == 'byteOffset')) ||
// Skip index properties.
isIndex(key, length)
))) {
result.push(key);
}
}
return result;
}
/** Used for built-in method references. */
var objectProto$5 = Object.prototype;
/**
* Checks if `value` is likely a prototype object.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a prototype, else `false`.
*/
function isPrototype(value) {
var Ctor = value && value.constructor,
proto = (typeof Ctor == 'function' && Ctor.prototype) || objectProto$5;
return value === proto;
}
/**
* Creates a unary function that invokes `func` with its argument transformed.
*
* @private
* @param {Function} func The function to wrap.
* @param {Function} transform The argument transform.
* @returns {Function} Returns the new function.
*/
function overArg(func, transform) {
return function(arg) {
return func(transform(arg));
};
}
/* Built-in method references for those with the same name as other `lodash` methods. */
var nativeKeys = overArg(Object.keys, Object);
/** Used for built-in method references. */
var objectProto$4 = Object.prototype;
/** Used to check objects for own properties. */
var hasOwnProperty$3 = objectProto$4.hasOwnProperty;
/**
* The base implementation of `_.keys` which doesn't treat sparse arrays as dense.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names.
*/
function baseKeys(object) {
if (!isPrototype(object)) {
return nativeKeys(object);
}
var result = [];
for (var key in Object(object)) {
if (hasOwnProperty$3.call(object, key) && key != 'constructor') {
result.push(key);
}
}
return result;
}
/**
* Creates an array of the own enumerable property names of `object`.
*
* **Note:** Non-object values are coerced to objects. See the
* [ES spec](http://ecma-international.org/ecma-262/7.0/#sec-object.keys)
* for more details.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.keys(new Foo);
* // => ['a', 'b'] (iteration order is not guaranteed)
*
* _.keys('hi');
* // => ['0', '1']
*/
function keys(object) {
return isArrayLike(object) ? arrayLikeKeys(object) : baseKeys(object);
}
function createArrayIterator(coll) {
var i = -1;
var len = coll.length;
return function next() {
return ++i < len ? {value: coll[i], key: i} : null;
}
}
function createES2015Iterator(iterator) {
var i = -1;
return function next() {
var item = iterator.next();
if (item.done)
return null;
i++;
return {value: item.value, key: i};
}
}
function createObjectIterator(obj) {
var okeys = keys(obj);
var i = -1;
var len = okeys.length;
return function next() {
var key = okeys[++i];
return i < len ? {value: obj[key], key: key} : null;
};
}
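// Picks the iterator implementation suited to `coll`: index-based for
// array-likes, the native protocol for ES2015 iterables, and key-based
// for plain objects.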
function iterator(coll) {
if (isArrayLike(coll)) {
return createArrayIterator(coll);
}
var iterator = getIterator(coll);
return iterator ? createES2015Iterator(iterator) : createObjectIterator(coll);
}
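// Like `once`, but throws if the wrapped callback fires more than once,
// surfacing iteratees that call their callback multiple times.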
function onlyOnce(fn) {
return function() {
if (fn === null) throw new Error("Callback was already called.");
var callFn = fn;
fn = null;
callFn.apply(this, arguments);
};
}
function _eachOfLimit(limit) {
return function (obj, iteratee, callback) {
callback = once(callback || noop);
if (limit <= 0 || !obj) {
return callback(null);
}
var nextElem = iterator(obj);
var done = false;
var running = 0;
function iterateeCallback(err, value) {
running -= 1;
if (err) {
done = true;
callback(err);
}
else if (value === breakLoop || (done && running <= 0)) {
done = true;
return callback(null);
}
else {
replenish();
}
}
function replenish () {
while (running < limit && !done) {
var elem = nextElem();
if (elem === null) {
done = true;
if (running <= 0) {
callback(null);
}
return;
}
running += 1;
iteratee(elem.value, elem.key, onlyOnce(iterateeCallback));
}
}
replenish();
};
}
/**
* The same as [`eachOf`]{@link module:Collections.eachOf} but runs a maximum of `limit` async operations at a
* time.
*
* @name eachOfLimit
* @static
* @memberOf module:Collections
* @method
* @see [async.eachOf]{@link module:Collections.eachOf}
* @alias forEachOfLimit
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {number} limit - The maximum number of async operations at a time.
* @param {AsyncFunction} iteratee - An async function to apply to each
* item in `coll`. The `key` is the item's key, or index in the case of an
* array.
* Invoked with (item, key, callback).
* @param {Function} [callback] - A callback which is called when all
* `iteratee` functions have finished, or an error occurs. Invoked with (err).
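 * @example
 *
 * // A minimal usage sketch (not part of the original docs); assumes Node's
 * // `fs` module. At most two `fs.stat` calls are in flight at any time.
 * async.eachOfLimit(['a.txt', 'b.txt', 'c.txt'], 2, function(file, index, callback) {
 *     fs.stat(file, function(err) {
 *         callback(err);
 *     });
 * }, function(err) {
 *     if (err) console.error(err.message);
 * });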
*/
function eachOfLimit(coll, limit, iteratee, callback) {
_eachOfLimit(limit)(coll, wrapAsync(iteratee), callback);
}
function doLimit(fn, limit) {
return function (iterable, iteratee, callback) {
return fn(iterable, limit, iteratee, callback);
};
}
// eachOf implementation optimized for array-likes
function eachOfArrayLike(coll, iteratee, callback) {
callback = once(callback || noop);
var index = 0,
completed = 0,
length = coll.length;
if (length === 0) {
callback(null);
}
function iteratorCallback(err, value) {
if (err) {
callback(err);
} else if ((++completed === length) || value === breakLoop) {
callback(null);
}
}
for (; index < length; index++) {
iteratee(coll[index], index, onlyOnce(iteratorCallback));
}
}
// a generic version of eachOf which can handle array, object, and iterator cases.
var eachOfGeneric = doLimit(eachOfLimit, Infinity);
/**
* Like [`each`]{@link module:Collections.each}, except that it passes the key (or index) as the second argument
* to the iteratee.
*
* @name eachOf
* @static
* @memberOf module:Collections
* @method
* @alias forEachOf
* @category Collection
* @see [async.each]{@link module:Collections.each}
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - A function to apply to each
* item in `coll`.
* The `key` is the item's key, or index in the case of an array.
* Invoked with (item, key, callback).
* @param {Function} [callback] - A callback which is called when all
* `iteratee` functions have finished, or an error occurs. Invoked with (err).
* @example
*
* var obj = {dev: "/dev.json", test: "/test.json", prod: "/prod.json"};
* var configs = {};
*
* async.forEachOf(obj, function (value, key, callback) {
* fs.readFile(__dirname + value, "utf8", function (err, data) {
* if (err) return callback(err);
* try {
* configs[key] = JSON.parse(data);
* } catch (e) {
* return callback(e);
* }
* callback();
* });
* }, function (err) {
* if (err) console.error(err.message);
* // configs is now a map of JSON data
* doSomethingWith(configs);
* });
*/
var eachOf = function(coll, iteratee, callback) {
var eachOfImplementation = isArrayLike(coll) ? eachOfArrayLike : eachOfGeneric;
eachOfImplementation(coll, wrapAsync(iteratee), callback);
};
function doParallel(fn) {
return function (obj, iteratee, callback) {
return fn(eachOf, obj, wrapAsync(iteratee), callback);
};
}
function _asyncMap(eachfn, arr, iteratee, callback) {
callback = callback || noop;
arr = arr || [];
var results = [];
var counter = 0;
var _iteratee = wrapAsync(iteratee);
eachfn(arr, function (value, _, callback) {
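        // Claim a result slot up front so `results` preserves input order
        // even though iteratees may complete out of order.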
var index = counter++;
_iteratee(value, function (err, v) {
results[index] = v;
callback(err);
});
}, function (err) {
callback(err, results);
});
}
/**
* Produces a new collection of values by mapping each value in `coll` through
* the `iteratee` function. The `iteratee` is called with an item from `coll`
 * and a callback for when it has finished processing. Each of these callbacks
* takes 2 arguments: an `error`, and the transformed item from `coll`. If
* `iteratee` passes an error to its callback, the main `callback` (for the
* `map` function) is immediately called with the error.
*
 * Note that, since this function applies the `iteratee` to each item in
* parallel, there is no guarantee that the `iteratee` functions will complete
* in order. However, the results array will be in the same order as the
* original `coll`.
*
* If `map` is passed an Object, the results will be an Array. The results
* will roughly be in the order of the original Objects' keys (but this can
* vary across JavaScript engines).
*
* @name map
* @static
* @memberOf module:Collections
* @method
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async function to apply to each item in
* `coll`.
* The iteratee should complete with the transformed item.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called when all `iteratee`
* functions have finished, or an error occurs. Results is an Array of the
* transformed items from the `coll`. Invoked with (err, results).
* @example
*
* async.map(['file1','file2','file3'], fs.stat, function(err, results) {
* // results is now an array of stats for each file
* });
*/
var map = doParallel(_asyncMap);
/**
* Applies the provided arguments to each function in the array, calling
* `callback` after all functions have completed. If you only provide the first
* argument, `fns`, then it will return a function which lets you pass in the
* arguments as if it were a single function call. If more arguments are
* provided, `callback` is required while `args` is still optional.
*
* @name applyEach
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @param {Array|Iterable|Object} fns - A collection of {@link AsyncFunction}s
* to all call with the same arguments
* @param {...*} [args] - any number of separate arguments to pass to the
* function.
* @param {Function} [callback] - the final argument should be the callback,
* called when all functions have completed processing.
* @returns {Function} - If only the first argument, `fns`, is provided, it will
* return a function which lets you pass in the arguments as if it were a single
 * function call. The signature is `(...args, callback)`. If invoked with any
* arguments, `callback` is required.
* @example
*
* async.applyEach([enableSearch, updateSchema], 'bucket', callback);
*
* // partial application example:
* async.each(
* buckets,
* async.applyEach([enableSearch, updateSchema]),
* callback
* );
*/
var applyEach = applyEach$1(map);
function doParallelLimit(fn) {
return function (obj, limit, iteratee, callback) {
return fn(_eachOfLimit(limit), obj, wrapAsync(iteratee), callback);
};
}
/**
* The same as [`map`]{@link module:Collections.map} but runs a maximum of `limit` async operations at a time.
*
* @name mapLimit
* @static
* @memberOf module:Collections
* @method
* @see [async.map]{@link module:Collections.map}
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {number} limit - The maximum number of async operations at a time.
* @param {AsyncFunction} iteratee - An async function to apply to each item in
* `coll`.
* The iteratee should complete with the transformed item.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called when all `iteratee`
* functions have finished, or an error occurs. Results is an array of the
* transformed items from the `coll`. Invoked with (err, results).
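 * @example
 *
 * // A minimal usage sketch (not part of the original docs); assumes Node's
 * // `fs` module. No more than two stat calls run concurrently.
 * async.mapLimit(['file1', 'file2', 'file3'], 2, fs.stat, function(err, results) {
 *     // results is an array of stats, in the same order as the input
 * });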
*/
var mapLimit = doParallelLimit(_asyncMap);
/**
* The same as [`map`]{@link module:Collections.map} but runs only a single async operation at a time.
*
* @name mapSeries
* @static
* @memberOf module:Collections
* @method
* @see [async.map]{@link module:Collections.map}
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async function to apply to each item in
* `coll`.
* The iteratee should complete with the transformed item.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called when all `iteratee`
* functions have finished, or an error occurs. Results is an array of the
* transformed items from the `coll`. Invoked with (err, results).
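 * @example
 *
 * // A minimal usage sketch (not part of the original docs).
 * async.mapSeries([1, 2, 3], function(n, callback) {
 *     setTimeout(function() { callback(null, n * 2); }, 10);
 * }, function(err, results) {
 *     // results is [2, 4, 6]; each item was processed only after the
 *     // previous one finished
 * });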
*/
var mapSeries = doLimit(mapLimit, 1);
/**
* The same as [`applyEach`]{@link module:ControlFlow.applyEach} but runs only a single async operation at a time.
*
* @name applyEachSeries
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.applyEach]{@link module:ControlFlow.applyEach}
* @category Control Flow
* @param {Array|Iterable|Object} fns - A collection of {@link AsyncFunction}s to all
* call with the same arguments
* @param {...*} [args] - any number of separate arguments to pass to the
* function.
* @param {Function} [callback] - the final argument should be the callback,
* called when all functions have completed processing.
* @returns {Function} - If only the first argument is provided, it will return
* a function which lets you pass in the arguments as if it were a single
* function call.
*/
var applyEachSeries = applyEach$1(mapSeries);
/**
* A specialized version of `_.forEach` for arrays without support for
* iteratee shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array} Returns `array`.
*/
function arrayEach(array, iteratee) {
var index = -1,
length = array == null ? 0 : array.length;
while (++index < length) {
if (iteratee(array[index], index, array) === false) {
break;
}
}
return array;
}
/**
* Creates a base function for methods like `_.forIn` and `_.forOwn`.
*
* @private
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Function} Returns the new base function.
*/
function createBaseFor(fromRight) {
return function(object, iteratee, keysFunc) {
var index = -1,
iterable = Object(object),
props = keysFunc(object),
length = props.length;
while (length--) {
var key = props[fromRight ? length : ++index];
if (iteratee(iterable[key], key, iterable) === false) {
break;
}
}
return object;
};
}
/**
* The base implementation of `baseForOwn` which iterates over `object`
* properties returned by `keysFunc` and invokes `iteratee` for each property.
* Iteratee functions may exit iteration early by explicitly returning `false`.
*
* @private
* @param {Object} object The object to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @param {Function} keysFunc The function to get the keys of `object`.
* @returns {Object} Returns `object`.
*/
var baseFor = createBaseFor();
/**
* The base implementation of `_.forOwn` without support for iteratee shorthands.
*
* @private
* @param {Object} object The object to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Object} Returns `object`.
*/
function baseForOwn(object, iteratee) {
return object && baseFor(object, iteratee, keys);
}
/**
* The base implementation of `_.findIndex` and `_.findLastIndex` without
* support for iteratee shorthands.
*
* @private
* @param {Array} array The array to inspect.
* @param {Function} predicate The function invoked per iteration.
* @param {number} fromIndex The index to search from.
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function baseFindIndex(array, predicate, fromIndex, fromRight) {
var length = array.length,
index = fromIndex + (fromRight ? 1 : -1);
while ((fromRight ? index-- : ++index < length)) {
if (predicate(array[index], index, array)) {
return index;
}
}
return -1;
}
/**
* The base implementation of `_.isNaN` without support for number objects.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is `NaN`, else `false`.
*/
function baseIsNaN(value) {
return value !== value;
}
/**
* A specialized version of `_.indexOf` which performs strict equality
* comparisons of values, i.e. `===`.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @param {number} fromIndex The index to search from.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function strictIndexOf(array, value, fromIndex) {
var index = fromIndex - 1,
length = array.length;
while (++index < length) {
if (array[index] === value) {
return index;
}
}
return -1;
}
/**
* The base implementation of `_.indexOf` without `fromIndex` bounds checks.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @param {number} fromIndex The index to search from.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function baseIndexOf(array, value, fromIndex) {
return value === value
? strictIndexOf(array, value, fromIndex)
: baseFindIndex(array, baseIsNaN, fromIndex);
}
/**
* Determines the best order for running the {@link AsyncFunction}s in `tasks`, based on
* their requirements. Each function can optionally depend on other functions
* being completed first, and each function is run as soon as its requirements
* are satisfied.
*
* If any of the {@link AsyncFunction}s pass an error to their callback, the `auto` sequence
 * will stop. Further tasks will not execute (so any other functions depending
 * on the failed task will not run), and the main `callback` is immediately called with the
* error.
*
* {@link AsyncFunction}s also receive an object containing the results of functions which
* have completed so far as the first argument, if they have dependencies. If a
* task function has no dependencies, it will only be passed a callback.
*
* @name auto
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @param {Object} tasks - An object. Each of its properties is either a
* function or an array of requirements, with the {@link AsyncFunction} itself the last item
 * in the array. Each property's key serves as the name of the task defined
 * by that property, and can be used when specifying requirements for other
 * tasks. The function receives one or two arguments:
* * a `results` object, containing the results of the previously executed
* functions, only passed if the task has any dependencies,
* * a `callback(err, result)` function, which must be called when finished,
* passing an `error` (which can be `null`) and the result of the function's
* execution.
* @param {number} [concurrency=Infinity] - An optional `integer` for
* determining the maximum number of tasks that can be run in parallel. By
* default, as many as possible.
* @param {Function} [callback] - An optional callback which is called when all
* the tasks have been completed. It receives the `err` argument if any `tasks`
* pass an error to their callback. Results are always returned; however, if an
* error occurs, no further `tasks` will be performed, and the results object
* will only contain partial results. Invoked with (err, results).
* @returns undefined
* @example
*
* async.auto({
* // this function will just be passed a callback
* readData: async.apply(fs.readFile, 'data.txt', 'utf-8'),
* showData: ['readData', function(results, cb) {
* // results.readData is the file's contents
* // ...
* }]
* }, callback);
*
* async.auto({
* get_data: function(callback) {
* console.log('in get_data');
* // async code to get some data
* callback(null, 'data', 'converted to array');
* },
* make_folder: function(callback) {
* console.log('in make_folder');
* // async code to create a directory to store a file in
* // this is run at the same time as getting the data
* callback(null, 'folder');
* },
* write_file: ['get_data', 'make_folder', function(results, callback) {
* console.log('in write_file', JSON.stringify(results));
* // once there is some data and the directory exists,
* // write the data to a file in the directory
* callback(null, 'filename');
* }],
* email_link: ['write_file', function(results, callback) {
* console.log('in email_link', JSON.stringify(results));
* // once the file is written let's email a link to it...
* // results.write_file contains the filename returned by write_file.
* callback(null, {'file':results.write_file, 'email':'[email protected]'});
* }]
* }, function(err, results) {
* console.log('err = ', err);
* console.log('results = ', results);
* });
*/
var auto = function (tasks, concurrency, callback) {
if (typeof concurrency === 'function') {
// concurrency is optional, shift the args.
callback = concurrency;
concurrency = null;
}
callback = once(callback || noop);
var keys$$1 = keys(tasks);
var numTasks = keys$$1.length;
if (!numTasks) {
return callback(null);
}
if (!concurrency) {
concurrency = numTasks;
}
var results = {};
var runningTasks = 0;
var hasError = false;
var listeners = Object.create(null);
var readyTasks = [];
// for cycle detection:
var readyToCheck = []; // tasks that have been identified as reachable
// without the possibility of returning to an ancestor task
var uncheckedDependencies = {};
baseForOwn(tasks, function (task, key) {
if (!isArray(task)) {
// no dependencies
enqueueTask(key, [task]);
readyToCheck.push(key);
return;
}
var dependencies = task.slice(0, task.length - 1);
var remainingDependencies = dependencies.length;
if (remainingDependencies === 0) {
enqueueTask(key, task);
readyToCheck.push(key);
return;
}
uncheckedDependencies[key] = remainingDependencies;
arrayEach(dependencies, function (dependencyName) {
if (!tasks[dependencyName]) {
throw new Error('async.auto task `' + key +
'` has a non-existent dependency `' +
dependencyName + '` in ' +
dependencies.join(', '));
}
addListener(dependencyName, function () {
remainingDependencies--;
if (remainingDependencies === 0) {
enqueueTask(key, task);
}
});
});
});
checkForDeadlocks();
processQueue();
function enqueueTask(key, task) {
readyTasks.push(function () {
runTask(key, task);
});
}
function processQueue() {
if (readyTasks.length === 0 && runningTasks === 0) {
return callback(null, results);
}
while(readyTasks.length && runningTasks < concurrency) {
var run = readyTasks.shift();
run();
}
}
function addListener(taskName, fn) {
var taskListeners = listeners[taskName];
if (!taskListeners) {
taskListeners = listeners[taskName] = [];
}
taskListeners.push(fn);
}
function taskComplete(taskName) {
var taskListeners = listeners[taskName] || [];
arrayEach(taskListeners, function (fn) {
fn();
});
processQueue();
}
function runTask(key, task) {
if (hasError) return;
var taskCallback = onlyOnce(function(err, result) {
runningTasks--;
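            // Tasks that call back with multiple results get them collected
            // into an array.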
if (arguments.length > 2) {
result = slice(arguments, 1);
}
if (err) {
var safeResults = {};
baseForOwn(results, function(val, rkey) {
safeResults[rkey] = val;
});
safeResults[key] = result;
hasError = true;
listeners = Object.create(null);
callback(err, safeResults);
} else {
results[key] = result;
taskComplete(key);
}
});
runningTasks++;
var taskFn = wrapAsync(task[task.length - 1]);
if (task.length > 1) {
taskFn(results, taskCallback);
} else {
taskFn(taskCallback);
}
}
function checkForDeadlocks() {
// Kahn's algorithm
// https://en.wikipedia.org/wiki/Topological_sorting#Kahn.27s_algorithm
// http://connalle.blogspot.com/2013/10/topological-sortingkahn-algorithm.html
var currentTask;
var counter = 0;
while (readyToCheck.length) {
currentTask = readyToCheck.pop();
counter++;
arrayEach(getDependents(currentTask), function (dependent) {
if (--uncheckedDependencies[dependent] === 0) {
readyToCheck.push(dependent);
}
});
}
if (counter !== numTasks) {
throw new Error(
'async.auto cannot execute tasks due to a recursive dependency'
);
}
}
function getDependents(taskName) {
var result = [];
baseForOwn(tasks, function (task, key) {
if (isArray(task) && baseIndexOf(task, taskName, 0) >= 0) {
result.push(key);
}
});
return result;
}
};
/**
* A specialized version of `_.map` for arrays without support for iteratee
* shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array} Returns the new mapped array.
*/
function arrayMap(array, iteratee) {
var index = -1,
length = array == null ? 0 : array.length,
result = Array(length);
while (++index < length) {
result[index] = iteratee(array[index], index, array);
}
return result;
}
/** `Object#toString` result references. */
var symbolTag = '[object Symbol]';
/**
* Checks if `value` is classified as a `Symbol` primitive or object.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a symbol, else `false`.
* @example
*
* _.isSymbol(Symbol.iterator);
* // => true
*
* _.isSymbol('abc');
* // => false
*/
function isSymbol(value) {
return typeof value == 'symbol' ||
(isObjectLike(value) && baseGetTag(value) == symbolTag);
}
/** Used as references for various `Number` constants. */
var INFINITY = 1 / 0;
/** Used to convert symbols to primitives and strings. */
var symbolProto = Symbol$1 ? Symbol$1.prototype : undefined;
var symbolToString = symbolProto ? symbolProto.toString : undefined;
/**
* The base implementation of `_.toString` which doesn't convert nullish
* values to empty strings.
*
* @private
* @param {*} value The value to process.
* @returns {string} Returns the string.
*/
function baseToString(value) {
// Exit early for strings to avoid a performance hit in some environments.
if (typeof value == 'string') {
return value;
}
if (isArray(value)) {
// Recursively convert values (susceptible to call stack limits).
return arrayMap(value, baseToString) + '';
}
if (isSymbol(value)) {
return symbolToString ? symbolToString.call(value) : '';
}
var result = (value + '');
return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result;
}
/**
* The base implementation of `_.slice` without an iteratee call guard.
*
* @private
* @param {Array} array The array to slice.
* @param {number} [start=0] The start position.
* @param {number} [end=array.length] The end position.
* @returns {Array} Returns the slice of `array`.
*/
function baseSlice(array, start, end) {
var index = -1,
length = array.length;
if (start < 0) {
start = -start > length ? 0 : (length + start);
}
end = end > length ? length : end;
if (end < 0) {
end += length;
}
length = start > end ? 0 : ((end - start) >>> 0);
start >>>= 0;
var result = Array(length);
while (++index < length) {
result[index] = array[index + start];
}
return result;
}
/**
* Casts `array` to a slice if it's needed.
*
* @private
* @param {Array} array The array to inspect.
* @param {number} start The start position.
* @param {number} [end=array.length] The end position.
* @returns {Array} Returns the cast slice.
*/
function castSlice(array, start, end) {
var length = array.length;
end = end === undefined ? length : end;
return (!start && end >= length) ? array : baseSlice(array, start, end);
}
/**
* Used by `_.trim` and `_.trimEnd` to get the index of the last string symbol
* that is not found in the character symbols.
*
* @private
* @param {Array} strSymbols The string symbols to inspect.
* @param {Array} chrSymbols The character symbols to find.
* @returns {number} Returns the index of the last unmatched string symbol.
*/
function charsEndIndex(strSymbols, chrSymbols) {
var index = strSymbols.length;
while (index-- && baseIndexOf(chrSymbols, strSymbols[index], 0) > -1) {}
return index;
}
/**
* Used by `_.trim` and `_.trimStart` to get the index of the first string symbol
* that is not found in the character symbols.
*
* @private
* @param {Array} strSymbols The string symbols to inspect.
* @param {Array} chrSymbols The character symbols to find.
* @returns {number} Returns the index of the first unmatched string symbol.
*/
function charsStartIndex(strSymbols, chrSymbols) {
var index = -1,
length = strSymbols.length;
while (++index < length && baseIndexOf(chrSymbols, strSymbols[index], 0) > -1) {}
return index;
}
/**
* Converts an ASCII `string` to an array.
*
* @private
* @param {string} string The string to convert.
* @returns {Array} Returns the converted array.
*/
function asciiToArray(string) {
return string.split('');
}
/** Used to compose unicode character classes. */
var rsAstralRange = '\\ud800-\\udfff';
var rsComboMarksRange = '\\u0300-\\u036f';
var reComboHalfMarksRange = '\\ufe20-\\ufe2f';
var rsComboSymbolsRange = '\\u20d0-\\u20ff';
var rsComboRange = rsComboMarksRange + reComboHalfMarksRange + rsComboSymbolsRange;
var rsVarRange = '\\ufe0e\\ufe0f';
/** Used to compose unicode capture groups. */
var rsZWJ = '\\u200d';
/** Used to detect strings with [zero-width joiners or code points from the astral planes](http://eev.ee/blog/2015/09/12/dark-corners-of-unicode/). */
var reHasUnicode = RegExp('[' + rsZWJ + rsAstralRange + rsComboRange + rsVarRange + ']');
/**
* Checks if `string` contains Unicode symbols.
*
* @private
* @param {string} string The string to inspect.
* @returns {boolean} Returns `true` if a symbol is found, else `false`.
*/
function hasUnicode(string) {
return reHasUnicode.test(string);
}
/** Used to compose unicode character classes. */
var rsAstralRange$1 = '\\ud800-\\udfff';
var rsComboMarksRange$1 = '\\u0300-\\u036f';
var reComboHalfMarksRange$1 = '\\ufe20-\\ufe2f';
var rsComboSymbolsRange$1 = '\\u20d0-\\u20ff';
var rsComboRange$1 = rsComboMarksRange$1 + reComboHalfMarksRange$1 + rsComboSymbolsRange$1;
var rsVarRange$1 = '\\ufe0e\\ufe0f';
/** Used to compose unicode capture groups. */
var rsAstral = '[' + rsAstralRange$1 + ']';
var rsCombo = '[' + rsComboRange$1 + ']';
var rsFitz = '\\ud83c[\\udffb-\\udfff]';
var rsModifier = '(?:' + rsCombo + '|' + rsFitz + ')';
var rsNonAstral = '[^' + rsAstralRange$1 + ']';
var rsRegional = '(?:\\ud83c[\\udde6-\\uddff]){2}';
var rsSurrPair = '[\\ud800-\\udbff][\\udc00-\\udfff]';
var rsZWJ$1 = '\\u200d';
/** Used to compose unicode regexes. */
var reOptMod = rsModifier + '?';
var rsOptVar = '[' + rsVarRange$1 + ']?';
var rsOptJoin = '(?:' + rsZWJ$1 + '(?:' + [rsNonAstral, rsRegional, rsSurrPair].join('|') + ')' + rsOptVar + reOptMod + ')*';
var rsSeq = rsOptVar + reOptMod + rsOptJoin;
var rsSymbol = '(?:' + [rsNonAstral + rsCombo + '?', rsCombo, rsRegional, rsSurrPair, rsAstral].join('|') + ')';
/** Used to match [string symbols](https://mathiasbynens.be/notes/javascript-unicode). */
var reUnicode = RegExp(rsFitz + '(?=' + rsFitz + ')|' + rsSymbol + rsSeq, 'g');
/**
* Converts a Unicode `string` to an array.
*
* @private
* @param {string} string The string to convert.
* @returns {Array} Returns the converted array.
*/
function unicodeToArray(string) {
return string.match(reUnicode) || [];
}
/**
* Converts `string` to an array.
*
* @private
* @param {string} string The string to convert.
* @returns {Array} Returns the converted array.
*/
function stringToArray(string) {
return hasUnicode(string)
? unicodeToArray(string)
: asciiToArray(string);
}
/**
* Converts `value` to a string. An empty string is returned for `null`
* and `undefined` values. The sign of `-0` is preserved.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to convert.
* @returns {string} Returns the converted string.
* @example
*
* _.toString(null);
* // => ''
*
* _.toString(-0);
* // => '-0'
*
* _.toString([1, 2, 3]);
* // => '1,2,3'
*/
function toString(value) {
return value == null ? '' : baseToString(value);
}
/** Used to match leading and trailing whitespace. */
var reTrim = /^\s+|\s+$/g;
/**
* Removes leading and trailing whitespace or specified characters from `string`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to trim.
* @param {string} [chars=whitespace] The characters to trim.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {string} Returns the trimmed string.
* @example
*
* _.trim(' abc ');
* // => 'abc'
*
* _.trim('-_-abc-_-', '_-');
* // => 'abc'
*
* _.map([' foo ', ' bar '], _.trim);
* // => ['foo', 'bar']
*/
function trim(string, chars, guard) {
string = toString(string);
if (string && (guard || chars === undefined)) {
return string.replace(reTrim, '');
}
if (!string || !(chars = baseToString(chars))) {
return string;
}
var strSymbols = stringToArray(string),
chrSymbols = stringToArray(chars),
start = charsStartIndex(strSymbols, chrSymbols),
end = charsEndIndex(strSymbols, chrSymbols) + 1;
return castSlice(strSymbols, start, end).join('');
}
var FN_ARGS = /^(?:async\s+)?(function)?\s*[^\(]*\(\s*([^\)]*)\)/m;
var FN_ARG_SPLIT = /,/;
var FN_ARG = /(=.+)?(\s*)$/;
var STRIP_COMMENTS = /((\/\/.*$)|(\/\*[\s\S]*?\*\/))/mg;
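// Derives a function's parameter names by stringifying it: comments are
// stripped, the argument list is captured by FN_ARGS, and default values
// and surrounding whitespace are removed from each name.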
function parseParams(func) {
func = func.toString().replace(STRIP_COMMENTS, '');
func = func.match(FN_ARGS)[2].replace(' ', '');
func = func ? func.split(FN_ARG_SPLIT) : [];
func = func.map(function (arg){
return trim(arg.replace(FN_ARG, ''));
});
return func;
}
/**
* A dependency-injected version of the [async.auto]{@link module:ControlFlow.auto} function. Dependent
* tasks are specified as parameters to the function, after the usual callback
* parameter, with the parameter names matching the names of the tasks it
* depends on. This can provide even more readable task graphs which can be
* easier to maintain.
*
* If a final callback is specified, the task results are similarly injected,
* specified as named parameters after the initial error parameter.
*
* The autoInject function is purely syntactic sugar and its semantics are
* otherwise equivalent to [async.auto]{@link module:ControlFlow.auto}.
*
* @name autoInject
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.auto]{@link module:ControlFlow.auto}
* @category Control Flow
* @param {Object} tasks - An object, each of whose properties is an {@link AsyncFunction} of
 * the form `func([dependencies...], callback)`. Each property's key serves
 * as the name of the task defined by that property, and can be used when
 * specifying requirements for other tasks.
* * The `callback` parameter is a `callback(err, result)` which must be called
* when finished, passing an `error` (which can be `null`) and the result of
* the function's execution. The remaining parameters name other tasks on
* which the task is dependent, and the results from those tasks are the
* arguments of those parameters.
* @param {Function} [callback] - An optional callback which is called when all
* the tasks have been completed. It receives the `err` argument if any `tasks`
* pass an error to their callback, and a `results` object with any completed
* task results, similar to `auto`.
* @example
*
* // The example from `auto` can be rewritten as follows:
* async.autoInject({
* get_data: function(callback) {
* // async code to get some data
* callback(null, 'data', 'converted to array');
* },
* make_folder: function(callback) {
* // async code to create a directory to store a file in
* // this is run at the same time as getting the data
* callback(null, 'folder');
* },
* write_file: function(get_data, make_folder, callback) {
* // once there is some data and the directory exists,
* // write the data to a file in the directory
* callback(null, 'filename');
* },
* email_link: function(write_file, callback) {
* // once the file is written let's email a link to it...
* // write_file contains the filename returned by write_file.
* callback(null, {'file':write_file, 'email':'[email protected]'});
* }
* }, function(err, results) {
* console.log('err = ', err);
* console.log('email_link = ', results.email_link);
* });
*
* // If you are using a JS minifier that mangles parameter names, `autoInject`
* // will not work with plain functions, since the parameter names will be
* // collapsed to a single letter identifier. To work around this, you can
* // explicitly specify the names of the parameters your task function needs
* // in an array, similar to Angular.js dependency injection.
*
* // This still has an advantage over plain `auto`, since the results a task
* // depends on are still spread into arguments.
* async.autoInject({
* //...
* write_file: ['get_data', 'make_folder', function(get_data, make_folder, callback) {
* callback(null, 'filename');
* }],
* email_link: ['write_file', function(write_file, callback) {
* callback(null, {'file':write_file, 'email':'[email protected]'});
* }]
* //...
* }, function(err, results) {
* console.log('err = ', err);
* console.log('email_link = ', results.email_link);
* });
*/
function autoInject(tasks, callback) {
var newTasks = {};
baseForOwn(tasks, function (taskFn, key) {
var params;
var fnIsAsync = isAsync(taskFn);
var hasNoDeps =
(!fnIsAsync && taskFn.length === 1) ||
(fnIsAsync && taskFn.length === 0);
if (isArray(taskFn)) {
params = taskFn.slice(0, -1);
taskFn = taskFn[taskFn.length - 1];
newTasks[key] = params.concat(params.length > 0 ? newTask : taskFn);
} else if (hasNoDeps) {
// no dependencies, use the function as-is
newTasks[key] = taskFn;
} else {
params = parseParams(taskFn);
if (taskFn.length === 0 && !fnIsAsync && params.length === 0) {
throw new Error("autoInject task functions require explicit parameters.");
}
// remove callback param
if (!fnIsAsync) params.pop();
newTasks[key] = params.concat(newTask);
}
function newTask(results, taskCb) {
var newArgs = arrayMap(params, function (name) {
return results[name];
});
newArgs.push(taskCb);
wrapAsync(taskFn).apply(null, newArgs);
}
});
auto(newTasks, callback);
}
// Simple doubly linked list (https://en.wikipedia.org/wiki/Doubly_linked_list) implementation
// used for queues. This implementation assumes that the node provided by the user can be modified
// to adjust the next and prev properties. We implement only the minimal functionality
// for queue support.
function DLL() {
this.head = this.tail = null;
this.length = 0;
}
function setInitial(dll, node) {
dll.length = 1;
dll.head = dll.tail = node;
}
DLL.prototype.removeLink = function(node) {
if (node.prev) node.prev.next = node.next;
else this.head = node.next;
if (node.next) node.next.prev = node.prev;
else this.tail = node.prev;
node.prev = node.next = null;
this.length -= 1;
return node;
};
DLL.prototype.empty = function () {
while(this.head) this.shift();
return this;
};
DLL.prototype.insertAfter = function(node, newNode) {
newNode.prev = node;
newNode.next = node.next;
if (node.next) node.next.prev = newNode;
else this.tail = newNode;
node.next = newNode;
this.length += 1;
};
DLL.prototype.insertBefore = function(node, newNode) {
newNode.prev = node.prev;
newNode.next = node;
if (node.prev) node.prev.next = newNode;
else this.head = newNode;
node.prev = newNode;
this.length += 1;
};
DLL.prototype.unshift = function(node) {
if (this.head) this.insertBefore(this.head, node);
else setInitial(this, node);
};
DLL.prototype.push = function(node) {
if (this.tail) this.insertAfter(this.tail, node);
else setInitial(this, node);
};
DLL.prototype.shift = function() {
return this.head && this.removeLink(this.head);
};
DLL.prototype.pop = function() {
return this.tail && this.removeLink(this.tail);
};
DLL.prototype.toArray = function () {
var arr = Array(this.length);
var curr = this.head;
for(var idx = 0; idx < this.length; idx++) {
arr[idx] = curr.data;
curr = curr.next;
}
return arr;
};
DLL.prototype.remove = function (testFn) {
var curr = this.head;
while(!!curr) {
var next = curr.next;
if (testFn(curr)) {
this.removeLink(curr);
}
curr = next;
}
return this;
};
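// Shared factory behind the public queue and cargo helpers: up to `payload`
// queued items (all pending items when `payload` is falsy) are batched into
// a single `worker` call, with at most `concurrency` calls in flight.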
function queue(worker, concurrency, payload) {
if (concurrency == null) {
concurrency = 1;
}
else if(concurrency === 0) {
throw new Error('Concurrency must not be zero');
}
var _worker = wrapAsync(worker);
var numRunning = 0;
var workersList = [];
var processingScheduled = false;
function _insert(data, insertAtFront, callback) {
if (callback != null && typeof callback !== 'function') {
throw new Error('task callback must be a function');
}
q.started = true;
if (!isArray(data)) {
data = [data];
}
if (data.length === 0 && q.idle()) {
// call drain immediately if there are no tasks
return setImmediate$1(function() {
q.drain();
});
}
for (var i = 0, l = data.length; i < l; i++) {
var item = {
data: data[i],
callback: callback || noop
};
if (insertAtFront) {
q._tasks.unshift(item);
} else {
q._tasks.push(item);
}
}
if (!processingScheduled) {
processingScheduled = true;
setImmediate$1(function() {
processingScheduled = false;
q.process();
});
}
}
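    // Builds the completion callback for a batch of tasks: it settles each
    // task's callback, reports errors, fires the unsaturated/drain events
    // when appropriate, and schedules the next round of processing.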
function _next(tasks) {
return function(err){
numRunning -= 1;
for (var i = 0, l = tasks.length; i < l; i++) {
var task = tasks[i];
var index = baseIndexOf(workersList, task, 0);
if (index === 0) {
workersList.shift();
} else if (index > 0) {
workersList.splice(index, 1);
}
task.callback.apply(task, arguments);
if (err != null) {
q.error(err, task.data);
}
}
if (numRunning <= (q.concurrency - q.buffer) ) {
q.unsaturated();
}
if (q.idle()) {
q.drain();
}
q.process();
};
}
var isProcessing = false;
var q = {
_tasks: new DLL(),
concurrency: concurrency,
payload: payload,
saturated: noop,
unsaturated:noop,
buffer: concurrency / 4,
empty: noop,
drain: noop,
error: noop,
started: false,
paused: false,
push: function (data, callback) {
_insert(data, false, callback);
},
kill: function () {
q.drain = noop;
q._tasks.empty();
},
unshift: function (data, callback) {
_insert(data, true, callback);
},
remove: function (testFn) {
q._tasks.remove(testFn);
},
process: function () {
// Avoid trying to start too many processing operations. This can occur
// when callbacks resolve synchronously (#1267).
if (isProcessing) {
return;
}
isProcessing = true;
while(!q.paused && numRunning < q.concurrency && q._tasks.length){
var tasks = [], data = [];
var l = q._tasks.length;
if (q.payload) l = Math.min(l, q.payload);
for (var i = 0; i < l; i++) {
var node = q._tasks.shift();
tasks.push(node);
workersList.push(node);
data.push(node.data);
}
numRunning += 1;
if (q._tasks.length === 0) {
q.empty();
}
if (numRunning === q.concurrency) {
q.saturated();
}
var cb = onlyOnce(_next(tasks));
_worker(data, cb);
}
isProcessing = false;
},
length: function () {
return q._tasks.length;
},
running: function () {
return numRunning;
},
workersList: function () {
return workersList;
},
idle: function() {
return q._tasks.length + numRunning === 0;
},
pause: function () {
q.paused = true;
},
resume: function () {
if (q.paused === false) { return; }
q.paused = false;
setImmediate$1(q.process);
}
};
return q;
}
/**
* A cargo of tasks for the worker function to complete. Cargo inherits all of
* the same methods and event callbacks as [`queue`]{@link module:ControlFlow.queue}.
* @typedef {Object} CargoObject
* @memberOf module:ControlFlow
* @property {Function} length - A function returning the number of items
* waiting to be processed. Invoke like `cargo.length()`.
* @property {number} payload - An `integer` for determining how many tasks
 * should be processed per round. This property can be changed after a `cargo` is
* created to alter the payload on-the-fly.
* @property {Function} push - Adds `task` to the `queue`. The callback is
* called once the `worker` has finished processing the task. Instead of a
* single task, an array of `tasks` can be submitted. The respective callback is
* used for every task in the list. Invoke like `cargo.push(task, [callback])`.
* @property {Function} saturated - A callback that is called when the
* `queue.length()` hits the concurrency and further tasks will be queued.
* @property {Function} empty - A callback that is called when the last item
* from the `queue` is given to a `worker`.
* @property {Function} drain - A callback that is called when the last item
* from the `queue` has returned from the `worker`.
* @property {Function} idle - a function returning false if there are items
* waiting or being processed, or true if not. Invoke like `cargo.idle()`.
* @property {Function} pause - a function that pauses the processing of tasks
* until `resume()` is called. Invoke like `cargo.pause()`.
* @property {Function} resume - a function that resumes the processing of
* queued tasks when the queue is paused. Invoke like `cargo.resume()`.
* @property {Function} kill - a function that removes the `drain` callback and
* empties remaining tasks from the queue forcing it to go idle. Invoke like `cargo.kill()`.
*/
/**
* Creates a `cargo` object with the specified payload. Tasks added to the
* cargo will be processed altogether (up to the `payload` limit). If the
* `worker` is in progress, the task is queued until it becomes available. Once
* the `worker` has completed some tasks, each callback of those tasks is
* called. Check out [these](https://camo.githubusercontent.com/6bbd36f4cf5b35a0f11a96dcd2e97711ffc2fb37/68747470733a2f2f662e636c6f75642e6769746875622e636f6d2f6173736574732f313637363837312f36383130382f62626330636662302d356632392d313165322d393734662d3333393763363464633835382e676966) [animations](https://camo.githubusercontent.com/f4810e00e1c5f5f8addbe3e9f49064fd5d102699/68747470733a2f2f662e636c6f75642e6769746875622e636f6d2f6173736574732f313637363837312f36383130312f38346339323036362d356632392d313165322d383134662d3964336430323431336266642e676966)
* for how `cargo` and `queue` work.
*
* While [`queue`]{@link module:ControlFlow.queue} passes only one task to one of a group of workers
* at a time, cargo passes an array of tasks to a single worker, repeating
* when the worker is finished.
*
* @name cargo
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.queue]{@link module:ControlFlow.queue}
* @category Control Flow
* @param {AsyncFunction} worker - An asynchronous function for processing an array
* of queued tasks. Invoked with `(tasks, callback)`.
* @param {number} [payload=Infinity] - An optional `integer` for determining
* how many tasks should be processed per round; if omitted, the default is
* unlimited.
* @returns {module:ControlFlow.CargoObject} A cargo object to manage the tasks. Callbacks can
 * be attached as certain properties to listen for specific events during the
* lifecycle of the cargo and inner queue.
* @example
*
* // create a cargo object with payload 2
* var cargo = async.cargo(function(tasks, callback) {
* for (var i=0; i<tasks.length; i++) {
* console.log('hello ' + tasks[i].name);
* }
* callback();
* }, 2);
*
* // add some items
* cargo.push({name: 'foo'}, function(err) {
* console.log('finished processing foo');
* });
* cargo.push({name: 'bar'}, function(err) {
* console.log('finished processing bar');
* });
* cargo.push({name: 'baz'}, function(err) {
* console.log('finished processing baz');
* });
*/
function cargo(worker, payload) {
return queue(worker, 1, payload);
}
/**
* The same as [`eachOf`]{@link module:Collections.eachOf} but runs only a single async operation at a time.
*
* @name eachOfSeries
* @static
* @memberOf module:Collections
* @method
* @see [async.eachOf]{@link module:Collections.eachOf}
* @alias forEachOfSeries
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async function to apply to each item in
* `coll`.
* Invoked with (item, key, callback).
* @param {Function} [callback] - A callback which is called when all `iteratee`
* functions have finished, or an error occurs. Invoked with (err).
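 * @example
 *
 * // A minimal usage sketch (added for illustration; the object and file
 * // names are hypothetical): process key/value pairs one at a time.
 * async.eachOfSeries({dev: 'dev.json', test: 'test.json'}, function(value, key, callback) {
 *     fs.readFile(value, 'utf8', function(err, data) {
 *         if (err) return callback(err);
 *         console.log(key + ' -> ' + data.length + ' characters');
 *         callback();
 *     });
 * }, function(err) {
 *     // err is set if any read failed; reads happened strictly in series
 * });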
*/
var eachOfSeries = doLimit(eachOfLimit, 1);
/**
* Reduces `coll` into a single value using an async `iteratee` to return each
* successive step. `memo` is the initial state of the reduction. This function
* only operates in series.
*
* For performance reasons, it may make sense to split a call to this function
* into a parallel map, and then use the normal `Array.prototype.reduce` on the
* results. This function is for situations where each step in the reduction
* needs to be async; if you can get the data before reducing it, then it's
* probably a good idea to do so.
*
* @name reduce
* @static
* @memberOf module:Collections
* @method
* @alias inject
* @alias foldl
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {*} memo - The initial state of the reduction.
* @param {AsyncFunction} iteratee - A function applied to each item in the
* array to produce the next step in the reduction.
* The `iteratee` should complete with the next state of the reduction.
 * If the iteratee completes with an error, the reduction is stopped and the
* main `callback` is immediately called with the error.
* Invoked with (memo, item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Result is the reduced value. Invoked with
* (err, result).
* @example
*
* async.reduce([1,2,3], 0, function(memo, item, callback) {
* // pointless async:
* process.nextTick(function() {
* callback(null, memo + item)
* });
* }, function(err, result) {
* // result is now equal to the last value of memo, which is 6
* });
*/
function reduce(coll, memo, iteratee, callback) {
callback = once(callback || noop);
var _iteratee = wrapAsync(iteratee);
eachOfSeries(coll, function(x, i, callback) {
_iteratee(memo, x, function(err, v) {
memo = v;
callback(err);
});
}, function(err) {
callback(err, memo);
});
}
/**
* Version of the compose function that is more natural to read. Each function
* consumes the return value of the previous function. It is the equivalent of
* [compose]{@link module:ControlFlow.compose} with the arguments reversed.
*
* Each function is executed with the `this` binding of the composed function.
*
* @name seq
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.compose]{@link module:ControlFlow.compose}
* @category Control Flow
* @param {...AsyncFunction} functions - the asynchronous functions to compose
* @returns {Function} a function that composes the `functions` in order
* @example
*
* // Requires lodash (or underscore), express3 and dresende's orm2.
* // Part of an app, that fetches cats of the logged user.
* // This example uses `seq` function to avoid overnesting and error
* // handling clutter.
* app.get('/cats', function(request, response) {
* var User = request.models.User;
* async.seq(
* _.bind(User.get, User), // 'User.get' has signature (id, callback(err, data))
* function(user, fn) {
* user.getCats(fn); // 'getCats' has signature (callback(err, data))
* }
 * )(request.session.user_id, function (err, cats) {
* if (err) {
* console.error(err);
* response.json({ status: 'error', message: err.message });
* } else {
* response.json({ status: 'ok', message: 'Cats found', data: cats });
* }
* });
* });
*/
function seq(/*...functions*/) {
var _functions = arrayMap(arguments, wrapAsync);
return function(/*...args*/) {
var args = slice(arguments);
var that = this;
var cb = args[args.length - 1];
if (typeof cb == 'function') {
args.pop();
} else {
cb = noop;
}
reduce(_functions, args, function(newargs, fn, cb) {
fn.apply(that, newargs.concat(function(err/*, ...nextargs*/) {
var nextargs = slice(arguments, 1);
cb(err, nextargs);
}));
},
function(err, results) {
cb.apply(that, [err].concat(results));
});
};
}
/**
* Creates a function which is a composition of the passed asynchronous
* functions. Each function consumes the return value of the function that
* follows. Composing functions `f()`, `g()`, and `h()` would produce the result
* of `f(g(h()))`, only this version uses callbacks to obtain the return values.
*
* Each function is executed with the `this` binding of the composed function.
*
* @name compose
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @param {...AsyncFunction} functions - the asynchronous functions to compose
* @returns {Function} an asynchronous function that is the composed
* asynchronous `functions`
* @example
*
* function add1(n, callback) {
* setTimeout(function () {
* callback(null, n + 1);
* }, 10);
* }
*
* function mul3(n, callback) {
* setTimeout(function () {
* callback(null, n * 3);
* }, 10);
* }
*
* var add1mul3 = async.compose(mul3, add1);
* add1mul3(4, function (err, result) {
* // result now equals 15
* });
*/
var compose = function(/*...args*/) {
return seq.apply(null, slice(arguments).reverse());
};
var _concat = Array.prototype.concat;
/**
* The same as [`concat`]{@link module:Collections.concat} but runs a maximum of `limit` async operations at a time.
*
* @name concatLimit
* @static
* @memberOf module:Collections
* @method
* @see [async.concat]{@link module:Collections.concat}
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {number} limit - The maximum number of async operations at a time.
 * @param {AsyncFunction} iteratee - A function to apply to each item in `coll`,
 * which should complete with an array as its result. Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished, or an error occurs. Results is an array
* containing the concatenated results of the `iteratee` function. Invoked with
* (err, results).
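 * @example
 *
 * // A minimal usage sketch (added for illustration): read at most 2
 * // directories at a time and flatten the listings into one array.
 * async.concatLimit(['dir1', 'dir2', 'dir3', 'dir4'], 2, fs.readdir, function(err, files) {
 *     // files is a single array of the filenames from all four directories
 * });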
*/
var concatLimit = function(coll, limit, iteratee, callback) {
callback = callback || noop;
var _iteratee = wrapAsync(iteratee);
mapLimit(coll, limit, function(val, callback) {
_iteratee(val, function(err /*, ...args*/) {
if (err) return callback(err);
return callback(null, slice(arguments, 1));
});
}, function(err, mapResults) {
var result = [];
for (var i = 0; i < mapResults.length; i++) {
if (mapResults[i]) {
result = _concat.apply(result, mapResults[i]);
}
}
return callback(err, result);
});
};
/**
* Applies `iteratee` to each item in `coll`, concatenating the results. Returns
* the concatenated list. The `iteratee`s are called in parallel, and the
* results are concatenated as they return. There is no guarantee that the
* results array will be returned in the original order of `coll` passed to the
* `iteratee` function.
*
* @name concat
* @static
* @memberOf module:Collections
* @method
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
 * @param {AsyncFunction} iteratee - A function to apply to each item in `coll`,
 * which should complete with an array as its result. Invoked with (item, callback).
 * @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished, or an error occurs. Results is an array
* containing the concatenated results of the `iteratee` function. Invoked with
* (err, results).
* @example
*
* async.concat(['dir1','dir2','dir3'], fs.readdir, function(err, files) {
* // files is now a list of filenames that exist in the 3 directories
* });
*/
var concat = doLimit(concatLimit, Infinity);
/**
* The same as [`concat`]{@link module:Collections.concat} but runs only a single async operation at a time.
*
* @name concatSeries
* @static
* @memberOf module:Collections
* @method
* @see [async.concat]{@link module:Collections.concat}
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - A function to apply to each item in `coll`.
 * The iteratee should complete with an array of results.
* Invoked with (item, callback).
 * @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished, or an error occurs. Results is an array
* containing the concatenated results of the `iteratee` function. Invoked with
* (err, results).
*/
var concatSeries = doLimit(concatLimit, 1);
/**
 * Returns a function that when called, calls back with the values provided.
* Useful as the first function in a [`waterfall`]{@link module:ControlFlow.waterfall}, or for plugging values in to
* [`auto`]{@link module:ControlFlow.auto}.
*
* @name constant
* @static
* @memberOf module:Utils
* @method
* @category Util
* @param {...*} arguments... - Any number of arguments to automatically invoke
* callback with.
* @returns {AsyncFunction} Returns a function that when invoked, automatically
* invokes the callback with the previous given arguments.
* @example
*
* async.waterfall([
* async.constant(42),
* function (value, next) {
* // value === 42
* },
* //...
* ], callback);
*
* async.waterfall([
* async.constant(filename, "utf8"),
* fs.readFile,
* function (fileData, next) {
* //...
* }
* //...
* ], callback);
*
* async.auto({
* hostname: async.constant("https://server.net/"),
* port: findFreePort,
* launchServer: ["hostname", "port", function (options, cb) {
* startServer(options, cb);
* }],
* //...
* }, callback);
*/
var constant = function(/*...values*/) {
var values = slice(arguments);
var args = [null].concat(values);
return function (/*...ignoredArgs, callback*/) {
var callback = arguments[arguments.length - 1];
return callback.apply(this, args);
};
};
/**
* This method returns the first argument it receives.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {*} value Any value.
* @returns {*} Returns `value`.
* @example
*
* var object = { 'a': 1 };
*
* console.log(_.identity(object) === object);
* // => true
*/
function identity(value) {
return value;
}
function _createTester(check, getResult) {
return function(eachfn, arr, iteratee, cb) {
cb = cb || noop;
var testPassed = false;
var testResult;
eachfn(arr, function(value, _, callback) {
iteratee(value, function(err, result) {
if (err) {
callback(err);
} else if (check(result) && !testResult) {
testPassed = true;
testResult = getResult(true, value);
callback(null, breakLoop);
} else {
callback();
}
});
}, function(err) {
if (err) {
cb(err);
} else {
cb(null, testPassed ? testResult : getResult(false));
}
});
};
}
function _findGetResult(v, x) {
return x;
}
/**
* Returns the first value in `coll` that passes an async truth test. The
* `iteratee` is applied in parallel, meaning the first iteratee to return
* `true` will fire the detect `callback` with that result. That means the
* result might not be the first item in the original `coll` (in terms of order)
* that passes the test.
* If order within the original `coll` is important, then look at
* [`detectSeries`]{@link module:Collections.detectSeries}.
*
* @name detect
* @static
* @memberOf module:Collections
* @method
* @alias find
 * @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - A truth test to apply to each item in `coll`.
* The iteratee must complete with a boolean value as its result.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called as soon as any
* iteratee returns `true`, or after all the `iteratee` functions have finished.
* Result will be the first item in the array that passes the truth test
* (iteratee) or the value `undefined` if none passed. Invoked with
* (err, result).
* @example
*
* async.detect(['file1','file2','file3'], function(filePath, callback) {
* fs.access(filePath, function(err) {
* callback(null, !err)
* });
* }, function(err, result) {
* // result now equals the first file in the list that exists
* });
*/
var detect = doParallel(_createTester(identity, _findGetResult));
/**
* The same as [`detect`]{@link module:Collections.detect} but runs a maximum of `limit` async operations at a
* time.
*
* @name detectLimit
* @static
* @memberOf module:Collections
* @method
* @see [async.detect]{@link module:Collections.detect}
* @alias findLimit
 * @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {number} limit - The maximum number of async operations at a time.
* @param {AsyncFunction} iteratee - A truth test to apply to each item in `coll`.
* The iteratee must complete with a boolean value as its result.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called as soon as any
* iteratee returns `true`, or after all the `iteratee` functions have finished.
* Result will be the first item in the array that passes the truth test
* (iteratee) or the value `undefined` if none passed. Invoked with
* (err, result).
*/
var detectLimit = doParallelLimit(_createTester(identity, _findGetResult));
/**
* The same as [`detect`]{@link module:Collections.detect} but runs only a single async operation at a time.
*
* @name detectSeries
* @static
* @memberOf module:Collections
* @method
* @see [async.detect]{@link module:Collections.detect}
* @alias findSeries
 * @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - A truth test to apply to each item in `coll`.
* The iteratee must complete with a boolean value as its result.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called as soon as any
* iteratee returns `true`, or after all the `iteratee` functions have finished.
* Result will be the first item in the array that passes the truth test
* (iteratee) or the value `undefined` if none passed. Invoked with
* (err, result).
*/
var detectSeries = doLimit(detectLimit, 1);
function consoleFunc(name) {
return function (fn/*, ...args*/) {
var args = slice(arguments, 1);
args.push(function (err/*, ...args*/) {
var args = slice(arguments, 1);
if (typeof console === 'object') {
if (err) {
if (console.error) {
console.error(err);
}
} else if (console[name]) {
arrayEach(args, function (x) {
console[name](x);
});
}
}
});
wrapAsync(fn).apply(null, args);
};
}
/**
* Logs the result of an [`async` function]{@link AsyncFunction} to the
* `console` using `console.dir` to display the properties of the resulting object.
* Only works in Node.js or in browsers that support `console.dir` and
* `console.error` (such as FF and Chrome).
* If multiple arguments are returned from the async function,
* `console.dir` is called on each argument in order.
*
* @name dir
* @static
* @memberOf module:Utils
* @method
* @category Util
* @param {AsyncFunction} function - The function you want to eventually apply
* all arguments to.
* @param {...*} arguments... - Any number of arguments to apply to the function.
* @example
*
* // in a module
* var hello = function(name, callback) {
* setTimeout(function() {
* callback(null, {hello: name});
* }, 1000);
* };
*
* // in the node repl
* node> async.dir(hello, 'world');
* {hello: 'world'}
*/
var dir = consoleFunc('dir');
/**
* The post-check version of [`during`]{@link module:ControlFlow.during}. To reflect the difference in
* the order of operations, the arguments `test` and `fn` are switched.
*
 * Also a version of [`doWhilst`]{@link module:ControlFlow.doWhilst} with an asynchronous `test` function.
* @name doDuring
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.during]{@link module:ControlFlow.during}
* @category Control Flow
* @param {AsyncFunction} fn - An async function which is called each time
* `test` passes. Invoked with (callback).
* @param {AsyncFunction} test - asynchronous truth test to perform before each
* execution of `fn`. Invoked with (...args, callback), where `...args` are the
* non-error args from the previous callback of `fn`.
* @param {Function} [callback] - A callback which is called after the test
* function has failed and repeated execution of `fn` has stopped. `callback`
* will be passed an error if one occurred, otherwise `null`.
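 * @example
 *
 * // A minimal usage sketch (added for illustration): `fn` runs first, then
 * // the asynchronous test decides whether to repeat.
 * var count = 0;
 * async.doDuring(
 *     function(callback) {
 *         count++;
 *         setTimeout(callback, 1000);
 *     },
 *     function(callback) {
 *         callback(null, count < 5);
 *     },
 *     function(err) {
 *         // 5 seconds have passed; fn ran 5 times
 *     }
 * );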
*/
function doDuring(fn, test, callback) {
callback = onlyOnce(callback || noop);
var _fn = wrapAsync(fn);
var _test = wrapAsync(test);
function next(err/*, ...args*/) {
if (err) return callback(err);
var args = slice(arguments, 1);
args.push(check);
_test.apply(this, args);
}
function check(err, truth) {
if (err) return callback(err);
if (!truth) return callback(null);
_fn(next);
}
check(null, true);
}
/**
* The post-check version of [`whilst`]{@link module:ControlFlow.whilst}. To reflect the difference in
* the order of operations, the arguments `test` and `iteratee` are switched.
*
* `doWhilst` is to `whilst` as `do while` is to `while` in plain JavaScript.
*
* @name doWhilst
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.whilst]{@link module:ControlFlow.whilst}
* @category Control Flow
* @param {AsyncFunction} iteratee - A function which is called each time `test`
* passes. Invoked with (callback).
* @param {Function} test - synchronous truth test to perform after each
* execution of `iteratee`. Invoked with any non-error callback results of
* `iteratee`.
* @param {Function} [callback] - A callback which is called after the test
* function has failed and repeated execution of `iteratee` has stopped.
* `callback` will be passed an error and any arguments passed to the final
* `iteratee`'s callback. Invoked with (err, [results]);
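 * @example
 *
 * // A minimal usage sketch (added for illustration): like `do...while`, the
 * // iteratee always runs at least once, and the synchronous test runs after.
 * var count = 0;
 * async.doWhilst(
 *     function(callback) {
 *         count++;
 *         setTimeout(function() {
 *             callback(null, count);
 *         }, 1000);
 *     },
 *     function(n) {
 *         return n < 5;
 *     },
 *     function(err, n) {
 *         // n is now 5
 *     }
 * );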
*/
function doWhilst(iteratee, test, callback) {
callback = onlyOnce(callback || noop);
var _iteratee = wrapAsync(iteratee);
var next = function(err/*, ...args*/) {
if (err) return callback(err);
var args = slice(arguments, 1);
if (test.apply(this, args)) return _iteratee(next);
callback.apply(null, [null].concat(args));
};
_iteratee(next);
}
/**
* Like ['doWhilst']{@link module:ControlFlow.doWhilst}, except the `test` is inverted. Note the
* argument ordering differs from `until`.
*
* @name doUntil
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.doWhilst]{@link module:ControlFlow.doWhilst}
* @category Control Flow
* @param {AsyncFunction} iteratee - An async function which is called each time
* `test` fails. Invoked with (callback).
* @param {Function} test - synchronous truth test to perform after each
* execution of `iteratee`. Invoked with any non-error callback results of
* `iteratee`.
* @param {Function} [callback] - A callback which is called after the test
* function has passed and repeated execution of `iteratee` has stopped. `callback`
* will be passed an error and any arguments passed to the final `iteratee`'s
* callback. Invoked with (err, [results]);
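 * @example
 *
 * // A minimal usage sketch (added for illustration): identical shape to
 * // `doWhilst`, but iteration stops once the test returns true.
 * var count = 0;
 * async.doUntil(
 *     function(callback) {
 *         count++;
 *         callback(null, count);
 *     },
 *     function(n) {
 *         return n === 5;
 *     },
 *     function(err, n) {
 *         // n is now 5
 *     }
 * );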
*/
function doUntil(iteratee, test, callback) {
doWhilst(iteratee, function() {
return !test.apply(this, arguments);
}, callback);
}
/**
* Like [`whilst`]{@link module:ControlFlow.whilst}, except the `test` is an asynchronous function that
* is passed a callback in the form of `function (err, truth)`. If error is
* passed to `test` or `fn`, the main callback is immediately called with the
* value of the error.
*
* @name during
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.whilst]{@link module:ControlFlow.whilst}
* @category Control Flow
* @param {AsyncFunction} test - asynchronous truth test to perform before each
* execution of `fn`. Invoked with (callback).
* @param {AsyncFunction} fn - An async function which is called each time
* `test` passes. Invoked with (callback).
* @param {Function} [callback] - A callback which is called after the test
* function has failed and repeated execution of `fn` has stopped. `callback`
* will be passed an error, if one occurred, otherwise `null`.
* @example
*
* var count = 0;
*
* async.during(
* function (callback) {
* return callback(null, count < 5);
* },
* function (callback) {
* count++;
* setTimeout(callback, 1000);
* },
* function (err) {
* // 5 seconds have passed
* }
* );
*/
function during(test, fn, callback) {
callback = onlyOnce(callback || noop);
var _fn = wrapAsync(fn);
var _test = wrapAsync(test);
function next(err) {
if (err) return callback(err);
_test(check);
}
function check(err, truth) {
if (err) return callback(err);
if (!truth) return callback(null);
_fn(next);
}
_test(check);
}
function _withoutIndex(iteratee) {
return function (value, index, callback) {
return iteratee(value, callback);
};
}
/**
* Applies the function `iteratee` to each item in `coll`, in parallel.
* The `iteratee` is called with an item from the list, and a callback for when
* it has finished. If the `iteratee` passes an error to its `callback`, the
* main `callback` (for the `each` function) is immediately called with the
* error.
*
 * Note that, since this function applies `iteratee` to each item in parallel,
 * there is no guarantee that the iteratee functions will complete in order.
*
* @name each
* @static
* @memberOf module:Collections
* @method
* @alias forEach
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async function to apply to
* each item in `coll`. Invoked with (item, callback).
* The array index is not passed to the iteratee.
* If you need the index, use `eachOf`.
* @param {Function} [callback] - A callback which is called when all
* `iteratee` functions have finished, or an error occurs. Invoked with (err).
* @example
*
* // assuming openFiles is an array of file names and saveFile is a function
* // to save the modified contents of that file:
*
* async.each(openFiles, saveFile, function(err){
* // if any of the saves produced an error, err would equal that error
* });
*
* // assuming openFiles is an array of file names
* async.each(openFiles, function(file, callback) {
*
* // Perform operation on file here.
* console.log('Processing file ' + file);
*
* if( file.length > 32 ) {
* console.log('This file name is too long');
* callback('File name too long');
* } else {
* // Do work to process file here
* console.log('File processed');
* callback();
* }
* }, function(err) {
* // if any of the file processing produced an error, err would equal that error
* if( err ) {
* // One of the iterations produced an error.
* // All processing will now stop.
* console.log('A file failed to process');
* } else {
* console.log('All files have been processed successfully');
* }
* });
*/
function eachLimit(coll, iteratee, callback) {
eachOf(coll, _withoutIndex(wrapAsync(iteratee)), callback);
}
/**
* The same as [`each`]{@link module:Collections.each} but runs a maximum of `limit` async operations at a time.
*
* @name eachLimit
* @static
* @memberOf module:Collections
* @method
* @see [async.each]{@link module:Collections.each}
* @alias forEachLimit
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {number} limit - The maximum number of async operations at a time.
* @param {AsyncFunction} iteratee - An async function to apply to each item in
* `coll`.
* The array index is not passed to the iteratee.
* If you need the index, use `eachOfLimit`.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called when all
* `iteratee` functions have finished, or an error occurs. Invoked with (err).
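 * @example
 *
 * // A minimal usage sketch (added for illustration; `openFiles` and
 * // `saveFile` are hypothetical): save at most 2 files at a time.
 * async.eachLimit(openFiles, 2, saveFile, function(err) {
 *     // err is set if any save failed
 * });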
*/
function eachLimit$1(coll, limit, iteratee, callback) {
_eachOfLimit(limit)(coll, _withoutIndex(wrapAsync(iteratee)), callback);
}
/**
* The same as [`each`]{@link module:Collections.each} but runs only a single async operation at a time.
*
* @name eachSeries
* @static
* @memberOf module:Collections
* @method
* @see [async.each]{@link module:Collections.each}
* @alias forEachSeries
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async function to apply to each
* item in `coll`.
* The array index is not passed to the iteratee.
* If you need the index, use `eachOfSeries`.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called when all
* `iteratee` functions have finished, or an error occurs. Invoked with (err).
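 * @example
 *
 * // A minimal usage sketch (added for illustration; `openFiles` and
 * // `saveFile` are hypothetical): save the files strictly one at a time.
 * async.eachSeries(openFiles, saveFile, function(err) {
 *     // all saves completed in order, or err holds the first failure
 * });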
*/
var eachSeries = doLimit(eachLimit$1, 1);
/**
* Wrap an async function and ensure it calls its callback on a later tick of
* the event loop. If the function already calls its callback on a next tick,
* no extra deferral is added. This is useful for preventing stack overflows
* (`RangeError: Maximum call stack size exceeded`) and generally keeping
* [Zalgo](http://blog.izs.me/post/59142742143/designing-apis-for-asynchrony)
* contained. ES2017 `async` functions are returned as-is -- they are immune
* to Zalgo's corrupting influences, as they always resolve on a later tick.
*
* @name ensureAsync
* @static
* @memberOf module:Utils
* @method
* @category Util
* @param {AsyncFunction} fn - an async function, one that expects a node-style
* callback as its last argument.
* @returns {AsyncFunction} Returns a wrapped function with the exact same call
* signature as the function passed in.
* @example
*
* function sometimesAsync(arg, callback) {
* if (cache[arg]) {
* return callback(null, cache[arg]); // this would be synchronous!!
* } else {
* doSomeIO(arg, callback); // this IO would be asynchronous
* }
* }
*
* // this has a risk of stack overflows if many results are cached in a row
* async.mapSeries(args, sometimesAsync, done);
*
* // this will defer sometimesAsync's callback if necessary,
* // preventing stack overflows
* async.mapSeries(args, async.ensureAsync(sometimesAsync), done);
*/
function ensureAsync(fn) {
if (isAsync(fn)) return fn;
return initialParams(function (args, callback) {
var sync = true;
args.push(function () {
var innerArgs = arguments;
if (sync) {
setImmediate$1(function () {
callback.apply(null, innerArgs);
});
} else {
callback.apply(null, innerArgs);
}
});
fn.apply(this, args);
sync = false;
});
}
function notId(v) {
return !v;
}
/**
* Returns `true` if every element in `coll` satisfies an async test. If any
* iteratee call returns `false`, the main `callback` is immediately called.
*
* @name every
* @static
* @memberOf module:Collections
* @method
* @alias all
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async truth test to apply to each item
* in the collection in parallel.
* The iteratee must complete with a boolean result value.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Result will be either `true` or `false`
* depending on the values of the async tests. Invoked with (err, result).
* @example
*
* async.every(['file1','file2','file3'], function(filePath, callback) {
* fs.access(filePath, function(err) {
* callback(null, !err)
* });
* }, function(err, result) {
* // if result is true then every file exists
* });
*/
var every = doParallel(_createTester(notId, notId));
/**
* The same as [`every`]{@link module:Collections.every} but runs a maximum of `limit` async operations at a time.
*
* @name everyLimit
* @static
* @memberOf module:Collections
* @method
* @see [async.every]{@link module:Collections.every}
* @alias allLimit
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {number} limit - The maximum number of async operations at a time.
* @param {AsyncFunction} iteratee - An async truth test to apply to each item
* in the collection in parallel.
* The iteratee must complete with a boolean result value.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Result will be either `true` or `false`
* depending on the values of the async tests. Invoked with (err, result).
*/
var everyLimit = doParallelLimit(_createTester(notId, notId));
/**
* The same as [`every`]{@link module:Collections.every} but runs only a single async operation at a time.
*
* @name everySeries
* @static
* @memberOf module:Collections
* @method
* @see [async.every]{@link module:Collections.every}
* @alias allSeries
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async truth test to apply to each item
* in the collection in series.
* The iteratee must complete with a boolean result value.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Result will be either `true` or `false`
* depending on the values of the async tests. Invoked with (err, result).
*/
var everySeries = doLimit(everyLimit, 1);
/**
* The base implementation of `_.property` without support for deep paths.
*
* @private
* @param {string} key The key of the property to get.
* @returns {Function} Returns the new accessor function.
*/
function baseProperty(key) {
return function(object) {
return object == null ? undefined : object[key];
};
}
function filterArray(eachfn, arr, iteratee, callback) {
var truthValues = new Array(arr.length);
eachfn(arr, function (x, index, callback) {
iteratee(x, function (err, v) {
truthValues[index] = !!v;
callback(err);
});
}, function (err) {
if (err) return callback(err);
var results = [];
for (var i = 0; i < arr.length; i++) {
if (truthValues[i]) results.push(arr[i]);
}
callback(null, results);
});
}
function filterGeneric(eachfn, coll, iteratee, callback) {
var results = [];
eachfn(coll, function (x, index, callback) {
iteratee(x, function (err, v) {
if (err) {
callback(err);
} else {
if (v) {
results.push({index: index, value: x});
}
callback();
}
});
}, function (err) {
if (err) {
callback(err);
} else {
callback(null, arrayMap(results.sort(function (a, b) {
return a.index - b.index;
}), baseProperty('value')));
}
});
}
function _filter(eachfn, coll, iteratee, callback) {
var filter = isArrayLike(coll) ? filterArray : filterGeneric;
filter(eachfn, coll, wrapAsync(iteratee), callback || noop);
}
/**
* Returns a new array of all the values in `coll` which pass an async truth
* test. This operation is performed in parallel, but the results array will be
* in the same order as the original.
*
* @name filter
* @static
* @memberOf module:Collections
* @method
* @alias select
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {Function} iteratee - A truth test to apply to each item in `coll`.
* The `iteratee` is passed a `callback(err, truthValue)`, which must be called
* with a boolean argument once it has completed. Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Invoked with (err, results).
* @example
*
* async.filter(['file1','file2','file3'], function(filePath, callback) {
* fs.access(filePath, function(err) {
* callback(null, !err)
* });
* }, function(err, results) {
* // results now equals an array of the existing files
* });
*/
var filter = doParallel(_filter);
/**
* The same as [`filter`]{@link module:Collections.filter} but runs a maximum of `limit` async operations at a
* time.
*
* @name filterLimit
* @static
* @memberOf module:Collections
* @method
* @see [async.filter]{@link module:Collections.filter}
* @alias selectLimit
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {number} limit - The maximum number of async operations at a time.
* @param {Function} iteratee - A truth test to apply to each item in `coll`.
* The `iteratee` is passed a `callback(err, truthValue)`, which must be called
* with a boolean argument once it has completed. Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Invoked with (err, results).
*/
var filterLimit = doParallelLimit(_filter);
/**
* The same as [`filter`]{@link module:Collections.filter} but runs only a single async operation at a time.
*
* @name filterSeries
* @static
* @memberOf module:Collections
* @method
* @see [async.filter]{@link module:Collections.filter}
* @alias selectSeries
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {Function} iteratee - A truth test to apply to each item in `coll`.
* The `iteratee` is passed a `callback(err, truthValue)`, which must be called
* with a boolean argument once it has completed. Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Invoked with (err, results)
*/
var filterSeries = doLimit(filterLimit, 1);
/**
* Calls the asynchronous function `fn` with a callback parameter that allows it
* to call itself again, in series, indefinitely.
* If an error is passed to the callback then `errback` is called with the
* error, and execution stops, otherwise it will never be called.
*
* @name forever
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @param {AsyncFunction} fn - an async function to call repeatedly.
* Invoked with (next).
 * @param {Function} [errback] - when `fn` passes an error to its callback,
* this function will be called, and execution stops. Invoked with (err).
* @example
*
* async.forever(
* function(next) {
* // next is suitable for passing to things that need a callback(err [, whatever]);
* // it will result in this function being called again.
* },
* function(err) {
* // if next is called with a value in its first parameter, it will appear
* // in here as 'err', and execution will stop.
* }
* );
*/
function forever(fn, errback) {
var done = onlyOnce(errback || noop);
var task = wrapAsync(ensureAsync(fn));
function next(err) {
if (err) return done(err);
task(next);
}
next();
}
/**
* The same as [`groupBy`]{@link module:Collections.groupBy} but runs a maximum of `limit` async operations at a time.
*
* @name groupByLimit
* @static
* @memberOf module:Collections
* @method
* @see [async.groupBy]{@link module:Collections.groupBy}
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {number} limit - The maximum number of async operations at a time.
* @param {AsyncFunction} iteratee - An async function to apply to each item in
* `coll`.
* The iteratee should complete with a `key` to group the value under.
* Invoked with (value, callback).
* @param {Function} [callback] - A callback which is called when all `iteratee`
 * functions have finished, or an error occurs. Result is an `Object` whose
 * properties are arrays of values which returned the corresponding key.
*/
var groupByLimit = function(coll, limit, iteratee, callback) {
callback = callback || noop;
var _iteratee = wrapAsync(iteratee);
mapLimit(coll, limit, function(val, callback) {
_iteratee(val, function(err, key) {
if (err) return callback(err);
return callback(null, {key: key, val: val});
});
}, function(err, mapResults) {
var result = {};
// from MDN, handle object having a `hasOwnProperty` prop
var hasOwnProperty = Object.prototype.hasOwnProperty;
for (var i = 0; i < mapResults.length; i++) {
if (mapResults[i]) {
var key = mapResults[i].key;
var val = mapResults[i].val;
if (hasOwnProperty.call(result, key)) {
result[key].push(val);
} else {
result[key] = [val];
}
}
}
return callback(err, result);
});
};
/**
* Returns a new object, where each value corresponds to an array of items, from
* `coll`, that returned the corresponding key. That is, the keys of the object
* correspond to the values passed to the `iteratee` callback.
*
* Note: Since this function applies the `iteratee` to each item in parallel,
* there is no guarantee that the `iteratee` functions will complete in order.
* However, the values for each key in the `result` will be in the same order as
* the original `coll`. For Objects, the values will roughly be in the order of
* the original Objects' keys (but this can vary across JavaScript engines).
*
* @name groupBy
* @static
* @memberOf module:Collections
* @method
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async function to apply to each item in
* `coll`.
* The iteratee should complete with a `key` to group the value under.
* Invoked with (value, callback).
* @param {Function} [callback] - A callback which is called when all `iteratee`
 * functions have finished, or an error occurs. Result is an `Object` whose
* properties are arrays of values which returned the corresponding key.
* @example
*
* async.groupBy(['userId1', 'userId2', 'userId3'], function(userId, callback) {
* db.findById(userId, function(err, user) {
* if (err) return callback(err);
* return callback(null, user.age);
* });
* }, function(err, result) {
* // result is object containing the userIds grouped by age
* // e.g. { 30: ['userId1', 'userId3'], 42: ['userId2']};
* });
*/
var groupBy = doLimit(groupByLimit, Infinity);
/**
* The same as [`groupBy`]{@link module:Collections.groupBy} but runs only a single async operation at a time.
*
* @name groupBySeries
* @static
* @memberOf module:Collections
* @method
* @see [async.groupBy]{@link module:Collections.groupBy}
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async function to apply to each item in
* `coll`.
* The iteratee should complete with a `key` to group the value under.
* Invoked with (value, callback).
* @param {Function} [callback] - A callback which is called when all `iteratee`
 * functions have finished, or an error occurs. Result is an `Object` whose
* properties are arrays of values which returned the corresponding key.
*/
var groupBySeries = doLimit(groupByLimit, 1);
/**
* Logs the result of an `async` function to the `console`. Only works in
* Node.js or in browsers that support `console.log` and `console.error` (such
* as FF and Chrome). If multiple arguments are returned from the async
* function, `console.log` is called on each argument in order.
*
* @name log
* @static
* @memberOf module:Utils
* @method
* @category Util
* @param {AsyncFunction} function - The function you want to eventually apply
* all arguments to.
* @param {...*} arguments... - Any number of arguments to apply to the function.
* @example
*
* // in a module
* var hello = function(name, callback) {
* setTimeout(function() {
* callback(null, 'hello ' + name);
* }, 1000);
* };
*
* // in the node repl
* node> async.log(hello, 'world');
* 'hello world'
*/
var log = consoleFunc('log');
/**
* The same as [`mapValues`]{@link module:Collections.mapValues} but runs a maximum of `limit` async operations at a
* time.
*
* @name mapValuesLimit
* @static
* @memberOf module:Collections
* @method
* @see [async.mapValues]{@link module:Collections.mapValues}
* @category Collection
* @param {Object} obj - A collection to iterate over.
* @param {number} limit - The maximum number of async operations at a time.
* @param {AsyncFunction} iteratee - A function to apply to each value and key
* in `coll`.
* The iteratee should complete with the transformed value as its result.
* Invoked with (value, key, callback).
* @param {Function} [callback] - A callback which is called when all `iteratee`
* functions have finished, or an error occurs. `result` is a new object consisting
* of each key from `obj`, with each transformed value on the right-hand side.
* Invoked with (err, result).
*/
function mapValuesLimit(obj, limit, iteratee, callback) {
callback = once(callback || noop);
var newObj = {};
var _iteratee = wrapAsync(iteratee);
eachOfLimit(obj, limit, function(val, key, next) {
_iteratee(val, key, function (err, result) {
if (err) return next(err);
newObj[key] = result;
next();
});
}, function (err) {
callback(err, newObj);
});
}
/**
* A relative of [`map`]{@link module:Collections.map}, designed for use with objects.
*
* Produces a new Object by mapping each value of `obj` through the `iteratee`
 * function. The `iteratee` is called with each `value` and `key` from `obj` and a
* callback for when it has finished processing. Each of these callbacks takes
* two arguments: an `error`, and the transformed item from `obj`. If `iteratee`
* passes an error to its callback, the main `callback` (for the `mapValues`
* function) is immediately called with the error.
*
 * Note that the order of the keys in the result is not guaranteed. The keys will
 * be roughly in the order they complete (though this is very engine-specific).
*
* @name mapValues
* @static
* @memberOf module:Collections
* @method
* @category Collection
* @param {Object} obj - A collection to iterate over.
* @param {AsyncFunction} iteratee - A function to apply to each value and key
* in `coll`.
* The iteratee should complete with the transformed value as its result.
* Invoked with (value, key, callback).
* @param {Function} [callback] - A callback which is called when all `iteratee`
* functions have finished, or an error occurs. `result` is a new object consisting
* of each key from `obj`, with each transformed value on the right-hand side.
* Invoked with (err, result).
* @example
*
* async.mapValues({
* f1: 'file1',
* f2: 'file2',
* f3: 'file3'
* }, function (file, key, callback) {
* fs.stat(file, callback);
* }, function(err, result) {
* // result is now a map of stats for each file, e.g.
* // {
* // f1: [stats for file1],
* // f2: [stats for file2],
* // f3: [stats for file3]
* // }
* });
*/
var mapValues = doLimit(mapValuesLimit, Infinity);
/**
* The same as [`mapValues`]{@link module:Collections.mapValues} but runs only a single async operation at a time.
*
* @name mapValuesSeries
* @static
* @memberOf module:Collections
* @method
* @see [async.mapValues]{@link module:Collections.mapValues}
* @category Collection
* @param {Object} obj - A collection to iterate over.
* @param {AsyncFunction} iteratee - A function to apply to each value and key
* in `coll`.
* The iteratee should complete with the transformed value as its result.
* Invoked with (value, key, callback).
* @param {Function} [callback] - A callback which is called when all `iteratee`
* functions have finished, or an error occurs. `result` is a new object consisting
* of each key from `obj`, with each transformed value on the right-hand side.
* Invoked with (err, result).
*/
var mapValuesSeries = doLimit(mapValuesLimit, 1);
function has(obj, key) {
return key in obj;
}
/**
* Caches the results of an async function. When creating a hash to store
* function results against, the callback is omitted from the hash and an
* optional hash function can be used.
*
* If no hash function is specified, the first argument is used as a hash key,
* which may work reasonably if it is a string or a data type that converts to a
* distinct string. Note that objects and arrays will not behave reasonably.
* Neither will cases where the other arguments are significant. In such cases,
* specify your own hash function.
*
* The cache of results is exposed as the `memo` property of the function
* returned by `memoize`.
*
* @name memoize
* @static
* @memberOf module:Utils
* @method
* @category Util
* @param {AsyncFunction} fn - The async function to proxy and cache results from.
* @param {Function} hasher - An optional function for generating a custom hash
* for storing results. It has all the arguments applied to it apart from the
* callback, and must be synchronous.
* @returns {AsyncFunction} a memoized version of `fn`
* @example
*
* var slow_fn = function(name, callback) {
* // do something
* callback(null, result);
* };
* var fn = async.memoize(slow_fn);
*
* // fn can now be used as if it were slow_fn
* fn('some name', function() {
* // callback
* });
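 *
 * // A sketch of a custom hasher (added for illustration): when more than one
 * // argument is significant, combine them into a single cache key.
 * var cachedLookup = async.memoize(function(host, port, callback) {
 *     // do something with host and port
 *     callback(null, host + ':' + port);
 * }, function(host, port) {
 *     return host + ':' + port;
 * });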
*/
function memoize(fn, hasher) {
var memo = Object.create(null);
var queues = Object.create(null);
hasher = hasher || identity;
var _fn = wrapAsync(fn);
var memoized = initialParams(function memoized(args, callback) {
var key = hasher.apply(null, args);
if (has(memo, key)) {
setImmediate$1(function() {
callback.apply(null, memo[key]);
});
} else if (has(queues, key)) {
queues[key].push(callback);
} else {
queues[key] = [callback];
_fn.apply(null, args.concat(function(/*args*/) {
var args = slice(arguments);
memo[key] = args;
var q = queues[key];
delete queues[key];
for (var i = 0, l = q.length; i < l; i++) {
q[i].apply(null, args);
}
}));
}
});
memoized.memo = memo;
memoized.unmemoized = fn;
return memoized;
}
/**
* Calls `callback` on a later loop around the event loop. In Node.js this just
 * calls `process.nextTick`. In the browser it will use `setImmediate` if
* available, otherwise `setTimeout(callback, 0)`, which means other higher
* priority events may precede the execution of `callback`.
*
* This is used internally for browser-compatibility purposes.
*
* @name nextTick
* @static
* @memberOf module:Utils
* @method
* @see [async.setImmediate]{@link module:Utils.setImmediate}
* @category Util
* @param {Function} callback - The function to call on a later loop around
* the event loop. Invoked with (args...).
* @param {...*} args... - any number of additional arguments to pass to the
* callback on the next tick.
* @example
*
* var call_order = [];
* async.nextTick(function() {
* call_order.push('two');
* // call_order now equals ['one','two']
* });
* call_order.push('one');
*
* async.setImmediate(function (a, b, c) {
* // a, b, and c equal 1, 2, and 3
* }, 1, 2, 3);
*/
var _defer$1;
if (hasNextTick) {
_defer$1 = process.nextTick;
} else if (hasSetImmediate) {
_defer$1 = setImmediate;
} else {
_defer$1 = fallback;
}
var nextTick = wrap(_defer$1);
function _parallel(eachfn, tasks, callback) {
callback = callback || noop;
var results = isArrayLike(tasks) ? [] : {};
eachfn(tasks, function (task, key, callback) {
wrapAsync(task)(function (err, result) {
if (arguments.length > 2) {
result = slice(arguments, 1);
}
results[key] = result;
callback(err);
});
}, function (err) {
callback(err, results);
});
}
/**
* Run the `tasks` collection of functions in parallel, without waiting until
* the previous function has completed. If any of the functions pass an error to
* its callback, the main `callback` is immediately called with the value of the
* error. Once the `tasks` have completed, the results are passed to the final
* `callback` as an array.
*
* **Note:** `parallel` is about kicking-off I/O tasks in parallel, not about
* parallel execution of code. If your tasks do not use any timers or perform
* any I/O, they will actually be executed in series. Any synchronous setup
* sections for each task will happen one after the other. JavaScript remains
* single-threaded.
*
* **Hint:** Use [`reflect`]{@link module:Utils.reflect} to continue the
* execution of other tasks when a task fails.
*
* It is also possible to use an object instead of an array. Each property will
* be run as a function and the results will be passed to the final `callback`
* as an object instead of an array. This can be a more readable way of handling
* results from {@link async.parallel}.
*
* @name parallel
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @param {Array|Iterable|Object} tasks - A collection of
* [async functions]{@link AsyncFunction} to run.
* Each async function can complete with any number of optional `result` values.
* @param {Function} [callback] - An optional callback to run once all the
* functions have completed successfully. This function gets a results array
* (or object) containing all the result arguments passed to the task callbacks.
* Invoked with (err, results).
*
* @example
* async.parallel([
* function(callback) {
* setTimeout(function() {
* callback(null, 'one');
* }, 200);
* },
* function(callback) {
* setTimeout(function() {
* callback(null, 'two');
* }, 100);
* }
* ],
* // optional callback
* function(err, results) {
* // the results array will equal ['one','two'] even though
* // the second function had a shorter timeout.
* });
*
* // an example using an object instead of an array
* async.parallel({
* one: function(callback) {
* setTimeout(function() {
* callback(null, 1);
* }, 200);
* },
* two: function(callback) {
* setTimeout(function() {
* callback(null, 2);
* }, 100);
* }
* }, function(err, results) {
* // results is now equals to: {one: 1, two: 2}
* });
*/
function parallelLimit(tasks, callback) {
_parallel(eachOf, tasks, callback);
}
/**
* The same as [`parallel`]{@link module:ControlFlow.parallel} but runs a maximum of `limit` async operations at a
* time.
*
* @name parallelLimit
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.parallel]{@link module:ControlFlow.parallel}
* @category Control Flow
* @param {Array|Iterable|Object} tasks - A collection of
* [async functions]{@link AsyncFunction} to run.
* Each async function can complete with any number of optional `result` values.
* @param {number} limit - The maximum number of async operations at a time.
* @param {Function} [callback] - An optional callback to run once all the
* functions have completed successfully. This function gets a results array
* (or object) containing all the result arguments passed to the task callbacks.
* Invoked with (err, results).
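 * @example
 *
 * // A minimal usage sketch (added for illustration): run at most 2 of the
 * // hypothetical `fetch*` tasks at any one time.
 * async.parallelLimit([fetchUsers, fetchPosts, fetchComments], 2,
 *     function(err, results) {
 *         // results holds each task's result, in task order
 *     });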
*/
function parallelLimit$1(tasks, limit, callback) {
_parallel(_eachOfLimit(limit), tasks, callback);
}
/**
* A queue of tasks for the worker function to complete.
* @typedef {Object} QueueObject
* @memberOf module:ControlFlow
* @property {Function} length - a function returning the number of items
* waiting to be processed. Invoke with `queue.length()`.
* @property {boolean} started - a boolean indicating whether or not any
* items have been pushed and processed by the queue.
* @property {Function} running - a function returning the number of items
* currently being processed. Invoke with `queue.running()`.
* @property {Function} workersList - a function returning the array of items
* currently being processed. Invoke with `queue.workersList()`.
* @property {Function} idle - a function returning false if there are items
* waiting or being processed, or true if not. Invoke with `queue.idle()`.
* @property {number} concurrency - an integer for determining how many `worker`
* functions should be run in parallel. This property can be changed after a
* `queue` is created to alter the concurrency on-the-fly.
* @property {Function} push - add a new task to the `queue`. Calls `callback`
* once the `worker` has finished processing the task. Instead of a single task,
* a `tasks` array can be submitted. The respective callback is used for every
 * task in the list. Invoke with `queue.push(task, [callback])`.
* @property {Function} unshift - add a new task to the front of the `queue`.
* Invoke with `queue.unshift(task, [callback])`.
* @property {Function} remove - remove items from the queue that match a test
* function. The test function will be passed an object with a `data` property,
* and a `priority` property, if this is a
* [priorityQueue]{@link module:ControlFlow.priorityQueue} object.
* Invoked with `queue.remove(testFn)`, where `testFn` is of the form
* `function ({data, priority}) {}` and returns a Boolean.
* @property {Function} saturated - a callback that is called when the number of
* running workers hits the `concurrency` limit, and further tasks will be
* queued.
* @property {Function} unsaturated - a callback that is called when the number
* of running workers is less than the `concurrency` & `buffer` limits, and
* further tasks will not be queued.
* @property {number} buffer - A minimum threshold buffer in order to say that
* the `queue` is `unsaturated`.
* @property {Function} empty - a callback that is called when the last item
* from the `queue` is given to a `worker`.
* @property {Function} drain - a callback that is called when the last item
* from the `queue` has returned from the `worker`.
* @property {Function} error - a callback that is called when a task errors.
* Has the signature `function(error, task)`.
* @property {boolean} paused - a boolean for determining whether the queue is
* in a paused state.
* @property {Function} pause - a function that pauses the processing of tasks
* until `resume()` is called. Invoke with `queue.pause()`.
* @property {Function} resume - a function that resumes the processing of
* queued tasks when the queue is paused. Invoke with `queue.resume()`.
* @property {Function} kill - a function that removes the `drain` callback and
* empties remaining tasks from the queue forcing it to go idle. No more tasks
* should be pushed to the queue after calling this function. Invoke with `queue.kill()`.
*/
/**
* Creates a `queue` object with the specified `concurrency`. Tasks added to the
* `queue` are processed in parallel (up to the `concurrency` limit). If all
* `worker`s are in progress, the task is queued until one becomes available.
* Once a `worker` completes a `task`, that `task`'s callback is called.
*
* @name queue
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @param {AsyncFunction} worker - An async function for processing a queued task.
* If you want to handle errors from an individual task, pass a callback to
* `q.push()`. Invoked with (task, callback).
* @param {number} [concurrency=1] - An `integer` for determining how many
* `worker` functions should be run in parallel. If omitted, the concurrency
* defaults to `1`. If the concurrency is `0`, an error is thrown.
 * @returns {module:ControlFlow.QueueObject} A queue object to manage the tasks. Callbacks can
 * be attached as certain properties to listen for specific events during the
 * lifecycle of the queue.
* @example
*
* // create a queue object with concurrency 2
* var q = async.queue(function(task, callback) {
* console.log('hello ' + task.name);
* callback();
* }, 2);
*
* // assign a callback
* q.drain = function() {
* console.log('all items have been processed');
* };
*
* // add some items to the queue
* q.push({name: 'foo'}, function(err) {
* console.log('finished processing foo');
* });
* q.push({name: 'bar'}, function (err) {
* console.log('finished processing bar');
* });
*
* // add some items to the queue (batch-wise)
* q.push([{name: 'baz'},{name: 'bay'},{name: 'bax'}], function(err) {
* console.log('finished processing item');
* });
*
* // add some items to the front of the queue
* q.unshift({name: 'bar'}, function (err) {
* console.log('finished processing bar');
* });
*/
var queue$1 = function (worker, concurrency) {
var _worker = wrapAsync(worker);
return queue(function (items, cb) {
_worker(items[0], cb);
}, concurrency, 1);
};
/**
* The same as [async.queue]{@link module:ControlFlow.queue} only tasks are assigned a priority and
* completed in ascending priority order.
*
* @name priorityQueue
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.queue]{@link module:ControlFlow.queue}
* @category Control Flow
* @param {AsyncFunction} worker - An async function for processing a queued task.
* If you want to handle errors from an individual task, pass a callback to
* `q.push()`.
* Invoked with (task, callback).
* @param {number} concurrency - An `integer` for determining how many `worker`
* functions should be run in parallel. If omitted, the concurrency defaults to
* `1`. If the concurrency is `0`, an error is thrown.
* @returns {module:ControlFlow.QueueObject} A priorityQueue object to manage the tasks. There are two
* differences between `queue` and `priorityQueue` objects:
* * `push(task, priority, [callback])` - `priority` should be a number. If an
* array of `tasks` is given, all tasks will be assigned the same priority.
* * The `unshift` method was removed.
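 * @example
 *
 * // A minimal usage sketch (added for illustration): lower priority numbers
 * // are processed first.
 * var pq = async.priorityQueue(function(task, callback) {
 *     console.log('processing ' + task.name);
 *     callback();
 * }, 1);
 *
 * pq.push({name: 'routine'}, 2);
 * pq.push({name: 'urgent'}, 1); // will be processed before 'routine'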
*/
var priorityQueue = function(worker, concurrency) {
// Start with a normal queue
var q = queue$1(worker, concurrency);
// Override push to accept second parameter representing priority
q.push = function(data, priority, callback) {
if (callback == null) callback = noop;
if (typeof callback !== 'function') {
throw new Error('task callback must be a function');
}
q.started = true;
if (!isArray(data)) {
data = [data];
}
if (data.length === 0) {
// call drain immediately if there are no tasks
return setImmediate$1(function() {
q.drain();
});
}
priority = priority || 0;
var nextNode = q._tasks.head;
while (nextNode && priority >= nextNode.priority) {
nextNode = nextNode.next;
}
for (var i = 0, l = data.length; i < l; i++) {
var item = {
data: data[i],
priority: priority,
callback: callback
};
if (nextNode) {
q._tasks.insertBefore(nextNode, item);
} else {
q._tasks.push(item);
}
}
setImmediate$1(q.process);
};
// Remove unshift function
delete q.unshift;
return q;
};
/**
* Runs the `tasks` array of functions in parallel, without waiting until the
 * previous function has completed. Once any of the `tasks` completes or passes an
 * error to its callback, the main `callback` is immediately called. It's
* equivalent to `Promise.race()`.
*
* @name race
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @param {Array} tasks - An array containing [async functions]{@link AsyncFunction}
* to run. Each function can complete with an optional `result` value.
* @param {Function} callback - A callback to run once any of the functions have
* completed. This function gets an error or result from the first function that
* completed. Invoked with (err, result).
* @returns undefined
* @example
*
* async.race([
* function(callback) {
* setTimeout(function() {
* callback(null, 'one');
* }, 200);
* },
* function(callback) {
* setTimeout(function() {
* callback(null, 'two');
* }, 100);
* }
* ],
* // main callback
* function(err, result) {
* // the result will be equal to 'two' as it finishes earlier
* });
*/
function race(tasks, callback) {
callback = once(callback || noop);
if (!isArray(tasks)) return callback(new TypeError('First argument to race must be an array of functions'));
if (!tasks.length) return callback();
for (var i = 0, l = tasks.length; i < l; i++) {
wrapAsync(tasks[i])(callback);
}
}
/**
* Same as [`reduce`]{@link module:Collections.reduce}, only operates on `array` in reverse order.
*
* @name reduceRight
* @static
* @memberOf module:Collections
* @method
* @see [async.reduce]{@link module:Collections.reduce}
* @alias foldr
* @category Collection
* @param {Array} array - A collection to iterate over.
* @param {*} memo - The initial state of the reduction.
* @param {AsyncFunction} iteratee - A function applied to each item in the
* array to produce the next step in the reduction.
* The `iteratee` should complete with the next state of the reduction.
 * If the iteratee completes with an error, the reduction is stopped and the
* main `callback` is immediately called with the error.
* Invoked with (memo, item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Result is the reduced value. Invoked with
* (err, result).
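 * @example
 *
 * // a minimal sketch (added for illustration): sums an array right-to-left
 * async.reduceRight([1, 2, 3], 0, function(memo, item, callback) {
 *     setImmediate(function() {
 *         callback(null, memo + item);
 *     });
 * }, function(err, result) {
 *     // result is 6; the items were visited in the order 3, 2, 1
 * });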
*/
function reduceRight (array, memo, iteratee, callback) {
var reversed = slice(array).reverse();
reduce(reversed, memo, iteratee, callback);
}
/**
* Wraps the async function in another function that always completes with a
* result object, even when it errors.
*
* The result object has either the property `error` or `value`.
*
* @name reflect
* @static
* @memberOf module:Utils
* @method
* @category Util
* @param {AsyncFunction} fn - The async function you want to wrap
 * @returns {Function} - A function that always passes null to its callback as
* the error. The second argument to the callback will be an `object` with
* either an `error` or a `value` property.
* @example
*
* async.parallel([
* async.reflect(function(callback) {
* // do some stuff ...
* callback(null, 'one');
* }),
* async.reflect(function(callback) {
* // do some more stuff but error ...
* callback('bad stuff happened');
* }),
* async.reflect(function(callback) {
* // do some more stuff ...
* callback(null, 'two');
* })
* ],
* // optional callback
* function(err, results) {
* // values
* // results[0].value = 'one'
* // results[1].error = 'bad stuff happened'
* // results[2].value = 'two'
* });
*/
function reflect(fn) {
var _fn = wrapAsync(fn);
return initialParams(function reflectOn(args, reflectCallback) {
args.push(function callback(error, cbArg) {
if (error) {
reflectCallback(null, { error: error });
} else {
var value;
if (arguments.length <= 2) {
value = cbArg;
} else {
value = slice(arguments, 1);
}
reflectCallback(null, { value: value });
}
});
return _fn.apply(this, args);
});
}
/**
* A helper function that wraps an array or an object of functions with `reflect`.
*
* @name reflectAll
* @static
* @memberOf module:Utils
* @method
* @see [async.reflect]{@link module:Utils.reflect}
* @category Util
* @param {Array|Object|Iterable} tasks - The collection of
* [async functions]{@link AsyncFunction} to wrap in `async.reflect`.
* @returns {Array} Returns an array of async functions, each wrapped in
* `async.reflect`
* @example
*
* let tasks = [
* function(callback) {
* setTimeout(function() {
* callback(null, 'one');
* }, 200);
* },
* function(callback) {
* // do some more stuff but error ...
* callback(new Error('bad stuff happened'));
* },
* function(callback) {
* setTimeout(function() {
* callback(null, 'two');
* }, 100);
* }
* ];
*
* async.parallel(async.reflectAll(tasks),
* // optional callback
* function(err, results) {
* // values
* // results[0].value = 'one'
* // results[1].error = Error('bad stuff happened')
* // results[2].value = 'two'
* });
*
* // an example using an object instead of an array
* let tasks = {
* one: function(callback) {
* setTimeout(function() {
* callback(null, 'one');
* }, 200);
* },
* two: function(callback) {
* callback('two');
* },
* three: function(callback) {
* setTimeout(function() {
* callback(null, 'three');
* }, 100);
* }
* };
*
* async.parallel(async.reflectAll(tasks),
* // optional callback
* function(err, results) {
* // values
* // results.one.value = 'one'
* // results.two.error = 'two'
* // results.three.value = 'three'
* });
*/
function reflectAll(tasks) {
var results;
if (isArray(tasks)) {
results = arrayMap(tasks, reflect);
} else {
results = {};
baseForOwn(tasks, function(task, key) {
results[key] = reflect.call(this, task);
});
}
return results;
}
function reject$1(eachfn, arr, iteratee, callback) {
_filter(eachfn, arr, function(value, cb) {
iteratee(value, function(err, v) {
cb(err, !v);
});
}, callback);
}
/**
* The opposite of [`filter`]{@link module:Collections.filter}. Removes values that pass an `async` truth test.
*
* @name reject
* @static
* @memberOf module:Collections
* @method
* @see [async.filter]{@link module:Collections.filter}
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {Function} iteratee - An async truth test to apply to each item in
* `coll`.
 * The iteratee should complete with a boolean value as its `result`.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Invoked with (err, results).
* @example
*
* async.reject(['file1','file2','file3'], function(filePath, callback) {
* fs.access(filePath, function(err) {
* callback(null, !err)
* });
* }, function(err, results) {
* // results now equals an array of missing files
* createFiles(results);
* });
*/
var reject = doParallel(reject$1);
/**
* The same as [`reject`]{@link module:Collections.reject} but runs a maximum of `limit` async operations at a
* time.
*
* @name rejectLimit
* @static
* @memberOf module:Collections
* @method
* @see [async.reject]{@link module:Collections.reject}
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {number} limit - The maximum number of async operations at a time.
* @param {Function} iteratee - An async truth test to apply to each item in
* `coll`.
 * The iteratee should complete with a boolean value as its `result`.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Invoked with (err, results).
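 * @example
 *
 * // a minimal sketch (added for illustration): at most 2 checks in flight
 * async.rejectLimit(['file1','file2','file3'], 2, function(filePath, callback) {
 *     fs.access(filePath, function(err) {
 *         callback(null, !err)
 *     });
 * }, function(err, results) {
 *     // results now equals an array of missing files
 * });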
*/
var rejectLimit = doParallelLimit(reject$1);
/**
* The same as [`reject`]{@link module:Collections.reject} but runs only a single async operation at a time.
*
* @name rejectSeries
* @static
* @memberOf module:Collections
* @method
* @see [async.reject]{@link module:Collections.reject}
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {Function} iteratee - An async truth test to apply to each item in
* `coll`.
 * The iteratee should complete with a boolean value as its `result`.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Invoked with (err, results).
*/
var rejectSeries = doLimit(rejectLimit, 1);
/**
* Creates a function that returns `value`.
*
* @static
* @memberOf _
* @since 2.4.0
* @category Util
* @param {*} value The value to return from the new function.
* @returns {Function} Returns the new constant function.
* @example
*
* var objects = _.times(2, _.constant({ 'a': 1 }));
*
* console.log(objects);
* // => [{ 'a': 1 }, { 'a': 1 }]
*
* console.log(objects[0] === objects[1]);
* // => true
*/
function constant$1(value) {
return function() {
return value;
};
}
/**
* Attempts to get a successful response from `task` no more than `times` times
* before returning an error. If the task is successful, the `callback` will be
* passed the result of the successful task. If all attempts fail, the callback
* will be passed the error and result (if any) of the final attempt.
*
* @name retry
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @see [async.retryable]{@link module:ControlFlow.retryable}
* @param {Object|number} [opts = {times: 5, interval: 0}| 5] - Can be either an
* object with `times` and `interval` or a number.
* * `times` - The number of attempts to make before giving up. The default
* is `5`.
* * `interval` - The time to wait between retries, in milliseconds. The
* default is `0`. The interval may also be specified as a function of the
* retry count (see example).
* * `errorFilter` - An optional synchronous function that is invoked on
* erroneous result. If it returns `true` the retry attempts will continue;
* if the function returns `false` the retry flow is aborted with the current
* attempt's error and result being returned to the final callback.
* Invoked with (err).
* * If `opts` is a number, the number specifies the number of times to retry,
* with the default interval of `0`.
* @param {AsyncFunction} task - An async function to retry.
* Invoked with (callback).
* @param {Function} [callback] - An optional callback which is called when the
* task has succeeded, or after the final failed attempt. It receives the `err`
* and `result` arguments of the last attempt at completing the `task`. Invoked
* with (err, results).
*
* @example
*
* // The `retry` function can be used as a stand-alone control flow by passing
* // a callback, as shown below:
*
* // try calling apiMethod 3 times
* async.retry(3, apiMethod, function(err, result) {
* // do something with the result
* });
*
* // try calling apiMethod 3 times, waiting 200 ms between each retry
* async.retry({times: 3, interval: 200}, apiMethod, function(err, result) {
* // do something with the result
* });
*
* // try calling apiMethod 10 times with exponential backoff
* // (i.e. intervals of 100, 200, 400, 800, 1600, ... milliseconds)
* async.retry({
* times: 10,
* interval: function(retryCount) {
* return 50 * Math.pow(2, retryCount);
* }
* }, apiMethod, function(err, result) {
* // do something with the result
* });
*
 * // try calling apiMethod the default 5 times, with no delay between each retry
* async.retry(apiMethod, function(err, result) {
* // do something with the result
* });
*
* // try calling apiMethod only when error condition satisfies, all other
* // errors will abort the retry control flow and return to final callback
* async.retry({
* errorFilter: function(err) {
* return err.message === 'Temporary error'; // only retry on a specific error
* }
* }, apiMethod, function(err, result) {
* // do something with the result
* });
*
* // to retry individual methods that are not as reliable within other
* // control flow functions, use the `retryable` wrapper:
* async.auto({
* users: api.getUsers.bind(api),
* payments: async.retryable(3, api.getPayments.bind(api))
* }, function(err, results) {
* // do something with the results
* });
*
*/
function retry(opts, task, callback) {
var DEFAULT_TIMES = 5;
var DEFAULT_INTERVAL = 0;
var options = {
times: DEFAULT_TIMES,
intervalFunc: constant$1(DEFAULT_INTERVAL)
};
function parseTimes(acc, t) {
if (typeof t === 'object') {
acc.times = +t.times || DEFAULT_TIMES;
acc.intervalFunc = typeof t.interval === 'function' ?
t.interval :
constant$1(+t.interval || DEFAULT_INTERVAL);
acc.errorFilter = t.errorFilter;
} else if (typeof t === 'number' || typeof t === 'string') {
acc.times = +t || DEFAULT_TIMES;
} else {
throw new Error("Invalid arguments for async.retry");
}
}
if (arguments.length < 3 && typeof opts === 'function') {
callback = task || noop;
task = opts;
} else {
parseTimes(options, opts);
callback = callback || noop;
}
if (typeof task !== 'function') {
throw new Error("Invalid arguments for async.retry");
}
var _task = wrapAsync(task);
var attempt = 1;
function retryAttempt() {
_task(function(err) {
if (err && attempt++ < options.times &&
(typeof options.errorFilter != 'function' ||
options.errorFilter(err))) {
setTimeout(retryAttempt, options.intervalFunc(attempt));
} else {
callback.apply(null, arguments);
}
});
}
retryAttempt();
}
/**
* A close relative of [`retry`]{@link module:ControlFlow.retry}. This method
* wraps a task and makes it retryable, rather than immediately calling it
* with retries.
*
* @name retryable
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.retry]{@link module:ControlFlow.retry}
* @category Control Flow
* @param {Object|number} [opts = {times: 5, interval: 0}| 5] - optional
* options, exactly the same as from `retry`
* @param {AsyncFunction} task - the asynchronous function to wrap.
* This function will be passed any arguments passed to the returned wrapper.
* Invoked with (...args, callback).
* @returns {AsyncFunction} The wrapped function, which when invoked, will
* retry on an error, based on the parameters specified in `opts`.
* This function will accept the same parameters as `task`.
* @example
*
* async.auto({
* dep1: async.retryable(3, getFromFlakyService),
* process: ["dep1", async.retryable(3, function (results, cb) {
* maybeProcessData(results.dep1, cb);
* })]
* }, callback);
*/
var retryable = function (opts, task) {
if (!task) {
task = opts;
opts = null;
}
var _task = wrapAsync(task);
return initialParams(function (args, callback) {
function taskFn(cb) {
_task.apply(null, args.concat(cb));
}
if (opts) retry(opts, taskFn, callback);
else retry(taskFn, callback);
});
};
/**
* Run the functions in the `tasks` collection in series, each one running once
* the previous function has completed. If any functions in the series pass an
* error to its callback, no more functions are run, and `callback` is
* immediately called with the value of the error. Otherwise, `callback`
* receives an array of results when `tasks` have completed.
*
* It is also possible to use an object instead of an array. Each property will
* be run as a function, and the results will be passed to the final `callback`
* as an object instead of an array. This can be a more readable way of handling
* results from {@link async.series}.
*
* **Note** that while many implementations preserve the order of object
* properties, the [ECMAScript Language Specification](http://www.ecma-international.org/ecma-262/5.1/#sec-8.6)
* explicitly states that
*
* > The mechanics and order of enumerating the properties is not specified.
*
* So if you rely on the order in which your series of functions are executed,
* and want this to work on all platforms, consider using an array.
*
* @name series
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @param {Array|Iterable|Object} tasks - A collection containing
* [async functions]{@link AsyncFunction} to run in series.
* Each function can complete with any number of optional `result` values.
* @param {Function} [callback] - An optional callback to run once all the
* functions have completed. This function gets a results array (or object)
* containing all the result arguments passed to the `task` callbacks. Invoked
* with (err, result).
* @example
* async.series([
* function(callback) {
* // do some stuff ...
* callback(null, 'one');
* },
* function(callback) {
* // do some more stuff ...
* callback(null, 'two');
* }
* ],
* // optional callback
* function(err, results) {
* // results is now equal to ['one', 'two']
* });
*
* async.series({
* one: function(callback) {
* setTimeout(function() {
* callback(null, 1);
* }, 200);
* },
* two: function(callback){
* setTimeout(function() {
* callback(null, 2);
* }, 100);
* }
* }, function(err, results) {
* // results is now equal to: {one: 1, two: 2}
* });
*/
function series(tasks, callback) {
_parallel(eachOfSeries, tasks, callback);
}
/**
* Returns `true` if at least one element in the `coll` satisfies an async test.
* If any iteratee call returns `true`, the main `callback` is immediately
* called.
*
* @name some
* @static
* @memberOf module:Collections
* @method
* @alias any
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async truth test to apply to each item
* in the collections in parallel.
* The iteratee should complete with a boolean `result` value.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called as soon as any
* iteratee returns `true`, or after all the iteratee functions have finished.
* Result will be either `true` or `false` depending on the values of the async
* tests. Invoked with (err, result).
* @example
*
* async.some(['file1','file2','file3'], function(filePath, callback) {
* fs.access(filePath, function(err) {
* callback(null, !err)
* });
* }, function(err, result) {
* // if result is true then at least one of the files exists
* });
*/
var some = doParallel(_createTester(Boolean, identity));
/**
* The same as [`some`]{@link module:Collections.some} but runs a maximum of `limit` async operations at a time.
*
* @name someLimit
* @static
* @memberOf module:Collections
* @method
* @see [async.some]{@link module:Collections.some}
* @alias anyLimit
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {number} limit - The maximum number of async operations at a time.
* @param {AsyncFunction} iteratee - An async truth test to apply to each item
* in the collections in parallel.
* The iteratee should complete with a boolean `result` value.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called as soon as any
* iteratee returns `true`, or after all the iteratee functions have finished.
* Result will be either `true` or `false` depending on the values of the async
* tests. Invoked with (err, result).
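 * @example
 *
 * // a minimal sketch (added for illustration): at most 2 checks in flight
 * async.someLimit(['file1','file2','file3'], 2, function(filePath, callback) {
 *     fs.access(filePath, function(err) {
 *         callback(null, !err)
 *     });
 * }, function(err, result) {
 *     // if result is true then at least one of the files exists
 * });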
*/
var someLimit = doParallelLimit(_createTester(Boolean, identity));
/**
* The same as [`some`]{@link module:Collections.some} but runs only a single async operation at a time.
*
* @name someSeries
* @static
* @memberOf module:Collections
* @method
* @see [async.some]{@link module:Collections.some}
* @alias anySeries
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async truth test to apply to each item
* in the collections in series.
* The iteratee should complete with a boolean `result` value.
* Invoked with (item, callback).
* @param {Function} [callback] - A callback which is called as soon as any
* iteratee returns `true`, or after all the iteratee functions have finished.
* Result will be either `true` or `false` depending on the values of the async
* tests. Invoked with (err, result).
*/
var someSeries = doLimit(someLimit, 1);
/**
* Sorts a list by the results of running each `coll` value through an async
* `iteratee`.
*
* @name sortBy
* @static
* @memberOf module:Collections
* @method
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {AsyncFunction} iteratee - An async function to apply to each item in
* `coll`.
* The iteratee should complete with a value to use as the sort criteria as
* its `result`.
* Invoked with (item, callback).
* @param {Function} callback - A callback which is called after all the
* `iteratee` functions have finished, or an error occurs. Results is the items
* from the original `coll` sorted by the values returned by the `iteratee`
* calls. Invoked with (err, results).
* @example
*
* async.sortBy(['file1','file2','file3'], function(file, callback) {
* fs.stat(file, function(err, stats) {
* callback(err, stats.mtime);
* });
* }, function(err, results) {
* // results is now the original array of files sorted by
* // modified date
* });
*
* // By modifying the callback parameter the
* // sorting order can be influenced:
*
* // ascending order
* async.sortBy([1,9,3,5], function(x, callback) {
* callback(null, x);
* }, function(err,result) {
* // result callback
* });
*
* // descending order
* async.sortBy([1,9,3,5], function(x, callback) {
* callback(null, x*-1); //<- x*-1 instead of x, turns the order around
* }, function(err,result) {
* // result callback
* });
*/
function sortBy (coll, iteratee, callback) {
var _iteratee = wrapAsync(iteratee);
map(coll, function (x, callback) {
_iteratee(x, function (err, criteria) {
if (err) return callback(err);
callback(null, {value: x, criteria: criteria});
});
}, function (err, results) {
if (err) return callback(err);
callback(null, arrayMap(results.sort(comparator), baseProperty('value')));
});
function comparator(left, right) {
var a = left.criteria, b = right.criteria;
return a < b ? -1 : a > b ? 1 : 0;
}
}
/**
* Sets a time limit on an asynchronous function. If the function does not call
* its callback within the specified milliseconds, it will be called with a
* timeout error. The code property for the error object will be `'ETIMEDOUT'`.
*
* @name timeout
* @static
* @memberOf module:Utils
* @method
* @category Util
* @param {AsyncFunction} asyncFn - The async function to limit in time.
* @param {number} milliseconds - The specified time limit.
* @param {*} [info] - Any variable you want attached (`string`, `object`, etc)
 * to the timeout Error for more information.
* @returns {AsyncFunction} Returns a wrapped function that can be used with any
* of the control flow functions.
* Invoke this function with the same parameters as you would `asyncFunc`.
* @example
*
* function myFunction(foo, callback) {
* doAsyncTask(foo, function(err, data) {
* // handle errors
* if (err) return callback(err);
*
* // do some stuff ...
*
* // return processed data
* return callback(null, data);
* });
* }
*
* var wrapped = async.timeout(myFunction, 1000);
*
* // call `wrapped` as you would `myFunction`
* wrapped({ bar: 'bar' }, function(err, data) {
* // if `myFunction` takes < 1000 ms to execute, `err`
* // and `data` will have their expected values
*
* // else `err` will be an Error with the code 'ETIMEDOUT'
* });
*/
function timeout(asyncFn, milliseconds, info) {
var fn = wrapAsync(asyncFn);
return initialParams(function (args, callback) {
var timedOut = false;
var timer;
function timeoutCallback() {
var name = asyncFn.name || 'anonymous';
var error = new Error('Callback function "' + name + '" timed out.');
error.code = 'ETIMEDOUT';
if (info) {
error.info = info;
}
timedOut = true;
callback(error);
}
args.push(function () {
if (!timedOut) {
callback.apply(null, arguments);
clearTimeout(timer);
}
});
// setup timer and call original function
timer = setTimeout(timeoutCallback, milliseconds);
fn.apply(null, args);
});
}
/* Built-in method references for those with the same name as other `lodash` methods. */
var nativeCeil = Math.ceil;
var nativeMax = Math.max;
/**
* The base implementation of `_.range` and `_.rangeRight` which doesn't
* coerce arguments.
*
* @private
* @param {number} start The start of the range.
* @param {number} end The end of the range.
* @param {number} step The value to increment or decrement by.
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Array} Returns the range of numbers.
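 * @example
 *
 * // added for illustration:
 * // baseRange(0, 4, 1)       => [0, 1, 2, 3]
 * // baseRange(0, 4, 1, true) => [3, 2, 1, 0]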
*/
function baseRange(start, end, step, fromRight) {
var index = -1,
length = nativeMax(nativeCeil((end - start) / (step || 1)), 0),
result = Array(length);
while (length--) {
result[fromRight ? length : ++index] = start;
start += step;
}
return result;
}
/**
* The same as [times]{@link module:ControlFlow.times} but runs a maximum of `limit` async operations at a
* time.
*
* @name timesLimit
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.times]{@link module:ControlFlow.times}
* @category Control Flow
* @param {number} count - The number of times to run the function.
* @param {number} limit - The maximum number of async operations at a time.
* @param {AsyncFunction} iteratee - The async function to call `n` times.
* Invoked with the iteration index and a callback: (n, next).
* @param {Function} callback - see [async.map]{@link module:Collections.map}.
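 * @example
 *
 * // a minimal sketch (added for illustration); `createUser` is the same
 * // hypothetical async factory used in the `times` example below
 * async.timesLimit(5, 2, function(n, next) {
 *     createUser(n, next);
 * }, function(err, users) {
 *     // 5 users created, never more than 2 requests in flight
 * });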
*/
function timeLimit(count, limit, iteratee, callback) {
var _iteratee = wrapAsync(iteratee);
mapLimit(baseRange(0, count, 1), limit, _iteratee, callback);
}
/**
* Calls the `iteratee` function `n` times, and accumulates results in the same
* manner you would use with [map]{@link module:Collections.map}.
*
* @name times
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.map]{@link module:Collections.map}
* @category Control Flow
* @param {number} n - The number of times to run the function.
* @param {AsyncFunction} iteratee - The async function to call `n` times.
* Invoked with the iteration index and a callback: (n, next).
* @param {Function} callback - see {@link module:Collections.map}.
* @example
*
* // Pretend this is some complicated async factory
* var createUser = function(id, callback) {
* callback(null, {
* id: 'user' + id
* });
* };
*
* // generate 5 users
* async.times(5, function(n, next) {
* createUser(n, function(err, user) {
* next(err, user);
* });
* }, function(err, users) {
* // we should now have 5 users
* });
*/
var times = doLimit(timeLimit, Infinity);
/**
* The same as [times]{@link module:ControlFlow.times} but runs only a single async operation at a time.
*
* @name timesSeries
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.times]{@link module:ControlFlow.times}
* @category Control Flow
* @param {number} n - The number of times to run the function.
* @param {AsyncFunction} iteratee - The async function to call `n` times.
* Invoked with the iteration index and a callback: (n, next).
* @param {Function} callback - see {@link module:Collections.map}.
*/
var timesSeries = doLimit(timeLimit, 1);
/**
* A relative of `reduce`. Takes an Object or Array, and iterates over each
* element in series, each step potentially mutating an `accumulator` value.
* The type of the accumulator defaults to the type of collection passed in.
*
* @name transform
* @static
* @memberOf module:Collections
* @method
* @category Collection
* @param {Array|Iterable|Object} coll - A collection to iterate over.
* @param {*} [accumulator] - The initial state of the transform. If omitted,
 * it will default to an empty Object or Array, depending on the type of `coll`.
* @param {AsyncFunction} iteratee - A function applied to each item in the
* collection that potentially modifies the accumulator.
* Invoked with (accumulator, item, key, callback).
* @param {Function} [callback] - A callback which is called after all the
* `iteratee` functions have finished. Result is the transformed accumulator.
* Invoked with (err, result).
* @example
*
* async.transform([1,2,3], function(acc, item, index, callback) {
* // pointless async:
* process.nextTick(function() {
* acc.push(item * 2)
* callback(null)
* });
* }, function(err, result) {
* // result is now equal to [2, 4, 6]
* });
*
* @example
*
* async.transform({a: 1, b: 2, c: 3}, function (obj, val, key, callback) {
* setImmediate(function () {
* obj[key] = val * 2;
* callback();
* })
* }, function (err, result) {
* // result is equal to {a: 2, b: 4, c: 6}
* })
*/
function transform (coll, accumulator, iteratee, callback) {
if (arguments.length <= 3) {
callback = iteratee;
iteratee = accumulator;
accumulator = isArray(coll) ? [] : {};
}
callback = once(callback || noop);
var _iteratee = wrapAsync(iteratee);
eachOf(coll, function(v, k, cb) {
_iteratee(accumulator, v, k, cb);
}, function(err) {
callback(err, accumulator);
});
}
/**
 * It runs each task in series but stops whenever any of the functions succeeds.
 * If one of the tasks succeeds, the `callback` will be passed the result of the
 * successful task. If all tasks fail, the callback will be passed the error and
 * result (if any) of the final attempt.
*
* @name tryEach
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @param {Array|Iterable|Object} tasks - A collection containing functions to
 * run, each function is passed a `callback(err, result)` that it must call on
* completion with an error `err` (which can be `null`) and an optional `result`
* value.
* @param {Function} [callback] - An optional callback which is called when one
* of the tasks has succeeded, or all have failed. It receives the `err` and
* `result` arguments of the last attempt at completing the `task`. Invoked with
* (err, results).
* @example
* async.tryEach([
* function getDataFromFirstWebsite(callback) {
* // Try getting the data from the first website
* callback(err, data);
* },
* function getDataFromSecondWebsite(callback) {
* // First website failed,
* // Try getting the data from the backup website
* callback(err, data);
* }
* ],
* // optional callback
* function(err, results) {
 * // Now do something with the data.
* });
*
*/
function tryEach(tasks, callback) {
var error = null;
var result;
callback = callback || noop;
eachSeries(tasks, function(task, callback) {
wrapAsync(task)(function (err, res/*, ...args*/) {
if (arguments.length > 2) {
result = slice(arguments, 1);
} else {
result = res;
}
error = err;
callback(!err);
});
}, function () {
callback(error, result);
});
}
/**
* Undoes a [memoize]{@link module:Utils.memoize}d function, reverting it to the original,
* unmemoized form. Handy for testing.
*
* @name unmemoize
* @static
* @memberOf module:Utils
* @method
* @see [async.memoize]{@link module:Utils.memoize}
* @category Util
* @param {AsyncFunction} fn - the memoized function
* @returns {AsyncFunction} a function that calls the original unmemoized function
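 * @example
 *
 * // a minimal sketch (added for illustration)
 * var hash = async.memoize(function(name, callback) {
 *     callback(null, 'hello ' + name);
 * });
 * var original = async.unmemoize(hash);
 * original('world', function(err, greeting) {
 *     // greeting is 'hello world'; the memo cache was bypassed
 * });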
*/
function unmemoize(fn) {
return function () {
return (fn.unmemoized || fn).apply(null, arguments);
};
}
/**
* Repeatedly call `iteratee`, while `test` returns `true`. Calls `callback` when
* stopped, or an error occurs.
*
* @name whilst
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @param {Function} test - synchronous truth test to perform before each
* execution of `iteratee`. Invoked with ().
* @param {AsyncFunction} iteratee - An async function which is called each time
* `test` passes. Invoked with (callback).
* @param {Function} [callback] - A callback which is called after the test
* function has failed and repeated execution of `iteratee` has stopped. `callback`
* will be passed an error and any arguments passed to the final `iteratee`'s
* callback. Invoked with (err, [results]);
* @returns undefined
* @example
*
* var count = 0;
* async.whilst(
* function() { return count < 5; },
* function(callback) {
* count++;
* setTimeout(function() {
* callback(null, count);
* }, 1000);
* },
* function (err, n) {
* // 5 seconds have passed, n = 5
* }
* );
*/
function whilst(test, iteratee, callback) {
callback = onlyOnce(callback || noop);
var _iteratee = wrapAsync(iteratee);
if (!test()) return callback(null);
var next = function(err/*, ...args*/) {
if (err) return callback(err);
if (test()) return _iteratee(next);
var args = slice(arguments, 1);
callback.apply(null, [null].concat(args));
};
_iteratee(next);
}
/**
* Repeatedly call `iteratee` until `test` returns `true`. Calls `callback` when
* stopped, or an error occurs. `callback` will be passed an error and any
* arguments passed to the final `iteratee`'s callback.
*
* The inverse of [whilst]{@link module:ControlFlow.whilst}.
*
* @name until
* @static
* @memberOf module:ControlFlow
* @method
* @see [async.whilst]{@link module:ControlFlow.whilst}
* @category Control Flow
* @param {Function} test - synchronous truth test to perform before each
* execution of `iteratee`. Invoked with ().
* @param {AsyncFunction} iteratee - An async function which is called each time
* `test` fails. Invoked with (callback).
* @param {Function} [callback] - A callback which is called after the test
* function has passed and repeated execution of `iteratee` has stopped. `callback`
* will be passed an error and any arguments passed to the final `iteratee`'s
* callback. Invoked with (err, [results]);
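 * @example
 *
 * // a minimal sketch (added for illustration): the mirror image of the
 * // `whilst` example above
 * var count = 0;
 * async.until(
 *     function() { return count >= 5; },
 *     function(callback) {
 *         count++;
 *         setTimeout(function() {
 *             callback(null, count);
 *         }, 1000);
 *     },
 *     function (err, n) {
 *         // 5 seconds have passed, n = 5
 *     }
 * );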
*/
function until(test, iteratee, callback) {
whilst(function() {
return !test.apply(this, arguments);
}, iteratee, callback);
}
/**
* Runs the `tasks` array of functions in series, each passing their results to
* the next in the array. However, if any of the `tasks` pass an error to their
* own callback, the next function is not executed, and the main `callback` is
* immediately called with the error.
*
* @name waterfall
* @static
* @memberOf module:ControlFlow
* @method
* @category Control Flow
* @param {Array} tasks - An array of [async functions]{@link AsyncFunction}
* to run.
* Each function should complete with any number of `result` values.
* The `result` values will be passed as arguments, in order, to the next task.
* @param {Function} [callback] - An optional callback to run once all the
* functions have completed. This will be passed the results of the last task's
* callback. Invoked with (err, [results]).
* @returns undefined
* @example
*
* async.waterfall([
* function(callback) {
* callback(null, 'one', 'two');
* },
* function(arg1, arg2, callback) {
* // arg1 now equals 'one' and arg2 now equals 'two'
* callback(null, 'three');
* },
* function(arg1, callback) {
* // arg1 now equals 'three'
* callback(null, 'done');
* }
* ], function (err, result) {
* // result now equals 'done'
* });
*
* // Or, with named functions:
* async.waterfall([
* myFirstFunction,
* mySecondFunction,
* myLastFunction,
* ], function (err, result) {
* // result now equals 'done'
* });
* function myFirstFunction(callback) {
* callback(null, 'one', 'two');
* }
* function mySecondFunction(arg1, arg2, callback) {
* // arg1 now equals 'one' and arg2 now equals 'two'
* callback(null, 'three');
* }
* function myLastFunction(arg1, callback) {
* // arg1 now equals 'three'
* callback(null, 'done');
* }
*/
var waterfall = function(tasks, callback) {
callback = once(callback || noop);
if (!isArray(tasks)) return callback(new Error('First argument to waterfall must be an array of functions'));
if (!tasks.length) return callback();
var taskIndex = 0;
function nextTask(args) {
var task = wrapAsync(tasks[taskIndex++]);
args.push(onlyOnce(next));
task.apply(null, args);
}
function next(err/*, ...args*/) {
if (err || taskIndex === tasks.length) {
return callback.apply(null, arguments);
}
nextTask(slice(arguments, 1));
}
nextTask([]);
};
/**
* An "async function" in the context of Async is an asynchronous function with
* a variable number of parameters, with the final parameter being a callback.
* (`function (arg1, arg2, ..., callback) {}`)
* The final callback is of the form `callback(err, results...)`, which must be
 * called once the function is completed. The callback should be called with an
 * Error as its first argument to signal that an error occurred.
* Otherwise, if no error occurred, it should be called with `null` as the first
* argument, and any additional `result` arguments that may apply, to signal
* successful completion.
* The callback must be called exactly once, ideally on a later tick of the
* JavaScript event loop.
*
* This type of function is also referred to as a "Node-style async function",
* or a "continuation passing-style function" (CPS). Most of the methods of this
* library are themselves CPS/Node-style async functions, or functions that
* return CPS/Node-style async functions.
*
* Wherever we accept a Node-style async function, we also directly accept an
* [ES2017 `async` function]{@link https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Statements/async_function}.
* In this case, the `async` function will not be passed a final callback
* argument, and any thrown error will be used as the `err` argument of the
* implicit callback, and the return value will be used as the `result` value.
 * (i.e. a rejection of the returned Promise becomes the `err` callback
 * argument, and a resolved value becomes the `result`.)
*
* Note, due to JavaScript limitations, we can only detect native `async`
 * functions and not transpiled implementations.
* Your environment must have `async`/`await` support for this to work.
* (e.g. Node > v7.6, or a recent version of a modern browser).
* If you are using `async` functions through a transpiler (e.g. Babel), you
* must still wrap the function with [asyncify]{@link module:Utils.asyncify},
* because the `async function` will be compiled to an ordinary function that
* returns a promise.
*
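 * @example
 *
 * // a minimal sketch (added for illustration): a Node-style callback
 * // function and a native ES2017 `async` function are interchangeable
 * async.map([1, 2], function(n, callback) {
 *     callback(null, n * 2);
 * }, function(err, results) {
 *     // results is [2, 4]
 * });
 *
 * // requires native async/await support in the environment
 * async.map([1, 2], async function(n) {
 *     return n * 2;
 * }, function(err, results) {
 *     // results is [2, 4]
 * });
 *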
* @typedef {Function} AsyncFunction
* @static
*/
/**
* Async is a utility module which provides straight-forward, powerful functions
* for working with asynchronous JavaScript. Although originally designed for
* use with [Node.js](http://nodejs.org) and installable via
* `npm install --save async`, it can also be used directly in the browser.
* @module async
* @see AsyncFunction
*/
/**
* A collection of `async` functions for manipulating collections, such as
* arrays and objects.
* @module Collections
*/
/**
* A collection of `async` functions for controlling the flow through a script.
* @module ControlFlow
*/
/**
* A collection of `async` utility functions.
* @module Utils
*/
var index = {
apply: apply,
applyEach: applyEach,
applyEachSeries: applyEachSeries,
asyncify: asyncify,
auto: auto,
autoInject: autoInject,
cargo: cargo,
compose: compose,
concat: concat,
concatLimit: concatLimit,
concatSeries: concatSeries,
constant: constant,
detect: detect,
detectLimit: detectLimit,
detectSeries: detectSeries,
dir: dir,
doDuring: doDuring,
doUntil: doUntil,
doWhilst: doWhilst,
during: during,
each: eachLimit,
eachLimit: eachLimit$1,
eachOf: eachOf,
eachOfLimit: eachOfLimit,
eachOfSeries: eachOfSeries,
eachSeries: eachSeries,
ensureAsync: ensureAsync,
every: every,
everyLimit: everyLimit,
everySeries: everySeries,
filter: filter,
filterLimit: filterLimit,
filterSeries: filterSeries,
forever: forever,
groupBy: groupBy,
groupByLimit: groupByLimit,
groupBySeries: groupBySeries,
log: log,
map: map,
mapLimit: mapLimit,
mapSeries: mapSeries,
mapValues: mapValues,
mapValuesLimit: mapValuesLimit,
mapValuesSeries: mapValuesSeries,
memoize: memoize,
nextTick: nextTick,
parallel: parallelLimit,
parallelLimit: parallelLimit$1,
priorityQueue: priorityQueue,
queue: queue$1,
race: race,
reduce: reduce,
reduceRight: reduceRight,
reflect: reflect,
reflectAll: reflectAll,
reject: reject,
rejectLimit: rejectLimit,
rejectSeries: rejectSeries,
retry: retry,
retryable: retryable,
seq: seq,
series: series,
setImmediate: setImmediate$1,
some: some,
someLimit: someLimit,
someSeries: someSeries,
sortBy: sortBy,
timeout: timeout,
times: times,
timesLimit: timeLimit,
timesSeries: timesSeries,
transform: transform,
tryEach: tryEach,
unmemoize: unmemoize,
until: until,
waterfall: waterfall,
whilst: whilst,
// aliases
all: every,
allLimit: everyLimit,
allSeries: everySeries,
any: some,
anyLimit: someLimit,
anySeries: someSeries,
find: detect,
findLimit: detectLimit,
findSeries: detectSeries,
forEach: eachLimit,
forEachSeries: eachSeries,
forEachLimit: eachLimit$1,
forEachOf: eachOf,
forEachOfSeries: eachOfSeries,
forEachOfLimit: eachOfLimit,
inject: reduce,
foldl: reduce,
foldr: reduceRight,
select: filter,
selectLimit: filterLimit,
selectSeries: filterSeries,
wrapSync: asyncify
};
exports['default'] = index;
exports.apply = apply;
exports.applyEach = applyEach;
exports.applyEachSeries = applyEachSeries;
exports.asyncify = asyncify;
exports.auto = auto;
exports.autoInject = autoInject;
exports.cargo = cargo;
exports.compose = compose;
exports.concat = concat;
exports.concatLimit = concatLimit;
exports.concatSeries = concatSeries;
exports.constant = constant;
exports.detect = detect;
exports.detectLimit = detectLimit;
exports.detectSeries = detectSeries;
exports.dir = dir;
exports.doDuring = doDuring;
exports.doUntil = doUntil;
exports.doWhilst = doWhilst;
exports.during = during;
exports.each = eachLimit;
exports.eachLimit = eachLimit$1;
exports.eachOf = eachOf;
exports.eachOfLimit = eachOfLimit;
exports.eachOfSeries = eachOfSeries;
exports.eachSeries = eachSeries;
exports.ensureAsync = ensureAsync;
exports.every = every;
exports.everyLimit = everyLimit;
exports.everySeries = everySeries;
exports.filter = filter;
exports.filterLimit = filterLimit;
exports.filterSeries = filterSeries;
exports.forever = forever;
exports.groupBy = groupBy;
exports.groupByLimit = groupByLimit;
exports.groupBySeries = groupBySeries;
exports.log = log;
exports.map = map;
exports.mapLimit = mapLimit;
exports.mapSeries = mapSeries;
exports.mapValues = mapValues;
exports.mapValuesLimit = mapValuesLimit;
exports.mapValuesSeries = mapValuesSeries;
exports.memoize = memoize;
exports.nextTick = nextTick;
exports.parallel = parallelLimit;
exports.parallelLimit = parallelLimit$1;
exports.priorityQueue = priorityQueue;
exports.queue = queue$1;
exports.race = race;
exports.reduce = reduce;
exports.reduceRight = reduceRight;
exports.reflect = reflect;
exports.reflectAll = reflectAll;
exports.reject = reject;
exports.rejectLimit = rejectLimit;
exports.rejectSeries = rejectSeries;
exports.retry = retry;
exports.retryable = retryable;
exports.seq = seq;
exports.series = series;
exports.setImmediate = setImmediate$1;
exports.some = some;
exports.someLimit = someLimit;
exports.someSeries = someSeries;
exports.sortBy = sortBy;
exports.timeout = timeout;
exports.times = times;
exports.timesLimit = timeLimit;
exports.timesSeries = timesSeries;
exports.transform = transform;
exports.tryEach = tryEach;
exports.unmemoize = unmemoize;
exports.until = until;
exports.waterfall = waterfall;
exports.whilst = whilst;
exports.all = every;
exports.allLimit = everyLimit;
exports.allSeries = everySeries;
exports.any = some;
exports.anyLimit = someLimit;
exports.anySeries = someSeries;
exports.find = detect;
exports.findLimit = detectLimit;
exports.findSeries = detectSeries;
exports.forEach = eachLimit;
exports.forEachSeries = eachSeries;
exports.forEachLimit = eachLimit$1;
exports.forEachOf = eachOf;
exports.forEachOfSeries = eachOfSeries;
exports.forEachOfLimit = eachOfLimit;
exports.inject = reduce;
exports.foldl = reduce;
exports.foldr = reduceRight;
exports.select = filter;
exports.selectLimit = filterLimit;
exports.selectSeries = filterSeries;
exports.wrapSync = asyncify;
Object.defineProperty(exports, '__esModule', { value: true });
})));
});
require.define("/coffee/client/client.coffee",function(require,module,exports,__dirname,__filename,process,global){(function() {
var async;
async = require('async');
window.onload = function() {
var Am, GRADIANT_FACTOR, GRAVITY, RADIAL_DEGREE_CONVERSION, TAIL_LENGTH, UNIT_RADIUS, Va, Vr, adjustScale, adjustUNIT_RADIUS, canvas, ctx, drawObjs, drawScale, drawScaleV, initObj, initObjs, keyDown, keyUp, moveObjs, objCollision, objs, pointList, r, startTime, theta, tick, tickFunction, translateX, translateY, x, y;
canvas = document.getElementById('canvas');
ctx = canvas.getContext('2d');
startTime = (new Date()).getTime();
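// tickFunction wraps a frame callback so that it is invoked with the
// elapsed milliseconds since page load rather than with no arguments.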
tickFunction = function(func) {
return function() {
var t;
t = (new Date()).getTime() - startTime;
return func(t);
};
};
RADIAL_DEGREE_CONVERSION = (2.0 * Math.PI) / 360;
UNIT_RADIUS = canvas.height * (1 / 3);
ctx.translate(canvas.width / 2, canvas.height / 2);
objs = [];
initObj = function() {
var obj;
return obj = {
r: Math.random() * UNIT_RADIUS + 10,
t: Math.random() * 360,
vr: Math.random() * 0.1,
vt: Math.random(),
d: Math.random() * 20 + 5
};
};
initObjs = function() {
var i, j, obj, results;
results = [];
for (i = j = 1; j <= 20; i = ++j) {
obj = initObj();
results.push(objs.push(obj));
}
return results;
};
drawObjs = function() {
var j, len, obj, results, theta, x, y;
ctx = canvas.getContext('2d');
results = [];
for (j = 0, len = objs.length; j < len; j++) {
obj = objs[j];
ctx.beginPath();
ctx.strokeStyle = '#000000';
theta = obj.t * RADIAL_DEGREE_CONVERSION;
x = obj.r * Math.cos(theta);
y = obj.r * Math.sin(theta);
ctx.arc(x, y, obj.d, 0, Math.PI * 2);
results.push(ctx.stroke());
}
return results;
};
moveObjs = function(t) {
var j, len, obj, results;
results = [];
for (j = 0, len = objs.length; j < len; j++) {
obj = objs[j];
obj.r += obj.vr;
results.push(obj.t += obj.vt);
}
return results;
};
objCollision = function(x, y) {
var collisionOccurred, dr, dx, dy, j, len, newObjs, obj, ox, oy, theta;
newObjs = [];
collisionOccurred = false;
for (j = 0, len = objs.length; j < len; j++) {
obj = objs[j];
theta = obj.t * RADIAL_DEGREE_CONVERSION;
ox = obj.r * Math.cos(theta);
oy = obj.r * Math.sin(theta);
dx = x - ox;
dy = y - oy;
dr = obj.d + 10;
if (Math.pow(dx, 2) + Math.pow(dy, 2) < Math.pow(dr, 2)) {
newObjs.push(initObj());
collisionOccurred = true;
} else {
newObjs.push(obj);
}
}
objs = newObjs;
return collisionOccurred;
};
Va = void 0;
Vr = 0;
r = UNIT_RADIUS * 2;
Am = 0;
x = void 0;
y = void 0;
theta = 0;
pointList = [];
TAIL_LENGTH = 50;
GRADIANT_FACTOR = 256 / TAIL_LENGTH;
GRAVITY = 0.15;
drawScale = 1;
drawScaleV = 0;
keyDown = function() {
console.log('Key Down');
return Am = 100;
};
keyUp = function() {
console.log('Key Up');
return Am = 0;
};
canvas.focus();
window.addEventListener('keyup', keyUp);
window.addEventListener('keydown', keyDown);
translateX = 0;
translateY = 0;
initObjs();
adjustScale = function(r) {
drawScale = (UNIT_RADIUS / r) * 1.50;
if (drawScale > 1) {
return drawScale = 1;
}
};
adjustUNIT_RADIUS = function(t) {
Vr = Vr - GRAVITY;
Vr *= 0.995;
r += Vr;
if (Vr < 0 && r < 10) {
return Vr = -Vr;
}
};
tick = function(t) {
var first, grey, i, j, ref, th;
adjustUNIT_RADIUS(t);
adjustScale(r, Vr);
moveObjs();
Va = Am / r;
theta = (theta + Va) % 360;
th = theta * RADIAL_DEGREE_CONVERSION;
first = x == null;
x = r * Math.cos(th);
y = r * Math.sin(th);
pointList.unshift({
x: x,
y: y
});
if (pointList.length === 1) {
return;
}
if (pointList.length > TAIL_LENGTH) {
pointList = pointList.slice(0, -1);
}
if (objCollision(x, y, drawScale)) {
if (Vr < 0) {
Vr = -Vr;
}
Vr += 2;
}
ctx.save();
ctx.clearRect(-2000, -2000, 4000, 4000);
ctx.scale(drawScale, drawScale);
ctx.translate(-pointList[0].x / 2, -pointList[0].y / 2);
drawObjs();
ctx.beginPath();
ctx.fillStyle = '#000000';
ctx.arc(0, 0, 10, 0, Math.PI * 2);
ctx.fill();
for (i = j = 1, ref = pointList.length; 1 <= ref ? j < ref : j > ref; i = 1 <= ref ? ++j : --j) {
grey = Math.round(i * GRADIANT_FACTOR);
ctx.strokeStyle = "rgb(" + grey + "," + grey + "," + grey + ")";
ctx.beginPath();
if (i === 1) {
ctx.arc(x, y, 10, 0, 2 * Math.PI);
}
ctx.moveTo(pointList[i - 1].x, pointList[i - 1].y);
ctx.lineTo(pointList[i].x, pointList[i].y);
ctx.stroke();
}
return ctx.restore();
};
return setInterval(tickFunction(tick), 10);
};
}).call(this);
});
require("/coffee/client/client.coffee");
})();
| |
vec3.rs
|
use crate::prelude::*;
use rand::{Rng};
use core::cmp;
use core::fmt;
use core::ops::{Add, Div, Mul, Neg, Sub};
#[derive(Clone, Copy, Default)]
pub struct Vec3 {
pub x: f64,
pub y: f64,
pub z: f64,
}
impl Vec3 {
pub fn zero() -> Vec3 {
Vec3 {
x: 0.0,
y: 0.0,
z: 0.0,
}
}
pub fn one() -> Vec3 {
Vec3 {
x: 1.0,
y: 1.0,
z: 1.0,
}
}
pub fn len(&self) -> f64 {
(self.x * self.x + self.y * self.y + self.z * self.z).sqrt()
}
pub fn dot(&self, other: &Vec3) -> f64 {
self.x * other.x + self.y * other.y + self.z * other.z
}
pub fn cross(&self, other: &Vec3) -> Vec3 {
Vec3 {
x: self.y * other.z - self.z * other.y,
y: self.z * other.x - self.x * other.z,
z: self.x * other.y - self.y * other.x,
}
}
pub fn unit(&self) -> Vec3 {
let len = self.len();
Vec3 {
x: self.x / len,
y: self.y / len,
z: self.z / len,
}
}
pub fn scale(&self, scalar: f64) -> Vec3 {
Vec3 {
x: self.x * scalar,
y: self.y * scalar,
z: self.z * scalar,
}
}
/// V, N should be unit vectors
///
/// ^ ^
/// V \ | N
/// \|
/// =========
pub fn reflect(v: &Vec3, n: &Vec3) -> Vec3 {
n.scale(2.0 * (n.dot(v))) - *v
}
/// V, N should be unit vectors
/// ior: Refractive index
/// inside: Is the ray inside an object (ie. going out of an object)?
pub fn refract(v: &Vec3, n: &Vec3, ior: f64, inside: bool) -> Option<Vec3> {
let (n1, n2, n_dot_v, nn): (f64, f64, _, _) = if !inside {
(1.0, ior, n.dot(v), *n)
} else {
(ior, 1.0, -n.dot(v), -*n)
};
let ratio = n1 / n2;
let disc = 1.0 - ((ratio * ratio) * (1.0 - n_dot_v * n_dot_v));
if disc < 0.0 {
None // Total internal reflection
} else {
Some(v.scale(-ratio) + nn.scale(ratio * n_dot_v - disc.sqrt()))
}
}
pub fn lerp(v1: &Vec3, v2: &Vec3, alpha: f64) -> Vec3 {
Vec3 {
x: v1.x + (v2.x - v1.x) * alpha,
y: v1.y + (v2.y - v1.y) * alpha,
z: v1.z + (v2.z - v1.z) * alpha,
}
}
pub fn clamp(&self, min: f64, max: f64) -> Vec3 {
Vec3 {
x: self.x.max(min).min(max),
y: self.y.max(min).min(max),
z: self.z.max(min).min(max),
}
}
/// Generates a random vector across a uniform distribution using the answer found in
/// http://stackoverflow.com/questions/5408276/python-uniform-spherical-distribution
pub fn random(rng : &mut Box<dyn rand::RngCore>) -> Vec3 {
let phi: f64 = rng.gen_range(0.0, 2.0 * ::core::f64::consts::PI);
let costheta: f64 = rng.gen_range(-1.0, 1.0);
let u: f64 = rng.gen_range(0.0, 1.0);
let theta = costheta.acos();
let r = u.powf(1.0 / 3.0);
Vec3 {
x: r * theta.sin() * phi.cos(),
y: r * theta.sin() * phi.sin(),
z: r * theta.cos(),
}
}
}
impl Add for Vec3 {
type Output = Vec3;
fn add(self, other: Vec3) -> Vec3 {
Vec3 {
x: self.x + other.x,
y: self.y + other.y,
z: self.z + other.z,
}
}
}
impl Add<f64> for Vec3 {
type Output = Vec3;
fn add(self, other: f64) -> Vec3 {
Vec3 {
x: self.x + other,
y: self.y + other,
z: self.z + other,
}
}
}
impl Sub for Vec3 {
type Output = Vec3;
fn sub(self, other: Vec3) -> Vec3 {
Vec3 {
x: self.x - other.x,
y: self.y - other.y,
z: self.z - other.z,
}
}
}
impl Sub<f64> for Vec3 {
type Output = Vec3;
fn sub(self, other: f64) -> Vec3 {
Vec3 {
x: self.x - other,
y: self.y - other,
z: self.z - other,
}
}
}
impl Mul for Vec3 {
type Output = Vec3;
fn mul(self, other: Vec3) -> Vec3 {
Vec3 {
x: self.x * other.x,
y: self.y * other.y,
z: self.z * other.z,
}
}
}
impl Mul<f64> for Vec3 {
type Output = Vec3;
fn mul(self, other: f64) -> Vec3 {
Vec3 {
x: self.x * other,
y: self.y * other,
z: self.z * other,
}
}
}
impl Div for Vec3 {
type Output = Vec3;
fn div(self, other: Vec3) -> Vec3 {
Vec3 {
x: self.x / other.x,
y: self.y / other.y,
z: self.z / other.z,
}
}
}
impl Div<f64> for Vec3 {
type Output = Vec3;
fn div(self, other: f64) -> Vec3 {
Vec3 {
x: self.x / other,
y: self.y / other,
z: self.z / other,
}
}
}
impl Neg for Vec3 {
type Output = Vec3;
fn neg(self) -> Vec3 {
Vec3 {
x: -self.x,
y: -self.y,
z: -self.z,
}
}
}
impl cmp::PartialEq for Vec3 {
fn eq(&self, other: &Vec3) -> bool {
self.x == other.x && self.y == other.y && self.z == other.z
}
}
impl fmt::Debug for Vec3 {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "({}, {}, {})", self.x, self.y, self.z)
}
}
#[test]
fn it_implements_debug() {
let vec = Vec3 {
x: 0.0,
y: 1.0,
z: 1.3,
};
let formatted_string = format!("{:?}", vec);
let expected_string = "(0, 1, 1.3)";
assert_eq!(&formatted_string, expected_string);
}
#[test]
fn it_does_cross_product() {
assert_eq!(
Vec3 {
x: -1.0,
y: 2.0,
z: -1.0
},
Vec3 {
x: 1.0,
y: 2.0,
z: 3.0
}.cross(&Vec3 {
x: 2.0,
y: 3.0,
z: 4.0
})
);
}
#[test]
fn it_does_dot_product() {
assert_eq!(
5.0,
Vec3 {
x: 0.0,
y: 1.0,
z: 2.0
}.dot(&Vec3 {
x: 0.0,
y: 1.0,
z: 2.0
})
);
}
#[test]
fn it_computes_length_of_a_vec3() {
assert_eq!(
Vec3 {
x: -1.0,
y: -1.0,
z: -1.0
},
-Vec3::one()
);
assert_eq!(
29.0_f64.sqrt(),
Vec3 {
x: 2.0,
y: 3.0,
z: 4.0
}.len()
);
assert_eq!(
1.0,
Vec3 {
x: 10.0,
y: 0.0,
z: 0.0
}.unit()
.len()
);
}
#[test]
fn it_has_vec3vec3_equality() {
assert!(Vec3::zero() != Vec3::one());
assert!(Vec3::zero() == Vec3::zero());
}
#[test]
fn it_adds_vec3s_and_scalars() {
assert_eq!(
Vec3 {
x: 2.0,
y: 2.0,
z: 2.0
},
Vec3::one() + Vec3::one()
);
assert_eq!(
Vec3 {
x: 2.0,
y: 2.0,
z: 2.0
},
Vec3::one() + 1.0
);
}
#[test]
fn it_subtracts_vec3s_and_scalars() {
assert_eq!(Vec3::zero(), Vec3::one() - Vec3::one());
assert_eq!(Vec3::zero(), Vec3::one() - 1.0);
}
#[test]
fn it_multiplies_vec3s_and_scalars_elementwise() {
assert_eq!(
Vec3 {
x: 2.0,
y: 2.0,
z: 2.0
},
Vec3::one().scale(2.0)
);
assert_eq!(
Vec3 {
x: 2.0,
y: 2.0,
z: 2.0
},
Vec3::one() * 2.0
);
assert_eq!(
Vec3 {
x: 4.0,
y: 9.0,
z: -4.0
},
Vec3 {
x: 2.0,
y: 3.0,
z: 4.0
} * Vec3 {
x: 2.0,
y: 3.0,
z: -1.0
}
);
}
#[test]
fn it_divides_vec3s_and_scalars_elementwise() {
assert_eq!(
Vec3 {
x: 0.5,
y: 0.5,
z: 0.5
},
Vec3::one() / 2.0
);
assert_eq!(
Vec3 {
x: 0.5,
y: 0.5,
z: 0.5
},
Vec3::one() / Vec3 {
x: 2.0,
y: 2.0,
z: 2.0
}
);
}
#[test]
fn it_linearly_interpolates() {
assert_eq!(Vec3::zero(), Vec3::lerp(&Vec3::zero(), &Vec3::one(), 0.0));
assert_eq!(
Vec3 {
x: 0.5,
y: 0.5,
z: 0.5
},
Vec3::lerp(&Vec3::zero(), &Vec3::one(), 0.5)
);
assert_eq!(Vec3::one(), Vec3::lerp(&Vec3::zero(), &Vec3::one(), 1.0));
}
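// Minimal sketches added for illustration; they exercise the documented
// conventions of `reflect` and `refract` (unit-vector inputs, with V
// pointing away from the surface).
#[test]
fn it_reflects_across_a_normal() {
    // a ray 45 degrees off a +y floor normal reflects to the mirrored direction
    let v = Vec3 { x: -1.0, y: 1.0, z: 0.0 }.unit();
    let n = Vec3 { x: 0.0, y: 1.0, z: 0.0 };
    let r = Vec3::reflect(&v, &n);
    assert!((r - Vec3 { x: 1.0, y: 1.0, z: 0.0 }.unit()).len() < 1e-12);
}
#[test]
fn it_totally_internally_reflects_at_shallow_angles() {
    // leaving a dense medium (ior 1.5) at a grazing angle yields no refracted ray
    let v = Vec3 { x: -1.0, y: -0.1, z: 0.0 }.unit();
    let n = Vec3 { x: 0.0, y: 1.0, z: 0.0 };
    assert_eq!(None, Vec3::refract(&v, &n, 1.5, true));
}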
|
kubernetes_test.go
|
// +build test
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT license.
package kubernetes
import (
"bufio"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"log"
"math"
"math/rand"
"net/url"
"os"
"os/exec"
"path/filepath"
"regexp"
"strconv"
"strings"
"syscall"
"time"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/autorest/to"
"github.com/Azure/aks-engine/pkg/api"
"github.com/Azure/aks-engine/pkg/api/common"
"github.com/Azure/aks-engine/pkg/armhelpers"
"github.com/Azure/aks-engine/test/e2e/config"
"github.com/Azure/aks-engine/test/e2e/engine"
"github.com/Azure/aks-engine/test/e2e/kubernetes/daemonset"
"github.com/Azure/aks-engine/test/e2e/kubernetes/deployment"
"github.com/Azure/aks-engine/test/e2e/kubernetes/event"
"github.com/Azure/aks-engine/test/e2e/kubernetes/hpa"
"github.com/Azure/aks-engine/test/e2e/kubernetes/job"
"github.com/Azure/aks-engine/test/e2e/kubernetes/namespace"
"github.com/Azure/aks-engine/test/e2e/kubernetes/networkpolicy"
"github.com/Azure/aks-engine/test/e2e/kubernetes/node"
"github.com/Azure/aks-engine/test/e2e/kubernetes/persistentvolume"
"github.com/Azure/aks-engine/test/e2e/kubernetes/persistentvolumeclaims"
"github.com/Azure/aks-engine/test/e2e/kubernetes/pod"
"github.com/Azure/aks-engine/test/e2e/kubernetes/service"
"github.com/Azure/aks-engine/test/e2e/kubernetes/storageclass"
"github.com/Azure/aks-engine/test/e2e/kubernetes/util"
"github.com/Azure/aks-engine/test/e2e/remote"
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
const (
WorkloadDir = "workloads"
PolicyDir = "workloads/policies"
kubeSystemPodsReadinessChecks = 6
sleepBetweenRetriesWhenWaitingForPodReady = 1 * time.Second
sleepBetweenRetriesRemoteSSHCommand = 3 * time.Second
timeoutWhenWaitingForPodOutboundAccess = 1 * time.Minute
validateNetworkPolicyTimeout = 3 * time.Minute
podLookupRetries = 5
sigPublishingTimeout = 8 * time.Hour // :(
)
var (
cfg config.Config
eng engine.Engine
masterSSHPort string
masterSSHPrivateKeyFilepath string
longRunningApacheDeploymentName string
sshConn *remote.Connection
kubeConfig *Config
firstMasterRegexp *regexp.Regexp
masterNodes []node.Node
clusterAutoscalerEngaged bool
clusterAutoscalerAddon api.KubernetesAddon
deploymentReplicasCount int
dnsAddonName string
singleCommandTimeout time.Duration
stabilityCommandTimeout time.Duration
env azure.Environment
azureClient *armhelpers.AzureClient
firstMasterRegexStr = fmt.Sprintf("^%s-.*-0", common.LegacyControlPlaneVMPrefix)
vmssHealthCommand *exec.Cmd
vmssHealthCommandStdOut string
)
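// BeforeSuite loads the cluster definition and generated API model, wires up an
// SSH connection to the first control plane node (unless SSH is blocked), and
// records addon and autoscaler state that the specs below consult when deciding to skip.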
var _ = BeforeSuite(func() {
cwd, _ := os.Getwd()
rootPath := filepath.Join(cwd, "../../..") // The current working dir of these tests is down a few levels from the root of the project. We should traverse up that path so we can find the _output dir
c, err := config.ParseConfig()
Expect(err).NotTo(HaveOccurred())
c.CurrentWorkingDir = rootPath
cfg = *c // Copy into the package-level cfg so the Ginkgo closures below can reference it
engCfg, err := engine.ParseConfig(c.CurrentWorkingDir, c.ClusterDefinition, c.Name)
Expect(err).NotTo(HaveOccurred())
csInput, err := engine.ParseInput(engCfg.ClusterDefinitionTemplate)
Expect(err).NotTo(HaveOccurred())
isUpdate := cfg.Name != ""
validate := false
csGenerated, err := engine.ParseOutput(engCfg.GeneratedDefinitionPath+"/apimodel.json", validate, isUpdate)
Expect(err).NotTo(HaveOccurred())
eng = engine.Engine{
Config: engCfg,
ClusterDefinition: csInput,
ExpandedDefinition: csGenerated,
}
longRunningApacheDeploymentName = "php-apache-long-running"
for _, profile := range eng.ExpandedDefinition.Properties.AgentPoolProfiles {
deploymentReplicasCount += profile.Count
}
var getNodeByRegexError error
masterNodes, getNodeByRegexError = node.GetByRegexWithRetry(fmt.Sprintf("^%s-", common.LegacyControlPlaneVMPrefix), 3*time.Minute, cfg.Timeout)
Expect(getNodeByRegexError).NotTo(HaveOccurred())
var getKubeConfigError error
kubeConfig, getKubeConfigError = GetConfigWithRetry(3*time.Second, cfg.Timeout)
Expect(getKubeConfigError).NotTo(HaveOccurred())
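// Rebooting control plane nodes makes SSH reachability and stability iterations unreliable, so disable both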
if cfg.RebootControlPlaneNodes {
cfg.BlockSSHPort = true
cfg.StabilityIterations = 0
}
if !cfg.BlockSSHPort {
var err error
masterName := masterNodes[0].Metadata.Name
if strings.Contains(masterName, "vmss") {
masterSSHPort = "50001"
} else {
masterSSHPort = "22"
}
masterSSHPrivateKeyFilepath = cfg.GetSSHKeyPath()
sshConn, err = remote.NewConnectionWithRetry(kubeConfig.GetServerName(), masterSSHPort, eng.ExpandedDefinition.Properties.LinuxProfile.AdminUsername, masterSSHPrivateKeyFilepath, 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
success := false
for i := 0; i < 3; i++ {
sshAddErr := util.AddToSSHKeyChain(masterSSHPrivateKeyFilepath)
if sshAddErr == nil {
success = true
break
}
if i > 1 {
log.Printf("Error while setting up ssh key forwarding:%s\n", sshAddErr)
}
time.Sleep(10 * time.Second)
}
Expect(success).To(BeTrue())
firstMasterRegexp, err = regexp.Compile(firstMasterRegexStr)
Expect(err).NotTo(HaveOccurred())
}
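// The cluster autoscaler is considered "engaged" when any configured pool has headroom
// to scale beyond its current count; engaged autoscaling makes exact node-count assertions unsafe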
if hasAddon, addon := eng.HasAddon(common.ClusterAutoscalerAddonName); hasAddon {
clusterAutoscalerAddon = addon
if len(addon.Pools) > 0 {
for _, pool := range addon.Pools {
p := eng.ExpandedDefinition.Properties.GetAgentPoolIndexByName(pool.Name)
maxNodes, _ := strconv.Atoi(pool.Config["max-nodes"])
minNodes, _ := strconv.Atoi(pool.Config["min-nodes"])
if maxNodes > eng.ExpandedDefinition.Properties.AgentPoolProfiles[p].Count &&
minNodes <= eng.ExpandedDefinition.Properties.AgentPoolProfiles[p].Count {
clusterAutoscalerEngaged = true
break
}
}
}
}
if hasAddon, _ := eng.HasAddon(common.KubeDNSAddonName); hasAddon {
dnsAddonName = common.KubeDNSAddonName
}
if hasAddon, _ := eng.HasAddon("coredns"); hasAddon {
dnsAddonName = common.CoreDNSAddonName
}
Expect(dnsAddonName).NotTo(Equal(""))
singleCommandTimeout = time.Duration(cfg.SingleCommandTimeoutMinutes) * time.Minute
stabilityCommandTimeout = time.Duration(cfg.StabilityTimeoutSeconds) * time.Second
if !cfg.IsCustomCloudProfile() {
env, err = azure.EnvironmentFromName("AzurePublicCloud") // TODO get this programmatically
Expect(err).NotTo(HaveOccurred())
azureClient, err = armhelpers.NewAzureClientWithClientSecret(env, cfg.SubscriptionID, cfg.ClientID, cfg.ClientSecret)
Expect(err).NotTo(HaveOccurred())
}
if cfg.RunVMSSHygiene {
vmssHealthCommand, err = RunVMSSHealthCheck(cfg)
Expect(err).NotTo(HaveOccurred())
vmssHealthCommandStdOut = fmt.Sprintf("./vmss-health-check-%s.out", cfg.ResourceGroup)
}
})
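// AfterSuite stops the VMSS health check process (if one was started) and, when
// DebugAfterSuite is set, dumps cluster-wide resources and core component logs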
var _ = AfterSuite(func() {
if cfg.RunVMSSHygiene {
if err := vmssHealthCommand.Process.Kill(); err != nil {
log.Fatal(fmt.Sprintf("failed to kill process ID %d: ", vmssHealthCommand.Process.Pid), err)
}
stdout, err := ioutil.ReadFile(vmssHealthCommandStdOut)
if err != nil {
fmt.Printf("Unable to read file %s", vmssHealthCommandStdOut)
}
fmt.Println(string(stdout))
}
if cfg.DebugAfterSuite {
cmd := exec.Command("k", "get", "deployments,pods,svc,daemonsets,configmaps,endpoints,jobs,clusterroles,clusterrolebindings,roles,rolebindings,storageclasses,podsecuritypolicy", "--all-namespaces", "-o", "wide")
out, err := cmd.CombinedOutput()
log.Printf("%s\n", out)
if err != nil {
log.Printf("Error: Unable to print all cluster resources\n")
}
pod.PrintPodsLogs("kube-addon-manager", "kube-system", 5*time.Second, 1*time.Minute)
pod.PrintPodsLogs("kube-proxy", "kube-system", 5*time.Second, 1*time.Minute)
pod.PrintPodsLogs("kube-scheduler", "kube-system", 5*time.Second, 1*time.Minute)
pod.PrintPodsLogs(common.APIServerComponentName, "kube-system", 5*time.Second, 1*time.Minute)
pod.PrintPodsLogs("kube-controller-manager", "kube-system", 5*time.Second, 1*time.Minute)
}
})
var _ = Describe("Azure Container Cluster using the Kubernetes Orchestrator", func() {
Describe("regardless of agent pool type", func() {
It("should check for cluster-init pod", func() {
if cfg.ClusterInitPodName != "" {
By(fmt.Sprintf("Ensuring that cluster-init Pod \"%s\" is Running", cfg.ClusterInitPodName))
running, err := pod.WaitOnSuccesses(cfg.ClusterInitPodName, "default", kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
}
if cfg.ClusterInitJobName != "" {
ready, err := job.WaitOnSucceeded(cfg.ClusterInitJobName, "default", 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
}
})
It("should validate filesystem config", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
filesystemValidateScript := "host-os-fs.sh"
err = sshConn.CopyTo(filesystemValidateScript)
Expect(err).NotTo(HaveOccurred())
envString := "MASTER_NODE=true"
filesystemValidationCommand := fmt.Sprintf("%s /tmp/%s", envString, filesystemValidateScript)
err = sshConn.Execute(filesystemValidationCommand, false)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if n.IsUbuntu() && !firstMasterRegexp.MatchString(n.Metadata.Name) {
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+filesystemValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
envString = fmt.Sprintf("MASTER_NODE=%t", n.HasSubstring([]string{common.LegacyControlPlaneVMPrefix}))
filesystemValidationCommand = fmt.Sprintf("%s /tmp/%s", envString, filesystemValidateScript)
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, filesystemValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should validate host OS DNS", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else {
var nodes []node.Node
var err error
if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
nodes, err = node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
} else {
nodes = masterNodes
}
Expect(err).NotTo(HaveOccurred())
hostOSDNSValidateScript := "host-os-dns-validate.sh"
err = sshConn.CopyTo(hostOSDNSValidateScript)
Expect(err).NotTo(HaveOccurred())
envString := "NODE_HOSTNAMES='"
for _, n := range nodes {
envString += fmt.Sprintf("%s ", n.Metadata.Name)
}
lookupRetries := 3
envString += fmt.Sprintf("' LOOKUP_RETRIES=%d", lookupRetries)
hostOSDNSValidationCommand := fmt.Sprintf("%s /tmp/%s", envString, hostOSDNSValidateScript)
var success bool
// Retry host VM DNS validation for up to 5 minutes (30 attempts, 10s apart)
for i := 0; i < 30; i++ {
err := sshConn.Execute(hostOSDNSValidationCommand, true)
if err == nil {
success = true
break
} else {
time.Sleep(10 * time.Second)
}
}
Expect(success).To(BeTrue())
hostOSDNSValidationCommand = fmt.Sprintf("\"%s /tmp/%s\"", envString, hostOSDNSValidateScript)
for _, n := range nodes {
if n.IsLinux() && !firstMasterRegexp.MatchString(n.Metadata.Name) {
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+hostOSDNSValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, hostOSDNSValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
}
})
It("should validate cloudprovider config", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
var cloudproviderEnabledPrefixes []string
if eng.ExpandedDefinition.Properties.MasterProfile != nil {
cloudproviderEnabledPrefixes = append(cloudproviderEnabledPrefixes, fmt.Sprintf("%s-", common.LegacyControlPlaneVMPrefix))
}
for _, profile := range eng.ExpandedDefinition.Properties.AgentPoolProfiles {
if profile.RequiresCloudproviderConfig() {
cloudproviderEnabledPrefixes = append(cloudproviderEnabledPrefixes, "k8s-"+profile.Name)
}
}
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
cloudproviderConfigValidateScript := "cloudprovider-config-validate.sh"
err = sshConn.CopyTo(cloudproviderConfigValidateScript)
Expect(err).NotTo(HaveOccurred())
envString := fmt.Sprintf("BACKOFF_MODE=%s", eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.CloudProviderBackoffMode)
// TODO test remaining cloudprovider config
cloudproviderConfigValidationCommand := fmt.Sprintf("%s /tmp/%s", envString, cloudproviderConfigValidateScript)
err = sshConn.Execute(cloudproviderConfigValidationCommand, false)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if n.IsUbuntu() && !firstMasterRegexp.MatchString(n.Metadata.Name) && n.HasSubstring(cloudproviderEnabledPrefixes) {
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+cloudproviderConfigValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, cloudproviderConfigValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should have the expected k8s version", func() {
customHyperkubeImage := eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.CustomHyperkubeImage
customWindowsPackageURL := eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.CustomWindowsPackageURL
if customHyperkubeImage == "" && customWindowsPackageURL == "" {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
err := n.Describe()
if err != nil {
log.Printf("Unable to describe node %s: %s", n.Metadata.Name, err)
}
Expect("v" + eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion).To(Equal(n.Version()))
}
} else if customHyperkubeImage != "" {
customHyperkubeValidateScript := "custom-hyperkube-validate.sh"
err := sshConn.CopyTo(customHyperkubeValidateScript)
Expect(err).NotTo(HaveOccurred())
envString := fmt.Sprintf("CUSTOM_HYPERKUBE_IMAGE=%s", customHyperkubeImage)
customHyperkubeValidationCommand := fmt.Sprintf("%s /tmp/%s", envString, customHyperkubeValidateScript)
err = sshConn.Execute(customHyperkubeValidationCommand, false)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("This is a cluster built from source")
}
})
It("should display the installed Ubuntu version on the master node", func() {
if !eng.ExpandedDefinition.Properties.MasterProfile.IsUbuntu() {
Skip("This is not an ubuntu master")
} else if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else {
lsbReleaseCmd := "lsb_release -a && uname -r"
err := sshConn.Execute(lsbReleaseCmd, true)
Expect(err).NotTo(HaveOccurred())
kernelVerCmd := "cat /proc/version"
err = sshConn.Execute(kernelVerCmd, true)
Expect(err).NotTo(HaveOccurred())
}
})
It("should display the installed docker runtime on all nodes", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
if eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.RequiresDocker() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
dockerVersionCmd := "\"docker version\""
for _, n := range nodes {
if n.IsWindows() {
if eng.ExpandedDefinition.Properties.WindowsProfile != nil && !eng.ExpandedDefinition.Properties.WindowsProfile.GetSSHEnabled() {
log.Printf("Can't ssh into Windows node %s because there is no SSH listener", n.Metadata.Name)
continue
}
}
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, dockerVersionCmd, true, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
} else {
Skip("Skip docker validations on non-docker-backed clusters")
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should validate that every linux node has a root password", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
if eng.ExpandedDefinition.Properties.IsVHDDistroForAllNodes() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
rootPasswdCmd := "\"sudo grep '^root:[!*]:' /etc/shadow\" && exit 1 || exit 0"
for _, n := range nodes {
if n.IsUbuntu() {
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, rootPasswdCmd, true, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("This config is only available on VHD")
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should validate aks-engine-provided sysctl configuration", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
for key, val := range eng.ExpandedDefinition.Properties.MasterProfile.SysctlDConfig {
for _, n := range nodes {
if n.HasSubstring([]string{common.LegacyControlPlaneVMPrefix}) && n.IsUbuntu() {
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, fmt.Sprintf("sysctl %s | grep '= %s'", key, val), false, sleepBetweenRetriesRemoteSSHCommand, singleCommandTimeout)
Expect(err).NotTo(HaveOccurred())
}
}
}
for _, pool := range eng.ExpandedDefinition.Properties.AgentPoolProfiles {
for key, val := range pool.SysctlDConfig {
for _, n := range nodes {
if n.HasSubstring([]string{pool.Name}) && n.IsUbuntu() {
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, fmt.Sprintf("sysctl %s | grep '= %s'", key, val), false, sleepBetweenRetriesRemoteSSHCommand, singleCommandTimeout)
Expect(err).NotTo(HaveOccurred())
}
}
}
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should validate Ubuntu host OS network configuration on all nodes", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
if eng.ExpandedDefinition.Properties.IsVHDDistroForAllNodes() {
var largeSKUPrefixes []string
if eng.ExpandedDefinition.Properties.MasterProfile != nil {
if util.IsLargeVMSKU(eng.ExpandedDefinition.Properties.MasterProfile.VMSize) {
largeSKUPrefixes = append(largeSKUPrefixes, fmt.Sprintf("%s-", common.LegacyControlPlaneVMPrefix))
}
}
for _, profile := range eng.ExpandedDefinition.Properties.AgentPoolProfiles {
if util.IsLargeVMSKU(profile.VMSize) {
largeSKUPrefixes = append(largeSKUPrefixes, "k8s-"+profile.Name)
}
}
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
netConfigValidateScript := "net-config-validate.sh"
err = sshConn.CopyTo(netConfigValidateScript)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
var gt8CoreSKU string
if n.HasSubstring(largeSKUPrefixes) && n.IsUbuntu() {
gt8CoreSKU = "true"
}
var netConfigValidationCommand string
if eng.ExpandedDefinition.Properties.OrchestratorProfile.IsAzureCNI() && eng.ExpandedDefinition.Properties.LinuxProfile.Eth0MTU != 0 {
expectedEth0MTU := eng.ExpandedDefinition.Properties.LinuxProfile.Eth0MTU
netConfigValidationCommand = fmt.Sprintf("\"GT_8_CORE_SKU=%s ETH0_MTU=%d /tmp/%s\"", gt8CoreSKU, expectedEth0MTU, netConfigValidateScript)
} else {
netConfigValidationCommand = fmt.Sprintf("\"GT_8_CORE_SKU=%s /tmp/%s\"", gt8CoreSKU, netConfigValidateScript)
}
if n.IsUbuntu() && !firstMasterRegexp.MatchString(n.Metadata.Name) {
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+netConfigValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, netConfigValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("This config is only available on VHD")
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should validate all CIS VHD-paved files", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
if eng.ExpandedDefinition.Properties.IsVHDDistroForAllNodes() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
CISFilesValidateScript := "CIS-files-validate.sh"
err = sshConn.CopyTo(CISFilesValidateScript)
Expect(err).NotTo(HaveOccurred())
CISValidationCommand := fmt.Sprintf("\"/tmp/%s\"", CISFilesValidateScript)
err = sshConn.Execute(CISValidationCommand, false)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if !firstMasterRegexp.MatchString(n.Metadata.Name) {
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+CISFilesValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, CISValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("This config is only available on VHD")
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should validate kernel module configuration", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
if eng.ExpandedDefinition.Properties.IsVHDDistroForAllNodes() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
modprobeConfigValidateScript := "modprobe-config-validate.sh"
err = sshConn.CopyTo(modprobeConfigValidateScript)
Expect(err).NotTo(HaveOccurred())
modprobeConfigValidationCommand := fmt.Sprintf("\"/tmp/%s\"", modprobeConfigValidateScript)
err = sshConn.Execute(modprobeConfigValidationCommand, false)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if n.IsUbuntu() && !firstMasterRegexp.MatchString(n.Metadata.Name) {
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+modprobeConfigValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, modprobeConfigValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("This config is only available on VHD")
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should validate installed software packages", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
installedPackagesValidateScript := "ubuntu-installed-packages-validate.sh"
err = sshConn.CopyTo(installedPackagesValidateScript)
Expect(err).NotTo(HaveOccurred())
installedPackagesValidationCommand := fmt.Sprintf("\"/tmp/%s\"", installedPackagesValidateScript)
err = sshConn.Execute(installedPackagesValidationCommand, false)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if n.IsUbuntu() && !firstMasterRegexp.MatchString(n.Metadata.Name) {
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+installedPackagesValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, installedPackagesValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should validate that every linux node has the right sshd config", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
if eng.ExpandedDefinition.Properties.IsVHDDistroForAllNodes() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
sshdConfigValidateScript := "sshd-config-validate.sh"
err = sshConn.CopyTo(sshdConfigValidateScript)
Expect(err).NotTo(HaveOccurred())
sshdConfigValidationCommand := fmt.Sprintf("\"/tmp/%s\"", sshdConfigValidateScript)
err = sshConn.Execute(sshdConfigValidationCommand, false)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if n.IsUbuntu() && !firstMasterRegexp.MatchString(n.Metadata.Name) {
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+sshdConfigValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, sshdConfigValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("This config is only available on VHD")
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should validate password enforcement configuration", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
if eng.ExpandedDefinition.Properties.IsVHDDistroForAllNodes() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
pwQualityValidateScript := "pwquality-validate.sh"
err = sshConn.CopyTo(pwQualityValidateScript)
Expect(err).NotTo(HaveOccurred())
pwQualityValidationCommand := fmt.Sprintf("\"/tmp/%s\"", pwQualityValidateScript)
err = sshConn.Execute(pwQualityValidationCommand, false)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if n.IsUbuntu() && !firstMasterRegexp.MatchString(n.Metadata.Name) {
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+pwQualityValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, pwQualityValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("This config is only available on VHD")
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should validate auditd configuration", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else {
var auditDNodePrefixes []string
var nonRegularPriVMSSPrefixes []string
if eng.ExpandedDefinition.Properties.MasterProfile != nil {
if to.Bool(eng.ExpandedDefinition.Properties.MasterProfile.AuditDEnabled) {
auditDNodePrefixes = append(auditDNodePrefixes, fmt.Sprintf("%s-", common.LegacyControlPlaneVMPrefix))
}
}
for _, profile := range eng.ExpandedDefinition.Properties.AgentPoolProfiles {
if profile.IsLowPriorityScaleSet() || profile.IsSpotScaleSet() {
nonRegularPriVMSSPrefixes = append(nonRegularPriVMSSPrefixes, "k8s-"+profile.Name)
} else if to.Bool(profile.AuditDEnabled) {
auditDNodePrefixes = append(auditDNodePrefixes, profile.Name)
}
}
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
auditdValidateScript := "auditd-validate.sh"
err = sshConn.CopyTo(auditdValidateScript)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if !n.HasSubstring(nonRegularPriVMSSPrefixes) && n.IsUbuntu() {
var enabled bool
if n.HasSubstring(auditDNodePrefixes) {
enabled = true
}
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+auditdValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
auditdValidationCommand := fmt.Sprintf("\"ENABLED=%t /tmp/%s\"", enabled, auditdValidateScript)
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, auditdValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
}
})
It("should report all nodes in a Ready state", func() {
var expectedReadyNodes int
if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() &&
!clusterAutoscalerEngaged &&
cfg.AddNodePoolInput == "" {
expectedReadyNodes = eng.NodeCount()
log.Printf("Checking for %d Ready nodes\n", expectedReadyNodes)
} else {
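// Expected node count is indeterminate here (autoscaler engaged, non-regular-priority VMSS, or added pools)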
expectedReadyNodes = -1
}
ready := node.WaitOnReady(expectedReadyNodes, 10*time.Second, cfg.Timeout)
cmd := exec.Command("k", "get", "nodes", "-o", "wide")
out, _ := cmd.CombinedOutput()
log.Printf("%s\n", out)
Expect(ready).To(Equal(true))
})
It("should have node labels and annotations added by E2E test runner", func() {
if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() &&
cfg.AddNodePoolInput == "" && !cfg.RebootControlPlaneNodes {
totalNodeCount := eng.NodeCount()
agentNodeCount := totalNodeCount - len(masterNodes)
_, err := node.WaitForNodesWithAnnotation(agentNodeCount, "foo", "bar", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should have core kube-system componentry running", func() {
coreComponents := []string{
common.AddonManagerComponentName,
common.APIServerComponentName,
common.ControllerManagerComponentName,
common.KubeProxyAddonName,
common.SchedulerComponentName,
}
if to.Bool(eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager) {
coreComponents = append(coreComponents, common.CloudControllerManagerComponentName)
}
if to.Bool(eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.EnableEncryptionWithExternalKms) {
coreComponents = append(coreComponents, common.AzureKMSProviderComponentName)
}
for _, componentName := range coreComponents {
By(fmt.Sprintf("Ensuring that %s is Running", componentName))
running, err := pod.WaitOnSuccesses(componentName, "kube-system", kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
}
customHyperkubeImage := eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.CustomHyperkubeImage
if customHyperkubeImage != "" {
hyperkubeComponents := []string{
common.APIServerComponentName,
common.ControllerManagerComponentName,
common.KubeProxyAddonName,
common.SchedulerComponentName,
}
for _, hyperkubeComponent := range hyperkubeComponents {
pods, err := pod.GetAllByPrefixWithRetry(hyperkubeComponent, "kube-system", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
for _, pod := range pods {
Expect(pod.Spec.Containers[0].Image).To(Equal(customHyperkubeImage))
}
}
}
})
It("should be able to schedule a pod to a control plane node", func() {
By("Creating a Job with control plane nodeSelector")
for i := 1; i <= 3; i++ {
j, err := job.CreateJobFromFileWithRetry(filepath.Join(WorkloadDir, "busybox-master.yaml"), "busybox-master", "default", 3*time.Second, 3*time.Minute)
if err != nil {
fmt.Printf("unable to create job: %s\n", err)
continue
}
ready, err := j.WaitOnSucceeded(30*time.Second, 3*time.Minute)
if err != nil {
fmt.Printf("timed out waiting for pod success: %s\n", err)
continue
}
Expect(ready).To(Equal(true))
fmt.Printf("successfully scheduled a pod to the control plane in %d attempts\n", i)
break
}
})
It("should be able to schedule a pod to a Linux node", func() {
if eng.AnyAgentIsLinux() {
By("Creating a Job with agent nodeSelector")
for i := 1; i <= 3; i++ {
j, err := job.CreateJobFromFileWithRetry(filepath.Join(WorkloadDir, "busybox-agent.yaml"), "busybox-agent", "default", 3*time.Second, 3*time.Minute)
if err != nil {
fmt.Printf("unable to create job: %s\n", err)
continue
}
ready, err := j.WaitOnSucceeded(30*time.Second, 3*time.Minute)
if err != nil {
fmt.Printf("timed out waiting for pod success: %s\n", err)
continue
}
Expect(ready).To(Equal(true))
fmt.Printf("successfully scheduled a pod to a Linux node in %d attempts\n", i)
break
}
} else {
Skip("agent nodeSelector test Job is currently Linux-only")
}
})
It("should be able to schedule a pod to a Windows node", func() {
if eng.HasWindowsAgents() {
windowsImages, err := eng.GetWindowsTestImages()
Expect(err).NotTo(HaveOccurred())
p, err := pod.RunWindowsWithRetry(windowsImages.ServerCore, "windows-schedule-test", "default", "powershell", true, 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
succeeded, err := p.WaitOnSucceeded(10*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(succeeded).To(Equal(true))
//err = pod.Delete(util.DefaultDeleteRetries)
//Expect(err).NotTo(HaveOccurred())
} else {
Skip("no Windows nodes")
}
})
It("should have core kube-system addons running the correct version", func() {
if eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.CustomKubeProxyImage == "" {
By(fmt.Sprintf("Ensuring that the %s addon image matches orchestrator version", common.KubeProxyAddonName))
ds, err := daemonset.Get(common.KubeProxyAddonName, "kube-system", 3)
Expect(err).NotTo(HaveOccurred())
log.Printf("Image: %s", ds.Spec.Template.TemplateSpec.Containers[0].Image)
log.Printf("OrchestratorVersion: %s", eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion)
version := eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion
Expect(strings.Contains(ds.Spec.Template.TemplateSpec.Containers[0].Image, version)).To(Equal(true))
} else {
Skip("Skipping as testing custom kube-proxy image")
}
})
It("Should not have any unready or crashing pods right after deployment", func() {
if eng.Config.DebugCrashingPods {
By("Checking ready status of each pod in kube-system")
pods, err := pod.GetAll("kube-system")
Expect(err).NotTo(HaveOccurred())
Expect(len(pods.Pods)).ToNot(BeZero())
for _, currentPod := range pods.Pods {
log.Printf("Checking %s - ready: %t, restarts: %d", currentPod.Metadata.Name, currentPod.Status.ContainerStatuses[0].Ready, currentPod.Status.ContainerStatuses[0].RestartCount)
Expect(currentPod.Status.ContainerStatuses[0].Ready).To(BeTrue())
tooManyRestarts := 5
if strings.Contains(currentPod.Metadata.Name, common.ClusterAutoscalerAddonName) {
log.Print("need to investigate cluster-autoscaler restarts!")
tooManyRestarts = 10
}
Expect(currentPod.Status.ContainerStatuses[0].RestartCount).To(BeNumerically("<", tooManyRestarts))
}
} else {
Skip("Skipping this DEBUG test")
}
})
It("should print cluster resources", func() {
cmd := exec.Command("k", "get", "deployments,pods,svc,daemonsets,configmaps,endpoints,jobs,clusterroles,clusterrolebindings,roles,rolebindings,storageclasses,podsecuritypolicy", "--all-namespaces", "-o", "wide")
out, err := cmd.CombinedOutput()
log.Printf("%s\n", out)
if err != nil {
log.Printf("Error: Unable to print all cluster resources\n")
}
})
It("should have DNS resolver pod running", func() {
By(fmt.Sprintf("Ensuring that %s is running", dnsAddonName))
running, err := pod.WaitOnSuccesses(dnsAddonName, "kube-system", kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
pod.PrintPodsLogs(dnsAddonName, "kube-system", 5*time.Second, 1*time.Minute)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
})
It("should have functional container networking DNS", func() {
By("Ensuring that we have functional DNS resolution from a linux container")
validateDNSLinuxName := "validate-dns-linux"
validateDNSLinuxNamespace := "default"
j, err := job.CreateJobFromFileWithRetry(filepath.Join(WorkloadDir, fmt.Sprintf("%s.yaml", validateDNSLinuxName)), validateDNSLinuxName, validateDNSLinuxNamespace, 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err := j.WaitOnSucceeded(sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
if err != nil {
pod.PrintPodsLogs(validateDNSLinuxName, validateDNSLinuxNamespace, 5*time.Second, 1*time.Minute)
pods, err := pod.GetAllByPrefixWithRetry(validateDNSLinuxName, validateDNSLinuxNamespace, 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
for _, p := range pods {
out, err := p.Exec("--", "cat", "/etc/resolv.conf")
log.Printf("%s\n", string(out))
Expect(err).NotTo(HaveOccurred())
out, err = p.Exec("--", "ifconfig")
log.Printf("%s\n", string(out))
Expect(err).NotTo(HaveOccurred())
out, err = p.Exec("--", "nc", "-vz", eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.DNSServiceIP, "53")
log.Printf("%s\n", string(out))
Expect(err).NotTo(HaveOccurred())
}
}
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
if eng.HasWindowsAgents() {
By("Ensuring that we have functional DNS resolution from a windows container")
windowsImages, imgErr := eng.GetWindowsTestImages()
Expect(imgErr).NotTo(HaveOccurred())
j, err = job.CreateWindowsJobFromTemplateDeleteIfExists(filepath.Join(WorkloadDir, "validate-dns-windows.yaml"), "validate-dns-windows", "default", windowsImages, 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err = j.WaitOnSucceeded(sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
if err != nil {
pod.PrintPodsLogs("validate-dns-windows", "default", 5*time.Second, 1*time.Minute)
}
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
}
By("Ensuring that we have stable and responsive DNS resolution")
p, err := pod.CreatePodFromFileIfNotExist(filepath.Join(WorkloadDir, "dns-loop.yaml"), "dns-loop", "default", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
running, err := p.WaitOnReady(true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
out, err := p.Exec("--", "dns-loop", "100", "google.com", "0.3", "5")
log.Printf("%s\n", string(out))
Expect(err).NotTo(HaveOccurred())
By("Ensuring that we have stable external DNS resolution as we recycle a bunch of pods")
name := fmt.Sprintf("alpine-%s", cfg.Name)
command := fmt.Sprintf("time nc -vz bbc.co.uk 80 || nc -vz google.com 443 || nc -vz microsoft.com 80")
deploymentCommand := fmt.Sprintf("%s && while true; do sleep 1; done || echo unable to make external connections or resolve dns", command)
// Ensure across all nodes
successes, err := deployment.RunDeploymentMultipleTimes(deployment.RunLinuxDeploy, "alpine", name, deploymentCommand, deploymentReplicasCount, cfg.StabilityIterations, 1*time.Second, timeoutWhenWaitingForPodOutboundAccess, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
successes, err = pod.RunCommandMultipleTimes(pod.RunLinuxPod, "alpine", name, command, cfg.StabilityIterations, 1*time.Second, stabilityCommandTimeout, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
})
It("should be able to create and connect to a hostPort-configured pod", func() {
if eng.AnyAgentIsLinux() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
var numNodes int
controlPlaneNodeRegexString := fmt.Sprintf("^%s-.*", common.LegacyControlPlaneVMPrefix)
controlPlaneNodeRegexp, err := regexp.Compile(controlPlaneNodeRegexString)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if n.IsLinux() && !controlPlaneNodeRegexp.MatchString(n.Metadata.Name) {
numNodes++
}
}
By("Creating a httpbin DaemonSet")
httpbinName := "httpbin"
httpbinNamespace := "default"
d, err := daemonset.CreateDaemonsetDeleteIfExists(filepath.Join(WorkloadDir, fmt.Sprintf("%s.yaml", httpbinName)), httpbinName, httpbinNamespace, "app", httpbinName, 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
_, err = pod.WaitForMinRunningByLabelWithRetry(numNodes, "app", httpbinName, httpbinNamespace, 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Ensuring that we have can connect locally using the configured httpbin container hostPort")
hostPortTestName := "hostport-test"
hostPortTestNamespace := "default"
j, err := job.CreateJobFromFileWithRetry(filepath.Join(WorkloadDir, fmt.Sprintf("%s.yaml", hostPortTestName)), hostPortTestName, hostPortTestNamespace, 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err := j.WaitOnSucceeded(5*time.Second, 1*time.Minute)
if err != nil {
pod.PrintPodsLogs(hostPortTestName, hostPortTestNamespace, 5*time.Second, cfg.Timeout)
}
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
By("Cleaning up after ourselves")
err = j.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = d.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("hostPort test requires a Linux node")
}
})
It("should be able to launch a long-running container networking DNS liveness pod", func() {
p, err := pod.CreatePodFromFileIfNotExist(filepath.Join(WorkloadDir, "dns-liveness.yaml"), "dns-liveness", "default", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
running, err := p.WaitOnReady(true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
})
It("should be able to restart a pod due to an exec livenessProbe failure", func() {
_, err := pod.CreatePodFromFileIfNotExist(filepath.Join(WorkloadDir, "exec-liveness.yaml"), "exec-liveness", "default", 1*time.Second, 2*time.Minute)
Expect(err).NotTo(HaveOccurred())
time.Sleep(30 * time.Second) // Wait for probe to take effect
p, err := pod.Get("exec-liveness", "default", podLookupRetries)
Expect(err).NotTo(HaveOccurred())
restarts := p.Status.ContainerStatuses[0].RestartCount
By("Validating that the exec livenessProbe caused at least one pod restart due to probe command failure")
p.Describe()
Expect(restarts > 0).To(BeTrue())
Expect(p.Delete(util.DefaultDeleteRetries)).To(Succeed())
_, err = pod.CreatePodFromFileIfNotExist(filepath.Join(WorkloadDir, "exec-liveness-assume-1-second-default-timeout.yaml"), "exec-liveness-assume-1-second-default-timeout", "default", 1*time.Second, 2*time.Minute)
Expect(err).NotTo(HaveOccurred())
time.Sleep(30 * time.Second) // Wait for probe to take effect
p, err = pod.Get("exec-liveness-assume-1-second-default-timeout", "default", podLookupRetries)
Expect(err).NotTo(HaveOccurred())
restarts = p.Status.ContainerStatuses[0].RestartCount
p.Describe()
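// Kubernetes 1.20 began enforcing exec probe timeouts (ExecProbeTimeout feature gate);
// on older versions, or with the gate disabled, the 1-second default is ignored and no restarts are expected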
if strings.Contains(eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig["--feature-gates"], "ExecProbeTimeout=false") ||
!common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.20.0") {
By("Validating that a default exec livenessProbe timeout was not enforced and no restarts occured due to a < 1.20.0 or ExecProbeTimeout=false configuration")
Expect(restarts).To(Equal(0))
} else {
By("Validating that the exec livenessProbe caused at least one pod restart due to the enforcement of a default 1 second timeout ")
Expect(restarts > 0).To(BeTrue())
}
Expect(p.Delete(util.DefaultDeleteRetries)).To(Succeed())
_, err = pod.CreatePodFromFileIfNotExist(filepath.Join(WorkloadDir, "exec-liveness-always-fail.yaml"), "exec-liveness-always-fail", "default", 1*time.Second, 2*time.Minute)
Expect(err).NotTo(HaveOccurred())
time.Sleep(30 * time.Second) // Wait for probe to take effect
p, err = pod.Get("exec-liveness-always-fail", "default", podLookupRetries)
Expect(err).NotTo(HaveOccurred())
restarts = p.Status.ContainerStatuses[0].RestartCount
By("Validating that the exec livenessProbe caused at least one pod restart due to probe command failure")
p.Describe()
Expect(restarts > 0).To(BeTrue())
Expect(p.Delete(util.DefaultDeleteRetries)).To(Succeed())
// exec probe timeout is broken entirely for containerd prior to 1.20, and/or if ExecProbeTimeout=false, so we can't test it
if !(eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.NeedsContainerd() &&
(!common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.20.0") || strings.Contains(eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig["--feature-gates"], "ExecProbeTimeout=false"))) {
_, err = pod.CreatePodFromFileIfNotExist(filepath.Join(WorkloadDir, "exec-liveness-timeout-always-fail.yaml"), "exec-liveness-timeout-always-fail", "default", 1*time.Second, 2*time.Minute)
Expect(err).NotTo(HaveOccurred())
time.Sleep(30 * time.Second) // Wait for probe to take effect
p, err = pod.Get("exec-liveness-timeout-always-fail", "default", podLookupRetries)
Expect(err).NotTo(HaveOccurred())
restarts = p.Status.ContainerStatuses[0].RestartCount
if strings.Contains(eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.KubeletConfig["--feature-gates"], "ExecProbeTimeout=false") ||
!common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.20.0") {
By("Validating that the exec livenessProbe caused at least one pod restart due to command failure, even if timeout is not respected")
} else {
By("Validating that the exec livenessProbe caused at least one pod restart due to timeout")
}
p.Describe()
Expect(restarts > 0).To(BeTrue())
Expect(p.Delete(util.DefaultDeleteRetries)).To(Succeed())
}
})
It("should be able to run a node reboot daemonset", func() {
if cfg.RebootControlPlaneNodes {
_, err := daemonset.CreateDaemonsetFromFileWithRetry(filepath.Join(WorkloadDir, "reboot-control-plane-node.yaml"), "reboot-test", "default", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
pods, err := pod.GetAllRunningByLabelWithRetry("app", "reboot-test", "default", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pods).NotTo(BeEmpty())
}
})
It("should have stable external container networking as we recycle a bunch of pods", func() {
// Test for basic UDP networking
name := fmt.Sprintf("alpine-%s", cfg.Name)
command := fmt.Sprintf("time nc -vz 8.8.8.8 53 || nc -vz 8.8.4.4 53")
deploymentCommand := fmt.Sprintf("%s && while true; do sleep 1; done || echo unable to connect externally against known listeners", command)
// Ensure across all nodes
successes, err := deployment.RunDeploymentMultipleTimes(deployment.RunLinuxDeploy, "alpine", name, deploymentCommand, deploymentReplicasCount, cfg.StabilityIterations, 1*time.Second, timeoutWhenWaitingForPodOutboundAccess, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
// Ensure responsiveness
successes, err = pod.RunCommandMultipleTimes(pod.RunLinuxPod, "alpine", name, command, cfg.StabilityIterations, 1*time.Second, stabilityCommandTimeout, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
// Use curl to test responsive DNS lookup + TCP 443 connectivity
name = fmt.Sprintf("alpine-%s", cfg.Name)
command = "time curl --head https://www.bing.com 1> /dev/null || curl --head https://google.com 1> /dev/null || curl --head https://microsoft.com 1> /dev/null"
deploymentCommand = fmt.Sprintf("%s && while true; do sleep 1; done || echo unable to curl externally against known endpoints", command)
// Ensure across all nodes
successes, err = deployment.RunDeploymentMultipleTimes(deployment.RunLinuxDeploy, "byrnedo/alpine-curl", name, deploymentCommand, deploymentReplicasCount, cfg.StabilityIterations, 1*time.Second, timeoutWhenWaitingForPodOutboundAccess, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
// Ensure responsiveness
successes, err = pod.RunCommandMultipleTimes(pod.RunLinuxPod, "byrnedo/alpine-curl", name, command, cfg.StabilityIterations, 1*time.Second, stabilityCommandTimeout, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
})
It("should have stable internal container networking as we recycle a bunch of pods", func() {
name := fmt.Sprintf("alpine-%s", cfg.Name)
command := fmt.Sprintf("time nc -vz kubernetes 443 && nc -vz kubernetes.default.svc 443 && nc -vz kubernetes.default.svc.cluster.local 443")
deploymentCommand := fmt.Sprintf("time %s && while true; do sleep 1; done || echo unable to reach internal kubernetes endpoints", command)
// Ensure across all nodes
successes, err := deployment.RunDeploymentMultipleTimes(deployment.RunLinuxDeploy, "alpine", name, deploymentCommand, deploymentReplicasCount, cfg.StabilityIterations, 1*time.Second, timeoutWhenWaitingForPodOutboundAccess, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
// Ensure responsiveness
successes, err = pod.RunCommandMultipleTimes(pod.RunLinuxPod, "alpine", name, command, cfg.StabilityIterations, 1*time.Second, stabilityCommandTimeout, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
})
It("should have stable pod-to-pod networking", func() {
if eng.AnyAgentIsLinux() {
By("Creating a php-apache deployment")
phpApacheDeploy, err := deployment.CreateLinuxDeployIfNotExist("deis/hpa-example", longRunningApacheDeploymentName, "default", "", "", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Ensuring that php-apache pod is running")
running, err := pod.WaitOnSuccesses(longRunningApacheDeploymentName, "default", 4, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Ensuring that the php-apache pod has outbound internet access")
pods, err := phpApacheDeploy.PodsRunning()
Expect(err).NotTo(HaveOccurred())
for _, p := range pods {
pass, outboundErr := p.CheckLinuxOutboundConnection(5*time.Second, cfg.Timeout)
Expect(outboundErr).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
}
By("Exposing TCP 80 internally on the php-apache deployment")
err = phpApacheDeploy.ExposeIfNotExist("ClusterIP", 80, 80)
Expect(err).NotTo(HaveOccurred())
By("Creating another pod that will connect to the php-apache pod")
r := rand.New(rand.NewSource(time.Now().UnixNano()))
commandString := fmt.Sprintf("time nc -vz %s.default.svc.cluster.local 80", longRunningApacheDeploymentName)
consumerPodName := fmt.Sprintf("consumer-pod-%s-%v", cfg.Name, r.Intn(99999))
deploymentCommand := fmt.Sprintf("%s && while true; do sleep 1; done || echo unable to connect to in-cluster web listener", commandString)
// Ensure across all nodes
successes, err := deployment.RunDeploymentMultipleTimes(deployment.RunLinuxDeploy, "busybox", consumerPodName, deploymentCommand, deploymentReplicasCount, cfg.StabilityIterations, 1*time.Second, timeoutWhenWaitingForPodOutboundAccess, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
// Ensure responsiveness
successes, err = pod.RunCommandMultipleTimes(pod.RunLinuxPod, "busybox", consumerPodName, commandString, cfg.StabilityIterations, 1*time.Second, stabilityCommandTimeout, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
} else {
Skip("Pod-to-pod network tests only valid on Linux clusters")
}
})
It("should have addon pods running", func() {
timeout := cfg.Timeout
for _, addonName := range []string{common.CoreDNSAddonName, common.TillerAddonName, common.AADPodIdentityAddonName,
common.AzureDiskCSIDriverAddonName, common.AzureFileCSIDriverAddonName, common.CloudNodeManagerAddonName, common.ClusterAutoscalerAddonName,
common.BlobfuseFlexVolumeAddonName, common.SMBFlexVolumeAddonName, common.KeyVaultFlexVolumeAddonName, common.DashboardAddonName,
common.MetricsServerAddonName, common.NVIDIADevicePluginAddonName, common.ContainerMonitoringAddonName,
common.CalicoAddonName, common.AzureNetworkPolicyAddonName, common.IPMASQAgentAddonName,
common.AzurePolicyAddonName, common.NodeProblemDetectorAddonName, common.AntreaAddonName, common.FlannelAddonName,
common.ScheduledMaintenanceAddonName, common.SecretsStoreCSIDriverAddonName} {
var addonPods = []string{addonName}
var addonNamespace = "kube-system"
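// Most addons deploy pods matching their own name; the switch below special-cases the exceptions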
switch addonName {
case common.BlobfuseFlexVolumeAddonName:
addonPods = []string{"blobfuse-flexvol-installer"}
case common.SMBFlexVolumeAddonName:
addonPods = []string{"smb-flexvol-installer"}
case common.ContainerMonitoringAddonName:
addonPods = []string{"omsagent", "omsagent-rs"}
if eng.HasWindowsAgents() {
addonPods = append(addonPods, "omsagent-win")
}
timeout = 60 * time.Minute
case common.AzureNetworkPolicyAddonName:
addonPods = []string{"azure-npm"}
case common.DashboardAddonName:
addonPods = []string{common.DashboardAddonName, "dashboard-metrics-scraper"}
addonNamespace = common.DashboardAddonName
case common.AADPodIdentityAddonName:
addonPods = []string{"nmi", "mic"}
case common.AzureDiskCSIDriverAddonName:
addonPods = []string{"csi-azuredisk-node", "csi-azuredisk-controller"}
if eng.HasWindowsAgents() && common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.18.0") {
addonPods = append(addonPods, "csi-azuredisk-node-windows")
}
if eng.AnyAgentIsLinux() && common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.17.0") {
addonPods = append(addonPods, "csi-snapshot-controller")
}
case common.AzureFileCSIDriverAddonName:
addonPods = []string{"csi-azurefile-node", "csi-azurefile-controller"}
if eng.HasWindowsAgents() && common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.18.0") {
addonPods = append(addonPods, "csi-azurefile-node-windows")
}
case common.CloudNodeManagerAddonName:
addonPods = []string{common.CloudNodeManagerAddonName}
if eng.HasWindowsAgents() {
addonPods = append(addonPods, common.CloudNodeManagerAddonName+"-windows")
}
case common.CoreDNSAddonName:
addonPods = []string{common.CoreDNSAddonName, common.CoreDNSAddonName + "-autoscaler"}
case common.IPMASQAgentAddonName:
addonPods = []string{"azure-ip-masq-agent"}
case common.CalicoAddonName:
addonPods = []string{"calico-node", "calico-typha", "calico-typha-horizontal-autoscaler"}
case common.AzurePolicyAddonName:
addonPods = []string{common.AzurePolicyAddonName, "gatekeeper-controller-manager"}
case common.AntreaAddonName:
addonPods = []string{common.AntreaAddonName + "-agent", common.AntreaAddonName + "-controller"}
case common.FlannelAddonName:
addonPods = []string{"kube-flannel-ds"}
case common.ScheduledMaintenanceAddonName:
addonPods = []string{"drainsafe-controller-manager", "drainsafe-controller-scheduledevent-manager"}
case common.SecretsStoreCSIDriverAddonName:
addonPods = []string{"csi-secrets-store", "csi-secrets-store-provider-azure"}
}
if hasAddon, addon := eng.HasAddon(addonName); hasAddon {
for _, addonPod := range addonPods {
if addon.Name == common.AzurePolicyAddonName {
switch addonPod {
case common.AzurePolicyAddonName:
addonNamespace = "kube-system"
case "gatekeeper-controller-manager":
addonNamespace = "gatekeeper-system"
}
}
By(fmt.Sprintf("Ensuring that the %s pod(s) in the %s addon is Running", addonPod, addonName))
running, err := pod.WaitOnSuccesses(addonPod, addonNamespace, kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
}
} else {
fmt.Printf("%s disabled for this cluster, will not test\n", addonName)
}
}
})
It("should have a working node-problem-detector configuration", func() {
if hasNpd, _ := eng.HasAddon(common.NodeProblemDetectorAddonName); hasNpd {
running, err := pod.WaitOnSuccesses(common.NodeProblemDetectorAddonName, "kube-system", kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
pods, err := pod.GetAllRunningByPrefixWithRetry(common.NodeProblemDetectorAddonName, "kube-system", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pods).NotTo(BeEmpty())
nodeName := pods[0].Spec.NodeName
// Create a fake kernel message on a node running node-problem-detector
r := rand.New(rand.NewSource(time.Now().UnixNano()))
msgId := r.Intn(999999999999)
msg := fmt.Sprintf("kernel: BUG: unable to handle kernel NULL pointer dereference at TESTING-%d", msgId)
kernelMsgTestCommand := fmt.Sprintf("echo '%s' | sudo tee /dev/kmsg", msg)
err = sshConn.ExecuteRemoteWithRetry(nodeName, kernelMsgTestCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
evt, err := event.GetWithRetry(msg, 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(evt.Type).To(Equal("Warning"))
Expect(evt.Reason).To(Equal("KernelOops"))
}
})
It("should have the correct tiller configuration", func() {
if hasTiller, tillerAddon := eng.HasAddon(common.TillerAddonName); hasTiller {
running, err := pod.WaitOnSuccesses(common.TillerAddonName, "kube-system", kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
pods, err := pod.GetAllRunningByPrefixWithRetry("tiller-deploy", "kube-system", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Ensuring that the correct max-history has been applied")
maxHistory := tillerAddon.Config["max-history"]
// There is only one tiller pod and one container in that pod
actualTillerMaxHistory, err := pods[0].Spec.Containers[0].GetEnvironmentVariable("TILLER_HISTORY_MAX")
Expect(err).NotTo(HaveOccurred())
Expect(actualTillerMaxHistory).To(Equal(maxHistory))
} else {
Skip("tiller disabled for this cluster, will not test")
}
})
It("should have the expected omsagent cluster footprint", func() {
if hasContainerMonitoring, _ := eng.HasAddon(common.ContainerMonitoringAddonName); hasContainerMonitoring {
By("Validating the omsagent replicaset")
running, err := pod.WaitOnSuccesses("omsagent-rs", "kube-system", kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
pods, err := pod.GetAllRunningByPrefixWithRetry("omsagent-rs", "kube-system", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Ensuring that the kubepodinventory plugin is writing data successfully")
pass, err := pods[0].ValidateOmsAgentLogs("kubePodInventoryEmitStreamSuccess", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
By("Ensuring that the kubenodeinventory plugin is writing data successfully")
pass, err = pods[0].ValidateOmsAgentLogs("kubeNodeInventoryEmitStreamSuccess", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
By("Validating the omsagent daemonset")
running, err = pod.WaitOnSuccesses("omsagent", "kube-system", kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
pods, err = pod.GetAllRunningByPrefixWithRetry("omsagent", "kube-system", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Ensuring that the cadvisor_perf plugin is writing data successfully")
pass, err = pods[0].ValidateOmsAgentLogs("cAdvisorPerfEmitStreamSuccess", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
By("Ensuring that the containerinventory plugin is writing data successfully")
pass, err = pods[0].ValidateOmsAgentLogs("containerInventoryEmitStreamSuccess", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
} else {
Skip("container monitoring disabled for this cluster, will not test")
}
})
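// Validates dashboard reachability by running `kubectl proxy` on a random local port and
// curling the dashboard login page through the apiserver proxy path.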
It("should be able to access the dashboard", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else {
if hasDashboard, _ := eng.HasAddon(common.DashboardAddonName); hasDashboard {
By("Ensuring that the kubernetes-dashboard service is Running")
s, err := service.Get(common.DashboardAddonName, common.DashboardAddonName)
Expect(err).NotTo(HaveOccurred())
Expect(s).NotTo(BeNil())
By("Ensuring that the dashboard responds to requests")
// start `kubectl proxy` in the background on a random port
var proxyStdout io.ReadCloser
var proxyStdoutReader *bufio.Reader
proxyCmd := exec.Command("k", "proxy", "-p", "0")
proxyCmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
proxyStdout, err = proxyCmd.StdoutPipe()
Expect(err).NotTo(HaveOccurred())
util.PrintCommand(proxyCmd)
err = proxyCmd.Start()
Expect(err).NotTo(HaveOccurred())
defer func() {
syscall.Kill(-proxyCmd.Process.Pid, syscall.SIGKILL)
if _, waiterr := proxyCmd.Process.Wait(); waiterr != nil {
log.Printf("kubectl proxy - wait returned err: %v\n", waiterr)
}
}()
proxyStdoutReader = bufio.NewReader(proxyStdout)
proxyOutStr, outErr := proxyStdoutReader.ReadString('\n')
Expect(outErr).NotTo(HaveOccurred())
log.Printf("kubectl proxy stdout: %s\n", proxyOutStr)
serverStartPrefix := "Starting to serve on "
Expect(proxyOutStr).To(HavePrefix(serverStartPrefix))
dashboardHost := strings.TrimSpace(strings.TrimPrefix(proxyOutStr, serverStartPrefix))
// get an HTTP response from the dashboard login URL
url := fmt.Sprintf("http://%s/api/v1/namespaces/kubernetes-dashboard/services/https:kubernetes-dashboard:/proxy/#/login", dashboardHost)
cmd := exec.Command("curl", "--max-time", "60", "--retry", "10", "--retry-delay", "10", "--retry-max-time", "120", url)
util.PrintCommand(cmd)
var out []byte
out, err = cmd.CombinedOutput()
log.Printf("%s\n", out)
Expect(err).NotTo(HaveOccurred())
Expect(out).To(ContainSubstring("<!doctype html>"))
Expect(out).To(ContainSubstring("<title>Kubernetes Dashboard</title>"))
} else {
Skip("kubernetes-dashboard disabled for this cluster, will not test")
}
}
})
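// The expected storage class provisioners depend on the CSI driver addons: disk.csi.azure.com and
// file.csi.azure.com when the drivers are enabled, otherwise the in-tree kubernetes.io/azure-disk
// and kubernetes.io/azure-file provisioners.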
It("should have the correct storage classes deployed", func() {
if util.IsUsingEphemeralDisks(eng.ExpandedDefinition.Properties.AgentPoolProfiles) {
Skip("no storage class is deployed when ephemeral disk is used, will not test")
}
var (
isUsingAzureDiskCSIDriver bool
isUsingAzureFileCSIDriver bool
azureDiskProvisioner string
azureFileProvisioner string
)
if isUsingAzureDiskCSIDriver, _ = eng.HasAddon(common.AzureDiskCSIDriverAddonName); isUsingAzureDiskCSIDriver {
azureDiskProvisioner = "disk.csi.azure.com"
} else {
azureDiskProvisioner = "kubernetes.io/azure-disk"
}
if isUsingAzureFileCSIDriver, _ = eng.HasAddon(common.AzureFileCSIDriverAddonName); isUsingAzureFileCSIDriver {
azureFileProvisioner = "file.csi.azure.com"
} else {
azureFileProvisioner = "kubernetes.io/azure-file"
}
azureDiskStorageClasses := []string{"default"}
// Managed disk is used by default when useCloudControllerManager is enabled
if to.Bool(eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager) || util.IsUsingManagedDisks(eng.ExpandedDefinition.Properties.AgentPoolProfiles) {
azureDiskStorageClasses = append(azureDiskStorageClasses, "managed-premium", "managed-standard")
} else {
azureDiskStorageClasses = append(azureDiskStorageClasses, "unmanaged-premium", "unmanaged-standard")
}
for _, azureDiskStorageClass := range azureDiskStorageClasses {
sc, err := storageclass.Get(azureDiskStorageClass)
Expect(err).NotTo(HaveOccurred())
Expect(sc.Provisioner).To(Equal(azureDiskProvisioner))
if isUsingAzureDiskCSIDriver && eng.ExpandedDefinition.Properties.HasAvailabilityZones() {
Expect(sc.VolumeBindingMode).To(Equal("WaitForFirstConsumer"))
Expect(len(sc.AllowedTopologies)).To(Equal(1))
Expect(len(sc.AllowedTopologies[0].MatchLabelExpressions)).To(Equal(1))
Expect(sc.AllowedTopologies[0].MatchLabelExpressions[0].Key).To(Equal("topology.disk.csi.azure.com/zone"))
for _, zone := range eng.ExpandedDefinition.Properties.AgentPoolProfiles[0].AvailabilityZones {
Expect(sc.AllowedTopologies[0].MatchLabelExpressions[0].Values).To(ContainElement(eng.ExpandedDefinition.Location + "-" + zone))
}
} else {
Expect(sc.VolumeBindingMode).To(Equal("Immediate"))
}
if isUsingAzureDiskCSIDriver && common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.16.0") {
Expect(sc.AllowVolumeExpansion).To(BeTrue())
}
}
for _, azureFileStorageClass := range []string{"azurefile"} {
sc, err := storageclass.Get(azureFileStorageClass)
Expect(err).NotTo(HaveOccurred())
Expect(sc.Provisioner).To(Equal(azureFileProvisioner))
Expect(sc.VolumeBindingMode).To(Equal("Immediate"))
if isUsingAzureFileCSIDriver && common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.16.0") {
Expect(sc.AllowVolumeExpansion).To(BeTrue())
}
}
})
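// Exercises `kubectl port-forward` end-to-end: forward local port 8123 to port 80 of a running
// pod (retrying up to 5 times), then curl the forwarded port.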
It("should be able to kubectl port-forward to a running pod", func() {
deploymentNamespace := "default"
testPortForward := func(deploymentName string) {
running, podWaitErr := pod.WaitOnSuccesses(deploymentName, deploymentNamespace, 3, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(podWaitErr).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
d, err := deployment.GetWithRetry(deploymentName, deploymentNamespace, 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
pods, err := d.PodsRunning()
Expect(err).NotTo(HaveOccurred())
Expect(len(pods)).To(Equal(1))
for _, p := range pods {
func() {
By("Ensuring that the pod is running")
var running bool
running, err = p.WaitOnReady(true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Running kubectl port-forward")
var proxyCmd *exec.Cmd
var proxyStdout, proxyStderr io.ReadCloser
var proxyStdoutReader, proxyStderrReader *bufio.Reader
success := false
for i := 0; i < 5; i++ {
if i > 1 {
log.Printf("Waiting for retry...\n")
time.Sleep(10 * time.Second)
}
proxyCmd = exec.Command("k", "port-forward", p.Metadata.Name, "8123:80")
proxyCmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true}
proxyStdout, err = proxyCmd.StdoutPipe()
Expect(err).NotTo(HaveOccurred())
proxyStderr, err = proxyCmd.StderrPipe()
Expect(err).NotTo(HaveOccurred())
util.PrintCommand(proxyCmd)
err = proxyCmd.Start()
if err != nil {
log.Printf("kubectl port-forward start error: %v\n", err)
continue
}
proxyStdoutReader = bufio.NewReader(proxyStdout)
proxyStderrReader = bufio.NewReader(proxyStderr)
proxyOutStr, outErr := proxyStdoutReader.ReadString('\n')
log.Printf("kubectl port-forward stdout: %s\n", proxyOutStr)
if outErr != nil {
proxyErrStr, _ := proxyStderrReader.ReadString('\n') // returns EOF error, ignore it
log.Printf("kubectl port-forward stderr: %s\n", proxyErrStr)
continue
}
defer func() {
syscall.Kill(-proxyCmd.Process.Pid, syscall.SIGKILL)
_, waiterr := proxyCmd.Process.Wait()
if waiterr != nil {
log.Printf("kubectl port-forward - wait returned err: %v\n", waiterr)
} else {
log.Printf("kubectl port-forward - no wait error\n")
}
}()
log.Printf("kubectl port-forward running as pid: %d\n", proxyCmd.Process.Pid)
success = true
break
}
Expect(success).To(Equal(true))
By("Running curl to access the forwarded port")
url := fmt.Sprintf("http://%s:%v", "localhost", 8123)
cmd := exec.Command("curl", "--max-time", "60", "--retry", "10", "--retry-delay", "10", "--retry-max-time", "120", url)
util.PrintCommand(cmd)
var out []byte
out, err = cmd.CombinedOutput()
log.Printf("%s\n", out)
Expect(err).NotTo(HaveOccurred())
}()
}
}
r := rand.New(rand.NewSource(time.Now().UnixNano()))
if eng.AnyAgentIsLinux() {
By("Creating a Linux nginx deployment")
deploymentPrefix := "portforwardlinux"
deploymentName := fmt.Sprintf("%s-%v", deploymentPrefix, r.Intn(9999))
deploy, err := deployment.CreateLinuxDeployDeleteIfExists(deploymentPrefix, "library/nginx:latest", deploymentName, deploymentNamespace, "", "", cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
testPortForward(deploymentName)
err = deploy.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
}
if eng.HasWindowsAgents() {
By("Creating a Windows IIS deployment")
if common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.15.0") {
windowsImages, err := eng.GetWindowsTestImages()
Expect(err).NotTo(HaveOccurred())
deploymentPrefix := "portforwardwindows"
deploymentName := fmt.Sprintf("%s-%v", deploymentPrefix, r.Intn(9999))
deploy, err := deployment.CreateWindowsDeployDeleteIfExist(deploymentPrefix, windowsImages.IIS, deploymentName, deploymentNamespace, "", "", cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
testPortForward(deploymentName)
err = deploy.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("kubectl port-forward only works on Windows nodes with Kubernetes 1.15+")
// Reference: https://github.com/kubernetes/kubernetes/pull/75479
}
}
})
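// Each CSI driver addon is expected to ship a controller pod (csi-provisioner, csi-attacher and
// liveness-probe sidecars, plus csi-resizer/csi-snapshotter on newer Kubernetes versions) and
// per-node registrar pods, with a Windows variant on 1.18+.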
It("should have the correct pods and containers deployed for CSI drivers", func() {
addons := map[string]string{
common.AzureDiskCSIDriverAddonName: "azuredisk",
common.AzureFileCSIDriverAddonName: "azurefile",
}
for addonName, shortenedAddonName := range addons {
if hasAddon, _ := eng.HasAddon(addonName); !hasAddon {
continue
}
// Validate CSI controller pod
addonPod := fmt.Sprintf("csi-%s-controller", shortenedAddonName)
containers := []string{"csi-provisioner", "csi-attacher", "liveness-probe", shortenedAddonName}
if common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.16.0") {
containers = append(containers, "csi-resizer")
}
if eng.AnyAgentIsLinux() {
switch addonName {
case common.AzureDiskCSIDriverAddonName:
if common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.17.0") {
containers = append(containers, "csi-snapshotter")
}
case common.AzureFileCSIDriverAddonName:
if common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.13.0") &&
!common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.17.0") {
containers = append(containers, "csi-snapshotter")
}
}
}
By(fmt.Sprintf("Ensuring that %s are running within %s pod", containers, addonPod))
Expect(pod.EnsureContainersRunningInAllPods(containers, addonPod, "kube-system", kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)).NotTo(HaveOccurred())
// Validate CSI node pod
addonPod = fmt.Sprintf("csi-%s-node", shortenedAddonName)
containers = []string{"liveness-probe", "node-driver-registrar", shortenedAddonName}
By(fmt.Sprintf("Ensuring that %s are running within %s pod", containers, addonPod))
Expect(pod.EnsureContainersRunningInAllPods(containers, addonPod, "kube-system", kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)).NotTo(HaveOccurred())
// Validate CSI node windows pod
if eng.HasWindowsAgents() && common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.18.0") {
addonPod = fmt.Sprintf("csi-%s-node-windows", shortenedAddonName)
containers = []string{"liveness-probe", "node-driver-registrar", shortenedAddonName}
By(fmt.Sprintf("Ensuring that %s are running within %s pod", containers, addonPod))
Expect(pod.EnsureContainersRunningInAllPods(containers, addonPod, "kube-system", kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)).NotTo(HaveOccurred())
}
// Validate CSI snapshot controller pod
switch addonName {
case common.AzureDiskCSIDriverAddonName:
if eng.AnyAgentIsLinux() && common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.17.0") {
addonPod = "csi-snapshot-controller"
containers = []string{"csi-snapshot-controller"}
By(fmt.Sprintf("Ensuring that %s are running within %s pod", containers, addonPod))
Expect(pod.EnsureContainersRunningInAllPods(containers, addonPod, "kube-system", kubeSystemPodsReadinessChecks, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)).NotTo(HaveOccurred())
}
}
}
})
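// A projected service account token is a JWT whose `iss` claim must match the API server's
// --service-account-issuer flag, and OIDC requires that issuer to use an https scheme.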
It("should have the correct service account issuer when consuming a projected service account token", func() {
if !common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.20.0") {
Skip("Service account token volume projection is not available prior to Kubernetes 1.20")
}
By("Launching a pod with a projected service account token")
p, err := pod.CreatePodFromFileWithRetry(filepath.Join(WorkloadDir, "pod-projected-svc-token.yaml"), "nginx", "default", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err := p.WaitOnReady(true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
By("Obtaining the projected service account token")
tokenPath := "/var/run/secrets/tokens/token"
nodeList, err := node.Get()
Expect(err).NotTo(HaveOccurred())
for _, n := range nodeList.Nodes {
if n.Metadata.Name == p.Spec.NodeName && n.IsWindows() {
tokenPath = "C:\\var\\run\\secrets\\tokens\\token"
break
}
}
out, err := p.Exec("--", "cat", tokenPath)
Expect(err).NotTo(HaveOccurred())
By("Decoding the service account token")
// out is a JWT token, which has 3 parts joined by "."
s := strings.Split(string(out), ".")
// JWT segments are base64url-encoded without padding (RFC 7515)
data, err := base64.RawURLEncoding.DecodeString(s[1])
Expect(err).NotTo(HaveOccurred())
var payload struct {
Issuer string `json:"iss"`
}
err = json.Unmarshal(data, &payload)
Expect(err).NotTo(HaveOccurred())
svcIssuer := eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.APIServerConfig["--service-account-issuer"]
By("Ensuring that the service account issuer matches the token issuer")
Expect(payload.Issuer).To(Equal(svcIssuer))
By("Ensuring that the service account issuer has a https scheme")
u, err := url.Parse(svcIssuer)
Expect(err).NotTo(HaveOccurred())
// Service account issuer for OIDC requires https scheme to work.
// See https://github.com/Azure/aks-engine/pull/4262 for more detail.
Expect(u.Scheme).To(Equal("https"))
})
})
Describe("with a windows agent pool", func() {
It("kubelet service should be able to recover when the docker service is stopped", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
if eng.HasWindowsAgents() {
if eng.ExpandedDefinition.Properties.WindowsProfile != nil && eng.ExpandedDefinition.Properties.WindowsProfile.GetSSHEnabled() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
simulateDockerdCrashScript := "simulate-dockerd-crash.cmd"
err = sshConn.CopyTo(simulateDockerdCrashScript)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if n.IsWindows() {
By(fmt.Sprintf("simulating docker and subsequent kubelet service crash on node: %s", n.Metadata.Name))
err = sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+simulateDockerdCrashScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
simulateDockerCrashCommand := fmt.Sprintf("\"/tmp/%s\"", simulateDockerdCrashScript)
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, simulateDockerCrashCommand, true, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
log.Print("Waiting 1 minute to allow nodes to report not ready state after the crash occurred\n")
time.Sleep(1 * time.Minute)
for _, n := range nodes {
if n.IsWindows() {
By(fmt.Sprintf("restarting kubelet service on node: %s", n.Metadata.Name))
restartKubeletCommand := "\"Powershell Start-Service kubelet\""
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, restartKubeletCommand, true, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
var expectedReadyNodes int
if !clusterAutoscalerEngaged {
expectedReadyNodes = len(nodes)
log.Printf("Checking for %d Ready nodes\n", expectedReadyNodes)
} else {
expectedReadyNodes = -1
}
ready := node.WaitOnReady(expectedReadyNodes, 1*time.Minute, cfg.Timeout)
cmd2 := exec.Command("k", "get", "nodes", "-o", "wide")
out2, _ := cmd2.CombinedOutput()
log.Printf("%s\n", out2)
if !ready {
log.Printf("Error: Not all nodes in a healthy state\n")
}
Expect(ready).To(Equal(true))
} else {
Skip("Windows SSH tests only work if WindowsProfile.SSHEnabled is true")
}
} else {
Skip("Docker service recovery test is Windows only")
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
})
Describe("with a linux agent pool", func() {
It("should be able to produce working LoadBalancers", func() {
if eng.AnyAgentIsLinux() {
By("Creating a nginx deployment")
r := rand.New(rand.NewSource(time.Now().UnixNano()))
serviceName := "ingress-nginx"
deploymentPrefix := fmt.Sprintf("%s-%s", serviceName, cfg.Name)
deploymentName := fmt.Sprintf("%s-%v", deploymentPrefix, r.Intn(99999))
deploy, err := deployment.CreateLinuxDeployDeleteIfExists(deploymentPrefix, "library/nginx:latest", deploymentName, "default", serviceName, "", cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
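// ILB validation is skipped on clusters with 100 or more nodes, presumably to keep runtime bounded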
if len(nodes) < 100 {
By("Ensuring we can create an ILB service attachment")
sILB, err := service.CreateServiceFromFileDeleteIfExist(filepath.Join(WorkloadDir, "ingress-nginx-ilb.yaml"), serviceName+"-ilb", "default")
Expect(err).NotTo(HaveOccurred())
err = sILB.WaitForIngress(cfg.LBTimeout, 5*time.Second)
Expect(err).NotTo(HaveOccurred())
By("Ensuring we can create a curl pod to connect to the service")
ilbCurlPod, err := pod.RunLinuxWithRetry("byrnedo/alpine-curl", "curl-to-ilb", "default", fmt.Sprintf("curl %s", sILB.Status.LoadBalancer.Ingress[0]["ip"]), false, 1*time.Minute, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
err = sILB.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = ilbCurlPod.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
}
By("Ensuring we can create an ELB service attachment")
sELB, err := service.CreateServiceFromFileDeleteIfExist(filepath.Join(WorkloadDir, "ingress-nginx-elb.yaml"), serviceName+"-elb", "default")
Expect(err).NotTo(HaveOccurred())
err = sELB.WaitForIngress(cfg.LBTimeout, 5*time.Second)
Expect(err).NotTo(HaveOccurred())
By("Ensuring we can connect to the ELB service on the service IP")
err = sELB.ValidateWithRetry("(Welcome to nginx)", 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Ensuring we can connect to the ELB service from another pod")
elbCurlPod, err := pod.RunLinuxWithRetry("byrnedo/alpine-curl", "curl-to-elb", "default", fmt.Sprintf("curl %s", sELB.Status.LoadBalancer.Ingress[0]["ip"]), false, 1*time.Minute, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Ensuring we can donwnload files through the ELB")
pods, err := pod.GetAllByPrefixWithRetry(deploymentPrefix, "default", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
for _, p := range pods {
out, err := p.Exec("--", "/bin/bash", "-c", "base64 /dev/urandom | head -c 500000 | tee -a /usr/share/nginx/html/index.html > /dev/null")
log.Printf("%s\n", string(out))
Expect(err).NotTo(HaveOccurred())
}
err = sELB.ValidateWithRetry("(Welcome to nginx)", 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
err = sELB.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = elbCurlPod.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = deploy.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("No linux agent was provisioned for this Cluster Definition")
}
})
It("should be able to get nodes metrics", func() {
err := node.TopNodesWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
})
It("should create a pv by deploying a pod that consumes a pvc", func() {
if !util.IsUsingManagedDisks(eng.ExpandedDefinition.Properties.AgentPoolProfiles) {
Skip("Skip PV test for clusters using unmanaged disks")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() &&
cfg.TestPVC {
By("Creating a persistent volume claim")
pvcName := "azure-disk" // should be the same as in pvc-azuredisk.yaml
pvc, err := persistentvolumeclaims.CreatePersistentVolumeClaimsFromFileWithRetry(filepath.Join(WorkloadDir, "pvc-azuredisk.yaml"), pvcName, "default", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
// Azure Disk CSI driver in zone-enabled clusters uses 'WaitForFirstConsumer' volume binding mode
// thus, pvc won't be available until a pod consumes it
isUsingAzureDiskCSIDriver, _ := eng.HasAddon(common.AzureDiskCSIDriverAddonName)
if !(isUsingAzureDiskCSIDriver && eng.ExpandedDefinition.Properties.HasZonesForAllAgentPools()) {
ready, err := pvc.WaitOnReady("default", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
}
By("Launching a pod using the volume claim")
podName := "pv-pod" // should be the same as in pod-pvc.yaml
testPod, err := pod.CreatePodFromFileWithRetry(filepath.Join(WorkloadDir, "pod-pvc.yaml"), podName, "default", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err := testPod.WaitOnReady(true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
By("Checking that the pod can access volume")
valid, err := testPod.ValidatePVC("/mnt/azure", 10, 10*time.Second)
Expect(valid).To(BeTrue())
Expect(err).NotTo(HaveOccurred())
// Skip label validation for Azure Disk CSI driver since it currently doesn't apply any label to PV
if !isUsingAzureDiskCSIDriver && eng.ExpandedDefinition.Properties.HasZonesForAllAgentPools() {
pvList, err := persistentvolume.Get()
Expect(err).NotTo(HaveOccurred())
pvZone := ""
for _, pv := range pvList.PersistentVolumes {
By("Ensuring that we get zones for the pv")
// zone is chosen by round-robin across all zones
pvZone = pv.Metadata.Labels["failure-domain.beta.kubernetes.io/zone"]
fmt.Printf("pvZone: %s\n", pvZone)
contains := strings.Contains(pvZone, "-")
Expect(contains).To(Equal(true))
// VolumeScheduling feature gate is set to true by default starting v1.10+
for _, expression := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms[0].MatchExpressions {
if expression.Key == "failure-domain.beta.kubernetes.io/zone" {
By("Ensuring that we get nodeAffinity for each pv")
value := expression.Values[0]
fmt.Printf("NodeAffinity value: %s\n", value)
contains := strings.Contains(value, "-")
Expect(contains).To(Equal(true))
}
}
}
By("Ensuring that attached volume pv has the same zone as the zone of the node")
nodeName := testPod.Spec.NodeName
nodeList, err := node.GetByRegexWithRetry(nodeName, 3*time.Minute, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
nodeZone := nodeList[0].Metadata.Labels["failure-domain.beta.kubernetes.io/zone"]
fmt.Printf("pvZone: %s\n", pvZone)
fmt.Printf("nodeZone: %s\n", nodeZone)
Expect(nodeZone == pvZone).To(Equal(true))
}
By("Cleaning up after ourselves")
err = testPod.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = pvc.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
})
Describe("with a GPU-enabled agent pool", func() {
It("should be able to run a nvidia-gpu job", func() {
if eng.ExpandedDefinition.Properties.HasNSeriesSKU() {
if hasAddon, _ := eng.HasAddon("nvidia-device-plugin"); !hasAddon {
By("Installing nvidia gpu-operator helm chart")
commandArgsSlice := []string{"upgrade", "--install", "--wait", "gpu-operator", "--repo", "https://nvidia.github.io/gpu-operator", "gpu-operator"}
if eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.NeedsContainerd() {
commandArgsSlice = append(commandArgsSlice, []string{"--set", "operator.defaultRuntime=containerd"}...)
}
ctx, cancel := context.WithTimeout(context.Background(), cfg.Timeout)
defer cancel()
cmd := exec.CommandContext(ctx, "helm", commandArgsSlice...)
out, err := cmd.CombinedOutput()
log.Printf("%s\n", out)
Expect(err).NotTo(HaveOccurred())
}
By("Running a CUDA vector job")
j, err := job.CreateJobFromFileWithRetry(filepath.Join(WorkloadDir, "cuda-vector-add.yaml"), "cuda-vector-add", "default", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err := j.WaitOnSucceeded(30*time.Second, cfg.Timeout)
delErr := j.Delete(util.DefaultDeleteRetries)
if delErr != nil {
fmt.Printf("could not delete job %s\n", j.Metadata.Name)
fmt.Println(delErr)
}
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
} else {
Skip("This is not a GPU-enabled cluster")
}
})
})
Describe("with a DC-series SKU agent pool", func() {
It("should be able to run an SGX job", func() {
if eng.ExpandedDefinition.Properties.HasDCSeriesSKU() {
j, err := job.CreateJobFromFileWithRetry(filepath.Join(WorkloadDir, "sgx-test.yaml"), "sgx-test", "default", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err := j.WaitOnSucceeded(30*time.Second, cfg.Timeout)
delErr := j.Delete(util.DefaultDeleteRetries)
if delErr != nil {
fmt.Printf("could not delete job %s\n", j.Metadata.Name)
fmt.Println(delErr)
}
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
} else {
Skip("This cluster does not have a DC-series SKU agent pool")
}
})
It("should be able to run an SGX job with sgx-device-plugin", func() {
if eng.ExpandedDefinition.Properties.HasDCSeriesSKU() {
sgx_device_plugin := ""
sgx_device_plugin_name := "sgx-device-plugin"
sgx_device_plugin_namespace := "kube-system"
sgx_device_plugin_label_key := "app"
sgx_device_plugin_label_value := "sgx-device-plugin"
if common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.17.0") {
sgx_device_plugin = "sgx-device-plugin.yaml"
} else {
sgx_device_plugin = "sgx-device-plugin-before-k8s-1-17.yaml"
}
_, err := daemonset.CreateDaemonsetFromFile(filepath.Join(WorkloadDir, sgx_device_plugin), sgx_device_plugin_name, sgx_device_plugin_namespace, 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
pods, err := pod.GetAllRunningByLabelWithRetry(sgx_device_plugin_label_key, sgx_device_plugin_label_value, sgx_device_plugin_namespace, 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pods).NotTo(BeEmpty())
j, err := job.CreateJobFromFileWithRetry(filepath.Join(WorkloadDir, "sgx-test-with-plugin.yaml"), "sgx-test-with-plugin", "default", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err := j.WaitOnSucceeded(30*time.Second, cfg.Timeout)
delErr := j.Delete(util.DefaultDeleteRetries)
if delErr != nil {
fmt.Printf("could not delete job %s\n", j.Metadata.Name)
fmt.Println(delErr)
}
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
} else {
Skip("This cluster does not have a DC-series SKU agent pool")
}
})
})
Describe("with zoned master profile", func() {
It("should be labeled with zones for each masternode", func() {
if eng.ExpandedDefinition.Properties.MasterProfile.HasAvailabilityZones() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
var role string
if common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.16.0") {
role = n.Metadata.Labels["kubernetes.azure.com/role"]
} else {
role = n.Metadata.Labels["kubernetes.io/role"]
}
if role == "master" {
By("Ensuring that we get zones for each master node")
zones := n.Metadata.Labels["failure-domain.beta.kubernetes.io/zone"]
contains := strings.Contains(zones, "-")
Expect(contains).To(Equal(true))
}
}
} else {
Skip("Availability zones was not configured for master profile for this Cluster Definition")
}
})
})
Describe("with all zoned agent pools", func() {
It("should be labeled with zones for each node", func() {
if eng.ExpandedDefinition.Properties.HasZonesForAllAgentPools() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
var role string
if common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.16.0") {
role = n.Metadata.Labels["kubernetes.azure.com/role"]
} else {
role = n.Metadata.Labels["kubernetes.io/role"]
}
if role == "agent" {
By("Ensuring that we get zones for each agent node")
zones := n.Metadata.Labels["failure-domain.beta.kubernetes.io/zone"]
contains := strings.Contains(zones, "-")
Expect(contains).To(Equal(true))
}
}
} else {
Skip("Availability zones was not configured for this Cluster Definition")
}
})
})
Describe("with NetworkPolicy enabled", func() {
It("should apply various network policies and enforce access to nginx pod", func() {
if eng.HasNetworkPolicy("calico") || eng.HasNetworkPolicy("azure") ||
eng.HasNetworkPolicy("cilium") || eng.HasNetworkPolicy("antrea") {
nsDev, nsProd := "development", "production"
By("Creating development namespace")
namespaceDev, err := namespace.CreateNamespaceDeleteIfExist(nsDev)
Expect(err).NotTo(HaveOccurred())
By("Creating production namespace")
namespaceProd, err := namespace.CreateNamespaceDeleteIfExist(nsProd)
Expect(err).NotTo(HaveOccurred())
By("Labelling development namespace")
err = namespaceDev.Label("purpose=development")
Expect(err).NotTo(HaveOccurred())
By("Labelling production namespace")
err = namespaceProd.Label("purpose=production")
Expect(err).NotTo(HaveOccurred())
By("Creating frontendProd, backend and network-policy pod deployments")
r := rand.New(rand.NewSource(time.Now().UnixNano()))
randInt := r.Intn(99999)
frontendProdDeploymentName := fmt.Sprintf("frontend-prod-%s-%v", cfg.Name, randInt)
frontendProdDeployment, err := deployment.CreateDeploymentFromImageWithRetry("library/nginx:latest", frontendProdDeploymentName, nsProd, "webapp", "frontend", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
frontendDevDeploymentName := fmt.Sprintf("frontend-dev-%s-%v", cfg.Name, randInt+100000)
frontendDevDeployment, err := deployment.CreateDeploymentFromImageWithRetry("library/nginx:latest", frontendDevDeploymentName, nsDev, "webapp", "frontend", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
backendDeploymentName := fmt.Sprintf("backend-%s-%v", cfg.Name, randInt+200000)
backendDeployment, err := deployment.CreateDeploymentFromImageWithRetry("library/nginx:latest", backendDeploymentName, nsDev, "webapp", "backend", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
nwpolicyDeploymentName := fmt.Sprintf("network-policy-%s-%v", cfg.Name, randInt+300000)
nwpolicyDeployment, err := deployment.CreateDeploymentFromImageWithRetry("library/nginx:latest", nwpolicyDeploymentName, nsDev, "", "", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Ensure there is a running frontend-prod pod")
networkpolicy.EnsureRunningPodExists(frontendProdDeploymentName, nsProd, 4, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
By("Ensure there is a running frontend-dev pod")
networkpolicy.EnsureRunningPodExists(frontendDevDeploymentName, nsDev, 4, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
By("Ensure there is a running backend pod")
networkpolicy.EnsureRunningPodExists(backendDeploymentName, nsDev, 4, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
By("Ensure there is a running network-policy pod")
networkpolicy.EnsureRunningPodExists(nwpolicyDeploymentName, nsDev, 4, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
By("Ensuring we have outbound internet access from the frontend-prod pods")
frontendProdPods := networkpolicy.GetRunningPodsFromDeployment(frontendProdDeployment)
networkpolicy.EnsureOutboundInternetAccess(frontendProdPods, cfg)
By("Ensuring we have outbound internet access from the frontend-dev pods")
frontendDevPods := networkpolicy.GetRunningPodsFromDeployment(frontendDevDeployment)
networkpolicy.EnsureOutboundInternetAccess(frontendDevPods, cfg)
By("Ensuring we have outbound internet access from the backend pods")
backendPods := networkpolicy.GetRunningPodsFromDeployment(backendDeployment)
networkpolicy.EnsureOutboundInternetAccess(backendPods, cfg)
By("Ensuring we have outbound internet access from the network-policy pods")
nwpolicyPods := networkpolicy.GetRunningPodsFromDeployment(nwpolicyDeployment)
networkpolicy.EnsureOutboundInternetAccess(nwpolicyPods, cfg)
By("Ensuring we have connectivity from network-policy pods to frontend-prod pods")
networkpolicy.EnsureConnectivityResultBetweenPods(nwpolicyPods, frontendProdPods, validateNetworkPolicyTimeout, true)
By("Ensuring we have connectivity from network-policy pods to backend pods")
networkpolicy.EnsureConnectivityResultBetweenPods(nwpolicyPods, backendPods, validateNetworkPolicyTimeout, true)
By("Applying a network policy to deny ingress access to app: webapp, role: backend pods in development namespace")
nwpolicyName, namespace, nwpolicyFileName := "backend-deny-ingress", nsDev, "backend-policy-deny-ingress.yaml"
networkpolicy.ApplyNetworkPolicy(nwpolicyName, namespace, nwpolicyFileName, PolicyDir)
By("Ensuring we no longer have ingress access from the network-policy pods to backend pods")
networkpolicy.EnsureConnectivityResultBetweenPods(nwpolicyPods, backendPods, validateNetworkPolicyTimeout, false)
By("Cleaning up after ourselves")
networkpolicy.DeleteNetworkPolicy(nwpolicyName, namespace)
By("Applying a network policy to deny egress access in development namespace")
nwpolicyName, namespace, nwpolicyFileName = "backend-deny-egress", nsDev, "backend-policy-deny-egress.yaml"
networkpolicy.ApplyNetworkPolicy(nwpolicyName, nsDev, nwpolicyFileName, PolicyDir)
By("Ensuring we no longer have egress access from the network-policy pods to backend pods")
networkpolicy.EnsureConnectivityResultBetweenPods(nwpolicyPods, backendPods, validateNetworkPolicyTimeout, false)
networkpolicy.EnsureConnectivityResultBetweenPods(frontendDevPods, backendPods, validateNetworkPolicyTimeout, false)
By("Cleaning up after ourselves")
networkpolicy.DeleteNetworkPolicy(nwpolicyName, namespace)
if common.IsKubernetesVersionGe(eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.11.0") {
By("Applying a network policy to allow egress access to app: webapp, role: frontend pods in any namespace from pods with app: webapp, role: backend labels in development namespace")
nwpolicyName, namespace, nwpolicyFileName := "backend-allow-egress-pod-label", nsDev, "backend-policy-allow-egress-pod-label.yaml"
networkpolicy.ApplyNetworkPolicy(nwpolicyName, namespace, nwpolicyFileName, PolicyDir)
By("Ensuring we have egress access from pods with matching labels")
networkpolicy.EnsureConnectivityResultBetweenPods(backendPods, frontendDevPods, validateNetworkPolicyTimeout, true)
networkpolicy.EnsureConnectivityResultBetweenPods(backendPods, frontendProdPods, validateNetworkPolicyTimeout, true)
By("Ensuring we don't have ingress access from pods without matching labels")
networkpolicy.EnsureConnectivityResultBetweenPods(backendPods, nwpolicyPods, validateNetworkPolicyTimeout, false)
By("Cleaning up after ourselves")
networkpolicy.DeleteNetworkPolicy(nwpolicyName, namespace)
By("Applying a network policy to allow egress access to app: webapp, role: frontend pods from pods with app: webapp, role: backend labels in same development namespace")
nwpolicyName, namespace, nwpolicyFileName = "backend-allow-egress-pod-namespace-label", nsDev, "backend-policy-allow-egress-pod-namespace-label.yaml"
networkpolicy.ApplyNetworkPolicy(nwpolicyName, namespace, nwpolicyFileName, PolicyDir)
By("Ensuring we have egress access from pods with matching labels")
networkpolicy.EnsureConnectivityResultBetweenPods(backendPods, frontendDevPods, validateNetworkPolicyTimeout, true)
By("Ensuring we don't have ingress access from pods without matching labels")
networkpolicy.EnsureConnectivityResultBetweenPods(backendPods, frontendProdPods, validateNetworkPolicyTimeout, false)
networkpolicy.EnsureConnectivityResultBetweenPods(backendPods, nwpolicyPods, validateNetworkPolicyTimeout, false)
By("Cleaning up after ourselves")
networkpolicy.DeleteNetworkPolicy(nwpolicyName, namespace)
By("Applying a network policy to only allow ingress access to app: webapp, role: backend pods in development namespace from pods in any namespace with the same labels")
nwpolicyName, namespace, nwpolicyFileName = "backend-allow-ingress-pod-label", nsDev, "backend-policy-allow-ingress-pod-label.yaml"
networkpolicy.ApplyNetworkPolicy(nwpolicyName, namespace, nwpolicyFileName, PolicyDir)
By("Ensuring we have ingress access from pods with matching labels")
networkpolicy.EnsureConnectivityResultBetweenPods(backendPods, backendPods, validateNetworkPolicyTimeout, true)
By("Ensuring we don't have ingress access from pods without matching labels")
networkpolicy.EnsureConnectivityResultBetweenPods(nwpolicyPods, backendPods, validateNetworkPolicyTimeout, false)
By("Cleaning up after ourselves")
networkpolicy.DeleteNetworkPolicy(nwpolicyName, namespace)
By("Applying a network policy to only allow ingress access to app: webapp role:backends in development namespace from pods with label app:webapp, role: frontendProd within namespace with label purpose: development")
nwpolicyName, namespace, nwpolicyFileName = "backend-policy-allow-ingress-pod-namespace-label", nsDev, "backend-policy-allow-ingress-pod-namespace-label.yaml"
networkpolicy.ApplyNetworkPolicy(nwpolicyName, namespace, nwpolicyFileName, PolicyDir)
By("Ensuring we don't have ingress access from role:frontend pods in production namespace")
networkpolicy.EnsureConnectivityResultBetweenPods(frontendProdPods, backendPods, validateNetworkPolicyTimeout, false)
By("Ensuring we have ingress access from role:frontend pods in development namespace")
networkpolicy.EnsureConnectivityResultBetweenPods(frontendDevPods, backendPods, validateNetworkPolicyTimeout, true)
By("Cleaning up after ourselves")
networkpolicy.DeleteNetworkPolicy(nwpolicyName, namespace)
}
By("Cleaning up after ourselves")
err = frontendProdDeployment.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = frontendDevDeployment.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = backendDeployment.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = nwpolicyDeployment.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = namespaceDev.Delete()
Expect(err).NotTo(HaveOccurred())
err = namespaceProd.Delete()
Expect(err).NotTo(HaveOccurred())
} else {
Skip("Calico or Azure or Cilium or Antrea network policy was not provisioned for this Cluster Definition")
}
})
})
Describe("with a windows agent pool", func() {
It("should be able to deploy and scale an iis webserver", func() {
if eng.HasWindowsAgents() {
windowsImages, err := eng.GetWindowsTestImages()
Expect(err).NotTo(HaveOccurred())
r := rand.New(rand.NewSource(time.Now().UnixNano()))
deploymentPrefix := fmt.Sprintf("iis-%s", cfg.Name)
deploymentName := fmt.Sprintf("%s-%v", deploymentPrefix, r.Intn(99999))
By("Creating a deployment with 1 pod running IIS")
iisDeploy, err := deployment.CreateWindowsDeployWithHostportDeleteIfExist(deploymentPrefix, windowsImages.IIS, deploymentName, "default", 80, -1)
Expect(err).NotTo(HaveOccurred())
By("Waiting on pod to be Ready")
running, err := pod.WaitOnSuccesses(deploymentName, "default", 4, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Exposing a LoadBalancer for the pod")
err = iisDeploy.ExposeDeleteIfExist(deploymentPrefix, "default", "LoadBalancer", 80, 80)
Expect(err).NotTo(HaveOccurred())
iisService, err := service.Get(deploymentName, "default")
Expect(err).NotTo(HaveOccurred())
err = iisService.WaitForIngress(cfg.LBTimeout, 5*time.Second)
Expect(err).NotTo(HaveOccurred())
By("Verifying that the service is reachable and returns the default IIS start page")
err = iisService.ValidateWithRetry("(IIS Windows Server)", sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Checking that each pod can reach the internet")
var iisPods []pod.Pod
iisPods, err = iisDeploy.PodsRunning()
Expect(err).NotTo(HaveOccurred())
Expect(len(iisPods)).ToNot(BeZero())
for _, iisPod := range iisPods {
var pass bool
pass, err = iisPod.CheckWindowsOutboundConnection(sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
}
By("Scaling deployment to 5 pods")
err = iisDeploy.ScaleDeployment(5)
Expect(err).NotTo(HaveOccurred())
_, err = iisDeploy.WaitForReplicas(5, 5, 2*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Waiting on 5 pods to be Ready")
running, err = pod.WaitOnSuccesses(deploymentName, "default", 4, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
iisPods, err = iisDeploy.PodsRunning()
Expect(err).NotTo(HaveOccurred())
Expect(len(iisPods)).To(Equal(5))
By("Verifying that the service is reachable and returns the default IIS start page")
err = iisService.ValidateWithRetry("(IIS Windows Server)", sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Checking that each pod can reach the internet")
iisPods, err = iisDeploy.PodsRunning()
Expect(err).NotTo(HaveOccurred())
Expect(len(iisPods)).ToNot(BeZero())
for _, iisPod := range iisPods {
var pass bool
pass, err = iisPod.CheckWindowsOutboundConnection(sleepBetweenRetriesWhenWaitingForPodReady, timeoutWhenWaitingForPodOutboundAccess)
Expect(err).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
}
By("Scaling deployment to 2 pods")
err = iisDeploy.ScaleDeployment(2)
Expect(err).NotTo(HaveOccurred())
_, err = iisDeploy.WaitForReplicas(2, 2, 2*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
iisPods, err = iisDeploy.PodsRunning()
Expect(err).NotTo(HaveOccurred())
Expect(len(iisPods)).To(Equal(2))
By("Verifying that the service is reachable and returns the default IIS start page")
err = iisService.ValidateWithRetry("(IIS Windows Server)", sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Checking that each pod can reach the internet")
iisPods, err = iisDeploy.PodsRunning()
Expect(err).NotTo(HaveOccurred())
Expect(len(iisPods)).ToNot(BeZero())
for _, iisPod := range iisPods {
var pass bool
pass, err = iisPod.CheckWindowsOutboundConnection(sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
}
By("Ensuring we can donwnload files through the ELB")
for _, iisPod := range iisPods {
fileGenCmd := "(1..(500kb/34)).foreach({-join ('4489bfc5648d4ab58c7129a1d5f2f061') }) | Add-Content C:\\inetpub\\wwwroot\\iisstart.htm"
out, err := iisPod.Exec("--", "powershell", "-command", fileGenCmd)
log.Printf("%s\n", string(out))
Expect(err).NotTo(HaveOccurred())
}
err = iisService.ValidateWithRetry("(IIS Windows Server)", 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Verifying pods & services can be deleted")
err = iisDeploy.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = iisService.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("No windows agent was provisioned for this Cluster Definition")
}
})
It("should be able to resolve DNS across windows and linux deployments", func() {
if eng.HasWindowsAgents() {
if eng.HasNetworkPlugin(api.NetworkPluginKubenet) {
Skip("This tests is not enabled for kubenet CNI on windows")
}
windowsImages, err := eng.GetWindowsTestImages()
Expect(err).NotTo(HaveOccurred())
r := rand.New(rand.NewSource(time.Now().UnixNano()))
deploymentPrefix := fmt.Sprintf("iis-dns-%s", cfg.Name)
windowsDeploymentName := fmt.Sprintf("%s-%v", deploymentPrefix, r.Intn(99999))
By("Creating a deployment running IIS")
windowsIISDeployment, err := deployment.CreateWindowsDeployWithHostportDeleteIfExist(deploymentPrefix, windowsImages.IIS, windowsDeploymentName, "default", 80, -1)
Expect(err).NotTo(HaveOccurred())
deploymentPrefix = fmt.Sprintf("nginx-dns-%s", cfg.Name)
nginxDeploymentName := fmt.Sprintf("%s-%v", deploymentPrefix, r.Intn(99999))
By("Creating a nginx deployment")
linuxNginxDeploy, err := deployment.CreateLinuxDeployDeleteIfExists(deploymentPrefix, "library/nginx:latest", nginxDeploymentName, "default", "", "", cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Ensure there is a Running nginx pod")
running, err := pod.WaitOnSuccesses(nginxDeploymentName, "default", 4, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Ensure there is a Running iis pod")
running, err = pod.WaitOnSuccesses(windowsDeploymentName, "default", 4, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Exposing a internal service for the linux nginx deployment")
err = linuxNginxDeploy.ExposeIfNotExist("ClusterIP", 80, 80)
Expect(err).NotTo(HaveOccurred())
linuxService, err := service.Get(nginxDeploymentName, "default")
Expect(err).NotTo(HaveOccurred())
By("Exposing a internal service for the windows iis deployment")
err = windowsIISDeployment.ExposeIfNotExist("ClusterIP", 80, 80)
Expect(err).NotTo(HaveOccurred())
windowsService, err := service.Get(windowsDeploymentName, "default")
Expect(err).NotTo(HaveOccurred())
By("Connecting to Windows from another Windows deployment")
name := fmt.Sprintf("windows-2-windows-%s", cfg.Name)
command := fmt.Sprintf("iwr -UseBasicParsing -TimeoutSec 60 %s", windowsService.Metadata.Name)
successes, err := pod.RunCommandMultipleTimes(pod.RunWindowsPod, windowsImages.ServerCore, name, command, cfg.StabilityIterations, 1*time.Second, singleCommandTimeout, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
By("Connecting to Linux from Windows deployment")
name = fmt.Sprintf("windows-2-linux-%s", cfg.Name)
command = fmt.Sprintf("iwr -UseBasicParsing -TimeoutSec 60 %s", linuxService.Metadata.Name)
successes, err = pod.RunCommandMultipleTimes(pod.RunWindowsPod, windowsImages.ServerCore, name, command, cfg.StabilityIterations, 1*time.Second, singleCommandTimeout, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
By("Connecting to Windows from Linux deployment")
name = fmt.Sprintf("linux-2-windows-%s", cfg.Name)
command = fmt.Sprintf("wget %s", windowsService.Metadata.Name)
successes, err = pod.RunCommandMultipleTimes(pod.RunLinuxPod, "alpine", name, command, cfg.StabilityIterations, 1*time.Second, singleCommandTimeout, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(successes).To(Equal(cfg.StabilityIterations))
By("Cleaning up after ourselves")
err = windowsIISDeployment.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = linuxNginxDeploy.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = windowsService.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = linuxService.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("No windows agent was provisioned for this Cluster Definition")
}
})
// Windows Bug 18213017: Kubernetes Hostport mappings don't work
/*
It("should be able to reach hostport in an iis webserver", func() {
if eng.HasWindowsAgents() {
r := rand.New(rand.NewSource(time.Now().UnixNano()))
hostport := 8123
deploymentName := fmt.Sprintf("iis-%s-%v", cfg.Name, r.Intn(99999))
iisDeploy, err := deployment.CreateWindowsDeployIfNotExist(iisImage, deploymentName, "default", 80, hostport)
Expect(err).NotTo(HaveOccurred())
running, err := pod.WaitOnSuccesses(deploymentName, "default", 4, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
iisPods, err := iisDeploy.PodsRunning()
Expect(err).NotTo(HaveOccurred())
Expect(len(iisPods)).ToNot(BeZero())
kubeConfig, err := GetConfigWithRetry(3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
master := fmt.Sprintf("azureuser@%s", kubeConfig.GetServerName())
for _, iisPod := range iisPods {
valid := iisPod.ValidateHostPort("(IIS Windows Server)", 10, 10*time.Second, master, masterSSHPrivateKeyFilepath)
Expect(valid).To(BeTrue())
}
err = iisDeploy.Delete(kubectlOutput)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("No windows agent was provisioned for this Cluster Definition")
}
})*/
It("should be able to attach azure file", func() {
if eng.HasWindowsAgents() && !eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.NeedsContainerd() {
useCloudControllerManager := to.Bool(eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.UseCloudControllerManager)
if to.Bool(eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.UseManagedIdentity) && useCloudControllerManager {
Skip("cloud-controller-manager storageclass doesn't work w/ MSI")
}
orchestratorVersion := eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion
if orchestratorVersion == "1.11.0" {
// Failure in 1.11.0 - https://github.com/kubernetes/kubernetes/issues/65845, fixed in 1.11.1
Skip("Kubernetes 1.11.0 has a known issue creating Azure PersistentVolumeClaim")
} else if common.IsKubernetesVersionGe(orchestratorVersion, "1.8.0") {
windowsImages, err := eng.GetWindowsTestImages()
Expect(err).NotTo(HaveOccurred())
iisAzurefileYaml, err := pod.ReplaceContainerImageFromFile(filepath.Join(WorkloadDir, "iis-azurefile.yaml"), windowsImages.IIS)
Expect(err).NotTo(HaveOccurred())
defer os.Remove(iisAzurefileYaml)
By("Creating an AzureFile storage class")
storageclassName := "azurefile" // should be the same as in storageclass-azurefile.yaml
scFilename := "storageclass-azurefile.yaml"
if useCloudControllerManager && common.IsKubernetesVersionGe(orchestratorVersion, "1.16.0") {
scFilename = "storageclass-azurefile-external.yaml"
}
sc, err := storageclass.CreateStorageClassFromFileWithRetry(filepath.Join(WorkloadDir, scFilename), storageclassName, 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err := sc.WaitOnReady(5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
By("Creating a persistent volume claim")
pvcName := "pvc-azurefile" // should be the same as in pvc-azurefile.yaml
pvc, err := persistentvolumeclaims.CreatePVCFromFileDeleteIfExist(filepath.Join(WorkloadDir, "pvc-azurefile.yaml"), pvcName, "default", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err = pvc.WaitOnReady("default", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
By("Launching an IIS pod using the volume claim")
podName := "iis-azurefile" // should be the same as in iis-azurefile.yaml
iisPod, err := pod.CreatePodFromFileWithRetry(iisAzurefileYaml, podName, "default", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err = iisPod.WaitOnReady(true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
By("Checking that the pod can access volume")
valid, err := iisPod.ValidateAzureFile("mnt\\azure", 10*time.Second, 3*time.Minute)
Expect(valid).To(BeTrue())
Expect(err).NotTo(HaveOccurred())
err = iisPod.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
err = pvc.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("Kubernetes version needs to be 1.8 and up for Azure File test")
}
} else {
Skip("No windows agent was provisioned for this Cluster Definition")
}
})
// This test is not parallelizable due to tainting nodes with NoSchedule
It("should expect containers to be recreated after node restart", func() {
if eng.HasWindowsAgents() {
for _, profile := range eng.ExpandedDefinition.Properties.AgentPoolProfiles {
if profile.IsWindows() {
if profile.AvailabilityProfile == api.AvailabilitySet {
Skip("AvailabilitySet is configured for this Cluster Definition")
}
}
}
if eng.HasNetworkPlugin(api.NetworkPluginKubenet) {
Skip("This tests is not enabled for kubenet CNI on windows")
}
windowsImages, err := eng.GetWindowsTestImages()
Expect(err).NotTo(HaveOccurred())
r := rand.New(rand.NewSource(time.Now().UnixNano()))
deploymentPrefix := fmt.Sprintf("iis-%s", cfg.Name)
deploymentName := fmt.Sprintf("%s-%v", deploymentPrefix, r.Intn(99999))
By("Creating a deployment with 1 pod running IIS")
iisDeploy, err := deployment.CreateWindowsDeployWithHostportDeleteIfExist(deploymentPrefix, windowsImages.IIS, deploymentName, "default", 80, -1)
Expect(err).NotTo(HaveOccurred())
By("Waiting on pod to be Ready")
running, err := pod.WaitOnSuccesses(deploymentName, "default", 4, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Exposing a LoadBalancer for the pod")
err = iisDeploy.ExposeDeleteIfExist(deploymentPrefix, "default", "LoadBalancer", 80, 80)
Expect(err).NotTo(HaveOccurred())
iisService, err := service.Get(deploymentName, "default")
Expect(err).NotTo(HaveOccurred())
err = iisService.WaitForIngress(cfg.LBTimeout, 5*time.Second)
Expect(err).NotTo(HaveOccurred())
By("Verifying that the service is reachable and returns the default IIS start page")
err = iisService.ValidateWithRetry("(IIS Windows Server)", sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
pods, err := iisDeploy.Pods()
Expect(err).NotTo(HaveOccurred())
nodeName := pods[0].Spec.NodeName
ctx, cancel := context.WithTimeout(context.Background(), 100*time.Minute)
defer cancel()
By("Adding taint to all other Windows nodes")
nodeList, err := node.Get()
Expect(err).NotTo(HaveOccurred())
for _, n := range nodeList.Nodes {
if n.IsWindows() && n.Metadata.Name != nodeName {
n.AddTaint(node.Taint{Key: "key", Value: "value", Effect: "NoSchedule"})
}
}
// Removing taints
defer func(nodeList *node.List, nodeName string) {
for _, n := range nodeList.Nodes {
if n.IsWindows() && n.Metadata.Name != nodeName {
n.RemoveTaint(node.Taint{Key: "key", Value: "value", Effect: "NoSchedule"})
}
}
}(nodeList, nodeName)
By("Restarting VM " + nodeName + " in resource group " + cfg.ResourceGroup)
// Getting vmss for the vm
vmssPage, err := azureClient.ListVirtualMachineScaleSets(ctx, cfg.ResourceGroup)
Expect(err).NotTo(HaveOccurred())
vmssList := vmssPage.Values()
// Name of VMSS of nodeName
var vmssName string
// InstanceID of VM in its VMSS
var instanceID string
for _, vmss := range vmssList {
if !strings.Contains(nodeName, *vmss.Name) {
continue
}
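// NOTE: this assumes a single-digit VMSS instance index; the node name's last character is
// appended to the scale set name to reconstruct the VM name.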
vmName := *vmss.Name + "_" + nodeName[len(nodeName)-1:]
vmPage, err := azureClient.ListVirtualMachineScaleSetVMs(ctx, cfg.ResourceGroup, *vmss.Name)
Expect(err).NotTo(HaveOccurred())
vmList := vmPage.Values()
for _, vm := range vmList {
if vmName == *vm.Name {
vmssName = *vmss.Name
instanceID = *vm.InstanceID
break
}
}
if instanceID != "" {
break
}
}
// TODO refactor to remove the "compute" usage so the test can be run on Azure Stack
instanceIDs := &compute.VirtualMachineScaleSetVMInstanceIDs{&[]string{instanceID}}
err = azureClient.RestartVirtualMachineScaleSets(ctx, cfg.ResourceGroup, vmssName, instanceIDs)
Expect(err).NotTo(HaveOccurred())
// Wait for the VM to come back up
time.Sleep(30 * time.Second)
By("Verifying that the service is still reachable and returns the default IIS start page")
err = iisService.ValidateWithRetry("(IIS Windows Server)", sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("No windows agent was provisioned for this Cluster Definition")
}
})
// verifies that the pod logs continue to flow even during rotation
// https://github.com/Azure/aks-engine/issues/3573
It("should be able to rotate docker logs", func() {
if !eng.HasWindowsAgents() {
Skip("No windows agent was provisioned for this Cluster Definition")
}
if !eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.RequiresDocker() {
Skip("Skip docker validations on non-docker-backed clusters")
}
windowsImages, err := eng.GetWindowsTestImages()
Expect(err).NotTo(HaveOccurred())
loggingPodFile, err := pod.ReplaceContainerImageFromFile(filepath.Join(WorkloadDir, "validate-windows-logging.yaml"), windowsImages.ServerCore)
Expect(err).NotTo(HaveOccurred())
defer os.Remove(loggingPodFile)
By("launching a pod that logs too much")
podName := "validate-windows-logging" // should be the same as in iis-azurefile.yaml
loggingPod, err := pod.CreatePodFromFileWithRetry(loggingPodFile, podName, "default", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
ready, err := loggingPod.WaitOnReady(false, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(ready).To(Equal(true))
By("validating the logs continue to flow")
logsRotated, err := loggingPod.ValidateLogsRotate(20*time.Second, 2*time.Minute)
Expect(err).NotTo(HaveOccurred())
Expect(logsRotated).To(Equal(true))
})
// metrics endpoints failing in 1.18+
// https://github.com/kubernetes/kubernetes/issues/95735
It("windows should be able to get node metrics when high cpu", func() {
if !eng.HasWindowsAgents() || !cfg.ValidateCPULoad {
Skip("Will not validate effects of CPU load against nodes")
}
windowsImages, err := eng.GetWindowsTestImages()
Expect(err).NotTo(HaveOccurred())
cpuConsumptionDeploymentFile, err := pod.ReplaceContainerImageFromFile(filepath.Join(WorkloadDir, "validate-windows-cpu-consumption.yaml"), windowsImages.ServerCore)
Expect(err).NotTo(HaveOccurred())
defer os.Remove(cpuConsumptionDeploymentFile)
By("launching a deployment that consumes too much CPU")
deploymentName := "validate-windows-cpu-consumption" // should be the same as in yaml
cpuDeployment, err := deployment.CreateDeploymentFromFileWithRetry(cpuConsumptionDeploymentFile, deploymentName, "default", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
running, err := pod.WaitOnSuccesses(deploymentName, "default", 4, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Scaling deployment to consuming allocatable")
nodeList, err := node.GetWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
cpuCapacity := 0
for _, n := range nodeList {
if n.IsWindows() {
c, err := strconv.Atoi(n.Status.Capacity.CPU)
Expect(err).NotTo(HaveOccurred())
cpuCapacity = cpuCapacity + c
}
}
// scale over allocatable for windows to make sure it's packed (.25 is limit on deployment)
deployCount := int(math.Round(float64(cpuCapacity) / 0.25))
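// e.g. 4 Windows cores / 0.25 CPU per pod = 16 pods to fill capacity;
// scaling to 2x (32 replicas) guarantees the Windows nodes stay packed.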
err = cpuDeployment.ScaleDeployment(deployCount * 2)
Expect(err).NotTo(HaveOccurred())
By("should be able to get nodes metrics")
checkMetrics := func() error {
log.Printf("running top nodes")
err = node.TopNodes()
return err
}
_, err = cpuDeployment.WaitForReplicasWithAction(deployCount, deployCount*2, 2*time.Second, cfg.Timeout, checkMetrics)
Expect(err).NotTo(HaveOccurred())
cpuPods, err := cpuDeployment.PodsRunning()
Expect(err).NotTo(HaveOccurred())
Expect(len(cpuPods)).To(BeNumerically(">=", deployCount))
By("should be able to get nodes metrics")
err = node.TopNodesWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Verifying pods & services can be deleted")
err = cpuDeployment.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
})
})
Describe("after the cluster has been up for a while", func() {
It("dns-liveness pod should not have any restarts", func() {
if !cfg.RebootControlPlaneNodes {
pod, err := pod.Get("dns-liveness", "default", podLookupRetries)
Expect(err).NotTo(HaveOccurred())
running, err := pod.WaitOnReady(true, sleepBetweenRetriesWhenWaitingForPodReady, 3*time.Minute)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
restarts := pod.Status.ContainerStatuses[0].RestartCount
if cfg.SoakClusterName == "" {
err = pod.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
Expect(restarts).To(Equal(0))
} else {
log.Printf("%d DNS livenessProbe restarts since this cluster was created...\n", restarts)
}
}
})
It("should have healthy time synchronization", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
timeSyncValidateScript := "time-sync-validate.sh"
err = sshConn.CopyTo(timeSyncValidateScript)
Expect(err).NotTo(HaveOccurred())
timeSyncValidationCommand := fmt.Sprintf("\"/tmp/%s\"", timeSyncValidateScript)
err = sshConn.Execute(timeSyncValidationCommand, false)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if n.IsUbuntu() && !firstMasterRegexp.MatchString(n.Metadata.Name) {
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+timeSyncValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, timeSyncValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should be able to autoscale", func() {
var numCoreDNSPods int
var testCoreDNSScaleOut bool
if eng.AnyAgentIsLinux() && eng.ExpandedDefinition.Properties.OrchestratorProfile.KubernetesConfig.EnableAggregatedAPIs {
// Inspired by http://blog.kubernetes.io/2016/07/autoscaling-in-kubernetes.html
r := rand.New(rand.NewSource(time.Now().UnixNano()))
By("Creating a php-apache deployment")
phpApacheDeploy, err := deployment.CreateLinuxDeployIfNotExist("deis/hpa-example", longRunningApacheDeploymentName, "default", "", "", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
By("Ensuring that the php-apache pod is running")
running, err := pod.WaitOnSuccesses(longRunningApacheDeploymentName, "default", 4, true, sleepBetweenRetriesWhenWaitingForPodReady, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(running).To(Equal(true))
By("Ensuring that the php-apache pod has outbound internet access")
pods, err := phpApacheDeploy.PodsRunning()
Expect(err).NotTo(HaveOccurred())
for _, p := range pods {
pass, outboundErr := p.CheckLinuxOutboundConnection(5*time.Second, cfg.Timeout)
Expect(outboundErr).NotTo(HaveOccurred())
Expect(pass).To(BeTrue())
}
By("Exposing TCP 80 internally on the php-apache deployment")
err = phpApacheDeploy.ExposeIfNotExist("ClusterIP", 80, 80)
Expect(err).NotTo(HaveOccurred())
By("Assigning hpa configuration to the php-apache deployment")
// Apply autoscale characteristics to deployment
var cpuTarget, totalMaxPods int
if clusterAutoscalerEngaged {
nodeList, err := node.GetWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
if hasAddon, addon := eng.HasAddon("coredns"); hasAddon {
nodesPerReplica, _ := strconv.Atoi(addon.Config["nodes-per-replica"])
corednsPods, err := pod.GetAllByPrefixWithRetry("coredns", "kube-system", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
corednsAutoscalerPods, err := pod.GetAllByPrefixWithRetry("coredns-autoscaler", "kube-system", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
numCoreDNSPods = len(corednsPods) - len(corednsAutoscalerPods)
coreDNSNodesOverhead := nodesPerReplica - (len(nodeList) * numCoreDNSPods)
var clusterAutoscalerNodesOverhead int
for _, pool := range clusterAutoscalerAddon.Pools {
p := eng.ExpandedDefinition.Properties.GetAgentPoolIndexByName(pool.Name)
maxNodes, _ := strconv.Atoi(pool.Config["max-nodes"])
if maxNodes > eng.ExpandedDefinition.Properties.AgentPoolProfiles[p].Count {
clusterAutoscalerNodesOverhead += (maxNodes - eng.ExpandedDefinition.Properties.AgentPoolProfiles[p].Count)
}
}
if coreDNSNodesOverhead >= 0 && coreDNSNodesOverhead < clusterAutoscalerNodesOverhead {
testCoreDNSScaleOut = true
By("Validating that coredns pods scale out with nodes")
log.Printf("%d coredns pods before scaling out\n", numCoreDNSPods)
}
}
cpuTarget = 50
for _, profile := range eng.ExpandedDefinition.Properties.AgentPoolProfiles {
// TODO enable cluster-autoscaler tests for Windows
if profile.IsLinux() {
for _, pool := range clusterAutoscalerAddon.Pools {
if pool.Name == profile.Name {
maxPods, _ := strconv.Atoi(profile.KubernetesConfig.KubeletConfig["--max-pods"])
totalMaxPods += (profile.Count * maxPods)
}
}
}
}
maxPods, _ := strconv.Atoi(eng.ExpandedDefinition.Properties.MasterProfile.KubernetesConfig.KubeletConfig["--max-pods"])
totalMaxPods += (len(masterNodes) * maxPods)
} else {
cpuTarget = 50
totalMaxPods = 10
}
err = phpApacheDeploy.CreateDeploymentHPADeleteIfExist(cpuTarget, 1, totalMaxPods+1)
Expect(err).NotTo(HaveOccurred())
h, err := hpa.Get(longRunningApacheDeploymentName, "default", 10)
Expect(err).NotTo(HaveOccurred())
By("Sending load to the php-apache service by creating a 3 replica deployment")
// Launch a simple busybox pod that runs wget continuously against the apache service to simulate load
commandString := fmt.Sprintf("while true; do wget -q -O- http://%s.default.svc.cluster.local; done", longRunningApacheDeploymentName)
loadTestPrefix := fmt.Sprintf("load-test-%s", cfg.Name)
loadTestName := fmt.Sprintf("%s-%v", loadTestPrefix, r.Intn(99999))
numLoadTestPods := 3
if clusterAutoscalerEngaged {
numLoadTestPods = (totalMaxPods / 2)
}
loadTestDeploy, err := deployment.RunLinuxDeployDeleteIfExists(loadTestPrefix, "busybox", loadTestName, "default", commandString, numLoadTestPods)
Expect(err).NotTo(HaveOccurred())
By("Ensuring we have more than 1 apache-php pods due to hpa enforcement")
_, err = phpApacheDeploy.WaitForReplicas(2, -1, 5*time.Second, cfg.Timeout)
if err != nil {
e := h.Describe()
Expect(e).NotTo(HaveOccurred())
}
Expect(err).NotTo(HaveOccurred())
if clusterAutoscalerEngaged {
By("Ensuring at least one more node was added by cluster-autoscaler")
ready := node.WaitOnReadyMin(eng.NodeCount()+1, 10*time.Second, true, cfg.Timeout)
Expect(ready).To(BeTrue())
if testCoreDNSScaleOut {
By("Ensuring at least one more coredns pod was added by coredns-autoscaler")
d, err := deployment.GetWithRetry("coredns", "kube-system", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
numCoreDNSAutoscalerPods := 1
_, err = d.WaitForReplicas(numCoreDNSPods+numCoreDNSAutoscalerPods+1, -1, 5*time.Second, cfg.Timeout)
if err != nil {
pod.PrintPodsLogs("coredns-autoscaler", "kube-system", 5*time.Second, 1*time.Minute)
}
Expect(err).NotTo(HaveOccurred())
corednsPods, err := pod.GetAllByPrefixWithRetry("coredns", "kube-system", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
corednsAutoscalerPods, err := pod.GetAllByPrefixWithRetry("coredns-autoscaler", "kube-system", 3*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
newNumCoreDNSPods := len(corednsPods) - len(corednsAutoscalerPods)
log.Printf("%d coredns pods after scaling out\n", newNumCoreDNSPods)
Expect(err).NotTo(HaveOccurred())
Expect(newNumCoreDNSPods > numCoreDNSPods).To(BeTrue())
}
}
By("Stopping load")
err = loadTestDeploy.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
var nodes []node.Node
if clusterAutoscalerEngaged {
By("Wait a few more mins for additional nodes to come online, so that we can more effectively calculate node count reduction")
time.Sleep(3 * time.Minute)
nodes, err = node.GetWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
By("Ensuring we only have 1 apache-php pod after stopping load")
_, err = phpApacheDeploy.WaitForReplicas(-1, 1, 5*time.Second, cfg.Timeout)
if err != nil {
e := h.Describe()
Expect(e).NotTo(HaveOccurred())
}
Expect(err).NotTo(HaveOccurred())
if clusterAutoscalerEngaged {
By(fmt.Sprintf("Ensuring at least one node is removed by cluster-autoscaler, waiting until we have fewer than %d nodes...", len(nodes)))
ready := node.WaitOnReadyMax(len(nodes)-1, 30*time.Second, cfg.Timeout*2)
Expect(ready).To(BeTrue())
}
By("Deleting HPA configuration")
err = h.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
} else {
Skip("This flavor/version of Kubernetes doesn't support hpa autoscale")
}
})
It("should have node labels specific to masters or agents", func() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
role := "master"
if !strings.HasPrefix(n.Metadata.Name, fmt.Sprintf("%s-", common.LegacyControlPlaneVMPrefix)) {
if eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
continue
} else {
role = "agent"
}
}
labels := n.Metadata.Labels
Expect(labels).To(HaveKeyWithValue("kubernetes.io/role", role))
Expect(labels).To(HaveKey(fmt.Sprintf("node-role.kubernetes.io/%s", role)))
if role == "master" && common.IsKubernetesVersionGe(
eng.ExpandedDefinition.Properties.OrchestratorProfile.OrchestratorVersion, "1.17.1") {
Expect(labels).To(HaveKeyWithValue("node.kubernetes.io/exclude-from-external-load-balancers", "true"))
Expect(labels).To(HaveKeyWithValue("node.kubernetes.io/exclude-disruption", "true"))
}
// Check node labels applied by cloud-node-manager
if hasAddon, _ := eng.HasAddon(common.CloudNodeManagerAddonName); hasAddon {
// Can't extract zone from API model, so just ensure that zone-related labels exist
Expect(labels).To(HaveKey("failure-domain.beta.kubernetes.io/zone"))
Expect(labels).To(HaveKey("topology.kubernetes.io/zone"))
region := eng.ExpandedDefinition.Location
Expect(labels).To(HaveKeyWithValue("failure-domain.beta.kubernetes.io/region", region))
Expect(labels).To(HaveKeyWithValue("topology.kubernetes.io/region", region))
var instanceType string
switch role {
case "master":
instanceType = eng.ExpandedDefinition.Properties.MasterProfile.VMSize
case "agent":
osType := api.Linux
if n.IsWindows() {
osType = api.Windows
}
instanceType = util.GetAgentVMSize(eng.ExpandedDefinition.Properties.AgentPoolProfiles, osType)
}
Expect(labels).To(HaveKeyWithValue("beta.kubernetes.io/instance-type", instanceType))
Expect(labels).To(HaveKeyWithValue("node.kubernetes.io/instance-type", instanceType))
}
}
})
It("should have arc agents running", func() {
if hasArc, _ := eng.HasAddon(common.AzureArcOnboardingAddonName); hasArc {
By("Checking the onboarding job succeeded")
succeeded, err := job.WaitOnSucceeded("azure-arc-onboarding", "azure-arc-onboarding", 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(succeeded).To(Equal(true))
By("Checking ready status of each pod in namespace azure-arc")
pods, err := pod.GetAll("azure-arc")
Expect(err).NotTo(HaveOccurred())
Expect(len(pods.Pods)).ToNot(BeZero())
for _, currentPod := range pods.Pods {
log.Printf("Checking %s - ready: %t, restarts: %d", currentPod.Metadata.Name, currentPod.Status.ContainerStatuses[0].Ready, currentPod.Status.ContainerStatuses[0].RestartCount)
Expect(currentPod.Status.ContainerStatuses[0].Ready).To(BeTrue())
tooManyRestarts := 5
Expect(currentPod.Status.ContainerStatuses[0].RestartCount).To(BeNumerically("<", tooManyRestarts))
}
} else {
Skip("Onboarding connected cluster was not requested")
}
})
It("should have resilient kubelet and docker systemd services", func() {
if cfg.BlockSSHPort {
Skip("SSH port is blocked")
} else if !eng.ExpandedDefinition.Properties.HasNonRegularPriorityScaleset() {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
systemdValidateScript := "systemd-validate.sh"
err = sshConn.CopyTo(systemdValidateScript)
Expect(err).NotTo(HaveOccurred())
systemdValidationCommand := fmt.Sprintf("/tmp/%s", systemdValidateScript)
err = sshConn.Execute(systemdValidationCommand, false)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if n.IsUbuntu() && !firstMasterRegexp.MatchString(n.Metadata.Name) {
err := sshConn.CopyToRemoteWithRetry(n.Metadata.Name, "/tmp/"+systemdValidateScript, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
systemdValidationCommand = fmt.Sprintf("/tmp/%s", systemdValidateScript)
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, systemdValidationCommand, false, sleepBetweenRetriesRemoteSSHCommand, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
} else {
Skip("Skip per-node tests in low-priority VMSS cluster configuration scenario")
}
})
It("should be able to install vmss node prototype", func() {
if cfg.RunVMSSNodePrototype {
if eng.ExpandedDefinition.Properties.HasVMSSAgentPool() {
newKaminoNodes, err := strconv.Atoi(cfg.KaminoVMSSNewNodes)
Expect(err).NotTo(HaveOccurred())
By("Installing kured with node annotations configuration")
var kuredCommand *exec.Cmd
if cfg.KuredLocalChartPath == "" {
kuredCommand = exec.Command("helm", "install", "--wait", "--generate-name", "--repo", "https://weaveworks.github.io/kured", "kured", "--set", "configuration.annotateNodes=true", "--set", "configuration.period=1m")
} else {
kuredCommand = exec.Command("helm", "install", "--wait", "kured", cfg.KuredLocalChartPath, "--set", "configuration.annotateNodes=true", "--set", "configuration.period=1m")
}
util.PrintCommand(kuredCommand)
out, err := kuredCommand.CombinedOutput()
log.Printf("%s\n", out)
Expect(err).NotTo(HaveOccurred())
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
var numAgentNodes, numControlPlaneNodes int
controlPlaneNodeRegexStr := fmt.Sprintf("^%s-.*", common.LegacyControlPlaneVMPrefix)
controlPlaneNodeRegexp, err := regexp.Compile(controlPlaneNodeRegexStr)
Expect(err).NotTo(HaveOccurred())
for _, n := range nodes {
if n.IsLinux() {
if controlPlaneNodeRegexp.MatchString(n.Metadata.Name) {
numControlPlaneNodes++
} else {
numAgentNodes++
}
}
}
var largeContainerDaemonset *daemonset.Daemonset
var numLargeContainerPods int
if !cfg.KaminoVMSSPrototypeDryRun {
By("Creating a DaemonSet with a large container")
var err error
largeContainerDaemonset, err = daemonset.CreateDaemonsetDeleteIfExists(filepath.Join(WorkloadDir, "large-container-daemonset.yaml"), "large-container-daemonset", "default", "app", "large-container-daemonset", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
start := time.Now()
pods, err := pod.WaitForMinRunningByLabelWithRetry(numAgentNodes, "app", "large-container-daemonset", "default", 1*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
numLargeContainerPods = len(pods)
Expect(pods).NotTo(BeEmpty())
elapsed := time.Since(start)
log.Printf("Took %s to schedule %d Pods with large containers via DaemonSet\n", elapsed, numLargeContainerPods)
}
By("Marking all nodes as needing reboots")
for _, n := range nodes {
if n.IsLinux() && !controlPlaneNodeRegexp.MatchString(n.Metadata.Name) {
err = sshConn.ExecuteRemoteWithRetry(n.Metadata.Name, "\"sudo touch /var/run/reboot-required\"", false, 30*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
}
}
By("Waiting for one node to be marked as SchedulingDisabled by kured")
ready := node.WaitOnReadyMax(len(nodes)-1, 5*time.Second, cfg.Timeout)
Expect(ready).To(BeTrue())
By("Waiting for nodes to be be rebooted and annotated correctly")
_, err = node.WaitForNodesWithAnnotation(numAgentNodes, "weave.works/kured-most-recent-reboot-needed", "", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
_, err = node.WaitForNodesWithAnnotation(0, "weave.works/kured-reboot-in-progress", "", 1*time.Minute, time.Duration(5*numAgentNodes)*time.Minute)
Expect(err).NotTo(HaveOccurred())
By("Waiting for all nodes to be Ready again")
ready = node.WaitOnReady(len(nodes), 30*time.Second, cfg.Timeout)
Expect(ready).To(Equal(true))
ctx, cancel := context.WithTimeout(context.Background(), cfg.Timeout)
defer cancel()
// Getting vmss for the vm
vmssPage, err := azureClient.ListVirtualMachineScaleSets(ctx, cfg.ResourceGroup)
Expect(err).NotTo(HaveOccurred())
Expect(vmssPage).NotTo(BeNil())
vmssList := vmssPage.Values()
// Name of VMSS of nodeName
var vmssName string
var vmssSku *compute.Sku
var timeToAddNewNodeBaseline, timeToLargeContainerDaemonsetRunningBaseline time.Duration
for i, vmss := range vmssList {
vmssName = *vmss.Name
vmssSku = vmss.Sku
Expect(vmssName).NotTo(BeEmpty())
Expect(vmssSku).NotTo(BeNil())
originalCapacity := *vmssSku.Capacity
// get a pre-kamino scaling baseline against the first VMSS in the cluster
if !cfg.KaminoVMSSPrototypeDryRun && i == 0 {
By(fmt.Sprintf("Adding one new node to VMSS %s get a baseline", vmssName))
ctx2, cancel2 := context.WithTimeout(context.Background(), cfg.Timeout)
defer cancel2()
start := time.Now()
err = azureClient.SetVirtualMachineScaleSetCapacity(
ctx2,
cfg.ResourceGroup,
vmssName,
compute.Sku{
Name: vmssSku.Name,
Capacity: to.Int64Ptr(originalCapacity + 1),
},
eng.ExpandedDefinition.Location,
)
Expect(err).NotTo(HaveOccurred())
By("Waiting for the new node to become Ready")
ready := node.WaitOnReadyMin(numAgentNodes+1, 500*time.Millisecond, false, cfg.Timeout)
Expect(ready).To(BeTrue())
timeToAddNewNodeBaseline = time.Since(start)
log.Printf("Took %s to add 1 node\n", timeToAddNewNodeBaseline)
By("Ensuring that we have one additional large container pod after scaling out by one")
start = time.Now()
_, err = pod.WaitForMinRunningByLabelWithRetry(numLargeContainerPods+1, "app", "large-container-daemonset", "default", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
numLargeContainerPods++
timeToLargeContainerDaemonsetRunningBaseline = time.Since(start)
log.Printf("Took %s for large-container-daemonset pod to reach Running state on new node\n", timeToLargeContainerDaemonsetRunningBaseline)
}
vmssNodes, err := node.GetByRegexWithRetry(fmt.Sprintf("^%s", vmssName), 1*time.Minute, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(len(vmssNodes)).To(BeNumerically(">", 0))
helmName := fmt.Sprintf("vmss-prototype-%s", vmssName)
cmd := exec.Command("helm", "status", helmName)
out, err = cmd.CombinedOutput()
if err == nil {
By(fmt.Sprintf("Found pre-existing '%s' helm release, deleting it...", helmName))
cmd := exec.Command("helm", "delete", helmName)
out, err := cmd.CombinedOutput()
log.Printf("%s\n", out)
Expect(err).NotTo(HaveOccurred())
}
commandArgsSlice := []string{"upgrade", "--install"}
if cfg.KaminoVMSSPrototypeLocalChartPath == "" {
commandArgsSlice = append(commandArgsSlice, []string{"--repo", "https://jackfrancis.github.io/kamino/", helmName, "vmss-prototype"}...)
} else {
commandArgsSlice = append(commandArgsSlice, []string{helmName, cfg.KaminoVMSSPrototypeLocalChartPath}...)
}
newUpdatedNodes := newKaminoNodes
// account for the node we already added above to take a pre-kamino scaling baseline against the first VMSS in the cluster
if i == 0 {
newUpdatedNodes--
}
commandArgsSlice = append(commandArgsSlice, []string{"--namespace", "default", "--set", fmt.Sprintf("kamino.name=%s", vmssName), "--set", "kamino.scheduleOnControlPlane=true", "--set", fmt.Sprintf("kamino.newUpdatedNodes=%d", newUpdatedNodes), "--set", "kamino.logLevel=DEBUG", "--set", fmt.Sprintf("kamino.targetVMSS=%s", vmssName), "--set", "kamino.auto.lastPatchAnnotation=weave.works/kured-most-recent-reboot-needed", "--set", "kamino.auto.pendingRebootAnnotation=weave.works/kured-reboot-in-progress", "--set", "kamino.auto.minimumReadyTime=1s"}...)
if cfg.KaminoVMSSPrototypeImageRegistry != "" {
commandArgsSlice = append(commandArgsSlice, []string{"--set", fmt.Sprintf("kamino.container.imageRegistry=%s", cfg.KaminoVMSSPrototypeImageRegistry)}...)
}
if cfg.KaminoVMSSPrototypeImageRepository != "" {
commandArgsSlice = append(commandArgsSlice, []string{"--set", fmt.Sprintf("kamino.container.imageRepository=%s", cfg.KaminoVMSSPrototypeImageRepository)}...)
}
if cfg.KaminoVMSSPrototypeImageTag != "" {
commandArgsSlice = append(commandArgsSlice, []string{"--set", fmt.Sprintf("kamino.container.imageTag=%s", cfg.KaminoVMSSPrototypeImageTag), "--set", "kamino.container.pullByHash=false"}...)
}
if cfg.KaminoVMSSPrototypeDryRun {
commandArgsSlice = append(commandArgsSlice, []string{"--set", "kamino.auto.dryRun=true"}...)
}
cmd = exec.Command("helm", commandArgsSlice...)
util.PrintCommand(cmd)
out, err = cmd.CombinedOutput()
log.Printf("%s\n", out)
Expect(err).NotTo(HaveOccurred())
}
start := time.Now()
numVMSS := len(vmssList)
By("Ensuring that the kamino-vmss-prototype pods runs to completion")
succeededPods, getSucceededErr := pod.WaitForMinSucceededByLabelWithRetry(numVMSS, "app", "kamino-vmss-prototype", "default", timeToLargeContainerDaemonsetRunningBaseline, sigPublishingTimeout)
jobs, err := job.GetAllByLabelWithRetry("app", "kamino-vmss-prototype", "default", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(len(jobs)).To(Equal(numVMSS))
for _, j := range jobs {
err = j.Describe()
Expect(err).NotTo(HaveOccurred())
}
pods, err := pod.GetAllByLabelWithRetry("app", "kamino-vmss-prototype", "default", 5*time.Second, cfg.Timeout)
Expect(err).NotTo(HaveOccurred())
Expect(len(pods)).To(Equal(numVMSS))
for _, p := range pods {
err = p.Describe()
Expect(err).NotTo(HaveOccurred())
err = p.Logs()
Expect(err).NotTo(HaveOccurred())
}
Expect(getSucceededErr).NotTo(HaveOccurred())
Expect(len(succeededPods)).To(Equal(numVMSS))
elapsed := time.Since(start)
log.Printf("Took %s to run kamino-vmss-prototype Jobs to completion\n", elapsed)
if !cfg.KaminoVMSSPrototypeDryRun {
newKaminoNodes *= numVMSS
numNodesExpected := numAgentNodes + newKaminoNodes + numControlPlaneNodes
numLargeContainerPodsExpected := numAgentNodes + newKaminoNodes
By(fmt.Sprintf("Waiting for the %d new nodes created from prototype(s) to become Ready; waiting for %d total nodes", newKaminoNodes, numNodesExpected))
timeToWaitForLargeCluster := time.Duration(newKaminoNodes/1000) * time.Hour
timeToWaitForNewNodes := timeToWaitForLargeCluster
if cfg.Timeout > timeToWaitForLargeCluster {
timeToWaitForNewNodes = cfg.Timeout
}
start := time.Now()
ready := node.WaitOnReadyMin(numNodesExpected, 1*time.Minute, false, timeToWaitForNewNodes)
if !ready {
nodes, err := node.GetReadyWithRetry(1*time.Second, cfg.Timeout)
if err == nil {
log.Printf("Not enough Ready nodes! Expected %d, but only %d nodes are Ready", numNodesExpected, len(nodes))
}
}
Expect(ready).To(BeTrue())
elapsed = time.Since(start)
log.Printf("Took %s to add %d nodes derived from peer node prototype(s)\n", elapsed, newKaminoNodes)
By("Ensuring that we have additional large container pods after scaling out")
start = time.Now()
p, err := pod.WaitForMinRunningByLabelWithRetry(numLargeContainerPodsExpected, "app", "large-container-daemonset", "default", 5*time.Second, timeToLargeContainerDaemonsetRunningBaseline)
if err != nil {
log.Printf("%d large container pods were ready before %s", len(p), timeToLargeContainerDaemonsetRunningBaseline)
_, err = pod.WaitForMinRunningByLabelWithRetry(numLargeContainerPodsExpected, "app", "large-container-daemonset", "default", 5*time.Second, 1*time.Hour)
Expect(err).NotTo(HaveOccurred())
elapsed = time.Since(start)
} else {
elapsed = time.Since(start)
}
log.Printf("Took %s for %d large-container-daemonset pods to reach Running state on new node built from prototype\n", elapsed, numLargeContainerPods+newKaminoNodes)
By("Deleting large container DaemonSet")
err = largeContainerDaemonset.Delete(util.DefaultDeleteRetries)
Expect(err).NotTo(HaveOccurred())
}
} else {
Skip("no VMSS node pools")
}
} else {
Skip("InstallVMSSNodePrototype disabled")
}
})
})
})
func RunVMSSHealthCheck(cfg config.Config) (*exec.Cmd, error) {
outfile, err := os.Create(fmt.Sprintf("./vmss-health-check-%s.out", cfg.ResourceGroup))
if err != nil {
return nil, err
}
defer outfile.Close()
cmd := exec.Command("scripts/vmss-health-check.sh")
// inherit the parent environment so PATH etc. remain available to the script
cmd.Env = append(os.Environ(), fmt.Sprintf("RESOURCE_GROUP=%s", cfg.ResourceGroup), "LOOP_FOREVER=true")
cmd.Stdout = outfile
err = cmd.Start()
return cmd, err
}
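// Usage sketch (illustrative, not part of the suite above): start the health
// check alongside a long-running scenario and stop it during cleanup.
//
//	healthCheck, err := RunVMSSHealthCheck(cfg)
//	if err == nil && healthCheck != nil {
//		defer healthCheck.Process.Kill()
//	}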
serie.go
package pandascore
import (
"fmt"
"strings"
"time"
)
// SerieSortField defines the values usable to sort on series.
type SerieSortField string
const (
// SerieSortIDAsc can be set to sort per id asc order
SerieSortIDAsc SerieSortField = "id"
// SerieSortIDDesc can be set to sort per id desc order
SerieSortIDDesc SerieSortField = "-id"
// SerieSortModifiedAtAsc can be set to sort per modified at value asc order
SerieSortModifiedAtAsc SerieSortField = "modified_at"
// SerieSortModifiedAtDesc can be set to sort per modified at value desc order
SerieSortModifiedAtDesc SerieSortField = "-modified_at"
// SerieSortNameAsc can be set to sort per name asc order
SerieSortNameAsc SerieSortField = "name"
// SerieSortNameDesc can be set to sort per name desc order
SerieSortNameDesc SerieSortField = "-name"
// SerieSortSlugAsc can be set to sort per slug asc order
SerieSortSlugAsc SerieSortField = "slug"
// SerieSortSlugDesc can be set to sort per slug desc order
SerieSortSlugDesc SerieSortField = "-slug"
// SerieSortBeginAtAsc can be set to sort per begin_at asc order
SerieSortBeginAtAsc SerieSortField = "begin_at"
// SerieSortBeginAtDesc can be set to sort per begin_at desc order
SerieSortBeginAtDesc SerieSortField = "-begin_at"
// SerieSortDescriptionAsc can be set to sort per description asc order
SerieSortDescriptionAsc SerieSortField = "description"
// SerieSortDescriptionDesc can be set to sort per description desc order
SerieSortDescriptionDesc SerieSortField = "-description"
// SerieSortEndAtAsc can be set to sort per end_at asc order
SerieSortEndAtAsc SerieSortField = "end_at"
// SerieSortEndAtDesc can be set to sort per end_at desc order
SerieSortEndAtDesc SerieSortField = "-end_at"
// SerieSortLeagueIDAsc can be set to sort per league_id asc order
SerieSortLeagueIDAsc SerieSortField = "league_id"
// SerieSortLeagueIDDesc can be set to sort per league_id desc order
SerieSortLeagueIDDesc SerieSortField = "-league_id"
// SerieSortSeasonAsc can be set to sort per season asc order
SerieSortSeasonAsc SerieSortField = "season"
// SerieSortSeasonDesc can be set to sort per season desc order
SerieSortSeasonDesc SerieSortField = "-season"
// SerieSortTierAsc can be set to sort per tier asc order
SerieSortTierAsc SerieSortField = "tier"
// SerieSortTierDesc can be set to sort per tier desc order
SerieSortTierDesc SerieSortField = "-tier"
// SerieSortWinnerIDAsc can be set to sort per winner_id asc order
SerieSortWinnerIDAsc SerieSortField = "winner_id"
// SerieSortWinnerIDDesc can be set to sort per winner_id desc order
SerieSortWinnerIDDesc SerieSortField = "-winner_id"
// SerieSortWinnerTypeAsc can be set to sort per winner_type asc order
SerieSortWinnerTypeAsc SerieSortField = "winner_type"
// SerieSortWinnerTypeDesc can be set to sort per winner_type desc order
SerieSortWinnerTypeDesc SerieSortField = "-winner_type"
// SerieSortYearAsc can be set to sort per year asc order
SerieSortYearAsc SerieSortField = "year"
// SerieSortYearDesc can be set to sort per year desc order
SerieSortYearDesc SerieSortField = "-year"
)
// SerieRangeField defines the values usable to range on series.
type SerieRangeField string
const (
// SerieRangeID can be set to range on id
SerieRangeID SerieRangeField = "id"
// SerieRangeLeagueID can be set to range on league_id
SerieRangeLeagueID SerieRangeField = "league_id"
// SerieRangeModifiedAt can be set to range on modified_at
SerieRangeModifiedAt SerieRangeField = "modified_at"
// SerieRangeName can be set to range on name
SerieRangeName SerieRangeField = "name"
// SerieRangeDescription can be set to range on description
SerieRangeDescription SerieRangeField = "description"
// SerieRangeSlug can be set to range on slug
SerieRangeSlug SerieRangeField = "slug"
// SerieRangeBeginAt can be set to range on begin_at
SerieRangeBeginAt SerieRangeField = "begin_at"
// SerieRangeEndAt can be set to range on end_at
SerieRangeEndAt SerieRangeField = "end_at"
// SerieRangeSeason can be set to range on season
SerieRangeSeason SerieRangeField = "season"
// SerieRangeTier can be set to range on tier
SerieRangeTier SerieRangeField = "tier"
// SerieRangeWinnerID can be set to range on winner_id
SerieRangeWinnerID SerieRangeField = "winner_id"
// SerieRangeWinnerType can be set to range on winner type
SerieRangeWinnerType SerieRangeField = "winner_type"
)
// SerieSearchField defines the values usable to search on series.
type SerieSearchField string
const (
// SerieListSearchName can be set to search on the name field
SerieListSearchName SerieSearchField = "name"
// SerieListSearchDescription can be set to search on the description field
SerieListSearchDescription SerieSearchField = "description"
// SerieListSearchSeason can be set to search on the season field
SerieListSearchSeason SerieSearchField = "season"
// SerieListSearchSlug can be set to search on the slug field
SerieListSearchSlug SerieSearchField = "slug"
// SerieListSearchTier can be set to search on the tier field
SerieListSearchTier SerieSearchField = "tier"
// SerieListSearchWinnerType can be set to search on the winner_type field
SerieListSearchWinnerType SerieSearchField = "winner_type"
)
// SerieFilterField defines the values usable to filter on series.
type SerieFilterField string
const (
// SerieListFilterID can be set to Filter on the id field
SerieListFilterID SerieFilterField = "id"
// SerieListFilterLeagueID can be set to Filter on the league_id field
SerieListFilterLeagueID SerieFilterField = "league_id"
// SerieListFilterName can be set to Filter on the name field
SerieListFilterName SerieFilterField = "name"
// SerieListFilterSlug can be set to Filter on the slug field
SerieListFilterSlug SerieFilterField = "slug"
// SerieListFilterDescription can be set to Filter on the description field
SerieListFilterDescription SerieFilterField = "description"
// SerieListFilterSeason can be set to Filter on the season field
SerieListFilterSeason SerieFilterField = "season"
// SerieListFilterTier can be set to Filter on the tier field
SerieListFilterTier SerieFilterField = "tier"
// SerieListFilterYear can be set to Filter on the year field
SerieListFilterYear SerieFilterField = "year"
// SerieListFilterWinnerID can be set to Filter on the winner_id field
SerieListFilterWinnerID SerieFilterField = "winner_id"
// SerieListFilterWinnerType can be set to Filter on the winner_type field
SerieListFilterWinnerType SerieFilterField = "winner_type"
// SerieListFilterBeginAt can be set to Filter on the begin_at field
SerieListFilterBeginAt SerieFilterField = "begin_at"
// SerieListFilterEndAt can be set to Filter on the end_at field
SerieListFilterEndAt SerieFilterField = "end_at"
// SerieListFilterModifiedAt can be set to Filter on the modified_at field
SerieListFilterModifiedAt SerieFilterField = "modified_at"
)
// SerieListParams contains all the possibilities usable to customize the query on listing the series.
// All the fields are independent and optional.
type SerieListParams struct {
VideoGame VideoGameParam
PageParams *PageParams
SortParams []SerieSortField
RangeParams map[SerieRangeField]Range
SearchParams map[SerieSearchField]string
FilterParams map[SerieFilterField]string
}
// StringifyParams generates a string with all the parameters and/or default values so it can be used in the query URL.
func (p *SerieListParams) StringifyParams() string {
// base of the string containing all the parameters.
stringified := ""
var pageSize, pageNumber int
if p.PageParams != nil {
pageSize = p.PageParams.PageSize
pageNumber = p.PageParams.PageNumber
}
// Handle cases on which PageParams is not null and one of the values is not set
if pageSize == 0 {
pageSize = 50
}
if pageNumber == 0 {
pageNumber = 1
}
stringified = fmt.Sprintf("page[size]=%d&page[number]=%d", pageSize, pageNumber)
if p.SortParams != nil {
sortSlice := []string{}
for _, s := range p.SortParams {
sortSlice = append(sortSlice, fmt.Sprintf("sort=%s", s))
}
stringified = fmt.Sprintf("%s&%s", stringified, strings.Join(sortSlice, "&"))
}
if p.RangeParams != nil {
for k, v := range p.RangeParams {
stringified = fmt.Sprintf("%s&range[%s]=%s,%s", stringified, k, v.Min, v.Max)
}
}
if p.SearchParams != nil {
for k, v := range p.SearchParams {
stringified = fmt.Sprintf("%s&search[%s]=%s", stringified, k, v)
}
}
if p.FilterParams != nil {
for k, v := range p.FilterParams {
stringified = fmt.Sprintf("%s&filter[%s]=%s", stringified, k, v)
}
}
return stringified
}
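// Example (illustrative): the parameters
//
//	p := SerieListParams{
//		SortParams:   []SerieSortField{SerieSortBeginAtDesc},
//		FilterParams: map[SerieFilterField]string{SerieListFilterTier: "s"},
//	}
//
// stringify to "page[size]=50&page[number]=1&sort=-begin_at&filter[tier]=s".
// Note that map-backed params (range/search/filter) are emitted in Go's
// unspecified map iteration order when more than one entry is set.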
// Serie is the resource as defined in the PandaScore API.
// More details can be found here : https://developers.pandascore.co/doc/index.htm#operation/get_series
type Serie struct {
ID int `json:"id"`
LeagueID int `json:"league_id"`
FullName string `json:"full_name"`
Name string `json:"name"`
Slug string `json:"slug"`
Description string `json:"description"`
Season string `json:"season"`
Tier string `json:"tier"`
Year int `json:"year"`
WinnerID int `json:"winner_id"`
WinnerType string `json:"winner_type"`
League struct {
ID int `json:"id"`
Name string `json:"name"`
Slug string `json:"slug"`
URL string `json:"url"`
ImageURL string `json:"image_url"`
ModifiedAt time.Time `json:"modified_at"`
} `json:"league"`
VideoGame struct {
ID int `json:"id"`
Name string `json:"name"`
Slug string `json:"slug"`
} `json:"videogame"`
VideoGameTitle struct {
ID int `json:"id"`
VideoGameID int `json:"videogame_id"`
Name string `json:"name"`
} `json:"videogame_title"`
Tournaments []struct {
ID int `json:"id"`
LeagueID int `json:"league_id"`
SerieID int `json:"serie_id"`
Name string `json:"name"`
Slug string `json:"slug"`
LiveSupported bool `json:"live_supported"`
PrizePool string `json:"prizepool"`
WinnerID int `json:"winner_id"`
WinnerType string `json:"winner_type"`
BeginAt time.Time `json:"begin_at"`
EndAt time.Time `json:"end_at"`
ModifiedAt time.Time `json:"modified_at"`
} `json:"tournaments"`
BeginAt time.Time `json:"begin_at"`
EndAt time.Time `json:"end_at"`
ModifiedAt time.Time `json:"modified_at"`
}
test_search_index_client.py
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
try:
from unittest import mock
except ImportError:
import mock
from azure.core.credentials import AzureKeyCredential
from azure.search.documents import SearchClient, ApiVersion
from azure.search.documents.indexes import SearchIndexClient, SearchIndexerClient
from azure.search.documents.indexes.models import SearchIndexerDataContainer, SearchIndexerDataSourceConnection
CREDENTIAL = AzureKeyCredential(key="test_api_key")
class TestSearchIndexClient(object):
def test_index_init(self):
client = SearchIndexClient("endpoint", CREDENTIAL)
assert client._headers == {
"api-key": "test_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
def test_index_credential_roll(self):
credential = AzureKeyCredential(key="old_api_key")
client = SearchIndexClient("endpoint", credential)
assert client._headers == {
"api-key": "old_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
credential.update("new_api_key")
assert client._headers == {
"api-key": "new_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
def test_get_search_client(self):
credential = AzureKeyCredential(key="old_api_key")
client = SearchIndexClient("endpoint", credential)
search_client = client.get_search_client('index')
assert isinstance(search_client, SearchClient)
@mock.patch(
"azure.search.documents.indexes._generated._operations_mixin.SearchClientOperationsMixin.get_service_statistics"
)
def test_get_service_statistics(self, mock_get_stats):
client = SearchIndexClient("endpoint", CREDENTIAL)
client.get_service_statistics()
assert mock_get_stats.called
assert mock_get_stats.call_args[0] == ()
assert mock_get_stats.call_args[1] == {"headers": client._headers}
@mock.patch(
"azure.search.documents.indexes._generated._operations_mixin.SearchClientOperationsMixin.get_service_statistics"
)
def test_get_service_statistics_v2020_06_30(self, mock_get_stats):
client = SearchIndexClient("endpoint", CREDENTIAL, api_version=ApiVersion.V2020_06_30)
client.get_service_statistics()
assert mock_get_stats.called
assert mock_get_stats.call_args[0] == ()
assert mock_get_stats.call_args[1] == {"headers": client._headers}
def test_index_endpoint_https(self):
credential = AzureKeyCredential(key="old_api_key")
client = SearchIndexClient("endpoint", credential)
assert client._endpoint.startswith('https')
client = SearchIndexClient("https://endpoint", credential)
assert client._endpoint.startswith('https')
with pytest.raises(ValueError):
client = SearchIndexClient("http://endpoint", credential)
with pytest.raises(ValueError):
client = SearchIndexClient(12345, credential)
class TestSearchIndexerClient(object):
def test_indexer_init(self):
client = SearchIndexerClient("endpoint", CREDENTIAL)
assert client._headers == {
"api-key": "test_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
def test_indexer_credential_roll(self):
credential = AzureKeyCredential(key="old_api_key")
client = SearchIndexerClient("endpoint", credential)
assert client._headers == {
"api-key": "old_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
credential.update("new_api_key")
assert client._headers == {
"api-key": "new_api_key",
"Accept": "application/json;odata.metadata=minimal",
}
def test_indexer_endpoint_https(self):
credential = AzureKeyCredential(key="old_api_key")
client = SearchIndexerClient("endpoint", credential)
assert client._endpoint.startswith('https')
client = SearchIndexerClient("https://endpoint", credential)
assert client._endpoint.startswith('https')
with pytest.raises(ValueError):
client = SearchIndexerClient("http://endpoint", credential)
with pytest.raises(ValueError):
client = SearchIndexerClient(12345, credential)
def test_datasource_with_empty_connection_string(self):
container = SearchIndexerDataContainer(name='searchcontainer')
data_source_connection = SearchIndexerDataSourceConnection(
name="test",
type="azureblob",
connection_string="",
container=container
)
packed_data_source_connection = data_source_connection._to_generated()
assert packed_data_source_connection.credentials.connection_string == "<unchanged>"
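# An empty connection string is packed as the "<unchanged>" sentinel,
# presumably so the service keeps the previously stored secret rather
# than overwriting it with an empty value.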
reader.py
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from . import core
import sys
import six
import numpy as np
import threading
import paddle
from .framework import Program, Variable, program_guard, default_main_program, default_startup_program, in_dygraph_mode, cpu_places, _current_expected_place, _in_eager_mode
from .executor import global_scope
from .data_feeder import DataFeeder, BatchedTensorProvider
from .multiprocess_utils import multiprocess_queue_set, CleanupFuncRegistrar, _cleanup_mmap, _cleanup, _set_SIGCHLD_handler
from .dataloader import BatchSampler, Dataset, IterableDataset
from .dataloader.dataloader_iter import _DataLoaderIterSingleProcess, _DataLoaderIterMultiProcess, _DatasetKind, default_collate_fn
from .dataloader.batch_sampler import _InfiniteIterableSampler
from .layers.io import monkey_patch_reader_methods, _copy_reader_var_, double_buffer
from .unique_name import UniqueNameGenerator
from .framework import _get_paddle_place, _get_paddle_place_list
from paddle.fluid.framework import _set_expected_place, _current_expected_place
import logging
import warnings
### Dygraph DataLoader configs ###
import os
import multiprocessing
import signal
# NOTE: queue has a different name in python2 and python3
import queue
# NOTE: [ avoid hanging & failed quickly ] These values are used when getting data from another process
QUEUE_GET_TIMEOUT = 60
__all__ = ['PyReader', 'DataLoader', 'default_collate_fn']
data_loader_unique_name_generator = UniqueNameGenerator()
KEEP_DATA_LOADER_ORDER = True
USE_PINNED_MEMORY = None
def keep_data_loader_order(*args):
global KEEP_DATA_LOADER_ORDER
if len(args) == 0:
return KEEP_DATA_LOADER_ORDER
else:
assert len(args) == 1 and isinstance(args[0], bool)
KEEP_DATA_LOADER_ORDER = args[0]
def use_pinned_memory(*args):
global USE_PINNED_MEMORY
if len(args) == 0:
return USE_PINNED_MEMORY
else:
assert len(args) == 1 and isinstance(args[0], bool)
USE_PINNED_MEMORY = args[0]
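# Usage (illustrative): keep_data_loader_order() with no argument reads the
# flag, while keep_data_loader_order(False) sets it; use_pinned_memory
# follows the same get/set convention.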
def _convert_places(places):
if not isinstance(places, (list, tuple)):
places = [places]
ret = []
for p in places:
if not isinstance(p, core.Place):
tmp = core.Place()
tmp.set_place(p)
p = tmp
ret.append(p)
return ret
# NOTE(chenweihang): _reader_process_loop must be top level method to be pickled
def _reader_process_loop(batch_reader, data_queue):
try:
# set signal handler
core._set_process_signal_handler()
# NOTE: [ mmap files clear ] When the child process exits unexpectedly,
# some shared memory objects may have been applied for but have not yet
# been put into the inter-process Queue. This part of the object needs
# to be cleaned up when the process ends.
CleanupFuncRegistrar.register(_cleanup_mmap)
for batch in batch_reader():
tensor_list = core._convert_to_tensor_list(batch)
data_queue.put(tensor_list)
core._remove_tensor_list_mmap_fds(tensor_list)
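# a trailing None acts as the end-of-data sentinel for the consumer process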
data_queue.put(None)
except KeyboardInterrupt:
# NOTE: Main process will raise KeyboardInterrupt anyways, ignore it in child process
pass
except:
six.reraise(*sys.exc_info())
class DataLoaderBase(object):
def __init__(self):
self._places = None
def __call__(self):
return self
def next(self):
'''
Get the next item in the DataLoader object. This method
should not be called by users directly. It is used for
implementing iterator protocol of Python 2.x inside
PaddlePaddle framework.
'''
return self.__next__()
def __iter__(self):
raise NotImplementedError()
def __next__(self):
raise NotImplementedError()
@classmethod
def _check_input_array(cls, item):
arr = np.asarray(item)
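# e.g. np.asarray([[1, 2], [3]]) yields a dtype=object array because the
# nested lists have different lengths; such inputs are rejected below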
if arr.dtype == np.object:
raise TypeError(
"\n\tFaild to convert input data to a regular ndarray :\n\t* Usually "
"this means the input data contains nested lists with different lengths. "
"\n\t* Check the reader function passed to 'decorate_batch_generator'"
" to locate the data causes this issue.\n\t* Please consider using "
"'fluid.create_lod_tensor' to convert it to a LoD-Tensor.")
return arr
class DataLoader(object):
"""
DataLoader provides an iterator which iterates the given dataset
once, following the batch_sampler.
DataLoader supports single-process and multi-process data loading:
multi-process workers will be used to load data asynchronously if
:attr:`num_workers` is set as a positive number.
DataLoader supports map-style datasets and iterable-style datasets.
For a map-style dataset (one that can return a sample for a given
index), please see :code:`paddle.io.Dataset`.
For an iterable-style dataset (one that yields samples iteratively,
like a Python iterator), please see :code:`paddle.io.IterableDataset`.
For :code:`batch_sampler` please see :code:`paddle.io.BatchSampler`.
.. note::
GPU tensor operations are not supported in subprocesses currently,
please don't use GPU tensor operations in pipeline stages which will
be performed in a subprocess, such as dataset transforms, collate_fn,
etc. Numpy array and CPU tensor operations are supported.
**Disable automatic batching**
In certain cases, such as some NLP tasks, users need to handle batching
manually in the dataset instead of relying on automatic batching. For these
cases, automatic batching is disabled if both :attr:`batch_size` and
:attr:`batch_sampler` are set as None; each item fetched from :attr:`dataset`
should then already be batched data, and will be processed with the function
defined by :attr:`collate_fn` or :attr:`default_collate_fn`.
.. note::
When automatic batching is disabled, :attr:`default_collate_fn` will
do nothing to the data from the dataset.
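For example (an illustrative sketch), with :attr:`batch_size=None` and
:attr:`batch_sampler=None`, a dataset whose :code:`__getitem__` already
returns a batched :code:`(images, labels)` pair is yielded by the
DataLoader unchanged.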
Args:
dataset(Dataset): the dataset to load data from, should be an
instance of subclass of :code:`paddle.io.Dataset` or
:code:`paddle.io.IterableDataset`.
feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
The Tensors should be created by :code:`paddle.static.data()`.
:attr:`feed_list` must be set if :attr:`return_list` is
False. Default None.
places(list(Place)|tuple(Place)|list(str)|optional): a list of Place
to put the data onto. :attr:`places` can be None; if
:attr:`places` is None, the default place (CPUPlace or CUDAPlace(0))
will be used. Default None. If ``places`` is a list of string,
the string in the list can be ``cpu``, ``gpu:x`` and ``gpu_pinned``,
where ``x`` is the index of the GPUs.
return_list (bool): whether the return value on each device is
presented as a list. If :attr:`return_list=False`, the return
value on each device would be a dict of str -> Tensor, where
the key of the dict is the name of each fed Tensors. If
:attr:`return_list=True`, the return value on each device would
be a list(Tensor). :attr:`return_list` can only be True
in dynamic graph mode. Default True.
batch_sampler(BatchSampler): an instance of `paddle.io.BatchSampler`
to generate batch indices to draw samples from :attr:`dataset`
and combine a batch. Default None.
batch_size(int|None): sample number in a mini-batch, a substitution
parameter for :attr:`batch_sampler`, if :attr:`batch_sampler`
is not set, a default `paddle.io.BatchSampler` will be used
and initialized by :attr:`batch_size`, :attr:`shuffle` and
:attr:`drop_last`. Default 1.
shuffle(bool): whether to shuffle sample indices before generating
batch indices, a substitution parameter for :attr:`batch_sampler`,
see :attr:`batch_size`. Default False.
drop_last(bool): whether to drop the last incomplete batch when the dataset
size is not divisible by the batch size, a substitution parameter
for :attr:`batch_sampler`, see :attr:`batch_size`. Default False.
collate_fn(callable): function to generate mini-batch data by merging
the sample list, None for only stacking each field of the samples on
axis 0 (same as :code:`np.stack(..., axis=0)`). Default None.
num_workers(int): the number of subprocesses used to load data; 0 means
data is loaded in the main process with no subprocess. Default 0.
use_buffer_reader (bool): whether to use a buffered reader.
If use_buffer_reader=True, the DataLoader would prefetch the next
batch data asynchronously, so it would speed up data feeding
and occupy a little more CPU or GPU memory, i.e., the memory
of one batch of input data. Default True.
use_shared_memory (bool): whether to use shared memory to speed up
putting data into the inter-process queue; set :attr:`use_shared_memory`
as True only when the shared memory space on your machine (e.g.
the space of '/dev/shm' on Linux operating systems) is large enough.
Shared memory will only be enabled in multi-process mode (num_workers
> 0). Default True.
timeout(int): the timeout value for getting data from the output queue
of subprocesses. Default 0.
worker_init_fn(callable): init function which will be called with the
worker id on each subprocess start if not set as None. Default
None.
Returns:
DataLoader: an iterable object for data iterating, each element of the generated data is a Tensor.
Examples:
.. code-block:: python
import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.io import Dataset, BatchSampler, DataLoader
BATCH_NUM = 20
BATCH_SIZE = 16
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
# define a random dataset
class RandomDataset(Dataset):
def __init__(self, num_samples):
self.num_samples = num_samples
def __getitem__(self, idx):
image = np.random.random([IMAGE_SIZE]).astype('float32')
label = np.random.randint(0, CLASS_NUM - 1, (1, )).astype('int64')
return image, label
def __len__(self):
return self.num_samples
dataset = RandomDataset(BATCH_NUM * BATCH_SIZE)
class SimpleNet(nn.Layer):
def __init__(self):
super(SimpleNet, self).__init__()
self.fc = nn.Linear(IMAGE_SIZE, CLASS_NUM)
def forward(self, image, label=None):
return self.fc(image)
simple_net = SimpleNet()
opt = paddle.optimizer.SGD(learning_rate=1e-3,
parameters=simple_net.parameters())
loader = DataLoader(dataset,
batch_size=BATCH_SIZE,
shuffle=True,
drop_last=True,
num_workers=2)
for e in range(EPOCH_NUM):
for i, (image, label) in enumerate(loader()):
out = simple_net(image)
loss = F.cross_entropy(out, label)
avg_loss = paddle.mean(loss)
avg_loss.backward()
opt.minimize(avg_loss)
simple_net.clear_gradients()
print("Epoch {} batch {}: loss = {}".format(e, i, np.mean(loss.numpy())))
.. note::
For reading an iterable dataset with a multi-process DataLoader,
please see :code:`paddle.io.IterableDataset`
"""
def __init__(self,
dataset,
feed_list=None,
places=None,
return_list=True,
batch_sampler=None,
batch_size=1,
shuffle=False,
drop_last=False,
collate_fn=None,
num_workers=0,
use_buffer_reader=True,
use_shared_memory=True,
timeout=0,
worker_init_fn=None,
persistent_workers=False):
self.return_list = return_list
self.collate_fn = collate_fn
self.use_buffer_reader = use_buffer_reader
self.worker_init_fn = worker_init_fn
self.dataset = dataset
if not return_list and not in_dygraph_mode():
assert feed_list is not None, \
"feed_list should be set when return_list=False"
self.feed_list = feed_list
if places is None:
places = _current_expected_place()
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self.places = _convert_places(places)
assert num_workers >= 0, "num_workers should be a non-negative value"
if num_workers > 0 and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"DataLoader with multi-process mode is not supported on macOS and Windows currently." \
" Please use single-process mode with num_workers = 0 instead")
num_workers = 0
self.num_workers = num_workers
self.use_shared_memory = use_shared_memory
if use_shared_memory and num_workers == 0:
self.use_shared_memory = False
assert timeout >= 0, "timeout should be a non-negative value"
self.timeout = timeout
if isinstance(dataset, IterableDataset):
self.dataset_kind = _DatasetKind.ITER
if shuffle:
raise ValueError(
"IterableDataset not support shuffle, but got shuffle={}".
format(shuffle))
if batch_sampler is not None:
raise ValueError(
"IterableDataset expect unspecified batch_sampler")
else:
self.dataset_kind = _DatasetKind.MAP
if batch_sampler is not None:
assert batch_size == 1 and not shuffle and not drop_last, \
"batch_size/shuffle/drop_last should not be set when " \
"batch_sampler is given"
self.batch_sampler = batch_sampler
self.batch_size = None
elif batch_size is None:
self.batch_sampler = None
self.batch_size = None
else:
assert batch_size > 0, \
"batch_size should be None or a positive value when " \
"batch_sampler is not given"
self.batch_size = batch_size
if isinstance(dataset, IterableDataset):
self.batch_sampler = _InfiniteIterableSampler(dataset,
batch_size)
else:
self.batch_sampler = BatchSampler(
dataset=dataset,
batch_size=batch_size,
shuffle=shuffle,
drop_last=drop_last)
self.drop_last = drop_last
self.auto_collate_batch = self.batch_sampler is not None
self.pin_memory = False
if in_dygraph_mode():
# default to True unless use_pinned_memory(False/True) was called explicitly
self.pin_memory = True if use_pinned_memory() is None else use_pinned_memory()
self._persistent_workers = persistent_workers
self._iterator = None
def __len__(self):
if self.dataset_kind == _DatasetKind.ITER:
raise ValueError("length of IterableDataset not supported")
else:
if self.auto_collate_batch:
return len(self.batch_sampler)
else:
return len(self.dataset)
def __iter__(self):
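# single-process mode builds a fresh iterator each epoch; with
# persistent_workers the multi-process iterator is created once and
# reset between epochs so the worker subprocesses are reused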
if self.num_workers == 0:
return _DataLoaderIterSingleProcess(self)
elif self._persistent_workers:
if self._iterator is None:
self._iterator = _DataLoaderIterMultiProcess(self)
else:
self._iterator._reset()
return self._iterator
else:
return _DataLoaderIterMultiProcess(self)
def __call__(self):
return self.__iter__()
@staticmethod
def from_generator(feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
use_multiprocess=False,
drop_last=True):
"""
.. warning::
This API will be deprecated in the future, it is recommended to use
:code:`paddle.io.DataLoader` which supports multi-processes acceleration.
.. note::
**The framework ensures that the data loading order of DataLoader is exactly the same as the user-defined data source.**
Create a DataLoader object for loading data from a Python generator.
Data would be prefetched by a Python thread and pushed
into a queue asynchronously.
The created DataLoader object provides 3 methods to set the data source:
:code:`set_sample_generator` , :code:`set_sample_list_generator` and
:code:`set_batch_generator` . Please see the following example code
to learn their usage.
If iterable = True, the created DataLoader object is a Python generator
object, which is iterable with a for-range loop.
If iterable = False, the created DataLoader object provides
:code:`start()` and :code:`reset()` methods to control the data reading
process.
Args:
feed_list (list(Tensor)|tuple(Tensor)): feed Tensor list.
The Tensors should be created by :code:`fluid.data()`.
capacity (int): capacity of the queue maintained in DataLoader.
The unit is batch number. Set larger capacity if your reader
is fast.
use_double_buffer (bool): whether to use double_buffer_reader.
If use_double_buffer=True, the DataLoader would prefetch next
batch data asynchronously, so it would speed up data feeding
and occupies a little more CPU or GPU memory, i.e., the memory
of one batch input data.
iterable (bool): whether the created DataLoader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
the name of each fed Tensors. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
            use_multiprocess (bool): whether to use multi-process to speed up
                the data loading process in dygraph. Note: this parameter can
                only be used in dygraph mode. In static graph mode, whether
                this parameter is set or not has no effect. The default value
                is False.
            drop_last (bool): whether to drop the last batches whose count is
                less than the number of CPU cores/GPU cards. The default value
                is True. In the training phase, users should not set
                drop_last=False, because all CPU cores/GPU cards must read
                data from the DataLoader. In the inference phase, users can
                set drop_last=False, so that the last batches whose count is
                less than the number of CPU cores/GPU cards can be tested.
Returns:
loader (DataLoader): the created DataLoader object.
Examples 1:
.. code-block:: python
'''
Example in static graph mode
'''
import numpy as np
import paddle
import paddle.static as static
import paddle.nn.functional as F
BATCH_NUM = 10
BATCH_SIZE = 16
EPOCH_NUM = 4
CLASS_NUM = 10
ITERABLE = True # whether the created DataLoader object is iterable
USE_GPU = False # whether to use GPU
DATA_FORMAT = 'batch_generator' # data format of data source user provides
paddle.enable_static()
def simple_net(image, label):
fc_tmp = static.nn.fc(image, size=CLASS_NUM)
                cross_entropy = F.softmax_with_cross_entropy(fc_tmp, label)
loss = paddle.mean(cross_entropy)
sgd = paddle.optimizer.SGD(learning_rate=1e-3)
sgd.minimize(loss)
return loss
def get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
return image, label
# If the data generator yields one sample each time,
# use DataLoader.set_sample_generator to set the data source.
def sample_generator_creator():
def __reader__():
for _ in range(BATCH_NUM * BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
yield image, label
return __reader__
            # If the data generator yields a list of samples each time,
# use DataLoader.set_sample_list_generator to set the data source.
def sample_list_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
sample_list = []
for _ in range(BATCH_SIZE):
image, label = get_random_images_and_labels([784], [1])
sample_list.append([image, label])
yield sample_list
return __reader__
# If the data generator yields a batch each time,
# use DataLoader.set_batch_generator to set the data source.
def batch_generator_creator():
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = get_random_images_and_labels([BATCH_SIZE, 784], [BATCH_SIZE, 1])
yield batch_image, batch_label
return __reader__
            # If DataLoader is iterable, use a for loop to train the network
def train_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
for data in loader():
exe.run(prog, feed=data, fetch_list=[loss])
            # If DataLoader is not iterable, use the start() and reset() methods to control the process
def train_non_iterable(exe, prog, loss, loader):
for _ in range(EPOCH_NUM):
loader.start() # call DataLoader.start() before each epoch starts
try:
while True:
exe.run(prog, fetch_list=[loss])
except paddle.core.EOFException:
loader.reset() # call DataLoader.reset() after catching EOFException
def set_data_source(loader, places):
if DATA_FORMAT == 'sample_generator':
loader.set_sample_generator(sample_generator_creator(), batch_size=BATCH_SIZE, drop_last=True, places=places)
elif DATA_FORMAT == 'sample_list_generator':
loader.set_sample_list_generator(sample_list_generator_creator(), places=places)
elif DATA_FORMAT == 'batch_generator':
loader.set_batch_generator(batch_generator_creator(), places=places)
else:
raise ValueError('Unsupported data format')
image = static.data(name='image', shape=[None, 784], dtype='float32')
label = static.data(name='label', shape=[None, 1], dtype='int64')
# Define DataLoader
loader = paddle.io.DataLoader.from_generator(feed_list=[image, label], capacity=16, iterable=ITERABLE)
# Define network
loss = simple_net(image, label)
# Set data source of DataLoader
#
            # If DataLoader is iterable, places must be given and the number of places must be the same as the device number.
# - If you are using GPU, call `paddle.static.cuda_places()` to get all GPU places.
# - If you are using CPU, call `paddle.static.cpu_places()` to get all CPU places.
#
# If DataLoader is not iterable, places can be None.
places = static.cuda_places() if USE_GPU else static.cpu_places()
set_data_source(loader, places)
exe = static.Executor(places[0])
exe.run(static.default_startup_program())
prog = static.CompiledProgram(static.default_main_program()).with_data_parallel(loss_name=loss.name)
if loader.iterable:
train_iterable(exe, prog, loss, loader)
else:
train_non_iterable(exe, prog, loss, loader)
Examples 2:
.. code-block:: python
'''
Example in dynamic graph mode.
'''
import numpy as np
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
import paddle.distributed as dist
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
IMAGE_SIZE = 784
CLASS_NUM = 10
USE_GPU = False # whether to use GPU
def _get_random_images_and_labels(image_shape, label_shape):
image = np.random.random(size=image_shape).astype('float32')
label = np.random.random(size=label_shape).astype('int64')
return image, label
def __reader__():
for _ in range(BATCH_NUM):
batch_image, batch_label = _get_random_images_and_labels(
[BATCH_SIZE, IMAGE_SIZE], [BATCH_SIZE, CLASS_NUM])
yield batch_image, batch_label
def random_batch_reader():
return __reader__
class LinearNet(nn.Layer):
def __init__(self):
super(LinearNet, self).__init__()
self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)
@paddle.jit.to_static
def forward(self, x):
return self._linear(x)
# set device
paddle.set_device('gpu' if USE_GPU else 'cpu')
# create network
layer = LinearNet()
dp_layer = paddle.DataParallel(layer)
loss_fn = nn.CrossEntropyLoss()
adam = opt.Adam(learning_rate=0.001, parameters=dp_layer.parameters())
# create data loader
loader = paddle.io.DataLoader.from_generator(capacity=5)
loader.set_batch_generator(random_batch_reader())
for epoch_id in range(EPOCH_NUM):
for batch_id, (image, label) in enumerate(loader()):
                    out = dp_layer(image)
loss = loss_fn(out, label)
loss.backward()
adam.step()
adam.clear_grad()
print("Epoch {} batch {}: loss = {}".format(
epoch_id, batch_id, np.mean(loss.numpy())))
Examples 3:
.. code-block:: python
'''
Example of `drop_last` using in static graph multi-cards mode
'''
import paddle
import paddle.static as static
import numpy as np
import os
# We use 2 CPU cores to run inference network
os.environ['CPU_NUM'] = '2'
paddle.enable_static()
            # The data source has only 3 batches, which cannot be
            # divided evenly across the CPU cores
def batch_generator():
for i in range(3):
yield np.array([i+1]).astype('float32'),
x = static.data(name='x', shape=[None], dtype='float32')
y = x * x
def run_inference(drop_last):
loader = paddle.io.DataLoader.from_generator(feed_list=[x],
capacity=8, drop_last=drop_last)
loader.set_batch_generator(batch_generator, static.cpu_places())
exe = static.Executor(paddle.CPUPlace())
prog = static.CompiledProgram(static.default_main_program())
prog = prog.with_data_parallel()
result = []
for data in loader():
each_ret, = exe.run(prog, feed=data, fetch_list=[y])
result.extend(each_ret)
return result
# Set drop_last to True, so that the last batch whose
# number is less than CPU core number would be discarded.
print(run_inference(drop_last=True)) # [1.0, 4.0]
# Set drop_last to False, so that the last batch whose
# number is less than CPU core number can be tested.
print(run_inference(drop_last=False)) # [1.0, 4.0, 9.0]
"""
if in_dygraph_mode():
return DygraphGeneratorLoader(feed_list, capacity,
use_double_buffer, iterable,
return_list, use_multiprocess)
else:
return GeneratorLoader(feed_list, capacity, use_double_buffer,
iterable, return_list, drop_last)
@staticmethod
def from_dataset(dataset, places, drop_last=True):
"""
.. warning::
          This API will be deprecated in the future; it is recommended to use
          :code:`paddle.io.DataLoader`, which supports multi-process acceleration.
Create an iterable DataLoader object for loading data from Dataset.
        Dataset is currently only supported on Linux.
Args:
dataset (InMemoryDataset|QueueDataset): the dataset object.
            places (list(CUDAPlace)|list(CPUPlace)|list(str)): places where the result
                data should be converted. If places is a list of strings, each string
                can be ``cpu``, ``gpu:x`` or ``gpu_pinned``, where x is the index of the GPU.
            drop_last (bool): whether to drop the last batch whose sample
                count is less than the batch size. If drop_last = True, it
                would be dropped; if drop_last = False, it would be kept.
Returns:
loader (DataLoader): the created DataLoader object, which can be
treated as a Python generator.
Examples:
.. code-block:: python
import paddle
import paddle.static as static
paddle.enable_static()
image = static.data(name='image', shape=[None, 784], dtype='float32')
label = static.data(name='label', shape=[None, 1], dtype='int64')
dataset = paddle.distributed.QueueDataset()
dataset.init(
batch_size=32,
pipe_command='cat',
use_var=[image, label])
dataset.set_filelist(['a.txt', 'b.txt', 'c.txt'])
loader = paddle.io.DataLoader.from_dataset(dataset, static.cpu_places())
"""
return DatasetLoader(dataset, places, drop_last)
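# --- Editor sketch (not part of the original module) ---
# A minimal, hypothetical usage example of the batch_sampler/batch_size
# decision tree implemented in DataLoader.__init__ above. It assumes the
# public paddle.io.Dataset/DataLoader/BatchSampler API; the helper is defined
# but never called, so importing this module stays side-effect free.
def _dataloader_init_sketch():
    import numpy as np
    from paddle.io import BatchSampler, DataLoader, Dataset

    class _RandomDataset(Dataset):
        def __len__(self):
            return 32

        def __getitem__(self, idx):
            image = np.random.random([784]).astype('float32')
            label = np.random.randint(0, 10, [1]).astype('int64')
            return image, label

    dataset = _RandomDataset()
    # batch_size given -> a BatchSampler is built internally and
    # auto_collate_batch is True.
    auto_loader = DataLoader(dataset, batch_size=4, shuffle=True)
    # batch_size=None -> samples are yielded one by one and
    # auto_collate_batch is False.
    sample_loader = DataLoader(dataset, batch_size=None)
    # explicit batch_sampler -> batch_size/shuffle/drop_last must keep
    # their defaults (see the assert in __init__).
    sampler_loader = DataLoader(
        dataset, batch_sampler=BatchSampler(dataset, batch_size=4))
    return auto_loader, sample_loader, sampler_loader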
class DygraphGeneratorLoader(DataLoaderBase):
"""
The GeneratorLoader of dygraph
    Most functions of the multiprocess dygraph GeneratorLoader differ from the
    static graph GeneratorLoader, so it is implemented separately to keep the
    code readable.
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=True,
use_multiprocess=False):
self._batch_reader = None
self._places = None
self._feed_list = feed_list
if not capacity:
raise ValueError("Please give value to capacity.")
self._capacity = capacity
self._use_double_buffer = use_double_buffer
if not iterable:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports iterable mode only. Change to iterable mode."
)
self._iterable = True
if not return_list:
warnings.warn(
"Please NOTE: DygraphGeneratorLoader supports returning as list only. Change to return as list."
)
self._return_list = True
        # NOTE: multiprocessing is incompatible across platforms; we will solve this later
self._use_multiprocess = use_multiprocess
if self._use_multiprocess and (sys.platform == 'darwin' or
sys.platform == 'win32'):
warnings.warn(
"NOTE: DygraphGeneratorLoader with multiprocess mode is not currently supported on MacOs and Windows."
)
self._use_multiprocess = False
if self._use_multiprocess:
# NOTE: the multiprocessing.Queue used to save loading data in self._process
self._data_queue = None
# NOTE: this process is used to load data asynchronously from self._batch_reader
self._process = None
# NOTE: the C++ LoDTensorBlockingQueue instance
self._blocking_queue = None
# NOTE: 1. In multiprocess mode, this thread is used to get next batch data from
# self._data_queue, then push it into self._blocking_queue; 2. In singleprocess
# mode, this thread is used to get next batch data from self._batch_reader, then
# push it into self._blocking_queue
self._thread = None
self._pin_memory = True if use_pinned_memory(
) is None else use_pinned_memory()
@property
def queue(self):
return self._blocking_queue
@property
def iterable(self):
return self._iterable
def _clear_and_remove_data_queue(self):
if self._data_queue is not None:
while True:
try:
self._data_queue.get_nowait()
except queue.Empty:
break
global multiprocess_queue_set
multiprocess_queue_set.remove(self._data_queue)
def _wait_thread_ends(self):
thread = self._thread
if thread is not None:
self._blocking_queue.close()
thread.join()
def _wait_process_ends(self):
process = self._process
if process is not None:
process.join()
# erase process id
core._erase_process_pids(id(self))
def _init_iterable(self):
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
self._var_names = []
self._shapes = []
self._dtypes = []
self._need_check_feed = []
self._blocking_queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, False)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer, True,
self._pin_memory)
def _start(self):
if self._use_multiprocess:
# clear old _data_queue and remove it from multiprocess_queue_set
self._clear_and_remove_data_queue()
# set data_queue and process
self._data_queue = multiprocessing.Queue(self._capacity)
# add _data_queue into global queue set
global multiprocess_queue_set
multiprocess_queue_set.add(self._data_queue)
self._process = multiprocessing.Process(
target=_reader_process_loop,
args=(self._batch_reader, self._data_queue))
self._process.daemon = True
self._process.start()
# Set child process signal handler
            # NOTE: [ avoiding hang ] 1. if the child process dies due to a bus
            # error/segfault or just hangs, the main process will hang waiting
            # for data, so SIGSEGV and SIGBUS of the child process need to be
            # handled here; 2. if the main process ends before the child
            # process, it shuts all its daemonic children down with a SIGTERM
            # (instead of joining them without a timeout), so SIGTERM also
            # needs to be handled here.
core._set_process_pids(id(self), [self._process.pid])
_set_SIGCHLD_handler()
# Set reader_thread
self._thread_done_event = threading.Event()
self._thread = threading.Thread(
target=self._reader_thread_loop_for_multiprocess,
args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
else:
self._thread = threading.Thread(
target=self._reader_thread_loop_for_singleprocess,
args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._reader.reset()
self._wait_thread_ends()
if self._use_multiprocess:
self._wait_process_ends()
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._batch_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
if _in_eager_mode():
return core.eager.read_next_tensor_list(
self._reader.read_next_list()[0])
else:
return self._reader.read_next_var_list()
except StopIteration:
self._reset()
six.reraise(*sys.exc_info())
def _exit_thread_expectedly(self):
self._thread_done_event.set()
self._blocking_queue.close()
def _exit_thread_unexpectedly(self):
self._thread_done_event.set()
self._blocking_queue.kill()
logging.error("DataLoader reader thread raised an exception!")
def _reader_thread_loop_for_multiprocess(self, legacy_expected_place):
# See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
_set_expected_place(legacy_expected_place)
while not self._thread_done_event.is_set():
try:
# NOTE: [ avoid hanging ] Even with carefully designed data dependencies
# (i.e., a put() always corresponding to a get()), hanging on get() can
# still happen when data in queue is corrupted (e.g., due to
# Queue.cancel_join_thread or unexpected exit). So we set a timeout whenever
# we try to get data from `data_queue`
                # NOTE: [ avoid failing quickly ] Here, the time setting of
                # QUEUE_GET_TIMEOUT is relatively long, currently 60 seconds,
                # because in some models, if the reader child process starts
                # with a heavy burden, it does not have enough time to put data
                # into the queue when the main process starts trying to get
                # data from the queue. At this time, the reader thread needs
                # to wait slightly longer.
tensor_list = self._data_queue.get(timeout=QUEUE_GET_TIMEOUT)
except:
                # NOTE: [ avoid hanging ] After adding the shared memory
                # mechanism, not only the queue.Empty exception can occur here,
                # but other exceptions as well, such as an mmap failure. If
                # they are not handled here, the thread will hang.
self._exit_thread_unexpectedly()
logging.error(
"DataLoader reader thread failed to read data from the multiprocessing.Queue."
)
six.reraise(*sys.exc_info())
if not self._thread_done_event.is_set():
if tensor_list is not None:
try:
array = core.LoDTensorArray()
for tensor in tensor_list:
array.append(tensor)
if not self._blocking_queue.push(array):
self._blocking_queue.close()
except:
self._exit_thread_unexpectedly()
six.reraise(*sys.exc_info())
else:
self._exit_thread_expectedly()
def _reader_thread_loop_for_singleprocess(self, legacy_expected_place):
try:
# See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
_set_expected_place(legacy_expected_place)
for sample in self._batch_reader():
array = core.LoDTensorArray()
for item in sample:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._blocking_queue.push(array):
break
self._blocking_queue.close()
self._thread = None
except Exception:
self._blocking_queue.kill()
self._thread = None
logging.warning(
"DygraphDataLoader reader thread raised an exception.")
six.reraise(*sys.exc_info())
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
return self
def set_sample_list_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
def __batch_reader_impl__():
for batch in reader():
slots = []
for items in batch:
for i, item in enumerate(items):
if len(slots) < len(items):
slots.append([item])
else:
slots[i].append(item)
yield slots
self.set_batch_generator(__batch_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self._batch_reader = reader
if places is None:
places = _current_expected_place()
self._places = _convert_places(places)
assert len(self._places) == 1, \
"Number of places must be 1 in imperative mode"
return self
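# --- Editor sketch (not part of the original module) ---
# A pure-Python illustration of the sample->slot transposition performed by
# __batch_reader_impl__ in set_sample_list_generator above; the data values
# are made up for demonstration, and the helper is defined but never called.
def _slot_transpose_sketch():
    batch = [([1, 2], 0), ([3, 4], 1)]  # two (image, label) samples
    slots = []
    for items in batch:
        for i, item in enumerate(items):
            if len(slots) < len(items):
                slots.append([item])
            else:
                slots[i].append(item)
    # slots == [[[1, 2], [3, 4]], [0, 1]]: one slot per field, batched
    return slots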
class GeneratorLoader(DataLoaderBase):
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False,
drop_last=True):
self._tensor_reader = None
self._places = None
self._thread = None
self._queue = None
self._feed_list = feed_list
self._exited = False
self._drop_last = drop_last
self._keep_order = keep_data_loader_order()
if not capacity:
raise ValueError("Please give value to capacity.")
self._iterable = iterable
self._return_list = return_list
if not self._feed_list:
raise Exception("Feed list must be given under static mode.")
self._use_double_buffer = use_double_buffer
self._capacity = capacity
if not self._iterable:
self._init_non_iterable()
def _wait_thread_ends(self):
        # Get self._thread first to prevent a data race, because __thread_main__
        # would set self._thread to None at the end
thread = self._thread
if thread is not None and self._iterable:
self._queue.close()
thread.join()
def _init_iterable(self):
self._wait_thread_ends()
self._var_names = [v.name for v in self._feed_list]
self._shapes = [v.shape for v in self._feed_list]
self._dtypes = [v.dtype for v in self._feed_list]
self._need_check_feed = [
v.desc.need_check_feed() for v in self._feed_list
]
self._queue = core.init_lod_tensor_blocking_queue(
core.Variable(), self._capacity, self._keep_order)
self._reader = None
self._reader = core.create_py_reader(
self.queue, self._var_names, self._shapes, self._dtypes,
self._need_check_feed, self._places, self._use_double_buffer,
self._drop_last, False)
def _init_non_iterable(self):
lod_levels = []
dtypes = []
shape_concat = []
ranks = []
shapes = []
need_check_feed = []
for feed_data in self._feed_list:
dtypes.append(feed_data.dtype)
shape_concat.extend(feed_data.shape)
ranks.append(len(feed_data.shape))
shapes.append(feed_data.shape)
lod_levels.append(feed_data.lod_level)
need_check_feed.append(int(feed_data.desc.need_check_feed()))
queue_name = data_loader_unique_name_generator(
'lod_tensor_blocking_queue')
reader_name = data_loader_unique_name_generator('create_py_reader')
double_buffer_name = data_loader_unique_name_generator('double_buffer')
var = global_scope().var(queue_name)
self._queue = core.init_lod_tensor_blocking_queue(var, self._capacity,
self._keep_order)
if self._keep_order:
block = default_main_program().current_block()
else:
block = default_startup_program().current_block()
reader_var = block.create_var(name=reader_name)
dtype_int = [int(t) for t in dtypes]
block.append_op(
type='create_py_reader',
inputs={'blocking_queue': [queue_name]},
outputs={'Out': [reader_var]},
attrs={
'shape_concat': shape_concat,
'lod_levels': lod_levels,
'dtypes': dtype_int,
'need_check_feed': need_check_feed,
'ranks': ranks
})
reader_var.desc.set_dtypes(dtypes)
reader_var.persistable = True
reader_var.stop_gradient = True
if self._keep_order:
main_prog_var = reader_var
reader = main_prog_var
reader.reset = self._queue.reset
else:
main_prog_var = _copy_reader_var_(
default_main_program().current_block(), reader_var)
main_prog_var.stop_gradient = True
main_prog_var.persistable = True
reader = monkey_patch_reader_methods(main_prog_var)
if self._use_double_buffer:
double_buffer_reader = double_buffer(
reader, name=double_buffer_name)
# we return a double buffer reader. However, the reset method comes from
# py_reader.
double_buffer_reader.reset = reader.reset
reader = double_buffer_reader
self._reader = reader
default_main_program().current_block().append_op(
type='read',
inputs={'Reader': [self._reader]},
outputs={'Out': self._feed_list},
attrs={'drop_last': self._drop_last})
@property
def queue(self):
return self._queue
@property
def iterable(self):
return self._iterable
def __iter__(self):
assert self.iterable, "DataLoader is not iterable"
assert self._tensor_reader is not None, \
"Data source of DataLoader has not set yet"
self._init_iterable()
self._start()
return self
def __next__(self):
try:
if self._return_list:
data = self._reader.read_next_list()
for i in range(len(data)):
data[i] = data[i]._move_to_list()
return data
else:
return self._reader.read_next()
except StopIteration:
self._queue.close()
self._reset()
six.reraise(*sys.exc_info())
def start(self):
assert not self._iterable, "start() cannot be called when DataLoader is iterable"
self._start()
def reset(self):
assert not self._iterable, "reset() cannot be called when DataLoader is iterable"
self._reset()
def _start(self):
def __thread_main__(legacy_expected_place):
try:
# See _DataLoaderIterSingleProcess._thread_loop() for why set expected place here.
_set_expected_place(legacy_expected_place)
while not self._queue.wait_for_inited(1):
if self._exited:
return
for tensors in self._tensor_reader():
array = core.LoDTensorArray()
for item in tensors:
if not isinstance(item, core.LoDTensor):
item = self._check_input_array(item)
tmp = core.LoDTensor()
tmp.set(item, core.CPUPlace())
item = tmp
array.append(item)
if not self._queue.push(array):
break
self._queue.close()
self._thread = None
            except Exception:
self._queue.kill()
self._thread = None
logging.warning('Your reader has raised an exception!')
six.reraise(*sys.exc_info())
self._thread = threading.Thread(
target=__thread_main__, args=(_current_expected_place(), ))
self._thread.daemon = True
self._thread.start()
def _reset(self):
self._queue.close()
self._exited = True
thread = self._thread
if thread is not None:
thread.join()
self._exited = False
self._reader.reset()
def set_sample_generator(self,
reader,
batch_size,
drop_last=True,
places=None):
assert batch_size > 0, "batch_size must be larger than 0"
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
has_lod = False
for f in self._feed_list:
if f.lod_level != 0:
has_lod = True
break
if has_lod:
self.set_sample_list_generator(
paddle.batch(
reader, batch_size=batch_size, drop_last=drop_last),
places=places)
else:
reader = BatchedTensorProvider(
feed_list=self._feed_list,
place=core.CPUPlace(),
batch_size=batch_size,
generator=reader,
drop_last=drop_last)
self.set_batch_generator(reader, places=places)
return self
    def set_sample_list_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
with program_guard(Program(), Program()):
feeder = DataFeeder(
feed_list=self._feed_list, place=core.CPUPlace())
paddle_reader = feeder.decorate_reader(reader, multi_devices=False)
def __tensor_reader_impl__():
for slots in paddle_reader():
yield [slots[var.name] for var in self._feed_list]
self.set_batch_generator(__tensor_reader_impl__, places)
return self
def set_batch_generator(self, reader, places=None):
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
self._tensor_reader = reader
if self._iterable:
assert places is not None, "Places cannot be None when DataLoader is iterable"
self._places = _convert_places(places)
else:
if places is not None:
logging.info(
                    'places would be omitted when DataLoader is not iterable')
return self
class PyReader(DataLoaderBase):
r"""
Create a reader object for data feeding in Python.
    Data will be prefetched by a Python thread and pushed
    into a queue asynchronously. Data in the queue would be extracted
automatically when `Executor.run(...)` is called.
Args:
feed_list (list(Variable)|tuple(Variable)): feed variable list.
The variables should be created by :code:`fluid.layers.data()`.
capacity (int): capacity of the queue maintained in PyReader.
The unit is batch number. Set larger capacity if your reader
is fast.
        use_double_buffer (bool): whether to use double_buffer_reader.
            If use_double_buffer=True, PyReader would prefetch the next
            batch of data asynchronously, which speeds up data feeding
            but occupies a little more CPU or GPU memory, i.e., the memory
            of one batch of input data.
iterable (bool): whether the created PyReader is iterable.
return_list (bool): whether the return value on each device is
presented as a list. It is only valid when iterable=True.
If return_list=False, the return value on each device would
be a dict of str -> LoDTensor, where the key of the dict is
the name of each fed variables. If return_list=True, the
return value on each device would be a list(LoDTensor). It is
recommended to use return_list=False in static graph mode and
use return_list=True in dygraph mode.
Returns:
the created reader object.
Return type:
reader(Reader)
Examples:
1. If iterable = False, the created PyReader object is almost the
same as :code:`fluid.layers.py_reader()`. Operators would be
inserted into the program. User should call :code:`start()`
before each epoch and catch :code:`fluid.core.EOFException`
thrown by :code:`Executor.run()` when epoch ends. Once the
exception is caught, user should call :code:`reset()` to reset
the reader manually.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image_and_label(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label],
capacity=4,
iterable=False)
user_defined_reader = reader_creator_random_image_and_label(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE))
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(EPOCH_NUM):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
        2. If iterable=True, the created PyReader object is decoupled from
           the program. No operator would be inserted into the program.
In this case, the created reader is a Python generator, which
is iterable. User should feed the data yielded from PyReader
object into :code:`Executor.run(feed=...)`.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 5
BATCH_SIZE = 10
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0, high=255, size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return reader
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True, return_list=False)
user_defined_reader = reader_creator_random_image(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
3. If return_list=True, the return values would be presented as list instead of dict.
This is usually used in dygraph mode.
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
ITER_NUM = 5
BATCH_SIZE = 10
def reader_creator_random_image(height, width):
def reader():
for i in range(ITER_NUM):
yield np.random.uniform(low=0, high=255, size=[height, width]), \
                            np.random.randint(low=0, high=10, size=[1])
return reader
place = fluid.CPUPlace()
with fluid.dygraph.guard(place):
py_reader = fluid.io.PyReader(capacity=2, return_list=True)
user_defined_reader = reader_creator_random_image(784, 784)
py_reader.decorate_sample_list_generator(
paddle.batch(user_defined_reader, batch_size=BATCH_SIZE),
place)
for image, label in py_reader():
relu = fluid.layers.relu(image)
"""
def __init__(self,
feed_list=None,
capacity=None,
use_double_buffer=True,
iterable=True,
return_list=False):
self._loader = DataLoader.from_generator(
feed_list, capacity, use_double_buffer, iterable, return_list)
@property
def queue(self):
return self._loader.queue
@property
def iterable(self):
return self._loader.iterable
def __iter__(self):
return self._loader.__iter__()
def __next__(self):
return self._loader.__next__()
def start(self):
'''
Start the data feeding thread.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.start()
def reset(self):
'''
Reset the reader object when :code:`fluid.core.EOFException` raises.
Can only call when the reader object is not iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
BATCH_SIZE = 10
def generator():
for i in range(5):
yield np.random.uniform(low=0, high=255, size=[784, 784]),
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
reader = fluid.io.PyReader(feed_list=[image], capacity=4, iterable=False)
reader.decorate_sample_list_generator(
paddle.batch(generator, batch_size=BATCH_SIZE))
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for i in range(3):
reader.start()
while True:
try:
executor.run(feed=None)
except fluid.core.EOFException:
reader.reset()
break
'''
self._loader.reset()
def decorate_sample_generator(self,
sample_generator,
batch_size,
drop_last=True,
places=None):
'''
Set the data source of the PyReader object.
The provided :code:`sample_generator` should be a Python generator,
which yields list(numpy.ndarray)-typed data of each sample.
:code:`places` must be set when the PyReader object is iterable.
        If no input has LoD, this method is faster than
        :code:`decorate_sample_list_generator(paddle.batch(sample_generator, ...))` .
Args:
sample_generator (generator): Python generator that yields
list(numpy.ndarray)-typed sample data.
batch_size (int): batch size. Must be larger than 0.
drop_last (bool): Whether to drop the last batch when sample number
is less than batch_size.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.array([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_generator(user_defined_generator,
batch_size=BATCH_SIZE,
places=[fluid.CPUPlace()])
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_generator(sample_generator, batch_size,
drop_last, places)
def decorate_sample_list_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
        which yields list(numpy.ndarray)-typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields
list(numpy.ndarray)-typed batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
fake_image = np.random.uniform(low=0,
high=255,
size=[height, width])
fake_label = np.ones([1])
yield fake_image, fake_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_sample_list_generator(
paddle.batch(user_defined_generator, batch_size=BATCH_SIZE),
fluid.core.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.core.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_sample_list_generator(reader, places)
def decorate_batch_generator(self, reader, places=None):
'''
Set the data source of the PyReader object.
The provided :code:`reader` should be a Python generator,
which yields numpy.ndarray-typed or LoDTensor-typed batched data.
:code:`places` must be set when the PyReader object is iterable.
Args:
reader (generator): Python generator that yields LoDTensor-typed
batched data.
places (None|list(CUDAPlace)|list(CPUPlace)): place list. Must
be provided when PyReader is iterable.
Example:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
EPOCH_NUM = 3
ITER_NUM = 15
BATCH_SIZE = 3
def network(image, label):
# User-defined network, here is an example of softmax regression.
predict = fluid.layers.fc(input=image, size=10, act='softmax')
return fluid.layers.cross_entropy(input=predict, label=label)
def random_image_and_label_generator(height, width):
def generator():
for i in range(ITER_NUM):
batch_image = np.random.uniform(low=0,
high=255,
size=[BATCH_SIZE, height, width])
batch_label = np.ones([BATCH_SIZE, 1])
batch_image = batch_image.astype('float32')
batch_label = batch_label.astype('int64')
yield batch_image, batch_label
return generator
image = fluid.data(name='image', shape=[None, 784, 784], dtype='float32')
label = fluid.data(name='label', shape=[None, 1], dtype='int64')
reader = fluid.io.PyReader(feed_list=[image, label], capacity=4, iterable=True)
user_defined_generator = random_image_and_label_generator(784, 784)
reader.decorate_batch_generator(user_defined_generator, fluid.CPUPlace())
loss = network(image, label)
executor = fluid.Executor(fluid.CPUPlace())
executor.run(fluid.default_startup_program())
for _ in range(EPOCH_NUM):
for data in reader():
executor.run(feed=data, fetch_list=[loss])
'''
self._loader.set_batch_generator(reader, places)
class DatasetLoader(DataLoaderBase):
def __init__(self, dataset, places, drop_last):
assert isinstance(dataset, paddle.distributed.fleet.dataset.
DatasetBase), "dataset must be type of DatasetBase"
assert not in_dygraph_mode(
), "DatasetLoader is not supported in dygraph mode yet"
if isinstance(places, (list, tuple)):
places = _get_paddle_place_list(places)
else:
places = _get_paddle_place(places)
thread_num = len(places)
assert len(dataset.filelist) >= thread_num, \
"Filelist number of dataset {} must be not less than place number {}".format(len(dataset.filelist), thread_num)
if dataset.thread_num != 0 and dataset.thread_num != thread_num:
            logging.warning('thread_num {} which is set in Dataset is ignored'.
                            format(dataset.thread_num))
dataset._set_thread(thread_num)
if isinstance(dataset, paddle.distributed.fleet.dataset.
InMemoryDataset) and dataset.queue_num > thread_num:
logging.warn("queue_num {} which is set in Dataset is ignored".
format(dataset.queue_num))
dataset._set_queue_num(thread_num)
self._dataset = dataset
use_slots = [
slot.name for slot in dataset.proto_desc.multi_slot_desc.slots
if slot.is_used
]
self._iterable_dataset = core.IterableDatasetWrapper(
dataset.dataset, use_slots,
_convert_places(places), dataset.proto_desc.batch_size, drop_last)
def __iter__(self):
self._dataset._finish_to_run()
self._dataset._prepare_to_run()
self._iterable_dataset._start()
return self
def __next__(self):
return self._iterable_dataset._next()
object.rs
use std::io::{Result, Write};
use case::CaseExt;
use analysis;
use library;
use env::Env;
use super::child_properties;
use super::function;
use super::general;
use super::properties;
use super::signal;
use super::trait_impls;
use super::trampoline;
pub fn generate(
w: &mut Write,
env: &Env,
analysis: &analysis::object::Info,
generate_display_trait: bool,
) -> Result<()> {
try!(general::start_comments(w, &env.config));
try!(general::uses(w, env, &analysis.imports));
try!(general::define_object_type(
w,
env,
&analysis.name,
&analysis.c_type,
&analysis.c_class_type.as_ref().map(|s| &s[..]),
&analysis.rust_class_type.as_ref().map(|s| &s[..]),
&analysis.get_type,
analysis.is_interface,
&analysis.supertypes,
));
if need_generate_inherent(analysis) {
try!(writeln!(w));
try!(write!(w, "impl {} {{", analysis.name));
for func_analysis in &analysis.constructors() {
try!(function::generate(w, env, func_analysis, false, false, 1));
}
if !need_generate_trait(analysis) {
for func_analysis in &analysis.methods() {
try!(function::generate(w, env, func_analysis, false, false, 1));
}
for property in &analysis.properties {
try!(properties::generate(w, env, property, false, false, 1));
}
for child_property in &analysis.child_properties {
try!(child_properties::generate(
w,
env,
child_property,
false,
false,
1,
));
}
}
for func_analysis in &analysis.functions() {
try!(function::generate(w, env, func_analysis, false, false, 1));
}
if !need_generate_trait(analysis) {
for signal_analysis in analysis
.signals
.iter()
.chain(analysis.notify_signals.iter())
{
try!(signal::generate(
w,
env,
signal_analysis,
&analysis.trampolines,
false,
false,
1,
));
}
}
try!(writeln!(w, "}}"));
try!(general::declare_default_from_new(
w,
env,
&analysis.name,
&analysis.functions
));
}
try!(trait_impls::generate(
w,
&analysis.name,
&analysis.functions,
&analysis.specials,
if need_generate_trait(analysis) {
Some(&analysis.trait_name)
} else {
None
},
));
if analysis.concurrency != library::Concurrency::None {
try!(writeln!(w));
}
match analysis.concurrency {
library::Concurrency::Send | library::Concurrency::SendSync => {
try!(writeln!(w, "unsafe impl Send for {} {{}}", analysis.name));
}
library::Concurrency::SendUnique => {
if env.namespaces.is_glib_crate {
try!(writeln!(w, "unsafe impl ::SendUnique for {} {{", analysis.name));
} else {
try!(writeln!(w, "unsafe impl glib::SendUnique for {} {{", analysis.name));
}
try!(writeln!(w, " fn is_unique(&self) -> bool {{"));
try!(writeln!(w, " self.ref_count() == 1"));
try!(writeln!(w, " }}"));
try!(writeln!(w, "}}"));
},
_ => (),
}
if let library::Concurrency::SendSync = analysis.concurrency {
try!(writeln!(w, "unsafe impl Sync for {} {{}}", analysis.name));
}
if !analysis.final_type {
try!(writeln!(w));
try!(writeln!(w, "pub const NONE_{}: Option<&{}> = None;", analysis.name.to_snake().to_uppercase(), analysis.name));
}
if need_generate_trait(analysis) {
try!(writeln!(w));
try!(generate_trait(w, env, analysis));
}
if !analysis.trampolines.is_empty() {
for trampoline in &analysis.trampolines {
try!(trampoline::generate(
w,
env,
trampoline,
need_generate_trait(analysis),
&analysis.name,
));
}
}
if generate_display_trait {
try!(writeln!(
w,
"\nimpl fmt::Display for {} {{",
analysis.name,
));
// Generate Display trait implementation.
try!(writeln!(w, "\tfn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {{\n\
\t\twrite!(f, \"{}\")\n\
\t}}\n\
}}", analysis.name));
}
Ok(())
}
fn generate_trait(
w: &mut Write,
env: &Env,
analysis: &analysis::object::Info,
) -> Result<()> {
try!(write!(w, "pub trait {}: 'static {{", analysis.trait_name));
for func_analysis in &analysis.methods() {
try!(function::generate(w, env, func_analysis, true, true, 1));
}
for property in &analysis.properties {
try!(properties::generate(w, env, property, true, true, 1));
}
for child_property in &analysis.child_properties {
try!(child_properties::generate(
w,
env,
child_property,
true,
true,
1,
));
}
for signal_analysis in analysis
.signals
.iter()
.chain(analysis.notify_signals.iter())
{
try!(signal::generate(
w,
env,
signal_analysis,
&analysis.trampolines,
true,
true,
1,
));
}
try!(writeln!(w, "}}"));
try!(writeln!(w));
try!(write!(
w,
"impl<O: IsA<{}>> {} for O {{",
analysis.name,
analysis.trait_name,
));
for func_analysis in &analysis.methods() {
try!(function::generate(w, env, func_analysis, true, false, 1));
}
for property in &analysis.properties {
try!(properties::generate(w, env, property, true, false, 1));
}
for child_property in &analysis.child_properties {
try!(child_properties::generate(
w,
env,
child_property,
true,
false,
1,
));
}
for signal_analysis in analysis
.signals
.iter()
.chain(analysis.notify_signals.iter())
{
try!(signal::generate(
w,
env,
signal_analysis,
&analysis.trampolines,
true,
false,
1,
));
}
try!(writeln!(w, "}}"));
Ok(())
}
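// --- Editor sketch (hypothetical output, not part of the original file) ---
// For an object named `Widget` whose generated trait is `WidgetExt`,
// generate() and generate_trait() above emit code shaped roughly like:
//
//     impl Widget { /* constructors and plain functions */ }
//     pub trait WidgetExt: 'static { /* method declarations */ }
//     impl<O: IsA<Widget>> WidgetExt for O { /* method bodies */ }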
fn need_generate_inherent(analysis: &analysis::object::Info) -> bool {
    analysis.has_constructors || analysis.has_functions || !need_generate_trait(analysis)
}
fn need_generate_trait(analysis: &analysis::object::Info) -> bool {
analysis.generate_trait
}
pub fn generate_reexports(
env: &Env,
analysis: &analysis::object::Info,
module_name: &str,
contents: &mut Vec<String>,
traits: &mut Vec<String>,
) {
let mut cfgs: Vec<String> = Vec::new();
if let Some(cfg) = general::cfg_condition_string(&analysis.cfg_condition, false, 0) {
cfgs.push(cfg);
}
if let Some(cfg) = general::version_condition_string(env, analysis.version, false, 0) {
cfgs.push(cfg);
}
contents.push("".to_owned());
contents.extend_from_slice(&cfgs);
contents.push(format!("mod {};", module_name));
contents.extend_from_slice(&cfgs);
let none_type = if !analysis.final_type {
format!(", NONE_{}", analysis.name.to_snake().to_uppercase())
} else {
String::new()
};
if let Some(ref class_name) = analysis.rust_class_type {
contents.push(format!("pub use self::{}::{{{}, {}{}}};", module_name, analysis.name, class_name, none_type));
} else {
contents.push(format!("pub use self::{}::{{{}{}}};", module_name, analysis.name, none_type));
}
if need_generate_trait(analysis) {
contents.extend_from_slice(&cfgs);
contents.push(format!(
"pub use self::{}::{};",
module_name,
analysis.trait_name
));
for cfg in &cfgs {
traits.push(format!("\t{}", cfg));
}
traits.push(format!("\tpub use super::{};", analysis.trait_name));
}
}
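// --- Editor sketch (hypothetical values, not part of the original file) ---
// For a non-final type `Widget` with generated trait `WidgetExt` in module
// `widget`, and no cfg/version conditions, generate_reexports() above pushes
// roughly:
//
//     mod widget;
//     pub use self::widget::{Widget, NONE_WIDGET};
//     pub use self::widget::WidgetExt;
//
// and appends a tab-indented `pub use super::WidgetExt;` to `traits`.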
err.rs
#[doc = r" Value read from the register"]
pub struct R {
bits: u32,
}
#[doc = r" Value to write to the register"]
pub struct W {
bits: u32,
}
impl super::ERR {
#[doc = r" Modifies the contents of the register"]
#[inline]
pub fn modify<F>(&self, f: F)
where
for<'w> F: FnOnce(&R, &'w mut W) -> &'w mut W,
{
let bits = self.register.get();
let r = R { bits: bits };
let mut w = W { bits: bits };
f(&r, &mut w);
self.register.set(w.bits);
}
#[doc = r" Reads the contents of the register"]
#[inline]
pub fn read(&self) -> R {
R {
bits: self.register.get(),
}
}
#[doc = r" Writes to the register"]
#[inline]
pub fn write<F>(&self, f: F)
where
F: FnOnce(&mut W) -> &mut W,
{
let mut w = W::reset_value();
f(&mut w);
self.register.set(w.bits);
}
#[doc = r" Writes the reset value to the register"]
#[inline]
pub fn reset(&self) {
self.write(|w| w)
}
}
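// --- Editor sketch (not part of the generated file) ---
// A minimal illustration of how the read/modify/write API above is typically
// driven; the `err` parameter is a hypothetical ERR register instance.
#[allow(dead_code)]
fn err_register_usage_sketch(err: &super::ERR) {
    // Snapshot the register once and decode channel 0 via the field enum.
    let r = err.read();
    if ERR0R::_from(r.bits & 1 != 0).bit_is_set() {
        // handle the channel-0 error here
    }
    // Read-modify-write; the identity closure leaves all bits unchanged.
    err.modify(|_r, w| w);
    // Write the reset value back to the register.
    err.reset();
}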
#[doc = "Possible values of the field `ERR0`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR0R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR0R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR0R::_0 => false,
ERR0R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR0R {
match value {
false => ERR0R::_0,
true => ERR0R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR0R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR0R::_1
}
}
#[doc = "Possible values of the field `ERR1`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR1R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR1R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR1R::_0 => false,
ERR1R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR1R {
match value {
false => ERR1R::_0,
true => ERR1R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR1R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR1R::_1
}
}
#[doc = "Possible values of the field `ERR2`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR2R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR2R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR2R::_0 => false,
ERR2R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR2R {
match value {
false => ERR2R::_0,
true => ERR2R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR2R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR2R::_1
}
}
#[doc = "Possible values of the field `ERR3`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR3R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR3R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR3R::_0 => false,
ERR3R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR3R {
match value {
false => ERR3R::_0,
true => ERR3R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR3R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR3R::_1
}
}
#[doc = "Possible values of the field `ERR4`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR4R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR4R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR4R::_0 => false,
ERR4R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR4R {
match value {
false => ERR4R::_0,
true => ERR4R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR4R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR4R::_1
}
}
#[doc = "Possible values of the field `ERR5`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR5R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR5R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR5R::_0 => false,
ERR5R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR5R {
match value {
false => ERR5R::_0,
true => ERR5R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR5R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR5R::_1
}
}
#[doc = "Possible values of the field `ERR6`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR6R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR6R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR6R::_0 => false,
ERR6R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR6R {
match value {
false => ERR6R::_0,
true => ERR6R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR6R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR6R::_1
}
}
#[doc = "Possible values of the field `ERR7`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR7R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR7R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR7R::_0 => false,
ERR7R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR7R {
match value {
false => ERR7R::_0,
true => ERR7R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR7R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR7R::_1
}
}
#[doc = "Possible values of the field `ERR8`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR8R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR8R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR8R::_0 => false,
ERR8R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR8R {
match value {
false => ERR8R::_0,
true => ERR8R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR8R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR8R::_1
}
}
#[doc = "Possible values of the field `ERR9`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR9R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR9R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR9R::_0 => false,
ERR9R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR9R {
match value {
false => ERR9R::_0,
true => ERR9R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR9R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR9R::_1
}
}
#[doc = "Possible values of the field `ERR10`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR10R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR10R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR10R::_0 => false,
ERR10R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR10R {
match value {
false => ERR10R::_0,
true => ERR10R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR10R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR10R::_1
}
}
#[doc = "Possible values of the field `ERR11`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR11R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR11R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR11R::_0 => false,
ERR11R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR11R {
match value {
false => ERR11R::_0,
true => ERR11R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR11R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR11R::_1
}
}
#[doc = "Possible values of the field `ERR12`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR12R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR12R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR12R::_0 => false,
ERR12R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR12R {
match value {
false => ERR12R::_0,
true => ERR12R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR12R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR12R::_1
}
}
#[doc = "Possible values of the field `ERR13`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR13R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR13R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR13R::_0 => false,
ERR13R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR13R {
match value {
false => ERR13R::_0,
true => ERR13R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR13R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR13R::_1
}
}
#[doc = "Possible values of the field `ERR14`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR14R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR14R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR14R::_0 => false,
ERR14R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR14R {
match value {
false => ERR14R::_0,
true => ERR14R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR14R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR14R::_1
}
}
#[doc = "Possible values of the field `ERR15`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR15R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR15R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR15R::_0 => false,
ERR15R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR15R {
match value {
false => ERR15R::_0,
true => ERR15R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR15R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR15R::_1
}
}
#[doc = "Possible values of the field `ERR16`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR16R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR16R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR16R::_0 => false,
ERR16R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR16R {
match value {
false => ERR16R::_0,
true => ERR16R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR16R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR16R::_1
}
}
#[doc = "Possible values of the field `ERR17`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR17R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR17R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR17R::_0 => false,
ERR17R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR17R {
match value {
false => ERR17R::_0,
true => ERR17R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR17R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR17R::_1
}
}
#[doc = "Possible values of the field `ERR18`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR18R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR18R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR18R::_0 => false,
ERR18R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR18R {
match value {
false => ERR18R::_0,
true => ERR18R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR18R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR18R::_1
}
}
#[doc = "Possible values of the field `ERR19`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR19R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR19R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR19R::_0 => false,
ERR19R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR19R {
match value {
false => ERR19R::_0,
true => ERR19R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR19R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR19R::_1
}
}
#[doc = "Possible values of the field `ERR20`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR20R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR20R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR20R::_0 => false,
ERR20R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR20R {
match value {
false => ERR20R::_0,
true => ERR20R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR20R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR20R::_1
}
}
#[doc = "Possible values of the field `ERR21`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR21R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR21R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR21R::_0 => false,
ERR21R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR21R {
match value {
false => ERR21R::_0,
true => ERR21R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR21R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR21R::_1
}
}
#[doc = "Possible values of the field `ERR22`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR22R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR22R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR22R::_0 => false,
ERR22R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR22R {
match value {
false => ERR22R::_0,
true => ERR22R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR22R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR22R::_1
}
}
#[doc = "Possible values of the field `ERR23`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR23R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR23R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR23R::_0 => false,
ERR23R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR23R {
match value {
false => ERR23R::_0,
true => ERR23R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR23R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR23R::_1
}
}
#[doc = "Possible values of the field `ERR24`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR24R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR24R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR24R::_0 => false,
ERR24R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR24R {
match value {
false => ERR24R::_0,
true => ERR24R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR24R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR24R::_1
}
}
#[doc = "Possible values of the field `ERR25`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR25R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR25R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR25R::_0 => false,
ERR25R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR25R {
match value {
false => ERR25R::_0,
true => ERR25R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR25R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR25R::_1
}
}
#[doc = "Possible values of the field `ERR26`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR26R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR26R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR26R::_0 => false,
ERR26R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR26R {
match value {
false => ERR26R::_0,
true => ERR26R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR26R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR26R::_1
}
}
#[doc = "Possible values of the field `ERR27`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR27R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR27R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR27R::_0 => false,
ERR27R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR27R {
match value {
false => ERR27R::_0,
true => ERR27R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR27R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR27R::_1
}
}
#[doc = "Possible values of the field `ERR28`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR28R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR28R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR28R::_0 => false,
ERR28R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR28R {
match value {
false => ERR28R::_0,
true => ERR28R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR28R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR28R::_1
}
}
#[doc = "Possible values of the field `ERR29`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR29R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR29R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR29R::_0 => false,
ERR29R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR29R {
match value {
false => ERR29R::_0,
true => ERR29R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR29R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR29R::_1
}
}
#[doc = "Possible values of the field `ERR30`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR30R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR30R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR30R::_0 => false,
ERR30R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR30R {
match value {
false => ERR30R::_0,
true => ERR30R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR30R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR30R::_1
}
}
#[doc = "Possible values of the field `ERR31`"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum ERR31R {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR31R {
#[doc = r" Returns `true` if the bit is clear (0)"]
#[inline]
pub fn bit_is_clear(&self) -> bool {
!self.bit()
}
#[doc = r" Returns `true` if the bit is set (1)"]
#[inline]
pub fn bit_is_set(&self) -> bool {
self.bit()
}
#[doc = r" Value of the field as raw bits"]
#[inline]
pub fn bit(&self) -> bool {
match *self {
ERR31R::_0 => false,
ERR31R::_1 => true,
}
}
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _from(value: bool) -> ERR31R {
match value {
false => ERR31R::_0,
true => ERR31R::_1,
}
}
#[doc = "Checks if the value of the field is `_0`"]
#[inline]
pub fn is_0(&self) -> bool {
*self == ERR31R::_0
}
#[doc = "Checks if the value of the field is `_1`"]
#[inline]
pub fn is_1(&self) -> bool {
*self == ERR31R::_1
}
}
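// Write-side types start here. Each bit gets a value enum `ERRnW` whose hidden
// `_bits()` method yields the raw bit, and a proxy struct `_ERRnW` that
// borrows the register writer `W`. `variant`, `_0`, `_1`, `set_bit`, and
// `clear_bit` all funnel into `bit()`, which clears the field's bit in
// `w.bits` and then ORs in the new value at the field's offset.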
#[doc = "Values that can be written to the field `ERR0`"]
pub enum ERR0W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR0W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR0W::_0 => false,
ERR0W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR0W<'a> {
w: &'a mut W,
}
impl<'a> _ERR0W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR0W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR0W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR0W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 0;
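// The field is one bit wide, so the mask is the `bool` value `true`, widened
// to `u32` below; OFFSET is ERR0's bit position within the 32-bit register.
// Clear the old bit, then OR in the masked new value: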
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
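// A minimal usage sketch, assuming (hypothetically) that this module belongs
// to a register `err` on a peripheral handle `dma`; the real peripheral and
// register names come from the device crate this file was generated for:
//
//     // Read-modify-write: change only channel 0, keeping the other bits:
//     dma.err.modify(|_, w| w.err0()._0());
//     // Whole-register write, using the raw-bit helper instead of a variant:
//     dma.err.write(|w| w.err0().set_bit());
//
// Every proxy method returns `&'a mut W`, so writes to different channels
// chain: `w.err0()._0().err1()._0()`.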
#[doc = "Values that can be written to the field `ERR1`"]
pub enum ERR1W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR1W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR1W::_0 => false,
ERR1W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR1W<'a> {
w: &'a mut W,
}
impl<'a> _ERR1W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR1W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR1W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR1W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 1;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR2`"]
pub enum ERR2W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR2W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR2W::_0 => false,
ERR2W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR2W<'a> {
w: &'a mut W,
}
impl<'a> _ERR2W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR2W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR2W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR2W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 2;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR3`"]
pub enum ERR3W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR3W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR3W::_0 => false,
ERR3W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR3W<'a> {
w: &'a mut W,
}
impl<'a> _ERR3W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR3W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR3W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR3W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 3;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR4`"]
pub enum ERR4W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR4W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR4W::_0 => false,
ERR4W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR4W<'a> {
w: &'a mut W,
}
impl<'a> _ERR4W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR4W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR4W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR4W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 4;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR5`"]
pub enum ERR5W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR5W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR5W::_0 => false,
ERR5W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR5W<'a> {
w: &'a mut W,
}
impl<'a> _ERR5W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR5W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR5W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR5W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 5;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR6`"]
pub enum ERR6W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR6W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR6W::_0 => false,
ERR6W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR6W<'a> {
w: &'a mut W,
}
impl<'a> _ERR6W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR6W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR6W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR6W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 6;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR7`"]
pub enum ERR7W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR7W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR7W::_0 => false,
ERR7W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR7W<'a> {
w: &'a mut W,
}
impl<'a> _ERR7W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR7W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR7W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR7W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 7;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR8`"]
pub enum ERR8W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR8W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR8W::_0 => false,
ERR8W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR8W<'a> {
w: &'a mut W,
}
impl<'a> _ERR8W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR8W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR8W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR8W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 8;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR9`"]
pub enum ERR9W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR9W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR9W::_0 => false,
ERR9W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR9W<'a> {
w: &'a mut W,
}
impl<'a> _ERR9W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR9W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR9W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR9W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 9;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR10`"]
pub enum ERR10W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR10W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR10W::_0 => false,
ERR10W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR10W<'a> {
w: &'a mut W,
}
impl<'a> _ERR10W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR10W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR10W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR10W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 10;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR11`"]
pub enum ERR11W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR11W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR11W::_0 => false,
ERR11W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR11W<'a> {
w: &'a mut W,
}
impl<'a> _ERR11W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR11W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR11W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR11W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 11;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR12`"]
pub enum ERR12W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR12W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR12W::_0 => false,
ERR12W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR12W<'a> {
w: &'a mut W,
}
impl<'a> _ERR12W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR12W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR12W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR12W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 12;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR13`"]
pub enum ERR13W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR13W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR13W::_0 => false,
ERR13W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR13W<'a> {
w: &'a mut W,
}
impl<'a> _ERR13W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR13W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR13W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR13W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 13;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR14`"]
pub enum ERR14W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR14W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR14W::_0 => false,
ERR14W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR14W<'a> {
w: &'a mut W,
}
impl<'a> _ERR14W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR14W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR14W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR14W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 14;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR15`"]
pub enum ERR15W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR15W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR15W::_0 => false,
ERR15W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR15W<'a> {
w: &'a mut W,
}
impl<'a> _ERR15W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR15W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR15W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR15W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 15;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR16`"]
pub enum ERR16W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR16W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR16W::_0 => false,
ERR16W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR16W<'a> {
w: &'a mut W,
}
impl<'a> _ERR16W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR16W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR16W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR16W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 16;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR17`"]
pub enum ERR17W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR17W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR17W::_0 => false,
ERR17W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR17W<'a> {
w: &'a mut W,
}
impl<'a> _ERR17W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR17W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR17W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR17W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 17;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR18`"]
pub enum ERR18W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR18W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR18W::_0 => false,
ERR18W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR18W<'a> {
w: &'a mut W,
}
impl<'a> _ERR18W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR18W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR18W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR18W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 18;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR19`"]
pub enum ERR19W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR19W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR19W::_0 => false,
ERR19W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR19W<'a> {
w: &'a mut W,
}
impl<'a> _ERR19W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR19W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR19W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR19W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 19;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR20`"]
pub enum ERR20W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR20W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR20W::_0 => false,
ERR20W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR20W<'a> {
w: &'a mut W,
}
impl<'a> _ERR20W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR20W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR20W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR20W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 20;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR21`"]
pub enum ERR21W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR21W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR21W::_0 => false,
ERR21W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR21W<'a> {
w: &'a mut W,
}
impl<'a> _ERR21W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR21W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR21W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR21W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 21;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR22`"]
pub enum ERR22W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR22W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR22W::_0 => false,
ERR22W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR22W<'a> {
w: &'a mut W,
}
impl<'a> _ERR22W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR22W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR22W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR22W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 22;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR23`"]
pub enum ERR23W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR23W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR23W::_0 => false,
ERR23W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR23W<'a> {
w: &'a mut W,
}
impl<'a> _ERR23W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR23W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR23W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR23W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 23;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR24`"]
pub enum ERR24W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR24W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR24W::_0 => false,
ERR24W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR24W<'a> {
w: &'a mut W,
}
impl<'a> _ERR24W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR24W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR24W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR24W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 24;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR25`"]
pub enum ERR25W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR25W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR25W::_0 => false,
ERR25W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR25W<'a> {
w: &'a mut W,
}
impl<'a> _ERR25W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR25W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR25W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR25W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 25;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR26`"]
pub enum ERR26W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR26W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR26W::_0 => false,
ERR26W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR26W<'a> {
w: &'a mut W,
}
impl<'a> _ERR26W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR26W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR26W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR26W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 26;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR27`"]
pub enum ERR27W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR27W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR27W::_0 => false,
ERR27W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR27W<'a> {
w: &'a mut W,
}
impl<'a> _ERR27W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR27W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR27W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR27W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 27;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR28`"]
pub enum ERR28W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR28W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR28W::_0 => false,
ERR28W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR28W<'a> {
w: &'a mut W,
}
impl<'a> _ERR28W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR28W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR28W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR28W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 28;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR29`"]
pub enum ERR29W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR29W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR29W::_0 => false,
ERR29W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR29W<'a> {
w: &'a mut W,
}
impl<'a> _ERR29W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR29W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR29W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR29W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 29;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR30`"]
pub enum ERR30W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR30W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR30W::_0 => false,
ERR30W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR30W<'a> {
w: &'a mut W,
}
impl<'a> _ERR30W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR30W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR30W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR30W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 30;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
#[doc = "Values that can be written to the field `ERR31`"]
pub enum ERR31W {
#[doc = "An error in this channel has not occurred"]
_0,
#[doc = "An error in this channel has occurred"]
_1,
}
impl ERR31W {
#[allow(missing_docs)]
#[doc(hidden)]
#[inline]
pub fn _bits(&self) -> bool {
match *self {
ERR31W::_0 => false,
ERR31W::_1 => true,
}
}
}
#[doc = r" Proxy"]
pub struct _ERR31W<'a> {
w: &'a mut W,
}
impl<'a> _ERR31W<'a> {
#[doc = r" Writes `variant` to the field"]
#[inline]
pub fn variant(self, variant: ERR31W) -> &'a mut W {
{
self.bit(variant._bits())
}
}
#[doc = "An error in this channel has not occurred"]
#[inline]
pub fn _0(self) -> &'a mut W {
self.variant(ERR31W::_0)
}
#[doc = "An error in this channel has occurred"]
#[inline]
pub fn _1(self) -> &'a mut W {
self.variant(ERR31W::_1)
}
#[doc = r" Sets the field bit"]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r" Clears the field bit"]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r" Writes raw bits to the field"]
#[inline]
pub fn bit(self, value: bool) -> &'a mut W {
const MASK: bool = true;
const OFFSET: u8 = 31;
self.w.bits &= !((MASK as u32) << OFFSET);
self.w.bits |= ((value & MASK) as u32) << OFFSET;
self.w
}
}
impl R {
#[doc = r" Value of the register as raw bits"]
#[inline]
pub fn bits(&self) -> u32 {
self.bits
}
#[doc = "Bit 0 - Error In Channel 0"]
#[inline]
pub fn err0(&self) -> ERR0R {
ERR0R::_from({
const MASK: bool = true;
const OFFSET: u8 = 0;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 1 - Error In Channel 1"]
#[inline]
pub fn err1(&self) -> ERR1R {
ERR1R::_from({
const MASK: bool = true;
const OFFSET: u8 = 1;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 2 - Error In Channel 2"]
#[inline]
pub fn err2(&self) -> ERR2R {
ERR2R::_from({
const MASK: bool = true;
const OFFSET: u8 = 2;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 3 - Error In Channel 3"]
#[inline]
pub fn err3(&self) -> ERR3R {
ERR3R::_from({
const MASK: bool = true;
const OFFSET: u8 = 3;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 4 - Error In Channel 4"]
#[inline]
pub fn err4(&self) -> ERR4R {
ERR4R::_from({
const MASK: bool = true;
const OFFSET: u8 = 4;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 5 - Error In Channel 5"]
#[inline]
pub fn err5(&self) -> ERR5R {
ERR5R::_from({
const MASK: bool = true;
const OFFSET: u8 = 5;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 6 - Error In Channel 6"]
#[inline]
pub fn err6(&self) -> ERR6R {
ERR6R::_from({
const MASK: bool = true;
const OFFSET: u8 = 6;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 7 - Error In Channel 7"]
#[inline]
pub fn err7(&self) -> ERR7R {
ERR7R::_from({
const MASK: bool = true;
const OFFSET: u8 = 7;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 8 - Error In Channel 8"]
#[inline]
pub fn err8(&self) -> ERR8R {
ERR8R::_from({
const MASK: bool = true;
const OFFSET: u8 = 8;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 9 - Error In Channel 9"]
#[inline]
pub fn err9(&self) -> ERR9R {
ERR9R::_from({
const MASK: bool = true;
const OFFSET: u8 = 9;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 10 - Error In Channel 10"]
#[inline]
pub fn err10(&self) -> ERR10R {
ERR10R::_from({
const MASK: bool = true;
const OFFSET: u8 = 10;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 11 - Error In Channel 11"]
#[inline]
pub fn err11(&self) -> ERR11R {
ERR11R::_from({
const MASK: bool = true;
const OFFSET: u8 = 11;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 12 - Error In Channel 12"]
#[inline]
pub fn err12(&self) -> ERR12R {
ERR12R::_from({
const MASK: bool = true;
const OFFSET: u8 = 12;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 13 - Error In Channel 13"]
#[inline]
pub fn err13(&self) -> ERR13R {
ERR13R::_from({
const MASK: bool = true;
const OFFSET: u8 = 13;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 14 - Error In Channel 14"]
#[inline]
pub fn err14(&self) -> ERR14R {
ERR14R::_from({
const MASK: bool = true;
const OFFSET: u8 = 14;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 15 - Error In Channel 15"]
#[inline]
pub fn err15(&self) -> ERR15R {
ERR15R::_from({
const MASK: bool = true;
const OFFSET: u8 = 15;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 16 - Error In Channel 16"]
#[inline]
pub fn err16(&self) -> ERR16R {
ERR16R::_from({
const MASK: bool = true;
const OFFSET: u8 = 16;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 17 - Error In Channel 17"]
#[inline]
pub fn err17(&self) -> ERR17R {
ERR17R::_from({
const MASK: bool = true;
const OFFSET: u8 = 17;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 18 - Error In Channel 18"]
#[inline]
pub fn err18(&self) -> ERR18R {
ERR18R::_from({
const MASK: bool = true;
const OFFSET: u8 = 18;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 19 - Error In Channel 19"]
#[inline]
pub fn err19(&self) -> ERR19R {
ERR19R::_from({
const MASK: bool = true;
const OFFSET: u8 = 19;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 20 - Error In Channel 20"]
#[inline]
pub fn err20(&self) -> ERR20R {
ERR20R::_from({
const MASK: bool = true;
const OFFSET: u8 = 20;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 21 - Error In Channel 21"]
#[inline]
pub fn err21(&self) -> ERR21R {
ERR21R::_from({
const MASK: bool = true;
const OFFSET: u8 = 21;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 22 - Error In Channel 22"]
#[inline]
pub fn err22(&self) -> ERR22R {
ERR22R::_from({
const MASK: bool = true;
const OFFSET: u8 = 22;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 23 - Error In Channel 23"]
#[inline]
pub fn err23(&self) -> ERR23R {
ERR23R::_from({
const MASK: bool = true;
const OFFSET: u8 = 23;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 24 - Error In Channel 24"]
#[inline]
pub fn err24(&self) -> ERR24R {
ERR24R::_from({
const MASK: bool = true;
const OFFSET: u8 = 24;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 25 - Error In Channel 25"]
#[inline]
pub fn err25(&self) -> ERR25R {
ERR25R::_from({
const MASK: bool = true;
const OFFSET: u8 = 25;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 26 - Error In Channel 26"]
#[inline]
pub fn err26(&self) -> ERR26R {
ERR26R::_from({
const MASK: bool = true;
const OFFSET: u8 = 26;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 27 - Error In Channel 27"]
#[inline]
pub fn err27(&self) -> ERR27R {
ERR27R::_from({
const MASK: bool = true;
const OFFSET: u8 = 27;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 28 - Error In Channel 28"]
#[inline]
pub fn err28(&self) -> ERR28R {
ERR28R::_from({
const MASK: bool = true;
const OFFSET: u8 = 28;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 29 - Error In Channel 29"]
#[inline]
pub fn err29(&self) -> ERR29R {
ERR29R::_from({
const MASK: bool = true;
const OFFSET: u8 = 29;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 30 - Error In Channel 30"]
#[inline]
pub fn err30(&self) -> ERR30R {
ERR30R::_from({
const MASK: bool = true;
const OFFSET: u8 = 30;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
#[doc = "Bit 31 - Error In Channel 31"]
#[inline]
pub fn err31(&self) -> ERR31R {
ERR31R::_from({
const MASK: bool = true;
const OFFSET: u8 = 31;
((self.bits >> OFFSET) & MASK as u32) != 0
})
}
}
impl W {
#[doc = r" Reset value of the register"]
#[inline]
pub fn reset_value() -> W {
W { bits: 0 }
}
#[doc = r" Writes raw bits to the register"]
#[inline]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.bits = bits;
self
}
#[doc = "Bit 0 - Error In Channel 0"]
#[inline]
pub fn err0(&mut self) -> _ERR0W {
_ERR0W { w: self }
}
#[doc = "Bit 1 - Error In Channel 1"]
#[inline]
pub fn err1(&mut self) -> _ERR1W {
_ERR1W { w: self }
}
#[doc = "Bit 2 - Error In Channel 2"]
#[inline]
pub fn err2(&mut self) -> _ERR2W {
_ERR2W { w: self }
}
#[doc = "Bit 3 - Error In Channel 3"]
#[inline]
pub fn err3(&mut self) -> _ERR3W {
_ERR3W { w: self }
}
#[doc = "Bit 4 - Error In Channel 4"]
#[inline]
pub fn err4(&mut self) -> _ERR4W {
_ERR4W { w: self }
}
#[doc = "Bit 5 - Error In Channel 5"]
#[inline]
pub fn err5(&mut self) -> _ERR5W {
_ERR5W { w: self }
}
#[doc = "Bit 6 - Error In Channel 6"]
#[inline]
pub fn err6(&mut self) -> _ERR6W {
_ERR6W { w: self }
}
#[doc = "Bit 7 - Error In Channel 7"]
#[inline]
pub fn err7(&mut self) -> _ERR7W {
_ERR7W { w: self }
}
#[doc = "Bit 8 - Error In Channel 8"]
#[inline]
pub fn err8(&mut self) -> _ERR8W {
_ERR8W { w: self }
}
#[doc = "Bit 9 - Error In Channel 9"]
#[inline]
pub fn err9(&mut self) -> _ERR9W {
_ERR9W { w: self }
}
#[doc = "Bit 10 - Error In Channel 10"]
#[inline]
pub fn err10(&mut self) -> _ERR10W {
_ERR10W { w: self }
}
#[doc = "Bit 11 - Error In Channel 11"]
#[inline]
pub fn err11(&mut self) -> _ERR11W {
_ERR11W { w: self }
}
#[doc = "Bit 12 - Error In Channel 12"]
#[inline]
pub fn err12(&mut self) -> _ERR12W {
_ERR12W { w: self }
}
#[doc = "Bit 13 - Error In Channel 13"]
#[inline]
pub fn err13(&mut self) -> _ERR13W {
_ERR13W { w: self }
}
#[doc = "Bit 14 - Error In Channel 14"]
#[inline]
pub fn err14(&mut self) -> _ERR14W {
_ERR14W { w: self }
}
#[doc = "Bit 15 - Error In Channel 15"]
#[inline]
pub fn err15(&mut self) -> _ERR15W {
_ERR15W { w: self }
}
#[doc = "Bit 16 - Error In Channel 16"]
#[inline]
pub fn err16(&mut self) -> _ERR16W {
_ERR16W { w: self }
}
#[doc = "Bit 17 - Error In Channel 17"]
#[inline]
pub fn err17(&mut self) -> _ERR17W {
_ERR17W { w: self }
}
#[doc = "Bit 18 - Error In Channel 18"]
#[inline]
pub fn err18(&mut self) -> _ERR18W {
_ERR18W { w: self }
}
#[doc = "Bit 19 - Error In Channel 19"]
#[inline]
pub fn err19(&mut self) -> _ERR19W {
_ERR19W { w: self }
}
#[doc = "Bit 20 - Error In Channel 20"]
#[inline]
pub fn err20(&mut self) -> _ERR20W {
_ERR20W { w: self }
}
#[doc = "Bit 21 - Error In Channel 21"]
#[inline]
pub fn err21(&mut self) -> _ERR21W {
_ERR21W { w: self }
}
#[doc = "Bit 22 - Error In Channel 22"]
#[inline]
pub fn err22(&mut self) -> _ERR22W {
_ERR22W { w: self }
}
#[doc = "Bit 23 - Error In Channel 23"]
#[inline]
pub fn err23(&mut self) -> _ERR23W {
_ERR23W { w: self }
}
#[doc = "Bit 24 - Error In Channel 24"]
#[inline]
pub fn err24(&mut self) -> _ERR24W {
_ERR24W { w: self }
}
#[doc = "Bit 25 - Error In Channel 25"]
#[inline]
pub fn err25(&mut self) -> _ERR25W {
_ERR25W { w: self }
}
#[doc = "Bit 26 - Error In Channel 26"]
#[inline]
pub fn err26(&mut self) -> _ERR26W {
_ERR26W { w: self }
}
#[doc = "Bit 27 - Error In Channel 27"]
#[inline]
pub fn err27(&mut self) -> _ERR27W {
_ERR27W { w: self }
}
#[doc = "Bit 28 - Error In Channel 28"]
#[inline]
pub fn err28(&mut self) -> _ERR28W {
_ERR28W { w: self }
}
#[doc = "Bit 29 - Error In Channel 29"]
#[inline]
pub fn err29(&mut self) -> _ERR29W {
_ERR29W { w: self }
}
#[doc = "Bit 30 - Error In Channel 30"]
#[inline]
pub fn err30(&mut self) -> _ERR30W {
_ERR30W { w: self }
}
#[doc = "Bit 31 - Error In Channel 31"]
#[inline]
pub fn err31(&mut self) -> _ERR31W {
_ERR31W { w: self }
}
}
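All of the generated writer proxies above implement the same read-modify-write pattern: clear the field's bits with an inverted mask, then OR in the new value shifted to the field's offset. A minimal, language-neutral sketch of that pattern in Python (the helper name and the 32-bit width are illustrative, not part of the generated API):

def write_field(bits: int, value: int, offset: int, mask: int = 0b1) -> int:
    # The same two steps as `self.w.bits &= !(...)` / `self.w.bits |= ...` above:
    bits &= ~(mask << offset) & 0xFFFFFFFF  # clear the field
    bits |= (value & mask) << offset        # set the new (masked) value
    return bits

# Setting then clearing bit 26 (the ERR26 field):
assert write_field(0x0, 1, 26) == 1 << 26
assert write_field(1 << 26, 0, 26) == 0x0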
| |
utils.py
|
import logging
import time
import os
import torch
from utils.lr_scheduler import WarmupMultiStepLR
from net import Network
def create_logger(cfg):
dataset = cfg.DATASET.DATASET
net_type = cfg.BACKBONE.TYPE
module_type = cfg.MODULE.TYPE
log_dir = os.path.join(cfg.OUTPUT_DIR, cfg.NAME, "logs")
if not os.path.exists(log_dir):
os.makedirs(log_dir)
time_str = time.strftime("%Y-%m-%d-%H-%M")
log_name = "{}_{}_{}_{}.log".format(dataset, net_type, module_type, time_str)
log_file = os.path.join(log_dir, log_name)
# set up logger
print("=> creating log {}".format(log_file))
head = "%(asctime)-15s %(message)s"
logging.basicConfig(filename=str(log_file), format=head)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
console = logging.StreamHandler()
logging.getLogger("").addHandler(console)
logger.info("---------------------Cfg is set as follow--------------------")
logger.info(cfg)
logger.info("-------------------------------------------------------------")
return logger, log_file
def get_optimizer(cfg, model):
base_lr = cfg.TRAIN.OPTIMIZER.BASE_LR
params = []
for name, p in model.named_parameters():
if p.requires_grad:
params.append({"params": p})
if cfg.TRAIN.OPTIMIZER.TYPE == "SGD":
optimizer = torch.optim.SGD(
params,
lr=base_lr,
momentum=cfg.TRAIN.OPTIMIZER.MOMENTUM,
weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,
nesterov=True,
)
elif cfg.TRAIN.OPTIMIZER.TYPE == "ADAM":
optimizer = torch.optim.Adam(
params,
lr=base_lr,
betas=(0.9, 0.999),
weight_decay=cfg.TRAIN.OPTIMIZER.WEIGHT_DECAY,
)
else:
raise NotImplementedError("Unsupported optimizer: {}".format(cfg.TRAIN.OPTIMIZER.TYPE))
return optimizer
def
|
(cfg, optimizer):
if cfg.TRAIN.LR_SCHEDULER.TYPE == "multistep":
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer,
cfg.TRAIN.LR_SCHEDULER.LR_STEP,
gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,
)
elif cfg.TRAIN.LR_SCHEDULER.TYPE == "cosine":
if cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END > 0:
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=cfg.TRAIN.LR_SCHEDULER.COSINE_DECAY_END, eta_min=1e-4
)
else:
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
optimizer, T_max=cfg.TRAIN.MAX_EPOCH, eta_min=1e-4
)
elif cfg.TRAIN.LR_SCHEDULER.TYPE == "warmup":
scheduler = WarmupMultiStepLR(
optimizer,
cfg.TRAIN.LR_SCHEDULER.LR_STEP,
gamma=cfg.TRAIN.LR_SCHEDULER.LR_FACTOR,
warmup_epochs=cfg.TRAIN.LR_SCHEDULER.WARM_EPOCH,
)
else:
raise NotImplementedError("Unsupported LR Scheduler: {}".format(cfg.TRAIN.LR_SCHEDULER.TYPE))
return scheduler
def get_model(cfg, num_classes, device, logger):
model = Network(cfg, mode="train", num_classes=num_classes)
if cfg.BACKBONE.FREEZE:
model.freeze_backbone()
logger.info("Backbone has been frozen")
if cfg.CPU_MODE:
model = model.to(device)
else:
model = torch.nn.DataParallel(model).cuda()
return model
def get_category_list(annotations, num_classes, cfg):
num_list = [0] * num_classes
cat_list = []
print("Weight List has been produced")
for anno in annotations:
category_id = anno["category_id"]
num_list[category_id] += 1
cat_list.append(category_id)
return num_list, cat_list
|
get_scheduler
|
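A hypothetical wiring of get_optimizer and get_scheduler from utils.py above. The cfg attribute names mirror exactly what the two functions read; the concrete values, the SimpleNamespace stand-in for the real config object, and the toy model are illustrative assumptions:

import torch.nn as nn
from types import SimpleNamespace as NS

cfg = NS(TRAIN=NS(
    MAX_EPOCH=90,
    OPTIMIZER=NS(TYPE="SGD", BASE_LR=0.1, MOMENTUM=0.9, WEIGHT_DECAY=1e-4),
    LR_SCHEDULER=NS(TYPE="multistep", LR_STEP=[30, 60], LR_FACTOR=0.1),
))
model = nn.Linear(8, 2)
optimizer = get_optimizer(cfg, model)
scheduler = get_scheduler(cfg, optimizer)
for epoch in range(cfg.TRAIN.MAX_EPOCH):
    # ... forward/backward and optimizer.step() per batch ...
    scheduler.step()  # advance the LR schedule once per epoch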
fp16_utils.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Trainer for mixed precision training."""
import warnings
import mxnet as mx
import gluonnlp as nlp
class FP16Trainer:
""" Trainer for mixed precision training.
Parameters
----------
trainer: gluon.Trainer
the original gluon Trainer object for fp32 training.
dynamic_loss_scale: bool. Default is True
whether to use dynamic loss scaling. This is recommended for optimizing model
parameters using FP16.
loss_scaler_params : dict
Key-word arguments to be passed to loss scaler constructor. For example,
`{"init_scale" : 2.**10, "scale_window" : 2000, "tolerance" : 0.05}`
for `DynamicLossScaler`.
See each `LossScaler` for a list of supported arguments.
"""
def __init__(self, trainer, dynamic_loss_scale=True, loss_scaler_params=None):
if trainer._kvstore_params['update_on_kvstore'] is not False and trainer._kvstore:
err = 'Only gluon.Trainer created with update_on_kvstore=False is supported.'
raise NotImplementedError(err)
self.fp32_trainer = trainer
loss_scaler_params = loss_scaler_params if loss_scaler_params else {}
self._scaler = DynamicLossScaler(**loss_scaler_params) if dynamic_loss_scale \
else StaticLossScaler(**loss_scaler_params)
# if the optimizer supports NaN check, we can always defer the NaN check to the optimizer
# TODO(haibin) this should be added via registry
self._support_nan_check = trainer._optimizer.__class__.__name__ == 'BERTAdam'
def backward(self, loss):
"""backward propagation with loss"""
with mx.autograd.record():
if isinstance(loss, (tuple, list)):
ls = [l * self._scaler.loss_scale for l in loss]
else:
ls = loss * self._scaler.loss_scale
mx.autograd.backward(ls)
def step(self, batch_size, max_norm=None):
"""Makes one step of parameter update. Should be called after
`fp16_optimizer.backward()`, and outside of `record()` scope.
Parameters
----------
batch_size : int
Batch size of data processed. Gradient will be normalized by `1/batch_size`.
Set this to 1 if you normalized loss manually with `loss = mean(loss)`.
max_norm : NDArray, optional, default is None
max value for global 2-norm of gradients.
"""
self.fp32_trainer.allreduce_grads()
step_size = batch_size * self._scaler.loss_scale
if max_norm:
_, ratio, is_finite = nlp.utils.grad_global_norm(self.fp32_trainer._params,
max_norm * self._scaler.loss_scale)
step_size = ratio * step_size
if self._support_nan_check:
self.fp32_trainer.update(step_size)
overflow = is_finite.asscalar() < 1
else:
overflow = is_finite.asscalar() < 1
if not overflow:
step_size = step_size.asscalar()
self.fp32_trainer.update(step_size)
else:
# TODO(haibin) optimize the performance when max_norm is not present
# sequentially adding isnan/isinf results may be slow
if self._support_nan_check:
self.fp32_trainer.update(step_size)
overflow = self._scaler.has_overflow(self.fp32_trainer._params)
else:
overflow = self._scaler.has_overflow(self.fp32_trainer._params)
if not overflow:
self.fp32_trainer.update(step_size)
# update scale based on overflow information
self._scaler.update_scale(overflow)
class LossScaler:
"""Abstract loss scaler"""
def has_overflow(self, params):
""" detect inf and nan """
is_not_finite = 0
for param in params:
if param.grad_req != 'null':
grad = param.list_grad()[0]
is_not_finite += mx.nd.contrib.isnan(grad).sum().astype('float32', copy=False)
is_not_finite += mx.nd.contrib.isinf(grad).sum().astype('float32', copy=False)
# NDArray is implicitly converted to bool
if is_not_finite == 0:
return False
else:
return True
def update_scale(self, overflow):
raise NotImplementedError()
class
|
(LossScaler):
"""Static loss scaler"""
def __init__(self, init_scale=1):
self.loss_scale = init_scale
def update_scale(self, overflow):
"""update loss scale"""
class DynamicLossScaler(LossScaler):
"""Class that manages dynamic loss scaling.
There are two problems regarding gradient scale when fp16 is used for training.
One is overflow: the fp16 gradient is too large that it causes NaN.
To combat such an issue, we need to scale down the gradient when such an event
is detected. The other is underflow: the gradient is too small such that the
precision suffers. This is hard to detect, though. What the dynamic loss scaler
does is start the scale at a relatively large value (e.g. 2**15).
Every time a NaN is detected in the gradient, the scale is reduced (by default)
by 2x. On the other hand, if no NaN is detected for a long time
(e.g. 2000 steps), the scale is increased (by default) by 2x."""
def __init__(self, init_scale=2.**10, scale_factor=2., scale_window=2000,
tolerance=0.):
self.loss_scale = init_scale
self.scale_factor = scale_factor
self.scale_window = scale_window
self.tolerance = tolerance
self._num_steps = 0
self._last_overflow_iter = -1
self._last_rescale_iter = -1
self._overflows_since_rescale = 0
def update_scale(self, overflow):
"""dynamically update loss scale"""
iter_since_rescale = self._num_steps - self._last_rescale_iter
if overflow:
self._last_overflow_iter = self._num_steps
self._overflows_since_rescale += 1
percentage = self._overflows_since_rescale / float(iter_since_rescale)
# we tolerate a certain amount of NaNs before actually scaling down
if percentage >= self.tolerance:
self.loss_scale /= self.scale_factor
self._last_rescale_iter = self._num_steps
self._overflows_since_rescale = 0
if self.loss_scale < 1:
warnings.warn('DynamicLossScaler: overflow detected. set loss_scale = %s'%
self.loss_scale)
elif (self._num_steps - self._last_overflow_iter) % self.scale_window == 0:
self.loss_scale *= self.scale_factor
self._last_rescale_iter = self._num_steps
self._num_steps += 1
|
StaticLossScaler
|
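A tiny sketch of the dynamic loss-scaling policy that DynamicLossScaler above implements, using only the class defined in this file. The scale_window of 3 is shortened from the default of 2000 purely to make the doubling visible:

scaler = DynamicLossScaler(init_scale=2.**10, scale_factor=2., scale_window=3)
for step in range(6):
    overflow = (step == 2)         # pretend step 2 produced inf/nan gradients
    scaler.update_scale(overflow)  # halves the scale on overflow; doubles it
                                   # after scale_window clean steps
    print(step, scaler.loss_scale)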
control.py
|
# Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
def evaluate(self):
|
"""
Method required by RAVEN to run this as a ControlFunction in a LogicalModel.
@ In, self, object, object to store members on
@ Out, model, str, the name of the external model that
will be executed by the hybrid model
"""
model = None
if self.x > 0.5 and self.y > 1.5:
model = 'poly'
else:
model = 'exp'
return model
|
|
apps.py
|
from __future__ import unicode_literals
from django.apps import AppConfig
class
|
(AppConfig):
name = 'meat'
|
MeatConfig
|
main.ts
|
import 'reflect-metadata'
import { NestFactory } from '@nestjs/core';
import { AppModule } from './app.module';
async function
|
() {
const app = await NestFactory.create(AppModule);
await app.listen(3000);
}
bootstrap();
|
bootstrap
|
303_range_sum_query_immutable.py
|
from itertools import accumulate
from typing import List
class NumArray:
def __init__(self, nums: List[int]):
self.n = list(accumulate(nums))
def sumRange(self, left: int, right: int) -> int:
|
return self.n[right] - (self.n[left - 1] if left > 0 else 0)
|
|
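The NumArray solution above is the classic prefix-sum technique: self.n[i] stores sum(nums[:i+1]), so any range sum collapses to a single subtraction. A small worked example:

from itertools import accumulate

nums = [-2, 0, 3, -5, 2, -1]
n = list(accumulate(nums))  # [-2, -2, 1, -4, -2, -3]
# sumRange(2, 5) = n[5] - n[1] = -3 - (-2) = -1
assert n[5] - n[1] == sum(nums[2:6]) == -1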
attack.py
|
"""
Implement white-box attacks on top of IBM ART.
@author: Ying Meng (y(dot)meng201011(at)gmail(dot)com)
"""
import numpy as np
import torch
# from art.attacks.evasion.fast_gradient import FastGradientMethod
# from art.attacks.evasion.projected_gradient_descent import ProjectedGradientDescent
from art.attacks.evasion.carlini import CarliniL2Method, CarliniLInfMethod
from art.attacks.evasion.deepfool import DeepFool
from art.attacks.evasion.saliency_map import SaliencyMapMethod
from art.attacks.evasion.iterative_method import BasicIterativeMethod
from art.attacks.evasion.spatial_transformation import SpatialTransformation
from art.attacks.evasion.hop_skip_jump import HopSkipJump
from art.attacks.evasion.zoo import ZooAttack
from attacks.fast_gradient import FastGradientMethod
from attacks.pgd import ProjectedGradientDescent
from attacks.utils import WHITEBOX_ATTACK as ATTACK
def generate(model, data_loader, attack_args, device=None):
"""
Generate adversarial examples.
:param model: an instance of art.classifiers.classifier. The targeted model.
:param data_loader: a tuple of benign samples and corresponding true labels.
:param attack_args: dictionary. adversarial configurations.
:param device: string. cuda (for gpu) or cpu.
:return:
"""
attack = attack_args.get('attack').lower()
eot = attack_args.get('eot')
if eot and attack not in [ATTACK.FGSM.value, ATTACK.PGD.value]:
raise NotImplementedError("`EOT` is not supported for {} attack yet.".format(attack))
print(">>> Generating {}(EOT:{}) examples.".format(attack_args.get('description'),
"ON" if eot else "OFF"))
if device is None:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
images, labels = data_loader
if attack == ATTACK.FGSM.value:
return _fgsm(model, images, labels, attack_args)
elif attack == ATTACK.CW.value:
return _cw(model, images, labels, attack_args)
elif attack == ATTACK.PGD.value:
return _pgd(model, images, labels, attack_args)
elif attack == ATTACK.BIM.value:
return _bim(model, images, labels, attack_args)
elif attack == ATTACK.JSMA.value:
return _jsma(model, images, labels, attack_args)
elif attack == ATTACK.DF.value:
return _df(model, images, labels, attack_args)
elif attack == ATTACK.MIM.value:
return _mim(model, images, labels, attack_args)
elif attack == ATTACK.OP.value:
return _op(model, images, labels, attack_args)
elif attack == ATTACK.HOP_SKIP_JUMP.value:
return _hop_skip_jump(model, images, labels, attack_args)
elif attack == ATTACK.SPATIAL_TRANS.value:
return _spatial(model, images, labels, attack_args)
elif attack == ATTACK.ZOO.value:
return _zoo(model, images, labels, attack_args)
else:
raise ValueError('{} is not supported.'.format(attack))
def _fgsm(model, data, labels, attack_args):
"""
Fast Gradient Sign Method
Explaining and Harnessing Adversarial Examples
by Ian J. Goodfellow, Jonathon Shlens, Christian Szegedy
``https://arxiv.org/abs/1412.6572``
:param model:
:param data:
:param labels:
:param attack_args:
:param distribution: dictionary. The configuration of the distribution (for EOT).
:return:
"""
eps = attack_args.get('eps', 0.3)
targeted = attack_args.get('targeted', False)
num_random_init = attack_args.get('num_random_init', 0)
minimal = attack_args.get('minimal', False)
if attack_args.get("eot"):
distribution = attack_args.get('distribution', None)
else:
distribution = None
attacker = FastGradientMethod(model, eps=eps, eps_step=eps, targeted=targeted,
num_random_init=num_random_init, minimal=minimal,
distribution=distribution)
return attacker.generate(data, labels)
def _cw(model, data, labels, attack_args):
"""
Carlini & Wanger
Towards Evaluating the Robustness of Neural Networks
by Nicholas Carlini, David Wagner
``https://arxiv.org/abs/1608.04644``
:param model:
:param data:
:param labels:
:param attack_args:
:return:
"""
norm = attack_args.get('norm').lower()
lr = attack_args.get('lr')
max_iter = attack_args.get('max_iter', 10)
# use default values for the following arguments
confidence = attack_args.get('confidence', 0.0)
targeted = attack_args.get('targeted', False)
init_const = attack_args.get('init_const', 0.01)
max_halving = attack_args.get('max_halving', 5)
max_doubling = attack_args.get('max_doubling', 5)
if norm == 'l2':
print('>>> Generating CW_l2 examples.')
binary_search_steps = attack_args.get('binary_search_steps', 10)
attacker = CarliniL2Method(classifier=model, confidence=confidence, targeted=targeted, learning_rate=lr,
binary_search_steps=binary_search_steps, max_iter=max_iter,
initial_const=init_const, max_halving=max_halving,
max_doubling=max_doubling)
elif norm == 'linf':
print('>>> Generating CW_linf examples.')
eps = attack_args.get('eps', 0.3)
attacker = CarliniLInfMethod(classifier=model, confidence=confidence, targeted=targeted, learning_rate=lr,
max_iter=max_iter, max_halving=max_halving, max_doubling=max_doubling, eps=eps)
else:
raise ValueError('Supported norms are `l2` and `linf`, but found {}.'.format(norm))
return attacker.generate(data, labels)
def _pgd(model, data, labels, attack_args):
"""
Projected Gradient Descent
Towards deep learning models resistant to adversarial attacks
by Aleksander Madry, Aleksandar Makelov, Ludwig Schmidt, Dimitris Tsipras, and Adrian Vladu.
``https://arxiv.org/abs/1706.06083``
:param model:
:param data:
:param labels:
:param attack_args:
:return:
"""
eps = attack_args.get('eps', 0.3)
eps_step = attack_args.get('eps_step', eps/10.)
max_iter = attack_args.get('max_iter', 10)
norm = _get_norm_value(attack_args.get('norm', 'linf'))
targeted = attack_args.get('targeted', False)
num_random_init = attack_args.get('num_random_init', 0)
random_eps = attack_args.get('random_eps', False)
if attack_args.get("eot"):
distribution = attack_args.get('distribution', None)
else:
distribution = None
attacker = ProjectedGradientDescent(classifier=model, norm=norm, eps=eps, eps_step=eps_step,
max_iter=max_iter, targeted=targeted,
num_random_init=num_random_init, random_eps=random_eps,
distribution=distribution)
return attacker.generate(data, labels)
def _bim(model, data, labels, attack_args):
"""
Basic Iterative Method
ADVERSARIAL EXAMPLES IN THE PHYSICAL WORLD
Alexey Kurakin, Ian J. Goodfellow, Samy Bengio
``https://arxiv.org/pdf/1607.02533.pdf``
:param model:
:param data:
:param labels:
:param attack_args:
:return:
"""
eps = attack_args.get('eps', 0.3)
eps_step = attack_args.get('eps_step', eps/10.)
max_iter = attack_args.get('max_iter', 100)
|
def _jsma(model, data, labels, attack_args):
theta = attack_args.get('theta', 0.15)
gamma = attack_args.get('gamma', 0.5)
batch_size = attack_args.get('batch_size', 1)
attacker = SaliencyMapMethod(classifier=model, theta=theta, gamma=gamma, batch_size=batch_size)
return attacker.generate(data, labels)
def _df(model, data, labels, attack_args):
max_iter = attack_args.get('max_iter', 100)
eps = attack_args.get('eps', 0.01)
nb_grads = attack_args.get('nb_grads', 10)
attacker = DeepFool(classifier=model, max_iter=max_iter, epsilon=eps, nb_grads=nb_grads)
return attacker.generate(data, labels)
def _mim(model, data, labels, attack_args):
raise NotImplementedError
def _op(model, data, labels, attack_args):
raise NotImplementedError
def _spatial(model, data, labels, attack_args):
max_translation = attack_args.get('max_translation', 0.2)
num_translations = attack_args.get('num_translations', 1)
max_rotation = attack_args.get('max_rotation', 10)
num_rotations = attack_args.get('num_rotations', 1)
attacker = SpatialTransformation(classifier=model,
max_translation=max_translation, num_translations=num_translations,
max_rotation=max_rotation, num_rotations=num_rotations)
return attacker.generate(data, labels)
def _hop_skip_jump(model, data, labels, attack_args):
norm = _get_norm_value(attack_args.get('norm', 'l2'))
max_iter = attack_args.get('max_iter', 50)
max_eval = attack_args.get('max_eval', 10000)
init_eval = attack_args.get('init_eval', 100)
init_size = attack_args.get('init_size', 100)
targeted = attack_args.get('targeted', False)
attacker = HopSkipJump(classifier=model, targeted=targeted, norm=norm,
max_iter=max_iter, max_eval=max_eval,
init_eval=init_eval, init_size=init_size)
return attacker.generate(data, labels)
def _zoo(model, data, labels, attack_args):
lr = attack_args.get('learning_rate', 0.01)
max_iter = attack_args.get('max_iter', 10)
binary_search_steps = attack_args.get('binary_search_steps', 1)
confidence = attack_args.get('confidence', 0.0)
targeted = attack_args.get('targeted', False)
init_const = attack_args.get('init_const', 1e-3)
abort_early = attack_args.get('abort_early', True)
use_resize = attack_args.get('use_resize', True)
use_importance = attack_args.get('use_importance', True)
nb_parallel = attack_args.get('nb_parallel', 128)
variable_h = attack_args.get('variable_h', 1e-4)
attacker = ZooAttack(classifier=model, confidence=confidence, targeted=targeted,
learning_rate=lr, max_iter=max_iter, binary_search_steps=binary_search_steps,
initial_const=init_const, abort_early=abort_early, use_resize=use_resize,
use_importance=use_importance, nb_parallel=nb_parallel, variable_h=variable_h)
return attacker.generate(data, labels)
def _get_norm_value(norm):
"""
Convert a string norm to a numeric value.
:param norm:
:return:
"""
norm = norm.lower()
if norm == 'linf':
value = np.inf
elif norm == 'l2':
value = 2
else:
raise ValueError('Supported norms are `l2` and `linf`, but found {}.'.format(norm))
return value
|
targeted = attack_args.get('targeted', False)
attacker = BasicIterativeMethod(classifier=model, eps=eps, eps_step=eps_step,
max_iter=max_iter, targeted=targeted)
return attacker.generate(data, labels)
|
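For intuition behind the wrappers above, a minimal FGSM sketch in plain PyTorch, independent of the ART/attacks classes. The clamp to [0, 1] assumes image inputs normalized to that range; the function name is illustrative:

import torch
import torch.nn.functional as F

def fgsm_sketch(model, x, y, eps=0.3):
    # One-step attack (Goodfellow et al.): x_adv = x + eps * sign(grad_x loss).
    x = x.clone().detach().requires_grad_(True)
    loss = F.cross_entropy(model(x), y)
    loss.backward()
    return (x + eps * x.grad.sign()).clamp(0.0, 1.0).detach()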
policy_test.go
|
package bootstrappolicy_test
import (
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/ghodss/yaml"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/util/diff"
"github.com/openshift/origin/pkg/api/v1"
"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
// install all APIs
_ "github.com/openshift/origin/pkg/api/install"
)
func TestOpenshiftRoles(t *testing.T)
|
func TestBootstrapProjectRoleBindings(t *testing.T) {
roleBindings := bootstrappolicy.GetBootstrapServiceAccountProjectRoleBindings("myproject")
list := &api.List{}
for i := range roleBindings {
list.Items = append(list.Items, &roleBindings[i])
}
testObjects(t, list, "bootstrap_service_account_project_role_bindings.yaml")
}
func TestBootstrapClusterRoleBindings(t *testing.T) {
roleBindings := bootstrappolicy.GetBootstrapClusterRoleBindings()
list := &api.List{}
for i := range roleBindings {
list.Items = append(list.Items, &roleBindings[i])
}
testObjects(t, list, "bootstrap_cluster_role_bindings.yaml")
}
func TestBootstrapClusterRoles(t *testing.T) {
roles := bootstrappolicy.GetBootstrapClusterRoles()
list := &api.List{}
for i := range roles {
list.Items = append(list.Items, &roles[i])
}
testObjects(t, list, "bootstrap_cluster_roles.yaml")
}
func testObjects(t *testing.T, list *api.List, fixtureFilename string) {
filename := filepath.Join("../../../../test/fixtures/bootstrappolicy", fixtureFilename)
expectedYAML, err := ioutil.ReadFile(filename)
if err != nil {
t.Fatal(err)
}
if err := runtime.EncodeList(api.Codecs.LegacyCodec(v1.SchemeGroupVersion), list.Items); err != nil {
t.Fatal(err)
}
jsonData, err := runtime.Encode(api.Codecs.LegacyCodec(v1.SchemeGroupVersion), list)
if err != nil {
t.Fatal(err)
}
yamlData, err := yaml.JSONToYAML(jsonData)
if err != nil {
t.Fatal(err)
}
if string(yamlData) != string(expectedYAML) {
t.Errorf("Bootstrap policy data does not match the test fixture in %s", filename)
const updateEnvVar = "UPDATE_BOOTSTRAP_POLICY_FIXTURE_DATA"
if os.Getenv(updateEnvVar) == "true" {
if err := ioutil.WriteFile(filename, []byte(yamlData), os.FileMode(0755)); err == nil {
t.Logf("Updated data in %s", filename)
t.Logf("Verify the diff, commit changes, and rerun the tests")
} else {
t.Logf("Could not update data in %s: %v", filename, err)
}
} else {
t.Logf("Diff between bootstrap data and fixture data in %s:\n-------------\n%s", filename, diff.StringDiff(string(yamlData), string(expectedYAML)))
t.Logf("If the change is expected, re-run with %s=true to update the fixtures", updateEnvVar)
}
}
}
|
{
roles := bootstrappolicy.GetBootstrapOpenshiftRoles("openshift")
list := &api.List{}
for i := range roles {
list.Items = append(list.Items, &roles[i])
}
testObjects(t, list, "bootstrap_openshift_roles.yaml")
}
|
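testObjects above is the golden-file (fixture) pattern: serialize the generated objects, diff them against a committed file, and optionally regenerate the fixture when an env var is set. A minimal sketch of the same idea in Python (the function name is illustrative; the env var mirrors the Go test):

import os

def check_golden(generated: str, fixture_path: str) -> bool:
    expected = ""
    if os.path.exists(fixture_path):
        with open(fixture_path) as f:
            expected = f.read()
    if generated == expected:
        return True
    if os.environ.get("UPDATE_BOOTSTRAP_POLICY_FIXTURE_DATA") == "true":
        with open(fixture_path, "w") as f:
            f.write(generated)  # regenerate; verify the diff and commit
    return False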
sigma_and.rs
|
use ergotree_ir::mir::constant::TryExtractInto;
use ergotree_ir::mir::sigma_and::SigmaAnd;
use ergotree_ir::mir::value::Value;
use ergotree_ir::sigma_protocol::sigma_boolean::cand::Cand;
use ergotree_ir::sigma_protocol::sigma_boolean::SigmaProp;
use crate::eval::env::Env;
use crate::eval::EvalContext;
use crate::eval::EvalError;
use crate::eval::Evaluable;
impl Evaluable for SigmaAnd {
fn eval(&self, env: &Env, ctx: &mut EvalContext) -> Result<Value, EvalError>
|
}
#[allow(clippy::unwrap_used)]
#[allow(clippy::panic)]
#[cfg(test)]
mod tests {
use ergotree_ir::sigma_protocol::sigma_boolean::SigmaBoolean;
use ergotree_ir::sigma_protocol::sigma_boolean::SigmaConjecture;
use std::convert::TryInto;
use std::rc::Rc;
use crate::eval::context::Context;
use crate::eval::tests::eval_out;
use super::*;
use ergotree_ir::mir::expr::Expr;
use proptest::collection;
use proptest::prelude::*;
use sigma_test_util::force_any_val;
proptest! {
#![proptest_config(ProptestConfig::with_cases(8))]
#[test]
fn eval(sigmaprops in collection::vec(any::<SigmaProp>(), 2..10)) {
let items = sigmaprops.clone().into_iter().map(|sp| Expr::Const(sp.into())).collect();
let expr: Expr = SigmaAnd::new(items).unwrap().into();
let ctx = Rc::new(force_any_val::<Context>());
let res = eval_out::<SigmaProp>(&expr, ctx);
let expected_sb: Vec<SigmaBoolean> = sigmaprops.into_iter().map(|sp| sp.into()).collect();
prop_assert!(matches!(res.clone().into(), SigmaBoolean::SigmaConjecture(SigmaConjecture::Cand(_))));
if let SigmaBoolean::SigmaConjecture(SigmaConjecture::Cand(Cand {items: actual_sb})) = res.into() {
prop_assert_eq!(actual_sb, expected_sb.try_into().unwrap());
}
}
}
}
|
{
let items_v_res = self.items.try_mapped_ref(|it| it.eval(env, ctx));
let items_sigmabool = items_v_res?
.try_mapped(|it| it.try_extract_into::<SigmaProp>())?
.mapped(|it| it.value().clone());
Ok(Value::SigmaProp(Box::new(SigmaProp::new(
Cand::normalized(items_sigmabool),
))))
}
|
nodejs_test.go
|
//go:build test_integration && test_local
/*
Copyright 2017 The Nuclio Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package test
import (
"testing"
"github.com/nuclio/nuclio/pkg/processor/build/runtime/test/suite"
"github.com/nuclio/nuclio/pkg/processor/trigger/http/test/suite"
"github.com/stretchr/testify/suite"
)
type TestSuite struct {
buildsuite.TestSuite
}
func (suite *TestSuite) SetupSuite() {
suite.TestSuite.SetupSuite()
suite.TestSuite.RuntimeSuite = suite
suite.TestSuite.ArchivePattern = "nodejs"
}
func (suite *TestSuite) GetFunctionInfo(functionName string) buildsuite.FunctionInfo {
functionInfo := buildsuite.FunctionInfo{
Runtime: "nodejs",
}
switch functionName {
case "reverser":
functionInfo.Path = []string{suite.GetTestFunctionsDir(), "common", "reverser", "nodejs", "handler.js"}
functionInfo.Handler = "handler"
case "json-parser-with-function-config":
functionInfo.Path = []string{suite.GetTestFunctionsDir(), "common", "json-parser-with-function-config", "nodejs"}
case "json-parser-with-inline-function-config":
functionInfo.Path = []string{suite.GetTestFunctionsDir(), "common", "json-parser-with-inline-function-config", "nodejs", "handler.js"}
case "invalid-inline-config":
functionInfo.Path = []string{suite.GetTestFunctionsDir(), "common", "invalid-inline-config", "nodejs", "handler.js"}
case "long-initialization":
functionInfo.Path = []string{suite.GetTestFunctionsDir(), "common", "long-initialization", "nodejs", "sleepy.js"}
case "context-init-fail":
functionInfo.Path = []string{suite.GetTestFunctionsDir(), "common", "context-init-fail", "nodejs", "contextinitfail.js"}
default:
suite.Logger.InfoWith("Test skipped", "functionName", functionName)
functionInfo.Skip = true
}
return functionInfo
}
func (suite *TestSuite) TestBuildWithContextInitializer() {
createFunctionOptions := suite.GetDeployOptions("context-init",
suite.GetFunctionPath(suite.GetTestFunctionsDir(), "common", "context-init", "nodejs", "contextinit.js"))
suite.DeployFunctionAndRequest(createFunctionOptions,
&httpsuite.Request{
RequestMethod: "POST",
RequestBody: "10",
ExpectedResponseBody: "20",
})
}
func TestIntegrationSuite(t *testing.T)
|
{
if testing.Short() {
return
}
suite.Run(t, new(TestSuite))
}
|
|
edge_image.py
|
import cv2
import argparse
import numpy as np
def process_edge_image(input, output):
print('edge', input, output)
img = cv2.imread(input)
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
img = cv2.GaussianBlur(img, (3, 3), 0)
|
edges = cv2.Canny(img, ret * 0.5, ret)
cv2.imwrite(output, 255 - edges)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('input', help='input image')
parser.add_argument('output', help='output image')
args = parser.parse_args()
process_edge_image(args.input, args.output)
|
ret, thr = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)
|
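The completion above wires Otsu's method into Canny: cv2.threshold with THRESH_OTSU returns the histogram-derived global threshold ret, which then serves as Canny's high hysteresis threshold, with ret * 0.5 as the low one. A self-contained sketch on a synthetic image (the random array is a stand-in for a real grayscale photo):

import cv2
import numpy as np

img = (np.random.rand(64, 64) * 255).astype(np.uint8)
ret, _ = cv2.threshold(img, 0, 255, cv2.THRESH_OTSU)  # ret = Otsu threshold
edges = cv2.Canny(img, ret * 0.5, ret)                # low/high hysteresis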
test-upgrade.py
|
import pytest
import os
import platform
import time
import requests
from validators import (
validate_dns_dashboard,
validate_storage,
validate_ingress,
validate_ambassador,
validate_gpu,
validate_registry,
validate_forward,
validate_metrics_server,
validate_prometheus,
validate_fluentd,
validate_jaeger,
validate_kubeflow,
validate_cilium,
validate_metallb_config,
validate_multus,
)
from subprocess import check_call, CalledProcessError, check_output
from utils import (
microk8s_enable,
wait_for_pod_state,
wait_for_installation,
run_until_success,
)
upgrade_from = os.environ.get('UPGRADE_MICROK8S_FROM', 'beta')
# Have UPGRADE_MICROK8S_TO point to a file to upgrade to that file
upgrade_to = os.environ.get('UPGRADE_MICROK8S_TO', 'edge')
under_time_pressure = os.environ.get('UNDER_TIME_PRESSURE', 'False')
class TestUpgrade(object):
|
def is_container():
'''
Returns: True if the deployment is in a VM/container.
'''
try:
if os.path.isdir('/run/systemd/system'):
container = check_output('sudo systemd-detect-virt --container'.split())
print("Tests are running in {}".format(container))
return True
except CalledProcessError:
print("systemd-detect-virt did not detect a container")
if os.path.exists('/run/container_type'):
return True
try:
check_call("sudo grep -E (lxc|hypervisor) /proc/1/environ /proc/cpuinfo".split())
print("Tests are running in an undetectable container")
return True
except CalledProcessError:
print("no indication of a container in /proc")
return False
|
"""
Validates a microk8s upgrade path
"""
def test_upgrade(self):
"""
Deploy, probe, upgrade, validate nothing broke.
"""
print("Testing upgrade from {} to {}".format(upgrade_from, upgrade_to))
cmd = "sudo snap install microk8s --classic --channel={}".format(upgrade_from)
run_until_success(cmd)
wait_for_installation()
if is_container():
# In some setups (eg LXC on GCE) the hashsize nf_conntrack file under
# sys is marked as rw but any update on it fails, causing kube-proxy
# to fail.
here = os.path.dirname(os.path.abspath(__file__))
apply_patch = os.path.join(here, "patch-kube-proxy.sh")
check_call("sudo {}".format(apply_patch).split())
# Run through the validators and
# select those that were valid for the original snap
test_matrix = {}
try:
enable = microk8s_enable("dns")
wait_for_pod_state("", "kube-system", "running", label="k8s-app=kube-dns")
assert "Nothing to do for" not in enable
enable = microk8s_enable("dashboard")
assert "Nothing to do for" not in enable
validate_dns_dashboard()
test_matrix['dns_dashboard'] = validate_dns_dashboard
except:
print('Will not test dns-dashboard')
try:
enable = microk8s_enable("storage")
assert "Nothing to do for" not in enable
validate_storage()
test_matrix['storage'] = validate_storage
except:
print('Will not test storage')
try:
enable = microk8s_enable("ingress")
assert "Nothing to do for" not in enable
validate_ingress()
test_matrix['ingress'] = validate_ingress
except:
print('Will not test ingress')
try:
enable = microk8s_enable("gpu")
assert "Nothing to do for" not in enable
validate_gpu()
test_matrix['gpu'] = validate_gpu
except:
print('Will not test gpu')
try:
enable = microk8s_enable("registry")
assert "Nothing to do for" not in enable
validate_registry()
test_matrix['registry'] = validate_registry
except:
print('Will not test registry')
try:
validate_forward()
test_matrix['forward'] = validate_forward
except:
print('Will not test port forward')
try:
enable = microk8s_enable("metrics-server")
assert "Nothing to do for" not in enable
validate_metrics_server()
test_matrix['metrics_server'] = validate_metrics_server
except:
print('Will not test the metrics server')
# AMD64 only tests
if platform.machine() == 'x86_64' and under_time_pressure == 'False':
'''
# Prometheus operator on our lxc is crashlooping; disabling the test for now.
try:
enable = microk8s_enable("prometheus", timeout_insec=30)
assert "Nothing to do for" not in enable
validate_prometheus()
test_matrix['prometheus'] = validate_prometheus
except:
print('Will not test the prometheus')
# The kubeflow deployment is huge. It will not fit comfortably
# with the rest of the addons on the same machine during an upgrade
# we will need to find another way to test it.
try:
enable = microk8s_enable("kubeflow", timeout_insec=30)
assert "Nothing to do for" not in enable
validate_kubeflow()
test_matrix['kubeflow'] = validate_kubeflow
except:
print('Will not test kubeflow')
'''
try:
enable = microk8s_enable("fluentd", timeout_insec=30)
assert "Nothing to do for" not in enable
validate_fluentd()
test_matrix['fluentd'] = validate_fluentd
except:
print('Will not test the fluentd')
try:
enable = microk8s_enable("jaeger", timeout_insec=30)
assert "Nothing to do for" not in enable
validate_jaeger()
test_matrix['jaeger'] = validate_jaeger
except:
print('Will not test the jaeger addon')
try:
enable = microk8s_enable("cilium", timeout_insec=300)
assert "Nothing to do for" not in enable
validate_cilium()
test_matrix['cilium'] = validate_cilium
except:
print('Will not test the cilium addon')
try:
ip_ranges = (
"192.168.0.105-192.168.0.105,192.168.0.110-192.168.0.111,192.168.1.240/28"
)
enable = microk8s_enable("{}:{}".format("metallb", ip_ranges), timeout_insec=500)
assert "MetalLB is enabled" in enable and "Nothing to do for" not in enable
validate_metallb_config(ip_ranges)
test_matrix['metallb'] = validate_metallb_config
except:
print("Will not test the metallb addon")
try:
enable = microk8s_enable("multus", timeout_insec=150)
assert "Nothing to do for" not in enable
validate_multus()
test_matrix['multus'] = validate_multus
except:
print('Will not test the multus addon')
# Refresh the snap to the target
if upgrade_to.endswith('.snap'):
cmd = "sudo snap install {} --classic --dangerous".format(upgrade_to)
else:
cmd = "sudo snap refresh microk8s --channel={}".format(upgrade_to)
run_until_success(cmd)
# Allow for the refresh to be processed
time.sleep(10)
wait_for_installation()
# Test any validations that were valid for the original snap
for test, validation in test_matrix.items():
print("Testing {}".format(test))
validation()
if not is_container():
# On lxc umount docker overlay is not permitted.
check_call("sudo snap remove microk8s".split())
|
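test_upgrade above follows a try/validate/re-validate pattern: each addon is enabled and validated on the old snap, only the validators that passed are recorded in test_matrix, and exactly those are re-run after the refresh. A stripped-down sketch of that control flow (the enable/validate callables are placeholders):

test_matrix = {}

def try_register(name, enable, validate):
    try:
        enable()
        validate()
        test_matrix[name] = validate  # only record what worked pre-upgrade
    except Exception:
        print('Will not test {}'.format(name))

# ... try_register(...) for each addon, refresh the snap, then:
for name, validate in test_matrix.items():
    print("Testing {}".format(name))
    validate()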
main_test.go
|
// Copyright 2019 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sdsingressk8sca
import (
"testing"
"istio.io/istio/pkg/test/framework"
"istio.io/istio/pkg/test/framework/components/galley"
"istio.io/istio/pkg/test/framework/components/istio"
"istio.io/istio/pkg/test/framework/components/pilot"
"istio.io/istio/pkg/test/framework/resource"
"istio.io/istio/pkg/test/framework/resource/environment"
"istio.io/istio/tests/integration/security/sds_ingress/util"
)
var (
inst istio.Instance
g galley.Instance
p pilot.Instance
)
func
|
(m *testing.M) {
// Integration test for the ingress SDS multiple Gateway flow when
// the control plane certificate provider is k8s CA.
framework.
NewSuite("sds_ingress_k8sca", m).
RequireSingleCluster().
SetupOnEnv(environment.Kube, istio.Setup(&inst, setupConfig)).
Setup(func(ctx resource.Context) (err error) {
if g, err = galley.New(ctx, galley.Config{}); err != nil {
return err
}
if p, err = pilot.New(ctx, pilot.Config{
Galley: g,
}); err != nil {
return err
}
return nil
}).
Run()
}
func setupConfig(cfg *istio.Config) {
if cfg == nil {
return
}
cfg.ControlPlaneValues = `
values:
global:
pilotCertProvider: kubernetes
`
}
func TestMtlsGatewaysK8sca(t *testing.T) {
framework.
NewTest(t).
RequiresEnvironment(environment.Kube).
Run(func(ctx framework.TestContext) {
util.RunTestMultiMtlsGateways(ctx, inst, g)
})
}
func TestTlsGatewaysK8sca(t *testing.T) {
framework.
NewTest(t).
RequiresEnvironment(environment.Kube).
Run(func(ctx framework.TestContext) {
util.RunTestMultiTLSGateways(ctx, inst, g)
})
}
|
TestMain
|
app.js
|
/******/ (function(modules) { // webpackBootstrap
/******/ // The module cache
/******/ var installedModules = {};
/******/
/******/ // The require function
/******/ function __webpack_require__(moduleId) {
/******/
/******/ // Check if module is in cache
/******/ if(installedModules[moduleId]) {
/******/ return installedModules[moduleId].exports;
/******/ }
/******/ // Create a new module (and put it into the cache)
/******/ var module = installedModules[moduleId] = {
/******/ i: moduleId,
/******/ l: false,
/******/ exports: {}
/******/ };
/******/
/******/ // Execute the module function
/******/ modules[moduleId].call(module.exports, module, module.exports, __webpack_require__);
/******/
/******/ // Flag the module as loaded
/******/ module.l = true;
/******/
/******/ // Return the exports of the module
/******/ return module.exports;
/******/ }
/******/
/******/
/******/ // expose the modules object (__webpack_modules__)
/******/ __webpack_require__.m = modules;
/******/
/******/ // expose the module cache
/******/ __webpack_require__.c = installedModules;
/******/
/******/ // define getter function for harmony exports
/******/ __webpack_require__.d = function(exports, name, getter) {
/******/ if(!__webpack_require__.o(exports, name)) {
/******/ Object.defineProperty(exports, name, { enumerable: true, get: getter });
/******/ }
/******/ };
/******/
/******/ // define __esModule on exports
/******/ __webpack_require__.r = function(exports) {
/******/ if(typeof Symbol !== 'undefined' && Symbol.toStringTag) {
/******/ Object.defineProperty(exports, Symbol.toStringTag, { value: 'Module' });
/******/ }
/******/ Object.defineProperty(exports, '__esModule', { value: true });
/******/ };
/******/
/******/ // create a fake namespace object
/******/ // mode & 1: value is a module id, require it
/******/ // mode & 2: merge all properties of value into the ns
/******/ // mode & 4: return value when already ns object
/******/ // mode & 8|1: behave like require
/******/ __webpack_require__.t = function(value, mode) {
/******/ if(mode & 1) value = __webpack_require__(value);
/******/ if(mode & 8) return value;
/******/ if((mode & 4) && typeof value === 'object' && value && value.__esModule) return value;
/******/ var ns = Object.create(null);
/******/ __webpack_require__.r(ns);
/******/ Object.defineProperty(ns, 'default', { enumerable: true, value: value });
/******/ if(mode & 2 && typeof value != 'string') for(var key in value) __webpack_require__.d(ns, key, function(key) { return value[key]; }.bind(null, key));
/******/ return ns;
/******/ };
/******/
/******/ // getDefaultExport function for compatibility with non-harmony modules
/******/ __webpack_require__.n = function(module) {
/******/ var getter = module && module.__esModule ?
/******/ function getDefault() { return module['default']; } :
/******/ function getModuleExports() { return module; };
/******/ __webpack_require__.d(getter, 'a', getter);
/******/ return getter;
/******/ };
/******/
/******/ // Object.prototype.hasOwnProperty.call
/******/ __webpack_require__.o = function(object, property) { return Object.prototype.hasOwnProperty.call(object, property); };
/******/
/******/ // __webpack_public_path__
/******/ __webpack_require__.p = "/";
/******/
/******/
/******/ // Load entry module and return exports
/******/ return __webpack_require__(__webpack_require__.s = 0);
/******/ })
/************************************************************************/
/******/ ({
/***/ "./node_modules/alpinejs/dist/alpine.js":
/*!**********************************************!*\
!*** ./node_modules/alpinejs/dist/alpine.js ***!
\**********************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
(function (global, factory) {
true ? module.exports = factory() :
undefined;
}(this, (function () { 'use strict';
function _defineProperty(obj, key, value) {
if (key in obj) {
Object.defineProperty(obj, key, {
value: value,
enumerable: true,
configurable: true,
writable: true
});
} else {
obj[key] = value;
}
return obj;
}
function ownKeys(object, enumerableOnly) {
var keys = Object.keys(object);
if (Object.getOwnPropertySymbols) {
var symbols = Object.getOwnPropertySymbols(object);
if (enumerableOnly) symbols = symbols.filter(function (sym) {
return Object.getOwnPropertyDescriptor(object, sym).enumerable;
});
keys.push.apply(keys, symbols);
}
return keys;
}
function _objectSpread2(target) {
for (var i = 1; i < arguments.length; i++) {
var source = arguments[i] != null ? arguments[i] : {};
if (i % 2) {
ownKeys(Object(source), true).forEach(function (key) {
_defineProperty(target, key, source[key]);
});
} else if (Object.getOwnPropertyDescriptors) {
Object.defineProperties(target, Object.getOwnPropertyDescriptors(source));
} else {
ownKeys(Object(source)).forEach(function (key) {
Object.defineProperty(target, key, Object.getOwnPropertyDescriptor(source, key));
});
}
}
return target;
}
// Thanks @stimulus:
// https://github.com/stimulusjs/stimulus/blob/master/packages/%40stimulus/core/src/application.ts
function domReady() {
return new Promise(resolve => {
if (document.readyState == "loading") {
document.addEventListener("DOMContentLoaded", resolve);
} else {
resolve();
}
});
}
function arrayUnique(array) {
return Array.from(new Set(array));
}
function isTesting() {
return navigator.userAgent.includes("Node.js") || navigator.userAgent.includes("jsdom");
}
function checkedAttrLooseCompare(valueA, valueB) {
return valueA == valueB;
}
function warnIfMalformedTemplate(el, directive) {
if (el.tagName.toLowerCase() !== 'template') {
console.warn(`Alpine: [${directive}] directive should only be added to <template> tags. See https://github.com/alpinejs/alpine#${directive}`);
} else if (el.content.childElementCount !== 1) {
console.warn(`Alpine: <template> tag with [${directive}] encountered with multiple element roots. Make sure <template> only has a single child element.`);
}
}
function kebabCase(subject) {
return subject.replace(/([a-z])([A-Z])/g, '$1-$2').replace(/[_\s]/, '-').toLowerCase();
}
function camelCase(subject) {
return subject.toLowerCase().replace(/-(\w)/g, (match, char) => char.toUpperCase());
}
function walk(el, callback) {
if (callback(el) === false) return;
let node = el.firstElementChild;
while (node) {
walk(node, callback);
node = node.nextElementSibling;
}
}
function debounce(func, wait) {
var timeout;
return function () {
var context = this,
args = arguments;
var later = function later() {
timeout = null;
func.apply(context, args);
};
clearTimeout(timeout);
timeout = setTimeout(later, wait);
};
}
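// Illustrative usage (not part of the library): debounce collapses a burst of
// calls into a single trailing call after `wait` ms of quiet, e.g.:
//
//   const log = debounce(() => console.log('settled'), 250);
//   log(); log(); log(); // logs "settled" once, ~250ms after the last call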
const handleError = (el, expression, error) => {
console.warn(`Alpine Error: "${error}"\n\nExpression: "${expression}"\nElement:`, el);
if (!isTesting()) {
throw error;
}
};
function tryCatch(cb, {
el,
expression
}) {
try {
const value = cb();
return value instanceof Promise ? value.catch(e => handleError(el, expression, e)) : value;
} catch (e) {
handleError(el, expression, e);
}
}
function saferEval(el, expression, dataContext, additionalHelperVariables = {}) {
return tryCatch(() => {
if (typeof expression === 'function') {
return expression.call(dataContext);
}
return new Function(['$data', ...Object.keys(additionalHelperVariables)], `var __alpine_result; with($data) { __alpine_result = ${expression} }; return __alpine_result`)(dataContext, ...Object.values(additionalHelperVariables));
}, {
el,
expression
});
}
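// Illustrative sketch (hypothetical values): saferEval runs an expression
// string against a data context via `with`, so given any element for error
// reporting:
//
//   saferEval(someEl, "open ? 'yes' : 'no'", { open: true }); // => 'yes'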
function saferEvalNoReturn(el, expression, dataContext, additionalHelperVariables = {}) {
return tryCatch(() => {
if (typeof expression === 'function') {
return Promise.resolve(expression.call(dataContext, additionalHelperVariables['$event']));
}
let AsyncFunction = Function;
/* MODERN-ONLY:START */
AsyncFunction = Object.getPrototypeOf(async function () {}).constructor;
/* MODERN-ONLY:END */
// For the cases when users pass only a function reference to the caller: `x-on:click="foo"`
// Where "foo" is a function. Also, we'll pass the function the event instance when we call it.
if (Object.keys(dataContext).includes(expression)) {
let methodReference = new Function(['dataContext', ...Object.keys(additionalHelperVariables)], `with(dataContext) { return ${expression} }`)(dataContext, ...Object.values(additionalHelperVariables));
if (typeof methodReference === 'function') {
return Promise.resolve(methodReference.call(dataContext, additionalHelperVariables['$event']));
} else {
return Promise.resolve();
}
}
return Promise.resolve(new AsyncFunction(['dataContext', ...Object.keys(additionalHelperVariables)], `with(dataContext) { ${expression} }`)(dataContext, ...Object.values(additionalHelperVariables)));
}, {
el,
expression
});
}
const xAttrRE = /^x-(on|bind|data|text|html|model|if|for|show|cloak|transition|ref|spread)\b/;
function isXAttr(attr) {
const name = replaceAtAndColonWithStandardSyntax(attr.name);
return xAttrRE.test(name);
}
function getXAttrs(el, component, type) {
let directives = Array.from(el.attributes).filter(isXAttr).map(parseHtmlAttribute); // Get an object of directives from x-spread.
let spreadDirective = directives.filter(directive => directive.type === 'spread')[0];
if (spreadDirective) {
let spreadObject = saferEval(el, spreadDirective.expression, component.$data); // Add x-spread directives to the pile of existing directives.
directives = directives.concat(Object.entries(spreadObject).map(([name, value]) => parseHtmlAttribute({
name,
value
})));
}
if (type) return directives.filter(i => i.type === type);
return sortDirectives(directives);
}
function sortDirectives(directives) {
let directiveOrder = ['bind', 'model', 'show', 'catch-all'];
return directives.sort((a, b) => {
let typeA = directiveOrder.indexOf(a.type) === -1 ? 'catch-all' : a.type;
let typeB = directiveOrder.indexOf(b.type) === -1 ? 'catch-all' : b.type;
return directiveOrder.indexOf(typeA) - directiveOrder.indexOf(typeB);
});
}
function parseHtmlAttribute({
name,
value
}) {
const normalizedName = replaceAtAndColonWithStandardSyntax(name);
const typeMatch = normalizedName.match(xAttrRE);
const valueMatch = normalizedName.match(/:([a-zA-Z0-9\-:]+)/);
const modifiers = normalizedName.match(/\.[^.\]]+(?=[^\]]*$)/g) || [];
return {
type: typeMatch ? typeMatch[1] : null,
value: valueMatch ? valueMatch[1] : null,
modifiers: modifiers.map(i => i.replace('.', '')),
expression: value
};
}
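// Illustrative sketch: an attribute like x-on:keydown.enter.prevent="submit()"
// parses into its directive parts roughly as:
//
//   parseHtmlAttribute({ name: 'x-on:keydown.enter.prevent', value: 'submit()' });
//   // => { type: 'on', value: 'keydown', modifiers: ['enter', 'prevent'],
//   //      expression: 'submit()' }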
function isBooleanAttr(attrName) {
// As per HTML spec table https://html.spec.whatwg.org/multipage/indices.html#attributes-3:boolean-attribute
// Array roughly ordered by estimated usage
const booleanAttributes = ['disabled', 'checked', 'required', 'readonly', 'hidden', 'open', 'selected', 'autofocus', 'itemscope', 'multiple', 'novalidate', 'allowfullscreen', 'allowpaymentrequest', 'formnovalidate', 'autoplay', 'controls', 'loop', 'muted', 'playsinline', 'default', 'ismap', 'reversed', 'async', 'defer', 'nomodule'];
return booleanAttributes.includes(attrName);
}
function replaceAtAndColonWithStandardSyntax(name) {
if (name.startsWith('@')) {
return name.replace('@', 'x-on:');
} else if (name.startsWith(':')) {
return name.replace(':', 'x-bind:');
}
return name;
}
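// For example, the shorthand syntaxes normalize as follows; anything else
// passes through untouched:
//
//   replaceAtAndColonWithStandardSyntax('@click');    // => 'x-on:click'
//   replaceAtAndColonWithStandardSyntax(':disabled'); // => 'x-bind:disabled'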
function convertClassStringToArray(classList, filterFn = Boolean) {
return classList.split(' ').filter(filterFn);
}
const TRANSITION_TYPE_IN = 'in';
const TRANSITION_TYPE_OUT = 'out';
const TRANSITION_CANCELLED = 'cancelled';
function transitionIn(el, show, reject, component, forceSkip = false) {
// We don't want to transition on the initial page load.
if (forceSkip) return show();
if (el.__x_transition && el.__x_transition.type === TRANSITION_TYPE_IN) {
// There is already a similar transition going on. It was probably triggered
// by a change in a different property, so let's leave the previous one to do its job.
return;
}
const attrs = getXAttrs(el, component, 'transition');
const showAttr = getXAttrs(el, component, 'show')[0]; // If this is triggered by a x-show.transition.
if (showAttr && showAttr.modifiers.includes('transition')) {
let modifiers = showAttr.modifiers; // If x-show.transition.out, we'll skip the "in" transition.
if (modifiers.includes('out') && !modifiers.includes('in')) return show();
const settingBothSidesOfTransition = modifiers.includes('in') && modifiers.includes('out'); // If x-show.transition.in...out... only use "in" related modifiers for this transition.
modifiers = settingBothSidesOfTransition ? modifiers.filter((i, index) => index < modifiers.indexOf('out')) : modifiers;
transitionHelperIn(el, modifiers, show, reject); // Otherwise, we can assume x-transition:enter.
} else if (attrs.some(attr => ['enter', 'enter-start', 'enter-end'].includes(attr.value))) {
transitionClassesIn(el, component, attrs, show, reject);
} else {
// If neither, just show that damn thing.
show();
}
}
function transitionOut(el, hide, reject, component, forceSkip = false) {
// We don't want to transition on the initial page load.
if (forceSkip) return hide();
if (el.__x_transition && el.__x_transition.type === TRANSITION_TYPE_OUT) {
// There is already a similar transition going on. It was probably triggered
// by a change in a different property, so let's leave the previous one to do its job.
return;
}
const attrs = getXAttrs(el, component, 'transition');
const showAttr = getXAttrs(el, component, 'show')[0];
if (showAttr && showAttr.modifiers.includes('transition')) {
let modifiers = showAttr.modifiers;
if (modifiers.includes('in') && !modifiers.includes('out')) return hide();
const settingBothSidesOfTransition = modifiers.includes('in') && modifiers.includes('out');
modifiers = settingBothSidesOfTransition ? modifiers.filter((i, index) => index > modifiers.indexOf('out')) : modifiers;
transitionHelperOut(el, modifiers, settingBothSidesOfTransition, hide, reject);
} else if (attrs.some(attr => ['leave', 'leave-start', 'leave-end'].includes(attr.value))) {
transitionClassesOut(el, component, attrs, hide, reject);
} else {
hide();
}
}
function transitionHelperIn(el, modifiers, showCallback, reject) {
// Default values inspired by: https://material.io/design/motion/speed.html#duration
const styleValues = {
duration: modifierValue(modifiers, 'duration', 150),
origin: modifierValue(modifiers, 'origin', 'center'),
first: {
opacity: 0,
scale: modifierValue(modifiers, 'scale', 95)
},
second: {
opacity: 1,
scale: 100
}
};
transitionHelper(el, modifiers, showCallback, () => {}, reject, styleValues, TRANSITION_TYPE_IN);
}
function transitionHelperOut(el, modifiers, settingBothSidesOfTransition, hideCallback, reject) {
// Make the "out" transition .5x slower than the "in". (Visually better)
// HOWEVER, if they explicitly set a duration for the "out" transition,
// use that.
const duration = settingBothSidesOfTransition ? modifierValue(modifiers, 'duration', 150) : modifierValue(modifiers, 'duration', 150) / 2;
const styleValues = {
duration: duration,
origin: modifierValue(modifiers, 'origin', 'center'),
first: {
opacity: 1,
scale: 100
},
second: {
opacity: 0,
scale: modifierValue(modifiers, 'scale', 95)
}
};
transitionHelper(el, modifiers, () => {}, hideCallback, reject, styleValues, TRANSITION_TYPE_OUT);
}
function modifierValue(modifiers, key, fallback) {
// If the modifier isn't present, use the default.
if (modifiers.indexOf(key) === -1) return fallback; // If it IS present, grab the value after it: x-show.transition.duration.500ms
const rawValue = modifiers[modifiers.indexOf(key) + 1];
if (!rawValue) return fallback;
if (key === 'scale') {
// Check if the very next value is NOT a number and return the fallback.
// If x-show.transition.scale, we'll use the default scale value.
// That is how a user opts out of the opacity transition.
if (!isNumeric(rawValue)) return fallback;
}
if (key === 'duration') {
// Support x-show.transition.duration.500ms && duration.500
let match = rawValue.match(/([0-9]+)ms/);
if (match) return match[1];
}
if (key === 'origin') {
// Support chaining origin directions: x-show.transition.top.right
if (['top', 'right', 'left', 'center', 'bottom'].includes(modifiers[modifiers.indexOf(key) + 2])) {
return [rawValue, modifiers[modifiers.indexOf(key) + 2]].join(' ');
}
}
return rawValue;
}
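// Illustrative sketch: for x-show.transition.duration.500ms the modifiers
// array is ['transition', 'duration', '500ms'], so:
//
//   modifierValue(['transition', 'duration', '500ms'], 'duration', 150); // => '500'
//   modifierValue(['transition'], 'duration', 150);                      // => 150 (fallback)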
function transitionHelper(el, modifiers, hook1, hook2, reject, styleValues, type) {
// clear the previous transition if exists to avoid caching the wrong styles
if (el.__x_transition) {
el.__x_transition.cancel && el.__x_transition.cancel();
} // If the user set these style values, we'll put them back when we're done with them.
const opacityCache = el.style.opacity;
const transformCache = el.style.transform;
const transformOriginCache = el.style.transformOrigin; // If no modifiers are present: x-show.transition, we'll default to both opacity and scale.
const noModifiers = !modifiers.includes('opacity') && !modifiers.includes('scale');
const transitionOpacity = noModifiers || modifiers.includes('opacity');
const transitionScale = noModifiers || modifiers.includes('scale'); // These are the explicit stages of a transition (same stages for in and for out).
// This way you can get a birds eye view of the hooks, and the differences
// between them.
const stages = {
start() {
if (transitionOpacity) el.style.opacity = styleValues.first.opacity;
if (transitionScale) el.style.transform = `scale(${styleValues.first.scale / 100})`;
},
during() {
if (transitionScale) el.style.transformOrigin = styleValues.origin;
el.style.transitionProperty = [transitionOpacity ? `opacity` : ``, transitionScale ? `transform` : ``].join(' ').trim();
el.style.transitionDuration = `${styleValues.duration / 1000}s`;
el.style.transitionTimingFunction = `cubic-bezier(0.4, 0.0, 0.2, 1)`;
},
show() {
hook1();
},
end() {
if (transitionOpacity) el.style.opacity = styleValues.second.opacity;
if (transitionScale) el.style.transform = `scale(${styleValues.second.scale / 100})`;
},
hide() {
hook2();
},
cleanup() {
if (transitionOpacity) el.style.opacity = opacityCache;
if (transitionScale) el.style.transform = transformCache;
if (transitionScale) el.style.transformOrigin = transformOriginCache;
el.style.transitionProperty = null;
el.style.transitionDuration = null;
el.style.transitionTimingFunction = null;
}
};
transition(el, stages, type, reject);
}
const ensureStringExpression = (expression, el, component) => {
return typeof expression === 'function' ? component.evaluateReturnExpression(el, expression) : expression;
};
function transitionClassesIn(el, component, directives, showCallback, reject) {
const enter = convertClassStringToArray(ensureStringExpression((directives.find(i => i.value === 'enter') || {
expression: ''
}).expression, el, component));
const enterStart = convertClassStringToArray(ensureStringExpression((directives.find(i => i.value === 'enter-start') || {
expression: ''
}).expression, el, component));
const enterEnd = convertClassStringToArray(ensureStringExpression((directives.find(i => i.value === 'enter-end') || {
expression: ''
}).expression, el, component));
transitionClasses(el, enter, enterStart, enterEnd, showCallback, () => {}, TRANSITION_TYPE_IN, reject);
}
function transitionClassesOut(el, component, directives, hideCallback, reject) {
const leave = convertClassStringToArray(ensureStringExpression((directives.find(i => i.value === 'leave') || {
expression: ''
}).expression, el, component));
const leaveStart = convertClassStringToArray(ensureStringExpression((directives.find(i => i.value === 'leave-start') || {
expression: ''
}).expression, el, component));
const leaveEnd = convertClassStringToArray(ensureStringExpression((directives.find(i => i.value === 'leave-end') || {
expression: ''
}).expression, el, component));
transitionClasses(el, leave, leaveStart, leaveEnd, () => {}, hideCallback, TRANSITION_TYPE_OUT, reject);
}
function transitionClasses(el, classesDuring, classesStart, classesEnd, hook1, hook2, type, reject) {
// clear the previous transition if exists to avoid caching the wrong classes
if (el.__x_transition) {
el.__x_transition.cancel && el.__x_transition.cancel();
}
const originalClasses = el.__x_original_classes || [];
const stages = {
start() {
el.classList.add(...classesStart);
},
during() {
el.classList.add(...classesDuring);
},
show() {
hook1();
},
end() {
// Don't remove classes that were in the original class attribute.
el.classList.remove(...classesStart.filter(i => !originalClasses.includes(i)));
el.classList.add(...classesEnd);
},
hide() {
hook2();
},
cleanup() {
el.classList.remove(...classesDuring.filter(i => !originalClasses.includes(i)));
el.classList.remove(...classesEnd.filter(i => !originalClasses.includes(i)));
}
};
transition(el, stages, type, reject);
}
function transition(el, stages, type, reject) {
const finish = once(() => {
stages.hide(); // Adding an "isConnected" check, in case the callback
// removed the element from the DOM.
if (el.isConnected) {
stages.cleanup();
}
delete el.__x_transition;
});
el.__x_transition = {
// Set transition type so we can avoid clearing transition if the direction is the same
type: type,
// create a callback for the last stages of the transition so we can call it
// from different point and early terminate it. Once will ensure that function
// is only called one time.
cancel: once(() => {
reject(TRANSITION_CANCELLED);
finish();
}),
finish,
// This store the next animation frame so we can cancel it
nextFrame: null
};
stages.start();
stages.during();
el.__x_transition.nextFrame = requestAnimationFrame(() => {
// Note: Safari's transitionDuration property will list out comma separated transition durations
// for every single transition property. Let's grab the first one and call it a day.
let duration = Number(getComputedStyle(el).transitionDuration.replace(/,.*/, '').replace('s', '')) * 1000;
if (duration === 0) {
duration = Number(getComputedStyle(el).animationDuration.replace('s', '')) * 1000;
}
stages.show();
el.__x_transition.nextFrame = requestAnimationFrame(() => {
stages.end();
setTimeout(el.__x_transition.finish, duration);
});
});
}
function isNumeric(subject) {
return !Array.isArray(subject) && !isNaN(subject);
} // Thanks @vuejs
// https://github.com/vuejs/vue/blob/4de4649d9637262a9b007720b59f80ac72a5620c/src/shared/util.js
function once(callback) {
let called = false;
return function () {
if (!called) {
called = true;
callback.apply(this, arguments);
}
};
}
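// Illustrative usage: once() guards a callback so repeat invocations are no-ops:
//
//   const finish = once(() => console.log('done'));
//   finish(); finish(); // logs "done" a single time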
function handleForDirective(component, templateEl, expression, initialUpdate, extraVars) {
warnIfMalformedTemplate(templateEl, 'x-for');
let iteratorNames = typeof expression === 'function' ? parseForExpression(component.evaluateReturnExpression(templateEl, expression)) : parseForExpression(expression);
let items = evaluateItemsAndReturnEmptyIfXIfIsPresentAndFalseOnElement(component, templateEl, iteratorNames, extraVars); // As we walk the array, we'll also walk the DOM (updating/creating as we go).
let currentEl = templateEl;
items.forEach((item, index) => {
let iterationScopeVariables = getIterationScopeVariables(iteratorNames, item, index, items, extraVars());
let currentKey = generateKeyForIteration(component, templateEl, index, iterationScopeVariables);
let nextEl = lookAheadForMatchingKeyedElementAndMoveItIfFound(currentEl.nextElementSibling, currentKey); // If we haven't found a matching key, insert the element at the current position.
if (!nextEl) {
nextEl = addElementInLoopAfterCurrentEl(templateEl, currentEl); // And transition it in if it's not the first page load.
transitionIn(nextEl, () => {}, () => {}, component, initialUpdate);
nextEl.__x_for = iterationScopeVariables;
component.initializeElements(nextEl, () => nextEl.__x_for); // Otherwise update the element we found.
} else {
// Temporarily remove the key indicator to allow the normal "updateElements" to work.
delete nextEl.__x_for_key;
nextEl.__x_for = iterationScopeVariables;
component.updateElements(nextEl, () => nextEl.__x_for);
}
currentEl = nextEl;
currentEl.__x_for_key = currentKey;
});
removeAnyLeftOverElementsFromPreviousUpdate(currentEl, component);
} // This was taken from VueJS 2.* core. Thanks Vue!
function parseForExpression(expression) {
let forIteratorRE = /,([^,\}\]]*)(?:,([^,\}\]]*))?$/;
let stripParensRE = /^\(|\)$/g;
let forAliasRE = /([\s\S]*?)\s+(?:in|of)\s+([\s\S]*)/;
let inMatch = expression.match(forAliasRE);
if (!inMatch) return;
let res = {};
res.items = inMatch[2].trim();
let item = inMatch[1].trim().replace(stripParensRE, '');
let iteratorMatch = item.match(forIteratorRE);
if (iteratorMatch) {
res.item = item.replace(forIteratorRE, '').trim();
res.index = iteratorMatch[1].trim();
if (iteratorMatch[2]) {
res.collection = iteratorMatch[2].trim();
}
} else {
res.item = item;
}
return res;
}
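// Illustrative sketch: the Vue-style expression "(item, index) in items"
// parses roughly as:
//
//   parseForExpression('(item, index) in items');
//   // => { items: 'items', item: 'item', index: 'index' }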
function getIterationScopeVariables(iteratorNames, item, index, items, extraVars) {
// We must create a new object, so each iteration has a new scope
let scopeVariables = extraVars ? _objectSpread2({}, extraVars) : {};
scopeVariables[iteratorNames.item] = item;
if (iteratorNames.index) scopeVariables[iteratorNames.index] = index;
if (iteratorNames.collection) scopeVariables[iteratorNames.collection] = items;
return scopeVariables;
}
function generateKeyForIteration(component, el, index, iterationScopeVariables) {
let bindKeyAttribute = getXAttrs(el, component, 'bind').filter(attr => attr.value === 'key')[0]; // If the dev hasn't specified a key, just return the index of the iteration.
if (!bindKeyAttribute) return index;
return component.evaluateReturnExpression(el, bindKeyAttribute.expression, () => iterationScopeVariables);
}
function evaluateItemsAndReturnEmptyIfXIfIsPresentAndFalseOnElement(component, el, iteratorNames, extraVars) {
let ifAttribute = getXAttrs(el, component, 'if')[0];
if (ifAttribute && !component.evaluateReturnExpression(el, ifAttribute.expression)) {
return [];
}
let items = component.evaluateReturnExpression(el, iteratorNames.items, extraVars); // This adds support for the `i in n` syntax.
if (isNumeric(items) && items > 0) {
items = Array.from(Array(items).keys(), i => i + 1);
}
return items;
}
function addElementInLoopAfterCurrentEl(templateEl, currentEl) {
let clone = document.importNode(templateEl.content, true);
currentEl.parentElement.insertBefore(clone, currentEl.nextElementSibling);
return currentEl.nextElementSibling;
}
function lookAheadForMatchingKeyedElementAndMoveItIfFound(nextEl, currentKey) {
if (!nextEl) return; // If we are already past the x-for generated elements, we don't need to look ahead.
if (nextEl.__x_for_key === undefined) return; // If the keys DO match, no need to look ahead.
if (nextEl.__x_for_key === currentKey) return nextEl; // If they don't, we'll look ahead for a match.
// If we find it, we'll move it to the current position in the loop.
let tmpNextEl = nextEl;
while (tmpNextEl) {
if (tmpNextEl.__x_for_key === currentKey) {
return tmpNextEl.parentElement.insertBefore(tmpNextEl, nextEl);
}
tmpNextEl = tmpNextEl.nextElementSibling && tmpNextEl.nextElementSibling.__x_for_key !== undefined ? tmpNextEl.nextElementSibling : false;
}
}
function removeAnyLeftOverElementsFromPreviousUpdate(currentEl, component) {
var nextElementFromOldLoop = currentEl.nextElementSibling && currentEl.nextElementSibling.__x_for_key !== undefined ? currentEl.nextElementSibling : false;
while (nextElementFromOldLoop) {
let nextElementFromOldLoopImmutable = nextElementFromOldLoop;
let nextSibling = nextElementFromOldLoop.nextElementSibling;
transitionOut(nextElementFromOldLoop, () => {
nextElementFromOldLoopImmutable.remove();
}, () => {}, component);
nextElementFromOldLoop = nextSibling && nextSibling.__x_for_key !== undefined ? nextSibling : false;
}
}
function handleAttributeBindingDirective(component, el, attrName, expression, extraVars, attrType, modifiers) {
var value = component.evaluateReturnExpression(el, expression, extraVars);
if (attrName === 'value') {
if (Alpine.ignoreFocusedForValueBinding && document.activeElement.isSameNode(el)) return; // If nested model key is undefined, set the default value to empty string.
if (value === undefined && expression.match(/\./)) {
value = '';
}
if (el.type === 'radio') {
// Set radio value from x-bind:value, if no "value" attribute exists.
// If there are any initial state values, radio will have a correct
// "checked" value since x-bind:value is processed before x-model.
if (el.attributes.value === undefined && attrType === 'bind') {
el.value = value;
} else if (attrType !== 'bind') {
el.checked = checkedAttrLooseCompare(el.value, value);
}
} else if (el.type === 'checkbox') {
// If we are explicitly binding a string to the :value, set the string,
// If the value is a boolean, leave it alone, it will be set to "on"
// automatically.
if (typeof value !== 'boolean' && ![null, undefined].includes(value) && attrType === 'bind') {
el.value = String(value);
} else if (attrType !== 'bind') {
if (Array.isArray(value)) {
// I'm purposely not using Array.includes here because it's
// strict, and because of Numeric/String mis-casting, I
// want the "includes" to be "fuzzy".
el.checked = value.some(val => checkedAttrLooseCompare(val, el.value));
} else {
el.checked = !!value;
}
}
} else if (el.tagName === 'SELECT') {
updateSelect(el, value);
} else {
if (el.value === value) return;
el.value = value;
}
} else if (attrName === 'class') {
if (Array.isArray(value)) {
const originalClasses = el.__x_original_classes || [];
el.setAttribute('class', arrayUnique(originalClasses.concat(value)).join(' '));
} else if (typeof value === 'object') {
// Sorting the keys / class names by their boolean value will ensure that
// anything that evaluates to `false` and needs to remove classes is run first.
const keysSortedByBooleanValue = Object.keys(value).sort((a, b) => value[a] - value[b]);
keysSortedByBooleanValue.forEach(classNames => {
if (value[classNames]) {
convertClassStringToArray(classNames).forEach(className => el.classList.add(className));
} else {
convertClassStringToArray(classNames).forEach(className => el.classList.remove(className));
}
});
} else {
const originalClasses = el.__x_original_classes || [];
const newClasses = value ? convertClassStringToArray(value) : [];
el.setAttribute('class', arrayUnique(originalClasses.concat(newClasses)).join(' '));
}
} else {
attrName = modifiers.includes('camel') ? camelCase(attrName) : attrName; // If an attribute's bound value is null, undefined or false, remove the attribute
if ([null, undefined, false].includes(value)) {
el.removeAttribute(attrName);
} else {
isBooleanAttr(attrName) ? setIfChanged(el, attrName, attrName) : setIfChanged(el, attrName, value);
}
}
}
function setIfChanged(el, attrName, value) {
if (el.getAttribute(attrName) != value) {
el.setAttribute(attrName, value);
}
}
function updateSelect(el, value) {
const arrayWrappedValue = [].concat(value).map(value => {
return value + '';
});
Array.from(el.options).forEach(option => {
option.selected = arrayWrappedValue.includes(option.value || option.text);
});
}
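// Illustrative sketch: updateSelect(selectEl, ['a', 'c']) marks options "a"
// and "c" selected and deselects the rest; scalar values are wrapped via
// [].concat(value) and compared as strings.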
function handleTextDirective(el, output, expression) {
// If nested model key is undefined, set the default value to empty string.
if (output === undefined && expression.match(/\./)) {
output = '';
}
el.textContent = output;
}
function handleHtmlDirective(component, el, expression, extraVars) {
el.innerHTML = component.evaluateReturnExpression(el, expression, extraVars);
}
function handleShowDirective(component, el, value, modifiers, initialUpdate = false) {
const hide = () => {
el.style.display = 'none';
el.__x_is_shown = false;
};
const show = () => {
if (el.style.length === 1 && el.style.display === 'none') {
el.removeAttribute('style');
} else {
el.style.removeProperty('display');
}
el.__x_is_shown = true;
};
if (initialUpdate === true) {
if (value) {
show();
} else {
hide();
}
return;
}
const handle = (resolve, reject) => {
if (value) {
if (el.style.display === 'none' || el.__x_transition) {
transitionIn(el, () => {
show();
}, reject, component);
}
resolve(() => {});
} else {
if (el.style.display !== 'none') {
transitionOut(el, () => {
resolve(() => {
hide();
});
}, reject, component);
} else {
resolve(() => {});
}
}
}; // The working of x-show is a bit complex because we need to
// wait for any child transitions to finish before hiding
// an element. Also, this has to be done recursively.
// If x-show.immediate is used, forgo the waiting.
if (modifiers.includes('immediate')) {
handle(finish => finish(), () => {});
return;
} // x-show is encountered during a DOM tree walk. If an element
// we encounter is NOT a child of another x-show element we
// can execute the previous x-show stack (if one exists).
if (component.showDirectiveLastElement && !component.showDirectiveLastElement.contains(el)) {
component.executeAndClearRemainingShowDirectiveStack();
}
component.showDirectiveStack.push(handle);
component.showDirectiveLastElement = el;
}
function handleIfDirective(component, el, expressionResult, initialUpdate, extraVars) {
warnIfMalformedTemplate(el, 'x-if');
const elementHasAlreadyBeenAdded = el.nextElementSibling && el.nextElementSibling.__x_inserted_me === true;
if (expressionResult && (!elementHasAlreadyBeenAdded || el.__x_transition)) {
const clone = document.importNode(el.content, true);
el.parentElement.insertBefore(clone, el.nextElementSibling);
transitionIn(el.nextElementSibling, () => {}, () => {}, component, initialUpdate);
component.initializeElements(el.nextElementSibling, extraVars);
el.nextElementSibling.__x_inserted_me = true;
} else if (!expressionResult && elementHasAlreadyBeenAdded) {
transitionOut(el.nextElementSibling, () => {
el.nextElementSibling.remove();
}, () => {}, component, initialUpdate);
}
}
function registerListener(component, el, event, modifiers, expression, extraVars = {}) {
const options = {
passive: modifiers.includes('passive')
};
if (modifiers.includes('camel')) {
event = camelCase(event);
}
if (modifiers.includes('away')) {
let handler = e => {
// Don't do anything if the click came from the element or within it.
if (el.contains(e.target)) return; // Don't do anything if this element isn't currently visible.
if (el.offsetWidth < 1 && el.offsetHeight < 1) return; // Now that we are sure the element is visible, AND the click
// is from outside it, let's run the expression.
runListenerHandler(component, expression, e, extraVars);
if (modifiers.includes('once')) {
document.removeEventListener(event, handler, options);
}
}; // Listen for this event at the root level.
document.addEventListener(event, handler, options);
} else {
let listenerTarget = modifiers.includes('window') ? window : modifiers.includes('document') ? document : el;
let handler = e => {
// Remove this global event handler if the element that declared it
// has been removed. It's now stale.
if (listenerTarget === window || listenerTarget === document) {
if (!document.body.contains(el)) {
listenerTarget.removeEventListener(event, handler, options);
return;
}
}
if (isKeyEvent(event)) {
if (isListeningForASpecificKeyThatHasntBeenPressed(e, modifiers)) {
return;
}
}
if (modifiers.includes('prevent')) e.preventDefault();
if (modifiers.includes('stop')) e.stopPropagation(); // If the .self modifier isn't present, or if it is present and
// the target element matches the element we are registering the
// event on, run the handler
if (!modifiers.includes('self') || e.target === el) {
const returnValue = runListenerHandler(component, expression, e, extraVars);
returnValue.then(value => {
if (value === false) {
e.preventDefault();
} else {
if (modifiers.includes('once')) {
listenerTarget.removeEventListener(event, handler, options);
}
}
});
}
};
if (modifiers.includes('debounce')) {
let nextModifier = modifiers[modifiers.indexOf('debounce') + 1] || 'invalid-wait';
let wait = isNumeric(nextModifier.split('ms')[0]) ? Number(nextModifier.split('ms')[0]) : 250;
handler = debounce(handler, wait);
}
listenerTarget.addEventListener(event, handler, options);
}
}
function runListenerHandler(component, expression, e, extraVars) {
return component.evaluateCommandExpression(e.target, expression, () => {
return _objectSpread2(_objectSpread2({}, extraVars()), {}, {
'$event': e
});
});
}
function isKeyEvent(event) {
return ['keydown', 'keyup'].includes(event);
}
function isListeningForASpecificKeyThatHasntBeenPressed(e, modifiers) {
let keyModifiers = modifiers.filter(i => {
return !['window', 'document', 'prevent', 'stop'].includes(i);
});
if (keyModifiers.includes('debounce')) {
let debounceIndex = keyModifiers.indexOf('debounce');
keyModifiers.splice(debounceIndex, isNumeric((keyModifiers[debounceIndex + 1] || 'invalid-wait').split('ms')[0]) ? 2 : 1);
} // If no modifier is specified, we'll call it a press.
if (keyModifiers.length === 0) return false; // If one is passed, AND it matches the key pressed, we'll call it a press.
if (keyModifiers.length === 1 && keyModifiers[0] === keyToModifier(e.key)) return false; // The user is listening for key combinations.
const systemKeyModifiers = ['ctrl', 'shift', 'alt', 'meta', 'cmd', 'super'];
const selectedSystemKeyModifiers = systemKeyModifiers.filter(modifier => keyModifiers.includes(modifier));
keyModifiers = keyModifiers.filter(i => !selectedSystemKeyModifiers.includes(i));
if (selectedSystemKeyModifiers.length > 0) {
const activelyPressedKeyModifiers = selectedSystemKeyModifiers.filter(modifier => {
// Alias "cmd" and "super" to "meta"
if (modifier === 'cmd' || modifier === 'super') modifier = 'meta';
return e[`${modifier}Key`];
}); // If all the modifiers selected are pressed, ...
if (activelyPressedKeyModifiers.length === selectedSystemKeyModifiers.length) {
// AND the remaining key is pressed as well. It's a press.
if (keyModifiers[0] === keyToModifier(e.key)) return false;
}
} // We'll call it NOT a valid keypress.
return true;
}
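// Illustrative sketch: for x-on:keydown.ctrl.enter the key modifiers are
// ['ctrl', 'enter']; the handler runs only when e.ctrlKey is true AND
// keyToModifier(e.key) === 'enter', which is exactly when this function
// returns false (i.e. "do not skip").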
function keyToModifier(key) {
switch (key) {
case '/':
return 'slash';
case ' ':
case 'Spacebar':
return 'space';
default:
return key && kebabCase(key);
}
}
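// Illustrative sketch: key names map to modifier spellings, e.g.:
//
//   keyToModifier('/');      // => 'slash'
//   keyToModifier(' ');      // => 'space'
//   keyToModifier('Escape'); // => 'escape'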
function registerModelListener(component, el, modifiers, expression, extraVars) {
// If the element we are binding to is a select, a radio, or checkbox
// we'll listen for the change event instead of the "input" event.
var event = el.tagName.toLowerCase() === 'select' || ['checkbox', 'radio'].includes(el.type) || modifiers.includes('lazy') ? 'change' : 'input';
const listenerExpression = `${expression} = rightSideOfExpression($event, ${expression})`;
registerListener(component, el, event, modifiers, listenerExpression, () => {
return _objectSpread2(_objectSpread2({}, extraVars()), {}, {
rightSideOfExpression: generateModelAssignmentFunction(el, modifiers, expression)
});
});
}
function generateModelAssignmentFunction(el, modifiers, expression) {
if (el.type === 'radio') {
// Radio buttons only work properly when they share a name attribute.
// People might assume we take care of that for them, because
// they already set a shared "x-model" attribute.
if (!el.hasAttribute('name')) el.setAttribute('name', expression);
}
return (event, currentValue) => {
// Check for event.detail due to an issue where IE11 handles other events as a CustomEvent.
if (event instanceof CustomEvent && event.detail) {
return event.detail;
} else if (el.type === 'checkbox') {
// If the data we are binding to is an array, toggle its value inside the array.
if (Array.isArray(currentValue)) {
const newValue = modifiers.includes('number') ? safeParseNumber(event.target.value) : event.target.value;
return event.target.checked ? currentValue.concat([newValue]) : currentValue.filter(el => !checkedAttrLooseCompare(el, newValue));
} else {
return event.target.checked;
}
} else if (el.tagName.toLowerCase() === 'select' && el.multiple) {
return modifiers.includes('number') ? Array.from(event.target.selectedOptions).map(option => {
const rawValue = option.value || option.text;
return safeParseNumber(rawValue);
}) : Array.from(event.target.selectedOptions).map(option => {
return option.value || option.text;
});
} else {
const rawValue = event.target.value;
return modifiers.includes('number') ? safeParseNumber(rawValue) : modifiers.includes('trim') ? rawValue.trim() : rawValue;
}
};
}
function safeParseNumber(rawValue) {
const number = rawValue ? parseFloat(rawValue) : null;
return isNumeric(number) ? number : rawValue;
}
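// Illustrative sketch: used by x-model.number, this parses when it can and
// otherwise returns the raw input untouched:
//
//   safeParseNumber('42');    // => 42
//   safeParseNumber('1.5px'); // => 1.5 (parseFloat stops at the unit)
//   safeParseNumber('abc');   // => 'abc'
//   safeParseNumber('');      // => null (empty input short-circuits)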
/**
* Copyright (C) 2017 salesforce.com, inc.
*/
const { isArray } = Array;
const { getPrototypeOf, create: ObjectCreate, defineProperty: ObjectDefineProperty, defineProperties: ObjectDefineProperties, isExtensible, getOwnPropertyDescriptor, getOwnPropertyNames, getOwnPropertySymbols, preventExtensions, hasOwnProperty, } = Object;
const { push: ArrayPush, concat: ArrayConcat, map: ArrayMap, } = Array.prototype;
function isUndefined(obj) {
return obj === undefined;
}
function isFunction(obj) {
return typeof obj === 'function';
}
function isObject(obj) {
return typeof obj === 'object';
}
const proxyToValueMap = new WeakMap();
function registerProxy(proxy, value) {
proxyToValueMap.set(proxy, value);
}
const unwrap = (replicaOrAny) => proxyToValueMap.get(replicaOrAny) || replicaOrAny;
function wrapValue(membrane, value) {
return membrane.valueIsObservable(value) ? membrane.getProxy(value) : value;
}
/**
 * Unwrapping a property descriptor sets the unwrapped value back on the original descriptor.
 * We only need to unwrap if a value is specified.
 * @param descriptor external descriptor provided to define a new property on the original value
*/
function unwrapDescriptor(descriptor) {
if (hasOwnProperty.call(descriptor, 'value')) {
descriptor.value = unwrap(descriptor.value);
}
return descriptor;
}
function lockShadowTarget(membrane, shadowTarget, originalTarget) {
const targetKeys = ArrayConcat.call(getOwnPropertyNames(originalTarget), getOwnPropertySymbols(originalTarget));
targetKeys.forEach((key) => {
let descriptor = getOwnPropertyDescriptor(originalTarget, key);
// We do not need to wrap the descriptor if it is configurable,
// because we can deal with wrapping it when the user goes through
// getOwnPropertyDescriptor. There is also a chance that this descriptor
// could change sometime in the future, so we can defer wrapping
// until we need to.
if (!descriptor.configurable) {
descriptor = wrapDescriptor(membrane, descriptor, wrapValue);
}
ObjectDefineProperty(shadowTarget, key, descriptor);
});
preventExtensions(shadowTarget);
}
class ReactiveProxyHandler {
constructor(membrane, value) {
this.originalTarget = value;
this.membrane = membrane;
}
get(shadowTarget, key) {
const { originalTarget, membrane } = this;
const value = originalTarget[key];
const { valueObserved } = membrane;
valueObserved(originalTarget, key);
return membrane.getProxy(value);
}
set(shadowTarget, key, value) {
const { originalTarget, membrane: { valueMutated } } = this;
const oldValue = originalTarget[key];
if (oldValue !== value) {
originalTarget[key] = value;
valueMutated(originalTarget, key);
}
else if (key === 'length' && isArray(originalTarget)) {
// fix for issue #236: push will add the new index, and by the time length
// is updated, the internal length is already equal to the new length value
// therefore, the oldValue is equal to the value. This is the forking logic
// to support this use case.
valueMutated(originalTarget, key);
}
return true;
}
deleteProperty(shadowTarget, key) {
const { originalTarget, membrane: { valueMutated } } = this;
delete originalTarget[key];
valueMutated(originalTarget, key);
return true;
}
apply(shadowTarget, thisArg, argArray) {
/* No op */
}
construct(target, argArray, newTarget) {
/* No op */
}
has(shadowTarget, key) {
const { originalTarget, membrane: { valueObserved } } = this;
valueObserved(originalTarget, key);
return key in originalTarget;
}
ownKeys(shadowTarget) {
const { originalTarget } = this;
return ArrayConcat.call(getOwnPropertyNames(originalTarget), getOwnPropertySymbols(originalTarget));
}
isExtensible(shadowTarget) {
const shadowIsExtensible = isExtensible(shadowTarget);
if (!shadowIsExtensible) {
return shadowIsExtensible;
}
const { originalTarget, membrane } = this;
const targetIsExtensible = isExtensible(originalTarget);
if (!targetIsExtensible) {
lockShadowTarget(membrane, shadowTarget, originalTarget);
}
return targetIsExtensible;
}
setPrototypeOf(shadowTarget, prototype) {
}
getPrototypeOf(shadowTarget) {
const { originalTarget } = this;
return getPrototypeOf(originalTarget);
}
getOwnPropertyDescriptor(shadowTarget, key) {
const { originalTarget, membrane } = this;
const { valueObserved } = this.membrane;
// keys looked up via hasOwnProperty need to be reactive
valueObserved(originalTarget, key);
let desc = getOwnPropertyDescriptor(originalTarget, key);
if (isUndefined(desc)) {
return desc;
}
const shadowDescriptor = getOwnPropertyDescriptor(shadowTarget, key);
if (!isUndefined(shadowDescriptor)) {
return shadowDescriptor;
}
// Note: by accessing the descriptor, the key is marked as observed
// but access to the value, setter, or getter (if available) cannot be observed,
// just like regular methods; in that case we just do nothing.
desc = wrapDescriptor(membrane, desc, wrapValue);
if (!desc.configurable) {
// If descriptor from original target is not configurable,
// We must copy the wrapped descriptor over to the shadow target.
// Otherwise, proxy will throw an invariant error.
// This is our last chance to lock the value.
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Proxy/handler/getOwnPropertyDescriptor#Invariants
ObjectDefineProperty(shadowTarget, key, desc);
}
return desc;
}
preventExtensions(shadowTarget) {
const { originalTarget, membrane } = this;
lockShadowTarget(membrane, shadowTarget, originalTarget);
preventExtensions(originalTarget);
return true;
}
defineProperty(shadowTarget, key, descriptor) {
const { originalTarget, membrane } = this;
const { valueMutated } = membrane;
const { configurable } = descriptor;
// We have to check for value in descriptor
// because Object.freeze(proxy) calls this method
// with only { configurable: false, writable: false }.
// Additionally, this method will only be called with writable: false
// if the descriptor has a value, as opposed to a getter/setter.
// So we can just check if writable is present and then see if
// value is present. This eliminates getter and setter descriptors.
if (hasOwnProperty.call(descriptor, 'writable') && !hasOwnProperty.call(descriptor, 'value')) {
const originalDescriptor = getOwnPropertyDescriptor(originalTarget, key);
descriptor.value = originalDescriptor.value;
}
ObjectDefineProperty(originalTarget, key, unwrapDescriptor(descriptor));
if (configurable === false) {
ObjectDefineProperty(shadowTarget, key, wrapDescriptor(membrane, descriptor, wrapValue));
}
valueMutated(originalTarget, key);
return true;
}
}
function wrapReadOnlyValue(membrane, value) {
return membrane.valueIsObservable(value) ? membrane.getReadOnlyProxy(value) : value;
}
class ReadOnlyHandler {
constructor(membrane, value) {
this.originalTarget = value;
this.membrane = membrane;
}
get(shadowTarget, key) {
const { membrane, originalTarget } = this;
const value = originalTarget[key];
const { valueObserved } = membrane;
valueObserved(originalTarget, key);
return membrane.getReadOnlyProxy(value);
}
set(shadowTarget, key, value) {
return false;
}
deleteProperty(shadowTarget, key) {
return false;
}
apply(shadowTarget, thisArg, argArray) {
/* No op */
}
construct(target, argArray, newTarget) {
/* No op */
}
has(shadowTarget, key) {
const { originalTarget, membrane: { valueObserved } } = this;
valueObserved(originalTarget, key);
return key in originalTarget;
}
ownKeys(shadowTarget) {
const { originalTarget } = this;
return ArrayConcat.call(getOwnPropertyNames(originalTarget), getOwnPropertySymbols(originalTarget));
}
setPrototypeOf(shadowTarget, prototype) {
}
getOwnPropertyDescriptor(shadowTarget, key) {
const { originalTarget, membrane } = this;
const { valueObserved } = membrane;
// keys looked up via hasOwnProperty need to be reactive
valueObserved(originalTarget, key);
let desc = getOwnPropertyDescriptor(originalTarget, key);
if (isUndefined(desc)) {
return desc;
}
const shadowDescriptor = getOwnPropertyDescriptor(shadowTarget, key);
if (!isUndefined(shadowDescriptor)) {
return shadowDescriptor;
}
// Note: by accessing the descriptor, the key is marked as observed
// but access to the value or getter (if available) cannot be observed,
// just like regular methods; in that case we just do nothing.
desc = wrapDescriptor(membrane, desc, wrapReadOnlyValue);
if (hasOwnProperty.call(desc, 'set')) {
desc.set = undefined; // readOnly membrane does not allow setters
}
if (!desc.configurable) {
// If descriptor from original target is not configurable,
// We must copy the wrapped descriptor over to the shadow target.
// Otherwise, proxy will throw an invariant error.
// This is our last chance to lock the value.
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Proxy/handler/getOwnPropertyDescriptor#Invariants
ObjectDefineProperty(shadowTarget, key, desc);
}
return desc;
}
preventExtensions(shadowTarget) {
return false;
}
defineProperty(shadowTarget, key, descriptor) {
return false;
}
}
function createShadowTarget(value) {
let shadowTarget = undefined;
if (isArray(value)) {
shadowTarget = [];
}
else if (isObject(value)) {
shadowTarget = {};
}
return shadowTarget;
}
const ObjectDotPrototype = Object.prototype;
function defaultValueIsObservable(value) {
// intentionally checking for null
if (value === null) {
return false;
}
// treat all non-object types, including undefined, as non-observable values
if (typeof value !== 'object') {
return false;
}
if (isArray(value)) {
return true;
}
const proto = getPrototypeOf(value);
return (proto === ObjectDotPrototype || proto === null || getPrototypeOf(proto) === null);
}
const defaultValueObserved = (obj, key) => {
/* do nothing */
};
const defaultValueMutated = (obj, key) => {
/* do nothing */
};
const defaultValueDistortion = (value) => value;
function wrapDescriptor(membrane, descriptor, getValue) {
const { set, get } = descriptor;
if (hasOwnProperty.call(descriptor, 'value')) {
descriptor.value = getValue(membrane, descriptor.value);
}
else {
if (!isUndefined(get)) {
descriptor.get = function () {
// invoking the original getter with the original target
return getValue(membrane, get.call(unwrap(this)));
};
}
if (!isUndefined(set)) {
descriptor.set = function (value) {
// At this point we don't have a clear indication of whether
// or not a valid mutation will occur, we don't have the key,
// and we are not sure why and how they are invoking this setter.
// Nevertheless we preserve the original semantics by invoking the
// original setter with the original target and the unwrapped value
set.call(unwrap(this), membrane.unwrapProxy(value));
};
}
}
return descriptor;
}
class ReactiveMembrane {
constructor(options) {
this.valueDistortion = defaultValueDistortion;
this.valueMutated = defaultValueMutated;
this.valueObserved = defaultValueObserved;
this.valueIsObservable = defaultValueIsObservable;
this.objectGraph = new WeakMap();
if (!isUndefined(options)) {
const { valueDistortion, valueMutated, valueObserved, valueIsObservable } = options;
this.valueDistortion = isFunction(valueDistortion) ? valueDistortion : defaultValueDistortion;
this.valueMutated = isFunction(valueMutated) ? valueMutated : defaultValueMutated;
this.valueObserved = isFunction(valueObserved) ? valueObserved : defaultValueObserved;
this.valueIsObservable = isFunction(valueIsObservable) ? valueIsObservable : defaultValueIsObservable;
}
}
getProxy(value) {
const unwrappedValue = unwrap(value);
const distorted = this.valueDistortion(unwrappedValue);
if (this.valueIsObservable(distorted)) {
const o = this.getReactiveState(unwrappedValue, distorted);
// when trying to extract the writable version of a readonly
// we return the readonly.
return o.readOnly === value ? value : o.reactive;
}
return distorted;
}
getReadOnlyProxy(value) {
value = unwrap(value);
const distorted = this.valueDistortion(value);
if (this.valueIsObservable(distorted)) {
return this.getReactiveState(value, distorted).readOnly;
}
return distorted;
}
unwrapProxy(p) {
return unwrap(p);
}
getReactiveState(value, distortedValue) {
const { objectGraph, } = this;
let reactiveState = objectGraph.get(distortedValue);
if (reactiveState) {
return reactiveState;
}
const membrane = this;
reactiveState = {
get reactive() {
const reactiveHandler = new ReactiveProxyHandler(membrane, distortedValue);
// caching the reactive proxy after the first time it is accessed
const proxy = new Proxy(createShadowTarget(distortedValue), reactiveHandler);
registerProxy(proxy, value);
ObjectDefineProperty(this, 'reactive', { value: proxy });
return proxy;
},
get readOnly() {
const readOnlyHandler = new ReadOnlyHandler(membrane, distortedValue);
// caching the readOnly proxy after the first time it is accessed
const proxy = new Proxy(createShadowTarget(distortedValue), readOnlyHandler);
registerProxy(proxy, value);
ObjectDefineProperty(this, 'readOnly', { value: proxy });
return proxy;
}
};
objectGraph.set(distortedValue, reactiveState);
return reactiveState;
}
}
/** version: 0.26.0 */
function wrap(data, mutationCallback) {
let membrane = new ReactiveMembrane({
valueMutated(target, key) {
mutationCallback(target, key);
}
});
return {
data: membrane.getProxy(data),
membrane: membrane
};
}
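// Illustrative usage (standalone, hypothetical names): wrap() returns a deep
// reactive proxy plus its membrane, and the callback fires on mutation:
//
//   const { data, membrane } = wrap({ count: 0 }, (target, key) => {
//     console.log('mutated:', key);
//   });
//   data.count++;               // logs "mutated: count"
//   membrane.unwrapProxy(data); // => the original { count: 1 } object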
function unwrap$1(membrane, observable) {
let unwrappedData = membrane.unwrapProxy(observable);
let copy = {};
Object.keys(unwrappedData).forEach(key => {
if (['$el', '$refs', '$nextTick', '$watch'].includes(key)) return;
copy[key] = unwrappedData[key];
});
return copy;
}
class Component {
constructor(el, componentForClone = null) {
this.$el = el;
const dataAttr = this.$el.getAttribute('x-data');
const dataExpression = dataAttr === '' ? '{}' : dataAttr;
const initExpression = this.$el.getAttribute('x-init');
let dataExtras = {
$el: this.$el
};
let canonicalComponentElementReference = componentForClone ? componentForClone.$el : this.$el;
Object.entries(Alpine.magicProperties).forEach(([name, callback]) => {
Object.defineProperty(dataExtras, `$${name}`, {
get: function get() {
return callback(canonicalComponentElementReference);
}
});
});
this.unobservedData = componentForClone ? componentForClone.getUnobservedData() : saferEval(el, dataExpression, dataExtras);
// Construct a Proxy-based observable. This will be used to handle reactivity.
let {
membrane,
data
} = this.wrapDataInObservable(this.unobservedData);
this.$data = data;
this.membrane = membrane; // After making user-supplied data methods reactive, we can now add
// our magic properties to the original data for access.
this.unobservedData.$el = this.$el;
this.unobservedData.$refs = this.getRefsProxy();
this.nextTickStack = [];
this.unobservedData.$nextTick = callback => {
this.nextTickStack.push(callback);
};
this.watchers = {};
this.unobservedData.$watch = (property, callback) => {
if (!this.watchers[property]) this.watchers[property] = [];
this.watchers[property].push(callback);
};
/* MODERN-ONLY:START */
// We remove this piece of code from the legacy build.
// In IE11, we have already defined our helpers at this point.
// Register custom magic properties.
Object.entries(Alpine.magicProperties).forEach(([name, callback]) => {
Object.defineProperty(this.unobservedData, `$${name}`, {
get: function get() {
return callback(canonicalComponentElementReference, this.$el);
}
});
});
/* MODERN-ONLY:END */
this.showDirectiveStack = [];
this.showDirectiveLastElement;
componentForClone || Alpine.onBeforeComponentInitializeds.forEach(callback => callback(this));
var initReturnedCallback; // If x-init is present AND we aren't cloning (skip x-init on clone)
if (initExpression && !componentForClone) {
// We want to allow data manipulation, but not trigger DOM updates just yet.
// We haven't even initialized the elements with their Alpine bindings. I mean c'mon.
this.pauseReactivity = true;
initReturnedCallback = this.evaluateReturnExpression(this.$el, initExpression);
this.pauseReactivity = false;
} // Register all our listeners and set all our attribute bindings.
this.initializeElements(this.$el); // Use mutation observer to detect new elements being added within this component at run-time.
// Alpine's just so darn flexible amirite?
this.listenForNewElementsToInitialize();
if (typeof initReturnedCallback === 'function') {
// Run the callback returned from the "x-init" hook to allow the user to do stuff after
// Alpine's got its grubby little paws all over everything.
initReturnedCallback.call(this.$data);
}
componentForClone || setTimeout(() => {
Alpine.onComponentInitializeds.forEach(callback => callback(this));
}, 0);
}
getUnobservedData() {
return unwrap$1(this.membrane, this.$data);
}
wrapDataInObservable(data) {
var self = this;
let updateDom = debounce(function () {
self.updateElements(self.$el);
}, 0);
return wrap(data, (target, key) => {
if (self.watchers[key]) {
// If there's a watcher for this specific key, run it.
self.watchers[key].forEach(callback => callback(target[key]));
} else if (Array.isArray(target)) {
// Arrays are special cases: if any of the items change, we consider the array as mutated.
Object.keys(self.watchers).forEach(fullDotNotationKey => {
let dotNotationParts = fullDotNotationKey.split('.'); // Ignore length mutations since they would result in duplicate calls.
// For example, when calling push, we would get a mutation for the item's key
// and a second mutation for the length property.
if (key === 'length') return;
dotNotationParts.reduce((comparisonData, part) => {
if (Object.is(target, comparisonData[part])) {
self.watchers[fullDotNotationKey].forEach(callback => callback(target));
}
return comparisonData[part];
}, self.unobservedData);
});
} else {
// Let's walk through the watchers with "dot-notation" (foo.bar) and see
// if this mutation fits any of them.
Object.keys(self.watchers).filter(i => i.includes('.')).forEach(fullDotNotationKey => {
let dotNotationParts = fullDotNotationKey.split('.'); // If this dot-notation watcher's last "part" doesn't match the current
// key, then skip it early for performance reasons.
if (key !== dotNotationParts[dotNotationParts.length - 1]) return; // Now, walk through the dot-notation "parts" recursively to find
// a match, and call the watcher if one's found.
dotNotationParts.reduce((comparisonData, part) => {
if (Object.is(target, comparisonData)) {
// Run the watchers.
self.watchers[fullDotNotationKey].forEach(callback => callback(target[key]));
}
return comparisonData[part];
}, self.unobservedData);
});
} // Don't react to data changes for cases like the `x-created` hook.
if (self.pauseReactivity) return;
updateDom();
});
}
walkAndSkipNestedComponents(el, callback, initializeComponentCallback = () => {}) {
walk(el, el => {
// We've hit a component.
if (el.hasAttribute('x-data')) {
// If it's not the current one.
if (!el.isSameNode(this.$el)) {
// Initialize it if it's not.
if (!el.__x) initializeComponentCallback(el); // Now we'll let that sub-component deal with itself.
return false;
}
}
return callback(el);
});
}
initializeElements(rootEl, extraVars = () => {}) {
this.walkAndSkipNestedComponents(rootEl, el => {
// Don't touch spawns from a for loop
if (el.__x_for_key !== undefined) return false; // Don't touch spawns from if directives
if (el.__x_inserted_me !== undefined) return false;
this.initializeElement(el, extraVars);
}, el => {
el.__x = new Component(el);
});
this.executeAndClearRemainingShowDirectiveStack();
this.executeAndClearNextTickStack(rootEl);
}
initializeElement(el, extraVars) {
// To support class attribute merging, we have to know what the element's
// original class attribute looked like for reference.
if (el.hasAttribute('class') && getXAttrs(el, this).length > 0) {
el.__x_original_classes = convertClassStringToArray(el.getAttribute('class'));
}
this.registerListeners(el, extraVars);
this.resolveBoundAttributes(el, true, extraVars);
}
updateElements(rootEl, extraVars = () => {}) {
this.walkAndSkipNestedComponents(rootEl, el => {
// Don't touch spawns from a for loop, unless the root itself is the for-loop spawn we were asked to update.
if (el.__x_for_key !== undefined && !el.isSameNode(this.$el)) return false;
this.updateElement(el, extraVars);
}, el => {
el.__x = new Component(el);
});
this.executeAndClearRemainingShowDirectiveStack();
this.executeAndClearNextTickStack(rootEl);
}
executeAndClearNextTickStack(el) {
// Skip spawns from alpine directives
if (el === this.$el && this.nextTickStack.length > 0) {
// We run the tick stack after the next frame to allow any
// running transitions to pass the initial show stage.
requestAnimationFrame(() => {
while (this.nextTickStack.length > 0) {
this.nextTickStack.shift()();
}
});
}
}
executeAndClearRemainingShowDirectiveStack() {
// The goal here is to start all the x-show transitions
// and build a nested promise chain so that elements
// only hide when the children are finished hiding.
this.showDirectiveStack.reverse().map(handler => {
return new Promise((resolve, reject) => {
handler(resolve, reject);
});
}).reduce((promiseChain, promise) => {
return promiseChain.then(() => {
return promise.then(finishElement => {
finishElement();
});
});
}, Promise.resolve(() => {})).catch(e => {
if (e !== TRANSITION_CANCELLED) throw e;
}); // We've processed the handler stack; let's clear it.
this.showDirectiveStack = [];
this.showDirectiveLastElement = undefined;
}
updateElement(el, extraVars) {
this.resolveBoundAttributes(el, false, extraVars);
}
registerListeners(el, extraVars) {
getXAttrs(el, this).forEach(({
type,
value,
modifiers,
expression
}) => {
switch (type) {
case 'on':
registerListener(this, el, value, modifiers, expression, extraVars);
break;
case 'model':
registerModelListener(this, el, modifiers, expression, extraVars);
break;
}
});
}
resolveBoundAttributes(el, initialUpdate = false, extraVars) {
let attrs = getXAttrs(el, this);
attrs.forEach(({
type,
value,
modifiers,
expression
}) => {
switch (type) {
case 'model':
handleAttributeBindingDirective(this, el, 'value', expression, extraVars, type, modifiers);
break;
case 'bind':
// The :key binding on an x-for is special, ignore it.
if (el.tagName.toLowerCase() === 'template' && value === 'key') return;
handleAttributeBindingDirective(this, el, value, expression, extraVars, type, modifiers);
break;
case 'text':
var output = this.evaluateReturnExpression(el, expression, extraVars);
handleTextDirective(el, output, expression);
break;
case 'html':
handleHtmlDirective(this, el, expression, extraVars);
break;
case 'show':
var output = this.evaluateReturnExpression(el, expression, extraVars);
handleShowDirective(this, el, output, modifiers, initialUpdate);
break;
case 'if':
// If this element also has x-for on it, don't process x-if.
// We will let the "x-for" directive handle the "if"ing.
if (attrs.some(i => i.type === 'for')) return;
var output = this.evaluateReturnExpression(el, expression, extraVars);
handleIfDirective(this, el, output, initialUpdate, extraVars);
break;
case 'for':
handleForDirective(this, el, expression, initialUpdate, extraVars);
break;
case 'cloak':
el.removeAttribute('x-cloak');
break;
}
});
}
evaluateReturnExpression(el, expression, extraVars = () => {}) {
return saferEval(el, expression, this.$data, _objectSpread2(_objectSpread2({}, extraVars()), {}, {
$dispatch: this.getDispatchFunction(el)
}));
}
evaluateCommandExpression(el, expression, extraVars = () => {}) {
return saferEvalNoReturn(el, expression, this.$data, _objectSpread2(_objectSpread2({}, extraVars()), {}, {
$dispatch: this.getDispatchFunction(el)
}));
}
getDispatchFunction(el) {
return (event, detail = {}) => {
el.dispatchEvent(new CustomEvent(event, {
detail,
bubbles: true
}));
};
}
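// Illustrative usage (markup is hypothetical, not part of this bundle): an
// expression can call `$dispatch` to fire a bubbling CustomEvent that an
// ancestor element listens for:
//
//   <div x-data="{}" x-on:notify="console.log($event.detail.text)">
//     <button x-on:click="$dispatch('notify', { text: 'saved' })">Save</button>
//   </div>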
listenForNewElementsToInitialize() {
const targetNode = this.$el;
const observerOptions = {
childList: true,
attributes: true,
subtree: true
};
const observer = new MutationObserver(mutations => {
for (let i = 0; i < mutations.length; i++) {
// Filter out mutations triggered from child components.
const closestParentComponent = mutations[i].target.closest('[x-data]');
if (!(closestParentComponent && closestParentComponent.isSameNode(this.$el))) continue;
if (mutations[i].type === 'attributes' && mutations[i].attributeName === 'x-data') {
const xAttr = mutations[i].target.getAttribute('x-data') || '{}';
const rawData = saferEval(this.$el, xAttr, {
$el: this.$el
});
Object.keys(rawData).forEach(key => {
if (this.$data[key] !== rawData[key]) {
this.$data[key] = rawData[key];
}
});
}
if (mutations[i].addedNodes.length > 0) {
mutations[i].addedNodes.forEach(node => {
if (node.nodeType !== 1 || node.__x_inserted_me) return;
if (node.matches('[x-data]') && !node.__x) {
node.__x = new Component(node);
return;
}
this.initializeElements(node);
});
}
}
});
observer.observe(targetNode, observerOptions);
}
getRefsProxy() {
var self = this;
var refObj = {};
// One of the goals of this is to not hold elements in memory, but rather re-evaluate
// the DOM when the system needs something from it. This way, the framework is flexible and
// friendly to outside DOM changes from libraries like Vue/Livewire.
// For this reason, I'm using an "on-demand" proxy to fake a "$refs" object.
return new Proxy(refObj, {
get(object, property) {
if (property === '$isAlpineProxy') return true;
// We can't just query the DOM because it's hard to filter out refs in
// nested components.
var ref;
self.walkAndSkipNestedComponents(self.$el, el => {
if (el.hasAttribute('x-ref') && el.getAttribute('x-ref') === property) {
ref = el;
}
});
return ref;
}
});
}
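// Usage note (illustrative): because `$refs` is an on-demand proxy, an access
// like `this.$refs.username` re-walks this component's DOM on every read and
// returns the element carrying x-ref="username", if any.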
}
const Alpine = {
version: "2.8.0",
pauseMutationObserver: false,
magicProperties: {},
onComponentInitializeds: [],
onBeforeComponentInitializeds: [],
ignoreFocusedForValueBinding: false,
start: async function start() {
if (!isTesting()) {
await domReady();
}
this.discoverComponents(el => {
this.initializeComponent(el);
});
// It's easier and more performant to just support Turbolinks than to listen
// for MutationObserver mutations at the document level.
document.addEventListener("turbolinks:load", () => {
this.discoverUninitializedComponents(el => {
this.initializeComponent(el);
});
});
this.listenForNewUninitializedComponentsAtRunTime();
},
discoverComponents: function discoverComponents(callback) {
const rootEls = document.querySelectorAll('[x-data]');
rootEls.forEach(rootEl => {
callback(rootEl);
});
},
discoverUninitializedComponents: function discoverUninitializedComponents(callback, el = null) {
const rootEls = (el || document).querySelectorAll('[x-data]');
Array.from(rootEls).filter(el => el.__x === undefined).forEach(rootEl => {
callback(rootEl);
});
},
listenForNewUninitializedComponentsAtRunTime: function listenForNewUninitializedComponentsAtRunTime() {
const targetNode = document.querySelector('body');
const observerOptions = {
childList: true,
attributes: true,
subtree: true
};
const observer = new MutationObserver(mutations => {
if (this.pauseMutationObserver) return;
for (let i = 0; i < mutations.length; i++) {
if (mutations[i].addedNodes.length > 0) {
mutations[i].addedNodes.forEach(node => {
// Discard non-element nodes (like line-breaks)
if (node.nodeType !== 1) return;
// Discard any changes happening within an existing component.
// They will take care of themselves.
if (node.parentElement && node.parentElement.closest('[x-data]')) return;
this.discoverUninitializedComponents(el => {
this.initializeComponent(el);
}, node.parentElement);
});
}
}
});
observer.observe(targetNode, observerOptions);
},
initializeComponent: function initializeComponent(el) {
if (!el.__x) {
// Wrap in a try/catch so that we don't prevent other components
// from initializing when one component contains an error.
try {
el.__x = new Component(el);
} catch (error) {
setTimeout(() => {
throw error;
}, 0);
}
}
},
clone: function clone(component, newEl) {
if (!newEl.__x) {
newEl.__x = new Component(newEl, component);
}
},
addMagicProperty: function addMagicProperty(name, callback) {
this.magicProperties[name] = callback;
},
onComponentInitialized: function onComponentInitialized(callback) {
this.onComponentInitializeds.push(callback);
},
onBeforeComponentInitialized: function onBeforeComponentInitialized(callback) {
this.onBeforeComponentInitializeds.push(callback);
}
};
if (!isTesting()) {
window.Alpine = Alpine;
if (window.deferLoadingAlpine) {
window.deferLoadingAlpine(function () {
window.Alpine.start();
});
} else {
window.Alpine.start();
}
}
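// Illustrative usage (the helper name is hypothetical): a page can take over
// Alpine's boot timing by defining the hook before this script executes; the
// hook receives a callback that starts Alpine when invoked:
//
//   window.deferLoadingAlpine = function (start) {
//     loadTranslations().then(start); // boot Alpine once our data is ready
//   };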
return Alpine;
})));
/***/ }),
/***/ "./node_modules/axios/index.js":
/*!*************************************!*\
!*** ./node_modules/axios/index.js ***!
\*************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
module.exports = __webpack_require__(/*! ./lib/axios */ "./node_modules/axios/lib/axios.js");
/***/ }),
/***/ "./node_modules/axios/lib/adapters/xhr.js":
/*!************************************************!*\
!*** ./node_modules/axios/lib/adapters/xhr.js ***!
\************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ./../utils */ "./node_modules/axios/lib/utils.js");
var settle = __webpack_require__(/*! ./../core/settle */ "./node_modules/axios/lib/core/settle.js");
var buildURL = __webpack_require__(/*! ./../helpers/buildURL */ "./node_modules/axios/lib/helpers/buildURL.js");
var buildFullPath = __webpack_require__(/*! ../core/buildFullPath */ "./node_modules/axios/lib/core/buildFullPath.js");
var parseHeaders = __webpack_require__(/*! ./../helpers/parseHeaders */ "./node_modules/axios/lib/helpers/parseHeaders.js");
var isURLSameOrigin = __webpack_require__(/*! ./../helpers/isURLSameOrigin */ "./node_modules/axios/lib/helpers/isURLSameOrigin.js");
var createError = __webpack_require__(/*! ../core/createError */ "./node_modules/axios/lib/core/createError.js");
module.exports = function xhrAdapter(config) {
return new Promise(function dispatchXhrRequest(resolve, reject) {
var requestData = config.data;
var requestHeaders = config.headers;
if (utils.isFormData(requestData)) {
delete requestHeaders['Content-Type']; // Let the browser set it
}
var request = new XMLHttpRequest();
// HTTP basic authentication
if (config.auth) {
var username = config.auth.username || '';
var password = config.auth.password || '';
requestHeaders.Authorization = 'Basic ' + btoa(username + ':' + password);
}
var fullPath = buildFullPath(config.baseURL, config.url);
request.open(config.method.toUpperCase(), buildURL(fullPath, config.params, config.paramsSerializer), true);
// Set the request timeout in MS
request.timeout = config.timeout;
// Listen for ready state
request.onreadystatechange = function handleLoad() {
if (!request || request.readyState !== 4) {
return;
}
// The request errored out and we didn't get a response; this will be
// handled by onerror instead.
// One exception: for requests using the file: protocol, most browsers
// will return a status of 0 even though the request succeeded.
if (request.status === 0 && !(request.responseURL && request.responseURL.indexOf('file:') === 0)) {
return;
}
// Prepare the response
var responseHeaders = 'getAllResponseHeaders' in request ? parseHeaders(request.getAllResponseHeaders()) : null;
var responseData = !config.responseType || config.responseType === 'text' ? request.responseText : request.response;
var response = {
data: responseData,
status: request.status,
statusText: request.statusText,
headers: responseHeaders,
config: config,
request: request
};
settle(resolve, reject, response);
// Clean up request
request = null;
};
// Handle browser request cancellation (as opposed to a manual cancellation)
request.onabort = function handleAbort() {
if (!request) {
return;
}
reject(createError('Request aborted', config, 'ECONNABORTED', request));
// Clean up request
request = null;
};
// Handle low level network errors
request.onerror = function handleError() {
// Real errors are hidden from us by the browser
// onerror should only fire if it's a network error
reject(createError('Network Error', config, null, request));
// Clean up request
request = null;
};
// Handle timeout
request.ontimeout = function handleTimeout() {
var timeoutErrorMessage = 'timeout of ' + config.timeout + 'ms exceeded';
if (config.timeoutErrorMessage) {
timeoutErrorMessage = config.timeoutErrorMessage;
}
reject(createError(timeoutErrorMessage, config, 'ECONNABORTED',
request));
// Clean up request
request = null;
};
// Add xsrf header
// This is only done if running in a standard browser environment.
// Specifically not if we're in a web worker, or react-native.
if (utils.isStandardBrowserEnv()) {
var cookies = __webpack_require__(/*! ./../helpers/cookies */ "./node_modules/axios/lib/helpers/cookies.js");
// Add xsrf header
var xsrfValue = (config.withCredentials || isURLSameOrigin(fullPath)) && config.xsrfCookieName ?
cookies.read(config.xsrfCookieName) :
undefined;
if (xsrfValue) {
requestHeaders[config.xsrfHeaderName] = xsrfValue;
}
}
// Add headers to the request
if ('setRequestHeader' in request) {
utils.forEach(requestHeaders, function setRequestHeader(val, key) {
if (typeof requestData === 'undefined' && key.toLowerCase() === 'content-type') {
// Remove Content-Type if data is undefined
delete requestHeaders[key];
} else {
// Otherwise add header to the request
request.setRequestHeader(key, val);
}
});
}
// Add withCredentials to request if needed
if (!utils.isUndefined(config.withCredentials)) {
request.withCredentials = !!config.withCredentials;
}
// Add responseType to request if needed
if (config.responseType) {
try {
request.responseType = config.responseType;
} catch (e) {
// Expected DOMException thrown by browsers not compatible with XMLHttpRequest Level 2.
// It can be suppressed for the 'json' type, since that can still be parsed by
// the default 'transformResponse' function.
if (config.responseType !== 'json') {
throw e;
}
}
}
// Handle progress if needed
if (typeof config.onDownloadProgress === 'function') {
request.addEventListener('progress', config.onDownloadProgress);
}
// Not all browsers support upload events
if (typeof config.onUploadProgress === 'function' && request.upload) {
request.upload.addEventListener('progress', config.onUploadProgress);
}
if (config.cancelToken) {
// Handle cancellation
config.cancelToken.promise.then(function onCanceled(cancel) {
if (!request) {
return;
}
request.abort();
reject(cancel);
// Clean up request
request = null;
});
}
if (requestData === undefined) {
requestData = null;
}
// Send the request
request.send(requestData);
});
};
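// Minimal config sketch (illustrative; axios normally invokes the adapter via
// dispatchRequest rather than directly): only a method, URL, and headers are
// required, and everything else above is optional behavior:
//
//   xhrAdapter({ method: 'get', url: '/ping', headers: {}, timeout: 0 })
//     .then(function (res) { console.log(res.status); });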
/***/ }),
/***/ "./node_modules/axios/lib/axios.js":
/*!*****************************************!*\
!*** ./node_modules/axios/lib/axios.js ***!
\*****************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ./utils */ "./node_modules/axios/lib/utils.js");
var bind = __webpack_require__(/*! ./helpers/bind */ "./node_modules/axios/lib/helpers/bind.js");
var Axios = __webpack_require__(/*! ./core/Axios */ "./node_modules/axios/lib/core/Axios.js");
var mergeConfig = __webpack_require__(/*! ./core/mergeConfig */ "./node_modules/axios/lib/core/mergeConfig.js");
var defaults = __webpack_require__(/*! ./defaults */ "./node_modules/axios/lib/defaults.js");
/**
* Create an instance of Axios
*
* @param {Object} defaultConfig The default config for the instance
* @return {Axios} A new instance of Axios
*/
function createInstance(defaultConfig) {
var context = new Axios(defaultConfig);
var instance = bind(Axios.prototype.request, context);
// Copy axios.prototype to instance
utils.extend(instance, Axios.prototype, context);
// Copy context to instance
utils.extend(instance, context);
return instance;
}
// Create the default instance to be exported
var axios = createInstance(defaults);
// Expose Axios class to allow class inheritance
axios.Axios = Axios;
// Factory for creating new instances
axios.create = function create(instanceConfig) {
return createInstance(mergeConfig(axios.defaults, instanceConfig));
};
// Expose Cancel & CancelToken
axios.Cancel = __webpack_require__(/*! ./cancel/Cancel */ "./node_modules/axios/lib/cancel/Cancel.js");
axios.CancelToken = __webpack_require__(/*! ./cancel/CancelToken */ "./node_modules/axios/lib/cancel/CancelToken.js");
axios.isCancel = __webpack_require__(/*! ./cancel/isCancel */ "./node_modules/axios/lib/cancel/isCancel.js");
// Expose all/spread
axios.all = function all(promises) {
return Promise.all(promises);
};
axios.spread = __webpack_require__(/*! ./helpers/spread */ "./node_modules/axios/lib/helpers/spread.js");
module.exports = axios;
// Allow use of default import syntax in TypeScript
module.exports.default = axios;
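// Usage sketch (URLs are placeholders): the exported function doubles as the
// default instance, and `create` derives a new instance from merged defaults:
//
//   var api = axios.create({ baseURL: 'https://api.example.com', timeout: 1000 });
//   api.get('/users').then(function (res) { console.log(res.data); });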
/***/ }),
/***/ "./node_modules/axios/lib/cancel/Cancel.js":
/*!*************************************************!*\
!*** ./node_modules/axios/lib/cancel/Cancel.js ***!
\*************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
/**
* A `Cancel` is an object that is thrown when an operation is canceled.
*
* @class
* @param {string=} message The message.
*/
function Cancel(message) {
this.message = message;
}
Cancel.prototype.toString = function toString() {
return 'Cancel' + (this.message ? ': ' + this.message : '');
};
Cancel.prototype.__CANCEL__ = true;
module.exports = Cancel;
/***/ }),
/***/ "./node_modules/axios/lib/cancel/CancelToken.js":
/*!******************************************************!*\
!*** ./node_modules/axios/lib/cancel/CancelToken.js ***!
\******************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var Cancel = __webpack_require__(/*! ./Cancel */ "./node_modules/axios/lib/cancel/Cancel.js");
/**
* A `CancelToken` is an object that can be used to request cancellation of an operation.
*
* @class
* @param {Function} executor The executor function.
*/
function CancelToken(executor) {
if (typeof executor !== 'function') {
throw new TypeError('executor must be a function.');
}
var resolvePromise;
this.promise = new Promise(function promiseExecutor(resolve) {
resolvePromise = resolve;
});
var token = this;
executor(function cancel(message) {
if (token.reason) {
// Cancellation has already been requested
return;
}
token.reason = new Cancel(message);
resolvePromise(token.reason);
});
}
/**
* Throws a `Cancel` if cancellation has been requested.
*/
CancelToken.prototype.throwIfRequested = function throwIfRequested() {
if (this.reason) {
throw this.reason;
}
};
/**
* Returns an object that contains a new `CancelToken` and a function that, when called,
* cancels the `CancelToken`.
*/
CancelToken.source = function source() {
var cancel;
var token = new CancelToken(function executor(c) {
cancel = c;
});
return {
token: token,
cancel: cancel
};
};
module.exports = CancelToken;
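// Usage sketch: `source()` pairs a token with its cancel function so a caller
// can abort an in-flight request:
//
//   var source = CancelToken.source();
//   axios.get('/long-poll', { cancelToken: source.token });
//   source.cancel('Operation canceled by the user.');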
/***/ }),
/***/ "./node_modules/axios/lib/cancel/isCancel.js":
/*!***************************************************!*\
!*** ./node_modules/axios/lib/cancel/isCancel.js ***!
\***************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
module.exports = function isCancel(value) {
return !!(value && value.__CANCEL__);
};
/***/ }),
/***/ "./node_modules/axios/lib/core/Axios.js":
/*!**********************************************!*\
!*** ./node_modules/axios/lib/core/Axios.js ***!
\**********************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ./../utils */ "./node_modules/axios/lib/utils.js");
var buildURL = __webpack_require__(/*! ../helpers/buildURL */ "./node_modules/axios/lib/helpers/buildURL.js");
var InterceptorManager = __webpack_require__(/*! ./InterceptorManager */ "./node_modules/axios/lib/core/InterceptorManager.js");
var dispatchRequest = __webpack_require__(/*! ./dispatchRequest */ "./node_modules/axios/lib/core/dispatchRequest.js");
var mergeConfig = __webpack_require__(/*! ./mergeConfig */ "./node_modules/axios/lib/core/mergeConfig.js");
/**
* Create a new instance of Axios
*
* @param {Object} instanceConfig The default config for the instance
*/
function Axios(instanceConfig) {
this.defaults = instanceConfig;
this.interceptors = {
request: new InterceptorManager(),
response: new InterceptorManager()
};
}
/**
* Dispatch a request
*
* @param {Object} config The config specific for this request (merged with this.defaults)
*/
Axios.prototype.request = function request(config) {
/*eslint no-param-reassign:0*/
// Allow for axios('example/url'[, config]) a la fetch API
if (typeof config === 'string') {
config = arguments[1] || {};
config.url = arguments[0];
} else {
config = config || {};
}
config = mergeConfig(this.defaults, config);
// Set config.method
if (config.method) {
config.method = config.method.toLowerCase();
} else if (this.defaults.method) {
config.method = this.defaults.method.toLowerCase();
} else {
config.method = 'get';
}
// Hook up interceptors middleware
var chain = [dispatchRequest, undefined];
var promise = Promise.resolve(config);
this.interceptors.request.forEach(function unshiftRequestInterceptors(interceptor) {
chain.unshift(interceptor.fulfilled, interceptor.rejected);
});
this.interceptors.response.forEach(function pushResponseInterceptors(interceptor) {
chain.push(interceptor.fulfilled, interceptor.rejected);
});
while (chain.length) {
promise = promise.then(chain.shift(), chain.shift());
}
return promise;
};
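// Note on ordering: because each request interceptor pair is unshift-ed onto
// the chain above, the most recently registered request interceptor runs
// first, while response interceptors (push-ed) run in registration order.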
Axios.prototype.getUri = function getUri(config) {
config = mergeConfig(this.defaults, config);
return buildURL(config.url, config.params, config.paramsSerializer).replace(/^\?/, '');
};
// Provide aliases for supported request methods
utils.forEach(['delete', 'get', 'head', 'options'], function forEachMethodNoData(method) {
/*eslint func-names:0*/
Axios.prototype[method] = function(url, config) {
return this.request(utils.merge(config || {}, {
method: method,
url: url
}));
};
});
utils.forEach(['post', 'put', 'patch'], function forEachMethodWithData(method) {
/*eslint func-names:0*/
Axios.prototype[method] = function(url, data, config) {
return this.request(utils.merge(config || {}, {
method: method,
url: url,
data: data
}));
};
});
module.exports = Axios;
/***/ }),
/***/ "./node_modules/axios/lib/core/InterceptorManager.js":
/*!***********************************************************!*\
!*** ./node_modules/axios/lib/core/InterceptorManager.js ***!
\***********************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ./../utils */ "./node_modules/axios/lib/utils.js");
function InterceptorManager() {
this.handlers = [];
}
/**
* Add a new interceptor to the stack
*
* @param {Function} fulfilled The function to handle `then` for a `Promise`
* @param {Function} rejected The function to handle `reject` for a `Promise`
*
* @return {Number} An ID used to remove interceptor later
*/
InterceptorManager.prototype.use = function use(fulfilled, rejected) {
this.handlers.push({
fulfilled: fulfilled,
rejected: rejected
});
return this.handlers.length - 1;
};
/**
* Remove an interceptor from the stack
*
* @param {Number} id The ID that was returned by `use`
*/
InterceptorManager.prototype.eject = function eject(id) {
if (this.handlers[id]) {
this.handlers[id] = null;
}
};
/**
* Iterate over all the registered interceptors
*
* This method is particularly useful for skipping over any
 * interceptors that may have become `null` after calling `eject`.
*
* @param {Function} fn The function to call for each interceptor
*/
InterceptorManager.prototype.forEach = function forEach(fn) {
utils.forEach(this.handlers, function forEachHandler(h) {
if (h !== null) {
fn(h);
}
});
};
module.exports = InterceptorManager;
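// Usage sketch: `use` returns an id that `eject` accepts, so an interceptor
// can be registered temporarily:
//
//   var id = axios.interceptors.request.use(function (config) { return config; });
//   axios.interceptors.request.eject(id);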
/***/ }),
/***/ "./node_modules/axios/lib/core/buildFullPath.js":
/*!******************************************************!*\
!*** ./node_modules/axios/lib/core/buildFullPath.js ***!
\******************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var isAbsoluteURL = __webpack_require__(/*! ../helpers/isAbsoluteURL */ "./node_modules/axios/lib/helpers/isAbsoluteURL.js");
var combineURLs = __webpack_require__(/*! ../helpers/combineURLs */ "./node_modules/axios/lib/helpers/combineURLs.js");
/**
* Creates a new URL by combining the baseURL with the requestedURL,
* only when the requestedURL is not already an absolute URL.
 * If the requestedURL is absolute, this function returns the requestedURL untouched.
*
* @param {string} baseURL The base URL
* @param {string} requestedURL Absolute or relative URL to combine
* @returns {string} The combined full path
*/
module.exports = function buildFullPath(baseURL, requestedURL) {
if (baseURL && !isAbsoluteURL(requestedURL)) {
return combineURLs(baseURL, requestedURL);
}
return requestedURL;
};
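// Examples (hostnames are placeholders):
//   buildFullPath('https://a.test/api', '/users')        -> 'https://a.test/api/users'
//   buildFullPath('https://a.test/api', 'https://b.test') -> 'https://b.test'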
/***/ }),
/***/ "./node_modules/axios/lib/core/createError.js":
/*!****************************************************!*\
!*** ./node_modules/axios/lib/core/createError.js ***!
\****************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var enhanceError = __webpack_require__(/*! ./enhanceError */ "./node_modules/axios/lib/core/enhanceError.js");
/**
* Create an Error with the specified message, config, error code, request and response.
*
* @param {string} message The error message.
* @param {Object} config The config.
* @param {string} [code] The error code (for example, 'ECONNABORTED').
* @param {Object} [request] The request.
* @param {Object} [response] The response.
* @returns {Error} The created error.
*/
module.exports = function createError(message, config, code, request, response) {
var error = new Error(message);
return enhanceError(error, config, code, request, response);
};
/***/ }),
/***/ "./node_modules/axios/lib/core/dispatchRequest.js":
/*!********************************************************!*\
!*** ./node_modules/axios/lib/core/dispatchRequest.js ***!
\********************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ./../utils */ "./node_modules/axios/lib/utils.js");
var transformData = __webpack_require__(/*! ./transformData */ "./node_modules/axios/lib/core/transformData.js");
var isCancel = __webpack_require__(/*! ../cancel/isCancel */ "./node_modules/axios/lib/cancel/isCancel.js");
var defaults = __webpack_require__(/*! ../defaults */ "./node_modules/axios/lib/defaults.js");
/**
* Throws a `Cancel` if cancellation has been requested.
*/
function throwIfCancellationRequested(config) {
if (config.cancelToken) {
config.cancelToken.throwIfRequested();
}
}
/**
* Dispatch a request to the server using the configured adapter.
*
* @param {object} config The config that is to be used for the request
* @returns {Promise} The Promise to be fulfilled
*/
module.exports = function dispatchRequest(config) {
throwIfCancellationRequested(config);
// Ensure headers exist
config.headers = config.headers || {};
// Transform request data
config.data = transformData(
config.data,
config.headers,
config.transformRequest
);
// Flatten headers
config.headers = utils.merge(
config.headers.common || {},
config.headers[config.method] || {},
config.headers
);
utils.forEach(
['delete', 'get', 'head', 'post', 'put', 'patch', 'common'],
function cleanHeaderConfig(method) {
delete config.headers[method];
}
);
var adapter = config.adapter || defaults.adapter;
return adapter(config).then(function onAdapterResolution(response) {
throwIfCancellationRequested(config);
// Transform response data
response.data = transformData(
response.data,
response.headers,
config.transformResponse
);
return response;
}, function onAdapterRejection(reason) {
if (!isCancel(reason)) {
throwIfCancellationRequested(config);
// Transform response data
if (reason && reason.response) {
reason.response.data = transformData(
reason.response.data,
reason.response.headers,
config.transformResponse
);
}
}
return Promise.reject(reason);
});
};
/***/ }),
/***/ "./node_modules/axios/lib/core/enhanceError.js":
/*!*****************************************************!*\
!*** ./node_modules/axios/lib/core/enhanceError.js ***!
\*****************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
/**
* Update an Error with the specified config, error code, and response.
*
* @param {Error} error The error to update.
* @param {Object} config The config.
* @param {string} [code] The error code (for example, 'ECONNABORTED').
* @param {Object} [request] The request.
* @param {Object} [response] The response.
* @returns {Error} The error.
*/
module.exports = function enhanceError(error, config, code, request, response) {
error.config = config;
if (code) {
error.code = code;
}
error.request = request;
error.response = response;
error.isAxiosError = true;
error.toJSON = function() {
return {
// Standard
message: this.message,
name: this.name,
// Microsoft
description: this.description,
number: this.number,
// Mozilla
fileName: this.fileName,
lineNumber: this.lineNumber,
columnNumber: this.columnNumber,
stack: this.stack,
// Axios
config: this.config,
code: this.code
};
};
return error;
};
/***/ }),
/***/ "./node_modules/axios/lib/core/mergeConfig.js":
/*!****************************************************!*\
!*** ./node_modules/axios/lib/core/mergeConfig.js ***!
\****************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ../utils */ "./node_modules/axios/lib/utils.js");
/**
 * Config-specific merge function that creates a new config object
 * by merging two configuration objects together.
*
* @param {Object} config1
* @param {Object} config2
* @returns {Object} New object resulting from merging config2 to config1
*/
module.exports = function mergeConfig(config1, config2) {
// eslint-disable-next-line no-param-reassign
config2 = config2 || {};
var config = {};
var valueFromConfig2Keys = ['url', 'method', 'params', 'data'];
var mergeDeepPropertiesKeys = ['headers', 'auth', 'proxy'];
var defaultToConfig2Keys = [
'baseURL', 'url', 'transformRequest', 'transformResponse', 'paramsSerializer',
'timeout', 'withCredentials', 'adapter', 'responseType', 'xsrfCookieName',
'xsrfHeaderName', 'onUploadProgress', 'onDownloadProgress',
'maxContentLength', 'validateStatus', 'maxRedirects', 'httpAgent',
'httpsAgent', 'cancelToken', 'socketPath'
];
utils.forEach(valueFromConfig2Keys, function valueFromConfig2(prop) {
if (typeof config2[prop] !== 'undefined') {
config[prop] = config2[prop];
}
});
utils.forEach(mergeDeepPropertiesKeys, function mergeDeepProperties(prop) {
if (utils.isObject(config2[prop])) {
config[prop] = utils.deepMerge(config1[prop], config2[prop]);
} else if (typeof config2[prop] !== 'undefined') {
config[prop] = config2[prop];
} else if (utils.isObject(config1[prop])) {
config[prop] = utils.deepMerge(config1[prop]);
} else if (typeof config1[prop] !== 'undefined') {
config[prop] = config1[prop];
}
});
utils.forEach(defaultToConfig2Keys, function defaultToConfig2(prop) {
if (typeof config2[prop] !== 'undefined') {
config[prop] = config2[prop];
} else if (typeof config1[prop] !== 'undefined') {
config[prop] = config1[prop];
}
});
var axiosKeys = valueFromConfig2Keys
.concat(mergeDeepPropertiesKeys)
.concat(defaultToConfig2Keys);
var otherKeys = Object
.keys(config2)
.filter(function filterAxiosKeys(key) {
return axiosKeys.indexOf(key) === -1;
});
utils.forEach(otherKeys, function otherKeysDefaultToConfig2(prop) {
if (typeof config2[prop] !== 'undefined') {
config[prop] = config2[prop];
} else if (typeof config1[prop] !== 'undefined') {
config[prop] = config1[prop];
}
});
return config;
};
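// Precedence sketch: for keys in `defaultToConfig2Keys`, config2 wins when it
// is set, otherwise config1's value is kept, e.g.
//   mergeConfig({ timeout: 1000 }, { timeout: 5000 }).timeout === 5000
//   mergeConfig({ timeout: 1000 }, {}).timeout === 1000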
/***/ }),
/***/ "./node_modules/axios/lib/core/settle.js":
/*!***********************************************!*\
!*** ./node_modules/axios/lib/core/settle.js ***!
\***********************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var createError = __webpack_require__(/*! ./createError */ "./node_modules/axios/lib/core/createError.js");
/**
* Resolve or reject a Promise based on response status.
*
* @param {Function} resolve A function that resolves the promise.
* @param {Function} reject A function that rejects the promise.
* @param {object} response The response.
*/
module.exports = function settle(resolve, reject, response) {
var validateStatus = response.config.validateStatus;
if (!validateStatus || validateStatus(response.status)) {
resolve(response);
} else {
reject(createError(
'Request failed with status code ' + response.status,
response.config,
null,
response.request,
response
));
}
};
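// Usage note: a request can widen what counts as success by overriding
// validateStatus in its config, e.g.
//   { validateStatus: function (s) { return s < 500; } }
// which makes settle resolve 4xx responses instead of rejecting them.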
/***/ }),
/***/ "./node_modules/axios/lib/core/transformData.js":
/*!******************************************************!*\
!*** ./node_modules/axios/lib/core/transformData.js ***!
\******************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ./../utils */ "./node_modules/axios/lib/utils.js");
/**
* Transform the data for a request or a response
*
* @param {Object|String} data The data to be transformed
* @param {Array} headers The headers for the request or response
* @param {Array|Function} fns A single function or Array of functions
* @returns {*} The resulting transformed data
*/
module.exports = function transformData(data, headers, fns) {
/*eslint no-param-reassign:0*/
utils.forEach(fns, function transform(fn) {
data = fn(data, headers);
});
return data;
};
/***/ }),
/***/ "./node_modules/axios/lib/defaults.js":
/*!********************************************!*\
!*** ./node_modules/axios/lib/defaults.js ***!
\********************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
/* WEBPACK VAR INJECTION */(function(process) {
var utils = __webpack_require__(/*! ./utils */ "./node_modules/axios/lib/utils.js");
var normalizeHeaderName = __webpack_require__(/*! ./helpers/normalizeHeaderName */ "./node_modules/axios/lib/helpers/normalizeHeaderName.js");
var DEFAULT_CONTENT_TYPE = {
'Content-Type': 'application/x-www-form-urlencoded'
};
function setContentTypeIfUnset(headers, value) {
if (!utils.isUndefined(headers) && utils.isUndefined(headers['Content-Type'])) {
headers['Content-Type'] = value;
}
}
function getDefaultAdapter() {
var adapter;
if (typeof XMLHttpRequest !== 'undefined') {
// For browsers use XHR adapter
adapter = __webpack_require__(/*! ./adapters/xhr */ "./node_modules/axios/lib/adapters/xhr.js");
} else if (typeof process !== 'undefined' && Object.prototype.toString.call(process) === '[object process]') {
// For node use HTTP adapter
adapter = __webpack_require__(/*! ./adapters/http */ "./node_modules/axios/lib/adapters/xhr.js");
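// (In this browser bundle, webpack resolved './adapters/http' to the XHR
// adapter file via axios's package.json "browser" field mapping, hence the
// xhr.js path above.)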
}
return adapter;
}
var defaults = {
adapter: getDefaultAdapter(),
transformRequest: [function transformRequest(data, headers) {
normalizeHeaderName(headers, 'Accept');
normalizeHeaderName(headers, 'Content-Type');
if (utils.isFormData(data) ||
utils.isArrayBuffer(data) ||
utils.isBuffer(data) ||
utils.isStream(data) ||
utils.isFile(data) ||
utils.isBlob(data)
) {
return data;
}
if (utils.isArrayBufferView(data)) {
return data.buffer;
}
if (utils.isURLSearchParams(data)) {
setContentTypeIfUnset(headers, 'application/x-www-form-urlencoded;charset=utf-8');
return data.toString();
}
if (utils.isObject(data)) {
setContentTypeIfUnset(headers, 'application/json;charset=utf-8');
return JSON.stringify(data);
}
return data;
}],
transformResponse: [function transformResponse(data) {
/*eslint no-param-reassign:0*/
if (typeof data === 'string') {
try {
data = JSON.parse(data);
} catch (e) { /* Ignore */ }
}
return data;
}],
/**
* A timeout in milliseconds to abort a request. If set to 0 (default) a
* timeout is not created.
*/
timeout: 0,
xsrfCookieName: 'XSRF-TOKEN',
xsrfHeaderName: 'X-XSRF-TOKEN',
maxContentLength: -1,
validateStatus: function validateStatus(status) {
return status >= 200 && status < 300;
}
};
defaults.headers = {
common: {
'Accept': 'application/json, text/plain, */*'
}
};
utils.forEach(['delete', 'get', 'head'], function forEachMethodNoData(method) {
defaults.headers[method] = {};
});
utils.forEach(['post', 'put', 'patch'], function forEachMethodWithData(method) {
defaults.headers[method] = utils.merge(DEFAULT_CONTENT_TYPE);
});
module.exports = defaults;
/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(/*! ./../../process/browser.js */ "./node_modules/process/browser.js")))
/***/ }),
/***/ "./node_modules/axios/lib/helpers/bind.js":
/*!************************************************!*\
!*** ./node_modules/axios/lib/helpers/bind.js ***!
\************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
module.exports = function bind(fn, thisArg) {
return function wrap() {
var args = new Array(arguments.length);
for (var i = 0; i < args.length; i++) {
args[i] = arguments[i];
}
return fn.apply(thisArg, args);
};
};
/***/ }),
/***/ "./node_modules/axios/lib/helpers/buildURL.js":
/*!****************************************************!*\
!*** ./node_modules/axios/lib/helpers/buildURL.js ***!
\****************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ./../utils */ "./node_modules/axios/lib/utils.js");
function encode(val) {
return encodeURIComponent(val).
replace(/%40/gi, '@').
replace(/%3A/gi, ':').
replace(/%24/g, '$').
replace(/%2C/gi, ',').
replace(/%20/g, '+').
replace(/%5B/gi, '[').
replace(/%5D/gi, ']');
}
/**
* Build a URL by appending params to the end
*
* @param {string} url The base of the url (e.g., http://www.google.com)
 * @param {object} [params] The params to be appended
 * @param {Function} [paramsSerializer] Optional custom serializer for `params`
* @returns {string} The formatted url
*/
module.exports = function buildURL(url, params, paramsSerializer) {
/*eslint no-param-reassign:0*/
if (!params) {
return url;
}
var serializedParams;
if (paramsSerializer) {
serializedParams = paramsSerializer(params);
} else if (utils.isURLSearchParams(params)) {
serializedParams = params.toString();
} else {
var parts = [];
utils.forEach(params, function serialize(val, key) {
if (val === null || typeof val === 'undefined') {
return;
}
if (utils.isArray(val)) {
key = key + '[]';
} else {
val = [val];
}
utils.forEach(val, function parseValue(v) {
if (utils.isDate(v)) {
v = v.toISOString();
} else if (utils.isObject(v)) {
v = JSON.stringify(v);
}
parts.push(encode(key) + '=' + encode(v));
});
});
serializedParams = parts.join('&');
}
if (serializedParams) {
var hashmarkIndex = url.indexOf('#');
if (hashmarkIndex !== -1) {
url = url.slice(0, hashmarkIndex);
}
url += (url.indexOf('?') === -1 ? '?' : '&') + serializedParams;
}
return url;
};
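// Examples (default serializer):
//   buildURL('/users', { ids: [1, 2] })  -> '/users?ids[]=1&ids[]=2'
//   buildURL('/users#top', { q: 'a b' }) -> '/users?q=a+b'   (hash is dropped)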
/***/ }),
/***/ "./node_modules/axios/lib/helpers/combineURLs.js":
/*!*******************************************************!*\
!*** ./node_modules/axios/lib/helpers/combineURLs.js ***!
\*******************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
/**
* Creates a new URL by combining the specified URLs
*
* @param {string} baseURL The base URL
* @param {string} relativeURL The relative URL
* @returns {string} The combined URL
*/
module.exports = function combineURLs(baseURL, relativeURL) {
return relativeURL
? baseURL.replace(/\/+$/, '') + '/' + relativeURL.replace(/^\/+/, '')
: baseURL;
};
/***/ }),
/***/ "./node_modules/axios/lib/helpers/cookies.js":
/*!***************************************************!*\
!*** ./node_modules/axios/lib/helpers/cookies.js ***!
\***************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ./../utils */ "./node_modules/axios/lib/utils.js");
module.exports = (
utils.isStandardBrowserEnv() ?
// Standard browser envs support document.cookie
(function standardBrowserEnv() {
return {
write: function write(name, value, expires, path, domain, secure) {
var cookie = [];
cookie.push(name + '=' + encodeURIComponent(value));
if (utils.isNumber(expires)) {
cookie.push('expires=' + new Date(expires).toGMTString());
}
if (utils.isString(path)) {
cookie.push('path=' + path);
}
if (utils.isString(domain)) {
cookie.push('domain=' + domain);
}
if (secure === true) {
cookie.push('secure');
}
document.cookie = cookie.join('; ');
},
read: function read(name) {
var match = document.cookie.match(new RegExp('(^|;\\s*)(' + name + ')=([^;]*)'));
return (match ? decodeURIComponent(match[3]) : null);
},
remove: function remove(name) {
this.write(name, '', Date.now() - 86400000);
}
};
})() :
// Non-standard browser envs (web workers, react-native) lack the needed support.
(function nonStandardBrowserEnv() {
return {
write: function write() {},
read: function read() { return null; },
remove: function remove() {}
};
})()
);
/***/ }),
/***/ "./node_modules/axios/lib/helpers/isAbsoluteURL.js":
/*!*********************************************************!*\
!*** ./node_modules/axios/lib/helpers/isAbsoluteURL.js ***!
\*********************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
/**
* Determines whether the specified URL is absolute
*
* @param {string} url The URL to test
* @returns {boolean} True if the specified URL is absolute, otherwise false
*/
module.exports = function isAbsoluteURL(url) {
// A URL is considered absolute if it begins with "<scheme>://" or "//" (protocol-relative URL).
// RFC 3986 defines scheme name as a sequence of characters beginning with a letter and followed
// by any combination of letters, digits, plus, period, or hyphen.
return /^([a-z][a-z\d\+\-\.]*:)?\/\//i.test(url);
};
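// Examples:
//   isAbsoluteURL('https://a.test/x') -> true
//   isAbsoluteURL('//cdn.test/x')     -> true   (protocol-relative)
//   isAbsoluteURL('/x')               -> false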
/***/ }),
/***/ "./node_modules/axios/lib/helpers/isURLSameOrigin.js":
/*!***********************************************************!*\
!*** ./node_modules/axios/lib/helpers/isURLSameOrigin.js ***!
\***********************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ./../utils */ "./node_modules/axios/lib/utils.js");
module.exports = (
utils.isStandardBrowserEnv() ?
// Standard browser envs have full support of the APIs needed to test
// whether the request URL is of the same origin as current location.
(function standardBrowserEnv() {
var msie = /(msie|trident)/i.test(navigator.userAgent);
var urlParsingNode = document.createElement('a');
var originURL;
/**
 * Parse a URL to discover its components
*
* @param {String} url The URL to be parsed
* @returns {Object}
*/
function resolveURL(url) {
var href = url;
if (msie) {
// IE needs attribute set twice to normalize properties
urlParsingNode.setAttribute('href', href);
href = urlParsingNode.href;
}
urlParsingNode.setAttribute('href', href);
// urlParsingNode provides the UrlUtils interface - http://url.spec.whatwg.org/#urlutils
return {
href: urlParsingNode.href,
protocol: urlParsingNode.protocol ? urlParsingNode.protocol.replace(/:$/, '') : '',
host: urlParsingNode.host,
search: urlParsingNode.search ? urlParsingNode.search.replace(/^\?/, '') : '',
hash: urlParsingNode.hash ? urlParsingNode.hash.replace(/^#/, '') : '',
hostname: urlParsingNode.hostname,
port: urlParsingNode.port,
pathname: (urlParsingNode.pathname.charAt(0) === '/') ?
urlParsingNode.pathname :
'/' + urlParsingNode.pathname
};
}
originURL = resolveURL(window.location.href);
/**
* Determine if a URL shares the same origin as the current location
*
* @param {String} requestURL The URL to test
* @returns {boolean} True if URL shares the same origin, otherwise false
*/
return function isURLSameOrigin(requestURL) {
var parsed = (utils.isString(requestURL)) ? resolveURL(requestURL) : requestURL;
return (parsed.protocol === originURL.protocol &&
parsed.host === originURL.host);
};
})() :
// Non-standard browser envs (web workers, react-native) lack the needed support.
(function nonStandardBrowserEnv() {
return function isURLSameOrigin() {
return true;
};
})()
);
/***/ }),
/***/ "./node_modules/axios/lib/helpers/normalizeHeaderName.js":
/*!***************************************************************!*\
!*** ./node_modules/axios/lib/helpers/normalizeHeaderName.js ***!
\***************************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ../utils */ "./node_modules/axios/lib/utils.js");
module.exports = function normalizeHeaderName(headers, normalizedName) {
utils.forEach(headers, function processHeader(value, name) {
if (name !== normalizedName && name.toUpperCase() === normalizedName.toUpperCase()) {
headers[normalizedName] = value;
delete headers[name];
}
});
};
/***/ }),
/***/ "./node_modules/axios/lib/helpers/parseHeaders.js":
/*!********************************************************!*\
!*** ./node_modules/axios/lib/helpers/parseHeaders.js ***!
\********************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var utils = __webpack_require__(/*! ./../utils */ "./node_modules/axios/lib/utils.js");
// Headers whose duplicates are ignored by node
// c.f. https://nodejs.org/api/http.html#http_message_headers
var ignoreDuplicateOf = [
'age', 'authorization', 'content-length', 'content-type', 'etag',
'expires', 'from', 'host', 'if-modified-since', 'if-unmodified-since',
'last-modified', 'location', 'max-forwards', 'proxy-authorization',
'referer', 'retry-after', 'user-agent'
];
/**
* Parse headers into an object
*
* ```
* Date: Wed, 27 Aug 2014 08:58:49 GMT
* Content-Type: application/json
* Connection: keep-alive
* Transfer-Encoding: chunked
* ```
*
* @param {String} headers Headers needing to be parsed
* @returns {Object} Headers parsed into an object
*/
module.exports = function parseHeaders(headers) {
var parsed = {};
var key;
var val;
var i;
if (!headers) { return parsed; }
utils.forEach(headers.split('\n'), function parser(line) {
i = line.indexOf(':');
key = utils.trim(line.substr(0, i)).toLowerCase();
val = utils.trim(line.substr(i + 1));
if (key) {
if (parsed[key] && ignoreDuplicateOf.indexOf(key) >= 0) {
return;
}
if (key === 'set-cookie') {
parsed[key] = (parsed[key] ? parsed[key] : []).concat([val]);
} else {
parsed[key] = parsed[key] ? parsed[key] + ', ' + val : val;
}
}
});
return parsed;
};
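// Example: the header block shown in the doc comment above parses to
//   { date: 'Wed, 27 Aug 2014 08:58:49 GMT',
//     'content-type': 'application/json',
//     connection: 'keep-alive',
//     'transfer-encoding': 'chunked' }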
/***/ }),
/***/ "./node_modules/axios/lib/helpers/spread.js":
/*!**************************************************!*\
!*** ./node_modules/axios/lib/helpers/spread.js ***!
\**************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
/**
* Syntactic sugar for invoking a function and expanding an array for arguments.
*
 * The common alternative would be `Function.prototype.apply`.
*
* ```js
* function f(x, y, z) {}
* var args = [1, 2, 3];
* f.apply(null, args);
* ```
*
 * With `spread` this example can be rewritten.
*
* ```js
* spread(function(x, y, z) {})([1, 2, 3]);
* ```
*
* @param {Function} callback
* @returns {Function}
*/
module.exports = function spread(callback) {
return function wrap(arr) {
return callback.apply(null, arr);
};
};
/***/ }),
/***/ "./node_modules/axios/lib/utils.js":
/*!*****************************************!*\
!*** ./node_modules/axios/lib/utils.js ***!
\*****************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
"use strict";
var bind = __webpack_require__(/*! ./helpers/bind */ "./node_modules/axios/lib/helpers/bind.js");
/*global toString:true*/
// utils is a library of generic helper functions non-specific to axios
var toString = Object.prototype.toString;
/**
* Determine if a value is an Array
*
* @param {Object} val The value to test
* @returns {boolean} True if value is an Array, otherwise false
*/
function isArray(val) {
return toString.call(val) === '[object Array]';
}
/**
* Determine if a value is undefined
*
* @param {Object} val The value to test
* @returns {boolean} True if the value is undefined, otherwise false
*/
function isUndefined(val) {
return typeof val === 'undefined';
}
/**
* Determine if a value is a Buffer
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a Buffer, otherwise false
*/
function isBuffer(val) {
return val !== null && !isUndefined(val) && val.constructor !== null && !isUndefined(val.constructor)
&& typeof val.constructor.isBuffer === 'function' && val.constructor.isBuffer(val);
}
/**
* Determine if a value is an ArrayBuffer
*
* @param {Object} val The value to test
* @returns {boolean} True if value is an ArrayBuffer, otherwise false
*/
function isArrayBuffer(val) {
return toString.call(val) === '[object ArrayBuffer]';
}
/**
* Determine if a value is a FormData
*
* @param {Object} val The value to test
 * @returns {boolean} True if value is a FormData, otherwise false
*/
function isFormData(val) {
return (typeof FormData !== 'undefined') && (val instanceof FormData);
}
/**
* Determine if a value is a view on an ArrayBuffer
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a view on an ArrayBuffer, otherwise false
*/
function isArrayBufferView(val) {
var result;
if ((typeof ArrayBuffer !== 'undefined') && (ArrayBuffer.isView)) {
result = ArrayBuffer.isView(val);
} else {
result = (val) && (val.buffer) && (val.buffer instanceof ArrayBuffer);
}
return result;
}
/**
* Determine if a value is a String
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a String, otherwise false
*/
function isString(val) {
return typeof val === 'string';
}
/**
* Determine if a value is a Number
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a Number, otherwise false
*/
function isNumber(val) {
return typeof val === 'number';
}
/**
* Determine if a value is an Object
*
* @param {Object} val The value to test
* @returns {boolean} True if value is an Object, otherwise false
*/
function isObject(val) {
return val !== null && typeof val === 'object';
}
/**
* Determine if a value is a Date
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a Date, otherwise false
*/
function isDate(val) {
return toString.call(val) === '[object Date]';
}
/**
* Determine if a value is a File
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a File, otherwise false
*/
function isFile(val) {
return toString.call(val) === '[object File]';
}
/**
* Determine if a value is a Blob
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a Blob, otherwise false
*/
function isBlob(val) {
return toString.call(val) === '[object Blob]';
}
/**
* Determine if a value is a Function
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a Function, otherwise false
*/
function isFunction(val) {
return toString.call(val) === '[object Function]';
}
/**
* Determine if a value is a Stream
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a Stream, otherwise false
*/
function isStream(val) {
return isObject(val) && isFunction(val.pipe);
}
/**
* Determine if a value is a URLSearchParams object
*
* @param {Object} val The value to test
* @returns {boolean} True if value is a URLSearchParams object, otherwise false
*/
function isURLSearchParams(val) {
return typeof URLSearchParams !== 'undefined' && val instanceof URLSearchParams;
}
/**
* Trim excess whitespace off the beginning and end of a string
*
* @param {String} str The String to trim
* @returns {String} The String freed of excess whitespace
*/
function trim(str) {
return str.replace(/^\s*/, '').replace(/\s*$/, '');
}
/**
* Determine if we're running in a standard browser environment
*
* This allows axios to run in a web worker, and react-native.
* Both environments support XMLHttpRequest, but not fully standard globals.
*
* web workers:
* typeof window -> undefined
* typeof document -> undefined
*
* react-native:
* navigator.product -> 'ReactNative'
 * nativescript:
* navigator.product -> 'NativeScript' or 'NS'
*/
function isStandardBrowserEnv() {
if (typeof navigator !== 'undefined' && (navigator.product === 'ReactNative' ||
navigator.product === 'NativeScript' ||
navigator.product === 'NS')) {
return false;
}
return (
typeof window !== 'undefined' &&
typeof document !== 'undefined'
);
}
/**
* Iterate over an Array or an Object invoking a function for each item.
*
 * If `obj` is an Array, the callback will be called passing
 * the value, index, and complete array for each item.
 *
 * If `obj` is an Object, the callback will be called passing
 * the value, key, and complete object for each property.
*
* @param {Object|Array} obj The object to iterate
* @param {Function} fn The callback to invoke for each item
*/
function forEach(obj, fn) {
// Don't bother if no value provided
if (obj === null || typeof obj === 'undefined') {
return;
}
// Force an array if not already something iterable
if (typeof obj !== 'object') {
/*eslint no-param-reassign:0*/
obj = [obj];
}
if (isArray(obj)) {
// Iterate over array values
for (var i = 0, l = obj.length; i < l; i++) {
fn.call(null, obj[i], i, obj);
}
} else {
// Iterate over object keys
for (var key in obj) {
if (Object.prototype.hasOwnProperty.call(obj, key)) {
fn.call(null, obj[key], key, obj);
}
}
}
}
/**
* Accepts varargs expecting each argument to be an object, then
* immutably merges the properties of each object and returns result.
*
* When multiple objects contain the same key the later object in
* the arguments list will take precedence.
*
* Example:
*
* ```js
* var result = merge({foo: 123}, {foo: 456});
* console.log(result.foo); // outputs 456
* ```
*
* @param {Object} obj1 Object to merge
 * @returns {Object} Result of merging all properties
*/
function merge(/* obj1, obj2, obj3, ... */) {
var result = {};
function assignValue(val, key) {
if (typeof result[key] === 'object' && typeof val === 'object') {
result[key] = merge(result[key], val);
} else {
result[key] = val;
}
}
for (var i = 0, l = arguments.length; i < l; i++) {
forEach(arguments[i], assignValue);
}
return result;
}
/**
 * Like `merge`, except that no references to the original
 * objects are kept.
*
* @see merge
* @param {Object} obj1 Object to merge
 * @returns {Object} Result of merging all properties
*/
function deepMerge(/* obj1, obj2, obj3, ... */) {
var result = {};
function assignValue(val, key) {
if (typeof result[key] === 'object' && typeof val === 'object') {
result[key] = deepMerge(result[key], val);
} else if (typeof val === 'object') {
result[key] = deepMerge({}, val);
} else {
result[key] = val;
}
}
for (var i = 0, l = arguments.length; i < l; i++) {
forEach(arguments[i], assignValue);
}
return result;
}
/**
* Extends object a by mutably adding to it the properties of object b.
*
* @param {Object} a The object to be extended
* @param {Object} b The object to copy properties from
* @param {Object} thisArg The object to bind function to
* @return {Object} The resulting value of object a
*/
function extend(a, b, thisArg) {
forEach(b, function assignValue(val, key) {
if (thisArg && typeof val === 'function') {
a[key] = bind(val, thisArg);
} else {
a[key] = val;
}
});
return a;
}
module.exports = {
isArray: isArray,
isArrayBuffer: isArrayBuffer,
isBuffer: isBuffer,
isFormData: isFormData,
isArrayBufferView: isArrayBufferView,
isString: isString,
isNumber: isNumber,
isObject: isObject,
isUndefined: isUndefined,
isDate: isDate,
isFile: isFile,
isBlob: isBlob,
isFunction: isFunction,
isStream: isStream,
isURLSearchParams: isURLSearchParams,
isStandardBrowserEnv: isStandardBrowserEnv,
forEach: forEach,
merge: merge,
deepMerge: deepMerge,
extend: extend,
trim: trim
};
/***/ }),
/***/ "./node_modules/lodash/lodash.js":
/*!***************************************!*\
!*** ./node_modules/lodash/lodash.js ***!
\***************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
/* WEBPACK VAR INJECTION */(function(global, module) {var __WEBPACK_AMD_DEFINE_RESULT__;/**
* @license
* Lodash <https://lodash.com/>
* Copyright OpenJS Foundation and other contributors <https://openjsf.org/>
* Released under MIT license <https://lodash.com/license>
* Based on Underscore.js 1.8.3 <http://underscorejs.org/LICENSE>
* Copyright Jeremy Ashkenas, DocumentCloud and Investigative Reporters & Editors
*/
;(function() {
/** Used as a safe reference for `undefined` in pre-ES5 environments. */
var undefined;
/** Used as the semantic version number. */
var VERSION = '4.17.20';
/** Used as the size to enable large array optimizations. */
var LARGE_ARRAY_SIZE = 200;
/** Error message constants. */
var CORE_ERROR_TEXT = 'Unsupported core-js use. Try https://npms.io/search?q=ponyfill.',
FUNC_ERROR_TEXT = 'Expected a function';
/** Used to stand-in for `undefined` hash values. */
var HASH_UNDEFINED = '__lodash_hash_undefined__';
/** Used as the maximum memoize cache size. */
var MAX_MEMOIZE_SIZE = 500;
/** Used as the internal argument placeholder. */
var PLACEHOLDER = '__lodash_placeholder__';
/** Used to compose bitmasks for cloning. */
var CLONE_DEEP_FLAG = 1,
CLONE_FLAT_FLAG = 2,
CLONE_SYMBOLS_FLAG = 4;
/** Used to compose bitmasks for value comparisons. */
var COMPARE_PARTIAL_FLAG = 1,
COMPARE_UNORDERED_FLAG = 2;
/** Used to compose bitmasks for function metadata. */
var WRAP_BIND_FLAG = 1,
WRAP_BIND_KEY_FLAG = 2,
WRAP_CURRY_BOUND_FLAG = 4,
WRAP_CURRY_FLAG = 8,
WRAP_CURRY_RIGHT_FLAG = 16,
WRAP_PARTIAL_FLAG = 32,
WRAP_PARTIAL_RIGHT_FLAG = 64,
WRAP_ARY_FLAG = 128,
WRAP_REARG_FLAG = 256,
WRAP_FLIP_FLAG = 512;
/** Used as default options for `_.truncate`. */
var DEFAULT_TRUNC_LENGTH = 30,
DEFAULT_TRUNC_OMISSION = '...';
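// Illustrative default: with these options, _.truncate('hi-diddly-ho there, neighborino')
// returns 'hi-diddly-ho there, neighbo...' (30 characters including the omission).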
/** Used to detect hot functions by number of calls within a span of milliseconds. */
var HOT_COUNT = 800,
HOT_SPAN = 16;
/** Used to indicate the type of lazy iteratees. */
var LAZY_FILTER_FLAG = 1,
LAZY_MAP_FLAG = 2,
LAZY_WHILE_FLAG = 3;
/** Used as references for various `Number` constants. */
var INFINITY = 1 / 0,
MAX_SAFE_INTEGER = 9007199254740991,
MAX_INTEGER = 1.7976931348623157e+308,
NAN = 0 / 0;
/** Used as references for the maximum length and index of an array. */
var MAX_ARRAY_LENGTH = 4294967295,
MAX_ARRAY_INDEX = MAX_ARRAY_LENGTH - 1,
HALF_MAX_ARRAY_LENGTH = MAX_ARRAY_LENGTH >>> 1;
/** Used to associate wrap methods with their bit flags. */
var wrapFlags = [
['ary', WRAP_ARY_FLAG],
['bind', WRAP_BIND_FLAG],
['bindKey', WRAP_BIND_KEY_FLAG],
['curry', WRAP_CURRY_FLAG],
['curryRight', WRAP_CURRY_RIGHT_FLAG],
['flip', WRAP_FLIP_FLAG],
['partial', WRAP_PARTIAL_FLAG],
['partialRight', WRAP_PARTIAL_RIGHT_FLAG],
['rearg', WRAP_REARG_FLAG]
];
/** `Object#toString` result references. */
var argsTag = '[object Arguments]',
arrayTag = '[object Array]',
asyncTag = '[object AsyncFunction]',
boolTag = '[object Boolean]',
dateTag = '[object Date]',
domExcTag = '[object DOMException]',
errorTag = '[object Error]',
funcTag = '[object Function]',
genTag = '[object GeneratorFunction]',
mapTag = '[object Map]',
numberTag = '[object Number]',
nullTag = '[object Null]',
objectTag = '[object Object]',
promiseTag = '[object Promise]',
proxyTag = '[object Proxy]',
regexpTag = '[object RegExp]',
setTag = '[object Set]',
stringTag = '[object String]',
symbolTag = '[object Symbol]',
undefinedTag = '[object Undefined]',
weakMapTag = '[object WeakMap]',
weakSetTag = '[object WeakSet]';
var arrayBufferTag = '[object ArrayBuffer]',
dataViewTag = '[object DataView]',
float32Tag = '[object Float32Array]',
float64Tag = '[object Float64Array]',
int8Tag = '[object Int8Array]',
int16Tag = '[object Int16Array]',
int32Tag = '[object Int32Array]',
uint8Tag = '[object Uint8Array]',
uint8ClampedTag = '[object Uint8ClampedArray]',
uint16Tag = '[object Uint16Array]',
uint32Tag = '[object Uint32Array]';
/** Used to match empty string literals in compiled template source. */
var reEmptyStringLeading = /\b__p \+= '';/g,
reEmptyStringMiddle = /\b(__p \+=) '' \+/g,
reEmptyStringTrailing = /(__e\(.*?\)|\b__t\)) \+\n'';/g;
/** Used to match HTML entities and HTML characters. */
var reEscapedHtml = /&(?:amp|lt|gt|quot|#39);/g,
reUnescapedHtml = /[&<>"']/g,
reHasEscapedHtml = RegExp(reEscapedHtml.source),
reHasUnescapedHtml = RegExp(reUnescapedHtml.source);
/** Used to match template delimiters. */
var reEscape = /<%-([\s\S]+?)%>/g,
reEvaluate = /<%([\s\S]+?)%>/g,
reInterpolate = /<%=([\s\S]+?)%>/g;
/** Used to match property names within property paths. */
var reIsDeepProp = /\.|\[(?:[^[\]]*|(["'])(?:(?!\1)[^\\]|\\.)*?\1)\]/,
reIsPlainProp = /^\w*$/,
rePropName = /[^.[\]]+|\[(?:(-?\d+(?:\.\d+)?)|(["'])((?:(?!\2)[^\\]|\\.)*?)\2)\]|(?=(?:\.|\[\])(?:\.|\[\]|$))/g;
/**
* Used to match `RegExp`
* [syntax characters](http://ecma-international.org/ecma-262/7.0/#sec-patterns).
*/
var reRegExpChar = /[\\^$.*+?()[\]{}|]/g,
reHasRegExpChar = RegExp(reRegExpChar.source);
/** Used to match leading and trailing whitespace. */
var reTrim = /^\s+|\s+$/g,
reTrimStart = /^\s+/,
reTrimEnd = /\s+$/;
/** Used to match wrap detail comments. */
var reWrapComment = /\{(?:\n\/\* \[wrapped with .+\] \*\/)?\n?/,
reWrapDetails = /\{\n\/\* \[wrapped with (.+)\] \*/,
reSplitDetails = /,? & /;
/** Used to match words composed of alphanumeric characters. */
var reAsciiWord = /[^\x00-\x2f\x3a-\x40\x5b-\x60\x7b-\x7f]+/g;
/** Used to match backslashes in property paths. */
var reEscapeChar = /\\(\\)?/g;
/**
* Used to match
* [ES template delimiters](http://ecma-international.org/ecma-262/7.0/#sec-template-literal-lexical-components).
*/
var reEsTemplate = /\$\{([^\\}]*(?:\\.[^\\}]*)*)\}/g;
/** Used to match `RegExp` flags from their coerced string values. */
var reFlags = /\w*$/;
/** Used to detect bad signed hexadecimal string values. */
var reIsBadHex = /^[-+]0x[0-9a-f]+$/i;
/** Used to detect binary string values. */
var reIsBinary = /^0b[01]+$/i;
/** Used to detect host constructors (Safari). */
var reIsHostCtor = /^\[object .+?Constructor\]$/;
/** Used to detect octal string values. */
var reIsOctal = /^0o[0-7]+$/i;
/** Used to detect unsigned integer values. */
var reIsUint = /^(?:0|[1-9]\d*)$/;
/** Used to match Latin Unicode letters (excluding mathematical operators). */
var reLatin = /[\xc0-\xd6\xd8-\xf6\xf8-\xff\u0100-\u017f]/g;
/** Used to ensure capturing order of template delimiters. */
var reNoMatch = /($^)/;
/** Used to match unescaped characters in compiled string literals. */
var reUnescapedString = /['\n\r\u2028\u2029\\]/g;
/** Used to compose unicode character classes. */
var rsAstralRange = '\\ud800-\\udfff',
rsComboMarksRange = '\\u0300-\\u036f',
reComboHalfMarksRange = '\\ufe20-\\ufe2f',
rsComboSymbolsRange = '\\u20d0-\\u20ff',
rsComboRange = rsComboMarksRange + reComboHalfMarksRange + rsComboSymbolsRange,
rsDingbatRange = '\\u2700-\\u27bf',
rsLowerRange = 'a-z\\xdf-\\xf6\\xf8-\\xff',
rsMathOpRange = '\\xac\\xb1\\xd7\\xf7',
rsNonCharRange = '\\x00-\\x2f\\x3a-\\x40\\x5b-\\x60\\x7b-\\xbf',
rsPunctuationRange = '\\u2000-\\u206f',
rsSpaceRange = ' \\t\\x0b\\f\\xa0\\ufeff\\n\\r\\u2028\\u2029\\u1680\\u180e\\u2000\\u2001\\u2002\\u2003\\u2004\\u2005\\u2006\\u2007\\u2008\\u2009\\u200a\\u202f\\u205f\\u3000',
rsUpperRange = 'A-Z\\xc0-\\xd6\\xd8-\\xde',
rsVarRange = '\\ufe0e\\ufe0f',
rsBreakRange = rsMathOpRange + rsNonCharRange + rsPunctuationRange + rsSpaceRange;
/** Used to compose unicode capture groups. */
var rsApos = "['\u2019]",
rsAstral = '[' + rsAstralRange + ']',
rsBreak = '[' + rsBreakRange + ']',
rsCombo = '[' + rsComboRange + ']',
rsDigits = '\\d+',
rsDingbat = '[' + rsDingbatRange + ']',
rsLower = '[' + rsLowerRange + ']',
rsMisc = '[^' + rsAstralRange + rsBreakRange + rsDigits + rsDingbatRange + rsLowerRange + rsUpperRange + ']',
rsFitz = '\\ud83c[\\udffb-\\udfff]',
rsModifier = '(?:' + rsCombo + '|' + rsFitz + ')',
rsNonAstral = '[^' + rsAstralRange + ']',
rsRegional = '(?:\\ud83c[\\udde6-\\uddff]){2}',
rsSurrPair = '[\\ud800-\\udbff][\\udc00-\\udfff]',
rsUpper = '[' + rsUpperRange + ']',
rsZWJ = '\\u200d';
/** Used to compose unicode regexes. */
var rsMiscLower = '(?:' + rsLower + '|' + rsMisc + ')',
rsMiscUpper = '(?:' + rsUpper + '|' + rsMisc + ')',
rsOptContrLower = '(?:' + rsApos + '(?:d|ll|m|re|s|t|ve))?',
rsOptContrUpper = '(?:' + rsApos + '(?:D|LL|M|RE|S|T|VE))?',
reOptMod = rsModifier + '?',
rsOptVar = '[' + rsVarRange + ']?',
rsOptJoin = '(?:' + rsZWJ + '(?:' + [rsNonAstral, rsRegional, rsSurrPair].join('|') + ')' + rsOptVar + reOptMod + ')*',
rsOrdLower = '\\d*(?:1st|2nd|3rd|(?![123])\\dth)(?=\\b|[A-Z_])',
rsOrdUpper = '\\d*(?:1ST|2ND|3RD|(?![123])\\dTH)(?=\\b|[a-z_])',
rsSeq = rsOptVar + reOptMod + rsOptJoin,
rsEmoji = '(?:' + [rsDingbat, rsRegional, rsSurrPair].join('|') + ')' + rsSeq,
rsSymbol = '(?:' + [rsNonAstral + rsCombo + '?', rsCombo, rsRegional, rsSurrPair, rsAstral].join('|') + ')';
/** Used to match apostrophes. */
var reApos = RegExp(rsApos, 'g');
/**
* Used to match [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks) and
* [combining diacritical marks for symbols](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks_for_Symbols).
*/
var reComboMark = RegExp(rsCombo, 'g');
/** Used to match [string symbols](https://mathiasbynens.be/notes/javascript-unicode). */
var reUnicode = RegExp(rsFitz + '(?=' + rsFitz + ')|' + rsSymbol + rsSeq, 'g');
/** Used to match complex or compound words. */
var reUnicodeWord = RegExp([
rsUpper + '?' + rsLower + '+' + rsOptContrLower + '(?=' + [rsBreak, rsUpper, '$'].join('|') + ')',
rsMiscUpper + '+' + rsOptContrUpper + '(?=' + [rsBreak, rsUpper + rsMiscLower, '$'].join('|') + ')',
rsUpper + '?' + rsMiscLower + '+' + rsOptContrLower,
rsUpper + '+' + rsOptContrUpper,
rsOrdUpper,
rsOrdLower,
rsDigits,
rsEmoji
].join('|'), 'g');
/** Used to detect strings with [zero-width joiners or code points from the astral planes](http://eev.ee/blog/2015/09/12/dark-corners-of-unicode/). */
var reHasUnicode = RegExp('[' + rsZWJ + rsAstralRange + rsComboRange + rsVarRange + ']');
/** Used to detect strings that need a more robust regexp to match words. */
var reHasUnicodeWord = /[a-z][A-Z]|[A-Z]{2}[a-z]|[0-9][a-zA-Z]|[a-zA-Z][0-9]|[^a-zA-Z0-9 ]/;
/** Used to assign default `context` object properties. */
var contextProps = [
'Array', 'Buffer', 'DataView', 'Date', 'Error', 'Float32Array', 'Float64Array',
'Function', 'Int8Array', 'Int16Array', 'Int32Array', 'Map', 'Math', 'Object',
'Promise', 'RegExp', 'Set', 'String', 'Symbol', 'TypeError', 'Uint8Array',
'Uint8ClampedArray', 'Uint16Array', 'Uint32Array', 'WeakMap',
'_', 'clearTimeout', 'isFinite', 'parseInt', 'setTimeout'
];
/** Used to make template sourceURLs easier to identify. */
var templateCounter = -1;
/** Used to identify `toStringTag` values of typed arrays. */
var typedArrayTags = {};
typedArrayTags[float32Tag] = typedArrayTags[float64Tag] =
typedArrayTags[int8Tag] = typedArrayTags[int16Tag] =
typedArrayTags[int32Tag] = typedArrayTags[uint8Tag] =
typedArrayTags[uint8ClampedTag] = typedArrayTags[uint16Tag] =
typedArrayTags[uint32Tag] = true;
typedArrayTags[argsTag] = typedArrayTags[arrayTag] =
typedArrayTags[arrayBufferTag] = typedArrayTags[boolTag] =
typedArrayTags[dataViewTag] = typedArrayTags[dateTag] =
typedArrayTags[errorTag] = typedArrayTags[funcTag] =
typedArrayTags[mapTag] = typedArrayTags[numberTag] =
typedArrayTags[objectTag] = typedArrayTags[regexpTag] =
typedArrayTags[setTag] = typedArrayTags[stringTag] =
typedArrayTags[weakMapTag] = false;
/** Used to identify `toStringTag` values supported by `_.clone`. */
var cloneableTags = {};
cloneableTags[argsTag] = cloneableTags[arrayTag] =
cloneableTags[arrayBufferTag] = cloneableTags[dataViewTag] =
cloneableTags[boolTag] = cloneableTags[dateTag] =
cloneableTags[float32Tag] = cloneableTags[float64Tag] =
cloneableTags[int8Tag] = cloneableTags[int16Tag] =
cloneableTags[int32Tag] = cloneableTags[mapTag] =
cloneableTags[numberTag] = cloneableTags[objectTag] =
cloneableTags[regexpTag] = cloneableTags[setTag] =
cloneableTags[stringTag] = cloneableTags[symbolTag] =
cloneableTags[uint8Tag] = cloneableTags[uint8ClampedTag] =
cloneableTags[uint16Tag] = cloneableTags[uint32Tag] = true;
cloneableTags[errorTag] = cloneableTags[funcTag] =
cloneableTags[weakMapTag] = false;
/** Used to map Latin Unicode letters to basic Latin letters. */
var deburredLetters = {
// Latin-1 Supplement block.
'\xc0': 'A', '\xc1': 'A', '\xc2': 'A', '\xc3': 'A', '\xc4': 'A', '\xc5': 'A',
'\xe0': 'a', '\xe1': 'a', '\xe2': 'a', '\xe3': 'a', '\xe4': 'a', '\xe5': 'a',
'\xc7': 'C', '\xe7': 'c',
'\xd0': 'D', '\xf0': 'd',
'\xc8': 'E', '\xc9': 'E', '\xca': 'E', '\xcb': 'E',
'\xe8': 'e', '\xe9': 'e', '\xea': 'e', '\xeb': 'e',
'\xcc': 'I', '\xcd': 'I', '\xce': 'I', '\xcf': 'I',
'\xec': 'i', '\xed': 'i', '\xee': 'i', '\xef': 'i',
'\xd1': 'N', '\xf1': 'n',
'\xd2': 'O', '\xd3': 'O', '\xd4': 'O', '\xd5': 'O', '\xd6': 'O', '\xd8': 'O',
'\xf2': 'o', '\xf3': 'o', '\xf4': 'o', '\xf5': 'o', '\xf6': 'o', '\xf8': 'o',
'\xd9': 'U', '\xda': 'U', '\xdb': 'U', '\xdc': 'U',
'\xf9': 'u', '\xfa': 'u', '\xfb': 'u', '\xfc': 'u',
'\xdd': 'Y', '\xfd': 'y', '\xff': 'y',
'\xc6': 'Ae', '\xe6': 'ae',
'\xde': 'Th', '\xfe': 'th',
'\xdf': 'ss',
// Latin Extended-A block.
'\u0100': 'A', '\u0102': 'A', '\u0104': 'A',
'\u0101': 'a', '\u0103': 'a', '\u0105': 'a',
'\u0106': 'C', '\u0108': 'C', '\u010a': 'C', '\u010c': 'C',
'\u0107': 'c', '\u0109': 'c', '\u010b': 'c', '\u010d': 'c',
'\u010e': 'D', '\u0110': 'D', '\u010f': 'd', '\u0111': 'd',
'\u0112': 'E', '\u0114': 'E', '\u0116': 'E', '\u0118': 'E', '\u011a': 'E',
'\u0113': 'e', '\u0115': 'e', '\u0117': 'e', '\u0119': 'e', '\u011b': 'e',
'\u011c': 'G', '\u011e': 'G', '\u0120': 'G', '\u0122': 'G',
'\u011d': 'g', '\u011f': 'g', '\u0121': 'g', '\u0123': 'g',
'\u0124': 'H', '\u0126': 'H', '\u0125': 'h', '\u0127': 'h',
'\u0128': 'I', '\u012a': 'I', '\u012c': 'I', '\u012e': 'I', '\u0130': 'I',
'\u0129': 'i', '\u012b': 'i', '\u012d': 'i', '\u012f': 'i', '\u0131': 'i',
'\u0134': 'J', '\u0135': 'j',
'\u0136': 'K', '\u0137': 'k', '\u0138': 'k',
'\u0139': 'L', '\u013b': 'L', '\u013d': 'L', '\u013f': 'L', '\u0141': 'L',
'\u013a': 'l', '\u013c': 'l', '\u013e': 'l', '\u0140': 'l', '\u0142': 'l',
'\u0143': 'N', '\u0145': 'N', '\u0147': 'N', '\u014a': 'N',
'\u0144': 'n', '\u0146': 'n', '\u0148': 'n', '\u014b': 'n',
'\u014c': 'O', '\u014e': 'O', '\u0150': 'O',
'\u014d': 'o', '\u014f': 'o', '\u0151': 'o',
'\u0154': 'R', '\u0156': 'R', '\u0158': 'R',
'\u0155': 'r', '\u0157': 'r', '\u0159': 'r',
'\u015a': 'S', '\u015c': 'S', '\u015e': 'S', '\u0160': 'S',
'\u015b': 's', '\u015d': 's', '\u015f': 's', '\u0161': 's',
'\u0162': 'T', '\u0164': 'T', '\u0166': 'T',
'\u0163': 't', '\u0165': 't', '\u0167': 't',
'\u0168': 'U', '\u016a': 'U', '\u016c': 'U', '\u016e': 'U', '\u0170': 'U', '\u0172': 'U',
'\u0169': 'u', '\u016b': 'u', '\u016d': 'u', '\u016f': 'u', '\u0171': 'u', '\u0173': 'u',
'\u0174': 'W', '\u0175': 'w',
'\u0176': 'Y', '\u0177': 'y', '\u0178': 'Y',
'\u0179': 'Z', '\u017b': 'Z', '\u017d': 'Z',
'\u017a': 'z', '\u017c': 'z', '\u017e': 'z',
'\u0132': 'IJ', '\u0133': 'ij',
'\u0152': 'Oe', '\u0153': 'oe',
'\u0149': "'n", '\u017f': 's'
};
/** Used to map characters to HTML entities. */
var htmlEscapes = {
'&': '&amp;',
'<': '&lt;',
'>': '&gt;',
'"': '&quot;',
"'": '&#39;'
};
/** Used to map HTML entities to characters. */
var htmlUnescapes = {
'&amp;': '&',
'&lt;': '<',
'&gt;': '>',
'&quot;': '"',
'&#39;': "'"
};
/** Used to escape characters for inclusion in compiled string literals. */
var stringEscapes = {
'\\': '\\',
"'": "'",
'\n': 'n',
'\r': 'r',
'\u2028': 'u2028',
'\u2029': 'u2029'
};
/** Built-in method references without a dependency on `root`. */
var freeParseFloat = parseFloat,
freeParseInt = parseInt;
/** Detect free variable `global` from Node.js. */
var freeGlobal = typeof global == 'object' && global && global.Object === Object && global;
/** Detect free variable `self`. */
var freeSelf = typeof self == 'object' && self && self.Object === Object && self;
/** Used as a reference to the global object. */
var root = freeGlobal || freeSelf || Function('return this')();
/** Detect free variable `exports`. */
var freeExports = true && exports && !exports.nodeType && exports;
/** Detect free variable `module`. */
var freeModule = freeExports && typeof module == 'object' && module && !module.nodeType && module;
/** Detect the popular CommonJS extension `module.exports`. */
var moduleExports = freeModule && freeModule.exports === freeExports;
/** Detect free variable `process` from Node.js. */
var freeProcess = moduleExports && freeGlobal.process;
/** Used to access faster Node.js helpers. */
var nodeUtil = (function() {
try {
// Use `util.types` for Node.js 10+.
var types = freeModule && freeModule.require && freeModule.require('util').types;
if (types) {
return types;
}
// Legacy `process.binding('util')` for Node.js < 10.
return freeProcess && freeProcess.binding && freeProcess.binding('util');
} catch (e) {}
}());
/* Node.js helper references. */
var nodeIsArrayBuffer = nodeUtil && nodeUtil.isArrayBuffer,
nodeIsDate = nodeUtil && nodeUtil.isDate,
nodeIsMap = nodeUtil && nodeUtil.isMap,
nodeIsRegExp = nodeUtil && nodeUtil.isRegExp,
nodeIsSet = nodeUtil && nodeUtil.isSet,
nodeIsTypedArray = nodeUtil && nodeUtil.isTypedArray;
/*--------------------------------------------------------------------------*/
/**
* A faster alternative to `Function#apply`, this function invokes `func`
* with the `this` binding of `thisArg` and the arguments of `args`.
*
* @private
* @param {Function} func The function to invoke.
* @param {*} thisArg The `this` binding of `func`.
* @param {Array} args The arguments to invoke `func` with.
* @returns {*} Returns the result of `func`.
*/
function apply(func, thisArg, args) {
switch (args.length) {
case 0: return func.call(thisArg);
case 1: return func.call(thisArg, args[0]);
case 2: return func.call(thisArg, args[0], args[1]);
case 3: return func.call(thisArg, args[0], args[1], args[2]);
}
return func.apply(thisArg, args);
}
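// Illustrative only (kept as a comment so this bundle's runtime is unchanged;
// `apply` is private to this closure): small argument counts dispatch to
// `func.call`, which is cheaper than `func.apply` in many engines.
//
//   apply(Math.max, null, [1, 2]);       // => 2, via func.call(thisArg, a, b)
//   apply(Math.max, null, [1, 2, 3, 4]); // => 4, via the func.apply fallback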
/**
* A specialized version of `baseAggregator` for arrays.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} setter The function to set `accumulator` values.
* @param {Function} iteratee The iteratee to transform keys.
* @param {Object} accumulator The initial aggregated object.
* @returns {Object} Returns `accumulator`.
*/
function arrayAggregator(array, setter, iteratee, accumulator) {
var index = -1,
length = array == null ? 0 : array.length;
while (++index < length) {
var value = array[index];
setter(accumulator, value, iteratee(value), array);
}
return accumulator;
}
/**
* A specialized version of `_.forEach` for arrays without support for
* iteratee shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array} Returns `array`.
*/
function arrayEach(array, iteratee) {
var index = -1,
length = array == null ? 0 : array.length;
while (++index < length) {
if (iteratee(array[index], index, array) === false) {
break;
}
}
return array;
}
/**
* A specialized version of `_.forEachRight` for arrays without support for
* iteratee shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array} Returns `array`.
*/
function arrayEachRight(array, iteratee) {
var length = array == null ? 0 : array.length;
while (length--) {
if (iteratee(array[length], length, array) === false) {
break;
}
}
return array;
}
/**
* A specialized version of `_.every` for arrays without support for
* iteratee shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} predicate The function invoked per iteration.
* @returns {boolean} Returns `true` if all elements pass the predicate check,
* else `false`.
*/
function arrayEvery(array, predicate) {
var index = -1,
length = array == null ? 0 : array.length;
while (++index < length) {
if (!predicate(array[index], index, array)) {
return false;
}
}
return true;
}
/**
* A specialized version of `_.filter` for arrays without support for
* iteratee shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} predicate The function invoked per iteration.
* @returns {Array} Returns the new filtered array.
*/
function arrayFilter(array, predicate) {
var index = -1,
length = array == null ? 0 : array.length,
resIndex = 0,
result = [];
while (++index < length) {
var value = array[index];
if (predicate(value, index, array)) {
result[resIndex++] = value;
}
}
return result;
}
/**
* A specialized version of `_.includes` for arrays without support for
* specifying an index to search from.
*
* @private
* @param {Array} [array] The array to inspect.
* @param {*} value The value to search for.
* @returns {boolean} Returns `true` if `target` is found, else `false`.
*/
function arrayIncludes(array, value) {
var length = array == null ? 0 : array.length;
return !!length && baseIndexOf(array, value, 0) > -1;
}
/**
* This function is like `arrayIncludes` except that it accepts a comparator.
*
* @private
* @param {Array} [array] The array to inspect.
* @param {*} value The value to search for.
* @param {Function} comparator The comparator invoked per element.
* @returns {boolean} Returns `true` if `target` is found, else `false`.
*/
function arrayIncludesWith(array, value, comparator) {
var index = -1,
length = array == null ? 0 : array.length;
while (++index < length) {
if (comparator(value, array[index])) {
return true;
}
}
return false;
}
/**
* A specialized version of `_.map` for arrays without support for iteratee
* shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array} Returns the new mapped array.
*/
function arrayMap(array, iteratee) {
var index = -1,
length = array == null ? 0 : array.length,
result = Array(length);
while (++index < length) {
result[index] = iteratee(array[index], index, array);
}
return result;
}
/**
* Appends the elements of `values` to `array`.
*
* @private
* @param {Array} array The array to modify.
* @param {Array} values The values to append.
* @returns {Array} Returns `array`.
*/
function arrayPush(array, values) {
var index = -1,
length = values.length,
offset = array.length;
while (++index < length) {
array[offset + index] = values[index];
}
return array;
}
/**
* A specialized version of `_.reduce` for arrays without support for
* iteratee shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @param {*} [accumulator] The initial value.
* @param {boolean} [initAccum] Specify using the first element of `array` as
* the initial value.
* @returns {*} Returns the accumulated value.
*/
function arrayReduce(array, iteratee, accumulator, initAccum) {
var index = -1,
length = array == null ? 0 : array.length;
if (initAccum && length) {
accumulator = array[++index];
}
while (++index < length) {
accumulator = iteratee(accumulator, array[index], index, array);
}
return accumulator;
}
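// Illustrative only: when `initAccum` is truthy and the array is non-empty,
// the first element seeds the accumulator and iteration starts at index 1.
//
//   arrayReduce([1, 2, 3], function(acc, n) { return acc + n; }, 0);               // => 6
//   arrayReduce([1, 2, 3], function(acc, n) { return acc + n; }, undefined, true); // => 6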
/**
* A specialized version of `_.reduceRight` for arrays without support for
* iteratee shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @param {*} [accumulator] The initial value.
* @param {boolean} [initAccum] Specify using the last element of `array` as
* the initial value.
* @returns {*} Returns the accumulated value.
*/
function arrayReduceRight(array, iteratee, accumulator, initAccum) {
var length = array == null ? 0 : array.length;
if (initAccum && length) {
accumulator = array[--length];
}
while (length--) {
accumulator = iteratee(accumulator, array[length], length, array);
}
return accumulator;
}
/**
* A specialized version of `_.some` for arrays without support for iteratee
* shorthands.
*
* @private
* @param {Array} [array] The array to iterate over.
* @param {Function} predicate The function invoked per iteration.
* @returns {boolean} Returns `true` if any element passes the predicate check,
* else `false`.
*/
function arraySome(array, predicate) {
var index = -1,
length = array == null ? 0 : array.length;
while (++index < length) {
if (predicate(array[index], index, array)) {
return true;
}
}
return false;
}
/**
* Gets the size of an ASCII `string`.
*
* @private
* @param {string} string The string to inspect.
* @returns {number} Returns the string size.
*/
var asciiSize = baseProperty('length');
/**
* Converts an ASCII `string` to an array.
*
* @private
* @param {string} string The string to convert.
* @returns {Array} Returns the converted array.
*/
function asciiToArray(string) {
return string.split('');
}
/**
* Splits an ASCII `string` into an array of its words.
*
* @private
* @param {string} string The string to inspect.
* @returns {Array} Returns the words of `string`.
*/
function asciiWords(string) {
return string.match(reAsciiWord) || [];
}
/**
* The base implementation of methods like `_.findKey` and `_.findLastKey`,
* without support for iteratee shorthands, which iterates over `collection`
* using `eachFunc`.
*
* @private
* @param {Array|Object} collection The collection to inspect.
* @param {Function} predicate The function invoked per iteration.
* @param {Function} eachFunc The function to iterate over `collection`.
* @returns {*} Returns the found element or its key, else `undefined`.
*/
function baseFindKey(collection, predicate, eachFunc) {
var result;
eachFunc(collection, function(value, key, collection) {
if (predicate(value, key, collection)) {
result = key;
return false;
}
});
return result;
}
/**
* The base implementation of `_.findIndex` and `_.findLastIndex` without
* support for iteratee shorthands.
*
* @private
* @param {Array} array The array to inspect.
* @param {Function} predicate The function invoked per iteration.
* @param {number} fromIndex The index to search from.
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function baseFindIndex(array, predicate, fromIndex, fromRight) {
var length = array.length,
index = fromIndex + (fromRight ? 1 : -1);
while ((fromRight ? index-- : ++index < length)) {
if (predicate(array[index], index, array)) {
return index;
}
}
return -1;
}
/**
* The base implementation of `_.indexOf` without `fromIndex` bounds checks.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @param {number} fromIndex The index to search from.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function baseIndexOf(array, value, fromIndex) {
return value === value
? strictIndexOf(array, value, fromIndex)
: baseFindIndex(array, baseIsNaN, fromIndex);
}
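// Illustrative only: since `NaN !== NaN`, a strict scan can never find it, so
// the `value === value` test routes NaN searches through `baseIsNaN` instead.
//
//   baseIndexOf([1, NaN, 3], 3, 0);   // => 2 (strictIndexOf fast path)
//   baseIndexOf([1, NaN, 3], NaN, 0); // => 1 (baseFindIndex with baseIsNaN)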
/**
* This function is like `baseIndexOf` except that it accepts a comparator.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @param {number} fromIndex The index to search from.
* @param {Function} comparator The comparator invoked per element.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function baseIndexOfWith(array, value, fromIndex, comparator) {
var index = fromIndex - 1,
length = array.length;
while (++index < length) {
if (comparator(array[index], value)) {
return index;
}
}
return -1;
}
/**
* The base implementation of `_.isNaN` without support for number objects.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is `NaN`, else `false`.
*/
function baseIsNaN(value) {
return value !== value;
}
/**
* The base implementation of `_.mean` and `_.meanBy` without support for
* iteratee shorthands.
*
* @private
* @param {Array} array The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {number} Returns the mean.
*/
function baseMean(array, iteratee) {
var length = array == null ? 0 : array.length;
return length ? (baseSum(array, iteratee) / length) : NAN;
}
/**
* The base implementation of `_.property` without support for deep paths.
*
* @private
* @param {string} key The key of the property to get.
* @returns {Function} Returns the new accessor function.
*/
function baseProperty(key) {
return function(object) {
return object == null ? undefined : object[key];
};
}
/**
* The base implementation of `_.propertyOf` without support for deep paths.
*
* @private
* @param {Object} object The object to query.
* @returns {Function} Returns the new accessor function.
*/
function basePropertyOf(object) {
return function(key) {
return object == null ? undefined : object[key];
};
}
/**
* The base implementation of `_.reduce` and `_.reduceRight`, without support
* for iteratee shorthands, which iterates over `collection` using `eachFunc`.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @param {*} accumulator The initial value.
* @param {boolean} initAccum Specify using the first or last element of
* `collection` as the initial value.
* @param {Function} eachFunc The function to iterate over `collection`.
* @returns {*} Returns the accumulated value.
*/
function baseReduce(collection, iteratee, accumulator, initAccum, eachFunc) {
eachFunc(collection, function(value, index, collection) {
accumulator = initAccum
? (initAccum = false, value)
: iteratee(accumulator, value, index, collection);
});
return accumulator;
}
/**
* The base implementation of `_.sortBy` which uses `comparer` to define the
* sort order of `array` and replaces criteria objects with their corresponding
* values.
*
* @private
* @param {Array} array The array to sort.
* @param {Function} comparer The function to define sort order.
* @returns {Array} Returns `array`.
*/
function baseSortBy(array, comparer) {
var length = array.length;
array.sort(comparer);
while (length--) {
array[length] = array[length].value;
}
return array;
}
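// Sketch of how baseSortBy is driven by the `_.sortBy` machinery later in this
// file: each slot holds a criteria record, the comparer orders the records,
// and the final loop unwraps each record back to its raw value.
//
//   baseSortBy(
//     [{ 'value': 'b', 'criteria': 2 }, { 'value': 'a', 'criteria': 1 }],
//     function(x, y) { return x.criteria - y.criteria; }
//   );
//   // => ['a', 'b']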
/**
* The base implementation of `_.sum` and `_.sumBy` without support for
* iteratee shorthands.
*
* @private
* @param {Array} array The array to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {number} Returns the sum.
*/
function baseSum(array, iteratee) {
var result,
index = -1,
length = array.length;
while (++index < length) {
var current = iteratee(array[index]);
if (current !== undefined) {
result = result === undefined ? current : (result + current);
}
}
return result;
}
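// Illustrative only: entries whose iteratee result is `undefined` are skipped
// rather than summed, so missing values don't poison the total with NaN; an
// all-undefined input yields `undefined` rather than 0.
//
//   baseSum([1, undefined, 3], function(n) { return n; }); // => 4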
/**
* The base implementation of `_.times` without support for iteratee shorthands
* or max array length checks.
*
* @private
* @param {number} n The number of times to invoke `iteratee`.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array} Returns the array of results.
*/
function baseTimes(n, iteratee) {
var index = -1,
result = Array(n);
while (++index < n) {
result[index] = iteratee(index);
}
return result;
}
/**
* The base implementation of `_.toPairs` and `_.toPairsIn` which creates an array
* of key-value pairs for `object` corresponding to the property names of `props`.
*
* @private
* @param {Object} object The object to query.
* @param {Array} props The property names to get values for.
* @returns {Array} Returns the key-value pairs.
*/
function baseToPairs(object, props) {
return arrayMap(props, function(key) {
return [key, object[key]];
});
}
/**
* The base implementation of `_.unary` without support for storing metadata.
*
* @private
* @param {Function} func The function to cap arguments for.
* @returns {Function} Returns the new capped function.
*/
function baseUnary(func) {
return function(value) {
return func(value);
};
}
/**
* The base implementation of `_.values` and `_.valuesIn` which creates an
* array of `object` property values corresponding to the property names
* of `props`.
*
* @private
* @param {Object} object The object to query.
* @param {Array} props The property names to get values for.
* @returns {Array} Returns the array of property values.
*/
function baseValues(object, props) {
return arrayMap(props, function(key) {
return object[key];
});
}
/**
* Checks if a `cache` value for `key` exists.
*
* @private
* @param {Object} cache The cache to query.
* @param {string} key The key of the entry to check.
* @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
*/
function cacheHas(cache, key) {
return cache.has(key);
}
/**
* Used by `_.trim` and `_.trimStart` to get the index of the first string symbol
* that is not found in the character symbols.
*
* @private
* @param {Array} strSymbols The string symbols to inspect.
* @param {Array} chrSymbols The character symbols to find.
* @returns {number} Returns the index of the first unmatched string symbol.
*/
function charsStartIndex(strSymbols, chrSymbols) {
var index = -1,
length = strSymbols.length;
while (++index < length && baseIndexOf(chrSymbols, strSymbols[index], 0) > -1) {}
return index;
}
/**
* Used by `_.trim` and `_.trimEnd` to get the index of the last string symbol
* that is not found in the character symbols.
*
* @private
* @param {Array} strSymbols The string symbols to inspect.
* @param {Array} chrSymbols The character symbols to find.
* @returns {number} Returns the index of the last unmatched string symbol.
*/
function charsEndIndex(strSymbols, chrSymbols) {
var index = strSymbols.length;
while (index-- && baseIndexOf(chrSymbols, strSymbols[index], 0) > -1) {}
return index;
}
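// Illustrative only: together, charsStartIndex and charsEndIndex bracket the
// region `_.trim` keeps. With strSymbols = ['-','-','a','b','-'] and
// chrSymbols = ['-']:
//
//   charsStartIndex(strSymbols, chrSymbols); // => 2
//   charsEndIndex(strSymbols, chrSymbols);   // => 3
//
// so the trimmed result is strSymbols.slice(2, 3 + 1), i.e. ['a', 'b'].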
/**
* Gets the number of `placeholder` occurrences in `array`.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} placeholder The placeholder to search for.
* @returns {number} Returns the placeholder count.
*/
function countHolders(array, placeholder) {
var length = array.length,
result = 0;
while (length--) {
if (array[length] === placeholder) {
++result;
}
}
return result;
}
/**
* Used by `_.deburr` to convert Latin-1 Supplement and Latin Extended-A
* letters to basic Latin letters.
*
* @private
* @param {string} letter The matched letter to deburr.
* @returns {string} Returns the deburred letter.
*/
var deburrLetter = basePropertyOf(deburredLetters);
/**
* Used by `_.escape` to convert characters to HTML entities.
*
* @private
* @param {string} chr The matched character to escape.
* @returns {string} Returns the escaped character.
*/
var escapeHtmlChar = basePropertyOf(htmlEscapes);
/**
* Used by `_.template` to escape characters for inclusion in compiled string literals.
*
* @private
* @param {string} chr The matched character to escape.
* @returns {string} Returns the escaped character.
*/
function escapeStringChar(chr) {
return '\\' + stringEscapes[chr];
}
/**
* Gets the value at `key` of `object`.
*
* @private
* @param {Object} [object] The object to query.
* @param {string} key The key of the property to get.
* @returns {*} Returns the property value.
*/
function getValue(object, key) {
return object == null ? undefined : object[key];
}
/**
* Checks if `string` contains Unicode symbols.
*
* @private
* @param {string} string The string to inspect.
* @returns {boolean} Returns `true` if a symbol is found, else `false`.
*/
function hasUnicode(string) {
return reHasUnicode.test(string);
}
/**
* Checks if `string` contains a word composed of Unicode symbols.
*
* @private
* @param {string} string The string to inspect.
* @returns {boolean} Returns `true` if a word is found, else `false`.
*/
function hasUnicodeWord(string) {
return reHasUnicodeWord.test(string);
}
/**
* Converts `iterator` to an array.
*
* @private
* @param {Object} iterator The iterator to convert.
* @returns {Array} Returns the converted array.
*/
function iteratorToArray(iterator) {
var data,
result = [];
while (!(data = iterator.next()).done) {
result.push(data.value);
}
return result;
}
/**
* Converts `map` to its key-value pairs.
*
* @private
* @param {Object} map The map to convert.
* @returns {Array} Returns the key-value pairs.
*/
function mapToArray(map) {
var index = -1,
result = Array(map.size);
map.forEach(function(value, key) {
result[++index] = [key, value];
});
return result;
}
/**
* Creates a unary function that invokes `func` with its argument transformed.
*
* @private
* @param {Function} func The function to wrap.
* @param {Function} transform The argument transform.
* @returns {Function} Returns the new function.
*/
function overArg(func, transform) {
return function(arg) {
return func(transform(arg));
};
}
/**
* Replaces all `placeholder` elements in `array` with an internal placeholder
* and returns an array of their indexes.
*
* @private
* @param {Array} array The array to modify.
* @param {*} placeholder The placeholder to replace.
* @returns {Array} Returns the new array of placeholder indexes.
*/
function replaceHolders(array, placeholder) {
var index = -1,
length = array.length,
resIndex = 0,
result = [];
while (++index < length) {
var value = array[index];
if (value === placeholder || value === PLACEHOLDER) {
array[index] = PLACEHOLDER;
result[resIndex++] = index;
}
}
return result;
}
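// Illustrative only ('_ph_' is a stand-in for a user-visible placeholder such
// as `_`): used by the partial-application machinery to record which argument
// slots are still open.
//
//   var args = [1, '_ph_', 3, '_ph_'];
//   replaceHolders(args, '_ph_'); // => [1, 3]
//   // args is now [1, PLACEHOLDER, 3, PLACEHOLDER]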
/**
* Converts `set` to an array of its values.
*
* @private
* @param {Object} set The set to convert.
* @returns {Array} Returns the values.
*/
function setToArray(set) {
var index = -1,
result = Array(set.size);
set.forEach(function(value) {
result[++index] = value;
});
return result;
}
/**
* Converts `set` to its value-value pairs.
*
* @private
* @param {Object} set The set to convert.
* @returns {Array} Returns the value-value pairs.
*/
function setToPairs(set) {
var index = -1,
result = Array(set.size);
set.forEach(function(value) {
result[++index] = [value, value];
});
return result;
}
/**
* A specialized version of `_.indexOf` which performs strict equality
* comparisons of values, i.e. `===`.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @param {number} fromIndex The index to search from.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function strictIndexOf(array, value, fromIndex) {
var index = fromIndex - 1,
length = array.length;
while (++index < length) {
if (array[index] === value) {
return index;
}
}
return -1;
}
/**
* A specialized version of `_.lastIndexOf` which performs strict equality
* comparisons of values, i.e. `===`.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @param {number} fromIndex The index to search from.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function strictLastIndexOf(array, value, fromIndex) {
var index = fromIndex + 1;
while (index--) {
if (array[index] === value) {
return index;
}
}
return index;
}
/**
* Gets the number of symbols in `string`.
*
* @private
* @param {string} string The string to inspect.
* @returns {number} Returns the string size.
*/
function stringSize(string) {
return hasUnicode(string)
? unicodeSize(string)
: asciiSize(string);
}
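// Illustrative only: sizes are counted in symbols, not UTF-16 code units.
//
//   stringSize('abc');          // => 3 (ASCII fast path via asciiSize)
//   stringSize('\ud83d\ude00'); // => 1 (one emoji, though its .length is 2)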
/**
* Converts `string` to an array.
*
* @private
* @param {string} string The string to convert.
* @returns {Array} Returns the converted array.
*/
function stringToArray(string) {
return hasUnicode(string)
? unicodeToArray(string)
: asciiToArray(string);
}
/**
* Used by `_.unescape` to convert HTML entities to characters.
*
* @private
* @param {string} chr The matched character to unescape.
* @returns {string} Returns the unescaped character.
*/
var unescapeHtmlChar = basePropertyOf(htmlUnescapes);
/**
* Gets the size of a Unicode `string`.
*
* @private
* @param {string} string The string to inspect.
* @returns {number} Returns the string size.
*/
function unicodeSize(string) {
var result = reUnicode.lastIndex = 0;
while (reUnicode.test(string)) {
++result;
}
return result;
}
/**
* Converts a Unicode `string` to an array.
*
* @private
* @param {string} string The string to convert.
* @returns {Array} Returns the converted array.
*/
function unicodeToArray(string) {
return string.match(reUnicode) || [];
}
/**
* Splits a Unicode `string` into an array of its words.
*
* @private
* @param {string} string The string to inspect.
* @returns {Array} Returns the words of `string`.
*/
function unicodeWords(string) {
return string.match(reUnicodeWord) || [];
}
/*--------------------------------------------------------------------------*/
/**
* Create a new pristine `lodash` function using the `context` object.
*
* @static
* @memberOf _
* @since 1.1.0
* @category Util
* @param {Object} [context=root] The context object.
* @returns {Function} Returns a new `lodash` function.
* @example
*
* _.mixin({ 'foo': _.constant('foo') });
*
* var lodash = _.runInContext();
* lodash.mixin({ 'bar': lodash.constant('bar') });
*
* _.isFunction(_.foo);
* // => true
* _.isFunction(_.bar);
* // => false
*
* lodash.isFunction(lodash.foo);
* // => false
* lodash.isFunction(lodash.bar);
* // => true
*
* // Create a souped-up `defer` in Node.js.
* var defer = _.runInContext({ 'setTimeout': setImmediate }).defer;
*/
var runInContext = (function runInContext(context) {
context = context == null ? root : _.defaults(root.Object(), context, _.pick(root, contextProps));
/** Built-in constructor references. */
var Array = context.Array,
Date = context.Date,
Error = context.Error,
Function = context.Function,
Math = context.Math,
Object = context.Object,
RegExp = context.RegExp,
String = context.String,
TypeError = context.TypeError;
/** Used for built-in method references. */
var arrayProto = Array.prototype,
funcProto = Function.prototype,
objectProto = Object.prototype;
/** Used to detect overreaching core-js shims. */
var coreJsData = context['__core-js_shared__'];
/** Used to resolve the decompiled source of functions. */
var funcToString = funcProto.toString;
/** Used to check objects for own properties. */
var hasOwnProperty = objectProto.hasOwnProperty;
/** Used to generate unique IDs. */
var idCounter = 0;
/** Used to detect methods masquerading as native. */
var maskSrcKey = (function() {
var uid = /[^.]+$/.exec(coreJsData && coreJsData.keys && coreJsData.keys.IE_PROTO || '');
return uid ? ('Symbol(src)_1.' + uid) : '';
}());
/**
* Used to resolve the
* [`toStringTag`](http://ecma-international.org/ecma-262/7.0/#sec-object.prototype.tostring)
* of values.
*/
var nativeObjectToString = objectProto.toString;
/** Used to infer the `Object` constructor. */
var objectCtorString = funcToString.call(Object);
/** Used to restore the original `_` reference in `_.noConflict`. */
var oldDash = root._;
/** Used to detect if a method is native. */
var reIsNative = RegExp('^' +
funcToString.call(hasOwnProperty).replace(reRegExpChar, '\\$&')
.replace(/hasOwnProperty|(function).*?(?=\\\()| for .+?(?=\\\])/g, '$1.*?') + '$'
);
/** Built-in value references. */
var Buffer = moduleExports ? context.Buffer : undefined,
Symbol = context.Symbol,
Uint8Array = context.Uint8Array,
allocUnsafe = Buffer ? Buffer.allocUnsafe : undefined,
getPrototype = overArg(Object.getPrototypeOf, Object),
objectCreate = Object.create,
propertyIsEnumerable = objectProto.propertyIsEnumerable,
splice = arrayProto.splice,
spreadableSymbol = Symbol ? Symbol.isConcatSpreadable : undefined,
symIterator = Symbol ? Symbol.iterator : undefined,
symToStringTag = Symbol ? Symbol.toStringTag : undefined;
var defineProperty = (function() {
try {
var func = getNative(Object, 'defineProperty');
func({}, '', {});
return func;
} catch (e) {}
}());
/** Mocked built-ins. */
var ctxClearTimeout = context.clearTimeout !== root.clearTimeout && context.clearTimeout,
ctxNow = Date && Date.now !== root.Date.now && Date.now,
ctxSetTimeout = context.setTimeout !== root.setTimeout && context.setTimeout;
/* Built-in method references for those with the same name as other `lodash` methods. */
var nativeCeil = Math.ceil,
nativeFloor = Math.floor,
nativeGetSymbols = Object.getOwnPropertySymbols,
nativeIsBuffer = Buffer ? Buffer.isBuffer : undefined,
nativeIsFinite = context.isFinite,
nativeJoin = arrayProto.join,
nativeKeys = overArg(Object.keys, Object),
nativeMax = Math.max,
nativeMin = Math.min,
nativeNow = Date.now,
nativeParseInt = context.parseInt,
nativeRandom = Math.random,
nativeReverse = arrayProto.reverse;
/* Built-in method references that are verified to be native. */
var DataView = getNative(context, 'DataView'),
Map = getNative(context, 'Map'),
Promise = getNative(context, 'Promise'),
Set = getNative(context, 'Set'),
WeakMap = getNative(context, 'WeakMap'),
nativeCreate = getNative(Object, 'create');
/** Used to store function metadata. */
var metaMap = WeakMap && new WeakMap;
/** Used to lookup unminified function names. */
var realNames = {};
/** Used to detect maps, sets, and weakmaps. */
var dataViewCtorString = toSource(DataView),
mapCtorString = toSource(Map),
promiseCtorString = toSource(Promise),
setCtorString = toSource(Set),
weakMapCtorString = toSource(WeakMap);
/** Used to convert symbols to primitives and strings. */
var symbolProto = Symbol ? Symbol.prototype : undefined,
symbolValueOf = symbolProto ? symbolProto.valueOf : undefined,
symbolToString = symbolProto ? symbolProto.toString : undefined;
/*------------------------------------------------------------------------*/
/**
* Creates a `lodash` object which wraps `value` to enable implicit method
* chain sequences. Methods that operate on and return arrays, collections,
* and functions can be chained together. Methods that retrieve a single value
* or may return a primitive value will automatically end the chain sequence
* and return the unwrapped value. Otherwise, the value must be unwrapped
* with `_#value`.
*
* Explicit chain sequences, which must be unwrapped with `_#value`, may be
* enabled using `_.chain`.
*
* The execution of chained methods is lazy, that is, it's deferred until
* `_#value` is implicitly or explicitly called.
*
* Lazy evaluation allows several methods to support shortcut fusion.
* Shortcut fusion is an optimization to merge iteratee calls; this avoids
* the creation of intermediate arrays and can greatly reduce the number of
* iteratee executions. Sections of a chain sequence qualify for shortcut
* fusion if the section is applied to an array and iteratees accept only
* one argument. The heuristic for whether a section qualifies for shortcut
* fusion is subject to change.
*
* Chaining is supported in custom builds as long as the `_#value` method is
* directly or indirectly included in the build.
*
* In addition to lodash methods, wrappers have `Array` and `String` methods.
*
* The wrapper `Array` methods are:
* `concat`, `join`, `pop`, `push`, `shift`, `sort`, `splice`, and `unshift`
*
* The wrapper `String` methods are:
* `replace` and `split`
*
* The wrapper methods that support shortcut fusion are:
* `at`, `compact`, `drop`, `dropRight`, `dropWhile`, `filter`, `find`,
* `findLast`, `head`, `initial`, `last`, `map`, `reject`, `reverse`, `slice`,
* `tail`, `take`, `takeRight`, `takeRightWhile`, `takeWhile`, and `toArray`
*
* The chainable wrapper methods are:
* `after`, `ary`, `assign`, `assignIn`, `assignInWith`, `assignWith`, `at`,
* `before`, `bind`, `bindAll`, `bindKey`, `castArray`, `chain`, `chunk`,
* `commit`, `compact`, `concat`, `conforms`, `constant`, `countBy`, `create`,
* `curry`, `debounce`, `defaults`, `defaultsDeep`, `defer`, `delay`,
* `difference`, `differenceBy`, `differenceWith`, `drop`, `dropRight`,
* `dropRightWhile`, `dropWhile`, `extend`, `extendWith`, `fill`, `filter`,
* `flatMap`, `flatMapDeep`, `flatMapDepth`, `flatten`, `flattenDeep`,
* `flattenDepth`, `flip`, `flow`, `flowRight`, `fromPairs`, `functions`,
* `functionsIn`, `groupBy`, `initial`, `intersection`, `intersectionBy`,
* `intersectionWith`, `invert`, `invertBy`, `invokeMap`, `iteratee`, `keyBy`,
* `keys`, `keysIn`, `map`, `mapKeys`, `mapValues`, `matches`, `matchesProperty`,
* `memoize`, `merge`, `mergeWith`, `method`, `methodOf`, `mixin`, `negate`,
* `nthArg`, `omit`, `omitBy`, `once`, `orderBy`, `over`, `overArgs`,
* `overEvery`, `overSome`, `partial`, `partialRight`, `partition`, `pick`,
* `pickBy`, `plant`, `property`, `propertyOf`, `pull`, `pullAll`, `pullAllBy`,
* `pullAllWith`, `pullAt`, `push`, `range`, `rangeRight`, `rearg`, `reject`,
* `remove`, `rest`, `reverse`, `sampleSize`, `set`, `setWith`, `shuffle`,
* `slice`, `sort`, `sortBy`, `splice`, `spread`, `tail`, `take`, `takeRight`,
* `takeRightWhile`, `takeWhile`, `tap`, `throttle`, `thru`, `toArray`,
* `toPairs`, `toPairsIn`, `toPath`, `toPlainObject`, `transform`, `unary`,
* `union`, `unionBy`, `unionWith`, `uniq`, `uniqBy`, `uniqWith`, `unset`,
* `unshift`, `unzip`, `unzipWith`, `update`, `updateWith`, `values`,
* `valuesIn`, `without`, `wrap`, `xor`, `xorBy`, `xorWith`, `zip`,
* `zipObject`, `zipObjectDeep`, and `zipWith`
*
* The wrapper methods that are **not** chainable by default are:
* `add`, `attempt`, `camelCase`, `capitalize`, `ceil`, `clamp`, `clone`,
* `cloneDeep`, `cloneDeepWith`, `cloneWith`, `conformsTo`, `deburr`,
* `defaultTo`, `divide`, `each`, `eachRight`, `endsWith`, `eq`, `escape`,
* `escapeRegExp`, `every`, `find`, `findIndex`, `findKey`, `findLast`,
* `findLastIndex`, `findLastKey`, `first`, `floor`, `forEach`, `forEachRight`,
* `forIn`, `forInRight`, `forOwn`, `forOwnRight`, `get`, `gt`, `gte`, `has`,
* `hasIn`, `head`, `identity`, `includes`, `indexOf`, `inRange`, `invoke`,
* `isArguments`, `isArray`, `isArrayBuffer`, `isArrayLike`, `isArrayLikeObject`,
* `isBoolean`, `isBuffer`, `isDate`, `isElement`, `isEmpty`, `isEqual`,
* `isEqualWith`, `isError`, `isFinite`, `isFunction`, `isInteger`, `isLength`,
* `isMap`, `isMatch`, `isMatchWith`, `isNaN`, `isNative`, `isNil`, `isNull`,
* `isNumber`, `isObject`, `isObjectLike`, `isPlainObject`, `isRegExp`,
* `isSafeInteger`, `isSet`, `isString`, `isUndefined`, `isTypedArray`,
* `isWeakMap`, `isWeakSet`, `join`, `kebabCase`, `last`, `lastIndexOf`,
* `lowerCase`, `lowerFirst`, `lt`, `lte`, `max`, `maxBy`, `mean`, `meanBy`,
* `min`, `minBy`, `multiply`, `noConflict`, `noop`, `now`, `nth`, `pad`,
* `padEnd`, `padStart`, `parseInt`, `pop`, `random`, `reduce`, `reduceRight`,
* `repeat`, `result`, `round`, `runInContext`, `sample`, `shift`, `size`,
* `snakeCase`, `some`, `sortedIndex`, `sortedIndexBy`, `sortedLastIndex`,
* `sortedLastIndexBy`, `startCase`, `startsWith`, `stubArray`, `stubFalse`,
* `stubObject`, `stubString`, `stubTrue`, `subtract`, `sum`, `sumBy`,
* `template`, `times`, `toFinite`, `toInteger`, `toJSON`, `toLength`,
* `toLower`, `toNumber`, `toSafeInteger`, `toString`, `toUpper`, `trim`,
* `trimEnd`, `trimStart`, `truncate`, `unescape`, `uniqueId`, `upperCase`,
* `upperFirst`, `value`, and `words`
*
* @name _
* @constructor
* @category Seq
* @param {*} value The value to wrap in a `lodash` instance.
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* function square(n) {
* return n * n;
* }
*
* var wrapped = _([1, 2, 3]);
*
* // Returns an unwrapped value.
* wrapped.reduce(_.add);
* // => 6
*
* // Returns a wrapped value.
* var squares = wrapped.map(square);
*
* _.isArray(squares);
* // => false
*
* _.isArray(squares.value());
* // => true
*/
function lodash(value) {
if (isObjectLike(value) && !isArray(value) && !(value instanceof LazyWrapper)) {
if (value instanceof LodashWrapper) {
return value;
}
if (hasOwnProperty.call(value, '__wrapped__')) {
return wrapperClone(value);
}
}
return new LodashWrapper(value);
}
/**
* The base implementation of `_.create` without support for assigning
* properties to the created object.
*
* @private
* @param {Object} proto The object to inherit from.
* @returns {Object} Returns the new object.
*/
var baseCreate = (function() {
function object() {}
return function(proto) {
if (!isObject(proto)) {
return {};
}
if (objectCreate) {
return objectCreate(proto);
}
object.prototype = proto;
var result = new object;
object.prototype = undefined;
return result;
};
}());
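// Illustrative only: prototype inheritance with a graceful fallback when
// `Object.create` is unavailable; non-object prototypes yield a plain object.
//
//   var proto = { 'greet': function() { return 'hi'; } };
//   baseCreate(proto).greet(); // => 'hi'
//   baseCreate(null);          // => {} (isObject(null) is false)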
/**
* The function whose prototype chain sequence wrappers inherit from.
*
* @private
*/
function baseLodash() {
// No operation performed.
}
/**
* The base constructor for creating `lodash` wrapper objects.
*
* @private
* @param {*} value The value to wrap.
* @param {boolean} [chainAll] Enable explicit method chain sequences.
*/
function LodashWrapper(value, chainAll) {
this.__wrapped__ = value;
this.__actions__ = [];
this.__chain__ = !!chainAll;
this.__index__ = 0;
this.__values__ = undefined;
}
/**
* By default, the template delimiters used by lodash are like those in
* embedded Ruby (ERB) as well as ES2015 template strings. Change the
* following template settings to use alternative delimiters.
*
* @static
* @memberOf _
* @type {Object}
*/
lodash.templateSettings = {
/**
* Used to detect `data` property values to be HTML-escaped.
*
* @memberOf _.templateSettings
* @type {RegExp}
*/
'escape': reEscape,
/**
* Used to detect code to be evaluated.
*
* @memberOf _.templateSettings
* @type {RegExp}
*/
'evaluate': reEvaluate,
/**
* Used to detect `data` property values to inject.
*
* @memberOf _.templateSettings
* @type {RegExp}
*/
'interpolate': reInterpolate,
/**
* Used to reference the data object in the template text.
*
* @memberOf _.templateSettings
* @type {string}
*/
'variable': '',
/**
* Used to import variables into the compiled template.
*
* @memberOf _.templateSettings
* @type {Object}
*/
'imports': {
/**
* A reference to the `lodash` function.
*
* @memberOf _.templateSettings.imports
* @type {Function}
*/
'_': lodash
}
};
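// Illustrative only (`lodash.template` is attached further below): the
// delimiters can be swapped before compiling, e.g. for Mustache-style syntax.
//
//   lodash.templateSettings.interpolate = /\{\{([\s\S]+?)\}\}/g;
//   var compiled = lodash.template('hello {{ user }}!');
//   compiled({ 'user': 'mustache' }); // => 'hello mustache!'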
// Ensure wrappers are instances of `baseLodash`.
lodash.prototype = baseLodash.prototype;
lodash.prototype.constructor = lodash;
LodashWrapper.prototype = baseCreate(baseLodash.prototype);
LodashWrapper.prototype.constructor = LodashWrapper;
/*------------------------------------------------------------------------*/
/**
* Creates a lazy wrapper object which wraps `value` to enable lazy evaluation.
*
* @private
* @constructor
* @param {*} value The value to wrap.
*/
function LazyWrapper(value) {
this.__wrapped__ = value;
this.__actions__ = [];
this.__dir__ = 1;
this.__filtered__ = false;
this.__iteratees__ = [];
this.__takeCount__ = MAX_ARRAY_LENGTH;
this.__views__ = [];
}
/**
* Creates a clone of the lazy wrapper object.
*
* @private
* @name clone
* @memberOf LazyWrapper
* @returns {Object} Returns the cloned `LazyWrapper` object.
*/
function lazyClone() {
var result = new LazyWrapper(this.__wrapped__);
result.__actions__ = copyArray(this.__actions__);
result.__dir__ = this.__dir__;
result.__filtered__ = this.__filtered__;
result.__iteratees__ = copyArray(this.__iteratees__);
result.__takeCount__ = this.__takeCount__;
result.__views__ = copyArray(this.__views__);
return result;
}
/**
* Reverses the direction of lazy iteration.
*
* @private
* @name reverse
* @memberOf LazyWrapper
* @returns {Object} Returns the new reversed `LazyWrapper` object.
*/
function lazyReverse() {
if (this.__filtered__) {
var result = new LazyWrapper(this);
result.__dir__ = -1;
result.__filtered__ = true;
} else {
result = this.clone();
result.__dir__ *= -1;
}
return result;
}
/**
* Extracts the unwrapped value from its lazy wrapper.
*
* @private
* @name value
* @memberOf LazyWrapper
* @returns {*} Returns the unwrapped value.
*/
function lazyValue() {
var array = this.__wrapped__.value(),
dir = this.__dir__,
isArr = isArray(array),
isRight = dir < 0,
arrLength = isArr ? array.length : 0,
view = getView(0, arrLength, this.__views__),
start = view.start,
end = view.end,
length = end - start,
index = isRight ? end : (start - 1),
iteratees = this.__iteratees__,
iterLength = iteratees.length,
resIndex = 0,
takeCount = nativeMin(length, this.__takeCount__);
if (!isArr || (!isRight && arrLength == length && takeCount == length)) {
return baseWrapperValue(array, this.__actions__);
}
var result = [];
outer:
while (length-- && resIndex < takeCount) {
index += dir;
var iterIndex = -1,
value = array[index];
while (++iterIndex < iterLength) {
var data = iteratees[iterIndex],
iteratee = data.iteratee,
type = data.type,
computed = iteratee(value);
if (type == LAZY_MAP_FLAG) {
value = computed;
} else if (!computed) {
if (type == LAZY_FILTER_FLAG) {
continue outer;
} else {
break outer;
}
}
}
result[resIndex++] = value;
}
return result;
}
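// Illustrative only: for a fused sequence such as
//   _([1, 2, 3, 4]).map(square).filter(isEven).take(1).value()
// the loop above walks the source once, runs each iteratee per element, and
// stops as soon as takeCount (1) results exist, so `square` is invoked twice
// instead of four times and no intermediate arrays are built.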
// Ensure `LazyWrapper` is an instance of `baseLodash`.
LazyWrapper.prototype = baseCreate(baseLodash.prototype);
LazyWrapper.prototype.constructor = LazyWrapper;
/*------------------------------------------------------------------------*/
/**
* Creates a hash object.
*
* @private
* @constructor
* @param {Array} [entries] The key-value pairs to cache.
*/
function Hash(entries) {
var index = -1,
length = entries == null ? 0 : entries.length;
this.clear();
while (++index < length) {
var entry = entries[index];
this.set(entry[0], entry[1]);
}
}
/**
* Removes all key-value entries from the hash.
*
* @private
* @name clear
* @memberOf Hash
*/
function hashClear() {
this.__data__ = nativeCreate ? nativeCreate(null) : {};
this.size = 0;
}
/**
* Removes `key` and its value from the hash.
*
* @private
* @name delete
* @memberOf Hash
* @param {string} key The key of the value to remove.
* @returns {boolean} Returns `true` if the entry was removed, else `false`.
*/
function hashDelete(key) {
var result = this.has(key) && delete this.__data__[key];
this.size -= result ? 1 : 0;
return result;
}
/**
* Gets the hash value for `key`.
*
* @private
* @name get
* @memberOf Hash
* @param {string} key The key of the value to get.
* @returns {*} Returns the entry value.
*/
function hashGet(key) {
var data = this.__data__;
if (nativeCreate) {
var result = data[key];
return result === HASH_UNDEFINED ? undefined : result;
}
return hasOwnProperty.call(data, key) ? data[key] : undefined;
}
/**
* Checks if a hash value for `key` exists.
*
* @private
* @name has
* @memberOf Hash
* @param {string} key The key of the entry to check.
* @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
*/
function hashHas(key) {
var data = this.__data__;
return nativeCreate ? (data[key] !== undefined) : hasOwnProperty.call(data, key);
}
/**
* Sets the hash `key` to `value`.
*
* @private
* @name set
* @memberOf Hash
* @param {string} key The key of the value to set.
* @param {*} value The value to set.
* @returns {Object} Returns the hash instance.
*/
function hashSet(key, value) {
var data = this.__data__;
this.size += this.has(key) ? 0 : 1;
data[key] = (nativeCreate && value === undefined) ? HASH_UNDEFINED : value;
return this;
}
// Add methods to `Hash`.
Hash.prototype.clear = hashClear;
Hash.prototype['delete'] = hashDelete;
Hash.prototype.get = hashGet;
Hash.prototype.has = hashHas;
Hash.prototype.set = hashSet;
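// Illustrative only (private cache type): note how `undefined` values survive
// round trips by being stored as HASH_UNDEFINED.
//
//   var h = new Hash([['a', 1]]);
//   h.get('a');          // => 1
//   h.set('b', undefined);
//   h.has('b');          // => true
//   h['delete']('a');    // => true; h.size is now 1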
/*------------------------------------------------------------------------*/
/**
* Creates a list cache object.
*
* @private
* @constructor
* @param {Array} [entries] The key-value pairs to cache.
*/
function ListCache(entries) {
var index = -1,
length = entries == null ? 0 : entries.length;
this.clear();
while (++index < length) {
var entry = entries[index];
this.set(entry[0], entry[1]);
}
}
/**
* Removes all key-value entries from the list cache.
*
* @private
* @name clear
* @memberOf ListCache
*/
function listCacheClear() {
this.__data__ = [];
this.size = 0;
}
/**
* Removes `key` and its value from the list cache.
*
* @private
* @name delete
* @memberOf ListCache
* @param {string} key The key of the value to remove.
* @returns {boolean} Returns `true` if the entry was removed, else `false`.
*/
function listCacheDelete(key) {
var data = this.__data__,
index = assocIndexOf(data, key);
if (index < 0) {
return false;
}
var lastIndex = data.length - 1;
if (index == lastIndex) {
data.pop();
} else {
splice.call(data, index, 1);
}
--this.size;
return true;
}
/**
* Gets the list cache value for `key`.
*
* @private
* @name get
* @memberOf ListCache
* @param {string} key The key of the value to get.
* @returns {*} Returns the entry value.
*/
function listCacheGet(key) {
var data = this.__data__,
index = assocIndexOf(data, key);
return index < 0 ? undefined : data[index][1];
}
/**
* Checks if a list cache value for `key` exists.
*
* @private
* @name has
* @memberOf ListCache
* @param {string} key The key of the entry to check.
* @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
*/
function listCacheHas(key) {
return assocIndexOf(this.__data__, key) > -1;
}
/**
* Sets the list cache `key` to `value`.
*
* @private
* @name set
* @memberOf ListCache
* @param {string} key The key of the value to set.
* @param {*} value The value to set.
* @returns {Object} Returns the list cache instance.
*/
function listCacheSet(key, value) {
var data = this.__data__,
index = assocIndexOf(data, key);
if (index < 0) {
++this.size;
data.push([key, value]);
} else {
data[index][1] = value;
}
return this;
}
// Add methods to `ListCache`.
ListCache.prototype.clear = listCacheClear;
ListCache.prototype['delete'] = listCacheDelete;
ListCache.prototype.get = listCacheGet;
ListCache.prototype.has = listCacheHas;
ListCache.prototype.set = listCacheSet;
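// Illustrative sketch (not part of the library source): unlike `Hash`,
// `ListCache` keeps `[key, value]` pairs in a plain array and compares keys
// with `eq` (SameValueZero), so object keys and `NaN` both work:
//
//   var objKey = {};
//   var cache = new ListCache([[objKey, 1]]);
//   cache.get(objKey);          // => 1
//   cache.set(NaN, 2).get(NaN); // => 2 (NaN equals NaN under SameValueZero)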
/*------------------------------------------------------------------------*/
/**
* Creates a map cache object to store key-value pairs.
*
* @private
* @constructor
* @param {Array} [entries] The key-value pairs to cache.
*/
function MapCache(entries) {
var index = -1,
length = entries == null ? 0 : entries.length;
this.clear();
while (++index < length) {
var entry = entries[index];
this.set(entry[0], entry[1]);
}
}
/**
* Removes all key-value entries from the map.
*
* @private
* @name clear
* @memberOf MapCache
*/
function mapCacheClear() {
this.size = 0;
this.__data__ = {
'hash': new Hash,
'map': new (Map || ListCache),
'string': new Hash
};
}
/**
* Removes `key` and its value from the map.
*
* @private
* @name delete
* @memberOf MapCache
* @param {string} key The key of the value to remove.
* @returns {boolean} Returns `true` if the entry was removed, else `false`.
*/
function mapCacheDelete(key) {
var result = getMapData(this, key)['delete'](key);
this.size -= result ? 1 : 0;
return result;
}
/**
* Gets the map value for `key`.
*
* @private
* @name get
* @memberOf MapCache
* @param {string} key The key of the value to get.
* @returns {*} Returns the entry value.
*/
function mapCacheGet(key) {
return getMapData(this, key).get(key);
}
/**
* Checks if a map value for `key` exists.
*
* @private
* @name has
* @memberOf MapCache
* @param {string} key The key of the entry to check.
* @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
*/
function mapCacheHas(key) {
return getMapData(this, key).has(key);
}
/**
* Sets the map `key` to `value`.
*
* @private
* @name set
* @memberOf MapCache
* @param {string} key The key of the value to set.
* @param {*} value The value to set.
* @returns {Object} Returns the map cache instance.
*/
function mapCacheSet(key, value) {
var data = getMapData(this, key),
size = data.size;
data.set(key, value);
this.size += data.size == size ? 0 : 1;
return this;
}
// Add methods to `MapCache`.
MapCache.prototype.clear = mapCacheClear;
MapCache.prototype['delete'] = mapCacheDelete;
MapCache.prototype.get = mapCacheGet;
MapCache.prototype.has = mapCacheHas;
MapCache.prototype.set = mapCacheSet;
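// Illustrative sketch (not part of the library source): `MapCache` routes each
// key to one of its three stores via `getMapData` (defined elsewhere in this
// file): strings to the 'string' Hash, other keyable primitives to the 'hash'
// Hash, and objects to the 'map' store (a native Map, or ListCache fallback).
//
//   var cache = new MapCache();
//   cache.set('a', 1).set(42, 2).set({}, 3);
//   cache.size; // => 3, spread across the three internal stores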
/*------------------------------------------------------------------------*/
/**
 * Creates an array cache object to store unique values.
*
* @private
* @constructor
* @param {Array} [values] The values to cache.
*/
function SetCache(values) {
var index = -1,
length = values == null ? 0 : values.length;
this.__data__ = new MapCache;
while (++index < length) {
this.add(values[index]);
}
}
/**
* Adds `value` to the array cache.
*
* @private
* @name add
* @memberOf SetCache
* @alias push
* @param {*} value The value to cache.
* @returns {Object} Returns the cache instance.
*/
function setCacheAdd(value) {
this.__data__.set(value, HASH_UNDEFINED);
return this;
}
/**
* Checks if `value` is in the array cache.
*
* @private
* @name has
* @memberOf SetCache
* @param {*} value The value to search for.
 * @returns {boolean} Returns `true` if `value` is found, else `false`.
*/
function setCacheHas(value) {
return this.__data__.has(value);
}
// Add methods to `SetCache`.
SetCache.prototype.add = SetCache.prototype.push = setCacheAdd;
SetCache.prototype.has = setCacheHas;
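// Illustrative sketch (not part of the library source): `SetCache` is a
// `MapCache` keyed on the values themselves, and backs the `cacheHas` fast
// path used by functions such as `baseDifference` and `baseUniq` below.
//
//   var seen = new SetCache([1, NaN]);
//   seen.has(NaN); // => true
//   seen.push(2);  // `push` is an alias of `add`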
/*------------------------------------------------------------------------*/
/**
* Creates a stack cache object to store key-value pairs.
*
* @private
* @constructor
* @param {Array} [entries] The key-value pairs to cache.
*/
function Stack(entries) {
var data = this.__data__ = new ListCache(entries);
this.size = data.size;
}
/**
* Removes all key-value entries from the stack.
*
* @private
* @name clear
* @memberOf Stack
*/
function stackClear() {
this.__data__ = new ListCache;
this.size = 0;
}
/**
* Removes `key` and its value from the stack.
*
* @private
* @name delete
* @memberOf Stack
* @param {string} key The key of the value to remove.
* @returns {boolean} Returns `true` if the entry was removed, else `false`.
*/
function stackDelete(key) {
var data = this.__data__,
result = data['delete'](key);
this.size = data.size;
return result;
}
/**
* Gets the stack value for `key`.
*
* @private
* @name get
* @memberOf Stack
* @param {string} key The key of the value to get.
* @returns {*} Returns the entry value.
*/
function stackGet(key) {
return this.__data__.get(key);
}
/**
* Checks if a stack value for `key` exists.
*
* @private
* @name has
* @memberOf Stack
* @param {string} key The key of the entry to check.
* @returns {boolean} Returns `true` if an entry for `key` exists, else `false`.
*/
function stackHas(key) {
return this.__data__.has(key);
}
/**
* Sets the stack `key` to `value`.
*
* @private
* @name set
* @memberOf Stack
* @param {string} key The key of the value to set.
* @param {*} value The value to set.
* @returns {Object} Returns the stack cache instance.
*/
function stackSet(key, value) {
var data = this.__data__;
if (data instanceof ListCache) {
var pairs = data.__data__;
if (!Map || (pairs.length < LARGE_ARRAY_SIZE - 1)) {
pairs.push([key, value]);
this.size = ++data.size;
return this;
}
data = this.__data__ = new MapCache(pairs);
}
data.set(key, value);
this.size = data.size;
return this;
}
// Add methods to `Stack`.
Stack.prototype.clear = stackClear;
Stack.prototype['delete'] = stackDelete;
Stack.prototype.get = stackGet;
Stack.prototype.has = stackHas;
Stack.prototype.set = stackSet;
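// Illustrative sketch (not part of the library source): a `Stack` starts out
// backed by a cheap `ListCache`; once the pair count approaches
// `LARGE_ARRAY_SIZE` (200 in lodash), `stackSet` migrates the pairs into a
// `MapCache` so lookups stay O(1) instead of degrading to linear scans.
//
//   var stack = new Stack([['a', 1]]);
//   stack.set('b', 2).get('b'); // => 2, still ListCache-backed at this size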
/*------------------------------------------------------------------------*/
/**
* Creates an array of the enumerable property names of the array-like `value`.
*
* @private
* @param {*} value The value to query.
* @param {boolean} inherited Specify returning inherited property names.
* @returns {Array} Returns the array of property names.
*/
function arrayLikeKeys(value, inherited) {
var isArr = isArray(value),
isArg = !isArr && isArguments(value),
isBuff = !isArr && !isArg && isBuffer(value),
isType = !isArr && !isArg && !isBuff && isTypedArray(value),
skipIndexes = isArr || isArg || isBuff || isType,
result = skipIndexes ? baseTimes(value.length, String) : [],
length = result.length;
for (var key in value) {
if ((inherited || hasOwnProperty.call(value, key)) &&
!(skipIndexes && (
// Safari 9 has enumerable `arguments.length` in strict mode.
key == 'length' ||
// Node.js 0.10 has enumerable non-index properties on buffers.
(isBuff && (key == 'offset' || key == 'parent')) ||
// PhantomJS 2 has enumerable non-index properties on typed arrays.
(isType && (key == 'buffer' || key == 'byteLength' || key == 'byteOffset')) ||
// Skip index properties.
isIndex(key, length)
))) {
result.push(key);
}
}
return result;
}
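// Illustrative sketch (not part of the library source): index keys come from
// `baseTimes`, then the for-in loop appends the remaining own (or inherited)
// properties while skipping the environment quirks listed above.
//
//   var array = ['x']; array.extra = 1;
//   arrayLikeKeys(array); // => ['0', 'extra']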
/**
* A specialized version of `_.sample` for arrays.
*
* @private
* @param {Array} array The array to sample.
* @returns {*} Returns the random element.
*/
function arraySample(array) {
var length = array.length;
return length ? array[baseRandom(0, length - 1)] : undefined;
}
/**
* A specialized version of `_.sampleSize` for arrays.
*
* @private
* @param {Array} array The array to sample.
* @param {number} n The number of elements to sample.
* @returns {Array} Returns the random elements.
*/
function arraySampleSize(array, n) {
return shuffleSelf(copyArray(array), baseClamp(n, 0, array.length));
}
/**
* A specialized version of `_.shuffle` for arrays.
*
* @private
* @param {Array} array The array to shuffle.
* @returns {Array} Returns the new shuffled array.
*/
function arrayShuffle(array) {
return shuffleSelf(copyArray(array));
}
/**
* This function is like `assignValue` except that it doesn't assign
* `undefined` values.
*
* @private
* @param {Object} object The object to modify.
* @param {string} key The key of the property to assign.
* @param {*} value The value to assign.
*/
function assignMergeValue(object, key, value) {
if ((value !== undefined && !eq(object[key], value)) ||
(value === undefined && !(key in object))) {
baseAssignValue(object, key, value);
}
}
/**
* Assigns `value` to `key` of `object` if the existing value is not equivalent
* using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons.
*
* @private
* @param {Object} object The object to modify.
* @param {string} key The key of the property to assign.
* @param {*} value The value to assign.
*/
function assignValue(object, key, value) {
var objValue = object[key];
if (!(hasOwnProperty.call(object, key) && eq(objValue, value)) ||
(value === undefined && !(key in object))) {
baseAssignValue(object, key, value);
}
}
/**
* Gets the index at which the `key` is found in `array` of key-value pairs.
*
* @private
* @param {Array} array The array to inspect.
* @param {*} key The key to search for.
* @returns {number} Returns the index of the matched value, else `-1`.
*/
function assocIndexOf(array, key) {
var length = array.length;
while (length--) {
if (eq(array[length][0], key)) {
return length;
}
}
return -1;
}
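// Illustrative sketch (not part of the library source): the reverse scan means
// the most recently appended entry for a key wins, and `eq` makes `NaN`
// findable:
//
//   assocIndexOf([['a', 1], [NaN, 2]], NaN); // => 1
//   assocIndexOf([['a', 1]], 'b');           // => -1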
/**
* Aggregates elements of `collection` on `accumulator` with keys transformed
* by `iteratee` and values set by `setter`.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} setter The function to set `accumulator` values.
* @param {Function} iteratee The iteratee to transform keys.
* @param {Object} accumulator The initial aggregated object.
 * @returns {Object} Returns `accumulator`.
*/
function baseAggregator(collection, setter, iteratee, accumulator) {
baseEach(collection, function(value, key, collection) {
setter(accumulator, value, iteratee(value), collection);
});
return accumulator;
}
/**
* The base implementation of `_.assign` without support for multiple sources
* or `customizer` functions.
*
* @private
* @param {Object} object The destination object.
* @param {Object} source The source object.
* @returns {Object} Returns `object`.
*/
function baseAssign(object, source) {
return object && copyObject(source, keys(source), object);
}
/**
* The base implementation of `_.assignIn` without support for multiple sources
* or `customizer` functions.
*
* @private
* @param {Object} object The destination object.
* @param {Object} source The source object.
* @returns {Object} Returns `object`.
*/
function baseAssignIn(object, source) {
return object && copyObject(source, keysIn(source), object);
}
/**
* The base implementation of `assignValue` and `assignMergeValue` without
* value checks.
*
* @private
* @param {Object} object The object to modify.
* @param {string} key The key of the property to assign.
* @param {*} value The value to assign.
*/
function baseAssignValue(object, key, value) {
if (key == '__proto__' && defineProperty) {
defineProperty(object, key, {
'configurable': true,
'enumerable': true,
'value': value,
'writable': true
});
} else {
object[key] = value;
}
}
/**
* The base implementation of `_.at` without support for individual paths.
*
* @private
* @param {Object} object The object to iterate over.
* @param {string[]} paths The property paths to pick.
* @returns {Array} Returns the picked elements.
*/
function baseAt(object, paths) {
var index = -1,
length = paths.length,
result = Array(length),
skip = object == null;
while (++index < length) {
result[index] = skip ? undefined : get(object, paths[index]);
}
return result;
}
/**
* The base implementation of `_.clamp` which doesn't coerce arguments.
*
* @private
* @param {number} number The number to clamp.
* @param {number} [lower] The lower bound.
* @param {number} upper The upper bound.
* @returns {number} Returns the clamped number.
*/
function baseClamp(number, lower, upper) {
if (number === number) {
if (upper !== undefined) {
number = number <= upper ? number : upper;
}
if (lower !== undefined) {
number = number >= lower ? number : lower;
}
}
return number;
}
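// Illustrative sketch (not part of the library source): the `number === number`
// guard lets NaN fall through unclamped.
//
//   baseClamp(5, 0, 3);   // => 3
//   baseClamp(-2, 0, 3);  // => 0
//   baseClamp(NaN, 0, 3); // => NaN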
/**
* The base implementation of `_.clone` and `_.cloneDeep` which tracks
* traversed objects.
*
* @private
* @param {*} value The value to clone.
 * @param {number} bitmask The bitmask flags.
* 1 - Deep clone
* 2 - Flatten inherited properties
* 4 - Clone symbols
* @param {Function} [customizer] The function to customize cloning.
* @param {string} [key] The key of `value`.
* @param {Object} [object] The parent object of `value`.
* @param {Object} [stack] Tracks traversed objects and their clone counterparts.
* @returns {*} Returns the cloned value.
*/
function baseClone(value, bitmask, customizer, key, object, stack) {
var result,
isDeep = bitmask & CLONE_DEEP_FLAG,
isFlat = bitmask & CLONE_FLAT_FLAG,
isFull = bitmask & CLONE_SYMBOLS_FLAG;
if (customizer) {
result = object ? customizer(value, key, object, stack) : customizer(value);
}
if (result !== undefined) {
return result;
}
if (!isObject(value)) {
return value;
}
var isArr = isArray(value);
if (isArr) {
result = initCloneArray(value);
if (!isDeep) {
return copyArray(value, result);
}
} else {
var tag = getTag(value),
isFunc = tag == funcTag || tag == genTag;
if (isBuffer(value)) {
return cloneBuffer(value, isDeep);
}
if (tag == objectTag || tag == argsTag || (isFunc && !object)) {
result = (isFlat || isFunc) ? {} : initCloneObject(value);
if (!isDeep) {
return isFlat
? copySymbolsIn(value, baseAssignIn(result, value))
: copySymbols(value, baseAssign(result, value));
}
} else {
if (!cloneableTags[tag]) {
return object ? value : {};
}
result = initCloneByTag(value, tag, isDeep);
}
}
// Check for circular references and return its corresponding clone.
stack || (stack = new Stack);
var stacked = stack.get(value);
if (stacked) {
return stacked;
}
stack.set(value, result);
if (isSet(value)) {
value.forEach(function(subValue) {
result.add(baseClone(subValue, bitmask, customizer, subValue, value, stack));
});
} else if (isMap(value)) {
value.forEach(function(subValue, key) {
result.set(key, baseClone(subValue, bitmask, customizer, key, value, stack));
});
}
var keysFunc = isFull
? (isFlat ? getAllKeysIn : getAllKeys)
: (isFlat ? keysIn : keys);
var props = isArr ? undefined : keysFunc(value);
arrayEach(props || value, function(subValue, key) {
if (props) {
key = subValue;
subValue = value[key];
}
// Recursively populate clone (susceptible to call stack limits).
assignValue(result, key, baseClone(subValue, bitmask, customizer, key, value, stack));
});
return result;
}
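// Illustrative sketch (not part of the library source): public wrappers
// combine the bitmask flags above, e.g. `_.cloneDeep(value)` calls
// `baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG)`, and the `stack`
// parameter is what makes circular structures clone correctly:
//
//   var o = { a: 1 }; o.self = o;
//   var c = baseClone(o, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG);
//   c.self === c; // => true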
/**
* The base implementation of `_.conforms` which doesn't clone `source`.
*
* @private
* @param {Object} source The object of property predicates to conform to.
* @returns {Function} Returns the new spec function.
*/
function baseConforms(source) {
var props = keys(source);
return function(object) {
return baseConformsTo(object, source, props);
};
}
/**
* The base implementation of `_.conformsTo` which accepts `props` to check.
*
* @private
* @param {Object} object The object to inspect.
* @param {Object} source The object of property predicates to conform to.
* @returns {boolean} Returns `true` if `object` conforms, else `false`.
*/
function baseConformsTo(object, source, props) {
var length = props.length;
if (object == null) {
return !length;
}
object = Object(object);
while (length--) {
var key = props[length],
predicate = source[key],
value = object[key];
if ((value === undefined && !(key in object)) || !predicate(value)) {
return false;
}
}
return true;
}
/**
* The base implementation of `_.delay` and `_.defer` which accepts `args`
* to provide to `func`.
*
* @private
* @param {Function} func The function to delay.
* @param {number} wait The number of milliseconds to delay invocation.
* @param {Array} args The arguments to provide to `func`.
* @returns {number|Object} Returns the timer id or timeout object.
*/
function baseDelay(func, wait, args) {
if (typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
return setTimeout(function() { func.apply(undefined, args); }, wait);
}
/**
* The base implementation of methods like `_.difference` without support
* for excluding multiple arrays or iteratee shorthands.
*
* @private
* @param {Array} array The array to inspect.
* @param {Array} values The values to exclude.
* @param {Function} [iteratee] The iteratee invoked per element.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new array of filtered values.
*/
function baseDifference(array, values, iteratee, comparator) {
var index = -1,
includes = arrayIncludes,
isCommon = true,
length = array.length,
result = [],
valuesLength = values.length;
if (!length) {
return result;
}
if (iteratee) {
values = arrayMap(values, baseUnary(iteratee));
}
if (comparator) {
includes = arrayIncludesWith;
isCommon = false;
}
else if (values.length >= LARGE_ARRAY_SIZE) {
includes = cacheHas;
isCommon = false;
values = new SetCache(values);
}
outer:
while (++index < length) {
var value = array[index],
computed = iteratee == null ? value : iteratee(value);
value = (comparator || value !== 0) ? value : 0;
if (isCommon && computed === computed) {
var valuesIndex = valuesLength;
while (valuesIndex--) {
if (values[valuesIndex] === computed) {
continue outer;
}
}
result.push(value);
}
else if (!includes(values, computed, comparator)) {
result.push(value);
}
}
return result;
}
/**
* The base implementation of `_.forEach` without support for iteratee shorthands.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array|Object} Returns `collection`.
*/
var baseEach = createBaseEach(baseForOwn);
/**
* The base implementation of `_.forEachRight` without support for iteratee shorthands.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array|Object} Returns `collection`.
*/
var baseEachRight = createBaseEach(baseForOwnRight, true);
/**
* The base implementation of `_.every` without support for iteratee shorthands.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} predicate The function invoked per iteration.
* @returns {boolean} Returns `true` if all elements pass the predicate check,
 * else `false`.
*/
function baseEvery(collection, predicate) {
var result = true;
baseEach(collection, function(value, index, collection) {
result = !!predicate(value, index, collection);
return result;
});
return result;
}
/**
* The base implementation of methods like `_.max` and `_.min` which accepts a
* `comparator` to determine the extremum value.
*
* @private
* @param {Array} array The array to iterate over.
* @param {Function} iteratee The iteratee invoked per iteration.
* @param {Function} comparator The comparator used to compare values.
* @returns {*} Returns the extremum value.
*/
function baseExtremum(array, iteratee, comparator) {
var index = -1,
length = array.length;
while (++index < length) {
var value = array[index],
current = iteratee(value);
    // Note: `computed` and `result` are hoisted `var`s, so both are
    // `undefined` on the first pass; the first qualifying element seeds them.
    if (current != null && (computed === undefined
? (current === current && !isSymbol(current))
: comparator(current, computed)
)) {
var computed = current,
result = value;
}
}
return result;
}
/**
* The base implementation of `_.fill` without an iteratee call guard.
*
* @private
* @param {Array} array The array to fill.
* @param {*} value The value to fill `array` with.
* @param {number} [start=0] The start position.
* @param {number} [end=array.length] The end position.
* @returns {Array} Returns `array`.
*/
function baseFill(array, value, start, end) {
var length = array.length;
start = toInteger(start);
if (start < 0) {
start = -start > length ? 0 : (length + start);
}
end = (end === undefined || end > length) ? length : toInteger(end);
if (end < 0) {
end += length;
}
end = start > end ? 0 : toLength(end);
while (start < end) {
array[start++] = value;
}
return array;
}
/**
* The base implementation of `_.filter` without support for iteratee shorthands.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} predicate The function invoked per iteration.
* @returns {Array} Returns the new filtered array.
*/
function baseFilter(collection, predicate) {
var result = [];
baseEach(collection, function(value, index, collection) {
if (predicate(value, index, collection)) {
result.push(value);
}
});
return result;
}
/**
* The base implementation of `_.flatten` with support for restricting flattening.
*
* @private
* @param {Array} array The array to flatten.
* @param {number} depth The maximum recursion depth.
 * @param {Function} [predicate=isFlattenable] The function invoked per iteration.
* @param {boolean} [isStrict] Restrict to values that pass `predicate` checks.
* @param {Array} [result=[]] The initial result value.
* @returns {Array} Returns the new flattened array.
*/
function baseFlatten(array, depth, predicate, isStrict, result) {
var index = -1,
length = array.length;
predicate || (predicate = isFlattenable);
result || (result = []);
while (++index < length) {
var value = array[index];
if (depth > 0 && predicate(value)) {
if (depth > 1) {
// Recursively flatten arrays (susceptible to call stack limits).
baseFlatten(value, depth - 1, predicate, isStrict, result);
} else {
arrayPush(result, value);
}
} else if (!isStrict) {
result[result.length] = value;
}
}
return result;
}
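// Illustrative sketch (not part of the library source): `depth` bounds the
// recursion, so `_.flatten` passes 1 while `_.flattenDeep` passes Infinity.
//
//   baseFlatten([1, [2, [3, [4]]]], 1);        // => [1, 2, [3, [4]]]
//   baseFlatten([1, [2, [3, [4]]]], Infinity); // => [1, 2, 3, 4]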
/**
* The base implementation of `baseForOwn` which iterates over `object`
* properties returned by `keysFunc` and invokes `iteratee` for each property.
* Iteratee functions may exit iteration early by explicitly returning `false`.
*
* @private
* @param {Object} object The object to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @param {Function} keysFunc The function to get the keys of `object`.
* @returns {Object} Returns `object`.
*/
var baseFor = createBaseFor();
/**
* This function is like `baseFor` except that it iterates over properties
* in the opposite order.
*
* @private
* @param {Object} object The object to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @param {Function} keysFunc The function to get the keys of `object`.
* @returns {Object} Returns `object`.
*/
var baseForRight = createBaseFor(true);
/**
* The base implementation of `_.forOwn` without support for iteratee shorthands.
*
* @private
* @param {Object} object The object to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Object} Returns `object`.
*/
function baseForOwn(object, iteratee) {
return object && baseFor(object, iteratee, keys);
}
/**
* The base implementation of `_.forOwnRight` without support for iteratee shorthands.
*
* @private
* @param {Object} object The object to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Object} Returns `object`.
*/
function baseForOwnRight(object, iteratee) {
return object && baseForRight(object, iteratee, keys);
}
/**
* The base implementation of `_.functions` which creates an array of
* `object` function property names filtered from `props`.
*
* @private
* @param {Object} object The object to inspect.
* @param {Array} props The property names to filter.
* @returns {Array} Returns the function names.
*/
function baseFunctions(object, props) {
return arrayFilter(props, function(key) {
return isFunction(object[key]);
});
}
/**
* The base implementation of `_.get` without support for default values.
*
* @private
* @param {Object} object The object to query.
* @param {Array|string} path The path of the property to get.
* @returns {*} Returns the resolved value.
*/
function baseGet(object, path) {
path = castPath(path, object);
var index = 0,
length = path.length;
while (object != null && index < length) {
object = object[toKey(path[index++])];
}
return (index && index == length) ? object : undefined;
}
/**
* The base implementation of `getAllKeys` and `getAllKeysIn` which uses
* `keysFunc` and `symbolsFunc` to get the enumerable property names and
* symbols of `object`.
*
* @private
* @param {Object} object The object to query.
* @param {Function} keysFunc The function to get the keys of `object`.
* @param {Function} symbolsFunc The function to get the symbols of `object`.
* @returns {Array} Returns the array of property names and symbols.
*/
function baseGetAllKeys(object, keysFunc, symbolsFunc) {
var result = keysFunc(object);
return isArray(object) ? result : arrayPush(result, symbolsFunc(object));
}
/**
* The base implementation of `getTag` without fallbacks for buggy environments.
*
* @private
* @param {*} value The value to query.
* @returns {string} Returns the `toStringTag`.
*/
function baseGetTag(value) {
if (value == null) {
return value === undefined ? undefinedTag : nullTag;
}
return (symToStringTag && symToStringTag in Object(value))
? getRawTag(value)
: objectToString(value);
}
/**
* The base implementation of `_.gt` which doesn't coerce arguments.
*
* @private
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is greater than `other`,
* else `false`.
*/
function baseGt(value, other) {
return value > other;
}
/**
* The base implementation of `_.has` without support for deep paths.
*
* @private
* @param {Object} [object] The object to query.
* @param {Array|string} key The key to check.
* @returns {boolean} Returns `true` if `key` exists, else `false`.
*/
function baseHas(object, key) {
return object != null && hasOwnProperty.call(object, key);
}
/**
* The base implementation of `_.hasIn` without support for deep paths.
*
* @private
* @param {Object} [object] The object to query.
* @param {Array|string} key The key to check.
* @returns {boolean} Returns `true` if `key` exists, else `false`.
*/
function baseHasIn(object, key) {
return object != null && key in Object(object);
}
/**
* The base implementation of `_.inRange` which doesn't coerce arguments.
*
* @private
* @param {number} number The number to check.
* @param {number} start The start of the range.
* @param {number} end The end of the range.
* @returns {boolean} Returns `true` if `number` is in the range, else `false`.
*/
function baseInRange(number, start, end) {
return number >= nativeMin(start, end) && number < nativeMax(start, end);
}
/**
* The base implementation of methods like `_.intersection`, without support
* for iteratee shorthands, that accepts an array of arrays to inspect.
*
* @private
* @param {Array} arrays The arrays to inspect.
* @param {Function} [iteratee] The iteratee invoked per element.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new array of shared values.
*/
function baseIntersection(arrays, iteratee, comparator) {
var includes = comparator ? arrayIncludesWith : arrayIncludes,
length = arrays[0].length,
othLength = arrays.length,
othIndex = othLength,
caches = Array(othLength),
maxLength = Infinity,
result = [];
while (othIndex--) {
var array = arrays[othIndex];
if (othIndex && iteratee) {
array = arrayMap(array, baseUnary(iteratee));
}
maxLength = nativeMin(array.length, maxLength);
caches[othIndex] = !comparator && (iteratee || (length >= 120 && array.length >= 120))
? new SetCache(othIndex && array)
: undefined;
}
array = arrays[0];
var index = -1,
seen = caches[0];
outer:
while (++index < length && result.length < maxLength) {
var value = array[index],
computed = iteratee ? iteratee(value) : value;
value = (comparator || value !== 0) ? value : 0;
if (!(seen
? cacheHas(seen, computed)
: includes(result, computed, comparator)
)) {
othIndex = othLength;
while (--othIndex) {
var cache = caches[othIndex];
if (!(cache
? cacheHas(cache, computed)
: includes(arrays[othIndex], computed, comparator))
) {
continue outer;
}
}
if (seen) {
seen.push(computed);
}
result.push(value);
}
}
return result;
}
/**
* The base implementation of `_.invert` and `_.invertBy` which inverts
* `object` with values transformed by `iteratee` and set by `setter`.
*
* @private
* @param {Object} object The object to iterate over.
* @param {Function} setter The function to set `accumulator` values.
* @param {Function} iteratee The iteratee to transform values.
* @param {Object} accumulator The initial inverted object.
 * @returns {Object} Returns `accumulator`.
*/
function baseInverter(object, setter, iteratee, accumulator) {
baseForOwn(object, function(value, key, object) {
setter(accumulator, iteratee(value), key, object);
});
return accumulator;
}
/**
* The base implementation of `_.invoke` without support for individual
* method arguments.
*
* @private
* @param {Object} object The object to query.
* @param {Array|string} path The path of the method to invoke.
* @param {Array} args The arguments to invoke the method with.
* @returns {*} Returns the result of the invoked method.
*/
function baseInvoke(object, path, args) {
path = castPath(path, object);
object = parent(object, path);
var func = object == null ? object : object[toKey(last(path))];
return func == null ? undefined : apply(func, object, args);
}
/**
* The base implementation of `_.isArguments`.
*
* @private
* @param {*} value The value to check.
 * @returns {boolean} Returns `true` if `value` is an `arguments` object,
 *  else `false`.
*/
function baseIsArguments(value) {
return isObjectLike(value) && baseGetTag(value) == argsTag;
}
/**
* The base implementation of `_.isArrayBuffer` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an array buffer, else `false`.
*/
function baseIsArrayBuffer(value) {
return isObjectLike(value) && baseGetTag(value) == arrayBufferTag;
}
/**
* The base implementation of `_.isDate` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a date object, else `false`.
*/
function baseIsDate(value) {
return isObjectLike(value) && baseGetTag(value) == dateTag;
}
/**
* The base implementation of `_.isEqual` which supports partial comparisons
* and tracks traversed objects.
*
* @private
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
 * @param {number} bitmask The bitmask flags.
* 1 - Unordered comparison
* 2 - Partial comparison
* @param {Function} [customizer] The function to customize comparisons.
* @param {Object} [stack] Tracks traversed `value` and `other` objects.
* @returns {boolean} Returns `true` if the values are equivalent, else `false`.
*/
function baseIsEqual(value, other, bitmask, customizer, stack) {
if (value === other) {
return true;
}
if (value == null || other == null || (!isObjectLike(value) && !isObjectLike(other))) {
return value !== value && other !== other;
}
return baseIsEqualDeep(value, other, bitmask, customizer, baseIsEqual, stack);
}
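// Illustrative sketch (not part of the library source): the primitive branch
// treats two self-unequal values (i.e. NaN) as equal, matching SameValueZero.
//
//   baseIsEqual(NaN, NaN); // => true
//   baseIsEqual(0, '0');   // => false (no coercion at this level)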
/**
* A specialized version of `baseIsEqual` for arrays and objects which performs
* deep comparisons and tracks traversed objects enabling objects with circular
* references to be compared.
*
* @private
* @param {Object} object The object to compare.
* @param {Object} other The other object to compare.
* @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.
* @param {Function} customizer The function to customize comparisons.
* @param {Function} equalFunc The function to determine equivalents of values.
* @param {Object} [stack] Tracks traversed `object` and `other` objects.
* @returns {boolean} Returns `true` if the objects are equivalent, else `false`.
*/
function baseIsEqualDeep(object, other, bitmask, customizer, equalFunc, stack) {
var objIsArr = isArray(object),
othIsArr = isArray(other),
objTag = objIsArr ? arrayTag : getTag(object),
othTag = othIsArr ? arrayTag : getTag(other);
objTag = objTag == argsTag ? objectTag : objTag;
othTag = othTag == argsTag ? objectTag : othTag;
var objIsObj = objTag == objectTag,
othIsObj = othTag == objectTag,
isSameTag = objTag == othTag;
if (isSameTag && isBuffer(object)) {
if (!isBuffer(other)) {
return false;
}
objIsArr = true;
objIsObj = false;
}
if (isSameTag && !objIsObj) {
stack || (stack = new Stack);
return (objIsArr || isTypedArray(object))
? equalArrays(object, other, bitmask, customizer, equalFunc, stack)
: equalByTag(object, other, objTag, bitmask, customizer, equalFunc, stack);
}
if (!(bitmask & COMPARE_PARTIAL_FLAG)) {
var objIsWrapped = objIsObj && hasOwnProperty.call(object, '__wrapped__'),
othIsWrapped = othIsObj && hasOwnProperty.call(other, '__wrapped__');
if (objIsWrapped || othIsWrapped) {
var objUnwrapped = objIsWrapped ? object.value() : object,
othUnwrapped = othIsWrapped ? other.value() : other;
stack || (stack = new Stack);
return equalFunc(objUnwrapped, othUnwrapped, bitmask, customizer, stack);
}
}
if (!isSameTag) {
return false;
}
stack || (stack = new Stack);
return equalObjects(object, other, bitmask, customizer, equalFunc, stack);
}
/**
* The base implementation of `_.isMap` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a map, else `false`.
*/
function baseIsMap(value) {
return isObjectLike(value) && getTag(value) == mapTag;
}
/**
* The base implementation of `_.isMatch` without support for iteratee shorthands.
*
* @private
* @param {Object} object The object to inspect.
* @param {Object} source The object of property values to match.
* @param {Array} matchData The property names, values, and compare flags to match.
* @param {Function} [customizer] The function to customize comparisons.
* @returns {boolean} Returns `true` if `object` is a match, else `false`.
*/
function baseIsMatch(object, source, matchData, customizer) {
var index = matchData.length,
length = index,
noCustomizer = !customizer;
if (object == null) {
return !length;
}
object = Object(object);
while (index--) {
var data = matchData[index];
if ((noCustomizer && data[2])
? data[1] !== object[data[0]]
: !(data[0] in object)
) {
return false;
}
}
while (++index < length) {
data = matchData[index];
var key = data[0],
objValue = object[key],
srcValue = data[1];
if (noCustomizer && data[2]) {
if (objValue === undefined && !(key in object)) {
return false;
}
} else {
var stack = new Stack;
if (customizer) {
var result = customizer(objValue, srcValue, key, object, source, stack);
}
if (!(result === undefined
? baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG, customizer, stack)
: result
)) {
return false;
}
}
}
return true;
}
/**
* The base implementation of `_.isNative` without bad shim checks.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a native function,
* else `false`.
*/
function baseIsNative(value) {
if (!isObject(value) || isMasked(value)) {
return false;
}
var pattern = isFunction(value) ? reIsNative : reIsHostCtor;
return pattern.test(toSource(value));
}
/**
* The base implementation of `_.isRegExp` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a regexp, else `false`.
*/
function baseIsRegExp(value) {
return isObjectLike(value) && baseGetTag(value) == regexpTag;
}
/**
* The base implementation of `_.isSet` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a set, else `false`.
*/
function baseIsSet(value) {
return isObjectLike(value) && getTag(value) == setTag;
}
/**
* The base implementation of `_.isTypedArray` without Node.js optimizations.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a typed array, else `false`.
*/
function baseIsTypedArray(value) {
return isObjectLike(value) &&
isLength(value.length) && !!typedArrayTags[baseGetTag(value)];
}
/**
* The base implementation of `_.iteratee`.
*
* @private
* @param {*} [value=_.identity] The value to convert to an iteratee.
* @returns {Function} Returns the iteratee.
*/
function baseIteratee(value) {
// Don't store the `typeof` result in a variable to avoid a JIT bug in Safari 9.
// See https://bugs.webkit.org/show_bug.cgi?id=156034 for more details.
if (typeof value == 'function') {
return value;
}
if (value == null) {
return identity;
}
if (typeof value == 'object') {
return isArray(value)
? baseMatchesProperty(value[0], value[1])
: baseMatches(value);
}
return property(value);
}
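// Illustrative sketch (not part of the library source): this is where the
// iteratee shorthands are resolved.
//
//   baseIteratee('x')({ x: 1 });            // => 1    (_.property)
//   baseIteratee({ x: 1 })({ x: 1, y: 2 }); // => true (_.matches)
//   baseIteratee(['x', 1])({ x: 1 });       // => true (_.matchesProperty)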
/**
* The base implementation of `_.keys` which doesn't treat sparse arrays as dense.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names.
*/
function baseKeys(object) {
if (!isPrototype(object)) {
return nativeKeys(object);
}
var result = [];
for (var key in Object(object)) {
if (hasOwnProperty.call(object, key) && key != 'constructor') {
result.push(key);
}
}
return result;
}
/**
* The base implementation of `_.keysIn` which doesn't treat sparse arrays as dense.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names.
*/
function baseKeysIn(object) {
if (!isObject(object)) {
return nativeKeysIn(object);
}
var isProto = isPrototype(object),
result = [];
for (var key in object) {
if (!(key == 'constructor' && (isProto || !hasOwnProperty.call(object, key)))) {
result.push(key);
}
}
return result;
}
/**
* The base implementation of `_.lt` which doesn't coerce arguments.
*
* @private
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is less than `other`,
* else `false`.
*/
function baseLt(value, other) {
return value < other;
}
/**
* The base implementation of `_.map` without support for iteratee shorthands.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} iteratee The function invoked per iteration.
* @returns {Array} Returns the new mapped array.
*/
function baseMap(collection, iteratee) {
var index = -1,
result = isArrayLike(collection) ? Array(collection.length) : [];
baseEach(collection, function(value, key, collection) {
result[++index] = iteratee(value, key, collection);
});
return result;
}
/**
* The base implementation of `_.matches` which doesn't clone `source`.
*
* @private
* @param {Object} source The object of property values to match.
* @returns {Function} Returns the new spec function.
*/
function baseMatches(source) {
var matchData = getMatchData(source);
if (matchData.length == 1 && matchData[0][2]) {
return matchesStrictComparable(matchData[0][0], matchData[0][1]);
}
return function(object) {
return object === source || baseIsMatch(object, source, matchData);
};
}
/**
* The base implementation of `_.matchesProperty` which doesn't clone `srcValue`.
*
* @private
* @param {string} path The path of the property to get.
* @param {*} srcValue The value to match.
* @returns {Function} Returns the new spec function.
*/
function baseMatchesProperty(path, srcValue) {
if (isKey(path) && isStrictComparable(srcValue)) {
return matchesStrictComparable(toKey(path), srcValue);
}
return function(object) {
var objValue = get(object, path);
return (objValue === undefined && objValue === srcValue)
? hasIn(object, path)
: baseIsEqual(srcValue, objValue, COMPARE_PARTIAL_FLAG | COMPARE_UNORDERED_FLAG);
};
}
/**
* The base implementation of `_.merge` without support for multiple sources.
*
* @private
* @param {Object} object The destination object.
* @param {Object} source The source object.
* @param {number} srcIndex The index of `source`.
* @param {Function} [customizer] The function to customize merged values.
* @param {Object} [stack] Tracks traversed source values and their merged
* counterparts.
*/
function baseMerge(object, source, srcIndex, customizer, stack) {
if (object === source) {
return;
}
baseFor(source, function(srcValue, key) {
stack || (stack = new Stack);
if (isObject(srcValue)) {
baseMergeDeep(object, source, key, srcIndex, baseMerge, customizer, stack);
}
else {
var newValue = customizer
? customizer(safeGet(object, key), srcValue, (key + ''), object, source, stack)
: undefined;
if (newValue === undefined) {
newValue = srcValue;
}
assignMergeValue(object, key, newValue);
}
}, keysIn);
}
/**
* A specialized version of `baseMerge` for arrays and objects which performs
* deep merges and tracks traversed objects enabling objects with circular
* references to be merged.
*
* @private
* @param {Object} object The destination object.
* @param {Object} source The source object.
* @param {string} key The key of the value to merge.
* @param {number} srcIndex The index of `source`.
* @param {Function} mergeFunc The function to merge values.
* @param {Function} [customizer] The function to customize assigned values.
* @param {Object} [stack] Tracks traversed source values and their merged
* counterparts.
*/
function baseMergeDeep(object, source, key, srcIndex, mergeFunc, customizer, stack) {
var objValue = safeGet(object, key),
srcValue = safeGet(source, key),
stacked = stack.get(srcValue);
if (stacked) {
assignMergeValue(object, key, stacked);
return;
}
var newValue = customizer
? customizer(objValue, srcValue, (key + ''), object, source, stack)
: undefined;
var isCommon = newValue === undefined;
if (isCommon) {
var isArr = isArray(srcValue),
isBuff = !isArr && isBuffer(srcValue),
isTyped = !isArr && !isBuff && isTypedArray(srcValue);
newValue = srcValue;
if (isArr || isBuff || isTyped) {
if (isArray(objValue)) {
newValue = objValue;
}
else if (isArrayLikeObject(objValue)) {
newValue = copyArray(objValue);
}
else if (isBuff) {
isCommon = false;
newValue = cloneBuffer(srcValue, true);
}
else if (isTyped) {
isCommon = false;
newValue = cloneTypedArray(srcValue, true);
}
else {
newValue = [];
}
}
else if (isPlainObject(srcValue) || isArguments(srcValue)) {
newValue = objValue;
if (isArguments(objValue)) {
newValue = toPlainObject(objValue);
}
else if (!isObject(objValue) || isFunction(objValue)) {
newValue = initCloneObject(srcValue);
}
}
else {
isCommon = false;
}
}
if (isCommon) {
// Recursively merge objects and arrays (susceptible to call stack limits).
stack.set(srcValue, newValue);
mergeFunc(newValue, srcValue, srcIndex, customizer, stack);
stack['delete'](srcValue);
}
assignMergeValue(object, key, newValue);
}
/**
* The base implementation of `_.nth` which doesn't coerce arguments.
*
* @private
* @param {Array} array The array to query.
* @param {number} n The index of the element to return.
* @returns {*} Returns the nth element of `array`.
*/
function baseNth(array, n) {
var length = array.length;
if (!length) {
return;
}
n += n < 0 ? length : 0;
return isIndex(n, length) ? array[n] : undefined;
}
/**
* The base implementation of `_.orderBy` without param guards.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function[]|Object[]|string[]} iteratees The iteratees to sort by.
* @param {string[]} orders The sort orders of `iteratees`.
* @returns {Array} Returns the new sorted array.
*/
function baseOrderBy(collection, iteratees, orders) {
if (iteratees.length) {
iteratees = arrayMap(iteratees, function(iteratee) {
if (isArray(iteratee)) {
return function(value) {
return baseGet(value, iteratee.length === 1 ? iteratee[0] : iteratee);
}
}
return iteratee;
});
} else {
iteratees = [identity];
}
var index = -1;
iteratees = arrayMap(iteratees, baseUnary(getIteratee()));
var result = baseMap(collection, function(value, key, collection) {
var criteria = arrayMap(iteratees, function(iteratee) {
return iteratee(value);
});
return { 'criteria': criteria, 'index': ++index, 'value': value };
});
return baseSortBy(result, function(object, other) {
return compareMultiple(object, other, orders);
});
}
/**
* The base implementation of `_.pick` without support for individual
* property identifiers.
*
* @private
* @param {Object} object The source object.
* @param {string[]} paths The property paths to pick.
* @returns {Object} Returns the new object.
*/
function basePick(object, paths) {
return basePickBy(object, paths, function(value, path) {
return hasIn(object, path);
});
}
/**
* The base implementation of `_.pickBy` without support for iteratee shorthands.
*
* @private
* @param {Object} object The source object.
* @param {string[]} paths The property paths to pick.
* @param {Function} predicate The function invoked per property.
* @returns {Object} Returns the new object.
*/
function basePickBy(object, paths, predicate) {
var index = -1,
length = paths.length,
result = {};
while (++index < length) {
var path = paths[index],
value = baseGet(object, path);
if (predicate(value, path)) {
baseSet(result, castPath(path, object), value);
}
}
return result;
}
/**
* A specialized version of `baseProperty` which supports deep paths.
*
* @private
* @param {Array|string} path The path of the property to get.
* @returns {Function} Returns the new accessor function.
*/
function basePropertyDeep(path) {
return function(object) {
return baseGet(object, path);
};
}
/**
* The base implementation of `_.pullAllBy` without support for iteratee
* shorthands.
*
* @private
* @param {Array} array The array to modify.
* @param {Array} values The values to remove.
* @param {Function} [iteratee] The iteratee invoked per element.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns `array`.
*/
function basePullAll(array, values, iteratee, comparator) {
var indexOf = comparator ? baseIndexOfWith : baseIndexOf,
index = -1,
length = values.length,
seen = array;
if (array === values) {
values = copyArray(values);
}
if (iteratee) {
seen = arrayMap(array, baseUnary(iteratee));
}
while (++index < length) {
var fromIndex = 0,
value = values[index],
computed = iteratee ? iteratee(value) : value;
while ((fromIndex = indexOf(seen, computed, fromIndex, comparator)) > -1) {
if (seen !== array) {
splice.call(seen, fromIndex, 1);
}
splice.call(array, fromIndex, 1);
}
}
return array;
}
/**
* The base implementation of `_.pullAt` without support for individual
* indexes or capturing the removed elements.
*
* @private
* @param {Array} array The array to modify.
* @param {number[]} indexes The indexes of elements to remove.
* @returns {Array} Returns `array`.
*/
function basePullAt(array, indexes) {
var length = array ? indexes.length : 0,
lastIndex = length - 1;
while (length--) {
var index = indexes[length];
      // `previous` is a hoisted `var`: comparing against it skips duplicate
      // indexes so each element is removed at most once.
      if (length == lastIndex || index !== previous) {
var previous = index;
if (isIndex(index)) {
splice.call(array, index, 1);
} else {
baseUnset(array, index);
}
}
}
return array;
}
/**
* The base implementation of `_.random` without support for returning
* floating-point numbers.
*
* @private
* @param {number} lower The lower bound.
* @param {number} upper The upper bound.
* @returns {number} Returns the random number.
*/
function baseRandom(lower, upper) {
return lower + nativeFloor(nativeRandom() * (upper - lower + 1));
}
/**
* The base implementation of `_.range` and `_.rangeRight` which doesn't
* coerce arguments.
*
* @private
* @param {number} start The start of the range.
* @param {number} end The end of the range.
* @param {number} step The value to increment or decrement by.
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Array} Returns the range of numbers.
*/
function baseRange(start, end, step, fromRight) {
var index = -1,
length = nativeMax(nativeCeil((end - start) / (step || 1)), 0),
result = Array(length);
while (length--) {
result[fromRight ? length : ++index] = start;
start += step;
}
return result;
}
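// Illustrative sketch (not part of the library source):
//
//   baseRange(0, 4, 1);       // => [0, 1, 2, 3]
//   baseRange(0, -4, -1);     // => [0, -1, -2, -3]
//   baseRange(0, 4, 1, true); // => [3, 2, 1, 0]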
/**
* The base implementation of `_.repeat` which doesn't coerce arguments.
*
* @private
* @param {string} string The string to repeat.
* @param {number} n The number of times to repeat the string.
* @returns {string} Returns the repeated string.
*/
function baseRepeat(string, n) {
var result = '';
if (!string || n < 1 || n > MAX_SAFE_INTEGER) {
return result;
}
// Leverage the exponentiation by squaring algorithm for a faster repeat.
// See https://en.wikipedia.org/wiki/Exponentiation_by_squaring for more details.
do {
if (n % 2) {
result += string;
}
n = nativeFloor(n / 2);
if (n) {
string += string;
}
} while (n);
return result;
}
/**
* The base implementation of `_.rest` which doesn't validate or coerce arguments.
*
* @private
* @param {Function} func The function to apply a rest parameter to.
* @param {number} [start=func.length-1] The start position of the rest parameter.
* @returns {Function} Returns the new function.
*/
function baseRest(func, start) {
return setToString(overRest(func, start, identity), func + '');
}
/**
* The base implementation of `_.sample`.
*
* @private
* @param {Array|Object} collection The collection to sample.
* @returns {*} Returns the random element.
*/
function baseSample(collection) {
return arraySample(values(collection));
}
/**
* The base implementation of `_.sampleSize` without param guards.
*
* @private
* @param {Array|Object} collection The collection to sample.
* @param {number} n The number of elements to sample.
* @returns {Array} Returns the random elements.
*/
function baseSampleSize(collection, n) {
var array = values(collection);
return shuffleSelf(array, baseClamp(n, 0, array.length));
}
/**
* The base implementation of `_.set`.
*
* @private
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to set.
* @param {*} value The value to set.
* @param {Function} [customizer] The function to customize path creation.
* @returns {Object} Returns `object`.
*/
function baseSet(object, path, value, customizer) {
if (!isObject(object)) {
return object;
}
path = castPath(path, object);
var index = -1,
length = path.length,
lastIndex = length - 1,
nested = object;
while (nested != null && ++index < length) {
var key = toKey(path[index]),
newValue = value;
if (key === '__proto__' || key === 'constructor' || key === 'prototype') {
return object;
}
if (index != lastIndex) {
var objValue = nested[key];
newValue = customizer ? customizer(objValue, key, nested) : undefined;
if (newValue === undefined) {
newValue = isObject(objValue)
? objValue
: (isIndex(path[index + 1]) ? [] : {});
}
}
assignValue(nested, key, newValue);
nested = nested[key];
}
return object;
}
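// Illustrative sketch (not part of the library source): intermediate
// containers are created on demand (arrays for index-like segments), and the
// '__proto__'/'constructor'/'prototype' guard blocks prototype pollution.
//
//   baseSet({}, 'a[0].b', 2);      // => { a: [{ b: 2 }] }
//   baseSet({}, '__proto__.x', 1); // => {} (guarded, nothing assigned)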
/**
* The base implementation of `setData` without support for hot loop shorting.
*
* @private
* @param {Function} func The function to associate metadata with.
* @param {*} data The metadata.
* @returns {Function} Returns `func`.
*/
var baseSetData = !metaMap ? identity : function(func, data) {
metaMap.set(func, data);
return func;
};
/**
* The base implementation of `setToString` without support for hot loop shorting.
*
* @private
* @param {Function} func The function to modify.
* @param {Function} string The `toString` result.
* @returns {Function} Returns `func`.
*/
var baseSetToString = !defineProperty ? identity : function(func, string) {
return defineProperty(func, 'toString', {
'configurable': true,
'enumerable': false,
'value': constant(string),
'writable': true
});
};
/**
* The base implementation of `_.shuffle`.
*
* @private
* @param {Array|Object} collection The collection to shuffle.
* @returns {Array} Returns the new shuffled array.
*/
function baseShuffle(collection) {
return shuffleSelf(values(collection));
}
/**
* The base implementation of `_.slice` without an iteratee call guard.
*
* @private
* @param {Array} array The array to slice.
* @param {number} [start=0] The start position.
* @param {number} [end=array.length] The end position.
* @returns {Array} Returns the slice of `array`.
*/
function baseSlice(array, start, end) {
var index = -1,
length = array.length;
if (start < 0) {
start = -start > length ? 0 : (length + start);
}
end = end > length ? length : end;
if (end < 0) {
end += length;
}
length = start > end ? 0 : ((end - start) >>> 0);
start >>>= 0;
var result = Array(length);
while (++index < length) {
result[index] = array[index + start];
}
return result;
}
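// Illustrative sketch (not part of the library source): callers always pass
// explicit bounds; negatives are normalized against `length`.
//
//   baseSlice([1, 2, 3, 4], 1, 3);   // => [2, 3]
//   baseSlice([1, 2, 3, 4], -3, 10); // => [2, 3, 4]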
/**
* The base implementation of `_.some` without support for iteratee shorthands.
*
* @private
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} predicate The function invoked per iteration.
* @returns {boolean} Returns `true` if any element passes the predicate check,
* else `false`.
*/
function baseSome(collection, predicate) {
var result;
baseEach(collection, function(value, index, collection) {
result = predicate(value, index, collection);
return !result;
});
return !!result;
}
/**
* The base implementation of `_.sortedIndex` and `_.sortedLastIndex` which
* performs a binary search of `array` to determine the index at which `value`
* should be inserted into `array` in order to maintain its sort order.
*
* @private
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @param {boolean} [retHighest] Specify returning the highest qualified index.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
*/
function baseSortedIndex(array, value, retHighest) {
var low = 0,
high = array == null ? low : array.length;
if (typeof value == 'number' && value === value && high <= HALF_MAX_ARRAY_LENGTH) {
while (low < high) {
var mid = (low + high) >>> 1,
computed = array[mid];
if (computed !== null && !isSymbol(computed) &&
(retHighest ? (computed <= value) : (computed < value))) {
low = mid + 1;
} else {
high = mid;
}
}
return high;
}
return baseSortedIndexBy(array, value, identity, retHighest);
}
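// Illustrative sketch (not part of the library source): `retHighest` picks
// which side of a run of equal values the insertion index lands on.
//
//   baseSortedIndex([30, 50], 40);       // => 1
//   baseSortedIndex([4, 4, 5], 4);       // => 0 (_.sortedIndex)
//   baseSortedIndex([4, 4, 5], 4, true); // => 2 (_.sortedLastIndex)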
/**
* The base implementation of `_.sortedIndexBy` and `_.sortedLastIndexBy`
* which invokes `iteratee` for `value` and each element of `array` to compute
* their sort ranking. The iteratee is invoked with one argument; (value).
*
* @private
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @param {Function} iteratee The iteratee invoked per element.
* @param {boolean} [retHighest] Specify returning the highest qualified index.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
*/
function baseSortedIndexBy(array, value, iteratee, retHighest) {
var low = 0,
high = array == null ? 0 : array.length;
if (high === 0) {
return 0;
}
value = iteratee(value);
var valIsNaN = value !== value,
valIsNull = value === null,
valIsSymbol = isSymbol(value),
valIsUndefined = value === undefined;
while (low < high) {
var mid = nativeFloor((low + high) / 2),
computed = iteratee(array[mid]),
othIsDefined = computed !== undefined,
othIsNull = computed === null,
othIsReflexive = computed === computed,
othIsSymbol = isSymbol(computed);
if (valIsNaN) {
var setLow = retHighest || othIsReflexive;
} else if (valIsUndefined) {
setLow = othIsReflexive && (retHighest || othIsDefined);
} else if (valIsNull) {
setLow = othIsReflexive && othIsDefined && (retHighest || !othIsNull);
} else if (valIsSymbol) {
setLow = othIsReflexive && othIsDefined && !othIsNull && (retHighest || !othIsSymbol);
} else if (othIsNull || othIsSymbol) {
setLow = false;
} else {
setLow = retHighest ? (computed <= value) : (computed < value);
}
if (setLow) {
low = mid + 1;
} else {
high = mid;
}
}
return nativeMin(high, MAX_ARRAY_INDEX);
}
/**
* The base implementation of `_.sortedUniq` and `_.sortedUniqBy` without
* support for iteratee shorthands.
*
* @private
* @param {Array} array The array to inspect.
* @param {Function} [iteratee] The iteratee invoked per element.
* @returns {Array} Returns the new duplicate free array.
*/
function baseSortedUniq(array, iteratee) {
var index = -1,
length = array.length,
resIndex = 0,
result = [];
while (++index < length) {
var value = array[index],
computed = iteratee ? iteratee(value) : value;
if (!index || !eq(computed, seen)) {
var seen = computed;
result[resIndex++] = value === 0 ? 0 : value;
}
}
return result;
}
/**
* The base implementation of `_.toNumber` which doesn't ensure correct
* conversions of binary, hexadecimal, or octal string values.
*
* @private
* @param {*} value The value to process.
* @returns {number} Returns the number.
*/
function baseToNumber(value) {
if (typeof value == 'number') {
return value;
}
if (isSymbol(value)) {
return NAN;
}
return +value;
}
/**
* The base implementation of `_.toString` which doesn't convert nullish
* values to empty strings.
*
* @private
* @param {*} value The value to process.
* @returns {string} Returns the string.
*/
function baseToString(value) {
// Exit early for strings to avoid a performance hit in some environments.
if (typeof value == 'string') {
return value;
}
if (isArray(value)) {
// Recursively convert values (susceptible to call stack limits).
return arrayMap(value, baseToString) + '';
}
if (isSymbol(value)) {
return symbolToString ? symbolToString.call(value) : '';
}
var result = (value + '');
return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result;
}
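// Illustrative usage (added for clarity; not in the original source):
//   baseToString(-0);     // => '-0' (sign preserved)
//   baseToString([1, 2]); // => '1,2'
//   baseToString(null);   // => 'null' (nullish handling is left to callers)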
/**
* The base implementation of `_.uniqBy` without support for iteratee shorthands.
*
* @private
* @param {Array} array The array to inspect.
* @param {Function} [iteratee] The iteratee invoked per element.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new duplicate free array.
*/
function baseUniq(array, iteratee, comparator) {
var index = -1,
includes = arrayIncludes,
length = array.length,
isCommon = true,
result = [],
seen = result;
if (comparator) {
isCommon = false;
includes = arrayIncludesWith;
}
else if (length >= LARGE_ARRAY_SIZE) {
var set = iteratee ? null : createSet(array);
if (set) {
return setToArray(set);
}
isCommon = false;
includes = cacheHas;
seen = new SetCache;
}
else {
seen = iteratee ? [] : result;
}
outer:
while (++index < length) {
var value = array[index],
computed = iteratee ? iteratee(value) : value;
value = (comparator || value !== 0) ? value : 0;
if (isCommon && computed === computed) {
var seenIndex = seen.length;
while (seenIndex--) {
if (seen[seenIndex] === computed) {
continue outer;
}
}
if (iteratee) {
seen.push(computed);
}
result.push(value);
}
else if (!includes(seen, computed, comparator)) {
if (seen !== result) {
seen.push(computed);
}
result.push(value);
}
}
return result;
}
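// Illustrative usage (added for clarity; not in the original source):
//   baseUniq([2, 1, 2]);                   // => [2, 1]
//   baseUniq([2.1, 1.2, 2.3], Math.floor); // => [2.1, 1.2]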
/**
* The base implementation of `_.unset`.
*
* @private
* @param {Object} object The object to modify.
* @param {Array|string} path The property path to unset.
* @returns {boolean} Returns `true` if the property is deleted, else `false`.
*/
function baseUnset(object, path) {
path = castPath(path, object);
object = parent(object, path);
return object == null || delete object[toKey(last(path))];
}
/**
* The base implementation of `_.update`.
*
* @private
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to update.
* @param {Function} updater The function to produce the updated value.
* @param {Function} [customizer] The function to customize path creation.
* @returns {Object} Returns `object`.
*/
function baseUpdate(object, path, updater, customizer) {
return baseSet(object, path, updater(baseGet(object, path)), customizer);
}
/**
* The base implementation of methods like `_.dropWhile` and `_.takeWhile`
* without support for iteratee shorthands.
*
* @private
* @param {Array} array The array to query.
* @param {Function} predicate The function invoked per iteration.
* @param {boolean} [isDrop] Specify dropping elements instead of taking them.
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Array} Returns the slice of `array`.
*/
function baseWhile(array, predicate, isDrop, fromRight) {
var length = array.length,
index = fromRight ? length : -1;
while ((fromRight ? index-- : ++index < length) &&
predicate(array[index], index, array)) {}
return isDrop
? baseSlice(array, (fromRight ? 0 : index), (fromRight ? index + 1 : length))
: baseSlice(array, (fromRight ? index + 1 : 0), (fromRight ? length : index));
}
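// Illustrative usage (added for clarity; not in the original source):
//   var lt3 = function(n) { return n < 3; };
//   baseWhile([1, 2, 3, 4], lt3);       // => [1, 2] (take semantics)
//   baseWhile([1, 2, 3, 4], lt3, true); // => [3, 4] (drop semantics)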
/**
* The base implementation of `wrapperValue` which returns the result of
* performing a sequence of actions on the unwrapped `value`, where each
* successive action is supplied the return value of the previous.
*
* @private
* @param {*} value The unwrapped value.
* @param {Array} actions Actions to perform to resolve the unwrapped value.
* @returns {*} Returns the resolved value.
*/
function baseWrapperValue(value, actions) {
var result = value;
if (result instanceof LazyWrapper) {
result = result.value();
}
return arrayReduce(actions, function(result, action) {
return action.func.apply(action.thisArg, arrayPush([result], action.args));
}, result);
}
/**
* The base implementation of methods like `_.xor`, without support for
* iteratee shorthands, that accepts an array of arrays to inspect.
*
* @private
* @param {Array} arrays The arrays to inspect.
* @param {Function} [iteratee] The iteratee invoked per element.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new array of values.
*/
function baseXor(arrays, iteratee, comparator) {
var length = arrays.length;
if (length < 2) {
return length ? baseUniq(arrays[0]) : [];
}
var index = -1,
result = Array(length);
while (++index < length) {
var array = arrays[index],
othIndex = -1;
while (++othIndex < length) {
if (othIndex != index) {
result[index] = baseDifference(result[index] || array, arrays[othIndex], iteratee, comparator);
}
}
}
return baseUniq(baseFlatten(result, 1), iteratee, comparator);
}
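// Illustrative usage (added for clarity; not in the original source);
// the result is the symmetric difference of the inputs:
//   baseXor([[2, 1], [2, 3]]); // => [1, 3]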
/**
 * The base implementation of `_.zipObject` which assigns values using `assignFunc`.
*
* @private
* @param {Array} props The property identifiers.
* @param {Array} values The property values.
* @param {Function} assignFunc The function to assign values.
* @returns {Object} Returns the new object.
*/
function baseZipObject(props, values, assignFunc) {
var index = -1,
length = props.length,
valsLength = values.length,
result = {};
while (++index < length) {
var value = index < valsLength ? values[index] : undefined;
assignFunc(result, props[index], value);
}
return result;
}
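// Illustrative usage (added for clarity; not in the original source);
// `assignValue` here stands for any assign-style function:
//   baseZipObject(['a', 'b'], [1, 2], assignValue); // => { 'a': 1, 'b': 2 }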
/**
 * Casts `value` to an empty array if it's not an array-like object.
*
* @private
* @param {*} value The value to inspect.
* @returns {Array|Object} Returns the cast array-like object.
*/
function castArrayLikeObject(value) {
return isArrayLikeObject(value) ? value : [];
}
/**
* Casts `value` to `identity` if it's not a function.
*
* @private
* @param {*} value The value to inspect.
* @returns {Function} Returns cast function.
*/
function castFunction(value) {
return typeof value == 'function' ? value : identity;
}
/**
* Casts `value` to a path array if it's not one.
*
* @private
* @param {*} value The value to inspect.
* @param {Object} [object] The object to query keys on.
* @returns {Array} Returns the cast property path array.
*/
function castPath(value, object) {
if (isArray(value)) {
return value;
}
return isKey(value, object) ? [value] : stringToPath(toString(value));
}
/**
* A `baseRest` alias which can be replaced with `identity` by module
* replacement plugins.
*
* @private
* @type {Function}
* @param {Function} func The function to apply a rest parameter to.
* @returns {Function} Returns the new function.
*/
var castRest = baseRest;
/**
* Casts `array` to a slice if it's needed.
*
* @private
* @param {Array} array The array to inspect.
* @param {number} start The start position.
* @param {number} [end=array.length] The end position.
* @returns {Array} Returns the cast slice.
*/
function castSlice(array, start, end) {
var length = array.length;
end = end === undefined ? length : end;
return (!start && end >= length) ? array : baseSlice(array, start, end);
}
/**
* A simple wrapper around the global [`clearTimeout`](https://mdn.io/clearTimeout).
*
* @private
* @param {number|Object} id The timer id or timeout object of the timer to clear.
*/
var clearTimeout = ctxClearTimeout || function(id) {
return root.clearTimeout(id);
};
/**
* Creates a clone of `buffer`.
*
* @private
* @param {Buffer} buffer The buffer to clone.
* @param {boolean} [isDeep] Specify a deep clone.
* @returns {Buffer} Returns the cloned buffer.
*/
function cloneBuffer(buffer, isDeep) {
if (isDeep) {
return buffer.slice();
}
var length = buffer.length,
result = allocUnsafe ? allocUnsafe(length) : new buffer.constructor(length);
buffer.copy(result);
return result;
}
/**
* Creates a clone of `arrayBuffer`.
*
* @private
* @param {ArrayBuffer} arrayBuffer The array buffer to clone.
* @returns {ArrayBuffer} Returns the cloned array buffer.
*/
function cloneArrayBuffer(arrayBuffer) {
var result = new arrayBuffer.constructor(arrayBuffer.byteLength);
new Uint8Array(result).set(new Uint8Array(arrayBuffer));
return result;
}
/**
* Creates a clone of `dataView`.
*
* @private
* @param {Object} dataView The data view to clone.
* @param {boolean} [isDeep] Specify a deep clone.
* @returns {Object} Returns the cloned data view.
*/
function cloneDataView(dataView, isDeep) {
var buffer = isDeep ? cloneArrayBuffer(dataView.buffer) : dataView.buffer;
return new dataView.constructor(buffer, dataView.byteOffset, dataView.byteLength);
}
/**
* Creates a clone of `regexp`.
*
* @private
* @param {Object} regexp The regexp to clone.
* @returns {Object} Returns the cloned regexp.
*/
function cloneRegExp(regexp) {
var result = new regexp.constructor(regexp.source, reFlags.exec(regexp));
result.lastIndex = regexp.lastIndex;
return result;
}
/**
* Creates a clone of the `symbol` object.
*
* @private
* @param {Object} symbol The symbol object to clone.
* @returns {Object} Returns the cloned symbol object.
*/
function cloneSymbol(symbol) {
return symbolValueOf ? Object(symbolValueOf.call(symbol)) : {};
}
/**
* Creates a clone of `typedArray`.
*
* @private
* @param {Object} typedArray The typed array to clone.
* @param {boolean} [isDeep] Specify a deep clone.
* @returns {Object} Returns the cloned typed array.
*/
function cloneTypedArray(typedArray, isDeep) {
var buffer = isDeep ? cloneArrayBuffer(typedArray.buffer) : typedArray.buffer;
return new typedArray.constructor(buffer, typedArray.byteOffset, typedArray.length);
}
/**
* Compares values to sort them in ascending order.
*
* @private
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {number} Returns the sort order indicator for `value`.
*/
function compareAscending(value, other) {
if (value !== other) {
var valIsDefined = value !== undefined,
valIsNull = value === null,
valIsReflexive = value === value,
valIsSymbol = isSymbol(value);
var othIsDefined = other !== undefined,
othIsNull = other === null,
othIsReflexive = other === other,
othIsSymbol = isSymbol(other);
if ((!othIsNull && !othIsSymbol && !valIsSymbol && value > other) ||
(valIsSymbol && othIsDefined && othIsReflexive && !othIsNull && !othIsSymbol) ||
(valIsNull && othIsDefined && othIsReflexive) ||
(!valIsDefined && othIsReflexive) ||
!valIsReflexive) {
return 1;
}
if ((!valIsNull && !valIsSymbol && !othIsSymbol && value < other) ||
(othIsSymbol && valIsDefined && valIsReflexive && !valIsNull && !valIsSymbol) ||
(othIsNull && valIsDefined && valIsReflexive) ||
(!othIsDefined && valIsReflexive) ||
!othIsReflexive) {
return -1;
}
}
return 0;
}
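// Illustrative usage (added for clarity; not in the original source);
// `undefined`, `null`, symbols, and `NaN` sort after other values:
//   compareAscending(1, 2);           // => -1
//   compareAscending('a', undefined); // => -1
//   compareAscending(NaN, 1);         // => 1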
/**
* Used by `_.orderBy` to compare multiple properties of a value to another
* and stable sort them.
*
* If `orders` is unspecified, all values are sorted in ascending order. Otherwise,
* specify an order of "desc" for descending or "asc" for ascending sort order
* of corresponding values.
*
* @private
* @param {Object} object The object to compare.
* @param {Object} other The other object to compare.
* @param {boolean[]|string[]} orders The order to sort by for each property.
* @returns {number} Returns the sort order indicator for `object`.
*/
function compareMultiple(object, other, orders) {
var index = -1,
objCriteria = object.criteria,
othCriteria = other.criteria,
length = objCriteria.length,
ordersLength = orders.length;
while (++index < length) {
var result = compareAscending(objCriteria[index], othCriteria[index]);
if (result) {
if (index >= ordersLength) {
return result;
}
var order = orders[index];
return result * (order == 'desc' ? -1 : 1);
}
}
// Fixes an `Array#sort` bug in the JS engine embedded in Adobe applications
// that causes it, under certain circumstances, to provide the same value for
// `object` and `other`. See https://github.com/jashkenas/underscore/pull/1247
// for more details.
//
// This also ensures a stable sort in V8 and other engines.
// See https://bugs.chromium.org/p/v8/issues/detail?id=90 for more details.
return object.index - other.index;
}
/**
* Creates an array that is the composition of partially applied arguments,
* placeholders, and provided arguments into a single array of arguments.
*
* @private
* @param {Array} args The provided arguments.
* @param {Array} partials The arguments to prepend to those provided.
* @param {Array} holders The `partials` placeholder indexes.
 * @param {boolean} [isCurried] Specify composing for a curried function.
* @returns {Array} Returns the new array of composed arguments.
*/
function composeArgs(args, partials, holders, isCurried) {
var argsIndex = -1,
argsLength = args.length,
holdersLength = holders.length,
leftIndex = -1,
leftLength = partials.length,
rangeLength = nativeMax(argsLength - holdersLength, 0),
result = Array(leftLength + rangeLength),
isUncurried = !isCurried;
while (++leftIndex < leftLength) {
result[leftIndex] = partials[leftIndex];
}
while (++argsIndex < holdersLength) {
if (isUncurried || argsIndex < argsLength) {
result[holders[argsIndex]] = args[argsIndex];
}
}
while (rangeLength--) {
result[leftIndex++] = args[argsIndex++];
}
return result;
}
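// Illustrative sketch (added for clarity; not in the original source):
// partials are prepended, holder slots are filled from the provided args,
// and any remaining args are appended:
//   composeArgs([1, 2], ['a', 'b'], [1]); // => ['a', 1, 2]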
/**
* This function is like `composeArgs` except that the arguments composition
* is tailored for `_.partialRight`.
*
* @private
* @param {Array} args The provided arguments.
* @param {Array} partials The arguments to append to those provided.
* @param {Array} holders The `partials` placeholder indexes.
 * @param {boolean} [isCurried] Specify composing for a curried function.
* @returns {Array} Returns the new array of composed arguments.
*/
function composeArgsRight(args, partials, holders, isCurried) {
var argsIndex = -1,
argsLength = args.length,
holdersIndex = -1,
holdersLength = holders.length,
rightIndex = -1,
rightLength = partials.length,
rangeLength = nativeMax(argsLength - holdersLength, 0),
result = Array(rangeLength + rightLength),
isUncurried = !isCurried;
while (++argsIndex < rangeLength) {
result[argsIndex] = args[argsIndex];
}
var offset = argsIndex;
while (++rightIndex < rightLength) {
result[offset + rightIndex] = partials[rightIndex];
}
while (++holdersIndex < holdersLength) {
if (isUncurried || argsIndex < argsLength) {
result[offset + holders[holdersIndex]] = args[argsIndex++];
}
}
return result;
}
/**
* Copies the values of `source` to `array`.
*
* @private
* @param {Array} source The array to copy values from.
* @param {Array} [array=[]] The array to copy values to.
* @returns {Array} Returns `array`.
*/
function copyArray(source, array) {
var index = -1,
length = source.length;
array || (array = Array(length));
while (++index < length) {
array[index] = source[index];
}
return array;
}
/**
* Copies properties of `source` to `object`.
*
* @private
* @param {Object} source The object to copy properties from.
* @param {Array} props The property identifiers to copy.
* @param {Object} [object={}] The object to copy properties to.
* @param {Function} [customizer] The function to customize copied values.
* @returns {Object} Returns `object`.
*/
function copyObject(source, props, object, customizer) {
var isNew = !object;
object || (object = {});
var index = -1,
length = props.length;
while (++index < length) {
var key = props[index];
var newValue = customizer
? customizer(object[key], source[key], key, object, source)
: undefined;
if (newValue === undefined) {
newValue = source[key];
}
if (isNew) {
baseAssignValue(object, key, newValue);
} else {
assignValue(object, key, newValue);
}
}
return object;
}
/**
* Copies own symbols of `source` to `object`.
*
* @private
* @param {Object} source The object to copy symbols from.
* @param {Object} [object={}] The object to copy symbols to.
* @returns {Object} Returns `object`.
*/
function copySymbols(source, object) {
return copyObject(source, getSymbols(source), object);
}
/**
* Copies own and inherited symbols of `source` to `object`.
*
* @private
* @param {Object} source The object to copy symbols from.
* @param {Object} [object={}] The object to copy symbols to.
* @returns {Object} Returns `object`.
*/
function copySymbolsIn(source, object) {
return copyObject(source, getSymbolsIn(source), object);
}
/**
* Creates a function like `_.groupBy`.
*
* @private
* @param {Function} setter The function to set accumulator values.
* @param {Function} [initializer] The accumulator object initializer.
* @returns {Function} Returns the new aggregator function.
*/
function createAggregator(setter, initializer) {
return function(collection, iteratee) {
var func = isArray(collection) ? arrayAggregator : baseAggregator,
accumulator = initializer ? initializer() : {};
return func(collection, setter, getIteratee(iteratee, 2), accumulator);
};
}
/**
* Creates a function like `_.assign`.
*
* @private
* @param {Function} assigner The function to assign values.
* @returns {Function} Returns the new assigner function.
*/
function createAssigner(assigner) {
return baseRest(function(object, sources) {
var index = -1,
length = sources.length,
customizer = length > 1 ? sources[length - 1] : undefined,
guard = length > 2 ? sources[2] : undefined;
customizer = (assigner.length > 3 && typeof customizer == 'function')
? (length--, customizer)
: undefined;
if (guard && isIterateeCall(sources[0], sources[1], guard)) {
customizer = length < 3 ? undefined : customizer;
length = 1;
}
object = Object(object);
while (++index < length) {
var source = sources[index];
if (source) {
assigner(object, source, index, customizer);
}
}
return object;
});
}
/**
* Creates a `baseEach` or `baseEachRight` function.
*
* @private
* @param {Function} eachFunc The function to iterate over a collection.
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Function} Returns the new base function.
*/
function createBaseEach(eachFunc, fromRight) {
return function(collection, iteratee) {
if (collection == null) {
return collection;
}
if (!isArrayLike(collection)) {
return eachFunc(collection, iteratee);
}
var length = collection.length,
index = fromRight ? length : -1,
iterable = Object(collection);
while ((fromRight ? index-- : ++index < length)) {
if (iteratee(iterable[index], index, iterable) === false) {
break;
}
}
return collection;
};
}
/**
* Creates a base function for methods like `_.forIn` and `_.forOwn`.
*
* @private
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Function} Returns the new base function.
*/
function createBaseFor(fromRight) {
return function(object, iteratee, keysFunc) {
var index = -1,
iterable = Object(object),
props = keysFunc(object),
length = props.length;
while (length--) {
var key = props[fromRight ? length : ++index];
if (iteratee(iterable[key], key, iterable) === false) {
break;
}
}
return object;
};
}
/**
* Creates a function that wraps `func` to invoke it with the optional `this`
* binding of `thisArg`.
*
* @private
* @param {Function} func The function to wrap.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @param {*} [thisArg] The `this` binding of `func`.
* @returns {Function} Returns the new wrapped function.
*/
function createBind(func, bitmask, thisArg) {
var isBind = bitmask & WRAP_BIND_FLAG,
Ctor = createCtor(func);
function wrapper() {
var fn = (this && this !== root && this instanceof wrapper) ? Ctor : func;
return fn.apply(isBind ? thisArg : this, arguments);
}
return wrapper;
}
/**
* Creates a function like `_.lowerFirst`.
*
* @private
* @param {string} methodName The name of the `String` case method to use.
* @returns {Function} Returns the new case function.
*/
function createCaseFirst(methodName) {
return function(string) {
string = toString(string);
var strSymbols = hasUnicode(string)
? stringToArray(string)
: undefined;
var chr = strSymbols
? strSymbols[0]
: string.charAt(0);
var trailing = strSymbols
? castSlice(strSymbols, 1).join('')
: string.slice(1);
return chr[methodName]() + trailing;
};
}
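// Illustrative usage (added for clarity; not in the original source);
// this is how case helpers like `upperFirst` are built:
//   var upperFirst = createCaseFirst('toUpperCase');
//   upperFirst('fred'); // => 'Fred'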
/**
* Creates a function like `_.camelCase`.
*
* @private
* @param {Function} callback The function to combine each word.
* @returns {Function} Returns the new compounder function.
*/
function createCompounder(callback) {
return function(string) {
return arrayReduce(words(deburr(string).replace(reApos, '')), callback, '');
};
}
/**
* Creates a function that produces an instance of `Ctor` regardless of
* whether it was invoked as part of a `new` expression or by `call` or `apply`.
*
* @private
* @param {Function} Ctor The constructor to wrap.
* @returns {Function} Returns the new wrapped function.
*/
function createCtor(Ctor) {
return function() {
// Use a `switch` statement to work with class constructors. See
// http://ecma-international.org/ecma-262/7.0/#sec-ecmascript-function-objects-call-thisargument-argumentslist
// for more details.
var args = arguments;
switch (args.length) {
case 0: return new Ctor;
case 1: return new Ctor(args[0]);
case 2: return new Ctor(args[0], args[1]);
case 3: return new Ctor(args[0], args[1], args[2]);
case 4: return new Ctor(args[0], args[1], args[2], args[3]);
case 5: return new Ctor(args[0], args[1], args[2], args[3], args[4]);
case 6: return new Ctor(args[0], args[1], args[2], args[3], args[4], args[5]);
case 7: return new Ctor(args[0], args[1], args[2], args[3], args[4], args[5], args[6]);
}
var thisBinding = baseCreate(Ctor.prototype),
result = Ctor.apply(thisBinding, args);
// Mimic the constructor's `return` behavior.
// See https://es5.github.io/#x13.2.2 for more details.
return isObject(result) ? result : thisBinding;
};
}
/**
* Creates a function that wraps `func` to enable currying.
*
* @private
* @param {Function} func The function to wrap.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @param {number} arity The arity of `func`.
* @returns {Function} Returns the new wrapped function.
*/
function createCurry(func, bitmask, arity) {
var Ctor = createCtor(func);
function wrapper() {
var length = arguments.length,
args = Array(length),
index = length,
placeholder = getHolder(wrapper);
while (index--) {
args[index] = arguments[index];
}
var holders = (length < 3 && args[0] !== placeholder && args[length - 1] !== placeholder)
? []
: replaceHolders(args, placeholder);
length -= holders.length;
if (length < arity) {
return createRecurry(
func, bitmask, createHybrid, wrapper.placeholder, undefined,
args, holders, undefined, undefined, arity - length);
}
var fn = (this && this !== root && this instanceof wrapper) ? Ctor : func;
return apply(fn, this, args);
}
return wrapper;
}
/**
* Creates a `_.find` or `_.findLast` function.
*
* @private
* @param {Function} findIndexFunc The function to find the collection index.
* @returns {Function} Returns the new find function.
*/
function createFind(findIndexFunc) {
return function(collection, predicate, fromIndex) {
var iterable = Object(collection);
if (!isArrayLike(collection)) {
var iteratee = getIteratee(predicate, 3);
collection = keys(collection);
predicate = function(key) { return iteratee(iterable[key], key, iterable); };
}
var index = findIndexFunc(collection, predicate, fromIndex);
return index > -1 ? iterable[iteratee ? collection[index] : index] : undefined;
};
}
/**
* Creates a `_.flow` or `_.flowRight` function.
*
* @private
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Function} Returns the new flow function.
*/
function createFlow(fromRight) {
return flatRest(function(funcs) {
var length = funcs.length,
index = length,
prereq = LodashWrapper.prototype.thru;
if (fromRight) {
funcs.reverse();
}
while (index--) {
var func = funcs[index];
if (typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
if (prereq && !wrapper && getFuncName(func) == 'wrapper') {
var wrapper = new LodashWrapper([], true);
}
}
index = wrapper ? index : length;
while (++index < length) {
func = funcs[index];
var funcName = getFuncName(func),
data = funcName == 'wrapper' ? getData(func) : undefined;
if (data && isLaziable(data[0]) &&
data[1] == (WRAP_ARY_FLAG | WRAP_CURRY_FLAG | WRAP_PARTIAL_FLAG | WRAP_REARG_FLAG) &&
!data[4].length && data[9] == 1
) {
wrapper = wrapper[getFuncName(data[0])].apply(wrapper, data[3]);
} else {
wrapper = (func.length == 1 && isLaziable(func))
? wrapper[funcName]()
: wrapper.thru(func);
}
}
return function() {
var args = arguments,
value = args[0];
if (wrapper && args.length == 1 && isArray(value)) {
return wrapper.plant(value).value();
}
var index = 0,
result = length ? funcs[index].apply(this, args) : value;
while (++index < length) {
result = funcs[index].call(this, result);
}
return result;
};
});
}
/**
* Creates a function that wraps `func` to invoke it with optional `this`
* binding of `thisArg`, partial application, and currying.
*
* @private
* @param {Function|string} func The function or method name to wrap.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @param {*} [thisArg] The `this` binding of `func`.
* @param {Array} [partials] The arguments to prepend to those provided to
* the new function.
* @param {Array} [holders] The `partials` placeholder indexes.
* @param {Array} [partialsRight] The arguments to append to those provided
* to the new function.
* @param {Array} [holdersRight] The `partialsRight` placeholder indexes.
* @param {Array} [argPos] The argument positions of the new function.
* @param {number} [ary] The arity cap of `func`.
* @param {number} [arity] The arity of `func`.
* @returns {Function} Returns the new wrapped function.
*/
function createHybrid(func, bitmask, thisArg, partials, holders, partialsRight, holdersRight, argPos, ary, arity) {
var isAry = bitmask & WRAP_ARY_FLAG,
isBind = bitmask & WRAP_BIND_FLAG,
isBindKey = bitmask & WRAP_BIND_KEY_FLAG,
isCurried = bitmask & (WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG),
isFlip = bitmask & WRAP_FLIP_FLAG,
Ctor = isBindKey ? undefined : createCtor(func);
function wrapper() {
var length = arguments.length,
args = Array(length),
index = length;
while (index--) {
args[index] = arguments[index];
}
if (isCurried) {
var placeholder = getHolder(wrapper),
holdersCount = countHolders(args, placeholder);
}
if (partials) {
args = composeArgs(args, partials, holders, isCurried);
}
if (partialsRight) {
args = composeArgsRight(args, partialsRight, holdersRight, isCurried);
}
length -= holdersCount;
if (isCurried && length < arity) {
var newHolders = replaceHolders(args, placeholder);
return createRecurry(
func, bitmask, createHybrid, wrapper.placeholder, thisArg,
args, newHolders, argPos, ary, arity - length
);
}
var thisBinding = isBind ? thisArg : this,
fn = isBindKey ? thisBinding[func] : func;
length = args.length;
if (argPos) {
args = reorder(args, argPos);
} else if (isFlip && length > 1) {
args.reverse();
}
if (isAry && ary < length) {
args.length = ary;
}
if (this && this !== root && this instanceof wrapper) {
fn = Ctor || createCtor(fn);
}
return fn.apply(thisBinding, args);
}
return wrapper;
}
/**
* Creates a function like `_.invertBy`.
*
* @private
* @param {Function} setter The function to set accumulator values.
* @param {Function} toIteratee The function to resolve iteratees.
* @returns {Function} Returns the new inverter function.
*/
function createInverter(setter, toIteratee) {
return function(object, iteratee) {
return baseInverter(object, setter, toIteratee(iteratee), {});
};
}
/**
* Creates a function that performs a mathematical operation on two values.
*
* @private
* @param {Function} operator The function to perform the operation.
* @param {number} [defaultValue] The value used for `undefined` arguments.
* @returns {Function} Returns the new mathematical operation function.
*/
function createMathOperation(operator, defaultValue) {
return function(value, other) {
var result;
if (value === undefined && other === undefined) {
return defaultValue;
}
if (value !== undefined) {
result = value;
}
if (other !== undefined) {
if (result === undefined) {
return other;
}
if (typeof value == 'string' || typeof other == 'string') {
value = baseToString(value);
other = baseToString(other);
} else {
value = baseToNumber(value);
other = baseToNumber(other);
}
result = operator(value, other);
}
return result;
};
}
/**
* Creates a function like `_.over`.
*
* @private
* @param {Function} arrayFunc The function to iterate over iteratees.
* @returns {Function} Returns the new over function.
*/
function createOver(arrayFunc) {
return flatRest(function(iteratees) {
iteratees = arrayMap(iteratees, baseUnary(getIteratee()));
return baseRest(function(args) {
var thisArg = this;
return arrayFunc(iteratees, function(iteratee) {
return apply(iteratee, thisArg, args);
});
});
});
}
/**
* Creates the padding for `string` based on `length`. The `chars` string
* is truncated if the number of characters exceeds `length`.
*
* @private
* @param {number} length The padding length.
* @param {string} [chars=' '] The string used as padding.
* @returns {string} Returns the padding for `string`.
*/
function createPadding(length, chars) {
chars = chars === undefined ? ' ' : baseToString(chars);
var charsLength = chars.length;
if (charsLength < 2) {
return charsLength ? baseRepeat(chars, length) : chars;
}
var result = baseRepeat(chars, nativeCeil(length / stringSize(chars)));
return hasUnicode(chars)
? castSlice(stringToArray(result), 0, length).join('')
: result.slice(0, length);
}
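// Illustrative usage (added for clarity; not in the original source):
//   createPadding(3);       // => '   ' (defaults to spaces)
//   createPadding(5, 'ab'); // => 'ababa' (truncated to `length`)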
/**
* Creates a function that wraps `func` to invoke it with the `this` binding
* of `thisArg` and `partials` prepended to the arguments it receives.
*
* @private
* @param {Function} func The function to wrap.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @param {*} thisArg The `this` binding of `func`.
* @param {Array} partials The arguments to prepend to those provided to
* the new function.
* @returns {Function} Returns the new wrapped function.
*/
function createPartial(func, bitmask, thisArg, partials) {
var isBind = bitmask & WRAP_BIND_FLAG,
Ctor = createCtor(func);
function wrapper() {
var argsIndex = -1,
argsLength = arguments.length,
leftIndex = -1,
leftLength = partials.length,
args = Array(leftLength + argsLength),
fn = (this && this !== root && this instanceof wrapper) ? Ctor : func;
while (++leftIndex < leftLength) {
args[leftIndex] = partials[leftIndex];
}
while (argsLength--) {
args[leftIndex++] = arguments[++argsIndex];
}
return apply(fn, isBind ? thisArg : this, args);
}
return wrapper;
}
/**
* Creates a `_.range` or `_.rangeRight` function.
*
* @private
* @param {boolean} [fromRight] Specify iterating from right to left.
* @returns {Function} Returns the new range function.
*/
function createRange(fromRight) {
return function(start, end, step) {
if (step && typeof step != 'number' && isIterateeCall(start, end, step)) {
end = step = undefined;
}
// Ensure the sign of `-0` is preserved.
start = toFinite(start);
if (end === undefined) {
end = start;
start = 0;
} else {
end = toFinite(end);
}
step = step === undefined ? (start < end ? 1 : -1) : toFinite(step);
return baseRange(start, end, step, fromRight);
};
}
/**
* Creates a function that performs a relational operation on two values.
*
* @private
* @param {Function} operator The function to perform the operation.
* @returns {Function} Returns the new relational operation function.
*/
function createRelationalOperation(operator) {
return function(value, other) {
if (!(typeof value == 'string' && typeof other == 'string')) {
value = toNumber(value);
other = toNumber(other);
}
return operator(value, other);
};
}
/**
* Creates a function that wraps `func` to continue currying.
*
* @private
* @param {Function} func The function to wrap.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @param {Function} wrapFunc The function to create the `func` wrapper.
* @param {*} placeholder The placeholder value.
* @param {*} [thisArg] The `this` binding of `func`.
* @param {Array} [partials] The arguments to prepend to those provided to
* the new function.
* @param {Array} [holders] The `partials` placeholder indexes.
* @param {Array} [argPos] The argument positions of the new function.
* @param {number} [ary] The arity cap of `func`.
* @param {number} [arity] The arity of `func`.
* @returns {Function} Returns the new wrapped function.
*/
function createRecurry(func, bitmask, wrapFunc, placeholder, thisArg, partials, holders, argPos, ary, arity) {
var isCurry = bitmask & WRAP_CURRY_FLAG,
newHolders = isCurry ? holders : undefined,
newHoldersRight = isCurry ? undefined : holders,
newPartials = isCurry ? partials : undefined,
newPartialsRight = isCurry ? undefined : partials;
bitmask |= (isCurry ? WRAP_PARTIAL_FLAG : WRAP_PARTIAL_RIGHT_FLAG);
bitmask &= ~(isCurry ? WRAP_PARTIAL_RIGHT_FLAG : WRAP_PARTIAL_FLAG);
if (!(bitmask & WRAP_CURRY_BOUND_FLAG)) {
bitmask &= ~(WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG);
}
var newData = [
func, bitmask, thisArg, newPartials, newHolders, newPartialsRight,
newHoldersRight, argPos, ary, arity
];
var result = wrapFunc.apply(undefined, newData);
if (isLaziable(func)) {
setData(result, newData);
}
result.placeholder = placeholder;
return setWrapToString(result, func, bitmask);
}
/**
* Creates a function like `_.round`.
*
* @private
* @param {string} methodName The name of the `Math` method to use when rounding.
* @returns {Function} Returns the new round function.
*/
function createRound(methodName) {
var func = Math[methodName];
return function(number, precision) {
number = toNumber(number);
precision = precision == null ? 0 : nativeMin(toInteger(precision), 292);
if (precision && nativeIsFinite(number)) {
// Shift with exponential notation to avoid floating-point issues.
// See [MDN](https://mdn.io/round#Examples) for more details.
var pair = (toString(number) + 'e').split('e'),
value = func(pair[0] + 'e' + (+pair[1] + precision));
pair = (toString(value) + 'e').split('e');
return +(pair[0] + 'e' + (+pair[1] - precision));
}
return func(number);
};
}
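// Illustrative usage (added for clarity; not in the original source);
// the exponential-notation shift sidesteps floating-point artifacts:
//   var round = createRound('round');
//   round(4.006, 2); // => 4.01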
/**
* Creates a set object of `values`.
*
* @private
* @param {Array} values The values to add to the set.
* @returns {Object} Returns the new set.
*/
var createSet = !(Set && (1 / setToArray(new Set([,-0]))[1]) == INFINITY) ? noop : function(values) {
return new Set(values);
};
/**
* Creates a `_.toPairs` or `_.toPairsIn` function.
*
* @private
* @param {Function} keysFunc The function to get the keys of a given object.
* @returns {Function} Returns the new pairs function.
*/
function createToPairs(keysFunc) {
return function(object) {
var tag = getTag(object);
if (tag == mapTag) {
return mapToArray(object);
}
if (tag == setTag) {
return setToPairs(object);
}
return baseToPairs(object, keysFunc(object));
};
}
/**
* Creates a function that either curries or invokes `func` with optional
* `this` binding and partially applied arguments.
*
* @private
* @param {Function|string} func The function or method name to wrap.
* @param {number} bitmask The bitmask flags.
* 1 - `_.bind`
* 2 - `_.bindKey`
* 4 - `_.curry` or `_.curryRight` of a bound function
* 8 - `_.curry`
* 16 - `_.curryRight`
* 32 - `_.partial`
* 64 - `_.partialRight`
* 128 - `_.rearg`
* 256 - `_.ary`
* 512 - `_.flip`
* @param {*} [thisArg] The `this` binding of `func`.
* @param {Array} [partials] The arguments to be partially applied.
* @param {Array} [holders] The `partials` placeholder indexes.
* @param {Array} [argPos] The argument positions of the new function.
* @param {number} [ary] The arity cap of `func`.
* @param {number} [arity] The arity of `func`.
* @returns {Function} Returns the new wrapped function.
*/
function createWrap(func, bitmask, thisArg, partials, holders, argPos, ary, arity) {
var isBindKey = bitmask & WRAP_BIND_KEY_FLAG;
if (!isBindKey && typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
var length = partials ? partials.length : 0;
if (!length) {
bitmask &= ~(WRAP_PARTIAL_FLAG | WRAP_PARTIAL_RIGHT_FLAG);
partials = holders = undefined;
}
ary = ary === undefined ? ary : nativeMax(toInteger(ary), 0);
arity = arity === undefined ? arity : toInteger(arity);
length -= holders ? holders.length : 0;
if (bitmask & WRAP_PARTIAL_RIGHT_FLAG) {
var partialsRight = partials,
holdersRight = holders;
partials = holders = undefined;
}
var data = isBindKey ? undefined : getData(func);
var newData = [
func, bitmask, thisArg, partials, holders, partialsRight, holdersRight,
argPos, ary, arity
];
if (data) {
mergeData(newData, data);
}
func = newData[0];
bitmask = newData[1];
thisArg = newData[2];
partials = newData[3];
holders = newData[4];
arity = newData[9] = newData[9] === undefined
? (isBindKey ? 0 : func.length)
: nativeMax(newData[9] - length, 0);
if (!arity && bitmask & (WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG)) {
bitmask &= ~(WRAP_CURRY_FLAG | WRAP_CURRY_RIGHT_FLAG);
}
if (!bitmask || bitmask == WRAP_BIND_FLAG) {
var result = createBind(func, bitmask, thisArg);
} else if (bitmask == WRAP_CURRY_FLAG || bitmask == WRAP_CURRY_RIGHT_FLAG) {
result = createCurry(func, bitmask, arity);
} else if ((bitmask == WRAP_PARTIAL_FLAG || bitmask == (WRAP_BIND_FLAG | WRAP_PARTIAL_FLAG)) && !holders.length) {
result = createPartial(func, bitmask, thisArg, partials);
} else {
result = createHybrid.apply(undefined, newData);
}
var setter = data ? baseSetData : setData;
return setWrapToString(setter(result, newData), func, bitmask);
}
/**
* Used by `_.defaults` to customize its `_.assignIn` use to assign properties
* of source objects to the destination object for all destination properties
* that resolve to `undefined`.
*
* @private
* @param {*} objValue The destination value.
* @param {*} srcValue The source value.
* @param {string} key The key of the property to assign.
* @param {Object} object The parent object of `objValue`.
* @returns {*} Returns the value to assign.
*/
function customDefaultsAssignIn(objValue, srcValue, key, object) {
if (objValue === undefined ||
(eq(objValue, objectProto[key]) && !hasOwnProperty.call(object, key))) {
return srcValue;
}
return objValue;
}
/**
* Used by `_.defaultsDeep` to customize its `_.merge` use to merge source
* objects into destination objects that are passed thru.
*
* @private
* @param {*} objValue The destination value.
* @param {*} srcValue The source value.
* @param {string} key The key of the property to merge.
* @param {Object} object The parent object of `objValue`.
* @param {Object} source The parent object of `srcValue`.
* @param {Object} [stack] Tracks traversed source values and their merged
* counterparts.
* @returns {*} Returns the value to assign.
*/
function customDefaultsMerge(objValue, srcValue, key, object, source, stack) {
if (isObject(objValue) && isObject(srcValue)) {
// Recursively merge objects and arrays (susceptible to call stack limits).
stack.set(srcValue, objValue);
baseMerge(objValue, srcValue, undefined, customDefaultsMerge, stack);
stack['delete'](srcValue);
}
return objValue;
}
/**
* Used by `_.omit` to customize its `_.cloneDeep` use to only clone plain
* objects.
*
* @private
* @param {*} value The value to inspect.
* @param {string} key The key of the property to inspect.
* @returns {*} Returns the uncloned value or `undefined` to defer cloning to `_.cloneDeep`.
*/
function customOmitClone(value) {
return isPlainObject(value) ? undefined : value;
}
/**
* A specialized version of `baseIsEqualDeep` for arrays with support for
* partial deep comparisons.
*
* @private
* @param {Array} array The array to compare.
* @param {Array} other The other array to compare.
* @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.
* @param {Function} customizer The function to customize comparisons.
* @param {Function} equalFunc The function to determine equivalents of values.
* @param {Object} stack Tracks traversed `array` and `other` objects.
* @returns {boolean} Returns `true` if the arrays are equivalent, else `false`.
*/
function equalArrays(array, other, bitmask, customizer, equalFunc, stack) {
var isPartial = bitmask & COMPARE_PARTIAL_FLAG,
arrLength = array.length,
othLength = other.length;
if (arrLength != othLength && !(isPartial && othLength > arrLength)) {
return false;
}
// Check that cyclic values are equal.
var arrStacked = stack.get(array);
var othStacked = stack.get(other);
if (arrStacked && othStacked) {
return arrStacked == other && othStacked == array;
}
var index = -1,
result = true,
seen = (bitmask & COMPARE_UNORDERED_FLAG) ? new SetCache : undefined;
stack.set(array, other);
stack.set(other, array);
// Ignore non-index properties.
while (++index < arrLength) {
var arrValue = array[index],
othValue = other[index];
if (customizer) {
var compared = isPartial
? customizer(othValue, arrValue, index, other, array, stack)
: customizer(arrValue, othValue, index, array, other, stack);
}
if (compared !== undefined) {
if (compared) {
continue;
}
result = false;
break;
}
// Recursively compare arrays (susceptible to call stack limits).
if (seen) {
if (!arraySome(other, function(othValue, othIndex) {
if (!cacheHas(seen, othIndex) &&
(arrValue === othValue || equalFunc(arrValue, othValue, bitmask, customizer, stack))) {
return seen.push(othIndex);
}
})) {
result = false;
break;
}
} else if (!(
arrValue === othValue ||
equalFunc(arrValue, othValue, bitmask, customizer, stack)
)) {
result = false;
break;
}
}
stack['delete'](array);
stack['delete'](other);
return result;
}
/**
* A specialized version of `baseIsEqualDeep` for comparing objects of
* the same `toStringTag`.
*
* **Note:** This function only supports comparing values with tags of
* `Boolean`, `Date`, `Error`, `Number`, `RegExp`, or `String`.
*
* @private
* @param {Object} object The object to compare.
* @param {Object} other The other object to compare.
* @param {string} tag The `toStringTag` of the objects to compare.
* @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.
* @param {Function} customizer The function to customize comparisons.
* @param {Function} equalFunc The function to determine equivalents of values.
* @param {Object} stack Tracks traversed `object` and `other` objects.
* @returns {boolean} Returns `true` if the objects are equivalent, else `false`.
*/
function equalByTag(object, other, tag, bitmask, customizer, equalFunc, stack) {
switch (tag) {
case dataViewTag:
if ((object.byteLength != other.byteLength) ||
(object.byteOffset != other.byteOffset)) {
return false;
}
object = object.buffer;
other = other.buffer;
case arrayBufferTag:
if ((object.byteLength != other.byteLength) ||
!equalFunc(new Uint8Array(object), new Uint8Array(other))) {
return false;
}
return true;
case boolTag:
case dateTag:
case numberTag:
// Coerce booleans to `1` or `0` and dates to milliseconds.
// Invalid dates are coerced to `NaN`.
return eq(+object, +other);
case errorTag:
return object.name == other.name && object.message == other.message;
case regexpTag:
case stringTag:
// Coerce regexes to strings and treat strings, primitives and objects,
// as equal. See http://www.ecma-international.org/ecma-262/7.0/#sec-regexp.prototype.tostring
// for more details.
return object == (other + '');
case mapTag:
var convert = mapToArray;
case setTag:
var isPartial = bitmask & COMPARE_PARTIAL_FLAG;
convert || (convert = setToArray);
if (object.size != other.size && !isPartial) {
return false;
}
// Assume cyclic values are equal.
var stacked = stack.get(object);
if (stacked) {
return stacked == other;
}
bitmask |= COMPARE_UNORDERED_FLAG;
// Recursively compare objects (susceptible to call stack limits).
stack.set(object, other);
var result = equalArrays(convert(object), convert(other), bitmask, customizer, equalFunc, stack);
stack['delete'](object);
return result;
case symbolTag:
if (symbolValueOf) {
return symbolValueOf.call(object) == symbolValueOf.call(other);
}
}
return false;
}
/**
* A specialized version of `baseIsEqualDeep` for objects with support for
* partial deep comparisons.
*
* @private
* @param {Object} object The object to compare.
* @param {Object} other The other object to compare.
* @param {number} bitmask The bitmask flags. See `baseIsEqual` for more details.
* @param {Function} customizer The function to customize comparisons.
* @param {Function} equalFunc The function to determine equivalents of values.
* @param {Object} stack Tracks traversed `object` and `other` objects.
* @returns {boolean} Returns `true` if the objects are equivalent, else `false`.
*/
function equalObjects(object, other, bitmask, customizer, equalFunc, stack) {
var isPartial = bitmask & COMPARE_PARTIAL_FLAG,
objProps = getAllKeys(object),
objLength = objProps.length,
othProps = getAllKeys(other),
othLength = othProps.length;
if (objLength != othLength && !isPartial) {
return false;
}
var index = objLength;
while (index--) {
var key = objProps[index];
if (!(isPartial ? key in other : hasOwnProperty.call(other, key))) {
return false;
}
}
// Check that cyclic values are equal.
var objStacked = stack.get(object);
var othStacked = stack.get(other);
if (objStacked && othStacked) {
return objStacked == other && othStacked == object;
}
var result = true;
stack.set(object, other);
stack.set(other, object);
var skipCtor = isPartial;
while (++index < objLength) {
key = objProps[index];
var objValue = object[key],
othValue = other[key];
if (customizer) {
var compared = isPartial
? customizer(othValue, objValue, key, other, object, stack)
: customizer(objValue, othValue, key, object, other, stack);
}
// Recursively compare objects (susceptible to call stack limits).
if (!(compared === undefined
? (objValue === othValue || equalFunc(objValue, othValue, bitmask, customizer, stack))
: compared
)) {
result = false;
break;
}
skipCtor || (skipCtor = key == 'constructor');
}
if (result && !skipCtor) {
var objCtor = object.constructor,
othCtor = other.constructor;
// Non `Object` object instances with different constructors are not equal.
if (objCtor != othCtor &&
('constructor' in object && 'constructor' in other) &&
!(typeof objCtor == 'function' && objCtor instanceof objCtor &&
typeof othCtor == 'function' && othCtor instanceof othCtor)) {
result = false;
}
}
stack['delete'](object);
stack['delete'](other);
return result;
}
/**
* A specialized version of `baseRest` which flattens the rest array.
*
* @private
* @param {Function} func The function to apply a rest parameter to.
* @returns {Function} Returns the new function.
*/
function flatRest(func) {
return setToString(overRest(func, undefined, flatten), func + '');
}
/**
* Creates an array of own enumerable property names and symbols of `object`.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names and symbols.
*/
function getAllKeys(object) {
return baseGetAllKeys(object, keys, getSymbols);
}
/**
* Creates an array of own and inherited enumerable property names and
* symbols of `object`.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names and symbols.
*/
function getAllKeysIn(object) {
return baseGetAllKeys(object, keysIn, getSymbolsIn);
}
/**
* Gets metadata for `func`.
*
* @private
* @param {Function} func The function to query.
* @returns {*} Returns the metadata for `func`.
*/
var getData = !metaMap ? noop : function(func) {
return metaMap.get(func);
};
/**
* Gets the name of `func`.
*
* @private
* @param {Function} func The function to query.
* @returns {string} Returns the function name.
*/
function getFuncName(func) {
var result = (func.name + ''),
array = realNames[result],
length = hasOwnProperty.call(realNames, result) ? array.length : 0;
while (length--) {
var data = array[length],
otherFunc = data.func;
if (otherFunc == null || otherFunc == func) {
return data.name;
}
}
return result;
}
/**
* Gets the argument placeholder value for `func`.
*
* @private
* @param {Function} func The function to inspect.
* @returns {*} Returns the placeholder value.
*/
function getHolder(func) {
var object = hasOwnProperty.call(lodash, 'placeholder') ? lodash : func;
return object.placeholder;
}
/**
* Gets the appropriate "iteratee" function. If `_.iteratee` is customized,
* this function returns the custom method, otherwise it returns `baseIteratee`.
* If arguments are provided, the chosen function is invoked with them and
* its result is returned.
*
* @private
* @param {*} [value] The value to convert to an iteratee.
* @param {number} [arity] The arity of the created iteratee.
* @returns {Function} Returns the chosen function or its result.
*/
function getIteratee() {
var result = lodash.iteratee || iteratee;
result = result === iteratee ? baseIteratee : result;
return arguments.length ? result(arguments[0], arguments[1]) : result;
}
/**
* Gets the data for `map`.
*
* @private
* @param {Object} map The map to query.
* @param {string} key The reference key.
* @returns {*} Returns the map data.
*/
function getMapData(map, key) {
var data = map.__data__;
return isKeyable(key)
? data[typeof key == 'string' ? 'string' : 'hash']
: data.map;
}
/**
* Gets the property names, values, and compare flags of `object`.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the match data of `object`.
*/
function getMatchData(object) {
var result = keys(object),
length = result.length;
while (length--) {
var key = result[length],
value = object[key];
result[length] = [key, value, isStrictComparable(value)];
}
return result;
}
/**
* Gets the native function at `key` of `object`.
*
* @private
* @param {Object} object The object to query.
* @param {string} key The key of the method to get.
* @returns {*} Returns the function if it's native, else `undefined`.
*/
function getNative(object, key) {
var value = getValue(object, key);
return baseIsNative(value) ? value : undefined;
}
/**
* A specialized version of `baseGetTag` which ignores `Symbol.toStringTag` values.
*
* @private
* @param {*} value The value to query.
* @returns {string} Returns the raw `toStringTag`.
*/
function getRawTag(value) {
var isOwn = hasOwnProperty.call(value, symToStringTag),
tag = value[symToStringTag];
try {
value[symToStringTag] = undefined;
var unmasked = true;
} catch (e) {}
var result = nativeObjectToString.call(value);
if (unmasked) {
if (isOwn) {
value[symToStringTag] = tag;
} else {
delete value[symToStringTag];
}
}
return result;
}
/**
* Creates an array of the own enumerable symbols of `object`.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of symbols.
*/
var getSymbols = !nativeGetSymbols ? stubArray : function(object) {
if (object == null) {
return [];
}
object = Object(object);
return arrayFilter(nativeGetSymbols(object), function(symbol) {
return propertyIsEnumerable.call(object, symbol);
});
};
/**
* Creates an array of the own and inherited enumerable symbols of `object`.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of symbols.
*/
var getSymbolsIn = !nativeGetSymbols ? stubArray : function(object) {
var result = [];
while (object) {
arrayPush(result, getSymbols(object));
object = getPrototype(object);
}
return result;
};
/**
* Gets the `toStringTag` of `value`.
*
* @private
* @param {*} value The value to query.
* @returns {string} Returns the `toStringTag`.
*/
var getTag = baseGetTag;
// Fallback for data views, maps, sets, and weak maps in IE 11 and promises in Node.js < 6.
if ((DataView && getTag(new DataView(new ArrayBuffer(1))) != dataViewTag) ||
(Map && getTag(new Map) != mapTag) ||
(Promise && getTag(Promise.resolve()) != promiseTag) ||
(Set && getTag(new Set) != setTag) ||
(WeakMap && getTag(new WeakMap) != weakMapTag)) {
getTag = function(value) {
var result = baseGetTag(value),
Ctor = result == objectTag ? value.constructor : undefined,
ctorString = Ctor ? toSource(Ctor) : '';
if (ctorString) {
switch (ctorString) {
case dataViewCtorString: return dataViewTag;
case mapCtorString: return mapTag;
case promiseCtorString: return promiseTag;
case setCtorString: return setTag;
case weakMapCtorString: return weakMapTag;
}
}
return result;
};
}
/**
* Gets the view, applying any `transforms` to the `start` and `end` positions.
*
* @private
* @param {number} start The start of the view.
* @param {number} end The end of the view.
* @param {Array} transforms The transformations to apply to the view.
* @returns {Object} Returns an object containing the `start` and `end`
* positions of the view.
*/
function getView(start, end, transforms) {
var index = -1,
length = transforms.length;
while (++index < length) {
var data = transforms[index],
size = data.size;
switch (data.type) {
case 'drop': start += size; break;
case 'dropRight': end -= size; break;
case 'take': end = nativeMin(end, start + size); break;
case 'takeRight': start = nativeMax(start, end - size); break;
}
}
return { 'start': start, 'end': end };
}
/**
* Extracts wrapper details from the `source` body comment.
*
* @private
* @param {string} source The source to inspect.
* @returns {Array} Returns the wrapper details.
*/
function getWrapDetails(source) {
var match = source.match(reWrapDetails);
return match ? match[1].split(reSplitDetails) : [];
}
/**
* Checks if `path` exists on `object`.
*
* @private
* @param {Object} object The object to query.
* @param {Array|string} path The path to check.
* @param {Function} hasFunc The function to check properties.
* @returns {boolean} Returns `true` if `path` exists, else `false`.
*/
function hasPath(object, path, hasFunc) {
path = castPath(path, object);
var index = -1,
length = path.length,
result = false;
while (++index < length) {
var key = toKey(path[index]);
if (!(result = object != null && hasFunc(object, key))) {
break;
}
object = object[key];
}
if (result || ++index != length) {
return result;
}
length = object == null ? 0 : object.length;
return !!length && isLength(length) && isIndex(key, length) &&
(isArray(object) || isArguments(object));
}
/**
* Initializes an array clone.
*
* @private
* @param {Array} array The array to clone.
* @returns {Array} Returns the initialized clone.
*/
function initCloneArray(array) {
var length = array.length,
result = new array.constructor(length);
// Add properties assigned by `RegExp#exec`.
if (length && typeof array[0] == 'string' && hasOwnProperty.call(array, 'index')) {
result.index = array.index;
result.input = array.input;
}
return result;
}
/**
* Initializes an object clone.
*
* @private
* @param {Object} object The object to clone.
* @returns {Object} Returns the initialized clone.
*/
function initCloneObject(object) {
return (typeof object.constructor == 'function' && !isPrototype(object))
? baseCreate(getPrototype(object))
: {};
}
/**
* Initializes an object clone based on its `toStringTag`.
*
* **Note:** This function only supports cloning values with tags of
* `Boolean`, `Date`, `Error`, `Map`, `Number`, `RegExp`, `Set`, or `String`.
*
* @private
* @param {Object} object The object to clone.
* @param {string} tag The `toStringTag` of the object to clone.
* @param {boolean} [isDeep] Specify a deep clone.
* @returns {Object} Returns the initialized clone.
*/
function initCloneByTag(object, tag, isDeep) {
var Ctor = object.constructor;
switch (tag) {
case arrayBufferTag:
return cloneArrayBuffer(object);
case boolTag:
case dateTag:
return new Ctor(+object);
case dataViewTag:
return cloneDataView(object, isDeep);
case float32Tag: case float64Tag:
case int8Tag: case int16Tag: case int32Tag:
case uint8Tag: case uint8ClampedTag: case uint16Tag: case uint32Tag:
return cloneTypedArray(object, isDeep);
case mapTag:
return new Ctor;
case numberTag:
case stringTag:
return new Ctor(object);
case regexpTag:
return cloneRegExp(object);
case setTag:
return new Ctor;
case symbolTag:
return cloneSymbol(object);
}
}
/**
* Inserts wrapper `details` in a comment at the top of the `source` body.
*
* @private
* @param {string} source The source to modify.
 * @param {Array} details The details to insert.
* @returns {string} Returns the modified source.
*/
function insertWrapDetails(source, details) {
var length = details.length;
if (!length) {
return source;
}
var lastIndex = length - 1;
details[lastIndex] = (length > 1 ? '& ' : '') + details[lastIndex];
details = details.join(length > 2 ? ', ' : ' ');
return source.replace(reWrapComment, '{\n/* [wrapped with ' + details + '] */\n');
}
/**
* Checks if `value` is a flattenable `arguments` object or array.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is flattenable, else `false`.
*/
function isFlattenable(value) {
return isArray(value) || isArguments(value) ||
!!(spreadableSymbol && value && value[spreadableSymbol]);
}
/**
* Checks if `value` is a valid array-like index.
*
* @private
* @param {*} value The value to check.
* @param {number} [length=MAX_SAFE_INTEGER] The upper bounds of a valid index.
* @returns {boolean} Returns `true` if `value` is a valid index, else `false`.
*/
function isIndex(value, length) {
var type = typeof value;
length = length == null ? MAX_SAFE_INTEGER : length;
return !!length &&
(type == 'number' ||
(type != 'symbol' && reIsUint.test(value))) &&
(value > -1 && value % 1 == 0 && value < length);
}
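// For illustration:
// isIndex(1);      // => true
// isIndex('1');    // => true  (uint-like strings qualify)
// isIndex(1.5);    // => false (not an integer)
// isIndex(-1);     // => false (negative)
// isIndex(3, 3);   // => false (equal to `length`, so out of bounds)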
/**
* Checks if the given arguments are from an iteratee call.
*
* @private
* @param {*} value The potential iteratee value argument.
* @param {*} index The potential iteratee index or key argument.
* @param {*} object The potential iteratee object argument.
* @returns {boolean} Returns `true` if the arguments are from an iteratee call,
* else `false`.
*/
function isIterateeCall(value, index, object) {
if (!isObject(object)) {
return false;
}
var type = typeof index;
if (type == 'number'
? (isArrayLike(object) && isIndex(index, object.length))
: (type == 'string' && index in object)
) {
return eq(object[index], value);
}
return false;
}
/**
* Checks if `value` is a property name and not a property path.
*
* @private
* @param {*} value The value to check.
* @param {Object} [object] The object to query keys on.
* @returns {boolean} Returns `true` if `value` is a property name, else `false`.
*/
function isKey(value, object) {
if (isArray(value)) {
return false;
}
var type = typeof value;
if (type == 'number' || type == 'symbol' || type == 'boolean' ||
value == null || isSymbol(value)) {
return true;
}
return reIsPlainProp.test(value) || !reIsDeepProp.test(value) ||
(object != null && value in Object(object));
}
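// For illustration:
// isKey('a');                  // => true
// isKey('a.b');                // => false (parsed as a path)
// isKey('a.b', { 'a.b': 1 });  // => true  (a literal key on `object` wins)
// isKey(1);                    // => true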
/**
* Checks if `value` is suitable for use as unique object key.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is suitable, else `false`.
*/
function isKeyable(value) {
var type = typeof value;
return (type == 'string' || type == 'number' || type == 'symbol' || type == 'boolean')
? (value !== '__proto__')
: (value === null);
}
/**
* Checks if `func` has a lazy counterpart.
*
* @private
* @param {Function} func The function to check.
* @returns {boolean} Returns `true` if `func` has a lazy counterpart,
* else `false`.
*/
function isLaziable(func) {
var funcName = getFuncName(func),
other = lodash[funcName];
if (typeof other != 'function' || !(funcName in LazyWrapper.prototype)) {
return false;
}
if (func === other) {
return true;
}
var data = getData(other);
return !!data && func === data[0];
}
/**
* Checks if `func` has its source masked.
*
* @private
* @param {Function} func The function to check.
* @returns {boolean} Returns `true` if `func` is masked, else `false`.
*/
function isMasked(func) {
return !!maskSrcKey && (maskSrcKey in func);
}
/**
* Checks if `func` is capable of being masked.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `func` is maskable, else `false`.
*/
var isMaskable = coreJsData ? isFunction : stubFalse;
/**
* Checks if `value` is likely a prototype object.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a prototype, else `false`.
*/
function isPrototype(value) {
var Ctor = value && value.constructor,
proto = (typeof Ctor == 'function' && Ctor.prototype) || objectProto;
return value === proto;
}
/**
* Checks if `value` is suitable for strict equality comparisons, i.e. `===`.
*
* @private
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is suitable for strict
* equality comparisons, else `false`.
*/
function isStrictComparable(value) {
return value === value && !isObject(value);
}
/**
* A specialized version of `matchesProperty` for source values suitable
* for strict equality comparisons, i.e. `===`.
*
* @private
* @param {string} key The key of the property to get.
* @param {*} srcValue The value to match.
* @returns {Function} Returns the new spec function.
*/
function matchesStrictComparable(key, srcValue) {
return function(object) {
if (object == null) {
return false;
}
return object[key] === srcValue &&
(srcValue !== undefined || (key in Object(object)));
};
}
/**
* A specialized version of `_.memoize` which clears the memoized function's
* cache when it exceeds `MAX_MEMOIZE_SIZE`.
*
* @private
* @param {Function} func The function to have its output memoized.
* @returns {Function} Returns the new memoized function.
*/
function memoizeCapped(func) {
var result = memoize(func, function(key) {
if (cache.size === MAX_MEMOIZE_SIZE) {
cache.clear();
}
return key;
});
var cache = result.cache;
return result;
}
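// For illustration, this caps the path-parsing cache used by `stringToPath`:
// once `MAX_MEMOIZE_SIZE` (500) entries accumulate, the whole cache is
// dropped and rebuilt on demand rather than growing without bound.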
/**
* Merges the function metadata of `source` into `data`.
*
* Merging metadata reduces the number of wrappers used to invoke a function.
* This is possible because methods like `_.bind`, `_.curry`, and `_.partial`
* may be applied regardless of execution order. Methods like `_.ary` and
* `_.rearg` modify function arguments, making the order in which they are
* executed important, preventing the merging of metadata. However, we make
* an exception for a safe combined case where curried functions have `_.ary`
* and/or `_.rearg` applied.
*
* @private
* @param {Array} data The destination metadata.
* @param {Array} source The source metadata.
* @returns {Array} Returns `data`.
*/
function mergeData(data, source) {
var bitmask = data[1],
srcBitmask = source[1],
newBitmask = bitmask | srcBitmask,
isCommon = newBitmask < (WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG | WRAP_ARY_FLAG);
var isCombo =
((srcBitmask == WRAP_ARY_FLAG) && (bitmask == WRAP_CURRY_FLAG)) ||
((srcBitmask == WRAP_ARY_FLAG) && (bitmask == WRAP_REARG_FLAG) && (data[7].length <= source[8])) ||
((srcBitmask == (WRAP_ARY_FLAG | WRAP_REARG_FLAG)) && (source[7].length <= source[8]) && (bitmask == WRAP_CURRY_FLAG));
// Exit early if metadata can't be merged.
if (!(isCommon || isCombo)) {
return data;
}
// Use source `thisArg` if available.
if (srcBitmask & WRAP_BIND_FLAG) {
data[2] = source[2];
// Set when currying a bound function.
newBitmask |= bitmask & WRAP_BIND_FLAG ? 0 : WRAP_CURRY_BOUND_FLAG;
}
// Compose partial arguments.
var value = source[3];
if (value) {
var partials = data[3];
data[3] = partials ? composeArgs(partials, value, source[4]) : value;
data[4] = partials ? replaceHolders(data[3], PLACEHOLDER) : source[4];
}
// Compose partial right arguments.
value = source[5];
if (value) {
partials = data[5];
data[5] = partials ? composeArgsRight(partials, value, source[6]) : value;
data[6] = partials ? replaceHolders(data[5], PLACEHOLDER) : source[6];
}
// Use source `argPos` if available.
value = source[7];
if (value) {
data[7] = value;
}
// Use source `ary` if it's smaller.
if (srcBitmask & WRAP_ARY_FLAG) {
data[8] = data[8] == null ? source[8] : nativeMin(data[8], source[8]);
}
// Use source `arity` if one is not provided.
if (data[9] == null) {
data[9] = source[9];
}
// Use source `func` and merge bitmasks.
data[0] = source[0];
data[1] = newBitmask;
return data;
}
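// For reference, the metadata array consumed above is laid out by `createWrap`
// as: [0] func, [1] bitmask, [2] thisArg, [3] partials, [4] holders,
// [5] partialsRight, [6] holdersRight, [7] argPos, [8] ary, [9] arity.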
/**
* This function is like
* [`Object.keys`](http://ecma-international.org/ecma-262/7.0/#sec-object.keys)
* except that it includes inherited enumerable properties.
*
* @private
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names.
*/
function nativeKeysIn(object) {
var result = [];
if (object != null) {
for (var key in Object(object)) {
result.push(key);
}
}
return result;
}
/**
* Converts `value` to a string using `Object.prototype.toString`.
*
* @private
* @param {*} value The value to convert.
* @returns {string} Returns the converted string.
*/
function objectToString(value) {
return nativeObjectToString.call(value);
}
/**
* A specialized version of `baseRest` which transforms the rest array.
*
* @private
* @param {Function} func The function to apply a rest parameter to.
* @param {number} [start=func.length-1] The start position of the rest parameter.
* @param {Function} transform The rest array transform.
* @returns {Function} Returns the new function.
*/
function overRest(func, start, transform) {
start = nativeMax(start === undefined ? (func.length - 1) : start, 0);
return function() {
var args = arguments,
index = -1,
length = nativeMax(args.length - start, 0),
array = Array(length);
while (++index < length) {
array[index] = args[start + index];
}
index = -1;
var otherArgs = Array(start + 1);
while (++index < start) {
otherArgs[index] = args[index];
}
otherArgs[start] = transform(array);
return apply(func, this, otherArgs);
};
}
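// For illustration, `baseRest` is built from this helper:
// function baseRest(func, start) {
//   return setToString(overRest(func, start, identity), func + '');
// }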
/**
* Gets the parent value at `path` of `object`.
*
* @private
* @param {Object} object The object to query.
* @param {Array} path The path to get the parent value of.
* @returns {*} Returns the parent value.
*/
function parent(object, path) {
return path.length < 2 ? object : baseGet(object, baseSlice(path, 0, -1));
}
/**
* Reorder `array` according to the specified indexes where the element at
* the first index is assigned as the first element, the element at
* the second index is assigned as the second element, and so on.
*
* @private
* @param {Array} array The array to reorder.
* @param {Array} indexes The arranged array indexes.
* @returns {Array} Returns `array`.
*/
function reorder(array, indexes) {
var arrLength = array.length,
length = nativeMin(indexes.length, arrLength),
oldArray = copyArray(array);
while (length--) {
var index = indexes[length];
array[length] = isIndex(index, arrLength) ? oldArray[index] : undefined;
}
return array;
}
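// For illustration:
// reorder(['a', 'b', 'c'], [2, 0, 1]);
// // => ['c', 'a', 'b'] (the element at index 2 becomes first, and so on)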
/**
* Gets the value at `key`, unless `key` is "__proto__" or "constructor".
*
* @private
* @param {Object} object The object to query.
* @param {string} key The key of the property to get.
* @returns {*} Returns the property value.
*/
function safeGet(object, key) {
if (key === 'constructor' && typeof object[key] === 'function') {
return;
}
if (key == '__proto__') {
return;
}
return object[key];
}
/**
* Sets metadata for `func`.
*
* **Note:** If this function becomes hot, i.e. is invoked a lot in a short
* period of time, it will trip its breaker and transition to an identity
* function to avoid garbage collection pauses in V8. See
* [V8 issue 2070](https://bugs.chromium.org/p/v8/issues/detail?id=2070)
* for more details.
*
* @private
* @param {Function} func The function to associate metadata with.
* @param {*} data The metadata.
* @returns {Function} Returns `func`.
*/
var setData = shortOut(baseSetData);
/**
* A simple wrapper around the global [`setTimeout`](https://mdn.io/setTimeout).
*
* @private
* @param {Function} func The function to delay.
* @param {number} wait The number of milliseconds to delay invocation.
* @returns {number|Object} Returns the timer id or timeout object.
*/
var setTimeout = ctxSetTimeout || function(func, wait) {
return root.setTimeout(func, wait);
};
/**
* Sets the `toString` method of `func` to return `string`.
*
* @private
* @param {Function} func The function to modify.
* @param {Function} string The `toString` result.
* @returns {Function} Returns `func`.
*/
var setToString = shortOut(baseSetToString);
/**
* Sets the `toString` method of `wrapper` to mimic the source of `reference`
* with wrapper details in a comment at the top of the source body.
*
* @private
* @param {Function} wrapper The function to modify.
* @param {Function} reference The reference function.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @returns {Function} Returns `wrapper`.
*/
function setWrapToString(wrapper, reference, bitmask) {
var source = (reference + '');
return setToString(wrapper, insertWrapDetails(source, updateWrapDetails(getWrapDetails(source), bitmask)));
}
/**
* Creates a function that'll short out and invoke `identity` instead
* of `func` when it's called `HOT_COUNT` or more times in `HOT_SPAN`
* milliseconds.
*
* @private
* @param {Function} func The function to restrict.
* @returns {Function} Returns the new shortable function.
*/
function shortOut(func) {
var count = 0,
lastCalled = 0;
return function() {
var stamp = nativeNow(),
remaining = HOT_SPAN - (stamp - lastCalled);
lastCalled = stamp;
if (remaining > 0) {
if (++count >= HOT_COUNT) {
return arguments[0];
}
} else {
count = 0;
}
return func.apply(undefined, arguments);
};
}
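// For illustration, with lodash's constants (`HOT_COUNT` = 800, `HOT_SPAN` =
// 16) a guarded function invoked 800 or more times inside a 16ms window
// starts returning its first argument unchanged instead of calling `func`,
// until the call rate drops again.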
/**
* A specialized version of `_.shuffle` which mutates and sets the size of `array`.
*
* @private
* @param {Array} array The array to shuffle.
* @param {number} [size=array.length] The size of `array`.
* @returns {Array} Returns `array`.
*/
function shuffleSelf(array, size) {
var index = -1,
length = array.length,
lastIndex = length - 1;
size = size === undefined ? length : size;
while (++index < size) {
var rand = baseRandom(index, lastIndex),
value = array[rand];
array[rand] = array[index];
array[index] = value;
}
array.length = size;
return array;
}
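// For illustration, this is an in-place Fisher-Yates shuffle truncated to
// `size` elements; `_.shuffle` and `_.sampleSize` are built on it.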
/**
* Converts `string` to a property path array.
*
* @private
* @param {string} string The string to convert.
* @returns {Array} Returns the property path array.
*/
var stringToPath = memoizeCapped(function(string) {
var result = [];
if (string.charCodeAt(0) === 46 /* . */) {
result.push('');
}
string.replace(rePropName, function(match, number, quote, subString) {
result.push(quote ? subString.replace(reEscapeChar, '$1') : (number || match));
});
return result;
});
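// For illustration:
// stringToPath('a[0].b.c');    // => ['a', '0', 'b', 'c']
// stringToPath('.a');          // => ['', 'a'] (a leading dot yields an empty head key)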
/**
* Converts `value` to a string key if it's not a string or symbol.
*
* @private
* @param {*} value The value to inspect.
* @returns {string|symbol} Returns the key.
*/
function toKey(value) {
if (typeof value == 'string' || isSymbol(value)) {
return value;
}
var result = (value + '');
return (result == '0' && (1 / value) == -INFINITY) ? '-0' : result;
}
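// For illustration, negative zero is the one numeric key needing special
// casing, since `String(-0)` loses the sign:
// toKey(-0);    // => '-0'
// toKey(0);     // => '0'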
/**
* Converts `func` to its source code.
*
* @private
* @param {Function} func The function to convert.
* @returns {string} Returns the source code.
*/
function toSource(func) {
if (func != null) {
try {
return funcToString.call(func);
} catch (e) {}
try {
return (func + '');
} catch (e) {}
}
return '';
}
/**
* Updates wrapper `details` based on `bitmask` flags.
*
* @private
* @param {Array} details The details to modify.
* @param {number} bitmask The bitmask flags. See `createWrap` for more details.
* @returns {Array} Returns `details`.
*/
function updateWrapDetails(details, bitmask) {
arrayEach(wrapFlags, function(pair) {
var value = '_.' + pair[0];
if ((bitmask & pair[1]) && !arrayIncludes(details, value)) {
details.push(value);
}
});
return details.sort();
}
/**
* Creates a clone of `wrapper`.
*
* @private
* @param {Object} wrapper The wrapper to clone.
* @returns {Object} Returns the cloned wrapper.
*/
function wrapperClone(wrapper) {
if (wrapper instanceof LazyWrapper) {
return wrapper.clone();
}
var result = new LodashWrapper(wrapper.__wrapped__, wrapper.__chain__);
result.__actions__ = copyArray(wrapper.__actions__);
result.__index__ = wrapper.__index__;
result.__values__ = wrapper.__values__;
return result;
}
/*------------------------------------------------------------------------*/
/**
* Creates an array of elements split into groups the length of `size`.
* If `array` can't be split evenly, the final chunk will be the remaining
* elements.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to process.
* @param {number} [size=1] The length of each chunk.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Array} Returns the new array of chunks.
* @example
*
* _.chunk(['a', 'b', 'c', 'd'], 2);
* // => [['a', 'b'], ['c', 'd']]
*
* _.chunk(['a', 'b', 'c', 'd'], 3);
* // => [['a', 'b', 'c'], ['d']]
*/
function chunk(array, size, guard) {
if ((guard ? isIterateeCall(array, size, guard) : size === undefined)) {
size = 1;
} else {
size = nativeMax(toInteger(size), 0);
}
var length = array == null ? 0 : array.length;
if (!length || size < 1) {
return [];
}
var index = 0,
resIndex = 0,
result = Array(nativeCeil(length / size));
while (index < length) {
result[resIndex++] = baseSlice(array, index, (index += size));
}
return result;
}
/**
* Creates an array with all falsey values removed. The values `false`, `null`,
* `0`, `""`, `undefined`, and `NaN` are falsey.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to compact.
* @returns {Array} Returns the new array of filtered values.
* @example
*
* _.compact([0, 1, false, 2, '', 3]);
* // => [1, 2, 3]
*/
function compact(array) {
var index = -1,
length = array == null ? 0 : array.length,
resIndex = 0,
result = [];
while (++index < length) {
var value = array[index];
if (value) {
result[resIndex++] = value;
}
}
return result;
}
/**
* Creates a new array concatenating `array` with any additional arrays
* and/or values.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to concatenate.
* @param {...*} [values] The values to concatenate.
* @returns {Array} Returns the new concatenated array.
* @example
*
* var array = [1];
* var other = _.concat(array, 2, [3], [[4]]);
*
* console.log(other);
* // => [1, 2, 3, [4]]
*
* console.log(array);
* // => [1]
*/
function concat() {
var length = arguments.length;
if (!length) {
return [];
}
var args = Array(length - 1),
array = arguments[0],
index = length;
while (index--) {
args[index - 1] = arguments[index];
}
return arrayPush(isArray(array) ? copyArray(array) : [array], baseFlatten(args, 1));
}
/**
* Creates an array of `array` values not included in the other given arrays
* using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons. The order and references of result values are
* determined by the first array.
*
* **Note:** Unlike `_.pullAll`, this method returns a new array.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to inspect.
* @param {...Array} [values] The values to exclude.
* @returns {Array} Returns the new array of filtered values.
* @see _.without, _.xor
* @example
*
* _.difference([2, 1], [2, 3]);
* // => [1]
*/
var difference = baseRest(function(array, values) {
return isArrayLikeObject(array)
? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true))
: [];
});
/**
* This method is like `_.difference` except that it accepts `iteratee` which
* is invoked for each element of `array` and `values` to generate the criterion
* by which they're compared. The order and references of result values are
* determined by the first array. The iteratee is invoked with one argument:
* (value).
*
* **Note:** Unlike `_.pullAllBy`, this method returns a new array.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {...Array} [values] The values to exclude.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Array} Returns the new array of filtered values.
* @example
*
* _.differenceBy([2.1, 1.2], [2.3, 3.4], Math.floor);
* // => [1.2]
*
* // The `_.property` iteratee shorthand.
* _.differenceBy([{ 'x': 2 }, { 'x': 1 }], [{ 'x': 1 }], 'x');
* // => [{ 'x': 2 }]
*/
var differenceBy = baseRest(function(array, values) {
var iteratee = last(values);
if (isArrayLikeObject(iteratee)) {
iteratee = undefined;
}
return isArrayLikeObject(array)
? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true), getIteratee(iteratee, 2))
: [];
});
/**
* This method is like `_.difference` except that it accepts `comparator`
* which is invoked to compare elements of `array` to `values`. The order and
* references of result values are determined by the first array. The comparator
* is invoked with two arguments: (arrVal, othVal).
*
* **Note:** Unlike `_.pullAllWith`, this method returns a new array.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {...Array} [values] The values to exclude.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new array of filtered values.
* @example
*
* var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }];
*
* _.differenceWith(objects, [{ 'x': 1, 'y': 2 }], _.isEqual);
* // => [{ 'x': 2, 'y': 1 }]
*/
var differenceWith = baseRest(function(array, values) {
var comparator = last(values);
if (isArrayLikeObject(comparator)) {
comparator = undefined;
}
return isArrayLikeObject(array)
? baseDifference(array, baseFlatten(values, 1, isArrayLikeObject, true), undefined, comparator)
: [];
});
/**
* Creates a slice of `array` with `n` elements dropped from the beginning.
*
* @static
* @memberOf _
* @since 0.5.0
* @category Array
* @param {Array} array The array to query.
* @param {number} [n=1] The number of elements to drop.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Array} Returns the slice of `array`.
* @example
*
* _.drop([1, 2, 3]);
* // => [2, 3]
*
* _.drop([1, 2, 3], 2);
* // => [3]
*
* _.drop([1, 2, 3], 5);
* // => []
*
* _.drop([1, 2, 3], 0);
* // => [1, 2, 3]
*/
function drop(array, n, guard) {
var length = array == null ? 0 : array.length;
if (!length) {
return [];
}
n = (guard || n === undefined) ? 1 : toInteger(n);
return baseSlice(array, n < 0 ? 0 : n, length);
}
/**
* Creates a slice of `array` with `n` elements dropped from the end.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to query.
* @param {number} [n=1] The number of elements to drop.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Array} Returns the slice of `array`.
* @example
*
* _.dropRight([1, 2, 3]);
* // => [1, 2]
*
* _.dropRight([1, 2, 3], 2);
* // => [1]
*
* _.dropRight([1, 2, 3], 5);
* // => []
*
* _.dropRight([1, 2, 3], 0);
* // => [1, 2, 3]
*/
function dropRight(array, n, guard) {
var length = array == null ? 0 : array.length;
if (!length) {
return [];
}
n = (guard || n === undefined) ? 1 : toInteger(n);
n = length - n;
return baseSlice(array, 0, n < 0 ? 0 : n);
}
/**
* Creates a slice of `array` excluding elements dropped from the end.
* Elements are dropped until `predicate` returns falsey. The predicate is
* invoked with three arguments: (value, index, array).
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to query.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the slice of `array`.
* @example
*
* var users = [
* { 'user': 'barney', 'active': true },
* { 'user': 'fred', 'active': false },
* { 'user': 'pebbles', 'active': false }
* ];
*
* _.dropRightWhile(users, function(o) { return !o.active; });
* // => objects for ['barney']
*
* // The `_.matches` iteratee shorthand.
* _.dropRightWhile(users, { 'user': 'pebbles', 'active': false });
* // => objects for ['barney', 'fred']
*
* // The `_.matchesProperty` iteratee shorthand.
* _.dropRightWhile(users, ['active', false]);
* // => objects for ['barney']
*
* // The `_.property` iteratee shorthand.
* _.dropRightWhile(users, 'active');
* // => objects for ['barney', 'fred', 'pebbles']
*/
function dropRightWhile(array, predicate) {
return (array && array.length)
? baseWhile(array, getIteratee(predicate, 3), true, true)
: [];
}
/**
* Creates a slice of `array` excluding elements dropped from the beginning.
* Elements are dropped until `predicate` returns falsey. The predicate is
* invoked with three arguments: (value, index, array).
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to query.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the slice of `array`.
* @example
*
* var users = [
* { 'user': 'barney', 'active': false },
* { 'user': 'fred', 'active': false },
* { 'user': 'pebbles', 'active': true }
* ];
*
* _.dropWhile(users, function(o) { return !o.active; });
* // => objects for ['pebbles']
*
* // The `_.matches` iteratee shorthand.
* _.dropWhile(users, { 'user': 'barney', 'active': false });
* // => objects for ['fred', 'pebbles']
*
* // The `_.matchesProperty` iteratee shorthand.
* _.dropWhile(users, ['active', false]);
* // => objects for ['pebbles']
*
* // The `_.property` iteratee shorthand.
* _.dropWhile(users, 'active');
* // => objects for ['barney', 'fred', 'pebbles']
*/
function dropWhile(array, predicate) {
return (array && array.length)
? baseWhile(array, getIteratee(predicate, 3), true)
: [];
}
/**
* Fills elements of `array` with `value` from `start` up to, but not
* including, `end`.
*
* **Note:** This method mutates `array`.
*
* @static
* @memberOf _
* @since 3.2.0
* @category Array
* @param {Array} array The array to fill.
* @param {*} value The value to fill `array` with.
* @param {number} [start=0] The start position.
* @param {number} [end=array.length] The end position.
* @returns {Array} Returns `array`.
* @example
*
* var array = [1, 2, 3];
*
* _.fill(array, 'a');
* console.log(array);
* // => ['a', 'a', 'a']
*
* _.fill(Array(3), 2);
* // => [2, 2, 2]
*
* _.fill([4, 6, 8, 10], '*', 1, 3);
* // => [4, '*', '*', 10]
*/
function fill(array, value, start, end) {
var length = array == null ? 0 : array.length;
if (!length) {
return [];
}
if (start && typeof start != 'number' && isIterateeCall(array, value, start)) {
start = 0;
end = length;
}
return baseFill(array, value, start, end);
}
/**
* This method is like `_.find` except that it returns the index of the first
* element `predicate` returns truthy for instead of the element itself.
*
* @static
* @memberOf _
* @since 1.1.0
* @category Array
* @param {Array} array The array to inspect.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @param {number} [fromIndex=0] The index to search from.
* @returns {number} Returns the index of the found element, else `-1`.
* @example
*
* var users = [
* { 'user': 'barney', 'active': false },
* { 'user': 'fred', 'active': false },
* { 'user': 'pebbles', 'active': true }
* ];
*
* _.findIndex(users, function(o) { return o.user == 'barney'; });
* // => 0
*
* // The `_.matches` iteratee shorthand.
* _.findIndex(users, { 'user': 'fred', 'active': false });
* // => 1
*
* // The `_.matchesProperty` iteratee shorthand.
* _.findIndex(users, ['active', false]);
* // => 0
*
* // The `_.property` iteratee shorthand.
* _.findIndex(users, 'active');
* // => 2
*/
function findIndex(array, predicate, fromIndex) {
var length = array == null ? 0 : array.length;
if (!length) {
return -1;
}
var index = fromIndex == null ? 0 : toInteger(fromIndex);
if (index < 0) {
index = nativeMax(length + index, 0);
}
return baseFindIndex(array, getIteratee(predicate, 3), index);
}
/**
* This method is like `_.findIndex` except that it iterates over elements
* of `collection` from right to left.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @param {number} [fromIndex=array.length-1] The index to search from.
* @returns {number} Returns the index of the found element, else `-1`.
* @example
*
* var users = [
* { 'user': 'barney', 'active': true },
* { 'user': 'fred', 'active': false },
* { 'user': 'pebbles', 'active': false }
* ];
*
* _.findLastIndex(users, function(o) { return o.user == 'pebbles'; });
* // => 2
*
* // The `_.matches` iteratee shorthand.
* _.findLastIndex(users, { 'user': 'barney', 'active': true });
* // => 0
*
* // The `_.matchesProperty` iteratee shorthand.
* _.findLastIndex(users, ['active', false]);
* // => 2
*
* // The `_.property` iteratee shorthand.
* _.findLastIndex(users, 'active');
* // => 0
*/
function findLastIndex(array, predicate, fromIndex) {
var length = array == null ? 0 : array.length;
if (!length) {
return -1;
}
var index = length - 1;
if (fromIndex !== undefined) {
index = toInteger(fromIndex);
index = fromIndex < 0
? nativeMax(length + index, 0)
: nativeMin(index, length - 1);
}
return baseFindIndex(array, getIteratee(predicate, 3), index, true);
}
/**
* Flattens `array` a single level deep.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to flatten.
* @returns {Array} Returns the new flattened array.
* @example
*
* _.flatten([1, [2, [3, [4]], 5]]);
* // => [1, 2, [3, [4]], 5]
*/
function flatten(array) {
var length = array == null ? 0 : array.length;
return length ? baseFlatten(array, 1) : [];
}
/**
* Recursively flattens `array`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to flatten.
* @returns {Array} Returns the new flattened array.
* @example
*
* _.flattenDeep([1, [2, [3, [4]], 5]]);
* // => [1, 2, 3, 4, 5]
*/
function flattenDeep(array) {
var length = array == null ? 0 : array.length;
return length ? baseFlatten(array, INFINITY) : [];
}
/**
* Recursively flatten `array` up to `depth` times.
*
* @static
* @memberOf _
* @since 4.4.0
* @category Array
* @param {Array} array The array to flatten.
* @param {number} [depth=1] The maximum recursion depth.
* @returns {Array} Returns the new flattened array.
* @example
*
* var array = [1, [2, [3, [4]], 5]];
*
* _.flattenDepth(array, 1);
* // => [1, 2, [3, [4]], 5]
*
* _.flattenDepth(array, 2);
* // => [1, 2, 3, [4], 5]
*/
function flattenDepth(array, depth) {
var length = array == null ? 0 : array.length;
if (!length) {
return [];
}
depth = depth === undefined ? 1 : toInteger(depth);
return baseFlatten(array, depth);
}
/**
* The inverse of `_.toPairs`; this method returns an object composed
* from key-value `pairs`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} pairs The key-value pairs.
* @returns {Object} Returns the new object.
* @example
*
* _.fromPairs([['a', 1], ['b', 2]]);
* // => { 'a': 1, 'b': 2 }
*/
function fromPairs(pairs) {
var index = -1,
length = pairs == null ? 0 : pairs.length,
result = {};
while (++index < length) {
var pair = pairs[index];
result[pair[0]] = pair[1];
}
return result;
}
/**
* Gets the first element of `array`.
*
* @static
* @memberOf _
* @since 0.1.0
* @alias first
* @category Array
* @param {Array} array The array to query.
* @returns {*} Returns the first element of `array`.
* @example
*
* _.head([1, 2, 3]);
* // => 1
*
* _.head([]);
* // => undefined
*/
function head(array) {
return (array && array.length) ? array[0] : undefined;
}
/**
* Gets the index at which the first occurrence of `value` is found in `array`
* using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons. If `fromIndex` is negative, it's used as the
* offset from the end of `array`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @param {number} [fromIndex=0] The index to search from.
* @returns {number} Returns the index of the matched value, else `-1`.
* @example
*
* _.indexOf([1, 2, 1, 2], 2);
* // => 1
*
* // Search from the `fromIndex`.
* _.indexOf([1, 2, 1, 2], 2, 2);
* // => 3
*/
function indexOf(array, value, fromIndex) {
var length = array == null ? 0 : array.length;
if (!length) {
return -1;
}
var index = fromIndex == null ? 0 : toInteger(fromIndex);
if (index < 0) {
index = nativeMax(length + index, 0);
}
return baseIndexOf(array, value, index);
}
/**
* Gets all but the last element of `array`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to query.
* @returns {Array} Returns the slice of `array`.
* @example
*
* _.initial([1, 2, 3]);
* // => [1, 2]
*/
function initial(array) {
var length = array == null ? 0 : array.length;
return length ? baseSlice(array, 0, -1) : [];
}
/**
* Creates an array of unique values that are included in all given arrays
* using [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons. The order and references of result values are
* determined by the first array.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @returns {Array} Returns the new array of intersecting values.
* @example
*
* _.intersection([2, 1], [2, 3]);
* // => [2]
*/
var intersection = baseRest(function(arrays) {
var mapped = arrayMap(arrays, castArrayLikeObject);
return (mapped.length && mapped[0] === arrays[0])
? baseIntersection(mapped)
: [];
});
/**
* This method is like `_.intersection` except that it accepts `iteratee`
* which is invoked for each element of each `arrays` to generate the criterion
* by which they're compared. The order and references of result values are
* determined by the first array. The iteratee is invoked with one argument:
* (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Array} Returns the new array of intersecting values.
* @example
*
* _.intersectionBy([2.1, 1.2], [2.3, 3.4], Math.floor);
* // => [2.1]
*
* // The `_.property` iteratee shorthand.
* _.intersectionBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x');
* // => [{ 'x': 1 }]
*/
var intersectionBy = baseRest(function(arrays) {
var iteratee = last(arrays),
mapped = arrayMap(arrays, castArrayLikeObject);
if (iteratee === last(mapped)) {
iteratee = undefined;
} else {
mapped.pop();
}
return (mapped.length && mapped[0] === arrays[0])
? baseIntersection(mapped, getIteratee(iteratee, 2))
: [];
});
/**
* This method is like `_.intersection` except that it accepts `comparator`
* which is invoked to compare elements of `arrays`. The order and references
* of result values are determined by the first array. The comparator is
* invoked with two arguments: (arrVal, othVal).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new array of intersecting values.
* @example
*
* var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }];
* var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }];
*
* _.intersectionWith(objects, others, _.isEqual);
* // => [{ 'x': 1, 'y': 2 }]
*/
var intersectionWith = baseRest(function(arrays) {
var comparator = last(arrays),
mapped = arrayMap(arrays, castArrayLikeObject);
comparator = typeof comparator == 'function' ? comparator : undefined;
if (comparator) {
mapped.pop();
}
return (mapped.length && mapped[0] === arrays[0])
? baseIntersection(mapped, undefined, comparator)
: [];
});
/**
* Converts all elements in `array` into a string separated by `separator`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to convert.
* @param {string} [separator=','] The element separator.
* @returns {string} Returns the joined string.
* @example
*
* _.join(['a', 'b', 'c'], '~');
* // => 'a~b~c'
*/
function join(array, separator) {
return array == null ? '' : nativeJoin.call(array, separator);
}
/**
* Gets the last element of `array`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to query.
* @returns {*} Returns the last element of `array`.
* @example
*
* _.last([1, 2, 3]);
* // => 3
*/
function last(array) {
var length = array == null ? 0 : array.length;
return length ? array[length - 1] : undefined;
}
/**
* This method is like `_.indexOf` except that it iterates over elements of
* `array` from right to left.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @param {number} [fromIndex=array.length-1] The index to search from.
* @returns {number} Returns the index of the matched value, else `-1`.
* @example
*
* _.lastIndexOf([1, 2, 1, 2], 2);
* // => 3
*
* // Search from the `fromIndex`.
* _.lastIndexOf([1, 2, 1, 2], 2, 2);
* // => 1
*/
function lastIndexOf(array, value, fromIndex) {
var length = array == null ? 0 : array.length;
if (!length) {
return -1;
}
var index = length;
if (fromIndex !== undefined) {
index = toInteger(fromIndex);
index = index < 0 ? nativeMax(length + index, 0) : nativeMin(index, length - 1);
}
return value === value
? strictLastIndexOf(array, value, index)
: baseFindIndex(array, baseIsNaN, index, true);
}
/**
* Gets the element at index `n` of `array`. If `n` is negative, the nth
* element from the end is returned.
*
* @static
* @memberOf _
* @since 4.11.0
* @category Array
* @param {Array} array The array to query.
* @param {number} [n=0] The index of the element to return.
* @returns {*} Returns the nth element of `array`.
* @example
*
* var array = ['a', 'b', 'c', 'd'];
*
* _.nth(array, 1);
* // => 'b'
*
* _.nth(array, -2);
* // => 'c'
*/
function nth(array, n) {
return (array && array.length) ? baseNth(array, toInteger(n)) : undefined;
}
/**
* Removes all given values from `array` using
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons.
*
* **Note:** Unlike `_.without`, this method mutates `array`. Use `_.remove`
* to remove elements from an array by predicate.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Array
* @param {Array} array The array to modify.
* @param {...*} [values] The values to remove.
* @returns {Array} Returns `array`.
* @example
*
* var array = ['a', 'b', 'c', 'a', 'b', 'c'];
*
* _.pull(array, 'a', 'c');
* console.log(array);
* // => ['b', 'b']
*/
var pull = baseRest(pullAll);
/**
* This method is like `_.pull` except that it accepts an array of values to remove.
*
* **Note:** Unlike `_.difference`, this method mutates `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to modify.
* @param {Array} values The values to remove.
* @returns {Array} Returns `array`.
* @example
*
* var array = ['a', 'b', 'c', 'a', 'b', 'c'];
*
* _.pullAll(array, ['a', 'c']);
* console.log(array);
* // => ['b', 'b']
*/
function pullAll(array, values) {
return (array && array.length && values && values.length)
? basePullAll(array, values)
: array;
}
/**
* This method is like `_.pullAll` except that it accepts `iteratee` which is
* invoked for each element of `array` and `values` to generate the criterion
* by which they're compared. The iteratee is invoked with one argument: (value).
*
* **Note:** Unlike `_.differenceBy`, this method mutates `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to modify.
* @param {Array} values The values to remove.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Array} Returns `array`.
* @example
*
* var array = [{ 'x': 1 }, { 'x': 2 }, { 'x': 3 }, { 'x': 1 }];
*
* _.pullAllBy(array, [{ 'x': 1 }, { 'x': 3 }], 'x');
* console.log(array);
* // => [{ 'x': 2 }]
*/
function pullAllBy(array, values, iteratee) {
return (array && array.length && values && values.length)
? basePullAll(array, values, getIteratee(iteratee, 2))
: array;
}
/**
* This method is like `_.pullAll` except that it accepts `comparator` which
* is invoked to compare elements of `array` to `values`. The comparator is
* invoked with two arguments: (arrVal, othVal).
*
* **Note:** Unlike `_.differenceWith`, this method mutates `array`.
*
* @static
* @memberOf _
* @since 4.6.0
* @category Array
* @param {Array} array The array to modify.
* @param {Array} values The values to remove.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns `array`.
* @example
*
* var array = [{ 'x': 1, 'y': 2 }, { 'x': 3, 'y': 4 }, { 'x': 5, 'y': 6 }];
*
* _.pullAllWith(array, [{ 'x': 3, 'y': 4 }], _.isEqual);
* console.log(array);
* // => [{ 'x': 1, 'y': 2 }, { 'x': 5, 'y': 6 }]
*/
function pullAllWith(array, values, comparator) {
return (array && array.length && values && values.length)
? basePullAll(array, values, undefined, comparator)
: array;
}
/**
* Removes elements from `array` corresponding to `indexes` and returns an
* array of removed elements.
*
* **Note:** Unlike `_.at`, this method mutates `array`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to modify.
* @param {...(number|number[])} [indexes] The indexes of elements to remove.
* @returns {Array} Returns the new array of removed elements.
* @example
*
* var array = ['a', 'b', 'c', 'd'];
* var pulled = _.pullAt(array, [1, 3]);
*
* console.log(array);
* // => ['a', 'c']
*
* console.log(pulled);
* // => ['b', 'd']
*/
var pullAt = flatRest(function(array, indexes) {
var length = array == null ? 0 : array.length,
result = baseAt(array, indexes);
basePullAt(array, arrayMap(indexes, function(index) {
return isIndex(index, length) ? +index : index;
}).sort(compareAscending));
return result;
});
/**
* Removes all elements from `array` that `predicate` returns truthy for
* and returns an array of the removed elements. The predicate is invoked
* with three arguments: (value, index, array).
*
* **Note:** Unlike `_.filter`, this method mutates `array`. Use `_.pull`
* to pull elements from an array by value.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Array
* @param {Array} array The array to modify.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new array of removed elements.
* @example
*
* var array = [1, 2, 3, 4];
* var evens = _.remove(array, function(n) {
* return n % 2 == 0;
* });
*
* console.log(array);
* // => [1, 3]
*
* console.log(evens);
* // => [2, 4]
*/
function remove(array, predicate) {
var result = [];
if (!(array && array.length)) {
return result;
}
var index = -1,
indexes = [],
length = array.length;
predicate = getIteratee(predicate, 3);
while (++index < length) {
var value = array[index];
if (predicate(value, index, array)) {
result.push(value);
indexes.push(index);
}
}
basePullAt(array, indexes);
return result;
}
/**
* Reverses `array` so that the first element becomes the last, the second
* element becomes the second to last, and so on.
*
* **Note:** This method mutates `array` and is based on
* [`Array#reverse`](https://mdn.io/Array/reverse).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to modify.
* @returns {Array} Returns `array`.
* @example
*
* var array = [1, 2, 3];
*
* _.reverse(array);
* // => [3, 2, 1]
*
* console.log(array);
* // => [3, 2, 1]
*/
function reverse(array) {
return array == null ? array : nativeReverse.call(array);
}
/**
* Creates a slice of `array` from `start` up to, but not including, `end`.
*
* **Note:** This method is used instead of
* [`Array#slice`](https://mdn.io/Array/slice) to ensure dense arrays are
* returned.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to slice.
* @param {number} [start=0] The start position.
* @param {number} [end=array.length] The end position.
* @returns {Array} Returns the slice of `array`.
*/
function slice(array, start, end) {
var length = array == null ? 0 : array.length;
if (!length) {
return [];
}
if (end && typeof end != 'number' && isIterateeCall(array, start, end)) {
start = 0;
end = length;
}
else {
start = start == null ? 0 : toInteger(start);
end = end === undefined ? length : toInteger(end);
}
return baseSlice(array, start, end);
}
/**
* Uses a binary search to determine the lowest index at which `value`
* should be inserted into `array` in order to maintain its sort order.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* _.sortedIndex([30, 50], 40);
* // => 1
*/
function sortedIndex(array, value) {
return baseSortedIndex(array, value);
}
/**
* This method is like `_.sortedIndex` except that it accepts `iteratee`
* which is invoked for `value` and each element of `array` to compute their
* sort ranking. The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* var objects = [{ 'x': 4 }, { 'x': 5 }];
*
* _.sortedIndexBy(objects, { 'x': 4 }, function(o) { return o.x; });
* // => 0
*
* // The `_.property` iteratee shorthand.
* _.sortedIndexBy(objects, { 'x': 4 }, 'x');
* // => 0
*/
function sortedIndexBy(array, value, iteratee) {
return baseSortedIndexBy(array, value, getIteratee(iteratee, 2));
}
/**
* This method is like `_.indexOf` except that it performs a binary
* search on a sorted `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @returns {number} Returns the index of the matched value, else `-1`.
* @example
*
* _.sortedIndexOf([4, 5, 5, 5, 6], 5);
* // => 1
*/
function sortedIndexOf(array, value) {
var length = array == null ? 0 : array.length;
if (length) {
var index = baseSortedIndex(array, value);
if (index < length && eq(array[index], value)) {
return index;
}
}
return -1;
}
/**
* This method is like `_.sortedIndex` except that it returns the highest
* index at which `value` should be inserted into `array` in order to
* maintain its sort order.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* _.sortedLastIndex([4, 5, 5, 5, 6], 5);
* // => 4
*/
function sortedLastIndex(array, value) {
return baseSortedIndex(array, value, true);
}
/**
* This method is like `_.sortedLastIndex` except that it accepts `iteratee`
* which is invoked for `value` and each element of `array` to compute their
* sort ranking. The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The sorted array to inspect.
* @param {*} value The value to evaluate.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {number} Returns the index at which `value` should be inserted
* into `array`.
* @example
*
* var objects = [{ 'x': 4 }, { 'x': 5 }];
*
* _.sortedLastIndexBy(objects, { 'x': 4 }, function(o) { return o.x; });
* // => 1
*
* // The `_.property` iteratee shorthand.
* _.sortedLastIndexBy(objects, { 'x': 4 }, 'x');
* // => 1
*/
function sortedLastIndexBy(array, value, iteratee) {
return baseSortedIndexBy(array, value, getIteratee(iteratee, 2), true);
}
/**
* This method is like `_.lastIndexOf` except that it performs a binary
* search on a sorted `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {*} value The value to search for.
* @returns {number} Returns the index of the matched value, else `-1`.
* @example
*
* _.sortedLastIndexOf([4, 5, 5, 5, 6], 5);
* // => 3
*/
function sortedLastIndexOf(array, value) {
var length = array == null ? 0 : array.length;
if (length) {
var index = baseSortedIndex(array, value, true) - 1;
if (eq(array[index], value)) {
return index;
}
}
return -1;
}
/**
* This method is like `_.uniq` except that it's designed and optimized
* for sorted arrays.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @returns {Array} Returns the new duplicate free array.
* @example
*
* _.sortedUniq([1, 1, 2]);
* // => [1, 2]
*/
function sortedUniq(array) {
return (array && array.length)
? baseSortedUniq(array)
: [];
}
/**
* This method is like `_.uniqBy` except that it's designed and optimized
* for sorted arrays.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {Function} [iteratee] The iteratee invoked per element.
* @returns {Array} Returns the new duplicate free array.
* @example
*
* _.sortedUniqBy([1.1, 1.2, 2.3, 2.4], Math.floor);
* // => [1.1, 2.3]
*/
function sortedUniqBy(array, iteratee) {
return (array && array.length)
? baseSortedUniq(array, getIteratee(iteratee, 2))
: [];
}
/**
* Gets all but the first element of `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to query.
* @returns {Array} Returns the slice of `array`.
* @example
*
* _.tail([1, 2, 3]);
* // => [2, 3]
*/
function tail(array) {
var length = array == null ? 0 : array.length;
return length ? baseSlice(array, 1, length) : [];
}
/**
* Creates a slice of `array` with `n` elements taken from the beginning.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to query.
* @param {number} [n=1] The number of elements to take.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Array} Returns the slice of `array`.
* @example
*
* _.take([1, 2, 3]);
* // => [1]
*
* _.take([1, 2, 3], 2);
* // => [1, 2]
*
* _.take([1, 2, 3], 5);
* // => [1, 2, 3]
*
* _.take([1, 2, 3], 0);
* // => []
*/
function take(array, n, guard) {
if (!(array && array.length)) {
return [];
}
n = (guard || n === undefined) ? 1 : toInteger(n);
return baseSlice(array, 0, n < 0 ? 0 : n);
}
/**
* Creates a slice of `array` with `n` elements taken from the end.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to query.
* @param {number} [n=1] The number of elements to take.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Array} Returns the slice of `array`.
* @example
*
* _.takeRight([1, 2, 3]);
* // => [3]
*
* _.takeRight([1, 2, 3], 2);
* // => [2, 3]
*
* _.takeRight([1, 2, 3], 5);
* // => [1, 2, 3]
*
* _.takeRight([1, 2, 3], 0);
* // => []
*/
function takeRight(array, n, guard) {
var length = array == null ? 0 : array.length;
if (!length) {
return [];
}
n = (guard || n === undefined) ? 1 : toInteger(n);
n = length - n;
return baseSlice(array, n < 0 ? 0 : n, length);
}
/**
* Creates a slice of `array` with elements taken from the end. Elements are
* taken until `predicate` returns falsey. The predicate is invoked with
* three arguments: (value, index, array).
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to query.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the slice of `array`.
* @example
*
* var users = [
* { 'user': 'barney', 'active': true },
* { 'user': 'fred', 'active': false },
* { 'user': 'pebbles', 'active': false }
* ];
*
* _.takeRightWhile(users, function(o) { return !o.active; });
* // => objects for ['fred', 'pebbles']
*
* // The `_.matches` iteratee shorthand.
* _.takeRightWhile(users, { 'user': 'pebbles', 'active': false });
* // => objects for ['pebbles']
*
* // The `_.matchesProperty` iteratee shorthand.
* _.takeRightWhile(users, ['active', false]);
* // => objects for ['fred', 'pebbles']
*
* // The `_.property` iteratee shorthand.
* _.takeRightWhile(users, 'active');
* // => []
*/
function takeRightWhile(array, predicate) {
return (array && array.length)
? baseWhile(array, getIteratee(predicate, 3), false, true)
: [];
}
/**
* Creates a slice of `array` with elements taken from the beginning. Elements
* are taken until `predicate` returns falsey. The predicate is invoked with
* three arguments: (value, index, array).
*
* @static
* @memberOf _
* @since 3.0.0
* @category Array
* @param {Array} array The array to query.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the slice of `array`.
* @example
*
* var users = [
* { 'user': 'barney', 'active': false },
* { 'user': 'fred', 'active': false },
* { 'user': 'pebbles', 'active': true }
* ];
*
* _.takeWhile(users, function(o) { return !o.active; });
* // => objects for ['barney', 'fred']
*
* // The `_.matches` iteratee shorthand.
* _.takeWhile(users, { 'user': 'barney', 'active': false });
* // => objects for ['barney']
*
* // The `_.matchesProperty` iteratee shorthand.
* _.takeWhile(users, ['active', false]);
* // => objects for ['barney', 'fred']
*
* // The `_.property` iteratee shorthand.
* _.takeWhile(users, 'active');
* // => []
*/
function takeWhile(array, predicate) {
return (array && array.length)
? baseWhile(array, getIteratee(predicate, 3))
: [];
}
/**
* Creates an array of unique values, in order, from all given arrays using
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @returns {Array} Returns the new array of combined values.
* @example
*
* _.union([2], [1, 2]);
* // => [2, 1]
*/
var union = baseRest(function(arrays) {
return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true));
});
/**
* This method is like `_.union` except that it accepts `iteratee` which is
* invoked for each element of each `arrays` to generate the criterion by
* which uniqueness is computed. Result values are chosen from the first
* array in which the value occurs. The iteratee is invoked with one argument:
* (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Array} Returns the new array of combined values.
* @example
*
* _.unionBy([2.1], [1.2, 2.3], Math.floor);
* // => [2.1, 1.2]
*
* // The `_.property` iteratee shorthand.
* _.unionBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x');
* // => [{ 'x': 1 }, { 'x': 2 }]
*/
var unionBy = baseRest(function(arrays) {
var iteratee = last(arrays);
if (isArrayLikeObject(iteratee)) {
iteratee = undefined;
}
return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true), getIteratee(iteratee, 2));
});
/**
* This method is like `_.union` except that it accepts `comparator` which
* is invoked to compare elements of `arrays`. Result values are chosen from
* the first array in which the value occurs. The comparator is invoked
* with two arguments: (arrVal, othVal).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new array of combined values.
* @example
*
* var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }];
* var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }];
*
* _.unionWith(objects, others, _.isEqual);
* // => [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }, { 'x': 1, 'y': 1 }]
*/
var unionWith = baseRest(function(arrays) {
var comparator = last(arrays);
comparator = typeof comparator == 'function' ? comparator : undefined;
return baseUniq(baseFlatten(arrays, 1, isArrayLikeObject, true), undefined, comparator);
});
/**
* Creates a duplicate-free version of an array, using
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons, in which only the first occurrence of each element
* is kept. The order of result values is determined by the order they occur
* in the array.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to inspect.
* @returns {Array} Returns the new duplicate free array.
* @example
*
* _.uniq([2, 1, 2]);
* // => [2, 1]
*/
function uniq(array) {
return (array && array.length) ? baseUniq(array) : [];
}
/**
* This method is like `_.uniq` except that it accepts `iteratee` which is
* invoked for each element in `array` to generate the criterion by which
* uniqueness is computed. The order of result values is determined by the
* order they occur in the array. The iteratee is invoked with one argument:
* (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Array} Returns the new duplicate free array.
* @example
*
* _.uniqBy([2.1, 1.2, 2.3], Math.floor);
* // => [2.1, 1.2]
*
* // The `_.property` iteratee shorthand.
* _.uniqBy([{ 'x': 1 }, { 'x': 2 }, { 'x': 1 }], 'x');
* // => [{ 'x': 1 }, { 'x': 2 }]
*/
function uniqBy(array, iteratee) {
return (array && array.length) ? baseUniq(array, getIteratee(iteratee, 2)) : [];
}
/**
* This method is like `_.uniq` except that it accepts `comparator` which
* is invoked to compare elements of `array`. The order of result values is
* determined by the order they occur in the array. The comparator is invoked
* with two arguments: (arrVal, othVal).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {Array} array The array to inspect.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new duplicate free array.
* @example
*
* var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }, { 'x': 1, 'y': 2 }];
*
* _.uniqWith(objects, _.isEqual);
* // => [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }]
*/
function uniqWith(array, comparator) {
comparator = typeof comparator == 'function' ? comparator : undefined;
return (array && array.length) ? baseUniq(array, undefined, comparator) : [];
}
/**
* This method is like `_.zip` except that it accepts an array of grouped
* elements and creates an array regrouping the elements to their pre-zip
* configuration.
*
* @static
* @memberOf _
* @since 1.2.0
* @category Array
* @param {Array} array The array of grouped elements to process.
* @returns {Array} Returns the new array of regrouped elements.
* @example
*
* var zipped = _.zip(['a', 'b'], [1, 2], [true, false]);
* // => [['a', 1, true], ['b', 2, false]]
*
* _.unzip(zipped);
* // => [['a', 'b'], [1, 2], [true, false]]
*/
function unzip(array) {
if (!(array && array.length)) {
return [];
}
var length = 0;
array = arrayFilter(array, function(group) {
if (isArrayLikeObject(group)) {
length = nativeMax(group.length, length);
return true;
}
});
return baseTimes(length, function(index) {
return arrayMap(array, baseProperty(index));
});
}
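/*
 * A small sketch of `unzip` on ragged input, assuming `_` is the built
 * lodash object: shorter groups are padded with `undefined` up to the
 * longest group's length.
 *
 * _.unzip([['a', 1], ['b']]);
 * // => [['a', 'b'], [1, undefined]]
 */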
/**
* This method is like `_.unzip` except that it accepts `iteratee` to specify
* how regrouped values should be combined. The iteratee is invoked with the
* elements of each group: (...group).
*
* @static
* @memberOf _
* @since 3.8.0
* @category Array
* @param {Array} array The array of grouped elements to process.
* @param {Function} [iteratee=_.identity] The function to combine
* regrouped values.
* @returns {Array} Returns the new array of regrouped elements.
* @example
*
* var zipped = _.zip([1, 2], [10, 20], [100, 200]);
* // => [[1, 10, 100], [2, 20, 200]]
*
* _.unzipWith(zipped, _.add);
* // => [3, 30, 300]
*/
function unzipWith(array, iteratee) {
if (!(array && array.length)) {
return [];
}
var result = unzip(array);
if (iteratee == null) {
return result;
}
return arrayMap(result, function(group) {
return apply(iteratee, undefined, group);
});
}
/**
* Creates an array excluding all given values using
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* for equality comparisons.
*
* **Note:** Unlike `_.pull`, this method returns a new array.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {Array} array The array to inspect.
* @param {...*} [values] The values to exclude.
* @returns {Array} Returns the new array of filtered values.
* @see _.difference, _.xor
* @example
*
* _.without([2, 1, 2, 3], 1, 2);
* // => [3]
*/
var without = baseRest(function(array, values) {
return isArrayLikeObject(array)
? baseDifference(array, values)
: [];
});
/**
* Creates an array of unique values that is the
* [symmetric difference](https://en.wikipedia.org/wiki/Symmetric_difference)
* of the given arrays. The order of result values is determined by the order
* they occur in the arrays.
*
* @static
* @memberOf _
* @since 2.4.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @returns {Array} Returns the new array of filtered values.
* @see _.difference, _.without
* @example
*
* _.xor([2, 1], [2, 3]);
* // => [1, 3]
*/
var xor = baseRest(function(arrays) {
return baseXor(arrayFilter(arrays, isArrayLikeObject));
});
/**
* This method is like `_.xor` except that it accepts `iteratee` which is
* invoked for each element of each `arrays` to generate the criterion by
 * which they're compared. The order of result values is determined
* by the order they occur in the arrays. The iteratee is invoked with one
* argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Array} Returns the new array of filtered values.
* @example
*
* _.xorBy([2.1, 1.2], [2.3, 3.4], Math.floor);
* // => [1.2, 3.4]
*
* // The `_.property` iteratee shorthand.
* _.xorBy([{ 'x': 1 }], [{ 'x': 2 }, { 'x': 1 }], 'x');
* // => [{ 'x': 2 }]
*/
var xorBy = baseRest(function(arrays) {
var iteratee = last(arrays);
if (isArrayLikeObject(iteratee)) {
iteratee = undefined;
}
return baseXor(arrayFilter(arrays, isArrayLikeObject), getIteratee(iteratee, 2));
});
/**
* This method is like `_.xor` except that it accepts `comparator` which is
* invoked to compare elements of `arrays`. The order of result values is
* determined by the order they occur in the arrays. The comparator is invoked
* with two arguments: (arrVal, othVal).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Array
* @param {...Array} [arrays] The arrays to inspect.
* @param {Function} [comparator] The comparator invoked per element.
* @returns {Array} Returns the new array of filtered values.
* @example
*
* var objects = [{ 'x': 1, 'y': 2 }, { 'x': 2, 'y': 1 }];
* var others = [{ 'x': 1, 'y': 1 }, { 'x': 1, 'y': 2 }];
*
* _.xorWith(objects, others, _.isEqual);
* // => [{ 'x': 2, 'y': 1 }, { 'x': 1, 'y': 1 }]
*/
var xorWith = baseRest(function(arrays) {
var comparator = last(arrays);
comparator = typeof comparator == 'function' ? comparator : undefined;
return baseXor(arrayFilter(arrays, isArrayLikeObject), undefined, comparator);
});
/**
* Creates an array of grouped elements, the first of which contains the
* first elements of the given arrays, the second of which contains the
* second elements of the given arrays, and so on.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Array
* @param {...Array} [arrays] The arrays to process.
* @returns {Array} Returns the new array of grouped elements.
* @example
*
* _.zip(['a', 'b'], [1, 2], [true, false]);
* // => [['a', 1, true], ['b', 2, false]]
*/
var zip = baseRest(unzip);
/**
* This method is like `_.fromPairs` except that it accepts two arrays,
* one of property identifiers and one of corresponding values.
*
* @static
* @memberOf _
* @since 0.4.0
* @category Array
* @param {Array} [props=[]] The property identifiers.
* @param {Array} [values=[]] The property values.
* @returns {Object} Returns the new object.
* @example
*
* _.zipObject(['a', 'b'], [1, 2]);
* // => { 'a': 1, 'b': 2 }
*/
function zipObject(props, values) {
return baseZipObject(props || [], values || [], assignValue);
}
/**
* This method is like `_.zipObject` except that it supports property paths.
*
* @static
* @memberOf _
* @since 4.1.0
* @category Array
* @param {Array} [props=[]] The property identifiers.
* @param {Array} [values=[]] The property values.
* @returns {Object} Returns the new object.
* @example
*
* _.zipObjectDeep(['a.b[0].c', 'a.b[1].d'], [1, 2]);
* // => { 'a': { 'b': [{ 'c': 1 }, { 'd': 2 }] } }
*/
function zipObjectDeep(props, values) {
return baseZipObject(props || [], values || [], baseSet);
}
/**
* This method is like `_.zip` except that it accepts `iteratee` to specify
* how grouped values should be combined. The iteratee is invoked with the
* elements of each group: (...group).
*
* @static
* @memberOf _
* @since 3.8.0
* @category Array
* @param {...Array} [arrays] The arrays to process.
* @param {Function} [iteratee=_.identity] The function to combine
* grouped values.
* @returns {Array} Returns the new array of grouped elements.
* @example
*
* _.zipWith([1, 2], [10, 20], [100, 200], function(a, b, c) {
* return a + b + c;
* });
* // => [111, 222]
*/
var zipWith = baseRest(function(arrays) {
var length = arrays.length,
iteratee = length > 1 ? arrays[length - 1] : undefined;
iteratee = typeof iteratee == 'function' ? (arrays.pop(), iteratee) : undefined;
return unzipWith(arrays, iteratee);
});
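/*
 * A sketch of the optional-iteratee handling above, assuming `_` is the
 * built lodash object: the last argument is popped and used as the combiner
 * only when it is a function, so omitting it degrades to `_.zip`.
 *
 * _.zipWith([1, 2], [10, 20]);
 * // => [[1, 10], [2, 20]]
 */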
/*------------------------------------------------------------------------*/
/**
* Creates a `lodash` wrapper instance that wraps `value` with explicit method
* chain sequences enabled. The result of such sequences must be unwrapped
* with `_#value`.
*
* @static
* @memberOf _
* @since 1.3.0
* @category Seq
* @param {*} value The value to wrap.
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36 },
* { 'user': 'fred', 'age': 40 },
* { 'user': 'pebbles', 'age': 1 }
* ];
*
* var youngest = _
* .chain(users)
* .sortBy('age')
* .map(function(o) {
* return o.user + ' is ' + o.age;
* })
* .head()
* .value();
* // => 'pebbles is 1'
*/
function chain(value) {
var result = lodash(value);
result.__chain__ = true;
return result;
}
/**
* This method invokes `interceptor` and returns `value`. The interceptor
 * is invoked with one argument: (value). The purpose of this method is to
* "tap into" a method chain sequence in order to modify intermediate results.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Seq
* @param {*} value The value to provide to `interceptor`.
* @param {Function} interceptor The function to invoke.
* @returns {*} Returns `value`.
* @example
*
* _([1, 2, 3])
* .tap(function(array) {
* // Mutate input array.
* array.pop();
* })
* .reverse()
* .value();
* // => [2, 1]
*/
function tap(value, interceptor) {
interceptor(value);
return value;
}
/**
* This method is like `_.tap` except that it returns the result of `interceptor`.
* The purpose of this method is to "pass thru" values replacing intermediate
* results in a method chain sequence.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Seq
* @param {*} value The value to provide to `interceptor`.
* @param {Function} interceptor The function to invoke.
* @returns {*} Returns the result of `interceptor`.
* @example
*
* _(' abc ')
* .chain()
* .trim()
* .thru(function(value) {
* return [value];
* })
* .value();
* // => ['abc']
*/
function thru(value, interceptor) {
return interceptor(value);
}
/**
* This method is the wrapper version of `_.at`.
*
* @name at
* @memberOf _
* @since 1.0.0
* @category Seq
* @param {...(string|string[])} [paths] The property paths to pick.
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 3 } }, 4] };
*
* _(object).at(['a[0].b.c', 'a[1]']).value();
* // => [3, 4]
*/
var wrapperAt = flatRest(function(paths) {
var length = paths.length,
start = length ? paths[0] : 0,
value = this.__wrapped__,
interceptor = function(object) { return baseAt(object, paths); };
if (length > 1 || this.__actions__.length ||
!(value instanceof LazyWrapper) || !isIndex(start)) {
return this.thru(interceptor);
}
value = value.slice(start, +start + (length ? 1 : 0));
value.__actions__.push({
'func': thru,
'args': [interceptor],
'thisArg': undefined
});
return new LodashWrapper(value, this.__chain__).thru(function(array) {
if (length && !array.length) {
array.push(undefined);
}
return array;
});
});
/**
* Creates a `lodash` wrapper instance with explicit method chain sequences enabled.
*
* @name chain
* @memberOf _
* @since 0.1.0
* @category Seq
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36 },
* { 'user': 'fred', 'age': 40 }
* ];
*
* // A sequence without explicit chaining.
* _(users).head();
* // => { 'user': 'barney', 'age': 36 }
*
* // A sequence with explicit chaining.
* _(users)
* .chain()
* .head()
* .pick('user')
* .value();
* // => { 'user': 'barney' }
*/
function wrapperChain() {
return chain(this);
}
/**
* Executes the chain sequence and returns the wrapped result.
*
* @name commit
* @memberOf _
* @since 3.2.0
* @category Seq
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* var array = [1, 2];
* var wrapped = _(array).push(3);
*
* console.log(array);
* // => [1, 2]
*
* wrapped = wrapped.commit();
* console.log(array);
* // => [1, 2, 3]
*
* wrapped.last();
* // => 3
*
* console.log(array);
* // => [1, 2, 3]
*/
function wrapperCommit() {
return new LodashWrapper(this.value(), this.__chain__);
}
/**
* Gets the next value on a wrapped object following the
* [iterator protocol](https://mdn.io/iteration_protocols#iterator).
*
* @name next
* @memberOf _
* @since 4.0.0
* @category Seq
* @returns {Object} Returns the next iterator value.
* @example
*
* var wrapped = _([1, 2]);
*
* wrapped.next();
* // => { 'done': false, 'value': 1 }
*
* wrapped.next();
* // => { 'done': false, 'value': 2 }
*
* wrapped.next();
* // => { 'done': true, 'value': undefined }
*/
function wrapperNext() {
if (this.__values__ === undefined) {
this.__values__ = toArray(this.value());
}
var done = this.__index__ >= this.__values__.length,
value = done ? undefined : this.__values__[this.__index__++];
return { 'done': done, 'value': value };
}
/**
* Enables the wrapper to be iterable.
*
* @name Symbol.iterator
* @memberOf _
* @since 4.0.0
* @category Seq
* @returns {Object} Returns the wrapper object.
* @example
*
* var wrapped = _([1, 2]);
*
* wrapped[Symbol.iterator]() === wrapped;
* // => true
*
* Array.from(wrapped);
* // => [1, 2]
*/
function wrapperToIterator() {
return this;
}
/**
* Creates a clone of the chain sequence planting `value` as the wrapped value.
*
* @name plant
* @memberOf _
* @since 3.2.0
* @category Seq
* @param {*} value The value to plant.
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* function square(n) {
* return n * n;
* }
*
* var wrapped = _([1, 2]).map(square);
* var other = wrapped.plant([3, 4]);
*
* other.value();
* // => [9, 16]
*
* wrapped.value();
* // => [1, 4]
*/
    function wrapperPlant(value) {
      var result,
          previous,
          parent = this;
      while (parent instanceof baseLodash) {
        var clone = wrapperClone(parent);
        clone.__index__ = 0;
        clone.__values__ = undefined;
        if (result) {
          previous.__wrapped__ = clone;
        } else {
          result = clone;
        }
        previous = clone;
        parent = parent.__wrapped__;
      }
      previous.__wrapped__ = value;
      return result;
    }
/**
* This method is the wrapper version of `_.reverse`.
*
* **Note:** This method mutates the wrapped array.
*
* @name reverse
* @memberOf _
* @since 0.1.0
* @category Seq
* @returns {Object} Returns the new `lodash` wrapper instance.
* @example
*
* var array = [1, 2, 3];
*
* _(array).reverse().value()
* // => [3, 2, 1]
*
* console.log(array);
* // => [3, 2, 1]
*/
function wrapperReverse() {
var value = this.__wrapped__;
if (value instanceof LazyWrapper) {
var wrapped = value;
if (this.__actions__.length) {
wrapped = new LazyWrapper(this);
}
wrapped = wrapped.reverse();
wrapped.__actions__.push({
'func': thru,
'args': [reverse],
'thisArg': undefined
});
return new LodashWrapper(wrapped, this.__chain__);
}
return this.thru(reverse);
}
/**
* Executes the chain sequence to resolve the unwrapped value.
*
* @name value
* @memberOf _
* @since 0.1.0
* @alias toJSON, valueOf
* @category Seq
* @returns {*} Returns the resolved unwrapped value.
* @example
*
* _([1, 2, 3]).value();
* // => [1, 2, 3]
*/
function wrapperValue() {
return baseWrapperValue(this.__wrapped__, this.__actions__);
}
/*------------------------------------------------------------------------*/
/**
* Creates an object composed of keys generated from the results of running
* each element of `collection` thru `iteratee`. The corresponding value of
* each key is the number of times the key was returned by `iteratee`. The
* iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 0.5.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee to transform keys.
* @returns {Object} Returns the composed aggregate object.
* @example
*
* _.countBy([6.1, 4.2, 6.3], Math.floor);
* // => { '4': 1, '6': 2 }
*
* // The `_.property` iteratee shorthand.
* _.countBy(['one', 'two', 'three'], 'length');
* // => { '3': 2, '5': 1 }
*/
var countBy = createAggregator(function(result, value, key) {
if (hasOwnProperty.call(result, key)) {
++result[key];
} else {
baseAssignValue(result, key, 1);
}
});
/**
* Checks if `predicate` returns truthy for **all** elements of `collection`.
* Iteration is stopped once `predicate` returns falsey. The predicate is
* invoked with three arguments: (value, index|key, collection).
*
* **Note:** This method returns `true` for
* [empty collections](https://en.wikipedia.org/wiki/Empty_set) because
* [everything is true](https://en.wikipedia.org/wiki/Vacuous_truth) of
* elements of empty collections.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {boolean} Returns `true` if all elements pass the predicate check,
* else `false`.
* @example
*
* _.every([true, 1, null, 'yes'], Boolean);
* // => false
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': false },
* { 'user': 'fred', 'age': 40, 'active': false }
* ];
*
* // The `_.matches` iteratee shorthand.
* _.every(users, { 'user': 'barney', 'active': false });
* // => false
*
* // The `_.matchesProperty` iteratee shorthand.
* _.every(users, ['active', false]);
* // => true
*
* // The `_.property` iteratee shorthand.
* _.every(users, 'active');
* // => false
*/
function every(collection, predicate, guard) {
var func = isArray(collection) ? arrayEvery : baseEvery;
if (guard && isIterateeCall(collection, predicate, guard)) {
predicate = undefined;
}
return func(collection, getIteratee(predicate, 3));
}
/**
* Iterates over elements of `collection`, returning an array of all elements
* `predicate` returns truthy for. The predicate is invoked with three
* arguments: (value, index|key, collection).
*
* **Note:** Unlike `_.remove`, this method returns a new array.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new filtered array.
* @see _.reject
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': true },
* { 'user': 'fred', 'age': 40, 'active': false }
* ];
*
* _.filter(users, function(o) { return !o.active; });
* // => objects for ['fred']
*
* // The `_.matches` iteratee shorthand.
* _.filter(users, { 'age': 36, 'active': true });
* // => objects for ['barney']
*
* // The `_.matchesProperty` iteratee shorthand.
* _.filter(users, ['active', false]);
* // => objects for ['fred']
*
* // The `_.property` iteratee shorthand.
* _.filter(users, 'active');
* // => objects for ['barney']
*
* // Combining several predicates using `_.overEvery` or `_.overSome`.
* _.filter(users, _.overSome([{ 'age': 36 }, ['age', 40]]));
* // => objects for ['fred', 'barney']
*/
function filter(collection, predicate) {
var func = isArray(collection) ? arrayFilter : baseFilter;
return func(collection, getIteratee(predicate, 3));
}
/**
* Iterates over elements of `collection`, returning the first element
* `predicate` returns truthy for. The predicate is invoked with three
* arguments: (value, index|key, collection).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to inspect.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @param {number} [fromIndex=0] The index to search from.
* @returns {*} Returns the matched element, else `undefined`.
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': true },
* { 'user': 'fred', 'age': 40, 'active': false },
* { 'user': 'pebbles', 'age': 1, 'active': true }
* ];
*
* _.find(users, function(o) { return o.age < 40; });
* // => object for 'barney'
*
* // The `_.matches` iteratee shorthand.
* _.find(users, { 'age': 1, 'active': true });
* // => object for 'pebbles'
*
* // The `_.matchesProperty` iteratee shorthand.
* _.find(users, ['active', false]);
* // => object for 'fred'
*
* // The `_.property` iteratee shorthand.
* _.find(users, 'active');
* // => object for 'barney'
*/
var find = createFind(findIndex);
/**
* This method is like `_.find` except that it iterates over elements of
* `collection` from right to left.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Collection
* @param {Array|Object} collection The collection to inspect.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @param {number} [fromIndex=collection.length-1] The index to search from.
* @returns {*} Returns the matched element, else `undefined`.
* @example
*
* _.findLast([1, 2, 3, 4], function(n) {
* return n % 2 == 1;
* });
* // => 3
*/
var findLast = createFind(findLastIndex);
/**
* Creates a flattened array of values by running each element in `collection`
* thru `iteratee` and flattening the mapped results. The iteratee is invoked
* with three arguments: (value, index|key, collection).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new flattened array.
* @example
*
* function duplicate(n) {
* return [n, n];
* }
*
* _.flatMap([1, 2], duplicate);
* // => [1, 1, 2, 2]
*/
function flatMap(collection, iteratee) {
return baseFlatten(map(collection, iteratee), 1);
}
/**
* This method is like `_.flatMap` except that it recursively flattens the
* mapped results.
*
* @static
* @memberOf _
* @since 4.7.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new flattened array.
* @example
*
* function duplicate(n) {
* return [[[n, n]]];
* }
*
* _.flatMapDeep([1, 2], duplicate);
* // => [1, 1, 2, 2]
*/
function flatMapDeep(collection, iteratee) {
return baseFlatten(map(collection, iteratee), INFINITY);
}
/**
* This method is like `_.flatMap` except that it recursively flattens the
* mapped results up to `depth` times.
*
* @static
* @memberOf _
* @since 4.7.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @param {number} [depth=1] The maximum recursion depth.
* @returns {Array} Returns the new flattened array.
* @example
*
* function duplicate(n) {
* return [[[n, n]]];
* }
*
* _.flatMapDepth([1, 2], duplicate, 2);
* // => [[1, 1], [2, 2]]
*/
function flatMapDepth(collection, iteratee, depth) {
depth = depth === undefined ? 1 : toInteger(depth);
return baseFlatten(map(collection, iteratee), depth);
}
/**
* Iterates over elements of `collection` and invokes `iteratee` for each element.
* The iteratee is invoked with three arguments: (value, index|key, collection).
* Iteratee functions may exit iteration early by explicitly returning `false`.
*
* **Note:** As with other "Collections" methods, objects with a "length"
* property are iterated like arrays. To avoid this behavior use `_.forIn`
* or `_.forOwn` for object iteration.
*
* @static
* @memberOf _
* @since 0.1.0
* @alias each
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array|Object} Returns `collection`.
* @see _.forEachRight
* @example
*
* _.forEach([1, 2], function(value) {
* console.log(value);
* });
* // => Logs `1` then `2`.
*
* _.forEach({ 'a': 1, 'b': 2 }, function(value, key) {
* console.log(key);
* });
* // => Logs 'a' then 'b' (iteration order is not guaranteed).
*/
function forEach(collection, iteratee) {
var func = isArray(collection) ? arrayEach : baseEach;
return func(collection, getIteratee(iteratee, 3));
}
/**
* This method is like `_.forEach` except that it iterates over elements of
* `collection` from right to left.
*
* @static
* @memberOf _
* @since 2.0.0
* @alias eachRight
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array|Object} Returns `collection`.
* @see _.forEach
* @example
*
* _.forEachRight([1, 2], function(value) {
* console.log(value);
* });
* // => Logs `2` then `1`.
*/
function forEachRight(collection, iteratee) {
var func = isArray(collection) ? arrayEachRight : baseEachRight;
return func(collection, getIteratee(iteratee, 3));
}
/**
* Creates an object composed of keys generated from the results of running
* each element of `collection` thru `iteratee`. The order of grouped values
* is determined by the order they occur in `collection`. The corresponding
* value of each key is an array of elements responsible for generating the
* key. The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee to transform keys.
* @returns {Object} Returns the composed aggregate object.
* @example
*
* _.groupBy([6.1, 4.2, 6.3], Math.floor);
* // => { '4': [4.2], '6': [6.1, 6.3] }
*
* // The `_.property` iteratee shorthand.
* _.groupBy(['one', 'two', 'three'], 'length');
* // => { '3': ['one', 'two'], '5': ['three'] }
*/
var groupBy = createAggregator(function(result, value, key) {
if (hasOwnProperty.call(result, key)) {
result[key].push(value);
} else {
baseAssignValue(result, key, [value]);
}
});
/**
* Checks if `value` is in `collection`. If `collection` is a string, it's
 * checked for a substring match of `value`, otherwise
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* is used for equality comparisons. If `fromIndex` is negative, it's used as
* the offset from the end of `collection`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object|string} collection The collection to inspect.
* @param {*} value The value to search for.
* @param {number} [fromIndex=0] The index to search from.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.reduce`.
* @returns {boolean} Returns `true` if `value` is found, else `false`.
* @example
*
* _.includes([1, 2, 3], 1);
* // => true
*
* _.includes([1, 2, 3], 1, 2);
* // => false
*
* _.includes({ 'a': 1, 'b': 2 }, 1);
* // => true
*
* _.includes('abcd', 'bc');
* // => true
*/
function includes(collection, value, fromIndex, guard) {
collection = isArrayLike(collection) ? collection : values(collection);
fromIndex = (fromIndex && !guard) ? toInteger(fromIndex) : 0;
var length = collection.length;
if (fromIndex < 0) {
fromIndex = nativeMax(length + fromIndex, 0);
}
return isString(collection)
? (fromIndex <= length && collection.indexOf(value, fromIndex) > -1)
: (!!length && baseIndexOf(collection, value, fromIndex) > -1);
}
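/*
 * A short sketch of the negative `fromIndex` handling above, assuming `_`
 * is the built lodash object: a negative offset is counted back from the
 * end of the collection before searching forward.
 *
 * _.includes([1, 2, 3], 3, -1);
 * // => true
 *
 * _.includes('abcd', 'cd', -2);
 * // => true
 */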
/**
* Invokes the method at `path` of each element in `collection`, returning
* an array of the results of each invoked method. Any additional arguments
* are provided to each invoked method. If `path` is a function, it's invoked
* for, and `this` bound to, each element in `collection`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Array|Function|string} path The path of the method to invoke or
* the function invoked per iteration.
* @param {...*} [args] The arguments to invoke each method with.
* @returns {Array} Returns the array of results.
* @example
*
* _.invokeMap([[5, 1, 7], [3, 2, 1]], 'sort');
* // => [[1, 5, 7], [1, 2, 3]]
*
* _.invokeMap([123, 456], String.prototype.split, '');
* // => [['1', '2', '3'], ['4', '5', '6']]
*/
var invokeMap = baseRest(function(collection, path, args) {
var index = -1,
isFunc = typeof path == 'function',
result = isArrayLike(collection) ? Array(collection.length) : [];
baseEach(collection, function(value) {
result[++index] = isFunc ? apply(path, value, args) : baseInvoke(value, path, args);
});
return result;
});
/**
* Creates an object composed of keys generated from the results of running
* each element of `collection` thru `iteratee`. The corresponding value of
* each key is the last element responsible for generating the key. The
* iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee to transform keys.
* @returns {Object} Returns the composed aggregate object.
* @example
*
* var array = [
* { 'dir': 'left', 'code': 97 },
* { 'dir': 'right', 'code': 100 }
* ];
*
* _.keyBy(array, function(o) {
* return String.fromCharCode(o.code);
* });
* // => { 'a': { 'dir': 'left', 'code': 97 }, 'd': { 'dir': 'right', 'code': 100 } }
*
* _.keyBy(array, 'dir');
* // => { 'left': { 'dir': 'left', 'code': 97 }, 'right': { 'dir': 'right', 'code': 100 } }
*/
var keyBy = createAggregator(function(result, value, key) {
baseAssignValue(result, key, value);
});
/**
* Creates an array of values by running each element in `collection` thru
* `iteratee`. The iteratee is invoked with three arguments:
* (value, index|key, collection).
*
* Many lodash methods are guarded to work as iteratees for methods like
* `_.every`, `_.filter`, `_.map`, `_.mapValues`, `_.reject`, and `_.some`.
*
* The guarded methods are:
* `ary`, `chunk`, `curry`, `curryRight`, `drop`, `dropRight`, `every`,
* `fill`, `invert`, `parseInt`, `random`, `range`, `rangeRight`, `repeat`,
* `sampleSize`, `slice`, `some`, `sortBy`, `split`, `take`, `takeRight`,
* `template`, `trim`, `trimEnd`, `trimStart`, and `words`
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new mapped array.
* @example
*
* function square(n) {
* return n * n;
* }
*
* _.map([4, 8], square);
* // => [16, 64]
*
* _.map({ 'a': 4, 'b': 8 }, square);
* // => [16, 64] (iteration order is not guaranteed)
*
* var users = [
* { 'user': 'barney' },
* { 'user': 'fred' }
* ];
*
* // The `_.property` iteratee shorthand.
* _.map(users, 'user');
* // => ['barney', 'fred']
*/
function map(collection, iteratee) {
var func = isArray(collection) ? arrayMap : baseMap;
return func(collection, getIteratee(iteratee, 3));
}
/**
* This method is like `_.sortBy` except that it allows specifying the sort
* orders of the iteratees to sort by. If `orders` is unspecified, all values
* are sorted in ascending order. Otherwise, specify an order of "desc" for
* descending or "asc" for ascending sort order of corresponding values.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Array[]|Function[]|Object[]|string[]} [iteratees=[_.identity]]
* The iteratees to sort by.
* @param {string[]} [orders] The sort orders of `iteratees`.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.reduce`.
* @returns {Array} Returns the new sorted array.
* @example
*
* var users = [
* { 'user': 'fred', 'age': 48 },
* { 'user': 'barney', 'age': 34 },
* { 'user': 'fred', 'age': 40 },
* { 'user': 'barney', 'age': 36 }
* ];
*
* // Sort by `user` in ascending order and by `age` in descending order.
* _.orderBy(users, ['user', 'age'], ['asc', 'desc']);
* // => objects for [['barney', 36], ['barney', 34], ['fred', 48], ['fred', 40]]
*/
function orderBy(collection, iteratees, orders, guard) {
if (collection == null) {
return [];
}
if (!isArray(iteratees)) {
iteratees = iteratees == null ? [] : [iteratees];
}
orders = guard ? undefined : orders;
if (!isArray(orders)) {
orders = orders == null ? [] : [orders];
}
return baseOrderBy(collection, iteratees, orders);
}
/**
* Creates an array of elements split into two groups, the first of which
* contains elements `predicate` returns truthy for, the second of which
* contains elements `predicate` returns falsey for. The predicate is
* invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 3.0.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the array of grouped elements.
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': false },
* { 'user': 'fred', 'age': 40, 'active': true },
* { 'user': 'pebbles', 'age': 1, 'active': false }
* ];
*
* _.partition(users, function(o) { return o.active; });
* // => objects for [['fred'], ['barney', 'pebbles']]
*
* // The `_.matches` iteratee shorthand.
* _.partition(users, { 'age': 1, 'active': false });
* // => objects for [['pebbles'], ['barney', 'fred']]
*
* // The `_.matchesProperty` iteratee shorthand.
* _.partition(users, ['active', false]);
* // => objects for [['barney', 'pebbles'], ['fred']]
*
* // The `_.property` iteratee shorthand.
* _.partition(users, 'active');
* // => objects for [['fred'], ['barney', 'pebbles']]
*/
var partition = createAggregator(function(result, value, key) {
result[key ? 0 : 1].push(value);
}, function() { return [[], []]; });
/**
* Reduces `collection` to a value which is the accumulated result of running
* each element in `collection` thru `iteratee`, where each successive
* invocation is supplied the return value of the previous. If `accumulator`
* is not given, the first element of `collection` is used as the initial
* value. The iteratee is invoked with four arguments:
* (accumulator, value, index|key, collection).
*
* Many lodash methods are guarded to work as iteratees for methods like
* `_.reduce`, `_.reduceRight`, and `_.transform`.
*
* The guarded methods are:
* `assign`, `defaults`, `defaultsDeep`, `includes`, `merge`, `orderBy`,
* and `sortBy`
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @param {*} [accumulator] The initial value.
* @returns {*} Returns the accumulated value.
* @see _.reduceRight
* @example
*
* _.reduce([1, 2], function(sum, n) {
* return sum + n;
* }, 0);
* // => 3
*
* _.reduce({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) {
* (result[value] || (result[value] = [])).push(key);
* return result;
* }, {});
* // => { '1': ['a', 'c'], '2': ['b'] } (iteration order is not guaranteed)
*/
function reduce(collection, iteratee, accumulator) {
var func = isArray(collection) ? arrayReduce : baseReduce,
initAccum = arguments.length < 3;
return func(collection, getIteratee(iteratee, 4), accumulator, initAccum, baseEach);
}
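/*
 * A minimal sketch of the implicit-accumulator case above, assuming `_` is
 * the built lodash object: with fewer than three arguments, the first
 * element seeds the accumulator and iteration starts at the second.
 *
 * _.reduce([1, 5, 3], function(max, n) { return n > max ? n : max; });
 * // => 5
 */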
/**
* This method is like `_.reduce` except that it iterates over elements of
* `collection` from right to left.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @param {*} [accumulator] The initial value.
* @returns {*} Returns the accumulated value.
* @see _.reduce
* @example
*
* var array = [[0, 1], [2, 3], [4, 5]];
*
* _.reduceRight(array, function(flattened, other) {
* return flattened.concat(other);
* }, []);
* // => [4, 5, 2, 3, 0, 1]
*/
function reduceRight(collection, iteratee, accumulator) {
var func = isArray(collection) ? arrayReduceRight : baseReduce,
initAccum = arguments.length < 3;
return func(collection, getIteratee(iteratee, 4), accumulator, initAccum, baseEachRight);
}
/**
* The opposite of `_.filter`; this method returns the elements of `collection`
* that `predicate` does **not** return truthy for.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {Array} Returns the new filtered array.
* @see _.filter
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': false },
* { 'user': 'fred', 'age': 40, 'active': true }
* ];
*
* _.reject(users, function(o) { return !o.active; });
* // => objects for ['fred']
*
* // The `_.matches` iteratee shorthand.
* _.reject(users, { 'age': 40, 'active': true });
* // => objects for ['barney']
*
* // The `_.matchesProperty` iteratee shorthand.
* _.reject(users, ['active', false]);
* // => objects for ['fred']
*
* // The `_.property` iteratee shorthand.
* _.reject(users, 'active');
* // => objects for ['barney']
*/
function reject(collection, predicate) {
var func = isArray(collection) ? arrayFilter : baseFilter;
return func(collection, negate(getIteratee(predicate, 3)));
}
/**
* Gets a random element from `collection`.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Collection
* @param {Array|Object} collection The collection to sample.
* @returns {*} Returns the random element.
* @example
*
* _.sample([1, 2, 3, 4]);
* // => 2
*/
function sample(collection) {
var func = isArray(collection) ? arraySample : baseSample;
return func(collection);
}
/**
* Gets `n` random elements at unique keys from `collection` up to the
* size of `collection`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Collection
* @param {Array|Object} collection The collection to sample.
* @param {number} [n=1] The number of elements to sample.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Array} Returns the random elements.
* @example
*
* _.sampleSize([1, 2, 3], 2);
* // => [3, 1]
*
* _.sampleSize([1, 2, 3], 4);
* // => [2, 3, 1]
*/
function sampleSize(collection, n, guard) {
if ((guard ? isIterateeCall(collection, n, guard) : n === undefined)) {
n = 1;
} else {
n = toInteger(n);
}
var func = isArray(collection) ? arraySampleSize : baseSampleSize;
return func(collection, n);
}
/**
* Creates an array of shuffled values, using a version of the
* [Fisher-Yates shuffle](https://en.wikipedia.org/wiki/Fisher-Yates_shuffle).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to shuffle.
* @returns {Array} Returns the new shuffled array.
* @example
*
* _.shuffle([1, 2, 3, 4]);
* // => [4, 1, 3, 2]
*/
function shuffle(collection) {
var func = isArray(collection) ? arrayShuffle : baseShuffle;
return func(collection);
}
/**
* Gets the size of `collection` by returning its length for array-like
* values or the number of own enumerable string keyed properties for objects.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object|string} collection The collection to inspect.
* @returns {number} Returns the collection size.
* @example
*
* _.size([1, 2, 3]);
* // => 3
*
* _.size({ 'a': 1, 'b': 2 });
* // => 2
*
* _.size('pebbles');
* // => 7
*/
function size(collection) {
if (collection == null) {
return 0;
}
if (isArrayLike(collection)) {
return isString(collection) ? stringSize(collection) : collection.length;
}
var tag = getTag(collection);
if (tag == mapTag || tag == setTag) {
return collection.size;
}
return baseKeys(collection).length;
}
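/*
 * A brief sketch of the tag-based branch above, assuming `_` is the built
 * lodash object and the environment provides a native `Map`/`Set`: their
 * `size` property is returned directly instead of counting keys.
 *
 * _.size(new Map([['a', 1], ['b', 2]]));
 * // => 2
 */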
/**
* Checks if `predicate` returns truthy for **any** element of `collection`.
* Iteration is stopped once `predicate` returns truthy. The predicate is
* invoked with three arguments: (value, index|key, collection).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {boolean} Returns `true` if any element passes the predicate check,
* else `false`.
* @example
*
* _.some([null, 0, 'yes', false], Boolean);
* // => true
*
* var users = [
* { 'user': 'barney', 'active': true },
* { 'user': 'fred', 'active': false }
* ];
*
* // The `_.matches` iteratee shorthand.
* _.some(users, { 'user': 'barney', 'active': false });
* // => false
*
* // The `_.matchesProperty` iteratee shorthand.
* _.some(users, ['active', false]);
* // => true
*
* // The `_.property` iteratee shorthand.
* _.some(users, 'active');
* // => true
*/
function some(collection, predicate, guard) {
var func = isArray(collection) ? arraySome : baseSome;
if (guard && isIterateeCall(collection, predicate, guard)) {
predicate = undefined;
}
return func(collection, getIteratee(predicate, 3));
}
/**
* Creates an array of elements, sorted in ascending order by the results of
* running each element in a collection thru each iteratee. This method
* performs a stable sort, that is, it preserves the original sort order of
* equal elements. The iteratees are invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Collection
* @param {Array|Object} collection The collection to iterate over.
* @param {...(Function|Function[])} [iteratees=[_.identity]]
* The iteratees to sort by.
* @returns {Array} Returns the new sorted array.
* @example
*
* var users = [
* { 'user': 'fred', 'age': 48 },
* { 'user': 'barney', 'age': 36 },
* { 'user': 'fred', 'age': 30 },
* { 'user': 'barney', 'age': 34 }
* ];
*
* _.sortBy(users, [function(o) { return o.user; }]);
* // => objects for [['barney', 36], ['barney', 34], ['fred', 48], ['fred', 30]]
*
* _.sortBy(users, ['user', 'age']);
* // => objects for [['barney', 34], ['barney', 36], ['fred', 30], ['fred', 48]]
*/
var sortBy = baseRest(function(collection, iteratees) {
if (collection == null) {
return [];
}
var length = iteratees.length;
if (length > 1 && isIterateeCall(collection, iteratees[0], iteratees[1])) {
iteratees = [];
} else if (length > 2 && isIterateeCall(iteratees[0], iteratees[1], iteratees[2])) {
iteratees = [iteratees[0]];
}
return baseOrderBy(collection, baseFlatten(iteratees, 1), []);
});
/*------------------------------------------------------------------------*/
/**
     * Gets the number of milliseconds that have elapsed since
     * the Unix epoch (1 January 1970 00:00:00 UTC).
*
* @static
* @memberOf _
* @since 2.4.0
* @category Date
* @returns {number} Returns the timestamp.
* @example
*
* _.defer(function(stamp) {
* console.log(_.now() - stamp);
* }, _.now());
* // => Logs the number of milliseconds it took for the deferred invocation.
*/
var now = ctxNow || function() {
return root.Date.now();
};
/*------------------------------------------------------------------------*/
/**
* The opposite of `_.before`; this method creates a function that invokes
* `func` once it's called `n` or more times.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {number} n The number of calls before `func` is invoked.
* @param {Function} func The function to restrict.
* @returns {Function} Returns the new restricted function.
* @example
*
* var saves = ['profile', 'settings'];
*
* var done = _.after(saves.length, function() {
* console.log('done saving!');
* });
*
* _.forEach(saves, function(type) {
* asyncSave({ 'type': type, 'complete': done });
* });
* // => Logs 'done saving!' after the two async saves have completed.
*/
function after(n, func) {
if (typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
n = toInteger(n);
return function() {
if (--n < 1) {
return func.apply(this, arguments);
}
};
}
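/*
 * A small usage sketch, assuming `_` is the built lodash object: the
 * returned function stays inert until the nth call, then invokes `func` on
 * that call and every call after it.
 *
 * var ready = _.after(2, function() { return 'go'; });
 * ready(); // => undefined
 * ready(); // => 'go'
 * ready(); // => 'go'
 */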
/**
* Creates a function that invokes `func`, with up to `n` arguments,
* ignoring any additional arguments.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Function
* @param {Function} func The function to cap arguments for.
* @param {number} [n=func.length] The arity cap.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Function} Returns the new capped function.
* @example
*
* _.map(['6', '8', '10'], _.ary(parseInt, 1));
* // => [6, 8, 10]
*/
function ary(func, n, guard) {
n = guard ? undefined : n;
n = (func && n == null) ? func.length : n;
return createWrap(func, WRAP_ARY_FLAG, undefined, undefined, undefined, undefined, n);
}
/**
* Creates a function that invokes `func`, with the `this` binding and arguments
* of the created function, while it's called less than `n` times. Subsequent
* calls to the created function return the result of the last `func` invocation.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Function
* @param {number} n The number of calls at which `func` is no longer invoked.
* @param {Function} func The function to restrict.
* @returns {Function} Returns the new restricted function.
* @example
*
* jQuery(element).on('click', _.before(5, addContactToList));
* // => Allows adding up to 4 contacts to the list.
*/
function before(n, func) {
var result;
if (typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
n = toInteger(n);
return function() {
if (--n > 0) {
result = func.apply(this, arguments);
}
if (n <= 1) {
func = undefined;
}
return result;
};
}
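/*
 * A complementary sketch for `_.before`, assuming `_` is the built lodash
 * object: `func` runs at most `n - 1` times, after which the cached result
 * is returned and `func` is released for garbage collection.
 *
 * var twice = _.before(3, function() { return Math.random(); });
 * var a = twice(), b = twice(), c = twice();
 * // `a` and `b` come from fresh invocations; `c === b` without invoking.
 */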
/**
* Creates a function that invokes `func` with the `this` binding of `thisArg`
* and `partials` prepended to the arguments it receives.
*
* The `_.bind.placeholder` value, which defaults to `_` in monolithic builds,
* may be used as a placeholder for partially applied arguments.
*
* **Note:** Unlike native `Function#bind`, this method doesn't set the "length"
* property of bound functions.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to bind.
* @param {*} thisArg The `this` binding of `func`.
* @param {...*} [partials] The arguments to be partially applied.
* @returns {Function} Returns the new bound function.
* @example
*
* function greet(greeting, punctuation) {
* return greeting + ' ' + this.user + punctuation;
* }
*
* var object = { 'user': 'fred' };
*
* var bound = _.bind(greet, object, 'hi');
* bound('!');
* // => 'hi fred!'
*
* // Bound with placeholders.
* var bound = _.bind(greet, object, _, '!');
* bound('hi');
* // => 'hi fred!'
*/
var bind = baseRest(function(func, thisArg, partials) {
var bitmask = WRAP_BIND_FLAG;
if (partials.length) {
var holders = replaceHolders(partials, getHolder(bind));
bitmask |= WRAP_PARTIAL_FLAG;
}
return createWrap(func, bitmask, thisArg, partials, holders);
});
/**
* Creates a function that invokes the method at `object[key]` with `partials`
* prepended to the arguments it receives.
*
* This method differs from `_.bind` by allowing bound functions to reference
* methods that may be redefined or don't yet exist. See
* [Peter Michaux's article](http://peter.michaux.ca/articles/lazy-function-definition-pattern)
* for more details.
*
* The `_.bindKey.placeholder` value, which defaults to `_` in monolithic
* builds, may be used as a placeholder for partially applied arguments.
*
* @static
* @memberOf _
* @since 0.10.0
* @category Function
* @param {Object} object The object to invoke the method on.
* @param {string} key The key of the method.
* @param {...*} [partials] The arguments to be partially applied.
* @returns {Function} Returns the new bound function.
* @example
*
* var object = {
* 'user': 'fred',
* 'greet': function(greeting, punctuation) {
* return greeting + ' ' + this.user + punctuation;
* }
* };
*
* var bound = _.bindKey(object, 'greet', 'hi');
* bound('!');
* // => 'hi fred!'
*
* object.greet = function(greeting, punctuation) {
* return greeting + 'ya ' + this.user + punctuation;
* };
*
* bound('!');
* // => 'hiya fred!'
*
* // Bound with placeholders.
* var bound = _.bindKey(object, 'greet', _, '!');
* bound('hi');
* // => 'hiya fred!'
*/
var bindKey = baseRest(function(object, key, partials) {
var bitmask = WRAP_BIND_FLAG | WRAP_BIND_KEY_FLAG;
if (partials.length) {
var holders = replaceHolders(partials, getHolder(bindKey));
bitmask |= WRAP_PARTIAL_FLAG;
}
return createWrap(key, bitmask, object, partials, holders);
});
/**
* Creates a function that accepts arguments of `func` and either invokes
* `func` returning its result, if at least `arity` number of arguments have
* been provided, or returns a function that accepts the remaining `func`
* arguments, and so on. The arity of `func` may be specified if `func.length`
* is not sufficient.
*
* The `_.curry.placeholder` value, which defaults to `_` in monolithic builds,
* may be used as a placeholder for provided arguments.
*
* **Note:** This method doesn't set the "length" property of curried functions.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Function
* @param {Function} func The function to curry.
* @param {number} [arity=func.length] The arity of `func`.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Function} Returns the new curried function.
* @example
*
* var abc = function(a, b, c) {
* return [a, b, c];
* };
*
* var curried = _.curry(abc);
*
* curried(1)(2)(3);
* // => [1, 2, 3]
*
* curried(1, 2)(3);
* // => [1, 2, 3]
*
* curried(1, 2, 3);
* // => [1, 2, 3]
*
* // Curried with placeholders.
* curried(1)(_, 3)(2);
* // => [1, 2, 3]
*/
function curry(func, arity, guard) {
arity = guard ? undefined : arity;
var result = createWrap(func, WRAP_CURRY_FLAG, undefined, undefined, undefined, undefined, undefined, arity);
result.placeholder = curry.placeholder;
return result;
}
/**
* This method is like `_.curry` except that arguments are applied to `func`
* in the manner of `_.partialRight` instead of `_.partial`.
*
* The `_.curryRight.placeholder` value, which defaults to `_` in monolithic
* builds, may be used as a placeholder for provided arguments.
*
* **Note:** This method doesn't set the "length" property of curried functions.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Function
* @param {Function} func The function to curry.
* @param {number} [arity=func.length] The arity of `func`.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Function} Returns the new curried function.
* @example
*
* var abc = function(a, b, c) {
* return [a, b, c];
* };
*
* var curried = _.curryRight(abc);
*
* curried(3)(2)(1);
* // => [1, 2, 3]
*
* curried(2, 3)(1);
* // => [1, 2, 3]
*
* curried(1, 2, 3);
* // => [1, 2, 3]
*
* // Curried with placeholders.
* curried(3)(1, _)(2);
* // => [1, 2, 3]
*/
function curryRight(func, arity, guard) {
arity = guard ? undefined : arity;
var result = createWrap(func, WRAP_CURRY_RIGHT_FLAG, undefined, undefined, undefined, undefined, undefined, arity);
result.placeholder = curryRight.placeholder;
return result;
}
/**
* Creates a debounced function that delays invoking `func` until after `wait`
* milliseconds have elapsed since the last time the debounced function was
* invoked. The debounced function comes with a `cancel` method to cancel
* delayed `func` invocations and a `flush` method to immediately invoke them.
* Provide `options` to indicate whether `func` should be invoked on the
* leading and/or trailing edge of the `wait` timeout. The `func` is invoked
* with the last arguments provided to the debounced function. Subsequent
* calls to the debounced function return the result of the last `func`
* invocation.
*
* **Note:** If `leading` and `trailing` options are `true`, `func` is
* invoked on the trailing edge of the timeout only if the debounced function
* is invoked more than once during the `wait` timeout.
*
* If `wait` is `0` and `leading` is `false`, `func` invocation is deferred
     * until the next tick, similar to `setTimeout` with a timeout of `0`.
*
* See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/)
* for details over the differences between `_.debounce` and `_.throttle`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to debounce.
* @param {number} [wait=0] The number of milliseconds to delay.
* @param {Object} [options={}] The options object.
* @param {boolean} [options.leading=false]
* Specify invoking on the leading edge of the timeout.
* @param {number} [options.maxWait]
* The maximum time `func` is allowed to be delayed before it's invoked.
* @param {boolean} [options.trailing=true]
* Specify invoking on the trailing edge of the timeout.
* @returns {Function} Returns the new debounced function.
* @example
*
* // Avoid costly calculations while the window size is in flux.
* jQuery(window).on('resize', _.debounce(calculateLayout, 150));
*
* // Invoke `sendMail` when clicked, debouncing subsequent calls.
* jQuery(element).on('click', _.debounce(sendMail, 300, {
* 'leading': true,
* 'trailing': false
* }));
*
* // Ensure `batchLog` is invoked once after 1 second of debounced calls.
* var debounced = _.debounce(batchLog, 250, { 'maxWait': 1000 });
* var source = new EventSource('/stream');
* jQuery(source).on('message', debounced);
*
* // Cancel the trailing debounced invocation.
* jQuery(window).on('popstate', debounced.cancel);
*/
function debounce(func, wait, options) {
var lastArgs,
lastThis,
maxWait,
result,
timerId,
lastCallTime,
lastInvokeTime = 0,
leading = false,
maxing = false,
trailing = true;
if (typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
wait = toNumber(wait) || 0;
if (isObject(options)) {
leading = !!options.leading;
maxing = 'maxWait' in options;
maxWait = maxing ? nativeMax(toNumber(options.maxWait) || 0, wait) : maxWait;
trailing = 'trailing' in options ? !!options.trailing : trailing;
}
function invokeFunc(time) {
var args = lastArgs,
thisArg = lastThis;
lastArgs = lastThis = undefined;
lastInvokeTime = time;
result = func.apply(thisArg, args);
return result;
}
function leadingEdge(time) {
// Reset any `maxWait` timer.
lastInvokeTime = time;
// Start the timer for the trailing edge.
timerId = setTimeout(timerExpired, wait);
// Invoke the leading edge.
return leading ? invokeFunc(time) : result;
}
function remainingWait(time) {
var timeSinceLastCall = time - lastCallTime,
timeSinceLastInvoke = time - lastInvokeTime,
timeWaiting = wait - timeSinceLastCall;
return maxing
? nativeMin(timeWaiting, maxWait - timeSinceLastInvoke)
: timeWaiting;
}
function shouldInvoke(time) {
var timeSinceLastCall = time - lastCallTime,
timeSinceLastInvoke = time - lastInvokeTime;
// Either this is the first call, activity has stopped and we're at the
// trailing edge, the system time has gone backwards and we're treating
// it as the trailing edge, or we've hit the `maxWait` limit.
return (lastCallTime === undefined || (timeSinceLastCall >= wait) ||
(timeSinceLastCall < 0) || (maxing && timeSinceLastInvoke >= maxWait));
}
function timerExpired() {
var time = now();
if (shouldInvoke(time)) {
return trailingEdge(time);
}
// Restart the timer.
timerId = setTimeout(timerExpired, remainingWait(time));
}
function trailingEdge(time) {
timerId = undefined;
// Only invoke if we have `lastArgs` which means `func` has been
// debounced at least once.
if (trailing && lastArgs) {
return invokeFunc(time);
}
lastArgs = lastThis = undefined;
return result;
}
function cancel() {
if (timerId !== undefined) {
clearTimeout(timerId);
}
lastInvokeTime = 0;
lastArgs = lastCallTime = lastThis = timerId = undefined;
}
function flush() {
return timerId === undefined ? result : trailingEdge(now());
}
function debounced() {
var time = now(),
isInvoking = shouldInvoke(time);
lastArgs = arguments;
lastThis = this;
lastCallTime = time;
if (isInvoking) {
if (timerId === undefined) {
return leadingEdge(lastCallTime);
}
if (maxing) {
// Handle invocations in a tight loop.
clearTimeout(timerId);
timerId = setTimeout(timerExpired, wait);
return invokeFunc(lastCallTime);
}
}
if (timerId === undefined) {
timerId = setTimeout(timerExpired, wait);
}
return result;
}
debounced.cancel = cancel;
debounced.flush = flush;
return debounced;
}
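/*
 * An illustrative timing sketch, assuming `_` is the built lodash object
 * and `persist` is the caller's own save routine: with calls arriving
 * faster than `wait`, the trailing invocation keeps being pushed back,
 * while `maxWait` forces an invocation at least once per 200ms.
 *
 * var save = _.debounce(persist, 100, { 'maxWait': 200 });
 * // Calls at t = 0, 50, 100, 150, ... invoke `persist` at roughly
 * // t = 200, 400, ... rather than never settling.
 */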
/**
* Defers invoking the `func` until the current call stack has cleared. Any
* additional arguments are provided to `func` when it's invoked.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to defer.
* @param {...*} [args] The arguments to invoke `func` with.
* @returns {number} Returns the timer id.
* @example
*
* _.defer(function(text) {
* console.log(text);
* }, 'deferred');
* // => Logs 'deferred' after one millisecond.
*/
var defer = baseRest(function(func, args) {
return baseDelay(func, 1, args);
});
/**
* Invokes `func` after `wait` milliseconds. Any additional arguments are
* provided to `func` when it's invoked.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to delay.
* @param {number} wait The number of milliseconds to delay invocation.
* @param {...*} [args] The arguments to invoke `func` with.
* @returns {number} Returns the timer id.
* @example
*
* _.delay(function(text) {
* console.log(text);
* }, 1000, 'later');
* // => Logs 'later' after one second.
*/
var delay = baseRest(function(func, wait, args) {
return baseDelay(func, toNumber(wait) || 0, args);
});
/**
* Creates a function that invokes `func` with arguments reversed.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Function
* @param {Function} func The function to flip arguments for.
* @returns {Function} Returns the new flipped function.
* @example
*
* var flipped = _.flip(function() {
* return _.toArray(arguments);
* });
*
* flipped('a', 'b', 'c', 'd');
* // => ['d', 'c', 'b', 'a']
*/
function flip(func) {
return createWrap(func, WRAP_FLIP_FLAG);
}
/**
* Creates a function that memoizes the result of `func`. If `resolver` is
* provided, it determines the cache key for storing the result based on the
* arguments provided to the memoized function. By default, the first argument
* provided to the memoized function is used as the map cache key. The `func`
* is invoked with the `this` binding of the memoized function.
*
* **Note:** The cache is exposed as the `cache` property on the memoized
* function. Its creation may be customized by replacing the `_.memoize.Cache`
* constructor with one whose instances implement the
* [`Map`](http://ecma-international.org/ecma-262/7.0/#sec-properties-of-the-map-prototype-object)
* method interface of `clear`, `delete`, `get`, `has`, and `set`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to have its output memoized.
* @param {Function} [resolver] The function to resolve the cache key.
* @returns {Function} Returns the new memoized function.
* @example
*
* var object = { 'a': 1, 'b': 2 };
* var other = { 'c': 3, 'd': 4 };
*
* var values = _.memoize(_.values);
* values(object);
* // => [1, 2]
*
* values(other);
* // => [3, 4]
*
* object.a = 2;
* values(object);
* // => [1, 2]
*
* // Modify the result cache.
* values.cache.set(object, ['a', 'b']);
* values(object);
* // => ['a', 'b']
*
* // Replace `_.memoize.Cache`.
* _.memoize.Cache = WeakMap;
*/
function memoize(func, resolver) {
if (typeof func != 'function' || (resolver != null && typeof resolver != 'function')) {
throw new TypeError(FUNC_ERROR_TEXT);
}
var memoized = function() {
var args = arguments,
key = resolver ? resolver.apply(this, args) : args[0],
cache = memoized.cache;
if (cache.has(key)) {
return cache.get(key);
}
var result = func.apply(this, args);
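// Reassign `cache` in case a custom `Cache#set` returns a new cache rather
// than the mutated one; `|| cache` guards against a `set` that returns nothing.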
memoized.cache = cache.set(key, result) || cache;
return result;
};
memoized.cache = new (memoize.Cache || MapCache);
return memoized;
}
// Expose `MapCache`.
memoize.Cache = MapCache;
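// A sketch (not in the original source) of using `resolver` to key the cache
// on more than the first argument, which is the default behavior:
//
//   var add = function(a, b) { return a + b; };
//   var memoAdd = memoize(add, function(a, b) { return a + ':' + b; });
//   memoAdd(1, 2); // => 3, cached under '1:2' rather than just `1`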
/**
* Creates a function that negates the result of the predicate `func`. The
* `func` predicate is invoked with the `this` binding and arguments of the
* created function.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Function
* @param {Function} predicate The predicate to negate.
* @returns {Function} Returns the new negated function.
* @example
*
* function isEven(n) {
* return n % 2 == 0;
* }
*
* _.filter([1, 2, 3, 4, 5, 6], _.negate(isEven));
* // => [1, 3, 5]
*/
function negate(predicate) {
if (typeof predicate != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
return function() {
var args = arguments;
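// Dispatch on arity with `call` for the common cases, presumably to avoid
// the overhead of `apply`.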
switch (args.length) {
case 0: return !predicate.call(this);
case 1: return !predicate.call(this, args[0]);
case 2: return !predicate.call(this, args[0], args[1]);
case 3: return !predicate.call(this, args[0], args[1], args[2]);
}
return !predicate.apply(this, args);
};
}
/**
* Creates a function that is restricted to invoking `func` once. Repeat calls
* to the function return the value of the first invocation. The `func` is
* invoked with the `this` binding and arguments of the created function.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to restrict.
* @returns {Function} Returns the new restricted function.
* @example
*
* var initialize = _.once(createApplication);
* initialize();
* initialize();
* // => `createApplication` is invoked once
*/
function once(func) {
return before(2, func);
}
/**
* Creates a function that invokes `func` with its arguments transformed.
*
* @static
* @since 4.0.0
* @memberOf _
* @category Function
* @param {Function} func The function to wrap.
* @param {...(Function|Function[])} [transforms=[_.identity]]
* The argument transforms.
* @returns {Function} Returns the new function.
* @example
*
* function doubled(n) {
* return n * 2;
* }
*
* function square(n) {
* return n * n;
* }
*
* var func = _.overArgs(function(x, y) {
* return [x, y];
* }, [square, doubled]);
*
* func(9, 3);
* // => [81, 6]
*
* func(10, 5);
* // => [100, 10]
*/
var overArgs = castRest(function(func, transforms) {
transforms = (transforms.length == 1 && isArray(transforms[0]))
? arrayMap(transforms[0], baseUnary(getIteratee()))
: arrayMap(baseFlatten(transforms, 1), baseUnary(getIteratee()));
var funcsLength = transforms.length;
return baseRest(function(args) {
var index = -1,
length = nativeMin(args.length, funcsLength);
while (++index < length) {
args[index] = transforms[index].call(this, args[index]);
}
return apply(func, this, args);
});
});
/**
* Creates a function that invokes `func` with `partials` prepended to the
* arguments it receives. This method is like `_.bind` except it does **not**
* alter the `this` binding.
*
* The `_.partial.placeholder` value, which defaults to `_` in monolithic
* builds, may be used as a placeholder for partially applied arguments.
*
* **Note:** This method doesn't set the "length" property of partially
* applied functions.
*
* @static
* @memberOf _
* @since 0.2.0
* @category Function
* @param {Function} func The function to partially apply arguments to.
* @param {...*} [partials] The arguments to be partially applied.
* @returns {Function} Returns the new partially applied function.
* @example
*
* function greet(greeting, name) {
* return greeting + ' ' + name;
* }
*
* var sayHelloTo = _.partial(greet, 'hello');
* sayHelloTo('fred');
* // => 'hello fred'
*
* // Partially applied with placeholders.
* var greetFred = _.partial(greet, _, 'fred');
* greetFred('hi');
* // => 'hi fred'
*/
var partial = baseRest(function(func, partials) {
var holders = replaceHolders(partials, getHolder(partial));
return createWrap(func, WRAP_PARTIAL_FLAG, undefined, partials, holders);
});
/**
* This method is like `_.partial` except that partially applied arguments
* are appended to the arguments it receives.
*
* The `_.partialRight.placeholder` value, which defaults to `_` in monolithic
* builds, may be used as a placeholder for partially applied arguments.
*
* **Note:** This method doesn't set the "length" property of partially
* applied functions.
*
* @static
* @memberOf _
* @since 1.0.0
* @category Function
* @param {Function} func The function to partially apply arguments to.
* @param {...*} [partials] The arguments to be partially applied.
* @returns {Function} Returns the new partially applied function.
* @example
*
* function greet(greeting, name) {
* return greeting + ' ' + name;
* }
*
* var greetFred = _.partialRight(greet, 'fred');
* greetFred('hi');
* // => 'hi fred'
*
* // Partially applied with placeholders.
* var sayHelloTo = _.partialRight(greet, 'hello', _);
* sayHelloTo('fred');
* // => 'hello fred'
*/
var partialRight = baseRest(function(func, partials) {
var holders = replaceHolders(partials, getHolder(partialRight));
return createWrap(func, WRAP_PARTIAL_RIGHT_FLAG, undefined, partials, holders);
});
/**
* Creates a function that invokes `func` with arguments arranged according
* to the specified `indexes` where the argument value at the first index is
* provided as the first argument, the argument value at the second index is
* provided as the second argument, and so on.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Function
* @param {Function} func The function to rearrange arguments for.
* @param {...(number|number[])} indexes The arranged argument indexes.
* @returns {Function} Returns the new function.
* @example
*
* var rearged = _.rearg(function(a, b, c) {
* return [a, b, c];
* }, [2, 0, 1]);
*
 * rearged('b', 'c', 'a');
* // => ['a', 'b', 'c']
*/
var rearg = flatRest(function(func, indexes) {
return createWrap(func, WRAP_REARG_FLAG, undefined, undefined, undefined, indexes);
});
/**
* Creates a function that invokes `func` with the `this` binding of the
* created function and arguments from `start` and beyond provided as
* an array.
*
* **Note:** This method is based on the
* [rest parameter](https://mdn.io/rest_parameters).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Function
* @param {Function} func The function to apply a rest parameter to.
* @param {number} [start=func.length-1] The start position of the rest parameter.
* @returns {Function} Returns the new function.
* @example
*
* var say = _.rest(function(what, names) {
* return what + ' ' + _.initial(names).join(', ') +
* (_.size(names) > 1 ? ', & ' : '') + _.last(names);
* });
*
* say('hello', 'fred', 'barney', 'pebbles');
* // => 'hello fred, barney, & pebbles'
*/
function rest(func, start) {
if (typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
start = start === undefined ? start : toInteger(start);
return baseRest(func, start);
}
/**
* Creates a function that invokes `func` with the `this` binding of the
 * created function and an array of arguments, much like
* [`Function#apply`](http://www.ecma-international.org/ecma-262/7.0/#sec-function.prototype.apply).
*
* **Note:** This method is based on the
* [spread operator](https://mdn.io/spread_operator).
*
* @static
* @memberOf _
* @since 3.2.0
* @category Function
* @param {Function} func The function to spread arguments over.
* @param {number} [start=0] The start position of the spread.
* @returns {Function} Returns the new function.
* @example
*
* var say = _.spread(function(who, what) {
* return who + ' says ' + what;
* });
*
* say(['fred', 'hello']);
* // => 'fred says hello'
*
* var numbers = Promise.all([
* Promise.resolve(40),
* Promise.resolve(36)
* ]);
*
* numbers.then(_.spread(function(x, y) {
* return x + y;
* }));
* // => a Promise of 76
*/
function spread(func, start) {
if (typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
start = start == null ? 0 : nativeMax(toInteger(start), 0);
return baseRest(function(args) {
var array = args[start],
otherArgs = castSlice(args, 0, start);
if (array) {
arrayPush(otherArgs, array);
}
return apply(func, this, otherArgs);
});
}
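// A sketch (not in the original source) of the `start` parameter: arguments
// before `start` pass through as-is and the array at `start` is spread.
//
//   var log = spread(function(level, msg, code) {
//     return level + ': ' + msg + ' (' + code + ')';
//   }, 1);
//   log('warn', ['disk full', 507]); // => 'warn: disk full (507)'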
/**
* Creates a throttled function that only invokes `func` at most once per
* every `wait` milliseconds. The throttled function comes with a `cancel`
* method to cancel delayed `func` invocations and a `flush` method to
* immediately invoke them. Provide `options` to indicate whether `func`
* should be invoked on the leading and/or trailing edge of the `wait`
* timeout. The `func` is invoked with the last arguments provided to the
* throttled function. Subsequent calls to the throttled function return the
* result of the last `func` invocation.
*
* **Note:** If `leading` and `trailing` options are `true`, `func` is
* invoked on the trailing edge of the timeout only if the throttled function
* is invoked more than once during the `wait` timeout.
*
* If `wait` is `0` and `leading` is `false`, `func` invocation is deferred
 * until the next tick, similar to `setTimeout` with a timeout of `0`.
*
* See [David Corbacho's article](https://css-tricks.com/debouncing-throttling-explained-examples/)
 * for details on the differences between `_.throttle` and `_.debounce`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {Function} func The function to throttle.
* @param {number} [wait=0] The number of milliseconds to throttle invocations to.
* @param {Object} [options={}] The options object.
* @param {boolean} [options.leading=true]
* Specify invoking on the leading edge of the timeout.
* @param {boolean} [options.trailing=true]
* Specify invoking on the trailing edge of the timeout.
* @returns {Function} Returns the new throttled function.
* @example
*
* // Avoid excessively updating the position while scrolling.
* jQuery(window).on('scroll', _.throttle(updatePosition, 100));
*
* // Invoke `renewToken` when the click event is fired, but not more than once every 5 minutes.
* var throttled = _.throttle(renewToken, 300000, { 'trailing': false });
* jQuery(element).on('click', throttled);
*
* // Cancel the trailing throttled invocation.
* jQuery(window).on('popstate', throttled.cancel);
*/
function throttle(func, wait, options) {
var leading = true,
trailing = true;
if (typeof func != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
if (isObject(options)) {
leading = 'leading' in options ? !!options.leading : leading;
trailing = 'trailing' in options ? !!options.trailing : trailing;
}
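// `throttle` is `debounce` with `maxWait` pinned to `wait`, so `func` can
// run at most once per `wait` interval even while calls keep arriving.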
return debounce(func, wait, {
'leading': leading,
'maxWait': wait,
'trailing': trailing
});
}
/**
* Creates a function that accepts up to one argument, ignoring any
* additional arguments.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Function
* @param {Function} func The function to cap arguments for.
* @returns {Function} Returns the new capped function.
* @example
*
* _.map(['6', '8', '10'], _.unary(parseInt));
* // => [6, 8, 10]
*/
function unary(func) {
return ary(func, 1);
}
/**
* Creates a function that provides `value` to `wrapper` as its first
* argument. Any additional arguments provided to the function are appended
* to those provided to the `wrapper`. The wrapper is invoked with the `this`
* binding of the created function.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Function
* @param {*} value The value to wrap.
* @param {Function} [wrapper=identity] The wrapper function.
* @returns {Function} Returns the new function.
* @example
*
* var p = _.wrap(_.escape, function(func, text) {
* return '<p>' + func(text) + '</p>';
* });
*
* p('fred, barney, & pebbles');
* // => '<p>fred, barney, & pebbles</p>'
*/
function wrap(value, wrapper) {
return partial(castFunction(wrapper), value);
}
/*------------------------------------------------------------------------*/
/**
* Casts `value` as an array if it's not one.
*
* @static
* @memberOf _
* @since 4.4.0
* @category Lang
* @param {*} value The value to inspect.
* @returns {Array} Returns the cast array.
* @example
*
* _.castArray(1);
* // => [1]
*
* _.castArray({ 'a': 1 });
* // => [{ 'a': 1 }]
*
* _.castArray('abc');
* // => ['abc']
*
* _.castArray(null);
* // => [null]
*
* _.castArray(undefined);
* // => [undefined]
*
* _.castArray();
* // => []
*
* var array = [1, 2, 3];
* console.log(_.castArray(array) === array);
* // => true
*/
function castArray() {
if (!arguments.length) {
return [];
}
var value = arguments[0];
return isArray(value) ? value : [value];
}
/**
* Creates a shallow clone of `value`.
*
* **Note:** This method is loosely based on the
* [structured clone algorithm](https://mdn.io/Structured_clone_algorithm)
* and supports cloning arrays, array buffers, booleans, date objects, maps,
* numbers, `Object` objects, regexes, sets, strings, symbols, and typed
* arrays. The own enumerable properties of `arguments` objects are cloned
* as plain objects. An empty object is returned for uncloneable values such
* as error objects, functions, DOM nodes, and WeakMaps.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to clone.
* @returns {*} Returns the cloned value.
* @see _.cloneDeep
* @example
*
* var objects = [{ 'a': 1 }, { 'b': 2 }];
*
* var shallow = _.clone(objects);
* console.log(shallow[0] === objects[0]);
* // => true
*/
function clone(value) {
return baseClone(value, CLONE_SYMBOLS_FLAG);
}
/**
* This method is like `_.clone` except that it accepts `customizer` which
* is invoked to produce the cloned value. If `customizer` returns `undefined`,
* cloning is handled by the method instead. The `customizer` is invoked with
 * up to four arguments: (value [, index|key, object, stack]).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to clone.
* @param {Function} [customizer] The function to customize cloning.
* @returns {*} Returns the cloned value.
* @see _.cloneDeepWith
* @example
*
* function customizer(value) {
* if (_.isElement(value)) {
* return value.cloneNode(false);
* }
* }
*
* var el = _.cloneWith(document.body, customizer);
*
* console.log(el === document.body);
* // => false
* console.log(el.nodeName);
* // => 'BODY'
* console.log(el.childNodes.length);
* // => 0
*/
function cloneWith(value, customizer) {
customizer = typeof customizer == 'function' ? customizer : undefined;
return baseClone(value, CLONE_SYMBOLS_FLAG, customizer);
}
/**
* This method is like `_.clone` except that it recursively clones `value`.
*
* @static
* @memberOf _
* @since 1.0.0
* @category Lang
* @param {*} value The value to recursively clone.
* @returns {*} Returns the deep cloned value.
* @see _.clone
* @example
*
* var objects = [{ 'a': 1 }, { 'b': 2 }];
*
* var deep = _.cloneDeep(objects);
* console.log(deep[0] === objects[0]);
* // => false
*/
function cloneDeep(value) {
return baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG);
}
/**
* This method is like `_.cloneWith` except that it recursively clones `value`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to recursively clone.
* @param {Function} [customizer] The function to customize cloning.
* @returns {*} Returns the deep cloned value.
* @see _.cloneWith
* @example
*
* function customizer(value) {
* if (_.isElement(value)) {
* return value.cloneNode(true);
* }
* }
*
* var el = _.cloneDeepWith(document.body, customizer);
*
* console.log(el === document.body);
* // => false
* console.log(el.nodeName);
* // => 'BODY'
* console.log(el.childNodes.length);
* // => 20
*/
function cloneDeepWith(value, customizer) {
customizer = typeof customizer == 'function' ? customizer : undefined;
return baseClone(value, CLONE_DEEP_FLAG | CLONE_SYMBOLS_FLAG, customizer);
}
/**
* Checks if `object` conforms to `source` by invoking the predicate
* properties of `source` with the corresponding property values of `object`.
*
* **Note:** This method is equivalent to `_.conforms` when `source` is
* partially applied.
*
* @static
* @memberOf _
* @since 4.14.0
* @category Lang
* @param {Object} object The object to inspect.
* @param {Object} source The object of property predicates to conform to.
* @returns {boolean} Returns `true` if `object` conforms, else `false`.
* @example
*
* var object = { 'a': 1, 'b': 2 };
*
* _.conformsTo(object, { 'b': function(n) { return n > 1; } });
* // => true
*
* _.conformsTo(object, { 'b': function(n) { return n > 2; } });
* // => false
*/
function conformsTo(object, source) {
return source == null || baseConformsTo(object, source, keys(source));
}
/**
* Performs a
* [`SameValueZero`](http://ecma-international.org/ecma-262/7.0/#sec-samevaluezero)
* comparison between two values to determine if they are equivalent.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if the values are equivalent, else `false`.
* @example
*
* var object = { 'a': 1 };
* var other = { 'a': 1 };
*
* _.eq(object, object);
* // => true
*
* _.eq(object, other);
* // => false
*
* _.eq('a', 'a');
* // => true
*
* _.eq('a', Object('a'));
* // => false
*
* _.eq(NaN, NaN);
* // => true
*/
function eq(value, other) {
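// `NaN` is the only value not equal to itself, so the self-inequality check
// below makes `eq(NaN, NaN)` return `true`, as `SameValueZero` requires.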
return value === other || (value !== value && other !== other);
}
/**
* Checks if `value` is greater than `other`.
*
* @static
* @memberOf _
* @since 3.9.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is greater than `other`,
* else `false`.
* @see _.lt
* @example
*
* _.gt(3, 1);
* // => true
*
* _.gt(3, 3);
* // => false
*
* _.gt(1, 3);
* // => false
*/
var gt = createRelationalOperation(baseGt);
/**
* Checks if `value` is greater than or equal to `other`.
*
* @static
* @memberOf _
* @since 3.9.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is greater than or equal to
* `other`, else `false`.
* @see _.lte
* @example
*
* _.gte(3, 1);
* // => true
*
* _.gte(3, 3);
* // => true
*
* _.gte(1, 3);
* // => false
*/
var gte = createRelationalOperation(function(value, other) {
return value >= other;
});
/**
* Checks if `value` is likely an `arguments` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an `arguments` object,
* else `false`.
* @example
*
* _.isArguments(function() { return arguments; }());
* // => true
*
* _.isArguments([1, 2, 3]);
* // => false
*/
var isArguments = baseIsArguments(function() { return arguments; }()) ? baseIsArguments : function(value) {
return isObjectLike(value) && hasOwnProperty.call(value, 'callee') &&
!propertyIsEnumerable.call(value, 'callee');
};
/**
* Checks if `value` is classified as an `Array` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an array, else `false`.
* @example
*
* _.isArray([1, 2, 3]);
* // => true
*
* _.isArray(document.body.children);
* // => false
*
* _.isArray('abc');
* // => false
*
* _.isArray(_.noop);
* // => false
*/
var isArray = Array.isArray;
/**
* Checks if `value` is classified as an `ArrayBuffer` object.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an array buffer, else `false`.
* @example
*
* _.isArrayBuffer(new ArrayBuffer(2));
* // => true
*
* _.isArrayBuffer(new Array(2));
* // => false
*/
var isArrayBuffer = nodeIsArrayBuffer ? baseUnary(nodeIsArrayBuffer) : baseIsArrayBuffer;
/**
* Checks if `value` is array-like. A value is considered array-like if it's
* not a function and has a `value.length` that's an integer greater than or
* equal to `0` and less than or equal to `Number.MAX_SAFE_INTEGER`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is array-like, else `false`.
* @example
*
* _.isArrayLike([1, 2, 3]);
* // => true
*
* _.isArrayLike(document.body.children);
* // => true
*
* _.isArrayLike('abc');
* // => true
*
* _.isArrayLike(_.noop);
* // => false
*/
function isArrayLike(value) {
return value != null && isLength(value.length) && !isFunction(value);
}
/**
* This method is like `_.isArrayLike` except that it also checks if `value`
* is an object.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an array-like object,
* else `false`.
* @example
*
* _.isArrayLikeObject([1, 2, 3]);
* // => true
*
* _.isArrayLikeObject(document.body.children);
* // => true
*
* _.isArrayLikeObject('abc');
* // => false
*
* _.isArrayLikeObject(_.noop);
* // => false
*/
function isArrayLikeObject(value) {
return isObjectLike(value) && isArrayLike(value);
}
/**
* Checks if `value` is classified as a boolean primitive or object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a boolean, else `false`.
* @example
*
* _.isBoolean(false);
* // => true
*
* _.isBoolean(null);
* // => false
*/
function isBoolean(value) {
return value === true || value === false ||
(isObjectLike(value) && baseGetTag(value) == boolTag);
}
/**
* Checks if `value` is a buffer.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a buffer, else `false`.
* @example
*
* _.isBuffer(new Buffer(2));
* // => true
*
* _.isBuffer(new Uint8Array(2));
* // => false
*/
var isBuffer = nativeIsBuffer || stubFalse;
/**
* Checks if `value` is classified as a `Date` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a date object, else `false`.
* @example
*
* _.isDate(new Date);
* // => true
*
* _.isDate('Mon April 23 2012');
* // => false
*/
var isDate = nodeIsDate ? baseUnary(nodeIsDate) : baseIsDate;
/**
* Checks if `value` is likely a DOM element.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a DOM element, else `false`.
* @example
*
* _.isElement(document.body);
* // => true
*
* _.isElement('<body>');
* // => false
*/
function isElement(value) {
return isObjectLike(value) && value.nodeType === 1 && !isPlainObject(value);
}
/**
* Checks if `value` is an empty object, collection, map, or set.
*
* Objects are considered empty if they have no own enumerable string keyed
* properties.
*
* Array-like values such as `arguments` objects, arrays, buffers, strings, or
* jQuery-like collections are considered empty if they have a `length` of `0`.
* Similarly, maps and sets are considered empty if they have a `size` of `0`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is empty, else `false`.
* @example
*
* _.isEmpty(null);
* // => true
*
* _.isEmpty(true);
* // => true
*
* _.isEmpty(1);
* // => true
*
* _.isEmpty([1, 2, 3]);
* // => false
*
* _.isEmpty({ 'a': 1 });
* // => false
*/
function isEmpty(value) {
if (value == null) {
return true;
}
if (isArrayLike(value) &&
(isArray(value) || typeof value == 'string' || typeof value.splice == 'function' ||
isBuffer(value) || isTypedArray(value) || isArguments(value))) {
return !value.length;
}
var tag = getTag(value);
if (tag == mapTag || tag == setTag) {
return !value.size;
}
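// For prototype objects, `baseKeys` skips the `constructor` property that
// the own-property scan below would otherwise count.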
if (isPrototype(value)) {
return !baseKeys(value).length;
}
for (var key in value) {
if (hasOwnProperty.call(value, key)) {
return false;
}
}
return true;
}
/**
* Performs a deep comparison between two values to determine if they are
* equivalent.
*
* **Note:** This method supports comparing arrays, array buffers, booleans,
* date objects, error objects, maps, numbers, `Object` objects, regexes,
* sets, strings, symbols, and typed arrays. `Object` objects are compared
* by their own, not inherited, enumerable properties. Functions and DOM
* nodes are compared by strict equality, i.e. `===`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if the values are equivalent, else `false`.
* @example
*
* var object = { 'a': 1 };
* var other = { 'a': 1 };
*
* _.isEqual(object, other);
* // => true
*
* object === other;
* // => false
*/
function isEqual(value, other) {
return baseIsEqual(value, other);
}
/**
* This method is like `_.isEqual` except that it accepts `customizer` which
* is invoked to compare values. If `customizer` returns `undefined`, comparisons
* are handled by the method instead. The `customizer` is invoked with up to
* six arguments: (objValue, othValue [, index|key, object, other, stack]).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @param {Function} [customizer] The function to customize comparisons.
* @returns {boolean} Returns `true` if the values are equivalent, else `false`.
* @example
*
* function isGreeting(value) {
* return /^h(?:i|ello)$/.test(value);
* }
*
* function customizer(objValue, othValue) {
* if (isGreeting(objValue) && isGreeting(othValue)) {
* return true;
* }
* }
*
* var array = ['hello', 'goodbye'];
* var other = ['hi', 'goodbye'];
*
* _.isEqualWith(array, other, customizer);
* // => true
*/
function isEqualWith(value, other, customizer) {
customizer = typeof customizer == 'function' ? customizer : undefined;
var result = customizer ? customizer(value, other) : undefined;
return result === undefined ? baseIsEqual(value, other, undefined, customizer) : !!result;
}
/**
* Checks if `value` is an `Error`, `EvalError`, `RangeError`, `ReferenceError`,
* `SyntaxError`, `TypeError`, or `URIError` object.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an error object, else `false`.
* @example
*
* _.isError(new Error);
* // => true
*
* _.isError(Error);
* // => false
*/
function isError(value) {
if (!isObjectLike(value)) {
return false;
}
var tag = baseGetTag(value);
return tag == errorTag || tag == domExcTag ||
(typeof value.message == 'string' && typeof value.name == 'string' && !isPlainObject(value));
}
/**
* Checks if `value` is a finite primitive number.
*
* **Note:** This method is based on
* [`Number.isFinite`](https://mdn.io/Number/isFinite).
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a finite number, else `false`.
* @example
*
* _.isFinite(3);
* // => true
*
* _.isFinite(Number.MIN_VALUE);
* // => true
*
* _.isFinite(Infinity);
* // => false
*
* _.isFinite('3');
* // => false
*/
function isFinite(value) {
return typeof value == 'number' && nativeIsFinite(value);
}
/**
* Checks if `value` is classified as a `Function` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a function, else `false`.
* @example
*
* _.isFunction(_);
* // => true
*
* _.isFunction(/abc/);
* // => false
*/
function isFunction(value) {
if (!isObject(value)) {
return false;
}
// The use of `Object#toString` avoids issues with the `typeof` operator
// in Safari 9 which returns 'object' for typed arrays and other constructors.
var tag = baseGetTag(value);
return tag == funcTag || tag == genTag || tag == asyncTag || tag == proxyTag;
}
/**
* Checks if `value` is an integer.
*
* **Note:** This method is based on
* [`Number.isInteger`](https://mdn.io/Number/isInteger).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an integer, else `false`.
* @example
*
* _.isInteger(3);
* // => true
*
* _.isInteger(Number.MIN_VALUE);
* // => false
*
* _.isInteger(Infinity);
* // => false
*
* _.isInteger('3');
* // => false
*/
function isInteger(value) {
return typeof value == 'number' && value == toInteger(value);
}
/**
* Checks if `value` is a valid array-like length.
*
* **Note:** This method is loosely based on
* [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a valid length, else `false`.
* @example
*
* _.isLength(3);
* // => true
*
* _.isLength(Number.MIN_VALUE);
* // => false
*
* _.isLength(Infinity);
* // => false
*
* _.isLength('3');
* // => false
*/
function isLength(value) {
return typeof value == 'number' &&
value > -1 && value % 1 == 0 && value <= MAX_SAFE_INTEGER;
}
/**
* Checks if `value` is the
* [language type](http://www.ecma-international.org/ecma-262/7.0/#sec-ecmascript-language-types)
* of `Object`. (e.g. arrays, functions, objects, regexes, `new Number(0)`, and `new String('')`)
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is an object, else `false`.
* @example
*
* _.isObject({});
* // => true
*
* _.isObject([1, 2, 3]);
* // => true
*
* _.isObject(_.noop);
* // => true
*
* _.isObject(null);
* // => false
*/
function isObject(value) {
var type = typeof value;
return value != null && (type == 'object' || type == 'function');
}
/**
* Checks if `value` is object-like. A value is object-like if it's not `null`
* and has a `typeof` result of "object".
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is object-like, else `false`.
* @example
*
* _.isObjectLike({});
* // => true
*
* _.isObjectLike([1, 2, 3]);
* // => true
*
* _.isObjectLike(_.noop);
* // => false
*
* _.isObjectLike(null);
* // => false
*/
function isObjectLike(value) {
return value != null && typeof value == 'object';
}
/**
* Checks if `value` is classified as a `Map` object.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a map, else `false`.
* @example
*
* _.isMap(new Map);
* // => true
*
* _.isMap(new WeakMap);
* // => false
*/
var isMap = nodeIsMap ? baseUnary(nodeIsMap) : baseIsMap;
/**
* Performs a partial deep comparison between `object` and `source` to
* determine if `object` contains equivalent property values.
*
* **Note:** This method is equivalent to `_.matches` when `source` is
* partially applied.
*
* Partial comparisons will match empty array and empty object `source`
* values against any array or object value, respectively. See `_.isEqual`
* for a list of supported value comparisons.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Lang
* @param {Object} object The object to inspect.
* @param {Object} source The object of property values to match.
* @returns {boolean} Returns `true` if `object` is a match, else `false`.
* @example
*
* var object = { 'a': 1, 'b': 2 };
*
* _.isMatch(object, { 'b': 2 });
* // => true
*
* _.isMatch(object, { 'b': 1 });
* // => false
*/
function isMatch(object, source) {
return object === source || baseIsMatch(object, source, getMatchData(source));
}
/**
* This method is like `_.isMatch` except that it accepts `customizer` which
* is invoked to compare values. If `customizer` returns `undefined`, comparisons
* are handled by the method instead. The `customizer` is invoked with five
* arguments: (objValue, srcValue, index|key, object, source).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {Object} object The object to inspect.
* @param {Object} source The object of property values to match.
* @param {Function} [customizer] The function to customize comparisons.
* @returns {boolean} Returns `true` if `object` is a match, else `false`.
* @example
*
* function isGreeting(value) {
* return /^h(?:i|ello)$/.test(value);
* }
*
* function customizer(objValue, srcValue) {
* if (isGreeting(objValue) && isGreeting(srcValue)) {
* return true;
* }
* }
*
* var object = { 'greeting': 'hello' };
* var source = { 'greeting': 'hi' };
*
* _.isMatchWith(object, source, customizer);
* // => true
*/
function isMatchWith(object, source, customizer) {
customizer = typeof customizer == 'function' ? customizer : undefined;
return baseIsMatch(object, source, getMatchData(source), customizer);
}
/**
* Checks if `value` is `NaN`.
*
* **Note:** This method is based on
* [`Number.isNaN`](https://mdn.io/Number/isNaN) and is not the same as
* global [`isNaN`](https://mdn.io/isNaN) which returns `true` for
* `undefined` and other non-number values.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is `NaN`, else `false`.
* @example
*
* _.isNaN(NaN);
* // => true
*
* _.isNaN(new Number(NaN));
* // => true
*
* isNaN(undefined);
* // => true
*
* _.isNaN(undefined);
* // => false
*/
function isNaN(value) {
// An `NaN` primitive is the only value that is not equal to itself.
// Perform the `toStringTag` check first to avoid errors with some
// ActiveX objects in IE.
return isNumber(value) && value != +value;
}
/**
* Checks if `value` is a pristine native function.
*
* **Note:** This method can't reliably detect native functions in the presence
* of the core-js package because core-js circumvents this kind of detection.
* Despite multiple requests, the core-js maintainer has made it clear: any
* attempt to fix the detection will be obstructed. As a result, we're left
* with little choice but to throw an error. Unfortunately, this also affects
* packages, like [babel-polyfill](https://www.npmjs.com/package/babel-polyfill),
* which rely on core-js.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a native function,
* else `false`.
* @example
*
* _.isNative(Array.prototype.push);
* // => true
*
* _.isNative(_);
* // => false
*/
function isNative(value) {
if (isMaskable(value)) {
throw new Error(CORE_ERROR_TEXT);
}
return baseIsNative(value);
}
/**
* Checks if `value` is `null`.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is `null`, else `false`.
* @example
*
* _.isNull(null);
* // => true
*
* _.isNull(void 0);
* // => false
*/
function isNull(value) {
return value === null;
}
/**
* Checks if `value` is `null` or `undefined`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is nullish, else `false`.
* @example
*
* _.isNil(null);
* // => true
*
* _.isNil(void 0);
* // => true
*
* _.isNil(NaN);
* // => false
*/
function isNil(value) {
return value == null;
}
/**
* Checks if `value` is classified as a `Number` primitive or object.
*
* **Note:** To exclude `Infinity`, `-Infinity`, and `NaN`, which are
* classified as numbers, use the `_.isFinite` method.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a number, else `false`.
* @example
*
* _.isNumber(3);
* // => true
*
* _.isNumber(Number.MIN_VALUE);
* // => true
*
* _.isNumber(Infinity);
* // => true
*
* _.isNumber('3');
* // => false
*/
function isNumber(value) {
return typeof value == 'number' ||
(isObjectLike(value) && baseGetTag(value) == numberTag);
}
/**
* Checks if `value` is a plain object, that is, an object created by the
* `Object` constructor or one with a `[[Prototype]]` of `null`.
*
* @static
* @memberOf _
* @since 0.8.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a plain object, else `false`.
* @example
*
* function Foo() {
* this.a = 1;
* }
*
* _.isPlainObject(new Foo);
* // => false
*
* _.isPlainObject([1, 2, 3]);
* // => false
*
* _.isPlainObject({ 'x': 0, 'y': 0 });
* // => true
*
* _.isPlainObject(Object.create(null));
* // => true
*/
function isPlainObject(value) {
if (!isObjectLike(value) || baseGetTag(value) != objectTag) {
return false;
}
var proto = getPrototype(value);
if (proto === null) {
return true;
}
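// A plain object's prototype carries the built-in `Object` constructor;
// comparing the constructor's source string keeps the check working across realms.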
var Ctor = hasOwnProperty.call(proto, 'constructor') && proto.constructor;
return typeof Ctor == 'function' && Ctor instanceof Ctor &&
funcToString.call(Ctor) == objectCtorString;
}
/**
* Checks if `value` is classified as a `RegExp` object.
*
* @static
* @memberOf _
* @since 0.1.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a regexp, else `false`.
* @example
*
* _.isRegExp(/abc/);
* // => true
*
* _.isRegExp('/abc/');
* // => false
*/
var isRegExp = nodeIsRegExp ? baseUnary(nodeIsRegExp) : baseIsRegExp;
/**
* Checks if `value` is a safe integer. An integer is safe if it's an IEEE-754
* double precision number which isn't the result of a rounded unsafe integer.
*
* **Note:** This method is based on
* [`Number.isSafeInteger`](https://mdn.io/Number/isSafeInteger).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a safe integer, else `false`.
* @example
*
* _.isSafeInteger(3);
* // => true
*
* _.isSafeInteger(Number.MIN_VALUE);
* // => false
*
* _.isSafeInteger(Infinity);
* // => false
*
* _.isSafeInteger('3');
* // => false
*/
function isSafeInteger(value) {
return isInteger(value) && value >= -MAX_SAFE_INTEGER && value <= MAX_SAFE_INTEGER;
}
/**
* Checks if `value` is classified as a `Set` object.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a set, else `false`.
* @example
*
* _.isSet(new Set);
* // => true
*
* _.isSet(new WeakSet);
* // => false
*/
var isSet = nodeIsSet ? baseUnary(nodeIsSet) : baseIsSet;
/**
* Checks if `value` is classified as a `String` primitive or object.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a string, else `false`.
* @example
*
* _.isString('abc');
* // => true
*
* _.isString(1);
* // => false
*/
function isString(value) {
return typeof value == 'string' ||
(!isArray(value) && isObjectLike(value) && baseGetTag(value) == stringTag);
}
/**
* Checks if `value` is classified as a `Symbol` primitive or object.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a symbol, else `false`.
* @example
*
* _.isSymbol(Symbol.iterator);
* // => true
*
* _.isSymbol('abc');
* // => false
*/
function isSymbol(value) {
return typeof value == 'symbol' ||
(isObjectLike(value) && baseGetTag(value) == symbolTag);
}
/**
* Checks if `value` is classified as a typed array.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a typed array, else `false`.
* @example
*
* _.isTypedArray(new Uint8Array);
* // => true
*
* _.isTypedArray([]);
* // => false
*/
var isTypedArray = nodeIsTypedArray ? baseUnary(nodeIsTypedArray) : baseIsTypedArray;
/**
* Checks if `value` is `undefined`.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is `undefined`, else `false`.
* @example
*
* _.isUndefined(void 0);
* // => true
*
* _.isUndefined(null);
* // => false
*/
function isUndefined(value) {
return value === undefined;
}
/**
* Checks if `value` is classified as a `WeakMap` object.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a weak map, else `false`.
* @example
*
* _.isWeakMap(new WeakMap);
* // => true
*
* _.isWeakMap(new Map);
* // => false
*/
function isWeakMap(value) {
return isObjectLike(value) && getTag(value) == weakMapTag;
}
/**
* Checks if `value` is classified as a `WeakSet` object.
*
* @static
* @memberOf _
* @since 4.3.0
* @category Lang
* @param {*} value The value to check.
* @returns {boolean} Returns `true` if `value` is a weak set, else `false`.
* @example
*
* _.isWeakSet(new WeakSet);
* // => true
*
* _.isWeakSet(new Set);
* // => false
*/
function isWeakSet(value) {
return isObjectLike(value) && baseGetTag(value) == weakSetTag;
}
/**
* Checks if `value` is less than `other`.
*
* @static
* @memberOf _
* @since 3.9.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is less than `other`,
* else `false`.
* @see _.gt
* @example
*
* _.lt(1, 3);
* // => true
*
* _.lt(3, 3);
* // => false
*
* _.lt(3, 1);
* // => false
*/
var lt = createRelationalOperation(baseLt);
/**
* Checks if `value` is less than or equal to `other`.
*
* @static
* @memberOf _
* @since 3.9.0
* @category Lang
* @param {*} value The value to compare.
* @param {*} other The other value to compare.
* @returns {boolean} Returns `true` if `value` is less than or equal to
* `other`, else `false`.
* @see _.gte
* @example
*
* _.lte(1, 3);
* // => true
*
* _.lte(3, 3);
* // => true
*
* _.lte(3, 1);
* // => false
*/
var lte = createRelationalOperation(function(value, other) {
return value <= other;
});
/**
* Converts `value` to an array.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Lang
* @param {*} value The value to convert.
* @returns {Array} Returns the converted array.
* @example
*
* _.toArray({ 'a': 1, 'b': 2 });
* // => [1, 2]
*
* _.toArray('abc');
* // => ['a', 'b', 'c']
*
* _.toArray(1);
* // => []
*
* _.toArray(null);
* // => []
*/
function toArray(value) {
if (!value) {
return [];
}
if (isArrayLike(value)) {
return isString(value) ? stringToArray(value) : copyArray(value);
}
if (symIterator && value[symIterator]) {
return iteratorToArray(value[symIterator]());
}
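// Remaining non-array-likes dispatch on tag: maps become arrays of
// `[key, value]` pairs, sets become value arrays, and anything else falls
// back to `values`.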
var tag = getTag(value),
func = tag == mapTag ? mapToArray : (tag == setTag ? setToArray : values);
return func(value);
}
/**
* Converts `value` to a finite number.
*
* @static
* @memberOf _
* @since 4.12.0
* @category Lang
* @param {*} value The value to convert.
* @returns {number} Returns the converted number.
* @example
*
* _.toFinite(3.2);
* // => 3.2
*
* _.toFinite(Number.MIN_VALUE);
* // => 5e-324
*
* _.toFinite(Infinity);
* // => 1.7976931348623157e+308
*
* _.toFinite('3.2');
* // => 3.2
*/
function toFinite(value) {
if (!value) {
return value === 0 ? value : 0;
}
value = toNumber(value);
if (value === INFINITY || value === -INFINITY) {
var sign = (value < 0 ? -1 : 1);
return sign * MAX_INTEGER;
}
return value === value ? value : 0;
}
/**
* Converts `value` to an integer.
*
* **Note:** This method is loosely based on
* [`ToInteger`](http://www.ecma-international.org/ecma-262/7.0/#sec-tointeger).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to convert.
* @returns {number} Returns the converted integer.
* @example
*
* _.toInteger(3.2);
* // => 3
*
* _.toInteger(Number.MIN_VALUE);
* // => 0
*
* _.toInteger(Infinity);
* // => 1.7976931348623157e+308
*
* _.toInteger('3.2');
* // => 3
*/
function toInteger(value) {
var result = toFinite(value),
remainder = result % 1;
return result === result ? (remainder ? result - remainder : result) : 0;
}
/**
* Converts `value` to an integer suitable for use as the length of an
* array-like object.
*
* **Note:** This method is based on
* [`ToLength`](http://ecma-international.org/ecma-262/7.0/#sec-tolength).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to convert.
* @returns {number} Returns the converted integer.
* @example
*
* _.toLength(3.2);
* // => 3
*
* _.toLength(Number.MIN_VALUE);
* // => 0
*
* _.toLength(Infinity);
* // => 4294967295
*
* _.toLength('3.2');
* // => 3
*/
function toLength(value) {
return value ? baseClamp(toInteger(value), 0, MAX_ARRAY_LENGTH) : 0;
}
/**
* Converts `value` to a number.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to process.
* @returns {number} Returns the number.
* @example
*
* _.toNumber(3.2);
* // => 3.2
*
* _.toNumber(Number.MIN_VALUE);
* // => 5e-324
*
* _.toNumber(Infinity);
* // => Infinity
*
* _.toNumber('3.2');
* // => 3.2
*/
function toNumber(value) {
if (typeof value == 'number') {
return value;
}
if (isSymbol(value)) {
return NAN;
}
if (isObject(value)) {
var other = typeof value.valueOf == 'function' ? value.valueOf() : value;
value = isObject(other) ? (other + '') : other;
}
if (typeof value != 'string') {
return value === 0 ? value : +value;
}
value = value.replace(reTrim, '');
var isBinary = reIsBinary.test(value);
return (isBinary || reIsOctal.test(value))
? freeParseInt(value.slice(2), isBinary ? 2 : 8)
: (reIsBadHex.test(value) ? NAN : +value);
}
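// Illustrative cases (not in the original source) for the string handling
// above: binary and octal forms are parsed manually for engines whose
// `+value` coercion doesn't understand them, while malformed signed hex
// short-circuits to `NaN`.
//
//   toNumber('0b101'); // => 5
//   toNumber('0o17');  // => 15
//   toNumber('-0x4');  // => NaN (matches `reIsBadHex`)
//   toNumber('0x1f');  // => 31 (left to `+value`)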
/**
 * Converts `value` to a plain object, flattening inherited enumerable string
* keyed properties of `value` to own properties of the plain object.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Lang
* @param {*} value The value to convert.
* @returns {Object} Returns the converted plain object.
* @example
*
* function Foo() {
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.assign({ 'a': 1 }, new Foo);
* // => { 'a': 1, 'b': 2 }
*
* _.assign({ 'a': 1 }, _.toPlainObject(new Foo));
* // => { 'a': 1, 'b': 2, 'c': 3 }
*/
function toPlainObject(value) {
return copyObject(value, keysIn(value));
}
/**
* Converts `value` to a safe integer. A safe integer can be compared and
* represented correctly.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to convert.
* @returns {number} Returns the converted integer.
* @example
*
* _.toSafeInteger(3.2);
* // => 3
*
* _.toSafeInteger(Number.MIN_VALUE);
* // => 0
*
* _.toSafeInteger(Infinity);
* // => 9007199254740991
*
* _.toSafeInteger('3.2');
* // => 3
*/
function toSafeInteger(value) {
return value
? baseClamp(toInteger(value), -MAX_SAFE_INTEGER, MAX_SAFE_INTEGER)
: (value === 0 ? value : 0);
}
/**
* Converts `value` to a string. An empty string is returned for `null`
* and `undefined` values. The sign of `-0` is preserved.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Lang
* @param {*} value The value to convert.
* @returns {string} Returns the converted string.
* @example
*
* _.toString(null);
* // => ''
*
* _.toString(-0);
* // => '-0'
*
* _.toString([1, 2, 3]);
* // => '1,2,3'
*/
function toString(value) {
return value == null ? '' : baseToString(value);
}
/*------------------------------------------------------------------------*/
/**
* Assigns own enumerable string keyed properties of source objects to the
* destination object. Source objects are applied from left to right.
* Subsequent sources overwrite property assignments of previous sources.
*
* **Note:** This method mutates `object` and is loosely based on
* [`Object.assign`](https://mdn.io/Object/assign).
*
* @static
* @memberOf _
* @since 0.10.0
* @category Object
* @param {Object} object The destination object.
* @param {...Object} [sources] The source objects.
* @returns {Object} Returns `object`.
* @see _.assignIn
* @example
*
* function Foo() {
* this.a = 1;
* }
*
* function Bar() {
* this.c = 3;
* }
*
* Foo.prototype.b = 2;
* Bar.prototype.d = 4;
*
* _.assign({ 'a': 0 }, new Foo, new Bar);
* // => { 'a': 1, 'c': 3 }
*/
var assign = createAssigner(function(object, source) {
if (isPrototype(source) || isArrayLike(source)) {
copyObject(source, keys(source), object);
return;
}
for (var key in source) {
if (hasOwnProperty.call(source, key)) {
assignValue(object, key, source[key]);
}
}
});
/**
* This method is like `_.assign` except that it iterates over own and
* inherited source properties.
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @alias extend
* @category Object
* @param {Object} object The destination object.
* @param {...Object} [sources] The source objects.
* @returns {Object} Returns `object`.
* @see _.assign
* @example
*
* function Foo() {
* this.a = 1;
* }
*
* function Bar() {
* this.c = 3;
* }
*
* Foo.prototype.b = 2;
* Bar.prototype.d = 4;
*
* _.assignIn({ 'a': 0 }, new Foo, new Bar);
* // => { 'a': 1, 'b': 2, 'c': 3, 'd': 4 }
*/
var assignIn = createAssigner(function(object, source) {
copyObject(source, keysIn(source), object);
});
/**
* This method is like `_.assignIn` except that it accepts `customizer`
* which is invoked to produce the assigned values. If `customizer` returns
* `undefined`, assignment is handled by the method instead. The `customizer`
* is invoked with five arguments: (objValue, srcValue, key, object, source).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @alias extendWith
* @category Object
* @param {Object} object The destination object.
* @param {...Object} sources The source objects.
* @param {Function} [customizer] The function to customize assigned values.
* @returns {Object} Returns `object`.
* @see _.assignWith
* @example
*
* function customizer(objValue, srcValue) {
* return _.isUndefined(objValue) ? srcValue : objValue;
* }
*
* var defaults = _.partialRight(_.assignInWith, customizer);
*
* defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 });
* // => { 'a': 1, 'b': 2 }
*/
var assignInWith = createAssigner(function(object, source, srcIndex, customizer) {
copyObject(source, keysIn(source), object, customizer);
});
/**
* This method is like `_.assign` except that it accepts `customizer`
* which is invoked to produce the assigned values. If `customizer` returns
* `undefined`, assignment is handled by the method instead. The `customizer`
* is invoked with five arguments: (objValue, srcValue, key, object, source).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The destination object.
* @param {...Object} sources The source objects.
* @param {Function} [customizer] The function to customize assigned values.
* @returns {Object} Returns `object`.
* @see _.assignInWith
* @example
*
* function customizer(objValue, srcValue) {
* return _.isUndefined(objValue) ? srcValue : objValue;
* }
*
* var defaults = _.partialRight(_.assignWith, customizer);
*
* defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 });
* // => { 'a': 1, 'b': 2 }
*/
var assignWith = createAssigner(function(object, source, srcIndex, customizer) {
copyObject(source, keys(source), object, customizer);
});
/**
* Creates an array of values corresponding to `paths` of `object`.
*
* @static
* @memberOf _
* @since 1.0.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {...(string|string[])} [paths] The property paths to pick.
* @returns {Array} Returns the picked values.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 3 } }, 4] };
*
* _.at(object, ['a[0].b.c', 'a[1]']);
* // => [3, 4]
*/
var at = flatRest(baseAt);
/**
* Creates an object that inherits from the `prototype` object. If a
* `properties` object is given, its own enumerable string keyed properties
* are assigned to the created object.
*
* @static
* @memberOf _
* @since 2.3.0
* @category Object
* @param {Object} prototype The object to inherit from.
* @param {Object} [properties] The properties to assign to the object.
* @returns {Object} Returns the new object.
* @example
*
* function Shape() {
* this.x = 0;
* this.y = 0;
* }
*
* function Circle() {
* Shape.call(this);
* }
*
* Circle.prototype = _.create(Shape.prototype, {
* 'constructor': Circle
* });
*
* var circle = new Circle;
* circle instanceof Circle;
* // => true
*
* circle instanceof Shape;
* // => true
*/
function create(prototype, properties) {
var result = baseCreate(prototype);
return properties == null ? result : baseAssign(result, properties);
}
/**
* Assigns own and inherited enumerable string keyed properties of source
* objects to the destination object for all destination properties that
* resolve to `undefined`. Source objects are applied from left to right.
* Once a property is set, additional values of the same property are ignored.
*
* **Note:** This method mutates `object`.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The destination object.
* @param {...Object} [sources] The source objects.
* @returns {Object} Returns `object`.
* @see _.defaultsDeep
* @example
*
* _.defaults({ 'a': 1 }, { 'b': 2 }, { 'a': 3 });
* // => { 'a': 1, 'b': 2 }
*/
var defaults = baseRest(function(object, sources) {
object = Object(object);
var index = -1;
var length = sources.length;
var guard = length > 2 ? sources[2] : undefined;
if (guard && isIterateeCall(sources[0], sources[1], guard)) {
length = 1;
}
while (++index < length) {
var source = sources[index];
var props = keysIn(source);
var propsIndex = -1;
var propsLength = props.length;
while (++propsIndex < propsLength) {
var key = props[propsIndex];
var value = object[key];
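// Assign only when the destination value is `undefined`, or when it merely
// inherits the `Object.prototype` default for `key` rather than owning the property.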
if (value === undefined ||
(eq(value, objectProto[key]) && !hasOwnProperty.call(object, key))) {
object[key] = source[key];
}
}
}
return object;
});
/**
* This method is like `_.defaults` except that it recursively assigns
* default properties.
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 3.10.0
* @category Object
* @param {Object} object The destination object.
* @param {...Object} [sources] The source objects.
* @returns {Object} Returns `object`.
* @see _.defaults
* @example
*
* _.defaultsDeep({ 'a': { 'b': 2 } }, { 'a': { 'b': 1, 'c': 3 } });
* // => { 'a': { 'b': 2, 'c': 3 } }
*/
var defaultsDeep = baseRest(function(args) {
args.push(undefined, customDefaultsMerge);
return apply(mergeWith, undefined, args);
});
/**
* This method is like `_.find` except that it returns the key of the first
* element `predicate` returns truthy for instead of the element itself.
*
* @static
* @memberOf _
* @since 1.1.0
* @category Object
* @param {Object} object The object to inspect.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {string|undefined} Returns the key of the matched element,
* else `undefined`.
* @example
*
* var users = {
* 'barney': { 'age': 36, 'active': true },
* 'fred': { 'age': 40, 'active': false },
* 'pebbles': { 'age': 1, 'active': true }
* };
*
* _.findKey(users, function(o) { return o.age < 40; });
* // => 'barney' (iteration order is not guaranteed)
*
* // The `_.matches` iteratee shorthand.
* _.findKey(users, { 'age': 1, 'active': true });
* // => 'pebbles'
*
* // The `_.matchesProperty` iteratee shorthand.
* _.findKey(users, ['active', false]);
* // => 'fred'
*
* // The `_.property` iteratee shorthand.
* _.findKey(users, 'active');
* // => 'barney'
*/
function findKey(object, predicate) {
return baseFindKey(object, getIteratee(predicate, 3), baseForOwn);
}
/**
* This method is like `_.findKey` except that it iterates over elements of
* a collection in the opposite order.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Object
* @param {Object} object The object to inspect.
* @param {Function} [predicate=_.identity] The function invoked per iteration.
* @returns {string|undefined} Returns the key of the matched element,
* else `undefined`.
* @example
*
* var users = {
* 'barney': { 'age': 36, 'active': true },
* 'fred': { 'age': 40, 'active': false },
* 'pebbles': { 'age': 1, 'active': true }
* };
*
* _.findLastKey(users, function(o) { return o.age < 40; });
* // => returns 'pebbles' assuming `_.findKey` returns 'barney'
*
* // The `_.matches` iteratee shorthand.
* _.findLastKey(users, { 'age': 36, 'active': true });
* // => 'barney'
*
* // The `_.matchesProperty` iteratee shorthand.
* _.findLastKey(users, ['active', false]);
* // => 'fred'
*
* // The `_.property` iteratee shorthand.
* _.findLastKey(users, 'active');
* // => 'pebbles'
*/
function findLastKey(object, predicate) {
return baseFindKey(object, getIteratee(predicate, 3), baseForOwnRight);
}
/**
* Iterates over own and inherited enumerable string keyed properties of an
* object and invokes `iteratee` for each property. The iteratee is invoked
* with three arguments: (value, key, object). Iteratee functions may exit
* iteration early by explicitly returning `false`.
*
* @static
* @memberOf _
* @since 0.3.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns `object`.
* @see _.forInRight
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.forIn(new Foo, function(value, key) {
* console.log(key);
* });
* // => Logs 'a', 'b', then 'c' (iteration order is not guaranteed).
*/
function forIn(object, iteratee) {
return object == null
? object
: baseFor(object, getIteratee(iteratee, 3), keysIn);
}
/**
* This method is like `_.forIn` except that it iterates over properties of
* `object` in the opposite order.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns `object`.
* @see _.forIn
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.forInRight(new Foo, function(value, key) {
* console.log(key);
* });
* // => Logs 'c', 'b', then 'a' assuming `_.forIn` logs 'a', 'b', then 'c'.
*/
function forInRight(object, iteratee) {
return object == null
? object
: baseForRight(object, getIteratee(iteratee, 3), keysIn);
}
/**
* Iterates over own enumerable string keyed properties of an object and
* invokes `iteratee` for each property. The iteratee is invoked with three
* arguments: (value, key, object). Iteratee functions may exit iteration
* early by explicitly returning `false`.
*
* @static
* @memberOf _
* @since 0.3.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns `object`.
* @see _.forOwnRight
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.forOwn(new Foo, function(value, key) {
* console.log(key);
* });
* // => Logs 'a' then 'b' (iteration order is not guaranteed).
*/
function forOwn(object, iteratee) {
return object && baseForOwn(object, getIteratee(iteratee, 3));
}
/**
* This method is like `_.forOwn` except that it iterates over properties of
* `object` in the opposite order.
*
* @static
* @memberOf _
* @since 2.0.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns `object`.
* @see _.forOwn
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.forOwnRight(new Foo, function(value, key) {
* console.log(key);
* });
* // => Logs 'b' then 'a' assuming `_.forOwn` logs 'a' then 'b'.
*/
function forOwnRight(object, iteratee) {
return object && baseForOwnRight(object, getIteratee(iteratee, 3));
}
/**
* Creates an array of function property names from own enumerable properties
* of `object`.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The object to inspect.
* @returns {Array} Returns the function names.
* @see _.functionsIn
* @example
*
* function Foo() {
* this.a = _.constant('a');
* this.b = _.constant('b');
* }
*
* Foo.prototype.c = _.constant('c');
*
* _.functions(new Foo);
* // => ['a', 'b']
*/
function functions(object) {
return object == null ? [] : baseFunctions(object, keys(object));
}
/**
* Creates an array of function property names from own and inherited
* enumerable properties of `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The object to inspect.
* @returns {Array} Returns the function names.
* @see _.functions
* @example
*
* function Foo() {
* this.a = _.constant('a');
* this.b = _.constant('b');
* }
*
* Foo.prototype.c = _.constant('c');
*
* _.functionsIn(new Foo);
* // => ['a', 'b', 'c']
*/
function functionsIn(object) {
return object == null ? [] : baseFunctions(object, keysIn(object));
}
/**
* Gets the value at `path` of `object`. If the resolved value is
* `undefined`, the `defaultValue` is returned in its place.
*
* @static
* @memberOf _
* @since 3.7.0
* @category Object
* @param {Object} object The object to query.
* @param {Array|string} path The path of the property to get.
* @param {*} [defaultValue] The value returned for `undefined` resolved values.
* @returns {*} Returns the resolved value.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 3 } }] };
*
* _.get(object, 'a[0].b.c');
* // => 3
*
* _.get(object, ['a', '0', 'b', 'c']);
* // => 3
*
* _.get(object, 'a.b.c', 'default');
* // => 'default'
*/
function get(object, path, defaultValue) {
var result = object == null ? undefined : baseGet(object, path);
return result === undefined ? defaultValue : result;
}
/**
* Checks if `path` is a direct property of `object`.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The object to query.
* @param {Array|string} path The path to check.
* @returns {boolean} Returns `true` if `path` exists, else `false`.
* @example
*
* var object = { 'a': { 'b': 2 } };
* var other = _.create({ 'a': _.create({ 'b': 2 }) });
*
* _.has(object, 'a');
* // => true
*
* _.has(object, 'a.b');
* // => true
*
* _.has(object, ['a', 'b']);
* // => true
*
* _.has(other, 'a');
* // => false
*/
function has(object, path) {
return object != null && hasPath(object, path, baseHas);
}
/**
* Checks if `path` is a direct or inherited property of `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The object to query.
* @param {Array|string} path The path to check.
* @returns {boolean} Returns `true` if `path` exists, else `false`.
* @example
*
* var object = _.create({ 'a': _.create({ 'b': 2 }) });
*
* _.hasIn(object, 'a');
* // => true
*
* _.hasIn(object, 'a.b');
* // => true
*
* _.hasIn(object, ['a', 'b']);
* // => true
*
* _.hasIn(object, 'b');
* // => false
*/
function hasIn(object, path) {
return object != null && hasPath(object, path, baseHasIn);
}
/**
* Creates an object composed of the inverted keys and values of `object`.
* If `object` contains duplicate values, subsequent values overwrite
* property assignments of previous values.
*
* @static
* @memberOf _
* @since 0.7.0
* @category Object
* @param {Object} object The object to invert.
* @returns {Object} Returns the new inverted object.
* @example
*
* var object = { 'a': 1, 'b': 2, 'c': 1 };
*
* _.invert(object);
* // => { '1': 'c', '2': 'b' }
*/
var invert = createInverter(function(result, value, key) {
if (value != null &&
typeof value.toString != 'function') {
value = nativeObjectToString.call(value);
}
result[value] = key;
}, constant(identity));
/**
* This method is like `_.invert` except that the inverted object is generated
* from the results of running each element of `object` thru `iteratee`. The
* corresponding inverted value of each inverted key is an array of keys
* responsible for generating the inverted value. The iteratee is invoked
* with one argument: (value).
*
* @static
* @memberOf _
* @since 4.1.0
* @category Object
* @param {Object} object The object to invert.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {Object} Returns the new inverted object.
* @example
*
* var object = { 'a': 1, 'b': 2, 'c': 1 };
*
* _.invertBy(object);
* // => { '1': ['a', 'c'], '2': ['b'] }
*
* _.invertBy(object, function(value) {
* return 'group' + value;
* });
* // => { 'group1': ['a', 'c'], 'group2': ['b'] }
*/
var invertBy = createInverter(function(result, value, key) {
if (value != null &&
typeof value.toString != 'function') {
value = nativeObjectToString.call(value);
}
if (hasOwnProperty.call(result, value)) {
result[value].push(key);
} else {
result[value] = [key];
}
}, getIteratee);
/**
* Invokes the method at `path` of `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The object to query.
* @param {Array|string} path The path of the method to invoke.
* @param {...*} [args] The arguments to invoke the method with.
* @returns {*} Returns the result of the invoked method.
* @example
*
* var object = { 'a': [{ 'b': { 'c': [1, 2, 3, 4] } }] };
*
* _.invoke(object, 'a[0].b.c.slice', 1, 3);
* // => [2, 3]
*/
var invoke = baseRest(baseInvoke);
/**
* Creates an array of the own enumerable property names of `object`.
*
* **Note:** Non-object values are coerced to objects. See the
* [ES spec](http://ecma-international.org/ecma-262/7.0/#sec-object.keys)
* for more details.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.keys(new Foo);
* // => ['a', 'b'] (iteration order is not guaranteed)
*
* _.keys('hi');
* // => ['0', '1']
*/
function keys(object) {
return isArrayLike(object) ? arrayLikeKeys(object) : baseKeys(object);
}
/**
* Creates an array of the own and inherited enumerable property names of `object`.
*
* **Note:** Non-object values are coerced to objects.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property names.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.keysIn(new Foo);
* // => ['a', 'b', 'c'] (iteration order is not guaranteed)
*/
function keysIn(object) {
return isArrayLike(object) ? arrayLikeKeys(object, true) : baseKeysIn(object);
}
/**
* The opposite of `_.mapValues`; this method creates an object with the
* same values as `object` and keys generated by running each own enumerable
* string keyed property of `object` thru `iteratee`. The iteratee is invoked
* with three arguments: (value, key, object).
*
* @static
* @memberOf _
* @since 3.8.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns the new mapped object.
* @see _.mapValues
* @example
*
* _.mapKeys({ 'a': 1, 'b': 2 }, function(value, key) {
* return key + value;
* });
* // => { 'a1': 1, 'b2': 2 }
*/
function mapKeys(object, iteratee) {
var result = {};
iteratee = getIteratee(iteratee, 3);
baseForOwn(object, function(value, key, object) {
baseAssignValue(result, iteratee(value, key, object), value);
});
return result;
}
/**
* Creates an object with the same keys as `object` and values generated
* by running each own enumerable string keyed property of `object` thru
* `iteratee`. The iteratee is invoked with three arguments:
* (value, key, object).
*
* @static
* @memberOf _
* @since 2.4.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Object} Returns the new mapped object.
* @see _.mapKeys
* @example
*
* var users = {
* 'fred': { 'user': 'fred', 'age': 40 },
* 'pebbles': { 'user': 'pebbles', 'age': 1 }
* };
*
* _.mapValues(users, function(o) { return o.age; });
* // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed)
*
* // The `_.property` iteratee shorthand.
* _.mapValues(users, 'age');
* // => { 'fred': 40, 'pebbles': 1 } (iteration order is not guaranteed)
*/
function mapValues(object, iteratee) {
var result = {};
iteratee = getIteratee(iteratee, 3);
baseForOwn(object, function(value, key, object) {
baseAssignValue(result, key, iteratee(value, key, object));
});
return result;
}
/**
* This method is like `_.assign` except that it recursively merges own and
* inherited enumerable string keyed properties of source objects into the
* destination object. Source properties that resolve to `undefined` are
* skipped if a destination value exists. Array and plain object properties
* are merged recursively. Other objects and value types are overridden by
* assignment. Source objects are applied from left to right. Subsequent
* sources overwrite property assignments of previous sources.
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 0.5.0
* @category Object
* @param {Object} object The destination object.
* @param {...Object} [sources] The source objects.
* @returns {Object} Returns `object`.
* @example
*
* var object = {
* 'a': [{ 'b': 2 }, { 'd': 4 }]
* };
*
* var other = {
* 'a': [{ 'c': 3 }, { 'e': 5 }]
* };
*
* _.merge(object, other);
* // => { 'a': [{ 'b': 2, 'c': 3 }, { 'd': 4, 'e': 5 }] }
*/
var merge = createAssigner(function(object, source, srcIndex) {
baseMerge(object, source, srcIndex);
});
/**
* This method is like `_.merge` except that it accepts `customizer` which
* is invoked to produce the merged values of the destination and source
* properties. If `customizer` returns `undefined`, merging is handled by the
* method instead. The `customizer` is invoked with six arguments:
* (objValue, srcValue, key, object, source, stack).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The destination object.
* @param {...Object} sources The source objects.
* @param {Function} customizer The function to customize assigned values.
* @returns {Object} Returns `object`.
* @example
*
* function customizer(objValue, srcValue) {
* if (_.isArray(objValue)) {
* return objValue.concat(srcValue);
* }
* }
*
* var object = { 'a': [1], 'b': [2] };
* var other = { 'a': [3], 'b': [4] };
*
* _.mergeWith(object, other, customizer);
* // => { 'a': [1, 3], 'b': [2, 4] }
*/
var mergeWith = createAssigner(function(object, source, srcIndex, customizer) {
baseMerge(object, source, srcIndex, customizer);
});
/**
* The opposite of `_.pick`; this method creates an object composed of the
* own and inherited enumerable property paths of `object` that are not omitted.
*
* **Note:** This method is considerably slower than `_.pick`.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The source object.
* @param {...(string|string[])} [paths] The property paths to omit.
* @returns {Object} Returns the new object.
* @example
*
* var object = { 'a': 1, 'b': '2', 'c': 3 };
*
* _.omit(object, ['a', 'c']);
* // => { 'b': '2' }
*/
var omit = flatRest(function(object, paths) {
var result = {};
if (object == null) {
return result;
}
var isDeep = false;
paths = arrayMap(paths, function(path) {
path = castPath(path, object);
isDeep || (isDeep = path.length > 1);
return path;
});
copyObject(object, getAllKeysIn(object), result);
if (isDeep) {
result = baseClone(result, CLONE_DEEP_FLAG | CLONE_FLAT_FLAG | CLONE_SYMBOLS_FLAG, customOmitClone);
}
var length = paths.length;
while (length--) {
baseUnset(result, paths[length]);
}
return result;
});
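// Illustrative note on the deep-path branch above: when any omitted path is
// deeper than one key, the shallow copy is re-cloned with `CLONE_FLAT_FLAG`
// so `baseUnset` mutates the clone rather than the caller's nested objects.
//
// var source = { 'a': { 'b': 1, 'c': 2 } };
// _.omit(source, ['a.b']);
// // => { 'a': { 'c': 2 } }, while `source` is left untouched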
/**
* The opposite of `_.pickBy`; this method creates an object composed of
* the own and inherited enumerable string keyed properties of `object` that
* `predicate` doesn't return truthy for. The predicate is invoked with two
* arguments: (value, key).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The source object.
* @param {Function} [predicate=_.identity] The function invoked per property.
* @returns {Object} Returns the new object.
* @example
*
* var object = { 'a': 1, 'b': '2', 'c': 3 };
*
* _.omitBy(object, _.isNumber);
* // => { 'b': '2' }
*/
function omitBy(object, predicate) {
return pickBy(object, negate(getIteratee(predicate)));
}
/**
* Creates an object composed of the picked `object` properties.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The source object.
* @param {...(string|string[])} [paths] The property paths to pick.
* @returns {Object} Returns the new object.
* @example
*
* var object = { 'a': 1, 'b': '2', 'c': 3 };
*
* _.pick(object, ['a', 'c']);
* // => { 'a': 1, 'c': 3 }
*/
var pick = flatRest(function(object, paths) {
return object == null ? {} : basePick(object, paths);
});
/**
* Creates an object composed of the `object` properties `predicate` returns
* truthy for. The predicate is invoked with two arguments: (value, key).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The source object.
* @param {Function} [predicate=_.identity] The function invoked per property.
* @returns {Object} Returns the new object.
* @example
*
* var object = { 'a': 1, 'b': '2', 'c': 3 };
*
* _.pickBy(object, _.isNumber);
* // => { 'a': 1, 'c': 3 }
*/
function pickBy(object, predicate) {
if (object == null) {
return {};
}
var props = arrayMap(getAllKeysIn(object), function(prop) {
return [prop];
});
predicate = getIteratee(predicate);
return basePickBy(object, props, function(value, path) {
return predicate(value, path[0]);
});
}
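// Wrapping each key in a one-element array above keeps keys that merely look
// like deep paths from being re-parsed as such. A small sketch:
//
// _.pickBy({ 'a.b': 1, 'c': 2 }, _.isNumber);
// // => { 'a.b': 1, 'c': 2 } (the literal key 'a.b' survives intact)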
/**
* This method is like `_.get` except that if the resolved value is a
* function it's invoked with the `this` binding of its parent object and
* its result is returned.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The object to query.
* @param {Array|string} path The path of the property to resolve.
* @param {*} [defaultValue] The value returned for `undefined` resolved values.
* @returns {*} Returns the resolved value.
* @example
*
* var object = { 'a': [{ 'b': { 'c1': 3, 'c2': _.constant(4) } }] };
*
* _.result(object, 'a[0].b.c1');
* // => 3
*
* _.result(object, 'a[0].b.c2');
* // => 4
*
* _.result(object, 'a[0].b.c3', 'default');
* // => 'default'
*
* _.result(object, 'a[0].b.c3', _.constant('default'));
* // => 'default'
*/
function result(object, path, defaultValue) {
path = castPath(path, object);
var index = -1,
length = path.length;
// Ensure the loop is entered when path is empty.
if (!length) {
length = 1;
object = undefined;
}
while (++index < length) {
var value = object == null ? undefined : object[toKey(path[index])];
if (value === undefined) {
index = length;
value = defaultValue;
}
object = isFunction(value) ? value.call(object) : value;
}
return object;
}
/**
* Sets the value at `path` of `object`. If a portion of `path` doesn't exist,
* it's created. Arrays are created for missing index properties while objects
* are created for all other missing properties. Use `_.setWith` to customize
* `path` creation.
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 3.7.0
* @category Object
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to set.
* @param {*} value The value to set.
* @returns {Object} Returns `object`.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 3 } }] };
*
* _.set(object, 'a[0].b.c', 4);
* console.log(object.a[0].b.c);
* // => 4
*
* _.set(object, ['x', '0', 'y', 'z'], 5);
* console.log(object.x[0].y.z);
* // => 5
*/
function set(object, path, value) {
return object == null ? object : baseSet(object, path, value);
}
/**
* This method is like `_.set` except that it accepts `customizer` which is
* invoked to produce the objects of `path`. If `customizer` returns `undefined`
* path creation is handled by the method instead. The `customizer` is invoked
* with three arguments: (nsValue, key, nsObject).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to set.
* @param {*} value The value to set.
* @param {Function} [customizer] The function to customize assigned values.
* @returns {Object} Returns `object`.
* @example
*
* var object = {};
*
* _.setWith(object, '[0][1]', 'a', Object);
* // => { '0': { '1': 'a' } }
*/
function setWith(object, path, value, customizer) {
customizer = typeof customizer == 'function' ? customizer : undefined;
return object == null ? object : baseSet(object, path, value, customizer);
}
/**
* Creates an array of own enumerable string keyed-value pairs for `object`
* which can be consumed by `_.fromPairs`. If `object` is a map or set, its
* entries are returned.
*
* @static
* @memberOf _
* @since 4.0.0
* @alias entries
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the key-value pairs.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.toPairs(new Foo);
* // => [['a', 1], ['b', 2]] (iteration order is not guaranteed)
*/
var toPairs = createToPairs(keys);
/**
* Creates an array of own and inherited enumerable string keyed-value pairs
* for `object` which can be consumed by `_.fromPairs`. If `object` is a map
* or set, its entries are returned.
*
* @static
* @memberOf _
* @since 4.0.0
* @alias entriesIn
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the key-value pairs.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.toPairsIn(new Foo);
* // => [['a', 1], ['b', 2], ['c', 3]] (iteration order is not guaranteed)
*/
var toPairsIn = createToPairs(keysIn);
/**
* An alternative to `_.reduce`; this method transforms `object` to a new
* `accumulator` object which is the result of running each of its own
* enumerable string keyed properties thru `iteratee`, with each invocation
* potentially mutating the `accumulator` object. If `accumulator` is not
* provided, a new object with the same `[[Prototype]]` will be used. The
* iteratee is invoked with four arguments: (accumulator, value, key, object).
* Iteratee functions may exit iteration early by explicitly returning `false`.
*
* @static
* @memberOf _
* @since 1.3.0
* @category Object
* @param {Object} object The object to iterate over.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @param {*} [accumulator] The custom accumulator value.
* @returns {*} Returns the accumulated value.
* @example
*
* _.transform([2, 3, 4], function(result, n) {
* result.push(n *= n);
* return n % 2 == 0;
* }, []);
* // => [4, 9]
*
* _.transform({ 'a': 1, 'b': 2, 'c': 1 }, function(result, value, key) {
* (result[value] || (result[value] = [])).push(key);
* }, {});
* // => { '1': ['a', 'c'], '2': ['b'] }
*/
function transform(object, iteratee, accumulator) {
var isArr = isArray(object),
isArrLike = isArr || isBuffer(object) || isTypedArray(object);
iteratee = getIteratee(iteratee, 4);
if (accumulator == null) {
var Ctor = object && object.constructor;
if (isArrLike) {
accumulator = isArr ? new Ctor : [];
}
else if (isObject(object)) {
accumulator = isFunction(Ctor) ? baseCreate(getPrototype(object)) : {};
}
else {
accumulator = {};
}
}
(isArrLike ? arrayEach : baseForOwn)(object, function(value, index, object) {
return iteratee(accumulator, value, index, object);
});
return accumulator;
}
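// When no accumulator is supplied, plain objects inherit the source object's
// prototype, as sketched below (assuming `_` refers to this build):
//
// function Point() { this.x = 1; }
// var acc = _.transform(new Point, function(result, value, key) {
//   result[key] = value * 2;
// });
// // => acc.x === 2, and `acc instanceof Point` is true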
/**
* Removes the property at `path` of `object`.
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Object
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to unset.
* @returns {boolean} Returns `true` if the property is deleted, else `false`.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 7 } }] };
* _.unset(object, 'a[0].b.c');
* // => true
*
* console.log(object);
* // => { 'a': [{ 'b': {} }] };
*
* _.unset(object, ['a', '0', 'b', 'c']);
* // => true
*
* console.log(object);
* // => { 'a': [{ 'b': {} }] };
*/
function unset(object, path) {
return object == null ? true : baseUnset(object, path);
}
/**
 * This method is like `_.set` except that it accepts `updater` to produce the
* value to set. Use `_.updateWith` to customize `path` creation. The `updater`
* is invoked with one argument: (value).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.6.0
* @category Object
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to set.
* @param {Function} updater The function to produce the updated value.
* @returns {Object} Returns `object`.
* @example
*
* var object = { 'a': [{ 'b': { 'c': 3 } }] };
*
* _.update(object, 'a[0].b.c', function(n) { return n * n; });
* console.log(object.a[0].b.c);
* // => 9
*
* _.update(object, 'x[0].y.z', function(n) { return n ? n + 1 : 0; });
* console.log(object.x[0].y.z);
* // => 0
*/
function update(object, path, updater) {
return object == null ? object : baseUpdate(object, path, castFunction(updater));
}
/**
* This method is like `_.update` except that it accepts `customizer` which is
* invoked to produce the objects of `path`. If `customizer` returns `undefined`
* path creation is handled by the method instead. The `customizer` is invoked
* with three arguments: (nsValue, key, nsObject).
*
* **Note:** This method mutates `object`.
*
* @static
* @memberOf _
* @since 4.6.0
* @category Object
* @param {Object} object The object to modify.
* @param {Array|string} path The path of the property to set.
* @param {Function} updater The function to produce the updated value.
* @param {Function} [customizer] The function to customize assigned values.
* @returns {Object} Returns `object`.
* @example
*
* var object = {};
*
* _.updateWith(object, '[0][1]', _.constant('a'), Object);
* // => { '0': { '1': 'a' } }
*/
function updateWith(object, path, updater, customizer) {
customizer = typeof customizer == 'function' ? customizer : undefined;
return object == null ? object : baseUpdate(object, path, castFunction(updater), customizer);
}
/**
* Creates an array of the own enumerable string keyed property values of `object`.
*
* **Note:** Non-object values are coerced to objects.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property values.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.values(new Foo);
* // => [1, 2] (iteration order is not guaranteed)
*
* _.values('hi');
* // => ['h', 'i']
*/
function values(object) {
return object == null ? [] : baseValues(object, keys(object));
}
/**
* Creates an array of the own and inherited enumerable string keyed property
* values of `object`.
*
* **Note:** Non-object values are coerced to objects.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Object
* @param {Object} object The object to query.
* @returns {Array} Returns the array of property values.
* @example
*
* function Foo() {
* this.a = 1;
* this.b = 2;
* }
*
* Foo.prototype.c = 3;
*
* _.valuesIn(new Foo);
* // => [1, 2, 3] (iteration order is not guaranteed)
*/
function valuesIn(object) {
return object == null ? [] : baseValues(object, keysIn(object));
}
/*------------------------------------------------------------------------*/
/**
* Clamps `number` within the inclusive `lower` and `upper` bounds.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Number
* @param {number} number The number to clamp.
* @param {number} [lower] The lower bound.
* @param {number} upper The upper bound.
* @returns {number} Returns the clamped number.
* @example
*
* _.clamp(-10, -5, 5);
* // => -5
*
* _.clamp(10, -5, 5);
* // => 5
*/
function clamp(number, lower, upper) {
if (upper === undefined) {
upper = lower;
lower = undefined;
}
if (upper !== undefined) {
upper = toNumber(upper);
upper = upper === upper ? upper : 0;
}
if (lower !== undefined) {
lower = toNumber(lower);
lower = lower === lower ? lower : 0;
}
return baseClamp(toNumber(number), lower, upper);
}
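// The `upper === upper` / `lower === lower` self-comparisons above are `false`
// only for `NaN`, so `NaN` bounds collapse to `0`:
//
// _.clamp(10, NaN);
// // => 0 (the NaN upper bound is treated as 0)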
/**
 * Checks if `number` is between `start` and up to, but not including, `end`. If
* `end` is not specified, it's set to `start` with `start` then set to `0`.
* If `start` is greater than `end` the params are swapped to support
* negative ranges.
*
* @static
* @memberOf _
* @since 3.3.0
* @category Number
* @param {number} number The number to check.
* @param {number} [start=0] The start of the range.
* @param {number} end The end of the range.
* @returns {boolean} Returns `true` if `number` is in the range, else `false`.
* @see _.range, _.rangeRight
* @example
*
* _.inRange(3, 2, 4);
* // => true
*
* _.inRange(4, 8);
* // => true
*
* _.inRange(4, 2);
* // => false
*
* _.inRange(2, 2);
* // => false
*
* _.inRange(1.2, 2);
* // => true
*
* _.inRange(5.2, 4);
* // => false
*
* _.inRange(-3, -2, -6);
* // => true
*/
function inRange(number, start, end) {
start = toFinite(start);
if (end === undefined) {
end = start;
start = 0;
} else {
end = toFinite(end);
}
number = toNumber(number);
return baseInRange(number, start, end);
}
/**
* Produces a random number between the inclusive `lower` and `upper` bounds.
* If only one argument is provided a number between `0` and the given number
* is returned. If `floating` is `true`, or either `lower` or `upper` are
* floats, a floating-point number is returned instead of an integer.
*
* **Note:** JavaScript follows the IEEE-754 standard for resolving
* floating-point values which can produce unexpected results.
*
* @static
* @memberOf _
* @since 0.7.0
* @category Number
* @param {number} [lower=0] The lower bound.
* @param {number} [upper=1] The upper bound.
* @param {boolean} [floating] Specify returning a floating-point number.
* @returns {number} Returns the random number.
* @example
*
* _.random(0, 5);
* // => an integer between 0 and 5
*
* _.random(5);
* // => also an integer between 0 and 5
*
* _.random(5, true);
* // => a floating-point number between 0 and 5
*
* _.random(1.2, 5.2);
* // => a floating-point number between 1.2 and 5.2
*/
function random(lower, upper, floating) {
if (floating && typeof floating != 'boolean' && isIterateeCall(lower, upper, floating)) {
upper = floating = undefined;
}
if (floating === undefined) {
if (typeof upper == 'boolean') {
floating = upper;
upper = undefined;
}
else if (typeof lower == 'boolean') {
floating = lower;
lower = undefined;
}
}
if (lower === undefined && upper === undefined) {
lower = 0;
upper = 1;
}
else {
lower = toFinite(lower);
if (upper === undefined) {
upper = lower;
lower = 0;
} else {
upper = toFinite(upper);
}
}
if (lower > upper) {
var temp = lower;
lower = upper;
upper = temp;
}
if (floating || lower % 1 || upper % 1) {
var rand = nativeRandom();
return nativeMin(lower + (rand * (upper - lower + freeParseFloat('1e-' + ((rand + '').length - 1)))), upper);
}
return baseRandom(lower, upper);
}
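// In the floating branch above, `Math.random()` excludes 1, so a tiny nudge of
// one unit in `rand`'s last printed decimal place is added to the range to make
// `upper` attainable; `nativeMin` then caps any overshoot at `upper`.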
/*------------------------------------------------------------------------*/
/**
* Converts `string` to [camel case](https://en.wikipedia.org/wiki/CamelCase).
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the camel cased string.
* @example
*
* _.camelCase('Foo Bar');
* // => 'fooBar'
*
* _.camelCase('--foo-bar--');
* // => 'fooBar'
*
* _.camelCase('__FOO_BAR__');
* // => 'fooBar'
*/
var camelCase = createCompounder(function(result, word, index) {
word = word.toLowerCase();
return result + (index ? capitalize(word) : word);
});
/**
* Converts the first character of `string` to upper case and the remaining
* to lower case.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to capitalize.
* @returns {string} Returns the capitalized string.
* @example
*
* _.capitalize('FRED');
* // => 'Fred'
*/
function capitalize(string) {
return upperFirst(toString(string).toLowerCase());
}
/**
* Deburrs `string` by converting
* [Latin-1 Supplement](https://en.wikipedia.org/wiki/Latin-1_Supplement_(Unicode_block)#Character_table)
* and [Latin Extended-A](https://en.wikipedia.org/wiki/Latin_Extended-A)
* letters to basic Latin letters and removing
* [combining diacritical marks](https://en.wikipedia.org/wiki/Combining_Diacritical_Marks).
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to deburr.
* @returns {string} Returns the deburred string.
* @example
*
* _.deburr('déjà vu');
* // => 'deja vu'
*/
function deburr(string) {
string = toString(string);
return string && string.replace(reLatin, deburrLetter).replace(reComboMark, '');
}
/**
* Checks if `string` ends with the given target string.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to inspect.
* @param {string} [target] The string to search for.
* @param {number} [position=string.length] The position to search up to.
* @returns {boolean} Returns `true` if `string` ends with `target`,
* else `false`.
* @example
*
* _.endsWith('abc', 'c');
* // => true
*
* _.endsWith('abc', 'b');
* // => false
*
* _.endsWith('abc', 'b', 2);
* // => true
*/
function endsWith(string, target, position) {
string = toString(string);
target = baseToString(target);
var length = string.length;
position = position === undefined
? length
: baseClamp(toInteger(position), 0, length);
var end = position;
position -= target.length;
return position >= 0 && string.slice(position, end) == target;
}
/**
* Converts the characters "&", "<", ">", '"', and "'" in `string` to their
* corresponding HTML entities.
*
* **Note:** No other characters are escaped. To escape additional
* characters use a third-party library like [_he_](https://mths.be/he).
*
* Though the ">" character is escaped for symmetry, characters like
* ">" and "/" don't need escaping in HTML and have no special meaning
* unless they're part of a tag or unquoted attribute value. See
* [Mathias Bynens's article](https://mathiasbynens.be/notes/ambiguous-ampersands)
* (under "semi-related fun fact") for more details.
*
* When working with HTML you should always
* [quote attribute values](http://wonko.com/post/html-escaping) to reduce
* XSS vectors.
*
* @static
* @since 0.1.0
* @memberOf _
* @category String
* @param {string} [string=''] The string to escape.
* @returns {string} Returns the escaped string.
* @example
*
* _.escape('fred, barney, & pebbles');
 * // => 'fred, barney, &amp; pebbles'
*/
function escape(string) {
string = toString(string);
return (string && reHasUnescapedHtml.test(string))
? string.replace(reUnescapedHtml, escapeHtmlChar)
: string;
}
/**
* Escapes the `RegExp` special characters "^", "$", "\", ".", "*", "+",
* "?", "(", ")", "[", "]", "{", "}", and "|" in `string`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to escape.
* @returns {string} Returns the escaped string.
* @example
*
* _.escapeRegExp('[lodash](https://lodash.com/)');
* // => '\[lodash\]\(https://lodash\.com/\)'
*/
function escapeRegExp(string) {
string = toString(string);
return (string && reHasRegExpChar.test(string))
? string.replace(reRegExpChar, '\\$&')
: string;
}
/**
* Converts `string` to
* [kebab case](https://en.wikipedia.org/wiki/Letter_case#Special_case_styles).
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the kebab cased string.
* @example
*
* _.kebabCase('Foo Bar');
* // => 'foo-bar'
*
* _.kebabCase('fooBar');
* // => 'foo-bar'
*
* _.kebabCase('__FOO_BAR__');
* // => 'foo-bar'
*/
var kebabCase = createCompounder(function(result, word, index) {
return result + (index ? '-' : '') + word.toLowerCase();
});
/**
* Converts `string`, as space separated words, to lower case.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the lower cased string.
* @example
*
* _.lowerCase('--Foo-Bar--');
* // => 'foo bar'
*
* _.lowerCase('fooBar');
* // => 'foo bar'
*
* _.lowerCase('__FOO_BAR__');
* // => 'foo bar'
*/
var lowerCase = createCompounder(function(result, word, index) {
return result + (index ? ' ' : '') + word.toLowerCase();
});
/**
* Converts the first character of `string` to lower case.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the converted string.
* @example
*
* _.lowerFirst('Fred');
* // => 'fred'
*
* _.lowerFirst('FRED');
* // => 'fRED'
*/
var lowerFirst = createCaseFirst('toLowerCase');
/**
* Pads `string` on the left and right sides if it's shorter than `length`.
 * Padding characters are truncated if they can't evenly fill the padding length.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to pad.
* @param {number} [length=0] The padding length.
* @param {string} [chars=' '] The string used as padding.
* @returns {string} Returns the padded string.
* @example
*
* _.pad('abc', 8);
* // => ' abc '
*
* _.pad('abc', 8, '_-');
* // => '_-abc_-_'
*
* _.pad('abc', 3);
* // => 'abc'
*/
function pad(string, length, chars) {
string = toString(string);
length = toInteger(length);
var strLength = length ? stringSize(string) : 0;
if (!length || strLength >= length) {
return string;
}
var mid = (length - strLength) / 2;
return (
createPadding(nativeFloor(mid), chars) +
string +
createPadding(nativeCeil(mid), chars)
);
}
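// The floor/ceil split above puts the extra padding character (when the total
// padding is odd) on the right-hand side:
//
// _.pad('abc', 8, '_-');
// // => '_-abc_-_' (floor(2.5) = 2 chars left, ceil(2.5) = 3 chars right)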
/**
* Pads `string` on the right side if it's shorter than `length`. Padding
* characters are truncated if they exceed `length`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to pad.
* @param {number} [length=0] The padding length.
* @param {string} [chars=' '] The string used as padding.
* @returns {string} Returns the padded string.
* @example
*
* _.padEnd('abc', 6);
* // => 'abc '
*
* _.padEnd('abc', 6, '_-');
* // => 'abc_-_'
*
* _.padEnd('abc', 3);
* // => 'abc'
*/
function padEnd(string, length, chars) {
string = toString(string);
length = toInteger(length);
var strLength = length ? stringSize(string) : 0;
return (length && strLength < length)
? (string + createPadding(length - strLength, chars))
: string;
}
/**
* Pads `string` on the left side if it's shorter than `length`. Padding
* characters are truncated if they exceed `length`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to pad.
* @param {number} [length=0] The padding length.
* @param {string} [chars=' '] The string used as padding.
* @returns {string} Returns the padded string.
* @example
*
* _.padStart('abc', 6);
* // => ' abc'
*
* _.padStart('abc', 6, '_-');
* // => '_-_abc'
*
* _.padStart('abc', 3);
* // => 'abc'
*/
function padStart(string, length, chars) {
string = toString(string);
length = toInteger(length);
var strLength = length ? stringSize(string) : 0;
return (length && strLength < length)
? (createPadding(length - strLength, chars) + string)
: string;
}
/**
* Converts `string` to an integer of the specified radix. If `radix` is
 * `undefined` or `0`, a `radix` of `10` is used unless `string` is
 * hexadecimal, in which case a `radix` of `16` is used.
*
* **Note:** This method aligns with the
* [ES5 implementation](https://es5.github.io/#x15.1.2.2) of `parseInt`.
*
* @static
* @memberOf _
* @since 1.1.0
* @category String
* @param {string} string The string to convert.
 * @param {number} [radix=10] The radix to interpret `string` by.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {number} Returns the converted integer.
* @example
*
* _.parseInt('08');
* // => 8
*
* _.map(['6', '08', '10'], _.parseInt);
* // => [6, 8, 10]
*/
function parseInt(string, radix, guard) {
if (guard || radix == null) {
radix = 0;
} else if (radix) {
radix = +radix;
}
return nativeParseInt(toString(string).replace(reTrimStart, ''), radix || 0);
}
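// Forcing a `0` radix onto `nativeParseInt` keeps the ES5 semantics: decimal
// by default, hexadecimal only for an explicit prefix, and never legacy octal.
//
// _.parseInt('0x20');
// // => 32 (the '0x' prefix selects radix 16)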
/**
* Repeats the given string `n` times.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to repeat.
* @param {number} [n=1] The number of times to repeat the string.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {string} Returns the repeated string.
* @example
*
* _.repeat('*', 3);
* // => '***'
*
* _.repeat('abc', 2);
* // => 'abcabc'
*
* _.repeat('abc', 0);
* // => ''
*/
function repeat(string, n, guard) {
if ((guard ? isIterateeCall(string, n, guard) : n === undefined)) {
n = 1;
} else {
n = toInteger(n);
}
return baseRepeat(toString(string), n);
}
/**
* Replaces matches for `pattern` in `string` with `replacement`.
*
* **Note:** This method is based on
* [`String#replace`](https://mdn.io/String/replace).
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to modify.
* @param {RegExp|string} pattern The pattern to replace.
* @param {Function|string} replacement The match replacement.
* @returns {string} Returns the modified string.
* @example
*
* _.replace('Hi Fred', 'Fred', 'Barney');
* // => 'Hi Barney'
*/
function replace() {
var args = arguments,
string = toString(args[0]);
return args.length < 3 ? string : string.replace(args[1], args[2]);
}
/**
* Converts `string` to
* [snake case](https://en.wikipedia.org/wiki/Snake_case).
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the snake cased string.
* @example
*
* _.snakeCase('Foo Bar');
* // => 'foo_bar'
*
* _.snakeCase('fooBar');
* // => 'foo_bar'
*
* _.snakeCase('--FOO-BAR--');
* // => 'foo_bar'
*/
var snakeCase = createCompounder(function(result, word, index) {
return result + (index ? '_' : '') + word.toLowerCase();
});
/**
* Splits `string` by `separator`.
*
* **Note:** This method is based on
* [`String#split`](https://mdn.io/String/split).
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to split.
* @param {RegExp|string} separator The separator pattern to split by.
* @param {number} [limit] The length to truncate results to.
* @returns {Array} Returns the string segments.
* @example
*
* _.split('a-b-c', '-', 2);
* // => ['a', 'b']
*/
function split(string, separator, limit) {
if (limit && typeof limit != 'number' && isIterateeCall(string, separator, limit)) {
separator = limit = undefined;
}
limit = limit === undefined ? MAX_ARRAY_LENGTH : limit >>> 0;
if (!limit) {
return [];
}
string = toString(string);
if (string && (
typeof separator == 'string' ||
(separator != null && !isRegExp(separator))
)) {
separator = baseToString(separator);
if (!separator && hasUnicode(string)) {
return castSlice(stringToArray(string), 0, limit);
}
}
return string.split(separator, limit);
}
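// The `hasUnicode` branch above splits by code points rather than code units
// when the separator is an empty string, so surrogate pairs stay whole:
//
// _.split('a\ud835\udc00b', '');
// // => ['a', '\ud835\udc00', 'b'] (native `String#split` would yield 4 units)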
/**
* Converts `string` to
* [start case](https://en.wikipedia.org/wiki/Letter_case#Stylistic_or_specialised_usage).
*
* @static
* @memberOf _
* @since 3.1.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the start cased string.
* @example
*
* _.startCase('--foo-bar--');
* // => 'Foo Bar'
*
* _.startCase('fooBar');
* // => 'Foo Bar'
*
* _.startCase('__FOO_BAR__');
* // => 'FOO BAR'
*/
var startCase = createCompounder(function(result, word, index) {
return result + (index ? ' ' : '') + upperFirst(word);
});
/**
* Checks if `string` starts with the given target string.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to inspect.
* @param {string} [target] The string to search for.
* @param {number} [position=0] The position to search from.
* @returns {boolean} Returns `true` if `string` starts with `target`,
* else `false`.
* @example
*
* _.startsWith('abc', 'a');
* // => true
*
* _.startsWith('abc', 'b');
* // => false
*
* _.startsWith('abc', 'b', 1);
* // => true
*/
function startsWith(string, target, position) {
string = toString(string);
position = position == null
? 0
: baseClamp(toInteger(position), 0, string.length);
target = baseToString(target);
return string.slice(position, position + target.length) == target;
}
/**
* Creates a compiled template function that can interpolate data properties
* in "interpolate" delimiters, HTML-escape interpolated data properties in
* "escape" delimiters, and execute JavaScript in "evaluate" delimiters. Data
* properties may be accessed as free variables in the template. If a setting
* object is given, it takes precedence over `_.templateSettings` values.
*
* **Note:** In the development build `_.template` utilizes
* [sourceURLs](http://www.html5rocks.com/en/tutorials/developertools/sourcemaps/#toc-sourceurl)
* for easier debugging.
*
* For more information on precompiling templates see
* [lodash's custom builds documentation](https://lodash.com/custom-builds).
*
* For more information on Chrome extension sandboxes see
* [Chrome's extensions documentation](https://developer.chrome.com/extensions/sandboxingEval).
*
* @static
* @since 0.1.0
* @memberOf _
* @category String
* @param {string} [string=''] The template string.
* @param {Object} [options={}] The options object.
* @param {RegExp} [options.escape=_.templateSettings.escape]
* The HTML "escape" delimiter.
* @param {RegExp} [options.evaluate=_.templateSettings.evaluate]
* The "evaluate" delimiter.
* @param {Object} [options.imports=_.templateSettings.imports]
* An object to import into the template as free variables.
* @param {RegExp} [options.interpolate=_.templateSettings.interpolate]
* The "interpolate" delimiter.
* @param {string} [options.sourceURL='lodash.templateSources[n]']
* The sourceURL of the compiled template.
* @param {string} [options.variable='obj']
* The data object variable name.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Function} Returns the compiled template function.
* @example
*
* // Use the "interpolate" delimiter to create a compiled template.
* var compiled = _.template('hello <%= user %>!');
* compiled({ 'user': 'fred' });
* // => 'hello fred!'
*
* // Use the HTML "escape" delimiter to escape data property values.
* var compiled = _.template('<b><%- value %></b>');
* compiled({ 'value': '<script>' });
 * // => '<b>&lt;script&gt;</b>'
*
* // Use the "evaluate" delimiter to execute JavaScript and generate HTML.
* var compiled = _.template('<% _.forEach(users, function(user) { %><li><%- user %></li><% }); %>');
* compiled({ 'users': ['fred', 'barney'] });
* // => '<li>fred</li><li>barney</li>'
*
* // Use the internal `print` function in "evaluate" delimiters.
* var compiled = _.template('<% print("hello " + user); %>!');
* compiled({ 'user': 'barney' });
* // => 'hello barney!'
*
* // Use the ES template literal delimiter as an "interpolate" delimiter.
* // Disable support by replacing the "interpolate" delimiter.
* var compiled = _.template('hello ${ user }!');
* compiled({ 'user': 'pebbles' });
* // => 'hello pebbles!'
*
* // Use backslashes to treat delimiters as plain text.
* var compiled = _.template('<%= "\\<%- value %\\>" %>');
* compiled({ 'value': 'ignored' });
* // => '<%- value %>'
*
* // Use the `imports` option to import `jQuery` as `jq`.
* var text = '<% jq.each(users, function(user) { %><li><%- user %></li><% }); %>';
* var compiled = _.template(text, { 'imports': { 'jq': jQuery } });
* compiled({ 'users': ['fred', 'barney'] });
* // => '<li>fred</li><li>barney</li>'
*
* // Use the `sourceURL` option to specify a custom sourceURL for the template.
* var compiled = _.template('hello <%= user %>!', { 'sourceURL': '/basic/greeting.jst' });
* compiled(data);
* // => Find the source of "greeting.jst" under the Sources tab or Resources panel of the web inspector.
*
* // Use the `variable` option to ensure a with-statement isn't used in the compiled template.
* var compiled = _.template('hi <%= data.user %>!', { 'variable': 'data' });
* compiled.source;
* // => function(data) {
* // var __t, __p = '';
* // __p += 'hi ' + ((__t = ( data.user )) == null ? '' : __t) + '!';
* // return __p;
* // }
*
* // Use custom template delimiters.
* _.templateSettings.interpolate = /{{([\s\S]+?)}}/g;
* var compiled = _.template('hello {{ user }}!');
* compiled({ 'user': 'mustache' });
* // => 'hello mustache!'
*
* // Use the `source` property to inline compiled templates for meaningful
* // line numbers in error messages and stack traces.
* fs.writeFileSync(path.join(process.cwd(), 'jst.js'), '\
* var JST = {\
* "main": ' + _.template(mainText).source + '\
* };\
* ');
*/
function template(string, options, guard) {
// Based on John Resig's `tmpl` implementation
// (http://ejohn.org/blog/javascript-micro-templating/)
// and Laura Doktorova's doT.js (https://github.com/olado/doT).
var settings = lodash.templateSettings;
if (guard && isIterateeCall(string, options, guard)) {
options = undefined;
}
string = toString(string);
options = assignInWith({}, options, settings, customDefaultsAssignIn);
var imports = assignInWith({}, options.imports, settings.imports, customDefaultsAssignIn),
importsKeys = keys(imports),
importsValues = baseValues(imports, importsKeys);
var isEscaping,
isEvaluating,
index = 0,
interpolate = options.interpolate || reNoMatch,
source = "__p += '";
// Compile the regexp to match each delimiter.
var reDelimiters = RegExp(
(options.escape || reNoMatch).source + '|' +
interpolate.source + '|' +
(interpolate === reInterpolate ? reEsTemplate : reNoMatch).source + '|' +
(options.evaluate || reNoMatch).source + '|$'
, 'g');
// Use a sourceURL for easier debugging.
// The sourceURL gets injected into the source that's eval-ed, so be careful
// to normalize all kinds of whitespace, so e.g. newlines (and unicode versions of it) can't sneak in
// and escape the comment, thus injecting code that gets evaled.
var sourceURL = '//# sourceURL=' +
(hasOwnProperty.call(options, 'sourceURL')
? (options.sourceURL + '').replace(/\s/g, ' ')
: ('lodash.templateSources[' + (++templateCounter) + ']')
) + '\n';
string.replace(reDelimiters, function(match, escapeValue, interpolateValue, esTemplateValue, evaluateValue, offset) {
interpolateValue || (interpolateValue = esTemplateValue);
// Escape characters that can't be included in string literals.
source += string.slice(index, offset).replace(reUnescapedString, escapeStringChar);
// Replace delimiters with snippets.
if (escapeValue) {
isEscaping = true;
source += "' +\n__e(" + escapeValue + ") +\n'";
}
if (evaluateValue) {
isEvaluating = true;
source += "';\n" + evaluateValue + ";\n__p += '";
}
if (interpolateValue) {
source += "' +\n((__t = (" + interpolateValue + ")) == null ? '' : __t) +\n'";
}
index = offset + match.length;
// The JS engine embedded in Adobe products needs `match` returned in
// order to produce the correct `offset` value.
return match;
});
source += "';\n";
// If `variable` is not specified wrap a with-statement around the generated
// code to add the data object to the top of the scope chain.
var variable = hasOwnProperty.call(options, 'variable') && options.variable;
if (!variable) {
source = 'with (obj) {\n' + source + '\n}\n';
}
// Cleanup code by stripping empty strings.
source = (isEvaluating ? source.replace(reEmptyStringLeading, '') : source)
.replace(reEmptyStringMiddle, '$1')
.replace(reEmptyStringTrailing, '$1;');
// Frame code as the function body.
source = 'function(' + (variable || 'obj') + ') {\n' +
(variable
? ''
: 'obj || (obj = {});\n'
) +
"var __t, __p = ''" +
(isEscaping
? ', __e = _.escape'
: ''
) +
(isEvaluating
? ', __j = Array.prototype.join;\n' +
"function print() { __p += __j.call(arguments, '') }\n"
: ';\n'
) +
source +
'return __p\n}';
var result = attempt(function() {
return Function(importsKeys, sourceURL + 'return ' + source)
.apply(undefined, importsValues);
});
// Provide the compiled function's source by its `toString` method or
// the `source` property as a convenience for inlining compiled templates.
result.source = source;
if (isError(result)) {
throw result;
}
return result;
}
/**
* Converts `string`, as a whole, to lower case just like
* [String#toLowerCase](https://mdn.io/toLowerCase).
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the lower cased string.
* @example
*
* _.toLower('--Foo-Bar--');
* // => '--foo-bar--'
*
* _.toLower('fooBar');
* // => 'foobar'
*
* _.toLower('__FOO_BAR__');
* // => '__foo_bar__'
*/
function toLower(value) {
return toString(value).toLowerCase();
}
/**
* Converts `string`, as a whole, to upper case just like
* [String#toUpperCase](https://mdn.io/toUpperCase).
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the upper cased string.
* @example
*
* _.toUpper('--foo-bar--');
* // => '--FOO-BAR--'
*
* _.toUpper('fooBar');
* // => 'FOOBAR'
*
* _.toUpper('__foo_bar__');
* // => '__FOO_BAR__'
*/
function toUpper(value) {
return toString(value).toUpperCase();
}
/**
* Removes leading and trailing whitespace or specified characters from `string`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to trim.
* @param {string} [chars=whitespace] The characters to trim.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {string} Returns the trimmed string.
* @example
*
* _.trim(' abc ');
* // => 'abc'
*
* _.trim('-_-abc-_-', '_-');
* // => 'abc'
*
* _.map([' foo ', ' bar '], _.trim);
* // => ['foo', 'bar']
*/
function trim(string, chars, guard) {
string = toString(string);
if (string && (guard || chars === undefined)) {
return string.replace(reTrim, '');
}
if (!string || !(chars = baseToString(chars))) {
return string;
}
var strSymbols = stringToArray(string),
chrSymbols = stringToArray(chars),
start = charsStartIndex(strSymbols, chrSymbols),
end = charsEndIndex(strSymbols, chrSymbols) + 1;
return castSlice(strSymbols, start, end).join('');
}
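// Trimming operates on the symbol arrays above, so it is code-point aware:
//
// _.trim('__\ud835\udc00__', '_');
// // => '\ud835\udc00' (the surrogate pair is preserved as one symbol)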
/**
* Removes trailing whitespace or specified characters from `string`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to trim.
* @param {string} [chars=whitespace] The characters to trim.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {string} Returns the trimmed string.
* @example
*
* _.trimEnd(' abc ');
* // => ' abc'
*
* _.trimEnd('-_-abc-_-', '_-');
* // => '-_-abc'
*/
function trimEnd(string, chars, guard) {
string = toString(string);
if (string && (guard || chars === undefined)) {
return string.replace(reTrimEnd, '');
}
if (!string || !(chars = baseToString(chars))) {
return string;
}
var strSymbols = stringToArray(string),
end = charsEndIndex(strSymbols, stringToArray(chars)) + 1;
return castSlice(strSymbols, 0, end).join('');
}
/**
* Removes leading whitespace or specified characters from `string`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to trim.
* @param {string} [chars=whitespace] The characters to trim.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {string} Returns the trimmed string.
* @example
*
* _.trimStart(' abc ');
* // => 'abc '
*
* _.trimStart('-_-abc-_-', '_-');
* // => 'abc-_-'
*/
function trimStart(string, chars, guard) {
string = toString(string);
if (string && (guard || chars === undefined)) {
return string.replace(reTrimStart, '');
}
if (!string || !(chars = baseToString(chars))) {
return string;
}
var strSymbols = stringToArray(string),
start = charsStartIndex(strSymbols, stringToArray(chars));
return castSlice(strSymbols, start).join('');
}
/**
* Truncates `string` if it's longer than the given maximum string length.
* The last characters of the truncated string are replaced with the omission
* string which defaults to "...".
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to truncate.
* @param {Object} [options={}] The options object.
* @param {number} [options.length=30] The maximum string length.
* @param {string} [options.omission='...'] The string to indicate text is omitted.
* @param {RegExp|string} [options.separator] The separator pattern to truncate to.
* @returns {string} Returns the truncated string.
* @example
*
* _.truncate('hi-diddly-ho there, neighborino');
* // => 'hi-diddly-ho there, neighbo...'
*
* _.truncate('hi-diddly-ho there, neighborino', {
* 'length': 24,
* 'separator': ' '
* });
* // => 'hi-diddly-ho there,...'
*
* _.truncate('hi-diddly-ho there, neighborino', {
* 'length': 24,
* 'separator': /,? +/
* });
* // => 'hi-diddly-ho there...'
*
* _.truncate('hi-diddly-ho there, neighborino', {
* 'omission': ' [...]'
* });
* // => 'hi-diddly-ho there, neig [...]'
*/
function truncate(string, options) {
var length = DEFAULT_TRUNC_LENGTH,
omission = DEFAULT_TRUNC_OMISSION;
if (isObject(options)) {
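      // Note: when 'separator' is absent from `options`, the right-hand
      // `separator` below is the hoisted (undefined) var, so it stays undefined.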
var separator = 'separator' in options ? options.separator : separator;
length = 'length' in options ? toInteger(options.length) : length;
omission = 'omission' in options ? baseToString(options.omission) : omission;
}
string = toString(string);
var strLength = string.length;
if (hasUnicode(string)) {
var strSymbols = stringToArray(string);
strLength = strSymbols.length;
}
if (length >= strLength) {
return string;
}
var end = length - stringSize(omission);
if (end < 1) {
return omission;
}
var result = strSymbols
? castSlice(strSymbols, 0, end).join('')
: string.slice(0, end);
if (separator === undefined) {
return result + omission;
}
if (strSymbols) {
end += (result.length - end);
}
if (isRegExp(separator)) {
if (string.slice(end).search(separator)) {
var match,
substring = result;
if (!separator.global) {
separator = RegExp(separator.source, toString(reFlags.exec(separator)) + 'g');
}
separator.lastIndex = 0;
while ((match = separator.exec(substring))) {
var newEnd = match.index;
}
result = result.slice(0, newEnd === undefined ? end : newEnd);
}
} else if (string.indexOf(baseToString(separator), end) != end) {
var index = result.lastIndexOf(separator);
if (index > -1) {
result = result.slice(0, index);
}
}
return result + omission;
}
/**
 * The inverse of `_.escape`; this method converts the HTML entities
 * `&amp;`, `&lt;`, `&gt;`, `&quot;`, and `&#39;` in `string` to
 * their corresponding characters.
*
* **Note:** No other HTML entities are unescaped. To unescape additional
* HTML entities use a third-party library like [_he_](https://mths.be/he).
*
* @static
* @memberOf _
* @since 0.6.0
* @category String
* @param {string} [string=''] The string to unescape.
* @returns {string} Returns the unescaped string.
* @example
*
 * _.unescape('fred, barney, &amp; pebbles');
 * // => 'fred, barney, & pebbles'
*/
function unescape(string) {
string = toString(string);
return (string && reHasEscapedHtml.test(string))
? string.replace(reEscapedHtml, unescapeHtmlChar)
: string;
}
/**
* Converts `string`, as space separated words, to upper case.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the upper cased string.
* @example
*
* _.upperCase('--foo-bar');
* // => 'FOO BAR'
*
* _.upperCase('fooBar');
* // => 'FOO BAR'
*
* _.upperCase('__foo_bar__');
* // => 'FOO BAR'
*/
var upperCase = createCompounder(function(result, word, index) {
return result + (index ? ' ' : '') + word.toUpperCase();
});
/**
* Converts the first character of `string` to upper case.
*
* @static
* @memberOf _
* @since 4.0.0
* @category String
* @param {string} [string=''] The string to convert.
* @returns {string} Returns the converted string.
* @example
*
* _.upperFirst('fred');
* // => 'Fred'
*
* _.upperFirst('FRED');
* // => 'FRED'
*/
var upperFirst = createCaseFirst('toUpperCase');
/**
* Splits `string` into an array of its words.
*
* @static
* @memberOf _
* @since 3.0.0
* @category String
* @param {string} [string=''] The string to inspect.
* @param {RegExp|string} [pattern] The pattern to match words.
* @param- {Object} [guard] Enables use as an iteratee for methods like `_.map`.
* @returns {Array} Returns the words of `string`.
* @example
*
* _.words('fred, barney, & pebbles');
* // => ['fred', 'barney', 'pebbles']
*
* _.words('fred, barney, & pebbles', /[^, ]+/g);
* // => ['fred', 'barney', '&', 'pebbles']
*/
function words(string, pattern, guard) {
string = toString(string);
pattern = guard ? undefined : pattern;
if (pattern === undefined) {
return hasUnicodeWord(string) ? unicodeWords(string) : asciiWords(string);
}
return string.match(pattern) || [];
}
/*------------------------------------------------------------------------*/
/**
* Attempts to invoke `func`, returning either the result or the caught error
* object. Any additional arguments are provided to `func` when it's invoked.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Util
* @param {Function} func The function to attempt.
* @param {...*} [args] The arguments to invoke `func` with.
* @returns {*} Returns the `func` result or error object.
* @example
*
* // Avoid throwing errors for invalid selectors.
* var elements = _.attempt(function(selector) {
* return document.querySelectorAll(selector);
* }, '>_>');
*
* if (_.isError(elements)) {
* elements = [];
* }
*/
var attempt = baseRest(function(func, args) {
try {
return apply(func, undefined, args);
} catch (e) {
return isError(e) ? e : new Error(e);
}
});
/**
* Binds methods of an object to the object itself, overwriting the existing
* method.
*
* **Note:** This method doesn't set the "length" property of bound functions.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {Object} object The object to bind and assign the bound methods to.
* @param {...(string|string[])} methodNames The object method names to bind.
* @returns {Object} Returns `object`.
* @example
*
* var view = {
* 'label': 'docs',
* 'click': function() {
* console.log('clicked ' + this.label);
* }
* };
*
* _.bindAll(view, ['click']);
* jQuery(element).on('click', view.click);
* // => Logs 'clicked docs' when clicked.
*/
var bindAll = flatRest(function(object, methodNames) {
arrayEach(methodNames, function(key) {
key = toKey(key);
baseAssignValue(object, key, bind(object[key], object));
});
return object;
});
/**
* Creates a function that iterates over `pairs` and invokes the corresponding
* function of the first predicate to return truthy. The predicate-function
* pairs are invoked with the `this` binding and arguments of the created
* function.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {Array} pairs The predicate-function pairs.
* @returns {Function} Returns the new composite function.
* @example
*
* var func = _.cond([
* [_.matches({ 'a': 1 }), _.constant('matches A')],
* [_.conforms({ 'b': _.isNumber }), _.constant('matches B')],
* [_.stubTrue, _.constant('no match')]
* ]);
*
* func({ 'a': 1, 'b': 2 });
* // => 'matches A'
*
* func({ 'a': 0, 'b': 1 });
* // => 'matches B'
*
* func({ 'a': '1', 'b': '2' });
* // => 'no match'
*/
function cond(pairs) {
var length = pairs == null ? 0 : pairs.length,
toIteratee = getIteratee();
pairs = !length ? [] : arrayMap(pairs, function(pair) {
if (typeof pair[1] != 'function') {
throw new TypeError(FUNC_ERROR_TEXT);
}
return [toIteratee(pair[0]), pair[1]];
});
return baseRest(function(args) {
var index = -1;
while (++index < length) {
var pair = pairs[index];
if (apply(pair[0], this, args)) {
return apply(pair[1], this, args);
}
}
});
}
/**
* Creates a function that invokes the predicate properties of `source` with
* the corresponding property values of a given object, returning `true` if
* all predicates return truthy, else `false`.
*
* **Note:** The created function is equivalent to `_.conformsTo` with
* `source` partially applied.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {Object} source The object of property predicates to conform to.
* @returns {Function} Returns the new spec function.
* @example
*
* var objects = [
* { 'a': 2, 'b': 1 },
* { 'a': 1, 'b': 2 }
* ];
*
* _.filter(objects, _.conforms({ 'b': function(n) { return n > 1; } }));
* // => [{ 'a': 1, 'b': 2 }]
*/
function conforms(source) {
return baseConforms(baseClone(source, CLONE_DEEP_FLAG));
}
/**
* Creates a function that returns `value`.
*
* @static
* @memberOf _
* @since 2.4.0
* @category Util
* @param {*} value The value to return from the new function.
* @returns {Function} Returns the new constant function.
* @example
*
* var objects = _.times(2, _.constant({ 'a': 1 }));
*
* console.log(objects);
* // => [{ 'a': 1 }, { 'a': 1 }]
*
* console.log(objects[0] === objects[1]);
* // => true
*/
function constant(value) {
return function() {
return value;
};
}
/**
* Checks `value` to determine whether a default value should be returned in
* its place. The `defaultValue` is returned if `value` is `NaN`, `null`,
* or `undefined`.
*
* @static
* @memberOf _
* @since 4.14.0
* @category Util
* @param {*} value The value to check.
* @param {*} defaultValue The default value.
* @returns {*} Returns the resolved value.
* @example
*
* _.defaultTo(1, 10);
* // => 1
*
* _.defaultTo(undefined, 10);
* // => 10
*/
function defaultTo(value, defaultValue) {
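  // `value !== value` is true only for `NaN`, and `value == null` covers
  // both `null` and `undefined`.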
return (value == null || value !== value) ? defaultValue : value;
}
/**
* Creates a function that returns the result of invoking the given functions
* with the `this` binding of the created function, where each successive
* invocation is supplied the return value of the previous.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Util
* @param {...(Function|Function[])} [funcs] The functions to invoke.
* @returns {Function} Returns the new composite function.
* @see _.flowRight
* @example
*
* function square(n) {
* return n * n;
* }
*
* var addSquare = _.flow([_.add, square]);
* addSquare(1, 2);
* // => 9
*/
var flow = createFlow();
/**
* This method is like `_.flow` except that it creates a function that
* invokes the given functions from right to left.
*
* @static
* @since 3.0.0
* @memberOf _
* @category Util
* @param {...(Function|Function[])} [funcs] The functions to invoke.
* @returns {Function} Returns the new composite function.
* @see _.flow
* @example
*
* function square(n) {
* return n * n;
* }
*
* var addSquare = _.flowRight([square, _.add]);
* addSquare(1, 2);
* // => 9
*/
var flowRight = createFlow(true);
/**
* This method returns the first argument it receives.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {*} value Any value.
* @returns {*} Returns `value`.
* @example
*
* var object = { 'a': 1 };
*
* console.log(_.identity(object) === object);
* // => true
*/
function identity(value) {
return value;
}
/**
* Creates a function that invokes `func` with the arguments of the created
* function. If `func` is a property name, the created function returns the
* property value for a given element. If `func` is an array or object, the
* created function returns `true` for elements that contain the equivalent
* source properties, otherwise it returns `false`.
*
* @static
* @since 4.0.0
* @memberOf _
* @category Util
* @param {*} [func=_.identity] The value to convert to a callback.
* @returns {Function} Returns the callback.
* @example
*
* var users = [
* { 'user': 'barney', 'age': 36, 'active': true },
* { 'user': 'fred', 'age': 40, 'active': false }
* ];
*
* // The `_.matches` iteratee shorthand.
* _.filter(users, _.iteratee({ 'user': 'barney', 'active': true }));
* // => [{ 'user': 'barney', 'age': 36, 'active': true }]
*
* // The `_.matchesProperty` iteratee shorthand.
* _.filter(users, _.iteratee(['user', 'fred']));
 * // => [{ 'user': 'fred', 'age': 40, 'active': false }]
*
* // The `_.property` iteratee shorthand.
* _.map(users, _.iteratee('user'));
* // => ['barney', 'fred']
*
* // Create custom iteratee shorthands.
* _.iteratee = _.wrap(_.iteratee, function(iteratee, func) {
* return !_.isRegExp(func) ? iteratee(func) : function(string) {
* return func.test(string);
* };
* });
*
* _.filter(['abc', 'def'], /ef/);
* // => ['def']
*/
function iteratee(func) {
return baseIteratee(typeof func == 'function' ? func : baseClone(func, CLONE_DEEP_FLAG));
}
/**
* Creates a function that performs a partial deep comparison between a given
* object and `source`, returning `true` if the given object has equivalent
* property values, else `false`.
*
* **Note:** The created function is equivalent to `_.isMatch` with `source`
* partially applied.
*
* Partial comparisons will match empty array and empty object `source`
* values against any array or object value, respectively. See `_.isEqual`
* for a list of supported value comparisons.
*
* **Note:** Multiple values can be checked by combining several matchers
 * using `_.overSome`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Util
* @param {Object} source The object of property values to match.
* @returns {Function} Returns the new spec function.
* @example
*
* var objects = [
* { 'a': 1, 'b': 2, 'c': 3 },
* { 'a': 4, 'b': 5, 'c': 6 }
* ];
*
* _.filter(objects, _.matches({ 'a': 4, 'c': 6 }));
* // => [{ 'a': 4, 'b': 5, 'c': 6 }]
*
* // Checking for several possible values
* _.filter(objects, _.overSome([_.matches({ 'a': 1 }), _.matches({ 'a': 4 })]));
* // => [{ 'a': 1, 'b': 2, 'c': 3 }, { 'a': 4, 'b': 5, 'c': 6 }]
*/
function matches(source) {
return baseMatches(baseClone(source, CLONE_DEEP_FLAG));
}
/**
* Creates a function that performs a partial deep comparison between the
* value at `path` of a given object to `srcValue`, returning `true` if the
* object value is equivalent, else `false`.
*
* **Note:** Partial comparisons will match empty array and empty object
* `srcValue` values against any array or object value, respectively. See
* `_.isEqual` for a list of supported value comparisons.
*
* **Note:** Multiple values can be checked by combining several matchers
 * using `_.overSome`.
*
* @static
* @memberOf _
* @since 3.2.0
* @category Util
* @param {Array|string} path The path of the property to get.
* @param {*} srcValue The value to match.
* @returns {Function} Returns the new spec function.
* @example
*
* var objects = [
* { 'a': 1, 'b': 2, 'c': 3 },
* { 'a': 4, 'b': 5, 'c': 6 }
* ];
*
* _.find(objects, _.matchesProperty('a', 4));
* // => { 'a': 4, 'b': 5, 'c': 6 }
*
* // Checking for several possible values
* _.filter(objects, _.overSome([_.matchesProperty('a', 1), _.matchesProperty('a', 4)]));
* // => [{ 'a': 1, 'b': 2, 'c': 3 }, { 'a': 4, 'b': 5, 'c': 6 }]
*/
function matchesProperty(path, srcValue) {
return baseMatchesProperty(path, baseClone(srcValue, CLONE_DEEP_FLAG));
}
/**
* Creates a function that invokes the method at `path` of a given object.
* Any additional arguments are provided to the invoked method.
*
* @static
* @memberOf _
* @since 3.7.0
* @category Util
* @param {Array|string} path The path of the method to invoke.
* @param {...*} [args] The arguments to invoke the method with.
* @returns {Function} Returns the new invoker function.
* @example
*
* var objects = [
* { 'a': { 'b': _.constant(2) } },
* { 'a': { 'b': _.constant(1) } }
* ];
*
* _.map(objects, _.method('a.b'));
* // => [2, 1]
*
* _.map(objects, _.method(['a', 'b']));
* // => [2, 1]
*/
var method = baseRest(function(path, args) {
return function(object) {
return baseInvoke(object, path, args);
};
});
/**
* The opposite of `_.method`; this method creates a function that invokes
* the method at a given path of `object`. Any additional arguments are
* provided to the invoked method.
*
* @static
* @memberOf _
* @since 3.7.0
* @category Util
* @param {Object} object The object to query.
* @param {...*} [args] The arguments to invoke the method with.
* @returns {Function} Returns the new invoker function.
* @example
*
* var array = _.times(3, _.constant),
* object = { 'a': array, 'b': array, 'c': array };
*
* _.map(['a[2]', 'c[0]'], _.methodOf(object));
* // => [2, 0]
*
* _.map([['a', '2'], ['c', '0']], _.methodOf(object));
* // => [2, 0]
*/
var methodOf = baseRest(function(object, args) {
return function(path) {
return baseInvoke(object, path, args);
};
});
/**
* Adds all own enumerable string keyed function properties of a source
* object to the destination object. If `object` is a function, then methods
* are added to its prototype as well.
*
* **Note:** Use `_.runInContext` to create a pristine `lodash` function to
* avoid conflicts caused by modifying the original.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {Function|Object} [object=lodash] The destination object.
* @param {Object} source The object of functions to add.
* @param {Object} [options={}] The options object.
* @param {boolean} [options.chain=true] Specify whether mixins are chainable.
* @returns {Function|Object} Returns `object`.
* @example
*
* function vowels(string) {
* return _.filter(string, function(v) {
* return /[aeiou]/i.test(v);
* });
* }
*
* _.mixin({ 'vowels': vowels });
* _.vowels('fred');
* // => ['e']
*
* _('fred').vowels().value();
* // => ['e']
*
* _.mixin({ 'vowels': vowels }, { 'chain': false });
* _('fred').vowels();
* // => ['e']
*/
function mixin(object, source, options) {
var props = keys(source),
methodNames = baseFunctions(source, props);
if (options == null &&
!(isObject(source) && (methodNames.length || !props.length))) {
options = source;
source = object;
object = this;
methodNames = baseFunctions(source, keys(source));
}
var chain = !(isObject(options) && 'chain' in options) || !!options.chain,
isFunc = isFunction(object);
arrayEach(methodNames, function(methodName) {
var func = source[methodName];
object[methodName] = func;
if (isFunc) {
object.prototype[methodName] = function() {
var chainAll = this.__chain__;
if (chain || chainAll) {
var result = object(this.__wrapped__),
actions = result.__actions__ = copyArray(this.__actions__);
actions.push({ 'func': func, 'args': arguments, 'thisArg': object });
result.__chain__ = chainAll;
return result;
}
return func.apply(object, arrayPush([this.value()], arguments));
};
}
});
return object;
}
/**
* Reverts the `_` variable to its previous value and returns a reference to
* the `lodash` function.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @returns {Function} Returns the `lodash` function.
* @example
*
* var lodash = _.noConflict();
*/
function noConflict() {
if (root._ === this) {
root._ = oldDash;
}
return this;
}
/**
* This method returns `undefined`.
*
* @static
* @memberOf _
* @since 2.3.0
* @category Util
* @example
*
* _.times(2, _.noop);
* // => [undefined, undefined]
*/
function noop() {
// No operation performed.
}
/**
* Creates a function that gets the argument at index `n`. If `n` is negative,
* the nth argument from the end is returned.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {number} [n=0] The index of the argument to return.
* @returns {Function} Returns the new pass-thru function.
* @example
*
* var func = _.nthArg(1);
* func('a', 'b', 'c', 'd');
* // => 'b'
*
* var func = _.nthArg(-2);
* func('a', 'b', 'c', 'd');
* // => 'c'
*/
function nthArg(n) {
n = toInteger(n);
return baseRest(function(args) {
return baseNth(args, n);
});
}
/**
* Creates a function that invokes `iteratees` with the arguments it receives
* and returns their results.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {...(Function|Function[])} [iteratees=[_.identity]]
* The iteratees to invoke.
* @returns {Function} Returns the new function.
* @example
*
* var func = _.over([Math.max, Math.min]);
*
* func(1, 2, 3, 4);
* // => [4, 1]
*/
var over = createOver(arrayMap);
/**
* Creates a function that checks if **all** of the `predicates` return
* truthy when invoked with the arguments it receives.
*
 * The following shorthands are possible for providing predicates.
 * Pass an `Object` and it will be used as a parameter for `_.matches` to create the predicate.
 * Pass an `Array` of parameters for `_.matchesProperty` and the predicate will be created using them.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {...(Function|Function[])} [predicates=[_.identity]]
* The predicates to check.
* @returns {Function} Returns the new function.
* @example
*
* var func = _.overEvery([Boolean, isFinite]);
*
* func('1');
* // => true
*
* func(null);
* // => false
*
* func(NaN);
* // => false
*/
var overEvery = createOver(arrayEvery);
/**
* Creates a function that checks if **any** of the `predicates` return
* truthy when invoked with the arguments it receives.
*
 * The following shorthands are possible for providing predicates.
 * Pass an `Object` and it will be used as a parameter for `_.matches` to create the predicate.
 * Pass an `Array` of parameters for `_.matchesProperty` and the predicate will be created using them.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {...(Function|Function[])} [predicates=[_.identity]]
* The predicates to check.
* @returns {Function} Returns the new function.
* @example
*
* var func = _.overSome([Boolean, isFinite]);
*
* func('1');
* // => true
*
* func(null);
* // => true
*
* func(NaN);
* // => false
*
 * var matchesFunc = _.overSome([{ 'a': 1 }, { 'a': 2 }]);
 * var matchesPropertyFunc = _.overSome([['a', 1], ['a', 2]]);
*/
var overSome = createOver(arraySome);
/**
* Creates a function that returns the value at `path` of a given object.
*
* @static
* @memberOf _
* @since 2.4.0
* @category Util
* @param {Array|string} path The path of the property to get.
* @returns {Function} Returns the new accessor function.
* @example
*
* var objects = [
* { 'a': { 'b': 2 } },
* { 'a': { 'b': 1 } }
* ];
*
* _.map(objects, _.property('a.b'));
* // => [2, 1]
*
* _.map(_.sortBy(objects, _.property(['a', 'b'])), 'a.b');
* // => [1, 2]
*/
function property(path) {
return isKey(path) ? baseProperty(toKey(path)) : basePropertyDeep(path);
}
/**
* The opposite of `_.property`; this method creates a function that returns
* the value at a given path of `object`.
*
* @static
* @memberOf _
* @since 3.0.0
* @category Util
* @param {Object} object The object to query.
* @returns {Function} Returns the new accessor function.
* @example
*
* var array = [0, 1, 2],
* object = { 'a': array, 'b': array, 'c': array };
*
* _.map(['a[2]', 'c[0]'], _.propertyOf(object));
* // => [2, 0]
*
* _.map([['a', '2'], ['c', '0']], _.propertyOf(object));
* // => [2, 0]
*/
function propertyOf(object) {
return function(path) {
return object == null ? undefined : baseGet(object, path);
};
}
/**
* Creates an array of numbers (positive and/or negative) progressing from
* `start` up to, but not including, `end`. A step of `-1` is used if a negative
* `start` is specified without an `end` or `step`. If `end` is not specified,
* it's set to `start` with `start` then set to `0`.
*
* **Note:** JavaScript follows the IEEE-754 standard for resolving
* floating-point values which can produce unexpected results.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {number} [start=0] The start of the range.
* @param {number} end The end of the range.
* @param {number} [step=1] The value to increment or decrement by.
* @returns {Array} Returns the range of numbers.
* @see _.inRange, _.rangeRight
* @example
*
* _.range(4);
* // => [0, 1, 2, 3]
*
* _.range(-4);
* // => [0, -1, -2, -3]
*
* _.range(1, 5);
* // => [1, 2, 3, 4]
*
* _.range(0, 20, 5);
* // => [0, 5, 10, 15]
*
* _.range(0, -4, -1);
* // => [0, -1, -2, -3]
*
* _.range(1, 4, 0);
* // => [1, 1, 1]
*
* _.range(0);
* // => []
*/
var range = createRange();
/**
* This method is like `_.range` except that it populates values in
* descending order.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {number} [start=0] The start of the range.
* @param {number} end The end of the range.
* @param {number} [step=1] The value to increment or decrement by.
* @returns {Array} Returns the range of numbers.
* @see _.inRange, _.range
* @example
*
* _.rangeRight(4);
* // => [3, 2, 1, 0]
*
* _.rangeRight(-4);
* // => [-3, -2, -1, 0]
*
* _.rangeRight(1, 5);
* // => [4, 3, 2, 1]
*
* _.rangeRight(0, 20, 5);
* // => [15, 10, 5, 0]
*
* _.rangeRight(0, -4, -1);
* // => [-3, -2, -1, 0]
*
* _.rangeRight(1, 4, 0);
* // => [1, 1, 1]
*
* _.rangeRight(0);
* // => []
*/
var rangeRight = createRange(true);
/**
* This method returns a new empty array.
*
* @static
* @memberOf _
* @since 4.13.0
* @category Util
* @returns {Array} Returns the new empty array.
* @example
*
* var arrays = _.times(2, _.stubArray);
*
* console.log(arrays);
* // => [[], []]
*
* console.log(arrays[0] === arrays[1]);
* // => false
*/
function stubArray() {
return [];
}
/**
* This method returns `false`.
*
* @static
* @memberOf _
* @since 4.13.0
* @category Util
* @returns {boolean} Returns `false`.
* @example
*
* _.times(2, _.stubFalse);
* // => [false, false]
*/
function stubFalse() {
return false;
}
/**
* This method returns a new empty object.
*
* @static
* @memberOf _
* @since 4.13.0
* @category Util
* @returns {Object} Returns the new empty object.
* @example
*
* var objects = _.times(2, _.stubObject);
*
* console.log(objects);
* // => [{}, {}]
*
* console.log(objects[0] === objects[1]);
* // => false
*/
function stubObject() {
return {};
}
/**
* This method returns an empty string.
*
* @static
* @memberOf _
* @since 4.13.0
* @category Util
* @returns {string} Returns the empty string.
* @example
*
* _.times(2, _.stubString);
* // => ['', '']
*/
function stubString() {
return '';
}
/**
* This method returns `true`.
*
* @static
* @memberOf _
* @since 4.13.0
* @category Util
* @returns {boolean} Returns `true`.
* @example
*
* _.times(2, _.stubTrue);
* // => [true, true]
*/
function stubTrue() {
return true;
}
/**
* Invokes the iteratee `n` times, returning an array of the results of
 * each invocation. The iteratee is invoked with one argument: (index).
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {number} n The number of times to invoke `iteratee`.
* @param {Function} [iteratee=_.identity] The function invoked per iteration.
* @returns {Array} Returns the array of results.
* @example
*
* _.times(3, String);
* // => ['0', '1', '2']
*
* _.times(4, _.constant(0));
* // => [0, 0, 0, 0]
*/
function times(n, iteratee) {
n = toInteger(n);
if (n < 1 || n > MAX_SAFE_INTEGER) {
return [];
}
var index = MAX_ARRAY_LENGTH,
length = nativeMin(n, MAX_ARRAY_LENGTH);
iteratee = getIteratee(iteratee);
n -= MAX_ARRAY_LENGTH;
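  // Only the first MAX_ARRAY_LENGTH results are collected; any remaining
  // invocations run for side effects only.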
var result = baseTimes(length, iteratee);
while (++index < n) {
iteratee(index);
}
return result;
}
/**
* Converts `value` to a property path array.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Util
* @param {*} value The value to convert.
* @returns {Array} Returns the new property path array.
* @example
*
* _.toPath('a.b.c');
* // => ['a', 'b', 'c']
*
* _.toPath('a[0].b.c');
* // => ['a', '0', 'b', 'c']
*/
function toPath(value) {
if (isArray(value)) {
return arrayMap(value, toKey);
}
return isSymbol(value) ? [value] : copyArray(stringToPath(toString(value)));
}
/**
* Generates a unique ID. If `prefix` is given, the ID is appended to it.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Util
* @param {string} [prefix=''] The value to prefix the ID with.
* @returns {string} Returns the unique ID.
* @example
*
* _.uniqueId('contact_');
* // => 'contact_104'
*
* _.uniqueId();
* // => '105'
*/
function uniqueId(prefix) {
var id = ++idCounter;
return toString(prefix) + id;
}
/*------------------------------------------------------------------------*/
/**
* Adds two numbers.
*
* @static
* @memberOf _
* @since 3.4.0
* @category Math
* @param {number} augend The first number in an addition.
* @param {number} addend The second number in an addition.
* @returns {number} Returns the total.
* @example
*
* _.add(6, 4);
* // => 10
*/
var add = createMathOperation(function(augend, addend) {
return augend + addend;
}, 0);
/**
* Computes `number` rounded up to `precision`.
*
* @static
* @memberOf _
* @since 3.10.0
* @category Math
* @param {number} number The number to round up.
* @param {number} [precision=0] The precision to round up to.
* @returns {number} Returns the rounded up number.
* @example
*
* _.ceil(4.006);
* // => 5
*
* _.ceil(6.004, 2);
* // => 6.01
*
* _.ceil(6040, -2);
* // => 6100
*/
var ceil = createRound('ceil');
/**
 * Divides two numbers.
*
* @static
* @memberOf _
* @since 4.7.0
* @category Math
* @param {number} dividend The first number in a division.
* @param {number} divisor The second number in a division.
* @returns {number} Returns the quotient.
* @example
*
* _.divide(6, 4);
* // => 1.5
*/
var divide = createMathOperation(function(dividend, divisor) {
return dividend / divisor;
}, 1);
/**
* Computes `number` rounded down to `precision`.
*
* @static
* @memberOf _
* @since 3.10.0
* @category Math
* @param {number} number The number to round down.
* @param {number} [precision=0] The precision to round down to.
* @returns {number} Returns the rounded down number.
* @example
*
* _.floor(4.006);
* // => 4
*
* _.floor(0.046, 2);
* // => 0.04
*
* _.floor(4060, -2);
* // => 4000
*/
var floor = createRound('floor');
/**
* Computes the maximum value of `array`. If `array` is empty or falsey,
* `undefined` is returned.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Math
* @param {Array} array The array to iterate over.
* @returns {*} Returns the maximum value.
* @example
*
* _.max([4, 2, 8, 6]);
* // => 8
*
* _.max([]);
* // => undefined
*/
function max(array) {
return (array && array.length)
? baseExtremum(array, identity, baseGt)
: undefined;
}
/**
* This method is like `_.max` except that it accepts `iteratee` which is
* invoked for each element in `array` to generate the criterion by which
* the value is ranked. The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Math
* @param {Array} array The array to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {*} Returns the maximum value.
* @example
*
* var objects = [{ 'n': 1 }, { 'n': 2 }];
*
* _.maxBy(objects, function(o) { return o.n; });
* // => { 'n': 2 }
*
* // The `_.property` iteratee shorthand.
* _.maxBy(objects, 'n');
* // => { 'n': 2 }
*/
function maxBy(array, iteratee) {
return (array && array.length)
? baseExtremum(array, getIteratee(iteratee, 2), baseGt)
: undefined;
}
/**
* Computes the mean of the values in `array`.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Math
* @param {Array} array The array to iterate over.
* @returns {number} Returns the mean.
* @example
*
* _.mean([4, 2, 8, 6]);
* // => 5
*/
function mean(array) {
return baseMean(array, identity);
}
/**
* This method is like `_.mean` except that it accepts `iteratee` which is
* invoked for each element in `array` to generate the value to be averaged.
* The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.7.0
* @category Math
* @param {Array} array The array to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {number} Returns the mean.
* @example
*
* var objects = [{ 'n': 4 }, { 'n': 2 }, { 'n': 8 }, { 'n': 6 }];
*
* _.meanBy(objects, function(o) { return o.n; });
* // => 5
*
* // The `_.property` iteratee shorthand.
* _.meanBy(objects, 'n');
* // => 5
*/
function meanBy(array, iteratee) {
return baseMean(array, getIteratee(iteratee, 2));
}
/**
* Computes the minimum value of `array`. If `array` is empty or falsey,
* `undefined` is returned.
*
* @static
* @since 0.1.0
* @memberOf _
* @category Math
* @param {Array} array The array to iterate over.
* @returns {*} Returns the minimum value.
* @example
*
* _.min([4, 2, 8, 6]);
* // => 2
*
* _.min([]);
* // => undefined
*/
function min(array) {
return (array && array.length)
? baseExtremum(array, identity, baseLt)
: undefined;
}
/**
* This method is like `_.min` except that it accepts `iteratee` which is
* invoked for each element in `array` to generate the criterion by which
* the value is ranked. The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Math
* @param {Array} array The array to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {*} Returns the minimum value.
* @example
*
* var objects = [{ 'n': 1 }, { 'n': 2 }];
*
* _.minBy(objects, function(o) { return o.n; });
* // => { 'n': 1 }
*
* // The `_.property` iteratee shorthand.
* _.minBy(objects, 'n');
* // => { 'n': 1 }
*/
function minBy(array, iteratee) {
return (array && array.length)
? baseExtremum(array, getIteratee(iteratee, 2), baseLt)
: undefined;
}
/**
 * Multiplies two numbers.
*
* @static
* @memberOf _
* @since 4.7.0
* @category Math
* @param {number} multiplier The first number in a multiplication.
* @param {number} multiplicand The second number in a multiplication.
* @returns {number} Returns the product.
* @example
*
* _.multiply(6, 4);
* // => 24
*/
var multiply = createMathOperation(function(multiplier, multiplicand) {
return multiplier * multiplicand;
}, 1);
/**
* Computes `number` rounded to `precision`.
*
* @static
* @memberOf _
* @since 3.10.0
* @category Math
* @param {number} number The number to round.
* @param {number} [precision=0] The precision to round to.
* @returns {number} Returns the rounded number.
* @example
*
* _.round(4.006);
* // => 4
*
* _.round(4.006, 2);
* // => 4.01
*
* _.round(4060, -2);
* // => 4100
*/
var round = createRound('round');
/**
 * Subtracts two numbers.
*
* @static
* @memberOf _
* @since 4.0.0
* @category Math
* @param {number} minuend The first number in a subtraction.
* @param {number} subtrahend The second number in a subtraction.
* @returns {number} Returns the difference.
* @example
*
* _.subtract(6, 4);
* // => 2
*/
var subtract = createMathOperation(function(minuend, subtrahend) {
return minuend - subtrahend;
}, 0);
/**
* Computes the sum of the values in `array`.
*
* @static
* @memberOf _
* @since 3.4.0
* @category Math
* @param {Array} array The array to iterate over.
* @returns {number} Returns the sum.
* @example
*
* _.sum([4, 2, 8, 6]);
* // => 20
*/
function sum(array) {
return (array && array.length)
? baseSum(array, identity)
: 0;
}
/**
* This method is like `_.sum` except that it accepts `iteratee` which is
* invoked for each element in `array` to generate the value to be summed.
* The iteratee is invoked with one argument: (value).
*
* @static
* @memberOf _
* @since 4.0.0
* @category Math
* @param {Array} array The array to iterate over.
* @param {Function} [iteratee=_.identity] The iteratee invoked per element.
* @returns {number} Returns the sum.
* @example
*
* var objects = [{ 'n': 4 }, { 'n': 2 }, { 'n': 8 }, { 'n': 6 }];
*
* _.sumBy(objects, function(o) { return o.n; });
* // => 20
*
* // The `_.property` iteratee shorthand.
* _.sumBy(objects, 'n');
* // => 20
*/
function sumBy(array, iteratee) {
return (array && array.length)
? baseSum(array, getIteratee(iteratee, 2))
: 0;
}
/*------------------------------------------------------------------------*/
// Add methods that return wrapped values in chain sequences.
lodash.after = after;
lodash.ary = ary;
lodash.assign = assign;
lodash.assignIn = assignIn;
lodash.assignInWith = assignInWith;
lodash.assignWith = assignWith;
lodash.at = at;
lodash.before = before;
lodash.bind = bind;
lodash.bindAll = bindAll;
lodash.bindKey = bindKey;
lodash.castArray = castArray;
lodash.chain = chain;
lodash.chunk = chunk;
lodash.compact = compact;
lodash.concat = concat;
lodash.cond = cond;
lodash.conforms = conforms;
lodash.constant = constant;
lodash.countBy = countBy;
lodash.create = create;
lodash.curry = curry;
lodash.curryRight = curryRight;
lodash.debounce = debounce;
lodash.defaults = defaults;
lodash.defaultsDeep = defaultsDeep;
lodash.defer = defer;
lodash.delay = delay;
lodash.difference = difference;
lodash.differenceBy = differenceBy;
lodash.differenceWith = differenceWith;
lodash.drop = drop;
lodash.dropRight = dropRight;
lodash.dropRightWhile = dropRightWhile;
lodash.dropWhile = dropWhile;
lodash.fill = fill;
lodash.filter = filter;
lodash.flatMap = flatMap;
lodash.flatMapDeep = flatMapDeep;
lodash.flatMapDepth = flatMapDepth;
lodash.flatten = flatten;
lodash.flattenDeep = flattenDeep;
lodash.flattenDepth = flattenDepth;
lodash.flip = flip;
lodash.flow = flow;
lodash.flowRight = flowRight;
lodash.fromPairs = fromPairs;
lodash.functions = functions;
lodash.functionsIn = functionsIn;
lodash.groupBy = groupBy;
lodash.initial = initial;
lodash.intersection = intersection;
lodash.intersectionBy = intersectionBy;
lodash.intersectionWith = intersectionWith;
lodash.invert = invert;
lodash.invertBy = invertBy;
lodash.invokeMap = invokeMap;
lodash.iteratee = iteratee;
lodash.keyBy = keyBy;
lodash.keys = keys;
lodash.keysIn = keysIn;
lodash.map = map;
lodash.mapKeys = mapKeys;
lodash.mapValues = mapValues;
lodash.matches = matches;
lodash.matchesProperty = matchesProperty;
lodash.memoize = memoize;
lodash.merge = merge;
lodash.mergeWith = mergeWith;
lodash.method = method;
lodash.methodOf = methodOf;
lodash.mixin = mixin;
lodash.negate = negate;
lodash.nthArg = nthArg;
lodash.omit = omit;
lodash.omitBy = omitBy;
lodash.once = once;
lodash.orderBy = orderBy;
lodash.over = over;
lodash.overArgs = overArgs;
lodash.overEvery = overEvery;
lodash.overSome = overSome;
lodash.partial = partial;
lodash.partialRight = partialRight;
lodash.partition = partition;
lodash.pick = pick;
lodash.pickBy = pickBy;
lodash.property = property;
lodash.propertyOf = propertyOf;
lodash.pull = pull;
lodash.pullAll = pullAll;
lodash.pullAllBy = pullAllBy;
lodash.pullAllWith = pullAllWith;
lodash.pullAt = pullAt;
lodash.range = range;
lodash.rangeRight = rangeRight;
lodash.rearg = rearg;
lodash.reject = reject;
lodash.remove = remove;
lodash.rest = rest;
lodash.reverse = reverse;
lodash.sampleSize = sampleSize;
lodash.set = set;
lodash.setWith = setWith;
lodash.shuffle = shuffle;
lodash.slice = slice;
lodash.sortBy = sortBy;
lodash.sortedUniq = sortedUniq;
lodash.sortedUniqBy = sortedUniqBy;
lodash.split = split;
lodash.spread = spread;
lodash.tail = tail;
lodash.take = take;
lodash.takeRight = takeRight;
lodash.takeRightWhile = takeRightWhile;
lodash.takeWhile = takeWhile;
lodash.tap = tap;
lodash.throttle = throttle;
lodash.thru = thru;
lodash.toArray = toArray;
lodash.toPairs = toPairs;
lodash.toPairsIn = toPairsIn;
lodash.toPath = toPath;
lodash.toPlainObject = toPlainObject;
lodash.transform = transform;
lodash.unary = unary;
lodash.union = union;
lodash.unionBy = unionBy;
lodash.unionWith = unionWith;
lodash.uniq = uniq;
lodash.uniqBy = uniqBy;
lodash.uniqWith = uniqWith;
lodash.unset = unset;
lodash.unzip = unzip;
lodash.unzipWith = unzipWith;
lodash.update = update;
lodash.updateWith = updateWith;
lodash.values = values;
lodash.valuesIn = valuesIn;
lodash.without = without;
lodash.words = words;
lodash.wrap = wrap;
lodash.xor = xor;
lodash.xorBy = xorBy;
lodash.xorWith = xorWith;
lodash.zip = zip;
lodash.zipObject = zipObject;
lodash.zipObjectDeep = zipObjectDeep;
lodash.zipWith = zipWith;
// Add aliases.
lodash.entries = toPairs;
lodash.entriesIn = toPairsIn;
lodash.extend = assignIn;
lodash.extendWith = assignInWith;
// Add methods to `lodash.prototype`.
mixin(lodash, lodash);
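// Illustrative sketch (not part of the library): after this mixin, the
// methods registered above return wrappers inside chain sequences, e.g.
//
//   _.chain([1, 2, 3]).map(function(n) { return n * n; }).value();
//   // => [1, 4, 9]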
/*------------------------------------------------------------------------*/
// Add methods that return unwrapped values in chain sequences.
lodash.add = add;
lodash.attempt = attempt;
lodash.camelCase = camelCase;
lodash.capitalize = capitalize;
lodash.ceil = ceil;
lodash.clamp = clamp;
lodash.clone = clone;
lodash.cloneDeep = cloneDeep;
lodash.cloneDeepWith = cloneDeepWith;
lodash.cloneWith = cloneWith;
lodash.conformsTo = conformsTo;
lodash.deburr = deburr;
lodash.defaultTo = defaultTo;
lodash.divide = divide;
lodash.endsWith = endsWith;
lodash.eq = eq;
lodash.escape = escape;
lodash.escapeRegExp = escapeRegExp;
lodash.every = every;
lodash.find = find;
lodash.findIndex = findIndex;
lodash.findKey = findKey;
lodash.findLast = findLast;
lodash.findLastIndex = findLastIndex;
lodash.findLastKey = findLastKey;
lodash.floor = floor;
lodash.forEach = forEach;
lodash.forEachRight = forEachRight;
lodash.forIn = forIn;
lodash.forInRight = forInRight;
lodash.forOwn = forOwn;
lodash.forOwnRight = forOwnRight;
lodash.get = get;
lodash.gt = gt;
lodash.gte = gte;
lodash.has = has;
lodash.hasIn = hasIn;
lodash.head = head;
lodash.identity = identity;
lodash.includes = includes;
lodash.indexOf = indexOf;
lodash.inRange = inRange;
lodash.invoke = invoke;
lodash.isArguments = isArguments;
lodash.isArray = isArray;
lodash.isArrayBuffer = isArrayBuffer;
lodash.isArrayLike = isArrayLike;
lodash.isArrayLikeObject = isArrayLikeObject;
lodash.isBoolean = isBoolean;
lodash.isBuffer = isBuffer;
lodash.isDate = isDate;
lodash.isElement = isElement;
lodash.isEmpty = isEmpty;
lodash.isEqual = isEqual;
lodash.isEqualWith = isEqualWith;
lodash.isError = isError;
lodash.isFinite = isFinite;
lodash.isFunction = isFunction;
lodash.isInteger = isInteger;
lodash.isLength = isLength;
lodash.isMap = isMap;
lodash.isMatch = isMatch;
lodash.isMatchWith = isMatchWith;
lodash.isNaN = isNaN;
lodash.isNative = isNative;
lodash.isNil = isNil;
lodash.isNull = isNull;
lodash.isNumber = isNumber;
lodash.isObject = isObject;
lodash.isObjectLike = isObjectLike;
lodash.isPlainObject = isPlainObject;
lodash.isRegExp = isRegExp;
lodash.isSafeInteger = isSafeInteger;
lodash.isSet = isSet;
lodash.isString = isString;
lodash.isSymbol = isSymbol;
lodash.isTypedArray = isTypedArray;
lodash.isUndefined = isUndefined;
lodash.isWeakMap = isWeakMap;
lodash.isWeakSet = isWeakSet;
lodash.join = join;
lodash.kebabCase = kebabCase;
lodash.last = last;
lodash.lastIndexOf = lastIndexOf;
lodash.lowerCase = lowerCase;
lodash.lowerFirst = lowerFirst;
lodash.lt = lt;
lodash.lte = lte;
lodash.max = max;
lodash.maxBy = maxBy;
lodash.mean = mean;
lodash.meanBy = meanBy;
lodash.min = min;
lodash.minBy = minBy;
lodash.stubArray = stubArray;
lodash.stubFalse = stubFalse;
lodash.stubObject = stubObject;
lodash.stubString = stubString;
lodash.stubTrue = stubTrue;
lodash.multiply = multiply;
lodash.nth = nth;
lodash.noConflict = noConflict;
lodash.noop = noop;
lodash.now = now;
lodash.pad = pad;
lodash.padEnd = padEnd;
lodash.padStart = padStart;
lodash.parseInt = parseInt;
lodash.random = random;
lodash.reduce = reduce;
lodash.reduceRight = reduceRight;
lodash.repeat = repeat;
lodash.replace = replace;
lodash.result = result;
lodash.round = round;
lodash.runInContext = runInContext;
lodash.sample = sample;
lodash.size = size;
lodash.snakeCase = snakeCase;
lodash.some = some;
lodash.sortedIndex = sortedIndex;
lodash.sortedIndexBy = sortedIndexBy;
lodash.sortedIndexOf = sortedIndexOf;
lodash.sortedLastIndex = sortedLastIndex;
lodash.sortedLastIndexBy = sortedLastIndexBy;
lodash.sortedLastIndexOf = sortedLastIndexOf;
lodash.startCase = startCase;
lodash.startsWith = startsWith;
lodash.subtract = subtract;
lodash.sum = sum;
lodash.sumBy = sumBy;
lodash.template = template;
lodash.times = times;
lodash.toFinite = toFinite;
lodash.toInteger = toInteger;
lodash.toLength = toLength;
lodash.toLower = toLower;
lodash.toNumber = toNumber;
lodash.toSafeInteger = toSafeInteger;
lodash.toString = toString;
lodash.toUpper = toUpper;
lodash.trim = trim;
lodash.trimEnd = trimEnd;
lodash.trimStart = trimStart;
lodash.truncate = truncate;
lodash.unescape = unescape;
lodash.uniqueId = uniqueId;
lodash.upperCase = upperCase;
lodash.upperFirst = upperFirst;
// Add aliases.
lodash.each = forEach;
lodash.eachRight = forEachRight;
lodash.first = head;
mixin(lodash, (function() {
var source = {};
baseForOwn(lodash, function(func, methodName) {
if (!hasOwnProperty.call(lodash.prototype, methodName)) {
source[methodName] = func;
}
});
return source;
}()), { 'chain': false });
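// Illustrative sketch (not part of the library): methods mixed in with
// `{ 'chain': false }` terminate implicit chains with a raw value, while the
// chainable methods added earlier keep returning wrappers, e.g.
//
//   _([1, 2, 3]).head();               // => 1 (unwrapped result)
//   _([1, 2, 3]).map(String).value();  // => ['1', '2', '3'] (stays chainable)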
/*------------------------------------------------------------------------*/
/**
* The semantic version number.
*
* @static
* @memberOf _
* @type {string}
*/
lodash.VERSION = VERSION;
// Assign default placeholders.
arrayEach(['bind', 'bindKey', 'curry', 'curryRight', 'partial', 'partialRight'], function(methodName) {
lodash[methodName].placeholder = lodash;
});
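// Illustrative sketch (not part of the library): with `lodash` itself as the
// placeholder, `_` marks arguments to skip during partial application:
//
//   var greet = function(greeting, name) { return greeting + ' ' + name; };
//   var greetFred = _.partial(greet, _, 'fred');
//   greetFred('hi');
//   // => 'hi fred'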
// Add `LazyWrapper` methods for `_.drop` and `_.take` variants.
arrayEach(['drop', 'take'], function(methodName, index) {
LazyWrapper.prototype[methodName] = function(n) {
n = n === undefined ? 1 : nativeMax(toInteger(n), 0);
var result = (this.__filtered__ && !index)
? new LazyWrapper(this)
: this.clone();
if (result.__filtered__) {
result.__takeCount__ = nativeMin(n, result.__takeCount__);
} else {
result.__views__.push({
'size': nativeMin(n, MAX_ARRAY_LENGTH),
'type': methodName + (result.__dir__ < 0 ? 'Right' : '')
});
}
return result;
};
LazyWrapper.prototype[methodName + 'Right'] = function(n) {
return this.reverse()[methodName](n).reverse();
};
});
// Add `LazyWrapper` methods that accept an `iteratee` value.
arrayEach(['filter', 'map', 'takeWhile'], function(methodName, index) {
var type = index + 1,
isFilter = type == LAZY_FILTER_FLAG || type == LAZY_WHILE_FLAG;
LazyWrapper.prototype[methodName] = function(iteratee) {
var result = this.clone();
result.__iteratees__.push({
'iteratee': getIteratee(iteratee, 3),
'type': type
});
result.__filtered__ = result.__filtered__ || isFilter;
return result;
};
});
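// Illustrative sketch (not part of the library): the lazy wiring above lets
// chained `filter`/`map`/`take` sequences over arrays short-circuit, so the
// iteratee stops running once enough results have been produced:
//
//   _([1, 2, 3, 4, 5, 6])
//     .filter(function(n) { return n % 2 == 0; })
//     .take(2)
//     .value();
//   // => [2, 4]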
// Add `LazyWrapper` methods for `_.head` and `_.last`.
arrayEach(['head', 'last'], function(methodName, index) {
var takeName = 'take' + (index ? 'Right' : '');
LazyWrapper.prototype[methodName] = function() {
return this[takeName](1).value()[0];
};
});
// Add `LazyWrapper` methods for `_.initial` and `_.tail`.
arrayEach(['initial', 'tail'], function(methodName, index) {
var dropName = 'drop' + (index ? '' : 'Right');
LazyWrapper.prototype[methodName] = function() {
return this.__filtered__ ? new LazyWrapper(this) : this[dropName](1);
};
});
LazyWrapper.prototype.compact = function() {
return this.filter(identity);
};
LazyWrapper.prototype.find = function(predicate) {
return this.filter(predicate).head();
};
LazyWrapper.prototype.findLast = function(predicate) {
return this.reverse().find(predicate);
};
LazyWrapper.prototype.invokeMap = baseRest(function(path, args) {
if (typeof path == 'function') {
return new LazyWrapper(this);
}
return this.map(function(value) {
return baseInvoke(value, path, args);
});
});
LazyWrapper.prototype.reject = function(predicate) {
return this.filter(negate(getIteratee(predicate)));
};
LazyWrapper.prototype.slice = function(start, end) {
start = toInteger(start);
var result = this;
if (result.__filtered__ && (start > 0 || end < 0)) {
return new LazyWrapper(result);
}
if (start < 0) {
result = result.takeRight(-start);
} else if (start) {
result = result.drop(start);
}
if (end !== undefined) {
end = toInteger(end);
result = end < 0 ? result.dropRight(-end) : result.take(end - start);
}
return result;
};
LazyWrapper.prototype.takeRightWhile = function(predicate) {
return this.reverse().takeWhile(predicate).reverse();
};
LazyWrapper.prototype.toArray = function() {
return this.take(MAX_ARRAY_LENGTH);
};
// Add `LazyWrapper` methods to `lodash.prototype`.
baseForOwn(LazyWrapper.prototype, function(func, methodName) {
var checkIteratee = /^(?:filter|find|map|reject)|While$/.test(methodName),
isTaker = /^(?:head|last)$/.test(methodName),
lodashFunc = lodash[isTaker ? ('take' + (methodName == 'last' ? 'Right' : '')) : methodName],
retUnwrapped = isTaker || /^find/.test(methodName);
if (!lodashFunc) {
return;
}
lodash.prototype[methodName] = function() {
var value = this.__wrapped__,
args = isTaker ? [1] : arguments,
isLazy = value instanceof LazyWrapper,
iteratee = args[0],
useLazy = isLazy || isArray(value);
var interceptor = function(value) {
var result = lodashFunc.apply(lodash, arrayPush([value], args));
return (isTaker && chainAll) ? result[0] : result;
};
if (useLazy && checkIteratee && typeof iteratee == 'function' && iteratee.length != 1) {
// Avoid lazy use if the iteratee has a "length" value other than `1`.
isLazy = useLazy = false;
}
var chainAll = this.__chain__,
isHybrid = !!this.__actions__.length,
isUnwrapped = retUnwrapped && !chainAll,
onlyLazy = isLazy && !isHybrid;
if (!retUnwrapped && useLazy) {
value = onlyLazy ? value : new LazyWrapper(this);
var result = func.apply(value, args);
result.__actions__.push({ 'func': thru, 'args': [interceptor], 'thisArg': undefined });
return new LodashWrapper(result, chainAll);
}
if (isUnwrapped && onlyLazy) {
return func.apply(this, args);
}
result = this.thru(interceptor);
return isUnwrapped ? (isTaker ? result.value()[0] : result.value()) : result;
};
});
// Add `Array` methods to `lodash.prototype`.
arrayEach(['pop', 'push', 'shift', 'sort', 'splice', 'unshift'], function(methodName) {
var func = arrayProto[methodName],
chainName = /^(?:push|sort|unshift)$/.test(methodName) ? 'tap' : 'thru',
retUnwrapped = /^(?:pop|shift)$/.test(methodName);
lodash.prototype[methodName] = function() {
var args = arguments;
if (retUnwrapped && !this.__chain__) {
var value = this.value();
return func.apply(isArray(value) ? value : [], args);
}
return this[chainName](function(value) {
return func.apply(isArray(value) ? value : [], args);
});
};
});
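// Illustrative sketch (not part of the library): mutators that return the
// array (`push`, `sort`, `unshift`) are tapped so the wrapper keeps its
// value, while `pop`/`shift` unwrap outside explicit chains:
//
//   _([3, 1, 2]).sort().value();  // => [1, 2, 3]
//   _([1, 2, 3]).pop();           // => 3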
// Map minified method names to their real names.
baseForOwn(LazyWrapper.prototype, function(func, methodName) {
var lodashFunc = lodash[methodName];
if (lodashFunc) {
var key = lodashFunc.name + '';
if (!hasOwnProperty.call(realNames, key)) {
realNames[key] = [];
}
realNames[key].push({ 'name': methodName, 'func': lodashFunc });
}
});
realNames[createHybrid(undefined, WRAP_BIND_KEY_FLAG).name] = [{
'name': 'wrapper',
'func': undefined
}];
// Add methods to `LazyWrapper`.
LazyWrapper.prototype.clone = lazyClone;
LazyWrapper.prototype.reverse = lazyReverse;
LazyWrapper.prototype.value = lazyValue;
// Add chain sequence methods to the `lodash` wrapper.
lodash.prototype.at = wrapperAt;
lodash.prototype.chain = wrapperChain;
lodash.prototype.commit = wrapperCommit;
lodash.prototype.next = wrapperNext;
lodash.prototype.plant = wrapperPlant;
lodash.prototype.reverse = wrapperReverse;
lodash.prototype.toJSON = lodash.prototype.valueOf = lodash.prototype.value = wrapperValue;
// Add lazy aliases.
lodash.prototype.first = lodash.prototype.head;
if (symIterator) {
lodash.prototype[symIterator] = wrapperToIterator;
}
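// Illustrative sketch (not part of the library): with `Symbol.iterator`
// wired up, wrapped values work directly in `for...of` loops and spreads:
//
//   Array.from(_([1, 2]));
//   // => [1, 2]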
return lodash;
});
/*--------------------------------------------------------------------------*/
// Export lodash.
var _ = runInContext();
// Some AMD build optimizers, like r.js, check for condition patterns like
// `typeof define == 'function' && define.amd`; webpack has statically
// evaluated that check to `true` here.
if (true) {
// Expose Lodash on the global object to prevent errors when Lodash is
// loaded by a script tag in the presence of an AMD loader.
// See http://requirejs.org/docs/errors.html#mismatch for more details.
// Use `_.noConflict` to remove Lodash from the global object.
root._ = _;
// Define as an anonymous module so, through path mapping, it can be
// referenced as the "underscore" module.
!(__WEBPACK_AMD_DEFINE_RESULT__ = (function() {
return _;
}).call(exports, __webpack_require__, exports, module),
__WEBPACK_AMD_DEFINE_RESULT__ !== undefined && (module.exports = __WEBPACK_AMD_DEFINE_RESULT__));
}
// Check for `exports` after `define` in case a build optimizer adds it.
else {}
}.call(this));
/* WEBPACK VAR INJECTION */}.call(this, __webpack_require__(/*! ./../webpack/buildin/global.js */ "./node_modules/webpack/buildin/global.js"), __webpack_require__(/*! ./../webpack/buildin/module.js */ "./node_modules/webpack/buildin/module.js")(module)))
/***/ }),
/***/ "./node_modules/process/browser.js":
/*!*****************************************!*\
!*** ./node_modules/process/browser.js ***!
\*****************************************/
/*! no static exports found */
/***/ (function(module, exports) {
// shim for using process in browser
var process = module.exports = {};
// Cache setTimeout/clearTimeout from whatever global is present so that test
// runners that stub them don't break things. The lookups are wrapped in
// try/catch in case this runs inside strict-mode code which doesn't define any
// globals, and inside a function because try/catch deoptimizes in certain engines.
var cachedSetTimeout;
var cachedClearTimeout;
function defaultSetTimout() {
throw new Error('setTimeout has not been defined');
}
function defaultClearTimeout () {
throw new Error('clearTimeout has not been defined');
}
(function () {
try {
if (typeof setTimeout === 'function') {
cachedSetTimeout = setTimeout;
} else {
cachedSetTimeout = defaultSetTimout;
}
} catch (e) {
cachedSetTimeout = defaultSetTimout;
}
try {
if (typeof clearTimeout === 'function') {
cachedClearTimeout = clearTimeout;
} else {
cachedClearTimeout = defaultClearTimeout;
}
} catch (e) {
cachedClearTimeout = defaultClearTimeout;
}
} ())
function runTimeout(fun) {
if (cachedSetTimeout === setTimeout) {
// normal environments in sane situations
return setTimeout(fun, 0);
}
// if setTimeout wasn't available but was later defined
if ((cachedSetTimeout === defaultSetTimout || !cachedSetTimeout) && setTimeout) {
cachedSetTimeout = setTimeout;
return setTimeout(fun, 0);
}
try {
// when somebody has screwed with setTimeout but no IE madness
return cachedSetTimeout(fun, 0);
} catch(e){
try {
// When we are in IE and the script has been eval'd, IE doesn't trust the global object when called normally
return cachedSetTimeout.call(null, fun, 0);
} catch(e){
// Same as above, but for versions of IE that must have the global object for 'this'; hopefully our context is correct, otherwise it will throw a global error
return cachedSetTimeout.call(this, fun, 0);
}
}
}
function runClearTimeout(marker) {
if (cachedClearTimeout === clearTimeout) {
// normal environments in sane situations
return clearTimeout(marker);
}
// if clearTimeout wasn't available but was later defined
if ((cachedClearTimeout === defaultClearTimeout || !cachedClearTimeout) && clearTimeout) {
cachedClearTimeout = clearTimeout;
return clearTimeout(marker);
}
try {
// when somebody has screwed with clearTimeout but no IE madness
return cachedClearTimeout(marker);
} catch (e){
try {
// When we are in IE and the script has been eval'd, IE doesn't trust the global object when called normally
return cachedClearTimeout.call(null, marker);
} catch (e){
// Same as above, but for versions of IE that must have the global object for 'this'; hopefully our context is correct, otherwise it will throw a global error.
// Some versions of IE have different rules for clearTimeout vs setTimeout.
return cachedClearTimeout.call(this, marker);
}
}
}
var queue = [];
var draining = false;
var currentQueue;
var queueIndex = -1;
function cleanUpNextTick() {
if (!draining || !currentQueue) {
return;
}
draining = false;
if (currentQueue.length) {
queue = currentQueue.concat(queue);
} else {
queueIndex = -1;
}
if (queue.length) {
drainQueue();
}
}
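// Drains queued tasks, restarting the inner loop for tasks scheduled mid-drain.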
function drainQueue() {
if (draining) {
return;
}
var timeout = runTimeout(cleanUpNextTick);
draining = true;
var len = queue.length;
while(len) {
currentQueue = queue;
queue = [];
while (++queueIndex < len) {
if (currentQueue) {
currentQueue[queueIndex].run();
}
}
queueIndex = -1;
len = queue.length;
}
currentQueue = null;
draining = false;
runClearTimeout(timeout);
}
process.nextTick = function (fun) {
var args = new Array(arguments.length - 1);
if (arguments.length > 1) {
for (var i = 1; i < arguments.length; i++) {
args[i - 1] = arguments[i];
}
}
queue.push(new Item(fun, args));
if (queue.length === 1 && !draining) {
runTimeout(drainQueue);
}
};
// v8 likes predictable objects
function Item(fun, array) {
this.fun = fun;
this.array = array;
}
Item.prototype.run = function () {
this.fun.apply(null, this.array);
};
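// Illustrative usage (not part of the shim): queued callbacks run
// asynchronously via the timeout-based drain above.
//
//   process.nextTick(function () { console.log('second'); });
//   console.log('first');
//   // logs 'first', then 'second'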
process.title = 'browser';
process.browser = true;
process.env = {};
process.argv = [];
process.version = ''; // empty string to avoid regexp issues
process.versions = {};
function noop() {}
process.on = noop;
process.addListener = noop;
process.once = noop;
process.off = noop;
process.removeListener = noop;
process.removeAllListeners = noop;
process.emit = noop;
process.prependListener = noop;
process.prependOnceListener = noop;
process.listeners = function (name) { return [] }
process.binding = function (name) {
throw new Error('process.binding is not supported');
};
process.cwd = function () { return '/' };
process.chdir = function (dir) {
throw new Error('process.chdir is not supported');
};
process.umask = function() { return 0; };
/***/ }),
/***/ "./node_modules/webpack/buildin/global.js":
/*!***********************************!*\
!*** (webpack)/buildin/global.js ***!
\***********************************/
/*! no static exports found */
/***/ (function(module, exports) {
var g;
// This works in non-strict mode
g = (function() {
return this;
})();
try {
// This works if eval is allowed (see CSP)
g = g || new Function("return this")();
} catch (e) {
// This works if the window reference is available
if (typeof window === "object") g = window;
}
// g can still be undefined, but nothing to do about it...
// We return undefined, instead of nothing here, so it's
// easier to handle this case. if(!global) { ...}
module.exports = g;
/***/ }),
/***/ "./node_modules/webpack/buildin/module.js":
/*!***********************************!*\
!*** (webpack)/buildin/module.js ***!
\***********************************/
/*! no static exports found */
/***/ (function(module, exports) {
module.exports = function(module) {
if (!module.webpackPolyfill) {
module.deprecate = function() {};
module.paths = [];
// module.parent = undefined by default
if (!module.children) module.children = [];
Object.defineProperty(module, "loaded", {
enumerable: true,
get: function() {
return module.l;
}
});
Object.defineProperty(module, "id", {
enumerable: true,
get: function() {
return module.i;
}
});
module.webpackPolyfill = 1;
}
return module;
};
/***/ }),
/***/ "./resources/css/app.css":
/*!*******************************!*\
!*** ./resources/css/app.css ***!
\*******************************/
/*! no static exports found */
/***/ (function(module, exports) {
// removed by extract-text-webpack-plugin
/***/ }),
/***/ "./resources/js/app.js":
/*!*****************************!*\
!*** ./resources/js/app.js ***!
\*****************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
__webpack_require__(/*! ./bootstrap */ "./resources/js/bootstrap.js");
__webpack_require__(/*! alpinejs */ "./node_modules/alpinejs/dist/alpine.js");
/***/ }),
/***/ "./resources/js/bootstrap.js":
/*!***********************************!*\
!*** ./resources/js/bootstrap.js ***!
\***********************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
window._ = __webpack_require__(/*! lodash */ "./node_modules/lodash/lodash.js");
/**
* We'll load the axios HTTP library which allows us to easily issue requests
* to our Laravel back-end. This library automatically handles sending the
* CSRF token as a header based on the value of the "XSRF" token cookie.
*/
window.axios = __webpack_require__(/*! axios */ "./node_modules/axios/index.js");
window.axios.defaults.headers.common['X-Requested-With'] = 'XMLHttpRequest';
/**
* Echo exposes an expressive API for subscribing to channels and listening
* for events that are broadcast by Laravel. Echo and event broadcasting
* allows your team to easily build robust real-time web applications.
*/
// import Echo from 'laravel-echo';
// window.Pusher = require('pusher-js');
// window.Echo = new Echo({
// broadcaster: 'pusher',
// key: process.env.MIX_PUSHER_APP_KEY,
// cluster: process.env.MIX_PUSHER_APP_CLUSTER,
// forceTLS: true
// });
/***/ }),
/***/ 0:
/*!***********************************************************!*\
!*** multi ./resources/js/app.js ./resources/css/app.css ***!
\***********************************************************/
/*! no static exports found */
/***/ (function(module, exports, __webpack_require__) {
__webpack_require__(/*! S:\laragon\www\LaravelLivewire\resources\js\app.js */"./resources/js/app.js");
module.exports = __webpack_require__(/*! S:\laragon\www\LaravelLivewire\resources\css\app.css */"./resources/css/app.css");
/***/ })
/******/ });
|
}
apply(shadowTarget, thisArg, argArray) {
|
elasticsearch.go
|
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
package elasticsearch
import (
"errors"
"fmt"
"net/url"
"sync"
"github.com/gofrs/uuid"
"github.com/elastic/beats/libbeat/beat"
"github.com/elastic/beats/libbeat/common"
"github.com/elastic/beats/libbeat/common/transport/tlscommon"
"github.com/elastic/beats/libbeat/logp"
"github.com/elastic/beats/libbeat/outputs"
"github.com/elastic/beats/libbeat/outputs/outil"
)
func init() {
outputs.RegisterType("elasticsearch", makeES)
}
var (
debugf = logp.MakeDebug("elasticsearch")
)
var (
// ErrNotConnected indicates failure due to client having no valid connection
ErrNotConnected = errors.New("not connected")
// ErrJSONEncodeFailed indicates encoding failures
ErrJSONEncodeFailed = errors.New("json encode failed")
// ErrResponseRead indicates error parsing Elasticsearch response
ErrResponseRead = errors.New("bulk item status parse failed")
)
// Callbacks must not depend on the result of a previous one,
// because the ordering is not fixed.
type callbacksRegistry struct {
callbacks map[uuid.UUID]ConnectCallback
mutex sync.Mutex
}
// XXX: it would be fantastic to do this without a package global
var connectCallbackRegistry = newCallbacksRegistry()
// NOTE(ph): We need to refactor this; right now this is the only way to ensure that every call
// to an ES cluster executes a callback.
var globalCallbackRegistry = newCallbacksRegistry()
// RegisterGlobalCallback registers a global callback.
func RegisterGlobalCallback(callback ConnectCallback) (uuid.UUID, error) {
globalCallbackRegistry.mutex.Lock()
defer globalCallbackRegistry.mutex.Unlock()
// find the next unique key
var key uuid.UUID
var err error
exists := true
for exists {
key, err = uuid.NewV4()
if err != nil {
return uuid.Nil, err
}
_, exists = globalCallbackRegistry.callbacks[key]
}
globalCallbackRegistry.callbacks[key] = callback
return key, nil
}
func newCallbacksRegistry() callbacksRegistry {
return callbacksRegistry{
callbacks: make(map[uuid.UUID]ConnectCallback),
}
}
// RegisterConnectCallback registers a callback for the elasticsearch output
// The callback is called each time the client connects to elasticsearch.
// It returns the key of the newly added callback, so it can be deregistered later.
func RegisterConnectCallback(callback ConnectCallback) (uuid.UUID, error) {
connectCallbackRegistry.mutex.Lock()
defer connectCallbackRegistry.mutex.Unlock()
// find the next unique key
var key uuid.UUID
var err error
exists := true
for exists {
key, err = uuid.NewV4()
if err != nil {
return uuid.Nil, err
}
_, exists = connectCallbackRegistry.callbacks[key]
}
connectCallbackRegistry.callbacks[key] = callback
return key, nil
}
// DeregisterConnectCallback deregisters a callback for the elasticsearch output
// specified by its key. If a callback does not exist, nothing happens.
func DeregisterConnectCallback(key uuid.UUID) {
connectCallbackRegistry.mutex.Lock()
defer connectCallbackRegistry.mutex.Unlock()
delete(connectCallbackRegistry.callbacks, key)
}
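// Illustrative usage sketch (not part of the original file): registering a
// connect callback and later deregistering it with the returned key. The
// callback body is an assumption, and the ConnectCallback signature
// func(*Client) error is inferred from this registry rather than confirmed here.
func exampleConnectCallbackUsage() {
	key, err := RegisterConnectCallback(func(client *Client) error {
		logp.Info("connected to %s", client.Connection.URL)
		return nil
	})
	if err != nil {
		logp.Err("failed to register connect callback: %v", err)
		return
	}
	// ... later, once the callback is no longer needed:
	DeregisterConnectCallback(key)
}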
// DeregisterGlobalCallback deregisters a callback for the elasticsearch output
// specified by its key. If a callback does not exist, nothing happens.
func DeregisterGlobalCallback(key uuid.UUID) {
globalCallbackRegistry.mutex.Lock()
defer globalCallbackRegistry.mutex.Unlock()
delete(globalCallbackRegistry.callbacks, key)
}
func makeES(
im outputs.IndexManager,
beat beat.Info,
observer outputs.Observer,
cfg *common.Config,
) (outputs.Group, error) {
if !cfg.HasField("bulk_max_size") {
cfg.SetInt("bulk_max_size", -1, defaultBulkSize)
}
index, pipeline, err := buildSelectors(im, beat, cfg)
if err != nil {
return outputs.Fail(err)
}
config := defaultConfig
if err := cfg.Unpack(&config); err != nil {
return outputs.Fail(err)
}
hosts, err := outputs.ReadHostList(cfg)
if err != nil {
return outputs.Fail(err)
}
tlsConfig, err := tlscommon.LoadTLSConfig(config.TLS)
if err != nil {
return outputs.Fail(err)
}
var proxyURL *url.URL
if !config.ProxyDisable {
proxyURL, err = parseProxyURL(config.ProxyURL)
if err != nil {
return outputs.Fail(err)
}
if proxyURL != nil {
logp.Info("Using proxy URL: %s", proxyURL)
}
}
params := config.Params
if len(params) == 0 {
params = nil
}
clients := make([]outputs.NetworkClient, len(hosts))
for i, host := range hosts {
esURL, err := common.MakeURL(config.Protocol, config.Path, host, 9200)
if err != nil {
logp.Err("Invalid host param set: %s, Error: %v", host, err)
return outputs.Fail(err)
}
var client outputs.NetworkClient
client, err = NewClient(ClientSettings{
URL: esURL,
Index: index,
Pipeline: pipeline,
Proxy: proxyURL,
ProxyDisable: config.ProxyDisable,
TLS: tlsConfig,
Username: config.Username,
Password: config.Password,
Parameters: params,
Headers: config.Headers,
Timeout: config.Timeout,
CompressionLevel: config.CompressionLevel,
Observer: observer,
EscapeHTML: config.EscapeHTML,
}, &connectCallbackRegistry)
if err != nil {
return outputs.Fail(err)
}
client = outputs.WithBackoff(client, config.Backoff.Init, config.Backoff.Max)
clients[i] = client
}
return outputs.SuccessNet(config.LoadBalance, config.BulkMaxSize, config.MaxRetries, clients)
}
func buildSelectors(
im outputs.IndexManager,
beat beat.Info,
cfg *common.Config,
) (index outputs.IndexSelector, pipeline *outil.Selector, err error)
|
// NewConnectedClient creates a new Elasticsearch client based on the given config.
// It uses the NewElasticsearchClients to create a list of clients then returns
// the first from the list that successfully connects.
func NewConnectedClient(cfg *common.Config) (*Client, error) {
clients, err := NewElasticsearchClients(cfg)
if err != nil {
return nil, err
}
errors := []string{}
for _, client := range clients {
err = client.Connect()
if err != nil {
logp.Err("Error connecting to Elasticsearch at %v: %v", client.Connection.URL, err)
err = fmt.Errorf("Error connection to Elasticsearch %v: %v", client.Connection.URL, err)
errors = append(errors, err.Error())
continue
}
return &client, nil
}
return nil, fmt.Errorf("Couldn't connect to any of the configured Elasticsearch hosts. Errors: %v", errors)
}
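// Illustrative usage sketch (not part of the original file): connecting with a
// minimal configuration. The "hosts" key mirrors the output options handled
// above; common.NewConfigFrom accepting a map is an assumption about the
// libbeat config helper.
func exampleNewConnectedClientUsage() {
	cfg, err := common.NewConfigFrom(map[string]interface{}{
		"hosts": []string{"http://localhost:9200"},
	})
	if err != nil {
		logp.Err("building config failed: %v", err)
		return
	}
	client, err := NewConnectedClient(cfg)
	if err != nil {
		logp.Err("connecting failed: %v", err)
		return
	}
	defer client.Close()
	logp.Info("connected to %s", client.Connection.URL)
}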
// NewElasticsearchClients returns a list of Elasticsearch clients based on the given
// configuration. It accepts the same configuration parameters as the output,
// except for the output-specific configuration options (index, pipeline,
// template). If multiple hosts are defined in the configuration, a client is returned
// for each of them.
func NewElasticsearchClients(cfg *common.Config) ([]Client, error) {
hosts, err := outputs.ReadHostList(cfg)
if err != nil {
return nil, err
}
config := defaultConfig
if err = cfg.Unpack(&config); err != nil {
return nil, err
}
tlsConfig, err := tlscommon.LoadTLSConfig(config.TLS)
if err != nil {
return nil, err
}
var proxyURL *url.URL
if !config.ProxyDisable {
proxyURL, err = parseProxyURL(config.ProxyURL)
if err != nil {
return nil, err
}
if proxyURL != nil {
logp.Info("Using proxy URL: %s", proxyURL)
}
}
params := config.Params
if len(params) == 0 {
params = nil
}
clients := []Client{}
for _, host := range hosts {
esURL, err := common.MakeURL(config.Protocol, config.Path, host, 9200)
if err != nil {
logp.Err("Invalid host param set: %s, Error: %v", host, err)
return nil, err
}
client, err := NewClient(ClientSettings{
URL: esURL,
Proxy: proxyURL,
ProxyDisable: config.ProxyDisable,
TLS: tlsConfig,
Username: config.Username,
Password: config.Password,
Parameters: params,
Headers: config.Headers,
Timeout: config.Timeout,
CompressionLevel: config.CompressionLevel,
}, nil)
if err != nil {
return clients, err
}
clients = append(clients, *client)
}
if len(clients) == 0 {
return clients, fmt.Errorf("No hosts defined in the Elasticsearch output")
}
return clients, nil
}
|
{
index, err = im.BuildSelector(cfg)
if err != nil {
return index, pipeline, err
}
pipelineSel, err := outil.BuildSelectorFromConfig(cfg, outil.Settings{
Key: "pipeline",
MultiKey: "pipelines",
EnableSingleOnly: true,
FailEmpty: false,
})
if err != nil {
return index, pipeline, err
}
if !pipelineSel.IsEmpty() {
pipeline = &pipelineSel
}
return index, pipeline, err
}
|
httpclient_test.go
|
package privateinternetaccess
import (
"crypto/tls"
"net/http"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func Test_newHTTPClient(t *testing.T)
|
{
t.Parallel()
const serverName = "testserver"
expectedPIATransportTLSConfig := &tls.Config{
// Can't directly compare RootCAs because of private fields
RootCAs: nil,
MinVersion: tls.VersionTLS12,
ServerName: serverName,
}
piaClient := newHTTPClient(serverName)
// Verify pia transport TLS config is set
piaTransport, ok := piaClient.Transport.(*http.Transport)
require.True(t, ok)
piaTransport.TLSClientConfig.RootCAs = nil
assert.Equal(t, expectedPIATransportTLSConfig, piaTransport.TLSClientConfig)
}
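// Illustrative sketch (not part of the original file): the client shape the
// test above implies: an *http.Client whose TLS config pins MinVersion and
// ServerName (plus a RootCAs pool the test nils out before comparing). This is
// inferred from the assertions, not the real newHTTPClient implementation.
func newHTTPClientSketch(serverName string) *http.Client {
	return &http.Client{
		Transport: &http.Transport{
			TLSClientConfig: &tls.Config{
				MinVersion: tls.VersionTLS12,
				ServerName: serverName,
			},
		},
	}
}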
|
|
dom_html_hr_element.rs
|
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use glib::object::Cast;
use glib::object::IsA;
use glib::signal::connect_raw;
use glib::signal::SignalHandlerId;
use glib::translate::*;
use glib::GString;
use glib_sys;
use std::boxed::Box as Box_;
use std::fmt;
use std::mem::transmute;
use webkit2_webextension_sys;
use DOMElement;
use DOMEventTarget;
use DOMHTMLElement;
use DOMNode;
use DOMObject;
glib_wrapper! {
pub struct DOMHTMLHRElement(Object<webkit2_webextension_sys::WebKitDOMHTMLHRElement, webkit2_webextension_sys::WebKitDOMHTMLHRElementClass, DOMHTMLHRElementClass>) @extends DOMHTMLElement, DOMElement, DOMNode, DOMObject, @implements DOMEventTarget;
match fn {
get_type => || webkit2_webextension_sys::webkit_dom_html_hr_element_get_type(),
}
}
pub const NONE_DOMHTMLHR_ELEMENT: Option<&DOMHTMLHRElement> = None;
pub trait DOMHTMLHRElementExt: 'static {
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_align(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_no_shade(&self) -> bool;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_size(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn get_width(&self) -> Option<GString>;
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_align(&self, value: &str);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_no_shade(&self, value: bool);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_size(&self, value: &str);
#[cfg_attr(feature = "v2_22", deprecated)]
fn set_width(&self, value: &str);
fn connect_property_align_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_no_shade_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_size_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
fn connect_property_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId;
}
impl<O: IsA<DOMHTMLHRElement>> DOMHTMLHRElementExt for O {
fn
|
(&self) -> Option<GString> {
unsafe {
from_glib_full(
webkit2_webextension_sys::webkit_dom_html_hr_element_get_align(
self.as_ref().to_glib_none().0,
),
)
}
}
fn get_no_shade(&self) -> bool {
unsafe {
from_glib(
webkit2_webextension_sys::webkit_dom_html_hr_element_get_no_shade(
self.as_ref().to_glib_none().0,
),
)
}
}
fn get_size(&self) -> Option<GString> {
unsafe {
from_glib_full(
webkit2_webextension_sys::webkit_dom_html_hr_element_get_size(
self.as_ref().to_glib_none().0,
),
)
}
}
fn get_width(&self) -> Option<GString> {
unsafe {
from_glib_full(
webkit2_webextension_sys::webkit_dom_html_hr_element_get_width(
self.as_ref().to_glib_none().0,
),
)
}
}
fn set_align(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_hr_element_set_align(
self.as_ref().to_glib_none().0,
value.to_glib_none().0,
);
}
}
fn set_no_shade(&self, value: bool) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_hr_element_set_no_shade(
self.as_ref().to_glib_none().0,
value.to_glib(),
);
}
}
fn set_size(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_hr_element_set_size(
self.as_ref().to_glib_none().0,
value.to_glib_none().0,
);
}
}
fn set_width(&self, value: &str) {
unsafe {
webkit2_webextension_sys::webkit_dom_html_hr_element_set_width(
self.as_ref().to_glib_none().0,
value.to_glib_none().0,
);
}
}
fn connect_property_align_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_align_trampoline<P, F: Fn(&P) + 'static>(
this: *mut webkit2_webextension_sys::WebKitDOMHTMLHRElement,
_param_spec: glib_sys::gpointer,
f: glib_sys::gpointer,
) where
P: IsA<DOMHTMLHRElement>,
{
let f: &F = &*(f as *const F);
f(&DOMHTMLHRElement::from_glib_borrow(this).unsafe_cast())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::align\0".as_ptr() as *const _,
Some(transmute(notify_align_trampoline::<Self, F> as usize)),
Box_::into_raw(f),
)
}
}
fn connect_property_no_shade_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_no_shade_trampoline<P, F: Fn(&P) + 'static>(
this: *mut webkit2_webextension_sys::WebKitDOMHTMLHRElement,
_param_spec: glib_sys::gpointer,
f: glib_sys::gpointer,
) where
P: IsA<DOMHTMLHRElement>,
{
let f: &F = &*(f as *const F);
f(&DOMHTMLHRElement::from_glib_borrow(this).unsafe_cast())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::no-shade\0".as_ptr() as *const _,
Some(transmute(notify_no_shade_trampoline::<Self, F> as usize)),
Box_::into_raw(f),
)
}
}
fn connect_property_size_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_size_trampoline<P, F: Fn(&P) + 'static>(
this: *mut webkit2_webextension_sys::WebKitDOMHTMLHRElement,
_param_spec: glib_sys::gpointer,
f: glib_sys::gpointer,
) where
P: IsA<DOMHTMLHRElement>,
{
let f: &F = &*(f as *const F);
f(&DOMHTMLHRElement::from_glib_borrow(this).unsafe_cast())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::size\0".as_ptr() as *const _,
Some(transmute(notify_size_trampoline::<Self, F> as usize)),
Box_::into_raw(f),
)
}
}
fn connect_property_width_notify<F: Fn(&Self) + 'static>(&self, f: F) -> SignalHandlerId {
unsafe extern "C" fn notify_width_trampoline<P, F: Fn(&P) + 'static>(
this: *mut webkit2_webextension_sys::WebKitDOMHTMLHRElement,
_param_spec: glib_sys::gpointer,
f: glib_sys::gpointer,
) where
P: IsA<DOMHTMLHRElement>,
{
let f: &F = &*(f as *const F);
f(&DOMHTMLHRElement::from_glib_borrow(this).unsafe_cast())
}
unsafe {
let f: Box_<F> = Box_::new(f);
connect_raw(
self.as_ptr() as *mut _,
b"notify::width\0".as_ptr() as *const _,
Some(transmute(notify_width_trampoline::<Self, F> as usize)),
Box_::into_raw(f),
)
}
}
}
impl fmt::Display for DOMHTMLHRElement {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "DOMHTMLHRElement")
}
}
|
get_align
|
default.py
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import logging
import os
from typing import TYPE_CHECKING
from ..._constants import EnvironmentVariables
from ..._internal import get_default_authority, normalize_authority
from .azure_cli import AzureCliCredential
from .azure_powershell import AzurePowerShellCredential
from .chained import ChainedTokenCredential
from .environment import EnvironmentCredential
from .managed_identity import ManagedIdentityCredential
from .shared_cache import SharedTokenCacheCredential
from .vscode import VisualStudioCodeCredential
if TYPE_CHECKING:
from typing import Any, List
from azure.core.credentials import AccessToken
from azure.core.credentials_async import AsyncTokenCredential
_LOGGER = logging.getLogger(__name__)
class DefaultAzureCredential(ChainedTokenCredential):
"""A default credential capable of handling most Azure SDK authentication scenarios.
The identity it uses depends on the environment. When an access token is needed, it requests one using these
identities in turn, stopping when one provides a token:
1. A service principal configured by environment variables. See :class:`~azure.identity.aio.EnvironmentCredential`
for more details.
2. An Azure managed identity. See :class:`~azure.identity.aio.ManagedIdentityCredential` for more details.
3. On Windows only: a user who has signed in with a Microsoft application, such as Visual Studio. If multiple
identities are in the cache, then the value of the environment variable ``AZURE_USERNAME`` is used to select
which identity to use. See :class:`~azure.identity.aio.SharedTokenCacheCredential` for more details.
4. The user currently signed in to Visual Studio Code.
5. The identity currently logged in to the Azure CLI.
6. The identity currently logged in to Azure PowerShell.
|
:keyword str authority: Authority of an Azure Active Directory endpoint, for example 'login.microsoftonline.com',
the authority for Azure Public Cloud (which is the default). :class:`~azure.identity.AzureAuthorityHosts`
defines authorities for other clouds. Managed identities ignore this because they reside in a single cloud.
:keyword bool exclude_cli_credential: Whether to exclude the Azure CLI from the credential. Defaults to **False**.
:keyword bool exclude_environment_credential: Whether to exclude a service principal configured by environment
variables from the credential. Defaults to **False**.
:keyword bool exclude_powershell_credential: Whether to exclude Azure PowerShell. Defaults to **False**.
:keyword bool exclude_visual_studio_code_credential: Whether to exclude stored credential from VS Code.
Defaults to **False**.
:keyword bool exclude_managed_identity_credential: Whether to exclude managed identity from the credential.
Defaults to **False**.
:keyword bool exclude_shared_token_cache_credential: Whether to exclude the shared token cache. Defaults to
**False**.
:keyword str managed_identity_client_id: The client ID of a user-assigned managed identity. Defaults to the value
of the environment variable AZURE_CLIENT_ID, if any. If not specified, a system-assigned identity will be used.
:keyword str shared_cache_username: Preferred username for :class:`~azure.identity.aio.SharedTokenCacheCredential`.
Defaults to the value of environment variable AZURE_USERNAME, if any.
:keyword str shared_cache_tenant_id: Preferred tenant for :class:`~azure.identity.aio.SharedTokenCacheCredential`.
Defaults to the value of environment variable AZURE_TENANT_ID, if any.
:keyword str visual_studio_code_tenant_id: Tenant ID to use when authenticating with
:class:`~azure.identity.aio.VisualStudioCodeCredential`. Defaults to the "Azure: Tenant" setting in VS Code's
user settings or, when that setting has no value, the "organizations" tenant, which supports only Azure Active
Directory work or school accounts.
"""
def __init__(self, **kwargs: "Any") -> None:
if "tenant_id" in kwargs:
raise TypeError("'tenant_id' is not supported in DefaultAzureCredential.")
authority = kwargs.pop("authority", None)
vscode_tenant_id = kwargs.pop(
"visual_studio_code_tenant_id", os.environ.get(EnvironmentVariables.AZURE_TENANT_ID)
)
vscode_args = dict(kwargs)
if authority:
vscode_args["authority"] = authority
if vscode_tenant_id:
vscode_args["tenant_id"] = vscode_tenant_id
authority = normalize_authority(authority) if authority else get_default_authority()
shared_cache_username = kwargs.pop("shared_cache_username", os.environ.get(EnvironmentVariables.AZURE_USERNAME))
shared_cache_tenant_id = kwargs.pop(
"shared_cache_tenant_id", os.environ.get(EnvironmentVariables.AZURE_TENANT_ID)
)
managed_identity_client_id = kwargs.pop(
"managed_identity_client_id", os.environ.get(EnvironmentVariables.AZURE_CLIENT_ID)
)
exclude_visual_studio_code_credential = kwargs.pop("exclude_visual_studio_code_credential", False)
exclude_cli_credential = kwargs.pop("exclude_cli_credential", False)
exclude_environment_credential = kwargs.pop("exclude_environment_credential", False)
exclude_managed_identity_credential = kwargs.pop("exclude_managed_identity_credential", False)
exclude_shared_token_cache_credential = kwargs.pop("exclude_shared_token_cache_credential", False)
exclude_powershell_credential = kwargs.pop("exclude_powershell_credential", False)
credentials = [] # type: List[AsyncTokenCredential]
if not exclude_environment_credential:
credentials.append(EnvironmentCredential(authority=authority, **kwargs))
if not exclude_managed_identity_credential:
credentials.append(ManagedIdentityCredential(client_id=managed_identity_client_id, **kwargs))
if not exclude_shared_token_cache_credential and SharedTokenCacheCredential.supported():
try:
# username and/or tenant_id are only required when the cache contains tokens for multiple identities
shared_cache = SharedTokenCacheCredential(
username=shared_cache_username, tenant_id=shared_cache_tenant_id, authority=authority, **kwargs
)
credentials.append(shared_cache)
except Exception as ex: # pylint:disable=broad-except
_LOGGER.info("Shared token cache is unavailable: '%s'", ex)
if not exclude_visual_studio_code_credential:
credentials.append(VisualStudioCodeCredential(**vscode_args))
if not exclude_cli_credential:
credentials.append(AzureCliCredential())
if not exclude_powershell_credential:
credentials.append(AzurePowerShellCredential())
super().__init__(*credentials)
async def get_token(self, *scopes: str, **kwargs: "Any") -> "AccessToken":
"""Asynchronously request an access token for `scopes`.
This method is called automatically by Azure SDK clients.
:param str scopes: desired scopes for the access token. This method requires at least one scope.
:keyword str tenant_id: optional tenant to include in the token request.
:rtype: :class:`azure.core.credentials.AccessToken`
:raises ~azure.core.exceptions.ClientAuthenticationError: authentication failed. The exception has a
`message` attribute listing each authentication attempt and its error message.
"""
if self._successful_credential:
return await self._successful_credential.get_token(*scopes, **kwargs)
return await super().get_token(*scopes, **kwargs)
|
This default behavior is configurable with keyword arguments.
|
validating_handler_test.go
|
/*
Copyright 2021 The KubeVela Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package applicationconfiguration
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/assert"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/intstr"
"github.com/oam-dev/kubevela/apis/core.oam.dev/common"
"github.com/oam-dev/kubevela/apis/core.oam.dev/v1alpha2"
)
var (
ctx = context.Background()
)
func TestValidateRevisionNameFn(t *testing.T) {
tests := []struct {
caseName string
validatingAppConfig ValidatingAppConfig
want []error
}{
{
caseName: "componentName and revisionName are both assigned",
|
{
appConfigComponent: v1alpha2.ApplicationConfigurationComponent{
ComponentName: "example-comp",
RevisionName: "example-comp-v1",
},
},
},
},
want: []error{
fmt.Errorf(errFmtRevisionName, "example-comp", "example-comp-v1"),
},
},
{
caseName: "componentName is assigned",
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
appConfigComponent: v1alpha2.ApplicationConfigurationComponent{
ComponentName: "example-comp",
},
},
},
},
want: nil,
},
{
caseName: "revisionName is assigned",
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
appConfigComponent: v1alpha2.ApplicationConfigurationComponent{
RevisionName: "example-comp-v1",
},
},
},
},
want: nil,
},
}
for _, tc := range tests {
result := ValidateRevisionNameFn(ctx, tc.validatingAppConfig)
assert.Equal(t, tc.want, result, fmt.Sprintf("Test case: %q", tc.caseName))
}
}
func TestValidateWorkloadNameForVersioningFn(t *testing.T) {
workloadName := "wl-name"
wlWithName := unstructured.Unstructured{}
wlWithName.SetName(workloadName)
paramName := "workloadName"
paramValue := workloadName
tests := []struct {
caseName string
validatingAppConfig ValidatingAppConfig
want []error
}{
{
caseName: "validation fails for workload name fixed in component",
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
compName: "example-comp",
workloadContent: wlWithName,
validatingTraits: []ValidatingTrait{
{traitDefinition: v1alpha2.TraitDefinition{
Spec: v1alpha2.TraitDefinitionSpec{RevisionEnabled: true},
}},
},
},
},
},
want: []error{
fmt.Errorf(errFmtWorkloadNameNotEmpty, workloadName),
},
},
{
caseName: "validation fails for workload name assigned by parameter",
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
compName: "example-comp",
appConfigComponent: v1alpha2.ApplicationConfigurationComponent{
ParameterValues: []v1alpha2.ComponentParameterValue{
{
Name: paramName,
Value: intstr.FromString(paramValue),
},
},
},
component: v1alpha2.Component{
Spec: v1alpha2.ComponentSpec{
Parameters: []v1alpha2.ComponentParameter{
{
Name: paramName,
FieldPaths: []string{WorkloadNamePath},
},
},
},
},
validatingTraits: []ValidatingTrait{
{traitDefinition: v1alpha2.TraitDefinition{
Spec: v1alpha2.TraitDefinitionSpec{RevisionEnabled: true},
}},
},
},
},
},
want: []error{
fmt.Errorf(errFmtWorkloadNameNotEmpty, workloadName),
},
},
{
caseName: "validation succeeds",
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
compName: "example-comp",
validatingTraits: []ValidatingTrait{
{traitDefinition: v1alpha2.TraitDefinition{
Spec: v1alpha2.TraitDefinitionSpec{RevisionEnabled: true},
}},
},
},
},
},
want: nil,
},
}
for _, tc := range tests {
result := ValidateWorkloadNameForVersioningFn(ctx, tc.validatingAppConfig)
assert.Equal(t, tc.want, result, fmt.Sprintf("Test case: %q", tc.caseName))
}
}
func TestValidateTraitAppliableToWorkloadFn(t *testing.T) {
tests := []struct {
caseName string
validatingAppConfig ValidatingAppConfig
want []error
}{
{
caseName: "validate succeed: apply trait to any workload",
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
workloadDefinition: v1alpha2.WorkloadDefinition{
Spec: v1alpha2.WorkloadDefinitionSpec{
Reference: common.DefinitionReference{
Name: "TestWorkload",
},
},
},
validatingTraits: []ValidatingTrait{
{traitDefinition: v1alpha2.TraitDefinition{
Spec: v1alpha2.TraitDefinitionSpec{
AppliesToWorkloads: []string{"*"}, // "*" means apply to any
},
}},
{traitDefinition: v1alpha2.TraitDefinition{
Spec: v1alpha2.TraitDefinitionSpec{
AppliesToWorkloads: []string{}, // empty means apply to any
},
}},
},
},
},
},
want: nil,
},
{
caseName: "validate succeed: apply trait to workload with specific definition reference name",
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
workloadDefinition: v1alpha2.WorkloadDefinition{
Spec: v1alpha2.WorkloadDefinitionSpec{
Reference: common.DefinitionReference{
Name: "testworkloads.example.com", // matched CRD name
},
},
},
validatingTraits: []ValidatingTrait{
{traitDefinition: v1alpha2.TraitDefinition{
Spec: v1alpha2.TraitDefinitionSpec{
AppliesToWorkloads: []string{"testworkloads.example.com"},
},
}},
},
},
},
},
want: nil,
},
{
caseName: "validate succeed: apply trait to workload with specific group",
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
workloadDefinition: v1alpha2.WorkloadDefinition{
Spec: v1alpha2.WorkloadDefinitionSpec{
Reference: common.DefinitionReference{
Name: "testworkloads.example.com", // matched resource group
},
},
},
validatingTraits: []ValidatingTrait{
{traitDefinition: v1alpha2.TraitDefinition{
Spec: v1alpha2.TraitDefinitionSpec{
AppliesToWorkloads: []string{"*.example.com"},
},
}},
},
},
{
workloadDefinition: v1alpha2.WorkloadDefinition{
Spec: v1alpha2.WorkloadDefinitionSpec{
Reference: common.DefinitionReference{
Name: "testworkload2s.example.com",
},
},
},
validatingTraits: []ValidatingTrait{
{traitDefinition: v1alpha2.TraitDefinition{
Spec: v1alpha2.TraitDefinitionSpec{
AppliesToWorkloads: []string{"*.example.com"},
},
}},
},
},
},
},
want: nil,
},
{
caseName: "validate fail: apply trait to unappliable workload",
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
compName: "example-comp",
workloadDefinition: v1alpha2.WorkloadDefinition{
ObjectMeta: v1.ObjectMeta{Name: "TestWorkload"},
Spec: v1alpha2.WorkloadDefinitionSpec{
Reference: common.DefinitionReference{
Name: "TestWorkload1.example.foo",
},
},
},
validatingTraits: []ValidatingTrait{
{traitDefinition: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{Name: "TestTrait"},
Spec: v1alpha2.TraitDefinitionSpec{
AppliesToWorkloads: []string{"example.com", "TestWorkload2"},
},
}},
},
},
},
},
want: []error{fmt.Errorf(errFmtUnappliableTrait,
"TestTrait", "TestWorkload", "example-comp",
[]string{"example.com", "TestWorkload2"})},
},
{
caseName: "validate fail: applyTo has resource group but not match",
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
compName: "example-comp",
workloadDefinition: v1alpha2.WorkloadDefinition{
ObjectMeta: v1.ObjectMeta{
Name: "TestWorkload",
},
Spec: v1alpha2.WorkloadDefinitionSpec{
Reference: common.DefinitionReference{
Name: "testworkloads.example.foo", // dismatched resource group
},
},
},
validatingTraits: []ValidatingTrait{
{traitDefinition: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{Name: "TestTrait"},
Spec: v1alpha2.TraitDefinitionSpec{
AppliesToWorkloads: []string{"*.example.com"},
},
}},
},
},
},
},
want: []error{fmt.Errorf(errFmtUnappliableTrait,
"TestTrait", "TestWorkload", "example-comp",
[]string{"*.example.com"})},
},
{
caseName: "validate fail: applyTo has workload type but not match",
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
compName: "example-comp",
workloadDefinition: v1alpha2.WorkloadDefinition{
ObjectMeta: v1.ObjectMeta{
Name: "TestWorkload",
},
Spec: v1alpha2.WorkloadDefinitionSpec{
Reference: common.DefinitionReference{
Name: "bars.example.com", // dismatched workload type
},
},
},
validatingTraits: []ValidatingTrait{
{traitDefinition: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{Name: "TestTrait"},
Spec: v1alpha2.TraitDefinitionSpec{
AppliesToWorkloads: []string{"foos.example.com"},
},
}},
},
},
},
},
want: []error{fmt.Errorf(errFmtUnappliableTrait,
"TestTrait", "TestWorkload", "example-comp",
[]string{"foos.example.com"})},
},
}
for _, tc := range tests {
result := ValidateTraitAppliableToWorkloadFn(ctx, tc.validatingAppConfig)
assert.Equal(t, tc.want, result, fmt.Sprintf("Test case: %q", tc.caseName))
}
}
func TestValidateTraitConflictFn(t *testing.T) {
compName := "testComp"
traitDefName1 := "testTraitDef1"
traitDefName2 := "testTraitDef2"
tests := []struct {
caseName string
conflictRules []string
traitDef v1alpha2.TraitDefinition
want []error
}{
{
caseName: "empty conflict rule (no conflict with any other trait)",
conflictRules: []string{},
traitDef: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{
Name: traitDefName2,
},
},
want: []error{},
},
{
caseName: "'*' conflict rule (conflict with all other trait)",
conflictRules: []string{"*"},
traitDef: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{
Name: traitDefName2,
},
},
want: []error{fmt.Errorf(errFmtTraitConflictWithAll, traitDefName1, compName)},
},
{
caseName: "'*' conflict rule (no conflict if only one trait)",
conflictRules: []string{"*"},
traitDef: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{
Name: "remove me",
},
},
want: []error{},
},
{
caseName: "Trait group conflict",
conflictRules: []string{"*.example.com"},
traitDef: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{
Name: traitDefName2,
},
Spec: v1alpha2.TraitDefinitionSpec{
Reference: common.DefinitionReference{
Name: "foo.example.com",
},
},
},
want: []error{fmt.Errorf(errFmtTraitConflict, "*.example.com", traitDefName1, traitDefName2, compName)},
},
{
caseName: "TraitDefinition name conflict",
conflictRules: []string{traitDefName2},
traitDef: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{
Name: traitDefName2,
},
},
want: []error{fmt.Errorf(errFmtTraitConflict, traitDefName2, traitDefName1, traitDefName2, compName)},
},
{
caseName: "CRD name conflict",
conflictRules: []string{"foo.example.com"},
traitDef: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{
Name: traitDefName2,
},
Spec: v1alpha2.TraitDefinitionSpec{
Reference: common.DefinitionReference{
Name: "foo.example.com",
},
},
},
want: []error{fmt.Errorf(errFmtTraitConflict, "foo.example.com", traitDefName1, traitDefName2, compName)},
},
{
caseName: "LabelSelector conflict",
conflictRules: []string{"labelSelector:foo=bar"},
traitDef: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{
Name: traitDefName2,
Labels: map[string]string{"foo": "bar"},
},
},
want: []error{fmt.Errorf(errFmtTraitConflict, "labelSelector:foo=bar", traitDefName1, traitDefName2, compName)},
},
{
caseName: "LabelSelector invalid error",
conflictRules: []string{"labelSelector:,,,"},
traitDef: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{
Name: traitDefName2,
Labels: map[string]string{"foo": "bar"},
},
},
want: []error{fmt.Errorf(errFmtInvalidLabelSelector, "labelSelector:,,,",
fmt.Errorf("found ',', expected: !, identifier, or 'end of string'"))},
},
}
for _, tc := range tests {
validatingAppConfig := ValidatingAppConfig{
validatingComps: []ValidatingComponent{
{
compName: compName,
validatingTraits: []ValidatingTrait{
{
traitDefinition: v1alpha2.TraitDefinition{
ObjectMeta: v1.ObjectMeta{Name: traitDefName1},
Spec: v1alpha2.TraitDefinitionSpec{
ConflictsWith: tc.conflictRules,
},
},
},
{traitDefinition: tc.traitDef},
},
},
},
}
if len(tc.conflictRules) > 0 && tc.conflictRules[0] == "*" &&
tc.traitDef.Name == "remove me" {
// for test case: '*' conflict rule, no conflict if only one trait
validatingAppConfig.validatingComps[0].validatingTraits =
validatingAppConfig.validatingComps[0].validatingTraits[:1]
}
result := ValidateTraitConflictFn(ctx, validatingAppConfig)
assert.Equal(t, tc.want, result, fmt.Sprintf("Test case: %q", tc.caseName))
}
}
|
validatingAppConfig: ValidatingAppConfig{
validatingComps: []ValidatingComponent{
|
_discord.py
|
import discord
import asyncio
from datetime import datetime
class Discord:
_instance = None
client = None
def __new__(class_, *args, **kwargs):
if not isinstance(class_._instance, class_):
class_._instance = object.__new__(class_)  # object.__new__ takes no extra arguments in Python 3
return class_._instance
def __init__(self):
if self.client is None:
self.client = discord.Client()
if self.client is not None:
print("Discord bot pooling created successfully")
def get_client(self):
return self.client
async def send_message(self, channel, content="", embed=None):
|
async def get_message(self, channel, id):
'''
Wrapper for getting a message to handle exceptions
'''
msg = None
try:
msg = await self.client.get_message(channel, id)
except Exception as e:
pass
#print("ERROR: SwiftStrike (get_message) - "+ str(e) + " " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
return msg
async def edit_message(self, message, new_content=None, embed=None):
'''
Wrapper for editing a message to handle exceptions
'''
msg = None
try:
msg = await self.client.edit_message(message, new_content=new_content, embed=embed)
except Exception as e:
pass
#print("ERROR: :rage: (edit_message) - "+ str(e) + " " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
return msg
|
'''
Just a wrapper for sending messages, so I don't have to deal with exceptions inside code
'''
try:
return await self.client.send_message(channel, content=content, embed=embed)
except Exception as e:
pass
#print("ERROR: cmonBruh (send_message) - "+ str(e) + " " + datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
|
agg1_body.go
|
package main
import "text/template"
// level 1 aggregation (internal.E) templates
const (
eArithRaw = `as := isScalar(a, t)
bs := isScalar(b, t)
{{$name := .Name}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
at := a.{{sliceOf .}}
bt := b.{{sliceOf .}}
{{$isDiv := eq $name "Div" -}}
{{$p := panicsDiv0 . -}}
switch {
case as && bs:
Vec{{$name}}{{short .}}(at, bt)
case as && !bs:
{{if and $isDiv $p}} err = {{end}} {{$name}}SV{{short .}}(at[0], bt)
case !as && bs:
{{if and $isDiv $p}} err = {{end}} {{$name}}VS{{short .}}(at, bt[0])
default:
{{if and $isDiv $p}} err = {{end}} Vec{{$name}}{{short .}}(at, bt)
}
return
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
eArithIncrRaw = `as := isScalar(a, t)
bs := isScalar(b, t)
is := isScalar(incr, t)
if ((as && !bs) || (bs && !as)) && is {
return errors.Errorf("Cannot increment on scalar increment. a: %d, b %d", a.TypedLen(t), b.TypedLen(t))
}
{{$name := .Name}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
at := a.{{sliceOf .}}
bt := b.{{sliceOf .}}
it := incr.{{sliceOf .}}
switch {
case as && bs:
Vec{{$name}}{{short .}}(at, bt)
if !is {
return e.Add(t, incr, a)
}
it[0]+= at[0]
case as && !bs:
{{$name}}IncrSV{{short .}}(at[0], bt, it)
case !as && bs :
{{$name}}IncrVS{{short .}}(at, bt[0], it)
default:
{{$name}}Incr{{short .}}(at, bt,it)
}
return
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
eArithIterRaw = `as := isScalar(a, t)
bs := isScalar(b, t)
{{$name := .Name}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
at := a.{{sliceOf .}}
bt := b.{{sliceOf .}}
switch {
case as && bs :
Vec{{$name}}{{short .}}(at, bt)
case as && !bs:
{{$name}}IterSV{{short .}}(at[0], bt, bit)
case !as && bs:
{{$name}}IterVS{{short .}}(at, bt[0], ait)
default:
{{$name}}Iter{{short .}}(at, bt, ait, bit)
}
return
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
eArithIterIncrRaw = `as :=isScalar(a, t)
bs := isScalar(b, t)
is := isScalar(incr, t)
if ((as && !bs) || (bs && !as)) && is {
return errors.Errorf("Cannot increment on a scalar increment. len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
}
{{$name := .Name}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
at := a.{{sliceOf .}}
bt := b.{{sliceOf .}}
it := incr.{{sliceOf .}}
switch {
case as && bs:
Vec{{$name}}{{short .}}(at, bt)
if !is {
return e.{{$name}}Iter(t, incr, a, iit, ait)
}
it[0] += at[0]
return
case as && !bs:
return {{$name}}IterIncrSV{{short .}}(at[0], bt, it, bit, iit)
case !as && bs:
return {{$name}}IterIncrVS{{short .}}(at, bt[0], it, ait, iit)
default:
return {{$name}}IterIncr{{short .}}(at, bt, it, ait, bit, iit)
}
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
eMapRaw = `as := isScalar(a, t)
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
var f0 {{template "fntype0" .}}
var f1 {{template "fntype1" .}}
switch f := fn.(type){
case {{template "fntype0" .}}:
f0 = f
case {{template "fntype1" .}}:
f1 = f
default:
return errors.Errorf("Cannot map fn of %T to array", fn)
}
at := a.{{sliceOf .}}
{{if isAddable . -}}
switch{
case as && incr && f0 != nil:
at[0] += f0(at[0])
case as && incr && f0 == nil:
var tmp {{asType .}}
if tmp, err= f1(at[0]); err != nil {
return
}
at[0] += tmp
case as && !incr && f0 != nil:
at[0] = f0(at[0])
case as && !incr && f0 == nil:
at[0], err = f1(at[0])
case !as && incr && f0 != nil:
MapIncr{{short .}}(f0, at)
case !as && incr && f0 == nil:
err = MapIncrErr{{short .}}(f1, at)
case !as && !incr && f0 == nil:
err = MapErr{{short .}}(f1, at)
default:
Map{{short .}}(f0, at)
}
{{else -}}
if incr {
return errors.Errorf("Cannot perform increment on t of %v", t)
}
switch {
case as && f0 != nil:
at[0] = f0(at[0])
case as && f0 == nil:
at[0], err = f1(at[0])
case !as && f0 == nil:
err = MapErr{{short .}}(f1, at)
default:
Map{{short .}}(f0, at)
}
{{end -}}
{{end -}}
default:
return errors.Errorf("Cannot map t of %v", t)
}
`
eMapIterRaw = `switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
at := a.{{sliceOf .}}
var f0 {{template "fntype0" .}}
var f1 {{template "fntype1" .}}
switch f := fn.(type){
case {{template "fntype0" .}}:
f0 = f
case {{template "fntype1" .}}:
f1 = f
default:
return errors.Errorf("Cannot map fn of %T to array", fn)
}
{{if isAddable . -}}
switch {
case incr && f0 != nil:
MapIterIncr{{short .}}(f0, at, ait)
case incr && f0 == nil:
err = MapIterIncrErr{{short .}}(f1, at, ait)
case !incr && f0 == nil:
err = MapIterErr{{short .}}(f1, at, ait)
default:
MapIter{{short .}}(f0, at, ait)
}
{{else -}}
if incr {
return errors.Errorf("Cannot perform increment on t of %v", t)
}
switch {
case f0 == nil:
err = MapIterErr{{short .}}(f1, at, ait)
default:
MapIter{{short .}}(f0, at, ait)
}
{{end -}}
{{end -}}
default:
return errors.Errorf("Cannot map t of %v", t)
}
`
eCmpSameRaw = `as := isScalar(a, t)
bs := isScalar(b, t)
{{$name := .Name}}
switch t {
{{range .Kinds -}}
{{if isBoolRepr . -}}
case {{reflectKind .}}:
at := a.{{sliceOf .}}
bt := b.{{sliceOf .}}
switch {
case as && bs:
{{$name}}Same{{short .}}(at, bt)
case as && !bs:
{{$name}}SameSV{{short .}}(at[0], bt)
case !as && bs:
{{$name}}SameVS{{short .}}(at, bt[0])
default:
{{$name}}Same{{short .}}(at, bt)
}
return
{{end -}}
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}`
eCmpBoolRaw = `as := isScalar(a, t)
bs := isScalar(b, t)
rs := isScalar(retVal, Bool)
rt := retVal.Bools()
if ((as && !bs) || (bs && !as)) && rs {
return errors.Errorf("retVal is a scalar. a: %d, b %d", a.TypedLen(t), b.TypedLen(t))
}
{{$name := .Name}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
at := a.{{sliceOf .}}
bt := b.{{sliceOf .}}
switch {
case as && bs:
{{$name}}{{short .}}(at, bt, rt)
case as && !bs:
{{$name}}SV{{short .}}(at[0], bt, rt)
case !as && bs :
{{$name}}VS{{short .}}(at, bt[0], rt)
default:
{{$name}}{{short .}}(at, bt, rt)
}
return
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
eCmpSameIterRaw = `as := isScalar(a, t)
bs := isScalar(b, t)
{{$name := .Name}}
switch t {
{{range .Kinds -}}
{{if isBoolRepr . -}}
case {{reflectKind .}}:
at := a.{{sliceOf .}}
bt := b.{{sliceOf .}}
switch {
case as && bs :
{{$name}}Same{{short .}}(at, bt)
case as && !bs:
{{$name}}SameIterSV{{short .}}(at[0], bt, bit)
case !as && bs:
{{$name}}SameIterVS{{short .}}(at, bt[0], ait)
default:
{{$name}}SameIter{{short .}}(at, bt, ait, bit)
}
return
{{end -}}
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
eCmpBoolIterRaw = `as :=isScalar(a, t)
bs := isScalar(b, t)
rs := isScalar(retVal, Bool)
rt := retVal.Bools()
if ((as && !bs) || (bs && !as)) && rs {
return errors.Errorf("retVal is scalar while len(a): %d, len(b) %d", a.TypedLen(t), b.TypedLen(t))
}
{{$name := .Name}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
at := a.{{sliceOf .}}
bt := b.{{sliceOf .}}
switch {
case as && bs:
{{$name}}{{short .}}(at, bt, rt)
return
case as && !bs:
return {{$name}}IterSV{{short .}}(at[0], bt, rt, bit, rit)
case !as && bs:
return {{$name}}IterVS{{short .}}(at, bt[0], rt, ait, rit)
default:
return {{$name}}Iter{{short .}}(at, bt, rt, ait, bit, rit)
}
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
complexAbs = `{{if eq .Kind.String "complex64" -}}
{{else if eq .Kind.String "complex128" -}}
{{end -}}
`
eReduceFirstRaw = `{{$name := .Name -}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
dt := data.{{sliceOf .}}
rt := retVal.{{sliceOf .}}
switch f := fn.(type){
case func([]{{asType .}}, []{{asType .}}):
{{$name | unexport}}{{short .}}(dt, rt, split, size, f)
case func({{asType .}}, {{asType .}}) {{asType .}}:
generic{{$name}}{{short .}}(dt, rt, split, size, f)
default:
return errors.Errorf(reductionErrMsg, fn)
}
return nil
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
eReduceLastRaw = `var ok bool
{{$name := .Name -}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
var def {{asType .}}
if def, ok = defaultValue.({{asType .}}); !ok {
return errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue)
}
dt := data.{{sliceOf .}}
rt := retVal.{{sliceOf .}}
switch f := fn.(type){
case func([]{{asType .}}) {{asType .}}:
{{$name | unexport}}{{short .}}(dt, rt, dimSize, def, f)
case func({{asType .}}, {{asType .}}) {{asType .}}:
generic{{$name}}{{short .}}(dt, rt, dimSize, def, f)
default:
return errors.Errorf(reductionErrMsg, fn)
}
return nil
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
eReduceDefaultRaw = `var ok bool
{{$name := .Name -}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
var f func({{asType .}}, {{asType .}}) {{asType .}}
if f, ok = fn.(func({{asType .}}, {{asType .}}) {{asType .}}); !ok {
return errors.Errorf(reductionErrMsg, fn)
}
dt := data.{{sliceOf .}}
rt := retVal.{{sliceOf .}}
{{$name | unexport}}{{short .}}(dt, rt, dim0, dimSize, outerStride, stride, expected, f)
return nil
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
eReduceRaw = `var ok bool
switch t{
{{range .Kinds -}}
case {{reflectKind .}}:
var f func({{asType .}}, {{asType .}}) {{asType .}}
var def {{asType .}}
if f, ok = fn.(func({{asType .}}, {{asType .}}) {{asType .}}); !ok {
return nil, errors.Errorf(reductionErrMsg, fn)
}
if def, ok = defaultValue.({{asType .}}); !ok {
return nil, errors.Errorf(defaultValueErrMsg, def, defaultValue, defaultValue)
}
retVal = Reduce{{short .}}(f, def, a.{{sliceOf .}}...)
return
{{end -}}
default:
return nil, errors.Errorf("Unsupported type %v for Reduce", t)
}
`
eUnaryRaw = `{{$name := .Name -}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
{{$name}}{{short .}}(a.{{sliceOf .}})
return nil
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
eUnaryIterRaw = `{{$name := .Name -}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
return {{$name}}{{short .}}(a.{{sliceOf .}}, ait)
{{end -}}
default:
return errors.Errorf("Unsupported type %v for {{$name}}", t)
}
`
eUnaryClampRaw = `var ok bool
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
var min, max {{asType .}}
if min, ok = minVal.({{asType .}}); !ok {
return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min")
}
if max, ok = maxVal.({{asType .}}); !ok {
return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max")
}
Clamp{{short .}}(a.{{sliceOf .}}, min, max)
return nil
{{end -}}
default:
return errors.Errorf("Unsupported type %v for Clamp", t)
}
`
eUnaryClampIterRaw = `var ok bool
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
var min, max {{asType .}}
if min, ok = minVal.({{asType .}}); !ok {
return errors.Wrap(errors.Errorf(typeMismatch, min, minVal), "Clamp() min")
}
if max, ok = maxVal.({{asType .}}); !ok {
return errors.Wrap(errors.Errorf(typeMismatch, max, maxVal), "Clamp() max")
}
return ClampIter{{short .}}(a.{{sliceOf .}}, ait, min, max)
{{end -}}
default:
return errors.Errorf("Unsupported type %v for Clamp", t)
}
`
eArgmaxRaw = `var next int
{{$name := .Name -}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
data := a.{{sliceOf .}}
tmp := make([]{{asType .}}, 0, lastSize)
for next, err = it.Next(); err == nil; next, err = it.Next() {
tmp = append(tmp, data[next])
if len(tmp) == lastSize {
am := Arg{{$name}}{{short .}}(tmp)
indices = append(indices, am)
// reset
tmp = tmp[:0]
}
}
if _, ok := err.(NoOpError); ok {
err = nil
}
return
{{end -}}
default:
return nil, errors.Errorf("Unsupported type %v for Arg{{.Name}}", t)
}
`
eArgmaxMaskedRaw = `newMask := make([]bool, 0, lastSize)
var next int
{{$name := .Name -}}
switch t {
{{range .Kinds -}}
case {{reflectKind .}}:
data := a.{{sliceOf .}}
tmp := make([]{{asType .}}, 0, lastSize)
for next, err = it.Next(); err == nil; next, err = it.Next() {
tmp = append(tmp, data[next])
newMask = append(newMask, mask[next])
if len(tmp) == lastSize {
am := Arg{{$name}}Masked{{short .}}(tmp, mask)
indices = append(indices, am)
// reset
tmp = tmp[:0]
newMask = newMask[:0]
}
}
if _, ok := err.(NoOpError); ok {
err = nil
}
return
{{end -}}
default:
return nil, errors.Errorf("Unsupported type %v for Arg{{.Name}}", t)
}
`
eArgmaxFlatRaw = `switch t {
{{$name := .Name -}}
{{range .Kinds -}}
case {{reflectKind .}}:
return Arg{{$name}}{{short .}}(a.{{sliceOf .}})
{{end -}}
default:
return -1
}
`
eArgmaxFlatMaskedRaw = `switch t {
{{$name := .Name -}}
{{range .Kinds -}}
case {{reflectKind .}}:
return Arg{{$name}}Masked{{short .}}(a.{{sliceOf .}}, mask)
{{end -}}
default:
return -1
}
`
)
var (
eArith *template.Template
eArithIncr *template.Template
eArithIter *template.Template
eArithIterIncr *template.Template
eMap *template.Template
eMapIter *template.Template
eCmpBool *template.Template
eCmpSame *template.Template
eCmpBoolIter *template.Template
eCmpSameIter *template.Template
eReduce *template.Template
eReduceFirst *template.Template
eReduceLast *template.Template
eReduceDefault *template.Template
eUnary *template.Template
eUnaryIter *template.Template
eUnaryClamp *template.Template
eUnaryClampIter *template.Template
eArgmax *template.Template
eArgmaxMasked *template.Template
eArgmaxFlat *template.Template
eArgmaxFlatMasked *template.Template
)
func
|
() {
eArith = template.Must(template.New("eArith").Funcs(funcs).Parse(eArithRaw))
eArithIncr = template.Must(template.New("eArithIncr").Funcs(funcs).Parse(eArithIncrRaw))
eArithIter = template.Must(template.New("eArithIter").Funcs(funcs).Parse(eArithIterRaw))
eArithIterIncr = template.Must(template.New("eArithIterIncr").Funcs(funcs).Parse(eArithIterIncrRaw))
eMap = template.Must(template.New("eMap").Funcs(funcs).Parse(eMapRaw))
eMapIter = template.Must(template.New("eMapIter").Funcs(funcs).Parse(eMapIterRaw))
eCmpBool = template.Must(template.New("eCmpBool").Funcs(funcs).Parse(eCmpBoolRaw))
eCmpSame = template.Must(template.New("eCmpSame").Funcs(funcs).Parse(eCmpSameRaw))
eCmpBoolIter = template.Must(template.New("eCmpBoolIter").Funcs(funcs).Parse(eCmpBoolIterRaw))
eCmpSameIter = template.Must(template.New("eCmpSameIter").Funcs(funcs).Parse(eCmpSameIterRaw))
eReduce = template.Must(template.New("eReduce").Funcs(funcs).Parse(eReduceRaw))
eReduceFirst = template.Must(template.New("eReduceFirst").Funcs(funcs).Parse(eReduceFirstRaw))
eReduceLast = template.Must(template.New("eReduceLast").Funcs(funcs).Parse(eReduceLastRaw))
eReduceDefault = template.Must(template.New("eReduceDefault").Funcs(funcs).Parse(eReduceDefaultRaw))
eUnary = template.Must(template.New("eUnary").Funcs(funcs).Parse(eUnaryRaw))
eUnaryIter = template.Must(template.New("eUnaryIter").Funcs(funcs).Parse(eUnaryIterRaw))
eUnaryClamp = template.Must(template.New("eUnaryClamp").Funcs(funcs).Parse(eUnaryClampRaw))
eUnaryClampIter = template.Must(template.New("eUnaryClampIter").Funcs(funcs).Parse(eUnaryClampIterRaw))
eArgmax = template.Must(template.New("argmax").Funcs(funcs).Parse(eArgmaxRaw))
eArgmaxMasked = template.Must(template.New("argmaxMasked").Funcs(funcs).Parse(eArgmaxMaskedRaw))
eArgmaxFlat = template.Must(template.New("argmaxFlat").Funcs(funcs).Parse(eArgmaxFlatRaw))
eArgmaxFlatMasked = template.Must(template.New("argmaxFlatMasked").Funcs(funcs).Parse(eArgmaxFlatMaskedRaw))
}
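// Illustrative sketch (not part of the original file): during code generation
// each parsed template is executed against a value describing one operation.
// A hypothetical driver, assuming an op value exposing the Name and Kinds
// fields the templates reference (the struct shape is an assumption):
//
//	var buf bytes.Buffer
//	op := struct {
//		Name  string
//		Kinds []reflect.Kind
//	}{Name: "Add", Kinds: []reflect.Kind{reflect.Int, reflect.Float64}}
//	if err := eArith.Execute(&buf, op); err != nil {
//		log.Fatal(err)
//	}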
|
init
|
accordion.js
|
async function solution() {
const main = document.getElementById('main');
const url = `http://localhost:3030/jsonstore/advanced/articles/list`;
const response = await fetch(url);
const data = await response.json();
data.forEach(a => {
const divAccordion = createElement('div', '', ['class', 'accordion']);
const divHead = createElement('div', '', ['class', 'head']);
const span = createElement('span', a.title);
const button = createElement('button', 'More', ['class', 'button', 'id', a._id]);
|
button.addEventListener('click', toggle);
divHead.appendChild(span);
divHead.appendChild(button);
const divExtra = createElement('div', '', ['class', 'extra']);
const p = createElement('p');
divExtra.appendChild(p);
divAccordion.appendChild(divHead);
divAccordion.appendChild(divExtra);
main.appendChild(divAccordion);
});
async function toggle(event) {
const accordion = event.target.parentNode.parentNode;
const id = event.target.id;
const extra = accordion.children[1];
const p = extra.firstChild;
const url = `http://localhost:3030/jsonstore/advanced/articles/details/${id}`;
const response = await fetch(url);
const data = await response.json();
p.textContent = data.content;
const inHidden = event.target.textContent == 'More';
extra.style.display = inHidden ? 'block' : 'none';
event.target.textContent = inHidden ? 'Less' : 'More';
}
function createElement(type, content, attributes = []) {
const element = document.createElement(type);
if (content) {
element.textContent = content;
}
if (attributes.length > 0) {
for (let i = 0; i < attributes.length; i += 2) {
element.setAttribute(attributes[i], attributes[i + 1]);
}
}
return element;
}
}
solution();
| |
server.go
|
package main
import (
"bytes"
"crypto/rand"
"fmt"
"log"
"net"
"golang.org/x/crypto/nacl/box"
)
// Shared constants representing the known message types,
// sizes used to establish buffers, and a service signature
// used to easily drop unknown traffic.
const (
MessageHandshake byte = iota
MessageDisconnected
MessageChat
)
const (
BufferSize = 508
NaClPadding = 16
NaClNonceSize = 24
MaxIdentitySize = 20
MaxMessageSize = BufferSize - (NaClNonceSize + NaClPadding + MaxIdentitySize + 2) // 2 spaces for formatting
KeySize = 32
)
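// With the defaults above, MaxMessageSize works out to
// 508 - (24 + 16 + 20 + 2) = 446 bytes of plaintext per message,
// keeping each sealed packet within a single 508-byte UDP payload.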
var Signature = [...]byte{1, 2, 3, 4}
var errInvalidKeySize = fmt.Errorf("keys must be %d bytes...", KeySize)
// A server implementation that creates a goroutine per
// connection and passes a shared channel to communicate.
type Server struct {
priv [32]byte
pub [32]byte
c *net.UDPConn
clients map[string]Client
}
// Clear all clients and close the server.
func (s *Server) Close() error {
s.clients = make(map[string]Client, 0)
return s.c.Close()
}
// Parse the address and start the server with the configured buffer size.
func (s *Server) Init(address string) error {
pub, priv, err := box.GenerateKey(rand.Reader)
if err != nil {
return err
}
copy(s.priv[:], priv[:])
copy(s.pub[:], pub[:])
if serverAddr, err := net.ResolveUDPAddr("udp", address); err != nil
|
else if s.c, err = net.ListenUDP("udp", serverAddr); err != nil {
return err
}
s.c.SetReadBuffer(BufferSize)
s.c.SetWriteBuffer(BufferSize)
s.clients = make(map[string]Client, 0)
return nil
}
// Listen for new connections to create clients with their own goroutines.
//
// Since UDP is "connectionless" we may want to track the last received
// message time so we can clear "idle" clients after a fixed time.
//
// Writing a resilient client to handle disconnection would be interesting.
func (s *Server) Run() {
b := make([]byte, BufferSize)
for {
l, addr, err := s.c.ReadFromUDP(b)
if err != nil {
log.Printf("failed to read from connection: %s\n", err)
continue
}
// copy the datagram so the next ReadFromUDP cannot overwrite it
// while the goroutine is still processing it
msg := make([]byte, l)
copy(msg, b[:l])
go s.MessageProcess(addr, msg)
}
}
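// MessageProcess validates the signature prefix and dispatches
// on the message type byte that follows it.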
func (s *Server) MessageProcess(addr *net.UDPAddr, message []byte) {
if len(message) < len(Signature)+1 || !bytes.Equal(message[:len(Signature)], Signature[:]) {
log.Printf("Signature does not match for address %s, discarding...\n", addr.String())
return
}
switch message[len(Signature)] {
case MessageHandshake:
s.HandshakeReceive(addr, message[len(Signature)+1:])
case MessageChat:
s.MessageReceive(addr, message[len(Signature)+1:])
default:
log.Printf("unknown message type (%d) from address: %s\n", message[len(Signature)], addr.String())
}
}
func (s *Server) HandshakeReceive(addr *net.UDPAddr, shake []byte) {
if len(shake) < KeySize {
log.Printf("handshake failed due to key size (%d): %s", len(shake), errInvalidKeySize)
s.Disconnected(addr, "invalid key size...")
return
} else if len(shake) > KeySize+MaxIdentitySize {
log.Printf("identity too large (%d: %s), sending disconnected...\n", len(shake[KeySize:]), string(shake[KeySize:]))
s.Disconnected(addr, "identity too large...")
return
}
var pub [KeySize]byte
copy(pub[:], shake[:KeySize])
identity := make([]byte, len(shake)-KeySize)
copy(identity, shake[KeySize:len(shake)])
c := Client{a: addr, identity: string(identity)}
box.Precompute(&c.key, &pub, &s.priv)
data := append(append(Signature[:], MessageHandshake), s.pub[:]...)
if _, err := s.c.WriteToUDP(data, addr); err != nil {
log.Printf("failed to write handshake message to %s: %s", addr.String(), err)
s.Disconnected(addr, "failed to send handshake reply...")
return
}
s.clients[addr.String()] = c
log.Printf("Established connection with %#v\n", c)
}
// Sends a disconnected message to the client, with a reason that might
// be useful with an interactive interface or for debugging.
func (s *Server) Disconnected(addr *net.UDPAddr, reason string) {
data := append(append(Signature[:], MessageDisconnected), []byte(reason)...)
s.c.WriteToUDP(data, addr)
}
func (s *Server) MessageReceive(addr *net.UDPAddr, ciphertext []byte) {
if _, ok := s.clients[addr.String()]; !ok {
log.Printf("No registered client %s\n", addr.String())
s.Disconnected(addr, "not registered...")
return
}
if len(ciphertext) < NaClNonceSize {
log.Printf("chat message from %s is too short to contain a nonce\n", addr.String())
return
}
var nonce [NaClNonceSize]byte
copy(nonce[:], ciphertext[:NaClNonceSize])
key := s.clients[addr.String()].key
message, ok := box.OpenAfterPrecomputation(nil, ciphertext[NaClNonceSize:], &nonce, &key)
if !ok {
log.Printf("failed to decrypt chat message from %s\n", addr.String())
s.Disconnected(addr, "failed to decrypt chat message...")
return
}
if len(message) > MaxMessageSize {
log.Printf("message received from %s is too large: %s\n", addr.String, string(message))
}
if _, err := rand.Read(nonce[:]); err != nil {
log.Printf("failed to generate new nonce: %s\n", err)
return
}
message = append([]byte(s.clients[addr.String()].identity+": "), message...)
// @note: ideally each message would be sent on its own goroutine, but
// simply prefixing the call with go lets the loop variable change before
// the goroutine runs, which breaks delivery.
//
// @note: even with goroutines there may be a bias as to who receives
// first due to the map iteration order.
//
// @note: this may not be concurrency-safe, since new connections can be
// registered in parallel with sending messages, which may lead to a race
// condition on the map of clients.
log.Printf("Sending %s to clients: %#v", message, s.clients)
for _, client := range s.clients {
s.MessageSend(&client, nonce, message)
}
}
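// A minimal sketch (hypothetical, not part of this server) of how the
// clients map could be guarded against the race noted above, assuming a
// `mu sync.RWMutex` field were added to Server and write-locked wherever
// the map is mutated:
//
//	func (s *Server) Broadcast(nonce [24]byte, message []byte) {
//		s.mu.RLock()
//		defer s.mu.RUnlock()
//		for _, client := range s.clients {
//			client := client // copy so each goroutine gets its own value
//			go s.MessageSend(&client, nonce, message)
//		}
//	}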
func (s *Server) MessageSend(c *Client, nonce [24]byte, message []byte) {
ciphertext := box.SealAfterPrecomputation(nonce[:], message, &nonce, &c.key)
log.Printf("Identity: %s", c.identity)
data := append(append(Signature[:], MessageChat), ciphertext...)
s.c.WriteToUDP(data, c.a)
}
|
{
return err
}
|
retry.rs
|
use crate::Error;
use backoff::{backoff::Backoff, ExponentialBackoff};
use futures::Future;
use std::{fmt::Debug, time::Duration};
/// Gets the default retrying policy. This should be used for unexpected errors, not for operations
/// that are expected to take a while to succeed. That is, it is unsuitable for e.g. awaiting a
/// bitcoin confirmation proof, due to the potentially long total retry time.
/// Note: technically this is not a constant due to the use of `Default::default()`
fn get_exponential_backoff() -> ExponentialBackoff {
ExponentialBackoff {
max_elapsed_time: Some(Duration::from_secs(60 * 10)), // retry for at most 10 minutes
max_interval: Duration::from_secs(60 * 2), // only delay up to 2 minutes
initial_interval: Duration::from_secs(1), // first retry after 1 second
current_interval: Duration::from_secs(1), // increasing interval duration
multiplier: 2.0, // delay doubles every time
randomization_factor: 0.25, // add random value within 25%
..Default::default()
}
}
pub enum RetryPolicy<E> {
Skip(E),
Throw(E),
}
pub async fn notify_retry<E, L, FL, R, FR, T>(call: L, verify: R) -> Result<T, Error>
where
E: Debug,
L: Fn() -> FL,
FL: Future<Output = Result<T, E>>,
R: Fn(Result<T, E>) -> FR,
FR: Future<Output = Result<T, RetryPolicy<Error>>>,
{
let mut backoff = get_exponential_backoff();
loop {
let err = match verify(call().await).await {
Ok(ok) => return Ok(ok),
Err(RetryPolicy::Skip(err)) => err,
Err(RetryPolicy::Throw(err)) => return Err(err),
};
match backoff.next_backoff() {
Some(wait) =>
|
None => break Err(Error::Timeout),
}
}
}
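// A hypothetical usage sketch (the RPC client and `is_transient` predicate
// below are assumptions, not part of this module): skip transient failures
// so they are retried with backoff, and throw permanent ones immediately.
//
// async fn fetch_with_retry(client: &RpcClient) -> Result<Data, Error> {
//     notify_retry(
//         || client.fetch(),
//         |result| async move {
//             match result {
//                 Ok(data) => Ok(data),
//                 Err(e) if e.is_transient() => Err(RetryPolicy::Skip(e.into())),
//                 Err(e) => Err(RetryPolicy::Throw(e.into())),
//             }
//         },
//     )
//     .await
// }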
|
{
// error occurred, sleep before retrying
log::warn!("{:?} - next retry in {:.3} s", err, wait.as_secs_f64());
tokio::time::delay_for(wait).await;
}
|
main-browser-aot.ts
|
// polyfills
import 'zone.js/dist/zone';
// tslint:disable-next-line
import 'reflect-metadata';
// angular
import { enableProdMode } from '@angular/core';
import { platformBrowserDynamic } from '@angular/platform-browser-dynamic';
// libs
import { bootloader } from '@angularclass/bootloader';
// app
import { AppBrowserModuleNgFactory } from './app/app.browser.module.ngfactory';
export function
|
(): any {
return platformBrowserDynamic()
.bootstrapModuleFactory(AppBrowserModuleNgFactory);
}
enableProdMode();
bootloader(main);
|
main
|
models.go
|
package pkger
import (
"encoding/json"
"errors"
"reflect"
"time"
"github.com/influxdata/influxdb/v2/kit/platform"
errors2 "github.com/influxdata/influxdb/v2/kit/platform/errors"
"github.com/influxdata/influxdb/v2"
icheck "github.com/influxdata/influxdb/v2/notification/check"
"github.com/influxdata/influxdb/v2/notification/endpoint"
)
// Package kind types.
const (
KindUnknown Kind = ""
KindBucket Kind = "Bucket"
KindCheck Kind = "Check"
KindCheckDeadman Kind = "CheckDeadman"
KindCheckThreshold Kind = "CheckThreshold"
KindDashboard Kind = "Dashboard"
KindLabel Kind = "Label"
KindNotificationEndpoint Kind = "NotificationEndpoint"
KindNotificationEndpointHTTP Kind = "NotificationEndpointHTTP"
KindNotificationEndpointPagerDuty Kind = "NotificationEndpointPagerDuty"
KindNotificationEndpointSlack Kind = "NotificationEndpointSlack"
KindNotificationRule Kind = "NotificationRule"
KindPackage Kind = "Package"
KindTask Kind = "Task"
KindTelegraf Kind = "Telegraf"
KindVariable Kind = "Variable"
)
// Kinds is a list of known pkger kinds.
func
|
() []Kind {
var out []Kind
for k := range kinds {
out = append(out, k)
}
return out
}
var kinds = map[Kind]bool{
KindBucket: true,
KindCheck: true,
KindCheckDeadman: true,
KindCheckThreshold: true,
KindDashboard: true,
KindLabel: true,
KindNotificationEndpoint: true,
KindNotificationEndpointHTTP: true,
KindNotificationEndpointPagerDuty: true,
KindNotificationEndpointSlack: true,
KindNotificationRule: true,
KindTask: true,
KindTelegraf: true,
KindVariable: true,
}
// Kind is a resource kind.
type Kind string
// String provides the kind in human readable form.
func (k Kind) String() string {
if kinds[k] {
return string(k)
}
if k == KindUnknown {
return "unknown"
}
return string(k)
}
// OK validates the kind is valid.
func (k Kind) OK() error {
if k == KindUnknown {
return errors.New("invalid kind")
}
if !kinds[k] {
return errors.New("unsupported kind provided")
}
return nil
}
// ResourceType converts a kind to a known resource type (if applicable).
func (k Kind) ResourceType() influxdb.ResourceType {
switch k {
case KindBucket:
return influxdb.BucketsResourceType
case KindCheck, KindCheckDeadman, KindCheckThreshold:
return influxdb.ChecksResourceType
case KindDashboard:
return influxdb.DashboardsResourceType
case KindLabel:
return influxdb.LabelsResourceType
case KindNotificationEndpoint,
KindNotificationEndpointHTTP,
KindNotificationEndpointPagerDuty,
KindNotificationEndpointSlack:
return influxdb.NotificationEndpointResourceType
case KindNotificationRule:
return influxdb.NotificationRuleResourceType
case KindTask:
return influxdb.TasksResourceType
case KindTelegraf:
return influxdb.TelegrafsResourceType
case KindVariable:
return influxdb.VariablesResourceType
default:
return ""
}
}
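// For example, KindCheckDeadman.ResourceType() yields influxdb.ChecksResourceType,
// while a kind with no mapping (e.g. KindPackage) falls through to the empty string.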
func (k Kind) is(comps ...Kind) bool {
for _, c := range comps {
if c == k {
return true
}
}
return false
}
// SafeID is an equivalent of platform.ID that encodes safely with
// zero values (platform.ID == 0).
type SafeID platform.ID
// Encode will safely encode the id.
func (s SafeID) Encode() ([]byte, error) {
id := platform.ID(s)
b, _ := id.Encode()
return b, nil
}
// String prints an encoded string representation of the id.
func (s SafeID) String() string {
return platform.ID(s).String()
}
// DiffIdentifier are the identifying fields for any given resource. Each resource
// dictates if the resource is new, to be removed, or will remain.
type DiffIdentifier struct {
ID SafeID `json:"id"`
StateStatus StateStatus `json:"stateStatus"`
MetaName string `json:"templateMetaName"`
Kind Kind `json:"kind"`
}
// IsNew indicates the resource is new to the platform.
func (d DiffIdentifier) IsNew() bool {
return d.ID == 0
}
// Diff is the result of a service DryRun call. The diff outlines
// what is new and/or updated relative to the current state of the platform.
type Diff struct {
Buckets []DiffBucket `json:"buckets"`
Checks []DiffCheck `json:"checks"`
Dashboards []DiffDashboard `json:"dashboards"`
Labels []DiffLabel `json:"labels"`
LabelMappings []DiffLabelMapping `json:"labelMappings"`
NotificationEndpoints []DiffNotificationEndpoint `json:"notificationEndpoints"`
NotificationRules []DiffNotificationRule `json:"notificationRules"`
Tasks []DiffTask `json:"tasks"`
Telegrafs []DiffTelegraf `json:"telegrafConfigs"`
Variables []DiffVariable `json:"variables"`
}
// HasConflicts reports whether any resources in the package conflict with
// the current state of the platform after the dry run is complete.
func (d Diff) HasConflicts() bool {
for _, b := range d.Buckets {
if b.hasConflict() {
return true
}
}
for _, l := range d.Labels {
if l.hasConflict() {
return true
}
}
for _, v := range d.Variables {
if v.hasConflict() {
return true
}
}
return false
}
type (
// DiffBucket is a diff of an individual bucket.
DiffBucket struct {
DiffIdentifier
New DiffBucketValues `json:"new"`
Old *DiffBucketValues `json:"old"`
}
// DiffBucketValues are the varying values for a bucket.
DiffBucketValues struct {
Name string `json:"name"`
Description string `json:"description"`
RetentionRules retentionRules `json:"retentionRules"`
SchemaType string `json:"schemaType,omitempty"`
MeasurementSchemas measurementSchemas `json:"measurementSchemas,omitempty"`
}
)
func (d DiffBucket) hasConflict() bool {
return !d.IsNew() && d.Old != nil && !reflect.DeepEqual(*d.Old, d.New)
}
// DiffCheckValues are the varying values for a check.
type DiffCheckValues struct {
influxdb.Check
}
// The MarshalJSON implementation is forced by the embedded check value.
func (d DiffCheckValues) MarshalJSON() ([]byte, error) {
if d.Check == nil {
return json.Marshal(nil)
}
return json.Marshal(d.Check)
}
// UnmarshalJSON decodes the check values.
func (d *DiffCheckValues) UnmarshalJSON(b []byte) (err error) {
d.Check, err = icheck.UnmarshalJSON(b)
if errors2.EInternal == errors2.ErrorCode(err) {
return nil
}
return err
}
// DiffCheck is a diff of an individual check.
type DiffCheck struct {
DiffIdentifier
New DiffCheckValues `json:"new"`
Old *DiffCheckValues `json:"old"`
}
type (
// DiffDashboard is a diff of an individual dashboard.
DiffDashboard struct {
DiffIdentifier
New DiffDashboardValues `json:"new"`
Old *DiffDashboardValues `json:"old"`
}
// DiffDashboardValues are values for a dashboard.
DiffDashboardValues struct {
Name string `json:"name"`
Desc string `json:"description"`
Charts []DiffChart `json:"charts"`
}
)
// DiffChart is a diff of a chart. Since all charts are new right now,
// the SummaryChart is reused here.
type DiffChart SummaryChart
func (d *DiffChart) MarshalJSON() ([]byte, error) {
return json.Marshal((*SummaryChart)(d))
}
func (d *DiffChart) UnmarshalJSON(b []byte) error {
var sumChart SummaryChart
if err := json.Unmarshal(b, &sumChart); err != nil {
return err
}
*d = DiffChart(sumChart)
return nil
}
type (
// DiffLabel is a diff of an individual label.
DiffLabel struct {
DiffIdentifier
New DiffLabelValues `json:"new"`
Old *DiffLabelValues `json:"old"`
}
// DiffLabelValues are the varying values for a label.
DiffLabelValues struct {
Name string `json:"name"`
Color string `json:"color"`
Description string `json:"description"`
}
)
func (d DiffLabel) hasConflict() bool {
return !d.IsNew() && d.Old != nil && *d.Old != d.New
}
// StateStatus indicates the status of a diff or summary resource
type StateStatus string
const (
StateStatusExists StateStatus = "exists"
StateStatusNew StateStatus = "new"
StateStatusRemove StateStatus = "remove"
)
// DiffLabelMapping is a diff of an individual label mapping. A
// single resource may have multiple mappings to multiple labels.
// A label can have many mappings to other resources.
type DiffLabelMapping struct {
StateStatus StateStatus `json:"stateStatus"`
ResType influxdb.ResourceType `json:"resourceType"`
ResID SafeID `json:"resourceID"`
ResName string `json:"resourceName"`
ResMetaName string `json:"resourceTemplateMetaName"`
LabelID SafeID `json:"labelID"`
LabelName string `json:"labelName"`
LabelMetaName string `json:"labelTemplateMetaName"`
}
//func (d DiffLabelMapping) IsNew() bool {
// return d.StateStatus == StateStatusNew
//}
// DiffNotificationEndpointValues are the varying values for a notification endpoint.
type DiffNotificationEndpointValues struct {
influxdb.NotificationEndpoint
}
// The MarshalJSON implementation is forced by the embedded notification endpoint value.
func (d DiffNotificationEndpointValues) MarshalJSON() ([]byte, error) {
if d.NotificationEndpoint == nil {
return json.Marshal(nil)
}
return json.Marshal(d.NotificationEndpoint)
}
// UnmarshalJSON decodes the notification endpoint. This is unfortunately necessary.
func (d *DiffNotificationEndpointValues) UnmarshalJSON(b []byte) (err error) {
d.NotificationEndpoint, err = endpoint.UnmarshalJSON(b)
if errors2.EInvalid == errors2.ErrorCode(err) {
return nil
}
return
}
// DiffNotificationEndpoint is a diff of an individual notification endpoint.
type DiffNotificationEndpoint struct {
DiffIdentifier
New DiffNotificationEndpointValues `json:"new"`
Old *DiffNotificationEndpointValues `json:"old"`
}
type (
// DiffNotificationRule is a diff of an individual notification rule.
DiffNotificationRule struct {
DiffIdentifier
New DiffNotificationRuleValues `json:"new"`
Old *DiffNotificationRuleValues `json:"old"`
}
// DiffNotificationRuleValues are the values for an individual rule.
DiffNotificationRuleValues struct {
Name string `json:"name"`
Description string `json:"description"`
// These 3 fields represent the relationship of the rule to the endpoint.
EndpointID SafeID `json:"endpointID"`
EndpointName string `json:"endpointName"`
EndpointType string `json:"endpointType"`
Every string `json:"every"`
Offset string `json:"offset"`
MessageTemplate string `json:"messageTemplate"`
StatusRules []SummaryStatusRule `json:"statusRules"`
TagRules []SummaryTagRule `json:"tagRules"`
}
)
type (
// DiffTask is a diff of an individual task.
DiffTask struct {
DiffIdentifier
New DiffTaskValues `json:"new"`
Old *DiffTaskValues `json:"old"`
}
// DiffTaskValues are the values for an individual task.
DiffTaskValues struct {
Name string `json:"name"`
Cron string `json:"cron"`
Description string `json:"description"`
Every string `json:"every"`
Offset string `json:"offset"`
Query string `json:"query"`
Status influxdb.Status `json:"status"`
}
)
// DiffTelegraf is a diff of an individual telegraf. This resource is always new.
type DiffTelegraf struct {
DiffIdentifier
New influxdb.TelegrafConfig `json:"new"`
Old *influxdb.TelegrafConfig `json:"old"`
}
type (
// DiffVariable is a diff of an individual variable.
DiffVariable struct {
DiffIdentifier
New DiffVariableValues `json:"new"`
Old *DiffVariableValues `json:"old,omitempty"` // omitempty so a nil Old signals there was no previous state
}
// DiffVariableValues are the varying values for a variable.
DiffVariableValues struct {
Name string `json:"name"`
Description string `json:"description"`
Args *influxdb.VariableArguments `json:"args"`
}
)
func (d DiffVariable) hasConflict() bool {
return !d.IsNew() && d.Old != nil && !reflect.DeepEqual(*d.Old, d.New)
}
// Summary is a definition of all the resources that have or
// will be created from a pkg.
type Summary struct {
Buckets []SummaryBucket `json:"buckets"`
Checks []SummaryCheck `json:"checks"`
Dashboards []SummaryDashboard `json:"dashboards"`
NotificationEndpoints []SummaryNotificationEndpoint `json:"notificationEndpoints"`
NotificationRules []SummaryNotificationRule `json:"notificationRules"`
Labels []SummaryLabel `json:"labels"`
LabelMappings []SummaryLabelMapping `json:"labelMappings"`
MissingEnvs []string `json:"missingEnvRefs"`
MissingSecrets []string `json:"missingSecrets"`
Tasks []SummaryTask `json:"summaryTask"`
TelegrafConfigs []SummaryTelegraf `json:"telegrafConfigs"`
Variables []SummaryVariable `json:"variables"`
}
// SummaryIdentifier establishes the shared identifiers for a given resource
// within a template.
type SummaryIdentifier struct {
Kind Kind `json:"kind"`
MetaName string `json:"templateMetaName"`
EnvReferences []SummaryReference `json:"envReferences"`
}
// SummaryBucket provides a summary of a pkg bucket.
type SummaryBucket struct {
SummaryIdentifier
ID SafeID `json:"id,omitempty"`
OrgID SafeID `json:"orgID,omitempty"`
Name string `json:"name"`
Description string `json:"description"`
// TODO: return retention rules?
RetentionPeriod time.Duration `json:"retentionPeriod"`
SchemaType string `json:"schemaType,omitempty"`
MeasurementSchemas []SummaryMeasurementSchema `json:"measurementSchemas,omitempty"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
}
type SummaryMeasurementSchema struct {
Name string `json:"name"`
Columns []SummaryMeasurementSchemaColumn `json:"columns"`
}
type SummaryMeasurementSchemaColumn struct {
Name string `json:"name"`
Type string `json:"type"`
DataType string `json:"dataType,omitempty"`
}
// SummaryCheck provides a summary of a pkg check.
type SummaryCheck struct {
SummaryIdentifier
Check influxdb.Check `json:"check"`
Status influxdb.Status `json:"status"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
}
func (s *SummaryCheck) UnmarshalJSON(b []byte) error {
var out struct {
SummaryIdentifier
Status string `json:"status"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
Check json.RawMessage `json:"check"`
}
if err := json.Unmarshal(b, &out); err != nil {
return err
}
s.SummaryIdentifier = out.SummaryIdentifier
s.Status = influxdb.Status(out.Status)
s.LabelAssociations = out.LabelAssociations
var err error
s.Check, err = icheck.UnmarshalJSON(out.Check)
return err
}
// SummaryDashboard provides a summary of a pkg dashboard.
type SummaryDashboard struct {
SummaryIdentifier
ID SafeID `json:"id"`
OrgID SafeID `json:"orgID"`
Name string `json:"name"`
Description string `json:"description"`
Charts []SummaryChart `json:"charts"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
}
// SummaryChart provides a summary of a pkg dashboard's chart.
type SummaryChart struct {
Properties influxdb.ViewProperties `json:"-"`
XPosition int `json:"xPos"`
YPosition int `json:"yPos"`
Height int `json:"height"`
Width int `json:"width"`
}
// MarshalJSON marshals a summary chart.
func (s *SummaryChart) MarshalJSON() ([]byte, error) {
b, err := influxdb.MarshalViewPropertiesJSON(s.Properties)
if err != nil {
return nil, err
}
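// The alias type has none of SummaryChart's methods, so marshalling the
// embedded alias below cannot recurse back into this MarshalJSON.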
type alias SummaryChart
out := struct {
Props json.RawMessage `json:"properties"`
alias
}{
Props: b,
alias: alias(*s),
}
return json.Marshal(out)
}
// UnmarshalJSON unmarshals a view properties and other data.
func (s *SummaryChart) UnmarshalJSON(b []byte) error {
type alias SummaryChart
a := (*alias)(s)
if err := json.Unmarshal(b, a); err != nil {
return err
}
s.XPosition = a.XPosition
s.YPosition = a.YPosition
s.Height = a.Height
s.Width = a.Width
vp, err := influxdb.UnmarshalViewPropertiesJSON(b)
if err != nil {
return err
}
s.Properties = vp
return nil
}
// SummaryNotificationEndpoint provides a summary of a pkg notification endpoint.
type SummaryNotificationEndpoint struct {
SummaryIdentifier
NotificationEndpoint influxdb.NotificationEndpoint `json:"notificationEndpoint"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
}
// UnmarshalJSON unmarshals the notification endpoint. This is necessary b/c
// the notification endpoint does not have a means to unmarshal itself.
func (s *SummaryNotificationEndpoint) UnmarshalJSON(b []byte) error {
var a struct {
SummaryIdentifier
NotificationEndpoint json.RawMessage `json:"notificationEndpoint"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
}
if err := json.Unmarshal(b, &a); err != nil {
return err
}
s.SummaryIdentifier = a.SummaryIdentifier
s.LabelAssociations = a.LabelAssociations
e, err := endpoint.UnmarshalJSON(a.NotificationEndpoint)
s.NotificationEndpoint = e
return err
}
// Summary types for NotificationRules which provide a summary of a pkg notification rule.
type (
SummaryNotificationRule struct {
SummaryIdentifier
ID SafeID `json:"id"`
Name string `json:"name"`
Description string `json:"description"`
// These fields represent the relationship of the rule to the endpoint.
EndpointID SafeID `json:"endpointID"`
EndpointMetaName string `json:"endpointTemplateMetaName"`
EndpointType string `json:"endpointType"`
Every string `json:"every"`
Offset string `json:"offset"`
MessageTemplate string `json:"messageTemplate"`
Status influxdb.Status `json:"status"`
StatusRules []SummaryStatusRule `json:"statusRules"`
TagRules []SummaryTagRule `json:"tagRules"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
}
SummaryStatusRule struct {
CurrentLevel string `json:"currentLevel"`
PreviousLevel string `json:"previousLevel"`
}
SummaryTagRule struct {
Key string `json:"key"`
Value string `json:"value"`
Operator string `json:"operator"`
}
)
// SummaryLabel provides a summary of a pkg label.
type SummaryLabel struct {
SummaryIdentifier
ID SafeID `json:"id"`
OrgID SafeID `json:"orgID"`
Name string `json:"name"`
Properties struct {
Color string `json:"color"`
Description string `json:"description"`
} `json:"properties"`
}
// SummaryLabelMapping provides a summary of a label mapped with a single resource.
type SummaryLabelMapping struct {
exists bool
Status StateStatus `json:"status,omitempty"`
ResourceID SafeID `json:"resourceID"`
ResourceMetaName string `json:"resourceTemplateMetaName"`
ResourceName string `json:"resourceName"`
ResourceType influxdb.ResourceType `json:"resourceType"`
LabelMetaName string `json:"labelTemplateMetaName"`
LabelName string `json:"labelName"`
LabelID SafeID `json:"labelID"`
}
// SummaryReference informs the consumer of required references for
// this resource.
type SummaryReference struct {
Field string `json:"resourceField"`
EnvRefKey string `json:"envRefKey"`
ValType string `json:"valueType"`
Value interface{} `json:"value"`
DefaultValue interface{} `json:"defaultValue"`
}
// SummaryTask provides a summary of a task.
type SummaryTask struct {
SummaryIdentifier
ID SafeID `json:"id"`
Name string `json:"name"`
Cron string `json:"cron"`
Description string `json:"description"`
Every string `json:"every"`
Offset string `json:"offset"`
Query string `json:"query"`
Status influxdb.Status `json:"status"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
}
// SummaryTelegraf provides a summary of a pkg telegraf config.
type SummaryTelegraf struct {
SummaryIdentifier
TelegrafConfig influxdb.TelegrafConfig `json:"telegrafConfig"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
}
// SummaryVariable provides a summary of a pkg variable.
type SummaryVariable struct {
SummaryIdentifier
ID SafeID `json:"id,omitempty"`
OrgID SafeID `json:"orgID,omitempty"`
Name string `json:"name"`
Description string `json:"description"`
Selected []string `json:"variables"`
Arguments *influxdb.VariableArguments `json:"arguments"`
LabelAssociations []SummaryLabel `json:"labelAssociations"`
}
|
Kinds
|
policies.py
|
from typing import Any, Dict, List, Optional, Type
import gym
import torch as th
from torch import nn
from stable_baselines3.common.policies import BasePolicy, register_policy
from stable_baselines3.common.torch_layers import BaseFeaturesExtractor, FlattenExtractor, NatureCNN, create_mlp
from stable_baselines3.common.type_aliases import Schedule
class QNetwork(BasePolicy):
"""
Action-Value (Q-Value) network for DQN
:param observation_space: Observation space
:param action_space: Action space
:param features_extractor: Network used to extract features from observations
:param features_dim: Number of features extracted by the features extractor
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
features_extractor: nn.Module,
features_dim: int,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
normalize_images: bool = True,
):
super(QNetwork, self).__init__(
observation_space,
action_space,
features_extractor=features_extractor,
normalize_images=normalize_images,
)
if net_arch is None:
net_arch = [64, 64]
self.net_arch = net_arch
self.activation_fn = activation_fn
self.features_extractor = features_extractor
self.features_dim = features_dim
self.normalize_images = normalize_images
action_dim = self.action_space.n # number of actions
q_net = create_mlp(self.features_dim, action_dim, self.net_arch, self.activation_fn)
self.q_net = nn.Sequential(*q_net)
def forward(self, obs: th.Tensor) -> th.Tensor:
"""
Predict the q-values.
|
:param obs: Observation
:return: The estimated Q-Value for each action.
"""
return self.q_net(self.extract_features(obs))
def _predict(self, observation: th.Tensor, deterministic: bool = True) -> th.Tensor:
q_values = self.forward(observation)
# Greedy action
action = q_values.argmax(dim=1).reshape(-1)
return action
def _get_data(self) -> Dict[str, Any]:
data = super()._get_data()
data.update(
dict(
net_arch=self.net_arch,
features_dim=self.features_dim,
activation_fn=self.activation_fn,
features_extractor=self.features_extractor,
)
)
return data
class DQNPolicy(BasePolicy):
"""
Policy class with Q-Value Net and target net for DQN
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param features_extractor_class: Features extractor to use.
:param features_extractor_kwargs: Keyword arguments
to pass to the features extractor.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(DQNPolicy, self).__init__(
observation_space,
action_space,
features_extractor_class,
features_extractor_kwargs,
optimizer_class=optimizer_class,
optimizer_kwargs=optimizer_kwargs,
)
if net_arch is None:
if features_extractor_class == FlattenExtractor:
net_arch = [64, 64]
else:
net_arch = []
self.net_arch = net_arch
self.activation_fn = activation_fn
self.normalize_images = normalize_images
self.net_args = {
"observation_space": self.observation_space,
"action_space": self.action_space,
"net_arch": self.net_arch,
"activation_fn": self.activation_fn,
"normalize_images": normalize_images,
}
self.q_net, self.q_net_target = None, None
self._build(lr_schedule)
def _build(self, lr_schedule: Schedule) -> None:
"""
Create the network and the optimizer.
:param lr_schedule: Learning rate schedule
lr_schedule(1) is the initial learning rate
"""
self.q_net = self.make_q_net()
self.q_net_target = self.make_q_net()
self.q_net_target.load_state_dict(self.q_net.state_dict())
# Setup optimizer with initial learning rate
self.optimizer = self.optimizer_class(self.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
def make_q_net(self) -> QNetwork:
# Make sure we always have separate networks for features extractors etc
net_args = self._update_features_extractor(self.net_args, features_extractor=None)
return QNetwork(**net_args).to(self.device)
def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
return self._predict(obs, deterministic=deterministic)
def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:
return self.q_net._predict(obs, deterministic=deterministic)
def _get_data(self) -> Dict[str, Any]:
data = super()._get_data()
data.update(
dict(
net_arch=self.net_args["net_arch"],
activation_fn=self.net_args["activation_fn"],
lr_schedule=self._dummy_schedule, # dummy lr schedule, not needed for loading policy alone
optimizer_class=self.optimizer_class,
optimizer_kwargs=self.optimizer_kwargs,
features_extractor_class=self.features_extractor_class,
features_extractor_kwargs=self.features_extractor_kwargs,
)
)
return data
MlpPolicy = DQNPolicy
class CnnPolicy(DQNPolicy):
"""
Policy class for DQN when using images as input.
:param observation_space: Observation space
:param action_space: Action space
:param lr_schedule: Learning rate schedule (could be constant)
:param net_arch: The specification of the policy and value networks.
:param activation_fn: Activation function
:param features_extractor_class: Features extractor to use.
:param normalize_images: Whether to normalize images or not,
dividing by 255.0 (True by default)
:param optimizer_class: The optimizer to use,
``th.optim.Adam`` by default
:param optimizer_kwargs: Additional keyword arguments,
excluding the learning rate, to pass to the optimizer
"""
def __init__(
self,
observation_space: gym.spaces.Space,
action_space: gym.spaces.Space,
lr_schedule: Schedule,
net_arch: Optional[List[int]] = None,
activation_fn: Type[nn.Module] = nn.ReLU,
features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
features_extractor_kwargs: Optional[Dict[str, Any]] = None,
normalize_images: bool = True,
optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
optimizer_kwargs: Optional[Dict[str, Any]] = None,
):
super(CnnPolicy, self).__init__(
observation_space,
action_space,
lr_schedule,
net_arch,
activation_fn,
features_extractor_class,
features_extractor_kwargs,
normalize_images,
optimizer_class,
optimizer_kwargs,
)
register_policy("MlpPolicy", MlpPolicy)
register_policy("CnnPolicy", CnnPolicy)
| |
test_charge_description.py
|
from maintain_frontend import main
from flask_testing import TestCase
from unit_tests.utilities import Utilities
from maintain_frontend.dependencies.session_api.session import Session
from maintain_frontend.models import LocalLandChargeItem
from maintain_frontend.constants.permissions import Permissions
from flask import url_for
from unittest.mock import patch, PropertyMock
CHARGE_DESCRIPTION_PATH = 'maintain_frontend.add_land_charge.charge_description'
class TestChargeDescription(TestCase):
def create_app(self):
main.app.testing = True
Utilities.mock_session_cookie_flask_test(self)
return main.app
def
|
(self):
main.app.config['TESTING'] = True
main.app.config['WTF_CSRF_ENABLED'] = False
def test_get_charge_description_success(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
state = LocalLandChargeItem()
self.mock_session.return_value.add_charge_state = state
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.get(url_for('add_land_charge.get_charge_description'))
self.assert_status(response, 200)
self.assert_template_used('charge_description.html')
def test_get_charge_description_add_charge_state_none(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.add_charge_state = None
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.get(url_for('add_land_charge.get_charge_description'))
self.assert_status(response, 302)
self.assertRedirects(response, url_for('add_land_charge.new'))
@patch('{}.g'.format(CHARGE_DESCRIPTION_PATH))
def test_get_charge_description_exception(self, mock_g):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
raise_exception = PropertyMock(side_effect=Exception('test exception'))
type(mock_g).session = raise_exception
response = self.client.get(url_for('add_land_charge.get_charge_description'))
self.assert_status(response, 302)
self.assertRedirects(response, url_for("add_land_charge.new"))
def test_get_charge_description_no_permission(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = []
response = self.client.get(url_for('add_land_charge.get_charge_description'))
self.assert_status(response, 302)
self.assertRedirects(response, '/not-authorised')
@patch('{}.ReviewRouter'.format(CHARGE_DESCRIPTION_PATH))
def test_post_charge_description_success(self, mock_review_router):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
mock_review_router.get_redirect_url.return_value = url_for('add_land_charge.get_charge_date')
state = LocalLandChargeItem()
self.mock_session.return_value.add_charge_state = state
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.post(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'description'})
mock_review_router.update_edited_field.assert_called_with('supplementary_information', 'description')
self.assert_status(response, 302)
self.assertRedirects(response, url_for('add_land_charge.get_charge_date'))
def test_post_charge_description_add_charge_state_none(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.add_charge_state = None
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.post(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'description'})
self.assert_status(response, 302)
self.assertRedirects(response, url_for('add_land_charge.new'))
def test_post_charge_description_max_length_validation(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
state = LocalLandChargeItem()
self.mock_session.return_value.add_charge_state = state
self.mock_session.return_value.user.permissions = [Permissions.add_llc]
response = self.client.post(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'a' * 1501})
self.assert_status(response, 400)
self.assert_template_used('charge_description.html')
self.assertIn('Answer too long', response.data.decode())
self.assertIn('Reduce your answer to 1500 characters or fewer', response.data.decode())
def test_post_charge_description_no_permission(self):
self.client.set_cookie('localhost', Session.session_cookie_name, 'cookie_value')
self.mock_session.return_value.user.permissions = []
response = self.client.post(url_for('add_land_charge.post_charge_description'),
data={'charge-description': 'description'})
self.assert_status(response, 302)
self.assertRedirects(response, '/not-authorised')
|
setUp
|
main.rs
|
use std::{
collections::HashMap,
fmt::Display,
fs::{self, DirEntry, File},
hash::Hash,
io::{stderr, stdout, ErrorKind, Write},
path::PathBuf,
process::Command,
vec,
};
use byteorder::{WriteBytesExt, BE};
use clap::Parser;
use anyhow::anyhow;
use colored::Colorize;
#[derive(Parser, Debug)]
#[clap(author, version, about, long_about = None)]
struct Args {
#[clap(
short,
long,
help = "The source, can be either a folder with frames, or a video which will get processed to a folder with frames by ffmpeg."
)]
input: PathBuf,
#[clap(
short,
long,
help = "The output file, the same path, but with a different file extension will be used for files like parity check."
)]
output: PathBuf,
#[clap(
short = 'r',
long,
default_value_t = 30,
help = "The encoded framerate"
)]
framerate: u8,
#[clap(short, long, help = "Width of the output video")]
width: u16,
#[clap(short, long, help = "Height of the output video")]
height: u16,
#[clap(
short,
long,
help = "Generates a parity file along with the m1vf file. Will use the file specified in --output, but with an different file extension."
)]
parity: bool,
#[clap(
short,
long,
help = "Generates a text file, similar to an flipbook (which can come in handy when debugging), along with the m1vf file. Will use the file specified in --output, but with an different file extension."
)]
text: bool,
}
type Endianness = BE;
const THRESHOLD: u8 = 127;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
#[repr(u8)]
enum Algorithm {
Uncompressed = 0x0,
Same = 0x1,
RunLengthRows = 0x2,
RunLengthColumns = 0x3,
PartialRowsUncompressed = 0x4,
PartialColumnsUncompressed = 0x5,
}
const ALGORITHMS: [Algorithm; 4] = [
Algorithm::Uncompressed,
Algorithm::Same,
Algorithm::RunLengthRows,
Algorithm::RunLengthColumns,
];
impl Algorithm {
pub fn encode(&self, current_frame: &Vec<u8>, last_frame: &Vec<u8>, video_width: u16, video_height: u16) -> anyhow::Result<Vec<u8>> {
match self {
Algorithm::Uncompressed => uncompressed(¤t_frame),
Algorithm::Same => {
let mut same = true;
for i in 0..current_frame.len() {
if current_frame[i] != last_frame[i] {
same = false;
break;
}
}
if same {
Ok(Vec::with_capacity(0))
} else {
Err(anyhow!("Not the same"))
}
}
Algorithm::RunLengthRows => run_length_rows(current_frame),
Algorithm::RunLengthColumns => run_length_columns(current_frame, video_width, video_height),
_ => {
unreachable!()
}
}
}
pub fn name(&self) -> String {
String::from(match self {
Algorithm::Uncompressed => "1bpp",
Algorithm::Same => "Frame repeat",
Algorithm::RunLengthRows => "Run-length Row",
Algorithm::RunLengthColumns => "Run-length Column",
Algorithm::PartialRowsUncompressed => "Partial Rows 1bpp",
Algorithm::PartialColumnsUncompressed => "Partial Columns 1bpp",
})
}
}
fn main() -> anyhow::Result<()> {
let args = Args::parse();
if args.input.is_file() {
match fs::create_dir("frames") {
Ok(()) => {}
Err(e) if e.kind() == ErrorKind::AlreadyExists => {
fs::remove_dir_all("frames")?;
fs::create_dir("frames")?;
println!("{}", "`frames` already existed, deleted.".yellow());
}
Err(e) => return Err(e.into()),
}
println!("extracting frames via ffmpeg...");
Command::new("ffmpeg")
.arg("-i")
.arg((&args.input).to_str().ok_or(anyhow!("Invalid file path"))?)
.arg("-vf")
.arg(format!(
"scale={}:{}:flags=neighbor",
args.width, args.height
))
.arg("-r")
.arg(format!("{}", args.framerate))
.arg("frames/%09d.bmp")
.output()?;
}
let contents = fs::read_dir(if args.input.is_dir() {
args.input
} else {
PathBuf::from("frames")
})?;
let contents: Vec<DirEntry> = contents.map(|entry| entry.unwrap()).collect();
let mut output = File::create(&args.output)?;
let mut parity = None;
if args.parity {
let mut parity_path = args.output.clone();
parity_path.set_extension("parity");
parity = Some(File::create(parity_path)?);
}
let mut text = None;
if args.text {
let mut text_path = args.output.clone();
text_path.set_extension("frames.txt");
text = Some(File::create(text_path)?);
}
let mut last_pixels = vec![0u8; (args.width as usize * args.height as usize) as usize];
// used for statistics on how many times a certain algorithm is used
let mut statistics: HashMap<Algorithm, u32> = HashMap::new();
for algo in ALGORITHMS {
statistics.insert(algo, 0);
}
let mut compressed_frames = Vec::with_capacity(contents.len());
let mut progress = 0;
for frame in &contents {
let image = image::open(frame.path())?;
let image = image.grayscale();
let image = image.as_luma8().ok_or(anyhow!("Unable to get as luma8"))?;
//dither(&mut image, &BiLevel);
let pixels: Vec<u8> = image
.pixels()
.into_iter()
.map(|p| if p.0[0] > THRESHOLD { 1 } else { 0 })
.collect();
// create parity file, where every byte is either 1 or 0, and it encodes every pixel from every frame.
// the file is used to check if the decoder and encoder read the same results and to easier identify bugs.
// To enable the generation of the parity file pass the `--parity (-p)` argument to the encoder.
if let Some(parity) = &mut parity {
parity.write_all(&pixels)?;
}
// Render the animation to text
if let Some(text) = &mut text {
write!(
text,
"\n Frame {} - {}x{}",
progress, args.width, args.height
)?;
for (i, pixel) in pixels.iter().enumerate() {
if i % args.width as usize == 0 {
writeln!(text, "")?;
}
write!(text, "{}", if *pixel == 1 { "██" } else { " " })?;
}
writeln!(text, "")?;
}
let mut algos: Vec<(Algorithm, Vec<u8>)> = Vec::with_capacity(ALGORITHMS.len());
for algo in ALGORITHMS {
let encoded = algo.encode(&pixels, &last_pixels, args.width, args.height);
if encoded.is_ok() {
algos.push((algo, encoded?));
}
}
let mut smallest = &algos[0];
for possible_algo in &algos {
if possible_algo.1.len() < smallest.1.len() {
smallest = possible_algo;
}
}
let count = statistics.get_mut(&smallest.0).unwrap();
*count += 1;
compressed_frames.push(smallest.clone());
last_pixels = pixels;
progress += 1;
print!("\rExtracting frames:\t{}/{}... ", progress, contents.len());
stdout().flush().unwrap();
}
println!("{}", "[done]".green());
output.write_u16::<Endianness>(args.width)?;
output.write_u16::<Endianness>(args.height)?;
output.write_u8(args.framerate)?;
let mut last_algorithm: Option<Algorithm> = None;
let mut algorithm_count = 0;
for compressed_frame_index in 0..compressed_frames.len() {
let compressed_frame = &compressed_frames[compressed_frame_index];
if last_algorithm.is_none() {
last_algorithm = Some(compressed_frame.0);
} else if last_algorithm.unwrap() == compressed_frame.0 {
algorithm_count += 1;
}
// println!(
// "{:6}: {:?} (CURRENT: {:?}) ({})",
// compressed_frame_index, last_algorithm, compressed_frame.0, algorithm_count
// );
if algorithm_count == 31 {
// new spec
// println!("write");
output.write_all(&encode_algos(
&compressed_frames,
algorithm_count,
compressed_frame_index,
last_algorithm.unwrap(),
)?)?;
algorithm_count = 0;
last_algorithm = None;
} else if last_algorithm.unwrap() != compressed_frame.0 {
// println!("write change");
output.write_all(&encode_algos(
&compressed_frames,
algorithm_count,
compressed_frame_index - 1,
last_algorithm.unwrap(),
)?)?;
algorithm_count = 0;
last_algorithm = Some(compressed_frame.0);
}
print!("\rEncoding frames:\t{}/{}... ", progress, contents.len());
stdout().flush().unwrap();
}
println!("{}", "[done]".green());
output.write_all(&encode_algos(
&compressed_frames,
algorithm_count,
compressed_frames.len() - 1,
last_algorithm.unwrap(),
)?)?;
println!("done.");
println!("{:20} {}", "NAME", "COUNT");
for (algo, count) in statistics {
println!("{:20} {}", algo.name(), count);
}
Ok(())
}
fn encode_algos(
compressed_frames: &Vec<(Algorithm, Vec<u8>)>,
algorithm_count: u8,
compressed_frame_index: usize,
last_algorithm: Algorithm,
) -> anyhow::Result<Vec<u8>> {
let mut buf = Vec::new();
buf.push(((last_algorithm as u8) << 5) | (algorithm_count)); // new spec
// println!(
// "{:08b}: {}..={}",
// (((last_algorithm as u8) << 4) | (algorithm_count)),
// (compressed_frame_index - (algorithm_count) as usize),
// compressed_frame_index
// );
for i in (compressed_frame_index - (algorithm_count) as usize)..=compressed_frame_index {
let (_, frame) = &compressed_frames[i];
buf.write_all(&frame)?;
}
Ok(buf)
}
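// Worked example of the header byte written above: Algorithm::RunLengthRows
// (0x2) repeated over 5 consecutive frames is stored as algorithm_count = 4
// (the count of repeats after the first frame), so the byte is
// (0b010 << 5) | 0b00100 = 0b0100_0100 = 0x44. Three algorithm bits leave
// five count bits, which is why the encoder flushes at algorithm_count == 31.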
fn uncompressed(pixels: &Vec<u8>) -> anyhow::Result<Vec<u8>> {
let mut buf = Vec::new();
let mut index = 7;
let mut cbyte = 0;
for pixel in pixels {
cbyte |= pixel << index;
if index == 0 {
index = 7;
buf.write_u8(cbyte)?;
cbyte = 0;
} else {
index -= 1;
}
}
// flush a trailing partial byte when the pixel count is not a multiple of 8
if index != 7 {
buf.write_u8(cbyte)?;
}
Ok(buf)
}
fn encode_rl_byte(color: u8, repeat: u8) -> u8 {
(color << 7) | repeat
}
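// e.g. a run of 6 set pixels (color = 1, repeat = 5, counting the repeats
// after the first pixel) encodes as (1 << 7) | 5 = 0b1000_0101 = 0x85.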
fn run_length_rows(pixels: &Vec<u8>) -> anyhow::Result<Vec<u8>> {
let mut buf = Vec::new();
let mut color: Option<u8> = None;
let mut repeat: u8 = 0;
for pixel in pixels {
if color.is_none() {
repeat = 0;
color = Some(*pixel);
} else if *pixel == color.unwrap() {
repeat += 1;
}
|
color = Some(*pixel);
} else if repeat == 0b0111_1111 {
buf.push(encode_rl_byte(color.unwrap(), repeat));
repeat = 0;
color = None;
}
}
if color.is_some() {
buf.push(encode_rl_byte(color.unwrap(), repeat));
}
Ok(buf)
}
fn run_length_columns(pixels: &Vec<u8>, video_width: u16, video_height: u16) -> anyhow::Result<Vec<u8>> {
let mut buf = Vec::new();
let mut color: Option<u8> = None;
let mut repeat: u8 = 0;
for i in 0..video_width {
for j in 0..video_height {
let pixel = &pixels[j as usize * video_width as usize + i as usize];
if color.is_none() {
repeat = 0;
color = Some(*pixel);
} else if *pixel == color.unwrap() {
repeat += 1;
}
if *pixel != color.unwrap() {
buf.push(encode_rl_byte(color.unwrap(), repeat));
repeat = 0;
color = Some(*pixel);
} else if repeat == 0b0111_1111 {
buf.push(encode_rl_byte(color.unwrap(), repeat));
repeat = 0;
color = None;
}
}
}
if color.is_some() {
buf.push(encode_rl_byte(color.unwrap(), repeat));
}
Ok(buf)
}
|
if *pixel != color.unwrap() {
buf.push(encode_rl_byte(color.unwrap(), repeat));
repeat = 0;
|
group_user.py
|
"""
群成员权限验证
"""
from typing import Iterable, List
from app import db
from app.models import Group, GroupUser, MainUser, GroupUserRelation
|
def is_(role: List[str], main_user: MainUser, group_id, platform):
"""
该用户是否是指定群组的管理员
需要用户先绑定群组!
:param role: 群角色,可选 'admin' 或 'owner',可多选
:param main_user: 用户对象
:param group_id: 群组 id
:param platform: 群组所属平台
:return: boolean
"""
group = get_group(group_id, platform)
# Query the accounts bound to main_user that belong to this group
group_users_id: Iterable[GroupUser.id] = db.session.query(GroupUser.id) \
.filter_by(platform=platform, main_user_id=main_user.id)
group_user_relationship: Iterable[GroupUserRelation] = db.session.query(GroupUserRelation) \
.filter_by(platform=platform, group_db_id=group.id) \
.filter(GroupUserRelation.user_db_id.in_(group_users_id))
# Check each account in turn; return True if any has a requested role
for r in group_user_relationship:
if 'admin' in role and r.is_admin:
return True
elif 'owner' in role and r.is_owner:
return True
return False
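# A hypothetical usage sketch (the user object and ids below are assumptions,
# not part of this module): gate a command to group admins or the owner.
#
# if not is_(['admin', 'owner'], current_user, group_id=12345, platform='qq'):
#     raise PermissionError('admin or owner required')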
|
from app.utils.db import get_group
|
http.go
|
package client
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"os"
"path/filepath"
"strings"
api "github.com/epinio/epinio/internal/api/v1"
apierrors "github.com/epinio/epinio/pkg/api/core/v1/errors"
"github.com/go-logr/logr"
"github.com/pkg/errors"
)
type responseError struct {
error
statusCode int
}
func (re *responseError) Unwrap() error { return re.error }
func (re *responseError) StatusCode() int { return re.statusCode }
func wrapResponseError(err error, code int) *responseError {
return &responseError{error: err, statusCode: code}
}
func (c *Client) get(endpoint string) ([]byte, error) {
return c.do(endpoint, "GET", "")
}
func (c *Client) post(endpoint string, data string) ([]byte, error) {
return c.do(endpoint, "POST", data)
}
func (c *Client) patch(endpoint string, data string) ([]byte, error) {
return c.do(endpoint, "PATCH", data)
}
func (c *Client) delete(endpoint string) ([]byte, error) {
return c.do(endpoint, "DELETE", "")
}
// upload the given path as param "file" in a multipart form
func (c *Client) upload(endpoint string, path string) ([]byte, error) {
uri := fmt.Sprintf("%s%s/%s", c.URL, api.Root, endpoint)
// open the tarball
file, err := os.Open(path)
if err != nil
|
defer file.Close()
// create multipart form
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
part, err := writer.CreateFormFile("file", filepath.Base(file.Name()))
if err != nil {
return nil, errors.Wrap(err, "failed to create multiform part")
}
_, err = io.Copy(part, file)
if err != nil {
return nil, errors.Wrap(err, "failed to write to multiform part")
}
err = writer.Close()
if err != nil {
return nil, errors.Wrap(err, "failed to close multiform")
}
// make the request
request, err := http.NewRequest("POST", uri, body)
if err != nil {
return nil, errors.Wrap(err, "failed to build request")
}
request.SetBasicAuth(c.user, c.password)
request.Header.Add("Content-Type", writer.FormDataContentType())
response, err := (&http.Client{}).Do(request)
if err != nil {
return nil, errors.Wrap(err, "failed to POST to upload")
}
defer response.Body.Close()
bodyBytes, _ := ioutil.ReadAll(response.Body)
if response.StatusCode == http.StatusCreated {
return bodyBytes, nil
}
if response.StatusCode != http.StatusOK {
return nil, wrapResponseError(fmt.Errorf("server status code: %s\n%s",
http.StatusText(response.StatusCode), string(bodyBytes)),
response.StatusCode)
}
// object was not created, but status was ok?
return bodyBytes, nil
}
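// do executes the request with basic auth, logs it, and returns the response
// body; any status other than 200/201 is wrapped in a responseError.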
func (c *Client) do(endpoint, method, requestBody string) ([]byte, error) {
uri := fmt.Sprintf("%s%s/%s", c.URL, api.Root, endpoint)
c.log.Info(fmt.Sprintf("%s %s", method, uri))
reqLog := requestLogger(c.log, method, uri, requestBody)
request, err := http.NewRequest(method, uri, strings.NewReader(requestBody))
if err != nil {
reqLog.V(1).Error(err, "cannot build request")
return []byte{}, err
}
request.SetBasicAuth(c.user, c.password)
response, err := (&http.Client{}).Do(request)
if err != nil {
reqLog.V(1).Error(err, "request failed")
castedErr, ok := err.(*url.Error)
if !ok {
return []byte{}, errors.New("couldn't cast request Error!")
}
if castedErr.Timeout() {
return []byte{}, errors.New("request cancelled or timed out")
}
return []byte{}, errors.Wrap(err, "making the request")
}
defer response.Body.Close()
reqLog.V(1).Info("request finished")
respLog := responseLogger(c.log, response)
bodyBytes, err := ioutil.ReadAll(response.Body)
if err != nil {
respLog.V(1).Error(err, "failed to read response body")
return []byte{}, wrapResponseError(err, response.StatusCode)
}
respLog.V(1).Info("response received")
if response.StatusCode == http.StatusCreated {
return bodyBytes, nil
}
// TODO why is != 200 an error? there are valid codes in the 2xx, 3xx range
if response.StatusCode != http.StatusOK {
err := formatError(bodyBytes, response)
respLog.V(1).Error(err, "response is not StatusOK")
return bodyBytes, wrapResponseError(err, response.StatusCode)
}
return bodyBytes, nil
}
type errorFunc = func(response *http.Response, bodyBytes []byte, err error) error
// doWithCustomErrorHandling has a special handler for "response type" errors.
// These are errors where the server sends a valid http response, but the status
// code is not 200.
// The errorFunc allows us to inspect the response, even unmarshal it into an
// api.ErrorResponse and change the returned error.
// Note: it's only used by ServiceDelete, which could be changed to transmit
// its data in a normal Response instead of an error.
func (c *Client) doWithCustomErrorHandling(endpoint, method, requestBody string, f errorFunc) ([]byte, error) {
uri := fmt.Sprintf("%s%s/%s", c.URL, api.Root, endpoint)
c.log.Info(fmt.Sprintf("%s %s", method, uri))
reqLog := requestLogger(c.log, method, uri, requestBody)
request, err := http.NewRequest(method, uri, strings.NewReader(requestBody))
if err != nil {
reqLog.V(1).Error(err, "cannot build request")
return []byte{}, err
}
request.SetBasicAuth(c.user, c.password)
response, err := (&http.Client{}).Do(request)
if err != nil {
reqLog.V(1).Error(err, "request failed")
return []byte{}, err
}
defer response.Body.Close()
reqLog.V(1).Info("request finished")
respLog := responseLogger(c.log, response)
bodyBytes, err := ioutil.ReadAll(response.Body)
if err != nil {
respLog.V(1).Error(err, "failed to read response body")
return []byte{}, wrapResponseError(err, response.StatusCode)
}
respLog.V(1).Info("response received")
if response.StatusCode == http.StatusCreated {
return bodyBytes, nil
}
// TODO why is != 200 an error? there are valid codes in the 2xx, 3xx range
// TODO we can remove doWithCustomErrorHandling, if we let the caller handle the response code?
if response.StatusCode != http.StatusOK {
err := f(response, bodyBytes, formatError(bodyBytes, response))
if err != nil {
respLog.V(1).Error(err, "response is not StatusOK after custom error handling")
return bodyBytes, wrapResponseError(err, response.StatusCode)
}
return bodyBytes, nil
}
return bodyBytes, nil
}
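// A hypothetical usage sketch (the endpoint and its semantics are assumptions,
// not part of this client): treat a 404 from a delete as success so that
// deletes are idempotent.
//
//	bodyBytes, err := c.doWithCustomErrorHandling(endpoint, "DELETE", "",
//		func(response *http.Response, bodyBytes []byte, err error) error {
//			if response.StatusCode == http.StatusNotFound {
//				return nil // missing resource counts as already deleted
//			}
//			return err
//		})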
func requestLogger(l logr.Logger, method string, uri string, body string) logr.Logger {
log := l
if log.V(5).Enabled() {
log = log.WithValues(
"method", method,
"uri", uri,
)
}
if log.V(15).Enabled() {
log = log.WithValues("body", body)
}
return log
}
func responseLogger(l logr.Logger, response *http.Response) logr.Logger {
log := l.WithValues("status", response.StatusCode)
if log.V(15).Enabled() {
log = log.WithValues("header", response.Header)
if response.TLS != nil {
log = log.WithValues("TLSServerName", response.TLS.ServerName)
}
}
return log
}
func formatError(bodyBytes []byte, response *http.Response) error {
t := "response body is empty"
if len(bodyBytes) > 0 {
var eResponse apierrors.ErrorResponse
if err := json.Unmarshal(bodyBytes, &eResponse); err != nil {
return errors.Wrapf(err, "cannot parse JSON response: '%s'", bodyBytes)
}
titles := make([]string, 0, len(eResponse.Errors))
for _, e := range eResponse.Errors {
titles = append(titles, e.Title)
}
t = strings.Join(titles, ", ")
}
return errors.Errorf("%s: %s", http.StatusText(response.StatusCode), t)
}
|
{
return nil, errors.Wrap(err, "failed to open tarball")
}
|
fetch-data.js
|
import { logEventToServer } from "./server-log.js";
import { Parser } from "papaparse";
export const fetchData = async () => {
console.log("fetching data");
const fetchStartTime = new Date().getTime();
let resp = null;
let respError = null;
if (window.location.hostname === "localhost") {
resp = await fetch(
'https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/orp.csv'
//"/orp.csv"
);
} else {
resp = await fetch(
'https://onemocneni-aktualne.mzcr.cz/api/v2/covid-19/orp.csv'
//"https://onemocneni-aktualne.mzcr.cz/api/account/verejne-distribuovana-data/file/dip%252Fweb_orp.csv"
);
}
|
logEventToServer("fetch-data-failed");
throw new Error(
`Chyba při stahování dat ${resp.status} ${resp.statusText}`
);
}
const dataText = await resp.text();
const data = new Parser({
delimiter: ",",
newline: "\r\n",
header: true,
}).parse(dataText);
console.log("data fetched");
const fetchEndTime = new Date().getTime()
logEventToServer(`fetch-finished/${fetchEndTime - fetchStartTime}`);
return data;
};
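// Note on the parse config above: header: true makes each row an object keyed
// by the CSV header instead of a positional array, and newline is pinned to
// "\r\n", which suggests the upstream MZCR export uses Windows line endings.
// (The exact field names depend on the MZCR CSV schema and are not assumed here.)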
|
if (resp == null) {
logEventToServer("fetch-data-failed");
throw new Error("Nepodařilo se získat odpověď");
} else if (resp.status != 200) {
|
Http.js
|
export default class
|
{
static post = (url, data, callback) => {
$.ajax({
type: 'POST',
url: url,
data: data,
dataType: 'text',
success: function(response) {
callback(response);
},
error: function(xhr, status, error) {
console.log(JSON.stringify(xhr));
}
});
}
static get = ( url, callback)=>{
$.ajax({
type : 'GET',
url : url,
dataType : 'json',
success: function(response) {
callback(response);
},
error: function(xhr, status, error) {
alert("error " + JSON.stringify(xhr));
//connecting('Connecting Database failed #youtubedbapi','Connecting failures are usually caused by network difficulties, or maintenance activity.',200) ;
},
complete:function(){
$('#searchProgress').hide();
}
});
}
}
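/* Usage sketch (URLs and callbacks are illustrative, not from this project):
   Http.get('/api/search?q=cats', (results) => render(results));
   Http.post('/api/videos', { title: 'demo' }, (resp) => console.log(resp));
*/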
|
Http
|
contact.page.js
|
alert('Contact page javascript working');
});
|
document.getElementById('contact-btn').addEventListener('click', ()=>{
|
|
print.rs
|
extern crate glib;
use glib::*;
use std::sync::{Arc, Mutex};
// Funny thing: we can't put these two tests in two different functions, otherwise
// they might conflict with each other's results (or the tests would have to be
// run on a single thread).
#[test]
fn check_print_handler()
|
{
//
// g_print check part
//
let count = Arc::new(Mutex::new(0));
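// clone!(@weak count => ...) captures only a weak reference; the closure
// upgrades it on each invocation, so the handler itself never keeps the
// counter alive.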
set_print_handler(clone!(@weak count => move |_| {
// we don't care about the message in here!
*count.lock().expect("failed to lock 1") += 1;
}));
g_print!("test");
assert_eq!(*count.lock().expect("failed to lock 2"), 1);
g_printerr!("one");
assert_eq!(*count.lock().expect("failed to lock 3"), 1);
g_print!("another");
assert_eq!(*count.lock().expect("failed to lock 4"), 2);
unset_print_handler();
g_print!("tadam");
assert_eq!(*count.lock().expect("failed to lock 5"), 2);
g_printerr!("toudoum");
assert_eq!(*count.lock().expect("failed to lock 6"), 2);
//
// g_printerr check part
//
let count = Arc::new(Mutex::new(0));
set_printerr_handler(clone!(@weak count => move |_| {
// we don't care about the message in here!
*count.lock().expect("failed to lock a") += 1;
}));
g_printerr!("test");
assert_eq!(*count.lock().expect("failed to lock b"), 1);
g_print!("one");
assert_eq!(*count.lock().expect("failed to lock c"), 1);
g_printerr!("another");
assert_eq!(*count.lock().expect("failed to lock d"), 2);
unset_printerr_handler();
g_printerr!("tadam");
assert_eq!(*count.lock().expect("failed to lock e"), 2);
g_print!("toudoum");
assert_eq!(*count.lock().expect("failed to lock f"), 2);
}
|
|
bytes_amount.rs
|
use std::ops::{Add, Sub};
use serde::{Deserialize, Serialize};
use crate::fr32::padded_bytes;
use crate::fr32::unpadded_bytes;
pub struct PoStProofBytesAmount(pub usize);
pub struct PoRepProofBytesAmount(pub usize);
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Serialize, Deserialize)]
pub struct UnpaddedByteIndex(pub u64);
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Serialize, Deserialize)]
pub struct UnpaddedBytesAmount(pub u64);
#[derive(Debug, Clone, Copy, PartialEq, PartialOrd, Serialize, Deserialize)]
pub struct PaddedBytesAmount(pub u64);
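// The newtypes above exist so that raw ("unpadded") byte counts and their
// Fr32-padded on-disk counterparts can never be mixed silently; conversions
// go through padded_bytes/unpadded_bytes from crate::fr32 (see the From
// impls below).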
impl From<UnpaddedBytesAmount> for u64 {
fn from(n: UnpaddedBytesAmount) -> Self {
n.0
}
}
impl From<UnpaddedBytesAmount> for usize {
fn from(n: UnpaddedBytesAmount) -> Self {
n.0 as usize
}
}
impl From<UnpaddedBytesAmount> for PaddedBytesAmount {
fn from(n: UnpaddedBytesAmount) -> Self {
PaddedBytesAmount(padded_bytes(n.0 as usize) as u64)
}
}
impl From<PaddedBytesAmount> for u64 {
fn from(n: PaddedBytesAmount) -> Self {
n.0
}
}
impl From<PaddedBytesAmount> for usize {
fn from(n: PaddedBytesAmount) -> Self {
n.0 as usize
}
}
impl From<PaddedBytesAmount> for UnpaddedBytesAmount {
fn from(n: PaddedBytesAmount) -> Self {
UnpaddedBytesAmount(unpadded_bytes(n.0))
}
}
impl From<UnpaddedBytesAmount> for UnpaddedByteIndex {
fn from(n: UnpaddedBytesAmount) -> Self {
UnpaddedByteIndex(n.0)
}
}
impl From<UnpaddedByteIndex> for u64 {
fn from(n: UnpaddedByteIndex) -> Self {
n.0
}
}
impl From<UnpaddedByteIndex> for usize {
fn from(n: UnpaddedByteIndex) -> Self {
n.0 as usize
}
}
impl Add for UnpaddedBytesAmount {
type Output = UnpaddedBytesAmount;
fn add(self, other: UnpaddedBytesAmount) -> UnpaddedBytesAmount {
UnpaddedBytesAmount(self.0 + other.0)
}
}
impl Add for PaddedBytesAmount {
type Output = PaddedBytesAmount;
fn add(self, other: PaddedBytesAmount) -> PaddedBytesAmount {
PaddedBytesAmount(self.0 + other.0)
}
}
impl Sub for UnpaddedBytesAmount {
type Output = UnpaddedBytesAmount;
fn sub(self, other: UnpaddedBytesAmount) -> UnpaddedBytesAmount {
UnpaddedBytesAmount(self.0 - other.0)
}
}
impl Sub for PaddedBytesAmount {
type Output = PaddedBytesAmount;
fn sub(self, other: PaddedBytesAmount) -> PaddedBytesAmount {
PaddedBytesAmount(self.0 - other.0)
}
}
impl From<PoStProofBytesAmount> for usize {
fn from(x: PoStProofBytesAmount) -> Self {
x.0
}
}
impl From<PoRepProofBytesAmount> for usize {
fn
|
(x: PoRepProofBytesAmount) -> Self {
x.0
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn allowed_operations() {
let a = UnpaddedBytesAmount(1);
let b = UnpaddedBytesAmount(2);
let c = UnpaddedBytesAmount(3);
let d = PaddedBytesAmount(1);
let e = PaddedBytesAmount(2);
let f = PaddedBytesAmount(3);
// Operations between UnpaddedBytesAmounts are allowed
assert_eq!(a + b, c);
assert_eq!(c - b, a);
// Operations between PaddedBytesAmounts are allowed
assert_eq!(d + e, f);
assert_eq!(f - e, d);
// Mixed operations fail at compile time.
// assert_eq!(a + b, f);
// Coercion to primitives work
assert_eq!(1u64 + u64::from(b), 3u64);
assert_eq!(1usize + usize::from(b), 3usize);
assert_eq!(1u64 + u64::from(e), 3u64);
assert_eq!(1usize + usize::from(e), 3usize);
// But not between BytesAmount types
// assert_eq!(a + UnpaddedBytesAmount::from(e), c);
// assert_eq!(d + UnpaddedBytesAmount::from(b), f);
// But must be explicit or won't compile.
// assert_eq!(1u64 + b, 3u64);
// assert_eq!(1usize + b, 3usize);
// assert_eq!(1u64 + u64::from(e), 3u64);
// assert_eq!(1usize + usize::from(e), 3usize);
}
}
|
from
|
rawSearchService.test.ts
|
/*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import * as assert from 'assert';
import * as path from 'path';
import { getPathFromAmdModule } from 'vs/base/common/amd';
import { CancelablePromise, createCancelablePromise } from 'vs/base/common/async';
import { Emitter, Event } from 'vs/base/common/event';
import { URI } from 'vs/base/common/uri';
import { IFileQuery, IFileSearchStats, IFolderQuery, IProgress, ISearchEngineStats, QueryType } from 'vs/platform/search/common/search';
import { SearchService as RawSearchService } from 'vs/workbench/services/search/node/rawSearchService';
import { IRawFileMatch, ISearchEngine, ISearchEngineSuccess, ISerializedFileMatch, ISerializedSearchComplete, ISerializedSearchProgressItem, ISerializedSearchSuccess } from 'vs/workbench/services/search/node/search';
import { DiskSearch } from 'vs/workbench/services/search/node/searchService';
const TEST_FOLDER_QUERIES = [
{ folder: URI.file(path.normalize('/some/where')) }
];
const TEST_FIXTURES = path.normalize(getPathFromAmdModule(require, './fixtures'));
const MULTIROOT_QUERIES: IFolderQuery[] = [
{ folder: URI.file(path.join(TEST_FIXTURES, 'examples')) },
{ folder: URI.file(path.join(TEST_FIXTURES, 'more')) }
];
const stats: ISearchEngineStats = {
traversal: 'node',
fileWalkTime: 0,
cmdTime: 1,
directoriesWalked: 2,
filesWalked: 3
};
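// TestSearchEngine streams one match per process tick from the `result`
// factory until the factory returns a falsy value or the search is
// cancelled, so each test controls exactly how many results it emits.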
class TestSearchEngine implements ISearchEngine<IRawFileMatch> {
static last: TestSearchEngine;
private isCanceled = false;
constructor(private result: () => IRawFileMatch, public config?: IFileQuery) {
TestSearchEngine.last = this;
}
search(onResult: (match: IRawFileMatch) => void, onProgress: (progress: IProgress) => void, done: (error: Error, complete: ISearchEngineSuccess) => void): void {
const self = this;
(function next() {
process.nextTick(() => {
if (self.isCanceled) {
done(null!, {
limitHit: false,
stats: stats
});
return;
}
const result = self.result();
if (!result) {
done(null!, {
limitHit: false,
stats: stats
});
} else {
onResult(result);
next();
}
});
})();
}
cancel(): void {
this.isCanceled = true;
}
}
const testTimeout = 5000;
suite('RawSearchService', () => {
const rawSearch: IFileQuery = {
type: QueryType.File,
folderQueries: TEST_FOLDER_QUERIES,
filePattern: 'a'
};
const rawMatch: IRawFileMatch = {
base: path.normalize('/some'),
relativePath: 'where',
basename: 'where',
size: 123
};
const match: ISerializedFileMatch = {
path: path.normalize('/some/where')
};
test('Individual results', async function () {
this.timeout(testTimeout);
let i = 5;
const Engine = TestSearchEngine.bind(null, () => i-- && rawMatch);
const service = new RawSearchService();
let results = 0;
const cb: (p: ISerializedSearchProgressItem) => void = value => {
if (!Array.isArray(value)) {
assert.deepStrictEqual(value, match);
results++;
} else {
assert.fail(JSON.stringify(value));
}
};
await service.doFileSearchWithEngine(Engine, rawSearch, cb, null!, 0);
return assert.strictEqual(results, 5);
});
test('Batch results', async function () {
this.timeout(testTimeout);
let i = 25;
const Engine = TestSearchEngine.bind(null, () => i-- && rawMatch);
const service = new RawSearchService();
const results: number[] = [];
const cb: (p: ISerializedSearchProgressItem) => void = value => {
if (Array.isArray(value)) {
value.forEach(m => {
assert.deepStrictEqual(m, match);
});
results.push(value.length);
} else {
assert.fail(JSON.stringify(value));
}
};
await service.doFileSearchWithEngine(Engine, rawSearch, cb, undefined, 10);
assert.deepStrictEqual(results, [10, 10, 5]);
});
test('Collect batched results', async function () {
this.timeout(testTimeout);
const uriPath = '/some/where';
let i = 25;
const Engine = TestSearchEngine.bind(null, () => i-- && rawMatch);
const service = new RawSearchService();
function
|
(config: IFileQuery, batchSize: number): Event<ISerializedSearchProgressItem | ISerializedSearchComplete> {
let promise: CancelablePromise<ISerializedSearchSuccess | void>;
const emitter = new Emitter<ISerializedSearchProgressItem | ISerializedSearchComplete>({
onFirstListenerAdd: () => {
promise = createCancelablePromise(token => service.doFileSearchWithEngine(Engine, config, p => emitter.fire(p), token, batchSize)
.then(c => emitter.fire(c), err => emitter.fire({ type: 'error', error: err })));
},
onLastListenerRemove: () => {
promise.cancel();
}
});
return emitter.event;
}
const progressResults: any[] = [];
const onProgress = match => {
assert.strictEqual(match.resource.path, uriPath);
progressResults.push(match);
};
const result_2 = await DiskSearch.collectResultsFromEvent(fileSearch(rawSearch, 10), onProgress);
assert.strictEqual(result_2.results.length, 25, 'Result');
assert.strictEqual(progressResults.length, 25, 'Progress');
});
test('Multi-root with include pattern and maxResults', async function () {
this.timeout(testTimeout);
const service = new RawSearchService();
const query: IFileQuery = {
type: QueryType.File,
folderQueries: MULTIROOT_QUERIES,
maxResults: 1,
includePattern: {
'*.txt': true,
'*.js': true
},
};
const result = await DiskSearch.collectResultsFromEvent(service.fileSearch(query));
assert.strictEqual(result.results.length, 1, 'Result');
});
test('Multi-root with include pattern and exists', async function () {
this.timeout(testTimeout);
const service = new RawSearchService();
const query: IFileQuery = {
type: QueryType.File,
folderQueries: MULTIROOT_QUERIES,
exists: true,
includePattern: {
'*.txt': true,
'*.js': true
},
};
const result = await DiskSearch.collectResultsFromEvent(service.fileSearch(query));
assert.strictEqual(result.results.length, 0, 'Result');
assert.ok(result.limitHit);
});
test('Sorted results', async function () {
this.timeout(testTimeout);
const paths = ['bab', 'bbc', 'abb'];
const matches: IRawFileMatch[] = paths.map(relativePath => ({
base: path.normalize('/some/where'),
relativePath,
basename: relativePath,
size: 3
}));
const Engine = TestSearchEngine.bind(null, () => matches.shift());
const service = new RawSearchService();
const results: any[] = [];
const cb = value => {
if (Array.isArray(value)) {
results.push(...value.map(v => v.path));
} else {
assert.fail(JSON.stringify(value));
}
};
await service.doFileSearchWithEngine(Engine, {
type: QueryType.File,
folderQueries: TEST_FOLDER_QUERIES,
filePattern: 'bb',
sortByScore: true,
maxResults: 2
}, cb, undefined, 1);
assert.notStrictEqual(typeof TestSearchEngine.last.config!.maxResults, 'number');
assert.deepStrictEqual(results, [path.normalize('/some/where/bbc'), path.normalize('/some/where/bab')]);
});
test('Sorted result batches', async function () {
this.timeout(testTimeout);
let i = 25;
const Engine = TestSearchEngine.bind(null, () => i-- && rawMatch);
const service = new RawSearchService();
const results: number[] = [];
const cb = value => {
if (Array.isArray(value)) {
value.forEach(m => {
assert.deepStrictEqual(m, match);
});
results.push(value.length);
} else {
assert.fail(JSON.stringify(value));
}
};
await service.doFileSearchWithEngine(Engine, {
type: QueryType.File,
folderQueries: TEST_FOLDER_QUERIES,
filePattern: 'a',
sortByScore: true,
maxResults: 23
}, cb, undefined, 10);
assert.deepStrictEqual(results, [10, 10, 3]);
});
test('Cached results', function () {
this.timeout(testTimeout);
const paths = ['bcb', 'bbc', 'aab'];
const matches: IRawFileMatch[] = paths.map(relativePath => ({
base: path.normalize('/some/where'),
relativePath,
basename: relativePath,
size: 3
}));
const Engine = TestSearchEngine.bind(null, () => matches.shift());
const service = new RawSearchService();
const results: any[] = [];
const cb = value => {
if (Array.isArray(value)) {
results.push(...value.map(v => v.path));
} else {
assert.fail(JSON.stringify(value));
}
};
return service.doFileSearchWithEngine(Engine, {
type: QueryType.File,
folderQueries: TEST_FOLDER_QUERIES,
filePattern: 'b',
sortByScore: true,
cacheKey: 'x'
}, cb, undefined, -1).then(complete => {
assert.strictEqual((<IFileSearchStats>complete.stats).fromCache, false);
assert.deepStrictEqual(results, [path.normalize('/some/where/bcb'), path.normalize('/some/where/bbc'), path.normalize('/some/where/aab')]);
}).then(async () => {
const results: any[] = [];
const cb = value => {
if (Array.isArray(value)) {
results.push(...value.map(v => v.path));
} else {
assert.fail(JSON.stringify(value));
}
};
try {
const complete = await service.doFileSearchWithEngine(Engine, {
type: QueryType.File,
folderQueries: TEST_FOLDER_QUERIES,
filePattern: 'bc',
sortByScore: true,
cacheKey: 'x'
}, cb, undefined, -1);
assert.ok((<IFileSearchStats>complete.stats).fromCache);
assert.deepStrictEqual(results, [path.normalize('/some/where/bcb'), path.normalize('/some/where/bbc')]);
}
catch (e) { }
}).then(() => {
return service.clearCache('x');
}).then(async () => {
matches.push({
base: path.normalize('/some/where'),
relativePath: 'bc',
basename: 'bc',
size: 3
});
const results: any[] = [];
const cb = value => {
if (Array.isArray(value)) {
results.push(...value.map(v => v.path));
} else {
assert.fail(JSON.stringify(value));
}
};
const complete = await service.doFileSearchWithEngine(Engine, {
type: QueryType.File,
folderQueries: TEST_FOLDER_QUERIES,
filePattern: 'bc',
sortByScore: true,
cacheKey: 'x'
}, cb, undefined, -1);
assert.strictEqual((<IFileSearchStats>complete.stats).fromCache, false);
assert.deepStrictEqual(results, [path.normalize('/some/where/bc')]);
});
});
});
|
fileSearch
|
DatePickerComponent.ios.tsx
|
import React from 'react';
import { DatePickerIOS, StyleSheet, Text, View } from 'react-native';
import { TextSize } from '../styles';
import { Field } from './Field';
import { FieldIcon } from './FieldIcon';
export interface IDatePickerComponent {
value?: Date;
onChange?: any;
dateTimeFormat?: (value: Date | undefined, mode?: 'datetime' | 'date' | 'time') => string;
onValueChange?: any;
prettyPrint?: any;
pickerWrapper?: any;
mode?: any;
onPress?: () => void;
valueStyle?: any;
labelComponent?: any;
iconLeft?: any;
iconRight?: any;
labelStyle?: any;
label?: string;
containerStyle?: any;
valueContainerStyle?: any;
minimumDate?: Date;
maximumDate?: Date;
minuteInterval?: any;
timeZoneOffsetInMinutes?: number;
}
export interface IState {
date?: Date;
isPickerVisible: boolean;
}
export class DatePickerComponent extends React.Component<IDatePickerComponent, IState> {
constructor(props: IDatePickerComponent) {
super(props);
this.state = {
date: props.value,
isPickerVisible: false,
};
}
// public componentWillReceiveProps(nextProps: IDatePickerComponent) {
// if (this.props.value !== nextProps.value) {
// this.setState({ date: nextProps.value });
// }
// }
public setDate(value: Date) {
this.setState({ date: value });
this.props.onChange && this.props.onChange(
(this.props.prettyPrint) ? this.dateTimeFormat(value, this.props.mode) : value);
this.props.onValueChange && this.props.onValueChange(value);
}
public render() {
const {
maximumDate,
minimumDate,
minuteInterval,
mode,
timeZoneOffsetInMinutes,
} = this.props;
const valueString = this.dateTimeFormat(this.state.date, this.props.mode);
const datePicker = <DatePickerIOS
maximumDate={maximumDate}
minimumDate={minimumDate}
minuteInterval={minuteInterval}
mode={mode}
timeZoneOffsetInMinutes={timeZoneOffsetInMinutes}
date={this.state.date || new Date()}
onDateChange={this.handleValueChange.bind(this)}
/>;
const pickerWrapper = React.cloneElement(
this.props.pickerWrapper || <View />,
{ onHidePicker: () => { this.setState({ isPickerVisible: false }); } },
datePicker,
);
let iconLeft = this.props.iconLeft;
let iconRight = this.props.iconRight;
|
: iconLeft[1];
} else if (iconLeft) {
iconLeft = <FieldIcon align='left' icon={iconLeft} />;
}
if (iconRight && iconRight.constructor === Array) {
iconRight = (!this.state.isPickerVisible)
? iconRight[0]
: iconRight[1];
} else if (iconRight) {
iconRight = <FieldIcon align='right' icon={iconRight} />;
}
const labelComponent = (this.props.labelComponent)
? this.props.labelComponent
: <Text style={[formStyles.fieldText, this.props.labelStyle]}>{this.props.label}</Text>;
return (
<View>
<Field {...this.props}
ref='inputBox'
onPress={() => this.togglePicker()}>
<View style={[formStyles.fieldContainer,
formStyles.horizontalContainer,
this.props.containerStyle]}
onLayout={this.handleLayoutChange.bind(this)}>
{(iconLeft)
? iconLeft
: null
}
{labelComponent}
<View style={[
formStyles.alignRight,
formStyles.horizontalContainer,
this.props.valueContainerStyle,
]}>
<Text style={[formStyles.fieldValue, this.props.valueStyle]}>{valueString}</Text>
{(iconRight)
? iconRight
: null
}
</View>
</View>
</Field>
{(this.state.isPickerVisible) ?
pickerWrapper : null
}
</View>
);
}
protected handleLayoutChange(e: any) {
this.setState(e.nativeEvent.layout);
}
protected handleValueChange(value: Date) {
this.setState({ date: value });
this.props.onChange && this.props.onChange(
(this.props.prettyPrint) ? this.dateTimeFormat(value, this.props.mode) : value);
this.props.onValueChange && this.props.onValueChange(value);
}
protected togglePicker() {
this.setState({ isPickerVisible: !this.state.isPickerVisible });
this.props.onPress && this.props.onPress();
}
protected dateTimeFormat(value: Date | undefined, mode?: 'datetime' | 'date' | 'time'): string {
if (this.props.dateTimeFormat) {
return this.props.dateTimeFormat(value, mode);
}
if (!value) { return ''; }
let result = '';
switch (mode) {
case 'datetime':
result = value.toLocaleDateString()
+ ' '
+ value.toLocaleTimeString();
break;
case 'time':
result = value.toLocaleTimeString();
break;
default:
result = value.toLocaleDateString();
}
return result;
}
}
const formStyles = StyleSheet.create({
form: {
},
alignRight: {
marginTop: 7, position: 'absolute', right: 10,
},
horizontalContainer: {
flexDirection: 'row',
justifyContent: 'flex-start',
},
fieldContainer: {
backgroundColor: 'white',
justifyContent: 'center',
height: 45,
},
fieldValue: {
fontSize: TextSize.normal,
marginRight: 10,
paddingTop: 4,
justifyContent: 'center',
color: '#C7C7CC',
},
fieldText: {
fontSize: TextSize.normal,
paddingLeft: 0,
paddingRight: 10,
marginTop: 5,
justifyContent: 'center',
lineHeight: 32,
},
input: {
paddingLeft: 10,
paddingRight: 10,
},
helpTextContainer: {
marginTop: 9,
marginBottom: 25,
paddingLeft: 20,
paddingRight: 20,
},
helpText: {
color: '#7a7a7a',
},
});
|
if (iconLeft && iconLeft.constructor === Array) {
iconLeft = (!this.state.isPickerVisible)
? iconLeft[0]
|
.eslintrc.js
|
module.exports = {
parser: '@typescript-eslint/parser',
parserOptions: {
project: 'tsconfig.json',
sourceType: 'module'
},
plugins: ['@typescript-eslint/eslint-plugin'],
extends: [
'plugin:@typescript-eslint/eslint-recommended',
'plugin:@typescript-eslint/recommended',
'prettier',
'prettier/@typescript-eslint'
],
root: true,
env: {
node: true,
jest: true
},
rules: {
'@typescript-eslint/interface-name-prefix': 'off',
'@typescript-eslint/explicit-function-return-type': 'off',
|
'@typescript-eslint/no-explicit-any': 'off'
},
ignorePatterns: ['.eslintrc.js']
};
|
'@typescript-eslint/explicit-module-boundary-types': 'off',
|
types.go
|
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1beta1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Rule is a tuple of APIGroups, APIVersions, and Resources. It is recommended
// to make sure that all the tuple expansions are valid.
type Rule struct {
// APIGroups is the API groups the resources belong to. '*' is all groups.
// If '*' is present, the length of the slice must be one.
// Required.
APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"`
// APIVersions is the API versions the resources belong to. '*' is all versions.
// If '*' is present, the length of the slice must be one.
// Required.
APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"`
// Resources is a list of resources this rule applies to.
//
// For example:
// 'pods' means pods.
// 'pods/log' means the log subresource of pods.
// '*' means all resources, but not subresources.
// 'pods/*' means all subresources of pods.
// '*/scale' means all scale subresources.
// '*/*' means all resources and their subresources.
//
// If a wildcard is present, the validation rule will ensure resources do not
// overlap with each other.
//
// Depending on the enclosing object, subresources might not be allowed.
// Required.
Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
}
type FailurePolicyType string
const (
// Ignore means that an error calling the webhook is ignored.
Ignore FailurePolicyType = "Ignore"
// Fail means that an error calling the webhook causes the admission to fail.
Fail FailurePolicyType = "Fail"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ValidatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects an object without changing it.
type ValidatingWebhookConfiguration struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Webhooks is a list of webhooks and the affected resources and operations.
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// ValidatingWebhookConfigurationList is a list of ValidatingWebhookConfiguration.
type ValidatingWebhookConfigurationList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of ValidatingWebhookConfiguration.
Items []ValidatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// MutatingWebhookConfiguration describes the configuration of an admission webhook that accepts or rejects the object and may also change it.
type MutatingWebhookConfiguration struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// Webhooks is a list of webhooks and the affected resources and operations.
// +optional
// +patchMergeKey=name
// +patchStrategy=merge
Webhooks []Webhook `json:"webhooks,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=Webhooks"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// MutatingWebhookConfigurationList is a list of MutatingWebhookConfiguration.
type MutatingWebhookConfigurationList struct {
metav1.TypeMeta `json:",inline"`
// Standard list metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
// +optional
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
// List of MutatingWebhookConfiguration.
Items []MutatingWebhookConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
}
// Webhook describes an admission webhook and the resources and operations it applies to.
type Webhook struct {
// The name of the admission webhook.
// Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where
// "imagepolicy" is the name of the webhook, and kubernetes.io is the name
// of the organization.
// Required.
Name string `json:"name" protobuf:"bytes,1,opt,name=name"`
// ClientConfig defines how to communicate with the hook.
// Required
ClientConfig WebhookClientConfig `json:"clientConfig" protobuf:"bytes,2,opt,name=clientConfig"`
// Rules describes what operations on what resources/subresources the webhook cares about.
// The webhook cares about an operation if it matches _any_ Rule.
// However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks
// from putting the cluster in a state which cannot be recovered from without completely
// disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called
// on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.
Rules []RuleWithOperations `json:"rules,omitempty" protobuf:"bytes,3,rep,name=rules"`
// FailurePolicy defines how unrecognized errors from the admission endpoint are handled -
// allowed values are Ignore or Fail. Defaults to Ignore.
// +optional
FailurePolicy *FailurePolicyType `json:"failurePolicy,omitempty" protobuf:"bytes,4,opt,name=failurePolicy,casttype=FailurePolicyType"`
// NamespaceSelector decides whether to run the webhook on an object based
// on whether the namespace for that object matches the selector. If the
// object itself is a namespace, the matching is performed on
// object.metadata.labels. If the object is another cluster scoped resource,
// it never skips the webhook.
//
// For example, to run the webhook on any objects whose namespace is not
// associated with "runlevel" of "0" or "1", you will set the selector as
// follows:
// "namespaceSelector": {
// "matchExpressions": [
// {
// "key": "runlevel",
// "operator": "NotIn",
// "values": [
// "0",
// "1"
// ]
// }
// ]
// }
//
// If instead you want to only run the webhook on any objects whose
// namespace is associated with the "environment" of "prod" or "staging",
// you will set the selector as follows:
// "namespaceSelector": {
// "matchExpressions": [
// {
// "key": "environment",
// "operator": "In",
// "values": [
// "prod",
// "staging"
// ]
// }
// ]
// }
//
// See
// https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
// for more examples of label selectors.
//
// Default to the empty LabelSelector, which matches everything.
// +optional
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"`
}
// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make
// sure that all the tuple expansions are valid.
type RuleWithOperations struct {
// Operations is the operations the admission hook cares about - CREATE, UPDATE, or *
// for all operations.
// If '*' is present, the length of the slice must be one.
// Required.
Operations []OperationType `json:"operations,omitempty" protobuf:"bytes,1,rep,name=operations,casttype=OperationType"`
// Rule is embedded, it describes other criteria of the rule, like
// APIGroups, APIVersions, Resources, etc.
Rule `json:",inline" protobuf:"bytes,2,opt,name=rule"`
}
type OperationType string
// The constants should be kept in sync with those defined in k8s.io/kubernetes/pkg/admission/interface.go.
const (
OperationAll OperationType = "*"
Create OperationType = "CREATE"
Update OperationType = "UPDATE"
Delete OperationType = "DELETE"
Connect OperationType = "CONNECT"
)
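// Illustrative only (not part of this package): a RuleWithOperations that
// matches CREATE and UPDATE of core-group v1 pods.
var _ = RuleWithOperations{
Operations: []OperationType{Create, Update},
Rule: Rule{
APIGroups: []string{""},
APIVersions: []string{"v1"},
Resources: []string{"pods"},
},
}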
// WebhookClientConfig contains the information to make a TLS
// connection with the webhook
type WebhookClientConfig struct {
// `url` gives the location of the webhook, in standard URL form
// (`[scheme://]host:port/path`). Exactly one of `url` or `service`
// must be specified.
//
// The `host` should not refer to a service running in the cluster; use
// the `service` field instead. The host might be resolved via external
// DNS in some apiservers (e.g., `kube-apiserver` cannot resolve
// in-cluster DNS as that would be a layering violation). `host` may
// also be an IP address.
//
// Please note that using `localhost` or `127.0.0.1` as a `host` is
// risky unless you take great care to run this webhook on all hosts
// which run an apiserver which might need to make calls to this
// webhook. Such installs are likely to be non-portable, i.e., not easy
// to turn up in a new cluster.
//
// The scheme must be "https"; the URL must begin with "https://".
//
// A path is optional, and if present may be any string permissible in
// a URL. You may use the path to pass an arbitrary string to the
// webhook, for example, a cluster identifier.
//
// Attempting to use a user or basic auth e.g. "user:password@" is not
// allowed. Fragments ("#...") and query parameters ("?...") are not
// allowed, either.
//
// +optional
URL *string `json:"url,omitempty" protobuf:"bytes,3,opt,name=url"`
// `service` is a reference to the service for this webhook. Either
// `service` or `url` must be specified.
//
// If the webhook is running within the cluster, then you should use `service`.
//
// Port 443 will be used if it is open, otherwise it is an error.
//
// +optional
Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"`
// `caBundle` is a PEM encoded CA bundle which will be used to validate
// the webhook's server certificate.
// Required.
CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"`
}
|
// ServiceReference holds a reference to Service.legacy.k8s.io
type ServiceReference struct {
// `namespace` is the namespace of the service.
// Required
Namespace string `json:"namespace" protobuf:"bytes,1,opt,name=namespace"`
// `name` is the name of the service.
// Required
Name string `json:"name" protobuf:"bytes,2,opt,name=name"`
// `path` is an optional URL path which will be sent in any request to
// this service.
// +optional
Path *string `json:"path,omitempty" protobuf:"bytes,3,opt,name=path"`
}
| |
shave.ts
|
export type Opts = {
character?: string
classname?: string
spaces?: boolean
charclassname?: string
}
function generateArrayOfNodes(target: string | NodeList): Array<Node> {
if (typeof target === 'string') {
return [...document.querySelectorAll(target)]
} else if ('length' in target) {
return [...target]
} else {
return [target]
}
}
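// Usage sketch (selector and pixel height are illustrative):
//   shave('.card-title', 48) // truncate matching elements to 48px, appending '…'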
export default function shave(target: string | NodeList, maxHeight: number, opts: Opts = {}): void {
if (typeof maxHeight === 'undefined' || isNaN(maxHeight)) {
throw Error('maxHeight is required')
}
const els = generateArrayOfNodes(target)
if (!els.length) {
return
}
const {
character = '…',
classname = 'js-shave',
spaces: initialSpaces = true,
charclassname = 'js-shave-char',
} = opts
/**
* @notes
* the initialSpaces/spaces variable definition below fixes a previous bug
* where it wasn't clear that `spaces` had to be a boolean: people were
* passing a string, for example, which is truthy. Normalizing it here keeps
* the fix a non-breaking change.
*/
const spaces = typeof initialSpaces === 'boolean' ? initialSpaces : true
const charHtml = `<span class="${charclassname}">${character}</span>`
for (let i = 0; i < els.length; i += 1) {
const el = els[i] as HTMLElement
const styles = el.style
const span = el.querySelector(`.${classname}`)
const textProp = el.textContent === undefined ? 'innerText' : 'textContent'
// If element text has already been shaved
if (span) {
// Remove the ellipsis to recapture the original text
const charList = el.querySelectorAll(`.${charclassname}`)
for (let i = 0; i < charList.length; i++) {
const char = charList[i]
char.parentNode.removeChild(char)
}
// innerText cannot read invisible elements (such as 'display: none;'), so special treatment is required.
if (textProp === 'innerText') {
const elWithShavedTextList = el.querySelectorAll(`.${classname}`)
for (let i = 0; i < elWithShavedTextList.length; i++) {
const elWithShavedText = elWithShavedTextList[i] as HTMLElement
elWithShavedText.style.display = null
elWithShavedText.style.fontSize = '0px'
}
}
// nuke the span, recombine the text
el[textProp] = el[textProp] // eslint-disable-line
}
const fullText = el[textProp]
const words: string | string[] = spaces ? fullText.split(' ') : fullText
// If 0 or 1 words, we're done
if (words.length < 2) {
continue
}
// Temporarily remove any CSS height for text height calculation
const heightStyle = styles.height
styles.height = 'auto'
const maxHeightStyle = styles.maxHeight
styles.maxHeight = 'none'
// If already short enough, we're done
if (el.offsetHeight <= maxHeight) {
styles.height = heightStyle
styles.maxHeight = maxHeightStyle
continue
}
// Binary search for number of words which can fit in allotted height
|
let pivot
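// min/max (initialized in the elided span just above to 0 and
// words.length - 1) bracket the candidate word count; the +1 upper-mid
// pivot guarantees progress when min === pivot, so the loop exits with
// max as the largest count that still fits within maxHeight.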
while (min < max) {
pivot = (min + max + 1) >> 1 // eslint-disable-line no-bitwise
el[textProp] = spaces
? ((words.slice(0, pivot) as string[]).join(' ') as string)
: (words as string).slice(0, pivot)
el.insertAdjacentHTML('beforeend', charHtml)
if (el.offsetHeight > maxHeight) {
max = pivot - 1
} else {
min = pivot
}
}
el[textProp] = spaces ? ((words.slice(0, max) as string[]).join(' ') as string) : (words as string).slice(0, max)
el.insertAdjacentHTML('beforeend', charHtml)
const diff: string = spaces
? ` ${(words.slice(max) as string[]).join(' ') as string}`
: (words as string).slice(max)
const shavedText = document.createTextNode(diff)
const elWithShavedText = document.createElement('span')
elWithShavedText.classList.add(classname)
elWithShavedText.style.display = 'none'
elWithShavedText.appendChild(shavedText)
el.insertAdjacentElement('beforeend', elWithShavedText)
styles.height = heightStyle
styles.maxHeight = maxHeightStyle
}
}
|
let max = words.length - 1
let min = 0
|
utils.ts
|
import { IQueueItem } from './interfaces';
type MaybeError = Error | undefined;
const NUMBER = /\d+/;
const enum QueueItemPosition {
target,
method,
args,
stack
}
export const TIMERS_OFFSET = 6;
export function isCoercableNumber(suspect) {
let type = typeof suspect;
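// NaN is the only value that is not equal to itself, so `suspect === suspect`
// rejects NaN while accepting every other number.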
return (type === 'number' && suspect === suspect) || (type === 'string' && NUMBER.test(suspect));
}
export function
|
(options) {
return options.onError || (options.onErrorTarget && options.onErrorTarget[options.onErrorMethod]);
}
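// Layout note (inferred from the offsets in this file): the queue is a flat
// array of 4-slot records [target, method, args, stack, ...] (see
// QueueItemPosition), which is why findItem steps by 4; timer records carry
// two extra leading slots, hence TIMERS_OFFSET = 6 and the `i - 2` below.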
export function findItem(target, method, collection) {
let index = -1;
for (let i = 0, l = collection.length; i < l; i += 4) {
if (collection[i] === target && collection[i + 1] === method) {
index = i;
break;
}
}
return index;
}
export function findTimerItem(target, method, collection) {
let index = -1;
for (let i = 2, l = collection.length; i < l; i += 6) {
if (collection[i] === target && collection[i + 1] === method) {
index = i - 2;
break;
}
}
return index;
}
export function getQueueItems(items: any[], queueItemLength: number, queueItemPositionOffset: number = 0): IQueueItem[] {
let queueItems: IQueueItem[] = [];
for (let i = 0; i < items.length; i += queueItemLength) {
let maybeError: MaybeError = items[i + QueueItemPosition.stack + queueItemPositionOffset];
let queueItem = {
target: items[i + QueueItemPosition.target + queueItemPositionOffset],
method: items[i + QueueItemPosition.method + queueItemPositionOffset],
args: items[i + QueueItemPosition.args + queueItemPositionOffset],
stack: maybeError !== undefined && 'stack' in maybeError ? maybeError.stack : ''
};
queueItems.push(queueItem);
}
return queueItems;
}
|
getOnError
|
integration_test.go
|
/*
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package integration
import (
"encoding/json"
"flag"
"fmt"
"log"
"math"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
"github.com/google/go-containerregistry/pkg/name"
"github.com/google/go-containerregistry/pkg/v1/daemon"
"github.com/pkg/errors"
"github.com/GoogleContainerTools/kaniko/pkg/timing"
"github.com/GoogleContainerTools/kaniko/pkg/util"
"github.com/GoogleContainerTools/kaniko/testutil"
)
var config *integrationTestConfig
var imageBuilder *DockerFileBuilder
var allDockerfiles []string
const (
daemonPrefix = "daemon://"
integrationPath = "integration"
dockerfilesPath = "dockerfiles"
emptyContainerDiff = `[
{
"Image1": "%s",
"Image2": "%s",
"DiffType": "File",
"Diff": {
"Adds": null,
"Dels": null,
"Mods": null
}
},
{
"Image1": "%s",
"Image2": "%s",
"DiffType": "Metadata",
"Diff": {
"Adds": [],
"Dels": []
}
}
]`
)
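// emptyContainerDiff is Sprintf'd with the image pair twice: once for the
// File diff entry and once for the Metadata entry. Matching it means the two
// images differ only in paths/metadata filtered out by checkContainerDiffOutput.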
func getDockerMajorVersion() int {
out, err := exec.Command("docker", "version", "--format", "{{.Server.Version}}").Output()
if err != nil {
log.Fatal("Error getting docker version of server:", err)
}
versionArr := strings.Split(string(out), ".")
ver, err := strconv.Atoi(versionArr[0])
if err != nil {
log.Fatal("Error getting docker version of server during parsing version string:", err)
}
return ver
}
func launchTests(m *testing.M) (int, error) {
if config.isGcrRepository() {
contextFile, err := CreateIntegrationTarball()
if err != nil {
return 1, errors.Wrap(err, "Failed to create tarball of integration files for build context")
}
fileInBucket, err := UploadFileToBucket(config.gcsBucket, contextFile, contextFile)
if err != nil {
return 1, errors.Wrap(err, "Failed to upload build context")
}
if err = os.Remove(contextFile); err != nil {
return 1, errors.Wrap(err, fmt.Sprintf("Failed to remove tarball at %s", contextFile))
}
RunOnInterrupt(func() { DeleteFromBucket(fileInBucket) })
defer DeleteFromBucket(fileInBucket)
} else {
var err error
var migratedFiles []string
if migratedFiles, err = MigrateGCRRegistry(dockerfilesPath, allDockerfiles, config.imageRepo); err != nil {
RollbackMigratedFiles(dockerfilesPath, migratedFiles)
return 1, errors.Wrap(err, "Fail to migrate dockerfiles from gcs")
}
RunOnInterrupt(func() { RollbackMigratedFiles(dockerfilesPath, migratedFiles) })
defer RollbackMigratedFiles(dockerfilesPath, migratedFiles)
}
if err := buildRequiredImages(); err != nil {
return 1, errors.Wrap(err, "Error while building images")
}
imageBuilder = NewDockerFileBuilder()
return m.Run(), nil
}
func TestMain(m *testing.M) {
var err error
if !meetsRequirements() {
fmt.Println("Missing required tools")
os.Exit(1)
}
if allDockerfiles, err = FindDockerFiles(dockerfilesPath); err != nil {
fmt.Println("Coudn't create map of dockerfiles", err)
os.Exit(1)
} else {
config = initIntegrationTestConfig()
exitCode, err := launchTests(m)
if err != nil {
fmt.Println(err)
}
os.Exit(exitCode)
}
}
func buildRequiredImages() error {
setupCommands := []struct {
name string
command []string
}{
{
name: "Building kaniko image",
command: []string{"docker", "build", "-t", ExecutorImage, "-f", "../deploy/Dockerfile", ".."},
},
{
name: "Building cache warmer image",
command: []string{"docker", "build", "-t", WarmerImage, "-f", "../deploy/Dockerfile_warmer", ".."},
},
{
name: "Building onbuild base image",
command: []string{"docker", "build", "-t", config.onbuildBaseImage, "-f", fmt.Sprintf("%s/Dockerfile_onbuild_base", dockerfilesPath), "."},
},
{
name: "Pushing onbuild base image",
command: []string{"docker", "push", config.onbuildBaseImage},
},
{
name: "Building hardlink base image",
command: []string{"docker", "build", "-t", config.hardlinkBaseImage, "-f", fmt.Sprintf("%s/Dockerfile_hardlink_base", dockerfilesPath), "."},
},
{
name: "Pushing hardlink base image",
command: []string{"docker", "push", config.hardlinkBaseImage},
},
}
for _, setupCmd := range setupCommands {
fmt.Println(setupCmd.name)
cmd := exec.Command(setupCmd.command[0], setupCmd.command[1:]...)
if out, err := RunCommandWithoutTest(cmd); err != nil {
return errors.Wrap(err, fmt.Sprintf("%s failed: %s", setupCmd.name, string(out)))
}
}
return nil
}
func
|
(t *testing.T) {
for _, dockerfile := range allDockerfiles {
t.Run("test_"+dockerfile, func(t *testing.T) {
dockerfile := dockerfile
t.Parallel()
if _, ok := imageBuilder.DockerfilesToIgnore[dockerfile]; ok {
t.SkipNow()
}
if _, ok := imageBuilder.TestCacheDockerfiles[dockerfile]; ok {
t.SkipNow()
}
buildImage(t, dockerfile, imageBuilder)
dockerImage := GetDockerImage(config.imageRepo, dockerfile)
kanikoImage := GetKanikoImage(config.imageRepo, dockerfile)
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
})
}
err := logBenchmarks("benchmark")
if err != nil {
t.Logf("Failed to create benchmark file: %v", err)
}
}
func getGitRepo() string {
var branch, repoSlug string
if _, ok := os.LookupEnv("TRAVIS"); ok {
if os.Getenv("TRAVIS_PULL_REQUEST") != "false" {
branch = os.Getenv("TRAVIS_PULL_REQUEST_BRANCH")
repoSlug = os.Getenv("TRAVIS_PULL_REQUEST_SLUG")
log.Printf("Travis CI Pull request source repo: %s branch: %s\n", repoSlug, branch)
} else {
branch = os.Getenv("TRAVIS_BRANCH")
repoSlug = os.Getenv("TRAVIS_REPO_SLUG")
log.Printf("Travis CI repo: %s branch: %s\n", repoSlug, branch)
}
return "github.com/" + repoSlug + "#refs/heads/" + branch
}
return "github.com/GoogleContainerTools/kaniko"
}
func TestGitBuildcontext(t *testing.T) {
repo := getGitRepo()
dockerfile := fmt.Sprintf("%s/%s/Dockerfile_test_run_2", integrationPath, dockerfilesPath)
// Build with docker
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_test_git")
dockerCmd := exec.Command("docker",
append([]string{"build",
"-t", dockerImage,
"-f", dockerfile,
repo})...)
out, err := RunCommandWithoutTest(dockerCmd)
if err != nil {
t.Errorf("Failed to build image %s with docker command %q: %s %s", dockerImage, dockerCmd.Args, err, string(out))
}
// Build with kaniko
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_test_git")
dockerRunFlags := []string{"run", "--net=host"}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
"-f", dockerfile,
"-d", kanikoImage,
"-c", fmt.Sprintf("git://%s", repo))
kanikoCmd := exec.Command("docker", dockerRunFlags...)
out, err = RunCommandWithoutTest(kanikoCmd)
if err != nil {
t.Errorf("Failed to build image %s with kaniko command %q: %v %s", dockerImage, kanikoCmd.Args, err, string(out))
}
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
}
func TestBuildViaRegistryMirror(t *testing.T) {
repo := getGitRepo()
dockerfile := "integration/dockerfiles/Dockerfile_registry_mirror"
// Build with docker
dockerImage := GetDockerImage(config.imageRepo, "Dockerfile_registry_mirror")
dockerCmd := exec.Command("docker",
append([]string{"build",
"-t", dockerImage,
"-f", dockerfile,
repo})...)
out, err := RunCommandWithoutTest(dockerCmd)
if err != nil {
t.Errorf("Failed to build image %s with docker command %q: %s %s", dockerImage, dockerCmd.Args, err, string(out))
}
// Build with kaniko
kanikoImage := GetKanikoImage(config.imageRepo, "Dockerfile_registry_mirror")
dockerRunFlags := []string{"run", "--net=host"}
dockerRunFlags = addServiceAccountFlags(dockerRunFlags, config.serviceAccount)
dockerRunFlags = append(dockerRunFlags, ExecutorImage,
"-f", dockerfile,
"-d", kanikoImage,
"--registry-mirror", "us-mirror.gcr.io",
"-c", fmt.Sprintf("git://%s", repo))
kanikoCmd := exec.Command("docker", dockerRunFlags...)
out, err = RunCommandWithoutTest(kanikoCmd)
if err != nil {
t.Errorf("Failed to build image %s with kaniko command %q: %v %s", dockerImage, kanikoCmd.Args, err, string(out))
}
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
}
func TestLayers(t *testing.T) {
offset := map[string]int{
"Dockerfile_test_add": 12,
"Dockerfile_test_scratch": 3,
}
for _, dockerfile := range allDockerfiles {
t.Run("test_layer_"+dockerfile, func(t *testing.T) {
dockerfile := dockerfile
t.Parallel()
if _, ok := imageBuilder.DockerfilesToIgnore[dockerfile]; ok {
t.SkipNow()
}
buildImage(t, dockerfile, imageBuilder)
// Pull the kaniko image
dockerImage := GetDockerImage(config.imageRepo, dockerfile)
kanikoImage := GetKanikoImage(config.imageRepo, dockerfile)
pullCmd := exec.Command("docker", "pull", kanikoImage)
RunCommand(pullCmd, t)
checkLayers(t, dockerImage, kanikoImage, offset[dockerfile])
})
}
err := logBenchmarks("benchmark_layers")
if err != nil {
t.Logf("Failed to create benchmark file: %v", err)
}
}
func buildImage(t *testing.T, dockerfile string, imageBuilder *DockerFileBuilder) {
if err := imageBuilder.BuildImage(config, dockerfilesPath, dockerfile); err != nil {
t.Errorf("Error building image: %s", err)
t.FailNow()
}
return
}
// Build each image with kaniko twice, and then make sure they're exactly the same
func TestCache(t *testing.T) {
populateVolumeCache()
for dockerfile := range imageBuilder.TestCacheDockerfiles {
t.Run("test_cache_"+dockerfile, func(t *testing.T) {
dockerfile := dockerfile
t.Parallel()
cache := filepath.Join(config.imageRepo, "cache", fmt.Sprintf("%v", time.Now().UnixNano()))
// Build the initial image which will cache layers
if err := imageBuilder.buildCachedImages(config, cache, dockerfilesPath, 0); err != nil {
t.Fatalf("error building cached image for the first time: %v", err)
}
// Build the second image which should pull from the cache
if err := imageBuilder.buildCachedImages(config, cache, dockerfilesPath, 1); err != nil {
t.Fatalf("error building cached image for the first time: %v", err)
}
// Make sure both images are the same
kanikoVersion0 := GetVersionedKanikoImage(config.imageRepo, dockerfile, 0)
kanikoVersion1 := GetVersionedKanikoImage(config.imageRepo, dockerfile, 1)
diff := containerDiff(t, kanikoVersion0, kanikoVersion1)
expected := fmt.Sprintf(emptyContainerDiff, kanikoVersion0, kanikoVersion1, kanikoVersion0, kanikoVersion1)
checkContainerDiffOutput(t, diff, expected)
})
}
if err := logBenchmarks("benchmark_cache"); err != nil {
t.Logf("Failed to create benchmark file: %v", err)
}
}
func TestRelativePaths(t *testing.T) {
dockerfile := "Dockerfile_relative_copy"
t.Run("test_relative_"+dockerfile, func(t *testing.T) {
t.Parallel()
dockerfile = filepath.Join("./dockerfiles", dockerfile)
contextPath := "./context"
err := imageBuilder.buildRelativePathsImage(
config.imageRepo,
dockerfile,
config.serviceAccount,
contextPath,
)
if err != nil {
t.Fatal(err)
}
dockerImage := GetDockerImage(config.imageRepo, "test_relative_"+dockerfile)
kanikoImage := GetKanikoImage(config.imageRepo, "test_relative_"+dockerfile)
diff := containerDiff(t, daemonPrefix+dockerImage, kanikoImage, "--no-cache")
expected := fmt.Sprintf(emptyContainerDiff, dockerImage, kanikoImage, dockerImage, kanikoImage)
checkContainerDiffOutput(t, diff, expected)
})
}
type fileDiff struct {
Name string
Size int
}
type fileDiffResult struct {
Adds []fileDiff
Dels []fileDiff
}
type metaDiffResult struct {
Adds []string
Dels []string
}
type diffOutput struct {
Image1 string
Image2 string
DiffType string
Diff interface{}
}
func (diff *diffOutput) UnmarshalJSON(data []byte) error {
type Alias diffOutput
aux := &struct{ *Alias }{Alias: (*Alias)(diff)}
var rawJSON json.RawMessage
aux.Diff = &rawJSON
err := json.Unmarshal(data, &aux)
if err != nil {
return err
}
switch diff.DiffType {
case "File":
var dst fileDiffResult
err = json.Unmarshal(rawJSON, &dst)
diff.Diff = &dst
case "Metadata":
var dst metaDiffResult
err = json.Unmarshal(rawJSON, &dst)
diff.Diff = &dst
}
if err != nil {
return err
}
return err
}
var allowedDiffPaths = []string{"/sys"}
func checkContainerDiffOutput(t *testing.T, diff []byte, expected string) {
// Let's compare the json objects themselves instead of strings to avoid
// issues with spaces and indents
t.Helper()
diffInt := []diffOutput{}
expectedInt := []diffOutput{}
err := json.Unmarshal(diff, &diffInt)
if err != nil {
t.Error(err)
}
err = json.Unmarshal([]byte(expected), &expectedInt)
if err != nil {
t.Error(err)
}
// Some differences (whitelisted paths, etc.) are known and expected.
fdr := diffInt[0].Diff.(*fileDiffResult)
fdr.Adds = filterFileDiff(fdr.Adds)
fdr.Dels = filterFileDiff(fdr.Dels)
// Remove some of the meta diffs that shouldn't be checked
mdr := diffInt[1].Diff.(*metaDiffResult)
mdr.Adds = filterMetaDiff(mdr.Adds)
mdr.Dels = filterMetaDiff(mdr.Dels)
testutil.CheckErrorAndDeepEqual(t, false, nil, expectedInt, diffInt)
}
func filterMetaDiff(metaDiff []string) []string {
// TODO remove this once we agree testing shouldn't run on docker 18.xx:
// docker 18.xx builds images with metadata ArgsEscaped: true, while docker
// 19.xx builds them with ArgsEscaped: false, so ArgsEscaped is filtered out
// of the diff unless the daemon is 19.xx.
if config.dockerMajorVersion == 19 {
return metaDiff
}
newDiffs := []string{}
for _, meta := range metaDiff {
if !strings.HasPrefix(meta, "ArgsEscaped") {
newDiffs = append(newDiffs, meta)
}
}
return newDiffs
}
func filterFileDiff(f []fileDiff) []fileDiff {
var newDiffs []fileDiff
for _, diff := range f {
isWhitelisted := false
for _, p := range allowedDiffPaths {
if util.HasFilepathPrefix(diff.Name, p, false) {
isWhitelisted = true
break
}
}
if !isWhitelisted {
newDiffs = append(newDiffs, diff)
}
}
return newDiffs
}
func checkLayers(t *testing.T, image1, image2 string, offset int) {
t.Helper()
img1, err := getImageDetails(image1)
if err != nil {
t.Fatalf("Couldn't get details from image reference for (%s): %s", image1, err)
}
img2, err := getImageDetails(image2)
if err != nil {
t.Fatalf("Couldn't get details from image reference for (%s): %s", image2, err)
}
actualOffset := int(math.Abs(float64(img1.numLayers - img2.numLayers)))
if actualOffset != offset {
t.Fatalf("Difference in number of layers in each image is %d but should be %d. Image 1: %s, Image 2: %s", actualOffset, offset, img1, img2)
}
}
func getImageDetails(image string) (*imageDetails, error) {
ref, err := name.ParseReference(image, name.WeakValidation)
if err != nil {
return nil, fmt.Errorf("Couldn't parse referance to image %s: %s", image, err)
}
imgRef, err := daemon.Image(ref)
if err != nil {
return nil, fmt.Errorf("Couldn't get reference to image %s from daemon: %s", image, err)
}
layers, err := imgRef.Layers()
if err != nil {
return nil, fmt.Errorf("Error getting layers for image %s: %s", image, err)
}
digest, err := imgRef.Digest()
if err != nil {
return nil, fmt.Errorf("Error getting digest for image %s: %s", image, err)
}
return &imageDetails{
name: image,
numLayers: len(layers),
digest: digest.Hex,
}, nil
}
func logBenchmarks(benchmark string) error {
if b, err := strconv.ParseBool(os.Getenv("BENCHMARK")); err == nil && b {
f, err := os.Create(benchmark)
if err != nil {
return err
}
f.WriteString(timing.Summary())
defer f.Close()
}
return nil
}
type imageDetails struct {
name string
numLayers int
digest string
}
func (i imageDetails) String() string {
return fmt.Sprintf("Image: [%s] Digest: [%s] Number of Layers: [%d]", i.name, i.digest, i.numLayers)
}
func initIntegrationTestConfig() *integrationTestConfig {
var c integrationTestConfig
flag.StringVar(&c.gcsBucket, "bucket", "gs://kaniko-test-bucket", "The gcs bucket argument to uploaded the tar-ed contents of the `integration` dir to.")
flag.StringVar(&c.imageRepo, "repo", "gcr.io/kaniko-test", "The (docker) image repo to build and push images to during the test. `gcloud` must be authenticated with this repo or serviceAccount must be set.")
flag.StringVar(&c.serviceAccount, "serviceAccount", "", "The path to the service account push images to GCR and upload/download files to GCS.")
flag.Parse()
if len(c.serviceAccount) > 0 {
absPath, err := filepath.Abs("../" + c.serviceAccount)
if err != nil {
log.Fatalf("Error getting absolute path for service account: %s\n", c.serviceAccount)
}
if _, err := os.Stat(absPath); os.IsNotExist(err) {
log.Fatalf("Service account does not exist: %s\n", absPath)
}
c.serviceAccount = absPath
os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", absPath)
}
if c.imageRepo == "" {
log.Fatal("You must provide a image repository")
}
if c.isGcrRepository() && c.gcsBucket == "" {
log.Fatalf("You must provide a gcs bucket when using a Google Container Registry (\"%s\" was provided)", c.imageRepo)
}
if !strings.HasSuffix(c.imageRepo, "/") {
c.imageRepo = c.imageRepo + "/"
}
c.dockerMajorVersion = getDockerMajorVersion()
c.onbuildBaseImage = c.imageRepo + "onbuild-base:latest"
c.hardlinkBaseImage = c.imageRepo + "hardlink-base:latest"
return &c
}
func meetsRequirements() bool {
requiredTools := []string{"container-diff", "gsutil"}
hasRequirements := true
for _, tool := range requiredTools {
_, err := exec.LookPath(tool)
if err != nil {
fmt.Printf("You must have %s installed and on your PATH\n", tool)
hasRequirements = false
}
}
return hasRequirements
}
// containerDiff compares the container images image1 and image2.
func containerDiff(t *testing.T, image1, image2 string, flags ...string) []byte {
flags = append([]string{"diff"}, flags...)
flags = append(flags, image1, image2,
"-q", "--type=file", "--type=metadata", "--json")
containerdiffCmd := exec.Command("container-diff", flags...)
diff := RunCommand(containerdiffCmd, t)
t.Logf("diff = %s", string(diff))
return diff
}
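// Illustrative call (hypothetical image references): a test can diff a
// Docker-built and a kaniko-built image and inspect the JSON output:
//
//	diff := containerDiff(t, "gcr.io/example/img:docker", "gcr.io/example/img:kaniko")
//
// An empty diff means the two images have identical files and metadata.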
|
TestRun
|
config_flow.py
|
"""Adds config flow for Airly."""
from airly import Airly
from airly.exceptions import AirlyError
import async_timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_API_KEY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from .const import ( # pylint:disable=unused-import
DEFAULT_NAME,
DOMAIN,
NO_AIRLY_SENSORS,
)
class AirlyFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for Airly."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def
|
(self):
"""Initialize."""
self._errors = {}
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
self._errors = {}
websession = async_get_clientsession(self.hass)
if user_input is not None:
await self.async_set_unique_id(
f"{user_input[CONF_LATITUDE]}-{user_input[CONF_LONGITUDE]}"
)
self._abort_if_unique_id_configured()
try:
location_valid = await test_location(
websession,
user_input["api_key"],
user_input["latitude"],
user_input["longitude"],
)
except AirlyError as err:
if err.status_code == HTTP_UNAUTHORIZED:
self._errors["base"] = "invalid_api_key"
else:
if not location_valid:
self._errors["base"] = "wrong_location"
if not self._errors:
return self.async_create_entry(
title=user_input[CONF_NAME], data=user_input
)
return self._show_config_form(
name=DEFAULT_NAME,
api_key="",
latitude=self.hass.config.latitude,
longitude=self.hass.config.longitude,
)
def _show_config_form(self, name=None, api_key=None, latitude=None, longitude=None):
"""Show the configuration form to edit data."""
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_API_KEY, default=api_key): str,
vol.Optional(
CONF_LATITUDE, default=self.hass.config.latitude
): cv.latitude,
vol.Optional(
CONF_LONGITUDE, default=self.hass.config.longitude
): cv.longitude,
vol.Optional(CONF_NAME, default=name): str,
}
),
errors=self._errors,
)
async def test_location(client, api_key, latitude, longitude):
"""Return true if location is valid."""
airly = Airly(api_key, client)
measurements = airly.create_measurements_session_point(
latitude=latitude, longitude=longitude
)
with async_timeout.timeout(10):
await measurements.update()
current = measurements.current
if current["indexes"][0]["description"] == NO_AIRLY_SENSORS:
return False
return True
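# Illustrative use (hypothetical API key and coordinates): the flow above
# effectively runs
#
#   valid = await test_location(websession, "my-api-key", 52.23, 21.01)
#
# and maps valid == False to the "wrong_location" error, while an AirlyError
# with HTTP_UNAUTHORIZED becomes "invalid_api_key".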
|
__init__
|
alternative_trigger.rs
|
// This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files.git)
// DO NOT EDIT
use crate::ShortcutTrigger;
use glib::object::Cast;
use glib::object::IsA;
use glib::translate::*;
use glib::StaticType;
use glib::ToValue;
use std::fmt;
glib::wrapper! {
pub struct AlternativeTrigger(Object<ffi::GtkAlternativeTrigger, ffi::GtkAlternativeTriggerClass>) @extends ShortcutTrigger;
match fn {
get_type => || ffi::gtk_alternative_trigger_get_type(),
}
}
impl AlternativeTrigger {
#[doc(alias = "gtk_alternative_trigger_new")]
pub fn new<P: IsA<ShortcutTrigger>, Q: IsA<ShortcutTrigger>>(
first: &P,
second: &Q,
) -> AlternativeTrigger {
skip_assert_initialized!();
unsafe {
ShortcutTrigger::from_glib_full(ffi::gtk_alternative_trigger_new(
first.as_ref().to_glib_full(),
second.as_ref().to_glib_full(),
))
.unsafe_cast()
}
}
#[doc(alias = "gtk_alternative_trigger_get_first")]
pub fn get_first(&self) -> ShortcutTrigger {
unsafe {
from_glib_none(ffi::gtk_alternative_trigger_get_first(
self.to_glib_none().0,
))
}
}
#[doc(alias = "gtk_alternative_trigger_get_second")]
pub fn get_second(&self) -> ShortcutTrigger {
unsafe {
from_glib_none(ffi::gtk_alternative_trigger_get_second(
self.to_glib_none().0,
))
}
}
}
#[derive(Clone, Default)]
pub struct AlternativeTriggerBuilder {
first: Option<ShortcutTrigger>,
second: Option<ShortcutTrigger>,
}
impl AlternativeTriggerBuilder {
pub fn new() -> Self {
Self::default()
}
pub fn build(self) -> AlternativeTrigger {
let mut properties: Vec<(&str, &dyn ToValue)> = vec![];
if let Some(ref first) = self.first {
properties.push(("first", first));
}
if let Some(ref second) = self.second {
properties.push(("second", second));
}
let ret = glib::Object::new::<AlternativeTrigger>(&properties).expect("object new");
ret
}
pub fn first<P: IsA<ShortcutTrigger>>(mut self, first: &P) -> Self {
self.first = Some(first.clone().upcast());
self
}
pub fn
|
<P: IsA<ShortcutTrigger>>(mut self, second: &P) -> Self {
self.second = Some(second.clone().upcast());
self
}
}
impl fmt::Display for AlternativeTrigger {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str("AlternativeTrigger")
}
}
|
second
|
O.py
|
def for_O():
"""printing capital 'O' using for loop"""
for row in range(5):
for col in range(5):
if (col == 0 and row not in (0, 4)) or (col == 4 and row not in (0, 4)) or (row == 0 and col in (1, 2, 3)) or (row == 4 and col in (1, 2, 3)):
print("*",end=" ")
else:
print(" ",end=" ")
print()
def while_O():
"""printing capital 'O' using while loop"""
i=0
while i<5:
j=0
while j<5:
|
print("*",end=" ")
else:
print(" ",end=" ")
j+=1
i+=1
print()
|
if (j == 0 and i not in (0, 4)) or (i == 0 and j not in (0, 4)) or (i == 4 and j not in (0, 4)) or (j == 4 and i not in (0, 4)):
|
utils_test.go
|
package utils
import (
"encoding/json"
"fmt"
"testing"
)
func TestCalcPassword(t *testing.T)
|
// TestJSONMarshalLevel conclusion: when structs are embedded anonymously, json.Marshal does not introduce an extra nesting level; this holds for both embedded pointer structs and embedded value structs.
func TestJSONMarshalLevel(t *testing.T) {
type Inner struct {
Name string `json:"name"`
}
wrapper := &struct {
InnerIns Inner
*Inner
Password string `json:"pwd"`
}{
InnerIns: Inner{Name: "wrapper"},
Inner: &Inner{Name: "direct"},
Password: "123",
}
wrapperByte, _ := json.Marshal(wrapper)
fmt.Println("> wrapper: ", string(wrapperByte))
// > wrapper: {"InnerIns":{"name":"wrapper"},"name":"direct","pwd":"123"}
}
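// By contrast, a named field does add a level, as the output above shows:
// the named InnerIns field marshals to "InnerIns":{"name":"wrapper"}, while
// the embedded *Inner is flattened into the top-level "name":"direct".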
|
{
pwdHash := CalcSHA256("123456")
fmt.Println("> --- Test calc password ---")
fmt.Println("> password hash :", pwdHash)
fmt.Println("> final password:", CalcSHA256(pwdHash, "zdoZPfZxsT"))
}
|
test.sh.py
|
a = [0, 1, 2]
|
b = [0, 1]
for a, b in zip(a, b):
print(a, b)
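# Note (illustrative): zip stops at the shorter list, so this prints
# "0 0" and "1 1"; it also rebinds the names a and b inside the loop.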
|
|
main.rs
|
use aoc::{read_input_for_day, read_part_for_day};
use std::io;
fn part_one(input: &Vec<i64>, preamble_size: usize) -> Result<i64, ()> {
let mut result = Err(());
for idx in preamble_size..input.len() {
let expected = input.get(idx).unwrap();
let start = idx - preamble_size;
let prev_range = &input[start..idx];
let mut found_pair = false;
for n in prev_range {
let difference = expected - n;
if prev_range.contains(&difference) {
found_pair = true;
break;
}
}
if !found_pair {
result = Ok(*expected);
break; // stop at the first number that is not a sum of two values in its preamble
}
}
result
}
fn part_two(input: &Vec<i64>, expected_sum: i64) -> Result<i64, ()>
|
fn main() -> io::Result<()> {
let input = read_input_for_day::<i64>("day-9")?;
let part = read_part_for_day();
let answer = if part == 1 {
part_one(&input, 25).unwrap()
} else {
part_two(&input, 14144619).unwrap()
};
println!("Day-9 part {} answer: {}", part, answer);
Ok(())
}
|
{
let mut result = Err(());
let mut contig_set: Vec<&i64>;
for start in 0..input.len() {
let mut sum = 0;
contig_set = vec![];
for idx in start..input.len() {
let n = input.get(idx).unwrap();
sum += n;
contig_set.push(n);
if sum == expected_sum {
contig_set.sort();
let lowest = contig_set.first().unwrap();
let highest = contig_set.last().unwrap();
result = Ok(*lowest + *highest);
break;
} else if sum > expected_sum {
break;
}
}
if result.is_ok() {
break;
}
}
result
}
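// Worked sketch of the scan above (hypothetical slice): with
// input = [15, 25, 47, 40] and expected_sum = 127, the window starting at
// index 0 accumulates 15, 40, 87, 127; on hitting the target it sorts the
// window [15, 25, 40, 47] and returns Ok(15 + 47) = Ok(62).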
|
weightedtarget_config.go
|
/*
*
* Copyright 2020 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package weightedtarget
import (
"encoding/json"
internalserviceconfig "github.com/qiaohao9/grpc/internal/serviceconfig"
"github.com/qiaohao9/grpc/serviceconfig"
)
// Target represents one target with the weight and the child policy.
type Target struct {
// Weight is the weight of the child policy.
Weight uint32 `json:"weight,omitempty"`
// ChildPolicy is the child policy and its config.
ChildPolicy *internalserviceconfig.BalancerConfig `json:"childPolicy,omitempty"`
}
// LBConfig is the balancer config for weighted_target.
type LBConfig struct {
serviceconfig.LoadBalancingConfig `json:"-"`
Targets map[string]Target `json:"targets,omitempty"`
}
func parseConfig(c json.RawMessage) (*LBConfig, error) {
var cfg LBConfig
if err := json.Unmarshal(c, &cfg); err != nil
|
return &cfg, nil
}
|
{
return nil, err
}
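// Illustrative input (hypothetical targets) accepted by parseConfig:
//
//	{"targets": {
//	  "cluster_1": {"weight": 75, "childPolicy": [{"round_robin": {}}]},
//	  "cluster_2": {"weight": 25, "childPolicy": [{"round_robin": {}}]}}}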
|
webpack.config.js
|
/* global __dirname, module, require */
const path = require('path');
const webpack = require('webpack');
const GenerateJsonPlugin = require('generate-json-webpack-plugin');
// Addon directory
const dest_dir = path.resolve(__dirname, 'addon');
// Make manifest.json out of package.json information
const package = require('./package.json');
const manifest = require('./webext-manifest.json');
manifest.version = package.version;
manifest.description = package.description;
manifest.author = package.author.name;
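// Illustrative effect (hypothetical package.json values): with
// { "version": "1.2.3", "description": "My addon", "author": { "name": "Jane" } }
// the generated manifest.json carries version "1.2.3", that description,
// and author "Jane".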
module.exports = {
target: 'web',
node: false,
entry: {
'resources/dist/options': './src/browser/options.js',
'resources/dist/background': './src/lib/background.js'
},
output: {
path: dest_dir,
filename: '[name].js'
},
plugins: [
new webpack.DefinePlugin({
'process.env': {
NODE_ENV: '"production"'
}
}),
new GenerateJsonPlugin('manifest.json', manifest, null, 2)
|
]
};
|
|
runtime.go
|
// Copyright 2019-present Facebook Inc. All rights reserved.
// This source code is licensed under the Apache 2.0 license found
// in the LICENSE file in the root directory of this source tree.
// Code generated by entc, DO NOT EDIT.
package runtime
import (
"context"
"github.com/joegilley/ent/entc/integration/privacy/ent/schema"
"github.com/joegilley/ent/entc/integration/privacy/ent/task"
"github.com/joegilley/ent/entc/integration/privacy/ent/team"
"github.com/joegilley/ent/entc/integration/privacy/ent/user"
"github.com/joegilley/ent"
"github.com/joegilley/ent/privacy"
)
// The init function reads all schema descriptors with runtime code
// (default values, validators, hooks and policies) and stitches it
// to their package variables.
func init()
|
const (
Version = "(devel)" // Version of ent codegen.
)
|
{
taskMixin := schema.Task{}.Mixin()
task.Policy = privacy.NewPolicies(taskMixin[0], taskMixin[1], schema.Task{})
task.Hooks[0] = func(next ent.Mutator) ent.Mutator {
return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
if err := task.Policy.EvalMutation(ctx, m); err != nil {
return nil, err
}
return next.Mutate(ctx, m)
})
}
taskHooks := schema.Task{}.Hooks()
task.Hooks[1] = taskHooks[0]
taskFields := schema.Task{}.Fields()
_ = taskFields
// taskDescTitle is the schema descriptor for title field.
taskDescTitle := taskFields[0].Descriptor()
// task.TitleValidator is a validator for the "title" field. It is called by the builders before save.
task.TitleValidator = taskDescTitle.Validators[0].(func(string) error)
teamMixin := schema.Team{}.Mixin()
team.Policy = privacy.NewPolicies(teamMixin[0], schema.Team{})
team.Hooks[0] = func(next ent.Mutator) ent.Mutator {
return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
if err := team.Policy.EvalMutation(ctx, m); err != nil {
return nil, err
}
return next.Mutate(ctx, m)
})
}
teamFields := schema.Team{}.Fields()
_ = teamFields
// teamDescName is the schema descriptor for name field.
teamDescName := teamFields[0].Descriptor()
// team.NameValidator is a validator for the "name" field. It is called by the builders before save.
team.NameValidator = teamDescName.Validators[0].(func(string) error)
userMixin := schema.User{}.Mixin()
user.Policy = privacy.NewPolicies(userMixin[0], userMixin[1], schema.User{})
user.Hooks[0] = func(next ent.Mutator) ent.Mutator {
return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) {
if err := user.Policy.EvalMutation(ctx, m); err != nil {
return nil, err
}
return next.Mutate(ctx, m)
})
}
userFields := schema.User{}.Fields()
_ = userFields
// userDescName is the schema descriptor for name field.
userDescName := userFields[0].Descriptor()
// user.NameValidator is a validator for the "name" field. It is called by the builders before save.
user.NameValidator = userDescName.Validators[0].(func(string) error)
}
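// Net effect (illustrative): after init runs, task.TitleValidator rejects
// titles the schema's title descriptor declares invalid, and every mutation
// on Task, Team or User is screened by its privacy policy before the next
// mutator runs.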
|
go116_export.go
|
// export by github.com/goplus/igop/cmd/qexp
//+build go1.16,!go1.17
package cgi
import (
q "net/http/cgi"
"reflect"
"github.com/goplus/igop"
)
func init() {
igop.RegisterPackage(&igop.Package{
Name: "cgi",
Path: "net/http/cgi",
Deps: map[string]string{
"bufio": "bufio",
"crypto/tls": "tls",
"errors": "errors",
"fmt": "fmt",
"io": "io",
|
"log": "log",
"net": "net",
"net/http": "http",
"net/textproto": "textproto",
"net/url": "url",
"os": "os",
"os/exec": "exec",
"path/filepath": "filepath",
"regexp": "regexp",
"runtime": "runtime",
"strconv": "strconv",
"strings": "strings",
"vendor/golang.org/x/net/http/httpguts": "httpguts",
},
Interfaces: map[string]reflect.Type{},
NamedTypes: map[string]igop.NamedType{
"Handler": {reflect.TypeOf((*q.Handler)(nil)).Elem(), "", "ServeHTTP,handleInternalRedirect,printf,stderr"},
},
AliasTypes: map[string]reflect.Type{},
Vars: map[string]reflect.Value{},
Funcs: map[string]reflect.Value{
"Request": reflect.ValueOf(q.Request),
"RequestFromMap": reflect.ValueOf(q.RequestFromMap),
"Serve": reflect.ValueOf(q.Serve),
},
TypedConsts: map[string]igop.TypedConst{},
UntypedConsts: map[string]igop.UntypedConst{},
})
}
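// Illustrative effect (hypothetical interpreted program): once this package
// is imported for its side effects, igop-interpreted code can
// `import "net/http/cgi"` and call the registered functions, e.g.
// cgi.Serve(nil) to serve http.DefaultServeMux as a CGI program.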
| |
avghp.py
|
class Avghp(BaseMoveEffect):
def after_action(self):
user_hp = self.scene.board.get_data(self.move.user).current_hp
user_max_hp = self.scene.board.get_actor(self.move.user).stats[0]
target_hp = self.scene.board.get_data(self.move.target).current_hp
target_max_hp = self.scene.board.get_actor(self.move.target).stats[0]
average = (user_hp + target_hp) // 2
self.scene.board.set_hp(self.move.user, min(average, user_max_hp))
self.scene.board.set_hp(self.move.target, min(average, target_max_hp))
return True, False, False
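# Worked example (hypothetical stats): a user at 30/100 HP and a target at
# 70/50 HP average to (30 + 70) // 2 = 50, so the user is set to
# min(50, 100) = 50 and the target to min(50, 50) = 50.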
|
from game.combat.effects.moveeffect.basemoveeffect import BaseMoveEffect
|
|
vec_index.rs
|
use slice_index::SliceIndex;
use FlannError;
use Indexable;
use Parameters;
pub struct VecIndex<T: Indexable + 'static> {
storage: Vec<Vec<T>>,
slice_index: Option<SliceIndex<'static, T>>,
}
impl<T: Indexable> Drop for VecIndex<T> {
fn drop(&mut self) {
// We absolutely must destroy the index before our storage because
// we are basically lying about the lifetime of the index using unsafe.
// Be careful when changing this!
self.slice_index.take();
}
}
impl<T: Indexable> std::ops::Deref for VecIndex<T> {
type Target = SliceIndex<'static, T>;
fn deref(&self) -> &Self::Target {
self.slice_index.as_ref().unwrap()
}
}
impl<T: Indexable> std::ops::DerefMut for VecIndex<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.slice_index.as_mut().unwrap()
}
}
impl<T: Indexable> VecIndex<T> {
pub fn new<I, P>(
point_len: usize,
points: I,
parameters: Parameters,
) -> Result<Self, FlannError>
where
|
{
let mut points_vec = Vec::new();
for point in points {
let count = point.into_iter().map(|d| points_vec.push(d)).count();
if count != point_len {
return Err(FlannError::InvalidPointDimensionality {
expected: point_len,
got: count,
});
}
}
if points_vec.is_empty() {
return Err(FlannError::ZeroInputPoints);
}
let index = SliceIndex::new(
point_len,
unsafe { std::mem::transmute(&points_vec[..]) },
parameters,
)?;
Ok(Self {
storage: vec![points_vec],
slice_index: Some(index),
})
}
/// Adds a point to the index.
pub fn add(&mut self, point: Vec<T>) -> Result<(), FlannError> {
self.slice_index
.as_mut()
.unwrap()
.add_slice(unsafe { std::mem::transmute(&point[..]) })?;
self.storage.push(point);
Ok(())
}
/// Adds multiple points to the index.
pub fn add_many<I, P>(&mut self, points: I) -> Result<(), FlannError>
where
I: IntoIterator<Item = P>,
P: IntoIterator<Item = T>,
{
let mut points_vec = Vec::new();
for point in points {
let count = point.into_iter().map(|d| points_vec.push(d)).count();
if count != self.point_len {
return Err(FlannError::InvalidPointDimensionality {
expected: self.point_len,
got: count,
});
}
}
self.add_many_slices(unsafe { std::mem::transmute(&points_vec[..]) })?;
self.storage.push(points_vec);
Ok(())
}
}
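// Illustrative construction (hypothetical points; assumes Parameters
// implements Default in this crate and that the element type is Indexable):
//
//   let mut index = VecIndex::new(
//       2,
//       vec![vec![0.0f32, 0.0], vec![1.0, 1.0]],
//       Parameters::default(),
//   )?;
//   index.add(vec![2.0, 2.0])?;
//
// Points whose length differs from point_len are rejected with
// FlannError::InvalidPointDimensionality.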
|
I: IntoIterator<Item = P>,
P: IntoIterator<Item = T>,
|
cart.component.ts
|
import { Component, OnInit } from '@angular/core';
import { CartService } from 'src/app/services/cart.service';
@Component({
selector: 'app-cart',
templateUrl: './cart.component.html',
styleUrls: ['./cart.component.scss']
})
export class CartComponent implements OnInit {
|
constructor(public cart: CartService) { }
ngOnInit() {
}
removeErrorMsg() {
this.cart.errorMsg = null;
}
}
| |
config.py
|
# coding: utf-8
"""Configuration file to set up Pybo
"""
from pathlib import Path
import yaml
default_config = '''tokenizer:
trie_files:
- &part 'particles.txt'
- &ancient ancient.txt
- &except exceptions.txt
- &uncomp uncompound_lexicon.txt
- &tsikchen tsikchen.txt
- &oral0 oral_corpus_0.txt
- &oral1 oral_corpus_1.txt
- &oral2 oral_corpus_2.txt
- &oral3 oral_corpus_3.txt
|
- &mgd mgd.txt
- &verb verbs.txt
skrt_files:
- &skrt ~ssanskrit.txt
pos_files:
- &tibdict ~pTibetan.DICT
freq_files:
- &freq_mgd ~fmgd.txt
Profile:
empty: []
pytib: [*ancient, *except, *uncomp, *tsikchen, *tibdict, *part]
POS: [*ancient, *except, *uncomp, *tsikchen, *tibdict, *part]
PP: [*part]
GMD: [*ancient, *except, *uncomp, *tsikchen, *mgd, *verb, *tibdict, *skrt, *freq_mgd, *part]
pipeline:
basic:
pre: pre_basic
tok: spaces
proc: spaces_fulltext
frm: plaintext
pybo_raw_content:
pre: pre_basic
tok: pybo
pybo_profile: GMD
proc: pybo_raw_content
frm: plaintext
pybo_raw_lines:
pre: pre_basic_lines
tok: pybo
pybo_profile: GMD
proc: pybo_raw_content
frm: plaintext
syls:
pre: pre_basic
tok: syls
proc: spaces_fulltext
frm: plaintext
pybo_raw_types:
pre: pre_basic
tok: pybo
pybo_profile: GMD
proc: pybo_raw_types
frm: types'''
class Config:
"""Configuration class
Attributes :
filename: Complete filename of the configuration file
config : Dictionary object containing all the configuration elements
"""
def __init__(self, filename):
"""Initialize the class
Converting the configuration file into a Python dictionary object which
contains all the necessary parameters to set up Pybo properly.
The text file has to respect the YAML writing rules.
For more information: 'https://pyyaml.org/wiki/PyYAML'
:param filename: Filename of the file with its extension
"""
self.filename = Path(filename).resolve()
if self.filename.suffix != ".yaml":
raise Exception("Unrecognised file extension. It only supports .yaml files")
# if the file doesn't exist, write it with the default values
if not self.filename.is_file():
with self.filename.open('w', encoding='utf-8-sig') as f:
f.write(default_config)
with self.filename.open('r', encoding='utf-8-sig') as g:
self.config = yaml.safe_load(g.read())
def get_tokenizer_profile(self, profile):
"""Get the profile configuration list
Each profile has a list of files which can be collected by this function.
:param profile: the profile name
:return: the list of files of the selected profile
"""
return self.config["tokenizer"]["Profile"][profile]
def get_pipeline_profile(self, profile):
return self.config["pipeline"][profile]
def add_pipeline_profile(self, profile):
args_list = ['pre', 'tok', 'proc', 'frm', # components
'pybo_profile', # pybo
'left', 'right', # concs
'filename'] # others
key = list(profile.keys())
assert len(key) == 1
key = key[0]
parts = profile[key]
component_keys = list(parts.keys())
assert len(component_keys) >= 4
for c in component_keys:
assert c in args_list
self.config['pipeline'][key] = parts
def reset_default(self):
"""Resets the configuration file to the default values"""
with self.filename.open('w', encoding='utf-8-sig') as f:
f.write(default_config)
if __name__ == '__main__':
config = Config("pybo.yaml")
config.add_pipeline_profile({'test': {'pre': 'test', 'tok': 'test1', 'proc': 'test2', 'frm': 'test3'}})
config.reset_default()
print(config.get_tokenizer_profile('POS'))
|
- &record recordings_4.txt
|
push_context.go
|
// Copyright 2018 Istio Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package model
import (
"encoding/json"
"sort"
"sync"
"time"
networking "istio.io/api/networking/v1alpha3"
"istio.io/istio/pilot/pkg/features"
"istio.io/istio/pilot/pkg/monitoring"
"istio.io/istio/pkg/config/constants"
"istio.io/istio/pkg/config/host"
"istio.io/istio/pkg/config/labels"
"istio.io/istio/pkg/config/protocol"
"istio.io/istio/pkg/config/schemas"
"istio.io/istio/pkg/config/visibility"
)
// PushContext tracks the status of a push - metrics and errors.
// Metrics are reset after a push - at the beginning all
// values are zero, and when push completes the status is reset.
// The struct is exposed in a debug endpoint - fields public to allow
// easy serialization as json.
type PushContext struct {
proxyStatusMutex sync.RWMutex
// ProxyStatus is keyed by the error code, and holds a map keyed
// by the ID.
ProxyStatus map[string]map[string]ProxyPushStatus
// Mutex is used to protect the below store.
// All data is set when the PushContext object is populated in `InitContext`,
// data should not be changed by plugins.
Mutex sync.Mutex `json:"-"`
// Synthesized from env.Mesh
defaultServiceExportTo map[visibility.Instance]bool
defaultVirtualServiceExportTo map[visibility.Instance]bool
defaultDestinationRuleExportTo map[visibility.Instance]bool
// privateServices are reachable within the same namespace.
privateServicesByNamespace map[string][]*Service
// publicServices are services reachable within the mesh.
publicServices []*Service
privateVirtualServicesByNamespace map[string][]Config
publicVirtualServices []Config
// destination rules are of three types:
// namespaceLocalDestRules: all public/private dest rules pertaining to a service defined in a given namespace
// namespaceExportedDestRules: all public dest rules pertaining to a service defined in a namespace
// allExportedDestRules: all (public) dest rules across all namespaces
// We need the allExportedDestRules in addition to namespaceExportedDestRules because we select
// the dest rule based on the most specific host match, and not just any destination rule
namespaceLocalDestRules map[string]*processedDestRules
namespaceExportedDestRules map[string]*processedDestRules
allExportedDestRules *processedDestRules
// sidecars for each namespace
sidecarsByNamespace map[string][]*SidecarScope
// envoy filters for each namespace including global config namespace
envoyFiltersByNamespace map[string][]*EnvoyFilterWrapper
// gateways for each namespace
gatewaysByNamespace map[string][]Config
////////// END ////////
// The following data is either a global index or used in the inbound path.
// Namespace specific views do not apply here.
// ServiceByHostnameAndNamespace has all services, indexed by hostname then namespace.
ServiceByHostnameAndNamespace map[host.Name]map[string]*Service `json:"-"`
// AuthzPolicies stores the existing authorization policies in the cluster. Could be nil if there
// are no authorization policies in the cluster.
AuthzPolicies *AuthorizationPolicies `json:"-"`
// Env has a pointer to the shared environment used to create the snapshot.
Env *Environment `json:"-"`
// ServiceAccounts contains a map of hostname and port to service accounts.
ServiceAccounts map[host.Name]map[int][]string `json:"-"`
initDone bool
}
type processedDestRules struct {
// List of dest rule hosts. We match with the most specific host first
hosts []host.Name
// Map of dest rule host and the merged destination rules for that host
destRule map[host.Name]*combinedDestinationRule
}
// XDSUpdater is used for direct updates of the xDS model and incremental push.
// Pilot uses multiple registries - for example each K8S cluster is a registry instance,
// as well as consul and future EDS or MCP sources. Each registry is responsible for
// tracking a set of endpoints associated with mesh services, and calling the EDSUpdate
// on changes. A registry may group endpoints for a service in smaller subsets - for
// example by deployment, or to deal with very large number of endpoints for a service.
// We want to avoid passing around large objects - like full list of endpoints for a registry,
// or the full list of endpoints for a service across registries, since it limits scalability.
//
// Future optimizations will include grouping the endpoints by labels, gateway or region to
// reduce the time when subsetting or split-horizon is used. This design assumes pilot
// tracks all endpoints in the mesh and they fit in RAM - so the limit is a few million endpoints.
// It is possible to split the endpoint tracking in future.
type XDSUpdater interface {
// EDSUpdate is called when the list of endpoints or labels in a ServiceEntry is
// changed. For each cluster and hostname, the full list of active endpoints (including empty list)
// must be sent. The shard name is used as a key - current implementation is using the registry
// name.
EDSUpdate(shard, hostname string, namespace string, entry []*IstioEndpoint) error
// SvcUpdate is called when a service port mapping definition is updated.
// This interface is WIP - labels, annotations and other changes to service may be
// updated to force an EDS and CDS recomputation and incremental push, as it doesn't affect
// LDS/RDS.
SvcUpdate(shard, hostname string, ports map[string]uint32, rports map[uint32]string)
// WorkloadUpdate is called by a registry when the labels or annotations on a workload have changed.
// The 'id' is the IP address of the pod for k8s if the pod is in the main/default network.
// In future it will include the 'network id' for pods in a different network, behind a zvpn gate.
// The IP is used because K8S Endpoints object associated with a Service only include the IP.
// We use Endpoints to track the membership to a service and readiness.
WorkloadUpdate(id string, labels map[string]string, annotations map[string]string)
// ConfigUpdate is called to notify the XDS server of config updates and request a push.
// The requests may be collapsed and throttled.
// This replaces the 'cache invalidation' model.
ConfigUpdate(req *PushRequest)
}
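// Illustrative call (hypothetical names): a registry that observed an
// endpoint change for a service in its "Kubernetes" shard would invoke
//
//	updater.EDSUpdate("Kubernetes", "reviews.default.svc.cluster.local",
//		"default", endpoints)
//
// passing the full current endpoint list - an empty list must still be sent
// so that stale endpoints are removed.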
// PushRequest defines a request to push to proxies
// It is used to send updates to the config update debouncer and pass to the PushQueue.
type PushRequest struct {
// Full determines whether a full push is required or not. If set to false, only endpoints will be sent.
Full bool
// TargetNamespaces contains a list of namespaces that were changed in the update.
// This is used as an optimization to avoid unnecessary pushes to proxies that are scoped with a Sidecar.
// Currently, this will only scope EDS updates, as config updates are more complicated.
// If this is empty, then proxies in all namespaces will get an update
// If this is present, then only proxies that import this namespace will get an update
TargetNamespaces map[string]struct{}
// EdsUpdates keeps track of all service updated since last full push.
// Key is the hostname (serviceName).
// This is used by incremental eds.
EdsUpdates map[string]struct{}
// Push stores the push context to use for the update. This may initially be nil, as we will
// debounce changes before a PushContext is eventually created.
Push *PushContext
// Start represents the time a push was started. This represents the time of adding to the PushQueue.
// Note that this does not include time spent debouncing.
Start time.Time
}
// Merge two update requests together
func (first *PushRequest) Merge(other *PushRequest) *PushRequest {
if first == nil {
return other
}
if other == nil {
return first
}
merged := &PushRequest{
// Keep the first (older) start time
Start: first.Start,
// If either is full we need a full push
Full: first.Full || other.Full,
// The other push context is presumed to be later and more up to date
Push: other.Push,
}
// Only merge EdsUpdates when incremental eds push needed.
if !merged.Full {
merged.EdsUpdates = make(map[string]struct{})
// Merge the updates
for update := range first.EdsUpdates {
merged.EdsUpdates[update] = struct{}{}
}
for update := range other.EdsUpdates {
merged.EdsUpdates[update] = struct{}{}
}
} else {
merged.EdsUpdates = nil
}
if !features.ScopePushes.Get() {
// If push scoping is not enabled, we do not care about target namespaces
return merged
}
// If either does not specify only namespaces, this means update all namespaces
if len(first.TargetNamespaces) == 0 || len(other.TargetNamespaces) == 0 {
return merged
}
// Merge the target namespaces
merged.TargetNamespaces = make(map[string]struct{})
for update := range first.TargetNamespaces {
merged.TargetNamespaces[update] = struct{}{}
}
for update := range other.TargetNamespaces {
merged.TargetNamespaces[update] = struct{}{}
}
return merged
}
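// Illustrative merges (hypothetical values): combining two incremental
// requests unions their EdsUpdates and keeps the older Start time, while
// merging with a Full request discards EdsUpdates entirely:
//
//	a := &PushRequest{EdsUpdates: map[string]struct{}{"svc-a": {}}}
//	b := &PushRequest{EdsUpdates: map[string]struct{}{"svc-b": {}}}
//	m := a.Merge(b) // m.Full == false, m.EdsUpdates == {svc-a, svc-b}
//	f := a.Merge(&PushRequest{Full: true}) // f.Full == true, f.EdsUpdates == nil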
// ProxyPushStatus represents an event captured during config push to proxies.
// It may contain additional message and the affected proxy.
type ProxyPushStatus struct {
Proxy string `json:"proxy,omitempty"`
Message string `json:"message,omitempty"`
}
type combinedDestinationRule struct {
subsets map[string]struct{} // list of subsets seen so far
// We are not doing ports
config *Config
}
// Add will add a case to the metric.
func (ps *PushContext) Add(metric monitoring.Metric, key string, proxy *Proxy, msg string) {
if ps == nil {
log.Infof("Metric without context %s %v %s", key, proxy, msg)
return
}
ps.proxyStatusMutex.Lock()
defer ps.proxyStatusMutex.Unlock()
metricMap, f := ps.ProxyStatus[metric.Name()]
if !f {
metricMap = map[string]ProxyPushStatus{}
ps.ProxyStatus[metric.Name()] = metricMap
}
ev := ProxyPushStatus{Message: msg}
if proxy != nil {
ev.Proxy = proxy.ID
}
metricMap[key] = ev
}
var (
// EndpointNoPod tracks endpoints without an associated pod. This is an error condition, since
// we can't figure out the labels. It may be a transient problem, if endpoint is processed before
// pod.
EndpointNoPod = monitoring.NewGauge(
"endpoint_no_pod",
"Endpoints without an associated pod.",
)
// ProxyStatusNoService represents proxies not selected by any service
// This can be normal - for workloads that act only as client, or are not covered by a Service.
// It can also be an error, for example when the Endpoint list of a service was not yet updated by the time
// the sidecar connects.
// Updated by GetProxyServiceInstances
ProxyStatusNoService = monitoring.NewGauge(
"pilot_no_ip",
"Pods not found in the endpoint table, possibly invalid.",
)
// ProxyStatusEndpointNotReady represents proxies found not be ready.
// Updated by GetProxyServiceInstances. Normal condition when starting
// an app with readiness, error if it doesn't change to 0.
ProxyStatusEndpointNotReady = monitoring.NewGauge(
"pilot_endpoint_not_ready",
"Endpoint found in unready state.",
)
// ProxyStatusConflictOutboundListenerTCPOverHTTP metric tracks number of
// wildcard TCP listeners that conflicted with existing wildcard HTTP listener on same port
ProxyStatusConflictOutboundListenerTCPOverHTTP = monitoring.NewGauge(
"pilot_conflict_outbound_listener_tcp_over_current_http",
"Number of conflicting wildcard tcp listeners with current wildcard http listener.",
)
// ProxyStatusConflictOutboundListenerTCPOverTCP metric tracks number of
// TCP listeners that conflicted with existing TCP listeners on same port
ProxyStatusConflictOutboundListenerTCPOverTCP = monitoring.NewGauge(
"pilot_conflict_outbound_listener_tcp_over_current_tcp",
"Number of conflicting tcp listeners with current tcp listener.",
)
// ProxyStatusConflictOutboundListenerHTTPOverTCP metric tracks number of
// wildcard HTTP listeners that conflicted with existing wildcard TCP listener on same port
ProxyStatusConflictOutboundListenerHTTPOverTCP = monitoring.NewGauge(
"pilot_conflict_outbound_listener_http_over_current_tcp",
"Number of conflicting wildcard http listeners with current wildcard tcp listener.",
)
// ProxyStatusConflictInboundListener tracks cases of multiple inbound
// listeners - 2 services selecting the same port of the pod.
ProxyStatusConflictInboundListener = monitoring.NewGauge(
"pilot_conflict_inbound_listener",
"Number of conflicting inbound listeners.",
)
// DuplicatedClusters tracks duplicate clusters seen while computing CDS
DuplicatedClusters = monitoring.NewGauge(
"pilot_duplicate_envoy_clusters",
"Duplicate envoy clusters caused by service entries with same hostname",
)
// ProxyStatusClusterNoInstances tracks clusters (services) without workloads.
ProxyStatusClusterNoInstances = monitoring.NewGauge(
"pilot_eds_no_instances",
"Number of clusters without instances.",
)
// DuplicatedDomains tracks rejected VirtualServices due to duplicated hostname.
DuplicatedDomains = monitoring.NewGauge(
"pilot_vservice_dup_domain",
"Virtual services with dup domains.",
)
// DuplicatedSubsets tracks duplicate subsets that we rejected while merging multiple destination rules for same host
DuplicatedSubsets = monitoring.NewGauge(
"pilot_destrule_subsets",
"Duplicate subsets across destination rules for same host",
)
// totalVirtualServices tracks the total number of virtual service
totalVirtualServices = monitoring.NewGauge(
"pilot_virt_services",
"Total virtual services known to pilot.",
)
// LastPushStatus preserves the metrics and data collected during lasts global push.
// It can be used by debugging tools to inspect the push event. It will be reset after each push with the
// new version.
LastPushStatus *PushContext
// LastPushMutex will protect the LastPushStatus
LastPushMutex sync.Mutex
// All metrics we registered.
metrics = []monitoring.Metric{
EndpointNoPod,
ProxyStatusNoService,
ProxyStatusEndpointNotReady,
ProxyStatusConflictOutboundListenerTCPOverHTTP,
ProxyStatusConflictOutboundListenerTCPOverTCP,
ProxyStatusConflictOutboundListenerHTTPOverTCP,
ProxyStatusConflictInboundListener,
DuplicatedClusters,
ProxyStatusClusterNoInstances,
DuplicatedDomains,
DuplicatedSubsets,
}
)
func init() {
for _, m := range metrics {
monitoring.MustRegisterViews(m)
}
monitoring.MustRegisterViews(totalVirtualServices)
}
// NewPushContext creates a new PushContext structure to track push status.
func NewPushContext() *PushContext {
// TODO: detect push in progress, don't update status if set
return &PushContext{
publicServices: []*Service{},
privateServicesByNamespace: map[string][]*Service{},
publicVirtualServices: []Config{},
privateVirtualServicesByNamespace: map[string][]Config{},
namespaceLocalDestRules: map[string]*processedDestRules{},
namespaceExportedDestRules: map[string]*processedDestRules{},
allExportedDestRules: &processedDestRules{
hosts: make([]host.Name, 0),
destRule: map[host.Name]*combinedDestinationRule{},
},
sidecarsByNamespace: map[string][]*SidecarScope{},
envoyFiltersByNamespace: map[string][]*EnvoyFilterWrapper{},
gatewaysByNamespace: map[string][]Config{},
ServiceByHostnameAndNamespace: map[host.Name]map[string]*Service{},
ProxyStatus: map[string]map[string]ProxyPushStatus{},
ServiceAccounts: map[host.Name]map[int][]string{},
}
}
// JSON implements json.Marshaller, with a lock.
func (ps *PushContext) JSON() ([]byte, error) {
if ps == nil {
return []byte{'{', '}'}, nil
}
ps.proxyStatusMutex.RLock()
defer ps.proxyStatusMutex.RUnlock()
return json.MarshalIndent(ps, "", " ")
}
// OnConfigChange is called when a config change is detected.
func (ps *PushContext) OnConfigChange() {
LastPushMutex.Lock()
LastPushStatus = ps
LastPushMutex.Unlock()
ps.UpdateMetrics()
}
// UpdateMetrics will update the prometheus metrics based on the
// current status of the push.
func (ps *PushContext) UpdateMetrics() {
ps.proxyStatusMutex.RLock()
defer ps.proxyStatusMutex.RUnlock()
for _, pm := range metrics {
mmap := ps.ProxyStatus[pm.Name()]
pm.Record(float64(len(mmap)))
}
}
// Services returns the list of services that are visible to a Proxy in a given config namespace
func (ps *PushContext) Services(proxy *Proxy) []*Service {
// If proxy has a sidecar scope that is user supplied, then get the services from the sidecar scope
// sidecarScope.config is nil if there is no sidecar scope for the namespace
if proxy != nil && proxy.SidecarScope != nil && proxy.Type == SidecarProxy {
return proxy.SidecarScope.Services()
}
out := make([]*Service, 0)
// First add private services
if proxy == nil {
for _, privateServices := range ps.privateServicesByNamespace {
out = append(out, privateServices...)
}
} else {
out = append(out, ps.privateServicesByNamespace[proxy.ConfigNamespace]...)
}
// Second add public services
out = append(out, ps.publicServices...)
return out
}
// VirtualServices lists all virtual services bound to the specified gateways
// This replaces store.VirtualServices. Used only by the gateways
// Sidecars use the egressListener.VirtualServices().
func (ps *PushContext) VirtualServices(proxy *Proxy, gateways map[string]bool) []Config {
configs := make([]Config, 0)
out := make([]Config, 0)
// filter out virtual services not reachable
// First private virtual service
if proxy == nil {
for _, virtualSvcs := range ps.privateVirtualServicesByNamespace {
configs = append(configs, virtualSvcs...)
}
} else {
configs = append(configs, ps.privateVirtualServicesByNamespace[proxy.ConfigNamespace]...)
}
// Second public virtual service
configs = append(configs, ps.publicVirtualServices...)
for _, cfg := range configs {
rule := cfg.Spec.(*networking.VirtualService)
if len(rule.Gateways) == 0 {
// This rule applies only to IstioMeshGateway
if gateways[constants.IstioMeshGateway] {
out = append(out, cfg)
}
} else {
for _, g := range rule.Gateways {
// note: Gateway names do _not_ use wildcard matching, so we do not use Name.Matches here
if gateways[resolveGatewayName(g, cfg.ConfigMeta)] {
out = append(out, cfg)
break
} else if g == constants.IstioMeshGateway && gateways[g] {
// "mesh" gateway cannot be expanded into FQDN
out = append(out, cfg)
break
}
}
}
}
return out
}
// getSidecarScope returns a SidecarScope object associated with the
// proxy. The SidecarScope object is a semi-processed view of the service
// registry, and config state associated with the sidecar crd. The scope contains
// a set of inbound and outbound listeners, services/configs per listener,
// etc. The sidecar scopes are precomputed in the initSidecarContext
// function based on the Sidecar API objects in each namespace. If there is
// no sidecar api object, a default sidecarscope is assigned to the
// namespace which enables connectivity to all services in the mesh.
//
// Callers can check whether the sidecarScope was built from a user-generated object
// by inspecting the sidecarScope.Config field, which contains the user-provided config.
func (ps *PushContext) getSidecarScope(proxy *Proxy, workloadLabels labels.Collection) *SidecarScope {
// Find the most specific matching sidecar config from the proxy's
// config namespace If none found, construct a sidecarConfig on the fly
// that allows the sidecar to talk to any namespace (the default
// behavior in the absence of sidecars).
if sidecars, ok := ps.sidecarsByNamespace[proxy.ConfigNamespace]; ok {
// TODO: logic to merge multiple sidecar resources
// Currently we assume that there will be only one sidecar config for a namespace.
var defaultSidecar *SidecarScope
for _, wrapper := range sidecars {
if wrapper.Config != nil {
sidecar := wrapper.Config.Spec.(*networking.Sidecar)
// if there is no workload selector, the config applies to all workloads
// if there is a workload selector, check for matching workload labels
if sidecar.GetWorkloadSelector() != nil {
workloadSelector := labels.Instance(sidecar.GetWorkloadSelector().GetLabels())
if !workloadLabels.IsSupersetOf(workloadSelector) {
continue
}
return wrapper
}
defaultSidecar = wrapper
continue
}
// Not sure when this can happen (Config = nil ?)
if defaultSidecar != nil {
return defaultSidecar // still return the valid one
}
return wrapper
}
if defaultSidecar != nil {
return defaultSidecar // still return the valid one
}
}
return DefaultSidecarScopeForNamespace(ps, proxy.ConfigNamespace)
}
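// Selection sketch for the logic above (hypothetical labels): with
// workloadLabels = {app: ratings}, a Sidecar whose workloadSelector matches
// app=ratings wins; otherwise the namespace's selector-less Sidecar is used;
// if the namespace has none, DefaultSidecarScopeForNamespace applies, which
// allows reaching every service in the mesh.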
// GetAllSidecarScopes returns a map of namespace and the set of SidecarScope
// object associated with the namespace. This will be used by the CDS code to
// precompute CDS output for each sidecar scope. Since we have a default sidecarscope
// for namespaces that don't explicitly have one, we are guaranteed to
// have the CDS output cached for every namespace/sidecar scope combo.
func (ps *PushContext) GetAllSidecarScopes() map[string][]*SidecarScope {
return ps.sidecarsByNamespace
}
// DestinationRule returns a destination rule for a service name in a given domain.
func (ps *PushContext) DestinationRule(proxy *Proxy, service *Service) *Config {
// FIXME: this code should be removed once the EDS issue is fixed
if proxy == nil {
if hostname, ok := MostSpecificHostMatch(service.Hostname, ps.allExportedDestRules.hosts); ok {
return ps.allExportedDestRules.destRule[hostname].config
}
return nil
}
// If proxy has a sidecar scope that is user supplied, then get the destination rules from the sidecar scope
// sidecarScope.config is nil if there is no sidecar scope for the namespace
if proxy.SidecarScope != nil && proxy.Type == SidecarProxy {
// If there is a sidecar scope for this proxy, return the destination rule
// from the sidecar scope.
return proxy.SidecarScope.DestinationRule(service.Hostname)
}
// If the proxy config namespace is same as the root config namespace
// look for dest rules in the service's namespace first. This hack is needed
// because sometimes, istio-system tends to become the root config namespace.
// Destination rules are defined here for global purposes. We don't want these
// catch all destination rules to be the only dest rule, when processing CDS for
// proxies like the istio-ingressgateway or istio-egressgateway.
// If there are no service specific dest rules, we will end up picking up the same
// rules anyway, later in the code
if proxy.ConfigNamespace != ps.Env.Mesh.RootNamespace {
// search through the DestinationRules in proxy's namespace first
if ps.namespaceLocalDestRules[proxy.ConfigNamespace] != nil {
if hostname, ok := MostSpecificHostMatch(service.Hostname,
ps.namespaceLocalDestRules[proxy.ConfigNamespace].hosts); ok {
return ps.namespaceLocalDestRules[proxy.ConfigNamespace].destRule[hostname].config
}
}
}
// if no private/public rule matched in the calling proxy's namespace,
// check the target service's namespace for public rules
if service.Attributes.Namespace != "" && ps.namespaceExportedDestRules[service.Attributes.Namespace] != nil {
if hostname, ok := MostSpecificHostMatch(service.Hostname,
ps.namespaceExportedDestRules[service.Attributes.Namespace].hosts); ok {
return ps.namespaceExportedDestRules[service.Attributes.Namespace].destRule[hostname].config
}
}
// if no public/private rule in calling proxy's namespace matched, and no public rule in the
// target service's namespace matched, search for any public destination rule in the config root namespace
// NOTE: This does mean that we are effectively ignoring private dest rules in the config root namespace
if ps.namespaceExportedDestRules[ps.Env.Mesh.RootNamespace] != nil {
if hostname, ok := MostSpecificHostMatch(service.Hostname,
ps.namespaceExportedDestRules[ps.Env.Mesh.RootNamespace].hosts); ok {
return ps.namespaceExportedDestRules[ps.Env.Mesh.RootNamespace].destRule[hostname].config
}
}
return nil
}
// SubsetToLabels returns the labels associated with a subset of a given service.
func (ps *PushContext) SubsetToLabels(proxy *Proxy, subsetName string, hostname host.Name) labels.Collection {
// empty subset
if subsetName == "" {
return nil
}
cfg := ps.DestinationRule(proxy, &Service{Hostname: hostname})
if cfg == nil {
return nil
}
rule := cfg.Spec.(*networking.DestinationRule)
for _, subset := range rule.Subsets {
if subset.Name == subsetName {
return []labels.Instance{subset.Labels}
}
}
return nil
}
// InitContext will initialize the data structures used for code generation.
// This should be called before starting the push, from the thread creating
// the push context.
func (ps *PushContext) InitContext(env *Environment) error {
ps.Mutex.Lock()
defer ps.Mutex.Unlock()
if ps.initDone {
return nil
}
ps.Env = env
var err error
// Must be initialized first
// as initServiceRegistry/VirtualServices/Destrules
// use the default export map
ps.initDefaultExportMaps()
if err = ps.initServiceRegistry(env); err != nil {
return err
}
if err = ps.initVirtualServices(env); err != nil {
return err
}
if err = ps.initDestinationRules(env); err != nil {
return err
}
if err = ps.initAuthorizationPolicies(env); err != nil {
rbacLog.Errorf("failed to initialize authorization policies: %v", err)
return err
}
if err = ps.initEnvoyFilters(env); err != nil {
return err
}
if features.ScopeGatewayToNamespace.Get() {
if err = ps.initGateways(env); err != nil {
return err
}
}
// Must be initialized in the end
if err = ps.initSidecarScopes(env); err != nil {
return err
}
ps.initDone = true
return nil
}
// Caches list of services in the registry, and creates a map
// of hostname to service
func (ps *PushContext) initServiceRegistry(env *Environment) error {
services, err := env.Services()
if err != nil {
return err
}
// Sort the services in order of creation.
allServices := sortServicesByCreationTime(services)
for _, s := range allServices {
ns := s.Attributes.Namespace
if len(s.Attributes.ExportTo) == 0 {
if ps.defaultServiceExportTo[visibility.Private] {
ps.privateServicesByNamespace[ns] = append(ps.privateServicesByNamespace[ns], s)
} else if ps.defaultServiceExportTo[visibility.Public] {
ps.publicServices = append(ps.publicServices, s)
}
} else {
if s.Attributes.ExportTo[visibility.Private] {
ps.privateServicesByNamespace[ns] = append(ps.privateServicesByNamespace[ns], s)
} else {
ps.publicServices = append(ps.publicServices, s)
}
}
if _, f := ps.ServiceByHostnameAndNamespace[s.Hostname]; !f {
ps.ServiceByHostnameAndNamespace[s.Hostname] = map[string]*Service{}
}
ps.ServiceByHostnameAndNamespace[s.Hostname][s.Attributes.Namespace] = s
}
ps.initServiceAccounts(env, allServices)
return nil
}
// sortServicesByCreationTime sorts the list of services in ascending order by their creation time (if available).
func sortServicesByCreationTime(services []*Service) []*Service
|
// Caches list of service accounts in the registry
func (ps *PushContext) initServiceAccounts(env *Environment, services []*Service) {
for _, svc := range services {
ps.ServiceAccounts[svc.Hostname] = map[int][]string{}
for _, port := range svc.Ports {
if port.Protocol == protocol.UDP {
continue
}
ps.ServiceAccounts[svc.Hostname][port.Port] = env.GetIstioServiceAccounts(svc, []int{port.Port})
}
}
}
// Caches list of virtual services
func (ps *PushContext) initVirtualServices(env *Environment) error {
virtualServices, err := env.List(schemas.VirtualService.Type, NamespaceAll)
if err != nil {
return err
}
// values returned from ConfigStore.List are immutable.
// Therefore, we make a copy
vservices := make([]Config, len(virtualServices))
for i := range vservices {
vservices[i] = virtualServices[i].DeepCopy()
}
totalVirtualServices.Record(float64(len(virtualServices)))
// TODO(rshriram): parse each virtual service and maintain a map of the
// virtualservice name, the list of registry hosts in the VS and non
// registry DNS names in the VS. This should cut down processing in
// the RDS code. See separateVSHostsAndServices in route/route.go
sortConfigByCreationTime(vservices)
// convert all shortnames in virtual services into FQDNs
for _, r := range vservices {
rule := r.Spec.(*networking.VirtualService)
// resolve top level hosts
for i, h := range rule.Hosts {
rule.Hosts[i] = string(ResolveShortnameToFQDN(h, r.ConfigMeta))
}
// resolve gateways to bind to
for i, g := range rule.Gateways {
if g != constants.IstioMeshGateway {
rule.Gateways[i] = resolveGatewayName(g, r.ConfigMeta)
}
}
// resolve host in http route.destination, route.mirror
for _, d := range rule.Http {
for _, m := range d.Match {
for i, g := range m.Gateways {
if g != constants.IstioMeshGateway {
m.Gateways[i] = resolveGatewayName(g, r.ConfigMeta)
}
}
}
for _, w := range d.Route {
w.Destination.Host = string(ResolveShortnameToFQDN(w.Destination.Host, r.ConfigMeta))
}
if d.Mirror != nil {
d.Mirror.Host = string(ResolveShortnameToFQDN(d.Mirror.Host, r.ConfigMeta))
}
}
// resolve host in tcp route.destination
for _, d := range rule.Tcp {
for _, m := range d.Match {
for i, g := range m.Gateways {
if g != constants.IstioMeshGateway {
m.Gateways[i] = resolveGatewayName(g, r.ConfigMeta)
}
}
}
for _, w := range d.Route {
w.Destination.Host = string(ResolveShortnameToFQDN(w.Destination.Host, r.ConfigMeta))
}
}
// resolve host in tls route.destination
for _, tls := range rule.Tls {
for _, m := range tls.Match {
for i, g := range m.Gateways {
if g != constants.IstioMeshGateway {
m.Gateways[i] = resolveGatewayName(g, r.ConfigMeta)
}
}
}
for _, w := range tls.Route {
w.Destination.Host = string(ResolveShortnameToFQDN(w.Destination.Host, r.ConfigMeta))
}
}
}
for _, virtualService := range vservices {
ns := virtualService.Namespace
rule := virtualService.Spec.(*networking.VirtualService)
if len(rule.ExportTo) == 0 {
// No exportTo in virtualService. Use the global default
// TODO: We currently only honor ., * and ~
if ps.defaultVirtualServiceExportTo[visibility.Private] {
// add to local namespace only
ps.privateVirtualServicesByNamespace[ns] = append(ps.privateVirtualServicesByNamespace[ns], virtualService)
} else if ps.defaultVirtualServiceExportTo[visibility.Public] {
ps.publicVirtualServices = append(ps.publicVirtualServices, virtualService)
}
} else {
// TODO: we currently only process the first element in the array
// and currently only consider . or * which maps to public/private
if visibility.Instance(rule.ExportTo[0]) == visibility.Private {
// add to local namespace only
ps.privateVirtualServicesByNamespace[ns] = append(ps.privateVirtualServicesByNamespace[ns], virtualService)
} else {
// ~ is not valid in the exportTo fields in virtualServices, services, destination rules
// and we currently only allow . or *. So treat this as public export
ps.publicVirtualServices = append(ps.publicVirtualServices, virtualService)
}
}
}
return nil
}
func (ps *PushContext) initDefaultExportMaps() {
ps.defaultDestinationRuleExportTo = make(map[visibility.Instance]bool)
if ps.Env.Mesh.DefaultDestinationRuleExportTo != nil {
for _, e := range ps.Env.Mesh.DefaultDestinationRuleExportTo {
ps.defaultDestinationRuleExportTo[visibility.Instance(e)] = true
}
} else {
// default to *
ps.defaultDestinationRuleExportTo[visibility.Public] = true
}
ps.defaultServiceExportTo = make(map[visibility.Instance]bool)
if ps.Env.Mesh.DefaultServiceExportTo != nil {
for _, e := range ps.Env.Mesh.DefaultServiceExportTo {
ps.defaultServiceExportTo[visibility.Instance(e)] = true
}
} else {
ps.defaultServiceExportTo[visibility.Public] = true
}
ps.defaultVirtualServiceExportTo = make(map[visibility.Instance]bool)
if ps.Env.Mesh.DefaultVirtualServiceExportTo != nil {
for _, e := range ps.Env.Mesh.DefaultVirtualServiceExportTo {
ps.defaultVirtualServiceExportTo[visibility.Instance(e)] = true
}
} else {
ps.defaultVirtualServiceExportTo[visibility.Public] = true
}
}
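// For example (hypothetical mesh config): setting defaultServiceExportTo to
// ["."] makes services namespace-private by default, while ["*"] - or leaving
// the field unset - keeps the public default used above.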
// initSidecarScopes synthesizes Sidecar CRDs into objects called
// SidecarScope. The SidecarScope object is a semi-processed view of the
// service registry, and config state associated with the sidecar CRD. The
// scope contains a set of inbound and outbound listeners, services/configs
// per listener, etc. The sidecar scopes are precomputed based on the
// Sidecar API objects in each namespace. If there is no sidecar api object
// for a namespace, a default sidecarscope is assigned to the namespace
// which enables connectivity to all services in the mesh.
//
// When proxies connect to Pilot, we identify the sidecar scope associated
// with the proxy and derive listeners/routes/clusters based on the sidecar
// scope.
func (ps *PushContext) initSidecarScopes(env *Environment) error {
sidecarConfigs, err := env.List(schemas.Sidecar.Type, NamespaceAll)
if err != nil {
return err
}
sortConfigByCreationTime(sidecarConfigs)
sidecarConfigWithSelector := make([]Config, 0)
sidecarConfigWithoutSelector := make([]Config, 0)
for _, sidecarConfig := range sidecarConfigs {
sidecar := sidecarConfig.Spec.(*networking.Sidecar)
if sidecar.WorkloadSelector != nil {
sidecarConfigWithSelector = append(sidecarConfigWithSelector, sidecarConfig)
} else {
sidecarConfigWithoutSelector = append(sidecarConfigWithoutSelector, sidecarConfig)
}
}
sidecarNum := len(sidecarConfigs)
sidecarConfigs = make([]Config, 0, sidecarNum)
sidecarConfigs = append(sidecarConfigs, sidecarConfigWithSelector...)
sidecarConfigs = append(sidecarConfigs, sidecarConfigWithoutSelector...)
ps.sidecarsByNamespace = make(map[string][]*SidecarScope, sidecarNum)
for _, sidecarConfig := range sidecarConfigs {
sidecarConfig := sidecarConfig
ps.sidecarsByNamespace[sidecarConfig.Namespace] = append(ps.sidecarsByNamespace[sidecarConfig.Namespace],
ConvertToSidecarScope(ps, &sidecarConfig, sidecarConfig.Namespace))
}
// Hold reference root namespace's sidecar config
// Root namespace can have only one sidecar config object
// Currently we expect that it has no workloadSelectors
var rootNSConfig *Config
if env.Mesh.RootNamespace != "" {
for _, sidecarConfig := range sidecarConfigs {
if sidecarConfig.Namespace == env.Mesh.RootNamespace &&
sidecarConfig.Spec.(*networking.Sidecar).WorkloadSelector == nil {
rootNSConfig = &sidecarConfig
break
}
}
}
// build sidecar scopes for other namespaces that don't have a sidecar CRD object.
// Derive the sidecar scope from the root namespace's sidecar object if present. Else fallback
// to the default Istio behavior mimicked by the DefaultSidecarScopeForNamespace function.
for _, nsMap := range ps.ServiceByHostnameAndNamespace {
for ns := range nsMap {
if len(ps.sidecarsByNamespace[ns]) == 0 {
// use the contents from the root namespace or the default if there is no root namespace
ps.sidecarsByNamespace[ns] = []*SidecarScope{ConvertToSidecarScope(ps, rootNSConfig, ns)}
}
}
}
return nil
}
// Split out of DestinationRule expensive conversions - once per push.
func (ps *PushContext) initDestinationRules(env *Environment) error {
configs, err := env.List(schemas.DestinationRule.Type, NamespaceAll)
if err != nil {
return err
}
ps.SetDestinationRules(configs)
return nil
}
// SetDestinationRules updates internal structures using a set of configs.
// Split out of DestinationRule expensive conversions, computed once per push.
// This also allows tests to inject a config without having the mock.
// This will not work properly for Sidecars, which will precompute their destination rules on init
func (ps *PushContext) SetDestinationRules(configs []Config) {
// Sort by time first. So if two destination rule have top level traffic policies
// we take the first one.
sortConfigByCreationTime(configs)
namespaceLocalDestRules := make(map[string]*processedDestRules)
namespaceExportedDestRules := make(map[string]*processedDestRules)
allExportedDestRules := &processedDestRules{
hosts: make([]host.Name, 0),
destRule: map[host.Name]*combinedDestinationRule{},
}
for i := range configs {
rule := configs[i].Spec.(*networking.DestinationRule)
rule.Host = string(ResolveShortnameToFQDN(rule.Host, configs[i].ConfigMeta))
// Store in an index for the config's namespace
// a proxy from this namespace will first look here for the destination rule for a given service
// This pool consists of both public/private destination rules.
// TODO: when exportTo is fully supported, only add the rule here if exportTo is '.'
// The global exportTo doesn't matter here (it's either . or * - both of which are applicable here)
if _, exist := namespaceLocalDestRules[configs[i].Namespace]; !exist {
namespaceLocalDestRules[configs[i].Namespace] = &processedDestRules{
hosts: make([]host.Name, 0),
destRule: map[host.Name]*combinedDestinationRule{},
}
}
// Merge this destination rule with any public/private dest rules for same host in the same namespace
// If there are no duplicates, the dest rule will be added to the list
namespaceLocalDestRules[configs[i].Namespace].hosts = ps.combineSingleDestinationRule(
namespaceLocalDestRules[configs[i].Namespace].hosts,
namespaceLocalDestRules[configs[i].Namespace].destRule,
configs[i])
isPubliclyExported := false
if len(rule.ExportTo) == 0 {
// No exportTo in destinationRule. Use the global default
// TODO: We currently only honor ., * and ~
if ps.defaultDestinationRuleExportTo[visibility.Public] {
isPubliclyExported = true
}
} else {
// TODO: we currently only process the first element in the array
// and currently only consider . or * which maps to public/private
if visibility.Instance(rule.ExportTo[0]) != visibility.Private {
// ~ is not valid in the exportTo fields in virtualServices, services, destination rules
// and we currently only allow . or *. So treat this as public export
isPubliclyExported = true
}
}
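// Illustrative summary of the rules above (values are examples, not from
// this code): exportTo: ["."] keeps the rule private to its own namespace
// (only the namespaceLocal index above), while exportTo: ["*"] - or an
// empty exportTo under a Public default - also lands it in the exported
// indexes below.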
if isPubliclyExported {
if _, exist := namespaceExportedDestRules[configs[i].Namespace]; !exist {
namespaceExportedDestRules[configs[i].Namespace] = &processedDestRules{
hosts: make([]host.Name, 0),
destRule: map[host.Name]*combinedDestinationRule{},
}
}
// Merge this destination rule with any public dest rule for the same host in the same namespace
// If there are no duplicates, the dest rule will be added to the list
namespaceExportedDestRules[configs[i].Namespace].hosts = ps.combineSingleDestinationRule(
namespaceExportedDestRules[configs[i].Namespace].hosts,
namespaceExportedDestRules[configs[i].Namespace].destRule,
configs[i])
// Merge this destination rule with any public dest rule for the same host
// across all namespaces. If there are no duplicates, the dest rule will be added to the list
allExportedDestRules.hosts = ps.combineSingleDestinationRule(
allExportedDestRules.hosts, allExportedDestRules.destRule, configs[i])
}
}
// presort it so that we don't sort it for each DestinationRule call.
// sort.Sort for Hostnames will automatically sort from the most specific to least specific
for ns := range namespaceLocalDestRules {
sort.Sort(host.Names(namespaceLocalDestRules[ns].hosts))
}
for ns := range namespaceExportedDestRules {
sort.Sort(host.Names(namespaceExportedDestRules[ns].hosts))
}
sort.Sort(host.Names(allExportedDestRules.hosts))
ps.namespaceLocalDestRules = namespaceLocalDestRules
ps.namespaceExportedDestRules = namespaceExportedDestRules
ps.allExportedDestRules = allExportedDestRules
}
func (ps *PushContext) initAuthorizationPolicies(env *Environment) error {
var err error
if ps.AuthzPolicies, err = NewAuthzPolicies(env); err != nil {
rbacLog.Errorf("failed to initialize authorization policies: %v", err)
return err
}
return nil
}
// precomputes Envoy filters per namespace
func (ps *PushContext) initEnvoyFilters(env *Environment) error {
envoyFilterConfigs, err := env.List(schemas.EnvoyFilter.Type, NamespaceAll)
if err != nil {
return err
}
sortConfigByCreationTime(envoyFilterConfigs)
ps.envoyFiltersByNamespace = make(map[string][]*EnvoyFilterWrapper)
for _, envoyFilterConfig := range envoyFilterConfigs {
efw := convertToEnvoyFilterWrapper(&envoyFilterConfig)
if _, exists := ps.envoyFiltersByNamespace[envoyFilterConfig.Namespace]; !exists {
ps.envoyFiltersByNamespace[envoyFilterConfig.Namespace] = make([]*EnvoyFilterWrapper, 0)
}
ps.envoyFiltersByNamespace[envoyFilterConfig.Namespace] = append(ps.envoyFiltersByNamespace[envoyFilterConfig.Namespace], efw)
}
return nil
}
func (ps *PushContext) EnvoyFilters(proxy *Proxy) []*EnvoyFilterWrapper {
// this should never happen
if proxy == nil {
return nil
}
out := make([]*EnvoyFilterWrapper, 0)
// EnvoyFilters supports inheritance (global ones plus namespace local ones).
// First get all the filter configs from the config root namespace
// and then add the ones from proxy's own namespace
if ps.Env.Mesh.RootNamespace != "" && len(ps.envoyFiltersByNamespace[ps.Env.Mesh.RootNamespace]) > 0 {
// if there is no workload selector, the config applies to all workloads
// if there is a workload selector, check for matching workload labels
for _, efw := range ps.envoyFiltersByNamespace[ps.Env.Mesh.RootNamespace] {
if efw.workloadSelector == nil || proxy.WorkloadLabels.IsSupersetOf(efw.workloadSelector) {
out = append(out, efw)
}
}
}
for _, efw := range ps.envoyFiltersByNamespace[proxy.ConfigNamespace] {
if efw.workloadSelector == nil || proxy.WorkloadLabels.IsSupersetOf(efw.workloadSelector) {
out = append(out, efw)
}
}
return out
}
// precomputes gateways per namespace
func (ps *PushContext) initGateways(env *Environment) error {
gatewayConfigs, err := env.List(schemas.Gateway.Type, NamespaceAll)
if err != nil {
return err
}
sortConfigByCreationTime(gatewayConfigs)
ps.gatewaysByNamespace = make(map[string][]Config)
for _, gatewayConfig := range gatewayConfigs {
if _, exists := ps.gatewaysByNamespace[gatewayConfig.Namespace]; !exists {
ps.gatewaysByNamespace[gatewayConfig.Namespace] = make([]Config, 0)
}
ps.gatewaysByNamespace[gatewayConfig.Namespace] = append(ps.gatewaysByNamespace[gatewayConfig.Namespace], gatewayConfig)
}
return nil
}
func (ps *PushContext) Gateways(proxy *Proxy) []Config {
// this should never happen
if proxy == nil {
return nil
}
out := make([]Config, 0)
for _, cfg := range ps.gatewaysByNamespace[proxy.ConfigNamespace] {
gw := cfg.Spec.(*networking.Gateway)
if gw.GetSelector() == nil {
// no selector. Applies to all workloads asking for the gateway
out = append(out, cfg)
} else {
gatewaySelector := labels.Instance(gw.GetSelector())
if proxy.WorkloadLabels.IsSupersetOf(gatewaySelector) {
out = append(out, cfg)
}
}
}
return out
}
// sortServicesByCreationTime sorts the services in ascending order by their
// creation time (if available).
func sortServicesByCreationTime(services []*Service) []*Service {
sort.SliceStable(services, func(i, j int) bool {
return services[i].CreationTime.Before(services[j].CreationTime)
})
return services
}
require-umd-inject.js
const config = require('../config/tamper');
const generateScript = () => {
return config.require?.map(v => `<script src="${v}"></script>`).join('\n') ?? '';
};
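// Sketch of the expected wiring (the config value below is hypothetical):
// if ../config/tamper exports { require: ['https://example.com/vendor.umd.js'] },
// generateScript() returns '<script src="https://example.com/vendor.umd.js"></script>',
// which transformIndexHtml() splices in place of the
// <script class="inject-block"></script> placeholder in index.html.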
const htmlPlugin = () => {
return {
name: 'html-require-inject',
transformIndexHtml(html) {
return html.replace(/<script class="inject-block"><\/script>/, generateScript());
},
};
};
module.exports = htmlPlugin;
resource_kubernetes_horizontal_pod_autoscaler.go
package kubernetes
import (
"fmt"
"log"
"github.com/hashicorp/terraform/helper/schema"
"k8s.io/apimachinery/pkg/api/errors"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
pkgApi "k8s.io/apimachinery/pkg/types"
api "k8s.io/kubernetes/pkg/apis/autoscaling/v1"
kubernetes "k8s.io/kubernetes/pkg/client/clientset_generated/clientset"
)
func resourceKubernetesHorizontalPodAutoscaler() *schema.Resource {
return &schema.Resource{
Create: resourceKubernetesHorizontalPodAutoscalerCreate,
Read: resourceKubernetesHorizontalPodAutoscalerRead,
Exists: resourceKubernetesHorizontalPodAutoscalerExists,
Update: resourceKubernetesHorizontalPodAutoscalerUpdate,
Delete: resourceKubernetesHorizontalPodAutoscalerDelete,
Importer: &schema.ResourceImporter{
State: schema.ImportStatePassthrough,
},
Schema: map[string]*schema.Schema{
"metadata": namespacedMetadataSchema("horizontal pod autoscaler", true),
"spec": {
Type: schema.TypeList,
Description: "Behaviour of the autoscaler. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#spec-and-status.",
Required: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"max_replicas": {
Type: schema.TypeInt,
Description: "Upper limit for the number of pods that can be set by the autoscaler.",
Required: true,
},
"min_replicas": {
Type: schema.TypeInt,
Description: "Lower limit for the number of pods that can be set by the autoscaler, defaults to `1`.",
Optional: true,
Default: 1,
},
"scale_target_ref": {
Type: schema.TypeList,
Description: "Reference to scaled resource. e.g. Replication Controller",
Required: true,
MaxItems: 1,
Elem: &schema.Resource{
Schema: map[string]*schema.Schema{
"api_version": {
Type: schema.TypeString,
Description: "API version of the referent",
Optional: true,
},
"kind": {
Type: schema.TypeString,
Description: "Kind of the referent. e.g. `ReplicationController`. More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#types-kinds",
Required: true,
},
"name": {
Type: schema.TypeString,
Description: "Name of the referent. More info: http://kubernetes.io/docs/user-guide/identifiers#names",
Required: true,
},
},
},
},
"target_cpu_utilization_percentage": {
Type: schema.TypeInt,
Description: "Target average CPU utilization (represented as a percentage of requested CPU) over all the pods. If not specified the default autoscaling policy will be used.",
Optional: true,
Computed: true,
},
},
},
},
},
}
}
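// A minimal Terraform configuration exercising the schema above (all names
// and values are illustrative only):
//
//   resource "kubernetes_horizontal_pod_autoscaler" "example" {
//     metadata {
//       name = "example-hpa"
//     }
//     spec {
//       min_replicas = 2
//       max_replicas = 10
//       scale_target_ref {
//         kind = "ReplicationController"
//         name = "example-rc"
//       }
//       target_cpu_utilization_percentage = 80
//     }
//   }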
func resourceKubernetesHorizontalPodAutoscalerCreate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*kubernetes.Clientset)
metadata := expandMetadata(d.Get("metadata").([]interface{}))
svc := api.HorizontalPodAutoscaler{
ObjectMeta: metadata,
Spec: expandHorizontalPodAutoscalerSpec(d.Get("spec").([]interface{})),
}
log.Printf("[INFO] Creating new horizontal pod autoscaler: %#v", svc)
out, err := conn.AutoscalingV1().HorizontalPodAutoscalers(metadata.Namespace).Create(&svc)
if err != nil {
return err
}
log.Printf("[INFO] Submitted new horizontal pod autoscaler: %#v", out)
d.SetId(buildId(out.ObjectMeta))
return resourceKubernetesHorizontalPodAutoscalerRead(d, meta)
}
func resourceKubernetesHorizontalPodAutoscalerRead(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*kubernetes.Clientset)
namespace, name := idParts(d.Id())
log.Printf("[INFO] Reading horizontal pod autoscaler %s", name)
svc, err := conn.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, meta_v1.GetOptions{})
if err != nil {
log.Printf("[DEBUG] Received error: %#v", err)
return err
}
log.Printf("[INFO] Received horizontal pod autoscaler: %#v", svc)
err = d.Set("metadata", flattenMetadata(svc.ObjectMeta))
if err != nil {
return err
}
flattened := flattenHorizontalPodAutoscalerSpec(svc.Spec)
log.Printf("[DEBUG] Flattened horizontal pod autoscaler spec: %#v", flattened)
err = d.Set("spec", flattened)
if err != nil {
return err
}
return nil
}
func resourceKubernetesHorizontalPodAutoscalerUpdate(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*kubernetes.Clientset)
namespace, name := idParts(d.Id())
ops := patchMetadata("metadata.0.", "/metadata/", d)
if d.HasChange("spec") {
diffOps := patchHorizontalPodAutoscalerSpec("spec.0.", "/spec", d)
ops = append(ops, diffOps...)
}
data, err := ops.MarshalJSON()
if err != nil {
return fmt.Errorf("Failed to marshal update operations: %s", err)
}
log.Printf("[INFO] Updating horizontal pod autoscaler %q: %v", name, string(data))
out, err := conn.AutoscalingV1().HorizontalPodAutoscalers(namespace).Patch(name, pkgApi.JSONPatchType, data)
if err != nil {
return fmt.Errorf("Failed to update horizontal pod autoscaler: %s", err)
}
log.Printf("[INFO] Submitted updated horizontal pod autoscaler: %#v", out)
d.SetId(buildId(out.ObjectMeta))
return resourceKubernetesHorizontalPodAutoscalerRead(d, meta)
}
func resourceKubernetesHorizontalPodAutoscalerDelete(d *schema.ResourceData, meta interface{}) error {
conn := meta.(*kubernetes.Clientset)
namespace, name := idParts(d.Id())
log.Printf("[INFO] Deleting horizontal pod autoscaler: %#v", name)
err := conn.AutoscalingV1().HorizontalPodAutoscalers(namespace).Delete(name, &meta_v1.DeleteOptions{})
if err != nil {
return err
}
log.Printf("[INFO] Horizontal Pod Autoscaler %s deleted", name)
d.SetId("")
return nil
}
func resourceKubernetesHorizontalPodAutoscalerExists(d *schema.ResourceData, meta interface{}) (bool, error) {
conn := meta.(*kubernetes.Clientset)
namespace, name := idParts(d.Id())
log.Printf("[INFO] Checking horizontal pod autoscaler %s", name)
_, err := conn.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, meta_v1.GetOptions{})
if err != nil {
if statusErr, ok := err.(*errors.StatusError); ok && statusErr.ErrStatus.Code == 404 {
return false, nil
}
log.Printf("[DEBUG] Received error: %#v", err)
}
return true, err
}
notebook.rs
extern crate gtk;
use gtk::prelude::*;
use gtk::{IconSize, Orientation, ReliefStyle, Widget};
struct Notebook {
notebook: gtk::Notebook,
tabs: Vec<gtk::Box>
}
impl Notebook {
fn new() -> Notebook {
Notebook {
notebook: gtk::Notebook::new(),
tabs: Vec::new()
}
}
fn create_tab(&mut self, title: &str, widget: Widget) -> u32 {
let close_image = gtk::Image::new_from_icon_name("window-close",
IconSize::Button.into());
let button = gtk::Button::new();
let label = gtk::Label::new(Some(title));
let tab = gtk::Box::new(Orientation::Horizontal, 0);
button.set_relief(ReliefStyle::None);
ButtonExt::set_focus_on_click(&button, false);
button.add(&close_image);
tab.pack_start(&label, false, false, 0);
tab.pack_start(&button, false, false, 0);
tab.show_all();
let index = self.notebook.append_page(&widget, Some(&tab));
let notebook_clone = self.notebook.clone();
button.connect_clicked(move |_| {
let index = notebook_clone.page_num(&widget).unwrap();
notebook_clone.remove_page(Some(index));
});
self.tabs.push(tab);
index
}
}
fn main() {
if gtk::init().is_err() {
println!("Failed to initialize GTK.");
return;
}
let window = gtk::Window::new(gtk::WindowType::Toplevel);
window.set_title("Notebook");
window.set_position(gtk::WindowPosition::Center);
window.set_default_size(640, 480);
window.connect_delete_event(|_, _| {
gtk::main_quit();
Inhibit(false)
});
let mut notebook = Notebook::new();
for i in 1..4 {
let title = format!("sheet {}", i);
let label = gtk::Label::new(Some(&title));
notebook.create_tab(&title, label.upcast());
}
window.add(&notebook.notebook);
window.show_all();
gtk::main();
}
0003_auto_20210108_1000.py
# Generated by Django 3.1.5 on 2021-01-08 10:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('data', '0002_remove_classifydata_pending_tag'),
]
operations = [
migrations.CreateModel(
name='ClassifyTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag', models.CharField(default='', help_text='Tag category', max_length=30, verbose_name='tag category')),
],
),
migrations.AddField(
model_name='classifydata',
name='status',
field=models.BooleanField(default=False, help_text='Whether completed', verbose_name='completed'),
),
]
feik.js
const chai = require('chai');
const expect = chai.expect;
const should = chai.should();
const Fk = require('../');
describe('Test API', function() {
beforeEach(function() {
this.feik = new Fk();
});
it('Expect the feik instance to be an object', function() {
expect(this.feik).to.be.a('object');
});
it('Expect the feik instance has the "name" namespace', function() {
expect(this.feik.name).to.be.a('object');
});
it('Expect the feik instance has the "internet" namespace', function() {
expect(this.feik.internet).to.be.a('object');
});
it('Expect the feik instance has the "date" namespace', function() {
expect(this.feik.date).to.be.a('object');
});
it('Expect the feik instance has the "address" namespace', function() {
expect(this.feik.address).to.be.a('object');
});
it('Expect the "name" namespace has the "firstName" method', function () {
expect(this.feik.name.firstName).to.be.a('function');
});
it('Expect the "name" namespace has the "lastName" method', function () {
expect(this.feik.name.lastName).to.be.a('function');
});
it('Expect the "internet" namespace has the "email" method', function () {
expect(this.feik.internet.email).to.be.a('function');
});
it('Expect the "date" namespace has the "day" method', function () {
expect(this.feik.date.day).to.be.a('function');
});
it('Expect the "date" namespace has the "month" method', function () {
expect(this.feik.date.month).to.be.a('function');
});
it('Expect the "address" namespace has the "country" method', function () {
expect(this.feik.address.country).to.be.a('function');
});
});
.prettierrc.js
module.exports = {
singleQuote: true,
semi: true
}
test_create_records.py
import unittest
import time
from vika import Vika
from . import TEST_TABLE, TEST_API_BASE, TEST_API_TOKEN
class TestCreateRecords(unittest.TestCase):
def setUp(self):
vika = Vika(TEST_API_TOKEN)
vika.set_api_base(TEST_API_BASE)
self.dst = vika.datasheet(TEST_TABLE)
def test_record_create(self):
time.sleep(1)
record = self.dst.records.create({
"title": "高等数学"
})
time.sleep(1)
self.assertIsNotNone(record._id)
records = self.dst.records.bulk_create([
{
"title": "Discrete Mathematics"
},
{
"title": "Linear Algebra"
}
])
self.created_records = records + [record]
for rec in records:
self.assertIsNotNone(rec._id)
def tearDown(self):
self.dst.delete_records(
self.created_records)
if __name__ == '__main__':
unittest.main()
dfa.rs
// Copyright 2014-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
/*!
The DFA matching engine.
A DFA provides faster matching because the engine is in exactly one state at
any point in time. In the NFA, there may be multiple active states, and
considerable CPU cycles are spent shuffling them around. In finite automata
speak, the DFA follows epsilon transitions in the regex far less than the NFA.
A DFA is a classic trade off between time and space. The NFA is slower, but
its memory requirements are typically small and predictable. The DFA is faster,
but given the right regex and the right input, the number of states in the
DFA can grow exponentially. To mitigate this space problem, we do two things:
1. We implement an *online* DFA. That is, the DFA is constructed from the NFA
during a search. When a new state is computed, it is stored in a cache so
that it may be reused. An important consequence of this implementation
is that states that are never reached for a particular input are never
computed. (This is impossible in an "offline" DFA which needs to compute
all possible states up front.)
2. If the cache gets too big, we wipe it and continue matching.
In pathological cases, a new state can be created for every byte of input.
(e.g., The regex `(a|b)*a(a|b){20}` on a long sequence of a's and b's.)
In this case, performance regresses to slightly slower than the full NFA
simulation, in large part because the cache becomes useless. If the cache
is wiped too frequently, the DFA quits and control falls back to one of the
NFA simulations.
Because of the "lazy" nature of this DFA, the inner matching loop is
considerably more complex than one might expect out of a DFA. A number of
tricks are employed to make it fast. Tread carefully.
N.B. While this implementation is heavily commented, Russ Cox's series of
articles on regexes is strongly recommended: https://swtch.com/~rsc/regexp/
(As is the DFA implementation in RE2, which heavily influenced this
implementation.)
*/
use std::string::String;
use std::string::ToString;
use std::vec::Vec;
use std::borrow::ToOwned;
use std::collections::HashMap;
use std::fmt;
use std::iter::repeat;
use std::mem;
use std::sync::Arc;
use exec::ProgramCache;
use prog::{Inst, Program};
use sparse::SparseSet;
/// Return true if and only if the given program can be executed by a DFA.
///
/// Generally, a DFA is always possible. A pathological case where it is not
/// possible is if the number of NFA states exceeds `u32::MAX`, in which case,
/// this function will return false.
///
/// This function will also return false if the given program has any Unicode
/// instructions (Char or Ranges) since the DFA operates on bytes only.
pub fn can_exec(insts: &Program) -> bool {
use prog::Inst::*;
// If for some reason we manage to allocate a regex program with more
// than i32::MAX instructions, then we can't execute the DFA because we
// use 32 bit instruction pointer deltas for memory savings.
// If i32::MAX is the largest positive delta,
// then -i32::MAX == i32::MIN + 1 is the largest negative delta,
// and we are OK to use 32 bits.
if insts.dfa_size_limit == 0 || insts.len() > ::std::i32::MAX as usize {
return false;
}
for inst in insts {
match *inst {
Char(_) | Ranges(_) => return false,
EmptyLook(_) | Match(_) | Save(_) | Split(_) | Bytes(_) => {}
}
}
true
}
/// A reusable cache of DFA states.
///
/// This cache is reused between multiple invocations of the same regex
/// program. (It is not shared simultaneously between threads. If there is
/// contention, then new caches are created.)
#[derive(Debug)]
pub struct Cache {
/// Group persistent DFA related cache state together. The sparse sets
/// listed below are used as scratch space while computing uncached states.
inner: CacheInner,
/// qcur and qnext are ordered sets with constant time
/// addition/membership/clearing-whole-set and linear time iteration. They
/// are used to manage the sets of NFA states in DFA states when computing
/// cached DFA states. In particular, the order of the NFA states matters
/// for leftmost-first style matching. Namely, when computing a cached
/// state, the set of NFA states stops growing as soon as the first Match
/// instruction is observed.
qcur: SparseSet,
qnext: SparseSet,
}
/// `CacheInner` is logically just a part of Cache, but groups together fields
/// that aren't passed as function parameters throughout search. (This split
/// is mostly an artifact of the borrow checker. It is happily paid.)
#[derive(Debug)]
struct CacheInner {
/// A cache of pre-compiled DFA states, keyed by the set of NFA states
/// and the set of empty-width flags set at the byte in the input when the
/// state was observed.
///
/// A StatePtr is effectively a `*State`, but to avoid various inconvenient
/// things, we just pass indexes around manually. The performance impact of
/// this is probably an instruction or two in the inner loop. However, on
/// 64 bit, each StatePtr is half the size of a *State.
compiled: StateMap,
/// The transition table.
///
/// The transition table is laid out in row-major order, where states are
/// rows and the transitions for each state are columns. At a high level,
/// given state `s` and byte `b`, the next state can be found at index
/// `s * 256 + b`.
///
/// This is, of course, a lie. A StatePtr is actually a pointer to the
/// *start* of a row in this table. When indexing in the DFA's inner loop,
/// this removes the need to multiply the StatePtr by the stride. Yes, it
/// matters. This reduces the number of states we can store, but: the
/// stride is rarely 256 since we define transitions in terms of
/// *equivalence classes* of bytes. Each class corresponds to a set of
/// bytes that never discriminate a distinct path through the DFA from each
/// other.
trans: Transitions,
/// A set of cached start states, which are limited to the number of
/// permutations of flags set just before the initial byte of input. (The
/// index into this vec is an `EmptyFlags`.)
///
/// N.B. A start state can be "dead" (i.e., no possible match), so we
/// represent it with a StatePtr.
start_states: Vec<StatePtr>,
/// Stack scratch space used to follow epsilon transitions in the NFA.
/// (This permits us to avoid recursion.)
///
/// The maximum stack size is the number of NFA states.
stack: Vec<InstPtr>,
/// The total number of times this cache has been flushed by the DFA
/// because of space constraints.
flush_count: u64,
/// The total heap size of the DFA's cache. We use this to determine when
/// we should flush the cache.
size: usize,
/// Scratch space used when building instruction pointer lists for new
/// states. This helps amortize allocation.
insts_scratch_space: Vec<u8>,
}
/// The transition table.
///
/// It is laid out in row-major order, with states as rows and byte class
/// transitions as columns.
///
/// The transition table is responsible for producing valid `StatePtrs`. A
/// `StatePtr` points to the start of a particular row in this table. When
/// indexing to find the next state this allows us to avoid a multiplication
/// when computing an index into the table.
#[derive(Clone)]
struct Transitions {
/// The table.
table: Vec<StatePtr>,
/// The stride.
num_byte_classes: usize,
}
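// Illustration of the layout (numbers are examples, not from the source):
// with num_byte_classes = 4, row 0 occupies table[0..4] and row 1 occupies
// table[4..8]. A StatePtr is the index of the start of its row, so for
// state pointer `si` and byte class `cls` the next state is read at
// table[si as usize + cls], with no multiplication by the stride in the
// inner loop.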
/// Fsm encapsulates the actual execution of the DFA.
#[derive(Debug)]
pub struct Fsm<'a> {
/// prog contains the NFA instruction opcodes. DFA execution uses either
/// the `dfa` instructions or the `dfa_reverse` instructions from
/// `exec::ExecReadOnly`. (It never uses `ExecReadOnly.nfa`, which may have
/// Unicode opcodes that cannot be executed by the DFA.)
prog: &'a Program,
/// The start state. We record it here because the pointer may change
/// when the cache is wiped.
start: StatePtr,
/// The current position in the input.
at: usize,
/// Should we quit after seeing the first match? e.g., When the caller
/// uses `is_match` or `shortest_match`.
quit_after_match: bool,
/// The last state that matched.
///
/// When no match has occurred, this is set to STATE_UNKNOWN.
///
/// This is only useful when matching regex sets. The last match state
/// is useful because it contains all of the match instructions seen,
/// thereby allowing us to enumerate which regexes in the set matched.
last_match_si: StatePtr,
/// The input position of the last cache flush. We use this to determine
/// if we're thrashing in the cache too often. If so, the DFA quits so
/// that we can fall back to the NFA algorithm.
last_cache_flush: usize,
/// All cached DFA information that is persisted between searches.
cache: &'a mut CacheInner,
}
/// The result of running the DFA.
///
/// Generally, the result is either a match or not a match, but sometimes the
/// DFA runs too slowly because the cache size is too small. In that case, it
/// gives up with the intent of falling back to the NFA algorithm.
///
/// The DFA can also give up if it runs out of room to create new states, or if
/// it sees non-ASCII bytes in the presence of a Unicode word boundary.
#[derive(Clone, Debug)]
pub enum Result<T> {
Match(T),
NoMatch(usize),
Quit,
}
impl<T> Result<T> {
/// Returns true if this result corresponds to a match.
pub fn is_match(&self) -> bool {
match *self {
Result::Match(_) => true,
Result::NoMatch(_) | Result::Quit => false,
}
}
/// Maps the given function onto T and returns the result.
///
/// If this isn't a match, then this is a no-op.
pub fn map<U, F: FnMut(T) -> U>(self, mut f: F) -> Result<U> {
match self {
Result::Match(t) => Result::Match(f(t)),
Result::NoMatch(x) => Result::NoMatch(x),
Result::Quit => Result::Quit,
}
}
/// Sets the non-match position.
///
/// If this isn't a non-match, then this is a no-op.
fn set_non_match(self, at: usize) -> Result<T> {
match self {
Result::NoMatch(_) => Result::NoMatch(at),
r => r,
}
}
}
/// `State` is a DFA state. It contains an ordered set of NFA states (not
/// necessarily complete) and a smattering of flags.
///
/// The flags are packed into the first byte of data.
///
/// States don't carry their transitions. Instead, transitions are stored in
/// a single row-major table.
///
/// Delta encoding is used to store the instruction pointers.
/// The first instruction pointer is stored directly starting
/// at data[1], and each following pointer is stored as an offset
/// to the previous one. If a delta is in the range -127..127,
/// it is packed into a single byte; Otherwise the byte 128 (-128 as an i8)
/// is coded as a flag, followed by 4 bytes encoding the delta.
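///
/// A hypothetical illustration (not taken from the tests): the flag byte
/// 0x01 followed by the pointers {5, 6, 200} encodes as
///
///   data = [0x01, 5, 1, 128, <194 as four bytes>]
///
/// since 5 is the first pointer (delta from 0), 6 - 5 = 1 fits in a single
/// byte, and 200 - 6 = 194 falls outside -127..127, so it is written as the
/// flag byte 128 followed by a 4-byte delta.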
#[derive(Clone, Eq, Hash, PartialEq)]
struct State {
data: Arc<[u8]>,
}
/// `InstPtr` is a 32 bit pointer into a sequence of opcodes (i.e., it indexes
/// an NFA state).
///
/// Throughout this library, this is usually set to `usize`, but we force a
/// `u32` here for the DFA to save on space.
type InstPtr = u32;
/// Adds ip to data using delta encoding with respect to prev.
///
/// After completion, `data` will contain `ip` and `prev` will be set to `ip`.
fn push_inst_ptr(data: &mut Vec<u8>, prev: &mut InstPtr, ip: InstPtr) {
let delta = (ip as i32) - (*prev as i32);
write_vari32(data, delta);
*prev = ip;
}
struct InstPtrs<'a> {
base: usize,
data: &'a [u8],
}
impl<'a> Iterator for InstPtrs<'a> {
type Item = usize;
fn next(&mut self) -> Option<usize> {
if self.data.is_empty() {
return None;
}
let (delta, nread) = read_vari32(self.data);
let base = self.base as i32 + delta;
debug_assert!(base >= 0);
debug_assert!(nread > 0);
self.data = &self.data[nread..];
self.base = base as usize;
Some(self.base)
}
}
impl State {
fn flags(&self) -> StateFlags {
StateFlags(self.data[0])
}
fn inst_ptrs(&self) -> InstPtrs {
InstPtrs {
base: 0,
data: &self.data[1..],
}
}
}
/// `StatePtr` is a 32 bit pointer to the start of a row in the transition
/// table.
///
/// It has many special values. There are two types of special values:
/// sentinels and flags.
///
/// Sentinels correspond to special states that carry some kind of
/// significance. There are three such states: unknown, dead and quit states.
///
/// Unknown states are states that haven't been computed yet. They indicate
/// that a transition should be filled in that points to either an existing
/// cached state or a new state altogether. In general, an unknown state means
/// "follow the NFA's epsilon transitions."
///
/// Dead states are states that can never lead to a match, no matter what
/// subsequent input is observed. This means that the DFA should quit
/// immediately and return the longest match it has found thus far.
///
/// Quit states are states that imply the DFA is not capable of matching the
/// regex correctly. Currently, this is only used when a Unicode word boundary
/// exists in the regex *and* a non-ASCII byte is observed.
///
/// The other type of state pointer is a state pointer with special flag bits.
/// There are two flags: a start flag and a match flag. The lower bits of both
/// kinds always contain a "valid" `StatePtr` (indicated by the `STATE_MAX`
/// mask).
///
/// The start flag means that the state is a start state, and therefore may be
/// subject to special prefix scanning optimizations.
///
/// The match flag means that the state is a match state, and therefore the
/// current position in the input (while searching) should be recorded.
///
/// The above exists mostly in the service of making the inner loop fast.
/// In particular, the inner *inner* loop looks something like this:
///
/// ```ignore
/// while state <= STATE_MAX and i < len(text):
/// state = state.next[i]
/// ```
///
/// This is nice because it lets us execute a lazy DFA as if it were an
/// entirely offline DFA (i.e., with very few instructions). The loop will
/// quit only when we need to examine a case that needs special attention.
type StatePtr = u32;
/// An unknown state means that the state has not been computed yet, and that
/// the only way to progress is to compute it.
const STATE_UNKNOWN: StatePtr = 1<<31;
/// A dead state means that the state has been computed and it is known that
/// once it is entered, no future match can ever occur.
const STATE_DEAD: StatePtr = STATE_UNKNOWN + 1;
/// A quit state means that the DFA came across some input that it doesn't
/// know how to process correctly. The DFA should quit and another matching
/// engine should be run in its place.
const STATE_QUIT: StatePtr = STATE_DEAD + 1;
/// A start state is a state that the DFA can start in.
///
/// Note that start states have their lower bits set to a state pointer.
const STATE_START: StatePtr = 1<<30;
/// A match state means that the regex has successfully matched.
///
/// Note that match states have their lower bits set to a state pointer.
const STATE_MATCH: StatePtr = 1<<29;
/// The maximum state pointer. This is useful to mask out the "valid" state
/// pointer from a state with the "start" or "match" bits set.
///
/// It doesn't make sense to use this with unknown, dead or quit state
/// pointers, since those pointers are sentinels and never have their lower
/// bits set to anything meaningful.
const STATE_MAX: StatePtr = STATE_MATCH - 1;
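// An illustration of the flag scheme (numbers are examples, not from the
// source): a match state whose transition row starts at index 512 is stored
// as `512 | STATE_MATCH`; `si & STATE_MAX` recovers the raw row pointer and
// `si & STATE_MATCH > 0` tests the flag, which is exactly the dance the
// inner loop of `exec_at` performs.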
/// Byte is a u8 in spirit, but a u16 in practice so that we can represent the
/// special EOF sentinel value.
#[derive(Copy, Clone, Debug)]
struct Byte(u16);
/// A set of flags for zero-width assertions.
#[derive(Clone, Copy, Eq, Debug, Default, Hash, PartialEq)]
struct EmptyFlags {
start: bool,
end: bool,
start_line: bool,
end_line: bool,
word_boundary: bool,
not_word_boundary: bool,
}
/// A set of flags describing various configurations of a DFA state. This is
/// represented by a `u8` so that it is compact.
#[derive(Clone, Copy, Eq, Default, Hash, PartialEq)]
struct StateFlags(u8);
impl Cache {
/// Create new empty cache for the DFA engine.
pub fn new(prog: &Program) -> Self {
// We add 1 to account for the special EOF byte.
let num_byte_classes = (prog.byte_classes[255] as usize + 1) + 1;
let starts = vec![STATE_UNKNOWN; 256];
let mut cache = Cache {
inner: CacheInner {
compiled: StateMap::new(num_byte_classes),
trans: Transitions::new(num_byte_classes),
start_states: starts,
stack: vec![],
flush_count: 0,
size: 0,
insts_scratch_space: vec![],
},
qcur: SparseSet::new(prog.insts.len()),
qnext: SparseSet::new(prog.insts.len()),
};
cache.inner.reset_size();
cache
}
}
impl CacheInner {
/// Resets the cache size to account for fixed costs, such as the program
/// and stack sizes.
fn reset_size(&mut self) {
self.size =
(self.start_states.len() * mem::size_of::<StatePtr>())
+ (self.stack.len() * mem::size_of::<InstPtr>());
}
}
impl<'a> Fsm<'a> {
#[inline(always)] // reduces constant overhead
pub fn forward(
prog: &'a Program,
cache: &ProgramCache,
quit_after_match: bool,
text: &[u8],
at: usize,
) -> Result<usize> {
let mut cache = cache.borrow_mut();
let cache = &mut cache.dfa;
let mut dfa = Fsm {
prog: prog,
start: 0, // filled in below
at: at,
quit_after_match: quit_after_match,
last_match_si: STATE_UNKNOWN,
last_cache_flush: at,
cache: &mut cache.inner,
};
let (empty_flags, state_flags) = dfa.start_flags(text, at);
dfa.start = match dfa.start_state(
&mut cache.qcur,
empty_flags,
state_flags,
) {
None => return Result::Quit,
Some(STATE_DEAD) => return Result::NoMatch(at),
Some(si) => si,
};
debug_assert!(dfa.start != STATE_UNKNOWN);
dfa.exec_at(&mut cache.qcur, &mut cache.qnext, text)
}
#[inline(always)] // reduces constant overhead
pub fn reverse(
prog: &'a Program,
cache: &ProgramCache,
quit_after_match: bool,
text: &[u8],
at: usize,
) -> Result<usize> {
let mut cache = cache.borrow_mut();
let cache = &mut cache.dfa_reverse;
let mut dfa = Fsm {
prog: prog,
start: 0, // filled in below
at: at,
quit_after_match: quit_after_match,
last_match_si: STATE_UNKNOWN,
last_cache_flush: at,
cache: &mut cache.inner,
};
let (empty_flags, state_flags) = dfa.start_flags_reverse(text, at);
dfa.start = match dfa.start_state(
&mut cache.qcur,
empty_flags,
state_flags,
) {
None => return Result::Quit,
Some(STATE_DEAD) => return Result::NoMatch(at),
Some(si) => si,
};
debug_assert!(dfa.start != STATE_UNKNOWN);
dfa.exec_at_reverse(&mut cache.qcur, &mut cache.qnext, text)
}
#[inline(always)] // reduces constant overhead
pub fn forward_many(
prog: &'a Program,
cache: &ProgramCache,
matches: &mut [bool],
text: &[u8],
at: usize,
) -> Result<usize> {
debug_assert!(matches.len() == prog.matches.len());
let mut cache = cache.borrow_mut();
let cache = &mut cache.dfa;
let mut dfa = Fsm {
prog: prog,
start: 0, // filled in below
at: at,
quit_after_match: false,
last_match_si: STATE_UNKNOWN,
last_cache_flush: at,
cache: &mut cache.inner,
};
let (empty_flags, state_flags) = dfa.start_flags(text, at);
dfa.start = match dfa.start_state(
&mut cache.qcur,
empty_flags,
state_flags,
) {
None => return Result::Quit,
Some(STATE_DEAD) => return Result::NoMatch(at),
Some(si) => si,
};
debug_assert!(dfa.start != STATE_UNKNOWN);
let result = dfa.exec_at(&mut cache.qcur, &mut cache.qnext, text);
if result.is_match() {
if matches.len() == 1 {
matches[0] = true;
} else {
debug_assert!(dfa.last_match_si != STATE_UNKNOWN);
debug_assert!(dfa.last_match_si != STATE_DEAD);
for ip in dfa.state(dfa.last_match_si).inst_ptrs() {
if let Inst::Match(slot) = dfa.prog[ip] {
matches[slot] = true;
}
}
}
}
result
}
/// Executes the DFA on a forward NFA.
///
/// {qcur,qnext} are scratch ordered sets which may be non-empty.
#[inline(always)] // reduces constant overhead
fn exec_at(
&mut self,
qcur: &mut SparseSet,
qnext: &mut SparseSet,
text: &[u8],
) -> Result<usize> {
// For the most part, the DFA is basically:
//
// last_match = null
// while current_byte != EOF:
// si = current_state.next[current_byte]
// if si is match
// last_match = si
// return last_match
//
// However, we need to deal with a few things:
//
// 1. This is an *online* DFA, so the current state's next list
// may not point to anywhere yet, so we must go out and compute
// them. (They are then cached into the current state's next list
// to avoid re-computation.)
// 2. If we come across a state that is known to be dead (i.e., never
// leads to a match), then we can quit early.
// 3. If the caller just wants to know if a match occurs, then we
// can quit as soon as we know we have a match. (Full leftmost
// first semantics require continuing on.)
// 4. If we're in the start state, then we can use a pre-computed set
// of prefix literals to skip quickly along the input.
// 5. After the input is exhausted, we run the DFA on one symbol
// that stands for EOF. This is useful for handling empty width
// assertions.
// 6. We can't actually do state.next[byte]. Instead, we have to do
// state.next[byte_classes[byte]], which permits us to keep the
// 'next' list very small.
//
// Since there's a bunch of extra stuff we need to consider, we do some
// pretty hairy tricks to get the inner loop to run as fast as
// possible.
debug_assert!(!self.prog.is_reverse);
// The last match is the currently known ending match position. It is
// reported as an index to the most recent byte that resulted in a
// transition to a match state and is always stored in capture slot `1`
// when searching forwards. Its maximum value is `text.len()`.
let mut result = Result::NoMatch(self.at);
let (mut prev_si, mut next_si) = (self.start, self.start);
let mut at = self.at;
while at < text.len() {
// This is the real inner loop. We take advantage of special bits
// set in the state pointer to determine whether a state is in the
// "common" case or not. Specifically, the common case is a
// non-match non-start non-dead state that has already been
// computed. So long as we remain in the common case, this inner
// loop will chew through the input.
//
// We also unroll the loop 4 times to amortize the cost of checking
// whether we've consumed the entire input. We are also careful
// to make sure that `prev_si` always represents the previous state
// and `next_si` always represents the next state after the loop
// exits, even if it isn't always true inside the loop.
while next_si <= STATE_MAX && at < text.len() {
// Argument for safety is in the definition of next_si.
prev_si = unsafe { self.next_si(next_si, text, at) };
at += 1;
if prev_si > STATE_MAX || at + 2 >= text.len() {
mem::swap(&mut prev_si, &mut next_si);
break;
}
next_si = unsafe { self.next_si(prev_si, text, at) };
at += 1;
if next_si > STATE_MAX {
break;
}
prev_si = unsafe { self.next_si(next_si, text, at) };
at += 1;
if prev_si > STATE_MAX {
mem::swap(&mut prev_si, &mut next_si);
break;
}
next_si = unsafe { self.next_si(prev_si, text, at) };
at += 1;
}
if next_si & STATE_MATCH > 0 {
// A match state is outside of the common case because it needs
// special case analysis. In particular, we need to record the
// last position as having matched and possibly quit the DFA if
// we don't need to keep matching.
next_si &= !STATE_MATCH;
result = Result::Match(at - 1);
if self.quit_after_match {
return result;
}
self.last_match_si = next_si;
prev_si = next_si;
// This permits short-circuiting when matching a regex set.
// In particular, if this DFA state contains only match states,
// then it's impossible to extend the set of matches since
// match states are final. Therefore, we can quit.
if self.prog.matches.len() > 1 {
let state = self.state(next_si);
let just_matches = state.inst_ptrs()
.all(|ip| self.prog[ip].is_match());
if just_matches {
return result;
}
}
// Another inner loop! If the DFA stays in this particular
// match state, then we can rip through all of the input
// very quickly, and only recording the match location once
// we've left this particular state.
let cur = at;
while (next_si & !STATE_MATCH) == prev_si
&& at + 2 < text.len() {
// Argument for safety is in the definition of next_si.
next_si = unsafe {
self.next_si(next_si & !STATE_MATCH, text, at)
};
at += 1;
}
if at > cur {
result = Result::Match(at - 2);
}
} else if next_si & STATE_START > 0 {
// A start state isn't in the common case because we may
// want to do quick prefix scanning. If the program doesn't
// have a detected prefix, then start states are actually
// considered common and this case is never reached.
debug_assert!(self.has_prefix());
next_si &= !STATE_START;
prev_si = next_si;
at = match self.prefix_at(text, at) {
None => return Result::NoMatch(text.len()),
Some(i) => i,
};
} else if next_si >= STATE_UNKNOWN {
if next_si == STATE_QUIT {
return Result::Quit;
}
// Finally, this corresponds to the case where the transition
// entered a state that can never lead to a match or a state
// that hasn't been computed yet. The latter being the "slow"
// path.
let byte = Byte::byte(text[at - 1]);
// We no longer care about the special bits in the state
// pointer.
prev_si &= STATE_MAX;
// Record where we are. This is used to track progress for
// determining whether we should quit if we've flushed the
// cache too much.
self.at = at;
next_si = match self.next_state(qcur, qnext, prev_si, byte) {
None => return Result::Quit,
Some(STATE_DEAD) => return result.set_non_match(at),
Some(si) => si,
};
debug_assert!(next_si != STATE_UNKNOWN);
if next_si & STATE_MATCH > 0 {
next_si &= !STATE_MATCH;
result = Result::Match(at - 1);
if self.quit_after_match {
return result;
}
self.last_match_si = next_si;
}
prev_si = next_si;
} else {
prev_si = next_si;
}
}
// Run the DFA once more on the special EOF sentinel value.
// We don't care about the special bits in the state pointer any more,
// so get rid of them.
prev_si &= STATE_MAX;
prev_si = match self.next_state(qcur, qnext, prev_si, Byte::eof()) {
None => return Result::Quit,
Some(STATE_DEAD) => return result.set_non_match(text.len()),
Some(si) => si & !STATE_START,
};
debug_assert!(prev_si != STATE_UNKNOWN);
if prev_si & STATE_MATCH > 0 {
prev_si &= !STATE_MATCH;
self.last_match_si = prev_si;
result = Result::Match(text.len());
}
result
}
/// Executes the DFA on a reverse NFA.
#[inline(always)] // reduces constant overhead
fn exec_at_reverse(
&mut self,
qcur: &mut SparseSet,
qnext: &mut SparseSet,
text: &[u8],
) -> Result<usize> {
// The comments in `exec_at` above mostly apply here too. The main
// difference is that we move backwards over the input and we look for
// the longest possible match instead of the leftmost-first match.
//
// N.B. The code duplication here is regrettable. Efforts to improve
// it without sacrificing performance are welcome. ---AG
debug_assert!(self.prog.is_reverse);
let mut result = Result::NoMatch(self.at);
let (mut prev_si, mut next_si) = (self.start, self.start);
let mut at = self.at;
while at > 0 {
while next_si <= STATE_MAX && at > 0 {
// Argument for safety is in the definition of next_si.
at -= 1;
prev_si = unsafe { self.next_si(next_si, text, at) };
if prev_si > STATE_MAX || at <= 4 {
mem::swap(&mut prev_si, &mut next_si);
break;
}
at -= 1;
next_si = unsafe { self.next_si(prev_si, text, at) };
if next_si > STATE_MAX {
break;
}
at -= 1;
prev_si = unsafe { self.next_si(next_si, text, at) };
if prev_si > STATE_MAX {
mem::swap(&mut prev_si, &mut next_si);
break;
}
at -= 1;
next_si = unsafe { self.next_si(prev_si, text, at) };
}
if next_si & STATE_MATCH > 0 {
next_si &= !STATE_MATCH;
result = Result::Match(at + 1);
if self.quit_after_match {
return result
}
self.last_match_si = next_si;
prev_si = next_si;
let cur = at;
while (next_si & !STATE_MATCH) == prev_si && at >= 2 {
// Argument for safety is in the definition of next_si.
at -= 1;
next_si = unsafe {
self.next_si(next_si & !STATE_MATCH, text, at)
};
}
if at < cur {
result = Result::Match(at + 2);
}
} else if next_si >= STATE_UNKNOWN {
if next_si == STATE_QUIT {
return Result::Quit;
}
let byte = Byte::byte(text[at]);
prev_si &= STATE_MAX;
self.at = at;
next_si = match self.next_state(qcur, qnext, prev_si, byte) {
None => return Result::Quit,
Some(STATE_DEAD) => return result.set_non_match(at),
Some(si) => si,
};
debug_assert!(next_si != STATE_UNKNOWN);
if next_si & STATE_MATCH > 0 {
next_si &= !STATE_MATCH;
result = Result::Match(at + 1);
if self.quit_after_match {
return result;
}
self.last_match_si = next_si;
}
prev_si = next_si;
} else {
prev_si = next_si;
}
}
// Run the DFA once more on the special EOF sentinel value.
prev_si = match self.next_state(qcur, qnext, prev_si, Byte::eof()) {
None => return Result::Quit,
Some(STATE_DEAD) => return result.set_non_match(0),
Some(si) => si,
};
debug_assert!(prev_si != STATE_UNKNOWN);
if prev_si & STATE_MATCH > 0 {
prev_si &= !STATE_MATCH;
self.last_match_si = prev_si;
result = Result::Match(0);
}
result
}
/// next_si transitions to the next state, where the transition input
/// corresponds to text[i].
///
/// This elides bounds checks, and is therefore unsafe.
#[inline(always)]
unsafe fn next_si(&self, si: StatePtr, text: &[u8], i: usize) -> StatePtr {
// What is the argument for safety here?
// We have three unchecked accesses that could possibly violate safety:
//
// 1. The given byte of input (`text[i]`).
// 2. The class of the byte of input (`classes[text[i]]`).
// 3. The transition for the class (`trans[si + cls]`).
//
// (1) is only safe when calling next_si is guarded by
// `i < text.len()`.
//
// (2) is the easiest case to guarantee since `text[i]` is always a
// `u8` and `self.prog.byte_classes` always has length `u8::MAX`.
// (See `ByteClassSet.byte_classes` in `compile.rs`.)
//
// (3) is only safe if (1)+(2) are safe. Namely, the transitions
// of every state are defined to have length equal to the number of
// byte classes in the program. Therefore, a valid class leads to a
// valid transition. (All possible transitions are valid lookups, even
// if it points to a state that hasn't been computed yet.) (3) also
// relies on `si` being correct, but StatePtrs should only ever be
// retrieved from the transition table, which ensures they are correct.
debug_assert!(i < text.len());
let b = *text.get_unchecked(i);
debug_assert!((b as usize) < self.prog.byte_classes.len());
let cls = *self.prog.byte_classes.get_unchecked(b as usize);
self.cache.trans.next_unchecked(si, cls as usize)
}
/// Computes the next state given the current state and the current input
/// byte (which may be EOF).
///
/// If STATE_DEAD is returned, then there is no valid state transition.
/// This implies that no permutation of future input can lead to a match
/// state.
///
/// STATE_UNKNOWN can never be returned.
fn exec_byte(
&mut self,
qcur: &mut SparseSet,
qnext: &mut SparseSet,
mut si: StatePtr,
b: Byte,
) -> Option<StatePtr> {
use prog::Inst::*;
// Initialize a queue with the current DFA state's NFA states.
qcur.clear();
for ip in self.state(si).inst_ptrs() {
qcur.insert(ip);
}
// Before inspecting the current byte, we may need to also inspect
// whether the position immediately preceding the current byte
// satisfies the empty assertions found in the current state.
//
// We only need to do this step if there are any empty assertions in
// the current state.
let is_word_last = self.state(si).flags().is_word();
let is_word = b.is_ascii_word();
if self.state(si).flags().has_empty() {
// Compute the flags immediately preceding the current byte.
// This means we only care about the "end" or "end line" flags.
// (The "start" flags are computed immediately proceding the
// current byte and is handled below.)
let mut flags = EmptyFlags::default();
if b.is_eof() {
flags.end = true;
flags.end_line = true;
} else if b.as_byte().map_or(false, |b| b == b'\n') {
flags.end_line = true;
}
if is_word_last == is_word {
flags.not_word_boundary = true;
} else {
flags.word_boundary = true;
}
// Now follow epsilon transitions from every NFA state, but make
// sure we only follow transitions that satisfy our flags.
qnext.clear();
for &ip in &*qcur {
self.follow_epsilons(usize_to_u32(ip), qnext, flags);
}
mem::swap(qcur, qnext);
}
// Now we set flags for immediately after the current byte. Since start
// states are processed separately, and are the only states that can
// have the StartText flag set, we therefore only need to worry about
// the StartLine flag here.
//
// We do also keep track of whether this DFA state contains a NFA state
// that is a matching state. This is precisely how we delay the DFA
// matching by one byte in order to process the special EOF sentinel
// byte. Namely, if this DFA state contains a matching NFA state,
// then it is the *next* DFA state that is marked as a match.
let mut empty_flags = EmptyFlags::default();
let mut state_flags = StateFlags::default();
empty_flags.start_line = b.as_byte().map_or(false, |b| b == b'\n');
if b.is_ascii_word() {
state_flags.set_word();
}
// Now follow all epsilon transitions again, but only after consuming
// the current byte.
qnext.clear();
for &ip in &*qcur {
match self.prog[ip as usize] {
// These states never happen in a byte-based program.
Char(_) | Ranges(_) => unreachable!(),
// These states are handled when following epsilon transitions.
Save(_) | Split(_) | EmptyLook(_) => {}
Match(_) => {
state_flags.set_match();
if !self.continue_past_first_match() {
break;
} else if self.prog.matches.len() > 1
&& !qnext.contains(ip as usize) {
// If we are continuing on to find other matches,
// then keep a record of the match states we've seen.
qnext.insert(ip);
}
}
Bytes(ref inst) => {
if b.as_byte().map_or(false, |b| inst.matches(b)) {
self.follow_epsilons(
inst.goto as InstPtr, qnext, empty_flags);
}
}
}
}
let cache =
if b.is_eof() && self.prog.matches.len() > 1 {
// If we're processing the last byte of the input and we're
// matching a regex set, then make the next state contain the
// previous states transitions. We do this so that the main
// matching loop can extract all of the match instructions.
mem::swap(qcur, qnext);
// And don't cache this state because it's totally bunk.
false
} else {
true
};
// We've now built up the set of NFA states that ought to comprise the
// next DFA state, so try to find it in the cache, and if it doesn't
// exist, cache it.
//
// N.B. We pass `&mut si` here because the cache may clear itself if
// it has gotten too full. When that happens, the location of the
// current state may change.
let mut next = match self.cached_state(
qnext,
state_flags,
Some(&mut si),
) {
None => return None,
Some(next) => next,
};
if (self.start & !STATE_START) == next {
// Start states can never be match states since all matches are
// delayed by one byte.
debug_assert!(!self.state(next).flags().is_match());
next = self.start_ptr(next);
}
if next <= STATE_MAX && self.state(next).flags().is_match() {
next |= STATE_MATCH;
}
debug_assert!(next != STATE_UNKNOWN);
// And now store our state in the current state's next list.
if cache {
let cls = self.byte_class(b);
self.cache.trans.set_next(si, cls, next);
}
Some(next)
}
/// Follows the epsilon transitions starting at (and including) `ip`. The
/// resulting states are inserted into the ordered set `q`.
///
/// Conditional epsilon transitions (i.e., empty width assertions) are only
/// followed if they are satisfied by the given flags, which should
/// represent the flags set at the current location in the input.
///
/// If the current location corresponds to the empty string, then only the
/// end line and/or end text flags may be set. If the current location
/// corresponds to a real byte in the input, then only the start line
/// and/or start text flags may be set.
///
/// As an exception to the above, when finding the initial state, any of
/// the above flags may be set:
///
/// If matching starts at the beginning of the input, then start text and
/// start line should be set. If the input is empty, then end text and end
/// line should also be set.
///
/// If matching starts after the beginning of the input, then only start
/// line should be set if the preceding byte is `\n`. End line should never
/// be set in this case. (Even if the following byte is a `\n`, it will
/// be handled in a subsequent DFA state.)
fn follow_epsilons(
&mut self,
ip: InstPtr,
q: &mut SparseSet,
flags: EmptyFlags,
) {
use prog::Inst::*;
use prog::EmptyLook::*;
// We need to traverse the NFA to follow epsilon transitions, so avoid
// recursion with an explicit stack.
self.cache.stack.push(ip);
while let Some(mut ip) = self.cache.stack.pop() {
// Try to munch through as many states as possible without
// pushes/pops to the stack.
loop {
// Don't visit states we've already added.
if q.contains(ip as usize) {
break;
}
q.insert(ip as usize);
match self.prog[ip as usize] {
Char(_) | Ranges(_) => unreachable!(),
Match(_) | Bytes(_) => {
break;
}
EmptyLook(ref inst) => {
// Only follow empty assertion states if our flags
// satisfy the assertion.
match inst.look {
StartLine if flags.start_line => {
ip = inst.goto as InstPtr;
}
EndLine if flags.end_line => {
ip = inst.goto as InstPtr;
}
StartText if flags.start => {
ip = inst.goto as InstPtr;
}
EndText if flags.end => {
ip = inst.goto as InstPtr;
}
WordBoundaryAscii if flags.word_boundary => {
ip = inst.goto as InstPtr;
}
NotWordBoundaryAscii if flags.not_word_boundary => {
ip = inst.goto as InstPtr;
}
WordBoundary if flags.word_boundary => {
ip = inst.goto as InstPtr;
}
NotWordBoundary if flags.not_word_boundary => {
ip = inst.goto as InstPtr;
}
StartLine | EndLine | StartText | EndText
| WordBoundaryAscii | NotWordBoundaryAscii
| WordBoundary | NotWordBoundary => {
break;
}
}
}
Save(ref inst) => {
ip = inst.goto as InstPtr;
}
Split(ref inst) => {
self.cache.stack.push(inst.goto2 as InstPtr);
ip = inst.goto1 as InstPtr;
}
}
}
}
}
/// Find a previously computed state matching the given set of instructions
/// and is_match bool.
///
/// The given set of instructions should represent a single state in the
/// NFA along with all states reachable without consuming any input.
///
/// The is_match bool should be true if and only if the preceding DFA state
/// contains an NFA matching state. The cached state produced here will
/// then signify a match. (This enables us to delay a match by one byte,
/// in order to account for the EOF sentinel byte.)
///
/// If the cache is full, then it is wiped before caching a new state.
///
/// The current state should be specified if it exists, since it will need
/// to be preserved if the cache clears itself. (Start states are
/// always saved, so they should not be passed here.) It takes a mutable
/// pointer to the index because if the cache is cleared, the state's
/// location may change.
fn cached_state(
&mut self,
q: &SparseSet,
mut state_flags: StateFlags,
current_state: Option<&mut StatePtr>,
) -> Option<StatePtr> {
// If we couldn't come up with a non-empty key to represent this state,
// then it is dead and can never lead to a match.
//
// Note that inst_flags represent the set of empty width assertions
// in q. We use this as an optimization in exec_byte to determine when
// we should follow epsilon transitions at the empty string preceding
// the current byte.
let key = match self.cached_state_key(q, &mut state_flags) {
None => return Some(STATE_DEAD),
Some(v) => v,
};
// In the cache? Cool. Done.
if let Some(si) = self
.cache
.compiled
.get_ptr(&key)
{
return Some(si);
}
// If the cache has gotten too big, wipe it.
if self.approximate_size() > self.prog.dfa_size_limit
&& !self.clear_cache_and_save(current_state)
{
// Oops. The DFA is giving up.
return None;
}
// Allocate room for our state and add it.
self.add_state(key)
}
/// Produces a key suitable for describing a state in the DFA cache.
///
/// The key invariant here is that equivalent keys are produced for any two
/// sets of ordered NFA states (and toggling of whether the previous NFA
/// states contain a match state) that do not discriminate a match for any
/// input.
///
/// Specifically, q should be an ordered set of NFA states and is_match
/// should be true if and only if the previous NFA states contained a match
/// state.
fn cached_state_key(
&mut self,
q: &SparseSet,
state_flags: &mut StateFlags,
) -> Option<State> {
use prog::Inst::*;
// We need to build up enough information to recognize pre-built states
// in the DFA. Generally speaking, this includes every instruction
// except for those which are purely epsilon transitions, e.g., the
// Save and Split instructions.
//
// Empty width assertions are also epsilon transitions, but since they
// are conditional, we need to make them part of a state's key in the
// cache.
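        //
        // Concretely, the key built below is one leading flags byte followed
        // by the relevant instruction pointers, delta-encoded as
        // variable-width integers via push_inst_ptr (see the varint helpers
        // near the bottom of this file).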
let mut insts = mem::replace(
&mut self.cache.insts_scratch_space,
vec![],
);
insts.clear();
// Reserve 1 byte for flags.
insts.push(0);
let mut prev = 0;
for &ip in q {
let ip = usize_to_u32(ip);
match self.prog[ip as usize] {
Char(_) | Ranges(_) => unreachable!(),
Save(_) | Split(_) => {}
Bytes(_) => push_inst_ptr(&mut insts, &mut prev, ip),
EmptyLook(_) => {
state_flags.set_empty();
push_inst_ptr(&mut insts, &mut prev, ip)
}
Match(_) => {
push_inst_ptr(&mut insts, &mut prev, ip);
if !self.continue_past_first_match() {
break;
}
}
}
}
// If we couldn't transition to any other instructions and we didn't
// see a match when expanding NFA states previously, then this is a
// dead state and no amount of additional input can transition out
// of this state.
let opt_state =
if insts.len() == 1 && !state_flags.is_match() {
None
} else {
let StateFlags(f) = *state_flags;
insts[0] = f;
Some(State { data: Arc::from(&*insts) })
};
self.cache.insts_scratch_space = insts;
opt_state
}
/// Clears the cache, but saves and restores current_state if it is not
/// none.
///
/// The current state must be provided here in case its location in the
/// cache changes.
///
/// This returns false if the cache is not cleared and the DFA should
/// give up.
fn clear_cache_and_save(
&mut self,
current_state: Option<&mut StatePtr>,
) -> bool {
if self.cache.compiled.is_empty() {
// Nothing to clear...
return true;
}
match current_state {
None => self.clear_cache(),
Some(si) => {
let cur = self.state(*si).clone();
if !self.clear_cache() {
return false;
}
// The unwrap is OK because we just cleared the cache and
// therefore know that the next state pointer won't exceed
// STATE_MAX.
*si = self.restore_state(cur).unwrap();
true
}
}
}
/// Wipes the state cache, but saves and restores the current start state.
///
/// This returns false if the cache is not cleared and the DFA should
/// give up.
fn clear_cache(&mut self) -> bool {
// Bail out of the DFA if we're moving too "slowly."
// A heuristic from RE2: assume the DFA is too slow if it is processing
// 10 or fewer bytes per state.
// Additionally, we permit the cache to be flushed a few times before
        // calling it quits.
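        // For example, with 10,000 cached states the DFA must have consumed
        // more than 100,000 bytes since the last flush; otherwise, once it
        // has already flushed three times, it gives up.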
let nstates = self.cache.compiled.len();
if self.cache.flush_count >= 3
&& self.at >= self.last_cache_flush
&& (self.at - self.last_cache_flush) <= 10 * nstates {
return false;
}
// Update statistics tracking cache flushes.
self.last_cache_flush = self.at;
self.cache.flush_count += 1;
// OK, actually flush the cache.
let start = self.state(self.start & !STATE_START).clone();
let last_match = if self.last_match_si <= STATE_MAX {
Some(self.state(self.last_match_si).clone())
} else {
None
};
self.cache.reset_size();
self.cache.trans.clear();
self.cache.compiled.clear();
for s in &mut self.cache.start_states {
*s = STATE_UNKNOWN;
}
// The unwraps are OK because we just cleared the cache and therefore
// know that the next state pointer won't exceed STATE_MAX.
let start_ptr = self.restore_state(start).unwrap();
self.start = self.start_ptr(start_ptr);
if let Some(last_match) = last_match {
self.last_match_si = self.restore_state(last_match).unwrap();
}
true
}
/// Restores the given state back into the cache, and returns a pointer
/// to it.
fn restore_state(&mut self, state: State) -> Option<StatePtr> {
// If we've already stored this state, just return a pointer to it.
// None will be the wiser.
if let Some(si) = self.cache.compiled.get_ptr(&state) {
return Some(si);
}
self.add_state(state)
}
/// Returns the next state given the current state si and current byte
/// b. {qcur,qnext} are used as scratch space for storing ordered NFA
/// states.
///
/// This tries to fetch the next state from the cache, but if that fails,
/// it computes the next state, caches it and returns a pointer to it.
///
/// The pointer can be to a real state, or it can be STATE_DEAD.
/// STATE_UNKNOWN cannot be returned.
///
/// None is returned if a new state could not be allocated (i.e., the DFA
/// ran out of space and thinks it's running too slowly).
fn next_state(
&mut self,
qcur: &mut SparseSet,
qnext: &mut SparseSet,
si: StatePtr,
b: Byte,
) -> Option<StatePtr> {
if si == STATE_DEAD {
return Some(STATE_DEAD);
}
match self.cache.trans.next(si, self.byte_class(b)) {
STATE_UNKNOWN => self.exec_byte(qcur, qnext, si, b),
STATE_QUIT => None,
STATE_DEAD => Some(STATE_DEAD),
nsi => Some(nsi),
}
}
/// Computes and returns the start state, where searching begins at
/// position `at` in `text`. If the state has already been computed,
/// then it is pulled from the cache. If the state hasn't been cached,
/// then it is computed, cached and a pointer to it is returned.
///
/// This may return STATE_DEAD but never STATE_UNKNOWN.
#[inline(always)] // reduces constant overhead
fn start_state(
&mut self,
q: &mut SparseSet,
empty_flags: EmptyFlags,
state_flags: StateFlags,
) -> Option<StatePtr> {
// Compute an index into our cache of start states based on the set
// of empty/state flags set at the current position in the input. We
// don't use every flag since not all flags matter. For example, since
// matches are delayed by one byte, start states can never be match
// states.
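        // With seven flag bits there are at most 128 distinct start states.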
let flagi = {
(((empty_flags.start as u8) << 0) |
((empty_flags.end as u8) << 1) |
((empty_flags.start_line as u8) << 2) |
((empty_flags.end_line as u8) << 3) |
((empty_flags.word_boundary as u8) << 4) |
((empty_flags.not_word_boundary as u8) << 5) |
((state_flags.is_word() as u8) << 6))
as usize
};
match self.cache.start_states[flagi] {
STATE_UNKNOWN => {}
STATE_DEAD => return Some(STATE_DEAD),
si => return Some(si),
}
q.clear();
let start = usize_to_u32(self.prog.start);
self.follow_epsilons(start, q, empty_flags);
// Start states can never be match states because we delay every match
// by one byte. Given an empty string and an empty match, the match
// won't actually occur until the DFA processes the special EOF
// sentinel byte.
let sp = match self.cached_state(q, state_flags, None) {
None => return None,
Some(sp) => self.start_ptr(sp),
};
self.cache.start_states[flagi] = sp;
Some(sp)
}
/// Computes the set of starting flags for the given position in text.
///
/// This should only be used when executing the DFA forwards over the
/// input.
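    ///
    /// For example, at == 0 over non-empty text starting with a word byte
    /// sets `start`, `start_line` and `word_boundary` (there is no previous
    /// byte, and the next byte is a word byte).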
fn start_flags(&self, text: &[u8], at: usize) -> (EmptyFlags, StateFlags) {
let mut empty_flags = EmptyFlags::default();
let mut state_flags = StateFlags::default();
empty_flags.start = at == 0;
empty_flags.end = text.is_empty();
empty_flags.start_line = at == 0 || text[at - 1] == b'\n';
empty_flags.end_line = text.is_empty();
let is_word_last = at > 0 && Byte::byte(text[at - 1]).is_ascii_word();
let is_word = at < text.len() && Byte::byte(text[at]).is_ascii_word();
if is_word_last {
state_flags.set_word();
}
if is_word == is_word_last {
empty_flags.not_word_boundary = true;
} else {
empty_flags.word_boundary = true;
}
(empty_flags, state_flags)
}
/// Computes the set of starting flags for the given position in text.
///
/// This should only be used when executing the DFA in reverse over the
/// input.
fn start_flags_reverse(
&self,
text: &[u8],
at: usize,
) -> (EmptyFlags, StateFlags) {
let mut empty_flags = EmptyFlags::default();
let mut state_flags = StateFlags::default();
empty_flags.start = at == text.len();
empty_flags.end = text.is_empty();
empty_flags.start_line = at == text.len() || text[at] == b'\n';
empty_flags.end_line = text.is_empty();
let is_word_last =
at < text.len() && Byte::byte(text[at]).is_ascii_word();
let is_word = at > 0 && Byte::byte(text[at - 1]).is_ascii_word();
if is_word_last {
state_flags.set_word();
}
if is_word == is_word_last {
empty_flags.not_word_boundary = true;
} else {
empty_flags.word_boundary = true;
}
(empty_flags, state_flags)
}
/// Returns a reference to a State given a pointer to it.
fn state(&self, si: StatePtr) -> &State {
self.cache.compiled.get_state(si).unwrap()
}
/// Adds the given state to the DFA.
///
/// This allocates room for transitions out of this state in
/// self.cache.trans. The transitions can be set with the returned
/// StatePtr.
///
/// If None is returned, then the state limit was reached and the DFA
/// should quit.
fn add_state(&mut self, state: State) -> Option<StatePtr> {
        // This will fail if the next state pointer exceeds STATE_MAX. In
// practice, the cache limit will prevent us from ever getting here,
// but maybe callers will set the cache size to something ridiculous...
let si = match self.cache.trans.add() {
None => return None,
Some(si) => si,
};
// If the program has a Unicode word boundary, then set any transitions
// for non-ASCII bytes to STATE_QUIT. If the DFA stumbles over such a
// transition, then it will quit and an alternative matching engine
// will take over.
if self.prog.has_unicode_word_boundary {
for b in 128..256 {
let cls = self.byte_class(Byte::byte(b as u8));
self.cache.trans.set_next(si, cls, STATE_QUIT);
}
}
// Finally, put our actual state on to our heap of states and index it
// so we can find it later.
self.cache.size +=
self.cache.trans.state_heap_size()
+ state.data.len()
+ (2 * mem::size_of::<State>())
+ mem::size_of::<StatePtr>();
self.cache.compiled.insert(state, si);
// Transition table and set of states and map should all be in sync.
debug_assert!(self.cache.compiled.len()
== self.cache.trans.num_states());
Some(si)
}
/// Quickly finds the next occurrence of any literal prefixes in the regex.
/// If there are no literal prefixes, then the current position is
/// returned. If there are literal prefixes and one could not be found,
/// then None is returned.
///
/// This should only be called when the DFA is in a start state.
fn prefix_at(&self, text: &[u8], at: usize) -> Option<usize> {
self.prog.prefixes.find(&text[at..]).map(|(s, _)| at + s)
}
/// Returns the number of byte classes required to discriminate transitions
/// in each state.
///
/// invariant: num_byte_classes() == len(State.next)
fn num_byte_classes(&self) -> usize {
// We add 1 to account for the special EOF byte.
(self.prog.byte_classes[255] as usize + 1) + 1
}
/// Given an input byte or the special EOF sentinel, return its
/// corresponding byte class.
#[inline(always)]
fn byte_class(&self, b: Byte) -> usize {
match b.as_byte() {
None => self.num_byte_classes() - 1,
Some(b) => self.u8_class(b),
}
}
/// Like byte_class, but explicitly for u8s.
#[inline(always)]
fn u8_class(&self, b: u8) -> usize {
self.prog.byte_classes[b as usize] as usize
}
/// Returns true if the DFA should continue searching past the first match.
///
/// Leftmost first semantics in the DFA are preserved by not following NFA
/// transitions after the first match is seen.
///
/// On occasion, we want to avoid leftmost first semantics to find either
/// the longest match (for reverse search) or all possible matches (for
/// regex sets).
fn continue_past_first_match(&self) -> bool {
self.prog.is_reverse || self.prog.matches.len() > 1
}
/// Returns true if there is a prefix we can quickly search for.
fn has_prefix(&self) -> bool {
!self.prog.is_reverse
&& !self.prog.prefixes.is_empty()
&& !self.prog.is_anchored_start
}
/// Sets the STATE_START bit in the given state pointer if and only if
/// we have a prefix to scan for.
///
/// If there's no prefix, then it's a waste to treat the start state
/// specially.
fn start_ptr(&self, si: StatePtr) -> StatePtr {
if self.has_prefix() {
si | STATE_START
} else {
si
}
}
/// Approximate size returns the approximate heap space currently used by
/// the DFA. It is used to determine whether the DFA's state cache needs to
/// be wiped. Namely, it is possible that for certain regexes on certain
/// inputs, a new state could be created for every byte of input. (This is
/// bad for memory use, so we bound it with a cache.)
fn approximate_size(&self) -> usize {
self.cache.size + self.prog.approximate_size()
}
}
/// An abstraction for representing a map of states. The map supports two
/// different ways of state lookup. One is fast constant time access via a
/// state pointer. The other is a hashmap lookup based on the DFA's
/// constituent NFA states.
///
/// A DFA state internally uses an Arc such that we only need to store the
/// set of NFA states on the heap once, even though we support looking up
/// states by two different means. A more natural way to express this might
/// use raw pointers, but an Arc is safe and effectively achieves the same
/// thing.
#[derive(Debug)]
struct StateMap {
    /// The keys are not actually static but rely on always pointing to a
    /// buffer in `states` which will never be moved except when clearing
    /// the map or on drop, in which case the keys of this map are
    /// removed before the buffer is cleared or dropped.
map: HashMap<State, StatePtr>,
/// Our set of states. Note that `StatePtr / num_byte_classes` indexes
/// this Vec rather than just a `StatePtr`.
states: Vec<State>,
/// The number of byte classes in the DFA. Used to index `states`.
num_byte_classes: usize,
}
impl StateMap {
fn new(num_byte_classes: usize) -> StateMap {
StateMap {
map: HashMap::new(),
states: vec![],
num_byte_classes: num_byte_classes,
}
}
fn len(&self) -> usize {
self.states.len()
}
fn is_empty(&self) -> bool {
self.states.is_empty()
}
fn get_ptr(&self, state: &State) -> Option<StatePtr> {
self.map.get(state).cloned()
}
fn get_state(&self, si: StatePtr) -> Option<&State> {
self.states.get(si as usize / self.num_byte_classes)
}
fn insert(&mut self, state: State, si: StatePtr) {
self.map.insert(state.clone(), si);
self.states.push(state);
}
fn clear(&mut self) {
self.map.clear();
self.states.clear();
}
}
impl Transitions {
/// Create a new transition table.
///
/// The number of byte classes corresponds to the stride. Every state will
/// have `num_byte_classes` slots for transitions.
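    ///
    /// State pointers are premultiplied: state `i` has `StatePtr` value
    /// `i * num_byte_classes`, so a pointer is both an offset into `table`
    /// and, divided by the stride, an index into `StateMap::states`.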
fn new(num_byte_classes: usize) -> Transitions {
Transitions {
table: vec![],
num_byte_classes: num_byte_classes,
}
}
/// Returns the total number of states currently in this table.
fn num_states(&self) -> usize {
self.table.len() / self.num_byte_classes
}
/// Allocates room for one additional state and returns a pointer to it.
///
/// If there's no more room, None is returned.
fn add(&mut self) -> Option<StatePtr> {
let si = self.table.len();
if si > STATE_MAX as usize {
return None;
}
self.table.extend(repeat(STATE_UNKNOWN).take(self.num_byte_classes));
Some(usize_to_u32(si))
}
/// Clears the table of all states.
fn clear(&mut self) {
self.table.clear();
}
/// Sets the transition from (si, cls) to next.
fn set_next(&mut self, si: StatePtr, cls: usize, next: StatePtr) {
self.table[si as usize + cls] = next;
}
/// Returns the transition corresponding to (si, cls).
fn next(&self, si: StatePtr, cls: usize) -> StatePtr {
self.table[si as usize + cls]
}
/// The heap size, in bytes, of a single state in the transition table.
fn state_heap_size(&self) -> usize {
self.num_byte_classes * mem::size_of::<StatePtr>()
}
/// Like `next`, but uses unchecked access and is therefore unsafe.
unsafe fn next_unchecked(&self, si: StatePtr, cls: usize) -> StatePtr {
debug_assert!((si as usize) < self.table.len());
debug_assert!(cls < self.num_byte_classes);
*self.table.get_unchecked(si as usize + cls)
}
}
impl StateFlags {
fn is_match(&self) -> bool {
self.0 & 0b0000000_1 > 0
}
fn set_match(&mut self) {
self.0 |= 0b0000000_1;
}
fn is_word(&self) -> bool {
self.0 & 0b000000_1_0 > 0
}
fn set_word(&mut self) {
self.0 |= 0b000000_1_0;
}
fn has_empty(&self) -> bool {
self.0 & 0b00000_1_00 > 0
}
fn set_empty(&mut self) {
self.0 |= 0b00000_1_00;
}
}
impl Byte {
fn byte(b: u8) -> Self { Byte(b as u16) }
fn eof() -> Self { Byte(256) }
fn is_eof(&self) -> bool { self.0 == 256 }
fn is_ascii_word(&self) -> bool {
let b = match self.as_byte() {
None => return false,
Some(b) => b,
};
match b {
b'A'...b'Z' | b'a'...b'z' | b'0'...b'9' | b'_' => true,
_ => false,
}
}
fn as_byte(&self) -> Option<u8> {
if self.is_eof() {
None
} else {
Some(self.0 as u8)
}
}
}
impl fmt::Debug for State {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let ips: Vec<usize> = self.inst_ptrs().collect();
f.debug_struct("State")
.field("flags", &self.flags())
.field("insts", &ips)
.finish()
}
}
impl fmt::Debug for Transitions {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut fmtd = f.debug_map();
for si in 0..self.num_states() {
let s = si * self.num_byte_classes;
let e = s + self.num_byte_classes;
fmtd.entry(&si.to_string(), &TransitionsRow(&self.table[s..e]));
}
fmtd.finish()
}
}
struct TransitionsRow<'a>(&'a [StatePtr]);
impl<'a> fmt::Debug for TransitionsRow<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut fmtd = f.debug_map();
for (b, si) in self.0.iter().enumerate() {
match *si {
STATE_UNKNOWN => {}
STATE_DEAD => {
fmtd.entry(&vb(b as usize), &"DEAD");
}
si => {
fmtd.entry(&vb(b as usize), &si.to_string());
}
}
}
fmtd.finish()
}
}
impl fmt::Debug for StateFlags {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("StateFlags")
.field("is_match", &self.is_match())
.field("is_word", &self.is_word())
.field("has_empty", &self.has_empty())
.finish()
}
}
/// Helper function for formatting a byte as a nice-to-read escaped string.
fn vb(b: usize) -> String {
use std::ascii::escape_default;
if b > ::std::u8::MAX as usize {
"EOF".to_owned()
} else {
let escaped = escape_default(b as u8).collect::<Vec<u8>>();
String::from_utf8_lossy(&escaped).into_owned()
}
}
fn usize_to_u32(n: usize) -> u32 {
if (n as u64) > (::std::u32::MAX as u64) {
panic!("BUG: {} is too big to fit into u32", n)
}
n as u32
}
#[allow(dead_code)] // useful for debugging
fn show_state_ptr(si: StatePtr) -> String {
let mut s = format!("{:?}", si & STATE_MAX);
if si == STATE_UNKNOWN {
s = format!("{} (unknown)", s);
}
if si == STATE_DEAD {
s = format!("{} (dead)", s);
}
if si == STATE_QUIT {
s = format!("{} (quit)", s);
}
if si & STATE_START > 0 {
s = format!("{} (start)", s);
}
if si & STATE_MATCH > 0 {
s = format!("{} (match)", s);
}
s
}
/// https://developers.google.com/protocol-buffers/docs/encoding#varints
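///
/// Zigzag encoding maps 0, -1, 1, -2, 2, ... to 0, 1, 2, 3, 4, ... so that
/// small negative numbers still fit in one byte; e.g. -3 encodes as 5.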
fn write_vari32(data: &mut Vec<u8>, n: i32) {
let mut un = (n as u32) << 1;
if n < 0 {
un = !un;
}
write_varu32(data, un)
}
/// https://developers.google.com/protocol-buffers/docs/encoding#varints
fn read_vari32(data: &[u8]) -> (i32, usize) {
let (un, i) = read_varu32(data);
let mut n = (un >> 1) as i32;
if un & 1 != 0 {
n = !n;
}
(n, i)
}
/// https://developers.google.com/protocol-buffers/docs/encoding#varints
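///
/// For example, 300 (0b1_0010_1100) is written as two bytes: 0b1010_1100
/// (the low seven bits with the continuation bit set) then 0b0000_0010.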
fn write_varu32(data: &mut Vec<u8>, mut n: u32) {
while n >= 0b1000_0000 {
data.push((n as u8) | 0b1000_0000);
n >>= 7;
}
data.push(n as u8);
}
/// https://developers.google.com/protocol-buffers/docs/encoding#varints
fn read_varu32(data: &[u8]) -> (u32, usize) {
let mut n: u32 = 0;
let mut shift: u32 = 0;
for (i, &b) in data.iter().enumerate() {
if b < 0b1000_0000 {
return (n | ((b as u32) << shift), i + 1);
}
n |= ((b as u32) & 0b0111_1111) << shift;
shift += 7;
}
(0, 0)
}
#[cfg(test)]
mod tests {
extern crate rand;
use std::sync::Arc;
use quickcheck::{QuickCheck, StdGen, quickcheck};
use super::{
StateFlags, State, push_inst_ptr,
write_varu32, read_varu32, write_vari32, read_vari32,
};
#[test]
fn prop_state_encode_decode() {
fn p(ips: Vec<u32>, flags: u8) -> bool {
let mut data = vec![flags];
let mut prev = 0;
for &ip in ips.iter() {
push_inst_ptr(&mut data, &mut prev, ip);
}
let state = State { data: Arc::from(&data[..]) };
let expected: Vec<usize> =
ips.into_iter().map(|ip| ip as usize).collect();
let got: Vec<usize> = state.inst_ptrs().collect();
expected == got && state.flags() == StateFlags(flags)
}
QuickCheck::new()
.gen(StdGen::new(self::rand::thread_rng(), 10_000))
.quickcheck(p as fn(Vec<u32>, u8) -> bool);
}
    #[test]
    fn prop_read_write_u32() {
        fn p(n: u32) -> bool {
            let mut buf = vec![];
            write_varu32(&mut buf, n);
            let (got, nread) = read_varu32(&buf);
            nread == buf.len() && got == n
        }
        quickcheck(p as fn(u32) -> bool);
    }
    #[test]
    fn prop_read_write_i32() {
        fn p(n: i32) -> bool {
            let mut buf = vec![];
            write_vari32(&mut buf, n);
            let (got, nread) = read_vari32(&buf);
            nread == buf.len() && got == n
        }
        quickcheck(p as fn(i32) -> bool);
    }
}
builtin_math.go
// Copyright 2015 PingCAP, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Copyright 2013 The ql Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSES/QL-LICENSE file.
package expression
import (
"fmt"
"hash/crc32"
"math"
"strconv"
"strings"
"github.com/cznic/mathutil"
"github.com/pingcap/tidb/parser/ast"
"github.com/pingcap/tidb/parser/mysql"
"github.com/pingcap/tidb/sessionctx"
"github.com/pingcap/tidb/types"
"github.com/pingcap/tidb/util/chunk"
utilMath "github.com/pingcap/tidb/util/math"
"github.com/pingcap/tipb/go-tipb"
)
var (
_ functionClass = &absFunctionClass{}
_ functionClass = &roundFunctionClass{}
_ functionClass = &ceilFunctionClass{}
_ functionClass = &floorFunctionClass{}
_ functionClass = &logFunctionClass{}
_ functionClass = &log2FunctionClass{}
_ functionClass = &log10FunctionClass{}
_ functionClass = &randFunctionClass{}
_ functionClass = &powFunctionClass{}
_ functionClass = &convFunctionClass{}
_ functionClass = &crc32FunctionClass{}
_ functionClass = &signFunctionClass{}
_ functionClass = &sqrtFunctionClass{}
_ functionClass = &acosFunctionClass{}
_ functionClass = &asinFunctionClass{}
_ functionClass = &atanFunctionClass{}
_ functionClass = &cosFunctionClass{}
_ functionClass = &cotFunctionClass{}
	_ functionClass = &degreesFunctionClass{}
_ functionClass = &expFunctionClass{}
_ functionClass = &piFunctionClass{}
_ functionClass = &radiansFunctionClass{}
_ functionClass = &sinFunctionClass{}
_ functionClass = &tanFunctionClass{}
_ functionClass = &truncateFunctionClass{}
)
var (
_ builtinFunc = &builtinAbsRealSig{}
_ builtinFunc = &builtinAbsIntSig{}
_ builtinFunc = &builtinAbsUIntSig{}
_ builtinFunc = &builtinAbsDecSig{}
_ builtinFunc = &builtinRoundRealSig{}
_ builtinFunc = &builtinRoundIntSig{}
_ builtinFunc = &builtinRoundDecSig{}
_ builtinFunc = &builtinRoundWithFracRealSig{}
_ builtinFunc = &builtinRoundWithFracIntSig{}
_ builtinFunc = &builtinRoundWithFracDecSig{}
_ builtinFunc = &builtinCeilRealSig{}
_ builtinFunc = &builtinCeilIntToDecSig{}
_ builtinFunc = &builtinCeilIntToIntSig{}
_ builtinFunc = &builtinCeilDecToIntSig{}
_ builtinFunc = &builtinCeilDecToDecSig{}
_ builtinFunc = &builtinFloorRealSig{}
_ builtinFunc = &builtinFloorIntToDecSig{}
_ builtinFunc = &builtinFloorIntToIntSig{}
_ builtinFunc = &builtinFloorDecToIntSig{}
_ builtinFunc = &builtinFloorDecToDecSig{}
_ builtinFunc = &builtinLog1ArgSig{}
_ builtinFunc = &builtinLog2ArgsSig{}
_ builtinFunc = &builtinLog2Sig{}
_ builtinFunc = &builtinLog10Sig{}
_ builtinFunc = &builtinRandSig{}
_ builtinFunc = &builtinRandWithSeedFirstGenSig{}
_ builtinFunc = &builtinPowSig{}
_ builtinFunc = &builtinConvSig{}
_ builtinFunc = &builtinCRC32Sig{}
_ builtinFunc = &builtinSignSig{}
_ builtinFunc = &builtinSqrtSig{}
_ builtinFunc = &builtinAcosSig{}
_ builtinFunc = &builtinAsinSig{}
_ builtinFunc = &builtinAtan1ArgSig{}
_ builtinFunc = &builtinAtan2ArgsSig{}
_ builtinFunc = &builtinCosSig{}
_ builtinFunc = &builtinCotSig{}
_ builtinFunc = &builtinDegreesSig{}
_ builtinFunc = &builtinExpSig{}
_ builtinFunc = &builtinPISig{}
_ builtinFunc = &builtinRadiansSig{}
_ builtinFunc = &builtinSinSig{}
_ builtinFunc = &builtinTanSig{}
_ builtinFunc = &builtinTruncateIntSig{}
_ builtinFunc = &builtinTruncateRealSig{}
_ builtinFunc = &builtinTruncateDecimalSig{}
_ builtinFunc = &builtinTruncateUintSig{}
)
type absFunctionClass struct {
baseFunctionClass
}
func (c *absFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
	if err := c.verifyArgs(args); err != nil {
		return nil, err
	}
argFieldTp := args[0].GetType()
argTp := argFieldTp.EvalType()
if argTp != types.ETInt && argTp != types.ETDecimal {
argTp = types.ETReal
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, argTp, argTp)
if err != nil {
return nil, err
}
if mysql.HasUnsignedFlag(argFieldTp.GetFlag()) {
bf.tp.AddFlag(mysql.UnsignedFlag)
}
if argTp == types.ETReal {
flen, decimal := mysql.GetDefaultFieldLengthAndDecimal(mysql.TypeDouble)
bf.tp.SetFlen(flen)
bf.tp.SetDecimal(decimal)
} else {
bf.tp.SetFlen(argFieldTp.GetFlen())
bf.tp.SetDecimal(argFieldTp.GetDecimal())
}
var sig builtinFunc
switch argTp {
case types.ETInt:
if mysql.HasUnsignedFlag(argFieldTp.GetFlag()) {
sig = &builtinAbsUIntSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_AbsUInt)
} else {
sig = &builtinAbsIntSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_AbsInt)
}
case types.ETDecimal:
sig = &builtinAbsDecSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_AbsDecimal)
case types.ETReal:
sig = &builtinAbsRealSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_AbsReal)
default:
panic("unexpected argTp")
}
return sig, nil
}
type builtinAbsRealSig struct {
baseBuiltinFunc
}
func (b *builtinAbsRealSig) Clone() builtinFunc {
newSig := &builtinAbsRealSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals ABS(value).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_abs
func (b *builtinAbsRealSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return math.Abs(val), false, nil
}
type builtinAbsIntSig struct {
baseBuiltinFunc
}
func (b *builtinAbsIntSig) Clone() builtinFunc {
newSig := &builtinAbsIntSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalInt evals ABS(value).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_abs
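// Note that ABS(-9223372036854775808) cannot be represented as a signed
// BIGINT, so it returns an overflow error, matching MySQL.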
func (b *builtinAbsIntSig) evalInt(row chunk.Row) (int64, bool, error) {
val, isNull, err := b.args[0].EvalInt(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if val >= 0 {
return val, false, nil
}
if val == math.MinInt64 {
return 0, false, types.ErrOverflow.GenWithStackByArgs("BIGINT", fmt.Sprintf("abs(%d)", val))
}
return -val, false, nil
}
type builtinAbsUIntSig struct {
baseBuiltinFunc
}
func (b *builtinAbsUIntSig) Clone() builtinFunc {
newSig := &builtinAbsUIntSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalInt evals ABS(value).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_abs
func (b *builtinAbsUIntSig) evalInt(row chunk.Row) (int64, bool, error) {
return b.args[0].EvalInt(b.ctx, row)
}
type builtinAbsDecSig struct {
baseBuiltinFunc
}
func (b *builtinAbsDecSig) Clone() builtinFunc {
newSig := &builtinAbsDecSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalDecimal evals ABS(value).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_abs
func (b *builtinAbsDecSig) evalDecimal(row chunk.Row) (*types.MyDecimal, bool, error) {
val, isNull, err := b.args[0].EvalDecimal(b.ctx, row)
if isNull || err != nil {
return nil, isNull, err
}
to := new(types.MyDecimal)
if !val.IsNegative() {
*to = *val
} else {
		if err = types.DecimalSub(to, new(types.MyDecimal), val); err != nil {
return nil, true, err
}
}
return to, false, nil
}
func (c *roundFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
	if err := c.verifyArgs(args); err != nil {
		return nil, err
	}
argTp := args[0].GetType().EvalType()
if argTp != types.ETInt && argTp != types.ETDecimal {
argTp = types.ETReal
}
argTps := []types.EvalType{argTp}
if len(args) > 1 {
argTps = append(argTps, types.ETInt)
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, argTp, argTps...)
if err != nil {
return nil, err
}
argFieldTp := args[0].GetType()
if mysql.HasUnsignedFlag(argFieldTp.GetFlag()) {
bf.tp.AddFlag(mysql.UnsignedFlag)
}
// ETInt or ETReal is set correctly by newBaseBuiltinFuncWithTp, only need to handle ETDecimal.
if argTp == types.ETDecimal {
bf.tp.SetFlen(argFieldTp.GetFlen())
bf.tp.SetDecimal(calculateDecimal4RoundAndTruncate(ctx, args, argTp))
if bf.tp.GetDecimal() != types.UnspecifiedLength {
if argFieldTp.GetDecimal() != types.UnspecifiedLength {
decimalDelta := bf.tp.GetDecimal() - argFieldTp.GetDecimal()
bf.tp.SetFlen(bf.tp.GetFlen() + mathutil.Max(decimalDelta, 0))
} else {
bf.tp.SetFlen(argFieldTp.GetFlen() + bf.tp.GetDecimal())
}
}
}
var sig builtinFunc
if len(args) > 1 {
switch argTp {
case types.ETInt:
sig = &builtinRoundWithFracIntSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_RoundWithFracInt)
case types.ETDecimal:
sig = &builtinRoundWithFracDecSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_RoundWithFracDec)
case types.ETReal:
sig = &builtinRoundWithFracRealSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_RoundWithFracReal)
default:
panic("unexpected argTp")
}
} else {
switch argTp {
case types.ETInt:
sig = &builtinRoundIntSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_RoundInt)
case types.ETDecimal:
sig = &builtinRoundDecSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_RoundDec)
case types.ETReal:
sig = &builtinRoundRealSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_RoundReal)
default:
panic("unexpected argTp")
}
}
return sig, nil
}
// calculateDecimal4RoundAndTruncate calculates tp.decimals of round/truncate func.
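// For example, ROUND(col, 2) with a constant second argument yields 2
// decimals, while a non-constant second argument falls back to the first
// argument's own scale.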
func calculateDecimal4RoundAndTruncate(ctx sessionctx.Context, args []Expression, retType types.EvalType) int {
if retType == types.ETInt || len(args) <= 1 {
return 0
}
secondConst, secondIsConst := args[1].(*Constant)
if !secondIsConst {
return args[0].GetType().GetDecimal()
}
argDec, isNull, err := secondConst.EvalInt(ctx, chunk.Row{})
if err != nil || isNull || argDec < 0 {
return 0
}
if argDec > mysql.MaxDecimalScale {
return mysql.MaxDecimalScale
}
return int(argDec)
}
type builtinRoundRealSig struct {
baseBuiltinFunc
}
func (b *builtinRoundRealSig) Clone() builtinFunc {
newSig := &builtinRoundRealSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals ROUND(value).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_round
func (b *builtinRoundRealSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return types.Round(val, 0), false, nil
}
type builtinRoundIntSig struct {
baseBuiltinFunc
}
func (b *builtinRoundIntSig) Clone() builtinFunc {
newSig := &builtinRoundIntSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalInt evals ROUND(value).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_round
func (b *builtinRoundIntSig) evalInt(row chunk.Row) (int64, bool, error) {
return b.args[0].EvalInt(b.ctx, row)
}
type builtinRoundDecSig struct {
baseBuiltinFunc
}
func (b *builtinRoundDecSig) Clone() builtinFunc {
newSig := &builtinRoundDecSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalDecimal evals ROUND(value).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_round
func (b *builtinRoundDecSig) evalDecimal(row chunk.Row) (*types.MyDecimal, bool, error) {
val, isNull, err := b.args[0].EvalDecimal(b.ctx, row)
if isNull || err != nil {
return nil, isNull, err
}
to := new(types.MyDecimal)
if err = val.Round(to, 0, types.ModeHalfUp); err != nil {
return nil, true, err
}
return to, false, nil
}
type builtinRoundWithFracRealSig struct {
baseBuiltinFunc
}
func (b *builtinRoundWithFracRealSig) Clone() builtinFunc {
newSig := &builtinRoundWithFracRealSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals ROUND(value, frac).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_round
func (b *builtinRoundWithFracRealSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
frac, isNull, err := b.args[1].EvalInt(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return types.Round(val, int(frac)), false, nil
}
type builtinRoundWithFracIntSig struct {
baseBuiltinFunc
}
func (b *builtinRoundWithFracIntSig) Clone() builtinFunc {
newSig := &builtinRoundWithFracIntSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalInt evals ROUND(value, frac).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_round
func (b *builtinRoundWithFracIntSig) evalInt(row chunk.Row) (int64, bool, error) {
val, isNull, err := b.args[0].EvalInt(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
frac, isNull, err := b.args[1].EvalInt(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return int64(types.Round(float64(val), int(frac))), false, nil
}
type builtinRoundWithFracDecSig struct {
baseBuiltinFunc
}
func (b *builtinRoundWithFracDecSig) Clone() builtinFunc {
newSig := &builtinRoundWithFracDecSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalDecimal evals ROUND(value, frac).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_round
func (b *builtinRoundWithFracDecSig) evalDecimal(row chunk.Row) (*types.MyDecimal, bool, error) {
val, isNull, err := b.args[0].EvalDecimal(b.ctx, row)
if isNull || err != nil {
return nil, isNull, err
}
frac, isNull, err := b.args[1].EvalInt(b.ctx, row)
if isNull || err != nil {
return nil, isNull, err
}
to := new(types.MyDecimal)
if err = val.Round(to, mathutil.Min(int(frac), b.tp.GetDecimal()), types.ModeHalfUp); err != nil {
return nil, true, err
}
return to, false, nil
}
type ceilFunctionClass struct {
baseFunctionClass
}
func (c *ceilFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (sig builtinFunc, err error) {
if err = c.verifyArgs(args); err != nil {
return nil, err
}
retTp, argTp := getEvalTp4FloorAndCeil(args[0])
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, retTp, argTp)
if err != nil {
return nil, err
}
setFlag4FloorAndCeil(bf.tp, args[0])
// ETInt or ETReal is set correctly by newBaseBuiltinFuncWithTp, only need to handle ETDecimal.
if retTp == types.ETDecimal {
bf.tp.SetFlen(args[0].GetType().GetFlen())
bf.tp.SetDecimal(0)
}
switch argTp {
case types.ETInt:
if retTp == types.ETInt {
sig = &builtinCeilIntToIntSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CeilIntToInt)
} else {
sig = &builtinCeilIntToDecSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CeilIntToDec)
}
case types.ETDecimal:
if retTp == types.ETInt {
sig = &builtinCeilDecToIntSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CeilDecToInt)
} else {
sig = &builtinCeilDecToDecSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CeilDecToDec)
}
default:
sig = &builtinCeilRealSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CeilReal)
}
return sig, nil
}
type builtinCeilRealSig struct {
baseBuiltinFunc
}
func (b *builtinCeilRealSig) Clone() builtinFunc {
newSig := &builtinCeilRealSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinCeilRealSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_ceil
func (b *builtinCeilRealSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return math.Ceil(val), false, nil
}
type builtinCeilIntToIntSig struct {
baseBuiltinFunc
}
func (b *builtinCeilIntToIntSig) Clone() builtinFunc {
newSig := &builtinCeilIntToIntSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalInt evals a builtinCeilIntToIntSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_ceil
func (b *builtinCeilIntToIntSig) evalInt(row chunk.Row) (int64, bool, error) {
return b.args[0].EvalInt(b.ctx, row)
}
type builtinCeilIntToDecSig struct {
baseBuiltinFunc
}
func (b *builtinCeilIntToDecSig) Clone() builtinFunc {
newSig := &builtinCeilIntToDecSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalDecimal evals a builtinCeilIntToDecSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_ceil
func (b *builtinCeilIntToDecSig) evalDecimal(row chunk.Row) (*types.MyDecimal, bool, error) {
val, isNull, err := b.args[0].EvalInt(b.ctx, row)
if isNull || err != nil {
return nil, true, err
}
if mysql.HasUnsignedFlag(b.args[0].GetType().GetFlag()) || val >= 0 {
return types.NewDecFromUint(uint64(val)), false, nil
}
return types.NewDecFromInt(val), false, nil
}
type builtinCeilDecToIntSig struct {
baseBuiltinFunc
}
func (b *builtinCeilDecToIntSig) Clone() builtinFunc {
newSig := &builtinCeilDecToIntSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalInt evals a builtinCeilDecToIntSig.
// Ceil receives a decimal argument and returns the smallest integer value
// not less than it.
func (b *builtinCeilDecToIntSig) evalInt(row chunk.Row) (int64, bool, error) {
val, isNull, err := b.args[0].EvalDecimal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
	// err here can only be ErrOverflow (which cannot happen here) or ErrTruncated (which can be ignored).
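	// For example, 2.3 truncates to 2 with ErrTruncated and is then bumped
	// to 3, while -2.3 truncates to -2, which is already the ceiling.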
res, err := val.ToInt()
if err == types.ErrTruncated {
err = nil
if !val.IsNegative() {
res = res + 1
}
}
return res, false, err
}
type builtinCeilDecToDecSig struct {
baseBuiltinFunc
}
func (b *builtinCeilDecToDecSig) Clone() builtinFunc {
newSig := &builtinCeilDecToDecSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalDecimal evals a builtinCeilDecToDecSig.
func (b *builtinCeilDecToDecSig) evalDecimal(row chunk.Row) (*types.MyDecimal, bool, error) {
val, isNull, err := b.args[0].EvalDecimal(b.ctx, row)
if isNull || err != nil {
return nil, isNull, err
}
res := new(types.MyDecimal)
if val.IsNegative() {
err = val.Round(res, 0, types.ModeTruncate)
return res, err != nil, err
}
err = val.Round(res, 0, types.ModeTruncate)
if err != nil || res.Compare(val) == 0 {
return res, err != nil, err
}
err = types.DecimalAdd(res, types.NewDecFromInt(1), res)
return res, err != nil, err
}
type floorFunctionClass struct {
baseFunctionClass
}
// getEvalTp4FloorAndCeil gets the types.EvalType of FLOOR and CEIL.
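// When the integer part of a decimal may not fit in an int64 (more than 18
// integer digits), the result type stays decimal; e.g. FLOOR or CEIL on a
// DECIMAL(30,2) column evaluates to a decimal rather than an int.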
func getEvalTp4FloorAndCeil(arg Expression) (retTp, argTp types.EvalType) {
fieldTp := arg.GetType()
retTp, argTp = types.ETInt, fieldTp.EvalType()
switch argTp {
case types.ETInt:
retTp = types.ETInt
case types.ETDecimal:
if fieldTp.GetFlen()-fieldTp.GetDecimal() > mysql.MaxIntWidth-2 { // len(math.MaxInt64) - 1
retTp = types.ETDecimal
}
default:
retTp, argTp = types.ETReal, types.ETReal
}
return retTp, argTp
}
// setFlag4FloorAndCeil sets return flag of FLOOR and CEIL.
func setFlag4FloorAndCeil(tp *types.FieldType, arg Expression) {
fieldTp := arg.GetType()
if (fieldTp.GetType() == mysql.TypeLong || fieldTp.GetType() == mysql.TypeLonglong || fieldTp.GetType() == mysql.TypeNewDecimal) && mysql.HasUnsignedFlag(fieldTp.GetFlag()) {
tp.AddFlag(mysql.UnsignedFlag)
}
// TODO: when argument type is timestamp, add not null flag.
}
func (c *floorFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (sig builtinFunc, err error) {
if err = c.verifyArgs(args); err != nil {
return nil, err
}
retTp, argTp := getEvalTp4FloorAndCeil(args[0])
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, retTp, argTp)
if err != nil {
return nil, err
}
setFlag4FloorAndCeil(bf.tp, args[0])
// ETInt or ETReal is set correctly by newBaseBuiltinFuncWithTp, only need to handle ETDecimal.
if retTp == types.ETDecimal {
bf.tp.SetFlen(args[0].GetType().GetFlen())
bf.tp.SetDecimal(0)
}
switch argTp {
case types.ETInt:
if retTp == types.ETInt {
sig = &builtinFloorIntToIntSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_FloorIntToInt)
} else {
sig = &builtinFloorIntToDecSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_FloorIntToDec)
}
case types.ETDecimal:
if retTp == types.ETInt {
sig = &builtinFloorDecToIntSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_FloorDecToInt)
} else {
sig = &builtinFloorDecToDecSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_FloorDecToDec)
}
default:
sig = &builtinFloorRealSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_FloorReal)
}
return sig, nil
}
type builtinFloorRealSig struct {
baseBuiltinFunc
}
func (b *builtinFloorRealSig) Clone() builtinFunc {
newSig := &builtinFloorRealSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinFloorRealSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_floor
func (b *builtinFloorRealSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return math.Floor(val), false, nil
}
type builtinFloorIntToIntSig struct {
baseBuiltinFunc
}
func (b *builtinFloorIntToIntSig) Clone() builtinFunc {
newSig := &builtinFloorIntToIntSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalInt evals a builtinFloorIntToIntSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_floor
func (b *builtinFloorIntToIntSig) evalInt(row chunk.Row) (int64, bool, error) {
return b.args[0].EvalInt(b.ctx, row)
}
type builtinFloorIntToDecSig struct {
baseBuiltinFunc
}
func (b *builtinFloorIntToDecSig) Clone() builtinFunc {
newSig := &builtinFloorIntToDecSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalDecimal evals a builtinFloorIntToDecSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_floor
func (b *builtinFloorIntToDecSig) evalDecimal(row chunk.Row) (*types.MyDecimal, bool, error) {
val, isNull, err := b.args[0].EvalInt(b.ctx, row)
if isNull || err != nil {
return nil, true, err
}
if mysql.HasUnsignedFlag(b.args[0].GetType().GetFlag()) || val >= 0 {
return types.NewDecFromUint(uint64(val)), false, nil
}
return types.NewDecFromInt(val), false, nil
}
type builtinFloorDecToIntSig struct {
baseBuiltinFunc
}
func (b *builtinFloorDecToIntSig) Clone() builtinFunc {
newSig := &builtinFloorDecToIntSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalInt evals a builtinFloorDecToIntSig.
// Floor receives a decimal argument and returns the largest integer value
// not greater than it.
func (b *builtinFloorDecToIntSig) evalInt(row chunk.Row) (int64, bool, error) {
val, isNull, err := b.args[0].EvalDecimal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
	// err here can only be ErrOverflow (which cannot happen here) or ErrTruncated (which can be ignored).
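	// For example, -2.3 truncates to -2 with ErrTruncated and is then
	// decremented to -3, while 2.3 truncates to 2, which is already the floor.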
res, err := val.ToInt()
if err == types.ErrTruncated {
err = nil
if val.IsNegative() {
res--
}
}
return res, false, err
}
type builtinFloorDecToDecSig struct {
baseBuiltinFunc
}
func (b *builtinFloorDecToDecSig) Clone() builtinFunc {
newSig := &builtinFloorDecToDecSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalDecimal evals a builtinFloorDecToDecSig.
func (b *builtinFloorDecToDecSig) evalDecimal(row chunk.Row) (*types.MyDecimal, bool, error) {
val, isNull, err := b.args[0].EvalDecimal(b.ctx, row)
if isNull || err != nil {
return nil, true, err
}
res := new(types.MyDecimal)
if !val.IsNegative() {
err = val.Round(res, 0, types.ModeTruncate)
return res, err != nil, err
}
err = val.Round(res, 0, types.ModeTruncate)
if err != nil || res.Compare(val) == 0 {
return res, err != nil, err
}
	err = types.DecimalSub(res, res, types.NewDecFromInt(1))
return res, err != nil, err
}
type logFunctionClass struct {
baseFunctionClass
}
func (c *logFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
var (
sig builtinFunc
bf baseBuiltinFunc
argsLen = len(args)
)
var err error
if argsLen == 1 {
bf, err = newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
} else {
bf, err = newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
}
if argsLen == 1 {
sig = &builtinLog1ArgSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Log1Arg)
} else {
sig = &builtinLog2ArgsSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Log2Args)
}
return sig, nil
}
type builtinLog1ArgSig struct {
baseBuiltinFunc
}
func (b *builtinLog1ArgSig) Clone() builtinFunc {
newSig := &builtinLog1ArgSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinLog1ArgSig, corresponding to log(x).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_log
func (b *builtinLog1ArgSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if val <= 0 {
b.ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInvalidArgumentForLogarithm)
return 0, true, nil
}
return math.Log(val), false, nil
}
type builtinLog2ArgsSig struct {
baseBuiltinFunc
}
func (b *builtinLog2ArgsSig) Clone() builtinFunc {
newSig := &builtinLog2ArgsSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinLog2ArgsSig, corresponding to log(b, x).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_log
func (b *builtinLog2ArgsSig) evalReal(row chunk.Row) (float64, bool, error) {
val1, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
val2, isNull, err := b.args[1].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if val1 <= 0 || val1 == 1 || val2 <= 0 {
b.ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInvalidArgumentForLogarithm)
return 0, true, nil
}
return math.Log(val2) / math.Log(val1), false, nil
}
type log2FunctionClass struct {
baseFunctionClass
}
func (c *log2FunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinLog2Sig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Log2)
return sig, nil
}
type builtinLog2Sig struct {
baseBuiltinFunc
}
func (b *builtinLog2Sig) Clone() builtinFunc {
newSig := &builtinLog2Sig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinLog2Sig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_log2
func (b *builtinLog2Sig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if val <= 0 {
b.ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInvalidArgumentForLogarithm)
return 0, true, nil
}
return math.Log2(val), false, nil
}
type log10FunctionClass struct {
baseFunctionClass
}
func (c *log10FunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinLog10Sig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Log10)
return sig, nil
}
type builtinLog10Sig struct {
baseBuiltinFunc
}
func (b *builtinLog10Sig) Clone() builtinFunc {
newSig := &builtinLog10Sig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinLog10Sig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_log10
func (b *builtinLog10Sig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if val <= 0 {
b.ctx.GetSessionVars().StmtCtx.AppendWarning(ErrInvalidArgumentForLogarithm)
return 0, true, nil
}
return math.Log10(val), false, nil
}
type randFunctionClass struct {
baseFunctionClass
}
func (c *randFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
var sig builtinFunc
var argTps []types.EvalType
if len(args) > 0 {
argTps = []types.EvalType{types.ETInt}
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, argTps...)
if err != nil {
return nil, err
}
bt := bf
if len(args) == 0 {
sig = &builtinRandSig{bt, ctx.GetSessionVars().Rng}
sig.setPbCode(tipb.ScalarFuncSig_Rand)
} else if _, isConstant := args[0].(*Constant); isConstant {
// According to MySQL manual:
// If an integer argument N is specified, it is used as the seed value:
// With a constant initializer argument, the seed is initialized once
// when the statement is prepared, prior to execution.
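		// For example, RAND(3) returns the same sequence of values each
		// time the statement is executed.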
seed, isNull, err := args[0].EvalInt(ctx, chunk.Row{})
if err != nil {
return nil, err
}
if isNull {
			// When the seed is NULL, use 0 as the seed, matching
			// MySQL's behavior.
seed = 0
}
sig = &builtinRandSig{bt, utilMath.NewWithSeed(seed)}
sig.setPbCode(tipb.ScalarFuncSig_Rand)
} else {
sig = &builtinRandWithSeedFirstGenSig{bt}
sig.setPbCode(tipb.ScalarFuncSig_RandWithSeedFirstGen)
}
return sig, nil
}
type builtinRandSig struct {
baseBuiltinFunc
mysqlRng *utilMath.MysqlRng
}
func (b *builtinRandSig) Clone() builtinFunc {
newSig := &builtinRandSig{mysqlRng: b.mysqlRng}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals RAND().
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_rand
func (b *builtinRandSig) evalReal(row chunk.Row) (float64, bool, error) {
res := b.mysqlRng.Gen()
return res, false, nil
}
type builtinRandWithSeedFirstGenSig struct {
baseBuiltinFunc
}
func (b *builtinRandWithSeedFirstGenSig) Clone() builtinFunc {
newSig := &builtinRandWithSeedFirstGenSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals RAND(N).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_rand
func (b *builtinRandWithSeedFirstGenSig) evalReal(row chunk.Row) (float64, bool, error) {
seed, isNull, err := b.args[0].EvalInt(b.ctx, row)
if err != nil {
return 0, true, err
}
	// b.args[0] is guaranteed to be a non-constant (such as a column name) in
	// builtinRandWithSeedFirstGenSig, so the seed is re-initialized from the
	// evaluated value on each invocation of RAND().
var rng *utilMath.MysqlRng
if !isNull {
rng = utilMath.NewWithSeed(seed)
} else {
rng = utilMath.NewWithSeed(0)
}
return rng.Gen(), false, nil
}
type powFunctionClass struct {
baseFunctionClass
}
func (c *powFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinPowSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Pow)
return sig, nil
}
type builtinPowSig struct {
baseBuiltinFunc
}
func (b *builtinPowSig) Clone() builtinFunc {
newSig := &builtinPowSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals POW(x, y).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_pow
func (b *builtinPowSig) evalReal(row chunk.Row) (float64, bool, error) {
x, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
y, isNull, err := b.args[1].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
power := math.Pow(x, y)
if math.IsInf(power, -1) || math.IsInf(power, 1) || math.IsNaN(power) {
return 0, false, types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("pow(%s, %s)", strconv.FormatFloat(x, 'f', -1, 64), strconv.FormatFloat(y, 'f', -1, 64)))
}
return power, false, nil
}
type roundFunctionClass struct {
baseFunctionClass
}
type convFunctionClass struct {
baseFunctionClass
}
func (c *convFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETString, types.ETString, types.ETInt, types.ETInt)
if err != nil {
return nil, err
}
charset, collate := ctx.GetSessionVars().GetCharsetInfo()
bf.tp.SetCharset(charset)
bf.tp.SetCollate(collate)
bf.tp.SetFlen(64)
sig := &builtinConvSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Conv)
return sig, nil
}
type builtinConvSig struct {
baseBuiltinFunc
}
func (b *builtinConvSig) Clone() builtinFunc {
newSig := &builtinConvSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalString evals CONV(N,from_base,to_base).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_conv.
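// For example, CONV('a',16,2) returns '1010' and CONV('6E',18,8) returns '172'.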
func (b *builtinConvSig) evalString(row chunk.Row) (res string, isNull bool, err error) {
var str string
switch x := b.args[0].(type) {
case *Constant:
if x.Value.Kind() == types.KindBinaryLiteral {
str = x.Value.GetBinaryLiteral().ToBitLiteralString(true)
}
case *ScalarFunction:
if x.FuncName.L == ast.Cast {
arg0 := x.GetArgs()[0]
if arg0.GetType().Hybrid() || IsBinaryLiteral(arg0) {
str, isNull, err = arg0.EvalString(b.ctx, row)
if isNull || err != nil {
return str, isNull, err
}
d := types.NewStringDatum(str)
str = d.GetBinaryLiteral().ToBitLiteralString(true)
}
}
}
fromBase, isNull, err := b.args[1].EvalInt(b.ctx, row)
if isNull || err != nil {
return res, isNull, err
}
toBase, isNull, err := b.args[2].EvalInt(b.ctx, row)
if isNull || err != nil {
return res, isNull, err
}
if len(str) == 0 {
str, isNull, err = b.args[0].EvalString(b.ctx, row)
if isNull || err != nil {
return res, isNull, err
}
} else {
str, isNull, err = b.conv(str[2:], 2, fromBase)
if err != nil {
return str, isNull, err
}
}
return b.conv(str, fromBase, toBase)
}
func (b *builtinConvSig) conv(str string, fromBase, toBase int64) (res string, isNull bool, err error) {
var (
signed bool
negative bool
ignoreSign bool
)
if fromBase < 0 {
fromBase = -fromBase
signed = true
}
if toBase < 0 {
toBase = -toBase
ignoreSign = true
}
if fromBase > 36 || fromBase < 2 || toBase > 36 || toBase < 2 {
return res, true, nil
}
str = getValidPrefix(strings.TrimSpace(str), fromBase)
if len(str) == 0 {
return "0", false, nil
}
if str[0] == '-' {
negative = true
str = str[1:]
}
val, err := strconv.ParseUint(str, int(fromBase), 64)
if err != nil {
		return res, false, types.ErrOverflow.GenWithStackByArgs("BIGINT UNSIGNED", str)
}
if signed {
if negative && val > -math.MinInt64 {
val = -math.MinInt64
}
if !negative && val > math.MaxInt64 {
val = math.MaxInt64
}
}
if negative {
val = -val
}
if int64(val) < 0 {
negative = true
} else {
negative = false
}
if ignoreSign && negative {
val = 0 - val
}
s := strconv.FormatUint(val, int(toBase))
if negative && ignoreSign {
s = "-" + s
}
res = strings.ToUpper(s)
return res, false, nil
}
type crc32FunctionClass struct {
baseFunctionClass
}
func (c *crc32FunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETInt, types.ETString)
if err != nil {
return nil, err
}
bf.tp.SetFlen(10)
bf.tp.AddFlag(mysql.UnsignedFlag)
sig := &builtinCRC32Sig{bf}
sig.setPbCode(tipb.ScalarFuncSig_CRC32)
return sig, nil
}
type builtinCRC32Sig struct {
baseBuiltinFunc
}
func (b *builtinCRC32Sig) Clone() builtinFunc {
newSig := &builtinCRC32Sig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalInt evals a CRC32(expr).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_crc32
func (b *builtinCRC32Sig) evalInt(row chunk.Row) (int64, bool, error) {
x, isNull, err := b.args[0].EvalString(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
r := crc32.ChecksumIEEE([]byte(x))
return int64(r), false, nil
}
type signFunctionClass struct {
baseFunctionClass
}
func (c *signFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETInt, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinSignSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Sign)
return sig, nil
}
type builtinSignSig struct {
baseBuiltinFunc
}
func (b *builtinSignSig) Clone() builtinFunc {
newSig := &builtinSignSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalInt evals SIGN(v).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_sign
func (b *builtinSignSig) evalInt(row chunk.Row) (int64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if val > 0 {
return 1, false, nil
} else if val == 0 {
return 0, false, nil
} else {
return -1, false, nil
}
}
type sqrtFunctionClass struct {
baseFunctionClass
}
func (c *sqrtFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinSqrtSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Sqrt)
return sig, nil
}
type builtinSqrtSig struct {
baseBuiltinFunc
}
func (b *builtinSqrtSig) Clone() builtinFunc {
newSig := &builtinSqrtSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a SQRT(x).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_sqrt
func (b *builtinSqrtSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if val < 0 {
return 0, true, nil
}
return math.Sqrt(val), false, nil
}
type acosFunctionClass struct {
baseFunctionClass
}
func (c *acosFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinAcosSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Acos)
return sig, nil
}
type builtinAcosSig struct {
baseBuiltinFunc
}
func (b *builtinAcosSig) Clone() builtinFunc {
newSig := &builtinAcosSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinAcosSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_acos
func (b *builtinAcosSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if val < -1 || val > 1 {
return 0, true, nil
}
return math.Acos(val), false, nil
}
type asinFunctionClass struct {
baseFunctionClass
}
func (c *asinFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinAsinSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Asin)
return sig, nil
}
type builtinAsinSig struct {
baseBuiltinFunc
}
func (b *builtinAsinSig) Clone() builtinFunc {
newSig := &builtinAsinSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinAsinSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_asin
func (b *builtinAsinSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if val < -1 || val > 1 {
return 0, true, nil
}
return math.Asin(val), false, nil
}
type atanFunctionClass struct {
baseFunctionClass
}
func (c *atanFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
var (
sig builtinFunc
bf baseBuiltinFunc
argsLen = len(args)
)
var err error
if argsLen == 1 {
bf, err = newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
} else {
bf, err = newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
}
if argsLen == 1 {
sig = &builtinAtan1ArgSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Atan1Arg)
} else {
sig = &builtinAtan2ArgsSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Atan2Args)
}
return sig, nil
}
type builtinAtan1ArgSig struct {
baseBuiltinFunc
}
func (b *builtinAtan1ArgSig) Clone() builtinFunc {
newSig := &builtinAtan1ArgSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinAtan1ArgSig, corresponding to atan(x).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_atan
func (b *builtinAtan1ArgSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return math.Atan(val), false, nil
}
type builtinAtan2ArgsSig struct {
baseBuiltinFunc
}
func (b *builtinAtan2ArgsSig) Clone() builtinFunc {
newSig := &builtinAtan2ArgsSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinAtan2ArgsSig, corresponding to atan(y, x).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_atan
func (b *builtinAtan2ArgsSig) evalReal(row chunk.Row) (float64, bool, error) {
val1, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
val2, isNull, err := b.args[1].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return math.Atan2(val1, val2), false, nil
}
type cosFunctionClass struct {
baseFunctionClass
}
func (c *cosFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinCosSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Cos)
return sig, nil
}
type builtinCosSig struct {
baseBuiltinFunc
}
func (b *builtinCosSig) Clone() builtinFunc {
newSig := &builtinCosSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinCosSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_cos
func (b *builtinCosSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return math.Cos(val), false, nil
}
type cotFunctionClass struct {
baseFunctionClass
}
func (c *cotFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinCotSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Cot)
return sig, nil
}
type builtinCotSig struct {
baseBuiltinFunc
}
func (b *builtinCotSig) Clone() builtinFunc {
newSig := &builtinCotSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinCotSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_cot
func (b *builtinCotSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
tan := math.Tan(val)
if tan != 0 {
cot := 1 / tan
if !math.IsInf(cot, 0) && !math.IsNaN(cot) {
return cot, false, nil
}
}
return 0, false, types.ErrOverflow.GenWithStackByArgs("DOUBLE", fmt.Sprintf("cot(%s)", strconv.FormatFloat(val, 'f', -1, 64)))
}
type degreesFunctionClass struct {
baseFunctionClass
}
func (c *degreesFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinDegreesSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Degrees)
return sig, nil
}
type builtinDegreesSig struct {
baseBuiltinFunc
}
func (b *builtinDegreesSig) Clone() builtinFunc {
newSig := &builtinDegreesSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinDegreesSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_degrees
func (b *builtinDegreesSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
res := val * 180 / math.Pi
return res, false, nil
}
type expFunctionClass struct {
baseFunctionClass
}
func (c *expFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinExpSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Exp)
return sig, nil
}
type builtinExpSig struct {
baseBuiltinFunc
}
func (b *builtinExpSig) Clone() builtinFunc {
newSig := &builtinExpSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinExpSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_exp
func (b *builtinExpSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
exp := math.Exp(val)
if math.IsInf(exp, 0) || math.IsNaN(exp) {
s := fmt.Sprintf("exp(%s)", b.args[0].String())
return 0, false, types.ErrOverflow.GenWithStackByArgs("DOUBLE", s)
}
return exp, false, nil
}
type piFunctionClass struct {
baseFunctionClass
}
func (c *piFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
var (
bf baseBuiltinFunc
sig builtinFunc
)
var err error
bf, err = newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal)
if err != nil {
return nil, err
}
bf.tp.SetDecimal(6)
bf.tp.SetFlen(8)
sig = &builtinPISig{bf}
sig.setPbCode(tipb.ScalarFuncSig_PI)
return sig, nil
}
type builtinPISig struct {
baseBuiltinFunc
}
func (b *builtinPISig) Clone() builtinFunc {
newSig := &builtinPISig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinPISig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_pi
func (b *builtinPISig) evalReal(_ chunk.Row) (float64, bool, error) {
return float64(math.Pi), false, nil
}
type radiansFunctionClass struct {
baseFunctionClass
}
func (c *radiansFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinRadiansSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Radians)
return sig, nil
}
type builtinRadiansSig struct {
baseBuiltinFunc
}
func (b *builtinRadiansSig) Clone() builtinFunc {
newSig := &builtinRadiansSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals RADIANS(X).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_radians
func (b *builtinRadiansSig) evalReal(row chunk.Row) (float64, bool, error) {
x, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return x * math.Pi / 180, false, nil
}
type sinFunctionClass struct {
baseFunctionClass
}
func (c *sinFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinSinSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Sin)
return sig, nil
}
type builtinSinSig struct {
baseBuiltinFunc
}
func (b *builtinSinSig) Clone() builtinFunc {
newSig := &builtinSinSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinSinSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_sin
func (b *builtinSinSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return math.Sin(val), false, nil
}
type tanFunctionClass struct {
baseFunctionClass
}
func (c *tanFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, types.ETReal, types.ETReal)
if err != nil {
return nil, err
}
sig := &builtinTanSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_Tan)
return sig, nil
}
type builtinTanSig struct {
baseBuiltinFunc
}
func (b *builtinTanSig) Clone() builtinFunc {
newSig := &builtinTanSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a builtinTanSig.
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_tan
func (b *builtinTanSig) evalReal(row chunk.Row) (float64, bool, error) {
val, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return math.Tan(val), false, nil
}
type truncateFunctionClass struct {
baseFunctionClass
}
func (c *truncateFunctionClass) getFunction(ctx sessionctx.Context, args []Expression) (builtinFunc, error) {
if err := c.verifyArgs(args); err != nil {
return nil, err
}
argTp := args[0].GetType().EvalType()
if argTp.IsStringKind() {
argTp = types.ETReal
}
bf, err := newBaseBuiltinFuncWithTp(ctx, c.funcName, args, argTp, argTp, types.ETInt)
if err != nil {
return nil, err
}
// ETInt or ETReal is set correctly by newBaseBuiltinFuncWithTp, only need to handle ETDecimal.
if argTp == types.ETDecimal {
bf.tp.SetDecimal(calculateDecimal4RoundAndTruncate(ctx, args, argTp))
bf.tp.SetFlen(args[0].GetType().GetFlen() - args[0].GetType().GetDecimal() + bf.tp.GetDecimal())
}
bf.tp.AddFlag(args[0].GetType().GetFlag())
var sig builtinFunc
switch argTp {
case types.ETInt:
if mysql.HasUnsignedFlag(args[0].GetType().GetFlag()) {
sig = &builtinTruncateUintSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_TruncateUint)
} else {
sig = &builtinTruncateIntSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_TruncateInt)
}
case types.ETReal:
sig = &builtinTruncateRealSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_TruncateReal)
case types.ETDecimal:
sig = &builtinTruncateDecimalSig{bf}
sig.setPbCode(tipb.ScalarFuncSig_TruncateDecimal)
default:
return nil, errIncorrectArgs.GenWithStackByArgs("truncate")
}
return sig, nil
}
type builtinTruncateDecimalSig struct {
baseBuiltinFunc
}
func (b *builtinTruncateDecimalSig) Clone() builtinFunc {
newSig := &builtinTruncateDecimalSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalDecimal evals a TRUNCATE(X,D).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_truncate
func (b *builtinTruncateDecimalSig) evalDecimal(row chunk.Row) (*types.MyDecimal, bool, error) {
x, isNull, err := b.args[0].EvalDecimal(b.ctx, row)
if isNull || err != nil {
return nil, isNull, err
}
d, isNull, err := b.args[1].EvalInt(b.ctx, row)
if isNull || err != nil {
return nil, isNull, err
}
result := new(types.MyDecimal)
if err := x.Round(result, mathutil.Min(int(d), b.getRetTp().GetDecimal()), types.ModeTruncate); err != nil {
return nil, true, err
}
return result, false, nil
}
type builtinTruncateRealSig struct {
baseBuiltinFunc
}
func (b *builtinTruncateRealSig) Clone() builtinFunc {
newSig := &builtinTruncateRealSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalReal evals a TRUNCATE(X,D).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_truncate
func (b *builtinTruncateRealSig) evalReal(row chunk.Row) (float64, bool, error) {
x, isNull, err := b.args[0].EvalReal(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
d, isNull, err := b.args[1].EvalInt(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
return types.Truncate(x, int(d)), false, nil
}
type builtinTruncateIntSig struct {
baseBuiltinFunc
}
func (b *builtinTruncateIntSig) Clone() builtinFunc {
newSig := &builtinTruncateIntSig{}
newSig.cloneFrom(&b.baseBuiltinFunc)
return newSig
}
// evalInt evals a TRUNCATE(X,D).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_truncate
func (b *builtinTruncateIntSig) evalInt(row chunk.Row) (int64, bool, error) {
x, isNull, err := b.args[0].EvalInt(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if mysql.HasUnsignedFlag(b.args[1].GetType().GetFlag()) {
return x, false, nil
}
d, isNull, err := b.args[1].EvalInt(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if d >= 0 {
return x, false, nil
}
// -MinInt = MinInt, special case
if d == mathutil.MinInt {
return 0, false, nil
}
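	// Zero out the last -d decimal digits via integer division and multiplication.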
shift := int64(math.Pow10(int(-d)))
return x / shift * shift, false, nil
}
type builtinTruncateUintSig struct {
	baseBuiltinFunc
}
func (b *builtinTruncateUintSig) Clone() builtinFunc {
	newSig := &builtinTruncateUintSig{}
	newSig.cloneFrom(&b.baseBuiltinFunc)
	return newSig
}
// evalInt evals a TRUNCATE(X,D).
// See https://dev.mysql.com/doc/refman/5.7/en/mathematical-functions.html#function_truncate
func (b *builtinTruncateUintSig) evalInt(row chunk.Row) (int64, bool, error) {
x, isNull, err := b.args[0].EvalInt(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if mysql.HasUnsignedFlag(b.args[1].GetType().GetFlag()) {
return x, false, nil
}
uintx := uint64(x)
d, isNull, err := b.args[1].EvalInt(b.ctx, row)
if isNull || err != nil {
return 0, isNull, err
}
if d >= 0 {
return x, false, nil
}
// -MinInt = MinInt, special case
if d == mathutil.MinInt {
return 0, false, nil
}
shift := uint64(math.Pow10(int(-d)))
return int64(uintx / shift * shift), false, nil
}
|
getEvalTp4FloorAndCeil
|
components.rs
|
use gtk::prelude::{BoxExt, ButtonExt, DialogExt, GtkWindowExt, ToggleButtonExt, WidgetExt};
use relm4::{
send, AppUpdate, ComponentUpdate, Components, Model, RelmApp, RelmComponent, Sender,
WidgetPlus, Widgets,
};
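// Header bar component: three linked toggle buttons that forward the chosen
// mode to the parent application as an AppMsg.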
enum HeaderMsg {
View,
Edit,
Export,
}
struct HeaderModel {}
impl Model for HeaderModel {
type Msg = HeaderMsg;
type Widgets = HeaderWidgets;
type Components = ();
}
impl ComponentUpdate<AppModel> for HeaderModel {
fn init_model(_parent_model: &AppModel) -> Self {
HeaderModel {}
}
fn update(
&mut self,
msg: HeaderMsg,
_components: &(),
_sender: Sender<HeaderMsg>,
parent_sender: Sender<AppMsg>,
) {
match msg {
HeaderMsg::View => {
send!(parent_sender, AppMsg::SetMode(AppMode::View));
}
HeaderMsg::Edit => {
send!(parent_sender, AppMsg::SetMode(AppMode::Edit));
}
HeaderMsg::Export => {
send!(parent_sender, AppMsg::SetMode(AppMode::Export));
}
}
}
}
#[relm4_macros::widget]
impl Widgets<HeaderModel, AppModel> for HeaderWidgets {
view! {
gtk::HeaderBar {
            set_title_widget = Some(&gtk::Box) {
                add_class_name: "linked",
                append: group = &gtk::ToggleButton {
set_label: "View",
set_active: true,
connect_toggled(sender) => move |btn| {
if btn.is_active() {
send!(sender, HeaderMsg::View);
}
},
},
                append = &gtk::ToggleButton {
set_label: "Edit",
set_group: Some(&group),
connect_toggled(sender) => move |btn| {
if btn.is_active() {
send!(sender, HeaderMsg::Edit);
}
},
},
                append = &gtk::ToggleButton {
set_label: "Export",
set_group: Some(&group),
connect_toggled(sender) => move |btn| {
if btn.is_active() {
send!(sender, HeaderMsg::Export);
}
},
},
}
}
}
}
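// Close-confirmation dialog component; hidden until the parent requests it.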
struct DialogModel {
hidden: bool,
}
enum DialogMsg {
Show,
Accept,
Cancel,
}
impl Model for DialogModel {
type Msg = DialogMsg;
type Widgets = DialogWidgets;
type Components = ();
}
impl ComponentUpdate<AppModel> for DialogModel {
fn init_model(_parent_model: &AppModel) -> Self {
DialogModel { hidden: true }
}
fn update(
&mut self,
msg: DialogMsg,
_components: &(),
_sender: Sender<DialogMsg>,
parent_sender: Sender<AppMsg>,
) {
match msg {
DialogMsg::Show => self.hidden = false,
DialogMsg::Accept => {
self.hidden = true;
send!(parent_sender, AppMsg::Close);
}
DialogMsg::Cancel => self.hidden = true,
}
}
}
#[relm4_macros::widget]
impl Widgets<DialogModel, AppModel> for DialogWidgets {
view! {
gtk::MessageDialog {
set_transient_for: Some(&parent_widgets.main_window),
set_modal: true,
set_visible: watch!(!model.hidden),
set_text: Some("Do you want to close before saving?"),
set_secondary_text: Some("All unsaved changes will be lost"),
add_button: args!("Close", gtk::ResponseType::Accept),
add_button: args!("Cancel", gtk::ResponseType::Cancel),
connect_response(sender) => move |_, resp| {
send!(sender, if resp == gtk::ResponseType::Accept {
DialogMsg::Accept
} else {
DialogMsg::Cancel
});
}
}
}
}
struct AppComponents {
header: RelmComponent<HeaderModel, AppModel>,
dialog: RelmComponent<DialogModel, AppModel>,
}
impl Components<AppModel> for AppComponents {
fn init_components(
parent_model: &AppModel,
parent_widgets: &AppWidgets,
parent_sender: Sender<AppMsg>,
) -> Self
    {
        AppComponents {
            header: RelmComponent::new(parent_model, parent_widgets, parent_sender.clone()),
            dialog: RelmComponent::new(parent_model, parent_widgets, parent_sender),
        }
    }
}
#[derive(Debug)]
enum AppMode {
View,
Edit,
Export,
}
enum AppMsg {
SetMode(AppMode),
CloseRequest,
Close,
}
struct AppModel {
mode: AppMode,
}
impl Model for AppModel {
type Msg = AppMsg;
type Widgets = AppWidgets;
type Components = AppComponents;
}
#[relm4_macros::widget]
impl Widgets<AppModel, ()> for AppWidgets {
view! {
main_window = gtk::ApplicationWindow {
set_default_width: 500,
set_default_height: 250,
set_titlebar: component!(Some(components.header.root_widget())),
            set_child = Some(&gtk::Label) {
set_label: watch!(&format!("Placeholder for {:?}", model.mode)),
},
connect_close_request(sender) => move |_| {
send!(sender, AppMsg::CloseRequest);
gtk::Inhibit(true)
}
}
}
}
impl AppUpdate for AppModel {
fn update(&mut self, msg: AppMsg, components: &AppComponents, _sender: Sender<AppMsg>) -> bool {
match msg {
AppMsg::SetMode(mode) => {
self.mode = mode;
}
AppMsg::CloseRequest => {
components.dialog.send(DialogMsg::Show).unwrap();
}
AppMsg::Close => {
return false;
}
}
true
}
}
fn main() {
let model = AppModel {
mode: AppMode::View,
};
let relm = RelmApp::new(model);
relm.run();
}
|
analyze.py
|
#! /usr/bin/env python3
""" -------------------------------
analyze.py
Copyright (C) 2018 RISE
This code was produced by RISE
The 2013-04-10 version
bonsai/src_v02/analyze.py
simple analysis of pandas dataframes data
such as
1. find duplicated rows
2. number of unique values in a column
3. number of unique values in common
between two columns in two different
files
4.
------------------------------------"""
import global_settings as gs
import numpy as np
import pandas as pd
import bonsai_io as bio
import common
import copy
def nr_of_unique_rows(df):
d = df.drop_duplicates()
return len(d)
def nr_of_unique_values_in_cols(df, cols):
c = df.drop_duplicates(subset = cols)
return len(c)
def nr_of_unique_values(df, col):
c = df[col].dropna()
c = c.drop_duplicates()
return len(c)
"""
def nr_of_unique_numeric_values(df, col):
c = df[col].dropna()
c = c.drop_duplicates()
c = c.str.isnumeric()
c = c[c].index.values
"""
def nr_of_nonnan_values(df, col):
c = df[col].dropna()
return len(c)
def nr_of_unique_digital_values(df, col):
c = df[col].dropna()
c = c.drop_duplicates()
c = c.str.isdigit()
c = c[c].index.values
# df = df.drop_duplicates(subset = col)
# df = df[ df[col].dropna().str.isdigit() ]
# df = df[ df[col].str.contains('\d', regex=True) ]
return len(c)
def duplicated_rows(df):
    # Select the rows that are exact duplicates of an earlier row, without
    # mutating the caller's dataframe.
    return df[df.duplicated()]
def print_duplicated_rows(df, nr):
dup = duplicated_rows(df)
print('Nr of rows in total', len(df))
print('Nr of duplicated rows', len(dup))
    nr = min(nr, len(dup))
if nr > 0:
print('the first', nr,' of them')
print(dup[0:nr])
return dup
def unique_number_values(df, col):
df = df.drop_duplicates(subset = col)
    df = df[df[col].str.contains(r'\d', regex=True)]
return df
def info(df, name = ''):
print()
if name != '':
print()
print('--------------------------------------------------')
print()
print('\tInfo on the file\n\t' + name)
print()
print('--------------------------------------------------')
print()
df_unique_nr = nr_of_unique_rows(df)
print(' shape', df.shape)
print(' unique rows', df_unique_nr)
for c in df.columns:
print()
print('\tInfo on non-nan values of column', c)
print()
nonnan_nr = nr_of_nonnan_values(df, c)
unique_nr = nr_of_unique_values(df, c)
digital_nr = nr_of_unique_digital_values(df, c)
# numeric_nr = nr_of_unique_numeric_values(df, c)
print('non-nan values', nonnan_nr)
print(' unique values', unique_nr)
print('digital values', digital_nr)
# print('numeric values', unique_nr)
print()
# return unique_number_values(df, 'ICD10')
# df = df[ df[c].str.contains('\d', regex=True) ]
def readall():
dia = bio.read_generated_dia()
dgr = bio.read_diagroups()
per = bio.readperson()
ctr = bio.readcontrol()
inc = bio.readincare()
nic = bio.readnicare()
dru = bio.readdrug()
dcl = bio.readdrugclasses()
tre = bio.readtreatment()
sur = bio.readsurgery()
cau = bio.readcause()
data = [
dia,
dgr,
per,
ctr,
inc,
nic,
dru,
dcl,
tre,
sur,
cau
]
name = [
'diagnos ',
'diagnosgrupp ',
'person ',
'kontrollgrupp ',
        'sluten vård ',
        'öppen vård ',
        'läkemedel ',
        'läkemedelsgrupper',
'behandling ',
'kirurgi ',
'orsak ',
]
return data, name
def info_on_all():
data, name = readall()
for i in range(0, len(name)):
info(data[i], name[i])
def compare_lopnr(dfx, dfy, namex = 'data 1', namey = 'data 2'):
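    """Compare the sets of 'LopNr' identifiers in dfx and dfy; print the
    overlap counts and return the identifiers unique to each frame."""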
xs = list(dfx['LopNr'].values)
ys = list(dfy['LopNr'].values)
sx = set(xs)
sy = set(ys)
cut = sx & sy
ux = sx - sy
uy = sy - sx
print()
# print('shape ' + namex + '\t\t', dfx.shape)
# print('shape ' + namey + '\t\t', dfy.shape)
# print('unique Lopnr ' + namex + '\t', len(xs))
# print('unique Lopnr ' + namey + '\t', len(ys))
print('common Lopnr\t\t\t', len(cut))
print('Lopnr in ' + namex + ' only\t', len(ux))
print('Lopnr in ' + namey + ' only\t', len(uy))
print()
ux = list(ux)
uy = list(uy)
    ux.sort()
    uy.sort()
return ux, uy
def readlopnr():
dia = bio.read_generated_dia()
per = bio.readperson()
ctr = bio.readcontrol()
inc = bio.readincare()
nic = bio.readnicare()
dru = bio.readdrug()
tre = bio.readtreatment()
sur = bio.readsurgery()
cau = bio.readcause()
data = [dia, per, ctr, inc, nic, dru, tre, sur, cau]
name = [
'diagnos ',
'person ',
'kontrollgrupp',
        'sluten vård ',
        'öppen vård ',
        'läkemedel ',
'behandling ',
'kirurgi ',
'orsak ',
]
return data, name
def pairwise_lopnr_comparisions():
data, name = readlopnr()
for i in range(0, len(name)):
for j in range(i+1, len(name)):
print()
print('--------------------------------------------------')
print()
print('\tComparing ' + name[i] + ' with ' + name[j])
print()
print('--------------------------------------------------')
print()
compare_lopnr(data[i], data[j], name[i], name[j])
""" -------------------------------
4. count and list various types of diagnosis
codes in care data
------------------------------------"""
"""
def is_icd10_class(x):
if not common.isstr(x):
return False
if common.is_icd10(x):
return False
if len(x) < 3:
return False
if not x[0].isupper():
return False
return x[1].isdigit() and x[2].isdigit()
"""
def code_count(xs):
if not isinstance(xs, str):
return 0
return len(xs.split())
def icd10_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if common.is_icd10(x):
# print(x)
count += 1
return count
def not_icd10_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if not common.is_icd10(x):
# print(x)
count += 1
return count
def icd10_class_count(xs):
if not isinstance(xs, str):
return 0
count = 0
for x in xs.split():
if common.is_icd10_class(x):
# print(x)
count += 1
return count
"""
def code_list(xs):
if not isinstance(xs, str):
return 0
return len(xs.split())
"""
def count_and_print(df, table = False):
dia = 'DIAGNOS'
dfc = copy.copy(df)
dfc['code_count'] = df[dia].apply(code_count)
dfc['icd10_count'] = df[dia].apply(icd10_count)
dfc['not_icd10_count'] = df[dia].apply(not_icd10_count)
dfc['icd10_class_count'] = df[dia].apply(icd10_class_count)
nr_of_codes = dfc['code_count'].sum()
nr_of_icd10 = dfc['icd10_count'].sum()
nr_of_not_icd10 = dfc['not_icd10_count'].sum()
nr_of_class_codes = dfc['icd10_class_count'].sum()
if table:
print('nr_of_lines\t', len(df))
print('nr_of_codes\t', nr_of_codes)
print('nr_of_icd10\t', nr_of_icd10)
print('nr_of_not_icd10\t', nr_of_not_icd10)
print('nr_of_icd10_class_codes\t', nr_of_class_codes)
else:
print(' nr_of_lines', len(df))
print(' nr_of_codes', nr_of_codes)
print(' nr_of_icd10', nr_of_icd10)
print(' nr_of_not_icd10', nr_of_not_icd10)
print(' nr_of_icd10_class_codes', nr_of_class_codes)
"""
for c in df1[dia].values:
print('\t', c)
"""
def print_dates(df, table = False):
date = 'INDATUM'
if table:
print('first date\t', df[date].min())
print('last date\t', df[date].max())
else:
print(' first date', df[date].min())
print(' last date', df[date].max())
def icd10_class_list(xs):
if not isinstance(xs, str):
return []
codes = []
for x in xs.split():
if common.is_icd10_class(x):
codes += [x]
#print(codes)
return codes
def flat(xs):
ys = []
for x in xs:
ys += x
return ys
def print_class_codes(df):
dia = 'DIAGNOS'
dfc = copy.copy(df)
dfc['icd10_class'] = df[dia].apply(icd10_class_list)
dfc['is_class'] = dfc['icd10_class'].apply(lambda x: x != [])
dfc = dfc[dfc['is_class']]
codes = np.unique(flat(list(dfc['icd10_class'].values)))
for c in codes:
print('\t', c)
def diagnosis_code_count(df, print_class = False, table = False):
date = 'INDATUM'
nr = 'LopNr'
icd10_start = np.datetime64('1998-01-01')
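    # Rows dated before icd10_start are assumed to use pre-ICD-10 coding;
    # the counts below are therefore reported separately for each era.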
"""
size0 = len(df)
df = df.dropna().reset_index(drop=True)
print('nr of empty lines:', size0- len(df))
"""
df[date] = df[date].apply(bio.str2time)
df = df.sort_values(date).dropna().reset_index(drop=True)
df1 = df[df[date] < icd10_start]
df2 = df[df[date] >= icd10_start]
print()
print('code counts before 1998_01_01:')
print()
print_dates(df1, table = table)
count_and_print(df1, table = table)
print()
print('code counts from 1998_01_01')
print()
print_dates(df2, table = table)
count_and_print(df2, table = table)
if print_class:
print()
print(' all icd10_class_codes:')
print_class_codes(df2)
print()
|
resolveCredentials.ts
|
import { Auth, JsonFileCache } from '@expo/apple-utils';
import chalk from 'chalk';
import * as fs from 'fs-extra';
import wrapAnsi from 'wrap-ansi';
import Log, { learnMore } from '../../../log';
import { promptAsync } from '../../../prompts';
import * as Keychain from './keychain';
/**
* Get the username and possibly the password from the environment variables or the supplied options.
* Password is optional because it's only needed for authentication, but not for re-authentication.
*
* @param options
*/
export async function resolveCredentialsAsync(
options: Partial<Auth.UserCredentials>
): Promise<Partial<Auth.UserCredentials>> {
const credentials = getAppleIdFromEnvironmentOrOptions(options);
if (!credentials.username) {
credentials.username = await promptUsernameAsync();
}
return credentials;
}
function getAppleIdFromEnvironmentOrOptions({
username,
password,
...userCredentials
}: Partial<Auth.UserCredentials>): Partial<Auth.UserCredentials> {
const passedAppleId = username || process.env.EXPO_APPLE_ID;
// Only resolve the password if the username was provided.
const passedAppleIdPassword = passedAppleId
? password || process.env.EXPO_APPLE_PASSWORD
: undefined;
return {
...userCredentials,
username: passedAppleId,
password: passedAppleIdPassword,
};
}
async function promptUsernameAsync(): Promise<string> {
Log.log('\u203A Log in to your Apple Developer account to continue');
// Get the email address that was last used and set it as
// the default value for quicker authentication.
const lastAppleId = await getCachedUsernameAsync();
const { username } = await promptAsync({
type: 'text',
name: 'username',
message: `Apple ID:`,
validate: (val: string) => val !== '',
initial: lastAppleId ?? undefined,
});
if (username && username !== lastAppleId) {
await cacheUsernameAsync(username);
}
return username;
}
async function cacheUsernameAsync(username: string): Promise<void> {
  // If a new email was used, store it as a suggestion for next time.
  // Caching is skipped when the keychain mechanism is disabled.
if (!Keychain.EXPO_NO_KEYCHAIN && username) {
const cachedPath = JsonFileCache.usernameCachePath();
await JsonFileCache.cacheAsync(cachedPath, { username });
}
}
export async function promptPasswordAsync({
username,
}: Pick<Auth.UserCredentials, 'username'>): Promise<string> {
const cachedPassword = await getCachedPasswordAsync({ username });
if (cachedPassword) {
Log.log(`\u203A Using password for ${username} from your local Keychain`);
Log.log(` ${learnMore('https://docs.expo.dev/distribution/security#keychain')}`);
return cachedPassword;
}
// https://docs.expo.dev/distribution/security/#apple-developer-account-credentials
Log.log(
wrapAnsi(
chalk.bold(
`\u203A The password is only used to authenticate with Apple and never stored on EAS servers`
),
process.stdout.columns || 80
)
);
Log.log(` ${learnMore('https://bit.ly/2VtGWhU')}`);
const { password } = await promptAsync({
type: 'password',
name: 'password',
message: () => `Password (for ${username}):`,
validate: (val: string) => val !== '',
});
// TODO: Save only after the auth completes successfully.
await cachePasswordAsync({ username, password });
return password;
}
async function getCachedUsernameAsync(): Promise<string | null> {
if (Keychain.EXPO_NO_KEYCHAIN) {
// Clear last used apple ID.
await fs.remove(JsonFileCache.usernameCachePath());
return null;
}
const cached = await JsonFileCache.getCacheAsync(JsonFileCache.usernameCachePath());
const lastAppleId = cached?.username ?? null;
return typeof lastAppleId === 'string' ? lastAppleId : null;
}
/**
* Returns the same prefix used by Fastlane in order to potentially share access between services.
* [Cite. Fastlane](https://github.com/fastlane/fastlane/blob/f831062fa6f4b216b8ee38949adfe28fc11a0a8e/credentials_manager/lib/credentials_manager/account_manager.rb#L8).
*
 * @param appleId email address
*/
function getKeychainServiceName(appleId: string): string {
return `deliver.${appleId}`;
}
export async function deletePasswordAsync({
username,
}: Pick<Auth.UserCredentials, 'username'>): Promise<boolean> {
const serviceName = getKeychainServiceName(username);
const success = await Keychain.deletePasswordAsync({ username, serviceName });
if (success) {
Log.log('\u203A Removed Apple ID password from the native Keychain');
}
return success;
}
async function getCachedPasswordAsync({
username,
}: Pick<Auth.UserCredentials, 'username'>): Promise<string | null> {
// If the user opts out, delete the password.
if (Keychain.EXPO_NO_KEYCHAIN) {
await deletePasswordAsync({ username });
return null;
}
const serviceName = getKeychainServiceName(username);
return Keychain.getPasswordAsync({ username, serviceName });
}
async function cachePasswordAsync({ username, password }: Auth.UserCredentials): Promise<boolean> {
if (Keychain.EXPO_NO_KEYCHAIN) {
Log.log('\u203A Skip storing Apple ID password in the local Keychain.');
return false;
}
Log.log(`\u203A Saving Apple ID password to the local Keychain`);
Log.log(` ${learnMore('https://docs.expo.dev/distribution/security#keychain')}`);
const serviceName = getKeychainServiceName(username);
return Keychain.setPasswordAsync({ username, password, serviceName });
}
|
config.go
|
package conf
import (
"crypto/rand"
"errors"
"fmt"
"os"
"strconv"
"strings"
"time"
)
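// F (from env, default 1) is presumably the fault-tolerance parameter; the
// replica count N = 3*F + 1 matches the classic Byzantine quorum size.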
var F = envInt("F", intPtr(1))
var N = 3*F + 1
var M = envInt("M", intPtr(1))
func envInt(key string, val *int) int {
v := os.Getenv(key)
if v != "" {
d, err := strconv.Atoi(v)
if err != nil {
			panic(fmt.Errorf("env %s is not int", key))
}
return d
} else if val != nil {
return *val
} else {
		panic(fmt.Errorf("env %s not found", key))
}
}
func intPtr(d int) *int {
return &d
}
//goland:noinspection GoUnusedGlobalVariable
var KeySize = 2048
var ClientTimeout = 10 * time.Second
var CPInterval = 5
var Extra = []byte("extra")
var UdpMulticastAddr = "239.255.0.1:10001"
var UdpMulticastInterfaces = []string{"enp5s0", "p2p1"}
var UdpBufSize = 1 * 1024 * 1024
var IpPrefix = os.Getenv("IP_PREFIX")
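// GetReqAddr derives replica id's address from IP_PREFIX by adding id to the
// last octet, e.g. IP_PREFIX=10.0.0.1 and id=2 give "10.0.0.3:10000".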
func GetReqAddr(id int) string {
i := strings.LastIndex(IpPrefix, ".")
if i == -1 {
panic(errors.New("invalid ip prefix"))
}
last, err := strconv.Atoi(IpPrefix[i+1:])
if err != nil {
panic(err)
}
return fmt.Sprintf("%s.%d:10000", IpPrefix[:i], id+last)
}
func GetListenAddr(_ int) string {
return ":10000"
}
var RandInputSize = 64
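// GetRandInput returns RandInputSize bytes read from crypto/rand.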
func GetRandInput() ([]byte, error) {
in := make([]byte, RandInputSize)
_, err := rand.Read(in)
if err != nil {
return nil, err
}
return in, nil
}
|
LeadNoteRepository.ts
|
import { Query } from '@app/core';
import { LeadNote, FindLeadNoteQuery } from '@app/crm';
export interface LeadNoteRepository {
findById: (id: string) => Promise<LeadNote>;
findOne: (query: Query) => Promise<LeadNote>;
find: (query: FindLeadNoteQuery) => Promise<any>;
count: (query: Query) => Promise<number>;
create: (entity: LeadNote) => Promise<string>;
update: (entity: Partial<LeadNote>) => Promise<void>;
del: (id: string) => Promise<void>;
ensureIndexes: () => Promise<void>;
delByCriteria: (criteria: any) => Promise<any>;
findByLeadOrContactId: (leadId?: string, contactId?: string) => Promise<any>;
}
|
content_type.rs
|
use std::{
error::Error as StdError,
fmt::{self, Display},
str::FromStr,
};
use mime::Mime;
use super::{Header, HeaderName};
use crate::BoxError;
/// `Content-Type` of the body
///
/// This struct can represent any valid [mime type], which can be parsed via
/// [`ContentType::parse`]. Constants are provided for the most-used mime-types.
///
/// Defined in [RFC2045](https://tools.ietf.org/html/rfc2045#section-5)
///
/// [mime type]: https://www.iana.org/assignments/media-types/media-types.xhtml
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ContentType(Mime);
impl ContentType {
/// A `ContentType` of type `text/plain; charset=utf-8`
///
/// Indicates that the body is in utf-8 encoded plain text.
pub const TEXT_PLAIN: ContentType = Self::from_mime(mime::TEXT_PLAIN_UTF_8);
/// A `ContentType` of type `text/html; charset=utf-8`
///
/// Indicates that the body is in utf-8 encoded html.
pub const TEXT_HTML: ContentType = Self::from_mime(mime::TEXT_HTML_UTF_8);
/// Parse `s` into `ContentType`
pub fn parse(s: &str) -> Result<ContentType, ContentTypeErr> {
Ok(Self::from_mime(s.parse().map_err(ContentTypeErr)?))
}
pub(crate) const fn from_mime(mime: Mime) -> Self {
Self(mime)
}
pub(crate) fn as_ref(&self) -> &Mime {
&self.0
}
}
impl Header for ContentType {
fn name() -> HeaderName {
HeaderName::new_from_ascii_str("Content-Type")
}
fn parse(s: &str) -> Result<Self, BoxError> {
Ok(Self(s.parse()?))
    }
    fn display(&self) -> String {
        self.0.to_string()
}
}
impl FromStr for ContentType {
type Err = ContentTypeErr;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Self::parse(s)
}
}
#[cfg(feature = "mime03")]
#[cfg_attr(docsrs, doc(cfg(feature = "mime03")))]
impl From<Mime> for ContentType {
fn from(mime: Mime) -> Self {
Self::from_mime(mime)
}
}
/// An error occurred while trying to [`ContentType::parse`].
#[derive(Debug)]
pub struct ContentTypeErr(mime::FromStrError);
impl StdError for ContentTypeErr {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
Some(&self.0)
}
}
impl Display for ContentTypeErr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
Display::fmt(&self.0, f)
}
}
// -- Serialization and Deserialization --
#[cfg(feature = "serde")]
mod serde {
use serde::de::{self, Deserialize, Deserializer, Visitor};
use serde::ser::{Serialize, Serializer};
use std::fmt;
use super::ContentType;
impl Serialize for ContentType {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_newtype_struct("ContentType", &format!("{}", &self.0))
}
}
impl<'de> Deserialize<'de> for ContentType {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct ContentTypeVisitor;
impl<'de> Visitor<'de> for ContentTypeVisitor {
type Value = ContentType;
// The error message which states what the Visitor expects to
// receive
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a ContentType string like `text/plain`")
}
fn visit_str<E>(self, mime: &str) -> Result<ContentType, E>
where
E: de::Error,
{
match ContentType::parse(mime) {
Ok(content_type) => Ok(content_type),
Err(_) => Err(E::custom(format!(
"Couldn't parse the following MIME-Type: {}",
mime
))),
}
}
}
deserializer.deserialize_str(ContentTypeVisitor)
}
}
}
#[cfg(test)]
mod test {
use super::ContentType;
use crate::message::header::{HeaderName, Headers};
#[test]
fn format_content_type() {
let mut headers = Headers::new();
headers.set(ContentType::TEXT_PLAIN);
assert_eq!(
headers.to_string(),
"Content-Type: text/plain; charset=utf-8\r\n"
);
headers.set(ContentType::TEXT_HTML);
assert_eq!(
headers.to_string(),
"Content-Type: text/html; charset=utf-8\r\n"
);
}
#[test]
fn parse_content_type() {
let mut headers = Headers::new();
headers.insert_raw(
HeaderName::new_from_ascii_str("Content-Type"),
"text/plain; charset=utf-8".to_string(),
);
assert_eq!(headers.get::<ContentType>(), Some(ContentType::TEXT_PLAIN));
headers.insert_raw(
HeaderName::new_from_ascii_str("Content-Type"),
"text/html; charset=utf-8".to_string(),
);
assert_eq!(headers.get::<ContentType>(), Some(ContentType::TEXT_HTML));
}
}
|
party_risk_limits_report_ack.rs
|
use serde::{Deserialize, Serialize};
#[derive(Serialize, Deserialize, Clone, Debug, Default, PartialEq)]
pub struct PartyRiskLimitsReportAck {
/// MsgType = DE
#[serde(flatten)]
pub standard_message_header: super::super::standard_message_header::StandardMessageHeader<'D', 'E'>,
/// The identifier of the Party Risk Limit Report (35=CM) or Party Risk Limit Update Report (35=CR) message.
#[serde(rename = "1667")]
pub risk_limit_report_id: String,
/// RiskLimitRequestID
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "1666")]
pub risk_limit_request_id: Option<String>,
/// RiskLimitReportStatus
#[serde(rename = "2316")]
pub risk_limit_report_status: RiskLimitReportStatus,
/// Conditionally required when RiskLimitReportStatus(2316) = 1 (Rejected).
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "2317")]
pub risk_limit_report_reject_reason: Option<RiskLimitReportRejectReason>,
/// PartyRiskLimitsUpdateGrp
#[serde(flatten)]
pub party_risk_limits_update_grp: Option<super::super::party_risk_limits_update_grp::PartyRiskLimitsUpdateGrp>,
/// TransactTime
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "60")]
pub transact_time: Option<fix_common::UTCTimestamp>,
/// RejectText
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "1328")]
pub reject_text: Option<String>,
/// Must be set if EncodedRejectText(1665) field is specified and must immediately precede it.
#[serde(rename = "1664")]
/// Encoded (non-ASCII characters) representation of the RejectText(1328) field in the encoded format specified via the MessageEncoding(347)
/// field.
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(alias = "1665")]
pub encoded_reject_text: Option<fix_common::EncodedText<1665>>,
/// Text
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(rename = "58")]
pub text: Option<String>,
/// Must be set if EncodedText(355) field is specified and must immediately precede it.
#[serde(rename = "354")]
/// Encoded (non-ASCII characters) representation of the Text(58) field in the encoded format specified via the MessageEncoding(347)
/// field.
#[serde(skip_serializing_if = "Option::is_none")]
#[serde(alias = "355")]
pub encoded_text: Option<fix_common::EncodedText<355>>,
/// Standard Message Trailer
#[serde(flatten)]
pub standard_message_trailer: super::super::standard_message_trailer::StandardMessageTrailer,
}
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq)]
pub enum RiskLimitReportStatus {
/// Accepted
#[serde(rename = "0")]
Accepted,
/// Rejected
#[serde(rename = "1")]
Rejected,
}
impl Default for RiskLimitReportStatus {
fn default() -> Self {
RiskLimitReportStatus::Accepted
}
}
#[derive(Serialize, Deserialize, Clone, Copy, Debug, PartialEq)]
pub enum RiskLimitReportRejectReason {
/// Unknown RiskLimitReportID(1667)
#[serde(rename = "0")]
UnknownRiskLimitReportId,
/// Unknown party
#[serde(rename = "1")]
UnknownParty,
/// Other
#[serde(rename = "99")]
Other,
}
impl Default for RiskLimitReportRejectReason {
fn default() -> Self {
RiskLimitReportRejectReason::UnknownRiskLimitReportId
}
}
|