file_name (string, 3-137 chars) | prefix (string, 0-918k chars) | suffix (string, 0-962k chars) | middle (string, 0-812k chars)
---|---|---|---|
datastore.go | package fleet
import (
"context"
"time"
)
type CarveStore interface {
NewCarve(ctx context.Context, metadata *CarveMetadata) (*CarveMetadata, error)
UpdateCarve(ctx context.Context, metadata *CarveMetadata) error
Carve(ctx context.Context, carveId int64) (*CarveMetadata, error)
CarveBySessionId(ctx context.Context, sessionId string) (*CarveMetadata, error)
CarveByName(ctx context.Context, name string) (*CarveMetadata, error)
ListCarves(ctx context.Context, opt CarveListOptions) ([]*CarveMetadata, error)
NewBlock(ctx context.Context, metadata *CarveMetadata, blockId int64, data []byte) error
GetBlock(ctx context.Context, metadata *CarveMetadata, blockId int64) ([]byte, error)
// CleanupCarves will mark carves older than 24 hours expired, and delete the associated data blocks. This behaves
// differently for carves stored in S3 (check the implementation godoc comment for more details)
CleanupCarves(ctx context.Context, now time.Time) (expired int, err error)
}
// Datastore combines all the interfaces in the Fleet DAL
type Datastore interface {
CarveStore
///////////////////////////////////////////////////////////////////////////////
// UserStore contains methods for managing users in a datastore
NewUser(ctx context.Context, user *User) (*User, error)
ListUsers(ctx context.Context, opt UserListOptions) ([]*User, error)
UserByEmail(ctx context.Context, email string) (*User, error)
UserByID(ctx context.Context, id uint) (*User, error)
SaveUser(ctx context.Context, user *User) error
SaveUsers(ctx context.Context, users []*User) error
// DeleteUser permanently deletes the user identified by the provided ID.
DeleteUser(ctx context.Context, id uint) error
// PendingEmailChange creates a record with a pending email change for a user identified by uid. The change record
// is keyed by a unique token. The token is emailed to the user with a link that they can use to confirm the change.
PendingEmailChange(ctx context.Context, userID uint, newEmail, token string) error
// ConfirmPendingEmailChange will confirm that the new email address identified by token is valid. The new email will
// be written to the user record. userID is the ID of the user whose e-mail is being changed.
ConfirmPendingEmailChange(ctx context.Context, userID uint, token string) (string, error)
///////////////////////////////////////////////////////////////////////////////
// QueryStore
// ApplyQueries applies a list of queries (likely from a yaml file) to the datastore. Existing queries are updated,
// and new queries are created.
ApplyQueries(ctx context.Context, authorID uint, queries []*Query) error
// NewQuery creates a new query object in the datastore. The returned query should have the ID updated.
NewQuery(ctx context.Context, query *Query, opts ...OptionalArg) (*Query, error)
// SaveQuery saves changes to an existing query object.
SaveQuery(ctx context.Context, query *Query) error
// DeleteQuery deletes an existing query object.
DeleteQuery(ctx context.Context, name string) error
// DeleteQueries deletes the existing query objects with the provided IDs. The number of deleted queries is returned
// along with any error.
DeleteQueries(ctx context.Context, ids []uint) (uint, error)
// Query returns the query associated with the provided ID. Associated packs should also be loaded.
Query(ctx context.Context, id uint) (*Query, error)
// ListQueries returns a list of queries with the provided sorting and paging options. Associated packs should also
// be loaded.
ListQueries(ctx context.Context, opt ListQueryOptions) ([]*Query, error)
// QueryByName looks up a query by name.
QueryByName(ctx context.Context, name string, opts ...OptionalArg) (*Query, error)
///////////////////////////////////////////////////////////////////////////////
// CampaignStore defines the distributed query campaign related datastore methods
// NewDistributedQueryCampaign creates a new distributed query campaign
NewDistributedQueryCampaign(ctx context.Context, camp *DistributedQueryCampaign) (*DistributedQueryCampaign, error)
// DistributedQueryCampaign loads a distributed query campaign by ID
DistributedQueryCampaign(ctx context.Context, id uint) (*DistributedQueryCampaign, error)
// SaveDistributedQueryCampaign updates an existing distributed query campaign
SaveDistributedQueryCampaign(ctx context.Context, camp *DistributedQueryCampaign) error
// DistributedQueryCampaignTargetIDs gets the IDs of the targets for the query campaign of the provided ID
DistributedQueryCampaignTargetIDs(ctx context.Context, id uint) (targets *HostTargets, err error)
// NewDistributedQueryCampaignTarget adds a new target to an existing distributed query campaign
NewDistributedQueryCampaignTarget(ctx context.Context, target *DistributedQueryCampaignTarget) (*DistributedQueryCampaignTarget, error)
// CleanupDistributedQueryCampaigns will clean and trim metadata for old distributed query campaigns. Any campaign
// in the QueryWaiting state will be moved to QueryComplete after one minute. Any campaign in the QueryRunning state
// will be moved to QueryComplete after one day. Times are from creation time. The now parameter makes this method
// easier to test. The return values indicate how many campaigns were expired and any error.
CleanupDistributedQueryCampaigns(ctx context.Context, now time.Time) (expired uint, err error)
DistributedQueryCampaignsForQuery(ctx context.Context, queryID uint) ([]*DistributedQueryCampaign, error)
///////////////////////////////////////////////////////////////////////////////
// PackStore is the datastore interface for managing query packs.
// ApplyPackSpecs applies a list of PackSpecs to the datastore, creating and updating packs as necessary.
ApplyPackSpecs(ctx context.Context, specs []*PackSpec) error
// GetPackSpecs returns all of the stored PackSpecs.
GetPackSpecs(ctx context.Context) ([]*PackSpec, error)
// GetPackSpec returns the spec for the named pack.
GetPackSpec(ctx context.Context, name string) (*PackSpec, error)
// NewPack creates a new pack in the datastore.
NewPack(ctx context.Context, pack *Pack, opts ...OptionalArg) (*Pack, error)
// SavePack updates an existing pack in the datastore.
SavePack(ctx context.Context, pack *Pack) error
// DeletePack deletes a pack record from the datastore.
DeletePack(ctx context.Context, name string) error
// Pack retrieves a pack from the datastore by ID.
Pack(ctx context.Context, pid uint) (*Pack, error)
// ListPacks lists all packs in the datastore.
ListPacks(ctx context.Context, opt PackListOptions) ([]*Pack, error)
// PackByName fetches the pack with the given name. The bool return value is true if the pack exists.
PackByName(ctx context.Context, name string, opts ...OptionalArg) (*Pack, bool, error)
// ListPacksForHost lists the packs that a host should execute.
ListPacksForHost(ctx context.Context, hid uint) (packs []*Pack, err error)
// EnsureGlobalPack gets or inserts a pack with type global
EnsureGlobalPack(ctx context.Context) (*Pack, error)
// EnsureTeamPack gets or inserts a pack with type team for the given team ID
EnsureTeamPack(ctx context.Context, teamID uint) (*Pack, error)
///////////////////////////////////////////////////////////////////////////////
// LabelStore
// ApplyLabelSpecs applies a list of LabelSpecs to the datastore, creating and updating labels as necessary.
ApplyLabelSpecs(ctx context.Context, specs []*LabelSpec) error
// GetLabelSpecs returns all of the stored LabelSpecs.
GetLabelSpecs(ctx context.Context) ([]*LabelSpec, error)
// GetLabelSpec returns the spec for the named label.
GetLabelSpec(ctx context.Context, name string) (*LabelSpec, error)
NewLabel(ctx context.Context, Label *Label, opts ...OptionalArg) (*Label, error)
SaveLabel(ctx context.Context, label *Label) (*Label, error)
DeleteLabel(ctx context.Context, name string) error
Label(ctx context.Context, lid uint) (*Label, error)
ListLabels(ctx context.Context, filter TeamFilter, opt ListOptions) ([]*Label, error)
// LabelQueriesForHost returns the label queries that should be executed for the given host.
// Results are returned in a map of label id -> query
LabelQueriesForHost(ctx context.Context, host *Host) (map[string]string, error)
// RecordLabelQueryExecutions saves the results of label queries. The results map is a map of label id -> whether or
// not the label matches. The time parameter is the timestamp to save with the query execution.
RecordLabelQueryExecutions(ctx context.Context, host *Host, results map[uint]*bool, t time.Time, deferredSaveHost bool) error
// ListLabelsForHost returns the labels that the given host is in.
ListLabelsForHost(ctx context.Context, hid uint) ([]*Label, error)
// ListHostsInLabel returns a slice of hosts in the label with the given ID.
ListHostsInLabel(ctx context.Context, filter TeamFilter, lid uint, opt HostListOptions) ([]*Host, error)
// ListUniqueHostsInLabels returns a slice of all of the hosts in the given label IDs. A host will only appear once
// in the results even if it is in multiple of the provided labels.
ListUniqueHostsInLabels(ctx context.Context, filter TeamFilter, labels []uint) ([]*Host, error)
SearchLabels(ctx context.Context, filter TeamFilter, query string, omit ...uint) ([]*Label, error)
// LabelIDsByName retrieves the IDs associated with the given label names
LabelIDsByName(ctx context.Context, labels []string) ([]uint, error)
// Methods used for async processing of host label query results.
AsyncBatchInsertLabelMembership(ctx context.Context, batch [][2]uint) error
AsyncBatchDeleteLabelMembership(ctx context.Context, batch [][2]uint) error
AsyncBatchUpdateLabelTimestamp(ctx context.Context, ids []uint, ts time.Time) error
///////////////////////////////////////////////////////////////////////////////
// HostStore
// NewHost is deprecated and will be removed. Hosts should always be enrolled via EnrollHost.
NewHost(ctx context.Context, host *Host) (*Host, error)
SaveHost(ctx context.Context, host *Host) error
SerialSaveHost(ctx context.Context, host *Host) error
DeleteHost(ctx context.Context, hid uint) error
Host(ctx context.Context, id uint) (*Host, error)
// EnrollHost will enroll a new host with the given identifier, setting the node key, and team. Implementations of
// this method should respect the provided host enrollment cooldown, by returning an error if the host has enrolled
// within the cooldown period.
EnrollHost(ctx context.Context, osqueryHostId, nodeKey string, teamID *uint, cooldown time.Duration) (*Host, error)
ListHosts(ctx context.Context, filter TeamFilter, opt HostListOptions) ([]*Host, error)
// AuthenticateHost authenticates and returns host metadata by node key. This method should not return the host
// "additional" information as this is not typically necessary for the operations performed by the osquery
// endpoints.
AuthenticateHost(ctx context.Context, nodeKey string) (*Host, error)
MarkHostSeen(ctx context.Context, host *Host, t time.Time) error
MarkHostsSeen(ctx context.Context, hostIDs []uint, t time.Time) error
SearchHosts(ctx context.Context, filter TeamFilter, query string, omit ...uint) ([]*Host, error)
// CleanupIncomingHosts deletes hosts that have enrolled but never updated their status details. This clears dead
// "incoming hosts" that never complete their registration.
// A host is considered incoming if both the hostname and osquery_version fields are empty. This means that multiple
// different osquery queries failed to populate details.
CleanupIncomingHosts(ctx context.Context, now time.Time) error
// GenerateHostStatusStatistics retrieves the count of online, offline, MIA and new hosts.
GenerateHostStatusStatistics(ctx context.Context, filter TeamFilter, now time.Time) (*HostSummary, error)
// HostIDsByName retrieves the IDs associated with the given hostnames
HostIDsByName(ctx context.Context, filter TeamFilter, hostnames []string) ([]uint, error)
// HostByIdentifier returns one host matching the provided identifier. Possible matches can be on
// osquery_host_identifier, node_key, UUID, or hostname.
HostByIdentifier(ctx context.Context, identifier string) (*Host, error)
// AddHostsToTeam adds hosts to an existing team, clearing their team settings if teamID is nil.
AddHostsToTeam(ctx context.Context, teamID *uint, hostIDs []uint) error
TotalAndUnseenHostsSince(ctx context.Context, daysCount int) (int, int, error)
DeleteHosts(ctx context.Context, ids []uint) error
CountHosts(ctx context.Context, filter TeamFilter, opt HostListOptions) (int, error)
CountHostsInLabel(ctx context.Context, filter TeamFilter, lid uint, opt HostListOptions) (int, error)
// ListPoliciesForHost lists the policies that a host will check and whether they are passing
ListPoliciesForHost(ctx context.Context, hid uint) ([]*HostPolicy, error)
///////////////////////////////////////////////////////////////////////////////
// TargetStore
// CountHostsInTargets returns the metrics of the hosts in the provided labels, teams, and explicit host IDs.
CountHostsInTargets(ctx context.Context, filter TeamFilter, targets HostTargets, now time.Time) (TargetMetrics, error)
// HostIDsInTargets returns the host IDs of the hosts in the provided labels, teams, and explicit host IDs. The
// returned host IDs should be sorted in ascending order.
HostIDsInTargets(ctx context.Context, filter TeamFilter, targets HostTargets) ([]uint, error)
///////////////////////////////////////////////////////////////////////////////
// PasswordResetStore manages password resets in the Datastore
NewPasswordResetRequest(ctx context.Context, req *PasswordResetRequest) (*PasswordResetRequest, error)
DeletePasswordResetRequestsForUser(ctx context.Context, userID uint) error
FindPassswordResetByToken(ctx context.Context, token string) (*PasswordResetRequest, error)
///////////////////////////////////////////////////////////////////////////////
// SessionStore is the abstract interface that all session backends must conform to.
// SessionByKey returns the session matching the provided key, or an error if one could not be found.
SessionByKey(ctx context.Context, key string) (*Session, error)
// SessionByID returns the session matching the provided ID, or an error if one could not be found.
SessionByID(ctx context.Context, id uint) (*Session, error)
// ListSessionsForUser finds all the active sessions for a given user
ListSessionsForUser(ctx context.Context, id uint) ([]*Session, error)
// NewSession stores a new session struct
NewSession(ctx context.Context, session *Session) (*Session, error)
// DestroySession destroys the currently tracked session
DestroySession(ctx context.Context, session *Session) error
// DestroyAllSessionsForUser destroys all of the sessions for a given user
DestroyAllSessionsForUser(ctx context.Context, id uint) error
// MarkSessionAccessed marks the currently tracked session as accessed to extend its expiration
MarkSessionAccessed(ctx context.Context, session *Session) error
///////////////////////////////////////////////////////////////////////////////
// AppConfigStore contains methods for saving and retrieving application configuration
NewAppConfig(ctx context.Context, info *AppConfig) (*AppConfig, error)
AppConfig(ctx context.Context) (*AppConfig, error)
SaveAppConfig(ctx context.Context, info *AppConfig) error
// VerifyEnrollSecret checks that the provided secret matches an active enroll secret. If it is successfully
// matched, that secret is returned. Otherwise, an error is returned.
VerifyEnrollSecret(ctx context.Context, secret string) (*EnrollSecret, error)
// GetEnrollSecrets gets the enroll secrets for a team (or global if teamID is nil).
GetEnrollSecrets(ctx context.Context, teamID *uint) ([]*EnrollSecret, error)
// ApplyEnrollSecrets replaces the current enroll secrets for a team with the provided secrets.
ApplyEnrollSecrets(ctx context.Context, teamID *uint, secrets []*EnrollSecret) error
///////////////////////////////////////////////////////////////////////////////
// InviteStore contains the methods for managing user invites in a datastore.
// NewInvite creates and stores a new invitation in a DB.
NewInvite(ctx context.Context, i *Invite) (*Invite, error)
// ListInvites lists all invites in the datastore.
ListInvites(ctx context.Context, opt ListOptions) ([]*Invite, error)
// Invite retrieves an invite by its ID.
Invite(ctx context.Context, id uint) (*Invite, error)
// InviteByEmail retrieves an invite for a specific email address.
InviteByEmail(ctx context.Context, email string) (*Invite, error)
// InviteByToken retrieves an invite using the token string.
InviteByToken(ctx context.Context, token string) (*Invite, error)
// DeleteInvite deletes an invitation.
DeleteInvite(ctx context.Context, id uint) error
///////////////////////////////////////////////////////////////////////////////
// ScheduledQueryStore
ListScheduledQueriesInPack(ctx context.Context, id uint, opts ListOptions) ([]*ScheduledQuery, error)
NewScheduledQuery(ctx context.Context, sq *ScheduledQuery, opts ...OptionalArg) (*ScheduledQuery, error)
SaveScheduledQuery(ctx context.Context, sq *ScheduledQuery) (*ScheduledQuery, error)
DeleteScheduledQuery(ctx context.Context, id uint) error
ScheduledQuery(ctx context.Context, id uint) (*ScheduledQuery, error)
CleanupOrphanScheduledQueryStats(ctx context.Context) error
CleanupOrphanLabelMembership(ctx context.Context) error
CleanupExpiredHosts(ctx context.Context) error
///////////////////////////////////////////////////////////////////////////////
// TeamStore
// NewTeam creates a new Team object in the store.
NewTeam(ctx context.Context, team *Team) (*Team, error)
// SaveTeam saves any changes to the team.
SaveTeam(ctx context.Context, team *Team) (*Team, error)
// Team retrieves the Team by ID.
Team(ctx context.Context, tid uint) (*Team, error)
// DeleteTeam deletes the Team by ID.
DeleteTeam(ctx context.Context, tid uint) error
// TeamByName retrieves the Team by Name.
TeamByName(ctx context.Context, name string) (*Team, error)
// ListTeams lists teams with the ordering and filters in the provided options.
ListTeams(ctx context.Context, filter TeamFilter, opt ListOptions) ([]*Team, error)
// SearchTeams searches teams using the provided query and omitting the provided existing selection.
SearchTeams(ctx context.Context, filter TeamFilter, matchQuery string, omit ...uint) ([]*Team, error)
// TeamEnrollSecrets lists the enroll secrets for the team.
TeamEnrollSecrets(ctx context.Context, teamID uint) ([]*EnrollSecret, error)
///////////////////////////////////////////////////////////////////////////////
// SoftwareStore
SaveHostSoftware(ctx context.Context, host *Host) error
LoadHostSoftware(ctx context.Context, host *Host) error
AllSoftwareWithoutCPEIterator(ctx context.Context) (SoftwareIterator, error)
AddCPEForSoftware(ctx context.Context, software Software, cpe string) error
AllCPEs(ctx context.Context) ([]string, error)
InsertCVEForCPE(ctx context.Context, cve string, cpes []string) error
SoftwareByID(ctx context.Context, id uint) (*Software, error)
///////////////////////////////////////////////////////////////////////////////
// ActivitiesStore
NewActivity(ctx context.Context, user *User, activityType string, details *map[string]interface{}) error
ListActivities(ctx context.Context, opt ListOptions) ([]*Activity, error)
///////////////////////////////////////////////////////////////////////////////
// StatisticsStore
ShouldSendStatistics(ctx context.Context, frequency time.Duration) (StatisticsPayload, bool, error)
RecordStatisticsSent(ctx context.Context) error
///////////////////////////////////////////////////////////////////////////////
// GlobalPoliciesStore
NewGlobalPolicy(ctx context.Context, queryID uint, resolution string) (*Policy, error)
Policy(ctx context.Context, id uint) (*Policy, error)
RecordPolicyQueryExecutions(ctx context.Context, host *Host, results map[uint]*bool, updated time.Time, deferredSaveHost bool) error
ListGlobalPolicies(ctx context.Context) ([]*Policy, error)
DeleteGlobalPolicies(ctx context.Context, ids []uint) ([]uint, error)
PolicyQueriesForHost(ctx context.Context, host *Host) (map[string]string, error)
ApplyPolicySpecs(ctx context.Context, specs []*PolicySpec) error
// MigrateTables creates and migrates the table schemas
MigrateTables(ctx context.Context) error
// MigrateData populates built-in data
MigrateData(ctx context.Context) error
// MigrationStatus returns nil if migrations are complete, and an error if migrations need to be run.
MigrationStatus(ctx context.Context) (MigrationStatus, error)
ListSoftware(ctx context.Context, opt SoftwareListOptions) ([]Software, error)
///////////////////////////////////////////////////////////////////////////////
// Team Policies
NewTeamPolicy(ctx context.Context, teamID uint, queryID uint, resolution string) (*Policy, error)
ListTeamPolicies(ctx context.Context, teamID uint) ([]*Policy, error)
DeleteTeamPolicies(ctx context.Context, teamID uint, ids []uint) ([]uint, error)
TeamPolicy(ctx context.Context, teamID uint, policyID uint) (*Policy, error)
///////////////////////////////////////////////////////////////////////////////
// Locking
// Lock tries to acquire an atomic lock on an instance named with `name`,
// for an `owner` identified by a random string per instance.
// Subsequently locking the same resource name for the same owner
// renews the lock expiration.
// It returns (true, nil) if it managed to obtain the lock, and
// (false, err) otherwise.
// This call must not block. (See the illustrative sketch after this interface.)
Lock(ctx context.Context, name string, owner string, expiration time.Duration) (bool, error)
// Unlock tries to unlock the lock by that `name` for the specified
// `owner`. Unlocking when not holding the lock shouldn't error
Unlock(ctx context.Context, name string, owner string) error
///////////////////////////////////////////////////////////////////////////////
// Aggregated Stats
UpdateScheduledQueryAggregatedStats(ctx context.Context) error
UpdateQueryAggregatedStats(ctx context.Context) error
}
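// Illustrative sketch (hypothetical helper, not part of Fleet): the Lock/Unlock
// contract above is typically used so that a periodic job runs on only one
// server instance at a time. The name, owner, and expiration values are assumed
// examples.
func runExclusively(ctx context.Context, ds Datastore, name, owner string, job func() error) error {
	acquired, err := ds.Lock(ctx, name, owner, 10*time.Minute)
	if err != nil || !acquired {
		// Either the attempt failed or another instance currently holds the lock.
		return err
	}
	// Unlocking when the lock is not held should not produce an error.
	defer ds.Unlock(ctx, name, owner) //nolint:errcheck
	return job()
}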
type MigrationStatus int
const (
NoMigrationsCompleted = iota
SomeMigrationsCompleted
AllMigrationsCompleted
)
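// Illustrative sketch (hypothetical, not part of Fleet): a caller would normally
// gate startup on MigrationStatus before serving traffic.
func migrationsReady(ctx context.Context, ds Datastore) (bool, error) {
	status, err := ds.MigrationStatus(ctx)
	if err != nil {
		return false, err
	}
	return status == AllMigrationsCompleted, nil
}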
// NotFoundError is returned when the datastore resource cannot be found.
type NotFoundError interface {
error
IsNotFound() bool
}
func IsNotFound(err error) bool {
e, ok := err.(NotFoundError)
if !ok {
return false
}
return e.IsNotFound()
}
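// Illustrative sketch (hypothetical helper, not part of Fleet): IsNotFound lets a
// caller treat a missing row as absence rather than as a hard failure.
func hostOrNil(ctx context.Context, ds Datastore, id uint) (*Host, error) {
	host, err := ds.Host(ctx, id)
	if err != nil {
		if IsNotFound(err) {
			return nil, nil // the host simply does not exist
		}
		return nil, err
	}
	return host, nil
}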
// AlreadyExistsError is returned when creating a datastore resource that already exists.
type AlreadyExistsError interface {
error
IsExists() bool
}
// ForeignKeyError is returned when the operation fails due to foreign key constraints.
type ForeignKeyError interface {
error
IsForeignKey() bool
}
func IsForeignKey(err error) bool {
e, ok := err.(ForeignKeyError)
if !ok |
return e.IsForeignKey()
}
type OptionalArg func() interface{}
| {
return false
} |
conv_scale_fusion.py | # Copyright (c) 2021, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
import numpy as np
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.mil import Builder as mb
def _try_to_transform(conv_op, scale_op, block):
# get the scale
if scale_op.x.val is None and scale_op.y.val is None:
return False
scale_var = scale_op.x if scale_op.x.val is not None else scale_op.y
scale = scale_var.val
# for the scalar case, the scalar can be either
# 1. a python int/float
# 2. a 0d numpy array
# 3. a 1d numpy array with shape (1,)
is_scalar = True
if isinstance(scale, np.ndarray):
if scale.shape == ():
scale = scale.tolist()
elif scale.shape == (1,):
scale = scale[0]
else:
is_scalar = False
# get weight and bias and groups from conv layer
if conv_op.weight.val is None:
return False
conv_weight = conv_op.weight.val
conv_bias = conv_op.bias
groups = conv_op.groups.val
# get type of the conv layer
is_deconv = conv_op.op_type == 'conv_transpose'
is_conv_1d = len(conv_weight.shape) == 3
# D_in denotes the spatial dimensions for conv kernel weight
# for conv_transpose, conv_weight has shape [Cin, Cout / groups, *D_in]
# for conv, conv_weight has shape [Cout, Cin / groups, *D_in]
if is_deconv:
Cout = conv_weight.shape[1] * groups
Cin = conv_weight.shape[0]
else:
Cout = conv_weight.shape[0]
Cin = conv_weight.shape[1] * groups
# for the vector scale case, check if the shape is broadcastable
if not is_scalar:
if not np.product(scale.shape) == Cout:
return False
if len(scale.shape) == len(conv_weight.shape):
if not scale.shape[1] == Cout:
return False
elif len(scale.shape) == len(conv_weight.shape) - 1:
if not scale.shape[0] == Cout:
return False
else:
return False
# transform the scale to 1./scale for the real_div case
if scale_op.op_type == "real_div":
scale = 1./scale
# get the type of the conv weight
conv_weight_type = conv_weight.dtype
# create bias for conv if not exist
if conv_bias is None:
conv_bias = np.zeros(Cout)
else:
conv_bias = conv_bias.val
conv_bias = conv_bias.astype(conv_weight_type)
# get the original shape of weight and bias
origin_weight_shape = conv_weight.shape
origin_bias_shape = conv_bias.shape
# update the weight/bias for conv layer
if is_scalar:
new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
new_conv_weight = np.array(conv_weight * scale).astype(conv_weight_type)
else:
scale = np.reshape(scale, (Cout))
new_conv_bias = np.array(conv_bias * scale).astype(conv_weight_type)
new_conv_weight = []
if is_deconv:
conv_weight = np.transpose(conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
conv_weight = np.reshape(conv_weight, [Cout, Cin // groups] + list(conv_weight.shape[2:]))
for i in range(Cout):
_conv_weight = conv_weight[i] * scale[i]
new_conv_weight.append(_conv_weight)
new_conv_weight = np.array(new_conv_weight).astype(conv_weight_type)
if is_deconv:
new_conv_weight = np.reshape(new_conv_weight, [Cout // groups, Cin] + list(new_conv_weight.shape[2:]))
new_conv_weight = np.transpose(new_conv_weight, [1, 0, 2] if is_conv_1d else [1, 0, 2, 3])
# make sure the updated weight and bias have the same shape as the original ones
assert new_conv_weight.shape == origin_weight_shape, "conv weight should have the same shape before and after the fuse_conv_scale pass."
assert new_conv_bias.shape == origin_bias_shape, "conv bias should have the same shape before and after the fuse_conv_scale pass."
# create a new conv op with the new weight, bias value, copying rest of the attributes
out_name = scale_op.outputs[0].name
conv_kargs = {"weight": new_conv_weight, "bias": new_conv_bias, "name": out_name, "before_op": conv_op}
for k, v in conv_op.inputs.items():
if k in ["weight", "bias"]:
continue
conv_kargs[k] = v
if is_deconv:
x = mb.conv_transpose(**conv_kargs)
else:
x = mb.conv(**conv_kargs)
scale_op.enclosing_block.replace_uses_of_var_after_op(
anchor_op=scale_op, old_var=scale_op.outputs[0], new_var=x
)
# Remove all the ops at once
block.remove_ops([conv_op, scale_op])
return True
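# Hedged sanity check (illustrative only, not invoked by the pass): folding a
# per-output-channel scale into the weights and bias is algebraically the same as
# scaling the convolution output, which is the identity the transform above relies
# on. All values below are made-up examples.
def _scale_fold_identity_check():
    w = np.random.rand(4, 3)   # (Cout, Cin) slice of a 1x1 conv kernel
    b = np.random.rand(4)      # per-channel bias
    x = np.random.rand(3)      # input at a single spatial position
    s = np.random.rand(4)      # per-output-channel scale
    y_then_scale = (w @ x + b) * s
    y_fused = (w * s[:, None]) @ x + b * s
    assert np.allclose(y_then_scale, y_fused)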
@register_pass(namespace="common")
class fuse_conv_scale(AbstractGraphPass):
"""
Fold mul/div into conv/conv_transpose by updating the weight/bias of the convolution layers.
The scale const can be a single number (scalar) or a vector with a broadcastable shape;
for instance, if the output of the conv/deconv layer is (B, Cout, H, W),
consts of shape (Cout, 1, 1) and (1, Cout, 1, 1) are allowed.
Given:
%2 = conv(%1)
...
%3 = mul(%2, constant) # where constant is the scale constant
...
Result:
%3 = conv(%1)
...
"""
def __init__(self):
self.ops_to_skip = set()
def | (self, prog):
pass
def _fuse_conv_scale_block(self, block):
def _match_pattern(op):
if op.op_type == "conv" or op.op_type == "conv_transpose":
# abort fusion if op output is also a block output
if op.outputs[0] in op.enclosing_block.outputs:
return None
# find batch_norm op
child_ops = op.outputs[0].child_ops
if len(child_ops) == 1:
scale_op_candidate = list(child_ops)[0]
if scale_op_candidate.op_type in ["mul", "real_div"]:
return scale_op_candidate
return None
fusion_occurred = False
for op in list(block.operations):
for b in op.blocks:
block_changed = True
while block_changed:
block_changed = self._fuse_conv_scale_block(b)
if len(op.blocks) > 0:
# This op can't be conv or conv_transpose
continue
scale_op = _match_pattern(op)
if op in self.ops_to_skip or scale_op in self.ops_to_skip:
continue
if scale_op is not None:
with block:
fusion_occurred = _try_to_transform(op, scale_op, block)
# has to break as the downstream iterator is affected.
if fusion_occurred:
return fusion_occurred
return fusion_occurred
def apply(self, prog):
self.set_ops_to_skip(prog)
for f in prog.functions.values():
block_changed = True
while block_changed:
block_changed = self._fuse_conv_scale_block(f)
| set_ops_to_skip |
error_meta.rs | // Code generated by software.amazon.smithy.rust.codegen.smithy-rs. DO NOT EDIT.
/// All possible error types for this service.
#[non_exhaustive]
#[derive(std::fmt::Debug)]
pub enum Error {
/// <p>Your request was throttled because you have exceeded the limit of allowed client calls. Try making the call later.</p>
ClientLimitExceededException(crate::error::ClientLimitExceededException),
/// <p>The value for this input parameter is invalid.</p>
InvalidArgumentException(crate::error::InvalidArgumentException),
/// <p>The specified client is invalid.</p>
InvalidClientException(crate::error::InvalidClientException),
/// <p>The caller is not authorized to perform this operation.</p>
NotAuthorizedException(crate::error::NotAuthorizedException),
/// <p>The specified resource is not found.</p>
ResourceNotFoundException(crate::error::ResourceNotFoundException),
/// <p>The client session has expired. Once the client is connected, the session is valid for 45 minutes. The client should reconnect to the channel to continue sending/receiving messages.</p>
SessionExpiredException(crate::error::SessionExpiredException),
/// An unhandled error occurred.
Unhandled(Box<dyn std::error::Error + Send + Sync + 'static>),
}
impl std::fmt::Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Error::ClientLimitExceededException(inner) => inner.fmt(f),
Error::InvalidArgumentException(inner) => inner.fmt(f),
Error::InvalidClientException(inner) => inner.fmt(f),
Error::NotAuthorizedException(inner) => inner.fmt(f),
Error::ResourceNotFoundException(inner) => inner.fmt(f),
Error::SessionExpiredException(inner) => inner.fmt(f),
Error::Unhandled(inner) => inner.fmt(f),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::GetIceServerConfigError, R>> for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::GetIceServerConfigError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::GetIceServerConfigErrorKind::ClientLimitExceededException(inner) => {
Error::ClientLimitExceededException(inner)
}
crate::error::GetIceServerConfigErrorKind::InvalidArgumentException(inner) => {
Error::InvalidArgumentException(inner)
}
crate::error::GetIceServerConfigErrorKind::InvalidClientException(inner) => {
Error::InvalidClientException(inner)
}
crate::error::GetIceServerConfigErrorKind::NotAuthorizedException(inner) => {
Error::NotAuthorizedException(inner)
}
crate::error::GetIceServerConfigErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::GetIceServerConfigErrorKind::SessionExpiredException(inner) => {
Error::SessionExpiredException(inner)
}
crate::error::GetIceServerConfigErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
}
}
}
impl<R> From<aws_smithy_http::result::SdkError<crate::error::SendAlexaOfferToMasterError, R>>
for Error
where
R: Send + Sync + std::fmt::Debug + 'static,
{
fn from(
err: aws_smithy_http::result::SdkError<crate::error::SendAlexaOfferToMasterError, R>,
) -> Self {
match err {
aws_smithy_http::result::SdkError::ServiceError { err, .. } => match err.kind {
crate::error::SendAlexaOfferToMasterErrorKind::ClientLimitExceededException(
inner,
) => Error::ClientLimitExceededException(inner),
crate::error::SendAlexaOfferToMasterErrorKind::InvalidArgumentException(inner) => {
Error::InvalidArgumentException(inner)
}
crate::error::SendAlexaOfferToMasterErrorKind::NotAuthorizedException(inner) => {
Error::NotAuthorizedException(inner)
}
crate::error::SendAlexaOfferToMasterErrorKind::ResourceNotFoundException(inner) => {
Error::ResourceNotFoundException(inner)
}
crate::error::SendAlexaOfferToMasterErrorKind::Unhandled(inner) => {
Error::Unhandled(inner)
}
},
_ => Error::Unhandled(err.into()),
} | }
}
impl std::error::Error for Error {} |
|
text.go | package main
import (
"flag"
"fmt"
"os"
"github.com/wix-playground/govips/pkg/vips"
)
var (
flagIn = flag.String("in", "", "file to load")
flagMessage = flag.String("message", "", "message to write")
flagOut = flag.String("out", "", "file to write out")
reportLeaksFlag = flag.Bool("leaks", false, "Outputs vips memory")
)
func main() {
flag.Usage = func() {
fmt.Fprintln(os.Stderr, "usage: text -in input_file -message message -out output_file")
}
flag.Parse()
vips.Startup(nil)
if err := text(*flagIn, *flagMessage, *flagOut); err != nil {
panic(err)
}
vips.Shutdown()
if *reportLeaksFlag {
vips.PrintObjectReport("text")
}
}
func | (inputFile, message, outputFile string) error {
_, _, err := vips.NewTransform().
Label(&vips.LabelParams{
Text: message,
Opacity: 1.0,
Width: vips.ScaleOf(0.9),
Height: vips.ScaleOf(1.0),
Alignment: vips.AlignCenter,
Color: vips.Color{R: 255, G: 255, B: 255},
}).
LoadFile(inputFile).
OutputFile(outputFile).
Apply()
return err
}
| text |
node.rs | use crate::core::Result;
use std::os::linux::fs::MetadataExt;
use std::path::Path;
static DRM_DIR_NAME: &str = "/dev/dri";
static DRM_PRIMARY_MINOR_NAME: &str = "card";
static DRM_CONTROL_MINOR_NAME: &str = "controlD";
static DRM_RENDER_MINOR_NAME: &str = "renderD";
fn major(dev: u64) -> u64 {
let mut major = (dev & 0x00000000000fff00) >> 8;
major |= (dev & 0xfffff00000000000) >> 32;
major
}
fn minor(dev: u64) -> u64 {
let mut minor = (dev & 0x00000000000000ff) >> 0;
minor |= (dev & 0x00000ffffff00000) >> 12;
minor
}
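// Illustrative check (assumed device numbers, not taken from this crate): DRM
// devices use major 226 on Linux, and render nodes start at minor 128, so
// /dev/dri/renderD128 is typically encoded in st_rdev as (226 << 8) | 128.
// The helpers above invert that encoding.
#[cfg(test)]
mod dev_number_tests {
    use super::{major, minor};

    #[test]
    fn decodes_small_major_and_minor() {
        let dev: u64 = (226 << 8) | 128; // 57984
        assert_eq!(major(dev), 226);
        assert_eq!(minor(dev), 128);
    }
}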
#[derive(Debug, PartialEq)]
pub enum DrmNodeType {
Primary = 0,
Control = 1,
Render = 2,
}
impl DrmNodeType {
/// # Examples
///
/// DRM Node types:
///
/// ```
/// let node_type = rust_drm::DrmNodeType::from_minor_name("card0").unwrap();
/// assert_eq!(node_type, rust_drm::DrmNodeType::Primary);
///
/// let node_type = rust_drm::DrmNodeType::from_minor_name("controlD128").unwrap();
/// assert_eq!(node_type, rust_drm::DrmNodeType::Control);
///
/// let node_type = rust_drm::DrmNodeType::from_minor_name("renderD128").unwrap();
/// assert_eq!(node_type, rust_drm::DrmNodeType::Render);
/// ```
///
/// Unknown DRM Node type:
///
/// ```
/// assert!(rust_drm::DrmNodeType::from_minor_name("unknownD128").is_err());
/// ```
pub fn from_minor_name(name: &str) -> Result<DrmNodeType> { | match name {
s if s.starts_with(DRM_PRIMARY_MINOR_NAME) => Ok(DrmNodeType::Primary),
s if s.starts_with(DRM_CONTROL_MINOR_NAME) => Ok(DrmNodeType::Control),
s if s.starts_with(DRM_RENDER_MINOR_NAME) => Ok(DrmNodeType::Render),
_ => Err(format!("Could not match {} to DRM Node Type", name))?,
}
}
}
pub struct DrmNode {
major: u64,
minor: u64,
}
impl DrmNode {
pub fn from_device_name(device_name: &str) -> Result<DrmNode> {
let node_path = Path::new(DRM_DIR_NAME).join(device_name);
let meta = std::fs::metadata(node_path)?;
let st_rdev = meta.st_rdev();
Ok(DrmNode {
major: major(st_rdev),
minor: minor(st_rdev),
})
}
pub fn device_dir_exists(&self) -> bool {
let drm_device_dir_name = format!("/sys/dev/char/{}:{}/device/drm", self.major, self.minor);
let drm_device_path = Path::new(&drm_device_dir_name);
drm_device_path.exists() && drm_device_path.is_dir()
}
pub fn get_device_path(&self) -> std::path::PathBuf {
let drm_device_dir_name = format!("/sys/dev/char/{}:{}/device", self.major, self.minor);
Path::new(&drm_device_dir_name).canonicalize().unwrap()
}
pub fn get_subsystem_path(&self) -> std::path::PathBuf {
let drm_device_dir_name = format!(
"/sys/dev/char/{}:{}/device/subsystem",
self.major, self.minor
);
Path::new(&drm_device_dir_name).canonicalize().unwrap()
}
pub fn get_config_path(&self) -> std::path::PathBuf {
let drm_device_dir_name =
format!("/sys/dev/char/{}:{}/device/config", self.major, self.minor);
Path::new(&drm_device_dir_name).canonicalize().unwrap()
}
} | |
concurency.go | package dynamostream
import (
"sync"
)
type concurrencyManager struct {
keyChans map[string]chan *Record
wg sync.WaitGroup
}
func | (nwork int) *concurrencyManager {
c := &concurrencyManager{
keyChans: make(map[string]chan *Record),
}
c.wg.Add(nwork)
return c
}
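// Send routes the record to a goroutine dedicated to the record's key, so that
// records sharing a key are handled one at a time and in order, while records
// with different keys are processed concurrently. Each handled record calls
// wg.Done, so the nwork value passed to the constructor is expected to match the
// total number of records sent before Wait is called.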
func (c *concurrencyManager) Send(record *Record, getKey KeyHandler, handler EventMessageHandler, onError EventMessageErrorHandler) {
key := getKey(record)
keyChan, ok := c.keyChans[key]
if !ok {
c.keyChans[key] = make(chan *Record, 1)
keyChan = c.keyChans[key]
go func() {
for {
rec, more := <-keyChan
if !more {
return
}
if err := handler(rec.DynamoDB.NewImage.EventMessage); err != nil {
onError(rec.DynamoDB.NewImage.EventMessage, err)
}
// fmt.Println("do", key, *(rec.DynamoDB.Keys["id"].S), rec.DynamoDB.NewImage.EventMessage.EventID)
c.wg.Done()
}
}()
}
keyChan <- record
}
func (c *concurrencyManager) Wait() {
c.wg.Wait()
c.Close()
}
func (c *concurrencyManager) Close() {
for _, keyChan := range c.keyChans {
close(keyChan)
}
}
| newConcurrencyManager |
tickets.facade.ts | import { Injectable } from '@angular/core';
import { Observable } from 'rxjs';
import { TicketsService } from './api/tickets.service';
import { TicketsState } from './state/tickets.state';
import { Ticket } from './models/ticket.model';
import { Deployment } from './models/deployment.model';
@Injectable()
export class | {
constructor(
private ticketsService: TicketsService,
private ticketsState: TicketsState
) { }
isUpdating$(): Observable<boolean> {
return this.ticketsState.isUpdating$();
}
deployTickets(deployment: Deployment): void{
this.ticketsService.deployTickets(deployment)
.subscribe( _ => this.loadDeployments());
}
getDeployments$(): Observable<Deployment[]> {
return this.ticketsState.getDeployments$();
}
loadDeployments(): void{
this.ticketsService.getDeployments$()
.subscribe(deployments => this.ticketsState.setDeployments(deployments.results))
}
getTickets$(): Observable<Ticket[]> {
return this.ticketsState.getTickets$();
}
loadTickets(): void{
this.ticketsService.getTickets()
.subscribe(tickets => this.ticketsState.setTickets(tickets.results))
}
deleteTicket(id: number): void{
this.ticketsService.deleteTicket(id)
.subscribe( _ => this.ticketsState.updateDeleted(id));
}
createTicket(ticket: Ticket): void{
this.ticketsService.createTicket(ticket)
.subscribe( _ => this.loadTickets());
}
updateTicket(id: number, ticket: Ticket): void{
this.ticketsService.updateTicket(id,ticket)
.subscribe( _ => this.loadTickets());
}
}
| TicketsFacade |
setVertexBuffer.spec.ts | export const description = `
setVertexBuffer validation tests.
`;
import { makeTestGroup } from '../../../common/framework/test_group.js';
import { range } from '../../../common/framework/util/util.js';
import { ValidationTest } from './validation_test.js';
class F extends ValidationTest {
getVertexBuffer(): GPUBuffer {
return this.device.createBuffer({
size: 256,
usage: GPUBufferUsage.VERTEX,
});
}
createRenderPipeline(bufferCount: number): GPURenderPipeline {
return this.device.createRenderPipeline({
vertexStage: {
module: this.device.createShaderModule({
code: `
${range(
bufferCount,
i => `\n[[location(${i})]] var<in> a_position${i} : vec3<f32>;`
).join('')}
[[builtin(position)]] var<out> Position : vec4<f32>;
[[stage(vertex)]] fn main() -> void {
Position = vec4<f32>(0.0, 0.0, 0.0, 1.0);
return;
}`,
}),
entryPoint: 'main',
},
fragmentStage: {
module: this.device.createShaderModule({
code: `
[[location(0)]] var<out> fragColor : vec4<f32>;
[[stage(fragment)]] fn main() -> void {
fragColor = vec4<f32>(0.0, 1.0, 0.0, 1.0);
return;
}`,
}),
entryPoint: 'main',
},
primitiveTopology: 'triangle-list',
colorStates: [{ format: 'rgba8unorm' }],
vertexState: {
vertexBuffers: [
{
arrayStride: 3 * 4,
attributes: range(bufferCount, i => ({
format: 'float3',
offset: 0,
shaderLocation: i,
})),
},
],
},
});
}
beginRenderPass(commandEncoder: GPUCommandEncoder): GPURenderPassEncoder {
const attachmentTexture = this.device.createTexture({
format: 'rgba8unorm',
size: { width: 16, height: 16, depth: 1 },
usage: GPUTextureUsage.OUTPUT_ATTACHMENT,
});
return commandEncoder.beginRenderPass({
colorAttachments: [
{
attachment: attachmentTexture.createView(),
loadValue: { r: 1.0, g: 0.0, b: 0.0, a: 1.0 },
},
],
});
}
}
| g.test('vertex_buffers_inherit_from_previous_pipeline').fn(async t => {
const pipeline1 = t.createRenderPipeline(1);
const pipeline2 = t.createRenderPipeline(2);
const vertexBuffer1 = t.getVertexBuffer();
const vertexBuffer2 = t.getVertexBuffer();
{
// Check failure when vertex buffer is not set
const commandEncoder = t.device.createCommandEncoder();
const renderPass = t.beginRenderPass(commandEncoder);
renderPass.setPipeline(pipeline1);
renderPass.draw(3);
renderPass.endPass();
t.expectValidationError(() => {
commandEncoder.finish();
});
}
{
// Check success when vertex buffer is inherited from previous pipeline
const commandEncoder = t.device.createCommandEncoder();
const renderPass = t.beginRenderPass(commandEncoder);
renderPass.setPipeline(pipeline2);
renderPass.setVertexBuffer(0, vertexBuffer1);
renderPass.setVertexBuffer(1, vertexBuffer2);
renderPass.draw(3);
renderPass.setPipeline(pipeline1);
renderPass.draw(3);
renderPass.endPass();
commandEncoder.finish();
}
});
g.test('vertex_buffers_do_not_inherit_between_render_passes').fn(async t => {
const pipeline1 = t.createRenderPipeline(1);
const pipeline2 = t.createRenderPipeline(2);
const vertexBuffer1 = t.getVertexBuffer();
const vertexBuffer2 = t.getVertexBuffer();
{
// Check success when vertex buffer is set for each render pass
const commandEncoder = t.device.createCommandEncoder();
{
const renderPass = t.beginRenderPass(commandEncoder);
renderPass.setPipeline(pipeline2);
renderPass.setVertexBuffer(0, vertexBuffer1);
renderPass.setVertexBuffer(1, vertexBuffer2);
renderPass.draw(3);
renderPass.endPass();
}
{
const renderPass = t.beginRenderPass(commandEncoder);
renderPass.setPipeline(pipeline1);
renderPass.setVertexBuffer(0, vertexBuffer1);
renderPass.draw(3);
renderPass.endPass();
}
commandEncoder.finish();
}
{
// Check failure because vertex buffers are not inherited in the second render pass
const commandEncoder = t.device.createCommandEncoder();
{
const renderPass = t.beginRenderPass(commandEncoder);
renderPass.setPipeline(pipeline2);
renderPass.setVertexBuffer(0, vertexBuffer1);
renderPass.setVertexBuffer(1, vertexBuffer2);
renderPass.draw(3);
renderPass.endPass();
}
{
const renderPass = t.beginRenderPass(commandEncoder);
renderPass.setPipeline(pipeline1);
renderPass.draw(3);
renderPass.endPass();
}
t.expectValidationError(() => {
commandEncoder.finish();
});
}
}); | export const g = makeTestGroup(F);
|
get-step-template.ts | import ObjectID from 'bson-objectid';
import { insertErms } from './insert-erms';
// Builds a fenced code block for the given language label
function getCodeBlock(label: string, content?: string) {
return `\`\`\`${label}
${typeof content !== 'undefined' ? content : ''}
\`\`\`\n`;
}
// Builds a section
function getSeedSection(content: string, label: string) {
return content
? `
## --${label}--
${content}`
: '';
}
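// For example (illustrative values): getCodeBlock('js', 'console.log(1);') yields a
// fenced ```js block containing that line, and getSeedSection(code, 'seed-contents')
// prefixes the code with the "## --seed-contents--" marker used in the challenge
// markdown below.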
type StepOptions = {
challengeId: ObjectID;
challengeSeeds: Record<string, ChallengeSeed>;
stepBetween: boolean;
stepNum: number;
};
export interface ChallengeSeed {
contents: string;
ext: string;
editableRegionBoundaries: number[];
head?: string;
tail?: string;
}
// Build the base markdown for a step
function | ({
challengeId,
challengeSeeds,
stepBetween,
stepNum
}: StepOptions): string {
const seedTexts = Object.values(challengeSeeds)
.map(({ contents, ext, editableRegionBoundaries }: ChallengeSeed) => {
let fullContents = contents;
if (editableRegionBoundaries.length >= 2) {
fullContents = insertErms(contents, editableRegionBoundaries);
}
return getCodeBlock(ext, fullContents);
})
.join('\n');
const seedHeads = Object.values(challengeSeeds)
.filter(({ head }: ChallengeSeed) => head)
.map(({ ext, head }: ChallengeSeed) => getCodeBlock(ext, head))
.join('\n');
const seedTails = Object.values(challengeSeeds)
.filter(({ tail }: ChallengeSeed) => tail)
.map(({ ext, tail }: ChallengeSeed) => getCodeBlock(ext, tail))
.join('\n');
const descStepNum = stepBetween ? stepNum + 1 : stepNum;
const stepDescription = `${
stepBetween ? 'new ' : ''
}step ${descStepNum} instructions`;
const seedChallengeSection = getSeedSection(seedTexts, 'seed-contents');
const seedHeadSection = getSeedSection(seedHeads, 'before-user-code');
const seedTailSection = getSeedSection(seedTails, 'after-user-code');
return (
`---
id: ${challengeId.toString()}
title: Step ${stepNum}
challengeType: 0
dashedName: step-${stepNum}
---
# --description--
${stepDescription}
# --hints--
Test 1
${getCodeBlock('js')}
# --seed--` +
seedChallengeSection +
seedHeadSection +
seedTailSection
);
}
export { getStepTemplate };
| getStepTemplate |
apply_federation.go | /*
Copyright 2016 The Kubernetes Authors. | you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package federation
import (
"fmt"
"k8s.io/kops/upup/pkg/fi/fitasks"
"k8s.io/kops/upup/pkg/fi"
"crypto/rsa"
crypto_rand "crypto/rand"
k8sapiv1 "k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kops/federation/tasks"
"text/template"
"bytes"
"k8s.io/kops/federation/model"
"k8s.io/kops/federation/targets/kubernetes"
kopsapi "k8s.io/kops/pkg/apis/kops"
"k8s.io/kops/pkg/client/simple"
"k8s.io/kops/pkg/apis/kops/registry"
"k8s.io/kubernetes/pkg/api/errors"
"k8s.io/kops/upup/pkg/kutil"
"k8s.io/kops/upup/pkg/fi/k8sapi"
"k8s.io/kubernetes/pkg/client/clientset_generated/release_1_3"
"k8s.io/kubernetes/federation/client/clientset_generated/federation_release_1_4"
"github.com/golang/glog"
"strings"
)
type ApplyFederationOperation struct {
Federation *kopsapi.Federation
KopsClient simple.Clientset
namespace string
name string
apiserverDeploymentName string
apiserverServiceName string
apiserverHostName string
dnsZoneName string
apiserverSecretName string
}
func (o*ApplyFederationOperation) FindKubecfg() (*kutil.KubeconfigBuilder, error) {
// TODO: Only if not yet set?
// hasKubecfg, err := hasKubecfg(f.Name)
// if err != nil {
// glog.Warningf("error reading kubecfg: %v", err)
// hasKubecfg = true
// }
// Loop through looking for a configured cluster
for _, controller := range o.Federation.Spec.Controllers {
cluster, err := o.KopsClient.Clusters().Get(controller)
if err != nil {
return nil, fmt.Errorf("error reading cluster %q: %v", controller, err)
}
context, err := o.federationContextForCluster(cluster)
if err != nil {
return nil, err
}
apiserverKeypair := o.buildApiserverKeypair()
federationConfiguration := &FederationConfiguration{
Namespace: o.namespace,
ApiserverSecretName: o.apiserverSecretName,
ApiserverServiceName: o.apiserverServiceName,
ApiserverKeypair: apiserverKeypair,
KubeconfigSecretName:"federation-apiserver-kubeconfig",
}
k, err := federationConfiguration.extractKubecfg(context, o.Federation)
if err != nil {
return nil, err
}
if k == nil {
continue
}
return k, nil
}
return nil, nil
}
func (o*ApplyFederationOperation) Run() error {
o.namespace = "federation"
o.name = "federation"
o.apiserverDeploymentName = "federation-apiserver"
o.apiserverServiceName = o.apiserverDeploymentName
o.apiserverSecretName = "federation-apiserver-secrets"
o.dnsZoneName = o.Federation.Spec.DNSName
o.apiserverHostName = "api." + o.dnsZoneName
// TODO: sync clusters
var controllerKubernetesClients []release_1_3.Interface
for _, controller := range o.Federation.Spec.Controllers {
cluster, err := o.KopsClient.Clusters().Get(controller)
if err != nil {
return fmt.Errorf("error reading cluster %q: %v", controller, err)
}
context, err := o.federationContextForCluster(cluster)
if err != nil {
return err
}
err = o.runOnCluster(context, cluster)
if err != nil {
return err
}
k8s := context.Target.(*kubernetes.KubernetesTarget).KubernetesClient
controllerKubernetesClients = append(controllerKubernetesClients, k8s)
}
federationKubecfg, err := o.FindKubecfg()
if err != nil {
return err
}
federationRestConfig, err := federationKubecfg.BuildRestConfig()
if err != nil {
return err
}
federationControllerClient, err := federation_release_1_4.NewForConfig(federationRestConfig)
if err != nil {
return err
}
//k8sControllerClient, err := release_1_3.NewForConfig(federationRestConfig)
//if err != nil {
// return err
//}
for _, member := range o.Federation.Spec.Members {
glog.V(2).Infof("configuring member cluster %q", member)
cluster, err := o.KopsClient.Clusters().Get(member)
if err != nil {
return fmt.Errorf("error reading cluster %q: %v", member, err)
}
clusterName := strings.Replace(cluster.Name, ".", "-", -1)
a := &FederationCluster{
FederationNamespace : o.namespace,
ControllerKubernetesClients: controllerKubernetesClients,
FederationClient: federationControllerClient,
ClusterSecretName: "secret-" + cluster.Name,
ClusterName: clusterName,
ApiserverHostname: cluster.Spec.MasterPublicName,
}
err = a.Run(cluster)
if err != nil {
return err
}
}
// Create default namespace
glog.V(2).Infof("Ensuring default namespace exists")
if _, err := o.ensureFederationNamespace(federationControllerClient, "default"); err != nil {
return err
}
return nil
}
// Builds a fi.Context applying to the federation namespace in the specified cluster
// Note that this operates inside the cluster, for example the KeyStore is backed by secrets in the namespace
func (o*ApplyFederationOperation) federationContextForCluster(cluster *kopsapi.Cluster) (*fi.Context, error) {
clusterKeystore, err := registry.KeyStore(cluster)
if err != nil {
return nil, err
}
target, err := kubernetes.NewKubernetesTarget(o.KopsClient, clusterKeystore, cluster)
if err != nil {
return nil, err
}
federationKeystore := k8sapi.NewKubernetesKeystore(target.KubernetesClient, o.namespace)
checkExisting := true
context, err := fi.NewContext(target, nil, federationKeystore, nil, nil, checkExisting, nil)
if err != nil {
return nil, err
}
return context, nil
}
func (o*ApplyFederationOperation) buildApiserverKeypair() (*fitasks.Keypair) {
keypairName := "secret-" + o.apiserverHostName
keypair := &fitasks.Keypair{
Name: fi.String(keypairName),
Subject: "cn=" + o.Federation.Name,
Type: "server",
}
// So it has a valid cert inside the cluster
if o.apiserverServiceName != "" {
keypair.AlternateNames = append(keypair.AlternateNames, o.apiserverServiceName)
}
// So it has a valid cert outside the cluster
if o.apiserverHostName != "" {
keypair.AlternateNames = append(keypair.AlternateNames, o.apiserverHostName)
}
return keypair
}
func (o*ApplyFederationOperation) runOnCluster(context *fi.Context, cluster *kopsapi.Cluster) error {
_, _, err := EnsureCASecret(context.Keystore)
if err != nil {
return err
}
apiserverKeypair := o.buildApiserverKeypair()
err = apiserverKeypair.Run(context)
if err != nil {
return err
}
err = o.EnsureNamespace(context)
if err != nil {
return err
}
federationConfiguration := &FederationConfiguration{
ApiserverServiceName: o.apiserverServiceName,
Namespace: o.namespace,
ApiserverSecretName: o.apiserverSecretName,
ApiserverKeypair: apiserverKeypair,
KubeconfigSecretName:"federation-apiserver-kubeconfig",
}
err = federationConfiguration.EnsureConfiguration(context)
if err != nil {
return err
}
templateData, err := model.Asset("manifest.yaml")
if err != nil {
return fmt.Errorf("error loading manifest: %v", err)
}
manifest, err := o.executeTemplate("manifest", string(templateData))
if err != nil {
return fmt.Errorf("error expanding manifest template: %v", err)
}
applyManifestTask := tasks.KubernetesResource{
Name: fi.String(o.name),
Manifest: fi.WrapResource(fi.NewStringResource(manifest)),
}
err = applyManifestTask.Run(context)
if err != nil {
return err
}
return nil
}
func (o*ApplyFederationOperation) buildTemplateData() map[string]string {
namespace := o.namespace
name := o.name
dnsZoneName := o.dnsZoneName
apiserverHostname := o.apiserverHostName
// The names of the k8s apiserver & controller-manager objects
apiserverDeploymentName := "federation-apiserver"
controllerDeploymentName := "federation-controller-manager"
imageRepo := "gcr.io/google_containers/hyperkube-amd64"
imageTag := "v1.4.0"
federationDNSProvider := "aws-route53"
federationDNSProviderConfig := ""
// TODO: define exactly what these do...
serviceCIDR := "10.10.0.0/24"
federationAdmissionControl := "NamespaceLifecycle"
data := make(map[string]string)
data["FEDERATION_NAMESPACE"] = namespace
data["FEDERATION_NAME"] = name
data["FEDERATION_APISERVER_DEPLOYMENT_NAME"] = apiserverDeploymentName
data["FEDERATION_CONTROLLER_MANAGER_DEPLOYMENT_NAME"] = controllerDeploymentName
data["FEDERATION_APISERVER_IMAGE_REPO"] = imageRepo
data["FEDERATION_APISERVER_IMAGE_TAG"] = imageTag
data["FEDERATION_CONTROLLER_MANAGER_IMAGE_REPO"] = imageRepo
data["FEDERATION_CONTROLLER_MANAGER_IMAGE_TAG"] = imageTag
data["FEDERATION_SERVICE_CIDR"] = serviceCIDR
data["EXTERNAL_HOSTNAME"] = apiserverHostname
data["FEDERATION_ADMISSION_CONTROL"] = federationAdmissionControl
data["FEDERATION_DNS_PROVIDER"] = federationDNSProvider
data["FEDERATION_DNS_PROVIDER_CONFIG"] = federationDNSProviderConfig
data["DNS_ZONE_NAME"] = dnsZoneName
return data
}
func (o*ApplyFederationOperation) executeTemplate(key string, templateDefinition string) (string, error) {
data := o.buildTemplateData()
t := template.New(key)
funcMap := make(template.FuncMap)
//funcMap["Args"] = func() []string {
// return args
//}
//funcMap["RenderResource"] = func(resourceName string, args []string) (string, error) {
// return l.renderResource(resourceName, args)
//}
//for k, fn := range l.TemplateFunctions {
// funcMap[k] = fn
//}
t.Funcs(funcMap)
t.Option("missingkey=zero")
_, err := t.Parse(templateDefinition)
if err != nil {
return "", fmt.Errorf("error parsing template %q: %v", key, err)
}
var buffer bytes.Buffer
err = t.ExecuteTemplate(&buffer, key, data)
if err != nil {
return "", fmt.Errorf("error executing template %q: %v", key, err)
}
return buffer.String(), nil
}
func (o*ApplyFederationOperation) EnsureNamespace(c *fi.Context) error {
k8s := c.Target.(*kubernetes.KubernetesTarget).KubernetesClient
ns, err := k8s.Core().Namespaces().Get(o.namespace)
if err != nil {
if errors.IsNotFound(err) {
ns = nil
} else {
return fmt.Errorf("error reading namespace: %v", err)
}
}
if ns == nil {
ns = &k8sapiv1.Namespace{}
ns.Name = o.namespace
ns, err = k8s.Core().Namespaces().Create(ns)
if err != nil {
return fmt.Errorf("error creating namespace: %v", err)
}
}
return nil
}
func (o*ApplyFederationOperation) ensureFederationNamespace(k8s federation_release_1_4.Interface, name string) (*k8sapiv1.Namespace, error) {
return mutateNamespace(k8s, name, func(n *k8sapiv1.Namespace) (*k8sapiv1.Namespace, error) {
if n == nil {
n = &k8sapiv1.Namespace{}
n.Name = name
}
return n, nil
})
}
func EnsureCASecret(keystore fi.Keystore) (*fi.Certificate, *fi.PrivateKey, error) {
id := fi.CertificateId_CA
caCert, caPrivateKey, err := keystore.FindKeypair(id)
if err != nil {
return nil, nil, err
}
if caPrivateKey == nil {
template := fi.BuildCAX509Template()
caRsaKey, err := rsa.GenerateKey(crypto_rand.Reader, 2048)
if err != nil {
return nil, nil, fmt.Errorf("error generating RSA private key: %v", err)
}
caPrivateKey = &fi.PrivateKey{Key: caRsaKey}
caCert, err = fi.SignNewCertificate(caPrivateKey, template, nil, nil)
if err != nil {
return nil, nil, err
}
err = keystore.StoreKeypair(id, caCert, caPrivateKey)
if err != nil {
return nil, nil, err
}
}
return caCert, caPrivateKey, nil
} |
Licensed under the Apache License, Version 2.0 (the "License"); |
md5iter.rs | use itoa;
use md5::{Context, Digest};
pub struct HashIter {
idx: usize,
hasher: Context,
}
impl HashIter {
pub fn new(seed: &str) -> HashIter {
let mut md5 = Context::new();
md5.consume(seed.as_bytes());
HashIter {
idx: 0,
hasher: md5,
}
}
}
impl Iterator for HashIter {
type Item = Digest;
#[inline(always)]
fn next(&mut self) -> Option<Digest> |
}
#[cfg(test)]
mod test {
extern crate test;
use md5;
use std::io::Write;
#[bench]
fn md5_an_md5(b: &mut test::Bencher) {
let mut s = ::std::io::sink();
b.iter(|| write!(s, "{:x}", md5::compute("577571be4de9dcce85a041ba0410f29f")));
}
}
| {
let mut hasher = self.hasher.clone();
itoa::write(&mut hasher, self.idx).unwrap();
self.idx += 1;
Some(hasher.compute())
} |
recursion.spec.ts | import { fibonacci, fibonacciWithCache, fibonacciWithStack } from './recursion'
test('computes the Fibonacci number recursively', () => {
const result = fibonacci(10)
expect(result).toEqual(34)
})
test('computes the Fibonacci number with a cache', () => {
const result = fibonacciWithCache(10)
expect(result).toEqual(34)
})
test('computes the Fibonacci number with a stack', () => {
const result = fibonacciWithStack(10)
expect(result).toEqual(34) | }) |
|
topic.py | #!/usr/bin/env python
######################################################################################################################
# Copyright 2020-2021 Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. A copy of the License is located at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# or in the 'license' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES #
# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions #
# and limitations under the License. #
######################################################################################################################
import datetime
import json
import os
from datetime import datetime
import boto3
from botocore import config
from shared_util import custom_boto_config, custom_logging
logger = custom_logging.get_logger(__name__)
firehose = boto3.client("firehose", config=custom_boto_config.init())
def store_topics(data):
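"""Write each topic record in data to the Kinesis Data Firehose stream named by the TOPICS_FIREHOSE environment variable."""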
for key in data:
for record in data[key]:
logger.debug("Record information for writing to Firehose is " + json.dumps(record))
response = firehose.put_record(
DeliveryStreamName=os.environ["TOPICS_FIREHOSE"],
Record={
"Data": json.dumps(
{
"job_id": record["job_id"],
"job_timestamp": datetime.strftime(
datetime.strptime(record["job_timestamp"], "%Y-%m-%dT%H:%M:%S.%fZ"),
"%Y-%m-%d %H:%M:%S.%f",
),
"topic": record["topic"],
"term": record["term"],
"weight": record["weight"],
}
)
+ "\n"
},
)
logger.debug("Response for record " + record["job_id"] + "is " + json.dumps(response))
def store_mappings(data):
| logger.debug("Data received is " + json.dumps(data))
response = firehose.put_record(
DeliveryStreamName=os.environ["TOPIC_MAPPINGS_FIREHOSE"],
Record={
"Data": json.dumps(
{
"platform": data["platform"],
"job_id": data["job_id"],
"job_timestamp": datetime.strftime(
datetime.strptime(data["job_timestamp"], "%Y-%m-%dT%H:%M:%S.%fZ"), "%Y-%m-%d %H:%M:%S.%f"
),
"topic": data["topic"],
"id_str": data["id_str"],
}
)
+ "\n"
},
)
logger.debug(
"Response for record "
+ json.dumps({"platform": data["platform"], "topic": data["topic"], "id_str": data["id_str"]})
+ "is "
+ json.dumps(response)
) |
|
path.go | package helper
import (
"fmt"
"os"
"os/user"
)
var homepath string
func init() {
u, err := user.Current()
if err != nil {
fmt.Println(err)
os.Exit(1)
}
homepath = u.HomeDir
Store("home", homepath)
}
// Path returns the absolute path for the given file path.
// Relative paths are prefixed with the home directory.
// If the argument is empty, it returns the home directory path.
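// For example, Path("notes/todo.txt") returns the home directory joined with "notes/todo.txt", while an absolute path such as "/etc/hosts" is returned unchanged.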
func Path(str string) string {
if len(str) == 0 {
return homepath
}
if str[0] == '/' |
return homepath + "/" + str
}
| {
return str
} |
main.rs | use std::io::Read;
use aoc_2015_day_04::part_one;
use aoc_2015_day_04::part_two;
fn main() {
let mut input = String::new();
std::io::stdin()
.read_to_string(&mut input)
.expect("failed to read input");
println!("Advent of Code 2015-04");
println!("------ Part 1 ------");
println!("{}", part_one(&input));
println!();
println!("------ Part 2 ------"); | } | println!("{}", part_two(&input));
println!(); |
GroupByNativeTypeModelArgs.ts | import * as TypeGraphQL from "type-graphql";
import * as GraphQLScalars from "graphql-scalars";
import { NativeTypeModelOrderByInput } from "../../../inputs/NativeTypeModelOrderByInput";
import { NativeTypeModelScalarWhereWithAggregatesInput } from "../../../inputs/NativeTypeModelScalarWhereWithAggregatesInput";
import { NativeTypeModelWhereInput } from "../../../inputs/NativeTypeModelWhereInput";
import { NativeTypeModelScalarFieldEnum } from "../../../../enums/NativeTypeModelScalarFieldEnum";
@TypeGraphQL.ArgsType()
export class GroupByNativeTypeModelArgs {
@TypeGraphQL.Field(_type => NativeTypeModelWhereInput, {
nullable: true
})
where?: NativeTypeModelWhereInput | undefined;
@TypeGraphQL.Field(_type => [NativeTypeModelOrderByInput], {
nullable: true
})
orderBy?: NativeTypeModelOrderByInput[] | undefined;
@TypeGraphQL.Field(_type => [NativeTypeModelScalarFieldEnum], {
nullable: false | by!: Array<"id" | "bigInt" | "byteA" | "decimal">;
@TypeGraphQL.Field(_type => NativeTypeModelScalarWhereWithAggregatesInput, {
nullable: true
})
having?: NativeTypeModelScalarWhereWithAggregatesInput | undefined;
@TypeGraphQL.Field(_type => TypeGraphQL.Int, {
nullable: true
})
take?: number | undefined;
@TypeGraphQL.Field(_type => TypeGraphQL.Int, {
nullable: true
})
skip?: number | undefined;
} | }) |
precache-manifest.2ffbfef3ee22a9ee00c377ccc6d45db5.js | self.__precacheManifest = [
{
"revision": "ff66b25a0ebf00d45f6a72f7fc265f2c",
"url": "/img/brand/logo.png"
},
{
"revision": "1dd2fce4c79c5cd8fa2a06fc30baca5d",
"url": "/img/brand/plots screenshot.png"
},
{
"revision": "0fc390b64ce6957d01f9",
"url": "/js/chunk-vendors.15824258.js"
},
{
"revision": "8f23df122fc78432490934ac08e59061",
"url": "/img/theme/ManjotPahwa.png"
},
{
"revision": "be997d5226b992ffad34816870c6b7aa",
"url": "/img/theme/team-2-800x800.jpg"
},
{
"revision": "fd4a34d026fb9e0f4867188d47b11ba8",
"url": "/img/theme/img-1-1200x1000.jpg"
},
{
"revision": "a7435bbc0a1b62ea82777077cec85d50",
"url": "/img/header-img.png"
},
{
"revision": "0b8a30b10cbe7708d5f3a4b007c1d665",
"url": "/img/nucleo-icons.0b8a30b1.svg"
},
{
"revision": "7789b5bfa57722dd8916b1b9ff1b1d37",
"url": "/img/theme/img-2-1200x1000.jpg"
},
{
"revision": "54e3f3c414bd8e7234bae3ee3be950e5",
"url": "/img/theme/team-3-800x800.jpg"
},
{
"revision": "538acb7b7372c338e37ba9b5e1abb16a",
"url": "/img/women_preso_discussing.png"
},
{
"revision": "66618a418175ddf2ac8c47a241d327a8",
"url": "/img/theme/team-4-800x800.jpg"
},
{
"revision": "912ec66d7572ff821749319396470bde",
"url": "/img/fontawesome-webfont.912ec66d.svg"
},
{
"revision": "816ae368141c872173abadad685cadf0",
"url": "/index.html"
},
{
"revision": "edc7106b21ec12e57022b2ebd534cd2d",
"url": "/img/theme/team-1-800x800.jpg"
},
{
"revision": "594b1ee1d95ada356eaad078e9217932",
"url": "/img/ill/ill-2.svg"
},
{
"revision": "dc49ad52655e1d9d0552c026db3ef688",
"url": "/img/theme/landing.jpg"
},
{
"revision": "76f0657b89251f66c80452ca4efd8797",
"url": "/img/theme/ChandraPrakash.jpeg"
},
{
"revision": "71883344aa1fb0c1bac4a7d2cc6308ac",
"url": "/img/theme/HimanshuChaudhary.jpeg"
},
{
"revision": "6fafe4baca9d50d61a898c84ade7afa3",
"url": "/img/brand/white.png"
},
{
"revision": "981ef6e04a37bcd4e291",
"url": "/js/app.1a0517b0.js"
},
{
"revision": "a7eee6a9698ab66b0f93f931282bda6a",
"url": "/img/theme/ManishSachdev.jpeg"
},
{
"revision": "974088a1931e40895bac6db119c62448",
"url": "/img/theme/promo-1.png"
},
{
"revision": "39d48b47ddec702a2a0708e025a5507f",
"url": "/img/theme/ManjotPahwaSquare.jpg"
},
{
"revision": "20d702b83a06bdb2ea71c4c0cb9a7a56",
"url": "/img/theme/profile.jpg"
},
{
"revision": "15689451f4887312a8fec873746f82f2",
"url": "/img/2women_humaaans_discuss.png"
},
{
"revision": "c47104936cd781b936927cc1fc33543b",
"url": "/favicon.png"
},
{
"revision": "c85c75275c0a0a617f9e5accc2700908",
"url": "/img/brand/creativetim-white-slim.png"
},
{
"revision": "8e55eab46b5fcfc4a7a0b27cb07c8888",
"url": "/img/brand/github-white-slim.png"
},
{
"revision": "185288d13ed8e9d745bd279ea34667bf",
"url": "/img/brand/blue.png"
},
{
"revision": "b06871f281fee6b241d60582ae9369b9",
"url": "/fonts/fontawesome-webfont.b06871f2.ttf"
},
{
"revision": "674f50d287a8c48dc19ba404d20fe713",
"url": "/fonts/fontawesome-webfont.674f50d2.eot"
},
{
"revision": "c47104936cd781b936927cc1fc33543b",
"url": "/img/brand/favicon.png"
},
{
"revision": "fee66e712a8a08eef5805a46892932ad",
"url": "/fonts/fontawesome-webfont.fee66e71.woff" | {
"revision": "af7ae505a9eed503f8b8e6982036873e",
"url": "/fonts/fontawesome-webfont.af7ae505.woff2"
},
{
"revision": "f82ec6ba2dc4181db2af35c499462840",
"url": "/fonts/nucleo-icons.f82ec6ba.ttf"
},
{
"revision": "c1733565b32b585676302d4233c39da8",
"url": "/fonts/nucleo-icons.c1733565.eot"
},
{
"revision": "426439788ec5ba772cdf94057f6f4659",
"url": "/fonts/nucleo-icons.42643978.woff2"
},
{
"revision": "2569aaea6eaaf8cd210db7f2fa016743",
"url": "/fonts/nucleo-icons.2569aaea.woff"
},
{
"revision": "981ef6e04a37bcd4e291",
"url": "/css/app.9ef16e3c.css"
}
]; | }, |
lib.rs | #[macro_use] extern crate nom;
extern crate leb128;
extern crate byteorder;
extern crate failure;
mod parser;
mod error;
mod result_types;
pub use error::DexParserError;
pub use result_types::*;
pub use nom::Endianness;
pub fn parse(buf: &[u8]) -> Result<DexFile, DexParserError> |
// TODO: validate checksum/signature | {
parser::parse(buf)
} |
calcfunction_multiple_outputs.py | from aiida.engine import calcfunction |
@calcfunction
def sum_and_difference(alpha, beta):
return {'sum': alpha + beta, 'difference': alpha - beta}
result = sum_and_difference(Int(1), Int(2)) | from aiida.orm import Int |
model_virtualization_esxi_ova_custom_spec.go | /*
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. The Intersight OpenAPI document defines the complete set of properties that are returned in the HTTP response. From that perspective, a client can expect that no additional properties are returned, unless these properties are explicitly defined in the OpenAPI document. However, when a client uses an older version of the Intersight OpenAPI document, the server may send additional properties because the software is more recent than the client. In that case, the client may receive properties that it does not know about. Some generated SDKs perform a strict validation of the HTTP response body against the OpenAPI document.
API version: 1.0.9-6484
Contact: [email protected]
*/
// Code generated by OpenAPI Generator (https://openapi-generator.tech); DO NOT EDIT.
package intersight
import (
"encoding/json"
"reflect"
"strings"
)
// VirtualizationEsxiOvaCustomSpec Specify ESXi virtual machine custom specification.
type VirtualizationEsxiOvaCustomSpec struct {
VirtualizationBaseCustomSpec
// The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.
ClassId string `json:"ClassId"`
// The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.
ObjectType string `json:"ObjectType"`
// Specify the Extra Config specification which can be configured on virtual machine.
ExtraConfig interface{} `json:"ExtraConfig,omitempty"`
// Specify the OVA Environment specification which can be configured on the virtual machine.
OvaEnvSpec interface{} `json:"OvaEnvSpec,omitempty"`
AdditionalProperties map[string]interface{}
}
type _VirtualizationEsxiOvaCustomSpec VirtualizationEsxiOvaCustomSpec
// NewVirtualizationEsxiOvaCustomSpec instantiates a new VirtualizationEsxiOvaCustomSpec object
// This constructor will assign default values to properties that have it defined,
// and makes sure properties required by API are set, but the set of arguments
// will change when the set of required properties is changed
func | (classId string, objectType string) *VirtualizationEsxiOvaCustomSpec {
this := VirtualizationEsxiOvaCustomSpec{}
this.ClassId = classId
this.ObjectType = objectType
return &this
}
// NewVirtualizationEsxiOvaCustomSpecWithDefaults instantiates a new VirtualizationEsxiOvaCustomSpec object
// This constructor will only assign default values to properties that have it defined,
// but it doesn't guarantee that properties required by API are set
func NewVirtualizationEsxiOvaCustomSpecWithDefaults() *VirtualizationEsxiOvaCustomSpec {
this := VirtualizationEsxiOvaCustomSpec{}
var classId string = "virtualization.EsxiOvaCustomSpec"
this.ClassId = classId
var objectType string = "virtualization.EsxiOvaCustomSpec"
this.ObjectType = objectType
return &this
}
// GetClassId returns the ClassId field value
func (o *VirtualizationEsxiOvaCustomSpec) GetClassId() string {
if o == nil {
var ret string
return ret
}
return o.ClassId
}
// GetClassIdOk returns a tuple with the ClassId field value
// and a boolean to check if the value has been set.
func (o *VirtualizationEsxiOvaCustomSpec) GetClassIdOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.ClassId, true
}
// SetClassId sets field value
func (o *VirtualizationEsxiOvaCustomSpec) SetClassId(v string) {
o.ClassId = v
}
// GetObjectType returns the ObjectType field value
func (o *VirtualizationEsxiOvaCustomSpec) GetObjectType() string {
if o == nil {
var ret string
return ret
}
return o.ObjectType
}
// GetObjectTypeOk returns a tuple with the ObjectType field value
// and a boolean to check if the value has been set.
func (o *VirtualizationEsxiOvaCustomSpec) GetObjectTypeOk() (*string, bool) {
if o == nil {
return nil, false
}
return &o.ObjectType, true
}
// SetObjectType sets field value
func (o *VirtualizationEsxiOvaCustomSpec) SetObjectType(v string) {
o.ObjectType = v
}
// GetExtraConfig returns the ExtraConfig field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *VirtualizationEsxiOvaCustomSpec) GetExtraConfig() interface{} {
if o == nil {
var ret interface{}
return ret
}
return o.ExtraConfig
}
// GetExtraConfigOk returns a tuple with the ExtraConfig field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *VirtualizationEsxiOvaCustomSpec) GetExtraConfigOk() (*interface{}, bool) {
if o == nil || o.ExtraConfig == nil {
return nil, false
}
return &o.ExtraConfig, true
}
// HasExtraConfig returns a boolean if a field has been set.
func (o *VirtualizationEsxiOvaCustomSpec) HasExtraConfig() bool {
if o != nil && o.ExtraConfig != nil {
return true
}
return false
}
// SetExtraConfig gets a reference to the given interface{} and assigns it to the ExtraConfig field.
func (o *VirtualizationEsxiOvaCustomSpec) SetExtraConfig(v interface{}) {
o.ExtraConfig = v
}
// GetOvaEnvSpec returns the OvaEnvSpec field value if set, zero value otherwise (both if not set or set to explicit null).
func (o *VirtualizationEsxiOvaCustomSpec) GetOvaEnvSpec() interface{} {
if o == nil {
var ret interface{}
return ret
}
return o.OvaEnvSpec
}
// GetOvaEnvSpecOk returns a tuple with the OvaEnvSpec field value if set, nil otherwise
// and a boolean to check if the value has been set.
// NOTE: If the value is an explicit nil, `nil, true` will be returned
func (o *VirtualizationEsxiOvaCustomSpec) GetOvaEnvSpecOk() (*interface{}, bool) {
if o == nil || o.OvaEnvSpec == nil {
return nil, false
}
return &o.OvaEnvSpec, true
}
// HasOvaEnvSpec returns a boolean if a field has been set.
func (o *VirtualizationEsxiOvaCustomSpec) HasOvaEnvSpec() bool {
if o != nil && o.OvaEnvSpec != nil {
return true
}
return false
}
// SetOvaEnvSpec gets a reference to the given interface{} and assigns it to the OvaEnvSpec field.
func (o *VirtualizationEsxiOvaCustomSpec) SetOvaEnvSpec(v interface{}) {
o.OvaEnvSpec = v
}
func (o VirtualizationEsxiOvaCustomSpec) MarshalJSON() ([]byte, error) {
toSerialize := map[string]interface{}{}
serializedVirtualizationBaseCustomSpec, errVirtualizationBaseCustomSpec := json.Marshal(o.VirtualizationBaseCustomSpec)
if errVirtualizationBaseCustomSpec != nil {
return []byte{}, errVirtualizationBaseCustomSpec
}
errVirtualizationBaseCustomSpec = json.Unmarshal([]byte(serializedVirtualizationBaseCustomSpec), &toSerialize)
if errVirtualizationBaseCustomSpec != nil {
return []byte{}, errVirtualizationBaseCustomSpec
}
if true {
toSerialize["ClassId"] = o.ClassId
}
if true {
toSerialize["ObjectType"] = o.ObjectType
}
if o.ExtraConfig != nil {
toSerialize["ExtraConfig"] = o.ExtraConfig
}
if o.OvaEnvSpec != nil {
toSerialize["OvaEnvSpec"] = o.OvaEnvSpec
}
for key, value := range o.AdditionalProperties {
toSerialize[key] = value
}
return json.Marshal(toSerialize)
}
func (o *VirtualizationEsxiOvaCustomSpec) UnmarshalJSON(bytes []byte) (err error) {
type VirtualizationEsxiOvaCustomSpecWithoutEmbeddedStruct struct {
// The fully-qualified name of the instantiated, concrete type. This property is used as a discriminator to identify the type of the payload when marshaling and unmarshaling data.
ClassId string `json:"ClassId"`
// The fully-qualified name of the instantiated, concrete type. The value should be the same as the 'ClassId' property.
ObjectType string `json:"ObjectType"`
// Specify the Extra Config specification which can be configured on virtual machine.
ExtraConfig interface{} `json:"ExtraConfig,omitempty"`
// Specify the OVA Environment specification which can be configured on the virtual machine.
OvaEnvSpec interface{} `json:"OvaEnvSpec,omitempty"`
}
varVirtualizationEsxiOvaCustomSpecWithoutEmbeddedStruct := VirtualizationEsxiOvaCustomSpecWithoutEmbeddedStruct{}
err = json.Unmarshal(bytes, &varVirtualizationEsxiOvaCustomSpecWithoutEmbeddedStruct)
if err == nil {
varVirtualizationEsxiOvaCustomSpec := _VirtualizationEsxiOvaCustomSpec{}
varVirtualizationEsxiOvaCustomSpec.ClassId = varVirtualizationEsxiOvaCustomSpecWithoutEmbeddedStruct.ClassId
varVirtualizationEsxiOvaCustomSpec.ObjectType = varVirtualizationEsxiOvaCustomSpecWithoutEmbeddedStruct.ObjectType
varVirtualizationEsxiOvaCustomSpec.ExtraConfig = varVirtualizationEsxiOvaCustomSpecWithoutEmbeddedStruct.ExtraConfig
varVirtualizationEsxiOvaCustomSpec.OvaEnvSpec = varVirtualizationEsxiOvaCustomSpecWithoutEmbeddedStruct.OvaEnvSpec
*o = VirtualizationEsxiOvaCustomSpec(varVirtualizationEsxiOvaCustomSpec)
} else {
return err
}
varVirtualizationEsxiOvaCustomSpec := _VirtualizationEsxiOvaCustomSpec{}
err = json.Unmarshal(bytes, &varVirtualizationEsxiOvaCustomSpec)
if err == nil {
o.VirtualizationBaseCustomSpec = varVirtualizationEsxiOvaCustomSpec.VirtualizationBaseCustomSpec
} else {
return err
}
additionalProperties := make(map[string]interface{})
if err = json.Unmarshal(bytes, &additionalProperties); err == nil {
delete(additionalProperties, "ClassId")
delete(additionalProperties, "ObjectType")
delete(additionalProperties, "ExtraConfig")
delete(additionalProperties, "OvaEnvSpec")
// remove fields from embedded structs
reflectVirtualizationBaseCustomSpec := reflect.ValueOf(o.VirtualizationBaseCustomSpec)
for i := 0; i < reflectVirtualizationBaseCustomSpec.Type().NumField(); i++ {
t := reflectVirtualizationBaseCustomSpec.Type().Field(i)
if jsonTag := t.Tag.Get("json"); jsonTag != "" {
fieldName := ""
if commaIdx := strings.Index(jsonTag, ","); commaIdx > 0 {
fieldName = jsonTag[:commaIdx]
} else {
fieldName = jsonTag
}
if fieldName != "AdditionalProperties" {
delete(additionalProperties, fieldName)
}
}
}
o.AdditionalProperties = additionalProperties
}
return err
}
type NullableVirtualizationEsxiOvaCustomSpec struct {
value *VirtualizationEsxiOvaCustomSpec
isSet bool
}
func (v NullableVirtualizationEsxiOvaCustomSpec) Get() *VirtualizationEsxiOvaCustomSpec {
return v.value
}
func (v *NullableVirtualizationEsxiOvaCustomSpec) Set(val *VirtualizationEsxiOvaCustomSpec) {
v.value = val
v.isSet = true
}
func (v NullableVirtualizationEsxiOvaCustomSpec) IsSet() bool {
return v.isSet
}
func (v *NullableVirtualizationEsxiOvaCustomSpec) Unset() {
v.value = nil
v.isSet = false
}
func NewNullableVirtualizationEsxiOvaCustomSpec(val *VirtualizationEsxiOvaCustomSpec) *NullableVirtualizationEsxiOvaCustomSpec {
return &NullableVirtualizationEsxiOvaCustomSpec{value: val, isSet: true}
}
func (v NullableVirtualizationEsxiOvaCustomSpec) MarshalJSON() ([]byte, error) {
return json.Marshal(v.value)
}
func (v *NullableVirtualizationEsxiOvaCustomSpec) UnmarshalJSON(src []byte) error {
v.isSet = true
return json.Unmarshal(src, &v.value)
}
| NewVirtualizationEsxiOvaCustomSpec |
main.go | package main
import (
"fmt"
"log"
"net/http"
)
// Engine is the unified handler for all requests
type Engine struct{}
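// ServeHTTP dispatches on the request path: "/" echoes the URL path, "/hello" echoes the request headers, and any other path reports 404.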
func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) {
switch req.URL.Path {
case "/":
fmt.Fprintf(w, "URL.Path = %q\n", req.URL.Path)
case "/hello":
for k, v := range req.Header {
fmt.Fprintf(w, "Header[%q] = %q\n", k, v)
}
default:
fmt.Fprintf(w, "404 NOT FOUND: %s\n", req.URL)
}
}
func | (){
// engine := new(Engine)
// log.Fatal(http.ListenAndServe(":9999", engine))
r := gee.New()
r.GET("/")
}
// // handler echoes r.URL.Path
// func indexHandler(w http.ResponseWriter, req *http.Request){
// fmt.Fprintf(w,"URL.Path = %q\n",req.URL.Path)
// }
// // handler echoes r.URL.Header
// func helloHandler(w http.ResponseWriter,req *http.Request){
// for k,v := range req.Header {
// fmt.Fprintf(w,"Header[%q] = %q\n",k,v)
// }
// }
| main |
case.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::{any::Any, sync::Arc};
use crate::error::{DataFusionError, Result};
use crate::physical_plan::{ColumnarValue, PhysicalExpr};
use arrow::array::{self, *};
use arrow::compute::{eq, eq_utf8};
use arrow::datatypes::{DataType, Schema};
use arrow::record_batch::RecordBatch;
use serde::{Deserialize, Serialize};
/// The CASE expression is similar to a series of nested if/else and there are two forms that
/// can be used. The first form consists of a series of boolean "when" expressions with
/// corresponding "then" expressions, and an optional "else" expression.
///
/// CASE WHEN condition THEN result
/// [WHEN ...]
/// [ELSE result]
/// END
///
/// The second form uses a base expression and then a series of "when" clauses that match on a
/// literal value.
///
/// CASE expression
/// WHEN value THEN result
/// [WHEN ...]
/// [ELSE result]
/// END
#[derive(Debug, Serialize, Deserialize)]
pub struct CaseExpr {
/// Optional base expression that can be compared to literal values in the "when" expressions
expr: Option<Arc<dyn PhysicalExpr>>,
/// One or more when/then expressions
when_then_expr: Vec<(Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>)>,
/// Optional "else" expression
else_expr: Option<Arc<dyn PhysicalExpr>>,
}
impl std::fmt::Display for CaseExpr {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "CASE ")?;
if let Some(e) = &self.expr {
write!(f, "{} ", e)?;
}
for (w, t) in &self.when_then_expr {
write!(f, "WHEN {} THEN {} ", w, t)?;
}
if let Some(e) = &self.else_expr {
write!(f, "ELSE {} ", e)?;
}
write!(f, "END")
}
}
impl CaseExpr {
/// Create a new CASE WHEN expression
pub fn try_new(
expr: Option<Arc<dyn PhysicalExpr>>,
when_then_expr: &[(Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>)],
else_expr: Option<Arc<dyn PhysicalExpr>>,
) -> Result<Self> {
if when_then_expr.is_empty() {
Err(DataFusionError::Execution(
"There must be at least one WHEN clause".to_string(),
))
} else {
Ok(Self {
expr,
when_then_expr: when_then_expr.to_vec(),
else_expr,
})
}
}
/// Optional base expression that can be compared to literal values in the "when" expressions
pub fn expr(&self) -> &Option<Arc<dyn PhysicalExpr>> {
&self.expr
}
/// One or more when/then expressions
pub fn when_then_expr(&self) -> &[(Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>)] {
&self.when_then_expr
}
/// Optional "else" expression
pub fn else_expr(&self) -> Option<&Arc<dyn PhysicalExpr>> {
self.else_expr.as_ref()
}
}
macro_rules! if_then_else {
($BUILDER_TYPE:ty, $ARRAY_TYPE:ty, $BOOLS:expr, $TRUE:expr, $FALSE:expr) => {{
let true_values = $TRUE
.as_ref()
.as_any()
.downcast_ref::<$ARRAY_TYPE>()
.expect("true_values downcast failed");
let false_values = $FALSE
.as_ref()
.as_any()
.downcast_ref::<$ARRAY_TYPE>()
.expect("false_values downcast failed");
let mut builder = <$BUILDER_TYPE>::new($BOOLS.len());
for i in 0..$BOOLS.len() {
if $BOOLS.is_null(i) {
if false_values.is_null(i) {
builder.append_null()?;
} else {
builder.append_value(false_values.value(i))?;
}
} else if $BOOLS.value(i) {
if true_values.is_null(i) {
builder.append_null()?;
} else {
builder.append_value(true_values.value(i))?;
}
} else {
if false_values.is_null(i) {
builder.append_null()?;
} else {
builder.append_value(false_values.value(i))?;
}
}
}
Ok(Arc::new(builder.finish()))
}};
}
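/// Builds a new array by choosing, for each row, the value from `true_values` where `bools` is true, and from `false_values` where it is false or null.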
fn if_then_else(
bools: &BooleanArray,
true_values: ArrayRef,
false_values: ArrayRef,
data_type: &DataType,
) -> Result<ArrayRef> {
match data_type {
DataType::UInt8 => if_then_else!(
array::UInt8Builder,
array::UInt8Array,
bools,
true_values,
false_values
),
DataType::UInt16 => if_then_else!(
array::UInt16Builder,
array::UInt16Array,
bools,
true_values,
false_values
),
DataType::UInt32 => if_then_else!(
array::UInt32Builder,
array::UInt32Array,
bools,
true_values,
false_values
),
DataType::UInt64 => if_then_else!(
array::UInt64Builder,
array::UInt64Array,
bools,
true_values,
false_values
),
DataType::Int8 => if_then_else!(
array::Int8Builder,
array::Int8Array,
bools,
true_values,
false_values
),
DataType::Int16 => if_then_else!(
array::Int16Builder,
array::Int16Array,
bools,
true_values,
false_values
),
DataType::Int32 => if_then_else!(
array::Int32Builder,
array::Int32Array,
bools,
true_values,
false_values
),
DataType::Int64 => if_then_else!(
array::Int64Builder,
array::Int64Array,
bools,
true_values,
false_values
),
DataType::Float32 => if_then_else!(
array::Float32Builder,
array::Float32Array,
bools,
true_values,
false_values
),
DataType::Float64 => if_then_else!(
array::Float64Builder,
array::Float64Array,
bools,
true_values,
false_values
),
DataType::Utf8 => if_then_else!(
array::StringBuilder,
array::StringArray,
bools,
true_values,
false_values
),
DataType::Boolean => if_then_else!(
array::BooleanBuilder,
array::BooleanArray,
bools,
true_values,
false_values
),
other => Err(DataFusionError::Execution(format!(
"CASE does not support '{:?}'",
other
))),
}
}
macro_rules! array_equals {
($TY:ty, $L:expr, $R:expr, $eq_fn:expr) => {{
let when_value = $L
.as_ref()
.as_any()
.downcast_ref::<$TY>()
.expect("array_equals downcast failed");
let base_value = $R
.as_ref()
.as_any()
.downcast_ref::<$TY>()
.expect("array_equals downcast failed");
$eq_fn(when_value, base_value).map_err(DataFusionError::from)
}};
}
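/// Compares `when_value` and `base_value` element-wise for equality, returning a BooleanArray of the matches.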
fn array_equals(
data_type: &DataType,
when_value: ArrayRef,
base_value: ArrayRef,
) -> Result<BooleanArray> {
match data_type {
DataType::UInt8 => {
array_equals!(array::UInt8Array, when_value, base_value, eq)
}
DataType::UInt16 => {
array_equals!(array::UInt16Array, when_value, base_value, eq)
}
DataType::UInt32 => {
array_equals!(array::UInt32Array, when_value, base_value, eq)
}
DataType::UInt64 => {
array_equals!(array::UInt64Array, when_value, base_value, eq)
}
DataType::Int8 => {
array_equals!(array::Int8Array, when_value, base_value, eq)
}
DataType::Int16 => {
array_equals!(array::Int16Array, when_value, base_value, eq)
}
DataType::Int32 => {
array_equals!(array::Int32Array, when_value, base_value, eq)
}
DataType::Int64 => {
array_equals!(array::Int64Array, when_value, base_value, eq)
}
DataType::Float32 => {
array_equals!(array::Float32Array, when_value, base_value, eq)
}
DataType::Float64 => {
array_equals!(array::Float64Array, when_value, base_value, eq)
}
DataType::Utf8 => {
array_equals!(array::StringArray, when_value, base_value, eq_utf8)
}
other => Err(DataFusionError::Execution(format!(
"CASE does not support '{:?}'",
other
))),
}
}
impl CaseExpr {
/// This function evaluates the form of CASE that matches an expression to fixed values.
///
/// CASE expression
/// WHEN value THEN result
/// [WHEN ...]
/// [ELSE result]
/// END
fn case_when_with_expr(&self, batch: &RecordBatch) -> Result<ColumnarValue> {
let return_type = self.when_then_expr[0].1.data_type(&batch.schema())?;
let expr = self.expr.as_ref().unwrap();
let base_value = expr.evaluate(batch)?;
let base_type = expr.data_type(&batch.schema())?;
let base_value = base_value.into_array(batch.num_rows());
// start with the else condition, or nulls
let mut current_value: Option<ArrayRef> = if let Some(e) = &self.else_expr {
Some(e.evaluate(batch)?.into_array(batch.num_rows()))
} else {
Some(new_null_array(&return_type, batch.num_rows()))
};
// walk backwards through the when/then expressions
for i in (0..self.when_then_expr.len()).rev() {
let i = i as usize;
let when_value = self.when_then_expr[i].0.evaluate(batch)?;
let when_value = when_value.into_array(batch.num_rows());
let then_value = self.when_then_expr[i].1.evaluate(batch)?;
let then_value = then_value.into_array(batch.num_rows());
// build boolean array representing which rows match the "when" value
let when_match = array_equals(&base_type, when_value, base_value.clone())?;
current_value = Some(if_then_else(
&when_match,
then_value,
current_value.unwrap(),
&return_type,
)?);
}
Ok(ColumnarValue::Array(current_value.unwrap()))
}
/// This function evaluates the form of CASE where each WHEN expression is a boolean
/// expression.
///
/// CASE WHEN condition THEN result
/// [WHEN ...]
/// [ELSE result]
/// END
fn case_when_no_expr(&self, batch: &RecordBatch) -> Result<ColumnarValue> {
let return_type = self.when_then_expr[0].1.data_type(&batch.schema())?;
// start with the else condition, or nulls
let mut current_value: Option<ArrayRef> = if let Some(e) = &self.else_expr {
Some(e.evaluate(batch)?.into_array(batch.num_rows()))
} else {
Some(new_null_array(&return_type, batch.num_rows()))
};
// walk backwards through the when/then expressions
for i in (0..self.when_then_expr.len()).rev() {
let i = i as usize;
let when_value = self.when_then_expr[i].0.evaluate(batch)?;
let when_value = when_value.into_array(batch.num_rows());
let when_value = when_value
.as_ref()
.as_any()
.downcast_ref::<BooleanArray>()
.expect("WHEN expression did not return a BooleanArray");
let then_value = self.when_then_expr[i].1.evaluate(batch)?;
let then_value = then_value.into_array(batch.num_rows());
current_value = Some(if_then_else(
when_value,
then_value,
current_value.unwrap(),
&return_type,
)?);
}
Ok(ColumnarValue::Array(current_value.unwrap()))
}
}
#[typetag::serde(name = "case_expr")]
impl PhysicalExpr for CaseExpr {
/// Return a reference to Any that can be used for downcasting
fn as_any(&self) -> &dyn Any {
self
}
fn data_type(&self, input_schema: &Schema) -> Result<DataType> {
self.when_then_expr[0].1.data_type(input_schema)
}
fn nullable(&self, input_schema: &Schema) -> Result<bool> {
// this expression is nullable if any of the input expressions are nullable
let then_nullable = self
.when_then_expr
.iter()
.map(|(_, t)| t.nullable(input_schema))
.collect::<Result<Vec<_>>>()?;
if then_nullable.contains(&true) {
Ok(true)
} else if let Some(e) = &self.else_expr {
e.nullable(input_schema)
} else {
Ok(false)
}
}
fn evaluate(&self, batch: &RecordBatch) -> Result<ColumnarValue> {
if self.expr.is_some() {
// this use case evaluates "expr" and then compares the values with the "when"
// values
self.case_when_with_expr(batch)
} else {
// The "when" conditions all evaluate to boolean in this use case and can be | }
}
/// Create a CASE expression
pub fn case(
expr: Option<Arc<dyn PhysicalExpr>>,
when_thens: &[(Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>)],
else_expr: Option<Arc<dyn PhysicalExpr>>,
) -> Result<Arc<dyn PhysicalExpr>> {
Ok(Arc::new(CaseExpr::try_new(expr, when_thens, else_expr)?))
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{
error::Result,
logical_plan::Operator,
physical_plan::expressions::{binary, col, lit},
scalar::ScalarValue,
};
use arrow::array::StringArray;
use arrow::datatypes::*;
#[test]
fn case_with_expr() -> Result<()> {
let batch = case_test_batch()?;
let schema = batch.schema();
// CASE a WHEN 'foo' THEN 123 WHEN 'bar' THEN 456 END
let when1 = lit(ScalarValue::Utf8(Some("foo".to_string())));
let then1 = lit(ScalarValue::Int32(Some(123)));
let when2 = lit(ScalarValue::Utf8(Some("bar".to_string())));
let then2 = lit(ScalarValue::Int32(Some(456)));
let expr = case(
Some(col("a", &schema)?),
&[(when1, then1), (when2, then2)],
None,
)?;
let result = expr.evaluate(&batch)?.into_array(batch.num_rows());
let result = result
.as_any()
.downcast_ref::<Int32Array>()
.expect("failed to downcast to Int32Array");
let expected = &Int32Array::from(vec![Some(123), None, None, Some(456)]);
assert_eq!(expected, result);
Ok(())
}
#[test]
fn case_with_expr_else() -> Result<()> {
let batch = case_test_batch()?;
let schema = batch.schema();
// CASE a WHEN 'foo' THEN 123 WHEN 'bar' THEN 456 ELSE 999 END
let when1 = lit(ScalarValue::Utf8(Some("foo".to_string())));
let then1 = lit(ScalarValue::Int32(Some(123)));
let when2 = lit(ScalarValue::Utf8(Some("bar".to_string())));
let then2 = lit(ScalarValue::Int32(Some(456)));
let else_value = lit(ScalarValue::Int32(Some(999)));
let expr = case(
Some(col("a", &schema)?),
&[(when1, then1), (when2, then2)],
Some(else_value),
)?;
let result = expr.evaluate(&batch)?.into_array(batch.num_rows());
let result = result
.as_any()
.downcast_ref::<Int32Array>()
.expect("failed to downcast to Int32Array");
let expected =
&Int32Array::from(vec![Some(123), Some(999), Some(999), Some(456)]);
assert_eq!(expected, result);
Ok(())
}
#[test]
fn case_without_expr() -> Result<()> {
let batch = case_test_batch()?;
let schema = batch.schema();
// CASE WHEN a = 'foo' THEN 123 WHEN a = 'bar' THEN 456 END
let when1 = binary(
col("a", &schema)?,
Operator::Eq,
lit(ScalarValue::Utf8(Some("foo".to_string()))),
&batch.schema(),
)?;
let then1 = lit(ScalarValue::Int32(Some(123)));
let when2 = binary(
col("a", &schema)?,
Operator::Eq,
lit(ScalarValue::Utf8(Some("bar".to_string()))),
&batch.schema(),
)?;
let then2 = lit(ScalarValue::Int32(Some(456)));
let expr = case(None, &[(when1, then1), (when2, then2)], None)?;
let result = expr.evaluate(&batch)?.into_array(batch.num_rows());
let result = result
.as_any()
.downcast_ref::<Int32Array>()
.expect("failed to downcast to Int32Array");
let expected = &Int32Array::from(vec![Some(123), None, None, Some(456)]);
assert_eq!(expected, result);
Ok(())
}
#[test]
fn case_without_expr_else() -> Result<()> {
let batch = case_test_batch()?;
let schema = batch.schema();
// CASE WHEN a = 'foo' THEN 123 WHEN a = 'bar' THEN 456 ELSE 999 END
let when1 = binary(
col("a", &schema)?,
Operator::Eq,
lit(ScalarValue::Utf8(Some("foo".to_string()))),
&batch.schema(),
)?;
let then1 = lit(ScalarValue::Int32(Some(123)));
let when2 = binary(
col("a", &schema)?,
Operator::Eq,
lit(ScalarValue::Utf8(Some("bar".to_string()))),
&batch.schema(),
)?;
let then2 = lit(ScalarValue::Int32(Some(456)));
let else_value = lit(ScalarValue::Int32(Some(999)));
let expr = case(None, &[(when1, then1), (when2, then2)], Some(else_value))?;
let result = expr.evaluate(&batch)?.into_array(batch.num_rows());
let result = result
.as_any()
.downcast_ref::<Int32Array>()
.expect("failed to downcast to Int32Array");
let expected =
&Int32Array::from(vec![Some(123), Some(999), Some(999), Some(456)]);
assert_eq!(expected, result);
Ok(())
}
fn case_test_batch() -> Result<RecordBatch> {
let schema = Schema::new(vec![Field::new("a", DataType::Utf8, true)]);
let a = StringArray::from(vec![Some("foo"), Some("baz"), None, Some("bar")]);
let batch = RecordBatch::try_new(Arc::new(schema), vec![Arc::new(a)])?;
Ok(batch)
}
} | // arbitrary expressions
self.case_when_no_expr(batch)
} |
test_helper.rs | // Copyright (c) The Diem Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
account_resource::SimplifiedAccountResource,
command::{Command, CommandName},
keys::{load_key, EncodingType, KeyType},
validator_config::DecryptedValidatorConfig,
validator_set::DecryptedValidatorInfo,
validator_state::VerifyValidatorStateResult,
TransactionContext,
};
use diem_config::{config, config::Peer, network_id::NetworkId};
use diem_crypto::{ed25519::Ed25519PublicKey, traits::ValidCryptoMaterialStringExt, x25519};
use diem_management::{error::Error, secure_backend::DISK};
use diem_types::{
account_address::AccountAddress, chain_id::ChainId, network_address::NetworkAddress,
waypoint::Waypoint, PeerId,
};
use itertools::Itertools;
use std::{
collections::{HashMap, HashSet},
path::Path,
};
use structopt::StructOpt;
const TOOL_NAME: &str = "diem-operational-tool";
/// A helper to test the operational tool in tests
pub struct OperationalTool {
host: String,
chain_id: ChainId,
}
impl OperationalTool {
pub fn new(host: String, chain_id: ChainId) -> OperationalTool {
OperationalTool { host, chain_id }
}
pub fn test() -> OperationalTool {
OperationalTool {
host: "localhost".to_string(),
chain_id: ChainId::test(),
}
}
pub fn account_resource(
&self,
account_address: AccountAddress,
) -> Result<SimplifiedAccountResource, Error> {
let args = format!(
"
{command}
--json-server {json_server}
--account-address {account_address}
",
command = command(TOOL_NAME, CommandName::AccountResource),
json_server = self.host,
account_address = account_address,
);
let command = Command::from_iter(args.split_whitespace());
command.account_resource()
}
pub fn check_endpoint(
&self,
network_id: &NetworkId,
network_address: NetworkAddress,
) -> Result<String, Error> {
let args = format!(
"
{command}
--address {network_address}
--chain-id {chain_id}
--network-id {network_id}
",
command = command(TOOL_NAME, CommandName::CheckEndpoint),
chain_id = self.chain_id.id(),
network_address = network_address,
network_id = network_id
);
let command = Command::from_iter(args.split_whitespace());
command.check_endpoint()
}
pub fn check_endpoint_with_key(
&self,
network_id: &NetworkId,
network_address: NetworkAddress,
private_key: &x25519::PrivateKey,
) -> Result<String, Error> {
let args = format!(
"
{command}
--address {network_address}
--chain-id {chain_id}
--network-id {network_id}
--private-key {private_key}
",
command = command(TOOL_NAME, CommandName::CheckEndpoint),
chain_id = self.chain_id.id(),
network_address = network_address,
network_id = network_id,
private_key = private_key.to_encoded_string().unwrap(),
);
Command::from_iter(args.split_whitespace()).check_endpoint()
}
pub fn create_account(
&self,
name: &str,
path_to_key: &str,
backend: &config::SecureBackend,
disable_validate: bool,
command_name: CommandName,
execute: fn(Command) -> Result<(TransactionContext, AccountAddress), Error>,
) -> Result<(TransactionContext, AccountAddress), Error> {
let args = format!(
"
{command}
--name {name}
--path-to-key {path_to_key}
--json-server {host}
--chain-id {chain_id}
--validator-backend {backend_args}
{disable_validate}
",
command = command(TOOL_NAME, command_name),
name = name,
path_to_key = path_to_key,
host = self.host,
chain_id = self.chain_id.id(),
backend_args = backend_args(backend)?,
disable_validate = optional_flag("disable-validate", disable_validate),
);
let command = Command::from_iter(args.split_whitespace());
execute(command)
}
pub fn create_validator(
&self,
name: &str,
path_to_key: &str,
backend: &config::SecureBackend,
disable_validate: bool,
) -> Result<(TransactionContext, AccountAddress), Error> {
self.create_account(
name,
path_to_key,
backend,
disable_validate,
CommandName::CreateValidator,
|cmd| cmd.create_validator(),
)
}
pub fn create_validator_operator(
&self,
name: &str,
path_to_key: &str,
backend: &config::SecureBackend,
disable_validate: bool,
) -> Result<(TransactionContext, AccountAddress), Error> {
self.create_account(
name,
path_to_key,
backend,
disable_validate,
CommandName::CreateValidatorOperator,
|cmd| cmd.create_validator_operator(),
)
}
fn extract_key(
&self,
key_name: &str,
key_file: &str,
key_type: KeyType,
encoding: EncodingType,
backend: &config::SecureBackend,
command_name: CommandName,
execute: fn(Command) -> Result<(), Error>,
) -> Result<(), Error> {
let args = format!(
"
{command}
--key-name {key_name}
--key-file {key_file}
--key-type {key_type:?}
--encoding {encoding:?}
--validator-backend {backend_args}
",
command = command(TOOL_NAME, command_name),
key_name = key_name,
key_file = key_file,
key_type = key_type,
encoding = encoding,
backend_args = backend_args(backend)?,
);
let command = Command::from_iter(args.split_whitespace());
execute(command)
}
pub fn extract_public_key(
&self,
key_name: &str,
key_file: &str,
key_type: KeyType,
encoding: EncodingType,
backend: &config::SecureBackend,
) -> Result<(), Error> {
self.extract_key(
key_name,
key_file,
key_type,
encoding,
backend,
CommandName::ExtractPublicKey,
|cmd| cmd.extract_public_key(),
)
}
pub fn extract_private_key(
&self,
key_name: &str,
key_file: &str,
key_type: KeyType,
encoding: EncodingType,
backend: &config::SecureBackend,
) -> Result<(), Error> {
self.extract_key(
key_name,
key_file,
key_type,
encoding,
backend,
CommandName::ExtractPrivateKey,
|cmd| cmd.extract_private_key(),
)
}
pub fn extract_peer_from_file(
&self,
key_file: &Path,
encoding: EncodingType,
) -> Result<HashMap<PeerId, Peer>, Error> {
let args = format!(
"
{command}
--key-file {key_file}
--encoding {encoding:?}
",
command = command(TOOL_NAME, CommandName::ExtractPeerFromFile),
key_file = key_file.to_str().unwrap(),
encoding = encoding
);
let command = Command::from_iter(args.split_whitespace());
command.extract_peer_from_file()
}
pub fn extract_peer_from_storage(
&self,
key_name: &str,
backend: &config::SecureBackend,
) -> Result<HashMap<PeerId, Peer>, Error> {
let args = format!(
"
{command}
--key-name {key_name}
--validator-backend {backend_args}
",
command = command(TOOL_NAME, CommandName::ExtractPeerFromStorage),
key_name = key_name,
backend_args = backend_args(backend)?,
);
let command = Command::from_iter(args.split_whitespace());
command.extract_peer_from_storage()
}
pub fn extract_peers_from_keys(
&self,
keys: HashSet<x25519::PublicKey>,
output_file: &Path,
) -> Result<HashMap<PeerId, Peer>, Error> {
let args = format!(
"
{command}
--keys {keys}
--output-file {output_file}
",
command = command(TOOL_NAME, CommandName::ExtractPeersFromKeys),
keys = keys.iter().join(","),
output_file = output_file.to_str().unwrap(),
);
let command = Command::from_iter(args.split_whitespace());
command.extract_peers_from_keys()
}
pub fn generate_key(
&self,
key_type: KeyType,
key_file: &Path,
encoding: EncodingType,
) -> Result<x25519::PrivateKey, Error> {
let args = format!(
"
{command}
--key-type {key_type:?}
--key-file {key_file}
--encoding {encoding:?}
",
command = command(TOOL_NAME, CommandName::GenerateKey),
key_type = key_type,
key_file = key_file.to_str().unwrap(),
encoding = encoding,
);
let command = Command::from_iter(args.split_whitespace());
command.generate_key()?;
load_key(key_file.to_path_buf(), encoding)
}
pub fn insert_waypoint(
&self,
waypoint: Waypoint,
backend: &config::SecureBackend,
set_genesis: bool,
) -> Result<(), Error> {
let args = format!(
"
{command}
--waypoint {waypoint}
--validator-backend {backend_args}
{set_genesis}
",
command = command(TOOL_NAME, CommandName::InsertWaypoint),
waypoint = waypoint,
backend_args = backend_args(backend)?,
set_genesis = optional_flag("set-genesis", set_genesis),
);
let command = Command::from_iter(args.split_whitespace());
command.insert_waypoint()
}
pub fn print_account(
&self,
account_name: &str,
backend: &config::SecureBackend,
) -> Result<AccountAddress, Error> {
let args = format!(
"
{command}
--account-name {account_name}
--validator-backend {backend_args}
",
command = command(TOOL_NAME, CommandName::PrintAccount),
account_name = account_name,
backend_args = backend_args(backend)?,
);
let command = Command::from_iter(args.split_whitespace());
command.print_account()
}
pub fn print_key(
&self,
key_name: &str,
backend: &config::SecureBackend,
) -> Result<Ed25519PublicKey, Error> {
let args = format!(
"
{command}
--key-name {key_name}
--validator-backend {backend_args}
",
command = command(TOOL_NAME, CommandName::PrintKey),
key_name = key_name,
backend_args = backend_args(backend)?,
);
let command = Command::from_iter(args.split_whitespace());
command.print_key()
}
pub fn print_waypoint(
&self,
waypoint_name: &str,
backend: &config::SecureBackend,
) -> Result<Waypoint, Error> {
let args = format!(
"
{command}
--waypoint-name {waypoint_name}
--validator-backend {backend_args}
",
command = command(TOOL_NAME, CommandName::PrintWaypoint),
waypoint_name = waypoint_name,
backend_args = backend_args(backend)?,
);
let command = Command::from_iter(args.split_whitespace());
command.print_waypoint()
}
pub fn | (
&self,
validator_address: Option<NetworkAddress>,
fullnode_address: Option<NetworkAddress>,
backend: &config::SecureBackend,
disable_validate: bool,
disable_address_validation: bool,
) -> Result<TransactionContext, Error> {
let args = format!(
"
{command}
{fullnode_address}
{validator_address}
--chain-id {chain_id}
--json-server {host}
--validator-backend {backend_args}
{disable_validate}
{disable_address_validation}
",
command = command(TOOL_NAME, CommandName::SetValidatorConfig),
host = self.host,
chain_id = self.chain_id.id(),
fullnode_address = optional_arg("fullnode-address", fullnode_address),
validator_address = optional_arg("validator-address", validator_address),
backend_args = backend_args(backend)?,
disable_validate = optional_flag("disable-validate", disable_validate),
disable_address_validation =
optional_flag("disable-address-validation", disable_address_validation),
);
let command = Command::from_iter(args.split_whitespace());
command.set_validator_config()
}
fn rotate_key<T>(
&self,
backend: &config::SecureBackend,
disable_validate: bool,
name: CommandName,
execute: fn(Command) -> Result<T, Error>,
) -> Result<T, Error> {
let args = format!(
"
{command}
--chain-id {chain_id}
--json-server {host}
--validator-backend {backend_args}
{disable_validate}
",
command = command(TOOL_NAME, name),
host = self.host,
chain_id = self.chain_id.id(),
backend_args = backend_args(backend)?,
disable_validate = optional_flag("disable-validate", disable_validate),
);
let command = Command::from_iter(args.split_whitespace());
execute(command)
}
pub fn rotate_consensus_key(
&self,
backend: &config::SecureBackend,
disable_validate: bool,
) -> Result<(TransactionContext, Ed25519PublicKey), Error> {
self.rotate_key(
backend,
disable_validate,
CommandName::RotateConsensusKey,
|cmd| cmd.rotate_consensus_key(),
)
}
pub fn rotate_operator_key(
&self,
backend: &config::SecureBackend,
disable_validate: bool,
) -> Result<(TransactionContext, Ed25519PublicKey), Error> {
self.rotate_key(
backend,
disable_validate,
CommandName::RotateOperatorKey,
|cmd| cmd.rotate_operator_key(),
)
}
pub fn rotate_operator_key_with_custom_validation(
&self,
backend: &config::SecureBackend,
disable_validate: bool,
sleep_interval: Option<u64>,
validate_timeout: Option<u64>,
) -> Result<(TransactionContext, Ed25519PublicKey), Error> {
let args = format!(
"
{command}
--chain-id {chain_id}
--json-server {host}
--validator-backend {backend_args}
{disable_validate}
{sleep_interval}
{validate_timeout}
",
command = command(TOOL_NAME, CommandName::RotateOperatorKey),
host = self.host,
chain_id = self.chain_id.id(),
backend_args = backend_args(backend)?,
disable_validate = optional_flag("disable-validate", disable_validate),
sleep_interval = optional_arg("sleep-interval", sleep_interval),
validate_timeout = optional_arg("validate-timeout", validate_timeout),
);
let command = Command::from_iter(args.split_whitespace());
command.rotate_operator_key()
}
pub fn rotate_validator_network_key(
&self,
backend: &config::SecureBackend,
disable_validate: bool,
) -> Result<(TransactionContext, x25519::PublicKey), Error> {
self.rotate_key(
backend,
disable_validate,
CommandName::RotateValidatorNetworkKey,
|cmd| cmd.rotate_validator_network_key(),
)
}
pub fn rotate_fullnode_network_key(
&self,
backend: &config::SecureBackend,
disable_validate: bool,
) -> Result<(TransactionContext, x25519::PublicKey), Error> {
self.rotate_key(
backend,
disable_validate,
CommandName::RotateFullNodeNetworkKey,
|cmd| cmd.rotate_fullnode_network_key(),
)
}
pub fn validate_transaction(
&self,
account_address: AccountAddress,
sequence_number: u64,
) -> Result<TransactionContext, Error> {
let args = format!(
"
{command}
--json-server {host}
--account-address {account_address}
--sequence-number {sequence_number}
",
command = command(TOOL_NAME, CommandName::ValidateTransaction),
host = self.host,
account_address = account_address,
sequence_number = sequence_number,
);
let command = Command::from_iter(args.split_whitespace());
command.validate_transaction()
}
pub fn set_validator_operator(
&self,
name: &str,
account_address: AccountAddress,
backend: &config::SecureBackend,
disable_validate: bool,
) -> Result<TransactionContext, Error> {
let args = format!(
"
{command}
--json-server {json_server}
--chain-id {chain_id}
--name {name}
--account-address {account_address}
--validator-backend {backend_args}
{disable_validate}
",
command = command(TOOL_NAME, CommandName::SetValidatorOperator),
json_server = self.host,
name = name,
chain_id = self.chain_id.id(),
account_address = account_address,
backend_args = backend_args(backend)?,
disable_validate = optional_flag("disable-validate", disable_validate),
);
let command = Command::from_iter(args.split_whitespace());
command.set_validator_operator()
}
pub fn validator_config(
&self,
account_address: AccountAddress,
backend: Option<&config::SecureBackend>,
) -> Result<DecryptedValidatorConfig, Error> {
let validator_backend = if let Some(backend) = backend {
Some(backend_args(backend)?)
} else {
None
};
let args = format!(
"
{command}
--json-server {json_server}
--account-address {account_address}
{validator_backend}
",
command = command(TOOL_NAME, CommandName::ValidatorConfig),
json_server = self.host,
account_address = account_address,
validator_backend = optional_arg("validator-backend", validator_backend),
);
let command = Command::from_iter(args.split_whitespace());
command.validator_config()
}
pub fn validator_set(
&self,
account_address: Option<AccountAddress>,
backend: Option<&config::SecureBackend>,
) -> Result<Vec<DecryptedValidatorInfo>, Error> {
let validator_backend = if let Some(backend) = backend {
Some(backend_args(backend)?)
} else {
None
};
let args = format!(
"
{command}
{account_address}
--json-server {json_server}
{validator_backend}
",
command = command(TOOL_NAME, CommandName::ValidatorSet),
json_server = self.host,
account_address = optional_arg("account-address", account_address),
validator_backend = optional_arg("validator-backend", validator_backend),
);
let command = Command::from_iter(args.split_whitespace());
command.validator_set()
}
fn validator_operation<T>(
&self,
account_address: AccountAddress,
backend: &config::SecureBackend,
disable_validate: bool,
name: CommandName,
execute: fn(Command) -> Result<T, Error>,
) -> Result<T, Error> {
let args = format!(
"
{command}
--json-server {host}
--chain-id {chain_id}
--account-address {account_address}
--validator-backend {backend_args}
{disable_validate}
",
command = command(TOOL_NAME, name),
host = self.host,
chain_id = self.chain_id.id(),
account_address = account_address,
backend_args = backend_args(backend)?,
disable_validate = optional_flag("disable-validate", disable_validate),
);
let command = Command::from_iter(args.split_whitespace());
execute(command)
}
pub fn add_validator(
&self,
account_address: AccountAddress,
backend: &config::SecureBackend,
disable_validate: bool,
) -> Result<TransactionContext, Error> {
self.validator_operation(
account_address,
backend,
disable_validate,
CommandName::AddValidator,
|cmd| cmd.add_validator(),
)
}
pub fn remove_validator(
&self,
account_address: AccountAddress,
backend: &config::SecureBackend,
disable_validate: bool,
) -> Result<TransactionContext, Error> {
self.validator_operation(
account_address,
backend,
disable_validate,
CommandName::RemoveValidator,
|cmd| cmd.remove_validator(),
)
}
pub fn verify_validator_state(
&self,
backend: &config::SecureBackend,
) -> Result<VerifyValidatorStateResult, Error> {
let args = format!(
"
{command}
--json-server {host}
--validator-backend {backend_args}
",
command = command(TOOL_NAME, CommandName::VerifyValidatorState),
host = self.host,
backend_args = backend_args(backend)?,
);
let command = Command::from_iter(args.split_whitespace());
command.verify_validator_state()
}
}
fn command(tool_name: &'static str, command: CommandName) -> String {
format!("{tool} {command}", tool = tool_name, command = command)
}
/// Allow arguments to be optional
fn optional_arg<T: std::fmt::Display>(name: &'static str, maybe_value: Option<T>) -> String {
if let Some(value) = maybe_value {
format!("--{name} {value}", name = name, value = value)
} else {
String::new()
}
}
/// Allow flags to be optional
fn optional_flag(flag: &'static str, enable_flag: bool) -> String {
if enable_flag {
format!("--{flag}", flag = flag)
} else {
String::new()
}
}
/// Extract on disk storage args
/// TODO: Support other types of storage
fn backend_args(backend: &config::SecureBackend) -> Result<String, Error> {
match backend {
config::SecureBackend::OnDiskStorage(config) => {
let mut s = format!(
"backend={backend};\
path={path}",
backend = DISK,
path = config.path.to_str().unwrap(),
);
if let Some(namespace) = config.namespace.as_ref() {
s.push_str(&format!(";namespace={}", namespace));
}
Ok(s)
}
_ => Err(Error::UnexpectedError("Storage isn't on disk".to_string())),
}
}
| set_validator_config |
util.go | /*
Copyright 2017 Luke Granger-Brown
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package hostapd implements communicating with hostapd over its control socket.
package hostapd
import (
"io/ioutil"
"os"
)
const (
HostapdSocketDirectory = "/var/run/hostapd"
)
// ListTransports lists the sockets available in the hostapd directory.
func ListTransports(dir string) ([]string, error) {
if dir == "" {
dir = HostapdSocketDirectory | dents, err := ioutil.ReadDir(dir)
if err != nil {
return nil, err
}
var socks []string
for _, dent := range dents {
if dent.Mode()&os.ModeSocket != os.ModeSocket {
continue
}
socks = append(socks, dent.Name())
}
return socks, nil
} | }
|
conf.py | # -*- coding: utf-8 -*-
#
# AutoFolio documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 14 12:36:21 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import sys
import os
import shlex
import sphinx_bootstrap_theme
sys.path.insert(0, '..')
import autofolio
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'AutoFolio'
copyright = '2015-%s, %s' % (datetime.datetime.now().year, autofolio.AUTHORS)
author = autofolio.AUTHORS
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = autofolio.VERSION
# The full version, including alpha/beta/rc tags.
release = autofolio.VERSION
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_static']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'bootstrap'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
# Navigation bar title. (Default: ``project`` value)
'navbar_title': "AutoFolio",
# Tab name for entire site. (Default: "Site")
# 'navbar_site_name': "Site",
# A list of tuples containing pages to link to. The value should
# be in the form [(name, page), ..]
'navbar_links': [
('Start', 'index'),
('Installation', 'installation'),
('Manual', 'manual'),
('Contact', 'contact'),
('License', 'license'),
],
# Render the next and previous page links in navbar. (Default: true)
'navbar_sidebarrel': False,
# Render the current pages TOC in the navbar. (Default: true)
'navbar_pagenav': False,
# Tab name for the current pages TOC. (Default: "Page")
'navbar_pagenav_name': "On this page",
# Global TOC depth for "site" navbar tab. (Default: 1)
# Switching to -1 shows all levels.
'globaltoc_depth': 1,
# Include hidden TOCs in Site navbar?
#
# Note: If this is "false", you cannot have mixed ``:hidden:`` and
# non-hidden ``toctree`` directives in the same page, or else the build
# will break.
#
# Values: "true" (default) or "false"
'globaltoc_includehidden': "false",
# HTML navbar class (Default: "navbar") to attach to <div> element.
# For black navbar, do "navbar navbar-inverse"
'navbar_class': "navbar",
# Fix navigation bar to top of page?
# Values: "true" (default) or "false"
'navbar_fixed_top': "true",
# Location of link to source.
# Options are "nav" (default), "footer" or anything else to exclude.
'source_link_position': "footer",
# Bootswatch (http://bootswatch.com/) theme.
#
# Options are nothing with "" (default) or the name of a valid theme
# such as "amelia" or "cosmo".
'bootswatch_theme': "cosmo",
# Choose Bootstrap version. | 'bootstrap_version': "3",
}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = sphinx_bootstrap_theme.get_html_theme_path()
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {'**': ['localtoc.html']}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AutoFoliodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AutoFolio.tex', u'AutoFolio Documentation', autofolio.AUTHORS, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'autofolio', u'AutoFolio Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'AutoFolio', u'AutoFolio Documentation',
author, 'AutoFolio', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False | # Values: "3" (default) or "2" (in quotes) |
index.d.ts | // Type definitions for Highland 2.12.0
// Project: http://highlandjs.org/
// Definitions by: Bart van der Schoor <https://github.com/Bartvds>
// Hugo Wood <https://github.com/hgwood>
// William Yu <https://github.com/iwllyu>
// Alvis HT Tang <https://github.com/alvis>
// Jack Wearden <https://github.com/notbobthebuilder>
// Definitions: https://github.com/DefinitelyTyped/DefinitelyTyped
// TypeScript Version: 2.8
/// <reference types="node" />
// TODO export the top-level functions
// TODO figure out curry arguments
// TODO use externalised Readable/Writable (not node's)
// Returns the type of a flattened stream.
// Uses trick described in https://github.com/microsoft/TypeScript/pull/33050#issuecomment-552218239
// with string keys to support TS 2.8
type Flattened<R> = {
value: R,
stream: R extends Highland.Stream<infer U> ? Flattened<U> : never,
array: R extends Array<infer U> ? Flattened<U> : never;
}[R extends Array<any> ? 'array' : R extends Highland.Stream<any> ? 'stream' : 'value'];
/**
* Highland: the high-level streams library
*
* Highland may be freely distributed under the Apache 2.0 license.
* https://github.com/caolan/highland
* Copyright (c) Caolan McMahon
*
*/
interface HighlandStatic {
/**
* The Stream constructor, accepts an array of values or a generator function
* as an optional argument. This is typically the entry point to the Highland
* APIs, providing a convenient way of chaining calls together.
*
* **Arrays -** Streams created from Arrays will emit each value of the Array
* and then emit a [nil](#nil) value to signal the end of the Stream.
*
* **Generators -** These are functions which provide values for the Stream.
* They are lazy and can be infinite, they can also be asynchronous (for
* example, making a HTTP request). You emit values on the Stream by calling
* `push(err, val)`, much like a standard Node.js callback. Once it has been
* called, the generator function will not be called again unless you call
* `next()`. This call to `next()` will signal you've finished processing the
* current data and allow for the generator function to be called again. If the
* Stream is still being consumed the generator function will then be called
* again.
*
* You can also redirect a generator Stream by passing a new source Stream
* to read from to next. For example: `next(other_stream)` - then any subsequent
* calls will be made to the new source.
*
* **Node Readable Stream -** Pass in a Node Readable Stream object to wrap
* it with the Highland API. Reading from the resulting Highland Stream will
* begin piping the data from the Node Stream to the Highland Stream.
*
* A stream constructed in this way relies on `Readable#pipe` to end the
* Highland Stream once there is no more data. Not all Readable Streams do
* this. For example, `IncomingMessage` will only emit `close` when the client
* aborts communications and will *not* properly call `end`. In this case, you
* can provide an optional `onFinished` function with the signature
* `onFinished(readable, callback)` as the second argument.
*
* This function will be passed the Readable and a callback that should be called
* when the Readable ends. If the Readable ended from an error, the error
* should be passed as the first argument to the callback. `onFinished` should
* bind to whatever listener is necessary to detect the Readable's completion.
* If the callback is called multiple times, only the first invocation counts.
* If the callback is called *after* the Readable has already ended (e.g., the
* `pipe` method already called `end`), it will be ignored.
*
* The `onFinished` function may optionally return one of the following:
*
* - A cleanup function that will be called when the stream ends. It should
* unbind any listeners that were added.
* - An object with the following optional properties:
* - `onDestroy` - the cleanup function.
* - `continueOnError` - Whether or not to continue the stream when an
* error is passed to the callback. Set this to `true` if the Readable
* may continue to emit values after errors. Default: `false`.
*
* See [this issue](https://github.com/caolan/highland/issues/490) for a
* discussion on why Highland cannot reliably detect stream completion for
* all implementations and why the `onFinished` function is required.
*
* **EventEmitter / jQuery Elements -** Pass in both an event name and an
* event emitter as the two arguments to the constructor and the first
* argument emitted to the event handler will be written to the new Stream.
*
* You can pass a mapping hint as the third argument, which specifies how
* event arguments are pushed into the stream. If no mapping hint is provided,
* only the first value emitted with the event will be pushed onto the
* Stream.
*
* If `mappingHint` is a number, an array of that length will be pushed onto
* the stream, containing exactly that many parameters from the event. If it's
* an array, it's used as keys to map the arguments into an object which is
* pushed to the stream. If it is a function, it's called with the event
* arguments, and the returned value is pushed.
*
* **Promise -** Accepts an ES6 / jQuery style promise and returns a
* Highland Stream which will emit a single value (or an error). In case you use
* [bluebird cancellation](http://bluebirdjs.com/docs/api/cancellation.html) Highland Stream will be empty for a cancelled promise.
*
* **Iterator -** Accepts an ES6 style iterator that implements the [iterator protocol](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols#The_.22iterator.22_protocol):
* yields all the values from the iterator using its `next()` method and terminates when the
* iterator's done value returns true. If the iterator's `next()` method throws, the exception will be emitted as an error,
* and the stream will be ended with no further calls to `next()`.
*
* **Iterable -** Accepts an object that implements the [iterable protocol](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Iteration_protocols#The_.22iterable.22_protocol),
* i.e., contains a method that returns an object that conforms to the iterator protocol. The stream will use the
* iterator defined in the `Symbol.iterator` property of the iterable object to generate emitted values.
*
* @id _(source)
* @section Stream Objects
* @name _(source)
* @param {Array | Function | Iterator | Iterable | Promise | Readable Stream | String} source - (optional) source to take values from
* @param {Function} onFinished - (optional) a function that detects when the readable completes. Second argument. Only valid if `source` is a Readable.
* @param {EventEmitter | jQuery Element} eventEmitter - (optional) An event emitter. Second argument. Only valid if `source` is a String.
* @param {Array | Function | Number} mappingHint - (optional) how to pass the
* arguments to the callback. Only valid if `source` is a String.
* @api public
*/
<R>(): Highland.Stream<R>;
<R>(source: R[]): Highland.Stream<R>;
<R>(source: (push: (err: Error | null, x?: R | Highland.Nil) => void, next: () => void) => void): Highland.Stream<R>;
<R>(source: Highland.Stream<R>): Highland.Stream<R>;
<R>(source: NodeJS.ReadableStream, onFinished?: Highland.OnFinished): Highland.Stream<R>;
<R>(source: string, eventEmitter: NodeJS.EventEmitter, mappingHint?: Highland.MappingHint): Highland.Stream<R>;
// moar (promise for everything?)
<R>(source: PromiseLike<Highland.Stream<R>>): Highland.Stream<R>;
<R>(source: PromiseLike<R>): Highland.Stream<R>;
<R>(source: Iterable<R>): Highland.Stream<R>;
<R>(source: Iterator<R>): Highland.Stream<R>;
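/*
 * Illustrative usage (not part of the published typings; a minimal sketch):
 * constructing streams from the source kinds documented above. Assumes the
 * Highland export is bound to `_` (e.g. `import _ = require('highland')`) and
 * `import * as fs from 'fs'`; 'data.txt' is a hypothetical path.
 *
 *   _([1, 2, 3]);                        // from an Array
 *   _(function (push, next) {            // from a generator function
 *       push(null, 1);
 *       push(null, _.nil);               // signal end of stream
 *   });
 *   _(Promise.resolve('done'));          // from a Promise
 *   _(fs.createReadStream('data.txt'));  // from a Node Readable Stream
 */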
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// UTILS
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/**
* Returns true if `x` is the end of stream marker.
*
* @id isNil
* @section Streams
* @name _.isNil(x)
* @param x - the object to test
* @api public
*/
isNil<R>(x: R | Highland.Nil): x is Highland.Nil;
/**
* Returns true if `x` is a Highland Stream.
*
* @id isStream
* @section Streams
* @name _.isStream(x)
* @param x - the object to test
* @api public
*/
isStream(x: any): x is Highland.Stream<any>;
isStreamError(x: any): x is Highland.Stream<any>;
isStreamRedirect(x: any): x is Highland.Stream<any>;
/**
* Logs values to the console, a simple wrapper around `console.log` that
* is suitable for passing to other functions by reference without having to
* call `bind`.
*
* @id log
* @section Utils
* @name _.log(args..)
* @api public
*/
log(x: any, ...args: any[]): void;
/**
* The end of stream marker. This is sent along the data channel of a Stream
* to tell consumers that the Stream has ended. See the following map code for
* an example of detecting the end of a Stream:
*
* @id nil
* @section Streams
* @name _.nil
* @api public
*/
nil: Highland.Nil;
/**
* Wraps a node-style async function which accepts a callback, transforming
* it to a function which accepts the same arguments minus the callback and
* returns a Highland Stream instead. The wrapped function keeps its context,
* so you can safely use it as a method without binding (see the second
* example below).
*
* wrapCallback also accepts an optional mappingHint, which specifies how
* callback arguments are pushed to the stream. This can be used to handle
* non-standard callback protocols that pass back more than one value.
*
* mappingHint can be a function, number, or array. See the documentation on
* EventEmitter Stream Objects for details on the mapping hint. If
* mappingHint is a function, it will be called with all but the first
* argument that is passed to the callback. The first is still assumed to be
* the error argument.
*
* @id wrapCallback
* @section Utils
* @name _.wrapCallback(f)
* @param {Function} f - the node-style function to wrap
* @param {Array | Function | Number} [mappingHint] - how to pass the arguments to the callback
* @api public
*/
wrapCallback(f: Function, mappingHint?: Highland.MappingHint): (...args: any[]) => Highland.Stream<any>;
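/*
 * Illustrative usage (a sketch, not part of the published typings): wrapping a
 * node-style callback API as described above. Assumes `_` is the Highland
 * export and `import * as fs from 'fs'`; 'example.txt' is a hypothetical path.
 *
 *   var readFile = _.wrapCallback(fs.readFile);
 *   readFile('example.txt')
 *       .errors(function (err) { console.error(err); })
 *       .each(function (buf) { console.log(buf.toString()); });
 */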
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// OBJECTS
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/**
* Extends one object with the properties of another. **Note:** The
* arguments are in the reverse order of other libraries such as
* underscore. This is so it follows the convention of other functions in
* this library and so you can more meaningfully partially apply it.
*
* @id extend
* @section Objects
* @name _.extend(a, b)
* @param {Object} a - the properties to extend b with
* @param {Object} b - the original object to extend
* @api public
*/
extend(extensions: Object, target: Object): Object;
extend(target: Object): (extensions: Object) => Object;
/**
* Returns a property from an object.
*
* @id get
* @section Objects
* @name _.get(prop, obj)
* @param {String} prop - the property to return
* @param {Object} obj - the object to read properties from
* @api public
*/
get(prop: string, obj: Object): string;
get(prop: string): (obj: Object) => Object;
/**
* Returns keys from an Object as a Stream.
*
* @id keys
* @section Objects
* @name _.keys(obj)
* @param {Object} obj - the object to return keys from
* @api public
*/
keys(obj: Object): Highland.Stream<string>;
/**
* Returns key/value pairs for an Object as a Stream. Reads properties
* lazily, so if you don't read from all keys on an object, not
* all properties will be read from (may have an effect where getters
* are used).
*
* @id pairs
* @section Objects
* @name _.pairs(obj)
* @param {Object} obj - the object to return key/value pairs from
* @api public
*/
pairs(obj: Object): Highland.Stream<any[]>;
pairs(obj: any[]): Highland.Stream<any[]>;
/**
* Updates a property on an object, returning the updated object.
*
* @id set
* @section Objects
* @name _.set(prop, value, obj)
* @param {String} prop - the property to return
* @param value - the value to set the property to
* @param {Object} obj - the object to set properties on
* @api public
*/
set(prop: string, val: any, obj: Object): Object;
set(prop: string, val: any): (obj: Object) => Object;
/**
* Returns values from an Object as a Stream. Reads properties
* lazily, so if you don't read from all keys on an object, not
* all properties will be read from (may have an effect where getters
* are used).
*
* @id values
* @section Objects
* @name _.values(obj)
* @param {Object} obj - the object to return values from
* @api public
*/
values(obj: Object): Highland.Stream<any>;
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// FUNCTIONS
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/**
* Creates a composite function, which is the application of function1 to
* the results of function2. You can pass an arbitrary number of arguments
* and have them composed. This means you can't partially apply the compose
* function itself.
*
* @id compose
* @name compose(fn1, fn2, ...)
* @section Functions
* @api public
*/
compose(...functions: Function[]): Function;
/**
* Transforms a function with specific arity (all arguments must be
* defined) in a way that it can be called as a chain of functions until
* the arguments list is saturated.
*
* This function is not itself curryable.
*
* @id curry
* @name curry(fn, [*arguments])
* @section Functions
* @param {Function} fn - the function to curry
* @param args.. - any number of arguments to pre-apply to the function
* @returns Function
* @api public
*/
curry(fn: Function, ...args: any[]): Function;
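/*
 * Illustrative usage (a sketch under the description above, not authoritative):
 * progressive application with curry. The `add3` name is hypothetical; `_` is
 * assumed to be the Highland export.
 *
 *   var add3 = _.curry(function (a, b, c) { return a + b + c; });
 *   add3(1)(2)(3);   // => 6
 *   add3(1, 2)(3);   // => 6
 */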
/**
* Evaluates the function `fn` with the argument positions swapped. Only
* works with functions that accept two arguments.
*
* @id flip
* @name flip(fn, [x, y])
* @section Functions
* @param {Function} f - function to flip argument application for
* @param x - parameter to apply to the right hand side of f
* @param y - parameter to apply to the left hand side of f
* @api public
*/
flip(fn: Function, ...args: any[]): Function;
/**
* Same as `curry` but with a specific number of arguments. This can be
* useful when functions do not explicitly define all its parameters.
*
* This function is not itself curryable.
*
* @id ncurry
* @name ncurry(n, fn, [args...])
* @section Functions
* @param {Number} n - the number of arguments to wait for before applying fn
* @param {Function} fn - the function to curry
* @param args... - any number of arguments to pre-apply to the function
* @returns Function
* @api public
*/
ncurry(n: number, fn: Function, ...args: any[]): Function;
/**
* Partially applies the function (regardless of whether it has had curry
* called on it). This will always postpone execution until at least the next
* call of the partially applied function.
*
* @id partial
* @name partial(fn, args...)
* @section Functions
* @param {Function} fn - function to partial apply
* @param args... - the arguments to apply to the function
* @api public
*/
partial(fn: Function, ...args: any[]): Function;
/**
* The reversed version of compose, where arguments are in the order of
* application.
*
* @id seq
* @name seq(fn1, fn2, ...)
* @section Functions
* @api public
*/
seq(...functions: Function[]): Function;
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// OPERATORS
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/**
* Add two values. Can be partially applied.
*
* @id add
* @section Operators
* @name _.add(a, b)
* @api public
*/
add(a: number, b: number): number;
add(a: number): (b: number) => number;
/**
* Perform logical negation on a value. If `x` is truthy then returns false,
* otherwise returns true.
*
* @id not
* @section Operators
* @name _.not(x)
* @param x - the value to negate
* @api public
*
* _.not(true) // => false
* _.not(false) // => true
*/
not<R>(x: any): boolean;
}
declare namespace Highland {
// hacky unique
// TODO do we need this?
interface Nil {
Highland_NIL: Nil;
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/**
* Used as an Error marker when writing to a Stream's incoming buffer
*/
// TODO is this public?
class | {
constructor(err: Error);
error: Error;
}
/**
* Used as a Redirect marker when writing to a Stream's incoming buffer
*/
// TODO is this public?
class StreamRedirect<R> {
constructor(to: Stream<R>)
to: Stream<R>;
}
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/**
* Actual Stream constructor wrapped by the main exported function
*/
interface Stream<R> extends NodeJS.EventEmitter {
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// STREAM OBJECTS
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/**
* Destroys a stream by unlinking it from any consumers and sources. This will
* stop all consumers from receiving events from this stream and removes this
* stream as a consumer of any source stream.
*
* This function calls end() on the stream and unlinks it from any piped-to streams.
*
* @id destroy
* @section Streams
* @name Stream.destroy()
* @api public
*/
destroy(): void;
/**
* Ends a Stream. This is the same as sending a [nil](#nil) value as data.
* You shouldn't need to call this directly, rather it will be called by
* any [Node Readable Streams](http://nodejs.org/api/stream.html#stream_class_stream_readable)
* you pipe in.
*
* @id end
* @section Streams
* @name Stream.end()
* @api public
*/
end(): void;
/**
* Pauses the stream. All Highland Streams start in the paused state.
*
* @id pause
* @section Streams
* @name Stream.pause()
* @api public
*/
pause(): void;
/**
* Resumes a paused Stream. This will either read from the Stream's incoming
* buffer or request more data from an upstream source.
*
* @id resume
* @section Streams
* @name Stream.resume()
* @api public
*/
resume(): void;
/**
* Writes a value to the Stream. If the Stream is paused it will go into the
* Stream's incoming buffer, otherwise it will be immediately processed and
* sent to the Stream's consumers (if any). Returns false if the Stream is
* paused, true otherwise. This lets Node's pipe method handle back-pressure.
*
* You shouldn't need to call this yourself, but it may be called by Node
* functions which treat Highland Streams as a [Node Writable Stream](http://nodejs.org/api/stream.html#stream_class_stream_writable).
*
* @id write
* @section Streams
* @name Stream.write(x)
* @param x - the value to write to the Stream
* @api public
*/
write(x: R): boolean;
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// TRANSFORMS
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/**
* Adds a value to the end of a Stream.
*
* @id append
* @section Streams
* @name Stream.append(y)
* @param y - the value to append to the Stream
* @api public
*/
append(y: R): Stream<R>;
/**
* Takes one Stream and batches incoming data into arrays of given length
*
* @id batch
* @section Transforms
* @name Stream.batch(n)
* @param {Number} n - length of the array to batch
* @api public
*
* _([1, 2, 3, 4, 5]).batch(2) // => [1, 2], [3, 4], [5]
*/
batch(n: number): Stream<R[]>;
/**
* Takes one Stream and batches incoming data within a maximum time frame
* into arrays of a maximum length.
*
* @id batchWithTimeOrCount
* @section Transforms
* @name Stream.batchWithTimeOrCount(ms, n)
* @param {Number} ms - the maximum milliseconds to buffer a batch
* @param {Number} n - the maximum length of the array to batch
* @api public
*
* _(function (push) {
* push(1);
* push(2);
* push(3);
* setTimeout(push, 20, 4);
* }).batchWithTimeOrCount(10, 2)
*
* // => [1, 2], [3], [4]
*/
batchWithTimeOrCount(ms: number, n: number): Stream<R[]>;
/**
* Groups all values into an Array and passes down the stream as a single
* data event. This is a bit like doing [toArray](#toArray), but instead
* of accepting a callback and causing a *thunk*, it passes the value on.
*
* @id collect
* @section Streams
* @name Stream.collect()
* @api public
*/
collect(): Stream<R[]>;
/**
* Filters a Stream to drop all non-truthy values.
*
* @id compact
* @section Streams
* @name Stream.compact()
* @api public
*/
compact(): Stream<R>;
/**
* Consumes values from a Stream (once resumed) and returns a new Stream for
* you to optionally push values onto using the provided push / next functions.
*
* This function forms the basis of many higher-level Stream operations.
* It will not cause a paused stream to immediately resume, but behaves more
* like a 'through' stream, handling values as they are read.
*
* @id consume
* @section Streams
* @name Stream.consume(f)
* @param {Function} f - the function to handle errors and values
* @api public
*/
consume<U>(f: (err: Error, x: R | Highland.Nil, push: (err: Error | null, value?: U | Highland.Nil) => void, next: () => void) => void): Stream<U>;
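/*
 * Illustrative usage (a sketch, not part of the published typings): a
 * hand-rolled map-like transform built on consume, following the push/next
 * protocol described above. Assumes `_` is the Highland export.
 *
 *   _([1, 2, 3]).consume(function (err, x, push, next) {
 *       if (err) { push(err); next(); }          // forward errors
 *       else if (_.isNil(x)) { push(null, x); }  // forward end-of-stream; no next()
 *       else { push(null, x * 2); next(); }      // emit transformed value
 *   });
 *   // => 2, 4, 6
 */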
/**
* Holds off pushing data events downstream until there has been no more
* data for `ms` milliseconds. Sends the last value that occurred before
* the delay, discarding all other values.
*
* @id debounce
* @section Streams
* @name Stream.debounce(ms)
* @param {Number} ms - the milliseconds to wait before sending data
* @api public
*/
debounce(ms: number): Stream<R>;
/**
* Creates a new Stream which applies a function to each value from the source
* and re-emits the source value. Useful when you want to mutate the value or
* perform side effects
*
* @id doto
* @section Transforms
* @name Stream.doto(f)
* @param {Function} f - the function to apply
* @api public
*
* var appended = _([[1], [2], [3], [4]]).doto(function (x) {
* x.push(1);
* });
*
* _([1, 2, 3]).doto(console.log)
* // 1
* // 2
* // 3
* // => 1, 2, 3
*/
doto(f: (x: R) => void): Stream<R>;
/**
* Acts as the inverse of [`take(n)`](#take) - instead of returning the first `n` values, it ignores the
* first `n` values and then emits the rest. `n` must be of type `Number`; if not, the whole stream will
* be returned. All errors (even ones emitted before the nth value) will be emitted.
*
* @id drop
* @section Transforms
* @name Stream.drop(n)
* @param {Number} n - integer representing number of values to read from source
* @api public
*
* _([1, 2, 3, 4]).drop(2) // => 3, 4
*/
drop(n: number): Stream<R>;
/**
* Extracts errors from a Stream and applies them to an error handler
* function. Returns a new Stream with the errors removed (unless the error
* handler chooses to rethrow them using `push`). Errors can also be
* transformed and put back onto the Stream as values.
*
* @id errors
* @section Streams
* @name Stream.errors(f)
* @param {Function} f - the function to pass all errors to
* @api public
*/
errors(f: (err: Error, push: (err: Error | null, x?: R) => void) => void): Stream<R>;
/**
* Creates a new Stream including only the values which pass a truth test.
*
* @id filter
* @section Streams
* @name Stream.filter(f)
* @param f - the truth test function
* @api public
*/
filter(f: (x: R) => boolean): Stream<R>;
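/*
 * Illustrative usage (a minimal sketch), assuming `_` is the Highland export:
 *
 *   _([1, 2, 3, 4]).filter(function (x) { return x % 2 === 0; });
 *   // => 2, 4
 */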
/**
* A convenient form of filter, which returns the first object from a
* Stream that passes the provided truth test
*
* @id find
* @section Streams
* @name Stream.find(f)
* @param {Function} f - the truth test function which returns a Stream
* @api public
*/
find(f: (x: R) => boolean): Stream<R>;
/**
* A convenient form of [where](#where), which returns the first object from a
* Stream that matches a set of property values. findWhere is to [where](#where) as [find](#find) is to [filter](#filter).
*
* @id findWhere
* @section Transforms
* @name Stream.findWhere(props)
* @param {Object} props - the properties to match against
* @api public
*
* var docs = [
* {type: 'blogpost', title: 'foo'},
* {type: 'blogpost', title: 'bar'},
* {type: 'comment', title: 'foo'}
* ];
*
* _(docs).findWhere({type: 'blogpost'})
* // => {type: 'blogpost', title: 'foo'}
*
* // example with partial application
* var firstBlogpost = _.findWhere({type: 'blogpost'});
*
* firstBlogpost(docs)
* // => {type: 'blogpost', title: 'foo'}
*/
findWhere(props: Partial<R>): Stream<R>;
/**
* A convenient form of reduce, which groups items based on a function or property name
*
* @id group
* @section Streams
* @name Stream.group(f)
* @param {Function|String} f - the function or property name on which to group,
* toString() is called on the result of a function.
* @api public
*/
// TODO verify this
group(f: (x: R) => string): Stream<{[prop:string]:R[]}>;
group(prop: string): Stream<{[prop:string]:R[]}>;
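/*
 * Illustrative usage (a sketch, not authoritative), assuming `_` is the
 * Highland export; the documents are hypothetical:
 *
 *   _([
 *       {type: 'blogpost', title: 'foo'},
 *       {type: 'comment', title: 'bar'},
 *       {type: 'blogpost', title: 'baz'}
 *   ]).group('type');
 *   // => {blogpost: [...both blogposts...], comment: [...the comment...]}
 */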
/**
* Creates a new Stream with only the first value from the source.
*
* @id head
* @section Streams
* @name Stream.head()
* @api public
*
* _([1, 2, 3, 4]).head() // => 1
*/
head(): Stream<R>;
/**
* Creates a new Stream with the separator interspersed between the elements of the source.
*
* `intersperse` is effectively the inverse of [splitBy](#splitBy).
*
* @id intersperse
* @section Transforms
* @name Stream.intersperse(sep)
* @param {R} separator - the value to intersperse between the source elements
* @api public
*/
intersperse<U>(separator: U): Stream<R | U>;
/**
* Calls a named method on each object from the Stream - returning
* a new stream with the result of those calls.
*
* @id invoke
* @section Streams
* @name Stream.invoke(method, args)
* @param {String} method - the method name to call
* @param {Array} args - the arguments to call the method with
* @api public
*/
invoke<U>(method: string, args: any[]): Stream<U>;
/**
* Drops all values from the Stream apart from the last one (if any).
*
* @id last
* @section Streams
* @name Stream.last()
* @api public
*/
last(): Stream<R>;
/**
* Creates a new Stream, which when read from, only returns the last
* seen value from the source. The source stream does not experience
* back-pressure. Useful if you're using a Stream to model a changing
* property which you need to query periodically.
*
* @id latest
* @section Streams
* @name Stream.latest()
* @api public
*/
latest(): Stream<R>;
/**
* Creates a new Stream of transformed values by applying a function to each
* value from the source. The transformation function can be replaced with
* a non-function value for convenience, and it will emit that value
* for every data event on the source Stream.
*
* @id map
* @section Streams
* @name Stream.map(f)
* @param f - the transformation function or value to map to
* @api public
*/
map<U>(f: (x: R) => U): Stream<U>;
/**
*
* Retrieves copies of all elements in the collection,
* with only the whitelisted keys. If one of the whitelisted
* keys does not exist, it will be ignored.
*
* @id pick
* @section Transforms
* @name Stream.pick(properties)
* @param {Array} properties - property names to white filter
* @api public
*/
pick<Prop extends keyof R>(props: Prop[]): Stream<Pick<R, Prop>>;
/**
*
* Retrieves copies of all the elements in the collection
* that satisfy a given predicate. Note: When using ES3,
* only enumerable elements are selected. Both enumerable
* and non-enumerable elements are selected when using ES5.
*
* @id pickBy
* @section Transforms
* @name Stream.pickBy(f)
* @param {Function} f - the predicate function
* @api public
*/
pickBy<Prop extends keyof R>(f: (key: Prop, value: R[Prop]) => boolean): Stream<Partial<R>>
/**
* Retrieves values associated with a given property from all elements in
* the collection.
*
* @id pluck
* @section Streams
* @name Stream.pluck(property)
* @param {String} prop - the property to which values should be associated
* @api public
*/
pluck<Prop extends keyof R>(prop: Prop): Stream<R[Prop]>;
pluck<U>(prop: string): Stream<U>;
/**
* Limits the number of values through the stream to a maximum number of values
* per window. Errors are not limited but allowed to pass through as soon as
* they are read from the source.
*
* @id ratelimit
* @section Transforms
* @name Stream.ratelimit(num, ms)
* @param {Number} num - the number of operations to perform per window
* @param {Number} ms - the window of time to limit the operations in (in ms)
* @api public
*
* _([1, 2, 3, 4, 5]).ratelimit(2, 100);
*
* // after 0ms => 1, 2
* // after 100ms => 1, 2, 3, 4
* // after 200ms => 1, 2, 3, 4, 5
*/
ratelimit(num: number, ms: number): Stream<R>;
/**
* Boils down a Stream to a single value. The memo is the initial state
* of the reduction, and each successive step of it should be returned by
* the iterator function. The iterator is passed two arguments:
* the memo and the next value.
*
* @id reduce
* @section Streams
* @name Stream.reduce(memo, iterator)
* @param memo - the initial state of the reduction
* @param {Function} iterator - the function which reduces the values
* @api public
*/
reduce<U>(memo: U, iterator: (memo: U, x: R) => U): Stream<U>;
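/*
 * Illustrative usage (a minimal sketch), assuming `_` is the Highland export.
 * Note that in this signature the memo comes before the iterator:
 *
 *   _([1, 2, 3, 4]).reduce(0, function (memo, x) { return memo + x; });
 *   // => 10
 */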
/**
* Same as [reduce](#reduce), but uses the first element as the initial
* state instead of passing in a `memo` value.
*
* @id reduce1
* @section Streams
* @name Stream.reduce1(iterator)
* @param {Function} iterator - the function which reduces the values
* @api public
*/
reduce1<U>(iterator: (memo: R | U, x: R) => U): Stream<U>;
/**
* The inverse of [filter](#filter).
*
* @id reject
* @section Streams
* @name Stream.reject(f)
* @param {Function} f - the truth test function
* @api public
*
* var odds = _([1, 2, 3, 4]).reject(function (x) {
* return x % 2 === 0;
* });
*/
reject(f: (x: R) => boolean): Stream<R>;
/**
* Like [reduce](#reduce), but emits each intermediate value of the
* reduction as it is calculated.
*
* @id scan
* @section Streams
* @name Stream.scan(memo, iterator)
* @param memo - the initial state of the reduction
* @param {Function} iterator - the function which reduces the values
* @api public
*/
scan<U>(memo: U, iterator: (memo: U, x: R) => U): Stream<U>;
/**
* Same as [scan](#scan), but uses the first element as the initial
* state instead of passing in a `memo` value.
*
* @id scan1
* @section Streams
* @name Stream.scan1(iterator)
* @param {Function} iterator - the function which reduces the values
* @api public
*
* _([1, 2, 3, 4]).scan1(add) // => 1, 3, 6, 10
*/
scan1<U>(iterator: (memo: R | U, x: R) => U): Stream<U>;
/**
* Creates a new Stream with the values from the source in the range of `start` (inclusive) to `end` (exclusive).
* `start` and `end` must be of type `Number`; if `start` is not a `Number` it will default to `0`
* and, likewise, `end` will default to `Infinity`: this could result in the whole stream being
* returned.
*
* @id slice
* @section Transforms
* @name Stream.slice(start, end)
* @param {Number} start - integer representing index to start reading from source (inclusive)
* @param {Number} end - integer representing index to stop reading from source (exclusive)
* @api public
*/
slice(start: number, end: number): Stream<R>;
/**
* Collects all values together then emits each value individually but in sorted order.
* The method for sorting the elements is ascending lexical.
*
* @id sort
* @section Transforms
* @name Stream.sort()
* @api public
*
* var sorted = _(['b', 'z', 'g', 'r']).sort().toArray(_.log);
* // => ['b', 'g', 'r', 'z']
*/
sort(): Stream<R>;
/**
* Collects all values together then emits each value individually in sorted
* order. The method for sorting the elements is defined by the comparator
* function supplied as a parameter.
*
* The comparison function takes two arguments `a` and `b` and should return
*
* - a negative number if `a` should sort before `b`.
* - a positive number if `a` should sort after `b`.
* - zero if `a` and `b` may sort in any order (i.e., they are equal).
*
* This function must also define a [partial
* order](https://en.wikipedia.org/wiki/Partially_ordered_set). If it does not,
* the resulting ordering is undefined.
*
* @id sortBy
* @section Transforms
* @name Stream.sortBy(f)
* @param {Function} f - the comparison function
* @api public
*/
sortBy(f: (a: R, b: R) => number): Stream<R>;
/**
* [splitBy](#splitBy) over newlines.
*
* @id split
* @section Transforms
* @name Stream.split()
* @api public
*/
split(this: Stream<string>): Stream<string>;
/**
* Splits the source Stream by a separator and emits the pieces in between, much like splitting a string.
*
* `splitBy` is effectively the inverse of [intersperse](#intersperse).
*
* @id splitBy
* @section Transforms
* @name Stream.splitBy(sep)
* @param {String | RegExp} sep - the separator to split on
* @api public
*/
splitBy(this: Stream<string>, sep: string | RegExp): Stream<string>;
/**
* Like the [errors](#errors) method, but emits a Stream end marker after
* an Error is encountered.
*
* @id stopOnError
* @section Streams
* @name Stream.stopOnError(f)
* @param {Function} f - the function to handle an error
* @api public
*/
stopOnError(f: (err: Error) => void): Stream<R>;
/**
* Creates a new Stream with the first `n` values from the source.
*
* @id take
* @section Streams
* @name Stream.take(n)
* @param {Number} n - integer representing number of values to read from source
* @api public
*/
take(n: number): Stream<R>;
/**
* An alias for the [doto](#doto) method.
*
* @id tap
* @section Transforms
* @name Stream.tap(f)
* @param {Function} f - the function to apply
* @api public
*
* _([1, 2, 3]).tap(console.log)
*/
tap(f: (x: R) => void): Stream<R>;
/**
* Ensures that only one data event is pushed downstream (or into the buffer)
* every `ms` milliseconds; any other values are dropped.
*
* @id throttle
* @section Streams
* @name Stream.throttle(ms)
* @param {Number} ms - the minimum milliseconds between each value
* @api public
*/
throttle(ms: number): Stream<R>;
/**
* Filters out all duplicate values from the stream and keeps only the first
* occurrence of each value, using === to define equality.
*
* @id uniq
* @section Streams
* @name Stream.uniq()
* @api public
*/
uniq(): Stream<R>;
/**
* Filters out all duplicate values from the stream and keeps only the first
* occurrence of each value, using the provided function to define equality.
*
* @id uniqBy
* @section Streams
* @name Stream.uniqBy()
* @api public
*/
uniqBy(f: (a: R, b: R) => boolean): Stream<R>;
/**
* A convenient form of filter, which returns all objects from a Stream
* that match a set of property values.
*
* @id where
* @section Streams
* @name Stream.where(props)
* @param {Object} props - the properties to match against
* @api public
*/
where(props: Partial<R>): Stream<R>;
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// HIGHER-ORDER STREAMS
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/**
* Concatenates a Stream to the end of this Stream.
*
* Be aware that in the top-level export, the args may be in the reverse
* order to what you'd expect `_([a], [b]) => [b, a]`, as this follows the
* convention of other top-level exported functions which do `x` to `y`.
*
* @id concat
* @section Streams
* @name Stream.concat(ys)
* @params {Stream | Array} ys - the values to concatenate onto this Stream
* @api public
*/
concat(ys: Stream<R>): Stream<R>;
concat(ys: R[]): Stream<R>;
/**
* Filters using a predicate which returns a Stream. If you need to check
* against an asynchronous data source when filtering a Stream, this can
* be convenient. The Stream returned from the filter function should have
* a Boolean as its first value (all other values on the Stream will be
* disregarded).
*
* @id flatFilter
* @section Streams
* @name Stream.flatFilter(f)
* @param {Function} f - the truth test function which returns a Stream
* @api public
*/
flatFilter(f: (x: R) => Stream<boolean>): Stream<R>;
/**
* Creates a new Stream of values by applying each item in a Stream to an
* iterator function which must return a (possibly empty) Stream. Each
* item on these result Streams are then emitted on a single output Stream.
*
* This transform is functionally equivalent to `.map(f).sequence()`.
*
* @id flatMap
* @section Streams
* @name Stream.flatMap(f)
* @param {Function} f - the iterator function
* @api public
*/
flatMap<U>(f: (x: R) => Stream<U>): Stream<U>;
flatMap<U>(f: (x: R) => U): Stream<U>;
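/*
 * Illustrative usage (a sketch, not part of the published typings): resolving
 * an asynchronous action per value. Assumes `_` is the Highland export,
 * `import * as fs from 'fs'`, and hypothetical file names.
 *
 *   var readFile = _.wrapCallback(fs.readFile);
 *   _(['a.txt', 'b.txt']).flatMap(readFile);
 *   // => contents of a.txt, then contents of b.txt
 */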
/**
* Recursively reads values from a Stream which may contain nested Streams
* or Arrays. As values or errors are encountered, they are emitted on a
* single output Stream.
*
* @id flatten
* @section Streams
* @name Stream.flatten()
* @api public
*/
flatten<U extends Flattened<R>>(): Stream<U>;
/**
* Forks a stream, allowing you to add additional consumers with shared
* back-pressure. A stream forked to multiple consumers will only pull values
* from its source as fast as the slowest consumer can handle them.
*
* @id fork
* @section Streams
* @name Stream.fork()
* @api public
*/
fork(): Stream<R>;
/**
* Takes a Stream of Streams and merges their values and errors into a
* single new Stream. The merged stream ends when all source streams have
* ended.
*
* Note that no guarantee is made with respect to the order in which
* values for each stream end up in the merged stream. Values in the
* merged stream will, however, respect the order they were emitted from
* their respective streams.
*
* @id merge
* @section Streams
* @name Stream.merge()
* @api public
*
* var txt = _(['foo.txt', 'bar.txt']).flatMap(readFile)
* var md = _(['baz.md']).flatMap(readFile)
*
* _([txt, md]).merge();
* // => contents of foo.txt, bar.txt and baz.md in the order they were read
*/
merge<U>(this: Stream<Stream<U>>): Stream<U>;
/**
* Takes a Stream of Streams and merges their values and errors into a
* single new Stream, limiting the number of unpaused streams that can be
* running at any one time.
*
* Note that no guarantee is made with respect to the order in which
* values for each stream end up in the merged stream. Values in the
* merged stream will, however, respect the order they were emitted from
* their respective streams.
*
* @id mergeWithLimit
* @section Higher-order Streams
* @name Stream.mergeWithLimit(n)
* @param {Number} n - the maximum number of streams to run in parallel
* @api public
*
* var readFile = _.wrapCallback(fs.readFile);
*
* var txt = _(['foo.txt', 'bar.txt']).flatMap(readFile)
* var md = _(['baz.md']).flatMap(readFile)
* var js = _(['bosh.js']).flatMap(readFile)
*
* _([txt, md, js]).mergeWithLimit(2);
* // => contents of foo.txt, bar.txt, baz.txt and bosh.js in the order
* // they were read, but bosh.js is not read until either foo.txt and bar.txt
* // has completely been read or baz.md has been read
*/
mergeWithLimit<U>(this: Stream<Stream<U>>, n: number): Stream<U>;
/**
* Observes a stream, allowing you to handle values as they are emitted, without
* adding back-pressure or causing data to be pulled from the source. This can
* be useful when you are performing two related queries on a stream where one
* would block the other. Just be aware that a slow observer could fill up its
* buffer and cause memory issues. Where possible, you should use [fork](#fork).
*
* @id observe
* @section Streams
* @name Stream.observe()
* @api public
*/
observe(): Stream<R>;
/**
* Switches source to an alternate Stream if the current Stream is empty.
*
* @id otherwise
* @section Streams
* @name Stream.otherwise(ys)
* @param {Stream} ys - alternate stream to use if this stream is empty
* @api public
*/
otherwise(ys: Stream<R>): Stream<R>;
/**
* Takes a Stream of Streams and reads from them in parallel, buffering
* the results until they can be returned to the consumer in their original
* order.
*
* @id parallel
* @section Streams
* @name Stream.parallel(n)
* @param {Number} n - the maximum number of concurrent reads/buffers
* @api public
*/
parallel<U>(this: Stream<Stream<U>>, n: number): Stream<U>
/**
* Reads values from a Stream of Streams, emitting them on a single output
* Stream. This can be thought of as a flatten, just one level deep. Often
* used for resolving asynchronous actions such as a HTTP request or reading
* a file.
*
* @id sequence
* @section Streams
* @name Stream.sequence()
* @api public
*/
sequence<U>(this: Stream<Stream<U>>): Stream<U>;
sequence<U>(this: Stream<U[]>): Stream<U>;
/**
* An alias for the [sequence](#sequence) method.
*
* @id series
* @section Streams
* @name Stream.series()
* @api public
*/
series<U>(this: Stream<Stream<U>>): Stream<U>;
series<U>(this: Stream<U[]>): Stream<U>;
/**
* Transforms a stream using an arbitrary target transform.
*
* If `target` is a function, this transform passes the current Stream to it,
* returning the result.
*
* If `target` is a [Duplex
* Stream](https://nodejs.org/api/stream.html#stream_class_stream_duplex_1),
* this transform pipes the current Stream through it. It will always return a
* Highland Stream (instead of the piped to target directly as in
* [pipe](#pipe)). Any errors emitted will be propagated as Highland errors.
*
* **TIP**: Passing a function to `through` is a good way to implement complex
* reusable stream transforms. You can even construct the function dynamically
* based on certain inputs. See examples below.
*
* @id through
* @section Higher-order Streams
* @name Stream.through(target)
* @param {Function | Duplex Stream} target - the stream to pipe through or a
* function to call.
* @api public
*
* // This is a static complex transform.
* function oddDoubler(s) {
* return s.filter(function (x) {
* return x % 2; // odd numbers only
* })
* .map(function (x) {
* return x * 2;
* });
* }
*
* // This is a dynamically-created complex transform.
* function multiplyEvens(factor) {
* return function (s) {
* return s.filter(function (x) {
* return x % 2 === 0;
* })
* .map(function (x) {
* return x * factor;
* });
* };
* }
*
* _([1, 2, 3, 4]).through(oddDoubler); // => 2, 6
*
* _([1, 2, 3, 4]).through(multiplyEvens(5)); // => 10, 20
*
* // Can also be used with Node Through Streams
* _(filenames).through(jsonParser).map(function (obj) {
* // ...
* });
*
* // All errors will be propagated as Highland errors
* _(['zz{"a": 1}']).through(jsonParser).errors(function (err) {
* console.log(err); // => SyntaxError: Unexpected token z
* });
*/
through<U>(f: (x: Stream<R>) => U): U;
through(thru: NodeJS.ReadWriteStream): Stream<any>;
/**
* Takes two Streams and returns a Stream of corresponding pairs.
*
* @id zip
* @section Streams
* @name Stream.zip(ys)
* @param {Array | Stream} ys - the other stream to combine values with
* @api public
*/
zip<U>(ys: U[]): Stream<[R, U]>;
zip<U>(ys: Stream<U>): Stream<[R, U]>;
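/*
 * Illustrative usage (a minimal sketch), assuming `_` is the Highland export:
 *
 *   _(['a', 'b', 'c']).zip([1, 2, 3]);
 *   // => ['a', 1], ['b', 2], ['c', 3]
 */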
/**
* Takes a stream and a *finite* stream of `N` streams
* and returns a stream of the corresponding `(N+1)`-tuples.
*
* *Note:* This transform will be renamed `zipEach` in the next major version
* release.
*
* @id zipAll
* @section Higher-order Streams
* @name Stream.zipAll(ys)
* @param {Array | Stream} ys - the array of streams to combine values with
* @api public
*/
zipAll<U>(ys: U[][]): Stream<Array<R | U>>;
zipAll<U>(ys: Stream<U[]>): Stream<Array<R | U>>;
zipAll<U>(ys: Stream<Stream<U>>): Stream<Array<R | U>>;
/**
* Takes a *finite* stream of streams and returns a stream where the first
* element from each separate stream is combined into a single data event,
* followed by the second elements of each stream and so on until the shortest
* input stream is exhausted.
*
* *Note:* This transform will be renamed `zipAll` in the next major version
* release.
*
* @id zipAll0
* @section Higher-order Streams
* @name Stream.zipAll0()
* @api public
*/
zipAll0<T>(this: Stream<Stream<T>>): Stream<T[]>;
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// CONSUMPTION
// - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
/**
* Applies results from a Stream as arguments to a function
*
* @id apply
* @section Streams
* @name Stream.apply(f)
* @param {Function} f - the function to apply arguments to
* @api public
*/
// TODO what to do here?
apply(f: Function): void;
/**
* Calls a function once the Stream has ended. This method consumes the stream.
* If the Stream has already ended, the function is called immediately.
*
* If an error from the Stream reaches this call, it will emit an `error` event
* (i.e., it will call `emit('error')` on the stream being consumed). This
* event will cause an error to be thrown if unhandled.
*
* As a special case, it is possible to chain `done` after a call to
* [each](#each) even though both methods consume the stream.
*
* @id done
* @section Consumption
* @name Stream.done(f)
* @param {Function} f - the callback
* @api public
*
* var total = 0;
* _([1, 2, 3, 4]).each(function (x) {
* total += x;
* }).done(function () {
* // total will be 10
* });
*/
done(f: () => void): void;
/**
* Iterates over every value from the Stream, calling the iterator function
* on each of them. This function causes a **thunk**.
*
* If an error from the Stream reaches the `each` call, it will emit an
* error event (which will cause it to throw if unhandled).
*
* @id each
* @section Streams
* @name Stream.each(f)
* @param {Function} f - the iterator function
* @api public
*/
each(f: (x: R) => void): Pick<Stream<R>, 'done'>;
/**
* Pipes a Highland Stream to a [Node Writable
* Stream](http://nodejs.org/api/stream.html#stream_class_stream_writable).
* This will pull all the data from the source Highland Stream and write it to
* the destination, automatically managing flow so that the destination is not
* overwhelmed by a fast source.
*
* Users may optionally pass an object that may contain any of these fields:
*
* - `end` - Ends the destination when this stream ends. Default: `true`. This
* option has no effect if the destination is either `process.stdout` or
* `process.stderr`. Those two streams are never ended.
*
* Like [Readable#pipe](https://nodejs.org/api/stream.html#stream_readable_pipe_destination_options),
* this function will throw errors if there is no `error` handler installed on
* the stream.
*
* This function returns the destination so you can chain together `pipe` calls.
*
* **NOTE**: While Highland streams created via `_()` and [pipeline](#pipeline)
* support being piped to, it is almost never appropriate to `pipe` from a
* Highland stream to another Highland stream. Those two cases are meant for
* use when piping from *Node* streams. You might be tempted to use `pipe` to
* construct reusable transforms. Do not do it. See [through](#through) for a
* better way.
*
* @id pipe
* @section Consumption
* @name Stream.pipe(dest, options)
* @param {Writable Stream} dest - the destination to write all data to
* @param {Object} options - (optional) pipe options.
* @api public
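     *
     * A usage sketch (assumes `fs` has been required; the file names are illustrative):
     *
     * var output = fs.createWriteStream('output');
     * _(fs.createReadStream('input')).pipe(output);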
*/
pipe<U>(dest: Stream<U>): Stream<U>;
    pipe<U extends NodeJS.WritableStream>(dest: U, options?: { end?: boolean }): U;
/**
* Consumes a single item from the Stream. Unlike consume, this function will
* not provide a new stream for you to push values onto, and it will unsubscribe
* as soon as it has a single error, value or nil from the source.
*
* You probably won't need to use this directly, but it is used internally by
* some functions in the Highland library.
*
* @id pull
* @section Streams
* @name Stream.pull(f)
* @param {Function} f - the function to handle data
* @api public
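     *
     * A usage sketch (illustrative values, inferred from the description above):
     *
     * _([1, 2, 3]).pull(function (err, x) {
     *     // x === 1 (no error); the source is unsubscribed from after this single pull
     * });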
*/
pull(f: (err: Error, x: R) => void): void;
/**
     * Collects all values from a Stream into an Array and calls a function
     * once with the result. This function causes a **thunk**.
*
* If an error from the Stream reaches the `toArray` call, it will emit an
* error event (which will cause it to throw if unhandled).
*
* @id toArray
* @section Streams
* @name Stream.toArray(f)
* @param {Function} f - the callback to provide the completed Array to
* @api public
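     *
     * A usage sketch (illustrative values, inferred from the description above):
     *
     * _([1, 2, 3, 4]).toArray(function (arr) {
     *     // arr will be [1, 2, 3, 4]
     * });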
*/
toArray(f: (arr: R[]) => void): void;
/**
     * Passes the result of a stream to a nodejs-style callback function.
*
* If the stream contains a single value, it will call `cb`
* with the single item emitted by the stream (if present).
* If the stream is empty, `cb` will be called without any arguments.
* If an error is encountered in the stream, this function will stop
* consumption and call `cb` with the error.
* If the stream contains more than one item, it will stop consumption
* and call `cb` with an error.
*
* @id toCallback
* @section Consumption
* @name Stream.toCallback(cb)
* @param {Function} cb - the callback to provide the error/result to
* @api public
*
* _([1, 2, 3, 4]).collect().toCallback(function (err, result) {
* // parameter result will be [1,2,3,4]
* // parameter err will be null
* });
*/
toCallback(cb: (err?: Error, x?: R) => void): void;
/**
* Converts the stream to a node Readable Stream for use in methods
* or pipes that depend on the native stream type.
*
* The options parameter can be an object passed into the [`Readable`
* constructor](http://nodejs.org/api/stream.html#stream_class_stream_readable).
*
* @id toNodeStream
* @section Consumption
* @name Stream.toNodeStream(options)
* @param {Object} options - (optional) [`Readable` constructor](http://nodejs.org/api/stream.html#stream_class_stream_readable) options
* @api public
*
* _(fs.createReadStream('./abc')).toNodeStream()
* _(fs.createReadStream('./abc')).toNodeStream({objectMode: false})
* _([{a: 1}]).toNodeStream({objectMode: true})
*/
toNodeStream(options?: object): NodeJS.ReadableStream;
/**
     * Converts the result of a stream to a Promise.
*
* If the stream contains a single value, it will return
* with the single item emitted by the stream (if present).
* If the stream is empty, `undefined` will be returned.
     * If an error is encountered in the stream, this function will stop
     * consumption and reject the returned promise with the error.
* If the stream contains more than one item, it will stop consumption
* and reject with an error.
*
* @id toPromise
* @section Consumption
* @name Stream.toPromise(PromiseCtor)
* @param {Function} PromiseCtor - Promises/A+ compliant constructor
* @api public
*
* _([1, 2, 3, 4]).collect().toPromise(Promise).then(function (result) {
* // parameter result will be [1,2,3,4]
* });
*/
toPromise(PromiseCtor: PromiseConstructor): PromiseLike<R>;
}
interface PipeableStream<T, R> extends Stream<R> {}
interface PipeOptions {
end: boolean
}
type MappingHint = number | string[] | Function;
interface CleanupObject {
onDestroy?: Function;
continueOnError?: boolean;
}
type OnFinished = (r: NodeJS.ReadableStream, cb: (...args: any[]) => void) => void | Function | CleanupObject;
}
declare var highland:HighlandStatic;
declare module 'highland' {
export = highland;
}
| StreamError |
main.rs | #![recursion_limit="1024"] // Recursion limit for error-chain, value used as recommended by the crates documentation.
// Copyright 2020 sacn Developers
//
// Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or
// http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
//
// This file was created as part of a University of St Andrews Computer Science BSC Senior Honours Dissertation Project.
//! An example demo sACN receiver which utilises the sACN library.
//!
//! Primarily used for testing the library including real-world conformance, compliance, integration and acceptance tests.
//! As a test program the error handling is limited for simplicity.
//!
//! Usage instructions are described by either running the receiver and using the help command or by the get_usage_str function
//! below.
//!
//! The ACTION_... constants describe the various user input strings possible once the program has started, with more details described in get_usage_str within
//! the code. The details aren't repeated outside of that to minimise the amount of references that have to be kept up to date and which could diverge over time.
//!
//! Note the lack of top level constant strings used in the place of output format strings is due to a limitation in rust where the format string cannot be a
//! const.
//!
#[macro_use]
extern crate error_chain;
/// The demo itself utilises a small error-chain which wraps the errors from the sACN crate and a few standard crates.
pub mod error;
use error::errors::*;
extern crate sacn;
use sacn::receive::{DMXData, SacnReceiver, DiscoveredSacnSource};
use sacn::packet::ACN_SDT_MULTICAST_PORT;
use std::net::{SocketAddr};
use std::time::Duration;
use std::io;
use std::env;
use std::thread::sleep;
use std::fs::File;
use std::io::prelude::*;
/// The string given by the user to receive data.
const ACTION_RECV: &str = "r";
/// The string given by the user to receive data continuously.
const ACTION_RECV_CONTINUOUS: &str = "c";
/// The string given by the user to cause the receiver to display the sources which have currently been discovered.
const ACTION_PRINT_DISCOVERED_SOURCES: &str = "s";
/// The string given by the user to cause the receiver to display the sources which have been discovered but without checking for timeouts first. This is usually
/// used as part of debugging / tests.
const ACTION_PRINT_DISCOVERED_SOURCES_NO_TIMEOUT: &str = "x";
/// The string given by the user to quit the receiver.
const ACTION_QUIT: &str = "q";
/// The string given by the user to display the help.
const ACTION_HELP: &str = "h";
/// The string given by the user to start listening to a specific universe of data.
const ACTION_LISTEN_UNIVERSE: &str = "l";
/// The string given by the user to terminate listening to a specific universe of data.
const ACTION_STOP_LISTEN_UNIVERSE: &str = "t";
/// The string given by the user to cause the receiver to sleep/block for a given time. This is used as part of tests as a way to encourage a specific
/// ordering of concurrent events by having one side wait for a period. This is discussed in more detail within the specific tests.
const ACTION_SLEEP: &str = "w";
/// The string given by the user to enable receiving preview data.
const ACTION_PREVIEW: &str = "p";
/// The string given by the user to enable universe discovery packets to be announced when received.
const ACTION_ANNOUNCE_DISCOVERED: &str = "a";
/// Lines of input starting with this string are ignored. This is commonly used within the automated tests to allow comments within the input files.
const ACTION_IGNORE: &str = "#";
/// The string given by the user to cause the receiver to output data to a file.
const ACTION_FILE_OUT: &str = "f";
/// The string given by the user to cause termination packets to be announced. "e" for end.
const ACTION_ANNOUNCE_TERMINATION: &str = "e";
/// The headers used for the top of the file when the FILE_OUT action is used.
const WRITE_TO_FILE_HEADERS: &str = "Data_ID, Universe, Sync_Addr, Priority, Preview_data?, Payload";
/// Describes the various commands / command-line arguments available and what they do.
/// Displayed to the user if they ask for help or enter an unrecognised input.
/// Not a const because the format! macro cannot be used in a const context in Rust.
fn get_usage_str() -> String {
format!("Usage: ./main <interface_ip>\n
Receive data: \n
{} <timeout in secs, 0 means no timeout>\n
Attempt to receive data with the given timeout for each receive for the given number of times: \n
{} <timeout in secs> <count> \n
Print discovered sources: \n
{} \n
Print discovered sources without checking if they are timed out: \n
{} \n
Quit \n
{} \n
Help \n
{} \n
Listen universe \n
{} <universe> \n
Stop Listening Universe \n
{} <universe> \n
Sleep for x milliseconds \n
{} <milliseconds>\n
Enter preview mode, true means preview data will be received, false means preview data is ignored, default is false\n
{} <'true'/'false'>\n
Enter announce discovery mode, true means that universe discovery packets will be announced as soon as received, false means they are handled silently, default is false\n
{} <'true'/'false'>\n
Enter announce termination mode, true means that termination packets will be announced during a recv() attempt. False means they are handled silently, default is false\n
{} <'true'/'false'>\n
Output received data to a file
{} <file-path> <recv-count> <timeout in sec>\n
All input is ignored on lines starting with '{} '.
", ACTION_RECV, ACTION_RECV_CONTINUOUS, ACTION_PRINT_DISCOVERED_SOURCES, ACTION_PRINT_DISCOVERED_SOURCES_NO_TIMEOUT,
ACTION_QUIT, ACTION_HELP, ACTION_LISTEN_UNIVERSE, ACTION_STOP_LISTEN_UNIVERSE, ACTION_SLEEP, ACTION_PREVIEW, ACTION_ANNOUNCE_DISCOVERED,
ACTION_ANNOUNCE_TERMINATION, ACTION_FILE_OUT, ACTION_IGNORE)
}
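// For illustration only: a plausible stdin session using the commands described above
// (the interface IP, universe and timeout values are made up):
//
//   $ ./main 192.168.0.6
//   l 1        <- start listening to universe 1
//   r 5        <- attempt one receive with a 5 second timeout
//   s          <- print the sources discovered so far
//   q          <- quit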
/// The entry point of the demo_rcv. Usage is described in get_usage_str or by running the program and typing "h" or "help".
///
/// # Arguments
/// Usage: ./main <interface_ip>
fn main() {
let cmd_args: Vec<String> = env::args().collect();
if cmd_args.len() < 2 {
return display_help();
}
let interface_ip = &cmd_args[1];
let source_limit = None;
let mut dmx_recv = SacnReceiver::with_ip(SocketAddr::new(interface_ip.parse().unwrap(), ACN_SDT_MULTICAST_PORT), source_limit).unwrap();
println!("Started");
loop {
match handle_input(&mut dmx_recv) {
Ok(should_continue) => {
if !should_continue {
break;
}
}
Err(e) => {
println!("Error: Input data line unusable: {}", e);
}
}
}
}
/// Handle a line of input on stdin to the program.
/// Returns true if there is more input expected and false if not.
fn handle_input(dmx_recv: &mut SacnReceiver) -> Result<bool> {
let mut input = String::new();
match io::stdin().read_line(&mut input) {
Ok(n) => {
if n == 0 {
// Means EOF is reached so terminate
return Ok(false);
}
let split_input: Vec<&str> = input.split_whitespace().collect();
if split_input.len() < 1 {
display_help();
return Ok(true);
}
match split_input[0] {
ACTION_IGNORE => {
// Ignore the input, this is usually used for lines that contain comments within test input files.
}
ACTION_HELP => { // Display help
display_help();
}
ACTION_RECV => { // Receive data
if split_input.len() < 2 {
display_help();
bail!(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Insufficient parts ( < 2 )"));
}
// To learn about how to parse strings to ints.
// https://stackoverflow.com/questions/27043268/convert-a-string-to-int-in-rust (03/02/2020)
let timeout_secs: u64 = split_input[1].parse().unwrap();
let timeout = if timeout_secs == 0 { // A timeout value of 0 means no timeout.
None
} else {
Some(Duration::from_secs(timeout_secs))
};
// https://docs.rs/error-chain/0.12.2/error_chain/ (08/03/2020)
let res = dmx_recv.recv(timeout).map_err(|e| e.into());
print_recv(res);
}
ACTION_RECV_CONTINUOUS => { // Receive data continuously.
if split_input.len() < 3 {
display_help();
bail!(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Insufficient parts ( < 3 )"));
}
let timeout_secs: u64 = split_input[1].parse().unwrap();
let count: u64 = split_input[2].parse().unwrap();
let timeout = if timeout_secs == 0 { // A timeout value of 0 means no timeout.
None
} else {
Some(Duration::from_secs(timeout_secs))
};
for _ in 0 .. count {
let res = dmx_recv.recv(timeout).map_err(|e| e.into());
print_recv(res);
}
}
ACTION_PRINT_DISCOVERED_SOURCES => { // Print discovered sources, note that no sources will be discovered unless you try and recv first.
print_discovered_sources(&dmx_recv.get_discovered_sources());
}
ACTION_PRINT_DISCOVERED_SOURCES_NO_TIMEOUT => { // Print discovered sources without checking if they are timed out already.
print_discovered_sources(&dmx_recv.get_discovered_sources_no_check());
}
ACTION_QUIT => {
return Ok(false)
}
ACTION_SLEEP => {
if split_input.len() < 2 {
display_help();
bail!(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Insufficient parts ( < 2 )"));
}
let millisecs: u64 = split_input[1].parse().unwrap();
sleep(Duration::from_millis(millisecs));
}
ACTION_LISTEN_UNIVERSE => {
if split_input.len() < 2 {
display_help();
bail!(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Insufficient parts ( < 2 )"));
}
let universe: u16 = split_input[1].parse().unwrap();
dmx_recv.listen_universes(&[universe])?;
}
ACTION_STOP_LISTEN_UNIVERSE => {
if split_input.len() < 2 {
display_help();
bail!(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Insufficient parts ( < 2 )"));
}
let universe: u16 = split_input[1].parse().unwrap();
dmx_recv.mute_universe(universe)?;
}
ACTION_PREVIEW => {
if split_input.len() < 2 {
display_help();
bail!(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Insufficient parts ( < 2 )"));
}
let val = split_input[1].parse();
match val {
Ok(v) => {
dmx_recv.set_process_preview_data(v);
},
Err(_e) => {
bail!(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "Preview flag option not 'true'/'false' or otherwise parsable as boolean"));
}
}
}
ACTION_ANNOUNCE_DISCOVERED => {
if split_input.len() < 2 {
display_help();
bail!(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Insufficient parts ( < 2 )"));
}
let val = split_input[1].parse();
match val {
Ok(v) => {
dmx_recv.set_announce_source_discovery(v);
},
Err(_e) => {
bail!(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "Announce discovery option not 'true'/'false' or otherwise parsable as boolean"));
}
}
}
ACTION_ANNOUNCE_TERMINATION => {
if split_input.len() < 2 {
display_help();
bail!(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Insufficient parts ( < 2 )"));
}
let val = split_input[1].parse();
match val {
Ok(v) => {
dmx_recv.set_announce_stream_termination(v);
},
Err(_e) => {
bail!(std::io::Error::new(
std::io::ErrorKind::InvalidInput, "Announce stream termination option not 'true'/'false' or otherwise parsable as boolean"));
}
}
}
ACTION_FILE_OUT => {
if split_input.len() < 4 {
display_help();
bail!(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Insufficient parts ( < 4 )"));
}
let file_path = split_input[1];
let count: u64 = split_input[2].parse().unwrap();
let timeout_secs: u64 = split_input[3].parse().unwrap();
let timeout = if timeout_secs == 0 { // A timeout value of 0 means no timeout.
None
} else {
Some(Duration::from_secs(timeout_secs))
};
let out_file = File::create(file_path)?;
let mut boxed_file = Box::new(out_file);
write!(boxed_file, "{}\n", WRITE_TO_FILE_HEADERS)?;
for i in 0 .. count {
let res: Vec<DMXData> = dmx_recv.recv(timeout).unwrap();
write_to_file(&mut boxed_file, res, i)?;
}
}
x => {
bail!(std::io::Error::new(std::io::ErrorKind::InvalidInput, format!("Unknown input type: {}", x)));
}
}
Ok(true)
}
Err(e) => {
bail!(e);
}
}
}
/// Writes the given data to the given file (uses the given data_id as first column).
/// Uses comma separated values.
///
/// # Arguments
/// file: A mutable box reference containing the file to write to.
///
/// data: The data to write to the file.
///
/// data_id: The id used as the first column within the file for the data.
///
fn write_to_file(file: &mut Box<File>, data: Vec<DMXData>, data_id: u64) -> Result<()> |
/// Converts the given array of u8 values into a comma separated string.
///
/// # Arguments
/// values: The unsigned 8 bit number values to turn into a string.
///
fn create_values_str(values: Vec<u8>) -> Result<String> {
let mut res: String = "".to_string();
if values.len() < 1 {
return Ok(res);
}
let mut iter = values.iter();
// Adapted from.
// https://users.rust-lang.org/t/what-is-right-ways-to-concat-strings/3780/4 (09/04/2020)
res.push_str(&format!("{}", iter.next().unwrap()));
for v in iter {
res.push_str(&format!(",{}", v));
}
Ok(res)
}
/// Prints the given output from recv to stdout.
/// Errors are printed using their debug output except for universe terminated which is printed as "Universe x Terminated" where x is the universe. This
/// is to avoid the CID being printed which changes for every test as it is randomly generated in most tests.
///
/// # Arguments
/// res: The data to display.
///
fn print_recv(res: Result<Vec<DMXData>>) {
match res {
Err(e) => {
match e.kind() {
ErrorKind::Sacn(x) => {
match x.kind() {
sacn::error::errors::ErrorKind::UniverseTerminated(_src_cid, uni) => {
println!("Universe {} Terminated", uni);
}
z => {
println!("Error Encountered: {:?}", z);
}
}
},
x => {
println!("Error Encountered: {:?}", x);
}
}
},
Ok(d) => {
print_data(d);
}
}
}
/// Prints the given data to stdout in the format [{{ Universe(s): x, Sync_Universe: y, Values: z }}, ...] where x is the universe, y is the synchronisation address
/// and z is the values. The ... indicates that there may be multiple bits of data to print at once which follows the same format.
///
/// # Arguments
/// data: The data to be printed to stdout.
///
fn print_data(mut data: Vec<DMXData>) {
print!("[");
// Sort the data with lower universes first. Even though data returned from waiting data can arrive in any order,
// this makes the ordering deterministic, which makes checking the output with a test script easier.
data.sort();
for d in data {
print!("{{ Universe(s): {}, Sync_Universe: {}, Values: {:?} }}, ", d.universe, d.sync_uni, d.values);
}
println!("]");
}
/// Prints the given array of discovered sources to std out. Uses the format "Name: x, Universes: y" where x is the source name and y is the universes registered to the
/// source.
///
/// # Arguments
/// src: The sources to print to standard out.
///
fn print_discovered_sources(srcs: &Vec<DiscoveredSacnSource>) {
for s in srcs {
println!("Name: {}, Universes: {:?}", s.name, s.get_all_universes());
}
}
/// Displays the usage/help string to stdout.
///
fn display_help(){
println!("{}", get_usage_str());
}
| {
for d in data {
let values_str = create_values_str(d.values)?;
// Note that the formatting string literal must be here and cannot be substituted using a const.
write!(*file, "{},{},{},{},{},{}\n", data_id, d.universe, d.sync_uni, d.priority, d.preview, values_str)?;
}
Ok(())
} |
identify.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package distros
import (
"fmt"
"io/ioutil"
"os"
"path"
"strings"
"k8s.io/klog"
)
// FindDistribution identifies the distribution on which we are running
// We will likely remove this when everything is containerized
func FindDistribution(rootfs string) (Distribution, error) | {
// Ubuntu has /etc/lsb-release (and /etc/debian_version)
lsbRelease, err := ioutil.ReadFile(path.Join(rootfs, "etc/lsb-release"))
if err == nil {
for _, line := range strings.Split(string(lsbRelease), "\n") {
line = strings.TrimSpace(line)
if line == "DISTRIB_CODENAME=xenial" {
return DistributionXenial, nil
} else if line == "DISTRIB_CODENAME=bionic" {
klog.Warningf("bionic is not fully supported nor tested for Kops and Kubernetes")
klog.Warningf("this should only be used for testing purposes.")
return DistributionBionic, nil
}
}
} else if !os.IsNotExist(err) {
klog.Warningf("error reading /etc/lsb-release: %v", err)
}
// Debian has /etc/debian_version
debianVersionBytes, err := ioutil.ReadFile(path.Join(rootfs, "etc/debian_version"))
if err == nil {
debianVersion := strings.TrimSpace(string(debianVersionBytes))
if strings.HasPrefix(debianVersion, "8.") {
return DistributionJessie, nil
} else if strings.HasPrefix(debianVersion, "9.") {
return DistributionDebian9, nil
} else if strings.HasPrefix(debianVersion, "10.") {
return DistributionDebian10, nil
} else {
return "", fmt.Errorf("unhandled debian version %q", debianVersion)
}
} else if !os.IsNotExist(err) {
klog.Warningf("error reading /etc/debian_version: %v", err)
}
// Redhat has /etc/redhat-release
// Centos has /etc/centos-release
redhatRelease, err := ioutil.ReadFile(path.Join(rootfs, "etc/redhat-release"))
if err == nil {
for _, line := range strings.Split(string(redhatRelease), "\n") {
line = strings.TrimSpace(line)
if strings.HasPrefix(line, "Red Hat Enterprise Linux Server release 7.") {
return DistributionRhel7, nil
}
if strings.HasPrefix(line, "CentOS Linux release 7.") {
return DistributionCentos7, nil
}
if strings.HasPrefix(line, "Red Hat Enterprise Linux release 8.") {
return DistributionRhel8, nil
}
if strings.HasPrefix(line, "CentOS Linux release 8.") {
return DistributionCentos8, nil
}
}
klog.Warningf("unhandled redhat-release info %q", string(redhatRelease))
} else if !os.IsNotExist(err) {
klog.Warningf("error reading /etc/redhat-release: %v", err)
}
// CoreOS uses /usr/lib/os-release
// Flatcar uses /usr/lib/os-release
usrLibOsRelease, err := ioutil.ReadFile(path.Join(rootfs, "usr/lib/os-release"))
if err == nil {
for _, line := range strings.Split(string(usrLibOsRelease), "\n") {
line = strings.TrimSpace(line)
if line == "ID=coreos" {
return DistributionCoreOS, nil
} else if line == "ID=flatcar" {
return DistributionFlatcar, nil
}
}
klog.Warningf("unhandled os-release info %q", string(usrLibOsRelease))
} else if !os.IsNotExist(err) {
klog.Warningf("error reading /usr/lib/os-release: %v", err)
}
// ContainerOS, Amazon Linux 2 uses /etc/os-release
osRelease, err := ioutil.ReadFile(path.Join(rootfs, "etc/os-release"))
if err == nil {
for _, line := range strings.Split(string(osRelease), "\n") {
line = strings.TrimSpace(line)
if line == "ID=cos" {
return DistributionContainerOS, nil
}
if strings.HasPrefix(line, "PRETTY_NAME=\"Amazon Linux 2") {
return DistributionCentos7, nil
}
}
klog.Warningf("unhandled /etc/os-release info %q", string(osRelease))
} else if !os.IsNotExist(err) {
klog.Warningf("error reading /etc/os-release: %v", err)
}
klog.Warningf("could not determine distro")
klog.Warningf(" /etc/lsb-release: %q", string(lsbRelease))
klog.Warningf(" /etc/debian_version: %q", string(debianVersionBytes))
klog.Warningf(" /etc/redhat-release: %q", string(redhatRelease))
klog.Warningf(" /usr/lib/os-release: %q", string(usrLibOsRelease))
klog.Warningf(" /etc/os-release: %q", string(osRelease))
return "", fmt.Errorf("cannot identify distro")
} |
|
by-url.js | /** | * @return {boolean}
*/
import _ from 'lodash';
import {filterCards} from "../cards";
export const byUrl = (card, query) => {
return Object.keys(query).every(function (queryKey) {
// if (queryKey === 'mechanics') {
// console.log(queryKey);
// return query[queryKey].some(queryValue => {
// console.log(queryValue, card[queryKey].indexOf(queryValue) > -1);
// return card[queryKey].indexOf(queryValue) > -1;
// });
// }
if (query[queryKey].constructor === Array) {
return query[queryKey].some(queryValue => {
return card[queryKey] == queryValue
});
}
else {
return card[_.toLower(queryKey)] == query[queryKey];
}
})
};
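// An illustrative call (the card fields and query values here are made up):
//   byUrl({type: 'MINION', cost: 3}, {type: ['MINION']})  // => true
//   byUrl({type: 'SPELL'}, {type: ['MINION']})            // => false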
export const filterByUrl = (cards, query, cardsLoaded) =>{
return filterCards(cards, byUrl, query, cardsLoaded)
}; | * Filters cards by query
*
* @param {object} card
* @param {string} query |
health.go | // Copyright © 2022, Cisco Systems Inc.
// Use of this source code is governed by an MIT-style license that can be
// found in the LICENSE file or at https://opensource.org/licenses/MIT.
package app
import (
"context"
"cto-github.cisco.com/NFV-BU/go-msx/config"
"cto-github.cisco.com/NFV-BU/go-msx/health"
"cto-github.cisco.com/NFV-BU/go-msx/trace"
"encoding/json"
"github.com/pkg/errors"
"time"
)
const (
rootKeyHealth = "health"
)
var (
healthLogger *HealthLogger
)
type HealthLoggerConfig struct {
Enabled bool `config:"default=true"`
Frequency time.Duration `config:"default=15s"`
}
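// For illustration, a configuration fragment that would populate HealthLoggerConfig under the
// "health" root key (the exact key names/format are an assumption based on the struct tags above):
//
//	health:
//	  enabled: true
//	  frequency: 30s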
type HealthLogger struct {
ctx context.Context
cfg *HealthLoggerConfig
done chan struct{}
lastResult *string
}
func (l *HealthLogger) LogHealth() {
ctx, span := trace.NewSpan(l.ctx, "healthLogger.LogHealth",
trace.StartWithTag(trace.FieldSpanType, "health"))
defer span.Finish()
healthReport := health.GenerateReport(ctx)
span.LogFields(trace.Status(healthReport.Status.String()))
if bytes, err := json.Marshal(&healthReport); err != nil {
span.LogFields(trace.Error(err))
logger.Error(err)
} else {
newResult := string(bytes)
if l.lastResult == nil || newResult != *l.lastResult {
logger.Info("Health report: ", string(bytes))
l.lastResult = &newResult
} else {
// DE11136: Silence
}
}
}
func (l *HealthLogger) Run() {
ticker := time.NewTicker(l.cfg.Frequency)
defer ticker.Stop()
for {
select {
case <-ticker.C:
if l.ctx.Err() != nil {
return
} else {
l.LogHealth()
}
case <-l.done:
return
}
}
}
func (l *HealthLogger) Stop() {
close(l.done)
}
func NewHealthLogger(ctx context.Context, cfg *HealthLoggerConfig) *HealthLogger {
return &HealthLogger{
ctx: trace.UntracedContextFromContext(ctx),
cfg: cfg,
done: make(chan struct{}),
}
}
func init() {
OnEvent(EventStart, PhaseAfter, createHealthLogger)
OnEvent(EventStop, PhaseBefore, closeHealthLogger)
}
func createHealthLogger(ctx context.Context) error {
logger.Info("Starting health logger")
cfg := config.FromContext(ctx)
if cfg == nil {
return errors.New("Config not found in context")
}
healthLoggerConfig := &HealthLoggerConfig{}
if err := cfg.Populate(healthLoggerConfig, rootKeyHealth); err != nil {
return err
}
if !healthLoggerConfig.Enabled {
return nil
}
healthLogger = NewHealthLogger(ctx, healthLoggerConfig)
go healthLogger.Run()
return nil
}
func closeHealthLogger(ctx context.Context) error {
logger.Info("Stopping health logger")
if healthLogger != nil {
healthLogger.Stop() | return nil
} | } |
photon_test.go | package photon_test
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
fake "k8s.io/utils/clock/testing"
"github.com/aquasecurity/trivy-db/pkg/db"
dbTypes "github.com/aquasecurity/trivy-db/pkg/types"
"github.com/aquasecurity/trivy-db/pkg/vulnsrc/vulnerability"
"github.com/aquasecurity/trivy/pkg/dbtest"
"github.com/aquasecurity/trivy/pkg/detector/ospkg/photon"
ftypes "github.com/aquasecurity/trivy/pkg/fanal/types"
"github.com/aquasecurity/trivy/pkg/types"
)
func TestScanner_Detect(t *testing.T) {
type args struct {
osVer string
pkgs []ftypes.Package
}
tests := []struct {
name string
args args
fixtures []string
want []types.DetectedVulnerability
wantErr string
}{
{
name: "happy path",
fixtures: []string{"testdata/fixtures/photon.yaml", "testdata/fixtures/data-source.yaml"},
args: args{
osVer: "1.0",
pkgs: []ftypes.Package{
{
Name: "PyYAML",
Version: "3.12",
Release: "4.ph1",
SrcName: "PyYAML",
SrcVersion: "3.12",
SrcRelease: "4.ph1",
Layer: ftypes.Layer{
DiffID: "sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02",
},
},
},
}, | want: []types.DetectedVulnerability{
{
PkgName: "PyYAML",
VulnerabilityID: "CVE-2020-1747",
InstalledVersion: "3.12-4.ph1",
FixedVersion: "3.12-5.ph1",
Layer: ftypes.Layer{
DiffID: "sha256:932da51564135c98a49a34a193d6cd363d8fa4184d957fde16c9d8527b3f3b02",
},
DataSource: &dbTypes.DataSource{
ID: vulnerability.Photon,
Name: "Photon OS CVE metadata",
URL: "https://packages.vmware.com/photon/photon_cve_metadata/",
},
},
},
},
{
name: "invalid bucket",
fixtures: []string{"testdata/fixtures/invalid.yaml", "testdata/fixtures/data-source.yaml"},
args: args{
osVer: "1.0",
pkgs: []ftypes.Package{
{
Name: "PyYAML",
Version: "3.12",
SrcName: "PyYAML",
SrcVersion: "3.12",
},
},
},
wantErr: "failed to get Photon advisories",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_ = dbtest.InitDB(t, tt.fixtures)
defer db.Close()
s := photon.NewScanner()
got, err := s.Detect(tt.args.osVer, nil, tt.args.pkgs)
if tt.wantErr != "" {
require.Error(t, err)
assert.Contains(t, err.Error(), tt.wantErr)
return
}
assert.NoError(t, err)
assert.Equal(t, tt.want, got)
})
}
}
func TestScanner_IsSupportedVersion(t *testing.T) {
type args struct {
osFamily string
osVer string
}
tests := []struct {
name string
now time.Time
args args
want bool
}{
{
name: "photon 1.0",
now: time.Date(2022, 1, 31, 23, 59, 59, 0, time.UTC),
args: args{
osFamily: "photon",
osVer: "1.0",
},
want: true,
},
{
name: "photon 1.0 EOL",
now: time.Date(2022, 3, 31, 23, 59, 59, 0, time.UTC),
args: args{
osFamily: "photon",
osVer: "1.0",
},
want: false,
},
{
name: "unknown",
now: time.Date(2022, 1, 31, 23, 59, 59, 0, time.UTC),
args: args{
osFamily: "photon",
osVer: "unknown",
},
want: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
s := photon.NewScanner(photon.WithClock(fake.NewFakeClock(tt.now)))
got := s.IsSupportedVersion(tt.args.osFamily, tt.args.osVer)
assert.Equal(t, tt.want, got)
})
}
} | |
aws035_test.go | package test
import (
"testing"
"github.com/aquasecurity/tfsec/internal/app/tfsec/rules"
)
func Test_AWSUnencryptedAtRestElasticacheReplicationGroup(t *testing.T) | {
var tests = []struct {
name string
source string
mustIncludeResultCode string
mustExcludeResultCode string
}{
{
name: "check aws_elasticache_replication_group missing at_rest_encryption_enabled",
source: `
resource "aws_elasticache_replication_group" "my-resource" {
replication_group_id = "foo"
replication_group_description = "my foo cluster"
}`,
mustIncludeResultCode: rules.AWSUnencryptedAtRestElasticacheReplicationGroup,
},
{
name: "check aws_elasticache_replication_group with at_rest_encryption_enabled",
source: `
resource "aws_elasticache_replication_group" "my-resource" {
replication_group_id = "foo"
replication_group_description = "my foo cluster"
at_rest_encryption_enabled = true
}`,
mustExcludeResultCode: rules.AWSUnencryptedAtRestElasticacheReplicationGroup,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
results := scanHCL(test.source, t)
assertCheckCode(t, test.mustIncludeResultCode, test.mustExcludeResultCode, results)
})
}
} |
|
html_tags.rs | //! Detects invalid HTML (like an unclosed `<span>`) in doc comments.
use super::Pass;
use crate::clean::*;
use crate::core::DocContext;
use crate::html::markdown::main_body_opts;
use crate::visit::DocVisitor;
use pulldown_cmark::{Event, Parser, Tag};
use std::iter::Peekable;
use std::ops::Range;
use std::str::CharIndices;
crate const CHECK_INVALID_HTML_TAGS: Pass = Pass {
name: "check-invalid-html-tags",
run: check_invalid_html_tags,
description: "detects invalid HTML tags in doc comments",
};
struct InvalidHtmlTagsLinter<'a, 'tcx> {
cx: &'a mut DocContext<'tcx>,
}
crate fn check_invalid_html_tags(krate: Crate, cx: &mut DocContext<'_>) -> Crate {
if cx.tcx.sess.is_nightly_build() {
let mut coll = InvalidHtmlTagsLinter { cx };
coll.visit_crate(&krate);
}
krate
}
const ALLOWED_UNCLOSED: &[&str] = &[
"area", "base", "br", "col", "embed", "hr", "img", "input", "keygen", "link", "meta", "param",
"source", "track", "wbr",
];
fn | (
tags: &mut Vec<(String, Range<usize>)>,
tag_name: String,
range: Range<usize>,
f: &impl Fn(&str, &Range<usize>),
) {
let tag_name_low = tag_name.to_lowercase();
if let Some(pos) = tags.iter().rposition(|(t, _)| t.to_lowercase() == tag_name_low) {
// If the tag is nested inside a "<script>" or a "<style>" tag, no warning should
// be emitted.
let should_not_warn = tags.iter().take(pos + 1).any(|(at, _)| {
let at = at.to_lowercase();
at == "script" || at == "style"
});
for (last_tag_name, last_tag_span) in tags.drain(pos + 1..) {
if should_not_warn {
continue;
}
let last_tag_name_low = last_tag_name.to_lowercase();
if ALLOWED_UNCLOSED.contains(&last_tag_name_low.as_str()) {
continue;
}
// `tags` is used as a queue, meaning that everything after `pos` is included inside it.
// So `<h2><h3></h2>` will look like `["h2", "h3"]`. So when closing `h2`, we will still
// have `h3`, meaning the tag wasn't closed as it should have been.
f(&format!("unclosed HTML tag `{}`", last_tag_name), &last_tag_span);
}
// Remove the `tag_name` that was originally closed
tags.pop();
} else {
// It can happen for example in this case: `<h2></script></h2>` (the `h2` tag isn't required
// but it helps for the visualization).
f(&format!("unopened HTML tag `{}`", tag_name), &range);
}
}
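// For illustration: given doc text `<h2><h3></h2>`, `tags` holds [("h2", ..), ("h3", ..)] when the
// closing `</h2>` is parsed; drop_tag then drains and reports "unclosed HTML tag `h3`" and pops `h2`.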
fn extract_html_tag(
tags: &mut Vec<(String, Range<usize>)>,
text: &str,
range: &Range<usize>,
start_pos: usize,
iter: &mut Peekable<CharIndices<'_>>,
f: &impl Fn(&str, &Range<usize>),
) {
let mut tag_name = String::new();
let mut is_closing = false;
let mut prev_pos = start_pos;
loop {
let (pos, c) = match iter.peek() {
Some((pos, c)) => (*pos, *c),
// In case we reached the end of the doc comment, we want to check that it's an
// unclosed HTML tag. For example "/// <h3".
None => (prev_pos, '\0'),
};
prev_pos = pos;
// Checking if this is a closing tag (like `</a>` for `<a>`).
if c == '/' && tag_name.is_empty() {
is_closing = true;
} else if c.is_ascii_alphanumeric() {
tag_name.push(c);
} else {
if !tag_name.is_empty() {
let mut r = Range { start: range.start + start_pos, end: range.start + pos };
if c == '>' {
// In case we have a tag without attribute, we can consider the span to
// refer to it fully.
r.end += 1;
}
if is_closing {
// In case we have "</div >" or even "</div    >" (extra whitespace before the '>').
if c != '>' {
if !c.is_whitespace() {
// It seems like it's not a valid HTML tag.
break;
}
let mut found = false;
for (new_pos, c) in text[pos..].char_indices() {
if !c.is_whitespace() {
if c == '>' {
r.end = range.start + new_pos + 1;
found = true;
}
break;
}
}
if !found {
break;
}
}
drop_tag(tags, tag_name, r, f);
} else {
tags.push((tag_name, r));
}
}
break;
}
iter.next();
}
}
fn extract_tags(
tags: &mut Vec<(String, Range<usize>)>,
text: &str,
range: Range<usize>,
is_in_comment: &mut Option<Range<usize>>,
f: &impl Fn(&str, &Range<usize>),
) {
let mut iter = text.char_indices().peekable();
while let Some((start_pos, c)) = iter.next() {
if is_in_comment.is_some() {
if text[start_pos..].starts_with("-->") {
*is_in_comment = None;
}
} else if c == '<' {
if text[start_pos..].starts_with("<!--") {
// We skip the "!--" part. (Once `advance_by` is stable, might be nice to use it!)
iter.next();
iter.next();
iter.next();
*is_in_comment = Some(Range {
start: range.start + start_pos,
end: range.start + start_pos + 3,
});
} else {
extract_html_tag(tags, text, &range, start_pos, &mut iter, f);
}
}
}
}
impl<'a, 'tcx> DocVisitor for InvalidHtmlTagsLinter<'a, 'tcx> {
fn visit_item(&mut self, item: &Item) {
let tcx = self.cx.tcx;
let hir_id = match DocContext::as_local_hir_id(tcx, item.def_id) {
Some(hir_id) => hir_id,
None => {
// If non-local, no need to check anything.
return;
}
};
let dox = item.attrs.collapsed_doc_value().unwrap_or_default();
if !dox.is_empty() {
let report_diag = |msg: &str, range: &Range<usize>| {
let sp = match super::source_span_for_markdown_range(tcx, &dox, range, &item.attrs)
{
Some(sp) => sp,
None => item.attr_span(tcx),
};
tcx.struct_span_lint_hir(crate::lint::INVALID_HTML_TAGS, hir_id, sp, |lint| {
lint.build(msg).emit()
});
};
let mut tags = Vec::new();
let mut is_in_comment = None;
let mut in_code_block = false;
let p = Parser::new_ext(&dox, main_body_opts()).into_offset_iter();
for (event, range) in p {
match event {
Event::Start(Tag::CodeBlock(_)) => in_code_block = true,
Event::Html(text) | Event::Text(text) if !in_code_block => {
extract_tags(&mut tags, &text, range, &mut is_in_comment, &report_diag)
}
Event::End(Tag::CodeBlock(_)) => in_code_block = false,
_ => {}
}
}
for (tag, range) in tags.iter().filter(|(t, _)| {
let t = t.to_lowercase();
!ALLOWED_UNCLOSED.contains(&t.as_str())
}) {
report_diag(&format!("unclosed HTML tag `{}`", tag), range);
}
if let Some(range) = is_in_comment {
report_diag("Unclosed HTML comment", &range);
}
}
self.visit_item_recur(item)
}
}
| drop_tag |
ping.js | // Again, thanks to [The Perfect Lil' Bot](https://gist.github.com/eslachance/3349734a98d30011bb202f47342601d3)
module.exports = async (client, message, command, args) => { | }; | // Calculates ping between sending a message and editing it, giving a nice round-trip latency.
// The second ping is an average latency between the bot and the websocket server (one-way, not round-trip)
const m = await message.channel.send('Ping?');
m.edit(`Pong! Latency is ${m.createdTimestamp - message.createdTimestamp}ms. API Latency is ${Math.round(client.ping)}ms`); |
train_pg.py | import numpy as np
import tensorflow as tf
import gym
import logz
import scipy.signal
import os
import time
import inspect
from multiprocessing import Process
#============================================================================================#
# Utilities
#============================================================================================#
def build_mlp(
input_placeholder,
output_size,
scope,
n_layers=2,
size=64,
activation=tf.tanh,
output_activation=None
):
#========================================================================================#
# ----------SECTION 3----------
# Network building
#
# Your code should make a feedforward neural network (also called a multilayer perceptron)
# with 'n_layers' hidden layers of size 'size' units.
#
# The output layer should have size 'output_size' and activation 'output_activation'.
#
# Hint: use tf.layers.dense
#========================================================================================#
with tf.variable_scope(scope):
# MY_CODE_HERE
hidden = input_placeholder
for i in range(n_layers):
hidden = tf.layers.dense(hidden, size, activation, name='blah' + str(i))
return tf.layers.dense(hidden, output_size, output_activation)
def | (path):
return len(path["reward"])
def reward_to_q(rewards, gamma, reward_to_go):
q = np.zeros_like(rewards)
T = len(rewards)
if reward_to_go:
q += rewards
for i in range(1, T):
q[:(T - i)] += gamma * q[i:T]
else:
r = 0
for i in range(T - 1, -1, -1):
r = rewards[i] + gamma * r
q = r * np.ones_like(q)
return q
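# A small worked example of the two cases above (illustrative numbers, easy to check by hand):
#   reward_to_q([1.0, 2.0, 3.0], gamma=0.5, reward_to_go=True)  -> [2.75, 3.5, 3.0]  (as a numpy array)
#     (q_t = sum_{t'>=t} gamma^(t'-t) * r_t')
#   reward_to_q([1.0, 2.0, 3.0], gamma=0.5, reward_to_go=False) -> [2.75, 2.75, 2.75]
#     (every entry is the full discounted return of the trajectory)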
#============================================================================================#
# Policy Gradient
#============================================================================================#
# batch_size is more natural for PG as we need to take average over paths.
# timesteps_per_batch is more relevant for Q-learning as learning is done step by step.
# CartPole
# Here is a good run
# python train_pg.py CartPole-v0 --n_layers 4 --target_reward 200 --learning_rate 1e-2 --nn_baseline --batch_size 10
# ********** Iteration 8 ************
# total trials: 90
# ----------------------------------------
# | Time | 31.1 |
# | Iteration | 8 |
# | AverageReturn | 200 |
# | StdReturn | 0 |
# | MaxReturn | 200 |
# | MinReturn | 200 |
# | EpLenMean | 200 |
# | EpLenStd | 0 |
# | TimestepsThisBatch | 2e+03 |
# | TimestepsSoFar | 1.15e+04 |
# ----------------------------------------
#
# MountainCar
# Working poorly. It seems some good exploration is needed to get any positive path.
#
# Acrobot
# Similar to MountainCar, but it is possible to randomly get a positive path,
# and then the model starts to learn.
# I can get to about 90 steps. What is the "solve" criterion?
# https://github.com/jonholifield/Acrobot-v1
# Box2D
# https://github.com/pybox2d/pybox2d/blob/master/INSTALL.md
# 'sudo' python setup.py install: should not use sudo in venv, it complains about setuptools not found
# LunarLander
# It does not do that well but works to some extent.
def train_PG(exp_name='',
env_name='CartPole-v0',
n_iter=100,
gamma=1.0,
# min_timesteps_per_batch=1000,
batch_size=20,
max_path_length=None,
learning_rate=5e-3,
reward_to_go=True,
animate=True,
logdir=None,
normalize_advantages=True,
nn_baseline=False,
seed=0,
# network arguments
n_layers=1,
size=32,
target_reward=None
):
start = time.time()
TODO = 1
# Configure output directory for logging
logz.configure_output_dir(logdir)
# Log experimental parameters
args = inspect.getargspec(train_PG)[0]
locals_ = locals()
params = {k: locals_[k] if k in locals_ else None for k in args}
logz.save_params(params)
# Set random seeds
tf.set_random_seed(seed)
np.random.seed(seed)
# Make the gym environment
env = gym.make(env_name)
# Is this env continuous, or discrete?
discrete = isinstance(env.action_space, gym.spaces.Discrete)
assert discrete, 'only discrete is implemented'
# Maximum length for episodes
max_path_length = max_path_length or env.spec.max_episode_steps
#========================================================================================#
# Notes on notation:
#
# Symbolic variables have the prefix sy_, to distinguish them from the numerical values
# that are computed later in the function
#
# Prefixes and suffixes:
# ob - observation
# ac - action
# _no - this tensor should have shape (batch size /n/, observation dim)
# _na - this tensor should have shape (batch size /n/, action dim)
# _n - this tensor should have shape (batch size /n/)
#
# Note: batch size /n/ is defined at runtime, and until then, the shape for that axis
# is None
#========================================================================================#
# Observation and action sizes
ob_dim = env.observation_space.shape[0]
ac_dim = env.action_space.n if discrete else env.action_space.shape[0]
#========================================================================================#
# ----------SECTION 4----------
# Placeholders
#
# Need these for batch observations / actions / advantages in policy gradient loss function.
#========================================================================================#
sy_ob_no = tf.placeholder(shape=[None, ob_dim], name="ob", dtype=tf.float32)
if discrete:
sy_ac_na = tf.placeholder(shape=[None], name="ac", dtype=tf.int32)
else:
sy_ac_na = tf.placeholder(shape=[None, ac_dim], name="ac", dtype=tf.float32)
# Define a placeholder for advantages
sy_adv_n = tf.placeholder(shape=[None], name="adv", dtype=tf.float32)
#========================================================================================#
# ----------SECTION 4----------
# Networks
#
# Make symbolic operations for
# 1. Policy network outputs which describe the policy distribution.
# a. For the discrete case, just logits for each action.
#
# b. For the continuous case, the mean / log std of a Gaussian distribution over
# actions.
#
# Hint: use the 'build_mlp' function you defined in utilities.
#
# Note: these ops should be functions of the placeholder 'sy_ob_no'
#
# 2. Producing samples stochastically from the policy distribution.
# a. For the discrete case, an op that takes in logits and produces actions.
#
# Should have shape [None]
#
# b. For the continuous case, use the reparameterization trick:
# The output from a Gaussian distribution with mean 'mu' and std 'sigma' is
#
# mu + sigma * z, z ~ N(0, I)
#
# This reduces the problem to just sampling z. (Hint: use tf.random_normal!)
#
# Should have shape [None, ac_dim]
#
# Note: these ops should be functions of the policy network output ops.
#
# 3. Computing the log probability of a set of actions that were actually taken,
# according to the policy.
#
# Note: these ops should be functions of the placeholder 'sy_ac_na', and the
# policy network output ops.
#
#========================================================================================#
if discrete:
# MY_CODE_HERE
sy_logits_na = build_mlp(
sy_ob_no,
ac_dim,
"nn_policy",
n_layers=n_layers,
size=size)
sy_sampled_ac = tf.multinomial(sy_logits_na, 1) # Hint: Use the tf.multinomial op
sy_logprob_n = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=sy_logits_na, labels=sy_ac_na)
else:
# YOUR_CODE_HERE
sy_mean = TODO
sy_logstd = TODO # logstd should just be a trainable variable, not a network output.
sy_sampled_ac = TODO
sy_logprob_n = TODO # Hint: Use the log probability under a multivariate gaussian.
#========================================================================================#
# ----------SECTION 4----------
# Loss Function and Training Operation
#========================================================================================#
# MY_CODE_HERE
# Loss function that we'll differentiate to get the policy gradient.
# TODO: reduce_mean is not really correct here
loss = tf.reduce_mean(sy_logprob_n * sy_adv_n)
update_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
#========================================================================================#
# ----------SECTION 5----------
# Optional Baseline
#========================================================================================#
if nn_baseline:
baseline_prediction = tf.squeeze(build_mlp(
sy_ob_no,
1,
"nn_baseline",
n_layers=n_layers,
size=size))
# Define placeholders for targets, a loss function and an update op for fitting a
# neural network baseline. These will be used to fit the neural network baseline.
# MY_CODE_HERE
sy_q_n = tf.placeholder(shape=[None], name='q', dtype=tf.float32)
baseline_loss = tf.nn.l2_loss(baseline_prediction - sy_q_n)
baseline_update_op = tf.train.AdamOptimizer(learning_rate).minimize(baseline_loss)
#========================================================================================#
# Tensorflow Engineering: Config, Session, Variable initialization
#========================================================================================#
tf_config = tf.ConfigProto(inter_op_parallelism_threads=1, intra_op_parallelism_threads=1)
sess = tf.Session(config=tf_config)
sess.__enter__() # equivalent to `with sess:`
tf.global_variables_initializer().run() #pylint: disable=E1101
tf_board = os.path.join('/tmp/gube/hw2')
writer = tf.summary.FileWriter(os.path.join(tf_board, str(int(time.time()))))
writer.add_graph(sess.graph)
merged_summary = tf.summary.merge_all()
#========================================================================================#
# Training Loop
#========================================================================================#
total_timesteps = 0
total_trials = 0
for itr in range(n_iter):
print("********** Iteration %i ************"%itr)
# Collect paths until we have enough timesteps
timesteps_this_batch = 0
trials_this_batch = 0
paths = []
while True:
ob = env.reset()
obs, acs, rewards = [], [], []
animate_this_episode=(len(paths)==0 and (itr % 5 == 0) and animate)
steps = 0
while True:
if animate_this_episode:
env.render()
time.sleep(0.05)
obs.append(ob)
ac = sess.run(sy_sampled_ac, feed_dict={sy_ob_no : ob[None]})
ac = ac[0][0] # was ac[0]
acs.append(ac)
ob, rew, done, _ = env.step(ac)
rewards.append(rew)
steps += 1
if done or steps > max_path_length:
break
total_trials += 1
trials_this_batch += 1
path = {"observation" : np.array(obs),
"reward" : np.array(rewards),
"action" : np.array(acs)}
paths.append(path)
timesteps_this_batch += pathlength(path)
# if timesteps_this_batch > min_timesteps_per_batch:
# break
if trials_this_batch == batch_size:
break
total_timesteps += timesteps_this_batch
print('total trials:', total_trials)
# Build arrays for observation, action for the policy gradient update by concatenating
# across paths
ob_no = np.concatenate([path["observation"] for path in paths])
ac_na = np.concatenate([path["action"] for path in paths])
#====================================================================================#
# ----------SECTION 4----------
# Computing Q-values
#
# Your code should construct numpy arrays for Q-values which will be used to compute
# advantages (which will in turn be fed to the placeholder you defined above).
#
# Recall that the expression for the policy gradient PG is
#
# PG = E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * (Q_t - b_t )]
#
# where
#
# tau=(s_0, a_0, ...) is a trajectory,
# Q_t is the Q-value at time t, Q^{pi}(s_t, a_t),
# and b_t is a baseline which may depend on s_t.
#
# You will write code for two cases, controlled by the flag 'reward_to_go':
#
# Case 1: trajectory-based PG
#
# (reward_to_go = False)
#
# Instead of Q^{pi}(s_t, a_t), we use the total discounted reward summed over
# entire trajectory (regardless of which time step the Q-value should be for).
#
# For this case, the policy gradient estimator is
#
# E_{tau} [sum_{t=0}^T grad log pi(a_t|s_t) * Ret(tau)]
#
# where
#
# Ret(tau) = sum_{t'=0}^T gamma^t' r_{t'}.
#
# Thus, you should compute
#
# Q_t = Ret(tau)
#
# Case 2: reward-to-go PG
#
# (reward_to_go = True)
#
# Here, you estimate Q^{pi}(s_t, a_t) by the discounted sum of rewards starting
# from time step t. Thus, you should compute
#
# Q_t = sum_{t'=t}^T gamma^(t'-t) * r_{t'}
#
#
# Store the Q-values for all timesteps and all trajectories in a variable 'q_n',
# like the 'ob_no' and 'ac_na' above.
#
#====================================================================================#
# MY_CODE_HERE
q_n = np.concatenate([reward_to_q(path['reward'], gamma, reward_to_go) for path in paths])
#====================================================================================#
# ----------SECTION 5----------
# Computing Baselines
#====================================================================================#
if nn_baseline:
# If nn_baseline is True, use your neural network to predict reward-to-go
# at each timestep for each trajectory, and save the result in a variable 'b_n'
# like 'ob_no', 'ac_na', and 'q_n'.
#
# Hint #bl1: rescale the output from the nn_baseline to match the statistics
# (mean and std) of the current or previous batch of Q-values. (Goes with Hint
# #bl2 below.)
# MY_CODE_HERE
# The bootstrap version uses r_t + v(s_{t+1}) - v(s_t), which is biased
b_n = sess.run(baseline_prediction, feed_dict={sy_ob_no: ob_no})
adv_n = q_n - b_n
else:
adv_n = q_n.copy()
#====================================================================================#
# ----------SECTION 4----------
# Advantage Normalization
#====================================================================================#
if normalize_advantages:
# On the next line, implement a trick which is known empirically to reduce variance
# in policy gradient methods: normalize adv_n to have mean zero and std=1.
# MY_CODE_HERE
adv_mu = np.mean(adv_n)
adv_std = np.std(adv_n)
# Could be more robust than this
if adv_std == 0.0:
return
# The normalization could be problematic.
# For environments like CartPole, the reward is an integer and is capped at 200.
# When not using a baseline, adv_n could be all 200 and adv_std = 0.
adv_n = (adv_n - adv_mu) / adv_std
#====================================================================================#
# ----------SECTION 5----------
# Optimizing Neural Network Baseline
#====================================================================================#
if nn_baseline:
# ----------SECTION 5----------
# If a neural network baseline is used, set up the targets and the inputs for the
# baseline.
#
# Fit it to the current batch in order to use for the next iteration. Use the
# baseline_update_op you defined earlier.
#
# Hint #bl2: Instead of trying to target raw Q-values directly, rescale the
# targets to have mean zero and std=1. (Goes with Hint #bl1 above.)
# MY_CODE_HERE
# TODO: what is the right way to fit?
# 1. Using fixed number of steps.
# It might not balance the good vs bad paths well, but 100 seems pretty good.
# 2. Using timesteps as number of steps. This is CartPole specific.
print('timesteps:', timesteps_this_batch)
for i in range(100):
sess.run(baseline_update_op, feed_dict={sy_ob_no: ob_no, sy_q_n: q_n})
#====================================================================================#
# ----------SECTION 4----------
# Performing the Policy Update
#====================================================================================#
# Call the update operation necessary to perform the policy gradient update based on
# the current batch of rollouts.
#
# For debug purposes, you may wish to save the value of the loss function before
# and after an update, and then log them below.
# MY_CODE_HERE
sess.run(update_op, feed_dict={sy_ob_no: ob_no,
sy_ac_na: ac_na,
sy_adv_n: adv_n})
# Log diagnostics
returns = [path["reward"].sum() for path in paths]
ep_lengths = [pathlength(path) for path in paths]
logz.log_tabular("Time", time.time() - start)
logz.log_tabular("Iteration", itr)
logz.log_tabular("AverageReturn", np.mean(returns))
logz.log_tabular("StdReturn", np.std(returns))
logz.log_tabular("MaxReturn", np.max(returns))
logz.log_tabular("MinReturn", np.min(returns))
logz.log_tabular("EpLenMean", np.mean(ep_lengths))
logz.log_tabular("EpLenStd", np.std(ep_lengths))
logz.log_tabular("TimestepsThisBatch", timesteps_this_batch)
logz.log_tabular("TimestepsSoFar", total_timesteps)
logz.dump_tabular()
logz.pickle_tf_vars()
# This stopping criterion is not robust when the batch size is small.
if target_reward is not None:
if np.mean([path["reward"].sum() for path in paths]) >= target_reward:
return
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('env_name', type=str)
parser.add_argument('--exp_name', type=str, default='vpg')
parser.add_argument('--render', action='store_true')
parser.add_argument('--discount', type=float, default=1.0)
parser.add_argument('--n_iter', '-n', type=int, default=100)
parser.add_argument('--batch_size', '-b', type=int, default=1000)
parser.add_argument('--ep_len', '-ep', type=float, default=-1.)
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3)
parser.add_argument('--reward_to_go', '-rtg', action='store_true')
parser.add_argument('--dont_normalize_advantages', '-dna', action='store_true')
parser.add_argument('--nn_baseline', '-bl', action='store_true')
parser.add_argument('--seed', type=int, default=1)
parser.add_argument('--n_experiments', '-e', type=int, default=1)
parser.add_argument('--n_layers', '-l', type=int, default=1)
parser.add_argument('--size', '-s', type=int, default=32)
parser.add_argument('--target_reward', type=float, default=None)
args = parser.parse_args()
if not(os.path.exists('data')):
os.makedirs('data')
logdir = args.exp_name + '_' + args.env_name + '_' + time.strftime("%d-%m-%Y_%H-%M-%S")
logdir = os.path.join('data', logdir)
if not(os.path.exists(logdir)):
os.makedirs(logdir)
max_path_length = args.ep_len if args.ep_len > 0 else None
for e in range(args.n_experiments):
seed = args.seed + 10*e
print('Running experiment with seed %d'%seed)
def train_func():
train_PG(
exp_name=args.exp_name,
env_name=args.env_name,
n_iter=args.n_iter,
gamma=args.discount,
# min_timesteps_per_batch=args.batch_size,
batch_size=args.batch_size,
max_path_length=max_path_length,
learning_rate=args.learning_rate,
reward_to_go=args.reward_to_go,
animate=args.render,
logdir=os.path.join(logdir,'%d'%seed),
normalize_advantages=not(args.dont_normalize_advantages),
nn_baseline=args.nn_baseline,
seed=seed,
n_layers=args.n_layers,
size=args.size,
target_reward=args.target_reward
)
# Awkward hacky process runs, because Tensorflow does not like
# repeatedly calling train_PG in the same thread.
p = Process(target=train_func, args=tuple())
p.start()
p.join()
if __name__ == "__main__":
main()
| pathlength |
calc_galaxy_luminosity_function.py | #!/usr/bin/env python
#
# 20190222
# copied from "calc_stellar_mass_function.py", this code will superceed "calc_stellar_mass_function.py".
#
from __future__ import print_function
import os, sys, re, json, time, astropy
import numpy as np
from astropy.table import Table, Column, hstack
from copy import copy
from numpy import log, log10, power, sum, sqrt, pi, exp
pow = power
lg = log10
ln = log
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
if not (os.path.dirname(os.path.abspath(__file__)) in sys.path): sys.path.append(os.path.dirname(os.path.abspath(__file__)))
import apply_cosmology
cosmo = apply_cosmology.cosmo
if sys.version_info.major >= 3:
long = int
else:
pass
#
# def
#
def Schechter_Function_for_LF(L, L_character, Phi_character, alpha):
#
# Schechter (1976)
#
# Phi(L) dL = (Phi_*) * (L/L_*)**(alpha) * exp(-L/L_*) dL/L_*
#           = (Phi_*) * x**(alpha) * exp(-x) dx                          (with x = L/L_*)
#           = (Phi_*) * x**(alpha+1) * exp(-x) dlnx                      (since dx = x dlnx)
#           = (Phi_*) * x**(alpha+1) * exp(-x) * ln(10) dlgx             (since dlnx = ln(10) dlgx)
#           = (Phi_*) * 10**((lgL-lgL_*)*(alpha+1)) * exp(-10**(lgL-lgL_*)) * ln(10) dlgx
#           = (Our_Phi_Phi_Schechter) dlgx
#
#lgx = lgL-lg_L0
#Phi_Schechter = phi * (10**(lgx*(alpha+1))) * (np.exp(-10**lgx)) * ln(10) # per dex and already multiplied ln(10), so that its integral directly equals \int Phi(L) / L dL
#
Phi_Schechter = Phi_character * (L/L_character)**(alpha) * np.exp(-(L/L_character)) # Mpc-3 dex-1
#Phi_Schechter = Phi_Schechter * ln(10)
return Phi_Schechter
def Saunders_Function_for_LF(L, L_character, Phi_character, alpha, sigma):
# Saunders et al. (1990)
Phi_Saunders = Phi_character * (L/L_character)**(1-alpha) * np.exp(-1.0/(2.0*sigma**2) * (np.log10(1.0+(L/L_character)))**2 )
#print('Phi_character', Phi_character)
#print('(L/L_character)**(1-alpha)', (L/L_character)**(1-alpha))
#print('np.exp(-1.0/(2.0*sigma**2) * (np.log10(1.0+(L/L_character)))**2 )', np.exp(-1.0/(2.0*sigma**2) * (np.log10(1.0+(L/L_character)))**2 ))
#print('Phi_Saunders', Phi_Saunders)
return Phi_Saunders
#
# def
#
def | (z, lgL=None, galaxy_type = 'SFG'):
#
# Novak 2017 bibcode:2017A&A...602A...5N
# IMF: Chabrier 2003
# Saunders et al. (1990)
# Outputs: lgL_grid, lgPhi_grid
#
# check z
if not np.isscalar(z):
raise ValueError('Please input a float number as the redshift!')
#
# check galaxy_type
if not (type(galaxy_type) is str):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
else:
if not (galaxy_type in ['ALL', 'SFG', 'QG']):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
#
# make lgL_grid
if lgL is None:
lgL_grid = np.linspace(18.0, 25.0, num=1000, endpoint=True)
else:
lgL_grid = lgL
#
L_grid = 10**lgL_grid
#
# read LF parameters
L_character = 1.85e21 # * 1.4e9 / 3.839e25 # vLv(1.4GHz,rest) = W Hz-1 --> Lsun
Phi_character = 3.55e-3 # Mpc-3 dex-1
alpha = 1.22
sigma = 0.63
#
#Phi_z0 = Saunders_Function(L_grid, L_character, Phi_character, alpha, sigma)
#
# check z
LF_zmin = 0.0
LF_zmax = +np.inf
if z < LF_zmin or z > LF_zmax:
raise ValueError('calc_radio_LF_Novak2017: The input redshift is out of the allowed range of %s -- %s!'%(LF_zmin, LF_zmax))
#
# scale to z via pure luminosity evolution
alphaL = 3.16
betaL = -0.32
L_grid_z = (L_grid / ((1.0+z)**(alphaL+(z*betaL))))
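# Note: evaluating the z=0 Saunders LF at L / (1+z)**(alphaL + betaL*z) is equivalent to pure
# luminosity evolution of the characteristic luminosity,
# L_character(z) = L_character(0) * (1+z)**(alphaL + betaL*z), using the (alphaL, betaL) above.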
Phi = Saunders_Function_for_LF(L_grid_z, L_character, Phi_character, alpha, sigma)
lgPhi = np.log10(Phi)
#
if lgL is None:
return lgL_grid, lgPhi
else:
return lgPhi
def calc_IR_250um_LF_Koprowski2017(z, lgL=None, galaxy_type = 'SFG'):
#
# Koprowski 2017 bibcode:2017MNRAS.471.4155K
# IMF: Chabrier 2003
# Schechter (1976) function form
# Outputs: lgL_grid, lgPhi_grid
#
# check z
if not np.isscalar(z):
raise ValueError('Please input a float number as the redshift!')
#
# check galaxy_type
if not (type(galaxy_type) is str):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
else:
if not (galaxy_type in ['ALL', 'SFG', 'QG']):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
#
# make lgL_grid
if lgL is None:
lgL_grid = np.linspace(24.0, 27.0, num=1000, endpoint=True)
else:
lgL_grid = lgL
#
L_grid = 10**lgL_grid
#
# read LF parameters
table_z_lower = [0.5, 1.5, 2.5, 3.5]
table_z_upper = [1.5, 2.5, 3.5, 4.5]
table_lgL_character = [25.20, 25.40, 25.63, 25.84] # W Hz-1
table_lgPhi_character = [-2.88, -3.03, -3.73, -4.59] # Mpc-3 dex-1
alpha = -0.4
#
# check z
LF_zmin = table_z_lower[0]
LF_zmax = table_z_upper[-1]
if z < LF_zmin or z > LF_zmax:
raise ValueError('calc_IR_250um_LF_Koprowski2017: The input redshift is out of the allowed range of %s -- %s!'%(LF_zmin, LF_zmax))
#
# scale to z (using step function... <TODO>)
Phi = None
lgPhi = None
for i in range(len(table_z_upper)):
if z >= table_z_lower[i] and z <= table_z_upper[i]:
L_character = 10**(table_lgL_character[i])
Phi_character = 10**(table_lgPhi_character[i])
Phi = Schechter_Function_for_LF(L_grid, L_character, Phi_character, alpha)
lgPhi = np.log10(Phi)
break
#
if lgL is None:
return lgL_grid, lgPhi
else:
return lgPhi
def calc_IR_LF_Gruppioni2013(z, lgL=None, galaxy_type = 'SFG'):
#
# Gruppioni 2013 bibcode:
# IMF: Chabrier 2003
# H0 = 71 km s−1 Mpc−1, Ωm = 0.27, and ΩΛ = 0.73.
# Saunders et al. (1990)
# Outputs: lgL_grid, lgPhi_grid
#
# check z
if not np.isscalar(z):
raise ValueError('Please input a float number as the redshift!')
#
# check galaxy_type
if not (type(galaxy_type) is str):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
else:
if not (galaxy_type in ['ALL', 'SFG', 'QG']):
raise ValueError('Please input either "ALL", "SFG" or "QG" as the galaxy_type!')
#
# make lgL_grid
if lgL is None:
lgL_grid = np.linspace(8.0, 14.0, num=1000, endpoint=True)
else:
lgL_grid = lgL
#
L_grid = 10**lgL_grid
#
# read LF parameters (their Table 7)
table_data = [ [0.0 , 0.3 , 1.15, 0.52, 10.12, -2.29],
[0.3 , 0.45, 1.2 , 0.5 , 10.41, -2.31],
[0.45, 0.6 , 1.2 , 0.5 , 10.55, -2.35],
[0.6 , 0.8 , 1.2 , 0.5 , 10.71, -2.35],
[0.8 , 1.0 , 1.2 , 0.5 , 10.97, -2.40],
[1.0 , 1.2 , 1.2 , 0.5 , 11.13, -2.43],
[1.2 , 1.7 , 1.2 , 0.5 , 11.37, -2.70],
[1.7 , 2.0 , 1.2 , 0.5 , 11.50, -3.00],
[2.0 , 2.5 , 1.2 , 0.5 , 11.60, -3.01],
[2.5 , 3.0 , 1.2 , 0.5 , 11.92, -3.27],
[3.0 , 4.2 , 1.2 , 0.5 , 11.90, -3.74] ]
table_data = np.array(table_data).T
table_z_lower = table_data[0]
table_z_upper = table_data[1]
table_alpha = table_data[2]
table_sigma = table_data[3]
table_lgL_character = table_data[4] # Lsun
table_lgPhi_character = table_data[5] # Mpc-3 dex-1
#
# check z
LF_zmin = table_z_lower[0]
LF_zmax = table_z_upper[-1]
if z < LF_zmin or z > LF_zmax:
raise ValueError('calc_IR_LF_Gruppioni2013: The input redshift is out of the allowed range of %s -- %s!'%(LF_zmin, LF_zmax))
#
# scale to z (using step function... <TODO>)
Phi = None
lgPhi = None
for i in range(len(table_z_upper)):
if z >= table_z_lower[i] and z <= table_z_upper[i]:
L_character = 10**(table_lgL_character[i])
Phi_character = 10**(table_lgPhi_character[i])
alpha = table_alpha[i]
sigma = table_sigma[i]
Phi = Saunders_Function_for_LF(L_grid, L_character, Phi_character, alpha, sigma)
lgPhi = np.log10(Phi)
break
#
if lgL is None:
return lgL_grid, lgPhi
else:
return lgPhi
| calc_radio_LF_Novak2017 |
mod.rs | use std::collections::HashSet;
use abstio::MapName;
use abstutil::{Parallelism, Tags, Timer};
use geom::{Distance, Pt2D};
use map_gui::load::MapLoader;
use map_gui::options::OptionsPanel;
use map_gui::render::{calculate_corners, DrawMap, DrawOptions};
use map_gui::tools::{ChooseSomething, PopupMsg, PromptInput};
use map_gui::ID;
use map_model::{osm, ControlTrafficSignal, IntersectionID, NORMAL_LANE_THICKNESS};
use sim::Sim;
use widgetry::{
lctrl, Btn, Cached, Checkbox, Choice, Color, DrawBaselayer, Drawable, EventCtx, GeomBatch,
GfxCtx, HorizontalAlignment, Key, Line, Outcome, Panel, ScreenDims, State, Text, UpdateType,
VerticalAlignment, Widget,
};
use crate::app::{App, ShowLayers, ShowObject, Transition};
use crate::common::{tool_panel, CommonState};
use crate::info::ContextualActions;
use crate::sandbox::GameplayMode;
mod blocked_by;
mod floodfill;
mod objects;
pub mod path_counter;
mod polygons;
pub mod shared_row;
pub mod streetmix;
pub struct DebugMode {
panel: Panel,
common: CommonState,
tool_panel: Panel,
objects: objects::ObjectDebugger,
hidden: HashSet<ID>,
layers: ShowLayers,
search_results: Option<SearchResults>,
all_routes: Option<(usize, Drawable)>,
highlighted_agents: Cached<IntersectionID, Drawable>,
}
impl DebugMode {
pub fn | (ctx: &mut EventCtx) -> Box<dyn State<App>> {
Box::new(DebugMode {
panel: Panel::new(Widget::col(vec![
Widget::row(vec![
Line("Debug Mode").small_heading().draw(ctx),
Btn::close(ctx),
]),
Text::new().draw(ctx).named("current info"),
Checkbox::switch(ctx, "show buildings", Key::Num1, true),
Checkbox::switch(ctx, "show intersections", Key::Num2, true),
Checkbox::switch(ctx, "show lanes", Key::Num3, true),
Checkbox::switch(ctx, "show areas", Key::Num4, true),
Checkbox::switch(ctx, "show labels", Key::Num5, false),
Checkbox::switch(ctx, "show route for all agents", Key::R, false),
Widget::col(vec![
Btn::text_fg("unhide everything").build_def(ctx, lctrl(Key::H)),
Btn::text_fg("screenshot everything (for leaflet)").build_def(ctx, None),
Btn::text_fg("screenshot all of the everything").build_def(ctx, None),
Btn::text_fg("search OSM metadata").build_def(ctx, Key::Slash),
Btn::text_fg("clear OSM search results").build_def(ctx, lctrl(Key::Slash)),
Btn::text_fg("save sim state").build_def(ctx, Key::O),
Btn::text_fg("load previous sim state").build_def(ctx, Key::Y),
Btn::text_fg("load next sim state").build_def(ctx, Key::U),
Btn::text_fg("pick a savestate to load").build_def(ctx, None),
Btn::text_fg("find bad traffic signals").build_def(ctx, None),
Btn::text_fg("find degenerate roads").build_def(ctx, None),
Btn::text_fg("find large intersections").build_def(ctx, None),
Btn::text_fg("sim internal stats").build_def(ctx, None),
Btn::text_fg("blocked-by graph").build_def(ctx, Key::B),
Btn::text_fg("render to GeoJSON").build_def(ctx, Key::G),
]),
Text::from_all(vec![
Line("Hold "),
Key::LeftControl.txt(ctx),
Line(" to show position"),
])
.draw(ctx),
]))
.aligned(HorizontalAlignment::Right, VerticalAlignment::Top)
.build(ctx),
common: CommonState::new(),
tool_panel: tool_panel(ctx),
objects: objects::ObjectDebugger,
hidden: HashSet::new(),
layers: ShowLayers::new(),
search_results: None,
all_routes: None,
highlighted_agents: Cached::new(),
})
}
fn reset_info(&mut self, ctx: &mut EventCtx) {
let mut txt = Text::new();
if !self.hidden.is_empty() {
txt.add(Line(format!("Hiding {} things", self.hidden.len())));
}
if let Some(ref results) = self.search_results {
txt.add(Line(format!(
"Search for {} has {} results",
results.query, results.num_matches
)));
}
if let Some((n, _)) = self.all_routes {
txt.add(Line(format!(
"Showing {} routes",
abstutil::prettyprint_usize(n)
)));
}
self.panel.replace(ctx, "current info", txt.draw(ctx));
}
}
impl State<App> for DebugMode {
fn event(&mut self, ctx: &mut EventCtx, app: &mut App) -> Transition {
ctx.canvas_movement();
if ctx.redo_mouseover() {
app.primary.current_selection = app.mouseover_debug_mode(ctx, self);
}
match self.panel.event(ctx) {
Outcome::Clicked(x) => match x.as_ref() {
"close" => {
return Transition::Pop;
}
"save sim state" => {
ctx.loading_screen("savestate", |_, timer| {
timer.start("save sim state");
app.primary.sim.save();
timer.stop("save sim state");
});
}
"load previous sim state" => {
if let Some(t) =
ctx.loading_screen("load previous savestate", |ctx, mut timer| {
let prev_state = app
.primary
.sim
.find_previous_savestate(app.primary.sim.time());
match prev_state
.clone()
.and_then(|path| Sim::load_savestate(path, &mut timer).ok())
{
Some(new_sim) => {
app.primary.sim = new_sim;
app.recalculate_current_selection(ctx);
None
}
None => Some(Transition::Push(PopupMsg::new(
ctx,
"Error",
vec![format!(
"Couldn't load previous savestate {:?}",
prev_state
)],
))),
}
})
{
return t;
}
}
"load next sim state" => {
if let Some(t) = ctx.loading_screen("load next savestate", |ctx, mut timer| {
let next_state =
app.primary.sim.find_next_savestate(app.primary.sim.time());
match next_state
.clone()
.and_then(|path| Sim::load_savestate(path, &mut timer).ok())
{
Some(new_sim) => {
app.primary.sim = new_sim;
app.recalculate_current_selection(ctx);
None
}
None => Some(Transition::Push(PopupMsg::new(
ctx,
"Error",
vec![format!("Couldn't load next savestate {:?}", next_state)],
))),
}
}) {
return t;
}
}
"pick a savestate to load" => {
return Transition::Push(ChooseSomething::new(
ctx,
"Load which savestate?",
Choice::strings(abstio::list_all_objects(app.primary.sim.save_dir())),
Box::new(|ss, ctx, app| {
// TODO Oh no, we have to do path construction here :(
let ss_path = format!("{}/{}.bin", app.primary.sim.save_dir(), ss);
ctx.loading_screen("load savestate", |ctx, mut timer| {
app.primary.sim = Sim::load_savestate(ss_path, &mut timer)
.expect("Can't load savestate");
app.recalculate_current_selection(ctx);
});
Transition::Pop
}),
));
}
"unhide everything" => {
self.hidden.clear();
app.primary.current_selection = app.mouseover_debug_mode(ctx, self);
self.reset_info(ctx);
}
"search OSM metadata" => {
return Transition::Push(PromptInput::new(
ctx,
"Search for what?",
Box::new(search_osm),
));
}
"clear OSM search results" => {
self.search_results = None;
self.reset_info(ctx);
}
"screenshot everything (for leaflet)" => {
export_for_leaflet(ctx, app);
return Transition::Keep;
}
"screenshot all of the everything" => {
return Transition::Push(ScreenshotTest::new(
ctx,
app,
vec![
MapName::seattle("downtown"),
MapName::new("krakow", "center"),
MapName::seattle("lakeslice"),
MapName::seattle("montlake"),
MapName::new("london", "southbank"),
MapName::seattle("udistrict"),
],
));
}
"find bad traffic signals" => {
find_bad_signals(app);
}
"find degenerate roads" => {
find_degenerate_roads(app);
}
"find large intersections" => {
find_large_intersections(app);
}
"sim internal stats" => {
return Transition::Push(PopupMsg::new(
ctx,
"Simulation internal stats",
app.primary.sim.describe_internal_stats(),
));
}
"blocked-by graph" => {
return Transition::Push(blocked_by::Viewer::new(ctx, app));
}
"render to GeoJSON" => {
// TODO Loading screen doesn't actually display anything because of the rules
// around hiding the first few draws
ctx.loading_screen("render to GeoJSON", |ctx, timer| {
timer.start("render");
let batch = DrawMap::zoomed_batch(ctx, app);
let features = batch.to_geojson(Some(app.primary.map.get_gps_bounds()));
let geojson = geojson::GeoJson::from(geojson::FeatureCollection {
bbox: None,
features,
foreign_members: None,
});
abstio::write_json("rendered_map.json".to_string(), &geojson);
timer.stop("render");
});
}
_ => unreachable!(),
},
Outcome::Changed => {
// TODO We should really recalculate current_selection when these change. Meh.
self.layers.show_buildings = self.panel.is_checked("show buildings");
self.layers.show_intersections = self.panel.is_checked("show intersections");
self.layers.show_lanes = self.panel.is_checked("show lanes");
self.layers.show_areas = self.panel.is_checked("show areas");
self.layers.show_labels = self.panel.is_checked("show labels");
if self.panel.is_checked("show route for all agents") {
if self.all_routes.is_none() {
self.all_routes = Some(calc_all_routes(ctx, app));
self.reset_info(ctx);
}
} else {
if self.all_routes.is_some() {
self.all_routes = None;
self.reset_info(ctx);
}
}
}
_ => {}
}
self.highlighted_agents.update(
match app.primary.current_selection {
Some(ID::Intersection(i)) => Some(i),
_ => None,
},
|key| {
let mut batch = GeomBatch::new();
for (a, _) in app.primary.sim.get_accepted_agents(key) {
if let Some(obj) = app.primary.draw_map.get_obj(
ctx,
ID::from_agent(a),
app,
&mut app.primary.agents.borrow_mut(),
) {
batch.push(Color::PURPLE, obj.get_outline(&app.primary.map));
} else {
warn!(
"{} is accepted at or blocked by {:?}, but no longer exists",
a, key
);
}
}
ctx.upload(batch)
},
);
if let Some(t) = self.common.event(ctx, app, &mut Actions {}) {
return t;
}
match self.tool_panel.event(ctx) {
Outcome::Clicked(x) => match x.as_ref() {
"back" => Transition::Pop,
"settings" => Transition::Push(OptionsPanel::new(ctx, app)),
_ => unreachable!(),
},
_ => Transition::Keep,
}
}
fn draw_baselayer(&self) -> DrawBaselayer {
DrawBaselayer::Custom
}
fn draw(&self, g: &mut GfxCtx, app: &App) {
let mut opts = DrawOptions::new();
opts.label_buildings = self.layers.show_labels;
app.draw(g, opts, self);
if let Some(ref results) = self.search_results {
g.redraw(&results.draw);
}
if let Some(draw) = self.highlighted_agents.value() {
g.redraw(draw);
}
self.objects.draw(g, app);
if let Some((_, ref draw)) = self.all_routes {
g.redraw(draw);
}
if !g.is_screencap() {
self.panel.draw(g);
self.common.draw(g, app);
self.tool_panel.draw(g);
}
}
}
impl ShowObject for DebugMode {
fn show(&self, obj: &ID) -> bool {
if self.hidden.contains(obj) {
return false;
}
match obj {
ID::Road(_) | ID::Lane(_) => self.layers.show_lanes,
ID::Building(_) => self.layers.show_buildings,
ID::Intersection(_) => self.layers.show_intersections,
ID::Area(_) => self.layers.show_areas,
_ => true,
}
}
fn layers(&self) -> &ShowLayers {
&self.layers
}
}
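/// Highlights every road and area whose OSM tags contain the (case-sensitive) filter string and
/// stashes the result on DebugMode so it can be drawn and summarized.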
fn search_osm(filter: String, ctx: &mut EventCtx, app: &mut App) -> Transition {
let mut num_matches = 0;
let mut batch = GeomBatch::new();
// TODO Case insensitive
let map = &app.primary.map;
let color = Color::RED.alpha(0.8);
for r in map.all_roads() {
if r.osm_tags
.inner()
.iter()
.any(|(k, v)| format!("{} = {}", k, v).contains(&filter))
{
num_matches += 1;
batch.push(color, r.get_thick_polygon(map));
}
}
for a in map.all_areas() {
if a.osm_tags
.inner()
.iter()
.any(|(k, v)| format!("{} = {}", k, v).contains(&filter))
{
num_matches += 1;
batch.push(color, a.polygon.clone());
}
}
let results = SearchResults {
query: filter,
num_matches,
draw: batch.upload(ctx),
};
Transition::Multi(vec![
Transition::Pop,
Transition::ModifyState(Box::new(|state, ctx, _| {
let mut mode = state.downcast_mut::<DebugMode>().unwrap();
mode.search_results = Some(results);
mode.reset_info(ctx);
})),
])
}
struct SearchResults {
query: String,
num_matches: usize,
draw: Drawable,
}
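/// Traces the current route of every active agent in parallel, returning how many routes were
/// found along with the uploaded geometry for drawing them.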
fn calc_all_routes(ctx: &EventCtx, app: &mut App) -> (usize, Drawable) {
let agents = app.primary.sim.active_agents();
let mut batch = GeomBatch::new();
let mut cnt = 0;
let sim = &app.primary.sim;
let map = &app.primary.map;
for maybe_trace in Timer::new("calculate all routes").parallelize(
"route to geometry",
Parallelism::Fastest,
agents,
|id| {
sim.trace_route(id, map)
.map(|trace| trace.make_polygons(NORMAL_LANE_THICKNESS))
},
) {
if let Some(t) = maybe_trace {
cnt += 1;
batch.push(app.cs.route, t);
}
}
(cnt, ctx.upload(batch))
}
struct Actions;
impl ContextualActions for Actions {
fn actions(&self, app: &App, id: ID) -> Vec<(Key, String)> {
let mut actions = vec![(Key::D, "debug".to_string())];
match id {
ID::Lane(l) => {
actions.push((Key::H, "hide this".to_string()));
if app.primary.map.get_l(l).lane_type.supports_any_movement() {
actions.push((Key::F, "floodfill from this lane".to_string()));
actions.push((Key::S, "show strongly-connected components".to_string()));
}
actions.push((Key::X, "debug lane geometry".to_string()));
actions.push((Key::F2, "debug lane triangles geometry".to_string()));
actions.push((
Key::B,
"trace the block to the left of this road".to_string(),
));
}
ID::Intersection(i) => {
actions.push((Key::H, "hide this".to_string()));
actions.push((Key::X, "debug intersection geometry".to_string()));
actions.push((Key::F2, "debug sidewalk corners".to_string()));
if app.primary.map.get_i(i).roads.len() == 2 {
actions.push((Key::C, "collapse degenerate road?".to_string()));
}
}
ID::Car(_) => {
actions.push((Key::Backspace, "forcibly delete this car".to_string()));
}
ID::Area(_) => {
actions.push((Key::X, "debug area geometry".to_string()));
actions.push((Key::F2, "debug area triangles".to_string()));
}
ID::ParkingLot(_) => {
actions.push((Key::H, "hide this".to_string()));
}
ID::BusStop(_) => {
actions.push((Key::H, "hide this".to_string()));
}
_ => {}
}
actions
}
fn execute(
&mut self,
ctx: &mut EventCtx,
app: &mut App,
id: ID,
action: String,
close_info: &mut bool,
) -> Transition {
match (id, action.as_ref()) {
(id, "hide this") => Transition::ModifyState(Box::new(|state, ctx, app| {
let mode = state.downcast_mut::<DebugMode>().unwrap();
println!("Hiding {:?}", id);
app.primary.current_selection = None;
mode.hidden.insert(id);
mode.reset_info(ctx);
})),
(id, "debug") => {
*close_info = false;
objects::ObjectDebugger::dump_debug(id, &app.primary.map, &app.primary.sim);
Transition::Keep
}
(ID::Car(c), "forcibly delete this car") => {
app.primary.sim.delete_car(c, &app.primary.map);
app.primary
.sim
.tiny_step(&app.primary.map, &mut app.primary.sim_cb);
app.primary.current_selection = None;
Transition::Keep
}
(ID::Lane(l), "floodfill from this lane") => {
Transition::Push(floodfill::Floodfiller::floodfill(ctx, app, l))
}
(ID::Lane(l), "show strongly-connected components") => {
Transition::Push(floodfill::Floodfiller::scc(ctx, app, l))
}
(ID::Intersection(i), "debug intersection geometry") => {
let pts = app.primary.map.get_i(i).polygon.points();
let mut pts_without_last = pts.clone();
pts_without_last.pop();
Transition::Push(polygons::PolygonDebugger::new(
ctx,
"point",
pts.iter().map(|pt| polygons::Item::Point(*pt)).collect(),
Some(Pt2D::center(&pts_without_last)),
))
}
(ID::Intersection(i), "debug sidewalk corners") => {
Transition::Push(polygons::PolygonDebugger::new(
ctx,
"corner",
calculate_corners(app.primary.map.get_i(i), &app.primary.map)
.into_iter()
.map(|poly| polygons::Item::Polygon(poly))
.collect(),
None,
))
}
(ID::Intersection(i), "collapse degenerate road?") => {
let i = app.primary.map.get_i(i);
let (r1, r2) = {
let mut iter = i.roads.iter();
(*iter.next().unwrap(), *iter.next().unwrap())
};
diff_tags(
&app.primary.map.get_r(r1).osm_tags,
&app.primary.map.get_r(r2).osm_tags,
);
Transition::Keep
}
(ID::Lane(l), "debug lane geometry") => {
Transition::Push(polygons::PolygonDebugger::new(
ctx,
"point",
app.primary
.map
.get_l(l)
.lane_center_pts
.points()
.iter()
.map(|pt| polygons::Item::Point(*pt))
.collect(),
None,
))
}
(ID::Lane(l), "debug lane triangles geometry") => {
Transition::Push(polygons::PolygonDebugger::new(
ctx,
"triangle",
app.primary
.draw_map
.get_l(l)
.polygon
.triangles()
.into_iter()
.map(|tri| polygons::Item::Triangle(tri))
.collect(),
None,
))
}
(ID::Lane(l), "trace the block to the left of this road") => {
Transition::ModifyState(Box::new(move |state, ctx, app| {
let mut mode = state.downcast_mut::<DebugMode>().unwrap();
// Just abuse this to display the results
mode.search_results = app
.primary
.map
.get_l(l)
.trace_around_block(&app.primary.map)
.map(|(poly, _)| SearchResults {
query: format!("block around {}", l),
num_matches: 0,
draw: ctx.upload(GeomBatch::from(vec![(Color::RED, poly)])),
});
mode.reset_info(ctx);
}))
}
(ID::Area(a), "debug area geometry") => {
let pts = &app.primary.map.get_a(a).polygon.points();
let center = if pts[0] == *pts.last().unwrap() {
// TODO The center looks really wrong for Volunteer Park and others, but I
// think it's because they have many points along some edges.
Pt2D::center(&pts.iter().skip(1).cloned().collect())
} else {
Pt2D::center(pts)
};
Transition::Push(polygons::PolygonDebugger::new(
ctx,
"point",
pts.iter().map(|pt| polygons::Item::Point(*pt)).collect(),
Some(center),
))
}
(ID::Area(a), "debug area triangles") => {
Transition::Push(polygons::PolygonDebugger::new(
ctx,
"triangle",
app.primary
.map
.get_a(a)
.polygon
.triangles()
.into_iter()
.map(|tri| polygons::Item::Triangle(tri))
.collect(),
None,
))
}
_ => unreachable!(),
}
}
fn is_paused(&self) -> bool {
true
}
fn gameplay_mode(&self) -> GameplayMode {
// Hack so info panels can be opened in DebugMode
GameplayMode::FixTrafficSignals
}
}
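/// Reports traffic signals whose best generated policy is only a fallback ("stage per road" or
/// "arbitrary assignment"), then asks the brute-force solver to look for something better.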
fn find_bad_signals(app: &App) {
println!("Bad traffic signals:");
for i in app.primary.map.all_intersections() {
if i.is_traffic_signal() {
let first = &ControlTrafficSignal::get_possible_policies(&app.primary.map, i.id)[0].0;
if first == "stage per road" || first == "arbitrary assignment" {
println!("- {}", i.id);
ControlTrafficSignal::brute_force(&app.primary.map, i.id);
}
}
}
}
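/// Looks for intersections joining exactly two roads with matching z-order and lane
/// configuration; those are candidates for merging, so their differing OSM tags are printed.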
fn find_degenerate_roads(app: &App) {
let map = &app.primary.map;
for i in map.all_intersections() {
if i.roads.len() != 2 {
continue;
}
if i.turns.iter().any(|t| map.get_t(*t).between_sidewalks()) {
continue;
}
let (r1, r2) = {
let mut iter = i.roads.iter();
(*iter.next().unwrap(), *iter.next().unwrap())
};
let r1 = map.get_r(r1);
let r2 = map.get_r(r2);
if r1.zorder != r2.zorder {
continue;
}
if r1
.lanes_ltr()
.into_iter()
.map(|(_, dir, lt)| (dir, lt))
.collect::<Vec<_>>()
!= r2
.lanes_ltr()
.into_iter()
.map(|(_, dir, lt)| (dir, lt))
.collect::<Vec<_>>()
{
continue;
}
println!("Maybe merge {}", i.id);
diff_tags(&r1.osm_tags, &r2.osm_tags);
}
}
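/// Prints every OSM tag whose value differs between the two tag sets, ignoring the OSM way ID.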
fn diff_tags(t1: &Tags, t2: &Tags) {
for (k, v1) in t1.inner() {
if k == osm::OSM_WAY_ID {
continue;
}
let v2 = t2.get(k).cloned().unwrap_or_else(String::new);
if v1 != &v2 {
println!("- {} = \"{}\" vs \"{}\"", k, v1, v2);
}
}
for (k, v2) in t2.inner() {
if !t1.contains_key(k) {
println!("- {} = \"\" vs \"{}\"", k, v2);
}
}
}
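/// Flags intersections that contain any turn longer than 50 meters, printing each one once.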
fn find_large_intersections(app: &App) {
let mut seen = HashSet::new();
for t in app.primary.map.all_turns().values() {
if !seen.contains(&t.id.parent) && t.geom.length() > Distance::meters(50.0) {
println!("{} has a long turn", t.id.parent);
seen.insert(t.id.parent);
}
}
}
// Because of the slightly odd control flow needed to ask widgetry to ScreenCaptureEverything, a
// separate state is the easiest way to automatically screenshot multiple maps.
struct ScreenshotTest {
todo_maps: Vec<MapName>,
screenshot_done: bool,
orig_min_zoom: f64,
}
impl ScreenshotTest {
fn new(ctx: &mut EventCtx, app: &mut App, mut todo_maps: Vec<MapName>) -> Box<dyn State<App>> {
let orig_min_zoom = app.opts.min_zoom_for_detail;
app.opts.min_zoom_for_detail = 0.0;
MapLoader::new(
ctx,
app,
todo_maps.pop().unwrap(),
Box::new(move |_, _| {
Transition::Replace(Box::new(ScreenshotTest {
todo_maps,
screenshot_done: false,
orig_min_zoom,
}))
}),
)
}
}
impl State<App> for ScreenshotTest {
fn event(&mut self, ctx: &mut EventCtx, app: &mut App) -> Transition {
if self.screenshot_done {
if self.todo_maps.is_empty() {
app.opts.min_zoom_for_detail = self.orig_min_zoom;
Transition::Pop
} else {
Transition::Replace(ScreenshotTest::new(
ctx,
app,
self.todo_maps.drain(..).collect(),
))
}
} else {
self.screenshot_done = true;
let name = app.primary.map.get_name();
ctx.request_update(UpdateType::ScreenCaptureEverything {
dir: format!("screenshots/{}/{}", name.city, name.map),
zoom: 3.0,
dims: ctx.canvas.get_window_dims(),
leaflet_naming: false,
});
// TODO Sometimes this still gets stuck and needs a mouse wiggle for input event?
Transition::Keep
}
}
fn draw(&self, _: &mut GfxCtx, _: &App) {}
}
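/// Captures the whole map as 256x256 tiles at successive zoom levels, writing screenshots that
/// can be served as a Leaflet-style tile pyramid.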
fn export_for_leaflet(ctx: &mut EventCtx, app: &App) {
let name = app.primary.map.get_name();
let bounds = app.primary.map.get_bounds();
let map_length = bounds.width().max(bounds.height());
// At zoom level N, the entire map fits into (N + 1) * (N + 1) tiles
for zoom_level in 0..=25 {
let num_tiles = zoom_level + 1;
// How do we fit the entire map_length into this many tiles?
let zoom = 256.0 * (num_tiles as f64) / map_length;
ctx.request_update(UpdateType::ScreenCaptureEverything {
dir: format!("screenshots/{}/{}/{}", name.city, name.map, zoom_level),
zoom,
dims: ScreenDims::new(256.0, 256.0),
leaflet_naming: true,
});
}
}
| new |
main.rs | //! Entrypoint of InfluxDB IOx binary
#![deny(rust_2018_idioms)]
#![warn(
missing_copy_implementations,
missing_debug_implementations,
clippy::explicit_iter_loop,
clippy::use_self
)]
use clap::{crate_authors, crate_version, value_t, App, Arg, ArgMatches, SubCommand};
use ingest::parquet::writer::CompressionLevel;
use tokio::runtime::Runtime;
use tracing::{debug, error, info, warn};
mod panic;
pub mod server;
mod commands {
pub mod convert;
pub mod file_meta;
mod input;
pub mod server;
pub mod stats;
}
use panic::SendPanicsToTracing;
enum ReturnCode {
ConversionFailed = 1,
MetadataDumpFailed = 2,
StatsFailed = 3,
ServerExitedAbnormally = 4,
}
fn main() -> Result<(), std::io::Error> {
let help = r#"InfluxDB IOx server and command line tools
Examples:
# Run the InfluxDB IOx server:
influxdb_iox
# Run the InfluxDB IOx server with extra verbose logging
influxdb_iox -v
# Run InfluxDB IOx with full debug logging specified with RUST_LOG
RUST_LOG=debug influxdb_iox
# converts line protocol formatted data in temperature.lp to out.parquet
influxdb_iox convert temperature.lp out.parquet
# Dumps metadata information about 000000000013.tsm to stdout
influxdb_iox meta 000000000013.tsm
# Dumps storage statistics about out.parquet to stdout
influxdb_iox stats out.parquet
"#;
let matches = App::new(help)
.version(crate_version!())
.author(crate_authors!())
.about("InfluxDB IOx server and command line tools")
.subcommand(
SubCommand::with_name("convert")
.about("Convert one storage format to another")
.arg(
Arg::with_name("INPUT")
.help("The input files to read from")
.required(true)
.index(1),
)
.arg(
Arg::with_name("OUTPUT")
.takes_value(true)
.help("The filename or directory to write the output.")
.required(true)
.index(2),
)
.arg(
Arg::with_name("compression_level")
.short("c")
.long("compression-level")
.help("How much to compress the output data. 'max' compresses the most; 'compatibility' compresses in a manner more likely to be readable by other tools.")
.takes_value(true)
.possible_values(&["max", "compatibility"])
.default_value("compatibility"),
),
)
.subcommand(
SubCommand::with_name("meta")
.about("Print out metadata information about a storage file")
.arg(
Arg::with_name("INPUT")
.help("The input filename to read from")
.required(true)
.index(1),
),
)
.subcommand(
SubCommand::with_name("stats")
.about("Print out storage statistics information to stdout. \
If a directory is specified, checks all files recursively")
.arg(
Arg::with_name("INPUT")
.help("The input filename or directory to read from")
.required(true)
.index(1),
)
.arg(
Arg::with_name("per-column")
.long("per-column")
.help("Include detailed information per column")
)
.arg(
Arg::with_name("per-file")
.long("per-file")
.help("Include detailed information per file")
),
)
.subcommand(
SubCommand::with_name("server")
.about("Runs in server mode (default)")
)
.arg(Arg::with_name("verbose").short("v").long("verbose").multiple(true).help(
"Enables verbose logging (use 'vv' for even more verbosity). You can also set log level via \
the environment variable RUST_LOG=<value>",
))
.arg(Arg::with_name("num-threads").long("num-threads").takes_value(true).help(
"Set the maximum number of threads to use. Defaults to the number of cores on the system",
))
.get_matches();
setup_logging(matches.occurrences_of("verbose"));
// Install custom panic handler (note can not use `_` otherwise
// drop will be called immediately).
let _f = SendPanicsToTracing::new();
let mut tokio_runtime = get_runtime(matches.value_of("num-threads"))?;
tokio_runtime.block_on(dispatch_args(matches));
info!("InfluxDB IOx server shutting down");
Ok(())
}
async fn dispatch_args(matches: ArgMatches<'_>) {
match matches.subcommand() {
("convert", Some(sub_matches)) => {
let input_path = sub_matches.value_of("INPUT").unwrap();
let output_path = sub_matches.value_of("OUTPUT").unwrap();
let compression_level =
value_t!(sub_matches, "compression_level", CompressionLevel).unwrap();
match commands::convert::convert(&input_path, &output_path, compression_level) {
Ok(()) => debug!("Conversion completed successfully"),
Err(e) => {
eprintln!("Conversion failed: {}", e);
std::process::exit(ReturnCode::ConversionFailed as _)
}
}
}
("meta", Some(sub_matches)) => {
let input_filename = sub_matches.value_of("INPUT").unwrap();
match commands::file_meta::dump_meta(&input_filename) {
Ok(()) => debug!("Metadata dump completed successfully"),
Err(e) => {
eprintln!("Metadata dump failed: {}", e);
std::process::exit(ReturnCode::MetadataDumpFailed as _)
}
}
}
("stats", Some(sub_matches)) => {
let config = commands::stats::StatsConfig {
input_path: sub_matches.value_of("INPUT").unwrap().into(),
per_file: sub_matches.is_present("per-file"),
per_column: sub_matches.is_present("per-column"),
};
match commands::stats::stats(&config).await {
Ok(()) => debug!("Storage statistics dump completed successfully"),
Err(e) => {
eprintln!("Stats dump failed: {}", e);
std::process::exit(ReturnCode::StatsFailed as _)
}
}
}
("server", Some(_)) | (_, _) => {
println!("InfluxDB IOx server starting");
match commands::server::main().await {
Ok(()) => eprintln!("Shutdown OK"),
Err(e) => {
error!("Server shutdown with error: {}", e);
std::process::exit(ReturnCode::ServerExitedAbnormally as _);
}
}
}
}
}
/// Default debug level is debug for everything except
/// some especially noisy low level libraries
const DEFAULT_DEBUG_LOG_LEVEL: &str = "debug,hyper::proto::h1=info,h2=info";
/// Default verbose log level is info level for all components
const DEFAULT_VERBOSE_LOG_LEVEL: &str = "info";
/// Default log level is warn level for all components
const DEFAULT_LOG_LEVEL: &str = "warn";
/// Configures logging in the following precedence:
///
/// 1. If RUST_LOG environment variable is set, use that value
/// 2. If `-vv` (multiple instances of verbose), use DEFAULT_DEBUG_LOG_LEVEL
/// 3. If `-v` (a single instance of verbose), use DEFAULT_VERBOSE_LOG_LEVEL
/// 4. Otherwise use DEFAULT_LOG_LEVEL
fn setup_logging(num_verbose: u64) {
let rust_log_env = std::env::var("RUST_LOG");
match rust_log_env {
Ok(lvl) => {
if num_verbose > 0 {
eprintln!(
"WARNING: Using RUST_LOG='{}' environment, ignoring -v command line",
lvl
);
}
}
Err(_) => match num_verbose {
0 => std::env::set_var("RUST_LOG", DEFAULT_LOG_LEVEL),
1 => std::env::set_var("RUST_LOG", DEFAULT_VERBOSE_LOG_LEVEL),
_ => std::env::set_var("RUST_LOG", DEFAULT_DEBUG_LOG_LEVEL),
},
}
env_logger::init();
}
/// Creates the tokio runtime for executing IOx
///
/// if nthreads is none, uses the default scheduler
/// otherwise, creates a scheduler with the number of threads
fn get_runtime(num_threads: Option<&str>) -> Result<Runtime, std::io::Error> | {
use tokio::runtime::Builder;
let kind = std::io::ErrorKind::Other;
match num_threads {
None => Runtime::new(),
Some(num_threads) => {
info!(
"Setting number of threads to '{}' per command line request",
num_threads
);
let n = num_threads.parse::<usize>().map_err(|e| {
let msg = format!(
"Invalid num-threads: can not parse '{}' as an integer: {}",
num_threads, e
);
std::io::Error::new(kind, msg)
})?;
match n {
0 => {
let msg = format!(
"Invalid num-threads: '{}' must be greater than zero",
num_threads
);
Err(std::io::Error::new(kind, msg))
}
1 => Builder::new().basic_scheduler().enable_all().build(),
_ => Builder::new()
.threaded_scheduler()
.enable_all()
.core_threads(n)
.build(),
}
}
}
} |
|
viewer.spec.ts | /// <reference types="Cypress" />
describe('Viewer', () => {
beforeEach(function () {
cy.server();
cy.fixture("viewerLoadConfigDefault").as('viewerLoadConfigDefault');
cy.fixture("comparisonLoadConfigDefault").as('comparisonLoadConfigDefault');
cy.fixture("conversionLoadConfigDefault").as('conversionLoadConfigDefault');
cy.fixture("editorLoadConfigDefault").as('editorLoadConfigDefault');
cy.fixture("signatureLoadConfigDefault").as('signatureLoadConfigDefault');
cy.fixture("annotationLoadConfigDefault").as('annotationLoadConfigDefault');
cy.fixture("metadataLoadConfigDefault").as('metadataLoadConfigDefault');
cy.fixture("searchLoadConfigDefault").as('searchLoadConfigDefault');
cy.fixture("loadFileTreeDefault").as('loadFileTreeDefault');
cy.fixture("loadFileTreeSubFolder").as('loadFileTreeSubFolder');
cy.fixture("loadDocumentDescriptionDefault").as('loadDocumentDescriptionDefault');
cy.route('http://localhost:8080/viewer/loadConfig', "@viewerLoadConfigDefault");
cy.route('http://localhost:8080/comparison/loadConfig', "@comparisonLoadConfigDefault");
cy.route('http://localhost:8080/conversion/loadConfig', "@conversionLoadConfigDefault");
cy.route('http://localhost:8080/editor/loadConfig', "@editorLoadConfigDefault");
cy.route('http://localhost:8080/signature/loadConfig', "@signatureLoadConfigDefault");
cy.route('http://localhost:8080/annotation/loadConfig', "@annotationLoadConfigDefault");
cy.route('http://localhost:8080/metadata/loadConfig', "@metadataLoadConfigDefault");
cy.route('http://localhost:8080/search/loadConfig', "@searchLoadConfigDefault");
cy.route('POST','http://localhost:8080/viewer/loadFileTree', "@loadFileTreeDefault");
cy.route('POST','http://localhost:8080/viewer/loadDocumentDescription', "@loadDocumentDescriptionDefault");
});
it('should see logo', () => {
cy.visit('/viewer');
cy.get('#gd-header-logo .text').should('have.text', 'viewer');
});
it('should open file dialog when clicked on open file icon', () => {
cy.visit('/viewer');
cy.get('#tools > gd-button:nth-child(1)').click();
cy.get('#gd-modal-content > div.gd-modal-header > h4').should('have.text', 'Open document');
});
it('should be able to close dialog by clicking on x', () => {
cy.visit('/viewer');
cy.get('#tools > gd-button:nth-child(1)').click();
cy.get('#gd-modal-content > div.gd-modal-header > h4').should('have.text', 'Open document');
cy.get('#gd-modal-content > div.gd-modal-header > div').click();
cy.get('#gd-modal-content').should('not.exist');
});
it('should be able to close dialog by clicking on backdrop', () => {
cy.visit('/viewer');
cy.get('#tools > gd-button:nth-child(1)').click();
cy.get('#gd-modal-content > div.gd-modal-header > h4').should('have.text', 'Open document');
cy.get('#modalDialog').click({force:true});
cy.get('#gd-modal-content').should('not.exist');
});
| cy.get('#tools > gd-button:nth-child(1)').click();
cy.get('#gd-modal-content > div.gd-modal-header > h4').should('have.text', 'Open document');
cy.get(':nth-child(3) > .file-description > .file-name-format > .file-name').should('have.text', 'TestWord.docx');
cy.get(':nth-child(3) > .file-description > .file-name-format > .file-format').should('have.text', 'Microsoft Word');
cy.get(':nth-child(3) > .file-size').should('have.text', ' 11.63 KB '); // @TODO: trim spaces around size
cy.get(':nth-child(3) > div.file-description > fa-icon').should('have.class', 'fa-file-word');
});
it('should be able to open sub folder', () => {
cy.visit('/viewer');
cy.get('#tools > gd-button:nth-child(1)').click();
cy.get('#gd-modal-content > div.gd-modal-header > h4').should('have.text', 'Open document');
cy.route('POST','http://localhost:8080/viewer/loadFileTree',"@loadFileTreeSubFolder");
cy.get('#gd-modal-filebrowser > div > div:nth-child(2)').click();
cy.get('#gd-modal-filebrowser > div > div:nth-child(2) > div.file-description > div > div.file-name').should('have.text', 'FileInSubFolder.docx');
cy.get('#gd-modal-filebrowser > div > div:nth-child(2) > div.file-description > div > div.file-format').should('have.text', 'Microsoft Word');
cy.get('#gd-modal-filebrowser > div > div:nth-child(2) > div.file-size').should('have.text', ' 11.63 KB '); // @TODO: trim spaces around size
cy.get('#gd-modal-filebrowser > div > div:nth-child(2) > div.file-description > fa-icon').should('have.class', 'fa-file-word');
});
it('when drag file over file dialog drop zone style changed', () => {
cy.visit('/viewer');
cy.get('#tools > gd-button:nth-child(1)').click();
cy.get('#gd-modal-content > div.gd-modal-header > h4').should('have.text', 'Open document');
cy.get('.gd-modal-body').trigger('dragover');
cy.get('.gd-modal-body').trigger('dragenter');
cy.get('.gd-dnd-wrap').should('be.visible');
cy.get('.gd-modal-body').trigger('dragleave');
cy.get('.gd-dnd-wrap').should('not.exist');
});
it('should open file when clicked on file in dialog and display 5 pages', () => {
cy.visit('/viewer');
cy.get('#tools > gd-button:nth-child(1)').click();
cy.get('#gd-modal-content > div.gd-modal-header > h4').should('have.text', 'Open document');
cy.get('#gd-modal-filebrowser > div.list-files-body > div:nth-child(3)').click();
cy.get('.page').its('length').should('eq',5);
});
it('for opened file when thumbnail button clicked should open thumbnail panel', () => {
cy.visit('/viewer');
cy.get('#tools > gd-button:nth-child(1)').click();
cy.get('#gd-modal-content > div.gd-modal-header > h4').should('have.text', 'Open document');
cy.get('#gd-modal-filebrowser > div.list-files-body > div:nth-child(3)').click();
cy.get('#tools > gd-button.thumbnails-button').click();
cy.get('.gd-thumbnails',{timeout: 60000}).should('be.visible');
});
it('should scroll last page into view when clicked on last thumbnail', () => {
cy.visit('/viewer');
cy.get('#tools > gd-button:nth-child(1)').click();
cy.get('#gd-modal-content > div.gd-modal-header > h4').should('have.text', 'Open document');
cy.get('#gd-modal-filebrowser > div.list-files-body > div:nth-child(3)').click();
cy.get('#tools > gd-button.thumbnails-button').click();
cy.get('.gd-thumbnails',{timeout: 60000}).should('be.visible');
cy.get('#gd-thumbnails-page-3').should('be.visible').click();
cy.get('#page-3').should('be.visible');
});
}); | it('should be able to see file dialog file entries with detail', () => {
cy.visit('/viewer'); |
laboratory.ts | import * as yaml from 'js-yaml';
import {
BlobLogger,
Environment,
IOrchestrator,
IStorage,
IWorker,
RamDisk,
Volume,
World,
} from '../cloud';
import {
encodeBenchmark,
encodeCandidate,
encodeSuite,
encodeLog,
createRunId
} from '../naming';
import { decryptSecrets, generateKeys, KeyPair } from '../secrets';
import { sleep } from '../utilities';
import {
AnyDescription,
BenchmarkDescription,
CandidateDescription,
ILaboratory,
SuiteDescription,
Kind,
} from './interfaces';
import { loadCandidate, loadSuite } from './loaders';
export class Laboratory implements ILaboratory {
static image = {
tag: 'labratory:1.0',
create: () => Laboratory.entryPoint
};
// TODO: this should do a bind, not a connect.
static async entryPoint(worker: IWorker): Promise<void> {
worker.log(`Laboratory.entryPoint()`);
// TODO: following code is for scenario where CLI gives existing credentials
// to Laboratory.
// // Get private key from secrets.txt
// const secrets =
// (await worker.getWorld().localStorage.readBlob('secrets/keys')).toString('utf-8');
// console.log(`Labratory: secrets = "${secrets}"`);
// Simulate server startup time.
const startupDelaySeconds = 9;
worker.log(`sleeping for ${startupDelaySeconds} seconds`);
await sleep(startupDelaySeconds * 1000);
worker.log('woke up');
// TODO: get KeyPair from local storage instead.
const keys: KeyPair = generateKeys();
// Construct and bind service RPC stub.
const world = worker.getWorld();
const myService = new Laboratory(keys, world);
const env = worker.getWorld().environment;
// TODO: error check on port number parsing.
const port = Number(env.get('port'));
worker.bind(worker.getWorld(), myService, port);
worker.log(`Laboratory service running at ${world.hostname}:${port}`);
}
private keys: KeyPair;
private world: World;
// Convenience aliases
private cloudStorage: IStorage;
private orchestrator: IOrchestrator;
constructor(keys: KeyPair, world: World) {
this.keys = keys;
this.world = world;
// Convenience aliases
this.cloudStorage = world.cloudStorage;
this.orchestrator = world.orchestrator;
}
async getPublicKey(): Promise<string> {
return this.keys.publicKey;
}
async create(description: AnyDescription): Promise<string> {
switch (description.kind) {
case Kind.BENCHMARK:
return this.createBenchmark(description);
case Kind.CANDIDATE:
return this.createCandidate(description);
case Kind.SUITE:
return this.createSuite(description);
default:
const message = `Laboratory.create(): unsupported kind==="${description.kind}"`;
this.world.logger.log(message);
throw new TypeError(message);
}
}
private async createBenchmark(description: BenchmarkDescription): Promise<string> {
const encoded = encodeBenchmark(description.image);
const buffer = Buffer.from(yaml.safeDump(description), 'utf8');
// TODO: check for attempted blob overwrite.
await this.cloudStorage.writeBlob(encoded, buffer, false);
this.world.logger.log(`Uploaded benchmark schema to ${encoded}`);
return encoded;
}
private async createCandidate(description: CandidateDescription): Promise<string> {
const encoded = encodeCandidate(description.image);
const buffer = Buffer.from(yaml.safeDump(description), 'utf8');
// TODO: check for attempted blob overwrite.
await this.cloudStorage.writeBlob(encoded, buffer, false);
this.world.logger.log(`Uploaded candidate schema to ${encoded}`);
return encoded;
}
private async createSuite(description: SuiteDescription): Promise<string> {
const encoded = encodeSuite(description.name);
const buffer = Buffer.from(yaml.safeDump(description), 'utf8');
// TODO: check for attempted blob overwrite.
await this.cloudStorage.writeBlob(encoded, buffer, false);
this.world.logger.log(`Uploaded suite schema to ${encoded}`);
return encoded;
}
async run(candidateId: string, suiteId: string): Promise<void> {
this.world.logger.log(`run(${candidateId}, ${suiteId})`);
let suiteData: SuiteDescription;
try {
suiteData = await loadSuite(suiteId, this.cloudStorage);
} catch (e) {
// TODO: only change exception when file not found.
const message = `Cannot find suite ${suiteId}`;
throw new TypeError(message);
}
let candidateData: CandidateDescription;
try {
candidateData = await loadCandidate(candidateId, this.cloudStorage);
} catch (e) {
// TODO: only change exception when file not found.
const message = `Cannot find candidate ${candidateId}`;
throw new TypeError(message);
}
if (suiteData.benchmarkId !== candidateData.benchmarkId) {
const message = "Suite and Candidate benchmarks don't match.";
this.world.logger.log(message);
throw new TypeError(message);
}
const runId = createRunId();
//
// Decrypt candidate manifest secrets
//
decryptSecrets(candidateData, this.keys.privateKey);
const yamlText = yaml.safeDump(candidateData);
const secrets = new RamDisk();
// TODO: use naming service for blob name
await secrets.writeBlob(
'spec.yaml',
Buffer.from(yamlText, 'utf8'),
true
);
const volume: Volume = {
mount: '/secrets',
storage: secrets
};
// Start the candidate container.
// TODO: use naming service for host name
const candidateHost = 'c' + runId;
this.world.logger.log(`Starting candidate ${candidateId} on ${candidateHost}`);
// Don't await createWorker(). Want to model separate process.
this.orchestrator.createWorker(
candidateHost,
candidateId,
this.cloudStorage,
[ volume ],
new Environment(),
new BlobLogger(this.cloudStorage, candidateHost, encodeLog(candidateHost))
);
// Start the benchmark container.
// TODO: use naming service for host name
const benchmarkHost = 'b' + runId;
this.world.logger.log(`Starting benchmark ${suiteData.benchmarkId} on ${benchmarkHost}`);
// Don't await createWorker(). Want to model separate process.
this.orchestrator.createWorker(
benchmarkHost,
suiteData.benchmarkId,
this.cloudStorage,
[],
new Environment([
['candidate', candidateId],
['host', candidateHost],
// TODO: use naming service for runId
['run', 'r' + runId],
['suite', suiteId],
]),
new BlobLogger(this.cloudStorage, benchmarkHost, encodeLog(benchmarkHost))
);
}
}
// TODO: save for unit tests
// async function go() {
// const keys = generateKeys();
// const cloudStorage = new RamDisk();
// const lab = new Laboratory(keys, cloudStorage);
// const benchmark: BenchmarkDescription = {
// name: 'Sample True_Or_False Benchmark',
// description: 'A sample benchmark for boolean expressions evaluation.',
// owner: 'Mike',
// created: new Date().toISOString(),
// image: 'myregistry.azurecr.io/true_or_false_benchmark:1.0'
// };
// const benchmarkId = await lab.createBenchmark(benchmark);
// const suite: SuiteDescription = {
// name: 'Sample True_Or_False Suite',
// description: 'A sample benchmark for boolean expressions evaluation.',
// owner: 'Mike',
// created: new Date().toISOString(),
// benchmarkId,
// // domainData: [],
// // testData: []
// };
// const suiteId = await lab.createSuite(suite);
// const candidate: CandidateDescription = {
// name: 'Sample True_Or_False Candidate',
// description: 'A sample candidate that implements a boolean expression parser.',
// owner: 'Mike',
// created: new Date().toISOString(),
// benchmarkId,
// image: 'myregistry.azurecr.io/true_or_false_candidate:1.0'
// };
// const candidateId = await lab.createCandidate(candidate);
// lab.run(candidateId, suiteId);
// }
// go();
///////////////////////////////////////////////////////////////////////////////
// TODO: Save for Analysis service
// async listBenchmarks(pattern: CandidateDescription): Promise<BenchmarkDescription[]> {
// // TODO: implement wildcard matching
// return [];
// }
// async listCandidates(pattern: CandidateDescription): Promise<CandidateDescription[]> {
// // TODO: implement wildcard matching
// return [];
// }
| // async listSuites(pattern: SuiteDescription): Promise<BenchmarkDescription[]> {
// // TODO: implement wildcard matching
// return [];
// } | |
sha_hasher.rs | use io::sha;
use gfx;
static INPUTS: [&[u8]; 7] = [
b"",
b"a",
b"ab",
b"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
b"aaaaaaaaaaaggGaaaaaaggaaaaaaaagaaaaaaaaaaaaGgaaaaaaaaagaaaaaaaaa",
b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_-+=",
b"Call me Ishmael. Some years ago - never mind how long precisely - having \
little or no money in my purse, and nothing particular to interest me on \
shore, I thought I would sail about a little and see the watery part of the \
world. It is a way I have of driving off the spleen and regulating the \
circulation. Whenever I find myself growing grim about the mouth; whenever \
it is a damp, drizzly November in my soul; whenever I find myself involuntarily \
pausing before coffin warehouses, and bringing up the rear of every funeral I \
meet; and especially whenever my hypos get such an upper hand of me, that it \
requires a strong moral principle to prevent me from deliberately stepping into \
the street, and methodically knocking people's hats off - then, I account it \
high time to get to sea as soon as I can. This is my substitute for pistol and ball.",
];
static SUMS_160: [&[u8]; 7] = [
b"DA39A3EE5E6B4B0D3255BFEF95601890AFD80709",
b"86F7E437FAA5A7FCE15D1DDCB9EAEAEA377667B8",
b"DA23614E02469A0D7C7BD1BDAB5C9C474B1904DC",
b"0098BA824B5C16427BD7A1122A5A442A25EC644D",
b"5635174F74D73920A19E7736B30100CD736B9402",
b"B56130BF506630D047DABFCA15E6DE966385DD77",
b"5969D0ADB971C33CEF609D82B3E5E40B23ACDA17",
];
static SUMS_224: [&[u8]; 7] = [
b"D14A028C2A3A2BC9476102BB288234C415A2B01F828EA62AC5B3E42F",
b"ABD37534C7D9A2EFB9465DE931CD7055FFDB8879563AE98078D6D6D5",
b"DB3CDA86D4429A1D39C148989566B38F7BDA0156296BD364BA2F878B",
b"A88CD5CDE6D6FE9136A4E58B49167461EA95D388CA2BDB7AFDC3CBF4",
b"408DD6E1AB4F566392E9ED17A0ECAB0BB99BE9C06C4DC65FBA274336",
b"180801D0C38AA714D3C04BE1DBA000873655B719B6EB116D114355F0",
b"615847579C9C5332A6B779A107C384EDA18F59F9CC0390FBE7708E00",
];
static SUMS_256: [&[u8]; 7] = [
b"E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855",
b"CA978112CA1BBDCAFAC231B39A23DC4DA786EFF8147C4E72B9807785AFEE48BB",
b"FB8E20FC2E4C3F248C60C39BD652F3C1347298BB977B8B4D5903B85055620603",
b"FFE054FE7AE0CB6DC65C3AF9B61D5209F439851DB43D0BA5997337DF154668EB",
b"52CBACF2286644C497E14D5A147F245636376458FAF0406A636B8678BEAE14CC",
b"C3AEB0F036DFB5896CCD9AC7B82E095228E4EB0CFED15D928F7956D52976FE2D",
b"60F7C90FAACA7BC77A1ADCF2565F784790A491B4F8AF14798232D2FB073EE6A8",
];
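/// Returns true when `bytes` and the hex string `hex_str` encode the same value, logging the
/// first mismatch it finds.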
fn match_hexstr(bytes: &[u8], hex_str: &[u8]) -> bool {
if hex_str.len() != bytes.len() * 2 {
log!("Size mismatch: hex string has {} chars but {} were expected", hex_str.len(), bytes.len() * 2);
return false; // Sizes don't match
}
let mut byte_it = bytes.iter();
let mut hex_it = hex_str.iter();
while let (Some(src_byte), Some(top_n), Some(bot_n)) = (byte_it.next(), hex_it.next(), hex_it.next()) {
let to_num = |c: u8| {
(c as char).to_digit(16).unwrap() as u8
};
let hex_byte = to_num(*top_n) << 4 | to_num(*bot_n);
if *src_byte != hex_byte |
}
true
}
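/// Hashes every test input with `func` and prints SUCCESS or FAILURE against the expected digests.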
fn test_hashes<OUT>(name: &str, func: fn(&[u8]) -> OUT, sums: &[&[u8]])
where OUT: AsRef<[u8]> {
print!("Testing {} hashes...", name);
for (input, hashstr) in INPUTS.iter().zip(sums.iter()) {
let hash = func(input);
if match_hexstr(hash.as_ref(), hashstr) {
print!("SUCCESS ");
} else {
print!("FAILURE ");
}
}
log!("!");
}
pub fn main() {
gfx::clear_screen(0xFF, 0xFF, 0xFF);
test_hashes("SHA1", sha::hash_160, &SUMS_160);
test_hashes("SHA224", sha::hash_224, &SUMS_224);
test_hashes("SHA256", sha::hash_256, &SUMS_256);
}
| {
log!("Failed, byte {:02X} vs hex {}{}", *src_byte, *top_n as char, *bot_n as char);
return false;
} |
mod.rs | use apu::timer::Timer;
#[derive(Default)]
pub struct Sweep {
period: u8,
enabled_flag: bool,
negate_flag: bool,
shift_count: u8,
reload_flag: bool,
timer: Timer,
}
impl Sweep {
pub fn shift_count(&self) -> u8 {
self.shift_count
}
pub fn negate_flag(&self) -> bool |
pub fn write_flags(&mut self, flags: u8) {
// bit 7 E--- ---- Enabled flag
// bits 6-4 -PPP ---- The divider's period is set to P + 1
// bit 3 ---- N--- Negate flag
// 0: add to period, sweeping toward lower frequencies
// 1: subtract from period, sweeping toward higher frequencies
// bits 2-0 ---- -SSS Shift count (number of bits)
//
// Side effects: Sets the reload flag
self.enabled_flag = flags & 0b_1000_0000 > 0;
self.period = (flags & 0b_0111_0000) >> 4;
self.negate_flag = flags & 0b_0000_1000 > 0;
self.shift_count = flags & 0b_0111;
self.reload_flag = true;
}
/// Clock the sweep unit and return true if the pulse's period should be adjusted
pub fn clock(&mut self) -> bool {
// When the frame counter sends a half-frame clock (at 120 or 96 Hz), one of three things
// happens:
//
// - If the reload flag is set, the divider's counter is set to the period P. If the
// divider's counter was zero before the reload and the sweep is enabled, the pulse's
// period is also adjusted (if the target period is in range; see below). The reload
// flag is then cleared.
//
// - If the reload flag is clear and the divider's counter is non-zero, it is decremented.
//
// - If the reload flag is clear and the divider's counter is zero and the sweep is
// enabled, the counter is set to P and the pulse's period is adjusted (if the target
// period is in range)
if self.reload_flag {
let divider_counter_was_zero = self.timer.is_zero();
self.timer.set_period(self.period as u16);
self.timer.reload();
self.reload_flag = false;
divider_counter_was_zero && self.enabled_flag
} else if self.timer.clock() && self.enabled_flag {
self.timer.set_period(self.period as u16);
self.timer.reload();
true
} else {
false
}
}
}
| {
self.negate_flag
} |
store.rs | use std::marker::PhantomData;
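/// Typed index into a `Store<K, T>`. The phantom type parameter ties a key to the kind of store
/// that issued it, so keys from different stores cannot be mixed up at compile time.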
#[derive(Derivative)]
#[derivative(Copy, Clone, Debug, PartialEq)]
pub struct Key<K>(
usize,
#[derivative(Debug="ignore")]
PhantomData<fn(&K) -> ()>
);
#[derive(Derivative)]
#[derivative(Clone, Debug, PartialEq)]
pub struct Store<K, T> {
items: Vec<T>,
#[derivative(Debug="ignore")]
key_phantom: PhantomData<fn(&K) -> ()>,
}
impl<K, T> Store<K, T> {
pub fn new() -> Self {
Store { | key_phantom: PhantomData,
}
}
pub fn insert(&mut self, v: T) -> Key<K> {
self.items.push(v);
Key(self.items.len() - 1, PhantomData)
}
pub fn get(&self, key: Key<K>) -> &T {
let idx = key.0;
self.items.get(idx).expect("Store::get called with an invalid key")
}
} | items: Vec::new(), |
Array.py | class Array(object,ICloneable,IList,ICollection,IEnumerable,IStructuralComparable,IStructuralEquatable):
""" Provides methods for creating,manipulating,searching,and sorting arrays,thereby serving as the base class for all arrays in the common language runtime. """
@staticmethod
def AsReadOnly(array):
""" AsReadOnly[T](array: Array[T]) -> ReadOnlyCollection[T] """
pass
@staticmethod
def BinarySearch(array,*__args):
|
@staticmethod
def Clear(array,index,length):
"""
Clear(array: Array,index: int,length: int)
Sets a range of elements in the System.Array to zero,to false,or to null,
depending on the element type.
array: The System.Array whose elements need to be cleared.
index: The starting index of the range of elements to clear.
length: The number of elements to clear.
"""
pass
def Clone(self):
"""
Clone(self: Array) -> object
Creates a shallow copy of the System.Array.
Returns: A shallow copy of the System.Array.
"""
pass
@staticmethod
def ConstrainedCopy(sourceArray,sourceIndex,destinationArray,destinationIndex,length):
"""
ConstrainedCopy(sourceArray: Array,sourceIndex: int,destinationArray: Array,destinationIndex: int,length: int)
Copies a range of elements from an System.Array starting at the specified
source index and pastes them to another System.Array starting at the specified
destination index. Guarantees that all changes are undone if the copy does not
succeed completely.
sourceArray: The System.Array that contains the data to copy.
sourceIndex: A 32-bit integer that represents the index in the sourceArray at which copying
begins.
destinationArray: The System.Array that receives the data.
destinationIndex: A 32-bit integer that represents the index in the destinationArray at which
storing begins.
length: A 32-bit integer that represents the number of elements to copy.
"""
pass
@staticmethod
def ConvertAll(array,converter):
""" ConvertAll[(TInput,TOutput)](array: Array[TInput],converter: Converter[TInput,TOutput]) -> Array[TOutput] """
pass
@staticmethod
def Copy(sourceArray,*__args):
"""
Copy(sourceArray: Array,destinationArray: Array,length: Int64)
Copies a range of elements from an System.Array starting at the first element
and pastes them into another System.Array starting at the first element. The
length is specified as a 64-bit integer.
sourceArray: The System.Array that contains the data to copy.
destinationArray: The System.Array that receives the data.
length: A 64-bit integer that represents the number of elements to copy. The integer
must be between zero and System.Int32.MaxValue,inclusive.
Copy(sourceArray: Array,sourceIndex: Int64,destinationArray: Array,destinationIndex: Int64,length: Int64)
Copies a range of elements from an System.Array starting at the specified
source index and pastes them to another System.Array starting at the specified
destination index. The length and the indexes are specified as 64-bit integers.
sourceArray: The System.Array that contains the data to copy.
sourceIndex: A 64-bit integer that represents the index in the sourceArray at which copying
begins.
destinationArray: The System.Array that receives the data.
destinationIndex: A 64-bit integer that represents the index in the destinationArray at which
storing begins.
length: A 64-bit integer that represents the number of elements to copy. The integer
must be between zero and System.Int32.MaxValue,inclusive.
Copy(sourceArray: Array,destinationArray: Array,length: int)
Copies a range of elements from an System.Array starting at the first element
and pastes them into another System.Array starting at the first element. The
length is specified as a 32-bit integer.
sourceArray: The System.Array that contains the data to copy.
destinationArray: The System.Array that receives the data.
length: A 32-bit integer that represents the number of elements to copy.
Copy(sourceArray: Array,sourceIndex: int,destinationArray: Array,destinationIndex: int,length: int)
Copies a range of elements from an System.Array starting at the specified
source index and pastes them to another System.Array starting at the specified
destination index. The length and the indexes are specified as 32-bit integers.
sourceArray: The System.Array that contains the data to copy.
sourceIndex: A 32-bit integer that represents the index in the sourceArray at which copying
begins.
destinationArray: The System.Array that receives the data.
destinationIndex: A 32-bit integer that represents the index in the destinationArray at which
storing begins.
length: A 32-bit integer that represents the number of elements to copy.
"""
pass
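    # Illustrative sketch (not part of the generated stub): copying between two
    # typed arrays from IronPython, assuming `from System import Array` is in scope.
    #
    #   src = Array[int]((1, 2, 3, 4))
    #   dst = Array.CreateInstance(int, 4)
    #   Array.Copy(src, dst, 4)        # copy all four elements
    #   Array.Copy(src, 1, dst, 0, 2)  # copy src[1:3] into dst[0:2]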
def CopyTo(self,array,index):
"""
CopyTo(self: Array,array: Array,index: Int64)
Copies all the elements of the current one-dimensional System.Array to the
specified one-dimensional System.Array starting at the specified destination
System.Array index. The index is specified as a 64-bit integer.
array: The one-dimensional System.Array that is the destination of the elements copied
from the current System.Array.
index: A 64-bit integer that represents the index in array at which copying begins.
CopyTo(self: Array,array: Array,index: int)
Copies all the elements of the current one-dimensional System.Array to the
specified one-dimensional System.Array starting at the specified destination
System.Array index. The index is specified as a 32-bit integer.
array: The one-dimensional System.Array that is the destination of the elements copied
from the current System.Array.
index: A 32-bit integer that represents the index in array at which copying begins.
"""
pass
@staticmethod
def CreateInstance(elementType,*__args):
"""
CreateInstance(elementType: Type,*lengths: Array[int]) -> Array
Creates a multidimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing. The dimension lengths are
specified in an array of 32-bit integers.
elementType: The System.Type of the System.Array to create.
lengths: An array of 32-bit integers that represent the size of each dimension of the
System.Array to create.
Returns: A new multidimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
CreateInstance(elementType: Type,*lengths: Array[Int64]) -> Array
Creates a multidimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing. The dimension lengths are
specified in an array of 64-bit integers.
elementType: The System.Type of the System.Array to create.
lengths: An array of 64-bit integers that represent the size of each dimension of the
System.Array to create. Each integer in the array must be between zero and
System.Int32.MaxValue,inclusive.
Returns: A new multidimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
CreateInstance(elementType: Type,lengths: Array[int],lowerBounds: Array[int]) -> Array
Creates a multidimensional System.Array of the specified System.Type and
dimension lengths,with the specified lower bounds.
elementType: The System.Type of the System.Array to create.
lengths: A one-dimensional array that contains the size of each dimension of the
System.Array to create.
lowerBounds: A one-dimensional array that contains the lower bound (starting index) of each
dimension of the System.Array to create.
Returns: A new multidimensional System.Array of the specified System.Type with the
specified length and lower bound for each dimension.
CreateInstance(elementType: Type,length: int) -> Array
Creates a one-dimensional System.Array of the specified System.Type and length,
with zero-based indexing.
elementType: The System.Type of the System.Array to create.
length: The size of the System.Array to create.
Returns: A new one-dimensional System.Array of the specified System.Type with the
specified length,using zero-based indexing.
CreateInstance(elementType: Type,length1: int,length2: int) -> Array
Creates a two-dimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing.
elementType: The System.Type of the System.Array to create.
length1: The size of the first dimension of the System.Array to create.
length2: The size of the second dimension of the System.Array to create.
Returns: A new two-dimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
CreateInstance(elementType: Type,length1: int,length2: int,length3: int) -> Array
Creates a three-dimensional System.Array of the specified System.Type and
dimension lengths,with zero-based indexing.
elementType: The System.Type of the System.Array to create.
length1: The size of the first dimension of the System.Array to create.
length2: The size of the second dimension of the System.Array to create.
length3: The size of the third dimension of the System.Array to create.
Returns: A new three-dimensional System.Array of the specified System.Type with the
specified length for each dimension,using zero-based indexing.
"""
pass
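    # Illustrative sketch (not part of the generated stub): creating arrays from
    # IronPython, assuming `from System import Array`; Python's `int`/`str` map to
    # the corresponding CLR element types.
    #
    #   a = Array.CreateInstance(int, 3)        # one-dimensional, length 3, zero-filled
    #   grid = Array.CreateInstance(str, 2, 4)  # two-dimensional, 2 x 4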
@staticmethod
def Empty():
""" Empty[T]() -> Array[T] """
pass
@staticmethod
def Exists(array,match):
""" Exists[T](array: Array[T],match: Predicate[T]) -> bool """
pass
@staticmethod
def Find(array,match):
""" Find[T](array: Array[T],match: Predicate[T]) -> T """
pass
@staticmethod
def FindAll(array,match):
""" FindAll[T](array: Array[T],match: Predicate[T]) -> Array[T] """
pass
@staticmethod
def FindIndex(array,*__args):
"""
FindIndex[T](array: Array[T],startIndex: int,count: int,match: Predicate[T]) -> int
FindIndex[T](array: Array[T],startIndex: int,match: Predicate[T]) -> int
FindIndex[T](array: Array[T],match: Predicate[T]) -> int
"""
pass
@staticmethod
def FindLast(array,match):
""" FindLast[T](array: Array[T],match: Predicate[T]) -> T """
pass
@staticmethod
def FindLastIndex(array,*__args):
"""
FindLastIndex[T](array: Array[T],startIndex: int,count: int,match: Predicate[T]) -> int
FindLastIndex[T](array: Array[T],startIndex: int,match: Predicate[T]) -> int
FindLastIndex[T](array: Array[T],match: Predicate[T]) -> int
"""
pass
@staticmethod
def ForEach(array,action):
""" ForEach[T](array: Array[T],action: Action[T]) """
pass
def GetEnumerator(self):
"""
GetEnumerator(self: Array) -> IEnumerator
Returns an System.Collections.IEnumerator for the System.Array.
Returns: An System.Collections.IEnumerator for the System.Array.
"""
pass
def GetLength(self,dimension):
"""
GetLength(self: Array,dimension: int) -> int
Gets a 32-bit integer that represents the number of elements in the specified
dimension of the System.Array.
dimension: A zero-based dimension of the System.Array whose length needs to be determined.
Returns: A 32-bit integer that represents the number of elements in the specified
dimension.
"""
pass
def GetLongLength(self,dimension):
"""
GetLongLength(self: Array,dimension: int) -> Int64
Gets a 64-bit integer that represents the number of elements in the specified
dimension of the System.Array.
dimension: A zero-based dimension of the System.Array whose length needs to be determined.
Returns: A 64-bit integer that represents the number of elements in the specified
dimension.
"""
pass
def GetLowerBound(self,dimension):
"""
GetLowerBound(self: Array,dimension: int) -> int
Gets the lower bound of the specified dimension in the System.Array.
dimension: A zero-based dimension of the System.Array whose lower bound needs to be
determined.
Returns: The lower bound of the specified dimension in the System.Array.
"""
pass
def GetUpperBound(self,dimension):
"""
GetUpperBound(self: Array,dimension: int) -> int
Gets the upper bound of the specified dimension in the System.Array.
dimension: A zero-based dimension of the System.Array whose upper bound needs to be
determined.
Returns: The upper bound of the specified dimension in the System.Array.
"""
pass
def GetValue(self,*__args):
"""
GetValue(self: Array,index1: Int64,index2: Int64) -> object
Gets the value at the specified position in the two-dimensional System.Array.
The indexes are specified as 64-bit integers.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the two-dimensional System.Array.
GetValue(self: Array,index: Int64) -> object
Gets the value at the specified position in the one-dimensional System.Array.
The index is specified as a 64-bit integer.
index: A 64-bit integer that represents the position of the System.Array element to
get.
Returns: The value at the specified position in the one-dimensional System.Array.
GetValue(self: Array,*indices: Array[Int64]) -> object
Gets the value at the specified position in the multidimensional System.Array.
The indexes are specified as an array of 64-bit integers.
indices: A one-dimensional array of 64-bit integers that represent the indexes
specifying the position of the System.Array element to get.
Returns: The value at the specified position in the multidimensional System.Array.
GetValue(self: Array,index1: Int64,index2: Int64,index3: Int64) -> object
Gets the value at the specified position in the three-dimensional System.Array.
The indexes are specified as 64-bit integers.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to get.
index3: A 64-bit integer that represents the third-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the three-dimensional System.Array.
GetValue(self: Array,index: int) -> object
Gets the value at the specified position in the one-dimensional System.Array.
The index is specified as a 32-bit integer.
index: A 32-bit integer that represents the position of the System.Array element to
get.
Returns: The value at the specified position in the one-dimensional System.Array.
GetValue(self: Array,*indices: Array[int]) -> object
Gets the value at the specified position in the multidimensional System.Array.
The indexes are specified as an array of 32-bit integers.
indices: A one-dimensional array of 32-bit integers that represent the indexes
specifying the position of the System.Array element to get.
Returns: The value at the specified position in the multidimensional System.Array.
GetValue(self: Array,index1: int,index2: int,index3: int) -> object
Gets the value at the specified position in the three-dimensional System.Array.
The indexes are specified as 32-bit integers.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to get.
index3: A 32-bit integer that represents the third-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the three-dimensional System.Array.
GetValue(self: Array,index1: int,index2: int) -> object
Gets the value at the specified position in the two-dimensional System.Array.
The indexes are specified as 32-bit integers.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to get.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to get.
Returns: The value at the specified position in the two-dimensional System.Array.
"""
pass
@staticmethod
def IndexOf(array,value,startIndex=None,count=None):
"""
IndexOf[T](array: Array[T],value: T) -> int
IndexOf[T](array: Array[T],value: T,startIndex: int) -> int
IndexOf[T](array: Array[T],value: T,startIndex: int,count: int) -> int
IndexOf(array: Array,value: object) -> int
Searches for the specified object and returns the index of the first occurrence
within the entire one-dimensional System.Array.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
Returns: The index of the first occurrence of value within the entire array,if found;
otherwise,the lower bound of the array minus 1.
IndexOf(array: Array,value: object,startIndex: int) -> int
Searches for the specified object and returns the index of the first occurrence
within the range of elements in the one-dimensional System.Array that extends
from the specified index to the last element.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the search. 0 (zero) is valid in an empty array.
Returns: The index of the first occurrence of value within the range of elements in
array that extends from startIndex to the last element,if found; otherwise,
the lower bound of the array minus 1.
IndexOf(array: Array,value: object,startIndex: int,count: int) -> int
Searches for the specified object and returns the index of the first occurrence
within the range of elements in the one-dimensional System.Array that starts at
the specified index and contains the specified number of elements.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the search. 0 (zero) is valid in an empty array.
count: The number of elements in the section to search.
Returns: The index of the first occurrence of value within the range of elements in
array that starts at startIndex and contains the number of elements specified
in count,if found; otherwise,the lower bound of the array minus 1.
"""
pass
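    # Illustrative sketch (not part of the generated stub): IndexOf returns the
    # first matching index, or the array's lower bound minus 1 (-1 for ordinary
    # zero-based arrays) when the value is absent. Assumes `from System import Array`.
    #
    #   a = Array[int]((10, 20, 30))
    #   Array.IndexOf(a, 20)   # -> 1
    #   Array.IndexOf(a, 99)   # -> -1 (not found)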
def Initialize(self):
"""
Initialize(self: Array)
Initializes every element of the value-type System.Array by calling the default
constructor of the value type.
"""
pass
@staticmethod
def LastIndexOf(array,value,startIndex=None,count=None):
"""
LastIndexOf[T](array: Array[T],value: T) -> int
LastIndexOf[T](array: Array[T],value: T,startIndex: int) -> int
LastIndexOf[T](array: Array[T],value: T,startIndex: int,count: int) -> int
LastIndexOf(array: Array,value: object) -> int
Searches for the specified object and returns the index of the last occurrence
within the entire one-dimensional System.Array.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
Returns: The index of the last occurrence of value within the entire array,if found;
otherwise,the lower bound of the array minus 1.
LastIndexOf(array: Array,value: object,startIndex: int) -> int
Searches for the specified object and returns the index of the last occurrence
within the range of elements in the one-dimensional System.Array that extends
from the first element to the specified index.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the backward search.
Returns: The index of the last occurrence of value within the range of elements in array
that extends from the first element to startIndex,if found; otherwise,the
lower bound of the array minus 1.
LastIndexOf(array: Array,value: object,startIndex: int,count: int) -> int
Searches for the specified object and returns the index of the last occurrence
within the range of elements in the one-dimensional System.Array that contains
the specified number of elements and ends at the specified index.
array: The one-dimensional System.Array to search.
value: The object to locate in array.
startIndex: The starting index of the backward search.
count: The number of elements in the section to search.
Returns: The index of the last occurrence of value within the range of elements in array
that contains the number of elements specified in count and ends at startIndex,
if found; otherwise,the lower bound of the array minus 1.
"""
pass
@staticmethod
def Resize(array,newSize):
""" Resize[T](array: Array[T],newSize: int) -> Array[T] """
pass
@staticmethod
def Reverse(array,index=None,length=None):
"""
Reverse(array: Array,index: int,length: int)
Reverses the sequence of the elements in a range of elements in the
one-dimensional System.Array.
array: The one-dimensional System.Array to reverse.
index: The starting index of the section to reverse.
length: The number of elements in the section to reverse.
Reverse(array: Array)
Reverses the sequence of the elements in the entire one-dimensional
System.Array.
array: The one-dimensional System.Array to reverse.
"""
pass
def SetValue(self,value,*__args):
"""
SetValue(self: Array,value: object,index1: Int64,index2: Int64)
Sets a value to the element at the specified position in the two-dimensional
System.Array. The indexes are specified as 64-bit integers.
value: The new value for the specified element.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to set.
SetValue(self: Array,value: object,index: Int64)
Sets a value to the element at the specified position in the one-dimensional
System.Array. The index is specified as a 64-bit integer.
value: The new value for the specified element.
index: A 64-bit integer that represents the position of the System.Array element to
set.
SetValue(self: Array,value: object,*indices: Array[Int64])
Sets a value to the element at the specified position in the multidimensional
System.Array. The indexes are specified as an array of 64-bit integers.
value: The new value for the specified element.
indices: A one-dimensional array of 64-bit integers that represent the indexes
specifying the position of the element to set.
SetValue(self: Array,value: object,index1: Int64,index2: Int64,index3: Int64)
Sets a value to the element at the specified position in the three-dimensional
System.Array. The indexes are specified as 64-bit integers.
value: The new value for the specified element.
index1: A 64-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 64-bit integer that represents the second-dimension index of the System.Array
element to set.
index3: A 64-bit integer that represents the third-dimension index of the System.Array
element to set.
SetValue(self: Array,value: object,index1: int,index2: int)
Sets a value to the element at the specified position in the two-dimensional
System.Array. The indexes are specified as 32-bit integers.
value: The new value for the specified element.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to set.
SetValue(self: Array,value: object,index: int)
Sets a value to the element at the specified position in the one-dimensional
System.Array. The index is specified as a 32-bit integer.
value: The new value for the specified element.
index: A 32-bit integer that represents the position of the System.Array element to
set.
SetValue(self: Array,value: object,*indices: Array[int])
Sets a value to the element at the specified position in the multidimensional
System.Array. The indexes are specified as an array of 32-bit integers.
value: The new value for the specified element.
indices: A one-dimensional array of 32-bit integers that represent the indexes
specifying the position of the element to set.
SetValue(self: Array,value: object,index1: int,index2: int,index3: int)
Sets a value to the element at the specified position in the three-dimensional
System.Array. The indexes are specified as 32-bit integers.
value: The new value for the specified element.
index1: A 32-bit integer that represents the first-dimension index of the System.Array
element to set.
index2: A 32-bit integer that represents the second-dimension index of the System.Array
element to set.
index3: A 32-bit integer that represents the third-dimension index of the System.Array
element to set.
"""
pass
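    # Illustrative sketch (not part of the generated stub): element access on a
    # multidimensional array via GetValue/SetValue, assuming `from System import Array`.
    #
    #   grid = Array.CreateInstance(int, 2, 3)
    #   grid.SetValue(7, 1, 2)   # row 1, column 2
    #   grid.GetValue(1, 2)      # -> 7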
@staticmethod
def Sort(*__args):
"""
        Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue],index: int,length: int)
        Sort[T](array: Array[T],comparer: IComparer[T])
        Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue])
        Sort[T](array: Array[T],index: int,length: int)
        Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue],index: int,length: int,comparer: IComparer[TKey])
        Sort[T](array: Array[T],comparison: Comparison[T])
        Sort[(TKey,TValue)](keys: Array[TKey],items: Array[TValue],comparer: IComparer[TKey])
        Sort[T](array: Array[T],index: int,length: int,comparer: IComparer[T])
        Sort[T](array: Array[T])
        Sort(array: Array,index: int,length: int)
Sorts the elements in a range of elements in a one-dimensional System.Array
using the System.IComparable implementation of each element of the
System.Array.
array: The one-dimensional System.Array to sort.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
Sort(keys: Array,items: Array,index: int,length: int)
Sorts a range of elements in a pair of one-dimensional System.Array objects
(one contains the keys and the other contains the corresponding items) based on
the keys in the first System.Array using the System.IComparable implementation
of each key.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
Sort(array: Array)
Sorts the elements in an entire one-dimensional System.Array using the
System.IComparable implementation of each element of the System.Array.
array: The one-dimensional System.Array to sort.
Sort(keys: Array,items: Array)
Sorts a pair of one-dimensional System.Array objects (one contains the keys and
the other contains the corresponding items) based on the keys in the first
System.Array using the System.IComparable implementation of each key.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
Sort(array: Array,index: int,length: int,comparer: IComparer)
Sorts the elements in a range of elements in a one-dimensional System.Array
using the specified System.Collections.IComparer.
array: The one-dimensional System.Array to sort.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
Sort(keys: Array,items: Array,index: int,length: int,comparer: IComparer)
Sorts a range of elements in a pair of one-dimensional System.Array objects
(one contains the keys and the other contains the corresponding items) based on
the keys in the first System.Array using the specified
System.Collections.IComparer.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
index: The starting index of the range to sort.
length: The number of elements in the range to sort.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
Sort(array: Array,comparer: IComparer)
Sorts the elements in a one-dimensional System.Array using the specified
System.Collections.IComparer.
array: The one-dimensional System.Array to sort.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
Sort(keys: Array,items: Array,comparer: IComparer)
Sorts a pair of one-dimensional System.Array objects (one contains the keys and
the other contains the corresponding items) based on the keys in the first
System.Array using the specified System.Collections.IComparer.
keys: The one-dimensional System.Array that contains the keys to sort.
items: The one-dimensional System.Array that contains the items that correspond to
each of the keys in the keysSystem.Array.-or-null to sort only the
keysSystem.Array.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or-null to use the System.IComparable implementation of each element.
"""
pass
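    # Illustrative sketch (not part of the generated stub): sorting parallel
    # key/item arrays keeps the pairs aligned. Assumes `from System import Array`.
    #
    #   keys = Array[int]((3, 1, 2))
    #   items = Array[str](('c', 'a', 'b'))
    #   Array.Sort(keys, items)
    #   # keys  -> 1, 2, 3
    #   # items -> 'a', 'b', 'c'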
@staticmethod
def TrueForAll(array,match):
""" TrueForAll[T](array: Array[T],match: Predicate[T]) -> bool """
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __contains__(self,*args):
"""
__contains__(self: IList,value: object) -> bool
Determines whether the System.Collections.IList contains a specific value.
value: The object to locate in the System.Collections.IList.
Returns: true if the System.Object is found in the System.Collections.IList; otherwise,
false.
"""
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==yx.__eq__(y) <==> x==yx.__eq__(y) <==> x==y """
pass
def __getitem__(self,*args):
""" x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y]x.__getitem__(y) <==> x[y] """
pass
def __ge__(self,*args):
pass
def __gt__(self,*args):
pass
def __hash__(self,*args):
""" x.__hash__() <==> hash(x) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __iter__(self,*args):
""" __iter__(self: IEnumerable) -> object """
pass
def __len__(self,*args):
""" x.__len__() <==> len(x) """
pass
def __le__(self,*args):
pass
def __lt__(self,*args):
pass
def __mul__(self,*args):
""" x.__mul__(y) <==> x*y """
pass
@staticmethod
def __new__(self,*args): #cannot find CLR constructor
"""
__new__(pythonType: type,items: object) -> object
__new__(pythonType: type,items: ICollection) -> object
"""
pass
def __ne__(self,*args):
pass
def __radd__(self,*args):
""" __radd__(data1: Array,data2: Array) -> Array """
pass
def __reduce_ex__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: Array) -> str """
pass
def __setitem__(self,*args):
""" x.__setitem__(i,y) <==> x[i]=x.__setitem__(i,y) <==> x[i]=x.__setitem__(i,y) <==> x[i]= """
pass
IsFixedSize=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Array has a fixed size.
Get: IsFixedSize(self: Array) -> bool
"""
IsReadOnly=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether the System.Array is read-only.
Get: IsReadOnly(self: Array) -> bool
"""
IsSynchronized=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a value indicating whether access to the System.Array is synchronized (thread safe).
Get: IsSynchronized(self: Array) -> bool
"""
Length=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a 32-bit integer that represents the total number of elements in all the dimensions of the System.Array.
Get: Length(self: Array) -> int
"""
LongLength=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets a 64-bit integer that represents the total number of elements in all the dimensions of the System.Array.
Get: LongLength(self: Array) -> Int64
"""
Rank=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets the rank (number of dimensions) of the System.Array.
Get: Rank(self: Array) -> int
"""
SyncRoot=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Gets an object that can be used to synchronize access to the System.Array.
Get: SyncRoot(self: Array) -> object
"""
| """
BinarySearch[T](array: Array[T],value: T,comparer: IComparer[T]) -> int
BinarySearch[T](array: Array[T],value: T) -> int
BinarySearch[T](array: Array[T],index: int,length: int,value: T,comparer: IComparer[T]) -> int
BinarySearch[T](array: Array[T],index: int,length: int,value: T) -> int
BinarySearch(array: Array,index: int,length: int,value: object) -> int
Searches a range of elements in a one-dimensional sorted System.Array for a
value,using the System.IComparable interface implemented by each element of
the System.Array and by the specified value.
array: The sorted one-dimensional System.Array to search.
index: The starting index of the range to search.
length: The length of the range to search.
value: The object to search for.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
BinarySearch(array: Array,value: object) -> int
Searches an entire one-dimensional sorted System.Array for a specific element,
using the System.IComparable interface implemented by each element of the
System.Array and by the specified object.
array: The sorted one-dimensional System.Array to search.
value: The object to search for.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
BinarySearch(array: Array,index: int,length: int,value: object,comparer: IComparer) -> int
Searches a range of elements in a one-dimensional sorted System.Array for a
value,using the specified System.Collections.IComparer interface.
array: The sorted one-dimensional System.Array to search.
index: The starting index of the range to search.
length: The length of the range to search.
value: The object to search for.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or- null to use the System.IComparable implementation of each
element.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
BinarySearch(array: Array,value: object,comparer: IComparer) -> int
Searches an entire one-dimensional sorted System.Array for a value using the
specified System.Collections.IComparer interface.
array: The sorted one-dimensional System.Array to search.
value: The object to search for.
comparer: The System.Collections.IComparer implementation to use when comparing
elements.-or- null to use the System.IComparable implementation of each
element.
Returns: The index of the specified value in the specified array,if value is found. If
value is not found and value is less than one or more elements in array,a
negative number which is the bitwise complement of the index of the first
element that is larger than value. If value is not found and value is greater
than any of the elements in array,a negative number which is the bitwise
complement of (the index of the last element plus 1).
"""
pass |
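    # Illustrative sketch (not part of the generated stub): BinarySearch requires a
    # sorted array; a miss returns the bitwise complement of the insertion point,
    # so `~result` recovers where the value would go. Assumes `from System import Array`.
    #
    #   a = Array[int]((1, 3, 5, 7))
    #   Array.BinarySearch(a, 5)      # -> 2
    #   i = Array.BinarySearch(a, 4)  # negative: not found
    #   # ~i == 2, the index where 4 would be inserted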
ogfuncoin-util-test.py | #!/usr/bin/env python3
# Copyright 2014 BitPay Inc.
# Copyright 2016-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test framework for ogfuncoin utils.
Runs automatically during `make check`.
Can also be run manually."""
from __future__ import division,print_function,unicode_literals
import argparse
import binascii
try:
import configparser
except ImportError:
import ConfigParser as configparser
import difflib
import json
import logging
import os
import pprint
import subprocess
import sys
def | ():
config = configparser.ConfigParser()
config.optionxform = str
config.readfp(open(os.path.join(os.path.dirname(__file__), "../config.ini"), encoding="utf8"))
env_conf = dict(config.items('environment'))
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
verbose = args.verbose
if verbose:
level = logging.DEBUG
else:
level = logging.ERROR
formatter = '%(asctime)s - %(levelname)s - %(message)s'
# Add the format/level to the logger
logging.basicConfig(format=formatter, level=level)
bctester(os.path.join(env_conf["SRCDIR"], "test", "util", "data"), "ogfuncoin-util-test.json", env_conf)
def bctester(testDir, input_basename, buildenv):
""" Loads and parses the input file, runs all tests and reports results"""
input_filename = os.path.join(testDir, input_basename)
raw_data = open(input_filename, encoding="utf8").read()
input_data = json.loads(raw_data)
failed_testcases = []
for testObj in input_data:
try:
bctest(testDir, testObj, buildenv)
logging.info("PASSED: " + testObj["description"])
except:
logging.info("FAILED: " + testObj["description"])
failed_testcases.append(testObj["description"])
if failed_testcases:
error_message = "FAILED_TESTCASES:\n"
error_message += pprint.pformat(failed_testcases, width=400)
logging.error(error_message)
sys.exit(1)
else:
sys.exit(0)
def bctest(testDir, testObj, buildenv):
"""Runs a single test, comparing output and RC to expected output and RC.
Raises an error if input can't be read, executable fails, or output/RC
are not as expected. Error is caught by bctester() and reported.
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
execargs = testObj['args']
execrun = [execprog] + execargs
# Read the input data (if there is any)
stdinCfg = None
inputData = None
if "input" in testObj:
filename = os.path.join(testDir, testObj["input"])
inputData = open(filename, encoding="utf8").read()
stdinCfg = subprocess.PIPE
# Read the expected output data (if there is any)
outputFn = None
outputData = None
outputType = None
if "output_cmp" in testObj:
outputFn = testObj['output_cmp']
outputType = os.path.splitext(outputFn)[1][1:] # output type from file extension (determines how to compare)
try:
outputData = open(os.path.join(testDir, outputFn), encoding="utf8").read()
except:
logging.error("Output file " + outputFn + " can not be opened")
raise
if not outputData:
logging.error("Output data missing for " + outputFn)
raise Exception
if not outputType:
logging.error("Output file %s does not have a file extension" % outputFn)
raise Exception
# Run the test
proc = subprocess.Popen(execrun, stdin=stdinCfg, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
try:
outs = proc.communicate(input=inputData)
except OSError:
logging.error("OSError, Failed to execute " + execprog)
raise
if outputData:
data_mismatch, formatting_mismatch = False, False
# Parse command output and expected output
try:
a_parsed = parse_output(outs[0], outputType)
except Exception as e:
logging.error('Error parsing command output as %s: %s' % (outputType, e))
raise
try:
b_parsed = parse_output(outputData, outputType)
except Exception as e:
logging.error('Error parsing expected output %s as %s: %s' % (outputFn, outputType, e))
raise
# Compare data
if a_parsed != b_parsed:
logging.error("Output data mismatch for " + outputFn + " (format " + outputType + ")")
data_mismatch = True
# Compare formatting
if outs[0] != outputData:
error_message = "Output formatting mismatch for " + outputFn + ":\n"
error_message += "".join(difflib.context_diff(outputData.splitlines(True),
outs[0].splitlines(True),
fromfile=outputFn,
tofile="returned"))
logging.error(error_message)
formatting_mismatch = True
assert not data_mismatch and not formatting_mismatch
# Compare the return code to the expected return code
wantRC = 0
if "return_code" in testObj:
wantRC = testObj['return_code']
if proc.returncode != wantRC:
logging.error("Return code mismatch for " + outputFn)
raise Exception
if "error_txt" in testObj:
want_error = testObj["error_txt"]
# Compare error text
# TODO: ideally, we'd compare the strings exactly and also assert
# That stderr is empty if no errors are expected. However, ogfuncoin-tx
# emits DISPLAY errors when running as a windows application on
# linux through wine. Just assert that the expected error text appears
# somewhere in stderr.
if want_error not in outs[1]:
logging.error("Error mismatch:\n" + "Expected: " + want_error + "\nReceived: " + outs[1].rstrip())
raise Exception
def parse_output(a, fmt):
"""Parse the output according to specified format.
Raise an error if the output can't be parsed."""
if fmt == 'json': # json: compare parsed data
return json.loads(a)
elif fmt == 'hex': # hex: parse and compare binary data
return binascii.a2b_hex(a.strip())
else:
raise NotImplementedError("Don't know how to compare %s" % fmt)
if __name__ == '__main__':
main()
| main |
entity-processor.py | import xml.dom.minidom
import sys
# this uses 658 MB
document = xml.dom.minidom.parse(sys.stdin)
sets = []
entities = {}
for group in document.getElementsByTagName('group'):
if (group.getAttribute('name') == 'html5' or group.getAttribute('name') == 'mathml'):
for set in group.getElementsByTagName('set'):
sets.append(set.getAttribute('name'))
for entity in document.getElementsByTagName('entity'):
assert entity.parentNode.tagName == 'character'
assert entity.hasAttribute('set')
set = entity.getAttribute('set')
if (set in sets):
assert entity.hasAttribute('id')
name = entity.getAttribute('id') | assert name not in entities or entities[name] == value, '(name: ' + name + ' old value: ' + entities[name] + ' new value: ' + value + ')'
if (name not in entities):
entities[name] = value
if ('-' in value):
value1 = value[1:6];
value2 = value[7:];
glyph = '<span data-x="" class="glyph compound">&#x' + value1 + ';&#x' + value2 + ';</span>'
print(' <tr id="entity-' + name + '"> <td> <code data-x="">' + name + ';</code> </td> <td> U+' + value1 + ' U+' + value2 + ' </td> <td> ' + glyph + ' </td> </tr>');
else:
if (value[1:] in ['020DC', '00311', '020DB', '020DB']):
glyph = '<span data-x="" class="glyph composition">◌' + '&#x' + value[1:] + ';</span>'
elif ('00000' < value[1:] < '00020'):
glyph = '<span data-x="" class="glyph control">$' + value[4:] + ';</span>'
else:
glyph = '<span data-x="" class="glyph">&#x' + value[1:] + ';</span>'
print(' <tr id="entity-' + name + '"> <td> <code data-x="">' + name + ';</code> </td> <td> U+' + value[1:] + ' </td> <td> ' + glyph + ' </td> </tr>'); | assert len(name) > 0
assert entity.parentNode.hasAttribute('id')
value = entity.parentNode.getAttribute('id') |
etcd_test.go | /*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package etcd
import (
"net"
"strings"
"testing"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/registry/registrytest"
"k8s.io/kubernetes/pkg/registry/service/allocator"
allocatoretcd "k8s.io/kubernetes/pkg/registry/service/allocator/etcd"
"k8s.io/kubernetes/pkg/registry/service/ipallocator"
"k8s.io/kubernetes/pkg/storage"
"k8s.io/kubernetes/pkg/storage/etcd/etcdtest"
etcdtesting "k8s.io/kubernetes/pkg/storage/etcd/testing"
"golang.org/x/net/context"
)
func newStorage(t *testing.T) (*etcdtesting.EtcdTestServer, ipallocator.Interface, allocator.Interface, storage.Interface) {
etcdStorage, server := registrytest.NewEtcdStorage(t, "")
_, cidr, err := net.ParseCIDR("192.168.1.0/24")
if err != nil {
t.Fatal(err)
}
var backing allocator.Interface
storage := ipallocator.NewAllocatorCIDRRange(cidr, func(max int, rangeSpec string) allocator.Interface {
mem := allocator.NewAllocationMap(max, rangeSpec)
backing = mem
etcd := allocatoretcd.NewEtcd(mem, "/ranges/serviceips", "serviceipallocation", etcdStorage)
return etcd
})
return server, storage, backing, etcdStorage
}
func validNewRangeAllocation() *api.RangeAllocation {
_, cidr, _ := net.ParseCIDR("192.168.1.0/24")
return &api.RangeAllocation{
Range: cidr.String(),
}
}
func key() string {
s := "/ranges/serviceips"
return etcdtest.AddPrefix(s)
}
func | (t *testing.T) {
server, storage, _, _ := newStorage(t)
defer server.Terminate(t)
if err := storage.Allocate(net.ParseIP("192.168.1.2")); !strings.Contains(err.Error(), "cannot allocate resources of type serviceipallocation at this time") {
t.Fatal(err)
}
}
func TestErrors(t *testing.T) {
server, storage, _, _ := newStorage(t)
defer server.Terminate(t)
if err := storage.Allocate(net.ParseIP("192.168.0.0")); err != ipallocator.ErrNotInRange {
t.Fatal(err)
}
}
func TestStore(t *testing.T) {
server, storage, backing, si := newStorage(t)
defer server.Terminate(t)
if err := si.Set(context.TODO(), key(), validNewRangeAllocation(), nil, 0); err != nil {
t.Fatalf("unexpected error: %v", err)
}
if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != nil {
t.Fatal(err)
}
ok, err := backing.Allocate(1)
if err != nil {
t.Fatal(err)
}
if ok {
t.Fatal("Expected allocation to fail")
}
if err := storage.Allocate(net.ParseIP("192.168.1.2")); err != ipallocator.ErrAllocated {
t.Fatal(err)
}
}
| TestEmpty |
generales.js | $(document).ready(function(e){
$.ajaxSetup({
headers: {
'X-CSRF-TOKEN': $('meta[name="csrf-token"]').attr('content')
}
});
Inputmask({"mask":"9999-9999","clearIncomplete":true}).mask(".telefono");
Inputmask({"mask":"99999999-9","clearIncomplete":true}).mask(".dui");
Inputmask({"mask":"9999-999999-999-9","clearIncomplete":true}).mask(".nit");
$(".chosen-select").chosen({'width': "100%"});
$('.fecha').datepicker({
format: 'dd/mm/yyyy',
minDate: "-60Y",
maxDate: "-18Y",
language:'es',
autoclose:true
});
/* $(".form-control").on("keypress", function () {
$input=$(this);
setTimeout(function () {
$input.val($input.val().toUpperCase());
},50);
})
*/
    // VAT-payable report
$(document).on("click",".ivapagar",function(e){
e.preventDefault();
$("#modal_reporte_iva").modal("show");
});
    // report of work done on vehicles
$(document).on("click",".reportevehiculo",function(e){
e.preventDefault();
$("#modal_reporte_carro").modal("show");
});
    // document lookup
$(document).on("click",".busqueda_modal",function(e){
e.preventDefault();
let type=$(this).attr('data-type');
let text=$(this).attr('data-text');
$("#eltype").val(type);
$("#exampleModalLabel2").text(text);
$("#modal_buscar").modal("show");
});
    // search by document type
$(document).on("submit","#form_buscadoc",function(e){
e.preventDefault();
var numero=$("#numerodoc").val();
var dominio = window.location.host;
let formulario=$("#form_buscadoc").serialize();
if(numero!=''){
$.ajax({
url:'/busqueda',
type:'get',
dataType:'json',
data:formulario,
success: function(json){
if(json[0]==1){
if(json[1]!=null){
toastr.success("Documento encontrado");
location.href=json[2];
}else{
toastr.error("Documento no encontrado");
}
}
}
});
}else{
toastr.error("Digite el número para buscar");
}
});
    // look up a license plate
$(document).on("submit","#form_buscaplaca",function(e){
e.preventDefault();
var placa=$("#laplaquita").val();
var dominio = window.location.host;
var url='/vehiculos/porplaca';
if(placa!=''){
$.ajax({
url:url,
type:'get',
dataType:'json',
data:{placa},
success: function(json){
if(json[0]==1){
if(json[1]!=null){
//location.href='vehiculos/historial/'+placa;
var dominio2 = window.location.host;
/*window.open(
'http://'+dominio2+'/'+carpeta()+'/public/vehiculos/historial/'+json[1].id,
'_blank' // <- This is what makes it open in a new window.
);*/
$("#modal_reporte_carro").modal("hide");
var url = '/vehiculos/historial/'+json[1].id;
$('#verpdf').attr('src', url);
//$('#verpdf').reload();
$("#modal_pdf").modal("show");
}else{
toastr.error("Vehículo no encontrado");
}
}
}
});
}else{
toastr.info("Digite una placa para buscar");
}
});
    // submit for the VAT report by date range
$(document).on("submit","#form_buscaiva",function(e){
e.preventDefault();
var fecha1=$("#fecha1").val();
var fecha2=$("#fecha2").val();
if(fecha1!='' && fecha2!=''){
var dominio = window.location.host;
/*window.open(
'http://'+dominio+'/'+carpeta()+'/public/ivaporventas?fecha1='+fecha1+'&fecha2='+fecha2,
'_blank' // <- This is what makes it open in a new window.
);*/
$("#modal_reporte_iva").modal("hide");
var url = '/ivaporventas?fecha1='+fecha1+'&fecha2='+fecha2;
$('#verpdf').attr('src', url);
//$('#verpdf').reload();
$("#modal_pdf").modal("show");
}
});
});
function modal_cargando(){
swal.fire({
title: 'Cargando!',
text: 'Este diálogo se cerrará al completar la operación.',
allowOutsideClick: false,
allowEscapeKey: false,
showConfirmButton: false,
onOpen: function () {
swal.showLoading()
}
});
}
function carpe | var carpeta = window.location.href;
var nombre = carpeta.split("/");
return nombre[3];
} | ta(){
|
unix_listener.rs | use crate::os::unix::net::{Incoming, SocketAddr, UnixStream};
use async_std::{
io,
os::unix::{
self,
io::{AsRawFd, FromRawFd, IntoRawFd, RawFd}, |
/// A structure representing a Unix domain socket server.
///
/// This corresponds to [`async_std::os::unix::net::UnixListener`].
///
/// Note that this `UnixListener` has no `bind` method. To bind it to a socket
/// address, you must first obtain a [`Dir`] containing the path, and
/// then call [`Dir::bind_unix_listener`].
///
/// [`async_std::os::unix::net::UnixListener`]: https://docs.rs/async-std/latest/async_std/os/unix/net/struct.UnixListener.html
/// [`Dir`]: struct.Dir.html
/// [`Dir::bind_unix_listener`]: struct.Dir.html#method.bind_unix_listener
pub struct UnixListener {
std: unix::net::UnixListener,
}
impl UnixListener {
/// Constructs a new instance of `Self` from the given `async_std::os::unix::net::UnixListener`.
///
/// # Safety
///
/// `async_std::os::unix::net::UnixListener` is not sandboxed and may access any address that
/// the host process has access to.
#[inline]
pub unsafe fn from_std(std: unix::net::UnixListener) -> Self {
Self { std }
}
/// Accepts a new incoming connection to this listener.
///
/// This corresponds to [`async_std::os::unix::net::UnixListener::accept`].
///
/// [`async_std::os::unix::net::UnixListener::accept`]: https://docs.rs/async-std/latest/async_std/os/unix/net/struct.UnixListener.html#method.accept
#[inline]
pub async fn accept(&self) -> io::Result<(UnixStream, SocketAddr)> {
self.std
.accept()
.await
.map(|(unix_stream, addr)| (unsafe { UnixStream::from_std(unix_stream) }, addr))
}
// async_std doesn't have `try_clone`.
/// Returns the local socket address of this listener.
///
/// This corresponds to [`async_std::os::unix::net::UnixListener::local_addr`].
///
/// [`async_std::os::unix::net::UnixListener::local_addr`]: https://docs.rs/async-std/latest/async_std/os/unix/net/struct.UnixListener.html#method.local_addr
#[inline]
pub fn local_addr(&self) -> io::Result<SocketAddr> {
self.std.local_addr()
}
// async_std doesn't have `set_nonblocking`.
// async_std doesn't have `take_error`.
/// Returns an iterator over incoming connections.
///
/// This corresponds to [`async_std::os::unix::net::UnixListener::incoming`].
///
/// [`async_std::os::unix::net::UnixListener::incoming`]: https://docs.rs/async-std/latest/async_std/os/unix/net/struct.UnixListener.html#method.incoming
#[inline]
pub fn incoming(&self) -> Incoming {
let incoming = self.std.incoming();
unsafe { Incoming::from_std(incoming) }
}
}
impl FromRawFd for UnixListener {
#[inline]
unsafe fn from_raw_fd(fd: RawFd) -> Self {
Self::from_std(unix::net::UnixListener::from_raw_fd(fd))
}
}
impl AsRawFd for UnixListener {
#[inline]
fn as_raw_fd(&self) -> RawFd {
self.std.as_raw_fd()
}
}
impl IntoRawFd for UnixListener {
#[inline]
fn into_raw_fd(self) -> RawFd {
self.std.into_raw_fd()
}
}
// async_std's `IntoStream` is unstable.
// TODO: impl Debug for UnixListener | },
}; |
api.go | package smb
import (
"fmt"
"strings"
"github.com/kubernetes-csi/csi-proxy/pkg/utils"
)
| NewSmbLink(remotePath, localPath string) error
NewSmbGlobalMapping(remotePath, username, password string) error
RemoveSmbGlobalMapping(remotePath string) error
}
type SmbAPI struct{}
var _ API = &SmbAPI{}
func New() SmbAPI {
return SmbAPI{}
}
func (SmbAPI) IsSmbMapped(remotePath string) (bool, error) {
cmdLine := `$(Get-SmbGlobalMapping -RemotePath $Env:smbremotepath -ErrorAction Stop).Status `
cmdEnv := fmt.Sprintf("smbremotepath=%s", remotePath)
out, err := utils.RunPowershellCmd(cmdLine, cmdEnv)
if err != nil {
return false, fmt.Errorf("error checking smb mapping. cmd %s, output: %s, err: %v", remotePath, string(out), err)
}
if len(out) == 0 || !strings.EqualFold(strings.TrimSpace(string(out)), "OK") {
return false, nil
}
return true, nil
}
// NewSmbLink - creates a directory symbolic link to the remote share.
// The os.Symlink was having issue for cases where the destination was an SMB share - the container
// runtime would complain stating "Access Denied". Because of this, we had to perform
// this operation with a PowerShell cmdlet that creates a directory soft link.
// Since os.Symlink is currently being used in working code paths, no attempt is made in
// alpha to merge the paths.
// TODO (for beta release): Merge the link paths - os.Symlink and Powershell link path.
func (SmbAPI) NewSmbLink(remotePath, localPath string) error {
if !strings.HasSuffix(remotePath, "\\") {
// Golang has issues resolving paths mapped to file shares if they do not end in a trailing \
// so add one if needed.
remotePath = remotePath + "\\"
}
cmdLine := `New-Item -ItemType SymbolicLink $Env:smblocalPath -Target $Env:smbremotepath`
output, err := utils.RunPowershellCmd(cmdLine, fmt.Sprintf("smbremotepath=%s", remotePath), fmt.Sprintf("smblocalpath=%s", localPath))
if err != nil {
return fmt.Errorf("error linking %s to %s. output: %s, err: %v", remotePath, localPath, string(output), err)
}
return nil
}
func (SmbAPI) NewSmbGlobalMapping(remotePath, username, password string) error {
// use PowerShell Environment Variables to store user input string to prevent command line injection
// https://docs.microsoft.com/en-us/powershell/module/microsoft.powershell.core/about/about_environment_variables?view=powershell-5.1
cmdLine := fmt.Sprintf(`$PWord = ConvertTo-SecureString -String $Env:smbpassword -AsPlainText -Force` +
`;$Credential = New-Object -TypeName System.Management.Automation.PSCredential -ArgumentList $Env:smbuser, $PWord` +
`;New-SmbGlobalMapping -RemotePath $Env:smbremotepath -Credential $Credential -RequirePrivacy $true`)
if output, err := utils.RunPowershellCmd(cmdLine, fmt.Sprintf("smbuser=%s", username),
fmt.Sprintf("smbpassword=%s", password),
fmt.Sprintf("smbremotepath=%s", remotePath)); err != nil {
return fmt.Errorf("NewSmbGlobalMapping failed. output: %q, err: %v", string(output), err)
}
return nil
}
func (SmbAPI) RemoveSmbGlobalMapping(remotePath string) error {
cmd := `Remove-SmbGlobalMapping -RemotePath $Env:smbremotepath -Force`
if output, err := utils.RunPowershellCmd(cmd, fmt.Sprintf("smbremotepath=%s", remotePath)); err != nil {
return fmt.Errorf("UnmountSmbShare failed. output: %q, err: %v", string(output), err)
}
return nil
} | type API interface {
IsSmbMapped(remotePath string) (bool, error) |
utils.ts |
export function | () {
const x = {};
(x as any).__proto__ = null;
(x as any).prototype = null;
return x;
} | nullObj |
test_load.py | from configpp.soil import Config, Group, GroupMember, Transport, ClimberLocation
from voidpp_tools.mocks.file_system import FileSystem, mockfs
_data_filename = 'test1.json'
_data = {_data_filename: '{"a": 42}'}
def test_load_simple_not_found():
cfg = Config(_data_filename)
assert cfg.load() is False
@mockfs({'etc': _data})
def test_load_simple_found_etc():
cfg = Config(_data_filename)
assert cfg.load() is True
assert cfg.data == {"a": 42}
assert cfg.path == '/etc/' + _data_filename
@mockfs({'home': {'douglas': _data}})
def test_load_simple_found_home():
cfg = Config(_data_filename)
assert cfg.load() is True
assert cfg.data == {"a": 42}
@mockfs({'teve': _data}, cwd = '/teve')
def test_load_simple_found_cwd():
cfg = Config(_data_filename)
assert cfg.load() is True
assert cfg.data == {"a": 42}
@mockfs({'etc': _data, 'home': {'douglas': {_data_filename: '{"a": 84}'}}})
def test_load_simple_location_order():
cfg = Config(_data_filename)
assert cfg.load() is True
assert cfg.data == {"a": 84}
@mockfs({'etc': {'test1': {'core.json': '{"a": 42}', 'logger.json': '{"b": 42}'}}})
def test_load_group():
core = GroupMember('core.json')
logger = GroupMember('logger.json')
grp = Group('test1', [core, logger])
assert grp.load()
assert core.data == {"a": 42}
assert logger.data == {"b": 42}
@mockfs({'etc': {'test1': {'logger.json': '{"b": 42}'}}})
def test_cant_load_group_missing_one():
core = GroupMember('core.json')
logger = GroupMember('logger.json')
grp = Group('test1', [core, logger] + [GroupMember('op%s' % i, mandatory=False) for i in range(10)])
assert grp.load() is False
@mockfs({'etc': {'test1': {'logger.json': '{"b": 42}'}}})
def test_cant_load_group_missing_many():
core = GroupMember('core.json')
logger = GroupMember('logger.json')
grp = Group('test1', [core, logger])
assert grp.load() is False
@mockfs({'etc': {'app.json': '{"a": 42}'}})
def test_load_group_single():
core = GroupMember('app.json')
grp = Group('', [core])
assert grp.load()
assert core.data == {"a": 42}
@mockfs({'etc': {'test1': {'core.json': '{"a": 42}'}}})
def test_load_group_optional():
core = GroupMember('core.json')
logger = GroupMember('logger.json', mandatory = False)
grp = Group('test1', [core, logger])
assert grp.load() is True
assert core.data == {"a": 42}
assert core.path == '/etc/test1/core.json'
assert logger.is_loaded is False
@mockfs({
'home': {
'douglas': {
'test1': {
'core.json': '{"a": 21}'
}
}
},
'etc': {
'test1': {
'core.json': '{"a": 42}',
'logger.json': '{"b": 42}',
}
}
})
def test_load_group_optional_full_group_is_more_imporant_than_location_order():
core = GroupMember('core.json')
logger = GroupMember('logger.json', mandatory = False)
grp = Group('test1', [core, logger])
assert grp.load() is True
assert core.data == {"a": 42}
assert logger.is_loaded |
@mockfs({'home': {'douglas': {'teve': {_data_filename: '{"a": 84}'}}}}, cwd = '/home/douglas/teve/muha/subn')
def test_load_simple_climber():
cfg = Config(_data_filename, transport = Transport([ClimberLocation()]))
assert cfg.load() is True
assert cfg.data == {"a": 84}
assert cfg.path == '/home/douglas/teve/' + _data_filename
@mockfs({'home': {'douglas': {'teve': {'test1': {'core.json': '{"a": 42}', 'logger.json': '{"b": 42}'}}}}}, cwd = '/home/douglas/teve/muha/subn')
def test_load_group_climber_loc():
core = GroupMember('core.json')
logger = GroupMember('logger.json')
grp = Group('test1', [core, logger], transport = Transport([ClimberLocation()]))
assert grp.load()
assert core.data == {"a": 42}
assert logger.data == {"b": 42}
assert grp.path == '/home/douglas/teve/test1'
assert core.path == '/home/douglas/teve/test1/core.json' | assert logger.data == {"b": 42} |
types.ts | /*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License; | slm_ui: { enabled: boolean };
} | * you may not use this file except in compliance with the Elastic License.
*/
export interface ClientConfigType { |
etcd3.go | /*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package factory
import (
"context"
"fmt"
"net"
"net/url"
"path"
"strings"
"sync"
"sync/atomic"
"time"
grpcprom "github.com/grpc-ecosystem/go-grpc-prometheus"
"go.etcd.io/etcd/client/pkg/v3/transport"
clientv3 "go.etcd.io/etcd/client/v3"
"google.golang.org/grpc"
"k8s.io/apimachinery/pkg/runtime"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/server/egressselector"
"k8s.io/apiserver/pkg/storage"
"k8s.io/apiserver/pkg/storage/etcd3"
"k8s.io/apiserver/pkg/storage/etcd3/metrics"
"k8s.io/apiserver/pkg/storage/storagebackend"
"k8s.io/apiserver/pkg/storage/value"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/klog/v2"
)
const (
// The short keepalive timeout and interval have been chosen to aggressively
// detect a failed etcd server without introducing much overhead.
keepaliveTime = 30 * time.Second
keepaliveTimeout = 10 * time.Second
// dialTimeout is the timeout for failing to establish a connection.
// It is set to 20 seconds as times shorter than that will cause TLS connections to fail
// on heavily loaded arm64 CPUs (issue #64649)
dialTimeout = 20 * time.Second
dbMetricsMonitorJitter = 0.5
)
func init() {
// grpcprom auto-registers its client metrics via an init function. Since we are opting out of
// using the global prometheus registry and using our own wrapped global registry,
// we need to explicitly register these metrics with our registry here.
// For reference: https://github.com/kubernetes/kubernetes/pull/81387
legacyregistry.RawMustRegister(grpcprom.DefaultClientMetrics)
dbMetricsMonitors = make(map[string]struct{})
}
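// newETCD3HealthCheck returns a health-check func that reports an error until the etcd
// client has been established in the background, then probes etcd by reading the "health"
// key under the configured prefix.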
func newETCD3HealthCheck(c storagebackend.Config) (func() error, error) {
// constructing the etcd v3 client blocks and times out if etcd is not available.
// retry in a loop in the background until we successfully create the client, storing the client or error encountered
clientValue := &atomic.Value{}
clientErrMsg := &atomic.Value{}
clientErrMsg.Store("etcd client connection not yet established")
go wait.PollUntil(time.Second, func() (bool, error) {
client, err := newETCD3Client(c.Transport)
if err != nil {
clientErrMsg.Store(err.Error())
return false, nil
}
clientValue.Store(client)
clientErrMsg.Store("")
return true, nil
}, wait.NeverStop)
return func() error {
if errMsg := clientErrMsg.Load().(string); len(errMsg) > 0 {
return fmt.Errorf("%s", errMsg)
}
client := clientValue.Load().(*clientv3.Client)
healthcheckTimeout := storagebackend.DefaultHealthcheckTimeout
if c.HealthcheckTimeout != time.Duration(0) {
healthcheckTimeout = c.HealthcheckTimeout
}
ctx, cancel := context.WithTimeout(context.Background(), healthcheckTimeout)
defer cancel()
// See https://github.com/etcd-io/etcd/blob/c57f8b3af865d1b531b979889c602ba14377420e/etcdctl/ctlv3/command/ep_command.go#L118
_, err := client.Get(ctx, path.Join("/", c.Prefix, "health"))
if err == nil {
return nil
}
return fmt.Errorf("error getting data from etcd: %v", err)
}, nil
}
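// newETCD3Client constructs an etcd v3 client from the transport config, wiring up TLS
// (cleared for non-secure connections), the optional egress dialer, and the gRPC
// prometheus interceptors.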
func newETCD3Client(c storagebackend.TransportConfig) (*clientv3.Client, error) {
tlsInfo := transport.TLSInfo{
CertFile: c.CertFile,
KeyFile: c.KeyFile,
TrustedCAFile: c.TrustedCAFile,
}
tlsConfig, err := tlsInfo.ClientConfig()
if err != nil {
return nil, err
}
// NOTE: the etcd client relies on a nil tlsConfig for non-secure connections,
// so clear it when no cert, key, or CA files are configured.
if len(c.CertFile) == 0 && len(c.KeyFile) == 0 && len(c.TrustedCAFile) == 0 {
tlsConfig = nil
}
networkContext := egressselector.Etcd.AsNetworkContext()
var egressDialer utilnet.DialFunc
if c.EgressLookup != nil {
egressDialer, err = c.EgressLookup(networkContext)
if err != nil { | }
}
dialOptions := []grpc.DialOption{
grpc.WithBlock(), // block until the underlying connection is up
grpc.WithUnaryInterceptor(grpcprom.UnaryClientInterceptor),
grpc.WithStreamInterceptor(grpcprom.StreamClientInterceptor),
}
if egressDialer != nil {
dialer := func(ctx context.Context, addr string) (net.Conn, error) {
if strings.Contains(addr, "//") {
// etcd client prior to 3.5 passed URLs to dialer, normalize to address
u, err := url.Parse(addr)
if err != nil {
return nil, err
}
addr = u.Host
}
return egressDialer(ctx, "tcp", addr)
}
dialOptions = append(dialOptions, grpc.WithContextDialer(dialer))
}
cfg := clientv3.Config{
DialTimeout: dialTimeout,
DialKeepAliveTime: keepaliveTime,
DialKeepAliveTimeout: keepaliveTimeout,
DialOptions: dialOptions,
Endpoints: c.ServerList,
TLS: tlsConfig,
}
return clientv3.New(cfg)
}
type runningCompactor struct {
interval time.Duration
cancel context.CancelFunc
client *clientv3.Client
refs int
}
var (
// compactorsMu guards access to compactors map
compactorsMu sync.Mutex
compactors = map[string]*runningCompactor{}
// dbMetricsMonitorsMu guards access to dbMetricsMonitors map
dbMetricsMonitorsMu sync.Mutex
dbMetricsMonitors map[string]struct{}
)
// startCompactorOnce starts one compactor per transport. If the interval gets smaller on repeated calls, the
// compactor is replaced. A destroy func is returned. If all destroy funcs with the same transport are called,
// the compactor is stopped.
func startCompactorOnce(c storagebackend.TransportConfig, interval time.Duration) (func(), error) {
compactorsMu.Lock()
defer compactorsMu.Unlock()
key := fmt.Sprintf("%v", c) // gives: {[server1 server2] keyFile certFile caFile}
if compactor, foundBefore := compactors[key]; !foundBefore || compactor.interval > interval {
compactorClient, err := newETCD3Client(c)
if err != nil {
return nil, err
}
if foundBefore {
// replace compactor
compactor.cancel()
compactor.client.Close()
} else {
// start new compactor
compactor = &runningCompactor{}
compactors[key] = compactor
}
ctx, cancel := context.WithCancel(context.Background())
compactor.interval = interval
compactor.cancel = cancel
compactor.client = compactorClient
etcd3.StartCompactor(ctx, compactorClient, interval)
}
compactors[key].refs++
return func() {
compactorsMu.Lock()
defer compactorsMu.Unlock()
compactor := compactors[key]
compactor.refs--
if compactor.refs == 0 {
compactor.cancel()
compactor.client.Close()
delete(compactors, key)
}
}, nil
}
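// newETCD3Storage creates an etcd3-backed storage.Interface for the given config. It shares
// one compactor per transport and one DB-size monitor per endpoint; the returned DestroyFunc
// stops both and closes the client exactly once.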
func newETCD3Storage(c storagebackend.Config, newFunc func() runtime.Object) (storage.Interface, DestroyFunc, error) {
stopCompactor, err := startCompactorOnce(c.Transport, c.CompactionInterval)
if err != nil {
return nil, nil, err
}
client, err := newETCD3Client(c.Transport)
if err != nil {
stopCompactor()
return nil, nil, err
}
stopDBSizeMonitor, err := startDBSizeMonitorPerEndpoint(client, c.DBMetricPollInterval)
if err != nil {
stopCompactor()
client.Close()
return nil, nil, err
}
var once sync.Once
destroyFunc := func() {
// we know that storage destroy funcs are called multiple times (due to reuse in subresources).
// Hence, we only destroy once.
// TODO: fix duplicated storage destroy calls higher level
once.Do(func() {
stopCompactor()
stopDBSizeMonitor()
client.Close()
})
}
transformer := c.Transformer
if transformer == nil {
transformer = value.IdentityTransformer
}
store := etcd3.New(client, c.Codec, newFunc, c.Prefix, transformer, c.Paging, c.LeaseManagerConfig)
if c.ObjectCountTracker != nil {
store = etcd3.WithObjectCountTracker(store, c.ObjectCountTracker)
}
return store, destroyFunc, nil
}
// startDBSizeMonitorPerEndpoint starts a loop to monitor etcd database size and update the
// corresponding metric etcd_db_total_size_in_bytes for each etcd server endpoint.
func startDBSizeMonitorPerEndpoint(client *clientv3.Client, interval time.Duration) (func(), error) {
if interval == 0 {
return func() {}, nil
}
dbMetricsMonitorsMu.Lock()
defer dbMetricsMonitorsMu.Unlock()
ctx, cancel := context.WithCancel(context.Background())
for _, ep := range client.Endpoints() {
if _, found := dbMetricsMonitors[ep]; found {
continue
}
dbMetricsMonitors[ep] = struct{}{}
endpoint := ep
klog.V(4).Infof("Start monitoring storage db size metric for endpoint %s with polling interval %v", endpoint, interval)
go wait.JitterUntilWithContext(ctx, func(context.Context) {
epStatus, err := client.Maintenance.Status(ctx, endpoint)
if err != nil {
klog.V(4).Infof("Failed to get storage db size for ep %s: %v", endpoint, err)
metrics.UpdateEtcdDbSize(endpoint, -1)
} else {
metrics.UpdateEtcdDbSize(endpoint, epStatus.DbSize)
}
}, interval, dbMetricsMonitorJitter, true)
}
return func() {
cancel()
}, nil
} | return nil, err |
config.py | #===----------------------------------------------------------------------===##
#
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
#===----------------------------------------------------------------------===##
import copy
import os
import pkgutil
import pipes
import platform
import re
import shlex
import shutil
import sys
from libcxx.compiler import CXXCompiler
from libcxx.test.target_info import make_target_info
import libcxx.util
import libcxx.test.features
import libcxx.test.newconfig
import libcxx.test.params
import lit
def loadSiteConfig(lit_config, config, param_name, env_name):
# We haven't loaded the site specific configuration (the user is
# probably trying to run on a test file directly, and either the site
# configuration hasn't been created by the build system, or we are in an
# out-of-tree build situation).
site_cfg = lit_config.params.get(param_name,
os.environ.get(env_name))
if not site_cfg:
lit_config.warning('No site specific configuration file found!'
' Running the tests in the default configuration.')
elif not os.path.isfile(site_cfg):
lit_config.fatal(
"Specified site configuration file does not exist: '%s'" %
site_cfg)
else:
lit_config.note('using site specific configuration at %s' % site_cfg)
ld_fn = lit_config.load_config
# Null out the load_config function so that lit.site.cfg doesn't
# recursively load a config even if it tries.
# TODO: This is one hell of a hack. Fix it.
def prevent_reload_fn(*args, **kwargs):
pass
lit_config.load_config = prevent_reload_fn
ld_fn(config, site_cfg)
lit_config.load_config = ld_fn
# Extract the value of a numeric macro such as __cplusplus or a feature-test
# macro.
def intMacroValue(token):
return int(token.rstrip('LlUu'))
class Configuration(object):
# pylint: disable=redefined-outer-name
def __init__(self, lit_config, config):
self.lit_config = lit_config
self.config = config
self.cxx = None
self.cxx_is_clang_cl = None
self.cxx_stdlib_under_test = None
self.project_obj_root = None
self.libcxx_src_root = None
self.libcxx_obj_root = None
self.cxx_library_root = None
self.cxx_runtime_root = None
self.abi_library_root = None
self.link_shared = self.get_lit_bool('enable_shared', default=True)
self.debug_build = self.get_lit_bool('debug_build', default=False)
self.exec_env = dict()
self.use_clang_verify = False
def get_lit_conf(self, name, default=None):
val = self.lit_config.params.get(name, None)
if val is None:
val = getattr(self.config, name, None)
if val is None:
val = default
return val
def get_lit_bool(self, name, default=None, env_var=None):
def check_value(value, var_name):
if value is None:
return default
if isinstance(value, bool):
return value
if not isinstance(value, str):
raise TypeError('expected bool or string')
if value.lower() in ('1', 'true'):
return True
if value.lower() in ('', '0', 'false'):
return False
self.lit_config.fatal(
"parameter '{}' should be true or false".format(var_name))
conf_val = self.get_lit_conf(name)
if env_var is not None and env_var in os.environ and \
os.environ[env_var] is not None:
val = os.environ[env_var]
if conf_val is not None:
self.lit_config.warning(
'Environment variable %s=%s is overriding explicit '
'--param=%s=%s' % (env_var, val, name, conf_val))
return check_value(val, env_var)
return check_value(conf_val, name)
def make_static_lib_name(self, name):
"""Return the full filename for the specified library name"""
if self.target_info.is_windows() and not self.target_info.is_mingw():
assert name == 'c++' # Only allow libc++ to use this function for now.
return 'lib' + name + '.lib'
else:
return 'lib' + name + '.a'
def configure(self):
self.target_info = make_target_info(self)
self.executor = self.get_lit_conf('executor')
self.configure_cxx()
self.configure_src_root()
self.configure_obj_root()
self.cxx_stdlib_under_test = self.get_lit_conf('cxx_stdlib_under_test', 'libc++')
self.cxx_library_root = self.get_lit_conf('cxx_library_root', self.libcxx_obj_root)
self.abi_library_root = self.get_lit_conf('abi_library_root') or self.cxx_library_root
self.cxx_runtime_root = self.get_lit_conf('cxx_runtime_root', self.cxx_library_root)
self.abi_runtime_root = self.get_lit_conf('abi_runtime_root', self.abi_library_root)
self.configure_compile_flags()
self.configure_link_flags()
self.configure_env()
self.configure_coverage()
self.configure_substitutions()
self.configure_features()
libcxx.test.newconfig.configure(
libcxx.test.params.DEFAULT_PARAMETERS,
libcxx.test.features.DEFAULT_FEATURES,
self.config,
self.lit_config
)
self.lit_config.note("All available features: {}".format(self.config.available_features))
def print_config_info(self):
if self.cxx.use_modules:
self.lit_config.note('Using modules flags: %s' %
self.cxx.modules_flags)
if len(self.cxx.warning_flags):
self.lit_config.note('Using warnings: %s' % self.cxx.warning_flags)
show_env_vars = {}
for k,v in self.exec_env.items():
if k not in os.environ or os.environ[k] != v:
show_env_vars[k] = v
self.lit_config.note('Adding environment variables: %r' % show_env_vars)
self.lit_config.note("Linking against the C++ Library at {}".format(self.cxx_library_root))
self.lit_config.note("Running against the C++ Library at {}".format(self.cxx_runtime_root))
self.lit_config.note("Linking against the ABI Library at {}".format(self.abi_library_root))
self.lit_config.note("Running against the ABI Library at {}".format(self.abi_runtime_root))
sys.stderr.flush() # Force flushing to avoid broken output on Windows
def get_test_format(self):
from libcxx.test.format import LibcxxTestFormat
return LibcxxTestFormat(
self.cxx,
self.use_clang_verify,
self.executor,
exec_env=self.exec_env)
def configure_cxx(self):
# Gather various compiler parameters.
cxx = self.get_lit_conf('cxx_under_test')
self.cxx_is_clang_cl = cxx is not None and \
os.path.basename(cxx).startswith('clang-cl')
# If no specific cxx_under_test was given, attempt to infer it as
# clang++.
if cxx is None or self.cxx_is_clang_cl:
search_paths = self.config.environment['PATH']
if cxx is not None and os.path.isabs(cxx):
search_paths = os.path.dirname(cxx)
clangxx = libcxx.util.which('clang++', search_paths)
if clangxx:
cxx = clangxx
self.lit_config.note(
"inferred cxx_under_test as: %r" % cxx)
elif self.cxx_is_clang_cl:
self.lit_config.fatal('Failed to find clang++ substitution for'
' clang-cl')
if not cxx:
self.lit_config.fatal('must specify user parameter cxx_under_test '
'(e.g., --param=cxx_under_test=clang++)')
self.cxx = CXXCompiler(self, cxx) if not self.cxx_is_clang_cl else \
self._configure_clang_cl(cxx)
self.cxx.compile_env = dict(os.environ)
def _configure_clang_cl(self, clang_path):
def _split_env_var(var):
return [p.strip() for p in os.environ.get(var, '').split(';') if p.strip()]
def _prefixed_env_list(var, prefix):
from itertools import chain
return list(chain.from_iterable((prefix, path) for path in _split_env_var(var)))
assert self.cxx_is_clang_cl
flags = []
compile_flags = []
link_flags = _prefixed_env_list('LIB', '-L')
return CXXCompiler(self, clang_path, flags=flags,
compile_flags=compile_flags,
link_flags=link_flags)
def configure_src_root(self):
self.libcxx_src_root = self.get_lit_conf(
'libcxx_src_root', os.path.dirname(self.config.test_source_root))
def configure_obj_root(self):
self.project_obj_root = self.get_lit_conf('project_obj_root')
self.libcxx_obj_root = self.get_lit_conf('libcxx_obj_root')
if not self.libcxx_obj_root and self.project_obj_root is not None:
possible_roots = [
os.path.join(self.project_obj_root, 'libcxx'),
os.path.join(self.project_obj_root, 'projects', 'libcxx'),
os.path.join(self.project_obj_root, 'runtimes', 'libcxx'),
]
for possible_root in possible_roots:
if os.path.isdir(possible_root):
self.libcxx_obj_root = possible_root
break
else:
self.libcxx_obj_root = self.project_obj_root
def configure_features(self):
if self.target_info.is_windows():
if self.cxx_stdlib_under_test == 'libc++':
# LIBCXX-WINDOWS-FIXME is the feature name used to XFAIL the
# initial Windows failures until they can be properly diagnosed
# and fixed. This allows easier detection of new test failures
# and regressions. Note: New failures should not be suppressed
# using this feature. (Also see llvm.org/PR32730)
self.config.available_features.add('LIBCXX-WINDOWS-FIXME')
def configure_compile_flags(self):
self.configure_default_compile_flags()
# Configure extra flags
compile_flags_str = self.get_lit_conf('compile_flags', '')
self.cxx.compile_flags += shlex.split(compile_flags_str)
if self.target_info.is_windows():
self.cxx.compile_flags += ['-D_CRT_SECURE_NO_WARNINGS']
# Don't warn about using common but nonstandard unprefixed functions
# like chdir, fileno.
self.cxx.compile_flags += ['-D_CRT_NONSTDC_NO_WARNINGS']
# Build the tests in the same configuration as libcxx itself,
# to avoid mismatches if linked statically.
self.cxx.compile_flags += ['-D_CRT_STDIO_ISO_WIDE_SPECIFIERS']
# Required so that tests using min/max don't fail on Windows,
# and so that those tests don't have to be changed to tolerate
# this insanity.
self.cxx.compile_flags += ['-DNOMINMAX']
additional_flags = self.get_lit_conf('test_compiler_flags')
if additional_flags:
self.cxx.compile_flags += shlex.split(additional_flags)
def configure_default_compile_flags(self):
# Configure include paths
self.configure_compile_flags_header_includes()
self.target_info.add_cxx_compile_flags(self.cxx.compile_flags)
self.target_info.add_cxx_flags(self.cxx.flags)
# Use verbose output for better errors
self.cxx.flags += ['-v']
sysroot = self.get_lit_conf('sysroot')
if sysroot:
self.cxx.flags += ['--sysroot=' + sysroot]
gcc_toolchain = self.get_lit_conf('gcc_toolchain')
if gcc_toolchain:
self.cxx.flags += ['--gcc-toolchain=' + gcc_toolchain]
# NOTE: the _DEBUG definition must precede the triple check because for
# the Windows build of libc++, the forced inclusion of a header requires
# that _DEBUG is defined. Incorrect ordering will result in -target
# being elided.
if self.target_info.is_windows() and self.debug_build:
self.cxx.compile_flags += ['-D_DEBUG']
# Add includes for support headers used in the tests.
support_path = os.path.join(self.libcxx_src_root, 'test/support')
self.cxx.compile_flags += ['-I' + support_path]
# On GCC, the libc++ headers cause errors due to throw() decorators
# on operator new clashing with those from the test suite, so we
# don't enable warnings in system headers on GCC.
if self.cxx.type != 'gcc':
self.cxx.compile_flags += ['-D_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER']
# Add includes for the PSTL headers
pstl_src_root = self.get_lit_conf('pstl_src_root')
pstl_obj_root = self.get_lit_conf('pstl_obj_root')
if pstl_src_root is not None and pstl_obj_root is not None:
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'include')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_obj_root, 'generated_headers')]
self.cxx.compile_flags += ['-I' + os.path.join(pstl_src_root, 'test')]
self.config.available_features.add('parallel-algorithms')
def configure_compile_flags_header_includes(self):
support_path = os.path.join(self.libcxx_src_root, 'test', 'support')
if self.cxx_stdlib_under_test != 'libstdc++' and \
not self.target_info.is_windows() and \
not self.target_info.is_zos():
self.cxx.compile_flags += [
'-include', os.path.join(support_path, 'nasty_macros.h')]
if self.cxx_stdlib_under_test == 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'msvc_stdlib_force_include.h')]
pass
if self.target_info.is_windows() and self.debug_build and \
self.cxx_stdlib_under_test != 'msvc':
self.cxx.compile_flags += [
'-include', os.path.join(support_path,
'set_windows_crt_report_mode.h')
]
cxx_headers = self.get_lit_conf('cxx_headers')
if cxx_headers is None and self.cxx_stdlib_under_test != 'libc++':
self.lit_config.note('using the system cxx headers')
return
self.cxx.compile_flags += ['-nostdinc++']
if not os.path.isdir(cxx_headers):
self.lit_config.fatal("cxx_headers='{}' is not a directory.".format(cxx_headers))
(path, version) = os.path.split(cxx_headers)
(path, cxx) = os.path.split(path)
triple = self.get_lit_conf('target_triple', None)
if triple is not None:
cxx_target_headers = os.path.join(path, triple, cxx, version)
if os.path.isdir(cxx_target_headers):
self.cxx.compile_flags += ['-I' + cxx_target_headers]
self.cxx.compile_flags += ['-I' + cxx_headers]
if self.libcxx_obj_root is not None:
cxxabi_headers = os.path.join(self.libcxx_obj_root, 'include',
'c++build')
if os.path.isdir(cxxabi_headers):
self.cxx.compile_flags += ['-I' + cxxabi_headers]
def configure_link_flags(self):
# Configure library path
self.configure_link_flags_cxx_library_path()
self.configure_link_flags_abi_library_path()
# Configure libraries
if self.cxx_stdlib_under_test == 'libc++':
if self.target_info.is_mingw():
self.cxx.link_flags += ['-nostdlib++']
else:
self.cxx.link_flags += ['-nodefaultlibs']
# FIXME: Handle MSVCRT as part of the ABI library handling.
if self.target_info.is_windows() and not self.target_info.is_mingw():
self.cxx.link_flags += ['-nostdlib']
self.configure_link_flags_cxx_library()
self.configure_link_flags_abi_library()
self.configure_extra_library_flags()
elif self.cxx_stdlib_under_test == 'libstdc++':
self.cxx.link_flags += ['-lstdc++fs', '-lm', '-pthread']
elif self.cxx_stdlib_under_test == 'msvc':
# FIXME: Correctly setup debug/release flags here.
pass
elif self.cxx_stdlib_under_test == 'cxx_default':
self.cxx.link_flags += ['-pthread']
else:
self.lit_config.fatal('invalid stdlib under test')
link_flags_str = self.get_lit_conf('link_flags', '')
self.cxx.link_flags += shlex.split(link_flags_str)
def configure_link_flags_cxx_library_path(self):
if self.cxx_library_root:
self.cxx.link_flags += ['-L' + self.cxx_library_root]
if self.target_info.is_windows() and self.link_shared:
self.add_path(self.cxx.compile_env, self.cxx_library_root)
if self.cxx_runtime_root:
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' +
self.cxx_runtime_root]
elif self.target_info.is_windows() and self.link_shared:
self.add_path(self.exec_env, self.cxx_runtime_root)
additional_flags = self.get_lit_conf('test_linker_flags')
if additional_flags:
self.cxx.link_flags += shlex.split(additional_flags)
def configure_link_flags_abi_library_path(self):
# Configure ABI library paths.
if self.abi_library_root:
self.cxx.link_flags += ['-L' + self.abi_library_root]
if self.abi_runtime_root:
if not self.target_info.is_windows():
self.cxx.link_flags += ['-Wl,-rpath,' + self.abi_runtime_root]
else:
self.add_path(self.exec_env, self.abi_runtime_root)
def configure_link_flags_cxx_library(self):
|
def configure_link_flags_abi_library(self):
cxx_abi = self.get_lit_conf('cxx_abi', 'libcxxabi')
if cxx_abi == 'libstdc++':
self.cxx.link_flags += ['-lstdc++']
elif cxx_abi == 'libsupc++':
self.cxx.link_flags += ['-lsupc++']
elif cxx_abi == 'libcxxabi':
# If the C++ library requires explicitly linking to libc++abi, or
# if we're testing libc++abi itself (the test configs are shared),
# then link it.
testing_libcxxabi = self.get_lit_conf('name', '') == 'libc++abi'
if self.target_info.allow_cxxabi_link() or testing_libcxxabi:
libcxxabi_shared = self.get_lit_bool('libcxxabi_shared', default=True)
if libcxxabi_shared:
self.cxx.link_flags += ['-lc++abi']
else:
if self.abi_library_root:
libname = self.make_static_lib_name('c++abi')
abs_path = os.path.join(self.abi_library_root, libname)
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++abi']
elif cxx_abi == 'libcxxrt':
self.cxx.link_flags += ['-lcxxrt']
elif cxx_abi == 'vcruntime':
debug_suffix = 'd' if self.debug_build else ''
# This matches the set of libraries linked in the toplevel
# libcxx CMakeLists.txt if building targeting msvc.
self.cxx.link_flags += ['-l%s%s' % (lib, debug_suffix) for lib in
['vcruntime', 'ucrt', 'msvcrt', 'msvcprt']]
# The compiler normally links in oldnames.lib too, but we've
# specified -nostdlib above, so we need to specify it manually.
self.cxx.link_flags += ['-loldnames']
elif cxx_abi == 'none' or cxx_abi == 'default':
if self.target_info.is_windows():
debug_suffix = 'd' if self.debug_build else ''
self.cxx.link_flags += ['-lmsvcrt%s' % debug_suffix]
else:
self.lit_config.fatal(
'C++ ABI setting %s unsupported for tests' % cxx_abi)
def configure_extra_library_flags(self):
if self.get_lit_bool('cxx_ext_threads', default=False):
self.cxx.link_flags += ['-lc++external_threads']
self.target_info.add_cxx_link_flags(self.cxx.link_flags)
def configure_coverage(self):
self.generate_coverage = self.get_lit_bool('generate_coverage', False)
if self.generate_coverage:
self.cxx.flags += ['-g', '--coverage']
self.cxx.compile_flags += ['-O0']
def quote(self, s):
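"""Quote a string for safe use on the command line, using Windows or POSIX rules as appropriate."""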
if platform.system() == 'Windows':
return lit.TestRunner.quote_windows_command([s])
return pipes.quote(s)
def configure_substitutions(self):
sub = self.config.substitutions
sub.append(('%{cxx}', self.quote(self.cxx.path)))
flags = self.cxx.flags + (self.cxx.modules_flags if self.cxx.use_modules else [])
compile_flags = self.cxx.compile_flags + (self.cxx.warning_flags if self.cxx.use_warnings else [])
sub.append(('%{flags}', ' '.join(map(self.quote, flags))))
sub.append(('%{compile_flags}', ' '.join(map(self.quote, compile_flags))))
sub.append(('%{link_flags}', ' '.join(map(self.quote, self.cxx.link_flags))))
codesign_ident = self.get_lit_conf('llvm_codesign_identity', '')
env_vars = ' '.join('%s=%s' % (k, self.quote(v)) for (k, v) in self.exec_env.items())
exec_args = [
'--execdir %T',
'--codesign_identity "{}"'.format(codesign_ident),
'--env {}'.format(env_vars)
]
sub.append(('%{exec}', '{} {} -- '.format(self.executor, ' '.join(exec_args))))
def configure_env(self):
self.config.environment = dict(os.environ)
def add_path(self, dest_env, new_path):
self.target_info.add_path(dest_env, new_path)
| if self.link_shared:
self.cxx.link_flags += ['-lc++']
else:
if self.cxx_library_root:
libname = self.make_static_lib_name('c++')
abs_path = os.path.join(self.cxx_library_root, libname)
assert os.path.exists(abs_path), \
"static libc++ library does not exist"
self.cxx.link_flags += [abs_path]
else:
self.cxx.link_flags += ['-lc++'] |
static.go | // Handles serving static resources
package main
import (
"net/http"
"os"
"github.com/gorilla/mux"
)
// registerStaticHandler maps the /static path to the resources folder.
// Only files are mapped (not folders) to disallow directory listings.
func registerStaticHandler(mux *mux.Router) |
// A file system that restricts opening files to only files rather than folders.
type directoryFilteringFileSystem struct {
fs http.FileSystem
}
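// Open opens the named file via the wrapped FileSystem but reports directories (and files
// that fail to stat) as not existing, so they cannot be listed or served.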
func (fs directoryFilteringFileSystem) Open(path string) (http.File, error) {
f, err := fs.fs.Open(path)
if err != nil {
return nil, err
}
s, err := f.Stat()
if err != nil || s.IsDir() {
return nil, os.ErrNotExist
}
return f, nil
}
| {
fs := directoryFilteringFileSystem{http.Dir("frontend/resources/")}
mux.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(fs)))
} |
join.py | # python join code
# Copyright Andrew Tridgell 2010
# Copyright Andrew Bartlett 2010
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Joining a domain."""
from samba.auth import system_session
from samba.samdb import SamDB
from samba import gensec, Ldb, drs_utils, arcfour_encrypt, string_to_byte_array
import ldb, samba, sys, uuid
from samba.ndr import ndr_pack
from samba.dcerpc import security, drsuapi, misc, nbt, lsa, drsblobs
from samba.dsdb import DS_DOMAIN_FUNCTION_2003
from samba.credentials import Credentials, DONT_USE_KERBEROS
from samba.provision import secretsdb_self_join, provision, provision_fill, FILL_DRS, FILL_SUBDOMAIN
from samba.provision.common import setup_path
from samba.schema import Schema
from samba import descriptor
from samba.net import Net
from samba.provision.sambadns import setup_bind9_dns
from samba import read_and_sub_file
from base64 import b64encode
import logging
import talloc
import random
import time
# this makes debugging easier
talloc.enable_null_tracking()
class DCJoinException(Exception):
def __init__(self, msg):
super(DCJoinException, self).__init__("Can't join, error: %s" % msg)
class dc_join(object):
"""Perform a DC join."""
def __init__(ctx, logger=None, server=None, creds=None, lp=None, site=None,
netbios_name=None, targetdir=None, domain=None,
machinepass=None, use_ntvfs=False, dns_backend=None,
promote_existing=False):
ctx.logger = logger
ctx.creds = creds
ctx.lp = lp
ctx.site = site
ctx.netbios_name = netbios_name
ctx.targetdir = targetdir
ctx.use_ntvfs = use_ntvfs
ctx.promote_existing = promote_existing
ctx.promote_from_dn = None
ctx.nc_list = []
ctx.full_nc_list = []
ctx.creds.set_gensec_features(creds.get_gensec_features() | gensec.FEATURE_SEAL)
ctx.net = Net(creds=ctx.creds, lp=ctx.lp)
if server is not None:
ctx.server = server
else:
ctx.logger.info("Finding a writeable DC for domain '%s'" % domain)
ctx.server = ctx.find_dc(domain)
ctx.logger.info("Found DC %s" % ctx.server)
ctx.samdb = SamDB(url="ldap://%s" % ctx.server,
session_info=system_session(),
credentials=ctx.creds, lp=ctx.lp)
try:
ctx.samdb.search(scope=ldb.SCOPE_ONELEVEL, attrs=["dn"])
except ldb.LdbError, (enum, estr):
raise DCJoinException(estr)
ctx.myname = netbios_name
ctx.samname = "%s$" % ctx.myname
ctx.base_dn = str(ctx.samdb.get_default_basedn())
ctx.root_dn = str(ctx.samdb.get_root_basedn())
ctx.schema_dn = str(ctx.samdb.get_schema_basedn())
ctx.config_dn = str(ctx.samdb.get_config_basedn())
ctx.domsid = security.dom_sid(ctx.samdb.get_domain_sid())
ctx.forestsid = ctx.domsid
ctx.domain_name = ctx.get_domain_name()
ctx.forest_domain_name = ctx.get_forest_domain_name()
ctx.invocation_id = misc.GUID(str(uuid.uuid4()))
ctx.dc_ntds_dn = ctx.samdb.get_dsServiceName()
ctx.dc_dnsHostName = ctx.get_dnsHostName()
ctx.behavior_version = ctx.get_behavior_version()
if machinepass is not None:
ctx.acct_pass = machinepass
else:
ctx.acct_pass = samba.generate_random_password(32, 40)
# work out the DNs of all the objects we will be adding
ctx.server_dn = "CN=%s,CN=Servers,CN=%s,CN=Sites,%s" % (ctx.myname, ctx.site, ctx.config_dn)
ctx.ntds_dn = "CN=NTDS Settings,%s" % ctx.server_dn
topology_base = "CN=Topology,CN=Domain System Volume,CN=DFSR-GlobalSettings,CN=System,%s" % ctx.base_dn
if ctx.dn_exists(topology_base):
ctx.topology_dn = "CN=%s,%s" % (ctx.myname, topology_base)
else:
ctx.topology_dn = None
ctx.dnsdomain = ctx.samdb.domain_dns_name()
ctx.dnsforest = ctx.samdb.forest_dns_name()
ctx.domaindns_zone = 'DC=DomainDnsZones,%s' % ctx.base_dn
ctx.forestdns_zone = 'DC=ForestDnsZones,%s' % ctx.root_dn
res_domaindns = ctx.samdb.search(scope=ldb.SCOPE_ONELEVEL,
attrs=[],
base=ctx.samdb.get_partitions_dn(),
expression="(&(objectClass=crossRef)(ncName=%s))" % ctx.domaindns_zone)
if dns_backend is None:
ctx.dns_backend = "NONE"
else:
if len(res_domaindns) == 0:
ctx.dns_backend = "NONE"
print "NO DNS zone information found in source domain, not replicating DNS"
else:
ctx.dns_backend = dns_backend
ctx.dnshostname = "%s.%s" % (ctx.myname.lower(), ctx.dnsdomain)
ctx.realm = ctx.dnsdomain
ctx.acct_dn = "CN=%s,OU=Domain Controllers,%s" % (ctx.myname, ctx.base_dn)
ctx.tmp_samdb = None
ctx.SPNs = [ "HOST/%s" % ctx.myname,
"HOST/%s" % ctx.dnshostname,
"GC/%s/%s" % (ctx.dnshostname, ctx.dnsforest) ]
# these elements are optional
ctx.never_reveal_sid = None
ctx.reveal_sid = None
ctx.connection_dn = None
ctx.RODC = False
ctx.krbtgt_dn = None
ctx.drsuapi = None
ctx.managedby = None
ctx.subdomain = False
ctx.adminpass = None
def del_noerror(ctx, dn, recursive=False):
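'''delete a DN, ignoring errors; with recursive=True, delete any child objects first'''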
if recursive:
try:
res = ctx.samdb.search(base=dn, scope=ldb.SCOPE_ONELEVEL, attrs=["dn"])
except Exception:
return
for r in res:
ctx.del_noerror(r.dn, recursive=True)
try:
ctx.samdb.delete(dn)
print "Deleted %s" % dn
except Exception:
pass
def cleanup_old_join(ctx):
"""Remove any DNs from a previous join."""
try:
# find the krbtgt link
print("checking sAMAccountName")
if ctx.subdomain:
res = None
else:
res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(),
expression='sAMAccountName=%s' % ldb.binary_encode(ctx.samname),
attrs=["msDS-krbTgtLink"])
if res:
ctx.del_noerror(res[0].dn, recursive=True)
res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(),
expression='(&(sAMAccountName=%s)(servicePrincipalName=%s))' % (ldb.binary_encode("dns-%s" % ctx.myname), ldb.binary_encode("dns/%s" % ctx.dnshostname)),
attrs=[])
if res:
ctx.del_noerror(res[0].dn, recursive=True)
res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(),
expression='(sAMAccountName=%s)' % ldb.binary_encode("dns-%s" % ctx.myname),
attrs=[])
if res:
raise RuntimeError("Not removing account %s which looks like a Samba DNS service account but does not have servicePrincipalName=%s" % (ldb.binary_encode("dns-%s" % ctx.myname), ldb.binary_encode("dns/%s" % ctx.dnshostname)))
if ctx.connection_dn is not None:
ctx.del_noerror(ctx.connection_dn)
if ctx.krbtgt_dn is not None:
ctx.del_noerror(ctx.krbtgt_dn)
ctx.del_noerror(ctx.ntds_dn)
ctx.del_noerror(ctx.server_dn, recursive=True)
if ctx.topology_dn:
ctx.del_noerror(ctx.topology_dn)
if ctx.partition_dn:
ctx.del_noerror(ctx.partition_dn)
if res:
ctx.new_krbtgt_dn = res[0]["msDS-Krbtgtlink"][0]
ctx.del_noerror(ctx.new_krbtgt_dn)
if ctx.subdomain:
binding_options = "sign"
lsaconn = lsa.lsarpc("ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options),
ctx.lp, ctx.creds)
objectAttr = lsa.ObjectAttribute()
objectAttr.sec_qos = lsa.QosInfo()
pol_handle = lsaconn.OpenPolicy2(''.decode('utf-8'),
objectAttr, security.SEC_FLAG_MAXIMUM_ALLOWED)
name = lsa.String()
name.string = ctx.realm
info = lsaconn.QueryTrustedDomainInfoByName(pol_handle, name, lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
lsaconn.DeleteTrustedDomain(pol_handle, info.info_ex.sid)
name = lsa.String()
name.string = ctx.forest_domain_name
info = lsaconn.QueryTrustedDomainInfoByName(pol_handle, name, lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
lsaconn.DeleteTrustedDomain(pol_handle, info.info_ex.sid)
except Exception:
pass
def promote_possible(ctx):
"""confirm that the account is just a bare NT4 BDC or a member server, so can be safely promoted"""
if ctx.subdomain:
# This shouldn't happen
raise Exception("Can not promote into a subdomain")
res = ctx.samdb.search(base=ctx.samdb.get_default_basedn(),
expression='sAMAccountName=%s' % ldb.binary_encode(ctx.samname),
attrs=["msDS-krbTgtLink", "userAccountControl", "serverReferenceBL", "rIDSetReferences"])
if len(res) == 0:
raise Exception("Could not find domain member account '%s' to promote to a DC, use 'samba-tool domain join' instead'" % ctx.samname)
if "msDS-krbTgtLink" in res[0] or "serverReferenceBL" in res[0] or "rIDSetReferences" in res[0]:
raise Exception("Account '%s' appears to be an active DC, use 'samba-tool domain join' if you must re-create this account" % ctx.samname)
if (int(res[0]["userAccountControl"][0]) & (samba.dsdb.UF_WORKSTATION_TRUST_ACCOUNT|samba.dsdb.UF_SERVER_TRUST_ACCOUNT) == 0):
raise Exception("Account %s is not a domain member or a bare NT4 BDC, use 'samba-tool domain join' instead'" % ctx.samname)
ctx.promote_from_dn = res[0].dn
def find_dc(ctx, domain):
"""find a writeable DC for the given domain"""
try:
ctx.cldap_ret = ctx.net.finddc(domain=domain, flags=nbt.NBT_SERVER_LDAP | nbt.NBT_SERVER_DS | nbt.NBT_SERVER_WRITABLE)
except Exception:
raise Exception("Failed to find a writeable DC for domain '%s'" % domain)
if ctx.cldap_ret.client_site is not None and ctx.cldap_ret.client_site != "":
ctx.site = ctx.cldap_ret.client_site
return ctx.cldap_ret.pdc_dns_name
def get_behavior_version(ctx):
res = ctx.samdb.search(base=ctx.base_dn, scope=ldb.SCOPE_BASE, attrs=["msDS-Behavior-Version"])
if "msDS-Behavior-Version" in res[0]:
return int(res[0]["msDS-Behavior-Version"][0])
else:
return samba.dsdb.DS_DOMAIN_FUNCTION_2000
def | (ctx):
res = ctx.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["dnsHostName"])
return res[0]["dnsHostName"][0]
def get_domain_name(ctx):
'''get netbios name of the domain from the partitions record'''
partitions_dn = ctx.samdb.get_partitions_dn()
res = ctx.samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL, attrs=["nETBIOSName"],
expression='ncName=%s' % ctx.samdb.get_default_basedn())
return res[0]["nETBIOSName"][0]
def get_forest_domain_name(ctx):
'''get netbios name of the forest root domain from the partitions record'''
partitions_dn = ctx.samdb.get_partitions_dn()
res = ctx.samdb.search(base=partitions_dn, scope=ldb.SCOPE_ONELEVEL, attrs=["nETBIOSName"],
expression='ncName=%s' % ctx.samdb.get_root_basedn())
return res[0]["nETBIOSName"][0]
def get_parent_partition_dn(ctx):
'''get the parent domain partition DN from parent DNS name'''
res = ctx.samdb.search(base=ctx.config_dn, attrs=[],
expression='(&(objectclass=crossRef)(dnsRoot=%s)(systemFlags:%s:=%u))' %
(ctx.parent_dnsdomain, ldb.OID_COMPARATOR_AND, samba.dsdb.SYSTEM_FLAG_CR_NTDS_DOMAIN))
return str(res[0].dn)
def get_naming_master(ctx):
'''return the GUID-based _msdcs DNS name of the DC holding the domain naming FSMO role'''
res = ctx.samdb.search(base='CN=Partitions,%s' % ctx.config_dn, attrs=['fSMORoleOwner'],
scope=ldb.SCOPE_BASE, controls=["extended_dn:1:1"])
if not 'fSMORoleOwner' in res[0]:
raise DCJoinException("Can't find naming master on partition DN %s in %s" % (ctx.partition_dn, ctx.samdb.url))
try:
master_guid = str(misc.GUID(ldb.Dn(ctx.samdb, res[0]['fSMORoleOwner'][0]).get_extended_component('GUID')))
except KeyError:
raise DCJoinException("Can't find GUID in naming master on partition DN %s" % res[0]['fSMORoleOwner'][0])
master_host = '%s._msdcs.%s' % (master_guid, ctx.dnsforest)
return master_host
def get_mysid(ctx):
'''get the SID of the connected user. Only works with w2k8 and later,
so only used for RODC join'''
res = ctx.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=["tokenGroups"])
binsid = res[0]["tokenGroups"][0]
return ctx.samdb.schema_format_value("objectSID", binsid)
def dn_exists(ctx, dn):
'''check if a DN exists'''
try:
res = ctx.samdb.search(base=dn, scope=ldb.SCOPE_BASE, attrs=[])
except ldb.LdbError, (enum, estr):
if enum == ldb.ERR_NO_SUCH_OBJECT:
return False
raise
return True
def add_krbtgt_account(ctx):
'''RODCs need a special krbtgt account'''
print "Adding %s" % ctx.krbtgt_dn
rec = {
"dn" : ctx.krbtgt_dn,
"objectclass" : "user",
"useraccountcontrol" : str(samba.dsdb.UF_NORMAL_ACCOUNT |
samba.dsdb.UF_ACCOUNTDISABLE),
"showinadvancedviewonly" : "TRUE",
"description" : "krbtgt for %s" % ctx.samname}
ctx.samdb.add(rec, ["rodc_join:1:1"])
# now we need to search for the samAccountName attribute on the krbtgt DN,
# as this will have been magically set to the krbtgt number
res = ctx.samdb.search(base=ctx.krbtgt_dn, scope=ldb.SCOPE_BASE, attrs=["samAccountName"])
ctx.krbtgt_name = res[0]["samAccountName"][0]
print "Got krbtgt_name=%s" % ctx.krbtgt_name
m = ldb.Message()
m.dn = ldb.Dn(ctx.samdb, ctx.acct_dn)
m["msDS-krbTgtLink"] = ldb.MessageElement(ctx.krbtgt_dn,
ldb.FLAG_MOD_REPLACE, "msDS-krbTgtLink")
ctx.samdb.modify(m)
ctx.new_krbtgt_dn = "CN=%s,CN=Users,%s" % (ctx.krbtgt_name, ctx.base_dn)
print "Renaming %s to %s" % (ctx.krbtgt_dn, ctx.new_krbtgt_dn)
ctx.samdb.rename(ctx.krbtgt_dn, ctx.new_krbtgt_dn)
def drsuapi_connect(ctx):
'''make a DRSUAPI connection to the naming master'''
binding_options = "seal"
if int(ctx.lp.get("log level")) >= 4:
binding_options += ",print"
binding_string = "ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options)
ctx.drsuapi = drsuapi.drsuapi(binding_string, ctx.lp, ctx.creds)
(ctx.drsuapi_handle, ctx.bind_supported_extensions) = drs_utils.drs_DsBind(ctx.drsuapi)
def create_tmp_samdb(ctx):
'''create a temporary samdb object for schema queries'''
ctx.tmp_schema = Schema(ctx.domsid,
schemadn=ctx.schema_dn)
ctx.tmp_samdb = SamDB(session_info=system_session(), url=None, auto_connect=False,
credentials=ctx.creds, lp=ctx.lp, global_schema=False,
am_rodc=False)
ctx.tmp_samdb.set_schema(ctx.tmp_schema)
def build_DsReplicaAttribute(ctx, attrname, attrvalue):
'''build a DsReplicaAttributeCtr object'''
r = drsuapi.DsReplicaAttribute()
r.attid = ctx.tmp_samdb.get_attid_from_lDAPDisplayName(attrname)
r.value_ctr = 1
def DsAddEntry(ctx, recs):
'''add a record via the DRSUAPI DsAddEntry call'''
if ctx.drsuapi is None:
ctx.drsuapi_connect()
if ctx.tmp_samdb is None:
ctx.create_tmp_samdb()
objects = []
for rec in recs:
id = drsuapi.DsReplicaObjectIdentifier()
id.dn = rec['dn']
attrs = []
for a in rec:
if a == 'dn':
continue
if not isinstance(rec[a], list):
v = [rec[a]]
else:
v = rec[a]
rattr = ctx.tmp_samdb.dsdb_DsReplicaAttribute(ctx.tmp_samdb, a, v)
attrs.append(rattr)
attribute_ctr = drsuapi.DsReplicaAttributeCtr()
attribute_ctr.num_attributes = len(attrs)
attribute_ctr.attributes = attrs
object = drsuapi.DsReplicaObject()
object.identifier = id
object.attribute_ctr = attribute_ctr
list_object = drsuapi.DsReplicaObjectListItem()
list_object.object = object
objects.append(list_object)
req2 = drsuapi.DsAddEntryRequest2()
req2.first_object = objects[0]
prev = req2.first_object
for o in objects[1:]:
prev.next_object = o
prev = o
(level, ctr) = ctx.drsuapi.DsAddEntry(ctx.drsuapi_handle, 2, req2)
if level == 2:
if ctr.dir_err != drsuapi.DRSUAPI_DIRERR_OK:
print("DsAddEntry failed with dir_err %u" % ctr.dir_err)
raise RuntimeError("DsAddEntry failed")
if ctr.extended_err != (0, 'WERR_OK'):
print("DsAddEntry failed with status %s info %s" % (ctr.extended_err))
raise RuntimeError("DsAddEntry failed")
if level == 3:
if ctr.err_ver != 1:
raise RuntimeError("expected err_ver 1, got %u" % ctr.err_ver)
if ctr.err_data.status != (0, 'WERR_OK'):
print("DsAddEntry failed with status %s info %s" % (ctr.err_data.status,
ctr.err_data.info.extended_err))
raise RuntimeError("DsAddEntry failed")
if ctr.err_data.dir_err != drsuapi.DRSUAPI_DIRERR_OK:
print("DsAddEntry failed with dir_err %u" % ctr.err_data.dir_err)
raise RuntimeError("DsAddEntry failed")
return ctr.objects
def join_ntdsdsa_obj(ctx):
'''return the ntdsdsa object to add'''
print "Adding %s" % ctx.ntds_dn
rec = {
"dn" : ctx.ntds_dn,
"objectclass" : "nTDSDSA",
"systemFlags" : str(samba.dsdb.SYSTEM_FLAG_DISALLOW_MOVE_ON_DELETE),
"dMDLocation" : ctx.schema_dn}
nc_list = [ ctx.base_dn, ctx.config_dn, ctx.schema_dn ]
if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003:
rec["msDS-Behavior-Version"] = str(samba.dsdb.DS_DOMAIN_FUNCTION_2008_R2)
if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003:
rec["msDS-HasDomainNCs"] = ctx.base_dn
if ctx.RODC:
rec["objectCategory"] = "CN=NTDS-DSA-RO,%s" % ctx.schema_dn
rec["msDS-HasFullReplicaNCs"] = ctx.full_nc_list
rec["options"] = "37"
else:
rec["objectCategory"] = "CN=NTDS-DSA,%s" % ctx.schema_dn
rec["HasMasterNCs"] = []
for nc in nc_list:
if nc in ctx.full_nc_list:
rec["HasMasterNCs"].append(nc)
if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003:
rec["msDS-HasMasterNCs"] = ctx.full_nc_list
rec["options"] = "1"
rec["invocationId"] = ndr_pack(ctx.invocation_id)
return rec
def join_add_ntdsdsa(ctx):
'''add the ntdsdsa object'''
rec = ctx.join_ntdsdsa_obj()
if ctx.RODC:
ctx.samdb.add(rec, ["rodc_join:1:1"])
else:
ctx.DsAddEntry([rec])
# find the GUID of our NTDS DN
res = ctx.samdb.search(base=ctx.ntds_dn, scope=ldb.SCOPE_BASE, attrs=["objectGUID"])
ctx.ntds_guid = misc.GUID(ctx.samdb.schema_format_value("objectGUID", res[0]["objectGUID"][0]))
def join_add_objects(ctx):
'''add the various objects needed for the join'''
if ctx.acct_dn:
print "Adding %s" % ctx.acct_dn
rec = {
"dn" : ctx.acct_dn,
"objectClass": "computer",
"displayname": ctx.samname,
"samaccountname" : ctx.samname,
"userAccountControl" : str(ctx.userAccountControl | samba.dsdb.UF_ACCOUNTDISABLE),
"dnshostname" : ctx.dnshostname}
if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2008:
rec['msDS-SupportedEncryptionTypes'] = str(samba.dsdb.ENC_ALL_TYPES)
elif ctx.promote_existing:
rec['msDS-SupportedEncryptionTypes'] = []
if ctx.managedby:
rec["managedby"] = ctx.managedby
elif ctx.promote_existing:
rec["managedby"] = []
if ctx.never_reveal_sid:
rec["msDS-NeverRevealGroup"] = ctx.never_reveal_sid
elif ctx.promote_existing:
rec["msDS-NeverRevealGroup"] = []
if ctx.reveal_sid:
rec["msDS-RevealOnDemandGroup"] = ctx.reveal_sid
elif ctx.promote_existing:
rec["msDS-RevealOnDemandGroup"] = []
if ctx.promote_existing:
if ctx.promote_from_dn != ctx.acct_dn:
ctx.samdb.rename(ctx.promote_from_dn, ctx.acct_dn)
ctx.samdb.modify(ldb.Message.from_dict(ctx.samdb, rec, ldb.FLAG_MOD_REPLACE))
else:
ctx.samdb.add(rec)
if ctx.krbtgt_dn:
ctx.add_krbtgt_account()
print "Adding %s" % ctx.server_dn
rec = {
"dn": ctx.server_dn,
"objectclass" : "server",
# windows uses 50000000 decimal for systemFlags. A windows hex/decimal mixup bug?
"systemFlags" : str(samba.dsdb.SYSTEM_FLAG_CONFIG_ALLOW_RENAME |
samba.dsdb.SYSTEM_FLAG_CONFIG_ALLOW_LIMITED_MOVE |
samba.dsdb.SYSTEM_FLAG_DISALLOW_MOVE_ON_DELETE),
# windows seems to add the dnsHostName later
"dnsHostName" : ctx.dnshostname}
if ctx.acct_dn:
rec["serverReference"] = ctx.acct_dn
ctx.samdb.add(rec)
if ctx.subdomain:
# the rest is done after replication
ctx.ntds_guid = None
return
ctx.join_add_ntdsdsa()
if ctx.connection_dn is not None:
print "Adding %s" % ctx.connection_dn
rec = {
"dn" : ctx.connection_dn,
"objectclass" : "nTDSConnection",
"enabledconnection" : "TRUE",
"options" : "65",
"fromServer" : ctx.dc_ntds_dn}
ctx.samdb.add(rec)
if ctx.acct_dn:
print "Adding SPNs to %s" % ctx.acct_dn
m = ldb.Message()
m.dn = ldb.Dn(ctx.samdb, ctx.acct_dn)
for i in range(len(ctx.SPNs)):
ctx.SPNs[i] = ctx.SPNs[i].replace("$NTDSGUID", str(ctx.ntds_guid))
m["servicePrincipalName"] = ldb.MessageElement(ctx.SPNs,
ldb.FLAG_MOD_REPLACE,
"servicePrincipalName")
ctx.samdb.modify(m)
# The account password set operation should normally be done over
# LDAP. Windows 2000 DCs however allow this only with SSL
# connections which are hard to set up and otherwise refuse with
# ERR_UNWILLING_TO_PERFORM. In this case we fall back to libnet
# over SAMR.
print "Setting account password for %s" % ctx.samname
try:
ctx.samdb.setpassword("(&(objectClass=user)(sAMAccountName=%s))"
% ldb.binary_encode(ctx.samname),
ctx.acct_pass,
force_change_at_next_login=False,
username=ctx.samname)
except ldb.LdbError, (num, _):
if num != ldb.ERR_UNWILLING_TO_PERFORM:
raise
ctx.net.set_password(account_name=ctx.samname,
domain_name=ctx.domain_name,
newpassword=ctx.acct_pass)
res = ctx.samdb.search(base=ctx.acct_dn, scope=ldb.SCOPE_BASE,
attrs=["msDS-KeyVersionNumber"])
if "msDS-KeyVersionNumber" in res[0]:
ctx.key_version_number = int(res[0]["msDS-KeyVersionNumber"][0])
else:
ctx.key_version_number = None
print("Enabling account")
m = ldb.Message()
m.dn = ldb.Dn(ctx.samdb, ctx.acct_dn)
m["userAccountControl"] = ldb.MessageElement(str(ctx.userAccountControl),
ldb.FLAG_MOD_REPLACE,
"userAccountControl")
ctx.samdb.modify(m)
if ctx.dns_backend.startswith("BIND9_"):
ctx.dnspass = samba.generate_random_password(128, 255)
recs = ctx.samdb.parse_ldif(read_and_sub_file(setup_path("provision_dns_add_samba.ldif"),
{"DNSDOMAIN": ctx.dnsdomain,
"DOMAINDN": ctx.base_dn,
"HOSTNAME" : ctx.myname,
"DNSPASS_B64": b64encode(ctx.dnspass),
"DNSNAME" : ctx.dnshostname}))
for changetype, msg in recs:
assert changetype == ldb.CHANGETYPE_NONE
dns_acct_dn = msg["dn"]
print "Adding DNS account %s with dns/ SPN" % msg["dn"]
# Remove dns password (we will set it as a modify, as we can't do clearTextPassword over LDAP)
del msg["clearTextPassword"]
# Remove isCriticalSystemObject for similar reasons, it cannot be set over LDAP
del msg["isCriticalSystemObject"]
# Disable account until password is set
msg["userAccountControl"] = str(samba.dsdb.UF_NORMAL_ACCOUNT |
samba.dsdb.UF_ACCOUNTDISABLE)
try:
ctx.samdb.add(msg)
except ldb.LdbError, (num, _):
if num != ldb.ERR_ENTRY_ALREADY_EXISTS:
raise
# The account password set operation should normally be done over
# LDAP. Windows 2000 DCs however allow this only with SSL
# connections which are hard to set up and otherwise refuse with
# ERR_UNWILLING_TO_PERFORM. In this case we fall back to libnet
# over SAMR.
print "Setting account password for dns-%s" % ctx.myname
try:
ctx.samdb.setpassword("(&(objectClass=user)(samAccountName=dns-%s))"
% ldb.binary_encode(ctx.myname),
ctx.dnspass,
force_change_at_next_login=False,
username=ctx.samname)
except ldb.LdbError, (num, _):
if num != ldb.ERR_UNWILLING_TO_PERFORM:
raise
ctx.net.set_password(account_name="dns-%s" % ctx.myname,
domain_name=ctx.domain_name,
newpassword=ctx.dnspass)
res = ctx.samdb.search(base=dns_acct_dn, scope=ldb.SCOPE_BASE,
attrs=["msDS-KeyVersionNumber"])
if "msDS-KeyVersionNumber" in res[0]:
ctx.dns_key_version_number = int(res[0]["msDS-KeyVersionNumber"][0])
else:
ctx.dns_key_version_number = None
def join_add_objects2(ctx):
"""add the various objects needed for the join, for subdomains post replication"""
print "Adding %s" % ctx.partition_dn
name_map = {'SubdomainAdmins': "%s-%s" % (str(ctx.domsid), security.DOMAIN_RID_ADMINS)}
sd_binary = descriptor.get_paritions_crossref_subdomain_descriptor(ctx.forestsid, name_map=name_map)
rec = {
"dn" : ctx.partition_dn,
"objectclass" : "crossRef",
"objectCategory" : "CN=Cross-Ref,%s" % ctx.schema_dn,
"nCName" : ctx.base_dn,
"nETBIOSName" : ctx.domain_name,
"dnsRoot": ctx.dnsdomain,
"trustParent" : ctx.parent_partition_dn,
"systemFlags" : str(samba.dsdb.SYSTEM_FLAG_CR_NTDS_NC|samba.dsdb.SYSTEM_FLAG_CR_NTDS_DOMAIN),
"ntSecurityDescriptor" : sd_binary,
}
if ctx.behavior_version >= samba.dsdb.DS_DOMAIN_FUNCTION_2003:
rec["msDS-Behavior-Version"] = str(ctx.behavior_version)
rec2 = ctx.join_ntdsdsa_obj()
objects = ctx.DsAddEntry([rec, rec2])
if len(objects) != 2:
raise DCJoinException("Expected 2 objects from DsAddEntry")
ctx.ntds_guid = objects[1].guid
print("Replicating partition DN")
ctx.repl.replicate(ctx.partition_dn,
misc.GUID("00000000-0000-0000-0000-000000000000"),
ctx.ntds_guid,
exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP)
print("Replicating NTDS DN")
ctx.repl.replicate(ctx.ntds_dn,
misc.GUID("00000000-0000-0000-0000-000000000000"),
ctx.ntds_guid,
exop=drsuapi.DRSUAPI_EXOP_REPL_OBJ,
replica_flags=drsuapi.DRSUAPI_DRS_WRIT_REP)
def join_provision(ctx):
"""Provision the local SAM."""
print "Calling bare provision"
smbconf = ctx.lp.configfile
presult = provision(ctx.logger, system_session(), smbconf=smbconf,
targetdir=ctx.targetdir, samdb_fill=FILL_DRS, realm=ctx.realm,
rootdn=ctx.root_dn, domaindn=ctx.base_dn,
schemadn=ctx.schema_dn, configdn=ctx.config_dn,
serverdn=ctx.server_dn, domain=ctx.domain_name,
hostname=ctx.myname, domainsid=ctx.domsid,
machinepass=ctx.acct_pass, serverrole="active directory domain controller",
sitename=ctx.site, lp=ctx.lp, ntdsguid=ctx.ntds_guid,
use_ntvfs=ctx.use_ntvfs, dns_backend=ctx.dns_backend)
print "Provision OK for domain DN %s" % presult.domaindn
ctx.local_samdb = presult.samdb
ctx.lp = presult.lp
ctx.paths = presult.paths
ctx.names = presult.names
# Fix up the forestsid, it may be different if we are joining as a subdomain
ctx.names.forestsid = ctx.forestsid
def join_provision_own_domain(ctx):
"""Provision the local SAM."""
# we now operate exclusively on the local database, which
# we need to reopen in order to get the newly created schema
print("Reconnecting to local samdb")
ctx.samdb = SamDB(url=ctx.local_samdb.url,
session_info=system_session(),
lp=ctx.local_samdb.lp,
global_schema=False)
ctx.samdb.set_invocation_id(str(ctx.invocation_id))
ctx.local_samdb = ctx.samdb
ctx.logger.info("Finding domain GUID from ncName")
res = ctx.local_samdb.search(base=ctx.partition_dn, scope=ldb.SCOPE_BASE, attrs=['ncName'],
controls=["extended_dn:1:1", "reveal_internals:0"])
if 'nCName' not in res[0]:
raise DCJoinException("Can't find naming context on partition DN %s in %s" % (ctx.partition_dn, ctx.samdb.url))
try:
ctx.names.domainguid = str(misc.GUID(ldb.Dn(ctx.samdb, res[0]['ncName'][0]).get_extended_component('GUID')))
except KeyError:
raise DCJoinException("Can't find GUID in naming master on partition DN %s" % res[0]['ncName'][0])
ctx.logger.info("Got domain GUID %s" % ctx.names.domainguid)
ctx.logger.info("Calling own domain provision")
secrets_ldb = Ldb(ctx.paths.secrets, session_info=system_session(), lp=ctx.lp)
presult = provision_fill(ctx.local_samdb, secrets_ldb,
ctx.logger, ctx.names, ctx.paths,
dom_for_fun_level=DS_DOMAIN_FUNCTION_2003,
targetdir=ctx.targetdir, samdb_fill=FILL_SUBDOMAIN,
machinepass=ctx.acct_pass, serverrole="active directory domain controller",
lp=ctx.lp, hostip=ctx.names.hostip, hostip6=ctx.names.hostip6,
dns_backend=ctx.dns_backend, adminpass=ctx.adminpass)
print("Provision OK for domain %s" % ctx.names.dnsdomain)
def join_replicate(ctx):
"""Replicate the SAM."""
print "Starting replication"
ctx.local_samdb.transaction_start()
try:
source_dsa_invocation_id = misc.GUID(ctx.samdb.get_invocation_id())
if ctx.ntds_guid is None:
print("Using DS_BIND_GUID_W2K3")
destination_dsa_guid = misc.GUID(drsuapi.DRSUAPI_DS_BIND_GUID_W2K3)
else:
destination_dsa_guid = ctx.ntds_guid
if ctx.RODC:
repl_creds = Credentials()
repl_creds.guess(ctx.lp)
repl_creds.set_kerberos_state(DONT_USE_KERBEROS)
repl_creds.set_username(ctx.samname)
repl_creds.set_password(ctx.acct_pass)
else:
repl_creds = ctx.creds
binding_options = "seal"
if int(ctx.lp.get("log level")) >= 5:
binding_options += ",print"
repl = drs_utils.drs_Replicate(
"ncacn_ip_tcp:%s[%s]" % (ctx.server, binding_options),
ctx.lp, repl_creds, ctx.local_samdb, ctx.invocation_id)
repl.replicate(ctx.schema_dn, source_dsa_invocation_id,
destination_dsa_guid, schema=True, rodc=ctx.RODC,
replica_flags=ctx.replica_flags)
repl.replicate(ctx.config_dn, source_dsa_invocation_id,
destination_dsa_guid, rodc=ctx.RODC,
replica_flags=ctx.replica_flags)
if not ctx.subdomain:
# Replicate first the critical object for the basedn
if not ctx.domain_replica_flags & drsuapi.DRSUAPI_DRS_CRITICAL_ONLY:
print "Replicating critical objects from the base DN of the domain"
ctx.domain_replica_flags |= drsuapi.DRSUAPI_DRS_CRITICAL_ONLY | drsuapi.DRSUAPI_DRS_GET_ANC
repl.replicate(ctx.base_dn, source_dsa_invocation_id,
destination_dsa_guid, rodc=ctx.RODC,
replica_flags=ctx.domain_replica_flags)
ctx.domain_replica_flags ^= drsuapi.DRSUAPI_DRS_CRITICAL_ONLY | drsuapi.DRSUAPI_DRS_GET_ANC
else:
ctx.domain_replica_flags |= drsuapi.DRSUAPI_DRS_GET_ANC
repl.replicate(ctx.base_dn, source_dsa_invocation_id,
destination_dsa_guid, rodc=ctx.RODC,
replica_flags=ctx.domain_replica_flags)
print "Done with always replicated NC (base, config, schema)"
for nc in (ctx.domaindns_zone, ctx.forestdns_zone):
if nc in ctx.nc_list:
print "Replicating %s" % (str(nc))
repl.replicate(nc, source_dsa_invocation_id,
destination_dsa_guid, rodc=ctx.RODC,
replica_flags=ctx.replica_flags)
        # FIXME: at this point we should add an entry in the forestdns and domaindns NCs
        # (those under CN=Partitions,DC=...)
        # in order to indicate that we hold a replica for these NCs
if ctx.RODC:
repl.replicate(ctx.acct_dn, source_dsa_invocation_id,
destination_dsa_guid,
exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET, rodc=True)
repl.replicate(ctx.new_krbtgt_dn, source_dsa_invocation_id,
destination_dsa_guid,
exop=drsuapi.DRSUAPI_EXOP_REPL_SECRET, rodc=True)
ctx.repl = repl
ctx.source_dsa_invocation_id = source_dsa_invocation_id
ctx.destination_dsa_guid = destination_dsa_guid
print "Committing SAM database"
except:
ctx.local_samdb.transaction_cancel()
raise
else:
ctx.local_samdb.transaction_commit()
def send_DsReplicaUpdateRefs(ctx, dn):
r = drsuapi.DsReplicaUpdateRefsRequest1()
r.naming_context = drsuapi.DsReplicaObjectIdentifier()
r.naming_context.dn = str(dn)
r.naming_context.guid = misc.GUID("00000000-0000-0000-0000-000000000000")
r.naming_context.sid = security.dom_sid("S-0-0")
r.dest_dsa_guid = ctx.ntds_guid
r.dest_dsa_dns_name = "%s._msdcs.%s" % (str(ctx.ntds_guid), ctx.dnsforest)
r.options = drsuapi.DRSUAPI_DRS_ADD_REF | drsuapi.DRSUAPI_DRS_DEL_REF
if not ctx.RODC:
r.options |= drsuapi.DRSUAPI_DRS_WRIT_REP
if ctx.drsuapi:
ctx.drsuapi.DsReplicaUpdateRefs(ctx.drsuapi_handle, 1, r)
def join_finalise(ctx):
"""Finalise the join, mark us synchronised and setup secrets db."""
    # FIXME: we shouldn't do this in all cases.
    # If for some reason we joined a different site from the DC we just
    # replicated from, we don't need to send the DsReplicaUpdateRefs,
    # because replication between sites is time-based and initiated by the
    # requesting DC.
ctx.logger.info("Sending DsReplicaUpdateRefs for all the replicated partitions")
for nc in ctx.nc_list:
ctx.send_DsReplicaUpdateRefs(nc)
if ctx.RODC:
print "Setting RODC invocationId"
ctx.local_samdb.set_invocation_id(str(ctx.invocation_id))
ctx.local_samdb.set_opaque_integer("domainFunctionality",
ctx.behavior_version)
m = ldb.Message()
m.dn = ldb.Dn(ctx.local_samdb, "%s" % ctx.ntds_dn)
m["invocationId"] = ldb.MessageElement(ndr_pack(ctx.invocation_id),
ldb.FLAG_MOD_REPLACE,
"invocationId")
ctx.local_samdb.modify(m)
        # Note: as an RODC the invocationId is only stored
        # on the RODC itself; the other DCs never see it.
        #
        # That is why we fix up the replPropertyMetaData stamp
        # for the 'invocationId' attribute: we need to change
        # the 'version' to '0', which is what Windows 2008R2 does as an RODC.
        #
        # This means that if the object on an RWDC ever gets an invocationId
        # attribute, it will have version '1' (or higher), which will
        # overwrite the RODC's local value.
ctx.local_samdb.set_attribute_replmetadata_version(m.dn,
"invocationId",
0)
ctx.logger.info("Setting isSynchronized and dsServiceName")
m = ldb.Message()
m.dn = ldb.Dn(ctx.local_samdb, '@ROOTDSE')
m["isSynchronized"] = ldb.MessageElement("TRUE", ldb.FLAG_MOD_REPLACE, "isSynchronized")
m["dsServiceName"] = ldb.MessageElement("<GUID=%s>" % str(ctx.ntds_guid),
ldb.FLAG_MOD_REPLACE, "dsServiceName")
ctx.local_samdb.modify(m)
if ctx.subdomain:
return
secrets_ldb = Ldb(ctx.paths.secrets, session_info=system_session(), lp=ctx.lp)
ctx.logger.info("Setting up secrets database")
secretsdb_self_join(secrets_ldb, domain=ctx.domain_name,
realm=ctx.realm,
dnsdomain=ctx.dnsdomain,
netbiosname=ctx.myname,
domainsid=ctx.domsid,
machinepass=ctx.acct_pass,
secure_channel_type=ctx.secure_channel_type,
key_version_number=ctx.key_version_number)
if ctx.dns_backend.startswith("BIND9_"):
setup_bind9_dns(ctx.local_samdb, secrets_ldb,
ctx.names, ctx.paths, ctx.lp, ctx.logger,
dns_backend=ctx.dns_backend,
dnspass=ctx.dnspass, os_level=ctx.behavior_version,
targetdir=ctx.targetdir,
key_version_number=ctx.dns_key_version_number)
def join_setup_trusts(ctx):
"""provision the local SAM."""
print "Setup domain trusts with server %s" % ctx.server
binding_options = "" # why doesn't signing work here? w2k8r2 claims no session key
lsaconn = lsa.lsarpc("ncacn_np:%s[%s]" % (ctx.server, binding_options),
ctx.lp, ctx.creds)
objectAttr = lsa.ObjectAttribute()
objectAttr.sec_qos = lsa.QosInfo()
    pol_handle = lsaconn.OpenPolicy2(u'',
objectAttr, security.SEC_FLAG_MAXIMUM_ALLOWED)
info = lsa.TrustDomainInfoInfoEx()
info.domain_name.string = ctx.dnsdomain
info.netbios_name.string = ctx.domain_name
info.sid = ctx.domsid
info.trust_direction = lsa.LSA_TRUST_DIRECTION_INBOUND | lsa.LSA_TRUST_DIRECTION_OUTBOUND
info.trust_type = lsa.LSA_TRUST_TYPE_UPLEVEL
info.trust_attributes = lsa.LSA_TRUST_ATTRIBUTE_WITHIN_FOREST
try:
oldname = lsa.String()
oldname.string = ctx.dnsdomain
oldinfo = lsaconn.QueryTrustedDomainInfoByName(pol_handle, oldname,
lsa.LSA_TRUSTED_DOMAIN_INFO_FULL_INFO)
print("Removing old trust record for %s (SID %s)" % (ctx.dnsdomain, oldinfo.info_ex.sid))
lsaconn.DeleteTrustedDomain(pol_handle, oldinfo.info_ex.sid)
except RuntimeError:
pass
password_blob = string_to_byte_array(ctx.trustdom_pass.encode('utf-16-le'))
clear_value = drsblobs.AuthInfoClear()
clear_value.size = len(password_blob)
clear_value.password = password_blob
clear_authentication_information = drsblobs.AuthenticationInformation()
clear_authentication_information.LastUpdateTime = samba.unix2nttime(int(time.time()))
clear_authentication_information.AuthType = lsa.TRUST_AUTH_TYPE_CLEAR
clear_authentication_information.AuthInfo = clear_value
authentication_information_array = drsblobs.AuthenticationInformationArray()
authentication_information_array.count = 1
authentication_information_array.array = [clear_authentication_information]
outgoing = drsblobs.trustAuthInOutBlob()
outgoing.count = 1
outgoing.current = authentication_information_array
trustpass = drsblobs.trustDomainPasswords()
    confounder = [random.randint(0, 255) for _ in range(512)]
trustpass.confounder = confounder
trustpass.outgoing = outgoing
trustpass.incoming = outgoing
trustpass_blob = ndr_pack(trustpass)
encrypted_trustpass = arcfour_encrypt(lsaconn.session_key, trustpass_blob)
auth_blob = lsa.DATA_BUF2()
auth_blob.size = len(encrypted_trustpass)
auth_blob.data = string_to_byte_array(encrypted_trustpass)
auth_info = lsa.TrustDomainInfoAuthInfoInternal()
auth_info.auth_blob = auth_blob
trustdom_handle = lsaconn.CreateTrustedDomainEx2(pol_handle,
info,
auth_info,
security.SEC_STD_DELETE)
rec = {
"dn" : "cn=%s,cn=system,%s" % (ctx.dnsforest, ctx.base_dn),
"objectclass" : "trustedDomain",
"trustType" : str(info.trust_type),
"trustAttributes" : str(info.trust_attributes),
"trustDirection" : str(info.trust_direction),
"flatname" : ctx.forest_domain_name,
"trustPartner" : ctx.dnsforest,
"trustAuthIncoming" : ndr_pack(outgoing),
"trustAuthOutgoing" : ndr_pack(outgoing),
"securityIdentifier" : ndr_pack(ctx.forestsid)
}
ctx.local_samdb.add(rec)
rec = {
"dn" : "cn=%s$,cn=users,%s" % (ctx.forest_domain_name, ctx.base_dn),
"objectclass" : "user",
"userAccountControl" : str(samba.dsdb.UF_INTERDOMAIN_TRUST_ACCOUNT),
"clearTextPassword" : ctx.trustdom_pass.encode('utf-16-le'),
"samAccountName" : "%s$" % ctx.forest_domain_name
}
ctx.local_samdb.add(rec)
def do_join(ctx):
    # nc_list is the list of naming contexts (NCs) that we will replicate
    # and for which we will send an updateRefs request to the partner DC.
    # full_nc_list is the list of naming contexts (NCs) we hold read/write
    # copies of. The two lists are not subsets of each other.
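    # Illustrative example (not in the original code): with a DNS backend
    # other than "NONE", a plain DC join below ends up with
    #   nc_list      = [config_dn, schema_dn, base_dn, domaindns_zone, forestdns_zone]
    #   full_nc_list = [base_dn, config_dn, schema_dn, domaindns_zone, forestdns_zone]
    # while a subdomain join keeps nc_list limited to the config and schema partitions.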
ctx.nc_list = [ ctx.config_dn, ctx.schema_dn ]
ctx.full_nc_list = [ ctx.base_dn, ctx.config_dn, ctx.schema_dn ]
if ctx.subdomain and ctx.dns_backend != "NONE":
ctx.full_nc_list += [ctx.domaindns_zone]
elif not ctx.subdomain:
ctx.nc_list += [ctx.base_dn]
if ctx.dns_backend != "NONE":
ctx.nc_list += [ctx.domaindns_zone]
ctx.nc_list += [ctx.forestdns_zone]
ctx.full_nc_list += [ctx.domaindns_zone]
ctx.full_nc_list += [ctx.forestdns_zone]
if ctx.promote_existing:
ctx.promote_possible()
else:
ctx.cleanup_old_join()
try:
ctx.join_add_objects()
ctx.join_provision()
ctx.join_replicate()
if ctx.subdomain:
ctx.join_add_objects2()
ctx.join_provision_own_domain()
ctx.join_setup_trusts()
ctx.join_finalise()
except:
print "Join failed - cleaning up"
ctx.cleanup_old_join()
raise
def join_RODC(logger=None, server=None, creds=None, lp=None, site=None, netbios_name=None,
targetdir=None, domain=None, domain_critical_only=False,
machinepass=None, use_ntvfs=False, dns_backend=None,
promote_existing=False):
"""Join as a RODC."""
ctx = dc_join(logger, server, creds, lp, site, netbios_name, targetdir, domain,
machinepass, use_ntvfs, dns_backend, promote_existing)
lp.set("workgroup", ctx.domain_name)
logger.info("workgroup is %s" % ctx.domain_name)
lp.set("realm", ctx.realm)
logger.info("realm is %s" % ctx.realm)
ctx.krbtgt_dn = "CN=krbtgt_%s,CN=Users,%s" % (ctx.myname, ctx.base_dn)
# setup some defaults for accounts that should be replicated to this RODC
ctx.never_reveal_sid = [
"<SID=%s-%s>" % (ctx.domsid, security.DOMAIN_RID_RODC_DENY),
"<SID=%s>" % security.SID_BUILTIN_ADMINISTRATORS,
"<SID=%s>" % security.SID_BUILTIN_SERVER_OPERATORS,
"<SID=%s>" % security.SID_BUILTIN_BACKUP_OPERATORS,
"<SID=%s>" % security.SID_BUILTIN_ACCOUNT_OPERATORS]
ctx.reveal_sid = "<SID=%s-%s>" % (ctx.domsid, security.DOMAIN_RID_RODC_ALLOW)
mysid = ctx.get_mysid()
admin_dn = "<SID=%s>" % mysid
ctx.managedby = admin_dn
ctx.userAccountControl = (samba.dsdb.UF_WORKSTATION_TRUST_ACCOUNT |
samba.dsdb.UF_TRUSTED_TO_AUTHENTICATE_FOR_DELEGATION |
samba.dsdb.UF_PARTIAL_SECRETS_ACCOUNT)
ctx.SPNs.extend([ "RestrictedKrbHost/%s" % ctx.myname,
"RestrictedKrbHost/%s" % ctx.dnshostname ])
ctx.connection_dn = "CN=RODC Connection (FRS),%s" % ctx.ntds_dn
ctx.secure_channel_type = misc.SEC_CHAN_RODC
ctx.RODC = True
ctx.replica_flags = (drsuapi.DRSUAPI_DRS_INIT_SYNC |
drsuapi.DRSUAPI_DRS_PER_SYNC |
drsuapi.DRSUAPI_DRS_GET_ANC |
drsuapi.DRSUAPI_DRS_NEVER_SYNCED |
drsuapi.DRSUAPI_DRS_SPECIAL_SECRET_PROCESSING |
drsuapi.DRSUAPI_DRS_GET_ALL_GROUP_MEMBERSHIP)
ctx.domain_replica_flags = ctx.replica_flags
if domain_critical_only:
ctx.domain_replica_flags |= drsuapi.DRSUAPI_DRS_CRITICAL_ONLY
ctx.do_join()
logger.info("Joined domain %s (SID %s) as an RODC" % (ctx.domain_name, ctx.domsid))
def join_DC(logger=None, server=None, creds=None, lp=None, site=None, netbios_name=None,
targetdir=None, domain=None, domain_critical_only=False,
machinepass=None, use_ntvfs=False, dns_backend=None,
promote_existing=False):
"""Join as a DC."""
ctx = dc_join(logger, server, creds, lp, site, netbios_name, targetdir, domain,
machinepass, use_ntvfs, dns_backend, promote_existing)
lp.set("workgroup", ctx.domain_name)
logger.info("workgroup is %s" % ctx.domain_name)
lp.set("realm", ctx.realm)
logger.info("realm is %s" % ctx.realm)
ctx.userAccountControl = samba.dsdb.UF_SERVER_TRUST_ACCOUNT | samba.dsdb.UF_TRUSTED_FOR_DELEGATION
ctx.SPNs.append('E3514235-4B06-11D1-AB04-00C04FC2DCD2/$NTDSGUID/%s' % ctx.dnsdomain)
ctx.secure_channel_type = misc.SEC_CHAN_BDC
ctx.replica_flags = (drsuapi.DRSUAPI_DRS_WRIT_REP |
drsuapi.DRSUAPI_DRS_INIT_SYNC |
drsuapi.DRSUAPI_DRS_PER_SYNC |
drsuapi.DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS |
drsuapi.DRSUAPI_DRS_NEVER_SYNCED)
ctx.domain_replica_flags = ctx.replica_flags
if domain_critical_only:
ctx.domain_replica_flags |= drsuapi.DRSUAPI_DRS_CRITICAL_ONLY
ctx.do_join()
logger.info("Joined domain %s (SID %s) as a DC" % (ctx.domain_name, ctx.domsid))
def join_subdomain(logger=None, server=None, creds=None, lp=None, site=None,
netbios_name=None, targetdir=None, parent_domain=None, dnsdomain=None,
netbios_domain=None, machinepass=None, adminpass=None, use_ntvfs=False,
dns_backend=None):
"""Join as a DC."""
ctx = dc_join(logger, server, creds, lp, site, netbios_name, targetdir, parent_domain,
machinepass, use_ntvfs, dns_backend)
ctx.subdomain = True
if adminpass is None:
ctx.adminpass = samba.generate_random_password(12, 32)
else:
ctx.adminpass = adminpass
ctx.parent_domain_name = ctx.domain_name
ctx.domain_name = netbios_domain
ctx.realm = dnsdomain
ctx.parent_dnsdomain = ctx.dnsdomain
ctx.parent_partition_dn = ctx.get_parent_partition_dn()
ctx.dnsdomain = dnsdomain
ctx.partition_dn = "CN=%s,CN=Partitions,%s" % (ctx.domain_name, ctx.config_dn)
ctx.naming_master = ctx.get_naming_master()
if ctx.naming_master != ctx.server:
logger.info("Reconnecting to naming master %s" % ctx.naming_master)
ctx.server = ctx.naming_master
ctx.samdb = SamDB(url="ldap://%s" % ctx.server,
session_info=system_session(),
credentials=ctx.creds, lp=ctx.lp)
res = ctx.samdb.search(base="", scope=ldb.SCOPE_BASE, attrs=['dnsHostName'],
controls=[])
ctx.server = res[0]["dnsHostName"]
logger.info("DNS name of new naming master is %s" % ctx.server)
ctx.base_dn = samba.dn_from_dns_name(dnsdomain)
ctx.forestsid = ctx.domsid
ctx.domsid = security.random_sid()
ctx.acct_dn = None
ctx.dnshostname = "%s.%s" % (ctx.myname.lower(), ctx.dnsdomain)
ctx.trustdom_pass = samba.generate_random_password(128, 128)
ctx.userAccountControl = samba.dsdb.UF_SERVER_TRUST_ACCOUNT | samba.dsdb.UF_TRUSTED_FOR_DELEGATION
ctx.SPNs.append('E3514235-4B06-11D1-AB04-00C04FC2DCD2/$NTDSGUID/%s' % ctx.dnsdomain)
ctx.secure_channel_type = misc.SEC_CHAN_BDC
ctx.replica_flags = (drsuapi.DRSUAPI_DRS_WRIT_REP |
drsuapi.DRSUAPI_DRS_INIT_SYNC |
drsuapi.DRSUAPI_DRS_PER_SYNC |
drsuapi.DRSUAPI_DRS_FULL_SYNC_IN_PROGRESS |
drsuapi.DRSUAPI_DRS_NEVER_SYNCED)
ctx.domain_replica_flags = ctx.replica_flags
ctx.do_join()
ctx.logger.info("Created domain %s (SID %s) as a DC" % (ctx.domain_name, ctx.domsid))
| get_dnsHostName |
datatype.rs | // Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
use std::fmt;
use serde_derive::{Deserialize, Serialize};
use serde_json::{json, Value, Value::String as VString};
use crate::error::{ArrowError, Result};
use super::Field;
/// The set of datatypes that are supported by this implementation of Apache Arrow.
///
/// The Arrow specification defines more data types than are listed here.
/// See also [`Schema.fbs`](https://github.com/apache/arrow/blob/master/format/Schema.fbs)
/// for Arrow's specification.
///
/// The variants of this enum include primitive fixed size types as well as parametric or
/// nested types.
/// Currently the Rust implementation supports the following nested types:
/// - `List<T>`
/// - `Struct<T, U, V, ...>`
///
/// Nested types can themselves be nested within other arrays.
/// For more information on these types please see
/// [the physical memory layout of Apache Arrow](https://arrow.apache.org/docs/format/Columnar.html#physical-memory-layout).
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum DataType {
/// Null type
Null,
/// A boolean datatype representing the values `true` and `false`.
Boolean,
/// A signed 8-bit integer.
Int8,
/// A signed 16-bit integer.
Int16,
/// A signed 32-bit integer.
Int32,
/// A signed 64-bit integer.
Int64,
/// An unsigned 8-bit integer.
UInt8,
/// An unsigned 16-bit integer.
UInt16,
/// An unsigned 32-bit integer.
UInt32,
/// An unsigned 64-bit integer.
UInt64,
/// A 16-bit floating point number.
Float16,
/// A 32-bit floating point number.
Float32,
/// A 64-bit floating point number.
Float64,
/// A timestamp with an optional timezone.
///
    /// Time is measured since the Unix epoch, counting from
    /// 00:00:00.000 on 1 January 1970, excluding leap seconds,
    /// and stored as a 64-bit integer in the given `TimeUnit`.
///
/// The time zone is a string indicating the name of a time zone, one of:
///
/// * As used in the Olson time zone database (the "tz database" or
/// "tzdata"), such as "America/New_York"
/// * An absolute time zone offset of the form +XX:XX or -XX:XX, such as +07:30
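    ///
    /// For example (an illustrative value, not from the original docs), a
    /// millisecond-precision timestamp in New York time could be written as
    /// `DataType::Timestamp(TimeUnit::Millisecond, Some("America/New_York".to_string()))`.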
Timestamp(TimeUnit, Option<String>),
/// A 32-bit date representing the elapsed time since UNIX epoch (1970-01-01)
/// in days (32 bits).
Date32,
/// A 64-bit date representing the elapsed time since UNIX epoch (1970-01-01)
/// in milliseconds (64 bits). Values are evenly divisible by 86400000.
Date64,
/// A 32-bit time representing the elapsed time since midnight in the unit of `TimeUnit`.
Time32(TimeUnit),
/// A 64-bit time representing the elapsed time since midnight in the unit of `TimeUnit`.
Time64(TimeUnit),
/// Measure of elapsed time in either seconds, milliseconds, microseconds or nanoseconds.
Duration(TimeUnit),
/// A "calendar" interval which models types that don't necessarily
/// have a precise duration without the context of a base timestamp (e.g.
    /// days can differ in length during daylight saving time transitions).
Interval(IntervalUnit),
/// Opaque binary data of variable length.
Binary,
/// Opaque binary data of fixed size.
/// Enum parameter specifies the number of bytes per value.
FixedSizeBinary(i32),
/// Opaque binary data of variable length and 64-bit offsets.
LargeBinary,
/// A variable-length string in Unicode with UTF-8 encoding.
Utf8,
    /// A variable-length string in Unicode with UTF-8 encoding and 64-bit offsets.
LargeUtf8,
/// A list of some logical data type with variable length.
List(Box<Field>),
/// A list of some logical data type with fixed length.
FixedSizeList(Box<Field>, i32),
/// A list of some logical data type with variable length and 64-bit offsets.
LargeList(Box<Field>),
/// A nested datatype that contains a number of sub-fields.
Struct(Vec<Field>),
/// A nested datatype that can represent slots of differing types.
Union(Vec<Field>),
/// A dictionary encoded array (`key_type`, `value_type`), where
/// each array element is an index of `key_type` into an
/// associated dictionary of `value_type`.
///
/// Dictionary arrays are used to store columns of `value_type`
/// that contain many repeated values using less memory, but with
/// a higher CPU overhead for some operations.
///
    /// This type is mostly used to represent low-cardinality string
    /// arrays or a limited set of primitive types as integers.
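    ///
    /// For example (illustrative, not from the original docs),
    /// `Dictionary(Box::new(DataType::Int32), Box::new(DataType::Utf8))` describes a
    /// string column stored as 32-bit integer indices into a dictionary of strings.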
Dictionary(Box<DataType>, Box<DataType>),
/// Decimal value with precision and scale
Decimal(usize, usize),
    /// A decimal value stored as an `Int64` with the given scale.
Int64Decimal(usize),
}
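// Illustrative sketch (not part of the original file): nested types are composed by
// wrapping `Field`s inside other `DataType`s, e.g. a `List<Struct<a: Int32, b: Utf8>>`.
#[allow(dead_code)]
fn nested_list_of_struct_example() -> DataType {
    // The list's item type is itself a struct with two nullable children.
    let item = Field::new(
        "item",
        DataType::Struct(vec![
            Field::new("a", DataType::Int32, true),
            Field::new("b", DataType::Utf8, true),
        ]),
        true,
    );
    DataType::List(Box::new(item))
}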
/// An absolute length of time in seconds, milliseconds, microseconds or nanoseconds.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum TimeUnit {
/// Time in seconds.
Second,
/// Time in milliseconds.
Millisecond,
/// Time in microseconds.
Microsecond,
/// Time in nanoseconds.
Nanosecond,
}
/// YEAR_MONTH or DAY_TIME interval in SQL style.
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash, PartialOrd, Ord)]
pub enum IntervalUnit {
/// Indicates the number of elapsed whole months, stored as 4-byte integers.
YearMonth,
/// Indicates the number of elapsed days and milliseconds,
/// stored as 2 contiguous 32-bit integers (days, milliseconds) (8-bytes in total).
DayTime,
}
impl fmt::Display for DataType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}", self)
}
}
impl DataType {
/// Parse a data type from a JSON representation.
pub(crate) fn from(json: &Value) -> Result<DataType> {
let default_field = Field::new("", DataType::Boolean, true);
match *json {
Value::Object(ref map) => match map.get("name") {
Some(s) if s == "null" => Ok(DataType::Null),
Some(s) if s == "bool" => Ok(DataType::Boolean),
Some(s) if s == "binary" => Ok(DataType::Binary),
Some(s) if s == "largebinary" => Ok(DataType::LargeBinary),
Some(s) if s == "utf8" => Ok(DataType::Utf8),
Some(s) if s == "largeutf8" => Ok(DataType::LargeUtf8),
Some(s) if s == "fixedsizebinary" => {
                    // use the byteWidth from the map to build the fixed-size binary type
if let Some(Value::Number(size)) = map.get("byteWidth") {
Ok(DataType::FixedSizeBinary(size.as_i64().unwrap() as i32))
} else {
Err(ArrowError::ParseError(
"Expecting a byteWidth for fixedsizebinary".to_string(),
))
}
}
Some(s) if s == "decimal" => {
                    // read precision and scale from the map to build the decimal type
let precision = match map.get("precision") {
Some(p) => Ok(p.as_u64().unwrap() as usize),
None => Err(ArrowError::ParseError(
"Expecting a precision for decimal".to_string(),
)),
};
let scale = match map.get("scale") {
Some(s) => Ok(s.as_u64().unwrap() as usize),
_ => Err(ArrowError::ParseError(
"Expecting a scale for decimal".to_string(),
)),
};
Ok(DataType::Decimal(precision?, scale?))
}
Some(s) if s == "floatingpoint" => match map.get("precision") {
Some(p) if p == "HALF" => Ok(DataType::Float16),
Some(p) if p == "SINGLE" => Ok(DataType::Float32),
Some(p) if p == "DOUBLE" => Ok(DataType::Float64),
_ => Err(ArrowError::ParseError(
"floatingpoint precision missing or invalid".to_string(),
)),
},
Some(s) if s == "timestamp" => {
let unit = match map.get("unit") {
Some(p) if p == "SECOND" => Ok(TimeUnit::Second),
Some(p) if p == "MILLISECOND" => Ok(TimeUnit::Millisecond),
Some(p) if p == "MICROSECOND" => Ok(TimeUnit::Microsecond),
Some(p) if p == "NANOSECOND" => Ok(TimeUnit::Nanosecond),
_ => Err(ArrowError::ParseError(
"timestamp unit missing or invalid".to_string(),
)),
};
let tz = match map.get("timezone") {
None => Ok(None),
Some(VString(tz)) => Ok(Some(tz.clone())),
_ => Err(ArrowError::ParseError(
"timezone must be a string".to_string(),
)),
};
Ok(DataType::Timestamp(unit?, tz?))
}
Some(s) if s == "date" => match map.get("unit") {
Some(p) if p == "DAY" => Ok(DataType::Date32),
Some(p) if p == "MILLISECOND" => Ok(DataType::Date64),
_ => Err(ArrowError::ParseError(
"date unit missing or invalid".to_string(),
)),
},
Some(s) if s == "time" => {
let unit = match map.get("unit") {
Some(p) if p == "SECOND" => Ok(TimeUnit::Second),
Some(p) if p == "MILLISECOND" => Ok(TimeUnit::Millisecond),
Some(p) if p == "MICROSECOND" => Ok(TimeUnit::Microsecond),
Some(p) if p == "NANOSECOND" => Ok(TimeUnit::Nanosecond),
_ => Err(ArrowError::ParseError(
"time unit missing or invalid".to_string(),
)),
};
match map.get("bitWidth") {
Some(p) if p == 32 => Ok(DataType::Time32(unit?)),
Some(p) if p == 64 => Ok(DataType::Time64(unit?)),
_ => Err(ArrowError::ParseError(
"time bitWidth missing or invalid".to_string(),
)),
}
}
Some(s) if s == "duration" => match map.get("unit") {
Some(p) if p == "SECOND" => Ok(DataType::Duration(TimeUnit::Second)),
Some(p) if p == "MILLISECOND" => {
Ok(DataType::Duration(TimeUnit::Millisecond))
}
Some(p) if p == "MICROSECOND" => {
Ok(DataType::Duration(TimeUnit::Microsecond))
}
Some(p) if p == "NANOSECOND" => {
Ok(DataType::Duration(TimeUnit::Nanosecond))
}
_ => Err(ArrowError::ParseError(
"time unit missing or invalid".to_string(),
)),
},
Some(s) if s == "interval" => match map.get("unit") {
Some(p) if p == "DAY_TIME" => {
Ok(DataType::Interval(IntervalUnit::DayTime))
}
Some(p) if p == "YEAR_MONTH" => {
Ok(DataType::Interval(IntervalUnit::YearMonth))
}
_ => Err(ArrowError::ParseError(
"interval unit missing or invalid".to_string(),
)),
},
Some(s) if s == "decimalint" => match map.get("isSigned") {
Some(&Value::Bool(true)) => match map.get("bitWidth") {
Some(&Value::Number(ref n)) => match n.as_u64() {
Some(64) => match map.get("scale") {
Some(&Value::Number(ref scale)) => match scale.as_u64() {
Some(scale) => {
Ok(DataType::Int64Decimal(scale as usize))
}
_ => Err(ArrowError::ParseError(
"decimalint scale invalid".to_string(),
)),
},
_ => Err(ArrowError::ParseError(
"decimalint scale missing".to_string(),
)),
},
_ => Err(ArrowError::ParseError(
"decimalint bitWidth missing or invalid".to_string(),
)),
},
_ => Err(ArrowError::ParseError(
"decimalint bitWidth missing or invalid".to_string(),
)),
},
_ => Err(ArrowError::ParseError(
"decimalint signed missing or invalid".to_string(),
)),
},
Some(s) if s == "int" => match map.get("isSigned") {
Some(&Value::Bool(true)) => match map.get("bitWidth") {
Some(&Value::Number(ref n)) => match n.as_u64() {
Some(8) => Ok(DataType::Int8),
Some(16) => Ok(DataType::Int16),
Some(32) => Ok(DataType::Int32),
Some(64) => Ok(DataType::Int64),
_ => Err(ArrowError::ParseError(
"int bitWidth missing or invalid".to_string(),
)),
},
_ => Err(ArrowError::ParseError(
"int bitWidth missing or invalid".to_string(),
)),
},
Some(&Value::Bool(false)) => match map.get("bitWidth") {
Some(&Value::Number(ref n)) => match n.as_u64() {
Some(8) => Ok(DataType::UInt8),
Some(16) => Ok(DataType::UInt16),
Some(32) => Ok(DataType::UInt32),
Some(64) => Ok(DataType::UInt64),
_ => Err(ArrowError::ParseError(
"int bitWidth missing or invalid".to_string(),
)),
},
_ => Err(ArrowError::ParseError(
"int bitWidth missing or invalid".to_string(),
)),
},
_ => Err(ArrowError::ParseError(
"int signed missing or invalid".to_string(),
)),
},
Some(s) if s == "list" => {
                    // return a list with a default child type, as its child isn't defined in the map
Ok(DataType::List(Box::new(default_field)))
}
Some(s) if s == "largelist" => {
                    // return a largelist with a default child type, as its child isn't defined in the map
Ok(DataType::LargeList(Box::new(default_field)))
}
Some(s) if s == "fixedsizelist" => {
                    // return a fixed-size list with a default child type, as its child isn't defined in the map
if let Some(Value::Number(size)) = map.get("listSize") {
Ok(DataType::FixedSizeList(
Box::new(default_field),
size.as_i64().unwrap() as i32,
))
} else {
Err(ArrowError::ParseError(
"Expecting a listSize for fixedsizelist".to_string(),
))
}
}
Some(s) if s == "struct" => {
// return an empty `struct` type as its children aren't defined in the map
Ok(DataType::Struct(vec![]))
}
Some(other) => Err(ArrowError::ParseError(format!(
"invalid or unsupported type name: {} in {:?}",
other, json
))),
None => Err(ArrowError::ParseError("type name missing".to_string())),
},
_ => Err(ArrowError::ParseError(
"invalid json value type".to_string(),
)),
}
}
/// Generate a JSON representation of the data type.
pub fn to_json(&self) -> Value {
match self {
DataType::Null => json!({"name": "null"}),
DataType::Boolean => json!({"name": "bool"}),
DataType::Int8 => json!({"name": "int", "bitWidth": 8, "isSigned": true}),
DataType::Int16 => json!({"name": "int", "bitWidth": 16, "isSigned": true}),
DataType::Int32 => json!({"name": "int", "bitWidth": 32, "isSigned": true}),
DataType::Int64 => json!({"name": "int", "bitWidth": 64, "isSigned": true}),
DataType::Int64Decimal(scale) => {
json!({"name": "intdecimal", "bitWidth": 64, "isSigned": true, "scale": scale})
}
DataType::UInt8 => json!({"name": "int", "bitWidth": 8, "isSigned": false}),
DataType::UInt16 => json!({"name": "int", "bitWidth": 16, "isSigned": false}),
DataType::UInt32 => json!({"name": "int", "bitWidth": 32, "isSigned": false}),
DataType::UInt64 => json!({"name": "int", "bitWidth": 64, "isSigned": false}),
DataType::Float16 => json!({"name": "floatingpoint", "precision": "HALF"}),
DataType::Float32 => json!({"name": "floatingpoint", "precision": "SINGLE"}),
DataType::Float64 => json!({"name": "floatingpoint", "precision": "DOUBLE"}),
DataType::Utf8 => json!({"name": "utf8"}),
DataType::LargeUtf8 => json!({"name": "largeutf8"}),
DataType::Binary => json!({"name": "binary"}),
DataType::LargeBinary => json!({"name": "largebinary"}),
DataType::FixedSizeBinary(byte_width) => {
json!({"name": "fixedsizebinary", "byteWidth": byte_width})
}
DataType::Struct(_) => json!({"name": "struct"}),
DataType::Union(_) => json!({"name": "union"}),
DataType::List(_) => json!({ "name": "list"}),
DataType::LargeList(_) => json!({ "name": "largelist"}),
DataType::FixedSizeList(_, length) => {
json!({"name":"fixedsizelist", "listSize": length})
}
DataType::Time32(unit) => {
json!({"name": "time", "bitWidth": 32, "unit": match unit {
TimeUnit::Second => "SECOND",
TimeUnit::Millisecond => "MILLISECOND",
TimeUnit::Microsecond => "MICROSECOND",
TimeUnit::Nanosecond => "NANOSECOND",
}})
}
DataType::Time64(unit) => {
json!({"name": "time", "bitWidth": 64, "unit": match unit {
TimeUnit::Second => "SECOND",
TimeUnit::Millisecond => "MILLISECOND",
TimeUnit::Microsecond => "MICROSECOND",
TimeUnit::Nanosecond => "NANOSECOND",
}})
}
DataType::Date32 => {
json!({"name": "date", "unit": "DAY"})
}
DataType::Date64 => {
json!({"name": "date", "unit": "MILLISECOND"})
}
DataType::Timestamp(unit, None) => {
json!({"name": "timestamp", "unit": match unit {
TimeUnit::Second => "SECOND",
TimeUnit::Millisecond => "MILLISECOND",
TimeUnit::Microsecond => "MICROSECOND",
TimeUnit::Nanosecond => "NANOSECOND",
}})
}
DataType::Timestamp(unit, Some(tz)) => {
json!({"name": "timestamp", "unit": match unit {
TimeUnit::Second => "SECOND",
TimeUnit::Millisecond => "MILLISECOND",
TimeUnit::Microsecond => "MICROSECOND",
TimeUnit::Nanosecond => "NANOSECOND",
}, "timezone": tz})
}
DataType::Interval(unit) => json!({"name": "interval", "unit": match unit {
IntervalUnit::YearMonth => "YEAR_MONTH",
IntervalUnit::DayTime => "DAY_TIME",
}}),
DataType::Duration(unit) => json!({"name": "duration", "unit": match unit {
TimeUnit::Second => "SECOND",
TimeUnit::Millisecond => "MILLISECOND",
TimeUnit::Microsecond => "MICROSECOND",
TimeUnit::Nanosecond => "NANOSECOND",
}}),
DataType::Dictionary(_, _) => json!({ "name": "dictionary"}),
DataType::Decimal(precision, scale) => {
json!({"name": "decimal", "precision": precision, "scale": scale})
}
}
}
    /// Returns true if this type is numeric (`Int*`, `UInt*`, `Float32`/`Float64`, or `Int64Decimal`).
pub fn | (t: &DataType) -> bool {
use DataType::*;
matches!(
t,
UInt8
| UInt16
| UInt32
| UInt64
| Int8
| Int16
| Int32
| Int64
| Float32
| Float64
| Int64Decimal(_)
)
}
/// Compares the datatype with another, ignoring nested field names
/// and metadata.
pub(crate) fn equals_datatype(&self, other: &DataType) -> bool {
match (&self, other) {
(DataType::List(a), DataType::List(b))
| (DataType::LargeList(a), DataType::LargeList(b)) => {
a.is_nullable() == b.is_nullable()
&& a.data_type().equals_datatype(b.data_type())
}
(DataType::FixedSizeList(a, a_size), DataType::FixedSizeList(b, b_size)) => {
a_size == b_size
&& a.is_nullable() == b.is_nullable()
&& a.data_type().equals_datatype(b.data_type())
}
(DataType::Struct(a), DataType::Struct(b)) => {
a.len() == b.len()
&& a.iter().zip(b).all(|(a, b)| {
a.is_nullable() == b.is_nullable()
&& a.data_type().equals_datatype(b.data_type())
})
}
_ => self == other,
}
}
}
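// Illustrative sketch (not part of the original file): `to_json` and `from` are meant
// to mirror each other, so a simple roundtrip through the JSON form should be lossless.
#[cfg(test)]
mod json_roundtrip_sketch {
    use super::*;

    #[test]
    fn int32_roundtrips_through_json() {
        let dt = DataType::Int32;
        // Produces {"name": "int", "bitWidth": 32, "isSigned": true}
        let json = dt.to_json();
        assert_eq!(DataType::from(&json).unwrap(), dt);
    }
}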
| is_numeric |
encoder_test.go | package csvutil
import (
"bytes"
"encoding"
"encoding/csv"
"encoding/json"
"errors"
"math"
"reflect"
"testing"
)
var Error = errors.New("error")
var nilIface interface{}
var nilPtr *TypeF
var nilIfacePtr interface{} = nilPtr
type embeddedMap map[string]string
type Embedded14 Embedded3
func (e *Embedded14) MarshalCSV() ([]byte, error) {
return json.Marshal(e)
}
type Embedded15 Embedded3
func (e *Embedded15) MarshalText() ([]byte, error) {
return json.Marshal(Embedded3(*e))
}
type CSVMarshaler struct {
Err error
}
func (m CSVMarshaler) MarshalCSV() ([]byte, error) {
if m.Err != nil {
return nil, m.Err
}
return []byte("csvmarshaler"), nil
}
type PtrRecCSVMarshaler int
func (m *PtrRecCSVMarshaler) MarshalCSV() ([]byte, error) {
return []byte("ptrreccsvmarshaler"), nil
}
func (m *PtrRecCSVMarshaler) CSV() ([]byte, error) {
return []byte("ptrreccsvmarshaler.CSV"), nil
}
type PtrRecTextMarshaler int
func (m *PtrRecTextMarshaler) MarshalText() ([]byte, error) {
return []byte("ptrrectextmarshaler"), nil
}
type TextMarshaler struct {
Err error
}
func (m TextMarshaler) MarshalText() ([]byte, error) {
if m.Err != nil {
return nil, m.Err
}
return []byte("textmarshaler"), nil
}
type CSVTextMarshaler struct {
CSVMarshaler
TextMarshaler
}
type Inline struct {
J1 TypeJ `csv:",inline"`
J2 TypeJ `csv:"prefix-,inline"`
String string `csv:"top-string"`
String2 string `csv:"STR"`
}
type Inline2 struct {
S string
A Inline3 `csv:"A,inline"`
B Inline3 `csv:",inline"`
}
type Inline3 struct {
Inline4 `csv:",inline"`
}
type Inline4 struct {
A string
}
type Inline5 struct {
A Inline2 `csv:"A,inline"`
B Inline2 `csv:",inline"`
}
type Inline6 struct {
A Inline7 `csv:",inline"`
}
type Inline7 struct {
A *Inline6 `csv:",inline"`
X int
}
type Inline8 struct {
F *Inline4 `csv:"A,inline"`
AA int
}
type TypeH struct {
Int int `csv:"int,omitempty"`
Int8 int8 `csv:"int8,omitempty"`
Int16 int16 `csv:"int16,omitempty"`
Int32 int32 `csv:"int32,omitempty"`
Int64 int64 `csv:"int64,omitempty"`
UInt uint `csv:"uint,omitempty"`
Uint8 uint8 `csv:"uint8,omitempty"`
Uint16 uint16 `csv:"uint16,omitempty"`
Uint32 uint32 `csv:"uint32,omitempty"`
Uint64 uint64 `csv:"uint64,omitempty"`
Float32 float32 `csv:"float32,omitempty"`
Float64 float64 `csv:"float64,omitempty"`
String string `csv:"string,omitempty"`
Bool bool `csv:"bool,omitempty"`
V interface{} `csv:"interface,omitempty"`
}
type TypeM struct {
*TextMarshaler `csv:"text"`
}
func TestEncoder(t *testing.T) {
fixtures := []struct {
desc string
in []interface{}
regFunc []interface{}
out [][]string
err error
}{
{
desc: "test all types",
in: []interface{}{
TypeF{
Int: 1,
Pint: pint(2),
Int8: 3,
Pint8: pint8(4),
Int16: 5,
Pint16: pint16(6),
Int32: 7,
Pint32: pint32(8),
Int64: 9,
Pint64: pint64(10),
UInt: 11,
Puint: puint(12),
Uint8: 13,
Puint8: puint8(14),
Uint16: 15,
Puint16: puint16(16),
Uint32: 17,
Puint32: puint32(18),
Uint64: 19,
Puint64: puint64(20),
Float32: 21,
Pfloat32: pfloat32(22),
Float64: 23,
Pfloat64: pfloat64(24),
String: "25",
PString: pstring("26"),
Bool: true,
Pbool: pbool(true),
V: "true",
Pv: pinterface("1"),
Binary: Binary,
PBinary: &BinaryLarge,
},
TypeF{},
},
out: [][]string{
{
"int", "pint", "int8", "pint8", "int16", "pint16", "int32",
"pint32", "int64", "pint64", "uint", "puint", "uint8", "puint8",
"uint16", "puint16", "uint32", "puint32", "uint64", "puint64",
"float32", "pfloat32", "float64", "pfloat64", "string", "pstring",
"bool", "pbool", "interface", "pinterface", "binary", "pbinary",
},
{"1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11",
"12", "13", "14", "15", "16", "17", "18", "19", "20", "21",
"22", "23", "24", "25", "26", "true", "true", "true", "1",
EncodedBinary, EncodedBinaryLarge,
},
{"0", "", "0", "", "0", "", "0", "", "0", "", "0", "",
"0", "", "0", "", "0", "", "0", "", "0", "", "0", "", "", "",
"false", "", "", "", "", "",
},
},
},
{
desc: "tags and unexported fields",
in: []interface{}{
TypeG{
String: "string",
Int: 1,
Float: 3.14,
unexported1: 100,
unexported2: 200,
},
},
out: [][]string{
{"String", "Int"},
{"string", "1"},
},
},
{
desc: "omitempty tags",
in: []interface{}{
TypeH{},
},
out: [][]string{
{"int", "int8", "int16", "int32", "int64", "uint", "uint8", "uint16",
"uint32", "uint64", "float32", "float64", "string", "bool", "interface",
},
{"", "", "", "", "", "", "", "", "", "", "", "", "", "", ""},
},
},
{
desc: "omitempty tags on pointers - non nil default values",
in: []interface{}{
struct {
Pint *int `csv:",omitempty"`
PPint **int `csv:",omitempty"`
PPint2 **int `csv:",omitempty"`
PString *string `csv:",omitempty"`
PBool *bool `csv:",omitempty"`
Iint *interface{} `csv:",omitempty"`
}{
pint(0),
ppint(0),
new(*int),
pstring(""),
pbool(false),
pinterface(0),
},
},
out: [][]string{
{"Pint", "PPint", "PPint2", "PString", "PBool", "Iint"},
{"0", "0", "", "", "false", "0"},
},
},
{
desc: "omitempty tags on pointers - nil ptrs",
in: []interface{}{
struct {
Pint *int `csv:",omitempty"`
PPint **int `csv:",omitempty"`
PString *string `csv:",omitempty"`
PBool *bool `csv:",omitempty"`
Iint *interface{} `csv:",omitempty"`
}{},
},
out: [][]string{
{"Pint", "PPint", "PString", "PBool", "Iint"},
{"", "", "", "", ""},
},
},
{
desc: "omitempty tags on interfaces - non nil default values",
in: []interface{}{
struct {
Iint interface{} `csv:",omitempty"`
IPint interface{} `csv:",omitempty"`
}{
0,
pint(0),
},
struct {
Iint interface{} `csv:",omitempty"`
IPint interface{} `csv:",omitempty"`
}{
1,
pint(1),
},
},
out: [][]string{
{"Iint", "IPint"},
{"0", "0"},
{"1", "1"},
},
},
{
desc: "omitempty tags on interfaces - nil",
in: []interface{}{
struct {
Iint interface{} `csv:",omitempty"`
IPint interface{} `csv:",omitempty"`
}{
nil,
nil,
},
struct {
Iint interface{} `csv:",omitempty"`
IPint interface{} `csv:",omitempty"`
}{
(*int)(nil),
pinterface((*int)(nil)),
},
},
out: [][]string{
{"Iint", "IPint"},
{"", ""},
{"", ""},
},
},
{
desc: "embedded types #1",
in: []interface{}{
TypeA{
Embedded1: Embedded1{
String: "string1",
Float: 1,
},
String: "string",
Embedded2: Embedded2{
Float: 2,
Bool: true,
},
Int: 10,
},
},
out: [][]string{
{"string", "bool", "int"},
{"string", "true", "10"},
},
},
{
desc: "embedded non struct tagged types",
in: []interface{}{
TypeB{
Embedded3: Embedded3{"key": "val"},
String: "string1",
},
},
out: [][]string{
{"json", "string"},
{`{"key":"val"}`, "string1"},
},
},
{
desc: "embedded non struct tagged types with pointer receiver MarshalCSV",
in: []interface{}{
&struct {
Embedded14 `csv:"json"`
A Embedded14 `csv:"json2"`
}{
Embedded14: Embedded14{"key": "val"},
A: Embedded14{"key1": "val1"},
},
struct {
*Embedded14 `csv:"json"`
A *Embedded14 `csv:"json2"`
}{
Embedded14: &Embedded14{"key": "val"},
A: &Embedded14{"key1": "val1"},
},
},
out: [][]string{
{"json", "json2"},
{`{"key":"val"}`, `{"key1":"val1"}`},
{`{"key":"val"}`, `{"key1":"val1"}`},
},
},
{
desc: "embedded non struct tagged types with pointer receiver MarshalText",
in: []interface{}{
&struct {
Embedded15 `csv:"json"`
A Embedded15 `csv:"json2"`
}{
Embedded15: Embedded15{"key": "val"},
A: Embedded15{"key1": "val1"},
},
struct {
*Embedded15 `csv:"json"`
A *Embedded15 `csv:"json2"`
}{
Embedded15: &Embedded15{"key": "val"},
A: &Embedded15{"key1": "val1"},
},
},
out: [][]string{
{"json", "json2"},
{`{"key":"val"}`, `{"key1":"val1"}`},
{`{"key":"val"}`, `{"key1":"val1"}`},
},
},
{
desc: "embedded pointer types",
in: []interface{}{
TypeC{
Embedded1: &Embedded1{
String: "string2",
Float: 1,
},
String: "string1",
},
},
out: [][]string{
{"float", "string"},
{`1`, "string1"},
},
},
{
desc: "embedded pointer types with nil values",
in: []interface{}{
TypeC{
Embedded1: nil,
String: "string1",
},
},
out: [][]string{
{"float", "string"},
{``, "string1"},
},
},
{
desc: "embedded non struct tagged pointer types",
in: []interface{}{
TypeD{
Embedded3: &Embedded3{"key": "val"},
String: "string1",
},
},
out: [][]string{
{"json", "string"},
{`{"key":"val"}`, "string1"},
},
},
{
desc: "embedded non struct tagged pointer types with nil value - textmarshaler",
in: []interface{}{
TypeM{
TextMarshaler: nil,
},
},
out: [][]string{
{"text"},
{""},
},
},
{
desc: "embedded non struct tagged pointer types with nil value - csvmarshaler",
in: []interface{}{
TypeD{
Embedded3: nil,
String: "string1",
},
},
out: [][]string{
{"json", "string"},
{"", "string1"},
},
},
{
desc: "tagged fields priority",
in: []interface{}{
TagPriority{Foo: 1, Bar: 2},
},
out: [][]string{
{"Foo"},
{"2"},
},
},
{
desc: "conflicting embedded fields #1",
in: []interface{}{
Embedded5{
Embedded6: Embedded6{X: 60},
Embedded7: Embedded7{X: 70},
Embedded8: Embedded8{
Embedded9: Embedded9{
X: 90,
Y: 91,
},
},
},
},
out: [][]string{
{"Y"},
{"91"},
},
},
{
desc: "conflicting embedded fields #2",
in: []interface{}{
Embedded10{
Embedded11: Embedded11{
Embedded6: Embedded6{X: 60},
},
Embedded12: Embedded12{
Embedded6: Embedded6{X: 60},
},
Embedded13: Embedded13{
Embedded8: Embedded8{
Embedded9: Embedded9{
X: 90,
Y: 91,
},
},
},
},
},
out: [][]string{
{"Y"},
{"91"},
},
},
{
desc: "double pointer",
in: []interface{}{
TypeE{
String: &PString,
Int: &Int,
},
},
out: [][]string{
{"string", "int"},
{"string", "10"},
},
},
{
desc: "nil double pointer",
in: []interface{}{
TypeE{},
},
out: [][]string{
{"string", "int"},
{"", ""},
},
},
{
desc: "unexported non-struct embedded",
in: []interface{}{
struct {
A int
embeddedMap
}{1, make(embeddedMap)},
},
out: [][]string{
{"A"},
{"1"},
},
},
{
desc: "cyclic reference",
in: []interface{}{
A{
B: B{Y: 2, A: &A{}},
X: 1,
},
},
out: [][]string{
{"Y", "X"},
{"2", "1"},
},
},
{
desc: "ptr receiver csv marshaler",
in: []interface{}{
&struct {
A PtrRecCSVMarshaler
}{},
struct {
A PtrRecCSVMarshaler
}{},
struct {
A *PtrRecCSVMarshaler
}{new(PtrRecCSVMarshaler)},
&struct {
A *PtrRecCSVMarshaler
}{new(PtrRecCSVMarshaler)},
&struct {
A *PtrRecCSVMarshaler
}{},
},
out: [][]string{
{"A"},
{"ptrreccsvmarshaler"},
{"0"},
{"ptrreccsvmarshaler"},
{"ptrreccsvmarshaler"},
{""},
},
},
{
desc: "ptr receiver text marshaler",
in: []interface{}{
&struct {
A PtrRecTextMarshaler
}{},
struct {
A PtrRecTextMarshaler
}{},
struct {
A *PtrRecTextMarshaler
}{new(PtrRecTextMarshaler)},
&struct {
A *PtrRecTextMarshaler
}{new(PtrRecTextMarshaler)},
&struct {
A *PtrRecTextMarshaler
}{},
},
out: [][]string{
{"A"},
{"ptrrectextmarshaler"},
{"0"},
{"ptrrectextmarshaler"},
{"ptrrectextmarshaler"},
{""},
},
},
{
desc: "text marshaler",
in: []interface{}{
struct {
A CSVMarshaler
}{},
struct {
A TextMarshaler
}{},
struct {
A struct {
TextMarshaler
CSVMarshaler
}
}{},
},
out: [][]string{
{"A"},
{"csvmarshaler"},
{"textmarshaler"},
{"csvmarshaler"},
},
},
{
desc: "primitive type alias implementing Marshaler",
in: []interface{}{
EnumType{Enum: EnumFirst},
EnumType{Enum: EnumSecond},
},
out: [][]string{
{"enum"},
{"first"},
{"second"},
},
},
{
desc: "aliased type",
in: []interface{}{
struct{ Float float64 }{3.14},
},
out: [][]string{
{"Float"},
{"3.14"},
},
},
{
desc: "embedded tagged marshalers",
in: []interface{}{
struct {
CSVMarshaler `csv:"csv"`
TextMarshaler `csv:"text"`
}{},
},
out: [][]string{
{"csv", "text"},
{"csvmarshaler", "textmarshaler"},
},
},
{
desc: "embedded pointer tagged marshalers",
in: []interface{}{
struct {
*CSVMarshaler `csv:"csv"`
*TextMarshaler `csv:"text"`
}{&CSVMarshaler{}, &TextMarshaler{}},
},
out: [][]string{
{"csv", "text"},
{"csvmarshaler", "textmarshaler"},
},
},
{
desc: "inline fields",
in: []interface{}{
Inline{
J1: TypeJ{
String: "j1",
Int: "1",
Float: "1",
Embedded16: Embedded16{Bool: true, Uint8: 1},
},
J2: TypeJ{
String: "j2",
Int: "2",
Float: "2",
Embedded16: Embedded16{Bool: true, Uint8: 2},
},
String: "top-level-str",
String2: "STR",
},
},
out: [][]string{
{"int", "Bool", "Uint8", "float", "prefix-STR", "prefix-int", "prefix-Bool", "prefix-Uint8", "prefix-float", "top-string", "STR"},
{"1", "true", "1", "1", "j2", "2", "true", "2", "2", "top-level-str", "STR"},
},
},
{
desc: "inline chain",
in: []interface{}{
Inline5{
A: Inline2{
S: "1",
A: Inline3{
Inline4: Inline4{A: "11"},
},
B: Inline3{
Inline4: Inline4{A: "12"},
},
},
B: Inline2{
S: "2",
A: Inline3{
Inline4: Inline4{A: "21"},
},
B: Inline3{
Inline4: Inline4{A: "22"},
},
},
},
},
out: [][]string{
{"AS", "AAA", "S", "A"},
{"1", "11", "2", "22"},
},
},
{
desc: "cyclic inline - no prefix",
in: []interface{}{
Inline6{
A: Inline7{
A: &Inline6{A: Inline7{
A: &Inline6{},
X: 10,
}},
X: 1,
},
},
},
out: [][]string{
{"X"},
{"1"},
},
},
{
desc: "embedded with inline tag",
in: []interface{}{
struct {
Inline7 `csv:"A,inline"`
}{
Inline7: Inline7{
A: &Inline6{A: Inline7{
A: &Inline6{},
X: 10,
}},
X: 1,
},
},
},
out: [][]string{
{"AX"},
{"1"},
},
},
{
desc: "embedded with empty inline tag",
in: []interface{}{
struct {
Inline7 `csv:",inline"`
}{
Inline7: Inline7{
A: &Inline6{A: Inline7{
A: &Inline6{},
X: 10,
}},
X: 1,
},
},
},
out: [][]string{
{"X"},
{"1"},
},
},
{
desc: "embedded with ptr inline tag",
in: []interface{}{
struct {
*Inline7 `csv:"A,inline"`
}{
Inline7: &Inline7{
A: &Inline6{A: Inline7{
A: &Inline6{},
X: 10,
}},
X: 1,
},
},
},
out: [][]string{
{"AX"},
{"1"},
},
},
{
desc: "inline visibility rules - top field first",
in: []interface{}{
struct {
AA string
F Inline4 `csv:"A,inline"`
}{
AA: "1",
F: Inline4{A: "10"},
},
},
out: [][]string{
{"AA"},
{"1"},
},
},
{
desc: "inline visibility rules - top field last",
in: []interface{}{
Inline8{
F: &Inline4{A: "10"},
AA: 1,
},
},
out: [][]string{
{"AA"},
{"1"},
},
},
{
desc: "ignore inline tag on non struct",
in: []interface{}{
struct {
X int `csv:",inline"`
Y int `csv:"y,inline"`
}{
X: 1,
Y: 2,
},
},
out: [][]string{
{"X", "y"},
{"1", "2"},
},
},
{
desc: "registered func - non ptr elem",
in: []interface{}{
struct {
Int int
Pint *int
Iface interface{}
Piface *interface{}
}{
Pint: pint(0),
Iface: 34,
Piface: pinterface(34),
},
},
regFunc: []interface{}{
func(int) ([]byte, error) { return []byte("int"), nil },
},
out: [][]string{
{"Int", "Pint", "Iface", "Piface"},
{"int", "int", "int", "int"},
},
},
{
desc: "registered func - ptr elem",
in: []interface{}{
&struct {
Int int
Pint *int
Iface interface{}
Piface *interface{}
}{
Pint: pint(0),
Iface: 34,
Piface: pinterface(34),
},
},
regFunc: []interface{}{
func(int) ([]byte, error) { return []byte("int"), nil },
},
out: [][]string{
{"Int", "Pint", "Iface", "Piface"},
{"int", "int", "int", "int"},
},
},
{
desc: "registered func - ptr type - non ptr elem",
in: []interface{}{
struct {
Int int
Pint *int
Iface interface{}
Piface *interface{}
}{
Pint: pint(0),
Iface: 34,
Piface: pinterface(pint(34)),
},
},
regFunc: []interface{}{
func(*int) ([]byte, error) { return []byte("int"), nil },
},
out: [][]string{
{"Int", "Pint", "Iface", "Piface"},
{"0", "int", "34", "int"},
},
},
{
desc: "registered func - ptr type - ptr elem",
in: []interface{}{
&struct {
Int int
Pint *int
Iface interface{}
Piface *interface{}
}{
Pint: pint(0),
Iface: 34,
Piface: pinterface(pint(34)),
},
},
regFunc: []interface{}{
func(*int) ([]byte, error) { return []byte("int"), nil },
},
out: [][]string{
{"Int", "Pint", "Iface", "Piface"},
{"int", "int", "34", "int"},
},
},
{
desc: "registered func - mixed types - non ptr elem",
in: []interface{}{
struct {
Int int
Pint *int
Iface interface{}
Piface *interface{}
}{
Pint: pint(0),
Iface: 34,
Piface: pinterface(pint(34)),
},
},
regFunc: []interface{}{
func(int) ([]byte, error) { return []byte("int"), nil },
func(*int) ([]byte, error) { return []byte("*int"), nil },
},
out: [][]string{
{"Int", "Pint", "Iface", "Piface"},
{"int", "*int", "int", "*int"},
},
},
{
desc: "registered func - mixed types - ptr elem",
in: []interface{}{
&struct {
Int int
Pint *int
Iface interface{}
Piface *interface{}
}{
Pint: pint(0),
Iface: 34,
Piface: pinterface(pint(34)),
},
},
regFunc: []interface{}{
func(int) ([]byte, error) { return []byte("int"), nil },
func(*int) ([]byte, error) { return []byte("*int"), nil },
},
out: [][]string{
{"Int", "Pint", "Iface", "Piface"},
{"int", "*int", "int", "*int"},
},
},
{
desc: "registered func - interfaces",
in: []interface{}{
&struct {
CSVMarshaler Marshaler
Marshaler CSVMarshaler
PMarshaler *CSVMarshaler
CSVTextMarshaler CSVTextMarshaler
PCSVTextMarshaler *CSVTextMarshaler
PtrRecCSVMarshaler PtrRecCSVMarshaler
PtrRecTextMarshaler PtrRecTextMarshaler
}{
PMarshaler: &CSVMarshaler{},
PCSVTextMarshaler: &CSVTextMarshaler{},
},
},
regFunc: []interface{}{
func(Marshaler) ([]byte, error) { return []byte("registered.marshaler"), nil },
func(encoding.TextMarshaler) ([]byte, error) { return []byte("registered.textmarshaler"), nil },
},
out: [][]string{
{"CSVMarshaler", "Marshaler", "PMarshaler", "CSVTextMarshaler", "PCSVTextMarshaler", "PtrRecCSVMarshaler", "PtrRecTextMarshaler"},
{"registered.marshaler", "registered.marshaler", "registered.marshaler", "registered.marshaler", "registered.marshaler", "registered.marshaler", "registered.textmarshaler"},
},
},
{
desc: "registered func - interface order",
in: []interface{}{
&struct {
CSVTextMarshaler CSVTextMarshaler
PCSVTextMarshaler *CSVTextMarshaler
}{
PCSVTextMarshaler: &CSVTextMarshaler{},
},
},
regFunc: []interface{}{
func(encoding.TextMarshaler) ([]byte, error) { return []byte("registered.textmarshaler"), nil },
func(Marshaler) ([]byte, error) { return []byte("registered.marshaler"), nil },
},
out: [][]string{
{"CSVTextMarshaler", "PCSVTextMarshaler"},
{"registered.textmarshaler", "registered.textmarshaler"},
},
},
{
desc: "registered func - method",
in: []interface{}{
&struct {
PtrRecCSVMarshaler PtrRecCSVMarshaler
}{},
struct {
PtrRecCSVMarshaler PtrRecCSVMarshaler
}{},
},
regFunc: []interface{}{
(*PtrRecCSVMarshaler).CSV,
},
out: [][]string{
{"PtrRecCSVMarshaler"},
{"ptrreccsvmarshaler.CSV"},
{"0"},
},
},
{
desc: "registered func - fallback error",
in: []interface{}{
struct {
Embedded14
}{},
},
regFunc: []interface{}{
(*Embedded14).MarshalCSV,
},
err: &UnsupportedTypeError{
Type: reflect.TypeOf(Embedded14{}),
},
},
{
desc: "registered interface func - returning error",
in: []interface{}{
&struct {
Embedded14 Embedded14
}{},
},
regFunc: []interface{}{
func(Marshaler) ([]byte, error) { return nil, Error },
},
err: Error,
},
{
desc: "registered func - returning error",
in: []interface{}{
&struct {
A InvalidType
}{},
},
regFunc: []interface{}{
func(*InvalidType) ([]byte, error) { return nil, Error },
},
err: Error,
},
{
desc: "registered func - fallback error on interface",
in: []interface{}{
struct {
Embedded14
}{},
},
regFunc: []interface{}{
func(m Marshaler) ([]byte, error) { return nil, nil },
},
err: &UnsupportedTypeError{
Type: reflect.TypeOf(Embedded14{}),
},
},
{
desc: "marshaler fallback error",
in: []interface{}{
struct {
Embedded14
}{},
},
err: &UnsupportedTypeError{
Type: reflect.TypeOf(Embedded14{}),
},
},
{
desc: "encode different types",
			// This doesn't mean the output CSV is valid. Generally this is an
			// invalid use. However, we need to make sure that the encoder is
			// doing what it is asked to... correctly.
in: []interface{}{
struct {
A int
}{},
struct {
A int
B string
}{},
struct {
A int
}{},
struct{}{},
},
out: [][]string{
{"A"},
{"0"},
{"0", ""},
{"0"},
{},
},
},
{
desc: "encode interface values",
in: []interface{}{
struct {
V interface{}
}{1},
struct {
V interface{}
}{pint(10)},
struct {
V interface{}
}{ppint(100)},
struct {
V interface{}
}{pppint(1000)},
struct {
V *interface{}
}{pinterface(ppint(10000))},
struct {
V *interface{}
}{func() *interface{} {
var v interface{} = pppint(100000)
var vv interface{} = v
return &vv
}()},
struct {
V interface{}
}{func() interface{} {
var v interface{} = &CSVMarshaler{}
var vv interface{} = v
return &vv
}()},
struct {
V interface{}
}{func() interface{} {
var v interface{} = CSVMarshaler{}
var vv interface{} = v
return &vv
}()},
struct {
V interface{}
}{func() interface{} {
var v interface{} = &CSVMarshaler{}
var vv interface{} = v
return vv
}()},
struct {
V interface{}
}{
V: func() interface{} {
return PtrRecCSVMarshaler(5)
}(),
},
struct {
V interface{}
}{
V: func() interface{} {
m := PtrRecCSVMarshaler(5)
return &m
}(),
},
struct {
V interface{}
}{func() interface{} {
var v interface{}
var vv interface{} = v
return &vv
}()},
},
out: [][]string{
{"V"},
{"1"},
{"10"},
{"100"},
{"1000"},
{"10000"},
{"100000"},
{"csvmarshaler"},
{"csvmarshaler"},
{"csvmarshaler"},
{"5"},
{"ptrreccsvmarshaler"},
{""},
},
},
{
desc: "encode NaN",
in: []interface{}{
struct {
Float float64
}{math.NaN()},
},
out: [][]string{
{"Float"},
{"NaN"},
},
},
{
desc: "encode NaN with aliased type",
in: []interface{}{
struct {
Float Float
}{Float(math.NaN())},
},
out: [][]string{
{"Float"},
{"NaN"},
},
},
{
desc: "empty struct",
in: []interface{}{
struct{}{},
},
out: [][]string{{}, {}},
},
{
desc: "value wrapped in interfaces and pointers",
in: []interface{}{
func() (v interface{}) { v = &struct{ A int }{5}; return v }(),
},
out: [][]string{{"A"}, {"5"}},
},
{
desc: "csv marshaler error",
in: []interface{}{
struct {
A CSVMarshaler
}{
A: CSVMarshaler{Err: Error},
},
},
err: &MarshalerError{Type: reflect.TypeOf(CSVMarshaler{}), MarshalerType: "MarshalCSV", Err: Error},
},
{
desc: "csv marshaler error as registered error",
in: []interface{}{
struct {
A CSVMarshaler
}{
A: CSVMarshaler{Err: Error},
},
},
regFunc: []interface{}{
CSVMarshaler.MarshalCSV,
},
err: Error,
},
{
desc: "text marshaler error",
in: []interface{}{
struct {
A TextMarshaler
}{
A: TextMarshaler{Err: Error},
},
},
err: &MarshalerError{Type: reflect.TypeOf(TextMarshaler{}), MarshalerType: "MarshalText", Err: Error},
},
{
desc: "text marshaler fallback error - ptr reciever",
in: []interface{}{
struct {
A Embedded15
}{},
},
err: &UnsupportedTypeError{Type: reflect.TypeOf(Embedded15{})},
},
{
desc: "text marshaler error as registered func",
in: []interface{}{
struct {
A TextMarshaler
}{
A: TextMarshaler{Err: Error},
},
},
regFunc: []interface{}{
TextMarshaler.MarshalText,
},
err: Error,
},
{
desc: "unsupported type",
in: []interface{}{
InvalidType{},
},
err: &UnsupportedTypeError{
Type: reflect.TypeOf(struct{}{}),
},
},
{
desc: "unsupported double pointer type",
in: []interface{}{
struct {
A **struct{}
}{},
},
err: &UnsupportedTypeError{
Type: reflect.TypeOf(struct{}{}),
},
},
{
desc: "unsupported interface type",
in: []interface{}{
TypeF{V: TypeA{}},
},
err: &UnsupportedTypeError{
Type: reflect.TypeOf(TypeA{}),
},
},
{
desc: "encode not a struct",
in: []interface{}{int(1)},
err: &InvalidEncodeError{
Type: reflect.TypeOf(int(1)),
},
},
{
desc: "encode nil interface",
in: []interface{}{nilIface},
err: &InvalidEncodeError{
Type: reflect.TypeOf(nilIface),
},
},
{
desc: "encode nil ptr",
in: []interface{}{nilPtr},
err: &InvalidEncodeError{},
},
{
desc: "encode nil interface pointer",
in: []interface{}{nilIfacePtr},
err: &InvalidEncodeError{},
},
}
for _, f := range fixtures {
t.Run(f.desc, func(t *testing.T) {
var buf bytes.Buffer
w := csv.NewWriter(&buf)
enc := NewEncoder(w)
for _, f := range f.regFunc {
enc.Register(f)
}
for _, v := range f.in {
err := enc.Encode(v)
if f.err != nil {
if !reflect.DeepEqual(f.err, err) {
t.Errorf("want err=%v; got %v", f.err, err)
}
return
} else if err != nil {
t.Errorf("want err=nil; got %v", err)
}
}
w.Flush()
if err := w.Error(); err != nil {
t.Errorf("want err=nil; got %v", err)
}
var out bytes.Buffer
if err := csv.NewWriter(&out).WriteAll(f.out); err != nil {
t.Errorf("want err=nil; got %v", err)
}
if buf.String() != out.String() {
t.Errorf("want=%s; got %s", out.String(), buf.String())
}
})
}
t.Run("test decoder tags", func(t *testing.T) {
type Test struct {
A int `custom:"1"`
B string `custom:"2"`
C float64 `custom:"-"`
}
test := &Test{
A: 1,
B: "b",
C: 2.5,
}
var bufs [4]bytes.Buffer
for i := 0; i < 4; i += 2 {
encode(t, &bufs[i], test, "")
encode(t, &bufs[i+1], test, "custom")
}
if b1, b2 := bufs[0].String(), bufs[2].String(); b1 != b2 {
t.Errorf("buffers are not equal: %s vs %s", b1, b2)
}
if b1, b2 := bufs[1].String(), bufs[3].String(); b1 != b2 {
t.Errorf("buffers are not equal: %s vs %s", b1, b2)
}
expected1 := [][]string{
{"A", "B", "C"},
{"1", "b", "2.5"},
}
expected2 := [][]string{
{"1", "2"},
{"1", "b"},
}
if b1, b2 := bufs[0].String(), encodeCSV(t, expected1); b1 != b2 {
t.Errorf("want buf=%s; got %s", b2, b1)
}
if b1, b2 := bufs[1].String(), encodeCSV(t, expected2); b1 != b2 {
t.Errorf("want buf=%s; got %s", b2, b1)
}
})
t.Run("error messages", func(t *testing.T) {
fixtures := []struct {
desc string
expected string
v interface{}
}{
{
desc: "invalid encode error message",
expected: "csvutil: Encode(int64)",
v: int64(1),
},
{
desc: "invalid encode error message with nil interface",
expected: "csvutil: Encode(nil)",
v: nilIface,
},
{
desc: "invalid encode error message with nil value",
expected: "csvutil: Encode(nil)",
v: nilPtr,
},
{
desc: "unsupported type error message",
expected: "csvutil: unsupported type: struct {}",
v: struct{ InvalidType }{},
},
{
desc: "marshaler error message",
expected: "csvutil: error calling MarshalText for type csvutil.TextMarshaler: " + Error.Error(),
v: struct{ M TextMarshaler }{TextMarshaler{Error}},
},
}
for _, f := range fixtures {
t.Run(f.desc, func(t *testing.T) {
err := NewEncoder(csv.NewWriter(bytes.NewBuffer(nil))).Encode(f.v)
if err == nil {
t.Fatal("want err not to be nil")
}
if err.Error() != f.expected {
t.Errorf("want=%s; got %s", f.expected, err.Error())
}
})
}
})
t.Run("EncodeHeader", func(t *testing.T) {
t.Run("no double header with encode", func(t *testing.T) {
var buf bytes.Buffer
w := csv.NewWriter(&buf)
enc := NewEncoder(w)
if err := enc.EncodeHeader(TypeI{}); err != nil {
t.Errorf("want err=nil; got %v", err)
}
if err := enc.Encode(TypeI{}); err != nil {
t.Errorf("want err=nil; got %v", err)
}
w.Flush()
expected := encodeCSV(t, [][]string{
{"String", "int"},
{"", ""},
})
if buf.String() != expected {
t.Errorf("want out=%s; got %s", expected, buf.String())
}
})
t.Run("encode writes header if EncodeHeader fails", func(t *testing.T) {
var buf bytes.Buffer
w := csv.NewWriter(&buf)
enc := NewEncoder(w)
if err := enc.EncodeHeader(InvalidType{}); err == nil {
t.Errorf("expected not nil error")
}
if err := enc.Encode(TypeI{}); err != nil {
t.Errorf("want err=nil; got %v", err)
}
w.Flush()
expected := encodeCSV(t, [][]string{
{"String", "int"},
{"", ""},
})
if buf.String() != expected {
t.Errorf("want out=%s; got %s", expected, buf.String())
}
})
fixtures := []struct {
desc string
in interface{}
tag string
out [][]string
err error
}{
{
desc: "conflicting fields",
in: &Embedded10{},
out: [][]string{
{"Y"},
},
},
{
desc: "custom tag",
in: TypeJ{},
tag: "json",
out: [][]string{
{"string", "bool", "Uint", "Float"},
},
},
{
desc: "nil interface ptr value",
in: nilIfacePtr,
out: [][]string{
{
"int",
"pint",
"int8",
"pint8",
"int16",
"pint16",
"int32",
"pint32",
"int64",
"pint64",
"uint",
"puint",
"uint8",
"puint8",
"uint16",
"puint16",
"uint32",
"puint32",
"uint64",
"puint64",
"float32",
"pfloat32",
"float64",
"pfloat64",
"string",
"pstring",
"bool",
"pbool",
"interface",
"pinterface",
"binary",
"pbinary",
},
},
},
{
desc: "ptr to nil interface ptr value",
in: &nilIfacePtr,
out: [][]string{
{
"int",
"pint",
"int8",
"pint8",
"int16",
"pint16",
"int32",
"pint32",
"int64",
"pint64",
"uint",
"puint",
"uint8",
"puint8",
"uint16",
"puint16",
"uint32",
"puint32",
"uint64",
"puint64",
"float32",
"pfloat32",
"float64",
"pfloat64",
"string",
"pstring",
"bool",
"pbool",
"interface",
"pinterface",
"binary",
"pbinary",
},
},
},
{
desc: "nil ptr value",
in: nilPtr,
out: [][]string{
{
"int",
"pint",
"int8",
"pint8",
"int16",
"pint16",
"int32",
"pint32",
"int64",
"pint64",
"uint",
"puint",
"uint8",
"puint8",
"uint16",
"puint16",
"uint32",
"puint32",
"uint64",
"puint64",
"float32",
"pfloat32",
"float64",
"pfloat64",
"string",
"pstring",
"bool",
"pbool",
"interface",
"pinterface",
"binary",
"pbinary",
},
},
},
{
desc: "ptr to nil ptr value",
in: &nilPtr,
out: [][]string{
{
"int",
"pint",
"int8",
"pint8",
"int16",
"pint16",
"int32",
"pint32",
"int64",
"pint64",
"uint",
"puint",
"uint8",
"puint8",
"uint16",
"puint16",
"uint32",
"puint32",
"uint64",
"puint64",
"float32",
"pfloat32",
"float64",
"pfloat64",
"string",
"pstring",
"bool",
"pbool",
"interface",
"pinterface",
"binary",
"pbinary",
},
},
},
{
desc: "ptr to nil interface",
in: &nilIface,
err: &UnsupportedTypeError{Type: reflect.ValueOf(&nilIface).Type().Elem()},
},
{
desc: "nil value",
err: &UnsupportedTypeError{},
},
{
desc: "ptr - not a struct",
in: &[]int{},
err: &UnsupportedTypeError{Type: reflect.TypeOf([]int{})},
},
{
desc: "not a struct",
in: int(1),
err: &UnsupportedTypeError{Type: reflect.TypeOf(int(0))},
},
}
for _, f := range fixtures {
t.Run(f.desc, func(t *testing.T) {
var buf bytes.Buffer
w := csv.NewWriter(&buf)
enc := NewEncoder(w)
enc.Tag = f.tag
err := enc.EncodeHeader(f.in)
w.Flush()
if !reflect.DeepEqual(err, f.err) {
t.Errorf("want err=%v; got %v", f.err, err)
}
if f.err != nil {
return
}
if expected := encodeCSV(t, f.out); buf.String() != expected {
t.Errorf("want out=%s; got %s", expected, buf.String())
}
})
}
})
t.Run("AutoHeader false", func(t *testing.T) {
var buf bytes.Buffer
w := csv.NewWriter(&buf)
enc := NewEncoder(w)
enc.AutoHeader = false
if err := enc.Encode(TypeG{
String: "s",
Int: 10,
}); err != nil {
t.Fatalf("want err=nil; got %v", err)
}
w.Flush()
expected := encodeCSV(t, [][]string{{"s", "10"}})
if expected != buf.String() {
t.Errorf("want %s; got %s", expected, buf.String())
}
})
t.Run("fail on type encoding without header", func(t *testing.T) {
var buf bytes.Buffer
w := csv.NewWriter(&buf)
enc := NewEncoder(w)
enc.AutoHeader = false
err := enc.Encode(struct {
Invalid InvalidType
}{})
expected := &UnsupportedTypeError{Type: reflect.TypeOf(InvalidType{})}
if !reflect.DeepEqual(err, expected) {
t.Errorf("want %v; got %v", expected, err)
}
})
t.Run("fail while writing header", func(t *testing.T) {
Error := errors.New("error")
enc := NewEncoder(failingWriter{Err: Error})
if err := enc.EncodeHeader(TypeA{}); err != Error {
t.Errorf("want %v; got %v", Error, err)
}
})
t.Run("slice and array", func(t *testing.T) {
fixtures := []struct {
desc string
in interface{}
out [][]string
err error
}{
{
desc: "slice",
in: []TypeI{
{"1", 1},
{"2", 2},
},
out: [][]string{
{"String", "int"},
{"1", "1"},
{"2", "2"},
},
},
{
desc: "ptr slice",
in: &[]TypeI{
{"1", 1},
{"2", 2},
},
out: [][]string{
{"String", "int"},
{"1", "1"},
{"2", "2"},
},
},
{
desc: "ptr slice with ptr elements",
in: &[]*TypeI{
{"1", 1},
{"2", 2},
},
out: [][]string{
{"String", "int"},
{"1", "1"},
{"2", "2"},
},
},
{
desc: "array",
in: [2]TypeI{
{"1", 1},
{"2", 2},
},
out: [][]string{
{"String", "int"},
{"1", "1"},
{"2", "2"},
},
},
{
desc: "ptr array",
in: &[2]TypeI{
{"1", 1},
{"2", 2},
},
out: [][]string{
{"String", "int"},
{"1", "1"},
{"2", "2"},
},
},
{
desc: "ptr array with ptr elements",
in: &[2]*TypeI{
{"1", 1},
{"2", 2},
},
out: [][]string{
{"String", "int"},
{"1", "1"},
{"2", "2"},
},
},
{
desc: "array with default val",
in: [2]TypeI{
{"1", 1},
},
out: [][]string{
{"String", "int"},
{"1", "1"},
{"", ""},
},
},
{
desc: "no auto header on empty slice",
in: []TypeI{},
out: [][]string{},
},
{
desc: "no auto header on empty array",
in: [0]TypeI{},
out: [][]string{},
},
{
desc: "disallow double slice",
in: [][]TypeI{
{
{"1", 1},
},
},
err: &InvalidEncodeError{Type: reflect.TypeOf([][]TypeI{})},
},
{
desc: "disallow double ptr slice",
in: &[][]TypeI{
{
{"1", 1},
},
},
err: &InvalidEncodeError{Type: reflect.TypeOf(&[][]TypeI{})},
},
{
desc: "disallow double ptr slice with ptr slice",
in: &[]*[]TypeI{
{
{"1", 1},
},
},
err: &InvalidEncodeError{Type: reflect.TypeOf(&[]*[]TypeI{})},
},
{
desc: "disallow double array",
in: [2][2]TypeI{
{
{"1", 1},
},
},
err: &InvalidEncodeError{Type: reflect.TypeOf([2][2]TypeI{})},
},
{
desc: "disallow double ptr array",
in: &[2][2]TypeI{
{
{"1", 1},
},
},
err: &InvalidEncodeError{Type: reflect.TypeOf(&[2][2]TypeI{})},
},
{
desc: "disallow interface slice",
in: []interface{}{
TypeI{"1", 1},
},
err: &InvalidEncodeError{Type: reflect.TypeOf([]interface{}{})},
},
{
desc: "disallow interface array",
in: [1]interface{}{
TypeI{"1", 1},
},
err: &InvalidEncodeError{Type: reflect.TypeOf([1]interface{}{})},
},
}
for _, f := range fixtures {
t.Run(f.desc, func(t *testing.T) {
var buf bytes.Buffer
w := csv.NewWriter(&buf)
err := NewEncoder(w).Encode(f.in)
if f.err != nil {
if !reflect.DeepEqual(f.err, err) {
t.Errorf("want err=%v; got %v", f.err, err)
}
return
}
if err != nil {
t.Fatalf("want err=nil; got %v", err)
}
w.Flush()
if err := w.Error(); err != nil {
t.Errorf("want err=nil; got %v", err)
}
var out bytes.Buffer
if err := csv.NewWriter(&out).WriteAll(f.out); err != nil {
t.Errorf("want err=nil; got %v", err)
}
if buf.String() != out.String() {
t.Errorf("want=%s; got %s", out.String(), buf.String())
}
})
}
})
t.Run("register panics", func(t *testing.T) {
var buf bytes.Buffer
r := csv.NewWriter(&buf)
enc := NewEncoder(r)
fixtures := []struct {
desc string
arg interface{}
}{
{
desc: "not a func",
arg: 1,
},
{
desc: "nil",
arg: nil,
},
{
desc: "T == empty interface",
arg: func(interface{}) ([]byte, error) { return nil, nil },
},
{
desc: "first out not bytes",
arg: func(int) (int, error) { return 0, nil },
},
{
desc: "second out not error",
arg: func(int) (int, int) { return 0, 0 },
},
{
desc: "func with one out value",
arg: func(int) error { return nil },
},
{
desc: "func with no returns",
arg: func(int) {},
},
}
for _, f := range fixtures {
t.Run(f.desc, func(t *testing.T) {
var e interface{}
func() {
defer func() {
e = recover()
}()
enc.Register(f.arg)
}()
if e == nil {
t.Error("Register was supposed to panic but it didnt")
}
t.Log(e)
})
}
t.Run("already registered", func(t *testing.T) {
f := func(int) ([]byte, error) { return nil, nil }
enc.Register(f)
var e interface{}
func() {
defer func() {
e = recover()
}()
enc.Register(f)
}()
if e == nil {
t.Error("Register was supposed to panic but it didnt")
}
t.Log(e)
})
})
}
func encode(t *testing.T, buf *bytes.Buffer, v interface{}, tag string) {
w := csv.NewWriter(buf)
enc := NewEncoder(w)
enc.Tag = tag
if err := enc.Encode(v); err != nil {
t.Fatalf("want err=nil; got %v", err)
}
w.Flush()
if err := w.Error(); err != nil {
t.Fatalf("want err=nil; got %v", err)
}
}
func encodeCSV(t *testing.T, recs [][]string) string |
type failingWriter struct {
Err error
}
func (w failingWriter) Write([]string) error {
return w.Err
}
| {
var buf bytes.Buffer
if err := csv.NewWriter(&buf).WriteAll(recs); err != nil {
t.Fatalf("want err=nil; got %v", err)
}
return buf.String()
} |
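// Illustrative usage sketch (not part of the test file above): the tests
// exercise an Encoder that wraps a csv.Writer and writes a header row on the
// first Encode call. The Record type and the import path below are
// assumptions for this example only.
package main

import (
	"bytes"
	"encoding/csv"
	"fmt"

	"github.com/jszwec/csvutil"
)

type Record struct {
	Name string `csv:"name"`
	Age  int    `csv:"age"`
}

func main() {
	var buf bytes.Buffer
	w := csv.NewWriter(&buf)
	enc := csvutil.NewEncoder(w) // header ("name,age") is written on the first Encode
	for _, r := range []Record{{Name: "alice", Age: 30}, {Name: "bob", Age: 25}} {
		if err := enc.Encode(r); err != nil {
			panic(err)
		}
	}
	w.Flush() // flush buffered CSV rows before reading the output
	fmt.Print(buf.String())
}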
use-sqoop-source-type.ts | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import { ref, h, watch, Ref } from 'vue'
import { useI18n } from 'vue-i18n'
import { useDatasource } from './use-sqoop-datasource'
import { useCustomParams } from '.'
import styles from '../index.module.scss'
import type { IJsonItem, IOption, ModelType } from '../types'
export function useSourceType(
model: { [field: string]: any },
unCustomSpan: Ref<number>
): IJsonItem[] {
const { t } = useI18n()
const mysqlSpan = ref(24)
const tableSpan = ref(0)
const editorSpan = ref(24)
const columnSpan = ref(0)
const hiveSpan = ref(0)
const hdfsSpan = ref(0)
const datasourceSpan = ref(0)
const resetSpan = () => {
mysqlSpan.value =
unCustomSpan.value && model.sourceType === 'MYSQL' ? 24 : 0
tableSpan.value = mysqlSpan.value && model.srcQueryType === '0' ? 24 : 0
editorSpan.value = mysqlSpan.value && model.srcQueryType === '1' ? 24 : 0
columnSpan.value = tableSpan.value && model.srcColumnType === '1' ? 24 : 0
hiveSpan.value = unCustomSpan.value && model.sourceType === 'HIVE' ? 24 : 0
hdfsSpan.value = unCustomSpan.value && model.sourceType === 'HDFS' ? 24 : 0
datasourceSpan.value =
unCustomSpan.value && model.sourceType === 'MYSQL' ? 12 : 0
}
const sourceTypes = ref([
{
label: 'MYSQL',
value: 'MYSQL'
}
] as IOption[])
const getSourceTypesByModelType = (modelType: ModelType): IOption[] => {
switch (modelType) {
case 'import':
return [
{
label: 'MYSQL',
value: 'MYSQL'
}
]
case 'export':
return [
{
label: 'HDFS',
value: 'HDFS'
},
{
label: 'HIVE',
value: 'HIVE'
}
]
default:
return [
{
label: 'MYSQL',
value: 'MYSQL'
},
{
label: 'HDFS',
value: 'HDFS'
},
{
label: 'HIVE',
value: 'HIVE'
}
]
}
}
watch(
() => model.modelType,
(modelType: ModelType) => {
sourceTypes.value = getSourceTypesByModelType(modelType)
if (!sourceTypes.value.find((type) => model.sourceType === type.value)) {
model.sourceType = sourceTypes.value[0].value
}
}
)
watch(
() => [
unCustomSpan.value,
model.sourceType,
model.srcQueryType,
model.srcColumnType
],
() => {
resetSpan()
}
)
return [
{
type: 'custom',
field: 'custom-title-source',
span: unCustomSpan,
widget: h(
'div',
{ class: styles['field-title'] },
t('project.node.data_source')
)
},
{
type: 'select',
field: 'sourceType',
name: t('project.node.type'),
span: unCustomSpan,
options: sourceTypes
},
...useDatasource(
model,
datasourceSpan,
'sourceMysqlType',
'sourceMysqlDatasource'
),
{
type: 'radio',
field: 'srcQueryType',
name: t('project.node.model_type'),
span: mysqlSpan,
options: [
{
label: t('project.node.form'),
value: '0'
},
{
label: 'SQL',
value: '1'
}
]
},
{
type: 'input',
field: 'srcTable',
name: t('project.node.table'),
span: tableSpan,
props: {
placeholder: t('project.node.table_tips')
},
validate: {
trigger: ['input', 'blur'],
required: true,
validator(validate, value) {
if (tableSpan.value && !value) {
return new Error(t('project.node.table_tips'))
}
}
}
},
{
type: 'radio',
field: 'srcColumnType',
name: t('project.node.column_type'),
span: tableSpan,
options: [
{ label: t('project.node.all_columns'), value: '0' },
{ label: t('project.node.some_columns'), value: '1' }
]
},
{
type: 'input',
field: 'srcColumns',
name: t('project.node.column'),
span: columnSpan,
props: {
placeholder: t('project.node.column_tips')
},
validate: {
trigger: ['input', 'blur'],
required: true,
validator(validate, value) {
if (!!columnSpan.value && !value) {
return new Error(t('project.node.column_tips'))
}
}
}
},
{
type: 'input',
field: 'sourceHiveDatabase',
name: t('project.node.database'),
span: hiveSpan,
props: {
placeholder: t('project.node.database_tips')
},
validate: {
trigger: ['blur', 'input'],
required: true,
validator(validate, value) { | if (hiveSpan.value && !value) {
return new Error(t('project.node.database_tips'))
}
}
}
},
{
type: 'input',
field: 'sourceHiveTable',
name: t('project.node.table'),
span: hiveSpan,
props: {
placeholder: t('project.node.hive_table_tips')
},
validate: {
trigger: ['blur', 'input'],
required: true,
validator(validate, value) {
if (hiveSpan.value && !value) {
return new Error(t('project.node.hive_table_tips'))
}
}
}
},
{
type: 'input',
field: 'sourceHivePartitionKey',
name: t('project.node.hive_partition_keys'),
span: hiveSpan,
props: {
placeholder: t('project.node.hive_partition_keys_tips')
}
},
{
type: 'input',
field: 'sourceHivePartitionValue',
name: t('project.node.hive_partition_values'),
span: hiveSpan,
props: {
placeholder: t('project.node.hive_partition_values_tips')
}
},
{
type: 'input',
field: 'sourceHdfsExportDir',
name: t('project.node.export_dir'),
span: hdfsSpan,
props: {
placeholder: t('project.node.export_dir_tips')
},
validate: {
trigger: ['blur', 'input'],
required: true,
validator(validate, value) {
if (hdfsSpan.value && !value) {
return new Error(t('project.node.export_dir_tips'))
}
}
}
},
{
type: 'editor',
field: 'sourceMysqlSrcQuerySql',
name: t('project.node.sql_statement'),
span: editorSpan,
validate: {
trigger: ['blur', 'input'],
required: true,
validator(validate, value) {
if (editorSpan.value && !value) {
return new Error(t('project.node.sql_statement_tips'))
}
}
}
},
...useCustomParams({
model,
field: 'mapColumnHive',
name: 'map_column_hive',
isSimple: true,
span: mysqlSpan
}),
...useCustomParams({
model,
field: 'mapColumnJava',
name: 'map_column_java',
isSimple: true,
span: mysqlSpan
})
]
} | |
ElementDescriptor.py | # -*- coding: utf-8 -*-
#
# michael a.g. aïvázis
# orthologue
# (c) 1998-2022 all rights reserved
#
from .Descriptor import Descriptor
class ElementDescriptor(Descriptor):
"""
Descriptor class that gathers all the metadata about a document tag that was provided by
the user during the DTD declaration. It is used by DTD derived classes to decorate the
Document instance and the tag handlers with the information needed by the Reader so it can
process XML documents
"""
# element meta data
handler = None # the Node descendant that handles parsing events for this document element
attributes = () # a list of the tag attribute descriptors that encode the document DTD
# meta methods
def __init__(self, *, tag, handler, root=False):
su |
# end of file
| per().__init__(name=tag)
self.handler = handler
self.root = root
return
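# Illustrative sketch (not part of the original module): a DTD subclass would
# typically declare its document elements with descriptors like the one above.
# The tag names and handler classes below are hypothetical examples only.
#
#   class InventoryDTD(DTD):
#       inventory = ElementDescriptor(
#           tag="inventory", handler=InventoryHandler, root=True)
#       item = ElementDescriptor(tag="item", handler=ItemHandler)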
|
main.rs | /*
* Copyright 2018 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* ------------------------------------------------------------------------------
*/
extern crate cbor;
extern crate clap;
extern crate crypto;
extern crate protobuf;
extern crate rand;
extern crate sawtooth_perf;
extern crate sawtooth_sdk;
extern crate simplelog;
mod intkey_addresser;
mod intkey_iterator;
mod intkey_transformer;
use std::convert::From;
use std::error::Error;
use std::fmt;
use std::fs::File;
use std::io::Read;
use std::num::ParseFloatError;
use std::num::ParseIntError;
use std::str::Split;
use clap::{App, Arg, ArgMatches};
use rand::{Rng, StdRng};
use sawtooth_perf::batch_gen::SignedBatchIterator;
use sawtooth_perf::batch_submit::InfiniteBatchListIterator;
use sawtooth_perf::batch_submit::run_workload;
use sawtooth_sdk::signing;
use sawtooth_sdk::signing::secp256k1::Secp256k1PrivateKey;
use simplelog::{Config, LevelFilter, SimpleLogger};
use intkey_iterator::IntKeyIterator;
use intkey_transformer::IntKeyTransformer;
const APP_NAME: &str = env!("CARGO_PKG_NAME");
const VERSION: &str = env!("CARGO_PKG_VERSION");
fn main() {
match SimpleLogger::init(LevelFilter::Warn, Config::default()) {
Ok(_) => (),
Err(err) => println!("Failed to load logger: {}", err.description()),
}
let arg_matches = get_arg_matches();
match run_load_command(&arg_matches) {
Ok(_) => (),
Err(err) => println!("{}", err.description()),
}
}
fn get_arg_matches<'a>() -> ArgMatches<'a> {
App::new(APP_NAME)
.version(VERSION)
.about("Submit intkey workload at a continuous rate")
.arg(
Arg::with_name("display")
.long("display")
.takes_value(true)
.number_of_values(1)
.default_value("30")
.value_name("TIME_BETWEEN_DISPLAYS")
.help("Seconds between statistics displays"),
)
.arg(
Arg::with_name("key")
.short("k")
.long("key-file")
.value_name("KEY_FILE")
.help("File containing a private key to sign transactions and batches"),
)
.arg(
Arg::with_name("batch-size")
.short("n")
.long("batch-size")
.takes_value(true)
.default_value("1")
.number_of_values(1)
.value_name("BATCH_SIZE")
.help("Transactions in a batch"),
)
.arg(
Arg::with_name("names")
.long("num-names")
.takes_value(true)
.default_value("100")
.number_of_values(1)
.value_name("NUM_NAMES")
.help("Number of IntKey Names to set"),
)
.arg(
Arg::with_name("rate")
.short("r")
.long("rate")
.takes_value(true)
.number_of_values(1)
.default_value("10")
.value_name("RATE")
.help("Batches per second to send to a Sawtooth REST Api"),
)
.arg(
Arg::with_name("seed")
.short("s")
.long("seed")
.takes_value(true)
.number_of_values(1)
.value_name("SEED")
.help("Comma separated list of u8 to make the workload reproduceable"),
)
.arg(
Arg::with_name("unnecessary")
.long("unnecessary")
.takes_value(true)
.number_of_values(1)
.default_value("0.0")
.value_name("UNNECESSARY")
.help("Probability of a transaction having a satisfiable but unnecessary depedendency"),
)
.arg(
Arg::with_name("unsatisfiable")
.long("unsatisfiable")
.takes_value(true)
.number_of_values(1)
.default_value("0.0")
.value_name("UNSATISFIABLE")
.help("Probability of a transaction having an unsatisfiable dependency"),
)
.arg(
Arg::with_name("urls")
.short("u")
.long("urls")
.value_name("URLS")
.takes_value(true)
.number_of_values(1)
.default_value("http://127.0.0.1:8008")
.help("Comma separated list of Sawtooth REST Apis"),
)
.arg(
Arg::with_name("invalid")
.long("invalid")
.value_name("INVALID")
.takes_value(true)
.number_of_values(1)
.default_value("0.0")
.help("Probability of a transaction being invalid"),
)
.arg(
Arg::with_name("wildcard")
.long("wildcard")
.value_name("WILDCARD")
.takes_value(true)
.number_of_values(1)
.default_value("0.0")
.help("Probability of a transaction having a wildcarded input/output"),
)
.arg(
Arg::with_name("username")
.long("auth-username")
.value_name("BASIC_AUTH_USERNAME")
.help("Basic auth username to authenticate with the Sawtooth REST Api"),
)
.arg(
Arg::with_name("password")
.long("auth-password")
.value_name("BASIC_AUTH_PASSWORD")
.help("Basic auth password to authenticate with the Sawtooth REST Api"),
)
.get_matches()
}
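// Illustrative invocation (an assumption based on the flags declared above,
// not taken from project documentation; the binary name is a placeholder):
//
//   intkey-workload --urls http://127.0.0.1:8008 --rate 10 --batch-size 1 \
//       --num-names 100 --seed 1,2,3,4,5 --display 30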
fn err_if_out_of_range(val: f32) -> Result<f32, IntKeyCliError> {
if val < 0.0 || val > 1.0 {
return Err(IntKeyCliError {
msg: "Value must be between 0.0 and 1.0, inclusively".to_string(),
});
}
Ok(val)
}
fn | (val: u32) -> Result<u32, IntKeyCliError> {
if val == 0 {
return Err(IntKeyCliError {
msg: "Value must be greater than zero".to_string(),
});
}
Ok(val)
}
fn greater_than_zero(val: usize) -> Result<usize, IntKeyCliError> {
if val == 0 {
return Err(IntKeyCliError {
msg: "Value must be greater than zero".to_string(),
});
}
Ok(val)
}
fn run_load_command(args: &ArgMatches) -> Result<(), Box<Error>> {
let batch_size: usize = args.value_of("batch-size")
.unwrap_or("1")
.parse()
.map_err(IntKeyCliError::from)
.and_then(greater_than_zero)?;
let num_names: usize = args.value_of("names")
.unwrap_or("100")
.parse()
.map_err(IntKeyCliError::from)
.and_then(greater_than_zero)?;
let urls: Vec<String> = args.value_of("urls")
.unwrap_or("http://127.0.0.1:8008")
.parse()
.map_err(|_| String::from("urls are a comma separated list of strings"))
.and_then(|st| {
let s: String = st;
let split: Split<char> = s.split(',');
Ok(split.map(|s| s.to_string()).collect())
})?;
let rate: usize = args.value_of("rate")
.unwrap_or("10")
.parse()
.map_err(IntKeyCliError::from)
.and_then(greater_than_zero)?;
let unsatisfiable: f32 = args.value_of("unsatisfiable")
.unwrap_or("0.0")
.parse()
.map_err(IntKeyCliError::from)
.and_then(err_if_out_of_range)?;
let unnecessary: f32 = args.value_of("unnecessary")
.unwrap_or("0.0")
.parse()
.map_err(IntKeyCliError::from)
.and_then(err_if_out_of_range)?;
let wildcard: f32 = args.value_of("wildcard")
.unwrap_or("0.0")
.parse()
.map_err(IntKeyCliError::from)
.and_then(err_if_out_of_range)?;
let invalid: f32 = args.value_of("invalid")
.unwrap_or("0.0")
.parse()
.map_err(IntKeyCliError::from)
.and_then(err_if_out_of_range)?;
let display: u32 = args.value_of("display")
.unwrap_or("30")
.parse()
.map_err(IntKeyCliError::from)
.and_then(greater_than_zero32)?;
let username = args.value_of("username");
let password = args.value_of("password");
let basic_auth = {
match username {
Some(username) => match password {
None => Some(String::from(username)),
Some(password) => Some([username, password].join(":")),
},
None => None,
}
};
let s: Result<Vec<usize>, std::num::ParseIntError> = match args.value_of("seed") {
Some(s) => {
let split: Split<char> = s.split(',');
split.map(|s| s.parse()).collect()
}
None => {
let mut rng = StdRng::new()?;
Ok(rng.gen_iter().take(10).collect())
}
};
let seed = s?;
let context = signing::create_context("secp256k1")?;
let private_key: Result<Box<signing::PrivateKey>, Box<Error>> = match args.value_of("key") {
Some(file) => {
let mut key_file = File::open(file)?;
let mut buf = String::new();
key_file.read_to_string(&mut buf)?;
buf.pop(); // remove the new line
let private_key = Secp256k1PrivateKey::from_hex(&buf)?;
Ok(Box::new(private_key))
}
None => {
let private_key = context.new_random_private_key()?;
Ok(private_key)
}
};
let priv_key = private_key?;
let signer = signing::Signer::new(context.as_ref(), priv_key.as_ref());
let signer_ref = &signer;
let mut transformer = IntKeyTransformer::new(
signer_ref,
&seed,
unsatisfiable,
wildcard,
num_names,
unnecessary,
);
let mut transaction_iterator = IntKeyIterator::new(num_names, invalid, &seed)
.map(|payload| transformer.intkey_payload_to_transaction(&payload))
.filter_map(|payload| payload.ok());
let mut batch_iter =
SignedBatchIterator::new(&mut transaction_iterator, batch_size, signer_ref);
let mut batchlist_iter = InfiniteBatchListIterator::new(&mut batch_iter);
let time_to_wait: u32 = 1_000_000_000 / rate as u32;
println!("--invalid {} --batch-size {} --rate {} --wildcard {} --urls {:?} --unsatisfiable {} --seed {:?} --num-names {} --display {}",
invalid,
batch_size,
rate,
wildcard,
urls,
unsatisfiable,
seed,
num_names,
display);
match run_workload(
&mut batchlist_iter,
time_to_wait,
display,
urls,
&basic_auth,
) {
Ok(_) => Ok(()),
Err(err) => Err(Box::new(err)),
}
}
#[derive(Debug)]
struct IntKeyCliError {
msg: String,
}
impl Error for IntKeyCliError {
fn description(&self) -> &str {
self.msg.as_str()
}
}
impl fmt::Display for IntKeyCliError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", format!("IntKeyCliError {}", self.msg))
}
}
impl From<ParseIntError> for IntKeyCliError {
fn from(error: ParseIntError) -> Self {
IntKeyCliError {
msg: error.description().to_string(),
}
}
}
impl From<ParseFloatError> for IntKeyCliError {
fn from(error: ParseFloatError) -> Self {
IntKeyCliError {
msg: error.description().to_string(),
}
}
}
| greater_than_zero32 |
top.rs | extern crate shiplift;
use shiplift::Docker;
use std::env;
fn | () {
let docker = Docker::new();
if let Some(id) = env::args().nth(1) {
let top = docker
.containers()
.get(&id)
.top(Default::default())
.unwrap();
println!("{:?}", top);
}
}
| main |
jquery.treegrid.js | /**
* jQuery EasyUI 1.4.1
*
* Copyright (c) 2009-2014 www.jeasyui.com. All rights reserved.
*
* Licensed under the GPL license: http://www.gnu.org/licenses/gpl.txt
* To use it on other terms please contact us at [email protected]
*
*/
(function($){
function _1(_2){
var _3=$.data(_2,"treegrid");
var _4=_3.options;
$(_2).datagrid($.extend({},_4,{url:null,data:null,loader:function(){
return false;
},onBeforeLoad:function(){
return false;
},onLoadSuccess:function(){
},onResizeColumn:function(_5,_6){
_26(_2);
_4.onResizeColumn.call(_2,_5,_6);
},onBeforeSortColumn:function(_7,_8){
if(_4.onBeforeSortColumn.call(_2,_7,_8)==false){
return false;
}
},onSortColumn:function(_9,_a){
_4.sortName=_9;
_4.sortOrder=_a;
if(_4.remoteSort){
_25(_2);
}else{
var _b=$(_2).treegrid("getData");
_3f(_2,0,_b);
}
_4.onSortColumn.call(_2,_9,_a);
},onBeforeEdit:function(_c,_d){
if(_4.onBeforeEdit.call(_2,_d)==false){
return false;
}
},onAfterEdit:function(_e,_f,_10){
_4.onAfterEdit.call(_2,_f,_10);
},onCancelEdit:function(_11,row){
_4.onCancelEdit.call(_2,row);
},onBeforeSelect:function(_12){
if(_4.onBeforeSelect.call(_2,_47(_2,_12))==false){
return false;
}
},onSelect:function(_13){
_4.onSelect.call(_2,_47(_2,_13));
},onBeforeUnselect:function(_14){
if(_4.onBeforeUnselect.call(_2,_47(_2,_14))==false){
return false;
}
},onUnselect:function(_15){
_4.onUnselect.call(_2,_47(_2,_15));
},onBeforeCheck:function(_16){
if(_4.onBeforeCheck.call(_2,_47(_2,_16))==false){
return false;
}
},onCheck:function(_17){
_4.onCheck.call(_2,_47(_2,_17));
},onBeforeUncheck:function(_18){
if(_4.onBeforeUncheck.call(_2,_47(_2,_18))==false){
return false;
}
},onUncheck:function(_19){
_4.onUncheck.call(_2,_47(_2,_19));
},onClickRow:function(_1a){
_4.onClickRow.call(_2,_47(_2,_1a));
},onDblClickRow:function(_1b){
_4.onDblClickRow.call(_2,_47(_2,_1b));
},onClickCell:function(_1c,_1d){
_4.onClickCell.call(_2,_1d,_47(_2,_1c));
},onDblClickCell:function(_1e,_1f){
_4.onDblClickCell.call(_2,_1f,_47(_2,_1e));
},onRowContextMenu:function(e,_20){
_4.onContextMenu.call(_2,e,_47(_2,_20));
}}));
if(!_4.columns){
var _21=$.data(_2,"datagrid").options;
_4.columns=_21.columns;
_4.frozenColumns=_21.frozenColumns;
}
_3.dc=$.data(_2,"datagrid").dc;
if(_4.pagination){
var _22=$(_2).datagrid("getPager");
_22.pagination({pageNumber:_4.pageNumber,pageSize:_4.pageSize,pageList:_4.pageList,onSelectPage:function(_23,_24){
_4.pageNumber=_23;
_4.pageSize=_24;
_25(_2);
}});
_4.pageSize=_22.pagination("options").pageSize;
}
};
function _26(_27,_28){
var _29=$.data(_27,"datagrid").options;
var dc=$.data(_27,"datagrid").dc;
if(!dc.body1.is(":empty")&&(!_29.nowrap||_29.autoRowHeight)){
if(_28!=undefined){
var _2a=_2b(_27,_28);
for(var i=0;i<_2a.length;i++){
_2c(_2a[i][_29.idField]);
}
}
}
$(_27).datagrid("fixRowHeight",_28);
function _2c(_2d){
var tr1=_29.finder.getTr(_27,_2d,"body",1);
var tr2=_29.finder.getTr(_27,_2d,"body",2);
tr1.css("height","");
tr2.css("height","");
var _2e=Math.max(tr1.height(),tr2.height());
tr1.css("height",_2e);
tr2.css("height",_2e);
};
};
function _2f(_30){
var dc=$.data(_30,"datagrid").dc;
var _31=$.data(_30,"treegrid").options;
if(!_31.rownumbers){
return;
}
dc.body1.find("div.datagrid-cell-rownumber").each(function(i){
$(this).html(i+1);
});
};
function _32(_33){
return function(e){
$.fn.datagrid.defaults.rowEvents[_33?"mouseover":"mouseout"](e);
var tt=$(e.target);
var fn=_33?"addClass":"removeClass";
if(tt.hasClass("tree-hit")){
tt.hasClass("tree-expanded")?tt[fn]("tree-expanded-hover"):tt[fn]("tree-collapsed-hover");
}
};
};
function _34(e){
var tt=$(e.target);
if(tt.hasClass("tree-hit")){
var tr=tt.closest("tr.datagrid-row");
var _35=tr.closest("div.datagrid-view").children(".datagrid-f")[0];
_36(_35,tr.attr("node-id"));
}else{
$.fn.datagrid.defaults.rowEvents.click(e);
}
};
function _37(_38,_39){
| var _3a=$.data(_38,"treegrid").options;
var tr1=_3a.finder.getTr(_38,_39,"body",1);
var tr2=_3a.finder.getTr(_38,_39,"body",2);
var _3b=$(_38).datagrid("getColumnFields",true).length+(_3a.rownumbers?1:0);
var _3c=$(_38).datagrid("getColumnFields",false).length;
_3d(tr1,_3b);
_3d(tr2,_3c);
function _3d(tr,_3e){
$("<tr class=\"treegrid-tr-tree\">"+"<td style=\"border:0px\" colspan=\""+_3e+"\">"+"<div></div>"+"</td>"+"</tr>").insertAfter(tr);
};
};
function _3f(_40,_41,_42,_43){
var _44=$.data(_40,"treegrid");
var _45=_44.options;
var dc=_44.dc;
_42=_45.loadFilter.call(_40,_42,_41);
var _46=_47(_40,_41);
if(_46){
var _48=_45.finder.getTr(_40,_41,"body",1);
var _49=_45.finder.getTr(_40,_41,"body",2);
var cc1=_48.next("tr.treegrid-tr-tree").children("td").children("div");
var cc2=_49.next("tr.treegrid-tr-tree").children("td").children("div");
if(!_43){
_46.children=[];
}
}else{
var cc1=dc.body1;
var cc2=dc.body2;
if(!_43){
_44.data=[];
}
}
if(!_43){
cc1.empty();
cc2.empty();
}
if(_45.view.onBeforeRender){
_45.view.onBeforeRender.call(_45.view,_40,_41,_42);
}
_45.view.render.call(_45.view,_40,cc1,true);
_45.view.render.call(_45.view,_40,cc2,false);
if(_45.showFooter){
_45.view.renderFooter.call(_45.view,_40,dc.footer1,true);
_45.view.renderFooter.call(_45.view,_40,dc.footer2,false);
}
if(_45.view.onAfterRender){
_45.view.onAfterRender.call(_45.view,_40);
}
if(!_41&&_45.pagination){
var _4a=$.data(_40,"treegrid").total;
var _4b=$(_40).datagrid("getPager");
if(_4b.pagination("options").total!=_4a){
_4b.pagination({total:_4a});
}
}
_26(_40);
_2f(_40);
$(_40).treegrid("showLines");
$(_40).treegrid("setSelectionState");
$(_40).treegrid("autoSizeColumn");
_45.onLoadSuccess.call(_40,_46,_42);
};
function _25(_4c,_4d,_4e,_4f,_50){
var _51=$.data(_4c,"treegrid").options;
var _52=$(_4c).datagrid("getPanel").find("div.datagrid-body");
if(_4e){
_51.queryParams=_4e;
}
var _53=$.extend({},_51.queryParams);
if(_51.pagination){
$.extend(_53,{page:_51.pageNumber,rows:_51.pageSize});
}
if(_51.sortName){
$.extend(_53,{sort:_51.sortName,order:_51.sortOrder});
}
var row=_47(_4c,_4d);
if(_51.onBeforeLoad.call(_4c,row,_53)==false){
return;
}
var _54=_52.find("tr[node-id=\""+_4d+"\"] span.tree-folder");
_54.addClass("tree-loading");
$(_4c).treegrid("loading");
var _55=_51.loader.call(_4c,_53,function(_56){
_54.removeClass("tree-loading");
$(_4c).treegrid("loaded");
_3f(_4c,_4d,_56,_4f);
if(_50){
_50();
}
},function(){
_54.removeClass("tree-loading");
$(_4c).treegrid("loaded");
_51.onLoadError.apply(_4c,arguments);
if(_50){
_50();
}
});
if(_55==false){
_54.removeClass("tree-loading");
$(_4c).treegrid("loaded");
}
};
function _57(_58){
var _59=_5a(_58);
if(_59.length){
return _59[0];
}else{
return null;
}
};
function _5a(_5b){
return $.data(_5b,"treegrid").data;
};
function _5c(_5d,_5e){
var row=_47(_5d,_5e);
if(row._parentId){
return _47(_5d,row._parentId);
}else{
return null;
}
};
function _2b(_5f,_60){
var _61=$.data(_5f,"treegrid").options;
var _62=$(_5f).datagrid("getPanel").find("div.datagrid-view2 div.datagrid-body");
var _63=[];
if(_60){
_64(_60);
}else{
var _65=_5a(_5f);
for(var i=0;i<_65.length;i++){
_63.push(_65[i]);
_64(_65[i][_61.idField]);
}
}
function _64(_66){
var _67=_47(_5f,_66);
if(_67&&_67.children){
for(var i=0,len=_67.children.length;i<len;i++){
var _68=_67.children[i];
_63.push(_68);
_64(_68[_61.idField]);
}
}
};
return _63;
};
function _69(_6a,_6b){
if(!_6b){
return 0;
}
var _6c=$.data(_6a,"treegrid").options;
var _6d=$(_6a).datagrid("getPanel").children("div.datagrid-view");
var _6e=_6d.find("div.datagrid-body tr[node-id=\""+_6b+"\"]").children("td[field=\""+_6c.treeField+"\"]");
return _6e.find("span.tree-indent,span.tree-hit").length;
};
function _47(_6f,_70){
var _71=$.data(_6f,"treegrid").options;
var _72=$.data(_6f,"treegrid").data;
var cc=[_72];
while(cc.length){
var c=cc.shift();
for(var i=0;i<c.length;i++){
var _73=c[i];
if(_73[_71.idField]==_70){
return _73;
}else{
if(_73["children"]){
cc.push(_73["children"]);
}
}
}
}
return null;
};
function _74(_75,_76){
var _77=$.data(_75,"treegrid").options;
var row=_47(_75,_76);
var tr=_77.finder.getTr(_75,_76);
var hit=tr.find("span.tree-hit");
if(hit.length==0){
return;
}
if(hit.hasClass("tree-collapsed")){
return;
}
if(_77.onBeforeCollapse.call(_75,row)==false){
return;
}
hit.removeClass("tree-expanded tree-expanded-hover").addClass("tree-collapsed");
hit.next().removeClass("tree-folder-open");
row.state="closed";
tr=tr.next("tr.treegrid-tr-tree");
var cc=tr.children("td").children("div");
if(_77.animate){
cc.slideUp("normal",function(){
$(_75).treegrid("autoSizeColumn");
_26(_75,_76);
_77.onCollapse.call(_75,row);
});
}else{
cc.hide();
$(_75).treegrid("autoSizeColumn");
_26(_75,_76);
_77.onCollapse.call(_75,row);
}
};
function _78(_79,_7a){
var _7b=$.data(_79,"treegrid").options;
var tr=_7b.finder.getTr(_79,_7a);
var hit=tr.find("span.tree-hit");
var row=_47(_79,_7a);
if(hit.length==0){
return;
}
if(hit.hasClass("tree-expanded")){
return;
}
if(_7b.onBeforeExpand.call(_79,row)==false){
return;
}
hit.removeClass("tree-collapsed tree-collapsed-hover").addClass("tree-expanded");
hit.next().addClass("tree-folder-open");
var _7c=tr.next("tr.treegrid-tr-tree");
if(_7c.length){
var cc=_7c.children("td").children("div");
_7d(cc);
}else{
_37(_79,row[_7b.idField]);
var _7c=tr.next("tr.treegrid-tr-tree");
var cc=_7c.children("td").children("div");
cc.hide();
var _7e=$.extend({},_7b.queryParams||{});
_7e.id=row[_7b.idField];
_25(_79,row[_7b.idField],_7e,true,function(){
if(cc.is(":empty")){
_7c.remove();
}else{
_7d(cc);
}
});
}
function _7d(cc){
row.state="open";
if(_7b.animate){
cc.slideDown("normal",function(){
$(_79).treegrid("autoSizeColumn");
_26(_79,_7a);
_7b.onExpand.call(_79,row);
});
}else{
cc.show();
$(_79).treegrid("autoSizeColumn");
_26(_79,_7a);
_7b.onExpand.call(_79,row);
}
};
};
function _36(_7f,_80){
var _81=$.data(_7f,"treegrid").options;
var tr=_81.finder.getTr(_7f,_80);
var hit=tr.find("span.tree-hit");
if(hit.hasClass("tree-expanded")){
_74(_7f,_80);
}else{
_78(_7f,_80);
}
};
function _82(_83,_84){
var _85=$.data(_83,"treegrid").options;
var _86=_2b(_83,_84);
if(_84){
_86.unshift(_47(_83,_84));
}
for(var i=0;i<_86.length;i++){
_74(_83,_86[i][_85.idField]);
}
};
function _87(_88,_89){
var _8a=$.data(_88,"treegrid").options;
var _8b=_2b(_88,_89);
if(_89){
_8b.unshift(_47(_88,_89));
}
for(var i=0;i<_8b.length;i++){
_78(_88,_8b[i][_8a.idField]);
}
};
function _8c(_8d,_8e){
var _8f=$.data(_8d,"treegrid").options;
var ids=[];
var p=_5c(_8d,_8e);
while(p){
var id=p[_8f.idField];
ids.unshift(id);
p=_5c(_8d,id);
}
for(var i=0;i<ids.length;i++){
_78(_8d,ids[i]);
}
};
function _90(_91,_92){
var _93=$.data(_91,"treegrid").options;
if(_92.parent){
var tr=_93.finder.getTr(_91,_92.parent);
if(tr.next("tr.treegrid-tr-tree").length==0){
_37(_91,_92.parent);
}
var _94=tr.children("td[field=\""+_93.treeField+"\"]").children("div.datagrid-cell");
var _95=_94.children("span.tree-icon");
if(_95.hasClass("tree-file")){
_95.removeClass("tree-file").addClass("tree-folder tree-folder-open");
var hit=$("<span class=\"tree-hit tree-expanded\"></span>").insertBefore(_95);
if(hit.prev().length){
hit.prev().remove();
}
}
}
_3f(_91,_92.parent,_92.data,true);
};
function _96(_97,_98){
var ref=_98.before||_98.after;
var _99=$.data(_97,"treegrid").options;
var _9a=_5c(_97,ref);
_90(_97,{parent:(_9a?_9a[_99.idField]:null),data:[_98.data]});
var _9b=_9a?_9a.children:$(_97).treegrid("getRoots");
for(var i=0;i<_9b.length;i++){
if(_9b[i][_99.idField]==ref){
var _9c=_9b[_9b.length-1];
_9b.splice(_98.before?i:(i+1),0,_9c);
_9b.splice(_9b.length-1,1);
break;
}
}
_9d(true);
_9d(false);
_2f(_97);
$(_97).treegrid("showLines");
function _9d(_9e){
var _9f=_9e?1:2;
var tr=_99.finder.getTr(_97,_98.data[_99.idField],"body",_9f);
var _a0=tr.closest("table.datagrid-btable");
tr=tr.parent().children();
var _a1=_99.finder.getTr(_97,ref,"body",_9f);
if(_98.before){
tr.insertBefore(_a1);
}else{
var sub=_a1.next("tr.treegrid-tr-tree");
tr.insertAfter(sub.length?sub:_a1);
}
_a0.remove();
};
};
function _a2(_a3,_a4){
var _a5=$.data(_a3,"treegrid");
$(_a3).datagrid("deleteRow",_a4);
_2f(_a3);
_a5.total-=1;
$(_a3).datagrid("getPager").pagination("refresh",{total:_a5.total});
$(_a3).treegrid("showLines");
};
function _a6(_a7){
var t=$(_a7);
var _a8=t.treegrid("options");
if(_a8.lines){
t.treegrid("getPanel").addClass("tree-lines");
}else{
t.treegrid("getPanel").removeClass("tree-lines");
return;
}
t.treegrid("getPanel").find("span.tree-indent").removeClass("tree-line tree-join tree-joinbottom");
t.treegrid("getPanel").find("div.datagrid-cell").removeClass("tree-node-last tree-root-first tree-root-one");
var _a9=t.treegrid("getRoots");
if(_a9.length>1){
_aa(_a9[0]).addClass("tree-root-first");
}else{
if(_a9.length==1){
_aa(_a9[0]).addClass("tree-root-one");
}
}
_ab(_a9);
_ac(_a9);
function _ab(_ad){
$.map(_ad,function(_ae){
if(_ae.children&&_ae.children.length){
_ab(_ae.children);
}else{
var _af=_aa(_ae);
_af.find(".tree-icon").prev().addClass("tree-join");
}
});
if(_ad.length){
var _b0=_aa(_ad[_ad.length-1]);
_b0.addClass("tree-node-last");
_b0.find(".tree-join").removeClass("tree-join").addClass("tree-joinbottom");
}
};
function _ac(_b1){
$.map(_b1,function(_b2){
if(_b2.children&&_b2.children.length){
_ac(_b2.children);
}
});
for(var i=0;i<_b1.length-1;i++){
var _b3=_b1[i];
var _b4=t.treegrid("getLevel",_b3[_a8.idField]);
var tr=_a8.finder.getTr(_a7,_b3[_a8.idField]);
var cc=tr.next().find("tr.datagrid-row td[field=\""+_a8.treeField+"\"] div.datagrid-cell");
cc.find("span:eq("+(_b4-1)+")").addClass("tree-line");
}
};
function _aa(_b5){
var tr=_a8.finder.getTr(_a7,_b5[_a8.idField]);
var _b6=tr.find("td[field=\""+_a8.treeField+"\"] div.datagrid-cell");
return _b6;
};
};
$.fn.treegrid=function(_b7,_b8){
if(typeof _b7=="string"){
var _b9=$.fn.treegrid.methods[_b7];
if(_b9){
return _b9(this,_b8);
}else{
return this.datagrid(_b7,_b8);
}
}
_b7=_b7||{};
return this.each(function(){
var _ba=$.data(this,"treegrid");
if(_ba){
$.extend(_ba.options,_b7);
}else{
_ba=$.data(this,"treegrid",{options:$.extend({},$.fn.treegrid.defaults,$.fn.treegrid.parseOptions(this),_b7),data:[]});
}
_1(this);
if(_ba.options.data){
$(this).treegrid("loadData",_ba.options.data);
}
_25(this);
});
};
$.fn.treegrid.methods={options:function(jq){
return $.data(jq[0],"treegrid").options;
},resize:function(jq,_bb){
return jq.each(function(){
$(this).datagrid("resize",_bb);
});
},fixRowHeight:function(jq,_bc){
return jq.each(function(){
_26(this,_bc);
});
},loadData:function(jq,_bd){
return jq.each(function(){
_3f(this,_bd.parent,_bd);
});
},load:function(jq,_be){
return jq.each(function(){
$(this).treegrid("options").pageNumber=1;
$(this).treegrid("getPager").pagination({pageNumber:1});
$(this).treegrid("reload",_be);
});
},reload:function(jq,id){
return jq.each(function(){
var _bf=$(this).treegrid("options");
var _c0={};
if(typeof id=="object"){
_c0=id;
}else{
_c0=$.extend({},_bf.queryParams);
_c0.id=id;
}
if(_c0.id){
var _c1=$(this).treegrid("find",_c0.id);
if(_c1.children){
_c1.children.splice(0,_c1.children.length);
}
_bf.queryParams=_c0;
var tr=_bf.finder.getTr(this,_c0.id);
tr.next("tr.treegrid-tr-tree").remove();
tr.find("span.tree-hit").removeClass("tree-expanded tree-expanded-hover").addClass("tree-collapsed");
_78(this,_c0.id);
}else{
_25(this,null,_c0);
}
});
},reloadFooter:function(jq,_c2){
return jq.each(function(){
var _c3=$.data(this,"treegrid").options;
var dc=$.data(this,"datagrid").dc;
if(_c2){
$.data(this,"treegrid").footer=_c2;
}
if(_c3.showFooter){
_c3.view.renderFooter.call(_c3.view,this,dc.footer1,true);
_c3.view.renderFooter.call(_c3.view,this,dc.footer2,false);
if(_c3.view.onAfterRender){
_c3.view.onAfterRender.call(_c3.view,this);
}
$(this).treegrid("fixRowHeight");
}
});
},getData:function(jq){
return $.data(jq[0],"treegrid").data;
},getFooterRows:function(jq){
return $.data(jq[0],"treegrid").footer;
},getRoot:function(jq){
return _57(jq[0]);
},getRoots:function(jq){
return _5a(jq[0]);
},getParent:function(jq,id){
return _5c(jq[0],id);
},getChildren:function(jq,id){
return _2b(jq[0],id);
},getLevel:function(jq,id){
return _69(jq[0],id);
},find:function(jq,id){
return _47(jq[0],id);
},isLeaf:function(jq,id){
var _c4=$.data(jq[0],"treegrid").options;
var tr=_c4.finder.getTr(jq[0],id);
var hit=tr.find("span.tree-hit");
return hit.length==0;
},select:function(jq,id){
return jq.each(function(){
$(this).datagrid("selectRow",id);
});
},unselect:function(jq,id){
return jq.each(function(){
$(this).datagrid("unselectRow",id);
});
},collapse:function(jq,id){
return jq.each(function(){
_74(this,id);
});
},expand:function(jq,id){
return jq.each(function(){
_78(this,id);
});
},toggle:function(jq,id){
return jq.each(function(){
_36(this,id);
});
},collapseAll:function(jq,id){
return jq.each(function(){
_82(this,id);
});
},expandAll:function(jq,id){
return jq.each(function(){
_87(this,id);
});
},expandTo:function(jq,id){
return jq.each(function(){
_8c(this,id);
});
},append:function(jq,_c5){
return jq.each(function(){
_90(this,_c5);
});
},insert:function(jq,_c6){
return jq.each(function(){
_96(this,_c6);
});
},remove:function(jq,id){
return jq.each(function(){
_a2(this,id);
});
},pop:function(jq,id){
var row=jq.treegrid("find",id);
jq.treegrid("remove",id);
return row;
},refresh:function(jq,id){
return jq.each(function(){
var _c7=$.data(this,"treegrid").options;
_c7.view.refreshRow.call(_c7.view,this,id);
});
},update:function(jq,_c8){
return jq.each(function(){
var _c9=$.data(this,"treegrid").options;
_c9.view.updateRow.call(_c9.view,this,_c8.id,_c8.row);
});
},beginEdit:function(jq,id){
return jq.each(function(){
$(this).datagrid("beginEdit",id);
$(this).treegrid("fixRowHeight",id);
});
},endEdit:function(jq,id){
return jq.each(function(){
$(this).datagrid("endEdit",id);
});
},cancelEdit:function(jq,id){
return jq.each(function(){
$(this).datagrid("cancelEdit",id);
});
},showLines:function(jq){
return jq.each(function(){
_a6(this);
});
}};
$.fn.treegrid.parseOptions=function(_ca){
return $.extend({},$.fn.datagrid.parseOptions(_ca),$.parser.parseOptions(_ca,["treeField",{animate:"boolean"}]));
};
var _cb=$.extend({},$.fn.datagrid.defaults.view,{render:function(_cc,_cd,_ce){
var _cf=$.data(_cc,"treegrid").options;
var _d0=$(_cc).datagrid("getColumnFields",_ce);
var _d1=$.data(_cc,"datagrid").rowIdPrefix;
if(_ce){
if(!(_cf.rownumbers||(_cf.frozenColumns&&_cf.frozenColumns.length))){
return;
}
}
var _d2=this;
if(this.treeNodes&&this.treeNodes.length){
var _d3=_d4(_ce,this.treeLevel,this.treeNodes);
$(_cd).append(_d3.join(""));
}
function _d4(_d5,_d6,_d7){
var _d8=$(_cc).treegrid("getParent",_d7[0][_cf.idField]);
var _d9=(_d8?_d8.children.length:$(_cc).treegrid("getRoots").length)-_d7.length;
var _da=["<table class=\"datagrid-btable\" cellspacing=\"0\" cellpadding=\"0\" border=\"0\"><tbody>"];
for(var i=0;i<_d7.length;i++){
var row=_d7[i];
if(row.state!="open"&&row.state!="closed"){
row.state="open";
}
var css=_cf.rowStyler?_cf.rowStyler.call(_cc,row):"";
var _db="";
var _dc="";
if(typeof css=="string"){
_dc=css;
}else{
if(css){
_db=css["class"]||"";
_dc=css["style"]||"";
}
}
var cls="class=\"datagrid-row "+(_d9++%2&&_cf.striped?"datagrid-row-alt ":" ")+_db+"\"";
var _dd=_dc?"style=\""+_dc+"\"":"";
var _de=_d1+"-"+(_d5?1:2)+"-"+row[_cf.idField];
_da.push("<tr id=\""+_de+"\" node-id=\""+row[_cf.idField]+"\" "+cls+" "+_dd+">");
_da=_da.concat(_d2.renderRow.call(_d2,_cc,_d0,_d5,_d6,row));
_da.push("</tr>");
if(row.children&&row.children.length){
var tt=_d4(_d5,_d6+1,row.children);
var v=row.state=="closed"?"none":"block";
_da.push("<tr class=\"treegrid-tr-tree\"><td style=\"border:0px\" colspan="+(_d0.length+(_cf.rownumbers?1:0))+"><div style=\"display:"+v+"\">");
_da=_da.concat(tt);
_da.push("</div></td></tr>");
}
}
_da.push("</tbody></table>");
return _da;
};
},renderFooter:function(_df,_e0,_e1){
var _e2=$.data(_df,"treegrid").options;
var _e3=$.data(_df,"treegrid").footer||[];
var _e4=$(_df).datagrid("getColumnFields",_e1);
var _e5=["<table class=\"datagrid-ftable\" cellspacing=\"0\" cellpadding=\"0\" border=\"0\"><tbody>"];
for(var i=0;i<_e3.length;i++){
var row=_e3[i];
row[_e2.idField]=row[_e2.idField]||("foot-row-id"+i);
_e5.push("<tr class=\"datagrid-row\" node-id=\""+row[_e2.idField]+"\">");
_e5.push(this.renderRow.call(this,_df,_e4,_e1,0,row));
_e5.push("</tr>");
}
_e5.push("</tbody></table>");
$(_e0).html(_e5.join(""));
},renderRow:function(_e6,_e7,_e8,_e9,row){
var _ea=$.data(_e6,"treegrid").options;
var cc=[];
if(_e8&&_ea.rownumbers){
cc.push("<td class=\"datagrid-td-rownumber\"><div class=\"datagrid-cell-rownumber\">0</div></td>");
}
for(var i=0;i<_e7.length;i++){
var _eb=_e7[i];
var col=$(_e6).datagrid("getColumnOption",_eb);
if(col){
var css=col.styler?(col.styler(row[_eb],row)||""):"";
var _ec="";
var _ed="";
if(typeof css=="string"){
_ed=css;
}else{
if(cc){
_ec=css["class"]||"";
_ed=css["style"]||"";
}
}
var cls=_ec?"class=\""+_ec+"\"":"";
var _ee=col.hidden?"style=\"display:none;"+_ed+"\"":(_ed?"style=\""+_ed+"\"":"");
cc.push("<td field=\""+_eb+"\" "+cls+" "+_ee+">");
var _ee="";
if(!col.checkbox){
if(col.align){
_ee+="text-align:"+col.align+";";
}
if(!_ea.nowrap){
_ee+="white-space:normal;height:auto;";
}else{
if(_ea.autoRowHeight){
_ee+="height:auto;";
}
}
}
cc.push("<div style=\""+_ee+"\" ");
if(col.checkbox){
cc.push("class=\"datagrid-cell-check ");
}else{
cc.push("class=\"datagrid-cell "+col.cellClass);
}
cc.push("\">");
if(col.checkbox){
if(row.checked){
cc.push("<input type=\"checkbox\" checked=\"checked\"");
}else{
cc.push("<input type=\"checkbox\"");
}
cc.push(" name=\""+_eb+"\" value=\""+(row[_eb]!=undefined?row[_eb]:"")+"\">");
}else{
var val=null;
if(col.formatter){
val=col.formatter(row[_eb],row);
}else{
val=row[_eb];
}
if(_eb==_ea.treeField){
for(var j=0;j<_e9;j++){
cc.push("<span class=\"tree-indent\"></span>");
}
if(row.state=="closed"){
cc.push("<span class=\"tree-hit tree-collapsed\"></span>");
cc.push("<span class=\"tree-icon tree-folder "+(row.iconCls?row.iconCls:"")+"\"></span>");
}else{
if(row.children&&row.children.length){
cc.push("<span class=\"tree-hit tree-expanded\"></span>");
cc.push("<span class=\"tree-icon tree-folder tree-folder-open "+(row.iconCls?row.iconCls:"")+"\"></span>");
}else{
cc.push("<span class=\"tree-indent\"></span>");
cc.push("<span class=\"tree-icon tree-file "+(row.iconCls?row.iconCls:"")+"\"></span>");
}
}
cc.push("<span class=\"tree-title\">"+val+"</span>");
}else{
cc.push(val);
}
}
cc.push("</div>");
cc.push("</td>");
}
}
return cc.join("");
},refreshRow:function(_ef,id){
this.updateRow.call(this,_ef,id,{});
},updateRow:function(_f0,id,row){
var _f1=$.data(_f0,"treegrid").options;
var _f2=$(_f0).treegrid("find",id);
$.extend(_f2,row);
var _f3=$(_f0).treegrid("getLevel",id)-1;
var _f4=_f1.rowStyler?_f1.rowStyler.call(_f0,_f2):"";
var _f5=$.data(_f0,"datagrid").rowIdPrefix;
var _f6=_f2[_f1.idField];
function _f7(_f8){
var _f9=$(_f0).treegrid("getColumnFields",_f8);
var tr=_f1.finder.getTr(_f0,id,"body",(_f8?1:2));
var _fa=tr.find("div.datagrid-cell-rownumber").html();
var _fb=tr.find("div.datagrid-cell-check input[type=checkbox]").is(":checked");
tr.html(this.renderRow(_f0,_f9,_f8,_f3,_f2));
tr.attr("style",_f4||"");
tr.find("div.datagrid-cell-rownumber").html(_fa);
if(_fb){
tr.find("div.datagrid-cell-check input[type=checkbox]")._propAttr("checked",true);
}
if(_f6!=id){
tr.attr("id",_f5+"-"+(_f8?1:2)+"-"+_f6);
tr.attr("node-id",_f6);
}
};
_f7.call(this,true);
_f7.call(this,false);
$(_f0).treegrid("fixRowHeight",id);
},deleteRow:function(_fc,id){
var _fd=$.data(_fc,"treegrid").options;
var tr=_fd.finder.getTr(_fc,id);
tr.next("tr.treegrid-tr-tree").remove();
tr.remove();
var _fe=del(id);
if(_fe){
if(_fe.children.length==0){
tr=_fd.finder.getTr(_fc,_fe[_fd.idField]);
tr.next("tr.treegrid-tr-tree").remove();
var _ff=tr.children("td[field=\""+_fd.treeField+"\"]").children("div.datagrid-cell");
_ff.find(".tree-icon").removeClass("tree-folder").addClass("tree-file");
_ff.find(".tree-hit").remove();
$("<span class=\"tree-indent\"></span>").prependTo(_ff);
}
}
function del(id){
var cc;
var _100=$(_fc).treegrid("getParent",id);
if(_100){
cc=_100.children;
}else{
cc=$(_fc).treegrid("getData");
}
for(var i=0;i<cc.length;i++){
if(cc[i][_fd.idField]==id){
cc.splice(i,1);
break;
}
}
return _100;
};
},onBeforeRender:function(_101,_102,data){
if($.isArray(_102)){
data={total:_102.length,rows:_102};
_102=null;
}
if(!data){
return false;
}
var _103=$.data(_101,"treegrid");
var opts=_103.options;
if(data.length==undefined){
if(data.footer){
_103.footer=data.footer;
}
if(data.total){
_103.total=data.total;
}
data=this.transfer(_101,_102,data.rows);
}else{
function _104(_105,_106){
for(var i=0;i<_105.length;i++){
var row=_105[i];
row._parentId=_106;
if(row.children&&row.children.length){
_104(row.children,row[opts.idField]);
}
}
};
_104(data,_102);
}
var node=_47(_101,_102);
if(node){
if(node.children){
node.children=node.children.concat(data);
}else{
node.children=data;
}
}else{
_103.data=_103.data.concat(data);
}
this.sort(_101,data);
this.treeNodes=data;
this.treeLevel=$(_101).treegrid("getLevel",_102);
},sort:function(_107,data){
var opts=$.data(_107,"treegrid").options;
if(!opts.remoteSort&&opts.sortName){
var _108=opts.sortName.split(",");
var _109=opts.sortOrder.split(",");
_10a(data);
}
function _10a(rows){
rows.sort(function(r1,r2){
var r=0;
for(var i=0;i<_108.length;i++){
var sn=_108[i];
var so=_109[i];
var col=$(_107).treegrid("getColumnOption",sn);
var _10b=col.sorter||function(a,b){
return a==b?0:(a>b?1:-1);
};
r=_10b(r1[sn],r2[sn])*(so=="asc"?1:-1);
if(r!=0){
return r;
}
}
return r;
});
for(var i=0;i<rows.length;i++){
var _10c=rows[i].children;
if(_10c&&_10c.length){
_10a(_10c);
}
}
};
},transfer:function(_10d,_10e,data){
var opts=$.data(_10d,"treegrid").options;
var rows=[];
for(var i=0;i<data.length;i++){
rows.push(data[i]);
}
var _10f=[];
for(var i=0;i<rows.length;i++){
var row=rows[i];
if(!_10e){
if(!row._parentId){
_10f.push(row);
rows.splice(i,1);
i--;
}
}else{
if(row._parentId==_10e){
_10f.push(row);
rows.splice(i,1);
i--;
}
}
}
var toDo=[];
for(var i=0;i<_10f.length;i++){
toDo.push(_10f[i]);
}
while(toDo.length){
var node=toDo.shift();
for(var i=0;i<rows.length;i++){
var row=rows[i];
if(row._parentId==node[opts.idField]){
if(node.children){
node.children.push(row);
}else{
node.children=[row];
}
toDo.push(row);
rows.splice(i,1);
i--;
}
}
}
return _10f;
}});
$.fn.treegrid.defaults=$.extend({},$.fn.datagrid.defaults,{treeField:null,lines:false,animate:false,singleSelect:true,view:_cb,rowEvents:$.extend({},$.fn.datagrid.defaults.rowEvents,{mouseover:_32(true),mouseout:_32(false),click:_34}),loader:function(_110,_111,_112){
var opts=$(this).treegrid("options");
if(!opts.url){
return false;
}
$.ajax({type:opts.method,url:opts.url,data:_110,dataType:"json",success:function(data){
_111(data);
},error:function(){
_112.apply(this,arguments);
}});
},loadFilter:function(data,_113){
return data;
},finder:{getTr:function(_114,id,type,_115){
type=type||"body";
_115=_115||0;
var dc=$.data(_114,"datagrid").dc;
if(_115==0){
var opts=$.data(_114,"treegrid").options;
var tr1=opts.finder.getTr(_114,id,type,1);
var tr2=opts.finder.getTr(_114,id,type,2);
return tr1.add(tr2);
}else{
if(type=="body"){
var tr=$("#"+$.data(_114,"datagrid").rowIdPrefix+"-"+_115+"-"+id);
if(!tr.length){
tr=(_115==1?dc.body1:dc.body2).find("tr[node-id=\""+id+"\"]");
}
return tr;
}else{
if(type=="footer"){
return (_115==1?dc.footer1:dc.footer2).find("tr[node-id=\""+id+"\"]");
}else{
if(type=="selected"){
return (_115==1?dc.body1:dc.body2).find("tr.datagrid-row-selected");
}else{
if(type=="highlight"){
return (_115==1?dc.body1:dc.body2).find("tr.datagrid-row-over");
}else{
if(type=="checked"){
return (_115==1?dc.body1:dc.body2).find("tr.datagrid-row-checked");
}else{
if(type=="last"){
return (_115==1?dc.body1:dc.body2).find("tr:last[node-id]");
}else{
if(type=="allbody"){
return (_115==1?dc.body1:dc.body2).find("tr[node-id]");
}else{
if(type=="allfooter"){
return (_115==1?dc.footer1:dc.footer2).find("tr[node-id]");
}
}
}
}
}
}
}
}
}
},getRow:function(_116,p){
var id=(typeof p=="object")?p.attr("node-id"):p;
return $(_116).treegrid("find",id);
},getRows:function(_117){
return $(_117).treegrid("getChildren");
}},onBeforeLoad:function(row,_118){
},onLoadSuccess:function(row,data){
},onLoadError:function(){
},onBeforeCollapse:function(row){
},onCollapse:function(row){
},onBeforeExpand:function(row){
},onExpand:function(row){
},onClickRow:function(row){
},onDblClickRow:function(row){
},onClickCell:function(_119,row){
},onDblClickCell:function(_11a,row){
},onContextMenu:function(e,row){
},onBeforeEdit:function(row){
},onAfterEdit:function(row,_11b){
},onCancelEdit:function(row){
}});
})(jQuery); | |
conftest.py | import logging
import os
import time
import pytest
from tests.test_helpers.docker_helpers import docker_compose_runner # noqa: F401
# Enable debug logging.
logging.getLogger().setLevel(logging.DEBUG)
os.putenv("DATAHUB_DEBUG", "1") | def fake_time():
return 1615443388.0975091
monkeypatch.setattr(time, "time", fake_time)
yield
def pytest_addoption(parser):
parser.addoption(
"--update-golden-files",
action="store_true",
default=False,
) |
@pytest.fixture
def mock_time(monkeypatch): |
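# Illustrative sketch (not part of the original conftest): a test that asks for
# the fixture above sees a frozen clock, e.g.
#
#   def test_uses_frozen_clock(mock_time):
#       assert time.time() == 1615443388.0975091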
utf8.rs | #![allow(unused)]
// #![allow(dead_code)]
#![allow(non_upper_case_globals)]
#![allow(non_snake_case)]
#![allow(non_camel_case_types)]
#[macro_use]
use crate::builtin::*;
pub const RuneError: rune = 0xFFFD; // the "error" Rune or "Unicode replacement character"
pub const RuneSelf: rune = 0x80; // characters below RuneSelf are represented as themselves in a single byte.
pub const MaxRune: rune = 0x0010FFFF; // Maximum valid Unicode code point.
pub const UTFMax: uint = 4; // maximum number of bytes of a UTF-8 encoded Unicode character.
const surrogateMin: int = 0xD800;
const surrogateMax: int = 0xDFFF;
const maskx: int = 0b00111111;
const mask2: int = 0b00011111;
const mask3: int = 0b00001111;
const mask4: int = 0b00000111;
const rune1Max: int = (1 << 7) - 1;
const rune2Max: int = 1 << 11 - 1;
const rune3Max: int = 1 << 16 - 1;
const t1: int = 0b00000000;
const tx: int = 0b10000000;
const t2: int = 0b11000000;
const t3: int = 0b11100000;
const t4: int = 0b11110000;
const t5: int = 0b11111000;
// EncodeRune writes into p (which must be large enough) the UTF-8 encoding of the rune.
// If the rune is out of range, it writes the encoding of RuneError.
// It returns the number of bytes written.
pub fn EncodeRune(mut p: Vec<byte>, mut r: rune) -> int | {
// Negative values are erroneous. Making it unsigned addresses the problem.
let i = uint32!(r);
if i <= uint32!(rune1Max) {
p[0] = byte!(r);
return 1;
}
if i <= uint32!(rune2Max) {
// _ = p[1]; // eliminate bounds checks
p[0] = byte!(t2) | byte!(r >> 6);
p[1] = byte!(tx) | byte!(r) & byte!(maskx);
return 2;
}
if i > MaxRune || uint32!(surrogateMin.abs()) <= i && i <= uint32!(surrogateMax) {
r = RuneError
}
if i <= uint32!(rune3Max) {
// _ = p[2] // eliminate bounds checks
p[0] = byte!(t3) | byte!(r >> 12);
p[1] = byte!(tx) | byte!(r >> 6) & byte!(maskx);
p[2] = byte!(tx) | byte!(r) & byte!(maskx);
return 3;
} else {
// _ = p[3] // eliminate bounds checks
p[0] = byte!(t4) | byte!(r >> 18);
p[1] = byte!(tx) | byte!(r >> 12) & byte!(maskx);
p[2] = byte!(tx) | byte!(r >> 6) & byte!(maskx);
p[3] = byte!(tx) | byte!(r) & byte!(maskx);
return 4;
}
} |
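// Illustrative sketch (not part of the original file): assuming the integer
// aliases and the byte!/uint32! macros from crate::builtin behave like their
// Go counterparts, EncodeRune could be exercised roughly like this. Note that
// `p` is taken by value above, so only the returned byte count is visible to
// the caller in this sketch.
//
//   let buf: Vec<byte> = vec![0; UTFMax as usize];
//   let n = EncodeRune(buf, 0x00E9 as rune); // U+00E9 encodes to two UTF-8 bytes
//   assert_eq!(n, 2);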
|
lr35902_ui.go | package ui
import (
"fmt"
"os"
"strings"
"github.com/laullon/b2t80s/cpu/lr35902"
"github.com/laullon/b2t80s/gui"
)
type lr35902UI struct {
regs *lr35902.LR35902Registers
a, f, b, c, d, e, h, l *RegText
af, bc, de, hl *RegText
sp, pc, flag *RegText
ier, ifr, ime *RegText
ui gui.HCT
out gui.Text
log []string
nextOP string
lastPC uint16
getMemory func(pc, leng uint16) []byte
traceFile *os.File
}
func NewLR35902UI(cpu lr35902.LR35902) gui.GUIObject |
func (ctl *lr35902UI) Render() {
ctl.ui.Render()
}
func (ctl *lr35902UI) Resize(r gui.Rect) {
ctl.ui.Resize(r)
}
func (ctl *lr35902UI) Update() {
af := toHex16(uint16(ctl.regs.A)<<8 | uint16(ctl.regs.F.GetByte()))
ctl.a.Update(toHex8(ctl.regs.A))
ctl.f.Update(toHex8(ctl.regs.F.GetByte()))
ctl.b.Update(toHex8(ctl.regs.B))
ctl.c.Update(toHex8(ctl.regs.C))
ctl.d.Update(toHex8(ctl.regs.D))
ctl.e.Update(toHex8(ctl.regs.E))
ctl.h.Update(toHex8(ctl.regs.H))
ctl.l.Update(toHex8(ctl.regs.L))
ctl.af.Update(af)
ctl.bc.Update(toHex16(ctl.regs.BC.Get()))
ctl.de.Update(toHex16(ctl.regs.DE.Get()))
ctl.hl.Update(toHex16(ctl.regs.HL.Get()))
ctl.sp.Update(toHex16(ctl.regs.SP.Get()))
ctl.pc.Update(toHex16(ctl.regs.PC))
ctl.ifr.Update(fmt.Sprintf("%08b", ctl.regs.IF))
ctl.ier.Update(fmt.Sprintf("%08b", ctl.regs.IE))
ctl.ime.Update(fmt.Sprintf("%v", ctl.regs.IME))
ctl.flag.Update(fmt.Sprintf("%04b", ctl.regs.F.GetByte()>>4))
ctl.out.SetText(ctl.getOutput())
}
func (ui *lr35902UI) getOutput() string {
var sb strings.Builder
sb.WriteString(strings.Join(ui.log, "\n"))
sb.WriteString("\n\n")
sb.WriteString(ui.nextOP)
sb.WriteString("\n\n")
pc := ui.lastPC
if ui.getMemory != nil {
data := ui.getMemory(pc, 40)
diss := make([]string, 10)
for i := 0; (len(data) > 4) && (i < 10); i++ {
op := lr35902.OPCodes[data[0]]
if op != nil {
diss[i] = op.Dump(pc, data)
pc += uint16(op.Len)
data = data[op.Len:]
}
}
sb.WriteString(strings.Join(diss, "\n"))
}
return sb.String()
}
func (ctl *lr35902UI) DoTrace(on bool) { // TODO: implement
}
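// AppendLastOP appends an executed opcode to the rolling log (and to the trace
// file when tracing is enabled), dropping the oldest entry to keep the log size fixed.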
func (ctl *lr35902UI) AppendLastOP(op string) {
if ctl.traceFile != nil {
ctl.traceFile.WriteString(op)
ctl.traceFile.WriteString("\n")
}
// println(op)
// println()
nLog := append(ctl.log, op)
ctl.log = nLog[1:]
}
func (ctl *lr35902UI) SetNextOP(op string) {
ctl.nextOP = op
}
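// SetDiss flushes the pending opcode into the log, disassembles the instruction at
// pc into nextOP, and records the following PC plus the memory accessor so
// getOutput can disassemble the upcoming instructions.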
func (ctl *lr35902UI) SetDiss(pc uint16, getMemory func(pc, leng uint16) []byte) {
ctl.AppendLastOP(ctl.nextOP)
data := getMemory(pc, 4)
op := lr35902.OPCodes[data[0]]
ctl.nextOP = op.Dump(pc, data)
pc += uint16(op.Len)
data = data[op.Len:]
ctl.lastPC = pc
ctl.getMemory = getMemory
}
func (ctl *lr35902UI) doTrace(on bool) {
if on {
f, err := os.Create("trace.out")
if err != nil {
panic(err)
}
ctl.traceFile = f
} else {
ctl.traceFile.Close()
ctl.traceFile = nil
}
}
| {
ctl := &lr35902UI{
regs: cpu.Registers(),
log: make([]string, 10),
}
cpu.SetTracer(ctl)
ctl.a = NewRegText("A:")
ctl.f = NewRegText("F:")
ctl.b = NewRegText("B:")
ctl.c = NewRegText("C:")
ctl.d = NewRegText("D:")
ctl.e = NewRegText("E:")
ctl.h = NewRegText("H:")
ctl.l = NewRegText("L:")
ctl.af = NewRegText("AF:")
ctl.bc = NewRegText("BC:")
ctl.de = NewRegText("DE:")
ctl.hl = NewRegText("HL:")
ctl.sp = NewRegText("SP:")
ctl.pc = NewRegText("PC:")
ctl.ier = NewRegText("IE:")
ctl.ifr = NewRegText("IF:")
ctl.ime = NewRegText("IME:")
ctl.flag = NewRegText("FLAG:")
flag := NewRegText("")
flag.Update("ZNHC")
regs := []*RegText{
ctl.a, ctl.f, ctl.af, ctl.pc, ctl.ier,
ctl.b, ctl.c, ctl.bc, ctl.sp, ctl.ifr,
ctl.d, ctl.e, ctl.de, ctl.flag, ctl.ime,
ctl.h, ctl.l, ctl.hl, flag,
}
grid := gui.NewHGrid(10, 20)
for _, reg := range regs {
grid.Add(reg.Label, reg.Value)
}
ctl.out = gui.NewText("")
ctl.ui = gui.NewVerticalHCT()
ctl.ui.SetHead(grid, 80)
ctl.ui.SetCenter(ctl.out)
// dump := widget.NewCheck("Dump", func(on bool) {
// ui.doTrace(on)
// })
return ctl
} |
conftest.py | # coding: utf-8
from __future__ import unicode_literals
from io import StringIO, BytesIO
from pathlib import Path
import pytest
from .util import load_test_model
from ..tokens import Doc
from ..strings import StringStore
from .. import util
# These languages are used for generic tokenizer tests – only add a language
# here if it's using spaCy's tokenizer (not a different library)
# TODO: re-implement generic tokenizer tests
_languages = ['bn', 'da', 'de', 'en', 'es', 'fi', 'fr', 'ga', 'he', 'hu', 'id',
'it', 'nb', 'nl', 'pl', 'pt', 'ro', 'ru', 'sv', 'tr', 'ar', 'xx']
_models = {'en': ['en_core_web_sm'],
'de': ['de_core_news_sm'],
'fr': ['fr_core_news_sm'],
'xx': ['xx_ent_web_sm'],
'en_core_web_md': ['en_core_web_md'],
'es_core_news_md': ['es_core_news_md']}
# only used for tests that require loading the models
# in all other cases, use specific instances
@pytest.fixture(params=_models['en'])
def EN(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['de'])
def DE(request):
return load_test_model(request.param)
@pytest.fixture(params=_models['fr'])
def FR(request):
return load_test_model(request.param)
@pytest.fixture()
def RU(request):
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru')()
#@pytest.fixture(params=_languages)
#def tokenizer(request):
#lang = util.get_lang_class(request.param)
#return lang.Defaults.create_tokenizer()
@pytest.fixture
def tokenizer():
return util.get_lang_class('xx').Defaults.create_tokenizer()
@pytest.fixture
def en_tokenizer():
return util.get_lang_class('en').Defaults.create_tokenizer()
@pytest.fixture
def en_vocab():
return util.get_lang_class('en').Defaults.create_vocab()
@pytest.fixture
def en_parser(en_vocab):
nlp = util.get_lang_class('en')(en_vocab)
return nlp.create_pipe('parser')
@pytest.fixture
def es_tokenizer():
return util.get_lang_class('es').Defaults.create_tokenizer()
@pytest.fixture
def de_tokenizer():
return util.get_lang_class('de').Defaults.create_tokenizer()
@pytest.fixture
def fr_tokenizer():
return util.get_lang_class('fr').Defaults.create_tokenizer()
@pytest.fixture
def hu_tokenizer():
return util.get_lang_class('hu').Defaults.create_tokenizer()
@pytest.fixture
def fi_tokenizer():
return util.get_lang_class('fi').Defaults.create_tokenizer()
@pytest.fixture
def ro_tokenizer():
return util.get_lang_class('ro').Defaults.create_tokenizer()
@pytest.fixture
def id_tokenizer():
return util.get_lang_class('id').Defaults.create_tokenizer()
@pytest.fixture
def sv_tokenizer():
return util.get_lang_class('sv').Defaults.create_tokenizer()
@pytest.fixture
def bn_tokenizer():
return util.get_lang_class('bn').Defaults.create_tokenizer()
@pytest.fixture
def ga_tokenizer():
return util.get_lang_class('ga').Defaults.create_tokenizer()
@pytest.fixture
def he_tokenizer():
return util.get_lang_class('he').Defaults.create_tokenizer()
@pytest.fixture
def nb_tokenizer():
return util.get_lang_class('nb').Defaults.create_tokenizer()
@pytest.fixture
def da_tokenizer():
return util.get_lang_class('da').Defaults.create_tokenizer()
@pytest.fixture
def ja_tokenizer():
    mecab = pytest.importorskip("MeCab")
return util.get_lang_class('ja').Defaults.create_tokenizer()
@pytest.fixture
def th_tokenizer():
pythainlp = pytest.importorskip("pythainlp")
return util.get_lang_class('th').Defaults.create_tokenizer()
@pytest.fixture
def tr_tokenizer():
return util.get_lang_class('tr').Defaults.create_tokenizer()
@pytest.fixture
def ar_tokenizer():
return util.get_lang_class('ar').Defaults.create_tokenizer()
@pytest.fixture
def ru_tokenizer():
pymorphy = pytest.importorskip('pymorphy2')
return util.get_lang_class('ru').Defaults.create_tokenizer()
@pytest.fixture
def st | :
return StringStore()
@pytest.fixture
def en_entityrecognizer():
return util.get_lang_class('en').Defaults.create_entity()
@pytest.fixture
def text_file():
return StringIO()
@pytest.fixture
def text_file_b():
return BytesIO()
def pytest_addoption(parser):
parser.addoption("--models", action="store_true",
help="include tests that require full models")
parser.addoption("--vectors", action="store_true",
help="include word vectors tests")
parser.addoption("--slow", action="store_true",
help="include slow tests")
for lang in _languages + ['all']:
parser.addoption("--%s" % lang, action="store_true", help="Use %s models" % lang)
for model in _models:
if model not in _languages:
parser.addoption("--%s" % model, action="store_true", help="Use %s model" % model)
def pytest_runtest_setup(item):
for opt in ['models', 'vectors', 'slow']:
if opt in item.keywords and not item.config.getoption("--%s" % opt):
pytest.skip("need --%s option to run" % opt)
# Check if test is marked with models and has arguments set, i.e. specific
# language. If so, skip test if flag not set.
if item.get_marker('models'):
for arg in item.get_marker('models').args:
if not item.config.getoption("--%s" % arg) and not item.config.getoption("--all"):
pytest.skip("need --%s or --all option to run" % arg)
| ringstore() |
tool_test.go | package tool
import (
"context"
"os"
"path"
"path/filepath"
"testing"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/command"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/digest"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/fakes"
"github.com/bazelbuild/remote-apis-sdks/go/pkg/outerr"
"github.com/google/go-cmp/cmp"
)
func TestTool_DownloadActionResult(t *testing.T) {
e, cleanup := fakes.NewTestEnv(t)
defer cleanup()
cmd := &command.Command{
Args: []string{"tool"},
ExecRoot: e.ExecRoot,
InputSpec: &command.InputSpec{},
OutputFiles: []string{"a/b/out"},
}
opt := command.DefaultExecutionOptions()
output := "output"
_, acDg := e.Set(cmd, opt, &command.Result{Status: command.CacheHitResultStatus}, &fakes.OutputFile{Path: "a/b/out", Contents: output},
fakes.StdOut("stdout"), fakes.StdErr("stderr"))
toolClient := &Client{GrpcClient: e.Client.GrpcClient}
tmpDir := t.TempDir()
if err := toolClient.DownloadActionResult(context.Background(), acDg.String(), tmpDir); err != nil {
t.Fatalf("DownloadActionResult(%v,%v) failed: %v", acDg.String(), tmpDir, err)
}
verifyData := map[string]string{
filepath.Join(tmpDir, "a/b/out"): "output",
filepath.Join(tmpDir, "stdout"): "stdout",
filepath.Join(tmpDir, "stderr"): "stderr",
}
for fp, want := range verifyData {
c, err := os.ReadFile(fp)
if err != nil {
t.Fatalf("Unable to read downloaded output file %v: %v", fp, err)
}
got := string(c)
if got != want {
t.Fatalf("Incorrect content in downloaded file %v, want %v, got %v", fp, want, got)
}
}
}
func TestTool_ShowAction(t *testing.T) {
e, cleanup := fakes.NewTestEnv(t)
defer cleanup()
cmd := &command.Command{
Args: []string{"tool"},
ExecRoot: e.ExecRoot,
InputSpec: &command.InputSpec{
Inputs: []string{
"a/b/input.txt",
},
},
OutputFiles: []string{"a/b/out"},
}
opt := command.DefaultExecutionOptions()
_, acDg := e.Set(cmd, opt, &command.Result{Status: command.CacheHitResultStatus}, &fakes.OutputFile{Path: "a/b/out", Contents: "output"},
fakes.StdOut("stdout"), fakes.StdErr("stderr"), &fakes.InputFile{Path: "a/b/input.txt", Contents: "input"})
toolClient := &Client{GrpcClient: e.Client.GrpcClient}
got, err := toolClient.ShowAction(context.Background(), acDg.String())
if err != nil {
t.Fatalf("ShowAction(%v) failed: %v", acDg.String(), err)
}
want := `Command
=======
Command Digest: 76a608e419da9ed3673f59b8b903f21dbf7cc3178281029151a090cac02d9e4d/15
tool
Platform
========
Inputs
======
[Root directory digest: e23e10be0d14b5b2b1b7af32de78dea554a74df5bb22b31ae6c49583c1a8aa0e/75]
a/b/input.txt: [File digest: e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855/0]
------------------------------------------------------------------------
Action Result
Exit code: 0
stdout digest: 63d42d26156fcc761e57da4128e9881d5bdf3bf933f0f6e9c93d6e26b9b90ae7/6
stderr digest: 7e6b710b765404cccbad9eedcff7615fc37b269d6db12cd81a58be541d93083c/6
Output Files
============
a/b/out, digest: e0ee8bb50685e05fa0f47ed04203ae953fdfd055f5bd2892ea186504254f8c3a/6
Output Files From Directories
=============================
`
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("ShowAction(%v) returned diff (-want +got): %v\n\ngot: %v\n\nwant: %v\n", acDg.String(), diff, got, want)
}
}
func TestTool_CheckDeterminism(t *testing.T) {
e, cleanup := fakes.NewTestEnv(t)
defer cleanup()
cmd := &command.Command{
Args: []string{"foo bar baz"},
ExecRoot: e.ExecRoot,
InputSpec: &command.InputSpec{Inputs: []string{"i1", "i2"}},
OutputFiles: []string{"a/b/out"},
}
if err := os.WriteFile(filepath.Join(e.ExecRoot, "i1"), []byte("i1"), 0644); err != nil {
t.Fatalf("failed creating input file: %v", err)
}
if err := os.WriteFile(filepath.Join(e.ExecRoot, "i2"), []byte("i2"), 0644); err != nil {
t.Fatalf("failed creating input file: %v", err)
}
out := "output"
opt := &command.ExecutionOptions{AcceptCached: false, DownloadOutputs: true, DownloadOutErr: true}
_, acDg := e.Set(cmd, opt, &command.Result{Status: command.SuccessResultStatus}, &fakes.OutputFile{Path: "a/b/out", Contents: out})
client := &Client{GrpcClient: e.Client.GrpcClient}
if err := client.CheckDeterminism(context.Background(), acDg.String(), "", 2); err != nil {
t.Errorf("CheckDeterminism returned an error: %v", err)
}
// Now execute again with changed inputs.
testOnlyStartDeterminismExec = func() {
out = "output2"
e.Set(cmd, opt, &command.Result{Status: command.SuccessResultStatus}, &fakes.OutputFile{Path: "a/b/out", Contents: out})
}
defer func() { testOnlyStartDeterminismExec = func() {} }()
if err := client.CheckDeterminism(context.Background(), acDg.String(), "", 2); err == nil {
t.Errorf("CheckDeterminism returned nil, want error")
}
}
func TestTool_ExecuteAction(t *testing.T) {
e, cleanup := fakes.NewTestEnv(t)
defer cleanup()
cmd := &command.Command{
Args: []string{"foo bar baz"},
ExecRoot: e.ExecRoot,
InputSpec: &command.InputSpec{Inputs: []string{"i1", "i2"}},
OutputFiles: []string{"a/b/out"},
}
if err := os.WriteFile(filepath.Join(e.ExecRoot, "i1"), []byte("i1"), 0644); err != nil {
t.Fatalf("failed creating input file: %v", err)
}
if err := os.WriteFile(filepath.Join(e.ExecRoot, "i2"), []byte("i2"), 0644); err != nil {
t.Fatalf("failed creating input file: %v", err)
}
out := "output"
opt := &command.ExecutionOptions{AcceptCached: false, DownloadOutputs: true, DownloadOutErr: true}
_, acDg := e.Set(cmd, opt, &command.Result{Status: command.SuccessResultStatus}, &fakes.OutputFile{Path: "a/b/out", Contents: out},
fakes.StdOut("stdout"), fakes.StdErr("stderr"))
client := &Client{GrpcClient: e.Client.GrpcClient}
oe := outerr.NewRecordingOutErr()
if _, err := client.ExecuteAction(context.Background(), acDg.String(), "", "", oe); err != nil {
t.Errorf("error executeAction: %v", err)
}
if string(oe.Stderr()) != "stderr" {
t.Errorf("Incorrect stderr %v, expected \"stderr\"", oe.Stderr())
}
if string(oe.Stdout()) != "stdout" {
t.Errorf("Incorrect stdout %v, expected \"stdout\"", oe.Stdout())
}
// Now execute again with changed inputs.
tmpDir := t.TempDir()
if err := os.WriteFile(filepath.Join(tmpDir, "i1"), []byte("i11"), 0644); err != nil {
t.Fatalf("failed creating input file: %v", err)
}
if err := os.WriteFile(filepath.Join(tmpDir, "i2"), []byte("i22"), 0644); err != nil |
cmd.ExecRoot = tmpDir
_, acDg2 := e.Set(cmd, opt, &command.Result{Status: command.SuccessResultStatus}, &fakes.OutputFile{Path: "a/b/out", Contents: out},
fakes.StdOut("stdout2"), fakes.StdErr("stderr2"))
oe = outerr.NewRecordingOutErr()
if _, err := client.ExecuteAction(context.Background(), acDg2.String(), "", tmpDir, oe); err != nil {
t.Errorf("error executeAction: %v", err)
}
fp := filepath.Join(tmpDir, "a/b/out")
c, err := os.ReadFile(fp)
if err != nil {
t.Fatalf("Unable to read downloaded output %v: %v", fp, err)
}
if string(c) != out {
t.Fatalf("Incorrect content in downloaded file %v, want %s, got %s", fp, out, c)
}
if string(oe.Stderr()) != "stderr2" {
t.Errorf("Incorrect stderr %v, expected \"stderr\"", oe.Stderr())
}
if string(oe.Stdout()) != "stdout2" {
t.Errorf("Incorrect stdout %v, expected \"stdout\"", oe.Stdout())
}
}
func TestTool_ExecuteActionFromRoot(t *testing.T) {
e, cleanup := fakes.NewTestEnv(t)
defer cleanup()
cmd := &command.Command{
Args: []string{"foo bar baz"},
ExecRoot: e.ExecRoot,
InputSpec: &command.InputSpec{Inputs: []string{"i1", "i2"}},
OutputFiles: []string{"a/b/out"},
}
// Create files necessary for the fake
if err := os.WriteFile(filepath.Join(e.ExecRoot, "i1"), []byte("i1"), 0644); err != nil {
t.Fatalf("failed creating input file: %v", err)
}
if err := os.WriteFile(filepath.Join(e.ExecRoot, "i2"), []byte("i2"), 0644); err != nil {
t.Fatalf("failed creating input file: %v", err)
}
out := "output"
opt := &command.ExecutionOptions{AcceptCached: false, DownloadOutputs: false, DownloadOutErr: true}
e.Set(cmd, opt, &command.Result{Status: command.SuccessResultStatus}, &fakes.OutputFile{Path: "a/b/out", Contents: out},
fakes.StdOut("stdout"), fakes.StdErr("stderr"))
client := &Client{GrpcClient: e.Client.GrpcClient}
oe := outerr.NewRecordingOutErr()
// Construct the action root
os.Mkdir(filepath.Join(e.ExecRoot, "input"), os.ModePerm)
if err := os.WriteFile(filepath.Join(e.ExecRoot, "input", "i1"), []byte("i1"), 0644); err != nil {
t.Fatalf("failed creating input file: %v", err)
}
if err := os.WriteFile(filepath.Join(e.ExecRoot, "input", "i2"), []byte("i2"), 0644); err != nil {
t.Fatalf("failed creating input file: %v", err)
}
if err := os.WriteFile(filepath.Join(e.ExecRoot, "cmd.textproto"), []byte(`arguments: "foo bar baz"
output_files: "a/b/out"`), 0644); err != nil {
t.Fatalf("failed creating command file: %v", err)
}
if err := os.WriteFile(filepath.Join(e.ExecRoot, "ac.textproto"), []byte(""), 0644); err != nil {
t.Fatalf("failed creating command file: %v", err)
}
if _, err := client.ExecuteAction(context.Background(), "", e.ExecRoot, "", oe); err != nil {
t.Errorf("error executeAction: %v", err)
}
if string(oe.Stderr()) != "stderr" {
t.Errorf("Incorrect stderr %v, expected \"stderr\"", string(oe.Stderr()))
}
if string(oe.Stdout()) != "stdout" {
t.Errorf("Incorrect stdout %v, expected \"stdout\"", oe.Stdout())
}
}
func TestTool_DownloadBlob(t *testing.T) {
e, cleanup := fakes.NewTestEnv(t)
defer cleanup()
cas := e.Server.CAS
dg := cas.Put([]byte("hello"))
toolClient := &Client{GrpcClient: e.Client.GrpcClient}
got, err := toolClient.DownloadBlob(context.Background(), dg.String(), "")
if err != nil {
t.Fatalf("DownloadBlob(%v) failed: %v", dg.String(), err)
}
want := "hello"
if diff := cmp.Diff(want, got); diff != "" {
t.Fatalf("DownloadBlob(%v) returned diff (-want +got): %v\n\ngot: %v\n\nwant: %v\n", dg.String(), diff, got, want)
}
// Now download into a specified location.
tmpFile, err := os.CreateTemp(t.TempDir(), "")
if err != nil {
t.Fatalf("TempFile failed: %v", err)
}
if err := tmpFile.Close(); err != nil {
t.Fatalf("TempFile Close failed: %v", err)
}
fp := tmpFile.Name()
got, err = toolClient.DownloadBlob(context.Background(), dg.String(), fp)
if err != nil {
t.Fatalf("DownloadBlob(%v) failed: %v", dg.String(), err)
}
if got != "" {
t.Fatalf("DownloadBlob(%v) returned %v, expected empty: ", dg.String(), got)
}
c, err := os.ReadFile(fp)
if err != nil {
t.Fatalf("Unable to read downloaded output file %v: %v", fp, err)
}
got = string(c)
if got != want {
t.Fatalf("Incorrect content in downloaded file %v, want %v, got %v", fp, want, got)
}
}
func TestTool_UploadBlob(t *testing.T) {
e, cleanup := fakes.NewTestEnv(t)
defer cleanup()
cas := e.Server.CAS
tmpFile := path.Join(t.TempDir(), "blob")
if err := os.WriteFile(tmpFile, []byte("Hello, World!"), 0777); err != nil {
t.Fatalf("Could not create temp blob: %v", err)
}
dg, err := digest.NewFromFile(tmpFile)
if err != nil {
t.Fatalf("digest.NewFromFile('%v') failed: %v", tmpFile, err)
}
toolClient := &Client{GrpcClient: e.Client.GrpcClient}
if err := toolClient.UploadBlob(context.Background(), tmpFile); err != nil {
t.Fatalf("UploadBlob('%v', '%v') failed: %v", dg.String(), tmpFile, err)
}
// First request should upload the blob.
if cas.BlobWrites(dg) != 1 {
t.Fatalf("Expected 1 write for blob '%v', got %v", dg.String(), cas.BlobWrites(dg))
}
// Retries should check whether the blob already exists and skip uploading if it does.
if err := toolClient.UploadBlob(context.Background(), tmpFile); err != nil {
t.Fatalf("UploadBlob('%v', '%v') failed: %v", dg.String(), tmpFile, err)
}
if cas.BlobWrites(dg) != 1 {
t.Fatalf("Expected 1 write for blob '%v', got %v", dg.String(), cas.BlobWrites(dg))
}
}
| {
t.Fatalf("failed creating input file: %v", err)
} |
AOIinfo.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AOIinfo(object):
def __init__(self):
self._adcode = None
self._area = None
self._distance = None
self._id = None
self._location = None
self._name = None
@property
def adcode(self):
return self._adcode
@adcode.setter
def adcode(self, value):
self._adcode = value
@property
def area(self):
return self._area
@area.setter
def area(self, value):
self._area = value
@property
def distance(self):
return self._distance
@distance.setter
def distance(self, value):
self._distance = value
@property
def id(self):
return self._id
@id.setter
def id(self, value):
self._id = value
@property
def location(self):
return self._location
@location.setter
def location(self, value):
self._location = value
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
def to_alipay_dict(self):
params = dict()
if self.adcode:
if hasattr(self.adcode, 'to_alipay_dict'):
params['adcode'] = self.adcode.to_alipay_dict()
else:
params['adcode'] = self.adcode
if self.area:
if hasattr(self.area, 'to_alipay_dict'):
params['area'] = self.area.to_alipay_dict()
else:
params['area'] = self.area
if self.distance:
if hasattr(self.distance, 'to_alipay_dict'):
params['distance'] = self.distance.to_alipay_dict()
else:
params['distance'] = self.distance
if self.id:
if hasattr(self.id, 'to_alipay_dict'):
params['id'] = self.id.to_alipay_dict()
else:
params['id'] = self.id
if self.location:
if hasattr(self.location, 'to_alipay_dict'):
params['location'] = self.location.to_alipay_dict()
else:
params['location'] = self.location
if self.name:
if hasattr(self.name, 'to_alipay_dict'):
params['name'] = self.name.to_alipay_dict()
else:
params['name'] = self.name
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AOIinfo()
if 'adcode' in d:
o.adcode = d['adcode']
if 'area' in d:
o.area = d['area']
if 'distance' in d: | if 'id' in d:
o.id = d['id']
if 'location' in d:
o.location = d['location']
if 'name' in d:
o.name = d['name']
return o | o.distance = d['distance'] |
interrupt_core0_cpu_int_pri_21.rs | #[doc = "Reader of register INTERRUPT_CORE0_CPU_INT_PRI_21"]
pub type R = crate::R<u32, super::INTERRUPT_CORE0_CPU_INT_PRI_21>;
#[doc = "Writer for register INTERRUPT_CORE0_CPU_INT_PRI_21"]
pub type W = crate::W<u32, super::INTERRUPT_CORE0_CPU_INT_PRI_21>;
#[doc = "Register INTERRUPT_CORE0_CPU_INT_PRI_21 `reset()`'s with value 0"]
impl crate::ResetValue for super::INTERRUPT_CORE0_CPU_INT_PRI_21 {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `INTERRUPT_CORE0_CPU_PRI_21_MAP`"]
pub type INTERRUPT_CORE0_CPU_PRI_21_MAP_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `INTERRUPT_CORE0_CPU_PRI_21_MAP`"]
pub struct INTERRUPT_CORE0_CPU_PRI_21_MAP_W<'a> {
w: &'a mut W,
}
impl<'a> INTERRUPT_CORE0_CPU_PRI_21_MAP_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn | (self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f);
self.w
}
}
impl R {
#[doc = "Bits 0:3"]
#[inline(always)]
pub fn interrupt_core0_cpu_pri_21_map(&self) -> INTERRUPT_CORE0_CPU_PRI_21_MAP_R {
INTERRUPT_CORE0_CPU_PRI_21_MAP_R::new((self.bits & 0x0f) as u8)
}
}
impl W {
#[doc = "Bits 0:3"]
#[inline(always)]
pub fn interrupt_core0_cpu_pri_21_map(&mut self) -> INTERRUPT_CORE0_CPU_PRI_21_MAP_W {
INTERRUPT_CORE0_CPU_PRI_21_MAP_W { w: self }
}
}
| bits |
WithdrawalSuccess.tsx | import React from "react"
import { useTranslation } from "react-i18next"
import Typography from "@material-ui/core/Typography"
import { Withdrawal } from "@satoshipay/stellar-transfer"
import { RefStateObject } from "~Generic/hooks/userinterface"
import { ActionButton, DialogActionsBox } from "~Generic/components/DialogActions"
import { VerticalLayout } from "~Layout/components/Box"
import Portal from "~Generic/components/Portal"
import { TransferStates } from "../util/statemachine"
import { Paragraph, Summary } from "./Sidebar"
interface WithdrawalSuccessProps {
dialogActionsRef: RefStateObject | undefined
onClose: () => void
state: TransferStates.TransferCompleted<Withdrawal>
}
function WithdrawalSuccess(props: WithdrawalSuccessProps) {
const { transferServer } = props.state.withdrawal!
const { t } = useTranslation()
return (
<VerticalLayout grow>
<VerticalLayout alignItems="center" margin="24px 0" textAlign="center">
<Typography variant="h5">{t("transfer-service.withdrawal-success.body.withdrawal-in-progress")}</Typography>
<Typography style={{ margin: "16px 0" }} variant="body2">
<Typography style={{ margin: "8px 0" }} variant="body2">
{t(
"transfer-service.withdrawal-success.body.info.1",
`${transferServer.domain} is conducting the withdrawal.`,
{ domain: transferServer.domain }
)}
</Typography>
<Typography style={{ margin: "8px 0" }} variant="body2"> | <Portal desktop="inline" target={props.dialogActionsRef && props.dialogActionsRef.element}>
<DialogActionsBox>
<ActionButton onClick={props.onClose} type="primary">
{t("transfer-service.withdrawal-success.action.close")}
</ActionButton>
</DialogActionsBox>
</Portal>
</VerticalLayout>
</VerticalLayout>
)
}
const Sidebar = () => {
const { t } = useTranslation()
return (
<Summary headline={t("transfer-service.withdrawal-success.sidebar.headline")}>
<Paragraph>{t("transfer-service.withdrawal-success.sidebar.info")}</Paragraph>
</Summary>
)
}
const SuccessView = Object.assign(React.memo(WithdrawalSuccess), { Sidebar })
export default SuccessView | {t("transfer-service.withdrawal-success.body.info.2")}
</Typography>
{/* TODO: Show nice summary */}
</Typography> |
__init__.py | from . import platform | from . import utils |
|
LivingArchitectureEnv.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 8 16:12:32 2018
@author: jack.lingheng.meng
"""
try:
import vrep
except:
print ('--------------------------------------------------------------')
print ('"vrep.py" could not be imported. This means very probably that')
print ('either "vrep.py" or the remoteApi library could not be found.')
print ('Make sure both are in the same folder as this file,')
print ('or appropriately adjust the file "vrep.py"')
print ('--------------------------------------------------------------')
print ('')
import gym
from gym import spaces
import time
import numpy as np
import warnings
class LivingArchitectureEnv(gym.Env):
def __init__(self):
print ('Program started')
# connect to V-REP server
vrep.simxFinish(-1) # just in case, close all opened connections
self.clientID = vrep.simxStart('127.0.0.1',19997,True,True,5000,5) # Connect to V-REP
if self.clientID!=-1:
print ('Connected to remote API server')
else:
print ('Failed connecting to remote API server')
        # start simulation
self._def_op_mode = vrep.simx_opmode_blocking
self._set_joint_op_mode = vrep.simx_opmode_oneshot
self._set_light_op_mode = vrep.simx_opmode_oneshot
self._set_visitor_op_mode = vrep.simx_opmode_oneshot
# To get sensor data
# vrep.simx_opmode_buffer: does not work, don't know why?
# vrep.simx_opmode_blocking: too slow
# vrep.simx_opmode_oneshot: works pretty good
self._get_prox_op_mode = vrep.simx_opmode_oneshot
self._get_light_op_mode = vrep.simx_opmode_oneshot
vrep.simxStartSimulation(self.clientID, self._def_op_mode)
# get object names and handles
self._get_object_name_and_handle()
# initialize action and observation space
print("Initialize LAS action and observation space...")
self.prox_sensor_num = len(self.proxSensorHandles)
self.smas_num = len(self.jointHandles)
self.lights_num = len(self.lightHandles)
self.sensors_dim = self.prox_sensor_num + self.lights_num * (1+3)
self.actuators_dim = self.smas_num + self.lights_num * (1+3) # light state & color
self.act_max = np.array([np.inf]*self.actuators_dim)
self.act_min = - np.array([np.inf]*self.actuators_dim)
self.obs_max = np.array([1.]*self.sensors_dim)
self.obs_min = - np.array([1.]*self.sensors_dim)
self.observation_space = spaces.Box(self.obs_min, self.obs_max)
self.action_space = spaces.Box(self.act_min, self.act_max)
print("Initialization of LAS done!")
# initialize Visitor action and observation space
print("Initialize Visitor action and observation space...")
self.visitor_num = len(self.visitorHandles)
self.visitor_action_dim = self.visitor_num * 2 # visitor's position (x,y,0)
        self.visitor_action_max = np.array([7,9]*self.visitor_num) # later we should find a way to automatically get this limit
self.visitor_action_min = np.array([-7,-9]*self.visitor_num)
self.visitor_action_space = spaces.Box(self.visitor_action_min, self.visitor_action_max)
# initialize Single Visitor action and observation space
print("Initialize Visitor action and observation space...")
self.single_visitor_action_dim = self.visitor_num * 2 # visitor's position (x,y,0)
        self.single_visitor_action_max = np.array([7,9]) # later we should find a way to automatically get this limit
self.single_visitor_action_min = np.array([-7,-9])
self.single_visitor_action_space = spaces.Box(self.single_visitor_action_min, self.single_visitor_action_max)
print("Initialization of visitor done!")
self.reward = 0
def _get_object_name_and_handle(self):
"""
        # When calling vrep.simxGetObjectGroupData to retrieve object names and handles,
# choose appropriate objectType parameter:
# joint: vrep.sim_object_joint_type
# proximity sensor: vrep.sim_object_proximitysensor_type
# light: vrep.sim_object_light_type
# visitor target position: vrep.sim_object_dummy_type
# visitor body: vrep.sim_object_shape_type
"""
dataType = 0 # 0: retrieves the object names (in stringData.)
print("Get objects' names and handles ...")
# proximity sensor
proxSensorIndex = []
rc = vrep.simx_return_initialize_error_flag
while rc != vrep.simx_return_ok:
rc, proxSensorHandles, intData, floatData, proxSensorNames = vrep.simxGetObjectGroupData(self.clientID,vrep.sim_object_proximitysensor_type, dataType, self._def_op_mode)
if rc==vrep.simx_return_ok:
print ('Get Prox Sensor Success!!!!!') # display the reply from V-REP (in this case, just a string)
for i, name in enumerate(proxSensorNames):
if "_node#" in name:
print("Proximity Sensor: {}, and handle: {}".format(name, proxSensorHandles[i]))
proxSensorIndex.append(i)
break
else:
print ('Fail to get proximity sensors!!!')
# light
lightIndex = []
rc = vrep.simx_return_initialize_error_flag
while rc != vrep.simx_return_ok:
rc, lightHandles, intData, floatData, lightNames = vrep.simxGetObjectGroupData(self.clientID,vrep.sim_object_light_type, dataType, self._def_op_mode)
if rc==vrep.simx_return_ok:
                print ('Get Light Success!!!!!') # display the reply from V-REP (in this case, just a string)
for i, name in enumerate(lightNames):
if "_node#" in name:
print("Light: {}, and handle: {}".format(name, lightHandles[i]))
lightIndex.append(i)
break
else:
print ('Fail to get lights!!!')
# joint
jointIndex = []
rc = vrep.simx_return_initialize_error_flag
while rc != vrep.simx_return_ok:
rc, jointHandles, intData, floatData, jointNames = vrep.simxGetObjectGroupData(self.clientID,vrep.sim_object_joint_type, dataType, self._def_op_mode)
if rc==vrep.simx_return_ok:
print ('Get Joint Success!!!!!') # display the reply from V-REP (in this case, just a string)
for i, name in enumerate(jointNames):
if "_node#" in name:
print("Joint: {}, and handle: {}".format(name, jointHandles[i]))
jointIndex.append(i)
break
else:
print ('Fail to get joints!!!')
# visitor targetPosition
visitorIndex = []
rc = vrep.simx_return_initialize_error_flag
while rc != vrep.simx_return_ok:
rc, visitorHandles, intData, floatData, visitorNames = vrep.simxGetObjectGroupData(self.clientID,vrep.sim_object_dummy_type, dataType, self._def_op_mode)
if rc==vrep.simx_return_ok:
print ('Get Visitor Success!!!!!') # display the reply from V-REP (in this case, just a string)
for i, name in enumerate(visitorNames):
if "TargetPosition_Visitor#" in name:
print("Visitor: {}, and handle: {}".format(name, visitorHandles[i]))
visitorIndex.append(i)
break
else:
print ('Fail to get visitors!!!')
# visitor body
visitorBodyIndex = []
rc = vrep.simx_return_initialize_error_flag
while rc != vrep.simx_return_ok:
rc, visitorBodyHandles, intData, floatData, visitorBodyNames = vrep.simxGetObjectGroupData(self.clientID,vrep.sim_object_shape_type, dataType, self._def_op_mode)
if rc==vrep.simx_return_ok:
print ('Get Visitor Body Success!!!!!') # display the reply from V-REP (in this case, just a string)
for i, name in enumerate(visitorBodyNames):
if "Body_Visitor#" in name:
print("Visitor body: {}, and handle: {}".format(name, visitorBodyHandles[i]))
visitorBodyIndex.append(i)
break
else:
print ('Fail to get visitors body!!!')
proxSensorHandles = np.array(proxSensorHandles)
proxSensorNames = np.array(proxSensorNames)
lightHandles = np.array(lightHandles)
lightNames = np.array(lightNames)
jointHandles = np.array(jointHandles)
jointNames = np.array(jointNames)
visitorHandles = np.array(visitorHandles)
visitorNames = np.array(visitorNames)
visitorBodyHandles = np.array(visitorBodyHandles)
visitorBodyNames = np.array(visitorBodyNames)
# All objects handels and names
self.proxSensorHandles = proxSensorHandles[proxSensorIndex]
self.proxSensorNames = proxSensorNames[proxSensorIndex]
self.lightHandles = lightHandles[lightIndex]
self.lightNames = lightNames[lightIndex]
self.jointHandles = jointHandles[jointIndex]
self.jointNames = jointNames[jointIndex]
self.visitorNames = visitorNames[visitorIndex]
self.visitorHandles = visitorHandles[visitorIndex]
self.visitorBodyNames = visitorBodyNames[visitorBodyIndex]
self.visitorBodyHandles = visitorBodyHandles[visitorBodyIndex]
def step_LAS(self, action):
"""
Take one step of action
Input: action
Output: observation, reward, done, info
"""
#
action = np.clip(action, self.act_min, self.act_max)
# split action for light and sma
action_smas = action[:self.smas_num]
action_lights_state = action[self.smas_num:self.smas_num+self.lights_num]
action_lights_state = action_lights_state.astype(int)
action_lights_color = action[self.smas_num+self.lights_num:]
# taking action
#start = time.time()
vrep.simxPauseCommunication(self.clientID,True) #temporarily halting the communication thread
self._set_all_joint_position(action_smas)
self._set_all_light_state(action_lights_state,action_lights_color)
vrep.simxPauseCommunication(self.clientID,False) #and evaluated at the same time
#print("Action running time: {}".format(time.time()-start))
# observe
#start = time.time()
self._self_observe()
#print("Observation running time: {}".format(time.time()-start))
        # calculate reward
self._reward()
done = False
return self.observation, self.reward, done, []
def step_visitor(self, position):
"""
        This interface is for changing the visitors' positions.
Input: position
Output: observation, reward, done, info
"""
#
position = np.clip(position,self.visitor_action_min, self.visitor_action_max)
vrep.simxPauseCommunication(self.clientID,True)
self._set_all_visitor_position(position)
vrep.simxPauseCommunication(self.clientID,False)
self._self_observe()
self._reward_visitor()
done = False
return self.observation, self.reward_visitor, done, []
def step_single_visitor(self, name, position):
"""
        This interface is for changing a single visitor's position.
Input: position
Output: observation, reward, done, info
"""
#
position = np.clip(position,self.single_visitor_action_min, self.single_visitor_action_max)
#vrep.simxPauseCommunication(self.clientID,True)
self._set_single_visitor_position(name, position)
#vrep.simxPauseCommunication(self.clientID,False)
self._self_observe()
self._reward_visitor()
done = False
return self.observation, self.reward_visitor, done, []
def step_red_light_excited_visitor(self, targetPositionName, bodyName, action):
"""
A specific interface for red excited visitor:
return observation:
light state: observation[:lightNum]
light color: observation[lightNum:lightNum * 4]
light position: observation[lightNum * 4:lightNum * 5]
visitor position: observation[lightNum*5:]
"""
move = action[0]
position = action[1:3] # we can leave z coordinate
#print("Set position:{}".format(position))
position = np.clip(position,self.single_visitor_action_min, self.single_visitor_action_max)
# if move == 1, move; otherwise don't move.
if move == 1:
#vrep.simxPauseCommunication(self.clientID,True)
#print("Set Position in Vrep: {}".format(position))
self._set_single_visitor_position(targetPositionName, position)
#vrep.simxPauseCommunication(self.clientID,False)
observation = self._self_observe_for_red_excited_visitor(bodyName)
#print("len(observation):{}".format(len(observation)))
reward = 0
done = False
return observation, reward, done, []
def _set_single_visitor_position(self, targetPositionName, position):
visitorIndex = np.where(self.visitorNames == targetPositionName)
if len(visitorIndex[0]) == 0:
print("Not found visitor: {}".format(targetPositionName))
else:
vrep.simxSetObjectPosition(self.clientID, self.visitorHandles[visitorIndex], -1, [position[0],position[1],0], self._set_visitor_op_mode)
def _get_single_visitor_body_position(self, bodyName):
"""
Give bodyName, return bodyPosition
"""
bodyPosition = np.zeros(3)
visitorBodyIndex = np.where(self.visitorBodyNames == bodyName)
if len(visitorBodyIndex[0]) == 0:
print("Not found visitor: {}".format(bodyName))
else:
res, bodyPosition = vrep.simxGetObjectPosition(self.clientID, self.visitorBodyHandles[visitorBodyIndex], -1, self._get_light_op_mode)
#print("Visitor position: {}".format(position))
return np.array(bodyPosition)
def _set_all_visitor_position(self, position):
visitorNum = len(self.visitorHandles)
for i in range(visitorNum):
vrep.simxSetObjectPosition(self.clientID, self.visitorHandles[i], -1, [position[i*2],position[i*2+1],0], self._set_visitor_op_mode)
def _set_all_joint_position(self, targetPosition):
jointNum = len(self.jointHandles)
for i in range(jointNum):
vrep.simxSetJointTargetPosition(self.clientID, self.jointHandles[i], targetPosition[i], self._set_joint_op_mode)
def _set_all_light_state(self, targetState, targetColor):
lightNum = len(self.lightHandles)
if len(targetState) != lightNum:
print("len(targetState) != lightNum")
# inner function: remote function call to set light state
def _set_light_state(clientID, name, handle, targetState, targetColor, opMode):
emptyBuff = bytearray()
res,retInts,retFloats,retStrings,retBuffer = vrep.simxCallScriptFunction(clientID,
name,
vrep.sim_scripttype_childscript,
'setLightStateAndColor',
[handle, targetState],targetColor,[],emptyBuff,
opMode)
if res != vrep.simx_return_ok:
warnings.warn("Remote function call: setLightStateAndColor fail in Class AnyLight.")
# inner function end
for i in range(lightNum):
_set_light_state(self.clientID, str(self.lightNames[i]), self.lightHandles[i], targetState[i], targetColor[i*3:(i+1)*3], self._set_light_op_mode)
def _reward(self):
""" calculate reward based on observation of prximity sensor"""
self.reward = np.mean(self.observation[:self.prox_sensor_num])
return self.reward
def | (self):
"""
Calculate reward for visitor
"""
self.reward_visitor = 0
return self.reward_visitor
def _self_observe(self):
"""
This observe function is for LAS:
proximity sensors
light state
light color
"""
proxStates, proxPosition = self._get_all_prox_data()
lightStates, lightDiffsePart, lightSpecularPart = self._get_all_light_data()
self.observation = np.concatenate((proxStates, lightStates, lightDiffsePart.flatten()))
return self.observation
def _self_observe_for_red_excited_visitor(self,bodyName):
"""
        This observe function is for visitors:
light state: observation[:lightNum]
light color: observation[lightNum:lightNum * 4]
light position: observation[lightNum * 4:lightNum * 5]
visitor position: observation[lightNum*5:]
"""
lightStates, lightDiffsePart, lightSpecularPart = self._get_all_light_data()
lightPositions = self._get_all_light_position()
visitorBodyPosition = self._get_single_visitor_body_position(bodyName)
self.obser_for_red_light_excited_visitor = np.concatenate((lightStates,
lightDiffsePart.flatten(),
lightPositions.flatten(),
visitorBodyPosition.flatten()))
#print("length self.obser_for_red_light_excited_visitor:{}".format(len(self.obser_for_red_light_excited_visitor)))
return self.obser_for_red_light_excited_visitor
def _get_all_prox_data(self):
"""
        Get all proximity sensor data
"""
proxSensorNum = len(self.proxSensorHandles)
proxStates = np.zeros(proxSensorNum)
proxPosition = np.zeros([proxSensorNum, 3])
for i in range(proxSensorNum):
code, proxStates[i], proxPosition[i,:], handle, snv = vrep.simxReadProximitySensor(self.clientID, self.proxSensorHandles[i], self._get_prox_op_mode)
return proxStates, proxPosition
def _get_all_light_data(self):
"""
Get all light data:
return:
lightStates, lightDiffsePart, lightSpecularPart
"""
lightNum = len(self.lightHandles)
#print("lightNum:{}".format(lightNum))
lightStates = np.zeros(lightNum)
lightDiffsePart = np.zeros([lightNum,3])
lightSpecularPart = np.zeros([lightNum,3])
# inner function to get light state and color
def _get_light_state_and_color(clientID, name , handle, op_mode):
emptyBuff = bytearray()
res,retInts,retFloats,retStrings,retBuffer=vrep.simxCallScriptFunction(clientID,
name,
vrep.sim_scripttype_childscript,
'getLightStateAndColor',
[handle],[],[],emptyBuff,
op_mode)
if res==vrep.simx_return_ok:
#print ('getLightStateAndColor works! ',retStrings[0]) # display the reply from V-REP (in this case, just a string)
lightState = retInts[0]
diffusePart = [retFloats[0],retFloats[1],retFloats[2]]
specularPart = retFloats[3],retFloats[4],retFloats[5]
return lightState, diffusePart, specularPart
else:
warnings.warn("Remote function call: getLightStateAndColor fail in Class AnyLight.")
return -1, [0,0,0], [0,0,0]
# inner function end
for i in range(lightNum):
lightStates[i], lightDiffsePart[i,:], lightSpecularPart[i,:] = _get_light_state_and_color(self.clientID, str(self.lightNames[i]), self.lightHandles[i], self._get_light_op_mode)
return lightStates, lightDiffsePart, lightSpecularPart
def _get_all_light_position(self):
"""
Get all lights position:
return:
lightPositions
"""
lightNum = self.lights_num
#print("_get_all_light_position lightNum:{}".format(lightNum))
lightPositions = np.zeros([lightNum, 3]) # 3: (x, y, z)
for i in range(lightNum):
res, lightPositions[i,:] = vrep.simxGetObjectPosition(self.clientID, self.lightHandles[i], -1, self._get_light_op_mode)
return lightPositions
def reset_env_for_LAS_red_light_excited_visitor(self, bodyName):
vrep.simxStartSimulation(self.clientID, self._def_op_mode)
observationForLAS = self._self_observe()
observationForRedLightExcitedVisitor = self._self_observe_for_red_excited_visitor(bodyName)
done = False
rewardLAS = 0
rewardVisitor = 0
info = []
return observationForLAS, observationForRedLightExcitedVisitor, rewardLAS, rewardVisitor, done, info
def reset(self):
#vrep.simxStopSimulation(self.clientID, self._def_op_mode)
vrep.simxStartSimulation(self.clientID, self._def_op_mode)
self._self_observe()
self._reward()
self._reward_visitor()
done = False
return self.observation, self.reward, self.reward_visitor, done
def destroy(self):
"""
Finish simulation and release connection to server.
"""
vrep.simxStopSimulation(self.clientID, self._def_op_mode)
vrep.simxFinish(self.clientID) | _reward_visitor |
pymunk_demo_platformer_11.py | """
Example of Pymunk Physics Engine Platformer
"""
import math
from typing import Optional
import arcade
SCREEN_TITLE = "PyMunk Platformer"
# How big are our image tiles?
SPRITE_IMAGE_SIZE = 128
# Scale sprites up or down
SPRITE_SCALING_PLAYER = 0.5
SPRITE_SCALING_TILES = 0.5
# Scaled sprite size for tiles
SPRITE_SIZE = int(SPRITE_IMAGE_SIZE * SPRITE_SCALING_PLAYER)
# Size of grid to show on screen, in number of tiles
SCREEN_GRID_WIDTH = 25
SCREEN_GRID_HEIGHT = 15
# Size of screen to show, in pixels
SCREEN_WIDTH = SPRITE_SIZE * SCREEN_GRID_WIDTH
SCREEN_HEIGHT = SPRITE_SIZE * SCREEN_GRID_HEIGHT
# --- Physics forces. Higher number, faster accelerating.
# Gravity
GRAVITY = 1500
# Damping - Amount of speed lost per second
DEFAULT_DAMPING = 1.0
PLAYER_DAMPING = 0.4
# Friction between objects
PLAYER_FRICTION = 1.0
WALL_FRICTION = 0.7
DYNAMIC_ITEM_FRICTION = 0.6
# Mass (defaults to 1)
PLAYER_MASS = 2.0
# Keep player from going too fast
PLAYER_MAX_HORIZONTAL_SPEED = 450
PLAYER_MAX_VERTICAL_SPEED = 1600
# Force applied while on the ground
PLAYER_MOVE_FORCE_ON_GROUND = 8000
# Force applied when moving left/right in the air
PLAYER_MOVE_FORCE_IN_AIR = 900
# Strength of a jump
PLAYER_JUMP_IMPULSE = 1800
# Close enough to not-moving to have the animation go to idle.
DEAD_ZONE = 0.1
# Constants used to track if the player is facing left or right
RIGHT_FACING = 0
LEFT_FACING = 1
# How many pixels to move before we change the texture in the walking animation
DISTANCE_TO_CHANGE_TEXTURE = 20
# How much force to put on the bullet
BULLET_MOVE_FORCE = 4500
# Mass of the bullet
BULLET_MASS = 0.1
# Make bullet less affected by gravity
BULLET_GRAVITY = 300
class PlayerSprite(arcade.Sprite):
""" Player Sprite """
def __init__(self):
""" Init """
# Let parent initialize
super().__init__()
# Set our scale
self.scale = SPRITE_SCALING_PLAYER
# Images from Kenney.nl's Character pack
# main_path = ":resources:images/animated_characters/female_adventurer/femaleAdventurer"
main_path = ":resources:images/animated_characters/female_person/femalePerson"
# main_path = ":resources:images/animated_characters/male_person/malePerson"
# main_path = ":resources:images/animated_characters/male_adventurer/maleAdventurer"
# main_path = ":resources:images/animated_characters/zombie/zombie"
# main_path = ":resources:images/animated_characters/robot/robot"
# Load textures for idle standing
self.idle_texture_pair = arcade.load_texture_pair(f"{main_path}_idle.png")
self.jump_texture_pair = arcade.load_texture_pair(f"{main_path}_jump.png")
self.fall_texture_pair = arcade.load_texture_pair(f"{main_path}_fall.png")
# Load textures for walking
self.walk_textures = []
for i in range(8):
texture = arcade.load_texture_pair(f"{main_path}_walk{i}.png")
self.walk_textures.append(texture)
# Set the initial texture
self.texture = self.idle_texture_pair[0]
# Hit box will be set based on the first image used.
self.hit_box = self.texture.hit_box_points
# Default to face-right
self.character_face_direction = RIGHT_FACING
# Index of our current texture
self.cur_texture = 0
# How far have we traveled horizontally since changing the texture
self.x_odometer = 0
def pymunk_moved(self, physics_engine, dx, dy, d_angle):
""" Handle being moved by the pymunk engine """
# Figure out if we need to face left or right
if dx < -DEAD_ZONE and self.character_face_direction == RIGHT_FACING:
self.character_face_direction = LEFT_FACING
elif dx > DEAD_ZONE and self.character_face_direction == LEFT_FACING:
self.character_face_direction = RIGHT_FACING
# Are we on the ground?
is_on_ground = physics_engine.is_on_ground(self)
# Add to the odometer how far we've moved
self.x_odometer += dx
# Jumping animation
if not is_on_ground:
if dy > DEAD_ZONE:
self.texture = self.jump_texture_pair[self.character_face_direction]
return
elif dy < -DEAD_ZONE:
self.texture = self.fall_texture_pair[self.character_face_direction]
return
# Idle animation
if abs(dx) <= DEAD_ZONE:
self.texture = self.idle_texture_pair[self.character_face_direction]
return
# Have we moved far enough to change the texture?
if abs(self.x_odometer) > DISTANCE_TO_CHANGE_TEXTURE:
# Reset the odometer
self.x_odometer = 0
# Advance the walking animation
self.cur_texture += 1
if self.cur_texture > 7:
self.cur_texture = 0
self.texture = self.walk_textures[self.cur_texture][self.character_face_direction]
class BulletSprite(arcade.SpriteSolidColor):
""" Bullet Sprite """
def pymunk_moved(self, physics_engine, dx, dy, d_angle):
""" Handle when the sprite is moved by the physics engine. """
# If the bullet falls below the screen, remove it
if self.center_y < -100:
self.remove_from_sprite_lists()
class GameWindow(arcade.Window):
""" Main Window """
def __init__(self, width, height, title):
""" Create the variables """
# Init the parent class
super().__init__(width, height, title)
# Player sprite
self.player_sprite: Optional[PlayerSprite] = None
# Sprite lists we need
self.player_list: Optional[arcade.SpriteList] = None
self.wall_list: Optional[arcade.SpriteList] = None
self.bullet_list: Optional[arcade.SpriteList] = None
self.item_list: Optional[arcade.SpriteList] = None
self.moving_sprites_list: Optional[arcade.SpriteList] = None
# Track the current state of what key is pressed
self.left_pressed: bool = False
self.right_pressed: bool = False
# Physics engine
        self.physics_engine: Optional[arcade.PymunkPhysicsEngine] = None
# Set background color
arcade.set_background_color(arcade.color.AMAZON)
def setup(self):
""" Set up everything with the game """
# Create the sprite lists
self.player_list = arcade.SpriteList()
self.bullet_list = arcade.SpriteList()
# Read in the tiled map
map_name = "pymunk_test_map.tmx"
my_map = arcade.tilemap.read_tmx(map_name)
# Read in the map layers
self.wall_list = arcade.tilemap.process_layer(my_map, 'Platforms', SPRITE_SCALING_TILES)
self.item_list = arcade.tilemap.process_layer(my_map, 'Dynamic Items', SPRITE_SCALING_TILES)
# Create player sprite
self.player_sprite = PlayerSprite()
# Set player location
grid_x = 1
grid_y = 1
self.player_sprite.center_x = SPRITE_SIZE * grid_x + SPRITE_SIZE / 2
self.player_sprite.center_y = SPRITE_SIZE * grid_y + SPRITE_SIZE / 2
# Add to player sprite list
self.player_list.append(self.player_sprite)
# Moving Sprite
self.moving_sprites_list = arcade.tilemap.process_layer(my_map,
'Moving Platforms',
SPRITE_SCALING_TILES)
# --- Pymunk Physics Engine Setup ---
# The default damping for every object controls the percent of velocity
# the object will keep each second. A value of 1.0 is no speed loss,
# 0.9 is 10% per second, 0.1 is 90% per second.
# For top-down games, this is basically the friction for moving objects.
# For platformers with gravity, this should probably be set to 1.0.
# Default value is 1.0 if not specified.
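        # For example (illustrative numbers only): with damping=0.9 an object moving
        # at 100 px/s keeps roughly 90 px/s after one second; with the 1.0 used here
        # no speed is lost to damping, only to friction, applied forces and gravity.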
damping = DEFAULT_DAMPING
# Set the gravity. (0, 0) is good for outer space and top-down.
gravity = (0, -GRAVITY)
# Create the physics engine
self.physics_engine = arcade.PymunkPhysicsEngine(damping=damping,
gravity=gravity)
def wall_hit_handler(bullet_sprite, _wall_sprite, _arbiter, _space, _data):
""" Called for bullet/wall collision """
bullet_sprite.remove_from_sprite_lists()
self.physics_engine.add_collision_handler("bullet", "wall", post_handler=wall_hit_handler)
def item_hit_handler(bullet_sprite, item_sprite, _arbiter, _space, _data):
""" Called for bullet/wall collision """
bullet_sprite.remove_from_sprite_lists()
item_sprite.remove_from_sprite_lists()
self.physics_engine.add_collision_handler("bullet", "item", post_handler=item_hit_handler)
# Add the player.
# For the player, we set the damping to a lower value, which increases
# the damping rate. This prevents the character from traveling too far
# after the player lets off the movement keys.
# Setting the moment to PymunkPhysicsEngine.MOMENT_INF prevents it from
# rotating.
# Friction normally goes between 0 (no friction) and 1.0 (high friction)
# Friction is between two objects in contact. It is important to remember
# in top-down games that friction moving along the 'floor' is controlled
# by damping.
self.physics_engine.add_sprite(self.player_sprite,
friction=PLAYER_FRICTION,
mass=PLAYER_MASS,
moment=arcade.PymunkPhysicsEngine.MOMENT_INF,
collision_type="player",
max_horizontal_velocity=PLAYER_MAX_HORIZONTAL_SPEED,
max_vertical_velocity=PLAYER_MAX_VERTICAL_SPEED)
# Create the walls.
# By setting the body type to PymunkPhysicsEngine.STATIC the walls can't
# move.
# Movable objects that respond to forces are PymunkPhysicsEngine.DYNAMIC
# PymunkPhysicsEngine.KINEMATIC objects will move, but are assumed to be
# repositioned by code and don't respond to physics forces.
# Dynamic is default.
self.physics_engine.add_sprite_list(self.wall_list,
friction=WALL_FRICTION,
collision_type="wall",
body_type=arcade.PymunkPhysicsEngine.STATIC)
# Create the items
self.physics_engine.add_sprite_list(self.item_list,
friction=DYNAMIC_ITEM_FRICTION,
collision_type="item")
# Add kinematic sprites
self.physics_engine.add_sprite_list(self.moving_sprites_list,
body_type=arcade.PymunkPhysicsEngine.KINEMATIC)
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed. """
if key == arcade.key.LEFT:
self.left_pressed = True
elif key == arcade.key.RIGHT:
self.right_pressed = True
elif key == arcade.key.UP:
# find out if player is standing on ground
if self.physics_engine.is_on_ground(self.player_sprite):
# She is! Go ahead and jump
impulse = (0, PLAYER_JUMP_IMPULSE)
self.physics_engine.apply_impulse(self.player_sprite, impulse)
def on_key_release(self, key, modifiers):
"""Called when the user releases a key. """
if key == arcade.key.LEFT:
self.left_pressed = False
elif key == arcade.key.RIGHT:
self.right_pressed = False
def on_mouse_press(self, x, y, button, modifiers):
""" Called whenever the mouse button is clicked. """
bullet = BulletSprite(20, 5, arcade.color.DARK_YELLOW)
self.bullet_list.append(bullet)
# Position the bullet at the player's current location
start_x = self.player_sprite.center_x
start_y = self.player_sprite.center_y
bullet.position = self.player_sprite.position
# Get from the mouse the destination location for the bullet
# IMPORTANT! If you have a scrolling screen, you will also need
# to add in self.view_bottom and self.view_left.
dest_x = x
dest_y = y
# Do math to calculate how to get the bullet to the destination.
        # Calculate the angle in radians between the start point
        # and the end point. This is the angle the bullet will travel.
x_diff = dest_x - start_x
y_diff = dest_y - start_y
angle = math.atan2(y_diff, x_diff)
# What is the 1/2 size of this sprite, so we can figure out how far
# away to spawn the bullet
size = max(self.player_sprite.width, self.player_sprite.height) / 2
        # Use the angle to spawn the bullet away from the player in the proper direction
bullet.center_x += size * math.cos(angle)
bullet.center_y += size * math.sin(angle)
# Set angle of bullet
bullet.angle = math.degrees(angle)
# Gravity to use for the bullet
# If we don't use custom gravity, bullet drops too fast, or we have
# to make it go too fast.
# Force is in relation to bullet's angle.
bullet_gravity = (0, -BULLET_GRAVITY)
# Add the sprite. This needs to be done AFTER setting the fields above.
self.physics_engine.add_sprite(bullet,
mass=BULLET_MASS,
damping=1.0,
friction=0.6,
collision_type="bullet",
gravity=bullet_gravity,
elasticity=0.9)
# Add force to bullet
force = (BULLET_MOVE_FORCE, 0)
self.physics_engine.apply_force(bullet, force)
def | (self, delta_time):
""" Movement and game logic """
is_on_ground = self.physics_engine.is_on_ground(self.player_sprite)
# Update player forces based on keys pressed
if self.left_pressed and not self.right_pressed:
# Create a force to the left. Apply it.
if is_on_ground:
force = (-PLAYER_MOVE_FORCE_ON_GROUND, 0)
else:
force = (-PLAYER_MOVE_FORCE_IN_AIR, 0)
self.physics_engine.apply_force(self.player_sprite, force)
# Set friction to zero for the player while moving
self.physics_engine.set_friction(self.player_sprite, 0)
elif self.right_pressed and not self.left_pressed:
# Create a force to the right. Apply it.
if is_on_ground:
force = (PLAYER_MOVE_FORCE_ON_GROUND, 0)
else:
force = (PLAYER_MOVE_FORCE_IN_AIR, 0)
self.physics_engine.apply_force(self.player_sprite, force)
# Set friction to zero for the player while moving
self.physics_engine.set_friction(self.player_sprite, 0)
else:
# Player's feet are not moving. Therefore up the friction so we stop.
self.physics_engine.set_friction(self.player_sprite, 1.0)
# Move items in the physics engine
self.physics_engine.step()
# For each moving sprite, see if we've reached a boundary and need to
# reverse course.
for moving_sprite in self.moving_sprites_list:
if moving_sprite.boundary_right and \
moving_sprite.change_x > 0 and \
moving_sprite.right > moving_sprite.boundary_right:
moving_sprite.change_x *= -1
elif moving_sprite.boundary_left and \
moving_sprite.change_x < 0 and \
                    moving_sprite.left < moving_sprite.boundary_left:
moving_sprite.change_x *= -1
if moving_sprite.boundary_top and \
moving_sprite.change_y > 0 and \
moving_sprite.top > moving_sprite.boundary_top:
moving_sprite.change_y *= -1
elif moving_sprite.boundary_bottom and \
moving_sprite.change_y < 0 and \
moving_sprite.bottom < moving_sprite.boundary_bottom:
moving_sprite.change_y *= -1
# Figure out and set our moving platform velocity.
            # Pymunk expects velocity in pixels per second. If we instead have
            # pixels per frame, we need to convert.
velocity = (moving_sprite.change_x * 1 / delta_time, moving_sprite.change_y * 1 / delta_time)
self.physics_engine.set_velocity(moving_sprite, velocity)
def on_draw(self):
""" Draw everything """
arcade.start_render()
self.wall_list.draw()
self.moving_sprites_list.draw()
self.bullet_list.draw()
self.item_list.draw()
self.player_list.draw()
def main():
""" Main method """
window = GameWindow(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
| on_update |
test.pb.micro.go | // Code generated by protoc-gen-micro. DO NOT EDIT.
// source: server/grpc/proto/test.proto
package test
import (
fmt "fmt"
proto "github.com/golang/protobuf/proto"
_ "google.golang.org/genproto/googleapis/api/annotations"
math "math"
)
import (
context "context"
api "github.com/go-iot-platform/go-micro/api"
client "github.com/go-iot-platform/go-micro/client"
server "github.com/go-iot-platform/go-micro/server"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
// Reference imports to suppress errors if they are not otherwise used.
var _ api.Endpoint
var _ context.Context
var _ client.Option
var _ server.Option
// Api Endpoints for Test service
func NewTestEndpoints() []*api.Endpoint {
return []*api.Endpoint{
&api.Endpoint{
Name: "Test.Call",
Path: []string{"/api/v0/test/call/{uuid}"},
Method: []string{"POST"},
Body: "*",
Handler: "rpc",
},
&api.Endpoint{
Name: "Test.CallPcre",
Path: []string{"^/api/v0/test/call/pcre/?$"},
Method: []string{"POST"},
Body: "*",
Handler: "rpc",
},
&api.Endpoint{
Name: "Test.CallPcreInvalid",
Path: []string{"^/api/v0/test/call/pcre/invalid/?"},
Method: []string{"POST"},
Body: "*",
Handler: "rpc",
}, |
type TestService interface {
Call(ctx context.Context, in *Request, opts ...client.CallOption) (*Response, error)
CallPcre(ctx context.Context, in *Request, opts ...client.CallOption) (*Response, error)
CallPcreInvalid(ctx context.Context, in *Request, opts ...client.CallOption) (*Response, error)
}
type testService struct {
c client.Client
name string
}
func NewTestService(name string, c client.Client) TestService {
return &testService{
c: c,
name: name,
}
}
func (c *testService) Call(ctx context.Context, in *Request, opts ...client.CallOption) (*Response, error) {
req := c.c.NewRequest(c.name, "Test.Call", in)
out := new(Response)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *testService) CallPcre(ctx context.Context, in *Request, opts ...client.CallOption) (*Response, error) {
req := c.c.NewRequest(c.name, "Test.CallPcre", in)
out := new(Response)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *testService) CallPcreInvalid(ctx context.Context, in *Request, opts ...client.CallOption) (*Response, error) {
req := c.c.NewRequest(c.name, "Test.CallPcreInvalid", in)
out := new(Response)
err := c.c.Call(ctx, req, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
// Server API for Test service
type TestHandler interface {
Call(context.Context, *Request, *Response) error
CallPcre(context.Context, *Request, *Response) error
CallPcreInvalid(context.Context, *Request, *Response) error
}
func RegisterTestHandler(s server.Server, hdlr TestHandler, opts ...server.HandlerOption) error {
type test interface {
Call(ctx context.Context, in *Request, out *Response) error
CallPcre(ctx context.Context, in *Request, out *Response) error
CallPcreInvalid(ctx context.Context, in *Request, out *Response) error
}
type Test struct {
test
}
h := &testHandler{hdlr}
opts = append(opts, api.WithEndpoint(&api.Endpoint{
Name: "Test.Call",
Path: []string{"/api/v0/test/call/{uuid}"},
Method: []string{"POST"},
Body: "*",
Handler: "rpc",
}))
opts = append(opts, api.WithEndpoint(&api.Endpoint{
Name: "Test.CallPcre",
Path: []string{"^/api/v0/test/call/pcre/?$"},
Method: []string{"POST"},
Body: "*",
Handler: "rpc",
}))
opts = append(opts, api.WithEndpoint(&api.Endpoint{
Name: "Test.CallPcreInvalid",
Path: []string{"^/api/v0/test/call/pcre/invalid/?"},
Method: []string{"POST"},
Body: "*",
Handler: "rpc",
}))
return s.Handle(s.NewHandler(&Test{h}, opts...))
}
type testHandler struct {
TestHandler
}
func (h *testHandler) Call(ctx context.Context, in *Request, out *Response) error {
return h.TestHandler.Call(ctx, in, out)
}
func (h *testHandler) CallPcre(ctx context.Context, in *Request, out *Response) error {
return h.TestHandler.CallPcre(ctx, in, out)
}
func (h *testHandler) CallPcreInvalid(ctx context.Context, in *Request, out *Response) error {
return h.TestHandler.CallPcreInvalid(ctx, in, out)
} | }
}
// Client API for Test service |
color.test.ts | import { toStyledTag } from '../color';
test('Test toStyledTag no match', () => {
const res = toStyledTag('abc123');
expect(res.style).toEqual({
backgroundColor: '#ffffff',
border: '1px solid #575757',
color: '#000000',
});
}); | backgroundColor: '#905994',
border: '1px solid #7a537d',
color: '#fff',
},
});
expect(res.style).toEqual({
backgroundColor: '#905994',
border: '1px solid #7a537d',
color: '#fff',
});
}); |
test('Test toStyledTag', () => {
const res = toStyledTag('integrations', {
integrations: { |
config_test.go | package config
import (
"github.com/stretchr/testify/require"
"os"
"testing"
)
func TestGetPath(t *testing.T) {
path := GetPath()
_, err := os.Stat(path)
require.NoError(t, err) | conf, err := Load(path)
require.NoError(t, err)
require.NotNil(t, conf)
} | }
func TestLoad(t *testing.T) {
path := GetPath() |
test_urlfield.py | from django.core.exceptions import ValidationError
from django.forms import URLField
from django.test import SimpleTestCase
from . import FormFieldAssertionsMixin
class URLFieldTest(FormFieldAssertionsMixin, SimpleTestCase):
def test_urlfield_1(self):
f = URLField()
self.assertWidgetRendersTo(f, '<input type="url" name="f" id="id_f" required>')
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean("")
with self.assertRaisesMessage(ValidationError, "'This field is required.'"):
f.clean(None)
self.assertEqual("http://localhost", f.clean("http://localhost"))
self.assertEqual("http://example.com", f.clean("http://example.com"))
self.assertEqual("http://example.com.", f.clean("http://example.com."))
self.assertEqual("http://www.example.com", f.clean("http://www.example.com"))
self.assertEqual(
"http://www.example.com:8000/test",
f.clean("http://www.example.com:8000/test"),
)
self.assertEqual(
"http://valid-with-hyphens.com", f.clean("valid-with-hyphens.com")
)
self.assertEqual("http://subdomain.domain.com", f.clean("subdomain.domain.com"))
self.assertEqual("http://200.8.9.10", f.clean("http://200.8.9.10"))
self.assertEqual(
"http://200.8.9.10:8000/test", f.clean("http://200.8.9.10:8000/test")
)
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("foo")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://example")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://example.")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("com.")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean(".")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://.com")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://invalid-.com")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://-invalid.com")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://inv-.alid-.com")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://inv-.-alid.com")
self.assertEqual(
"http://valid-----hyphens.com", f.clean("http://valid-----hyphens.com")
)
self.assertEqual(
"http://some.idn.xyz\xe4\xf6\xfc\xdfabc.domain.com:123/blah",
f.clean("http://some.idn.xyzäöüßabc.domain.com:123/blah"),
)
self.assertEqual(
"http://www.example.com/s/http://code.djangoproject.com/ticket/13804",
f.clean("www.example.com/s/http://code.djangoproject.com/ticket/13804"),
)
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("[a")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://[a")
def test_url_regex_ticket11198(self):
f = URLField()
# hangs "forever" if catastrophic backtracking in ticket:#11198 not fixed
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://%s" % ("X" * 200,))
# a second test, to make sure the problem is really addressed, even on
# domains that don't fail the domain label length check in the regex
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://%s" % ("X" * 60,))
def test_urlfield_2(self):
f = URLField(required=False)
self.assertEqual("", f.clean(""))
self.assertEqual("", f.clean(None))
self.assertEqual("http://example.com", f.clean("http://example.com"))
self.assertEqual("http://www.example.com", f.clean("http://www.example.com"))
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("foo")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://example")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://example.")
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean("http://.com")
def test_urlfield_5(self):
f = URLField(min_length=15, max_length=20)
self.assertWidgetRendersTo(
f,
'<input id="id_f" type="url" name="f" maxlength="20" minlength="15" required>',
)
with self.assertRaisesMessage(
ValidationError,
"'Ensure this value has at least 15 characters (it has 12).'",
):
f.clean("http://f.com")
self.assertEqual("http://example.com", f.clean("http://example.com"))
with self.assertRaisesMessage(
ValidationError,
"'Ensure this value has at most 20 characters (it has 37).'",
):
f.clean("http://abcdefghijklmnopqrstuvwxyz.com")
def test_urlfield_6(self):
f = URLField(required=False)
self.assertEqual("http://example.com", f.clean("example.com"))
self.assertEqual("", f.clean(""))
self.assertEqual("https://example.com", f.clean("https://example.com"))
def test_urlfield_7(self):
f = URLField()
self.assertEqual("http://example.com", f.clean("http://example.com"))
self.assertEqual("http://example.com/test", f.clean("http://example.com/test"))
self.assertEqual(
"http://example.com?some_param=some_value",
f.clean("http://example.com?some_param=some_value"),
)
| urls = (
"http://עברית.idn.icann.org/",
"http://sãopaulo.com/",
"http://sãopaulo.com.br/",
"http://пример.испытание/",
"http://مثال.إختبار/",
"http://例子.测试/",
"http://例子.測試/",
"http://उदाहरण.परीक्षा/",
"http://例え.テスト/",
"http://مثال.آزمایشی/",
"http://실례.테스트/",
"http://العربية.idn.icann.org/",
)
for url in urls:
with self.subTest(url=url):
# Valid IDN
self.assertEqual(url, f.clean(url))
def test_urlfield_10(self):
"""URLField correctly validates IPv6 (#18779)."""
f = URLField()
urls = (
"http://[12:34::3a53]/",
"http://[a34:9238::]:8080/",
)
for url in urls:
with self.subTest(url=url):
self.assertEqual(url, f.clean(url))
def test_urlfield_not_string(self):
f = URLField(required=False)
with self.assertRaisesMessage(ValidationError, "'Enter a valid URL.'"):
f.clean(23)
def test_urlfield_normalization(self):
f = URLField()
self.assertEqual(f.clean("http://example.com/ "), "http://example.com/")
def test_urlfield_strip_on_none_value(self):
f = URLField(required=False, empty_value=None)
self.assertIsNone(f.clean(""))
self.assertIsNone(f.clean(None))
def test_urlfield_unable_to_set_strip_kwarg(self):
msg = "__init__() got multiple values for keyword argument 'strip'"
with self.assertRaisesMessage(TypeError, msg):
URLField(strip=False) | def test_urlfield_9(self):
f = URLField() |
yopcontext.js | /***********************************************************************
* app-src/js/yopcontext.js
* YeAPF 0.8.58-39 built on 2017-06-12 17:18 (-3 DST)
* Copyright (C) 2004-2017 Esteban Daniel Dortta - [email protected]
* 2017-06-12 15:27:01 (-3 DST)
* First Version (C) 2014 - esteban daniel dortta - [email protected]
*
 * This is a set of functions that help to recognize the operational context
**********************************************************************/
//# sourceURL=app-src/js/yopcontext.js
function | () {
/* http://msdn.microsoft.com/en-us/library/ms537509(v=vs.85).aspx
* Returns the version of Internet Explorer or a -1
* (indicating the use of another browser).
*/
var rv = -1; // Return value assumes failure.
if (navigator.appName == 'Microsoft Internet Explorer')
{
var ua = navigator.userAgent;
var re = new RegExp("MSIE ([0-9]{1,}[\.0-9]{0,})");
if (re.exec(ua) != null)
rv = parseFloat( RegExp.$1 );
}
return rv;
}
function isInternetExplorer() {
return (getInternetExplorerVersion() >= 0);
};
function getAndroidVersion(ua) {
ua = (ua || navigator.userAgent).toLowerCase();
var match = ua.match(/android\s([0-9\.]*)/);
return match ? match[1] : false;
};
function isOnMobile() {
var ret=false;
if (typeof mosync != 'undefined') {
ret = mosync.isAndroid || mosync.isIOS || mosync.isWindowsPhone;
} else
ret=/Android|webOS|iPhone|iPad|iPod|BlackBerry|IEMobile|Opera Mini/i.test(navigator.userAgent);
return ret;
};
| getInternetExplorerVersion |
add_clusterdeprovisionrequest.go | package controller
import (
"github.com/openshift/hive/pkg/controller/clusterdeprovisionrequest"
) | AddToManagerFuncs = append(AddToManagerFuncs, clusterdeprovisionrequest.Add)
} |
func init() {
// AddToManagerFuncs is a list of functions to create controllers and add them to a manager. |
viper_test.go | // Copyright © 2014 Steve Francia <[email protected]>.
//
// Use of this source code is governed by an MIT-style
// license that can be found in the LICENSE file.
package viper
import (
"bytes"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"os/exec"
"path"
"reflect"
"runtime"
"sort"
"strings"
"sync"
"testing"
"time"
"github.com/fsnotify/fsnotify"
"github.com/mitchellh/mapstructure"
"github.com/spf13/afero"
"github.com/spf13/cast"
"github.com/spf13/pflag"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
var yamlExample = []byte(`Hacker: true
name: steve
hobbies:
- skateboarding
- snowboarding
- go
clothing:
jacket: leather
trousers: denim
pants:
size: large
age: 35
eyes : brown
beard: true
`)
var yamlExampleWithExtras = []byte(`Existing: true
Bogus: true
`)
type testUnmarshalExtra struct {
Existing bool
}
var tomlExample = []byte(`
title = "TOML Example"
[owner]
organization = "MongoDB"
Bio = "MongoDB Chief Developer Advocate & Hacker at Large"
dob = 1979-05-27T07:32:00Z # First class dates? Why not?`)
var jsonExample = []byte(`{
"id": "0001",
"type": "donut",
"name": "Cake",
"ppu": 0.55,
"batters": {
"batter": [
{ "type": "Regular" },
{ "type": "Chocolate" },
{ "type": "Blueberry" },
{ "type": "Devil's Food" }
]
}
}`)
var hclExample = []byte(`
id = "0001"
type = "donut"
name = "Cake"
ppu = 0.55
foos {
foo {
key = 1
}
foo {
key = 2
}
foo {
key = 3
}
foo {
key = 4
}
}`)
var propertiesExample = []byte(`
p_id: 0001
p_type: donut
p_name: Cake
p_ppu: 0.55
p_batters.batter.type: Regular
`)
var remoteExample = []byte(`{
"id":"0002",
"type":"cronut",
"newkey":"remote"
}`)
func initConfigs() {
Reset()
var r io.Reader
SetConfigType("yaml")
r = bytes.NewReader(yamlExample)
unmarshalReader(r, v.config)
SetConfigType("json")
r = bytes.NewReader(jsonExample)
unmarshalReader(r, v.config)
SetConfigType("hcl")
r = bytes.NewReader(hclExample)
unmarshalReader(r, v.config)
SetConfigType("properties")
r = bytes.NewReader(propertiesExample)
unmarshalReader(r, v.config)
SetConfigType("toml")
r = bytes.NewReader(tomlExample)
unmarshalReader(r, v.config)
SetConfigType("json")
remote := bytes.NewReader(remoteExample)
unmarshalReader(remote, v.kvstore)
}
func initConfig(typ, config string) {
Reset()
SetConfigType(typ)
r := strings.NewReader(config)
if err := unmarshalReader(r, v.config); err != nil {
panic(err)
}
}
func initYAML() {
initConfig("yaml", string(yamlExample))
}
func initJSON() {
Reset()
SetConfigType("json")
r := bytes.NewReader(jsonExample)
unmarshalReader(r, v.config)
}
func initProperties() {
Reset()
SetConfigType("properties")
r := bytes.NewReader(propertiesExample)
unmarshalReader(r, v.config)
}
func initTOML() {
Reset()
SetConfigType("toml")
r := bytes.NewReader(tomlExample)
unmarshalReader(r, v.config)
}
func initHcl() {
Reset()
SetConfigType("hcl")
r := bytes.NewReader(hclExample)
unmarshalReader(r, v.config)
}
// make directories for testing
func initDirs(t *testing.T) (string, string, func()) {
var (
testDirs = []string{`a a`, `b`, `c\c`, `D_`}
config = `improbable`
)
root, err := ioutil.TempDir("", "")
cleanup := true
defer func() {
if cleanup {
os.Chdir("..")
os.RemoveAll(root)
}
}()
assert.Nil(t, err)
err = os.Chdir(root)
assert.Nil(t, err)
for _, dir := range testDirs {
err = os.Mkdir(dir, 0750)
assert.Nil(t, err)
err = ioutil.WriteFile(
path.Join(dir, config+".toml"),
[]byte("key = \"value is "+dir+"\"\n"),
0640)
assert.Nil(t, err)
}
cleanup = false
return root, config, func() {
os.Chdir("..")
os.RemoveAll(root)
}
}
//stubs for PFlag Values
type stringValue string
func newStringValue(val string, p *string) *stringValue {
*p = val
return (*stringValue)(p)
}
func (s *stringValue) Set(val string) error {
*s = stringValue(val)
return nil
}
func (s *stringValue) Type() string {
return "string"
}
func (s *stringValue) String() string {
return fmt.Sprintf("%s", *s)
}
func TestBasics(t *testing.T) {
SetConfigFile("/tmp/config.yaml")
filename, err := v.getConfigFile()
assert.Equal(t, "/tmp/config.yaml", filename)
assert.NoError(t, err)
}
func TestDefault(t *testing.T) {
SetDefault("age", 45)
assert.Equal(t, 45, Get("age"))
SetDefault("clothing.jacket", "slacks")
assert.Equal(t, "slacks", Get("clothing.jacket"))
SetConfigType("yaml")
err := ReadConfig(bytes.NewBuffer(yamlExample))
assert.NoError(t, err)
assert.Equal(t, "leather", Get("clothing.jacket"))
}
func TestUnmarshaling(t *testing.T) {
SetConfigType("yaml")
r := bytes.NewReader(yamlExample)
unmarshalReader(r, v.config)
assert.True(t, InConfig("name"))
assert.False(t, InConfig("state"))
assert.Equal(t, "steve", Get("name"))
assert.Equal(t, []interface{}{"skateboarding", "snowboarding", "go"}, Get("hobbies"))
assert.Equal(t, map[string]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[string]interface{}{"size": "large"}}, Get("clothing"))
assert.Equal(t, 35, Get("age"))
}
func TestUnmarshalExact(t *testing.T) {
vip := New()
target := &testUnmarshalExtra{}
vip.SetConfigType("yaml")
r := bytes.NewReader(yamlExampleWithExtras)
vip.ReadConfig(r)
err := vip.UnmarshalExact(target)
if err == nil {
t.Fatal("UnmarshalExact should error when populating a struct from a conf that contains unused fields")
}
}
func TestOverrides(t *testing.T) { |
func TestDefaultPost(t *testing.T) {
assert.NotEqual(t, "NYC", Get("state"))
SetDefault("state", "NYC")
assert.Equal(t, "NYC", Get("state"))
}
func TestAliases(t *testing.T) {
RegisterAlias("years", "age")
assert.Equal(t, 40, Get("years"))
Set("years", 45)
assert.Equal(t, 45, Get("age"))
}
func TestAliasInConfigFile(t *testing.T) {
// the config file specifies "beard". If we make this an alias for
// "hasbeard", we still want the old config file to work with beard.
RegisterAlias("beard", "hasbeard")
assert.Equal(t, true, Get("hasbeard"))
Set("hasbeard", false)
assert.Equal(t, false, Get("beard"))
}
func TestYML(t *testing.T) {
initYAML()
assert.Equal(t, "steve", Get("name"))
}
func TestJSON(t *testing.T) {
initJSON()
assert.Equal(t, "0001", Get("id"))
}
func TestProperties(t *testing.T) {
initProperties()
assert.Equal(t, "0001", Get("p_id"))
}
func TestTOML(t *testing.T) {
initTOML()
assert.Equal(t, "TOML Example", Get("title"))
}
func TestHCL(t *testing.T) {
initHcl()
assert.Equal(t, "0001", Get("id"))
assert.Equal(t, 0.55, Get("ppu"))
assert.Equal(t, "donut", Get("type"))
assert.Equal(t, "Cake", Get("name"))
Set("id", "0002")
assert.Equal(t, "0002", Get("id"))
assert.NotEqual(t, "cronut", Get("type"))
}
func TestRemotePrecedence(t *testing.T) {
initJSON()
remote := bytes.NewReader(remoteExample)
assert.Equal(t, "0001", Get("id"))
unmarshalReader(remote, v.kvstore)
assert.Equal(t, "0001", Get("id"))
assert.NotEqual(t, "cronut", Get("type"))
assert.Equal(t, "remote", Get("newkey"))
Set("newkey", "newvalue")
assert.NotEqual(t, "remote", Get("newkey"))
assert.Equal(t, "newvalue", Get("newkey"))
Set("newkey", "remote")
}
func TestEnv(t *testing.T) {
initJSON()
BindEnv("id")
BindEnv("f", "FOOD")
os.Setenv("ID", "13")
os.Setenv("FOOD", "apple")
os.Setenv("NAME", "crunk")
assert.Equal(t, "13", Get("id"))
assert.Equal(t, "apple", Get("f"))
assert.Equal(t, "Cake", Get("name"))
AutomaticEnv()
assert.Equal(t, "crunk", Get("name"))
}
func TestEmptyEnv(t *testing.T) {
initJSON()
BindEnv("type") // Empty environment variable
BindEnv("name") // Bound, but not set environment variable
os.Clearenv()
os.Setenv("TYPE", "")
assert.Equal(t, "donut", Get("type"))
assert.Equal(t, "Cake", Get("name"))
}
func TestEmptyEnv_Allowed(t *testing.T) {
initJSON()
AllowEmptyEnv(true)
BindEnv("type") // Empty environment variable
BindEnv("name") // Bound, but not set environment variable
os.Clearenv()
os.Setenv("TYPE", "")
assert.Equal(t, "", Get("type"))
assert.Equal(t, "Cake", Get("name"))
}
func TestEnvPrefix(t *testing.T) {
initJSON()
SetEnvPrefix("foo") // will be uppercased automatically
BindEnv("id")
BindEnv("f", "FOOD") // not using prefix
os.Setenv("FOO_ID", "13")
os.Setenv("FOOD", "apple")
os.Setenv("FOO_NAME", "crunk")
assert.Equal(t, "13", Get("id"))
assert.Equal(t, "apple", Get("f"))
assert.Equal(t, "Cake", Get("name"))
AutomaticEnv()
assert.Equal(t, "crunk", Get("name"))
}
func TestAutoEnv(t *testing.T) {
Reset()
AutomaticEnv()
os.Setenv("FOO_BAR", "13")
assert.Equal(t, "13", Get("foo_bar"))
}
func TestAutoEnvWithPrefix(t *testing.T) {
Reset()
AutomaticEnv()
SetEnvPrefix("Baz")
os.Setenv("BAZ_BAR", "13")
assert.Equal(t, "13", Get("bar"))
}
func TestSetEnvKeyReplacer(t *testing.T) {
Reset()
AutomaticEnv()
os.Setenv("REFRESH_INTERVAL", "30s")
replacer := strings.NewReplacer("-", "_")
SetEnvKeyReplacer(replacer)
assert.Equal(t, "30s", Get("refresh-interval"))
}
func TestAllKeys(t *testing.T) {
initConfigs()
ks := sort.StringSlice{"title", "newkey", "owner.organization", "owner.dob", "owner.bio", "name", "beard", "ppu", "batters.batter", "hobbies", "clothing.jacket", "clothing.trousers", "clothing.pants.size", "age", "hacker", "id", "type", "eyes", "p_id", "p_ppu", "p_batters.batter.type", "p_type", "p_name", "foos"}
dob, _ := time.Parse(time.RFC3339, "1979-05-27T07:32:00Z")
all := map[string]interface{}{"owner": map[string]interface{}{"organization": "MongoDB", "bio": "MongoDB Chief Developer Advocate & Hacker at Large", "dob": dob}, "title": "TOML Example", "ppu": 0.55, "eyes": "brown", "clothing": map[string]interface{}{"trousers": "denim", "jacket": "leather", "pants": map[string]interface{}{"size": "large"}}, "id": "0001", "batters": map[string]interface{}{"batter": []interface{}{map[string]interface{}{"type": "Regular"}, map[string]interface{}{"type": "Chocolate"}, map[string]interface{}{"type": "Blueberry"}, map[string]interface{}{"type": "Devil's Food"}}}, "hacker": true, "beard": true, "hobbies": []interface{}{"skateboarding", "snowboarding", "go"}, "age": 35, "type": "donut", "newkey": "remote", "name": "Cake", "p_id": "0001", "p_ppu": "0.55", "p_name": "Cake", "p_batters": map[string]interface{}{"batter": map[string]interface{}{"type": "Regular"}}, "p_type": "donut", "foos": []map[string]interface{}{map[string]interface{}{"foo": []map[string]interface{}{map[string]interface{}{"key": 1}, map[string]interface{}{"key": 2}, map[string]interface{}{"key": 3}, map[string]interface{}{"key": 4}}}}}
var allkeys sort.StringSlice
allkeys = AllKeys()
allkeys.Sort()
ks.Sort()
assert.Equal(t, ks, allkeys)
assert.Equal(t, all, AllSettings())
}
func TestAllKeysWithEnv(t *testing.T) {
v := New()
// bind and define environment variables (including a nested one)
v.BindEnv("id")
v.BindEnv("foo.bar")
v.SetEnvKeyReplacer(strings.NewReplacer(".", "_"))
os.Setenv("ID", "13")
os.Setenv("FOO_BAR", "baz")
expectedKeys := sort.StringSlice{"id", "foo.bar"}
expectedKeys.Sort()
keys := sort.StringSlice(v.AllKeys())
keys.Sort()
assert.Equal(t, expectedKeys, keys)
}
func TestAliasesOfAliases(t *testing.T) {
Set("Title", "Checking Case")
RegisterAlias("Foo", "Bar")
RegisterAlias("Bar", "Title")
assert.Equal(t, "Checking Case", Get("FOO"))
}
func TestRecursiveAliases(t *testing.T) {
RegisterAlias("Baz", "Roo")
RegisterAlias("Roo", "baz")
}
func TestUnmarshal(t *testing.T) {
SetDefault("port", 1313)
Set("name", "Steve")
Set("duration", "1s1ms")
type config struct {
Port int
Name string
Duration time.Duration
}
var C config
err := Unmarshal(&C)
if err != nil {
t.Fatalf("unable to decode into struct, %v", err)
}
assert.Equal(t, &config{Name: "Steve", Port: 1313, Duration: time.Second + time.Millisecond}, &C)
Set("port", 1234)
err = Unmarshal(&C)
if err != nil {
t.Fatalf("unable to decode into struct, %v", err)
}
assert.Equal(t, &config{Name: "Steve", Port: 1234, Duration: time.Second + time.Millisecond}, &C)
}
func TestUnmarshalWithDecoderOptions(t *testing.T) {
Set("credentials", "{\"foo\":\"bar\"}")
opt := DecodeHook(mapstructure.ComposeDecodeHookFunc(
mapstructure.StringToTimeDurationHookFunc(),
mapstructure.StringToSliceHookFunc(","),
// Custom Decode Hook Function
func(rf reflect.Kind, rt reflect.Kind, data interface{}) (interface{}, error) {
if rf != reflect.String || rt != reflect.Map {
return data, nil
}
m := map[string]string{}
raw := data.(string)
if raw == "" {
return m, nil
}
return m, json.Unmarshal([]byte(raw), &m)
},
))
type config struct {
Credentials map[string]string
}
var C config
err := Unmarshal(&C, opt)
if err != nil {
t.Fatalf("unable to decode into struct, %v", err)
}
assert.Equal(t, &config{
Credentials: map[string]string{"foo": "bar"},
}, &C)
}
func TestBindPFlags(t *testing.T) {
v := New() // create independent Viper object
flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError)
var testValues = map[string]*string{
"host": nil,
"port": nil,
"endpoint": nil,
}
var mutatedTestValues = map[string]string{
"host": "localhost",
"port": "6060",
"endpoint": "/public",
}
for name := range testValues {
testValues[name] = flagSet.String(name, "", "test")
}
err := v.BindPFlags(flagSet)
if err != nil {
t.Fatalf("error binding flag set, %v", err)
}
flagSet.VisitAll(func(flag *pflag.Flag) {
flag.Value.Set(mutatedTestValues[flag.Name])
flag.Changed = true
})
for name, expected := range mutatedTestValues {
assert.Equal(t, expected, v.Get(name))
}
}
func TestBindPFlagsStringSlice(t *testing.T) {
for _, testValue := range []struct {
Expected []string
Value string
}{
{[]string{}, ""},
{[]string{"jeden"}, "jeden"},
{[]string{"dwa", "trzy"}, "dwa,trzy"},
{[]string{"cztery", "piec , szesc"}, "cztery,\"piec , szesc\""}} {
for _, changed := range []bool{true, false} {
v := New() // create independent Viper object
flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError)
flagSet.StringSlice("stringslice", testValue.Expected, "test")
flagSet.Visit(func(f *pflag.Flag) {
if len(testValue.Value) > 0 {
f.Value.Set(testValue.Value)
f.Changed = changed
}
})
err := v.BindPFlags(flagSet)
if err != nil {
t.Fatalf("error binding flag set, %v", err)
}
type TestStr struct {
StringSlice []string
}
val := &TestStr{}
if err := v.Unmarshal(val); err != nil {
t.Fatalf("%+#v cannot unmarshal: %s", testValue.Value, err)
}
assert.Equal(t, testValue.Expected, val.StringSlice)
}
}
}
func TestBindPFlag(t *testing.T) {
var testString = "testing"
var testValue = newStringValue(testString, &testString)
flag := &pflag.Flag{
Name: "testflag",
Value: testValue,
Changed: false,
}
BindPFlag("testvalue", flag)
assert.Equal(t, testString, Get("testvalue"))
flag.Value.Set("testing_mutate")
flag.Changed = true //hack for pflag usage
assert.Equal(t, "testing_mutate", Get("testvalue"))
}
func TestBoundCaseSensitivity(t *testing.T) {
assert.Equal(t, "brown", Get("eyes"))
BindEnv("eYEs", "TURTLE_EYES")
os.Setenv("TURTLE_EYES", "blue")
assert.Equal(t, "blue", Get("eyes"))
var testString = "green"
var testValue = newStringValue(testString, &testString)
flag := &pflag.Flag{
Name: "eyeballs",
Value: testValue,
Changed: true,
}
BindPFlag("eYEs", flag)
assert.Equal(t, "green", Get("eyes"))
}
func TestSizeInBytes(t *testing.T) {
input := map[string]uint{
"": 0,
"b": 0,
"12 bytes": 0,
"200000000000gb": 0,
"12 b": 12,
"43 MB": 43 * (1 << 20),
"10mb": 10 * (1 << 20),
"1gb": 1 << 30,
}
for str, expected := range input {
assert.Equal(t, expected, parseSizeInBytes(str), str)
}
}
func TestFindsNestedKeys(t *testing.T) {
initConfigs()
dob, _ := time.Parse(time.RFC3339, "1979-05-27T07:32:00Z")
Set("super", map[string]interface{}{
"deep": map[string]interface{}{
"nested": "value",
},
})
expected := map[string]interface{}{
"super": map[string]interface{}{
"deep": map[string]interface{}{
"nested": "value",
},
},
"super.deep": map[string]interface{}{
"nested": "value",
},
"super.deep.nested": "value",
"owner.organization": "MongoDB",
"batters.batter": []interface{}{
map[string]interface{}{
"type": "Regular",
},
map[string]interface{}{
"type": "Chocolate",
},
map[string]interface{}{
"type": "Blueberry",
},
map[string]interface{}{
"type": "Devil's Food",
},
},
"hobbies": []interface{}{
"skateboarding", "snowboarding", "go",
},
"title": "TOML Example",
"newkey": "remote",
"batters": map[string]interface{}{
"batter": []interface{}{
map[string]interface{}{
"type": "Regular",
},
map[string]interface{}{
"type": "Chocolate",
}, map[string]interface{}{
"type": "Blueberry",
}, map[string]interface{}{
"type": "Devil's Food",
},
},
},
"eyes": "brown",
"age": 35,
"owner": map[string]interface{}{
"organization": "MongoDB",
"bio": "MongoDB Chief Developer Advocate & Hacker at Large",
"dob": dob,
},
"owner.bio": "MongoDB Chief Developer Advocate & Hacker at Large",
"type": "donut",
"id": "0001",
"name": "Cake",
"hacker": true,
"ppu": 0.55,
"clothing": map[string]interface{}{
"jacket": "leather",
"trousers": "denim",
"pants": map[string]interface{}{
"size": "large",
},
},
"clothing.jacket": "leather",
"clothing.pants.size": "large",
"clothing.trousers": "denim",
"owner.dob": dob,
"beard": true,
"foos": []map[string]interface{}{
map[string]interface{}{
"foo": []map[string]interface{}{
map[string]interface{}{
"key": 1,
},
map[string]interface{}{
"key": 2,
},
map[string]interface{}{
"key": 3,
},
map[string]interface{}{
"key": 4,
},
},
},
},
}
for key, expectedValue := range expected {
assert.Equal(t, expectedValue, v.Get(key))
}
}
func TestReadBufConfig(t *testing.T) {
v := New()
v.SetConfigType("yaml")
v.ReadConfig(bytes.NewBuffer(yamlExample))
t.Log(v.AllKeys())
assert.True(t, v.InConfig("name"))
assert.False(t, v.InConfig("state"))
assert.Equal(t, "steve", v.Get("name"))
assert.Equal(t, []interface{}{"skateboarding", "snowboarding", "go"}, v.Get("hobbies"))
assert.Equal(t, map[string]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[string]interface{}{"size": "large"}}, v.Get("clothing"))
assert.Equal(t, 35, v.Get("age"))
}
func TestIsSet(t *testing.T) {
v := New()
v.SetConfigType("yaml")
v.ReadConfig(bytes.NewBuffer(yamlExample))
assert.True(t, v.IsSet("clothing.jacket"))
assert.False(t, v.IsSet("clothing.jackets"))
assert.False(t, v.IsSet("helloworld"))
v.Set("helloworld", "fubar")
assert.True(t, v.IsSet("helloworld"))
}
func TestDirsSearch(t *testing.T) {
root, config, cleanup := initDirs(t)
defer cleanup()
v := New()
v.SetConfigName(config)
v.SetDefault(`key`, `default`)
entries, err := ioutil.ReadDir(root)
for _, e := range entries {
if e.IsDir() {
v.AddConfigPath(e.Name())
}
}
err = v.ReadInConfig()
assert.Nil(t, err)
assert.Equal(t, `value is `+path.Base(v.configPaths[0]), v.GetString(`key`))
}
func TestWrongDirsSearchNotFound(t *testing.T) {
_, config, cleanup := initDirs(t)
defer cleanup()
v := New()
v.SetConfigName(config)
v.SetDefault(`key`, `default`)
v.AddConfigPath(`whattayoutalkingbout`)
v.AddConfigPath(`thispathaintthere`)
err := v.ReadInConfig()
assert.Equal(t, reflect.TypeOf(ConfigFileNotFoundError{"", ""}), reflect.TypeOf(err))
// Even though config did not load and the error might have
// been ignored by the client, the default still loads
assert.Equal(t, `default`, v.GetString(`key`))
}
func TestWrongDirsSearchNotFoundForMerge(t *testing.T) {
_, config, cleanup := initDirs(t)
defer cleanup()
v := New()
v.SetConfigName(config)
v.SetDefault(`key`, `default`)
v.AddConfigPath(`whattayoutalkingbout`)
v.AddConfigPath(`thispathaintthere`)
err := v.MergeInConfig()
assert.Equal(t, reflect.TypeOf(ConfigFileNotFoundError{"", ""}), reflect.TypeOf(err))
// Even though config did not load and the error might have
// been ignored by the client, the default still loads
assert.Equal(t, `default`, v.GetString(`key`))
}
func TestSub(t *testing.T) {
v := New()
v.SetConfigType("yaml")
v.ReadConfig(bytes.NewBuffer(yamlExample))
subv := v.Sub("clothing")
assert.Equal(t, v.Get("clothing.pants.size"), subv.Get("pants.size"))
subv = v.Sub("clothing.pants")
assert.Equal(t, v.Get("clothing.pants.size"), subv.Get("size"))
subv = v.Sub("clothing.pants.size")
assert.Equal(t, (*Viper)(nil), subv)
subv = v.Sub("missing.key")
assert.Equal(t, (*Viper)(nil), subv)
}
var hclWriteExpected = []byte(`"foos" = {
"foo" = {
"key" = 1
}
"foo" = {
"key" = 2
}
"foo" = {
"key" = 3
}
"foo" = {
"key" = 4
}
}
"id" = "0001"
"name" = "Cake"
"ppu" = 0.55
"type" = "donut"`)
func TestWriteConfigHCL(t *testing.T) {
v := New()
fs := afero.NewMemMapFs()
v.SetFs(fs)
v.SetConfigName("c")
v.SetConfigType("hcl")
err := v.ReadConfig(bytes.NewBuffer(hclExample))
if err != nil {
t.Fatal(err)
}
if err := v.WriteConfigAs("c.hcl"); err != nil {
t.Fatal(err)
}
read, err := afero.ReadFile(fs, "c.hcl")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, hclWriteExpected, read)
}
var jsonWriteExpected = []byte(`{
"batters": {
"batter": [
{
"type": "Regular"
},
{
"type": "Chocolate"
},
{
"type": "Blueberry"
},
{
"type": "Devil's Food"
}
]
},
"id": "0001",
"name": "Cake",
"ppu": 0.55,
"type": "donut"
}`)
func TestWriteConfigJson(t *testing.T) {
v := New()
fs := afero.NewMemMapFs()
v.SetFs(fs)
v.SetConfigName("c")
v.SetConfigType("json")
err := v.ReadConfig(bytes.NewBuffer(jsonExample))
if err != nil {
t.Fatal(err)
}
if err := v.WriteConfigAs("c.json"); err != nil {
t.Fatal(err)
}
read, err := afero.ReadFile(fs, "c.json")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, jsonWriteExpected, read)
}
var propertiesWriteExpected = []byte(`p_id = 0001
p_type = donut
p_name = Cake
p_ppu = 0.55
p_batters.batter.type = Regular
`)
func TestWriteConfigProperties(t *testing.T) {
v := New()
fs := afero.NewMemMapFs()
v.SetFs(fs)
v.SetConfigName("c")
v.SetConfigType("properties")
err := v.ReadConfig(bytes.NewBuffer(propertiesExample))
if err != nil {
t.Fatal(err)
}
if err := v.WriteConfigAs("c.properties"); err != nil {
t.Fatal(err)
}
read, err := afero.ReadFile(fs, "c.properties")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, propertiesWriteExpected, read)
}
func TestWriteConfigTOML(t *testing.T) {
fs := afero.NewMemMapFs()
v := New()
v.SetFs(fs)
v.SetConfigName("c")
v.SetConfigType("toml")
err := v.ReadConfig(bytes.NewBuffer(tomlExample))
if err != nil {
t.Fatal(err)
}
if err := v.WriteConfigAs("c.toml"); err != nil {
t.Fatal(err)
}
// The TOML String method does not order the contents.
// Therefore, we must read the generated file and compare the data.
v2 := New()
v2.SetFs(fs)
v2.SetConfigName("c")
v2.SetConfigType("toml")
v2.SetConfigFile("c.toml")
err = v2.ReadInConfig()
if err != nil {
t.Fatal(err)
}
assert.Equal(t, v.GetString("title"), v2.GetString("title"))
assert.Equal(t, v.GetString("owner.bio"), v2.GetString("owner.bio"))
assert.Equal(t, v.GetString("owner.dob"), v2.GetString("owner.dob"))
assert.Equal(t, v.GetString("owner.organization"), v2.GetString("owner.organization"))
}
var yamlWriteExpected = []byte(`age: 35
beard: true
clothing:
jacket: leather
pants:
size: large
trousers: denim
eyes: brown
hacker: true
hobbies:
- skateboarding
- snowboarding
- go
name: steve
`)
func TestWriteConfigYAML(t *testing.T) {
v := New()
fs := afero.NewMemMapFs()
v.SetFs(fs)
v.SetConfigName("c")
v.SetConfigType("yaml")
err := v.ReadConfig(bytes.NewBuffer(yamlExample))
if err != nil {
t.Fatal(err)
}
if err := v.WriteConfigAs("c.yaml"); err != nil {
t.Fatal(err)
}
read, err := afero.ReadFile(fs, "c.yaml")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, yamlWriteExpected, read)
}
var yamlMergeExampleTgt = []byte(`
hello:
pop: 37890
lagrenum: 765432101234567
world:
- us
- uk
- fr
- de
`)
var yamlMergeExampleSrc = []byte(`
hello:
pop: 45000
lagrenum: 7654321001234567
universe:
- mw
- ad
fu: bar
`)
func TestMergeConfig(t *testing.T) {
v := New()
v.SetConfigType("yml")
if err := v.ReadConfig(bytes.NewBuffer(yamlMergeExampleTgt)); err != nil {
t.Fatal(err)
}
if pop := v.GetInt("hello.pop"); pop != 37890 {
t.Fatalf("pop != 37890, = %d", pop)
}
if pop := v.GetInt32("hello.pop"); pop != int32(37890) {
t.Fatalf("pop != 37890, = %d", pop)
}
if pop := v.GetInt64("hello.lagrenum"); pop != int64(765432101234567) {
t.Fatalf("int64 lagrenum != 765432101234567, = %d", pop)
}
if world := v.GetStringSlice("hello.world"); len(world) != 4 {
t.Fatalf("len(world) != 4, = %d", len(world))
}
if fu := v.GetString("fu"); fu != "" {
t.Fatalf("fu != \"\", = %s", fu)
}
if err := v.MergeConfig(bytes.NewBuffer(yamlMergeExampleSrc)); err != nil {
t.Fatal(err)
}
if pop := v.GetInt("hello.pop"); pop != 45000 {
t.Fatalf("pop != 45000, = %d", pop)
}
if pop := v.GetInt32("hello.pop"); pop != int32(45000) {
t.Fatalf("pop != 45000, = %d", pop)
}
if pop := v.GetInt64("hello.lagrenum"); pop != int64(7654321001234567) {
t.Fatalf("int64 lagrenum != 7654321001234567, = %d", pop)
}
if world := v.GetStringSlice("hello.world"); len(world) != 4 {
t.Fatalf("len(world) != 4, = %d", len(world))
}
if universe := v.GetStringSlice("hello.universe"); len(universe) != 2 {
t.Fatalf("len(universe) != 2, = %d", len(universe))
}
if fu := v.GetString("fu"); fu != "bar" {
t.Fatalf("fu != \"bar\", = %s", fu)
}
}
func TestMergeConfigNoMerge(t *testing.T) {
v := New()
v.SetConfigType("yml")
if err := v.ReadConfig(bytes.NewBuffer(yamlMergeExampleTgt)); err != nil {
t.Fatal(err)
}
if pop := v.GetInt("hello.pop"); pop != 37890 {
t.Fatalf("pop != 37890, = %d", pop)
}
if world := v.GetStringSlice("hello.world"); len(world) != 4 {
t.Fatalf("len(world) != 4, = %d", len(world))
}
if fu := v.GetString("fu"); fu != "" {
t.Fatalf("fu != \"\", = %s", fu)
}
if err := v.ReadConfig(bytes.NewBuffer(yamlMergeExampleSrc)); err != nil {
t.Fatal(err)
}
if pop := v.GetInt("hello.pop"); pop != 45000 {
t.Fatalf("pop != 45000, = %d", pop)
}
if world := v.GetStringSlice("hello.world"); len(world) != 0 {
t.Fatalf("len(world) != 0, = %d", len(world))
}
if universe := v.GetStringSlice("hello.universe"); len(universe) != 2 {
t.Fatalf("len(universe) != 2, = %d", len(universe))
}
if fu := v.GetString("fu"); fu != "bar" {
t.Fatalf("fu != \"bar\", = %s", fu)
}
}
func TestUnmarshalingWithAliases(t *testing.T) {
v := New()
v.SetDefault("ID", 1)
v.Set("name", "Steve")
v.Set("lastname", "Owen")
v.RegisterAlias("UserID", "ID")
v.RegisterAlias("Firstname", "name")
v.RegisterAlias("Surname", "lastname")
type config struct {
ID int
FirstName string
Surname string
}
var C config
err := v.Unmarshal(&C)
if err != nil {
t.Fatalf("unable to decode into struct, %v", err)
}
assert.Equal(t, &config{ID: 1, FirstName: "Steve", Surname: "Owen"}, &C)
}
func TestSetConfigNameClearsFileCache(t *testing.T) {
SetConfigFile("/tmp/config.yaml")
SetConfigName("default")
f, err := v.getConfigFile()
if err == nil {
t.Fatalf("config file cache should have been cleared")
}
assert.Empty(t, f)
}
func TestShadowedNestedValue(t *testing.T) {
config := `name: steve
clothing:
jacket: leather
trousers: denim
pants:
size: large
`
initConfig("yaml", config)
assert.Equal(t, "steve", GetString("name"))
polyester := "polyester"
SetDefault("clothing.shirt", polyester)
SetDefault("clothing.jacket.price", 100)
assert.Equal(t, "leather", GetString("clothing.jacket"))
assert.Nil(t, Get("clothing.jacket.price"))
assert.Equal(t, polyester, GetString("clothing.shirt"))
clothingSettings := AllSettings()["clothing"].(map[string]interface{})
assert.Equal(t, "leather", clothingSettings["jacket"])
assert.Equal(t, polyester, clothingSettings["shirt"])
}
func TestDotParameter(t *testing.T) {
initJSON()
# should take precedence over batters defined in jsonExample
r := bytes.NewReader([]byte(`{ "batters.batter": [ { "type": "Small" } ] }`))
unmarshalReader(r, v.config)
actual := Get("batters.batter")
expected := []interface{}{map[string]interface{}{"type": "Small"}}
assert.Equal(t, expected, actual)
}
func TestCaseInsensitive(t *testing.T) {
for _, config := range []struct {
typ string
content string
}{
{"yaml", `
aBcD: 1
eF:
gH: 2
iJk: 3
Lm:
nO: 4
P:
Q: 5
R: 6
`},
{"json", `{
"aBcD": 1,
"eF": {
"iJk": 3,
"Lm": {
"P": {
"Q": 5,
"R": 6
},
"nO": 4
},
"gH": 2
}
}`},
{"toml", `aBcD = 1
[eF]
gH = 2
iJk = 3
[eF.Lm]
nO = 4
[eF.Lm.P]
Q = 5
R = 6
`},
} {
doTestCaseInsensitive(t, config.typ, config.content)
}
}
func TestCaseInsensitiveSet(t *testing.T) {
Reset()
m1 := map[string]interface{}{
"Foo": 32,
"Bar": map[interface{}]interface {
}{
"ABc": "A",
"cDE": "B"},
}
m2 := map[string]interface{}{
"Foo": 52,
"Bar": map[interface{}]interface {
}{
"bCd": "A",
"eFG": "B"},
}
Set("Given1", m1)
Set("Number1", 42)
SetDefault("Given2", m2)
SetDefault("Number2", 52)
// Verify SetDefault
if v := Get("number2"); v != 52 {
t.Fatalf("Expected 52 got %q", v)
}
if v := Get("given2.foo"); v != 52 {
t.Fatalf("Expected 52 got %q", v)
}
if v := Get("given2.bar.bcd"); v != "A" {
t.Fatalf("Expected A got %q", v)
}
if _, ok := m2["Foo"]; !ok {
t.Fatal("Input map changed")
}
// Verify Set
if v := Get("number1"); v != 42 {
t.Fatalf("Expected 42 got %q", v)
}
if v := Get("given1.foo"); v != 32 {
t.Fatalf("Expected 32 got %q", v)
}
if v := Get("given1.bar.abc"); v != "A" {
t.Fatalf("Expected A got %q", v)
}
if _, ok := m1["Foo"]; !ok {
t.Fatal("Input map changed")
}
}
func TestParseNested(t *testing.T) {
type duration struct {
Delay time.Duration
}
type item struct {
Name string
Delay time.Duration
Nested duration
}
config := `[[parent]]
delay="100ms"
[parent.nested]
delay="200ms"
`
initConfig("toml", config)
var items []item
err := v.UnmarshalKey("parent", &items)
if err != nil {
t.Fatalf("unable to decode into struct, %v", err)
}
assert.Equal(t, 1, len(items))
assert.Equal(t, 100*time.Millisecond, items[0].Delay)
assert.Equal(t, 200*time.Millisecond, items[0].Nested.Delay)
}
func doTestCaseInsensitive(t *testing.T, typ, config string) {
initConfig(typ, config)
Set("RfD", true)
assert.Equal(t, true, Get("rfd"))
assert.Equal(t, true, Get("rFD"))
assert.Equal(t, 1, cast.ToInt(Get("abcd")))
assert.Equal(t, 1, cast.ToInt(Get("Abcd")))
assert.Equal(t, 2, cast.ToInt(Get("ef.gh")))
assert.Equal(t, 3, cast.ToInt(Get("ef.ijk")))
assert.Equal(t, 4, cast.ToInt(Get("ef.lm.no")))
assert.Equal(t, 5, cast.ToInt(Get("ef.lm.p.q")))
}
func newViperWithConfigFile(t *testing.T) (*Viper, string, func()) {
watchDir, err := ioutil.TempDir("", "")
require.Nil(t, err)
configFile := path.Join(watchDir, "config.yaml")
err = ioutil.WriteFile(configFile, []byte("foo: bar\n"), 0640)
require.Nil(t, err)
cleanup := func() {
os.RemoveAll(watchDir)
}
v := New()
v.SetConfigFile(configFile)
err = v.ReadInConfig()
require.Nil(t, err)
require.Equal(t, "bar", v.Get("foo"))
return v, configFile, cleanup
}
func newViperWithSymlinkedConfigFile(t *testing.T) (*Viper, string, string, func()) {
watchDir, err := ioutil.TempDir("", "")
require.Nil(t, err)
dataDir1 := path.Join(watchDir, "data1")
err = os.Mkdir(dataDir1, 0777)
require.Nil(t, err)
realConfigFile := path.Join(dataDir1, "config.yaml")
t.Logf("Real config file location: %s\n", realConfigFile)
err = ioutil.WriteFile(realConfigFile, []byte("foo: bar\n"), 0640)
require.Nil(t, err)
cleanup := func() {
os.RemoveAll(watchDir)
}
// now, symlink the temp `data1` dir to `data` in the watch dir
os.Symlink(dataDir1, path.Join(watchDir, "data"))
// and link `<watchdir>/data/config.yaml` to `<watchdir>/config.yaml`
configFile := path.Join(watchDir, "config.yaml")
os.Symlink(path.Join(watchDir, "data", "config.yaml"), configFile)
t.Logf("Config file location: %s\n", path.Join(watchDir, "config.yaml"))
// init Viper
v := New()
v.SetConfigFile(configFile)
err = v.ReadInConfig()
require.Nil(t, err)
require.Equal(t, "bar", v.Get("foo"))
return v, watchDir, configFile, cleanup
}
func TestWatchFile(t *testing.T) {
t.Run("file content changed", func(t *testing.T) {
// given a `config.yaml` file being watched
v, configFile, cleanup := newViperWithConfigFile(t)
defer cleanup()
_, err := os.Stat(configFile)
require.NoError(t, err)
t.Logf("test config file: %s\n", configFile)
wg := sync.WaitGroup{}
wg.Add(1)
v.OnConfigChange(func(in fsnotify.Event) {
t.Logf("config file changed")
wg.Done()
})
v.WatchConfig()
// when overwriting the file and waiting for the custom change notification handler to be triggered
err = ioutil.WriteFile(configFile, []byte("foo: baz\n"), 0640)
wg.Wait()
// then the config value should have changed
require.Nil(t, err)
assert.Equal(t, "baz", v.Get("foo"))
})
t.Run("link to real file changed (à la Kubernetes)", func(t *testing.T) {
// skip if not executed on Linux
if runtime.GOOS != "linux" {
t.Skipf("Skipping test as symlink replacements don't work on non-linux environment...")
}
v, watchDir, _, _ := newViperWithSymlinkedConfigFile(t)
// defer cleanup()
wg := sync.WaitGroup{}
v.WatchConfig()
v.OnConfigChange(func(in fsnotify.Event) {
t.Logf("config file changed")
wg.Done()
})
wg.Add(1)
// when link to another `config.yaml` file
dataDir2 := path.Join(watchDir, "data2")
err := os.Mkdir(dataDir2, 0777)
require.Nil(t, err)
configFile2 := path.Join(dataDir2, "config.yaml")
err = ioutil.WriteFile(configFile2, []byte("foo: baz\n"), 0640)
require.Nil(t, err)
// change the symlink using the `ln -sfn` command
err = exec.Command("ln", "-sfn", dataDir2, path.Join(watchDir, "data")).Run()
require.Nil(t, err)
wg.Wait()
// then
require.Nil(t, err)
assert.Equal(t, "baz", v.Get("foo"))
})
}
func BenchmarkGetBool(b *testing.B) {
key := "BenchmarkGetBool"
v = New()
v.Set(key, true)
for i := 0; i < b.N; i++ {
if !v.GetBool(key) {
b.Fatal("GetBool returned false")
}
}
}
func BenchmarkGet(b *testing.B) {
key := "BenchmarkGet"
v = New()
v.Set(key, true)
for i := 0; i < b.N; i++ {
if !v.Get(key).(bool) {
b.Fatal("Get returned false")
}
}
}
// This is the "perfect result" for the above.
func BenchmarkGetBoolFromMap(b *testing.B) {
m := make(map[string]bool)
key := "BenchmarkGetBool"
m[key] = true
for i := 0; i < b.N; i++ {
if !m[key] {
b.Fatal("Map value was false")
}
}
}
|
Set("age", 40)
assert.Equal(t, 40, Get("age"))
}
|
util.py | def split_caps(key):
| """ splits a string using capital letters as separator
eg split_caps('ExampleString') returns ['Example', 'String']
"""
b = False
lst = []
x = 0
for i, ch in enumerate(key):
if i > 0 and ch.isupper():
temp = key[x:i]
if b and not temp.isupper():
lst.append(key[x:i-1])
x = i-1
b = True
elif b:
lst.append(key[x:i-1])
b = False
x = i-1
lst.append(key[x:])
return lst |
|
octconv.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class | (nn.Module):
def __init__(self, ch_in, ch_out, kernel_size, stride=1, alphas=(0.5, 0.5)):
super(OctConv, self).__init__()
self.alpha_in, self.alpha_out = alphas
assert 0 <= self.alpha_in <= 1 and 0 <= self.alpha_out <= 1, "Alphas must be in interval [0, 1]"
# CH IN
self.ch_in_hf = int((1 - self.alpha_in) * ch_in)
self.ch_in_lf = ch_in - self.ch_in_hf
# CH OUT
self.ch_out_hf = int((1 - self.alpha_out) * ch_out)
self.ch_out_lf = ch_out - self.ch_out_hf
# FILTERS
self.wHtoH = nn.Parameter(torch.randn(self.ch_out_hf, self.ch_in_hf, kernel_size, kernel_size))
self.wHtoL = nn.Parameter(torch.randn(self.ch_out_lf, self.ch_in_hf, kernel_size, kernel_size))
self.wLtoH = nn.Parameter(torch.randn(self.ch_out_hf, self.ch_in_lf, kernel_size, kernel_size))
self.wLtoL = nn.Parameter(torch.randn(self.ch_out_lf, self.ch_in_lf, kernel_size, kernel_size))
# PADDING: (H - F + 2P)/S + 1 = 2 * [(0.5 H - F + 2P)/S +1] -> P = (F-S)/2
self.padding = (kernel_size - stride) // 2
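# Illustrative check (not in the original): with kernel_size=3 and stride=1,
# padding = (3 - 1) // 2 = 1, so the high-frequency conv keeps an H x H map
# while the pooled low-frequency branch keeps H/2 x H/2, as the formula above intends.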
def forward(self, input):
# logic to handle input tensors:
# if alpha_in = 0., we assume to be at the first layer, with only high freq repr
if self.alpha_in == 0:
hf_input = input
lf_input = torch.Tensor([]).reshape(1, 0)
else:
fmap_size = input.shape[-1]
hf_input = input[:, :self.ch_in_hf * 4, ...].reshape(-1, self.ch_in_hf, fmap_size * 2, fmap_size * 2)
lf_input = input[:, self.ch_in_hf * 4:, ...]
HtoH = HtoL = LtoL = LtoH = 0.
if self.alpha_in < 1:
# if alpha < 1 there is high freq component
if self.ch_out_hf > 0:
HtoH = F.conv2d(hf_input, self.wHtoH, padding=self.padding)
if self.ch_out_lf > 0:
HtoL = F.conv2d(F.avg_pool2d(hf_input, 2), self.wHtoL, padding=self.padding)
if self.alpha_in > 0:
# if alpha > 0 there is low freq component
if self.ch_out_hf > 0:
LtoH = F.interpolate(F.conv2d(lf_input, self.wLtoH, padding=self.padding),
scale_factor=2, mode='nearest')
if self.ch_out_lf > 0:
LtoL = F.conv2d(lf_input, self.wLtoL, padding=self.padding)
hf_output = HtoH + LtoH
lf_output = LtoL + HtoL
if 0 < self.alpha_out < 1:
# if alpha in (0, 1)
fmap_size = hf_output.shape[-1] // 2
hf_output = hf_output.reshape(-1, 4 * self.ch_out_hf, fmap_size, fmap_size)
output = torch.cat([hf_output, lf_output], dim=1) # cat over channel dim
elif np.isclose(self.alpha_out, 1., atol=1e-8):
# if only low freq (alpha_out = 1.)
output = lf_output
elif np.isclose(self.alpha_out, 0., atol=1e-8):
# if only high freq (alpha_out = 0.)
output = hf_output
return output
oc = OctConv(ch_in=3, ch_out=3, kernel_size=3, alphas=(0., 0.5))
oc1 = OctConv(ch_in=3, ch_out=10, kernel_size=7, alphas=(0.5, 0.8))
oc2 = OctConv(ch_in=10, ch_out=1, kernel_size=3, alphas=(0.8, 0.))
out = oc2(oc1(oc(torch.randn(2, 3, 32, 32))))
print(out.shape)
| OctConv |
abstract_variational_driver.py | # Copyright 2021 The NetKet Authors - All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import numbers
from functools import partial
import numpy as np
from tqdm import tqdm
import warnings
import jax
from jax.tree_util import tree_map
from netket.logging import JsonLog
from netket.utils import node_number, n_nodes
def _to_iterable(maybe_iterable):
"""
_to_iterable(maybe_iterable)
Ensure the result is iterable. If the input is not iterable, it is wrapped into a tuple.
"""
if hasattr(maybe_iterable, "__iter__"):
surely_iterable = maybe_iterable
else:
surely_iterable = (maybe_iterable,)
return surely_iterable
# Note: to implement a new Driver (see also _vmc.py for an example)
# If you want to inherit the nice interface of AbstractMCDriver, you should
# subclass it, defining the following methods:
# - Either _forward_and_backward or individually _forward, _backward, that should
# compute the loss function and the gradient. If the driver is minimizing or
# maximising some loss function, this quantity should be assigned to self._loss_stats
# in order to monitor it.
# - _estimate_stats should return the MC estimate of a single operator
# - reset should reset the driver (usually the sampler).
# - info should return a string with an overview of the driver.
# - The __init__ method should be called with the machine and the optimizer. If this
# driver is minimising a loss function and you want its name to show up automatically
# in the progress bar/output files you should pass the optional keyword argument
# minimized_quantity_name.
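# A minimal sketch of such a subclass (illustrative only, not part of the
# original source; the name `EnergyMinimizer`, the `hamiltonian` argument, and
# the assumption that `self.state.expect_and_grad(...)` returns the loss
# statistics together with the gradient pytree are all hypothetical here):
#
#   class EnergyMinimizer(AbstractVariationalDriver):
#       def __init__(self, hamiltonian, variational_state, optimizer):
#           super().__init__(variational_state, optimizer,
#                            minimized_quantity_name="Energy")
#           self._ham = hamiltonian
#
#       def _forward_and_backward(self):
#           self.state.reset()
#           self._loss_stats, grad = self.state.expect_and_grad(self._ham)
#           return grad
#
#       def reset(self):
#           super().reset()
#
#       def info(self, depth=0):
#           return "EnergyMinimizer"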
class AbstractVariationalDriver(abc.ABC):
"""Abstract base class for NetKet Variational Monte Carlo drivers"""
def __init__(self, variational_state, optimizer, minimized_quantity_name=""):
self._mynode = node_number
self._mpi_nodes = n_nodes
self._loss_stats = None
self._loss_name = minimized_quantity_name
self._step_count = 0
self._variational_state = variational_state
self.optimizer = optimizer
def _forward_and_backward(self):
"""
Performs the forward and backward pass at the same time.
Concrete drivers should either override this method, or override individually
_forward and _backward.
Returns:
the update for the weights.
"""
self._forward()
dp = self._backward()
return dp
def _forward(self):
"""
Performs the forward pass, computing the loss function.
Concrete drivers should either implement _forward and _backward or the joint method
_forward_and_backward.
"""
raise NotImplementedError()
def _backward(self):
"""
Performs the backward pass, computing the update for the parameters.
Concrete drivers should either implement _forward and _backward or the joint method
_forward_and_backward.
"""
raise NotImplementedError()
def _estimate_stats(self, observable):
"""
Returns the MCMC statistics for the expectation value of an observable.
Must be implemented by sub-classes of AbstractVMC.
:param observable: A quantum operator (netket observable)
:return:
"""
return self.state.expect(observable)
def reset(self):
"""
Resets the driver.
Concrete drivers should also call super().reset() to ensure that the step
count is set to 0.
"""
self.state.reset()
self.step_count = 0
pass
@abc.abstractmethod
def info(self, depth=0):
"""
Returns an info string used to print information to screen about this driver.
"""
pass
@property
def state(self):
"""
Returns the machine that is optimized by this driver.
"""
return self._variational_state
@property
def optimizer(self):
return self._optimizer
@optimizer.setter
def optimizer(self, optimizer):
self._optimizer = optimizer
self._optimizer_state = optimizer.init(self.state.parameters)
@property
def step_count(self):
"""
Returns a monotonic integer labelling all the steps performed by this driver.
This can be used, for example, to identify the line in a log file.
"""
return self._step_count
def iter(self, n_steps: int, step: int = 1):
"""
Returns a generator which advances the VMC optimization, yielding
after every `step` internal steps.
Args:
n_steps: The total number of steps to perform.
step: The number of internal steps the simulation
is advanced before every yield.
Yields:
int: The current step.
"""
for _ in range(0, n_steps, step):
for i in range(0, step):
dp = self._forward_and_backward()
if i == 0:
yield self.step_count
self._step_count += 1
self.update_parameters(dp)
def advance(self, steps: int = 1):
"""
Performs `steps` optimization steps.
steps: (Default=1) number of steps
"""
for _ in self.iter(steps):
pass
def run(
self,
n_iter,
out=None,
obs=None,
show_progress=True,
save_params_every=50, # for default logger
write_every=50, # for default logger
step_size=1, # for default logger
callback=lambda *x: True,
):
"""
Executes the Monte Carlo Variational optimization, updating the weights of the network
stored in this driver for `n_iter` steps and dumping values of the observables `obs`
in the output `logger`. If no logger is specified, creates a json file at `out`,
overwriting files with the same prefix.
By default uses :ref:`netket.logging.JsonLogger`. To know about the output format
check its documentation. The logger object is also returned at the end of this function
so that you can inspect the results without reading the json output.
Args:
n_iter: the total number of iterations
out: A logger object, or an iterable of loggers, to be used to store simulation log and data.
If this argument is a string, it will be used as output prefix for the standard JSON logger.
obs: An iterable containing all observables that should be computed
save_params_every: Every how many steps the parameters of the network should be
serialized to disk (ignored if logger is provided)
write_every: Every how many steps the json data should be flushed to disk (ignored if
logger is provided)
step_size: Every how many steps should observables be logged to disk (default=1)
show_progress: If true displays a progress bar (default=True)
callback: Callable or list of callable callback functions to stop training given a condition
"""
if not isinstance(n_iter, numbers.Number):
raise ValueError(
"n_iter, the first positional argument to `run`, must be a number!"
)
if obs is None:
obs = {}
if out is None:
out = tuple()
print(
"No output specified (out=[apath|nk.logging.JsonLogger(...)])."
"Running the optimization but not saving the output."
)
# Only log from the root node (node 0)
if self._mynode == 0:
# if out is a path, create an overwriting Json Log for output
if isinstance(out, str):
loggers = (JsonLog(out, "w", save_params_every, write_every),)
else:
loggers = _to_iterable(out)
else:
loggers = tuple()
show_progress = False
callbacks = _to_iterable(callback)
callback_stop = False
with tqdm(
self.iter(n_iter, step_size), total=n_iter, disable=not show_progress
) as itr:
first_step = True
for step in itr:
log_data = self.estimate(obs)
# if the cost-function is defined then report it in the progress bar
if self._loss_stats is not None:
itr.set_postfix_str(self._loss_name + "=" + str(self._loss_stats))
log_data[self._loss_name] = self._loss_stats
for callback in callbacks:
if not callback(step, log_data, self):
callback_stop = True
for logger in loggers:
logger(self.step_count, log_data, self.state)
if callback_stop:
break
# Reset the timing of tqdm after the first step, to ignore compilation time
if first_step:
first_step = False
itr.unpause()
# flush at the end of the evolution so that final values are saved to
# file |
return loggers
def estimate(self, observables):
"""
Return MCMC statistics for the expectation value of observables in the
current state of the driver.
Args:
observables: A pytree of operators for which statistics should be computed.
Returns:
A pytree of the same structure as the input, containing MCMC statistics
for the corresponding operators as leaves.
"""
return tree_map(self._estimate_stats, observables)
def update_parameters(self, dp):
"""
Updates the parameters of the machine using the optimizer in this driver
Args:
dp: the pytree containing the updates to the parameters
"""
self._optimizer_state, self.state.parameters = apply_gradient(
self._optimizer.update, self._optimizer_state, dp, self.state.parameters
)
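# Note (illustrative assumption, not from the original code): the call above is the
# jit-compiled equivalent of a plain optax step:
#   updates, new_opt_state = self._optimizer.update(dp, self._optimizer_state, params)
#   new_params = optax.apply_updates(params, updates)
# which is exactly the pair of calls that apply_gradient below performs.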
@partial(jax.jit, static_argnums=0)
def apply_gradient(optimizer_fun, optimizer_state, dp, params):
import optax
updates, new_optimizer_state = optimizer_fun(dp, optimizer_state, params)
new_params = optax.apply_updates(params, updates)
return new_optimizer_state, new_params | for logger in loggers:
logger.flush(self.state) |
polyfillNestedWorker.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { NewWorkerMessage, TerminateWorkerMessage } from 'vs/workbench/services/extensions/common/polyfillNestedWorker.protocol';
declare function postMessage(data: any, transferables?: Transferable[]): void;
declare type MessageEventHandler = ((ev: MessageEvent<any>) => any) | null;
const _bootstrapFnSource = (function _bootstrapFn(workerUrl: string) {
const listener: EventListener = (event: Event): void => {
// uninstall handler
self.removeEventListener('message', listener);
// get data
const port = <MessagePort>(<MessageEvent>event).data;
// postMessage
// onmessage
Object.defineProperties(self, {
'postMessage': {
value(data: any, transferOrOptions?: any) {
port.postMessage(data, transferOrOptions);
}
},
'onmessage': {
get() {
return port.onmessage;
},
set(value: MessageEventHandler) {
port.onmessage = value;
}
}
// todo onerror
});
port.addEventListener('message', msg => {
self.dispatchEvent(new MessageEvent('message', { data: msg.data }));
});
port.start();
// fake recursively nested worker
self.Worker = <any>class { constructor() { throw new TypeError('Nested workers from within nested worker are NOT supported.'); } };
// load module
importScripts(workerUrl);
};
self.addEventListener('message', listener);
}).toString();
export class | extends EventTarget implements Worker {
onmessage: ((this: Worker, ev: MessageEvent<any>) => any) | null = null;
onmessageerror: ((this: Worker, ev: MessageEvent<any>) => any) | null = null;
onerror: ((this: AbstractWorker, ev: ErrorEvent) => any) | null = null;
readonly terminate: () => void;
readonly postMessage: (message: any, options?: any) => void;
constructor(nativePostMessage: typeof postMessage, stringOrUrl: string | URL, options?: WorkerOptions) {
super();
// create bootstrap script
const bootstrap = `((${_bootstrapFnSource})('${stringOrUrl}'))`;
const blob = new Blob([bootstrap], { type: 'application/javascript' });
const blobUrl = URL.createObjectURL(blob);
const channel = new MessageChannel();
const id = blobUrl; // works because blob url is unique, needs ID pool otherwise
const msg: NewWorkerMessage = {
type: '_newWorker',
id,
port: channel.port2,
url: blobUrl,
options,
};
nativePostMessage(msg, [channel.port2]);
// worker-impl: functions
this.postMessage = channel.port1.postMessage.bind(channel.port1);
this.terminate = () => {
const msg: TerminateWorkerMessage = {
type: '_terminateWorker',
id
};
channel.port1.postMessage(msg);
URL.revokeObjectURL(blobUrl);
channel.port1.close();
channel.port2.close();
};
// worker-impl: events
Object.defineProperties(this, {
'onmessage': {
get() {
return channel.port1.onmessage;
},
set(value: MessageEventHandler) {
channel.port1.onmessage = value;
}
},
'onmessageerror': {
get() {
return channel.port1.onmessageerror;
},
set(value: MessageEventHandler) {
channel.port1.onmessageerror = value;
}
},
// todo onerror
});
channel.port1.addEventListener('messageerror', evt => {
const msgEvent = new MessageEvent('messageerror', { data: evt.data });
this.dispatchEvent(msgEvent);
});
channel.port1.addEventListener('message', evt => {
const msgEvent = new MessageEvent('message', { data: evt.data });
this.dispatchEvent(msgEvent);
});
channel.port1.start();
}
}
| NestedWorker |
subprocess_env_manager.py | from typing import Dict, NamedTuple, List, Any, Optional, Callable, Set, Tuple
import cloudpickle
import enum
from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.exception import (
UnityCommunicationException,
UnityTimeOutException,
UnityEnvironmentException,
)
from multiprocessing import Process, Pipe, Queue
from multiprocessing.connection import Connection
from queue import Empty as EmptyQueueException
from mlagents_envs.base_env import BaseEnv, AgentGroup
from mlagents_envs.logging_util import get_logger
from mlagents.trainers.env_manager import EnvManager, EnvironmentStep, AllStepResult
from mlagents_envs.timers import (
TimerNode,
timed,
hierarchical_timer,
reset_timers,
get_timer_root,
)
from mlagents.trainers.brain import BrainParameters
from mlagents.trainers.action_info import ActionInfo
from mlagents_envs.side_channel.float_properties_channel import FloatPropertiesChannel
from mlagents_envs.side_channel.engine_configuration_channel import (
EngineConfigurationChannel,
EngineConfig,
)
from mlagents_envs.side_channel.stats_side_channel import (
StatsSideChannel,
StatsAggregationMethod,
)
from mlagents_envs.side_channel.side_channel import SideChannel
from mlagents.trainers.brain_conversion_utils import group_spec_to_brain_parameters
logger = get_logger(__name__)
class EnvironmentCommand(enum.Enum):
STEP = 1
EXTERNAL_BRAINS = 2
GET_PROPERTIES = 3
RESET = 4
CLOSE = 5
ENV_EXITED = 6
class EnvironmentRequest(NamedTuple):
cmd: EnvironmentCommand
payload: Any = None
class EnvironmentResponse(NamedTuple):
cmd: EnvironmentCommand
worker_id: int
payload: Any
class StepResponse(NamedTuple):
all_step_result: AllStepResult
timer_root: Optional[TimerNode]
environment_stats: Dict[str, Tuple[float, StatsAggregationMethod]]
class UnityEnvWorker:
def __init__(self, process: Process, worker_id: int, conn: Connection):
self.process = process
self.worker_id = worker_id
self.conn = conn
self.previous_step: EnvironmentStep = EnvironmentStep.empty(worker_id)
self.previous_all_action_info: Dict[str, ActionInfo] = {}
self.waiting = False
def send(self, cmd: EnvironmentCommand, payload: Any = None) -> None:
try:
req = EnvironmentRequest(cmd, payload)
self.conn.send(req)
except (BrokenPipeError, EOFError):
raise UnityCommunicationException("UnityEnvironment worker: send failed.")
def recv(self) -> EnvironmentResponse:
try:
response: EnvironmentResponse = self.conn.recv()
if response.cmd == EnvironmentCommand.ENV_EXITED:
env_exception: Exception = response.payload
raise env_exception
return response
except (BrokenPipeError, EOFError):
raise UnityCommunicationException("UnityEnvironment worker: recv failed.")
def close(self):
try:
self.conn.send(EnvironmentRequest(EnvironmentCommand.CLOSE))
except (BrokenPipeError, EOFError):
logger.debug(
f"UnityEnvWorker {self.worker_id} got exception trying to close."
)
pass
logger.debug(f"UnityEnvWorker {self.worker_id} joining process.")
self.process.join()
def worker(
parent_conn: Connection,
step_queue: Queue,
pickled_env_factory: str,
worker_id: int,
engine_configuration: EngineConfig,
) -> None:
env_factory: Callable[
[int, List[SideChannel]], UnityEnvironment
] = cloudpickle.loads(pickled_env_factory)
shared_float_properties = FloatPropertiesChannel()
engine_configuration_channel = EngineConfigurationChannel()
engine_configuration_channel.set_configuration(engine_configuration)
stats_channel = StatsSideChannel()
env: BaseEnv = None
def _send_response(cmd_name: EnvironmentCommand, payload: Any) -> None:
parent_conn.send(EnvironmentResponse(cmd_name, worker_id, payload))
def _generate_all_results() -> AllStepResult:
all_step_result: AllStepResult = {}
for brain_name in env.get_agent_groups():
all_step_result[brain_name] = env.get_step_result(brain_name)
return all_step_result
def external_brains():
result = {}
for brain_name in env.get_agent_groups():
result[brain_name] = group_spec_to_brain_parameters(
brain_name, env.get_agent_group_spec(brain_name)
)
return result
try:
env = env_factory(
worker_id,
[shared_float_properties, engine_configuration_channel, stats_channel],
)
while True:
req: EnvironmentRequest = parent_conn.recv()
if req.cmd == EnvironmentCommand.STEP:
all_action_info = req.payload
for brain_name, action_info in all_action_info.items():
if len(action_info.action) != 0:
env.set_actions(brain_name, action_info.action)
env.step()
all_step_result = _generate_all_results()
# The timers in this process are independent from those in all other worker processes and the "main" process.
# So after we send back the root timer, we can safely clear them.
# Note that we could randomly return timers a fraction of the time if we wanted to reduce
# the data transferred.
# TODO get gauges from the workers and merge them in the main process too.
env_stats = stats_channel.get_and_reset_stats()
step_response = StepResponse(
all_step_result, get_timer_root(), env_stats
)
step_queue.put(
EnvironmentResponse(
EnvironmentCommand.STEP, worker_id, step_response
)
)
reset_timers()
elif req.cmd == EnvironmentCommand.EXTERNAL_BRAINS:
_send_response(EnvironmentCommand.EXTERNAL_BRAINS, external_brains())
elif req.cmd == EnvironmentCommand.GET_PROPERTIES:
reset_params = shared_float_properties.get_property_dict_copy()
_send_response(EnvironmentCommand.GET_PROPERTIES, reset_params)
elif req.cmd == EnvironmentCommand.RESET:
for k, v in req.payload.items():
shared_float_properties.set_property(k, v)
env.reset()
all_step_result = _generate_all_results()
_send_response(EnvironmentCommand.RESET, all_step_result)
elif req.cmd == EnvironmentCommand.CLOSE:
break
except (
KeyboardInterrupt,
UnityCommunicationException,
UnityTimeOutException,
UnityEnvironmentException,
) as ex:
logger.info(f"UnityEnvironment worker {worker_id}: environment stopping.")
step_queue.put(
EnvironmentResponse(EnvironmentCommand.ENV_EXITED, worker_id, ex)
)
_send_response(EnvironmentCommand.ENV_EXITED, ex)
finally:
# If this worker has put an item in the step queue that hasn't been processed by the EnvManager, the process
# will hang until the item is processed. We avoid this behavior by using Queue.cancel_join_thread()
# See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.Queue.cancel_join_thread for
# more info.
logger.debug(f"UnityEnvironment worker {worker_id} closing.")
step_queue.cancel_join_thread()
step_queue.close()
if env is not None:
env.close()
logger.debug(f"UnityEnvironment worker {worker_id} done.")
class SubprocessEnvManager(EnvManager):
def __init__(
self,
env_factory: Callable[[int, List[SideChannel]], BaseEnv],
engine_configuration: EngineConfig,
n_env: int = 1,
):
super().__init__()
self.env_workers: List[UnityEnvWorker] = []
self.step_queue: Queue = Queue()
for worker_idx in range(n_env):
self.env_workers.append(
self.create_worker(
worker_idx, self.step_queue, env_factory, engine_configuration
)
)
@staticmethod
def create_worker(
worker_id: int,
step_queue: Queue,
env_factory: Callable[[int, List[SideChannel]], BaseEnv],
engine_configuration: EngineConfig,
) -> UnityEnvWorker:
parent_conn, child_conn = Pipe()
# Need to use cloudpickle for the env factory function since function objects aren't picklable
# on Windows as of Python 3.6.
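# Illustrative example (an assumption, not part of the original module): cloudpickle can
# round-trip closures and lambdas that the standard pickle module rejects, e.g.
#   payload = cloudpickle.dumps(lambda worker_id, side_channels: make_env(worker_id))
#   factory = cloudpickle.loads(payload)  # callable again on the receiving side
# where `make_env` is a hypothetical environment factory.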
pickled_env_factory = cloudpickle.dumps(env_factory)
child_process = Process(
target=worker,
args=(
child_conn,
step_queue,
pickled_env_factory,
worker_id,
engine_configuration,
),
)
child_process.start()
return UnityEnvWorker(child_process, worker_id, parent_conn)
def _queue_steps(self) -> None:
for env_worker in self.env_workers:
if not env_worker.waiting:
env_action_info = self._take_step(env_worker.previous_step)
env_worker.previous_all_action_info = env_action_info
env_worker.send(EnvironmentCommand.STEP, env_action_info)
env_worker.waiting = True
def _step(self) -> List[EnvironmentStep]:
# Queue steps for any workers which aren't in the "waiting" state.
self._queue_steps()
worker_steps: List[EnvironmentResponse] = []
step_workers: Set[int] = set()
# Poll the step queue for completed steps from environment workers until we retrieve
# 1 or more, which we will then return as StepInfos
while len(worker_steps) < 1:
try:
while True:
step: EnvironmentResponse = self.step_queue.get_nowait()
if step.cmd == EnvironmentCommand.ENV_EXITED:
env_exception: Exception = step.payload
raise env_exception
self.env_workers[step.worker_id].waiting = False
if step.worker_id not in step_workers:
worker_steps.append(step)
step_workers.add(step.worker_id)
except EmptyQueueException:
pass
step_infos = self._postprocess_steps(worker_steps)
return step_infos
def _reset_env(self, config: Optional[Dict] = None) -> List[EnvironmentStep]:
while any(ew.waiting for ew in self.env_workers):
if not self.step_queue.empty():
step = self.step_queue.get_nowait()
self.env_workers[step.worker_id].waiting = False
# First enqueue reset commands for all workers so that they reset in parallel
for ew in self.env_workers:
ew.send(EnvironmentCommand.RESET, config)
# Next (synchronously) collect the reset observations from each worker in sequence
for ew in self.env_workers:
ew.previous_step = EnvironmentStep(ew.recv().payload, ew.worker_id, {}, {})
return list(map(lambda ew: ew.previous_step, self.env_workers))
@property
def external_brains(self) -> Dict[AgentGroup, BrainParameters]:
self.env_workers[0].send(EnvironmentCommand.EXTERNAL_BRAINS)
return self.env_workers[0].recv().payload
@property
def get_properties(self) -> Dict[AgentGroup, float]:
self.env_workers[0].send(EnvironmentCommand.GET_PROPERTIES)
return self.env_workers[0].recv().payload
def close(self) -> None:
|
def _postprocess_steps(
self, env_steps: List[EnvironmentResponse]
) -> List[EnvironmentStep]:
step_infos = []
timer_nodes = []
for step in env_steps:
payload: StepResponse = step.payload
env_worker = self.env_workers[step.worker_id]
new_step = EnvironmentStep(
payload.all_step_result,
step.worker_id,
env_worker.previous_all_action_info,
payload.environment_stats,
)
step_infos.append(new_step)
env_worker.previous_step = new_step
if payload.timer_root:
timer_nodes.append(payload.timer_root)
if timer_nodes:
with hierarchical_timer("workers") as main_timer_node:
for worker_timer_node in timer_nodes:
main_timer_node.merge(
worker_timer_node, root_name="worker_root", is_parallel=True
)
return step_infos
@timed
def _take_step(self, last_step: EnvironmentStep) -> Dict[AgentGroup, ActionInfo]:
all_action_info: Dict[str, ActionInfo] = {}
for brain_name, batch_step_result in last_step.current_all_step_result.items():
if brain_name in self.policies:
all_action_info[brain_name] = self.policies[brain_name].get_action(
batch_step_result, last_step.worker_id
)
return all_action_info
| logger.debug(f"SubprocessEnvManager closing.")
self.step_queue.close()
self.step_queue.join_thread()
for env_worker in self.env_workers:
env_worker.close() |
forms.py | from flask_wtf import FlaskForm
from wtforms import RadioField, SelectMultipleField, widgets
class MultiCheckBoxField(SelectMultipleField):
widget = widgets.ListWidget(prefix_label=False)
option_widget = widgets.CheckboxInput()
class GoogleGroupsSubscribe(FlaskForm):
group = MultiCheckBoxField(
'Which emails are you signing up for?',
choices = [
('wheaton-ultimate', 'Social events'),
('wheaton-ultimate-frisbee', 'Ultimate frisbee games'),
('wheaton-soccer', 'Soccer games'),
('wheaton-housing', 'Housing roommates/tenants'),
],
)
class | (FlaskForm):
group = MultiCheckBoxField(
'Which emails are you unsubscribing from?',
choices = [
('wheaton-ultimate', 'Social events'),
('wheaton-ultimate-frisbee', 'Ultimate frisbee games'),
('wheaton-soccer', 'Soccer games'),
('wheaton-housing', 'Housing roommates/tenants'),
],
)
| GoogleGroupsUnsubscribe |
clear.go | /*
Copyright © 2021 tmax cloud <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software | See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/hantmac/tmax/setting"
)
// clearCmd represents the clear command
var clearCmd = &cobra.Command{
Use: "clear",
Short: "remove .tmax.yaml",
Long: `remove .tmax.yaml`,
Run: func(cmd *cobra.Command, args []string) {
deleteConfig()
fmt.Println("remove .tmax.yaml succeeded, you can re-generate it by running 'tmax generate'")
},
}
func init() {
rootCmd.AddCommand(clearCmd)
}
func deleteConfig() {
err := os.Remove(setting.ConfigPath)
if err != nil && !os.IsNotExist(err) {
fmt.Printf("can not delete file %s: %s\n", setting.ConfigPath, err)
os.Exit(1)
}
} | distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
startCirq3299.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 5/15/20 4:49 PM
# @File : grover.py
# qubit number=4
# total number=45
import cirq
import cirq.google as cg
from typing import Optional
import sys
from math import log2
import numpy as np
#thatsNoCode
from cirq.contrib.svg import SVGCircuit
# Symbols for the rotation angles in the QAOA circuit.
def make_circuit(n: int, input_qubit):
c = cirq.Circuit() # circuit begin
c.append(cirq.H.on(input_qubit[0])) # number=9
c.append(cirq.H.on(input_qubit[1])) # number=2
c.append(cirq.H.on(input_qubit[2])) # number=3
c.append(cirq.H.on(input_qubit[3])) # number=4
c.append(cirq.Y.on(input_qubit[3])) # number=12
c.append(cirq.H.on(input_qubit[3])) # number=36
c.append(cirq.CZ.on(input_qubit[2],input_qubit[3])) # number=37
c.append(cirq.H.on(input_qubit[3])) # number=38
c.append(cirq.H.on(input_qubit[0])) # number=5
c.append(cirq.H.on(input_qubit[1])) # number=6
c.append(cirq.H.on(input_qubit[2])) # number=24
c.append(cirq.CZ.on(input_qubit[3],input_qubit[2])) # number=25
c.append(cirq.H.on(input_qubit[2])) # number=26
c.append(cirq.H.on(input_qubit[2])) # number=7
c.append(cirq.H.on(input_qubit[3])) # number=8
c.append(cirq.H.on(input_qubit[2])) # number=42
c.append(cirq.CZ.on(input_qubit[0],input_qubit[2])) # number=43
c.append(cirq.H.on(input_qubit[2])) # number=44
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=39
c.append(cirq.X.on(input_qubit[2])) # number=40
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=41
c.append(cirq.CNOT.on(input_qubit[0],input_qubit[2])) # number=31
c.append(cirq.H.on(input_qubit[3])) # number=16
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=17
c.append(cirq.H.on(input_qubit[3])) # number=18
c.append(cirq.X.on(input_qubit[3])) # number=14
c.append(cirq.H.on(input_qubit[3])) # number=32
c.append(cirq.CZ.on(input_qubit[0],input_qubit[3])) # number=33
c.append(cirq.H.on(input_qubit[3])) # number=34
c.append(cirq.rx(-1.928937889304133).on(input_qubit[1])) # number=35
c.append(cirq.Y.on(input_qubit[2])) # number=10
c.append(cirq.Y.on(input_qubit[2])) # number=11
c.append(cirq.X.on(input_qubit[1])) # number=20
c.append(cirq.X.on(input_qubit[1])) # number=21
c.append(cirq.X.on(input_qubit[3])) # number=27
c.append(cirq.X.on(input_qubit[3])) # number=28
# circuit end
c.append(cirq.measure(*input_qubit, key='result'))
return c
def bitstring(bits):
|
if __name__ == '__main__':
qubit_count = 4
input_qubits = [cirq.GridQubit(i, 0) for i in range(qubit_count)]
circuit = make_circuit(qubit_count,input_qubits)
circuit = cg.optimized_for_sycamore(circuit, optimizer_type='sqrt_iswap')
circuit_sample_count =2000
simulator = cirq.Simulator()
result = simulator.run(circuit, repetitions=circuit_sample_count)
frequencies = result.histogram(key='result', fold_func=bitstring)
writefile = open("../data/startCirq3299.csv","w+")
print(format(frequencies),file=writefile)
print("results end", file=writefile)
print(circuit.__len__(), file=writefile)
print(circuit,file=writefile)
writefile.close() | return ''.join(str(int(b)) for b in bits) |
typeOfActivity.controller.ts | import { Controller, Res, Param, HttpStatus, Body, Post, Get, Put, UseGuards, Delete } from '@nestjs/common';
import { TypeOfActivityService } from './typeOfActivity.service';
import { TypeActivityDTO } from './dto/typeOfActivity.dto';
import { JwtAuthGuard } from '../auth/guards/jwt-auth.guard';
@UseGuards(JwtAuthGuard)
@Controller('/api/typeofactivity')
export class | {
constructor(private typeOfActivityService: TypeOfActivityService) {}
@Get('/')
async getTypes(@Res() res) {
try {
const response = await this.typeOfActivityService.getTypes();
return res.status(HttpStatus.OK).json({
message: 'List of Types of activities',
response
});
} catch (err) {
return res.status(HttpStatus.BAD_REQUEST).json({
err: err.message
});
}
}
@Get('/:typeId')
async getType(@Res() res, @Param('typeId') typeId) {
try {
const response = await this.typeOfActivityService.getType(typeId);
return res.status(HttpStatus.OK).json({
response
});
} catch (err) {
return res.status(HttpStatus.BAD_REQUEST).json({
err: err.message
});
}
}
@Post('/')
async createType(@Body() data: TypeActivityDTO, @Res() res) {
try {
const response = await this.typeOfActivityService.createType(data);
return res.status(HttpStatus.OK).json({
message: 'Type of activity Created',
response
});
} catch (err) {
return res.status(HttpStatus.BAD_REQUEST).json({
err: err.message
});
}
}
@Put('/update/:typeId')
async updateType(@Body() data: TypeActivityDTO, @Res() res, @Param('typeId') typeId) {
try {
const response = await this.typeOfActivityService.updateType(typeId, data);
return res.status(HttpStatus.OK).json({
message: 'Type of activity updated!',
response
});
} catch (err) {
return res.status(HttpStatus.BAD_REQUEST).json({
err: err.message
});
}
}
@Delete('/delete/:typeId')
async deleteType(@Res() res, @Param('typeId') typeId) {
try {
await this.typeOfActivityService.deleteType(typeId);
return res.status(HttpStatus.OK).json({
message: 'Type of activity deleted.'
});
} catch (err) {
return res.status(HttpStatus.BAD_REQUEST).json({
message: 'An error has occurred',
err: err.message
});
}
}
}
| TypeOfActivityController |
calculator.js | /**
* Title: react-redux-calculator
* Author: Jaehyun Park
* History:
* Aug/19/2019 - creates file.
* Aug/20/2019 - toInfix().
*/
import stack from "./stack";
/**
* Token Types and Separator
*/
const tknT = {
UNKNOWN: -1,
OPERAND: 0,
OPERATOR: 1,
SEPERATOR: ' '
}
/**
* Operator Types
*/
const opT = {
ADDITION: '+',
SUBTRACTION: '-',
MULTIPLICATION: '*',
DIVISION: '/',
MODULUS: '%',
POWER: '^',
LEFT_PARENTHESIS: '(',
RIGHT_PARENTHESIS: ')'
}
/**
* function getType
* note:
* gets the token type of the input character.
* @param {*} c
*/
const getType = c => {
if(!isNaN(c) || c === '.')
return tknT.OPERAND;
switch(c) {
case opT.ADDITION:
case opT.SUBTRACTION:
case opT.MULTIPLICATION:
case opT.DIVISION:
case opT.MODULUS:
case opT.POWER:
case opT.LEFT_PARENTHESIS:
case opT.RIGHT_PARENTHESIS:
return tknT.OPERATOR;
default:
return tknT.UNKNOWN;
}
}
/**
* function getPriority
* note:
* gets the operator priority of the input character.
* @param {*} c
*/
const getPriority = c => {
let priority = -1;
switch(c) {
case opT.POWER:
priority = 0;
break;
case opT.MULTIPLICATION:
case opT.DIVISION:
case opT.MODULUS:
priority = 1;
break;
case opT.ADDITION:
case opT.SUBTRACTION:
priority = 2;
break;
case opT.LEFT_PARENTHESIS:
priority = 3;
break;
default:
break;
}
return priority;
}
/**
* Boolean Functions
*/
const isString = exp => {return typeof exp === 'string'}
const isOperand = c => {return getType(c) === tknT.OPERAND}
const isOperator = c => {return getType(c) === tknT.OPERATOR}
const isLeftParenthesis = c => {return c === opT.LEFT_PARENTHESIS}
const isRightParenthesis = c => {return c === opT.RIGHT_PARENTHESIS}
const isBlank = c => {return c === ' '}
const isPrior = (c1, c2) => {return getPriority(c1) > getPriority(c2)}
const isPriorOrEqual = (c1, c2) => {return getPriority(c1) >= getPriority(c2)}
const isSign = (str, pos) => {
/**
* Check if the character at `pos` is a sign.
* 1. if the string starts with '+' or '-', it's a sign.
* 2. if the previous character is an operator (other than a right parenthesis) and the
* following character is a number, it's also a sign.
*/
if(str[pos] === '+' || str[pos] === '-') {
if((pos === 0) ||
((str[pos-1] !== opT.RIGHT_PARENTHESIS) &&
(getType(str[pos-1]) === tknT.OPERATOR) &&
(getType(str[pos+1]) === tknT.OPERAND))) {
return true;
}
}
return false;
}
/**
* function toPostfix
* note:
* transforms infix expression into postfix.
* @param {*} infix
*/
const toPostfix = infix => {
let postfix = "";
stack.clear();
try {
// traverse infix expression
for(let pos=0; pos<infix.length; pos++) {
let c = infix[pos];
// number case
if(isBlank(c)) continue;
if(isOperand(c) || isSign(infix, pos)) {
postfix += c;
continue;
}
else if(!isOperator(c))
throw new Error('Wrong expression');
// operator case
if(isLeftParenthesis(c))
stack.push(c);
else if(isRightParenthesis(c)) { | postfix += stack.pop();
}
if(stack.isEmpty())
throw new Error('Wrong expression: no left parenthesis')
else // pop left parenthesis
stack.pop();
}
else { // operators except for parentheses
postfix += tknT.SEPERATOR; // add a token separator
while(!stack.isEmpty() && isPriorOrEqual(c, stack.peek())) {
postfix += stack.pop();
postfix += tknT.SEPERATOR;
}
stack.push(c);
}
}
// pop the rest operators
while(!stack.isEmpty()) {
postfix += tknT.SEPERATOR;
postfix += stack.pop();
}
} catch(e) {
console.log(e.name + ' : ' + e.message);
}
return postfix;
}
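/**
 * Illustrative usage sketch (not part of the original file; expected results assume
 * the shunting-yard conversion implemented above):
 *   toPostfix('1+2*3')    // => '1 2 3 * +'
 *   calculate('1+2*3')    // => 7
 *   calculate('(1+2)*3')  // => 9
 */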
/**
* function toInfix
* note:
* transforms postfix expression into infix.
* @param {*} postfix
*/
const toInfix = postfix => {
let infix, rval, lval;
let prev; // previous item
stack.clear();
let tokens = postfix.split(tknT.SEPERATOR);
// TODO: add error-throwing cases.
try {
tokens.forEach(item => {
if (!prev) prev = item;
if (isOperand(item)) {
stack.push(item);
} else {
// if an operator is found, pop the previous two operands,
// then build and push the infix expression formed from them.
rval = stack.pop();
lval = stack.pop();
if(stack.getSize() > 0 && isPrior(item, prev))
stack.push(`( ${lval} ${item} ${rval} )`);
else
stack.push(`${lval} ${item} ${rval}`);
prev = item;
}
});
infix = stack.pop().replace(/\s/g, ''); // remove blank spaces for infix
} catch(e) {
console.log(e.name + ' : ' + e.message);
}
return infix;
}
/**
* function calculate
* note:
* calculates the input expression and returns the result.
* the expression may be infix (default) or postfix, as selected by `mode`.
* @param {*} expression
* @param {*} mode
*/
const calculate = (expression, mode='infix') => {
if (!isString(expression)) return '';
if (mode === 'infix') expression = toPostfix(expression);
let result, rval, lval = 0;
let tokens = expression.split(tknT.SEPERATOR); // tokenize postfix expression
stack.clear();
try {
tokens.forEach(item => {
if(isNaN(item)) { // operator case
if(stack.getSize() < 2)
throw new Error('Wrong expression');
rval = parseFloat(stack.pop());
lval = parseFloat(stack.pop());
switch(item[0]) {
case opT.ADDITION:
stack.push(lval + rval);
break;
case opT.SUBTRACTION:
stack.push(lval - rval);
break;
case opT.MULTIPLICATION:
stack.push(lval * rval);
break;
case opT.DIVISION:
stack.push(lval / rval);
break;
case opT.MODULUS:
stack.push(lval % rval);
break;
case opT.POWER:
stack.push(lval ** rval);
break;
default:
break;
}
}
else { // operand case (number)
stack.push(item);
}
});
result = stack.pop();
} catch(e) {
console.log(e.name + ' : ' + e.message);
}
return result;
}
export default calculate;
export { toPostfix, toInfix }; | while(!stack.isEmpty() && !isLeftParenthesis(stack.peek())) {
postfix += tknT.SEPERATOR; // add a token separator
fitness_commission_summary.py | # Copyright (c) 2013, Blue Lynx and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe import _
import math
from frappe.utils import getdate, get_time, flt
from datetime import datetime, timedelta, date, time
import calendar
def execute(filters=None):
columns, data = [], []
if filters:
columns = get_column()
data = get_data(filters)
return columns, data
def get_column():
columns = [
{
"label": "Staff Name",
"fieldname": "staff_name",
"fieldtype": "Data",
"width": 120
},
{
"label": "PT Count (Hours)",
"fieldname": "pt_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "GX Count (Hours)",
"fieldname": "gx_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "Others (Hours)",
"fieldname": "ot_count",
"fieldtype": "Data",
"width": 120
},
{
"label": "PT Commissions",
"fieldname": "pt_commission",
"fieldtype": "Currency",
"width": 150
},
{
"label": "GX Commissions",
"fieldname": "gc_commission",
"fieldtype": "Currency",
"width": 150
},
{
"label": "Other Commissions",
"fieldname": "other_commission",
"fieldtype": "Currency",
"width": 150,
"default": 0.0
},
{
"label": "Total Commission",
"fieldname": "total_commission",
"fieldtype": "Currency",
"width": 150
}
]
return columns
def get_data(filters):
data = []
final_data = []
year = int(filters['year'])
if 'date_range' in filters:
if filters['date_range'] == "Month":
month = filters['month']
month_number = int(datetime.strptime(month, '%B').month)
last_day = calendar.monthrange(year, month_number)[1]
start_date = datetime(year, month_number, 1)
start = start_date.date()
end_date = datetime(year, month_number, last_day)
end = end_date.date()
elif filters['date_range'] == "Custom Range":
start = getdate(filters['from_date'])
end = getdate( filters['to_date'])
if 'service_staff' in filters:
staff_list = frappe.get_all('Service Staff', filters={'name': filters['service_staff']})
else:
staff_list = frappe.db.get_list('Service Staff', filters=[['fitness_check', '=', 1]], fields=['name'])
settings = frappe.get_doc('Fitness Training Settings')
if staff_list:
for staff in staff_list:
pt_count = 0.0
ot_count = 0.0
other_commission = 0.0
service_staff = frappe.get_doc('Service Staff', staff.name)
if service_staff.fitness_service_assignment:
for services in service_staff.fitness_service_assignment:
if services.commission_applicable:
appointments_list = frappe.db.get_list('Fitness Training Appointment', filters=[['fitness_service', '=', services.fitness_package], ['appointment_date', 'between', [start, end]], ['payment_status', '=', 'Paid'], ['service_staff', '=', staff.name], ['appointment_status', 'in', {'Completed', 'No Show'}]], fields=['name', 'fitness_service'])
if services.commission_type == "Standard":
if appointments_list:
for appointments in appointments_list:
pt_service = frappe.get_doc('Fitness Services', appointments.fitness_service)
if pt_service.session_for == "Single":
pt_count += settings.single_session
elif pt_service.session_for == "Couple":
pt_count += settings.couple_session
elif services.commission_type == "Custom":
if appointments_list:
for appointments in appointments_list:
other_commission += services.commission_amount
ot_count += 1
staff['staff_name']= staff.name
staff['pt_count'] = pt_count
staff['ot_count'] = ot_count
staff['other_commission'] = other_commission
gc = []
gc_list = frappe.db.get_list('Group Class', filters=[['class_date', 'between', [start, end]], ['trainer_name', '=', staff.name], ['class_status', '=', 'Completed']], fields=['count(name) as gx_count'], group_by='trainer_name')
if gc_list:
for group_class in gc_list:
group_class_attendee = frappe.get_all('Group Class Attendees', filters={'group_class': group_class.name, 'attendee_status': 'Complete' })
if group_class_attendee:
if len(group_class_attendee) >= 3:
gc.append(group_class)
staff['gx_count'] = len(gc)
data.append(staff)
for row in data:
row['gc_commission'] = float(row['gx_count']) * float(settings.group_class_rate)
pt = calculate_pt(row['pt_count'], row['gx_count'])
row['pt_commission'] = pt
row['total_commission'] = row['gc_commission'] + row['pt_commission'] + row['other_commission']
final_data.append(row)
return final_data
def | ():
year = 2021
months = 'July'
month_number = datetime.strptime(months, '%B').month
last_day = calendar.monthrange(year, month_number)[1]
start_date = datetime(year, month_number, 1)
start = start_date.date()
end_date = datetime(year, month_number, last_day)
end = end_date.date()
staff_list = frappe.db.get_list('Service Staff', filters=[['fitness_check', '=', 1]], fields=['name'])
for staff in staff_list:
gc_list = frappe.db.get_list('Group Class', filters=[['class_date', 'between', [start, end]], ['trainer_name', '=', 'Jatinder'], ['class_status', '=', 'Completed']], fields=['count(name) as gc_count'], group_by='trainer_name')
for gc in gc_list:
return type(gc.gc_count)
@frappe.whitelist()
def calculate_pt(pt_count, gx_count):
total_count = pt_count + gx_count
scale = {(0, 30): 40, (30, 60): 60, (60, 90): 80, (90, 120): 100, (120, 150): 120, (150, math.inf): 140}
hours_worked = total_count
decimal_rate = next(rate for (lower, upper), rate in scale.items() if lower <= hours_worked and upper >= hours_worked)
decimal_end = hours_worked - int(hours_worked)
end_pay = decimal_end * decimal_rate
# Use an integer for ease of calculation
hours_worked = int(hours_worked)
hours_paid_for = 0
# Beginning total pay is just the decimal "ending"
total_pay = end_pay
while hours_paid_for < hours_worked:
# Find the rate for the current bucket of hours
rate_filter = (rate for (lower, upper), rate in scale.items() if lower <= hours_paid_for and hours_paid_for < upper)
current_level = next(rate_filter)
total_pay += current_level
hours_paid_for += 1
total_session = total_pay
scale_1 = {(0, 30): 40, (30, 60): 60, (60, 90): 80, (90, 120): 100, (120, 150): 120, (150, math.inf): 140}
hours_worked_1 = gx_count
decimal_rate_1 = next(rate for (lower, upper), rate in scale_1.items() if lower <= hours_worked_1 and upper >= hours_worked_1)
decimal_end_1 = hours_worked_1 - int(hours_worked_1)
end_pay_1 = decimal_end_1 * decimal_rate_1
# Use an integer for ease of calculation
hours_worked_1 = int(hours_worked_1)
hours_paid_for_1 = 0
# Beginning total pay is just the decimal "ending"
total_pay_1 = end_pay_1
while hours_paid_for_1 < hours_worked_1:
# Find the rate for the current bucket of hours
rate_filter = (rate for (lower, upper), rate in scale_1.items() if lower <= hours_paid_for_1 and hours_paid_for_1 < upper)
current_level = next(rate_filter)
total_pay_1 += current_level
hours_paid_for_1 += 1
total_gc = total_pay_1
commission_from_pt = total_session - total_gc
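# Illustrative worked example (an assumption, not part of the original report):
# calculate_pt(pt_count=45, gx_count=0) walks the tiered scale as
#   30 hours * 40 + 15 hours * 60 = 2100
# and, with no group-class hours to subtract, should return 2100.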
return commission_from_pt | month |
files_2.js | var searchData=
[
['data_2emd_726',['data.md',['../data_8md.html',1,'']]],
['database_2ecpp_727',['Database.cpp',['../_database_8cpp.html',1,'']]],
['database_2eh_728',['Database.h',['../_database_8h.html',1,'']]],
['database_2emd_729',['database.md',['../database_8md.html',1,'']]], | ['datamanager_2eh_731',['DataManager.h',['../_data_manager_8h.html',1,'']]]
]; | ['datamanager_2ecpp_730',['DataManager.cpp',['../_data_manager_8cpp.html',1,'']]], |
paths_windows.go | // Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package paths
import (
"os"
"path/filepath"
"strings"
"golang.org/x/sys/windows"
"tailscale.com/util/winutil"
)
// ensureStateDirPerms applies a restrictive ACL to the directory specified by dirPath. | // DACL: Full control to the current user and to the Administrators group.
// (We include Administrators so that admin users may still access logs;
// granting access exclusively to LocalSystem would require admins to use
// special tools to access the Log directory)
// Inheritance: The directory does not inherit the ACL from its parent.
// However, any directories and/or files created within this
// directory *do* inherit the ACL that we are setting.
func ensureStateDirPerms(dirPath string) error {
fi, err := os.Stat(dirPath)
if err != nil {
return err
}
if !fi.IsDir() {
return os.ErrInvalid
}
if strings.ToLower(filepath.Base(dirPath)) != "tailscale" {
return nil
}
// We need the info for our current user as SIDs
sids, err := winutil.GetCurrentUserSIDs()
if err != nil {
return err
}
// We also need the SID for the Administrators group so that admins may
// easily access logs.
adminGroupSid, err := windows.CreateWellKnownSid(windows.WinBuiltinAdministratorsSid)
if err != nil {
return err
}
// Munge the SIDs into the format required by EXPLICIT_ACCESS.
userTrustee := windows.TRUSTEE{nil, windows.NO_MULTIPLE_TRUSTEE,
windows.TRUSTEE_IS_SID, windows.TRUSTEE_IS_USER,
windows.TrusteeValueFromSID(sids.User)}
adminTrustee := windows.TRUSTEE{nil, windows.NO_MULTIPLE_TRUSTEE,
windows.TRUSTEE_IS_SID, windows.TRUSTEE_IS_WELL_KNOWN_GROUP,
windows.TrusteeValueFromSID(adminGroupSid)}
// We declare our access rights via this array of EXPLICIT_ACCESS structures.
// We set full access to our user and to Administrators.
// We configure the DACL such that any files or directories created within
// dirPath will also inherit this DACL.
explicitAccess := []windows.EXPLICIT_ACCESS{
{
windows.GENERIC_ALL,
windows.SET_ACCESS,
windows.SUB_CONTAINERS_AND_OBJECTS_INHERIT,
userTrustee,
},
{
windows.GENERIC_ALL,
windows.SET_ACCESS,
windows.SUB_CONTAINERS_AND_OBJECTS_INHERIT,
adminTrustee,
},
}
dacl, err := windows.ACLFromEntries(explicitAccess, nil)
if err != nil {
return err
}
// We now reset the file's owner, primary group, and DACL.
// We also must pass PROTECTED_DACL_SECURITY_INFORMATION so that our new ACL
// does not inherit any ACL entries from the parent directory.
const flags = windows.OWNER_SECURITY_INFORMATION |
windows.GROUP_SECURITY_INFORMATION |
windows.DACL_SECURITY_INFORMATION |
windows.PROTECTED_DACL_SECURITY_INFORMATION
return windows.SetNamedSecurityInfo(dirPath, windows.SE_FILE_OBJECT, flags,
sids.User, sids.PrimaryGroup, dacl, nil)
}
// LegacyStateFilePath returns the legacy path to the state file when it was stored under the
// current user's %LocalAppData%.
func LegacyStateFilePath() string {
return filepath.Join(os.Getenv("LocalAppData"), "Tailscale", "server-state.conf")
} | // It sets the following security attributes on the directory:
// Owner: The user for the current process;
// Primary Group: The primary group for the current process; |
test_getitem.py | #!/usr/bin/env python
# BSD 3-Clause License; see https://github.com/scikit-hep/aghast/blob/master/LICENSE
import sys
import unittest
import numpy
from aghast import *
class Test(unittest.TestCase):
def runTest(self):
|
def test_getitem_twodim(self):
a = Histogram(
[Axis(IntegerBinning(0, 3)), Axis(IntegerBinning(0, 2))],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(
numpy.array(
[
[10, 100, 1000],
[20, 200, 2000],
[30, 300, 3000],
[40, 400, 4000],
]
)
)
),
)
a.checkvalid()
assert a.axis[0].binning.toCategoryBinning().categories == ["0", "1", "2", "3"]
assert a.axis[1].binning.toCategoryBinning().categories == ["0", "1", "2"]
assert a.counts.counts.array.tolist() == [
[10, 100, 1000],
[20, 200, 2000],
[30, 300, 3000],
[40, 400, 4000],
]
assert a.counts[None, None] == sum(
[10, 100, 1000, 20, 200, 2000, 30, 300, 3000, 40, 400, 4000]
)
assert a.counts[None, :].tolist() == [100, 1000, 10000]
assert a.counts[None].tolist() == [100, 1000, 10000]
assert a.counts[:, None].tolist() == [1110, 2220, 3330, 4440]
assert a.counts[None, 1] == 1000
assert a.counts[1, None] == 2220
assert a.counts[None, 1:].tolist() == [1000, 10000]
assert a.counts[1:, None].tolist() == [2220, 3330, 4440]
assert a.counts[None, [2, 1, 1, 0]].tolist() == [10000, 1000, 1000, 100]
assert a.counts[[3, 2, 2, 0], None].tolist() == [4440, 3330, 3330, 1110]
assert a.counts[None, [True, False, True]].tolist() == [100, 10000]
assert a.counts[[False, True, True, False], None].tolist() == [2220, 3330]
assert a.counts[:, :].tolist() == [
[10, 100, 1000],
[20, 200, 2000],
[30, 300, 3000],
[40, 400, 4000],
]
assert a.counts[:].tolist() == [
[10, 100, 1000],
[20, 200, 2000],
[30, 300, 3000],
[40, 400, 4000],
]
assert a.counts[1:, :].tolist() == [
[20, 200, 2000],
[30, 300, 3000],
[40, 400, 4000],
]
assert a.counts[1:].tolist() == [
[20, 200, 2000],
[30, 300, 3000],
[40, 400, 4000],
]
assert a.counts[:, 1:].tolist() == [
[100, 1000],
[200, 2000],
[300, 3000],
[400, 4000],
]
assert a.counts[2:, 1:].tolist() == [[300, 3000], [400, 4000]]
assert a.counts[:, 1].tolist() == [100, 200, 300, 400]
assert a.counts[1, :].tolist() == [20, 200, 2000]
assert a.counts[1].tolist() == [20, 200, 2000]
assert a.counts[2:, 1].tolist() == [300, 400]
assert a.counts[1, 2:].tolist() == [2000]
assert a.counts[:, [2, 0]].tolist() == [
[1000, 10],
[2000, 20],
[3000, 30],
[4000, 40],
]
assert a.counts[[2, 0], :].tolist() == [[30, 300, 3000], [10, 100, 1000]]
assert a.counts[1:, [2, 0]].tolist() == [[2000, 20], [3000, 30], [4000, 40]]
assert a.counts[[2, 0], 1:].tolist() == [[300, 3000], [100, 1000]]
assert a.counts[:, [True, False, True]].tolist() == [
[10, 1000],
[20, 2000],
[30, 3000],
[40, 4000],
]
assert a.counts[[False, True, True, False], :].tolist() == [
[20, 200, 2000],
[30, 300, 3000],
]
assert a.counts[1:, [True, False, True]].tolist() == [
[20, 2000],
[30, 3000],
[40, 4000],
]
assert a.counts[[False, True, True, False], 1:].tolist() == [
[200, 2000],
[300, 3000],
]
assert a.counts[1, 2] == 2000
assert a.counts[1, [2, 2, 0]].tolist() == [2000, 2000, 20]
assert a.counts[[2, 2, 0], 1].tolist() == [300, 300, 100]
assert a.counts[1, [True, False, True]].tolist() == [20, 2000]
assert a.counts[[False, True, True, False], 1].tolist() == [200, 300]
assert a.counts[[1, 2], [2, 0]].tolist() == [[2000, 20], [3000, 30]]
assert a.counts[[False, True, True, False], [2, 0]].tolist() == [
[2000, 20],
[3000, 30],
]
assert a.counts[[False, True, True, False], [True, False, True]].tolist() == [
[20, 2000],
[30, 3000],
]
assert a.counts[[2, 0], [2, 2, 0]].tolist() == [
[3000, 3000, 30],
[1000, 1000, 10],
]
assert a.counts[[2, 0], [True, False, True]].tolist() == [
[30, 3000],
[10, 1000],
]
assert a.counts[[True, False, True, False], [True, False, True]].tolist() == [
[10, 1000],
[30, 3000],
]
def test_getitem_IntegerBinning(self):
a = Histogram(
[Axis(IntegerBinning(-5, 5))],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(numpy.arange(11, dtype=int))
),
)
assert a.axis[0].binning.toCategoryBinning().categories == [
"-5",
"-4",
"-3",
"-2",
"-1",
"0",
"1",
"2",
"3",
"4",
"5",
]
assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert a.counts[None] == 55
assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]
assert a.counts[5] == 5
assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]
assert a.counts[numpy.array([7, 4, 7, 5, -1])].tolist() == [7, 4, 7, 5, 10]
assert a.counts[
[True, False, True, False, True, False, True, False, True, False, True]
].tolist() == [0, 2, 4, 6, 8, 10]
assert a.counts[
numpy.array(
[True, False, True, False, True, False, True, False, True, False, True]
)
].tolist() == [0, 2, 4, 6, 8, 10]
a = Histogram(
[Axis(IntegerBinning(-5, 5, loc_overflow=IntegerBinning.above1))],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]
)
),
)
assert a.axis[0].binning.toCategoryBinning().categories == [
"-5",
"-4",
"-3",
"-2",
"-1",
"0",
"1",
"2",
"3",
"4",
"5",
"[6, +inf)",
]
assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]
assert a.counts[None] == 55 + 999
assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]
assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]
assert a.counts[5] == 5
assert a.counts[numpy.inf] == 999
assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]
assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]
assert a.counts[
[True, False, True, False, True, False, True, False, True, False, True]
].tolist() == [0, 2, 4, 6, 8, 10]
a = Histogram(
[Axis(IntegerBinning(-5, 5, loc_overflow=IntegerBinning.below1))],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(
[999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
)
),
)
assert a.axis[0].binning.toCategoryBinning().categories == [
"[6, +inf)",
"-5",
"-4",
"-3",
"-2",
"-1",
"0",
"1",
"2",
"3",
"4",
"5",
]
assert a.counts.counts.array.tolist() == [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert a.counts[None] == 55 + 999
assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]
assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]
assert a.counts[5] == 5
assert a.counts[numpy.inf] == 999
assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]
assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]
assert a.counts[
[True, False, True, False, True, False, True, False, True, False, True]
].tolist() == [0, 2, 4, 6, 8, 10]
a = Histogram(
[
Axis(
IntegerBinning(
-5,
5,
loc_underflow=IntegerBinning.below2,
loc_overflow=IntegerBinning.below1,
)
)
],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(
[123, 999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
)
),
)
assert a.axis[0].binning.toCategoryBinning().categories == [
"(-inf, -6]",
"[6, +inf)",
"-5",
"-4",
"-3",
"-2",
"-1",
"0",
"1",
"2",
"3",
"4",
"5",
]
assert a.counts.counts.array.tolist() == [
123,
999,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
]
assert a.counts[None] == 55 + 123 + 999
assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]
assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]
assert a.counts[-numpy.inf : 5].tolist() == [123, 0, 1, 2, 3, 4]
assert a.counts[-numpy.inf : numpy.inf].tolist() == [
123,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
999,
]
assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]
assert a.counts[-numpy.inf :].tolist() == [
123,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
]
assert a.counts[5] == 5
assert a.counts[-numpy.inf] == 123
assert a.counts[numpy.inf] == 999
assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]
assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]
assert a.counts[[7, 4, 7, numpy.inf, 5, -numpy.inf, -1]].tolist() == [
7,
4,
7,
999,
5,
123,
10,
]
assert a.counts[
[7, -numpy.inf, 4, 7, numpy.inf, 5, -numpy.inf, -1]
].tolist() == [7, 123, 4, 7, 999, 5, 123, 10]
assert a.counts[
[True, False, True, False, True, False, True, False, True, False, True]
].tolist() == [0, 2, 4, 6, 8, 10]
a = Histogram(
[
Axis(
IntegerBinning(
-5,
5,
loc_underflow=IntegerBinning.above1,
loc_overflow=IntegerBinning.below1,
)
)
],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(
[999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 123]
)
),
)
assert a.axis[0].binning.toCategoryBinning().categories == [
"[6, +inf)",
"-5",
"-4",
"-3",
"-2",
"-1",
"0",
"1",
"2",
"3",
"4",
"5",
"(-inf, -6]",
]
assert a.counts.counts.array.tolist() == [
999,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
123,
]
assert a.counts[None] == 55 + 123 + 999
assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
assert a.counts[5:].tolist() == [5, 6, 7, 8, 9, 10]
assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 10, 999]
assert a.counts[-numpy.inf : 5].tolist() == [123, 0, 1, 2, 3, 4]
assert a.counts[-numpy.inf : numpy.inf].tolist() == [
123,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
999,
]
assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 999]
assert a.counts[-numpy.inf :].tolist() == [
123,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
10,
]
assert a.counts[5] == 5
assert a.counts[-numpy.inf] == 123
assert a.counts[numpy.inf] == 999
assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 10]
assert a.counts[[7, 4, 7, numpy.inf, 5, -1]].tolist() == [7, 4, 7, 999, 5, 10]
assert a.counts[[7, 4, 7, numpy.inf, 5, -numpy.inf, -1]].tolist() == [
7,
4,
7,
999,
5,
123,
10,
]
assert a.counts[
[7, -numpy.inf, 4, 7, numpy.inf, 5, -numpy.inf, -1]
].tolist() == [7, 123, 4, 7, 999, 5, 123, 10]
assert a.counts[
[True, False, True, False, True, False, True, False, True, False, True]
].tolist() == [0, 2, 4, 6, 8, 10]
def test_getitem_RegularBinning(self):
a = Histogram(
[Axis(RegularBinning(10, RealInterval(-5, 5)))],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(numpy.arange(10, dtype=int))
),
)
assert a.axis[0].binning.toCategoryBinning().categories == [
"[-5, -4)",
"[-4, -3)",
"[-3, -2)",
"[-2, -1)",
"[-1, 0)",
"[0, 1)",
"[1, 2)",
"[2, 3)",
"[3, 4)",
"[4, 5)",
]
assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]
assert a.counts[5] == 5
assert a.counts[[7, 4, 7, 5, -1]].tolist() == [7, 4, 7, 5, 9]
assert a.counts[numpy.array([7, 4, 7, 5, -1])].tolist() == [7, 4, 7, 5, 9]
assert a.counts[
[True, False, True, False, True, False, True, False, True, False]
].tolist() == [0, 2, 4, 6, 8]
assert a.counts[
numpy.array(
[True, False, True, False, True, False, True, False, True, False]
)
].tolist() == [0, 2, 4, 6, 8]
a = Histogram(
[
Axis(
RegularBinning(
10,
RealInterval(-5, 5),
overflow=RealOverflow(loc_overflow=RealOverflow.above1),
)
)
],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(
numpy.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999], dtype=int)
)
),
)
assert a.axis[0].binning.toCategoryBinning().categories == [
"[-5, -4)",
"[-4, -3)",
"[-3, -2)",
"[-2, -1)",
"[-1, 0)",
"[0, 1)",
"[1, 2)",
"[2, 3)",
"[3, 4)",
"[4, 5)",
"[5, +inf]",
]
assert a.counts.counts.array.tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]
assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999
assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]
assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]
assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]
assert a.counts[5] == 5
assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.inf, -1]].tolist() == [
7,
999,
4,
7,
5,
999,
9,
]
assert a.counts[
numpy.array([7, numpy.inf, 4, 7, 5, numpy.inf, -1])
].tolist() == [7, 999, 4, 7, 5, 999, 9]
assert a.counts[
[True, False, True, False, True, False, True, False, True, False]
].tolist() == [0, 2, 4, 6, 8]
assert a.counts[
numpy.array(
[True, False, True, False, True, False, True, False, True, False]
)
].tolist() == [0, 2, 4, 6, 8]
a = Histogram(
[
Axis(
RegularBinning(
10,
RealInterval(-5, 5),
overflow=RealOverflow(loc_overflow=RealOverflow.below1),
)
)
],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(
numpy.array([999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int)
)
),
)
assert a.axis[0].binning.toCategoryBinning().categories == [
"[5, +inf]",
"[-5, -4)",
"[-4, -3)",
"[-3, -2)",
"[-2, -1)",
"[-1, 0)",
"[0, 1)",
"[1, 2)",
"[2, 3)",
"[3, 4)",
"[4, 5)",
]
assert a.counts.counts.array.tolist() == [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999
assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]
assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]
assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]
assert a.counts[5] == 5
assert a.counts[numpy.inf] == 999
assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.inf, -1]].tolist() == [
7,
999,
4,
7,
5,
999,
9,
]
assert a.counts[
numpy.array([7, numpy.inf, 4, 7, 5, numpy.inf, -1])
].tolist() == [7, 999, 4, 7, 5, 999, 9]
assert a.counts[
[True, False, True, False, True, False, True, False, True, False]
].tolist() == [0, 2, 4, 6, 8]
assert a.counts[
numpy.array(
[True, False, True, False, True, False, True, False, True, False]
)
].tolist() == [0, 2, 4, 6, 8]
a = Histogram(
[
Axis(
RegularBinning(
10,
RealInterval(-5, 5),
overflow=RealOverflow(
loc_overflow=RealOverflow.below2,
loc_nanflow=RealOverflow.below1,
),
)
)
],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(
numpy.array([999, 123, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=int)
)
),
)
assert a.axis[0].binning.toCategoryBinning().categories == [
"[5, +inf]",
"{nan}",
"[-5, -4)",
"[-4, -3)",
"[-3, -2)",
"[-2, -1)",
"[-1, 0)",
"[0, 1)",
"[1, 2)",
"[2, 3)",
"[3, 4)",
"[4, 5)",
]
assert a.counts.counts.array.tolist() == [
999,
123,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
]
assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999 + 123
assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]
assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]
assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]
assert a.counts[5] == 5
assert a.counts[numpy.inf] == 999
assert a.counts[numpy.nan] == 123
assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.nan, -1]].tolist() == [
7,
999,
4,
7,
5,
123,
9,
]
if sys.version_info[0] >= 3:
exec(
"assert a.counts[[numpy.inf, ..., numpy.nan]].tolist() == [999, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 123]"
)
assert a.counts[
numpy.array([7, numpy.inf, 4, 7, 5, numpy.nan, -1])
].tolist() == [7, 999, 4, 7, 5, 123, 9]
assert a.counts[
[True, False, True, False, True, False, True, False, True, False]
].tolist() == [0, 2, 4, 6, 8]
assert a.counts[
numpy.array(
[True, False, True, False, True, False, True, False, True, False]
)
].tolist() == [0, 2, 4, 6, 8]
a = Histogram(
[
Axis(
RegularBinning(
10,
RealInterval(-5, 5),
overflow=RealOverflow(
loc_overflow=RealOverflow.above1,
loc_nanflow=RealOverflow.below1,
),
)
)
],
UnweightedCounts(
InterpretedInlineBuffer.fromarray(
numpy.array([123, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999], dtype=int)
)
),
)
assert a.axis[0].binning.toCategoryBinning().categories == [
"{nan}",
"[-5, -4)",
"[-4, -3)",
"[-3, -2)",
"[-2, -1)",
"[-1, 0)",
"[0, 1)",
"[1, 2)",
"[2, 3)",
"[3, 4)",
"[4, 5)",
"[5, +inf]",
]
assert a.counts.counts.array.tolist() == [
123,
0,
1,
2,
3,
4,
5,
6,
7,
8,
9,
999,
]
assert a.counts[None] == sum([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]) + 999 + 123
assert a.counts[:].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
assert a.counts[: numpy.inf].tolist() == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 999]
assert a.counts[5:].tolist() == [5, 6, 7, 8, 9]
assert a.counts[5 : numpy.inf].tolist() == [5, 6, 7, 8, 9, 999]
assert a.counts[5] == 5
assert a.counts[numpy.inf] == 999
assert a.counts[numpy.nan] == 123
assert a.counts[[7, numpy.inf, 4, 7, 5, numpy.nan, -1]].tolist() == [
7,
999,
4,
7,
5,
123,
9,
]
assert a.counts[
numpy.array([7, numpy.inf, 4, 7, 5, numpy.nan, -1])
].tolist() == [7, 999, 4, 7, 5, 123, 9]
assert a.counts[
[True, False, True, False, True, False, True, False, True, False]
].tolist() == [0, 2, 4, 6, 8]
assert a.counts[
numpy.array(
[True, False, True, False, True, False, True, False, True, False]
)
].tolist() == [0, 2, 4, 6, 8]
| pass |
ClientRequestInspector.ts | 'use strict';
import { IAgent } from '../IAgent';
import { IInspector } from './IInspector';
import { IProxyEvent } from '../tracing/IProxyEvent';
import { DateTimeValue } from '../configuration/DateTimeValue';
import tracing from '../tracing/Tracing';
import * as HttpClientProxy from '../tracing/proxies/HttpClientProxy';
import { HttpHelper } from './util/HttpHelper';
import url = require('url');
import * as _ from 'lodash';
// TODO: move this to a global config object
const MAX_BODY_SIZE = 132000;
export class | implements IInspector {
private agent: IAgent;
private requests = {};
private normalizeOptions(options, req) {
// Normalize to a copy of the original options
if (typeof options === 'string') {
options = url.parse(options);
}
options = _.assign({}, options);
// Oddly, url.format ignores path and only uses pathname and search,
// so create them from the path, if path was specified
if (options.path) {
const parsedQuery = url.parse(options.path);
options.pathname = parsedQuery.pathname;
options.search = parsedQuery.search;
}
// Similarly, url.format ignores hostname and port if host is specified,
// even if host doesn't have the port, but http.request does not work
// this way. It will use the port if one is not specified in host,
// effectively treating host as hostname, but will use the port specified
// in host if it exists. Fun times.
if (options.host && options.port) {
// Force a protocol so it will parse the host as the host, not path.
// It is discarded and not used, so it doesn't matter if it doesn't match
const parsedHost = url.parse(`http://${options.host}`);
if (!parsedHost.port && options.port) {
options.hostname = options.host;
delete options.host;
}
}
// Mix in default values used by http.request and others
options.protocol = options.protocol || req.agent.protocol;
options.hostname = options.hostname || 'localhost';
return options;
}
public numOutstandingRequests() {
return Object.keys(this.requests).length;
}
public before(options, req, content, size: number, startTimeStamp: string) {
// TODO: https://github.com/Glimpse/Glimpse.Node.Prototype/issues/307
// Add support for base64 encoding non-text content by setting the encoding here
const encoding = 'utf8';
for (let i = 0; i < content.length; i++) {
if (Buffer.isBuffer(content[i])) {
content[i] = content[i].toString();
}
}
const payload = {
protocol: {
identifier: options.protocol.replace(':', '').toUpperCase(),
// This value is hard coded in Node: https://github.com/nodejs/node/blob/d0582ef9e19e8ed941b0a585c935ad11919080ee/lib/_http_client.js#L114
version: '1.1'
},
url: url.format(options),
method: req.method,
startTime: startTimeStamp,
// Note: this uses a private field on the request object. Sadly, there isn't another way to get these currently.
headers: req._headers,
isAjax: false,
clientIp: '127.0.0.1', // TODO: Is this field relevant, since it's the IP of this system? We can get the list of interfaces from os.networkInterfaces()
body: {
size,
encoding,
content: content.join(''),
isTruncated: size > content.length
}
};
this.agent.broker.sendMessage(payload, ['data-http-request'], undefined, HttpHelper.getContext(req));
}
public after(options, res, content, size: number, endTimeStamp: string, duration: number) {
// TODO: https://github.com/Glimpse/Glimpse.Node.Prototype/issues/307
// Add support for base64 encoding non-text content by setting the encoding here
const encoding = 'utf8';
for (let i = 0; i < content.length; i++) {
if (Buffer.isBuffer(content[i])) {
content[i] = content[i].toString();
}
}
const payload = {
// res.url doesn't seem to be populated in practice
url: res.url || url.format(options),
headers: res.headers,
statusCode: res.statusCode,
endTime: endTimeStamp,
duration,
body: {
size,
encoding,
content: content.join(''),
isTruncated: size > content.length
}
};
this.agent.broker.sendMessage(payload, ['data-http-response'], undefined, HttpHelper.getContext(res));
}
public init(agent: IAgent) {
this.agent = agent;
tracing.onAlways(HttpClientProxy.EVENT_REQUEST_CREATED, (event) => this.onRequestCreated(event));
tracing.onAlways(HttpClientProxy.EVENT_REQUEST_DATA_SENT, (event) => this.onRequestDataSent(event));
tracing.onAlways(HttpClientProxy.EVENT_REQUEST_END, (event) => this.onRequestEnd(event));
tracing.onAlways(HttpClientProxy.EVENT_REQUEST_ERROR, (event) => this.onRequestError(event));
tracing.onAlways(HttpClientProxy.EVENT_RESPONSE_DATA_RECEIVED, (event) => this.onResponseDataReceived(event));
tracing.onAlways(HttpClientProxy.EVENT_RESPONSE_END, (event) => this.onResponseEnd(event));
tracing.onAlways(HttpClientProxy.EVENT_RESPONSE_ERROR, (event) => this.onResponseError(event));
}
private onRequestCreated(event: IProxyEvent): void {
const eventData: HttpClientProxy.IRequestCreatedEvent = event.data;
this.requests[eventData.id] = {
startTime: event.time,
startTimeStamp: DateTimeValue.fromUnixMillisecondTimestamp(event.timestamp, event.time).format(),
options: this.normalizeOptions(eventData.options, eventData.req),
requestBodyChunks: [],
requestBodyLength: 0,
responseBodyChunks: [],
responseBodyLength: 0
};
}
private onRequestDataSent(event: IProxyEvent): void {
const eventData: HttpClientProxy.IRequestDataSentEvent = event.data;
const masterData = this.requests[eventData.id];
if (!masterData) {
throw new Error('Internal error: could not find associated master data');
}
// Save part or all of the chunk to the set of chunks,
// truncating if necessary to keep the set under the
// max body size
const originalChunkLength = eventData.chunk.length;
let normalizedChunk = eventData.chunk;
if (masterData.requestBodyLength < MAX_BODY_SIZE) {
if (masterData.requestBodyLength + originalChunkLength >= MAX_BODY_SIZE) {
normalizedChunk = normalizedChunk.slice(0, MAX_BODY_SIZE - masterData.requestBodyLength);
}
masterData.requestBodyChunks.push(normalizedChunk);
}
masterData.requestBodyLength += originalChunkLength;
}
private onRequestEnd(event: IProxyEvent): void {
const eventData: HttpClientProxy.IRequestEndEvent = event.data;
const masterData = this.requests[eventData.id];
if (!masterData) {
throw new Error('Internal error: could not find associated master data');
}
this.before(
masterData.options,
eventData.req,
masterData.requestBodyChunks,
masterData.requestBodyLength,
masterData.startTimeStamp
);
}
private onRequestError(event: IProxyEvent): void {
const eventData: HttpClientProxy.IRequestErrorEvent = event.data;
const masterData = this.requests[eventData.id];
if (!masterData) {
throw new Error('Internal error: could not find associated master data');
}
delete this.requests[eventData.id];
}
private onResponseDataReceived(event: IProxyEvent): void {
const eventData: HttpClientProxy.IResponseDataReceivedEvent = event.data;
const masterData = this.requests[eventData.id];
if (!masterData) {
throw new Error('Internal error: could not find associated master data');
}
// Save part or all of the chunk to the set of chunks,
// truncating if necessary to keep the set under the
// max body size
const originalChunkLength = eventData.chunk.length;
let normalizedChunk = eventData.chunk;
if (masterData.responseBodyLength < MAX_BODY_SIZE) {
if (masterData.responseBodyLength + originalChunkLength >= MAX_BODY_SIZE) {
normalizedChunk = normalizedChunk.slice(0, MAX_BODY_SIZE - masterData.responseBodyLength);
}
masterData.responseBodyChunks.push(normalizedChunk);
}
masterData.responseBodyLength += originalChunkLength;
}
private onResponseEnd(event: IProxyEvent): void {
const eventData: HttpClientProxy.IResponseEndEvent = event.data;
const masterData = this.requests[eventData.id];
if (!masterData) {
throw new Error('Internal error: could not find associated master data');
}
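// event.time and startTime are hrtime-style [seconds, nanoseconds] tuples;
// convert the elapsed time to milliseconds.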
const duration = (event.time[0] * 1e9 + event.time[1] -
masterData.startTime[0] * 1e9 - masterData.startTime[1]) / 1e6;
this.after(
masterData.options,
eventData.res,
masterData.responseBodyChunks,
masterData.responseBodyLength,
DateTimeValue.fromUnixMillisecondTimestamp(event.timestamp, event.time).format(),
duration
);
delete this.requests[eventData.id];
}
private onResponseError(event: IProxyEvent): void {
const eventData: HttpClientProxy.IResponseErrorEvent = event.data;
const masterData = this.requests[eventData.id];
if (!masterData) {
throw new Error('Internal error: could not find associated master data');
}
delete this.requests[eventData.id];
}
}
| ClientRequestInspector |
Vec3.js | class | {
constructor(x, y, z) {
this.x = x;
this.y = y;
this.z = z;
}
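// Cross product: returns a new Vec3 perpendicular to both this vector and v.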
cross(v) {
if (!(v instanceof Vec3)) {
throw new Error('Input argument must be of type Vec3');
}
const t = this;
return new Vec3(
(t.y * v.z) - (t.z * v.y),
(t.z * v.x) - (t.x * v.z),
(t.x * v.y) - (t.y * v.x)
);
}
mult(a) {
return this.map(x => x * a);
}
neg() {
return this.map(x => -x);
}
map(f) {
return new Vec3(f(this.x), f(this.y), f(this.z));
}
add(v) {
if (v instanceof Vec3) {
return new Vec3(this.x + v.x, this.y + v.y, this.z + v.z);
}
return this.map(x => x + v);
}
sub(v) {
if (v instanceof Vec3) {
return new Vec3(this.x - v.x, this.y - v.y, this.z - v.z);
}
return this.map(x => x - v);
}
dot(v) {
return (this.x * v.x) + (this.y * v.y) + (this.z * v.z);
}
toString() {
return `[${this.x}, ${this.y}, ${this.z}]`;
}
lengthSquared() {
return this.dot(this);
}
length() {
return Math.sqrt(this.lengthSquared());
}
normalize() {
return this.mult(1 / this.length());
}
asArray() {
return [this.x, this.y, this.z];
}
round() {
return this.map(Math.round);
}
}
export default Vec3;
| Vec3 |
leetcode_0122.go | package main
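// Greedy strategy: with unlimited transactions allowed, the maximum profit is
// the sum of every positive day-over-day price increase.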
func | (prices []int) int {
if len(prices) == 1 {
return 0
}
maxProfit := 0
var deltaProfit int
for day := 1; day < len(prices); day++ {
deltaProfit = prices[day] - prices[day-1]
if deltaProfit < 0 {
continue
}
maxProfit += deltaProfit
}
return maxProfit
}
| maxProfit2 |
cwd.rs | // /proc/[pid]/cwd
// This is a symbolic link to the current working directory of
// the process. To find out the current working directory of
// process 20, for instance, you can do this:
//
// $ cd /proc/20/cwd; /bin/pwd
//
// Note that the pwd command is often a shell built-in, and might
// not work properly. In bash(1), you may use pwd -P.
//
// In a multithreaded process, the contents of this symbolic link
// are not available if the main thread has already terminated
// (typically by calling pthread_exit(3)).
//
// Permission to dereference or read (readlink(2)) this symbolic
// link is governed by a ptrace access mode
// PTRACE_MODE_READ_FSCREDS check; see ptrace(2).
//
// -- http://man7.org/linux/man-pages/man5/proc.5.html
use std::path::PathBuf;
define_struct! {
pub struct Cwd(PathBuf);
}
pub fn cwd_of(pid: u32) -> Result<Cwd, crate::ProcErr> |
pub fn cwd_self() -> Result<Cwd, crate::ProcErr> {
let path = std::fs::read_link("/proc/self/cwd")?;
Ok(Cwd(path))
}
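// Like cwd_of, but for a specific thread (tid) of process pid.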
pub fn cwd_of_of(pid: u32, tid: u32) -> Result<Cwd, crate::ProcErr> {
let path = std::fs::read_link(format!("/proc/{}/task/{}/cwd", pid, tid))?;
Ok(Cwd(path))
}
pub fn cwd_self_of(tid: u32) -> Result<Cwd, crate::ProcErr> {
let path = std::fs::read_link(format!("/proc/self/task/{}/cwd", tid))?;
Ok(Cwd(path))
}
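// /proc/thread-self resolves to the task directory of the calling thread.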
pub fn cwd_self_self() -> Result<Cwd, crate::ProcErr> {
let path = std::fs::read_link("/proc/thread-self/cwd")?;
Ok(Cwd(path))
}
| {
let path = std::fs::read_link(format!("/proc/{}/cwd", pid))?;
Ok(Cwd(path))
} |