file_name (stringlengths 3–137) | prefix (stringlengths 0–918k) | suffix (stringlengths 0–962k) | middle (stringlengths 0–812k)
---|---|---|---|
sample_basilisk.py
|
from __future__ import print_function
from fragbuilder import Basilisk_DBN
from fragbuilder import set_seed
set_seed(12)
dbn = Basilisk_DBN()
chi, bb, ll = dbn.get_sample("K")
print("Chi angles: ", chi)
|
print("Phi/Psi angles: ", bb)
print("Log likelihood: ", ll)
|
|
indexer_service.go
|
package txindex
import (
"context"
"github.com/mydexchain/tendermint/libs/service"
"github.com/mydexchain/tendermint/state/indexer"
"github.com/mydexchain/tendermint/types"
)
// XXX/TODO: These types should be moved to the indexer package.
const (
subscriber = "IndexerService"
)
// IndexerService connects event bus, transaction and block indexers together in
// order to index transactions and blocks coming from the event bus.
type IndexerService struct {
service.BaseService
txIdxr TxIndexer
blockIdxr indexer.BlockIndexer
eventBus *types.EventBus
}
// NewIndexerService returns a new service instance.
func NewIndexerService(
txIdxr TxIndexer,
blockIdxr indexer.BlockIndexer,
eventBus *types.EventBus,
) *IndexerService
|
// OnStart implements service.Service by subscribing for all transactions
// and indexing them by events.
func (is *IndexerService) OnStart() error {
// Use SubscribeUnbuffered here to ensure both subscriptions do not get
// cancelled due to not pulling messages fast enough, which can sometimes
// happen when there are no other subscribers.
blockHeadersSub, err := is.eventBus.SubscribeUnbuffered(
context.Background(),
subscriber,
types.EventQueryNewBlockHeader)
if err != nil {
return err
}
txsSub, err := is.eventBus.SubscribeUnbuffered(context.Background(), subscriber, types.EventQueryTx)
if err != nil {
return err
}
go func() {
for {
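// For each new block header, read that block's NumTxs transaction events,
// add them to a batch, then index the block and the batched txs.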
msg := <-blockHeadersSub.Out()
eventDataHeader := msg.Data().(types.EventDataNewBlockHeader)
height := eventDataHeader.Header.Height
batch := NewBatch(eventDataHeader.NumTxs)
for i := int64(0); i < eventDataHeader.NumTxs; i++ {
msg2 := <-txsSub.Out()
txResult := msg2.Data().(types.EventDataTx).TxResult
if err = batch.Add(&txResult); err != nil {
is.Logger.Error(
"failed to add tx to batch",
"height", height,
"index", txResult.Index,
"err", err,
)
}
}
if err := is.blockIdxr.Index(eventDataHeader); err != nil {
is.Logger.Error("failed to index block", "height", height, "err", err)
} else {
is.Logger.Info("indexed block", "height", height)
}
if err = is.txIdxr.AddBatch(batch); err != nil {
is.Logger.Error("failed to index block txs", "height", height, "err", err)
} else {
is.Logger.Debug("indexed block txs", "height", height, "num_txs", eventDataHeader.NumTxs)
}
}
}()
return nil
}
// OnStop implements service.Service by unsubscribing from all transactions.
func (is *IndexerService) OnStop() {
if is.eventBus.IsRunning() {
_ = is.eventBus.UnsubscribeAll(context.Background(), subscriber)
}
}
|
{
is := &IndexerService{txIdxr: txIdxr, blockIdxr: blockIdxr, eventBus: eventBus}
is.BaseService = *service.NewBaseService(nil, "IndexerService", is)
return is
}
|
FormValidation.js
|
import React, {Component} from 'react';
class FormValidation extends Component {
constructor() {
super();
this.state = {
|
name: '',
email: '',
phone: '',
password: '',
description: '',
skill: '',
}
}
onChangeHandler= (event)=>{
let inputName = event.target.name;
let inputValue = event.target.value;
this.setState({[inputName]: inputValue})
//Form validation
var emailRegex = /^[a-z0-9._%+-]+@[a-z0-9.-]+\.[a-z]{2,}$/;
var textRegex = /^([a-zA-Z ]){2,30}$/;
var phoneRegex = /^([0-9\(\)\/\+ \-]*)$/;
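// Controlled inputs report an empty string (never null) when cleared,
// so the required-field checks below compare against ''.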
if (inputName === 'name'){
if (inputValue === ''){
this.setState({name: 'Name required'})
}else if (!textRegex.test(inputValue)){
this.setState({name: 'Name is invalid'})
}
}else if (inputName === 'email'){
if (inputValue === ''){
this.setState({email: 'Email required'})
}else if (!emailRegex.test(inputValue)){
this.setState({email: 'Email is invalid'})
}
}else if (inputName === 'phone'){
if (inputValue === ''){
this.setState({phone: 'Phone number required'})
}else if (!phoneRegex.test(inputValue)){
this.setState({phone: 'Phone number is invalid'})
}
}else if (inputName === 'password'){
if (inputValue === ''){
this.setState({password: 'Password required'})
}
}
}
render() {
return (
<div>
<div>
<p>Name: {this.state.name}</p>
<p>Email: {this.state.email}</p>
<p>Phone: {this.state.phone}</p>
<p>Password: {this.state.password}</p>
<p>Description: {this.state.description}</p>
<p>Skill: {this.state.skill}</p>
</div>
<form>
<input onChange={this.onChangeHandler} type="text" name="name" placeholder="Name"/> <br/>
<input onChange={this.onChangeHandler} type="email" name="email" placeholder="Email"/> <br/>
<input onChange={this.onChangeHandler} type="text" name="phone" placeholder="Phone"/> <br/>
<input onChange={this.onChangeHandler} type="password" name="password" placeholder="Password"/> <br/> <br/>
<textarea onChange={this.onChangeHandler} name="description" placeholder="Your description" cols="21" rows="4"/><br/>
<select onChange={this.onChangeHandler} name="skill">
<option>HTML</option>
<option>CSS</option>
<option>PHP</option>
<option>Javascript</option>
<option>Laravel</option>
</select> <br/>
<input type="submit" name="submit" value="Save"/> <br/>
</form>
</div>
);
}
}
export default FormValidation;
| |
config.go
|
package protoc
import (
"os"
"path/filepath"
"strings"
"github.com/pkg/errors"
"github.com/spf13/afero"
)
// Config stores settings related to protoc.
type Config struct {
ImportDirs []string `mapstructure:"import_dirs"`
ProtosDir string `mapstructure:"protos_dir"`
OutDir string `mapstructure:"out_dir"`
Plugins []*Plugin
}
// ProtoFiles returns .proto file paths.
func (c *Config) ProtoFiles(fs afero.Fs, rootDir string) ([]string, error) {
paths := []string{}
protosDir := filepath.Join(rootDir, c.ProtosDir)
err := afero.Walk(fs, protosDir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return errors.WithStack(err)
}
if !info.IsDir() && filepath.Ext(path) == ".proto" {
paths = append(paths, path)
}
return nil
})
|
}
// OutDirOf returns the output directory for the protoc results of the given proto file.
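// For example (hypothetical values): with ProtosDir "proto" and OutDir "gen",
// a file at <root>/proto/foo/bar.proto is emitted under gen/foo.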
func (c *Config) OutDirOf(rootDir string, protoPath string) (string, error) {
protosDir := filepath.Join(rootDir, c.ProtosDir)
relProtoDir, err := filepath.Rel(protosDir, filepath.Dir(protoPath))
if strings.Contains(relProtoDir, "..") {
return "", errors.Errorf(".proto files should be included in %s", c.ProtosDir)
}
if err != nil {
return "", errors.Wrapf(err, ".proto files should be included in %s", c.ProtosDir)
}
return filepath.Join(c.OutDir, relProtoDir), nil
}
// Commands returns the protoc command and arguments for the given proto file.
func (c *Config) Commands(rootDir, protoPath string) ([][]string, error) {
cmds := make([][]string, 0, len(c.Plugins))
relProtoPath, _ := filepath.Rel(rootDir, protoPath)
outDir, err := c.OutDirOf(rootDir, protoPath)
if err != nil {
return nil, errors.WithStack(err)
}
for _, p := range c.Plugins {
args := []string{}
args = append(args, "-I", filepath.Dir(relProtoPath))
for _, dir := range c.ImportDirs {
args = append(args, "-I", dir)
}
args = append(args, p.toProtocArg(outDir))
args = append(args, relProtoPath)
cmds = append(cmds, append([]string{"protoc"}, args...))
}
return cmds, nil
}
|
return paths, errors.WithStack(err)
|
Container.js
|
'use strict';
Object.defineProperty(exports, "__esModule", {
value: true
});
var _createClass = function () { function defineProperties(target, props) { for (var i = 0; i < props.length; i++) { var descriptor = props[i]; descriptor.enumerable = descriptor.enumerable || false; descriptor.configurable = true; if ("value" in descriptor) descriptor.writable = true; Object.defineProperty(target, descriptor.key, descriptor); } } return function (Constructor, protoProps, staticProps) { if (protoProps) defineProperties(Constructor.prototype, protoProps); if (staticProps) defineProperties(Constructor, staticProps); return Constructor; }; }();
var _react = require('react');
var _react2 = _interopRequireDefault(_react);
var _Item = require('./Item');
var _Item2 = _interopRequireDefault(_Item);
function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; }
function _classCallCheck(instance, Constructor) { if (!(instance instanceof Constructor)) { throw new TypeError("Cannot call a class as a function"); } }
function _possibleConstructorReturn(self, call) { if (!self) { throw new ReferenceError("this hasn't been initialised - super() hasn't been called"); } return call && (typeof call === "object" || typeof call === "function") ? call : self; }
function _inherits(subClass, superClass) { if (typeof superClass !== "function" && superClass !== null) { throw new TypeError("Super expression must either be null or a function, not " + typeof superClass); } subClass.prototype = Object.create(superClass && superClass.prototype, { constructor: { value: subClass, enumerable: false, writable: true, configurable: true } }); if (superClass) Object.setPrototypeOf ? Object.setPrototypeOf(subClass, superClass) : subClass.__proto__ = superClass; }
var Container = function (_Component) {
_inherits(Container, _Component);
function Container() {
_classCallCheck(this, Container);
return _possibleConstructorReturn(this, Object.getPrototypeOf(Container).apply(this, arguments));
}
_createClass(Container, [{
|
value: function render() {
var _props = this.props;
var notifications = _props.notifications;
var onRemove = _props.onRemove;
var noAnimation = _props.noAnimation;
return _react2.default.createElement(
'ul',
null,
notifications.map(function (notification) {
return _react2.default.createElement(_Item2.default, {
ref: 'notification-' + notification.uid,
key: notification.uid,
notification: notification,
onRemove: onRemove,
noAnimation: noAnimation
});
})
);
}
}]);
return Container;
}(_react.Component);
Container.propTypes = {
notifications: _react.PropTypes.array.isRequired,
onRemove: _react.PropTypes.func,
noAnimation: _react.PropTypes.bool
};
exports.default = Container;
|
key: 'render',
|
tanh_gaussian_promp_multi_policy.py
|
import math
import torch
from torch import nn as nn
from torch.distributions import Normal
from robolearn.torch.core import PyTorchModule
from robolearn.torch.utils.pytorch_util import np_ify
from torch.nn.modules.normalization import LayerNorm
import robolearn.torch.utils.pytorch_util as ptu
from robolearn.models.policies import ExplorationPolicy
from collections import OrderedDict
from itertools import chain
# LOG_SIG_MAX = 2
# LOG_SIG_MIN = -3.0
LOG_SIG_MAX = 2
LOG_SIG_MIN = -20
# SIG_MAX = 7.38905609893065
# SIG_MIN = 0.049787068367863944
# LOG_MIX_COEFF_MIN = -10
# LOG_MIX_COEFF_MAX = -1e-6 #-4.5e-5
# LOG_MIX_COEFF_MIN = -1
# LOG_MIX_COEFF_MAX = 1 #-4.5e-5
# EPS = 1e-12
EPS = 1e-8
class TanhGaussianPrompMultiPolicy(PyTorchModule, ExplorationPolicy):
"""
Usage:
```
policy = TanhGaussianPrompMultiPolicy(...)
action, policy_dict = policy(obs)
```
Here, mean and log_std are the mean and log std of the Gaussian from which
the action is sampled.
If deterministic is True, action = tanh(mean).
If return_log_prob is False (default), log_prob = None
This is done because computing the log_prob can be a bit expensive.
"""
def __init__(
self,
obs_dim,
action_dim,
n_policies,
shared_hidden_sizes=None,
unshared_hidden_sizes=None,
unshared_mix_hidden_sizes=None,
stds=None,
hidden_activation='relu',
hidden_w_init='xavier_normal',
hidden_b_init_val=0,
output_w_init='xavier_normal',
output_b_init_val=0,
pol_output_activation='linear',
mix_output_activation='linear',
input_norm=False,
shared_layer_norm=False,
policies_layer_norm=False,
mixture_layer_norm=False,
softmax_weights=False,
**kwargs
):
self.save_init_params(locals())
PyTorchModule.__init__(self)
ExplorationPolicy.__init__(self, action_dim)
self._input_size = obs_dim
self._output_sizes = action_dim
self._n_subpolicies = n_policies
# Activation Fcns
self._hidden_activation = ptu.get_activation(hidden_activation)
self._pol_output_activation = ptu.get_activation(pol_output_activation)
self._mix_output_activation = ptu.get_activation(mix_output_activation)
# Normalization Layer Flags
self._shared_layer_norm = shared_layer_norm
self._policies_layer_norm = policies_layer_norm
self._mixture_layer_norm = mixture_layer_norm
# Layers Lists
self._sfcs = [] # Shared Layers
self._sfc_norms = [] # Norm. Shared Layers
self._pfcs = [list() for _ in range(self._n_subpolicies)] # Policies Layers
self._pfc_norms = [list() for _ in range(self._n_subpolicies)] # N. Pol. L.
self._pfc_lasts = [] # Last Policies Layers
self._mfcs = [] # Mixing Layers
self._norm_mfcs = [] # Norm. Mixing Layers
# self.mfc_last = None # Below is instantiated
self._softmax_weights = softmax_weights
# Initial size = Obs size
in_size = self._input_size
# Ordered Dictionaries for specific modules/parameters
self._shared_modules = OrderedDict()
self._shared_parameters = OrderedDict()
self._policies_modules = [OrderedDict() for _ in range(n_policies)]
self._policies_parameters = [OrderedDict() for _ in range(n_policies)]
self._mixing_modules = OrderedDict()
self._mixing_parameters = OrderedDict()
# ############# #
# Shared Layers #
# ############# #
if input_norm:
ln = nn.BatchNorm1d(in_size)
self.sfc_input = ln
self.add_shared_module("sfc_input", ln)
else:
self.sfc_input = None
if shared_hidden_sizes is not None:
for ii, next_size in enumerate(shared_hidden_sizes):
sfc = nn.Linear(in_size, next_size)
ptu.layer_init(
layer=sfc,
option=hidden_w_init,
activation=hidden_activation,
b=hidden_b_init_val,
)
self.__setattr__("sfc{}".format(ii), sfc)
self._sfcs.append(sfc)
self.add_shared_module("sfc{}".format(ii), sfc)
if self._shared_layer_norm:
ln = LayerNorm(next_size)
# ln = nn.BatchNorm1d(next_size)
self.__setattr__("sfc{}_norm".format(ii), ln)
self._sfc_norms.append(ln)
self.add_shared_module("sfc{}_norm".format(ii), ln)
in_size = next_size
# Get the output_size of the shared layers (assume same for all)
multipol_in_size = in_size
mixture_in_size = in_size
# ############### #
# Unshared Layers #
# ############### #
# Unshared Multi-Policy Hidden Layers
if unshared_hidden_sizes is not None:
for ii, next_size in enumerate(unshared_hidden_sizes):
for pol_idx in range(self._n_subpolicies):
pfc = nn.Linear(multipol_in_size, next_size)
ptu.layer_init(
layer=pfc,
option=hidden_w_init,
activation=hidden_activation,
b=hidden_b_init_val,
)
self.__setattr__("pfc{}_{}".format(pol_idx, ii), pfc)
self._pfcs[pol_idx].append(pfc)
self.add_policies_module("pfc{}_{}".format(pol_idx, ii),
pfc, idx=pol_idx)
if self._policies_layer_norm:
ln = LayerNorm(next_size)
# ln = nn.BatchNorm1d(next_size)
self.__setattr__("pfc{}_{}_norm".format(pol_idx, ii),
ln)
self._pfc_norms[pol_idx].append(ln)
self.add_policies_module("pfc{}_{}_norm".format(pol_idx,
ii),
ln, idx=pol_idx)
multipol_in_size = next_size
# Multi-Policy Last Layers
for pol_idx in range(self._n_subpolicies):
last_pfc = nn.Linear(multipol_in_size, action_dim)
ptu.layer_init(
layer=last_pfc,
option=output_w_init,
activation=pol_output_activation,
b=output_b_init_val,
)
self.__setattr__("pfc{}_last".format(pol_idx), last_pfc)
self._pfc_lasts.append(last_pfc)
self.add_policies_module("pfc{}_last".format(pol_idx), last_pfc,
idx=pol_idx)
# Multi-Policy Log-Stds Last Layers
self.stds = stds
self.log_std = list()
if stds is None:
self._pfc_log_std_lasts = list()
for pol_idx in range(self._n_subpolicies):
last_pfc_log_std = nn.Linear(multipol_in_size, action_dim)
ptu.layer_init(
layer=last_pfc_log_std,
option=output_w_init,
activation=pol_output_activation,
b=output_b_init_val,
)
self.__setattr__("pfc{}_log_std_last".format(pol_idx),
last_pfc_log_std)
self._pfc_log_std_lasts.append(last_pfc_log_std)
self.add_policies_module("pfc{}_log_std_last".format(pol_idx),
last_pfc_log_std, idx=pol_idx)
else:
for std in stds:
self.log_std.append(torch.log(std))
assert LOG_SIG_MIN <= self.log_std[-1] <= LOG_SIG_MAX
# ############# #
# Mixing Layers #
# ############# #
# Unshared Mixing-Weights Hidden Layers
if unshared_mix_hidden_sizes is not None:
for ii, next_size in enumerate(unshared_mix_hidden_sizes):
mfc = nn.Linear(mixture_in_size, next_size)
ptu.layer_init(
layer=mfc,
option=hidden_w_init,
activation=hidden_activation,
b=hidden_b_init_val,
)
self.__setattr__("mfc{}".format(ii), mfc)
self._mfcs.append(mfc)
# Add it to specific dictionaries
self.add_mixing_module("mfc{}".format(ii), mfc)
if self._mixture_layer_norm:
ln = LayerNorm(next_size)
# ln = nn.BatchNorm1d(next_size)
self.__setattr__("mfc{}_norm".format(ii), ln)
self._norm_mfcs.append(ln)
self.add_mixing_module("mfc{}_norm".format(ii), ln)
mixture_in_size = next_size
# Unshared Mixing-Weights Last Layers
mfc_last = nn.Linear(mixture_in_size, self._n_subpolicies * action_dim)
ptu.layer_init(
layer=mfc_last,
option=output_w_init,
activation=mix_output_activation,
b=output_b_init_val,
)
self.__setattr__("mfc_last", mfc_last)
self.mfc_last = mfc_last
# Add it to specific dictionaries
self.add_mixing_module("mfc_last", mfc_last)
self.mfc_sigmoid = nn.Sigmoid()
self._normal_dist = Normal(loc=ptu.zeros(action_dim),
scale=ptu.ones(action_dim))
self._pols_idxs = ptu.arange(self._n_subpolicies)
def get_action(self, obs_np, **kwargs):
"""
"""
actions, info_dict = self.get_actions(obs_np[None], **kwargs)
for key, val in info_dict.items():
info_dict[key] = val[0, :]
# Get [0, :] vals (Because it has dimension 1xdA)
return actions[0, :], info_dict
def get_actions(self, obs_np, **kwargs):
"""
"""
actions, torch_info_dict = self.eval_np(obs_np, **kwargs)
info_dict = dict()
for key, vals in torch_info_dict.items():
if key in ['mixing_coeff']:
info_dict[key] = np_ify(torch_info_dict[key])
return actions, info_dict
def forward(
self,
obs,
deterministic=False,
return_log_prob=False,
pol_idx=None,
optimize_policies=True,
):
"""
Args:
obs (Tensor): Observation(s)
deterministic (bool): True for using mean. False, sample from dist.
return_log_prob (bool):
pol_idx (int):
optimize_policies (bool):
Returns:
action (Tensor):
pol_info (dict):
"""
h = obs
nbatch = obs.shape[0]
# ############# #
# Shared Layers #
# ############# #
if self.sfc_input is not None:
# h = self.sfc_input(h)
if nbatch > 1:
h = self.sfc_input(h)
else:
h = torch.batch_norm(
h,
self.sfc_input.weight,
self.sfc_input.bias,
self.sfc_input.running_mean,
self.sfc_input.running_var,
True, # TODO: True or False??
self.sfc_input.momentum,
self.sfc_input.eps,
torch.backends.cudnn.enabled
)
for ss, fc in enumerate(self._sfcs):
h = fc(h)
if self._shared_layer_norm:
h = self._sfc_norms[ss](h)
h = self._hidden_activation(h)
# ############## #
# Multi Policies #
# ############## #
hs = [h.clone() for _ in range(self._n_subpolicies)]
# Hidden Layers
if len(self._pfcs) > 0:
for pp in range(self._n_subpolicies):
for ii, fc in enumerate(self._pfcs[pp]):
hs[pp] = fc(hs[pp])
if self._policies_layer_norm:
hs[pp] = self._pfc_norms[pp][ii](hs[pp])
hs[pp] = self._hidden_activation(hs[pp])
# Last Mean Layers
means = torch.cat(
[(
self._pol_output_activation(self._pfc_lasts[pp](hs[pp]))
).unsqueeze(dim=1)
for pp in range(self._n_subpolicies)
],
dim=1
) # Batch x Npols x dA
# Last Log-Std Layers
if self.stds is None:
log_stds = torch.cat(
[(
self._pol_output_activation(
self._pfc_log_std_lasts[pp](hs[pp])
)
).unsqueeze(dim=1)
for pp in range(self._n_subpolicies)
],
dim=1
) # Batch x Npols x dA
# # log_std option 1:
# log_stds = torch.clamp(log_stds, min=LOG_SIG_MIN, max=LOG_SIG_MAX)
# log_std option 2:
log_stds = torch.tanh(log_stds)
log_stds = \
LOG_SIG_MIN + 0.5 * (LOG_SIG_MAX - LOG_SIG_MIN)*(log_stds + 1)
stds = torch.exp(log_stds)
variances = stds**2
else:
log_stds = self.log_std
stds = self.stds
variances = stds**2
# ############## #
# Mixing Weights #
# ############## #
mh = h.clone()
if len(self._mfcs) > 0:
for mm, mfc in enumerate(self._mfcs):
mh = mfc(mh)
if self._mixture_layer_norm:
mh = self._norm_mfcs[mm](mh)
mh = self._hidden_activation(mh)
# No nonlinear transformation here; the sigmoid below squashes the coefficients.
mixture_coeff = \
self.mfc_last(mh).reshape(-1, self._n_subpolicies, self.action_dim)
mixture_coeff = self.mfc_sigmoid(mixture_coeff)
# if torch.isnan(mixture_coeff).any():
# raise ValueError('Some mixture coeff(s) is(are) NAN: %s' %
# mixture_coeff)
#
# if torch.isnan(means).any():
# raise ValueError('Some means are NAN: %s' %
# means)
#
# if torch.isnan(stds).any():
# raise ValueError('Some stds are NAN: %s' %
# stds)
if pol_idx is None:
# Calculate weighted means and stds (and log_stds)
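# Precision-weighted fusion (product-of-Gaussians style): each sub-policy
# contributes with weight mixture_coeff / variance, so lower-variance heads
# dominate the combined mean and variance.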
if optimize_policies:
sig_invs = mixture_coeff/variances
else:
sig_invs = mixture_coeff/variances.detach()
variance = 1./torch.sum(sig_invs, dim=1, keepdim=False)
if optimize_policies:
mean = variance*torch.sum(
means*sig_invs,
dim=1,
keepdim=False
)
else:
mean = variance*torch.sum(
means.detach()*sig_invs,
dim=1,
keepdim=False
)
# log_std option 1:
std = torch.sqrt(variance)
std = torch.clamp(std,
min=math.exp(LOG_SIG_MIN),
max=math.exp(LOG_SIG_MAX))
log_std = torch.log(std)
# # log_std option 2:
# variance = torch.tanh(variance)
# variance = (
# math.exp(LOG_SIG_MIN)**2 +
# 0.5*(math.exp(LOG_SIG_MAX)**2 - math.exp(LOG_SIG_MIN)**2) *
# (variance + 1)
# )
# std = torch.sqrt(variance)
# log_std = torch.log(std)
# TODO: Remove the following?
# log_std = torch.logsumexp(
# log_stds + log_mixture_coeff.reshape(-1,
# self.action_dim,
# self._n_subpolicies),
# dim=-1,
# keepdim=False
# ) - torch.logsumexp(log_mixture_coeff, dim=-1, keepdim=True)
# log_std = torch.log(std)
else:
index = self._pols_idxs[pol_idx]
mean = \
torch.index_select(means, dim=1, index=index).squeeze(1)
std = \
torch.index_select(stds, dim=1, index=index).squeeze(1)
log_std = \
torch.index_select(log_stds, dim=1, index=index).squeeze(1)
variance = \
torch.index_select(variances, dim=1, index=index).squeeze(1)
pre_tanh_value = None
log_prob = None
pre_tanh_values = None
log_probs = None
if deterministic:
action = torch.tanh(mean)
actions = torch.tanh(means)
else:
# # Using this distribution instead of TanhMultivariateNormal
# # because it has Diagonal Covariance.
# # Then, a collection of n independent Gaussian r.v.
# tanh_normal = TanhNormal(mean, std)
#
# # # It is the Lower-triangular factor of covariance because it is
# # # Diagonal Covariance
# # scale_trils = torch.stack([torch.diag(m) for m in std])
# # tanh_normal = TanhMultivariateNormal(mean, scale_tril=scale_trils)
#
# if return_log_prob:
# log_prob = tanh_normal.log_prob(
# action,
# pre_tanh_value=pre_tanh_value
# )
# log_prob = log_prob.sum(dim=-1, keepdim=True)
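# Reparameterized sampling: draw standard-normal noise once per batch element
# and reuse it for both the mixed policy and every sub-policy head.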
noise = self._normal_dist.sample((nbatch,))
pre_tanh_value = std*noise + mean
pre_tanh_values = stds*noise.unsqueeze(1) + means
action = torch.tanh(pre_tanh_value)
actions = torch.tanh(pre_tanh_values)
if return_log_prob:
# Log probability: Main Policy
log_prob = -((pre_tanh_value - mean) ** 2) / (2*variance) \
- log_std - math.log(math.sqrt(2*math.pi))
log_prob -= torch.log(
# torch.clamp(1. - action**2, 0, 1)
clip_but_pass_gradient(1. - action**2, 0, 1)
+ 1.e-6
)
log_prob = log_prob.sum(dim=-1, keepdim=True)
# Log probability: Sub-Policies
log_probs = -((pre_tanh_values - means) ** 2) / (2*variances)\
- log_stds - math.log(math.sqrt(2*math.pi))
log_probs -= torch.log(
# torch.clamp(1. - actions**2, 0, 1)
clip_but_pass_gradient(1. - actions**2, 0, 1)
+ 1.e-6
)
log_probs = log_probs.sum(dim=-1, keepdim=True)
# if torch.isnan(action).any():
# raise ValueError('ACTION NAN')
#
# if torch.isnan(actions).any():
# raise ValueError('ACTION NAN')
info_dict = dict(
mean=mean,
std=std,
log_std=log_std,
log_prob=log_prob,
pre_tanh_value=pre_tanh_value,
# log_mixture_coeff=log_mixture_coeff,
mixing_coeff=mixture_coeff,
pol_actions=actions,
pol_means=means,
pol_stds=stds,
pol_log_stds=log_stds,
pol_log_probs=log_probs,
pol_pre_tanh_values=pre_tanh_values,
)
return action, info_dict
def log_action(self, actions, obs, pol_idx=None):
raise NotImplementedError
@property
def n_heads(self):
return self._n_subpolicies
@property
def n_subpolicies(self):
return self._n_subpolicies
# ################# #
# Shared parameters #
# ################# #
def shared_parameters(self):
"""Returns an iterator over the shared parameters.
"""
for name, param in self.named_shared_parameters():
yield param
def
|
(self, **kwargs):
"""Returns an iterator over shared module parameters, yielding both the
name of the parameter as well as the parameter itself
"""
return ptu.named_parameters(self._shared_modules,
self._shared_parameters,
**kwargs)
def add_shared_module(self, name, module):
ptu.add_module(self._shared_modules, name, module)
# ####################### #
# Sub-Policies parameters #
# ####################### #
def policies_parameters(self, idx=None):
"""Returns an iterator over the policies parameters.
"""
if idx is None:
idx_list = list(range(self._n_subpolicies))
elif isinstance(idx, list) or isinstance(idx, tuple):
idx_list = idx
else:
idx_list = [idx]
for name, param in self.named_policies_parameters(idx_list):
yield param
def named_policies_parameters(self, idx=None, **kwargs):
"""Returns an iterator over policies module parameters, yielding both the
name of the parameter as well as the parameter itself
"""
if idx is None:
idx_list = list(range(self._n_subpolicies))
elif isinstance(idx, list) or isinstance(idx, tuple):
idx_list = idx
else:
idx_list = [idx]
return chain(*[ptu.named_parameters(self._policies_modules[idx],
self._policies_parameters[idx],
**kwargs)
for idx in idx_list])
def add_policies_module(self, name, module, idx=None):
if idx is None:
idx_list = list(range(self._n_subpolicies))
elif isinstance(idx, list) or isinstance(idx, tuple):
idx_list = idx
else:
idx_list = [idx]
for idx in idx_list:
ptu.add_module(self._policies_modules[idx], name, module)
# ################# #
# Mixing parameters #
# ################# #
def mixing_parameters(self):
"""Returns an iterator over the mixing parameters.
"""
for name, param in self.named_mixing_parameters():
yield param
def named_mixing_parameters(self, **kwargs):
"""Returns an iterator over mixing module parameters, yielding both the
name of the parameter as well as the parameter itself
"""
return ptu.named_parameters(self._mixing_modules,
self._mixing_parameters,
**kwargs)
def add_mixing_module(self, name, module):
ptu.add_module(self._mixing_modules, name, module)
def clip_but_pass_gradient(x, l=-1., u=1.):
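# Clamps x to [l, u] in the forward pass while letting gradients flow through
# unclipped: the clipping correction term is wrapped in detach().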
clip_up = (x > u).to(ptu.device, dtype=torch.float32)
clip_low = (x < l).to(ptu.device, dtype=torch.float32)
return x + ((u - x)*clip_up + (l - x)*clip_low).detach()
|
named_shared_parameters
|
alveo_u250.py
|
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020 Fei Gao <[email protected]>
# Copyright (c) 2020 Florent Kermarrec <[email protected]>
# Copyright (c) 2020 David Shah <[email protected]>
# SPDX-License-Identifier: BSD-2-Clause
import argparse, os
from migen import *
from litex_boards.platforms import alveo_u250
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.soc_sdram import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
from litedram.modules import MTA18ASF2G72PZ
from litedram.phy import usddrphy
from litepcie.phy.usppciephy import USPPCIEPHY
from litepcie.core import LitePCIeEndpoint, LitePCIeMSI
from litepcie.frontend.dma import LitePCIeDMA
from litepcie.frontend.wishbone import LitePCIeWishboneBridge
from litepcie.software import generate_litepcie_software
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys4x = ClockDomain(reset_less=True)
self.clock_domains.cd_pll4x = ClockDomain(reset_less=True)
self.clock_domains.cd_clk500 = ClockDomain()
# # #
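# The PLL runs at 4x the system frequency; BUFGCE_DIV divides it by 4 to drive
# cd_sys while a plain BUFGCE drives cd_sys4x from the same pll4x clock.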
self.submodules.pll = pll = USMMCM(speedgrade=-2)
self.comb += pll.reset.eq(0) # FIXME
pll.register_clkin(platform.request("clk300", 0), 300e6)
pll.create_clkout(self.cd_pll4x, sys_clk_freq*4, buf=None, with_reset=False)
pll.create_clkout(self.cd_clk500, 500e6, with_reset=False)
self.specials += [
Instance("BUFGCE_DIV", name="main_bufgce_div",
p_BUFGCE_DIVIDE=4,
i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys.clk),
Instance("BUFGCE", name="main_bufgce",
i_CE=1, i_I=self.cd_pll4x.clk, o_O=self.cd_sys4x.clk),
AsyncResetSynchronizer(self.cd_clk500, ~pll.locked),
]
self.submodules.idelayctrl = USIDELAYCTRL(cd_ref=self.cd_clk500, cd_sys=self.cd_sys)
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, sys_clk_freq=int(125e6), with_pcie=False, **kwargs):
platform = alveo_u250.Platform()
# SoCCore ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, sys_clk_freq,
ident = "LiteX SoC on Alveo U250",
ident_version = True,
**kwargs)
# CRG --------------------------------------------------------------------------------------
self.submodules.crg = _CRG(platform, sys_clk_freq)
# DDR4 SDRAM -------------------------------------------------------------------------------
if not self.integrated_main_ram_size:
self.submodules.ddrphy = usddrphy.USPDDRPHY(platform.request("ddram"),
memtype = "DDR4",
sys_clk_freq = sys_clk_freq,
iodelay_clk_freq = 500e6,
cmd_latency = 1,
is_rdimm = True)
self.add_csr("ddrphy")
self.add_sdram("sdram",
phy = self.ddrphy,
module = MTA18ASF2G72PZ(sys_clk_freq, "1:4"),
origin = self.mem_map["main_ram"],
size = kwargs.get("max_sdram_size", 0x40000000),
l2_cache_size = kwargs.get("l2_size", 8192),
l2_cache_min_data_width = kwargs.get("min_l2_data_width", 128),
l2_cache_reverse = True
)
# Firmware RAM (To ease initial LiteDRAM calibration support) ------------------------------
self.add_ram("firmware_ram", 0x20000000, 0x8000)
# PCIe -------------------------------------------------------------------------------------
if with_pcie:
# PHY
self.submodules.pcie_phy = USPPCIEPHY(platform, platform.request("pcie_x4"),
data_width = 128,
bar0_size = 0x20000)
#self.pcie_phy.add_timing_constraints(platform) # FIXME
platform.add_false_path_constraints(self.crg.cd_sys.clk, self.pcie_phy.cd_pcie.clk)
self.add_csr("pcie_phy")
# Endpoint
self.submodules.pcie_endpoint = LitePCIeEndpoint(self.pcie_phy, max_pending_requests=8)
# Wishbone bridge
self.submodules.pcie_bridge = LitePCIeWishboneBridge(self.pcie_endpoint,
base_address = self.mem_map["csr"])
self.add_wb_master(self.pcie_bridge.wishbone)
# DMA0
self.submodules.pcie_dma0 = LitePCIeDMA(self.pcie_phy, self.pcie_endpoint,
with_buffering = True, buffering_depth=1024,
with_loopback = True)
self.add_csr("pcie_dma0")
self.add_constant("DMA_CHANNELS", 1)
# MSI
self.submodules.pcie_msi = LitePCIeMSI()
self.add_csr("pcie_msi")
self.comb += self.pcie_msi.source.connect(self.pcie_phy.msi)
self.interrupts = {
"PCIE_DMA0_WRITER": self.pcie_dma0.writer.irq,
"PCIE_DMA0_READER": self.pcie_dma0.reader.irq,
}
for i, (k, v) in enumerate(sorted(self.interrupts.items())):
self.comb += self.pcie_msi.irqs[i].eq(v)
|
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
self.add_csr("leds")
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="LiteX SoC on Alveo U250")
parser.add_argument("--build", action="store_true", help="Build bitstream")
parser.add_argument("--with-pcie", action="store_true", help="Enable PCIe support")
parser.add_argument("--driver", action="store_true", help="Generate PCIe driver")
parser.add_argument("--load", action="store_true", help="Load bitstream")
builder_args(parser)
soc_sdram_args(parser)
args = parser.parse_args()
# Enforce arguments
args.csr_data_width = 32
soc = BaseSoC(with_pcie=args.with_pcie, **soc_sdram_argdict(args))
builder = Builder(soc, **builder_argdict(args))
builder.build(run=args.build)
if args.driver:
generate_litepcie_software(soc, os.path.join(builder.output_dir, "driver"))
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(os.path.join(builder.gateware_dir, soc.build_name + ".bit"))
if __name__ == "__main__":
main()
|
self.add_constant(k + "_INTERRUPT", i)
# Leds -------------------------------------------------------------------------------------
|
image_merge.go
|
// Copyright © 2019 Erin Shepherd
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cmd
import (
"errors"
"fmt"
"github.com/erincandescent/nuvoprog/target"
"github.com/spf13/cobra"
)
// imageMergeCmd represents the imageMerge command
var imageMergeCmd = &cobra.Command{
Use: "merge",
Short: "Merge image files",
Long: `Merges configuration, APROM and optionally LDROM images into a composite image`,
RunE: func(cmd *cobra.Command, args []string) error {
if targetName == "" {
return errors.New("Target device not specified")
}
td := target.ByName(targetName)
if td == nil {
return fmt.Errorf("Target device '%s' not found", targetName)
}
config, _ := cmd.Flags().GetString("config")
image, _ := cmd.Flags().GetString("image")
aprom, _ := cmd.Flags().GetString("aprom")
ldrom, _ := cmd.Flags().GetString("ldrom")
output, _ := cmd.Flags().GetString("output")
d, err := ReadTargetData(config, image, aprom, ldrom, td, true)
if err != nil {
return err
}
w, err := openWrite(output)
if err != nil {
return err
}
d.Write(w)
return nil
},
}
func init() {
|
imageCmd.AddCommand(imageMergeCmd)
imageMergeCmd.Flags().StringP("output", "o", "", "Output file, e.g. image.ihx")
}
|
|
api_user_interface.go
|
/*
* EVE Swagger Interface
*
* An OpenAPI for EVE Online
*
* OpenAPI spec version: 1.8.2
*
* Generated by: https://github.com/swagger-api/swagger-codegen.git
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package esi
import (
"context"
"io/ioutil"
"net/http"
"net/url"
"strings"
"github.com/antihax/goesi/optional"
)
// Linger please
var (
_ context.Context
)
type UserInterfaceApiService service
/*
UserInterfaceApiService Set Autopilot Waypoint
Set a solar system as autopilot waypoint ---
* @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param addToBeginning Whether this solar system should be added to the beginning of all waypoints
* @param clearOtherWaypoints Whether to clear other waypoints before adding this one
* @param destinationId The destination to travel to, can be solar system, station or structure's id
* @param optional nil or *PostUiAutopilotWaypointOpts - Optional Parameters:
* @param "Datasource" (optional.String) - The server name you would like data from
* @param "Token" (optional.String) - Access token to use if unable to set a header
*/
type PostUiAutopilotWaypointOpts struct {
Datasource optional.String
Token optional.String
}
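// A minimal, hypothetical usage sketch (the client value, its UserInterfaceApi
// field, and the destination ID are assumptions, not part of this file):
//
//	opts := &PostUiAutopilotWaypointOpts{Datasource: optional.NewString("tranquility")}
//	resp, err := client.UserInterfaceApi.PostUiAutopilotWaypoint(ctx, false, true, 30000142, opts)
//	_ = resp; _ = err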
func (a *UserInterfaceApiService) PostUiAutopilotWaypoint(ctx context.Context, addToBeginning bool, clearOtherWaypoints bool, destinationId int64, localVarOptionals *PostUiAutopilotWaypointOpts) (*http.Response, error) {
var (
localVarHttpMethod = strings.ToUpper("Post")
localVarPostBody interface{}
localVarFileName string
localVarFileBytes []byte
)
// create path and map variables
localVarPath := a.client.basePath + "/v2/ui/autopilot/waypoint/"
localVarHeaderParams := make(map[string]string)
localVarQueryParams := url.Values{}
localVarFormParams := url.Values{}
localVarQueryParams.Add("add_to_beginning", parameterToString(addToBeginning, ""))
localVarQueryParams.Add("clear_other_waypoints", parameterToString(clearOtherWaypoints, ""))
if localVarOptionals != nil && localVarOptionals.Datasource.IsSet() {
localVarQueryParams.Add("datasource", parameterToString(localVarOptionals.Datasource.Value(), ""))
}
localVarQueryParams.Add("destination_id", parameterToString(destinationId, ""))
if localVarOptionals != nil && localVarOptionals.Token.IsSet() {
localVarQueryParams.Add("token", parameterToString(localVarOptionals.Token.Value(), ""))
}
// to determine the Content-Type header
localVarHttpContentTypes := []string{"application/json"}
// set Content-Type header
localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
if localVarHttpContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHttpContentType
}
// to determine the Accept header
localVarHttpHeaderAccepts := []string{"application/json"}
// set Accept header
localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
if localVarHttpHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
}
r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
if err != nil {
return nil, err
}
localVarHttpResponse, err := a.client.callAPI(r)
if err != nil || localVarHttpResponse == nil {
return localVarHttpResponse, err
}
localVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)
localVarHttpResponse.Body.Close()
if err != nil {
return localVarHttpResponse, err
}
if localVarHttpResponse.StatusCode >= 400 {
newErr := GenericSwaggerError{
body: localVarBody,
error: localVarHttpResponse.Status,
}
if localVarHttpResponse.StatusCode == 400 {
var v BadRequest
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 401 {
var v Unauthorized
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 403 {
var v Forbidden
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 420 {
var v ErrorLimited
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 500 {
var v InternalServerError
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 503 {
var v ServiceUnavailable
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 504 {
var v GatewayTimeout
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
return localVarHttpResponse, newErr
}
return localVarHttpResponse, nil
}
/*
UserInterfaceApiService Open Contract Window
Open the contract window inside the client ---
* @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param contractId The contract to open
* @param optional nil or *PostUiOpenwindowContractOpts - Optional Parameters:
* @param "Datasource" (optional.String) - The server name you would like data from
* @param "Token" (optional.String) - Access token to use if unable to set a header
*/
type PostUiOpenwindowContractOpts struct {
Datasource optional.String
Token optional.String
}
func (a *UserInterfaceApiService) PostUiOpenwindowContract(ctx context.Context, contractId int32, localVarOptionals *PostUiOpenwindowContractOpts) (*http.Response, error) {
var (
localVarHttpMethod = strings.ToUpper("Post")
localVarPostBody interface{}
localVarFileName string
localVarFileBytes []byte
)
// create path and map variables
localVarPath := a.client.basePath + "/v1/ui/openwindow/contract/"
localVarHeaderParams := make(map[string]string)
localVarQueryParams := url.Values{}
localVarFormParams := url.Values{}
localVarQueryParams.Add("contract_id", parameterToString(contractId, ""))
if localVarOptionals != nil && localVarOptionals.Datasource.IsSet() {
localVarQueryParams.Add("datasource", parameterToString(localVarOptionals.Datasource.Value(), ""))
}
if localVarOptionals != nil && localVarOptionals.Token.IsSet() {
localVarQueryParams.Add("token", parameterToString(localVarOptionals.Token.Value(), ""))
}
// to determine the Content-Type header
localVarHttpContentTypes := []string{"application/json"}
// set Content-Type header
localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
if localVarHttpContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHttpContentType
}
// to determine the Accept header
localVarHttpHeaderAccepts := []string{"application/json"}
// set Accept header
localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
if localVarHttpHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
}
r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
if err != nil {
return nil, err
}
localVarHttpResponse, err := a.client.callAPI(r)
if err != nil || localVarHttpResponse == nil {
return localVarHttpResponse, err
}
localVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)
localVarHttpResponse.Body.Close()
if err != nil {
return localVarHttpResponse, err
}
if localVarHttpResponse.StatusCode >= 400 {
newErr := GenericSwaggerError{
body: localVarBody,
error: localVarHttpResponse.Status,
}
if localVarHttpResponse.StatusCode == 400 {
var v BadRequest
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 401 {
var v Unauthorized
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 403 {
var v Forbidden
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 420 {
var v ErrorLimited
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 500 {
var v InternalServerError
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 503 {
var v ServiceUnavailable
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 504 {
var v GatewayTimeout
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
return localVarHttpResponse, newErr
}
return localVarHttpResponse, nil
}
/*
UserInterfaceApiService Open Information Window
Open the information window for a character, corporation or alliance inside the client ---
* @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param targetId The target to open
* @param optional nil or *PostUiOpenwindowInformationOpts - Optional Parameters:
* @param "Datasource" (optional.String) - The server name you would like data from
* @param "Token" (optional.String) - Access token to use if unable to set a header
*/
type PostUiOpenwindowInformationOpts struct {
Datasource optional.String
Token optional.String
}
func (a *UserInterfaceApiService) PostUiOpenwindowInformation(ctx context.Context, targetId int32, localVarOptionals *PostUiOpenwindowInformationOpts) (*http.Response, error) {
var (
localVarHttpMethod = strings.ToUpper("Post")
localVarPostBody interface{}
localVarFileName string
localVarFileBytes []byte
)
// create path and map variables
localVarPath := a.client.basePath + "/v1/ui/openwindow/information/"
localVarHeaderParams := make(map[string]string)
localVarQueryParams := url.Values{}
localVarFormParams := url.Values{}
if localVarOptionals != nil && localVarOptionals.Datasource.IsSet() {
localVarQueryParams.Add("datasource", parameterToString(localVarOptionals.Datasource.Value(), ""))
}
localVarQueryParams.Add("target_id", parameterToString(targetId, ""))
if localVarOptionals != nil && localVarOptionals.Token.IsSet() {
localVarQueryParams.Add("token", parameterToString(localVarOptionals.Token.Value(), ""))
}
// to determine the Content-Type header
localVarHttpContentTypes := []string{"application/json"}
// set Content-Type header
localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
if localVarHttpContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHttpContentType
}
// to determine the Accept header
localVarHttpHeaderAccepts := []string{"application/json"}
// set Accept header
localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
if localVarHttpHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
}
r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
if err != nil {
return nil, err
}
localVarHttpResponse, err := a.client.callAPI(r)
if err != nil || localVarHttpResponse == nil {
return localVarHttpResponse, err
}
localVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)
localVarHttpResponse.Body.Close()
if err != nil {
return localVarHttpResponse, err
}
if localVarHttpResponse.StatusCode >= 400 {
newErr := GenericSwaggerError{
body: localVarBody,
error: localVarHttpResponse.Status,
}
if localVarHttpResponse.StatusCode == 400 {
var v BadRequest
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 401 {
var v Unauthorized
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 403 {
var v Forbidden
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 420 {
var v ErrorLimited
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 500 {
var v InternalServerError
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 503 {
var v ServiceUnavailable
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 504 {
var v GatewayTimeout
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
return localVarHttpResponse, newErr
}
return localVarHttpResponse, nil
}
/*
UserInterfaceApiService Open Market Details
Open the market details window for a specific typeID inside the client ---
* @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param typeId The item type to open in market window
* @param optional nil or *PostUiOpenwindowMarketdetailsOpts - Optional Parameters:
* @param "Datasource" (optional.String) - The server name you would like data from
* @param "Token" (optional.String) - Access token to use if unable to set a header
*/
type PostUiOpenwindowMarketdetailsOpts struct {
Datasource optional.String
Token optional.String
}
func (a *UserInterfaceApiService) PostUiOpenwindowMarketdetails(ctx context.Context, typeId int32, localVarOptionals *PostUiOpenwindowMarketdetailsOpts) (*http.Response, error) {
var (
localVarHttpMethod = strings.ToUpper("Post")
localVarPostBody interface{}
localVarFileName string
localVarFileBytes []byte
)
// create path and map variables
localVarPath := a.client.basePath + "/v1/ui/openwindow/marketdetails/"
localVarHeaderParams := make(map[string]string)
localVarQueryParams := url.Values{}
localVarFormParams := url.Values{}
if localVarOptionals != nil && localVarOptionals.Datasource.IsSet() {
localVarQueryParams.Add("datasource", parameterToString(localVarOptionals.Datasource.Value(), ""))
}
if localVarOptionals != nil && localVarOptionals.Token.IsSet() {
localVarQueryParams.Add("token", parameterToString(localVarOptionals.Token.Value(), ""))
}
localVarQueryParams.Add("type_id", parameterToString(typeId, ""))
// to determine the Content-Type header
localVarHttpContentTypes := []string{"application/json"}
// set Content-Type header
localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
if localVarHttpContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHttpContentType
}
// to determine the Accept header
localVarHttpHeaderAccepts := []string{"application/json"}
// set Accept header
localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
if localVarHttpHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
}
r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
if err != nil {
return nil, err
}
localVarHttpResponse, err := a.client.callAPI(r)
if err != nil || localVarHttpResponse == nil {
return localVarHttpResponse, err
}
localVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)
localVarHttpResponse.Body.Close()
if err != nil {
return localVarHttpResponse, err
}
if localVarHttpResponse.StatusCode >= 400 {
newErr := GenericSwaggerError{
body: localVarBody,
error: localVarHttpResponse.Status,
}
if localVarHttpResponse.StatusCode == 400 {
var v BadRequest
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 401 {
var v Unauthorized
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 403 {
var v Forbidden
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 420 {
var v ErrorLimited
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 500 {
var v InternalServerError
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 503 {
var v ServiceUnavailable
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 504 {
var v GatewayTimeout
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
return localVarHttpResponse, newErr
}
return localVarHttpResponse, nil
}
/*
UserInterfaceApiService Open New Mail Window
Open the New Mail window, according to settings from the request if applicable ---
* @param ctx context.Context - for authentication, logging, cancellation, deadlines, tracing, etc. Passed from http.Request or context.Background().
* @param newMail The details of mail to create
* @param optional nil or *PostUiOpenwindowNewmailOpts - Optional Parameters:
* @param "Datasource" (optional.String) - The server name you would like data from
* @param "Token" (optional.String) - Access token to use if unable to set a header
*/
type PostUiOpenwindowNewmailOpts struct {
Datasource optional.String
Token optional.String
}
func (a *UserInterfaceApiService) PostUiOpenwindowNewmail(ctx context.Context, newMail PostUiOpenwindowNewmailNewMail, localVarOptionals *PostUiOpenwindowNewmailOpts) (*http.Response, error) {
var (
localVarHttpMethod = strings.ToUpper("Post")
localVarPostBody interface{}
localVarFileName string
localVarFileBytes []byte
)
// create path and map variables
localVarPath := a.client.basePath + "/v1/ui/openwindow/newmail/"
localVarHeaderParams := make(map[string]string)
localVarQueryParams := url.Values{}
localVarFormParams := url.Values{}
if localVarOptionals != nil && localVarOptionals.Datasource.IsSet() {
localVarQueryParams.Add("datasource", parameterToString(localVarOptionals.Datasource.Value(), ""))
}
if localVarOptionals != nil && localVarOptionals.Token.IsSet() {
localVarQueryParams.Add("token", parameterToString(localVarOptionals.Token.Value(), ""))
}
// to determine the Content-Type header
localVarHttpContentTypes := []string{"application/json"}
// set Content-Type header
localVarHttpContentType := selectHeaderContentType(localVarHttpContentTypes)
if localVarHttpContentType != "" {
localVarHeaderParams["Content-Type"] = localVarHttpContentType
}
// to determine the Accept header
localVarHttpHeaderAccepts := []string{"application/json"}
// set Accept header
localVarHttpHeaderAccept := selectHeaderAccept(localVarHttpHeaderAccepts)
if localVarHttpHeaderAccept != "" {
localVarHeaderParams["Accept"] = localVarHttpHeaderAccept
}
// body params
localVarPostBody = &newMail
r, err := a.client.prepareRequest(ctx, localVarPath, localVarHttpMethod, localVarPostBody, localVarHeaderParams, localVarQueryParams, localVarFormParams, localVarFileName, localVarFileBytes)
if err != nil {
return nil, err
}
localVarHttpResponse, err := a.client.callAPI(r)
if err != nil || localVarHttpResponse == nil {
return localVarHttpResponse, err
}
localVarBody, err := ioutil.ReadAll(localVarHttpResponse.Body)
localVarHttpResponse.Body.Close()
if err != nil {
return localVarHttpResponse, err
}
if localVarHttpResponse.StatusCode >= 400 {
newErr := GenericSwaggerError{
body: localVarBody,
error: localVarHttpResponse.Status,
}
if localVarHttpResponse.StatusCode == 400 {
var v BadRequest
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 401 {
var v Unauthorized
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 403 {
var v Forbidden
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 420 {
var v ErrorLimited
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
|
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 422 {
var v PostUiOpenwindowNewmailUnprocessableEntity
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 500 {
var v InternalServerError
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 503 {
var v ServiceUnavailable
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
if localVarHttpResponse.StatusCode == 504 {
var v GatewayTimeout
err = a.client.decode(&v, localVarBody, localVarHttpResponse.Header.Get("content-type"))
if err != nil {
newErr.error = err.Error()
return localVarHttpResponse, newErr
}
newErr.model = v
return localVarHttpResponse, newErr
}
return localVarHttpResponse, newErr
}
return localVarHttpResponse, nil
}
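// A minimal usage sketch for the endpoint above (hedged: `apiClient` and `newMail` are assumed to
// come from the rest of this generated package — a configured API client exposing UserInterfaceApi
// and a populated PostUiOpenwindowNewmailNewMail — and the datasource value is only an example):
//
//	opts := &PostUiOpenwindowNewmailOpts{
//		Datasource: optional.NewString("tranquility"),
//	}
//	resp, err := apiClient.UserInterfaceApi.PostUiOpenwindowNewmail(context.Background(), newMail, opts)
//	if err != nil {
//		// On a non-2xx status the error is a GenericSwaggerError; resp still carries the raw
//		// HTTP response for inspection.
//	}
//	_ = resp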
| |
factory.rs
|
// Copyright 2016 The Gfx-rs Developers.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::os::raw::c_void;
use std::sync::Arc;
use std::{mem, slice, str};
use std::path::Path;
// use cocoa::base::{selector, class};
// use cocoa::foundation::{NSUInteger};
use core::{self, buffer, factory, mapping, memory};
use core::handle::{self, Producer};
use core::memory::Typed;
use metal::*;
use command::CommandBuffer;
use MTL_MAX_BUFFER_BINDINGS;
use {Resources, Share, Texture, Buffer, Shader, Program, ShaderLibrary, Pipeline};
use native;
use mirror;
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct RawMapping {
pointer: *mut c_void,
}
unsafe impl Send for RawMapping {}
unsafe impl Sync for RawMapping {}
impl mapping::Gate<Resources> for RawMapping {
unsafe fn set<T>(&self, index: usize, val: T) {
*(self.pointer as *mut T).offset(index as isize) = val;
}
unsafe fn slice<'a, 'b, T>(&'a self, len: usize) -> &'b [T] {
slice::from_raw_parts(self.pointer as *const T, len)
}
unsafe fn mut_slice<'a, 'b, T>(&'a self, len: usize) -> &'b mut [T] {
slice::from_raw_parts_mut(self.pointer as *mut T, len)
}
}
pub struct Factory {
device: MTLDevice,
queue: MTLCommandQueue,
share: Arc<Share>,
frame_handles: handle::Manager<Resources>,
}
impl Factory {
pub fn new(device: MTLDevice, share: Arc<Share>) -> Factory {
Factory {
device: device,
queue: device.new_command_queue(),
share: share,
frame_handles: handle::Manager::new(),
}
}
pub fn create_command_buffer(&self) -> CommandBuffer {
CommandBuffer::new(self.device, self.queue)
}
fn create_buffer_internal(&self,
info: buffer::Info,
raw_data: Option<*const c_void>)
-> Result<handle::RawBuffer<Resources>, buffer::CreationError> {
use map::map_buffer_usage;
let usage = map_buffer_usage(info.usage, info.bind);
if info.bind.contains(memory::RENDER_TARGET) | info.bind.contains(memory::DEPTH_STENCIL) {
return Err(buffer::CreationError::UnsupportedBind(info.bind));
}
let raw_buf = if let Some(data) = raw_data {
self.device
.new_buffer_with_data(unsafe { mem::transmute(data) }, info.size as u64, usage)
} else {
self.device.new_buffer(info.size as u64, usage)
};
let buf = Buffer(native::Buffer(Box::into_raw(Box::new(raw_buf))), info.usage, info.bind);
// TODO(fkaa): if we have a way to track buffers in use (added on
// scheduling of command buffers, removed on completion),
// we could block while in use on both sides. would need
// a state for each mode (`in-use` vs. `mapped`).
let mapping = None;
Ok(self.share.handles.borrow_mut().make_buffer(buf, info, mapping))
}
pub fn make_depth_stencil(&self, info: &core::pso::DepthStencilInfo) -> MTLDepthStencilState {
use map::{map_function, map_stencil_op};
let desc = MTLDepthStencilDescriptor::alloc().init();
desc.set_depth_write_enabled(match info.depth {
Some(ref depth) => depth.write,
None => false
});
desc.set_depth_compare_function(match info.depth {
Some(ref depth) => map_function(depth.fun),
None => MTLCompareFunction::Never,
});
if let Some(stencil) = info.front {
let front = MTLStencilDescriptor::alloc().init();
front.set_stencil_compare_function(map_function(stencil.fun));
front.set_stencil_failure_operation(map_stencil_op(stencil.op_fail));
front.set_depth_failure_operation(map_stencil_op(stencil.op_depth_fail));
front.set_depth_stencil_pass_operation(map_stencil_op(stencil.op_pass));
            // TODO: verify the mask types — gfx exposes the stencil masks as u8, and they are widened to u32 for Metal here
front.set_read_mask(stencil.mask_read as u32);
front.set_write_mask(stencil.mask_write as u32);
desc.set_front_face_stencil(front);
};
if let Some(stencil) = info.back {
let back = MTLStencilDescriptor::alloc().init();
back.set_stencil_compare_function(map_function(stencil.fun));
back.set_stencil_failure_operation(map_stencil_op(stencil.op_fail));
back.set_depth_failure_operation(map_stencil_op(stencil.op_depth_fail));
back.set_depth_stencil_pass_operation(map_stencil_op(stencil.op_pass));
            // TODO: verify the mask types — gfx exposes the stencil masks as u8, and they are widened to u32 for Metal here
back.set_read_mask(stencil.mask_read as u32);
back.set_write_mask(stencil.mask_write as u32);
desc.set_back_face_stencil(back);
};
self.device.new_depth_stencil_state(desc)
}
pub fn create_library<P: AsRef<Path>>
(&mut self,
file: P)
-> Result<ShaderLibrary, core::shade::CreateShaderError> {
use core::shade::CreateShaderError;
match self.device.new_library_with_file(file) {
Ok(lib) => Ok(ShaderLibrary { lib: lib }),
Err(err) => Err(CreateShaderError::CompilationFailed(err.into())),
}
}
fn create_shader_from_library<S: AsRef<str>>
(&mut self,
stage: core::shade::Stage,
library: &ShaderLibrary,
function_name: S)
-> Result<handle::Shader<Resources>, core::shade::CreateShaderError> {
use core::shade::{CreateShaderError, Stage};
match stage {
Stage::Vertex | Stage::Pixel => (),
_ => return Err(CreateShaderError::StageNotSupported(stage)),
}
let shader = Shader {
func: library.lib.get_function(function_name.as_ref()),
};
Ok(self.share.handles.borrow_mut().make_shader(shader))
}
pub fn create_shader_vertex_from_library<S: AsRef<str>>
(&mut self,
library: &ShaderLibrary,
function_name: S)
-> Result<core::VertexShader<Resources>, core::shade::CreateShaderError> {
self.create_shader_from_library(
core::shade::Stage::Vertex,
library,
function_name)
.map(|s| core::VertexShader::new(s))
}
pub fn
|
<S: AsRef<str>>
(&mut self,
library: &ShaderLibrary,
function_name: S)
-> Result<core::PixelShader<Resources>, core::shade::CreateShaderError> {
self.create_shader_from_library(
core::shade::Stage::Pixel,
library,
function_name)
.map(|s| core::PixelShader::new(s))
}
}
impl core::Factory<Resources> for Factory {
fn get_capabilities(&self) -> &core::Capabilities {
&self.share.capabilities
}
fn create_buffer_raw(&mut self,
info: buffer::Info)
-> Result<handle::RawBuffer<Resources>, buffer::CreationError> {
self.create_buffer_internal(info, None)
}
fn create_buffer_immutable_raw
(&mut self,
data: &[u8],
stride: usize,
role: buffer::Role,
bind: memory::Bind)
-> Result<handle::RawBuffer<Resources>, buffer::CreationError> {
let info = buffer::Info {
role: role,
usage: memory::Usage::Data,
bind: bind,
size: data.len(),
stride: stride,
};
self.create_buffer_internal(info, Some(data.as_ptr() as *const c_void))
}
fn create_shader(&mut self,
stage: core::shade::Stage,
code: &[u8])
-> Result<handle::Shader<Resources>, core::shade::CreateShaderError> {
use core::shade::{CreateShaderError, Stage};
let lib = match stage {
Stage::Vertex | Stage::Pixel => {
let src = str::from_utf8(code).unwrap();
match self.device.new_library_with_source(src, MTLCompileOptions::nil()) {
Ok(lib) => lib,
Err(err) => return Err(CreateShaderError::CompilationFailed(err.into())),
}
}
_ => return Err(CreateShaderError::StageNotSupported(stage)),
};
let shader = Shader {
func: lib.get_function(match stage {
Stage::Vertex => "vert",
Stage::Pixel => "frag",
_ => return Err(CreateShaderError::StageNotSupported(stage)),
}),
};
Ok(self.share.handles.borrow_mut().make_shader(shader))
}
fn create_program(&mut self,
shader_set: &core::ShaderSet<Resources>)
-> Result<handle::Program<Resources>, core::shade::CreateProgramError> {
use core::shade::{ProgramInfo, Stage};
let (prog, info) = match shader_set {
&core::ShaderSet::Simple(ref vs, ref ps) => {
let mut info = ProgramInfo {
vertex_attributes: Vec::new(),
globals: Vec::new(),
constant_buffers: Vec::new(),
textures: Vec::new(),
unordereds: Vec::new(),
samplers: Vec::new(),
outputs: Vec::new(),
output_depth: false,
knows_outputs: false,
};
let fh = &mut self.frame_handles;
let (vs, ps) = (vs.reference(fh).func, ps.reference(fh).func);
let mut reflection = MTLRenderPipelineReflection::nil();
// since Metal doesn't allow for fetching shader reflection
// without creating a PSO, we're creating a "fake" PSO to get
// the reflection, and destroying it afterwards.
//
// Tracking: https://forums.developer.apple.com/thread/46535
let pso_descriptor = MTLRenderPipelineDescriptor::alloc().init();
pso_descriptor.set_vertex_function(vs);
if !ps.is_null() {
pso_descriptor.set_fragment_function(ps);
}
pso_descriptor.color_attachments()
.object_at(0)
.set_pixel_format(MTLPixelFormat::BGRA8Unorm_sRGB);
// We need fake depth attachments in case explicit writes to the depth buffer are required
pso_descriptor.set_depth_attachment_pixel_format(MTLPixelFormat::Depth32Float_Stencil8);
pso_descriptor.set_stencil_attachment_pixel_format(MTLPixelFormat::Depth32Float_Stencil8);
// TODO: prevent collision between dummy buffers and real
// values
let vertex_desc = MTLVertexDescriptor::new();
let buf = vertex_desc.layouts().object_at((MTL_MAX_BUFFER_BINDINGS - 1) as usize);
buf.set_stride(16);
buf.set_step_function(MTLVertexStepFunction::Constant);
buf.set_step_rate(0);
mirror::populate_vertex_attributes(&mut info, vs.vertex_attributes());
for attr in info.vertex_attributes.iter() {
// TODO: handle case when requested vertex format is invalid
let attribute = vertex_desc.attributes().object_at(attr.slot as usize);
attribute.set_format(mirror::map_base_type_to_format(attr.base_type));
attribute.set_offset(0);
attribute.set_buffer_index((MTL_MAX_BUFFER_BINDINGS - 1) as u64);
}
pso_descriptor.set_vertex_descriptor(vertex_desc);
let _pso = self.device
.new_render_pipeline_state_with_reflection(pso_descriptor, &mut reflection)
.unwrap();
// fill the `ProgramInfo` struct with goodies
mirror::populate_info(&mut info, Stage::Vertex, reflection.vertex_arguments());
mirror::populate_info(&mut info, Stage::Pixel, reflection.fragment_arguments());
// destroy PSO & reflection object after we're done with
// parsing reflection
// unsafe {
// pso.release();
// reflection.release();
// }
// FIXME: retain functions?
let program = Program { vs: vs, ps: ps };
(program, info)
}
// Metal only supports vertex + fragment and has some features from
// geometry shaders in vertex (layered rendering)
//
// Tracking: https://forums.developer.apple.com/message/9495
_ => {
return Err("Metal only supports vertex + fragment shader programs".into());
}
};
Ok(self.share.handles.borrow_mut().make_program(prog, info))
}
fn create_pipeline_state_raw(&mut self, program: &handle::Program<Resources>, desc: &core::pso::Descriptor)
-> Result<handle::RawPipelineState<Resources>, core::pso::CreationError> {
use map::{map_depth_surface, map_vertex_format, map_topology,
map_winding, map_cull, map_fill, map_format, map_blend_op,
map_blend_factor, map_write_mask};
use core::{MAX_COLOR_TARGETS};
let vertex_desc = MTLVertexDescriptor::new();
let mut vb_count = 0;
for vb in desc.vertex_buffers.iter() {
if let &Some(vbuf) = vb {
let buf = vertex_desc.layouts().object_at((MTL_MAX_BUFFER_BINDINGS - 1) as usize - vb_count);
buf.set_stride(vbuf.stride as u64);
if vbuf.rate > 0 {
buf.set_step_function(MTLVertexStepFunction::PerInstance);
buf.set_step_rate(vbuf.rate as u64);
} else {
buf.set_step_function(MTLVertexStepFunction::PerVertex);
buf.set_step_rate(1);
}
vb_count += 1;
}
}
// TODO: find a better way to set the buffer's stride, step func and
// step rate
for (attr, attr_desc) in program.get_info().vertex_attributes.iter().zip(desc.attributes.iter()) {
let (idx, elem) = match attr_desc {
&Some((idx, el)) => (idx, el),
&None => continue,
};
if elem.offset & 1 != 0 {
error!("Vertex attribute {} must be aligned to 2 bytes, has offset {}",
attr.name,
elem.offset);
return Err(core::pso::CreationError);
}
// TODO: handle case when requested vertex format is invalid
let attribute = vertex_desc.attributes().object_at(attr.slot as usize);
attribute.set_format(map_vertex_format(elem.format).unwrap());
attribute.set_offset(elem.offset as u64);
attribute.set_buffer_index((MTL_MAX_BUFFER_BINDINGS - 1) as u64 - idx as u64);
}
let prog = self.frame_handles.ref_program(program);
let pso_descriptor = MTLRenderPipelineDescriptor::alloc().init();
pso_descriptor.set_vertex_function(prog.vs);
if !prog.ps.is_null() {
pso_descriptor.set_fragment_function(prog.ps);
}
pso_descriptor.set_vertex_descriptor(vertex_desc);
pso_descriptor.set_input_primitive_topology(map_topology(desc.primitive));
for idx in 0..MAX_COLOR_TARGETS {
if let Some(color) = desc.color_targets[idx] {
let attachment = pso_descriptor.color_attachments().object_at(idx);
attachment.set_pixel_format(map_format(color.0, true).unwrap());
attachment.set_blending_enabled(color.1.color.is_some() || color.1.alpha.is_some());
attachment.set_write_mask(map_write_mask(color.1.mask));
if let Some(blend) = color.1.color {
attachment.set_source_rgb_blend_factor(map_blend_factor(blend.source, false));
attachment.set_destination_rgb_blend_factor(map_blend_factor(blend.destination, false));
attachment.set_rgb_blend_operation(map_blend_op(blend.equation));
}
if let Some(blend) = color.1.alpha {
attachment.set_source_alpha_blend_factor(map_blend_factor(blend.source, true));
attachment.set_destination_alpha_blend_factor(map_blend_factor(blend.destination, true));
attachment.set_alpha_blend_operation(map_blend_op(blend.equation));
}
}
}
if let Some(depth_desc) = desc.depth_stencil {
let (depth_pixel_format, has_stencil) = map_depth_surface((depth_desc.0).0).expect("Unsupported depth format");
pso_descriptor.set_depth_attachment_pixel_format(depth_pixel_format);
if has_stencil {
pso_descriptor.set_stencil_attachment_pixel_format(depth_pixel_format);
}
}
let pso = self.device.new_render_pipeline_state(pso_descriptor).unwrap();
let pso = Pipeline {
pipeline: pso,
depth_stencil: desc.depth_stencil.map(|desc| self.make_depth_stencil(&desc.1)),
winding: map_winding(desc.rasterizer.front_face),
cull: map_cull(desc.rasterizer.cull_face),
fill: map_fill(desc.rasterizer.method),
alpha_to_one: false,
alpha_to_coverage: false,
depth_bias: if let Some(ref offset) = desc.rasterizer.offset {
offset.1
} else {
0
},
slope_scaled_depth_bias: if let Some(ref offset) = desc.rasterizer.offset {
offset.0
} else {
0
},
depth_clip: true
};
Ok(self.share.handles.borrow_mut().make_pso(pso, program))
}
fn create_texture_raw
(&mut self,
desc: core::texture::Info,
hint: Option<core::format::ChannelType>,
data_opt: Option<&[&[u8]]>)
-> Result<handle::RawTexture<Resources>, core::texture::CreationError> {
use core::texture::{AaMode, Kind};
use map::{map_channel_hint, map_texture_bind, map_texture_usage, map_format};
let (resource, storage) = map_texture_usage(desc.usage, desc.bind);
let descriptor = MTLTextureDescriptor::alloc().init();
descriptor.set_mipmap_level_count(desc.levels as u64);
descriptor.set_resource_options(resource);
descriptor.set_storage_mode(storage);
descriptor.set_pixel_format(map_format(core::format::Format(desc.format, hint.unwrap_or(map_channel_hint(desc.format).unwrap())), true).unwrap());
descriptor.set_usage(map_texture_bind(desc.bind));
match desc.kind {
Kind::D1(w) => {
descriptor.set_width(w as u64);
descriptor.set_texture_type(MTLTextureType::D1);
}
Kind::D1Array(w, d) => {
descriptor.set_width(w as u64);
descriptor.set_array_length(d as u64);
descriptor.set_texture_type(MTLTextureType::D1Array);
}
Kind::D2(w, h, aa) => {
descriptor.set_width(w as u64);
descriptor.set_height(h as u64);
match aa {
AaMode::Single => {
descriptor.set_texture_type(MTLTextureType::D2);
}
AaMode::Multi(samples) => {
descriptor.set_texture_type(MTLTextureType::D2Multisample);
descriptor.set_sample_count(samples as u64);
}
_ => unimplemented!(),
};
}
Kind::D2Array(w, h, d, _aa) => {
descriptor.set_width(w as u64);
descriptor.set_height(h as u64);
descriptor.set_array_length(d as u64);
descriptor.set_texture_type(MTLTextureType::D2Array);
}
Kind::D3(w, h, d) => {
descriptor.set_width(w as u64);
descriptor.set_height(h as u64);
descriptor.set_depth(d as u64);
descriptor.set_texture_type(MTLTextureType::D3);
}
Kind::Cube(w) => {
descriptor.set_width(w as u64);
descriptor.set_texture_type(MTLTextureType::Cube);
}
Kind::CubeArray(w, d) => {
descriptor.set_width(w as u64);
descriptor.set_array_length(d as u64);
descriptor.set_texture_type(MTLTextureType::CubeArray);
}
};
let raw_tex = self.device.new_texture(descriptor);
if let Some(data) = data_opt {
let region = match desc.kind {
Kind::D1(w) => {
MTLRegion {
origin: MTLOrigin { x: 0, y: 0, z: 0 },
size: MTLSize {
width: w as u64,
height: 1,
depth: 1,
},
}
}
Kind::D1Array(w, d) => {
MTLRegion {
origin: MTLOrigin { x: 0, y: 0, z: 0 },
size: MTLSize {
width: w as u64,
height: 1,
depth: d as u64,
},
}
}
Kind::D2(w, h, _) => {
MTLRegion {
origin: MTLOrigin { x: 0, y: 0, z: 0 },
size: MTLSize {
width: w as u64,
height: h as u64,
depth: 1,
},
}
}
Kind::D2Array(w, h, d, _) => {
MTLRegion {
origin: MTLOrigin { x: 0, y: 0, z: 0 },
size: MTLSize {
width: w as u64,
height: h as u64,
depth: d as u64,
},
}
}
Kind::D3(w, h, d) => {
MTLRegion {
origin: MTLOrigin { x: 0, y: 0, z: 0 },
size: MTLSize {
width: w as u64,
height: h as u64,
depth: d as u64,
},
}
}
_ => unimplemented!(),
};
// TODO: handle the data better
raw_tex.replace_region(region,
0,
4 * region.size.width,
data[0].as_ptr() as *const _);
}
let tex = Texture(native::Texture(Box::into_raw(Box::new(raw_tex))),
desc.usage);
Ok(self.share.handles.borrow_mut().make_texture(tex, desc))
}
fn view_buffer_as_shader_resource_raw
(&mut self,
_hbuf: &handle::RawBuffer<Resources>)
-> Result<handle::RawShaderResourceView<Resources>, factory::ResourceViewError> {
unimplemented!()
// Err(factory::ResourceViewError::Unsupported) //TODO
}
fn view_buffer_as_unordered_access_raw
(&mut self,
_hbuf: &handle::RawBuffer<Resources>)
-> Result<handle::RawUnorderedAccessView<Resources>, factory::ResourceViewError> {
unimplemented!()
// Err(factory::ResourceViewError::Unsupported) //TODO
}
fn view_texture_as_shader_resource_raw
(&mut self,
htex: &handle::RawTexture<Resources>,
_desc: core::texture::ResourceDesc)
-> Result<handle::RawShaderResourceView<Resources>, factory::ResourceViewError> {
// use winapi::UINT;
// use core::texture::{AaMode, Kind};
// use data::map_format;
//
// let (dim, layers, has_levels) = match htex.get_info().kind {
// Kind::D1(_) =>
// (winapi::D3D11_SRV_DIMENSION_TEXTURE1D, 1, true),
// Kind::D1Array(_, d) =>
// (winapi::D3D11_SRV_DIMENSION_TEXTURE1DARRAY, d, true),
// Kind::D2(_, _, AaMode::Single) =>
// (winapi::D3D11_SRV_DIMENSION_TEXTURE2D, 1, true),
// Kind::D2(_, _, _) =>
// (winapi::D3D11_SRV_DIMENSION_TEXTURE2DMS, 1, false),
// Kind::D2Array(_, _, d, AaMode::Single) =>
// (winapi::D3D11_SRV_DIMENSION_TEXTURE2DARRAY, d, true),
// Kind::D2Array(_, _, d, _) =>
// (winapi::D3D11_SRV_DIMENSION_TEXTURE2DMSARRAY, d, false),
// Kind::D3(_, _, _) =>
// (winapi::D3D11_SRV_DIMENSION_TEXTURE3D, 1, true),
// Kind::Cube(_) =>
// (winapi::D3D11_SRV_DIMENSION_TEXTURECUBE, 1, true),
// Kind::CubeArray(_, d) =>
// (winapi::D3D11_SRV_DIMENSION_TEXTURECUBEARRAY, d, true),
// };
//
// let format = core::format::Format(htex.get_info().format, desc.channel);
// let native_desc = winapi::D3D11_SHADER_RESOURCE_VIEW_DESC {
// Format: match map_format(format, false) {
// Some(fm) => fm,
// None => return Err(f::ResourceViewError::Channel(desc.channel)),
// },
// ViewDimension: dim,
// u: if has_levels {
// assert!(desc.max >= desc.min);
// [desc.min as UINT, (desc.max + 1 - desc.min) as UINT, 0, layers as UINT]
// }else {
// [0, layers as UINT, 0, 0]
// },
// };
//
// let mut raw_view = ptr::null_mut();
// let raw_tex = self.frame_handles.ref_texture(htex).as_resource();
// let hr = unsafe {
// (*self.device).CreateShaderResourceView(raw_tex, &native_desc, &mut raw_view)
// };
// if !winapi::SUCCEEDED(hr) {
// error!("Failed to create SRV from {:#?}, error {:x}", native_desc, hr);
// return Err(f::ResourceViewError::Unsupported);
// }
// Ok(self.share.handles.borrow_mut().make_texture_srv(native::Srv(raw_view), htex))
let raw_tex = self.frame_handles.ref_texture(htex).0;
Ok(self.share.handles.borrow_mut().make_texture_srv(native::Srv(raw_tex.0), htex))
}
fn view_texture_as_unordered_access_raw
(&mut self,
_htex: &handle::RawTexture<Resources>)
-> Result<handle::RawUnorderedAccessView<Resources>, factory::ResourceViewError> {
// Err(factory::ResourceViewError::Unsupported) //TODO
unimplemented!()
}
fn view_texture_as_render_target_raw
(&mut self,
htex: &handle::RawTexture<Resources>,
desc: core::texture::RenderDesc)
-> Result<handle::RawRenderTargetView<Resources>, factory::TargetViewError> {
let raw_tex = self.frame_handles.ref_texture(htex).0;
let size = htex.get_info().kind.get_level_dimensions(desc.level);
Ok(self.share.handles.borrow_mut().make_rtv(native::Rtv(raw_tex.0), htex, size))
}
fn view_texture_as_depth_stencil_raw
(&mut self,
htex: &handle::RawTexture<Resources>,
desc: core::texture::DepthStencilDesc)
-> Result<handle::RawDepthStencilView<Resources>, factory::TargetViewError> {
// use winapi::UINT;
// use core::texture::{AaMode, Kind};
// use data::{map_format, map_dsv_flags};
//
// let level = desc.level as UINT;
// let (dim, extra) = match (htex.get_info().kind, desc.layer) {
// (Kind::D1(..), None) =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE1D, [level, 0, 0]),
// (Kind::D1Array(_, nlayers), Some(lid)) if lid < nlayers =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE1DARRAY, [level, lid as UINT, 1+lid as UINT]),
// (Kind::D1Array(_, nlayers), None) =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE1DARRAY, [level, 0, nlayers as UINT]),
// (Kind::D2(_, _, AaMode::Single), None) =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE2D, [level, 0, 0]),
// (Kind::D2(_, _, _), None) if level == 0 =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE2DMS, [0, 0, 0]),
// (Kind::D2Array(_, _, nlayers, AaMode::Single), None) =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE2DARRAY, [level, 0, nlayers as UINT]),
// (Kind::D2Array(_, _, nlayers, AaMode::Single), Some(lid)) if lid < nlayers =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE2DARRAY, [level, lid as UINT, 1+lid as UINT]),
// (Kind::D2Array(_, _, nlayers, _), None) if level == 0 =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY, [0, nlayers as UINT, 0]),
// (Kind::D2Array(_, _, nlayers, _), Some(lid)) if level == 0 && lid < nlayers =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE2DMSARRAY, [lid as UINT, 1+lid as UINT, 0]),
// (Kind::D3(..), _) => return Err(f::TargetViewError::Unsupported),
// (Kind::Cube(..), None) =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE2DARRAY, [level, 0, 6]),
// (Kind::Cube(..), Some(lid)) if lid < 6 =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE2DARRAY, [level, lid as UINT, 1+lid as UINT]),
// (Kind::CubeArray(_, nlayers), None) =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE2DARRAY, [level, 0, 6 * nlayers as UINT]),
// (Kind::CubeArray(_, nlayers), Some(lid)) if lid < nlayers =>
// (winapi::D3D11_DSV_DIMENSION_TEXTURE2DARRAY, [level, 6 * lid as UINT, 6 * (1+lid) as UINT]),
// (_, None) => return Err(f::TargetViewError::BadLevel(desc.level)),
// (_, Some(lid)) => return Err(f::TargetViewError::BadLayer(lid)),
// };
//
// let channel = core::format::ChannelType::Uint; //doesn't matter
// let format = core::format::Format(htex.get_info().format, channel);
// let native_desc = winapi::D3D11_DEPTH_STENCIL_VIEW_DESC {
// Format: match map_format(format, true) {
// Some(fm) => fm,
// None => return Err(f::TargetViewError::Channel(channel)),
// },
// ViewDimension: dim,
// Flags: map_dsv_flags(desc.flags).0,
// u: extra,
// };
//
// let mut raw_view = ptr::null_mut();
// let raw_tex = self.frame_handles.ref_texture(htex).as_resource();
// let hr = unsafe {
// (*self.device).CreateDepthStencilView(raw_tex, &native_desc, &mut raw_view)
// };
// if !winapi::SUCCEEDED(hr) {
// error!("Failed to create DSV from {:#?}, error {:x}", native_desc, hr);
// return Err(f::TargetViewError::Unsupported);
// }
// let dim = htex.get_info().kind.get_level_dimensions(desc.level);
// Ok(self.share.handles.borrow_mut().make_dsv(native::Dsv(raw_view), htex, dim))
let raw_tex = self.frame_handles.ref_texture(htex).0;
let size = htex.get_info().kind.get_level_dimensions(desc.level);
Ok(self.share.handles.borrow_mut().make_dsv(native::Dsv(raw_tex.0, desc.layer), htex, size))
}
fn create_sampler(&mut self, info: core::texture::SamplerInfo) -> handle::Sampler<Resources> {
use core::texture::FilterMethod;
use map::{map_function, map_filter, map_wrap};
let desc = MTLSamplerDescriptor::new();
let (filter, mip) = map_filter(info.filter);
desc.set_min_filter(filter);
desc.set_mag_filter(filter);
desc.set_mip_filter(mip);
if let FilterMethod::Anisotropic(anisotropy) = info.filter {
desc.set_max_anisotropy(anisotropy as u64);
}
desc.set_lod_bias(info.lod_bias.into());
desc.set_lod_min_clamp(info.lod_range.0.into());
desc.set_lod_max_clamp(info.lod_range.1.into());
desc.set_address_mode_s(map_wrap(info.wrap_mode.0));
desc.set_address_mode_t(map_wrap(info.wrap_mode.1));
desc.set_address_mode_r(map_wrap(info.wrap_mode.2));
desc.set_compare_function(map_function(info.comparison.unwrap_or(
core::state::Comparison::Always)));
let sampler = self.device.new_sampler(desc);
self.share.handles.borrow_mut().make_sampler(native::Sampler(sampler), info)
}
fn read_mapping<'a, 'b, T>(&'a mut self, buf: &'b handle::Buffer<Resources, T>)
-> Result<mapping::Reader<'b, Resources, T>,
mapping::Error>
where T: Copy
{
unimplemented!()
}
fn write_mapping<'a, 'b, T>(&'a mut self, buf: &'b handle::Buffer<Resources, T>)
-> Result<mapping::Writer<'b, Resources, T>,
mapping::Error>
where T: Copy
{
unimplemented!()
}
}
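// A minimal usage sketch for this factory (hedged: obtaining the `MTLDevice` and the shared
// `Arc<Share>` happens in the window/backend setup outside this file, and "shaders.metallib"
// plus the "vert"/"frag" entry-point names are only example values):
//
//     let mut factory = Factory::new(device, share);
//     let library = factory.create_library("shaders.metallib")?;
//     let vs = factory.create_shader_vertex_from_library(&library, "vert")?;
//     let ps = factory.create_shader_pixel_from_library(&library, "frag")?;
//     let cmd_buf = factory.create_command_buffer();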
|
create_shader_pixel_from_library
|
grafeas.get_occurrence_note.js
|
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
'use strict';
function
|
(name) {
// [START containeranalysis_v1_generated_Grafeas_GetOccurrenceNote_async]
/**
* TODO(developer): Uncomment these variables before running the sample.
*/
/**
* The name of the occurrence in the form of
* `projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`.
*/
// const name = 'abc123'
// Imports the Grafeas library
const {GrafeasClient} = require('@google-cloud/grafeas').v1;
// Instantiates a client
const grafeasClient = new GrafeasClient();
async function getOccurrenceNote() {
// Construct request
const request = {
name,
};
// Run request
const response = await grafeasClient.getOccurrenceNote(request);
console.log(response);
}
getOccurrenceNote();
// [END containeranalysis_v1_generated_Grafeas_GetOccurrenceNote_async]
}
process.on('unhandledRejection', err => {
console.error(err.message);
process.exitCode = 1;
});
main(...process.argv.slice(2));
|
main
|
__main__.py
|
import argparse
import sys
import typing
from . import __version__
from . import advanced_repr
from . import archiver
from . import stream
def make_subcommand_parser(subs: typing.Any, name: str, *, help: str, description: str, **kwargs: typing.Any) -> argparse.ArgumentParser:
"""Add a subcommand parser with some slightly modified defaults to a subcommand set.
This function is used to ensure that all subcommands use the same base configuration for their ArgumentParser.
"""
ap = subs.add_parser(
name,
formatter_class=argparse.RawDescriptionHelpFormatter,
help=help,
description=description,
allow_abbrev=False,
add_help=False,
**kwargs,
)
ap.add_argument("--help", action="help", help="Display this help message and exit.")
return ap
def open_typedstream_file(file: str) -> stream.TypedStreamReader:
if file == "-":
return stream.TypedStreamReader(sys.stdin.buffer)
else:
return stream.TypedStreamReader.open(file)
def dump_typedstream(ts: stream.TypedStreamReader) -> typing.Iterable[str]:
yield f"streamer version {ts.streamer_version}, byte order {ts.byte_order}, system version {ts.system_version}"
yield ""
indent = 0
next_object_number = 0
for event in ts:
if isinstance(event, (stream.EndTypedValues, stream.EndObject, stream.EndArray, stream.EndStruct)):
indent -= 1
rep = ("\t" * indent) + str(event)
if isinstance(event, (stream.CString, stream.SingleClass, stream.BeginObject)):
rep += f" (#{next_object_number})"
next_object_number += 1
yield rep
if isinstance(event, (stream.BeginTypedValues, stream.BeginObject, stream.BeginArray, stream.BeginStruct)):
indent += 1
def do_read(ns: argparse.Namespace) -> typing.NoReturn:
with open_typedstream_file(ns.file) as ts:
for line in dump_typedstream(ts):
print(line)
sys.exit(0)
def dump_decoded_typedstream(ts: stream.TypedStreamReader) -> typing.Iterable[str]:
unarchiver = archiver.Unarchiver(ts)
for obj in unarchiver.decode_all():
yield from advanced_repr.as_multiline_string(obj)
def do_decode(ns: argparse.Namespace) -> typing.NoReturn:
with open_typedstream_file(ns.file) as ts:
for line in dump_decoded_typedstream(ts):
print(line)
sys.exit(0)
def main() -> typing.NoReturn:
"""Main function of the CLI.
This function is a valid setuptools entry point.
Arguments are passed in sys.argv,
and every execution path ends with a sys.exit call.
(setuptools entry points are also permitted to return an integer,
which will be treated as an exit code.
We do not use this feature and instead always call sys.exit ourselves.)
"""
ap = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
%(prog)s is a tool for dumping typedstream files, which are produced by
the NSArchiver class in Apple's Foundation framework, as well as the
NXTypedStream APIs in the older NeXTSTEP OS.
""",
allow_abbrev=False,
add_help=False,
)
ap.add_argument("--help", action="help", help="Display this help message and exit.")
ap.add_argument("--version", action="version", version=__version__, help="Display version information and exit.")
subs = ap.add_subparsers(
dest="subcommand",
metavar="SUBCOMMAND",
)
sub_read = make_subcommand_parser(
subs,
"read",
help="Read and display the raw contents of a typedstream.",
description="""
Read and display the raw contents of a typedstream.
All information is displayed as it's stored in the typedstream and is processed
as little as possible. In particular, object references are not resolved
(although each object's reference number is displayed, so that the references
can be followed manually), and objects aren't handled differently based on
their class.
""",
)
sub_read.add_argument("file", help="The typedstream file to read, or - for stdin.")
sub_decode = make_subcommand_parser(
subs,
|
Read, decode and display the contents of a typedstream.
Where possible, the data read from the typedstream is decoded into a
higher-level structure before being displayed. Objects are decoded based on
their class when their format is known and implemented. Objects of unknown
classes are also supported, but are decoded to a generic format based on the
typedstream data.
As a result of this decoding, some low-level information from the typedstream
is discarded and not displayed, such as raw type encoding strings in known
classes, and object reference numbers. To see this low-level information,
use the read subcommand instead.
""",
)
sub_decode.add_argument("file", help="The typedstream file to read, or - for stdin.")
ns = ap.parse_args()
if ns.subcommand is None:
print("Missing subcommand", file=sys.stderr)
sys.exit(2)
elif ns.subcommand == "read":
do_read(ns)
elif ns.subcommand == "decode":
do_decode(ns)
else:
print(f"Unknown subcommand: {ns.subcommand!r}", file=sys.stderr)
sys.exit(2)
if __name__ == "__main__":
sys.exit(main())
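# A minimal programmatic sketch of the same pipeline the CLI drives (hedged:
# "archive.typedstream" is only an example path; pass "-" to read from stdin):
#
#     with open_typedstream_file("archive.typedstream") as ts:
#         for line in dump_decoded_typedstream(ts):
#             print(line)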
|
"decode",
help="Read, decode and display the contents of a typedstream.",
description="""
|
bootstrap.js
|
/*!
* Bootstrap v3.3.7 (http://getbootstrap.com)
* Copyright 2011-2016 Twitter, Inc.
* Licensed under the MIT license
*/
if("undefined"==typeof jQuery)throw new Error("Bootstrap's JavaScript requires jQuery");+function(a){"use strict";var b=a.fn.jquery.split(" ")[0].split(".");if(b[0]<2&&b[1]<9||1==b[0]&&9==b[1]&&b[2]<1||b[0]>3)throw new Error("Bootstrap's JavaScript requires jQuery version 1.9.1 or higher, but lower than version 4")}(jQuery),+function(a){"use strict";function b(){var a=document.createElement("bootstrap"),b={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"};for(var c in b)if(void 0!==a.style[c])return{end:b[c]};return!1}a.fn.emulateTransitionEnd=function(b){var c=!1,d=this;a(this).one("bsTransitionEnd",function(){c=!0});var e=function(){c||a(d).trigger(a.support.transition.end)};return setTimeout(e,b),this},a(function(){a.support.transition=b(),a.support.transition&&(a.event.special.bsTransitionEnd={bindType:a.support.transition.end,delegateType:a.support.transition.end,handle:function(b){if(a(b.target).is(this))return b.handleObj.handler.apply(this,arguments)}})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var c=a(this),e=c.data("bs.alert");e||c.data("bs.alert",e=new d(this)),"string"==typeof b&&e[b].call(c)})}var c='[data-dismiss="alert"]',d=function(b){a(b).on("click",c,this.close)};d.VERSION="3.3.7",d.TRANSITION_DURATION=150,d.prototype.close=function(b){function c(){g.detach().trigger("closed.bs.alert").remove()}var e=a(this),f=e.attr("data-target");f||(f=e.attr("href"),f=f&&f.replace(/.*(?=#[^\s]*$)/,""));var g=a("#"===f?[]:f);b&&b.preventDefault(),g.length||(g=e.closest(".alert")),g.trigger(b=a.Event("close.bs.alert")),b.isDefaultPrevented()||(g.removeClass("in"),a.support.transition&&g.hasClass("fade")?g.one("bsTransitionEnd",c).emulateTransitionEnd(d.TRANSITION_DURATION):c())};var e=a.fn.alert;a.fn.alert=b,a.fn.alert.Constructor=d,a.fn.alert.noConflict=function(){return a.fn.alert=e,this},a(document).on("click.bs.alert.data-api",c,d.prototype.close)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.button"),f="object"==typeof b&&b;e||d.data("bs.button",e=new c(this,f)),"toggle"==b?e.toggle():b&&e.setState(b)})}var c=function(b,d){this.$element=a(b),this.options=a.extend({},c.DEFAULTS,d),this.isLoading=!1};c.VERSION="3.3.7",c.DEFAULTS={loadingText:"loading..."},c.prototype.setState=function(b){var c="disabled",d=this.$element,e=d.is("input")?"val":"html",f=d.data();b+="Text",null==f.resetText&&d.data("resetText",d[e]()),setTimeout(a.proxy(function(){d[e](null==f[b]?this.options[b]:f[b]),"loadingText"==b?(this.isLoading=!0,d.addClass(c).attr(c,c).prop(c,!0)):this.isLoading&&(this.isLoading=!1,d.removeClass(c).removeAttr(c).prop(c,!1))},this),0)},c.prototype.toggle=function(){var a=!0,b=this.$element.closest('[data-toggle="buttons"]');if(b.length){var c=this.$element.find("input");"radio"==c.prop("type")?(c.prop("checked")&&(a=!1),b.find(".active").removeClass("active"),this.$element.addClass("active")):"checkbox"==c.prop("type")&&(c.prop("checked")!==this.$element.hasClass("active")&&(a=!1),this.$element.toggleClass("active")),c.prop("checked",this.$element.hasClass("active")),a&&c.trigger("change")}else this.$element.attr("aria-pressed",!this.$element.hasClass("active")),this.$element.toggleClass("active")};var d=a.fn.button;a.fn.button=b,a.fn.button.Constructor=c,a.fn.button.noConflict=function(){return a.fn.button=d,this},a(document).on("click.bs.button.data-api",'[data-toggle^="button"]',function(c){var 
d=a(c.target).closest(".btn");b.call(d,"toggle"),a(c.target).is('input[type="radio"], input[type="checkbox"]')||(c.preventDefault(),d.is("input,button")?d.trigger("focus"):d.find("input:visible,button:visible").first().trigger("focus"))}).on("focus.bs.button.data-api blur.bs.button.data-api",'[data-toggle^="button"]',function(b){a(b.target).closest(".btn").toggleClass("focus",/^focus(in)?$/.test(b.type))})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.carousel"),f=a.extend({},c.DEFAULTS,d.data(),"object"==typeof b&&b),g="string"==typeof b?b:f.slide;e||d.data("bs.carousel",e=new c(this,f)),"number"==typeof b?e.to(b):g?e[g]():f.interval&&e.pause().cycle()})}var c=function(b,c){this.$element=a(b),this.$indicators=this.$element.find(".carousel-indicators"),this.options=c,this.paused=null,this.sliding=null,this.interval=null,this.$active=null,this.$items=null,this.options.keyboard&&this.$element.on("keydown.bs.carousel",a.proxy(this.keydown,this)),"hover"==this.options.pause&&!("ontouchstart"in document.documentElement)&&this.$element.on("mouseenter.bs.carousel",a.proxy(this.pause,this)).on("mouseleave.bs.carousel",a.proxy(this.cycle,this))};c.VERSION="3.3.7",c.TRANSITION_DURATION=600,c.DEFAULTS={interval:5e3,pause:"hover",wrap:!0,keyboard:!0},c.prototype.keydown=function(a){if(!/input|textarea/i.test(a.target.tagName)){switch(a.which){case 37:this.prev();break;case 39:this.next();break;default:return}a.preventDefault()}},c.prototype.cycle=function(b){return b||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(a.proxy(this.next,this),this.options.interval)),this},c.prototype.getItemIndex=function(a){return this.$items=a.parent().children(".item"),this.$items.index(a||this.$active)},c.prototype.getItemForDirection=function(a,b){var c=this.getItemIndex(b),d="prev"==a&&0===c||"next"==a&&c==this.$items.length-1;if(d&&!this.options.wrap)return b;var e="prev"==a?-1:1,f=(c+e)%this.$items.length;return this.$items.eq(f)},c.prototype.to=function(a){var b=this,c=this.getItemIndex(this.$active=this.$element.find(".item.active"));if(!(a>this.$items.length-1||a<0))return this.sliding?this.$element.one("slid.bs.carousel",function(){b.to(a)}):c==a?this.pause().cycle():this.slide(a>c?"next":"prev",this.$items.eq(a))},c.prototype.pause=function(b){return b||(this.paused=!0),this.$element.find(".next, .prev").length&&a.support.transition&&(this.$element.trigger(a.support.transition.end),this.cycle(!0)),this.interval=clearInterval(this.interval),this},c.prototype.next=function(){if(!this.sliding)return this.slide("next")},c.prototype.prev=function(){if(!this.sliding)return this.slide("prev")},c.prototype.slide=function(b,d){var e=this.$element.find(".item.active"),f=d||this.getItemForDirection(b,e),g=this.interval,h="next"==b?"left":"right",i=this;if(f.hasClass("active"))return this.sliding=!1;var j=f[0],k=a.Event("slide.bs.carousel",{relatedTarget:j,direction:h});if(this.$element.trigger(k),!k.isDefaultPrevented()){if(this.sliding=!0,g&&this.pause(),this.$indicators.length){this.$indicators.find(".active").removeClass("active");var l=a(this.$indicators.children()[this.getItemIndex(f)]);l&&l.addClass("active")}var m=a.Event("slid.bs.carousel",{relatedTarget:j,direction:h});return a.support.transition&&this.$element.hasClass("slide")?(f.addClass(b),f[0].offsetWidth,e.addClass(h),f.addClass(h),e.one("bsTransitionEnd",function(){f.removeClass([b,h].join(" 
")).addClass("active"),e.removeClass(["active",h].join(" ")),i.sliding=!1,setTimeout(function(){i.$element.trigger(m)},0)}).emulateTransitionEnd(c.TRANSITION_DURATION)):(e.removeClass("active"),f.addClass("active"),this.sliding=!1,this.$element.trigger(m)),g&&this.cycle(),this}};var d=a.fn.carousel;a.fn.carousel=b,a.fn.carousel.Constructor=c,a.fn.carousel.noConflict=function(){return a.fn.carousel=d,this};var e=function(c){var d,e=a(this),f=a(e.attr("data-target")||(d=e.attr("href"))&&d.replace(/.*(?=#[^\s]+$)/,""));if(f.hasClass("carousel")){var g=a.extend({},f.data(),e.data()),h=e.attr("data-slide-to");h&&(g.interval=!1),b.call(f,g),h&&f.data("bs.carousel").to(h),c.preventDefault()}};a(document).on("click.bs.carousel.data-api","[data-slide]",e).on("click.bs.carousel.data-api","[data-slide-to]",e),a(window).on("load",function(){a('[data-ride="carousel"]').each(function(){var c=a(this);b.call(c,c.data())})})}(jQuery),+function(a){"use strict";function b(b){var c,d=b.attr("data-target")||(c=b.attr("href"))&&c.replace(/.*(?=#[^\s]+$)/,"");return a(d)}function c(b){return this.each(function(){var c=a(this),e=c.data("bs.collapse"),f=a.extend({},d.DEFAULTS,c.data(),"object"==typeof b&&b);!e&&f.toggle&&/show|hide/.test(b)&&(f.toggle=!1),e||c.data("bs.collapse",e=new d(this,f)),"string"==typeof b&&e[b]()})}var d=function(b,c){this.$element=a(b),this.options=a.extend({},d.DEFAULTS,c),this.$trigger=a('[data-toggle="collapse"][href="#'+b.id+'"],[data-toggle="collapse"][data-target="#'+b.id+'"]'),this.transitioning=null,this.options.parent?this.$parent=this.getParent():this.addAriaAndCollapsedClass(this.$element,this.$trigger),this.options.toggle&&this.toggle()};d.VERSION="3.3.7",d.TRANSITION_DURATION=350,d.DEFAULTS={toggle:!0},d.prototype.dimension=function(){var a=this.$element.hasClass("width");return a?"width":"height"},d.prototype.show=function(){if(!this.transitioning&&!this.$element.hasClass("in")){var b,e=this.$parent&&this.$parent.children(".panel").children(".in, .collapsing");if(!(e&&e.length&&(b=e.data("bs.collapse"),b&&b.transitioning))){var f=a.Event("show.bs.collapse");if(this.$element.trigger(f),!f.isDefaultPrevented()){e&&e.length&&(c.call(e,"hide"),b||e.data("bs.collapse",null));var g=this.dimension();this.$element.removeClass("collapse").addClass("collapsing")[g](0).attr("aria-expanded",!0),this.$trigger.removeClass("collapsed").attr("aria-expanded",!0),this.transitioning=1;var h=function(){this.$element.removeClass("collapsing").addClass("collapse in")[g](""),this.transitioning=0,this.$element.trigger("shown.bs.collapse")};if(!a.support.transition)return h.call(this);var i=a.camelCase(["scroll",g].join("-"));this.$element.one("bsTransitionEnd",a.proxy(h,this)).emulateTransitionEnd(d.TRANSITION_DURATION)[g](this.$element[0][i])}}}},d.prototype.hide=function(){if(!this.transitioning&&this.$element.hasClass("in")){var b=a.Event("hide.bs.collapse");if(this.$element.trigger(b),!b.isDefaultPrevented()){var c=this.dimension();this.$element[c](this.$element[c]())[0].offsetHeight,this.$element.addClass("collapsing").removeClass("collapse in").attr("aria-expanded",!1),this.$trigger.addClass("collapsed").attr("aria-expanded",!1),this.transitioning=1;var e=function(){this.transitioning=0,this.$element.removeClass("collapsing").addClass("collapse").trigger("hidden.bs.collapse")};return a.support.transition?void 
this.$element[c](0).one("bsTransitionEnd",a.proxy(e,this)).emulateTransitionEnd(d.TRANSITION_DURATION):e.call(this)}}},d.prototype.toggle=function(){this[this.$element.hasClass("in")?"hide":"show"]()},d.prototype.getParent=function(){return a(this.options.parent).find('[data-toggle="collapse"][data-parent="'+this.options.parent+'"]').each(a.proxy(function(c,d){var e=a(d);this.addAriaAndCollapsedClass(b(e),e)},this)).end()},d.prototype.addAriaAndCollapsedClass=function(a,b){var c=a.hasClass("in");a.attr("aria-expanded",c),b.toggleClass("collapsed",!c).attr("aria-expanded",c)};var e=a.fn.collapse;a.fn.collapse=c,a.fn.collapse.Constructor=d,a.fn.collapse.noConflict=function(){return a.fn.collapse=e,this},a(document).on("click.bs.collapse.data-api",'[data-toggle="collapse"]',function(d){var e=a(this);e.attr("data-target")||d.preventDefault();var f=b(e),g=f.data("bs.collapse"),h=g?"toggle":e.data();c.call(f,h)})}(jQuery),+function(a){"use strict";function b(b){var c=b.attr("data-target");c||(c=b.attr("href"),c=c&&/#[A-Za-z]/.test(c)&&c.replace(/.*(?=#[^\s]*$)/,""));var d=c&&a(c);return d&&d.length?d:b.parent()}function c(c){c&&3===c.which||(a(e).remove(),a(f).each(function(){var d=a(this),e=b(d),f={relatedTarget:this};e.hasClass("open")&&(c&&"click"==c.type&&/input|textarea/i.test(c.target.tagName)&&a.contains(e[0],c.target)||(e.trigger(c=a.Event("hide.bs.dropdown",f)),c.isDefaultPrevented()||(d.attr("aria-expanded","false"),e.removeClass("open").trigger(a.Event("hidden.bs.dropdown",f)))))}))}function d(b){return this.each(function(){var c=a(this),d=c.data("bs.dropdown");d||c.data("bs.dropdown",d=new g(this)),"string"==typeof b&&d[b].call(c)})}var e=".dropdown-backdrop",f='[data-toggle="dropdown"]',g=function(b){a(b).on("click.bs.dropdown",this.toggle)};g.VERSION="3.3.7",g.prototype.toggle=function(d){var e=a(this);if(!e.is(".disabled, :disabled")){var f=b(e),g=f.hasClass("open");if(c(),!g){"ontouchstart"in document.documentElement&&!f.closest(".navbar-nav").length&&a(document.createElement("div")).addClass("dropdown-backdrop").insertAfter(a(this)).on("click",c);var h={relatedTarget:this};if(f.trigger(d=a.Event("show.bs.dropdown",h)),d.isDefaultPrevented())return;e.trigger("focus").attr("aria-expanded","true"),f.toggleClass("open").trigger(a.Event("shown.bs.dropdown",h))}return!1}},g.prototype.keydown=function(c){if(/(38|40|27|32)/.test(c.which)&&!/input|textarea/i.test(c.target.tagName)){var d=a(this);if(c.preventDefault(),c.stopPropagation(),!d.is(".disabled, :disabled")){var e=b(d),g=e.hasClass("open");if(!g&&27!=c.which||g&&27==c.which)return 27==c.which&&e.find(f).trigger("focus"),d.trigger("click");var h=" li:not(.disabled):visible a",i=e.find(".dropdown-menu"+h);if(i.length){var j=i.index(c.target);38==c.which&&j>0&&j--,40==c.which&&j<i.length-1&&j++,~j||(j=0),i.eq(j).trigger("focus")}}}};var h=a.fn.dropdown;a.fn.dropdown=d,a.fn.dropdown.Constructor=g,a.fn.dropdown.noConflict=function(){return a.fn.dropdown=h,this},a(document).on("click.bs.dropdown.data-api",c).on("click.bs.dropdown.data-api",".dropdown form",function(a){a.stopPropagation()}).on("click.bs.dropdown.data-api",f,g.prototype.toggle).on("keydown.bs.dropdown.data-api",f,g.prototype.keydown).on("keydown.bs.dropdown.data-api",".dropdown-menu",g.prototype.keydown)}(jQuery),+function(a){"use strict";function b(b,d){return this.each(function(){var e=a(this),f=e.data("bs.modal"),g=a.extend({},c.DEFAULTS,e.data(),"object"==typeof b&&b);f||e.data("bs.modal",f=new c(this,g)),"string"==typeof b?f[b](d):g.show&&f.show(d)})}var 
c=function(b,c){this.options=c,this.$body=a(document.body),this.$element=a(b),this.$dialog=this.$element.find(".modal-dialog"),this.$backdrop=null,this.isShown=null,this.originalBodyPad=null,this.scrollbarWidth=0,this.ignoreBackdropClick=!1,this.options.remote&&this.$element.find(".modal-content").load(this.options.remote,a.proxy(function(){this.$element.trigger("loaded.bs.modal")},this))};c.VERSION="3.3.7",c.TRANSITION_DURATION=300,c.BACKDROP_TRANSITION_DURATION=150,c.DEFAULTS={backdrop:!0,keyboard:!0,show:!0},c.prototype.toggle=function(a){return this.isShown?this.hide():this.show(a)},c.prototype.show=function(b){var d=this,e=a.Event("show.bs.modal",{relatedTarget:b});this.$element.trigger(e),this.isShown||e.isDefaultPrevented()||(this.isShown=!0,this.checkScrollbar(),this.setScrollbar(),this.$body.addClass("modal-open"),this.escape(),this.resize(),this.$element.on("click.dismiss.bs.modal",'[data-dismiss="modal"]',a.proxy(this.hide,this)),this.$dialog.on("mousedown.dismiss.bs.modal",function(){d.$element.one("mouseup.dismiss.bs.modal",function(b){a(b.target).is(d.$element)&&(d.ignoreBackdropClick=!0)})}),this.backdrop(function(){var e=a.support.transition&&d.$element.hasClass("fade");d.$element.parent().length||d.$element.appendTo(d.$body),d.$element.show().scrollTop(0),d.adjustDialog(),e&&d.$element[0].offsetWidth,d.$element.addClass("in"),d.enforceFocus();var f=a.Event("shown.bs.modal",{relatedTarget:b});e?d.$dialog.one("bsTransitionEnd",function(){d.$element.trigger("focus").trigger(f)}).emulateTransitionEnd(c.TRANSITION_DURATION):d.$element.trigger("focus").trigger(f)}))},c.prototype.hide=function(b){b&&b.preventDefault(),b=a.Event("hide.bs.modal"),this.$element.trigger(b),this.isShown&&!b.isDefaultPrevented()&&(this.isShown=!1,this.escape(),this.resize(),a(document).off("focusin.bs.modal"),this.$element.removeClass("in").off("click.dismiss.bs.modal").off("mouseup.dismiss.bs.modal"),this.$dialog.off("mousedown.dismiss.bs.modal"),a.support.transition&&this.$element.hasClass("fade")?this.$element.one("bsTransitionEnd",a.proxy(this.hideModal,this)).emulateTransitionEnd(c.TRANSITION_DURATION):this.hideModal())},c.prototype.enforceFocus=function(){a(document).off("focusin.bs.modal").on("focusin.bs.modal",a.proxy(function(a){document===a.target||this.$element[0]===a.target||this.$element.has(a.target).length||this.$element.trigger("focus")},this))},c.prototype.escape=function(){this.isShown&&this.options.keyboard?this.$element.on("keydown.dismiss.bs.modal",a.proxy(function(a){27==a.which&&this.hide()},this)):this.isShown||this.$element.off("keydown.dismiss.bs.modal")},c.prototype.resize=function(){this.isShown?a(window).on("resize.bs.modal",a.proxy(this.handleUpdate,this)):a(window).off("resize.bs.modal")},c.prototype.hideModal=function(){var a=this;this.$element.hide(),this.backdrop(function(){a.$body.removeClass("modal-open"),a.resetAdjustments(),a.resetScrollbar(),a.$element.trigger("hidden.bs.modal")})},c.prototype.removeBackdrop=function(){this.$backdrop&&this.$backdrop.remove(),this.$backdrop=null},c.prototype.backdrop=function(b){var d=this,e=this.$element.hasClass("fade")?"fade":"";if(this.isShown&&this.options.backdrop){var f=a.support.transition&&e;if(this.$backdrop=a(document.createElement("div")).addClass("modal-backdrop "+e).appendTo(this.$body),this.$element.on("click.dismiss.bs.modal",a.proxy(function(a){return 
this.ignoreBackdropClick?void(this.ignoreBackdropClick=!1):void(a.target===a.currentTarget&&("static"==this.options.backdrop?this.$element[0].focus():this.hide()))},this)),f&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in"),!b)return;f?this.$backdrop.one("bsTransitionEnd",b).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):b()}else if(!this.isShown&&this.$backdrop){this.$backdrop.removeClass("in");var g=function(){d.removeBackdrop(),b&&b()};a.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one("bsTransitionEnd",g).emulateTransitionEnd(c.BACKDROP_TRANSITION_DURATION):g()}else b&&b()},c.prototype.handleUpdate=function(){this.adjustDialog()},c.prototype.adjustDialog=function(){var a=this.$element[0].scrollHeight>document.documentElement.clientHeight;this.$element.css({paddingLeft:!this.bodyIsOverflowing&&a?this.scrollbarWidth:"",paddingRight:this.bodyIsOverflowing&&!a?this.scrollbarWidth:""})},c.prototype.resetAdjustments=function(){this.$element.css({paddingLeft:"",paddingRight:""})},c.prototype.checkScrollbar=function(){var a=window.innerWidth;if(!a){var b=document.documentElement.getBoundingClientRect();a=b.right-Math.abs(b.left)}this.bodyIsOverflowing=document.body.clientWidth<a,this.scrollbarWidth=this.measureScrollbar()},c.prototype.setScrollbar=function(){var a=parseInt(this.$body.css("padding-right")||0,10);this.originalBodyPad=document.body.style.paddingRight||"",this.bodyIsOverflowing&&this.$body.css("padding-right",a+this.scrollbarWidth)},c.prototype.resetScrollbar=function(){this.$body.css("padding-right",this.originalBodyPad)},c.prototype.measureScrollbar=function(){var a=document.createElement("div");a.className="modal-scrollbar-measure",this.$body.append(a);var b=a.offsetWidth-a.clientWidth;return this.$body[0].removeChild(a),b};var d=a.fn.modal;a.fn.modal=b,a.fn.modal.Constructor=c,a.fn.modal.noConflict=function(){return a.fn.modal=d,this},a(document).on("click.bs.modal.data-api",'[data-toggle="modal"]',function(c){var d=a(this),e=d.attr("href"),f=a(d.attr("data-target")||e&&e.replace(/.*(?=#[^\s]+$)/,"")),g=f.data("bs.modal")?"toggle":a.extend({remote:!/#/.test(e)&&e},f.data(),d.data());d.is("a")&&c.preventDefault(),f.one("show.bs.modal",function(a){a.isDefaultPrevented()||f.one("hidden.bs.modal",function(){d.is(":visible")&&d.trigger("focus")})}),b.call(f,g,this)})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tooltip"),f="object"==typeof b&&b;!e&&/destroy|hide/.test(b)||(e||d.data("bs.tooltip",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.type=null,this.options=null,this.enabled=null,this.timeout=null,this.hoverState=null,this.$element=null,this.inState=null,this.init("tooltip",a,b)};c.VERSION="3.3.7",c.TRANSITION_DURATION=150,c.DEFAULTS={animation:!0,placement:"top",selector:!1,template:'<div class="tooltip" role="tooltip"><div class="tooltip-arrow"></div><div class="tooltip-inner"></div></div>',trigger:"hover focus",title:"",delay:0,html:!1,container:!1,viewport:{selector:"body",padding:0}},c.prototype.init=function(b,c,d){if(this.enabled=!0,this.type=b,this.$element=a(c),this.options=this.getOptions(d),this.$viewport=this.options.viewport&&a(a.isFunction(this.options.viewport)?this.options.viewport.call(this,this.$element):this.options.viewport.selector||this.options.viewport),this.inState={click:!1,hover:!1,focus:!1},this.$element[0]instanceof document.constructor&&!this.options.selector)throw new Error("`selector` option must be specified when 
initializing "+this.type+" on the window.document object!");for(var e=this.options.trigger.split(" "),f=e.length;f--;){var g=e[f];if("click"==g)this.$element.on("click."+this.type,this.options.selector,a.proxy(this.toggle,this));else if("manual"!=g){var h="hover"==g?"mouseenter":"focusin",i="hover"==g?"mouseleave":"focusout";this.$element.on(h+"."+this.type,this.options.selector,a.proxy(this.enter,this)),this.$element.on(i+"."+this.type,this.options.selector,a.proxy(this.leave,this))}}this.options.selector?this._options=a.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.getOptions=function(b){return b=a.extend({},this.getDefaults(),this.$element.data(),b),b.delay&&"number"==typeof b.delay&&(b.delay={show:b.delay,hide:b.delay}),b},c.prototype.getDelegateOptions=function(){var b={},c=this.getDefaults();return this._options&&a.each(this._options,function(a,d){c[a]!=d&&(b[a]=d)}),b},c.prototype.enter=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);return c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusin"==b.type?"focus":"hover"]=!0),c.tip().hasClass("in")||"in"==c.hoverState?void(c.hoverState="in"):(clearTimeout(c.timeout),c.hoverState="in",c.options.delay&&c.options.delay.show?void(c.timeout=setTimeout(function(){"in"==c.hoverState&&c.show()},c.options.delay.show)):c.show())},c.prototype.isInStateTrue=function(){for(var a in this.inState)if(this.inState[a])return!0;return!1},c.prototype.leave=function(b){var c=b instanceof this.constructor?b:a(b.currentTarget).data("bs."+this.type);if(c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c)),b instanceof a.Event&&(c.inState["focusout"==b.type?"focus":"hover"]=!1),!c.isInStateTrue())return clearTimeout(c.timeout),c.hoverState="out",c.options.delay&&c.options.delay.hide?void(c.timeout=setTimeout(function(){"out"==c.hoverState&&c.hide()},c.options.delay.hide)):c.hide()},c.prototype.show=function(){var b=a.Event("show.bs."+this.type);if(this.hasContent()&&this.enabled){this.$element.trigger(b);var d=a.contains(this.$element[0].ownerDocument.documentElement,this.$element[0]);if(b.isDefaultPrevented()||!d)return;var e=this,f=this.tip(),g=this.getUID(this.type);this.setContent(),f.attr("id",g),this.$element.attr("aria-describedby",g),this.options.animation&&f.addClass("fade");var h="function"==typeof this.options.placement?this.options.placement.call(this,f[0],this.$element[0]):this.options.placement,i=/\s?auto?\s?/i,j=i.test(h);j&&(h=h.replace(i,"")||"top"),f.detach().css({top:0,left:0,display:"block"}).addClass(h).data("bs."+this.type,this),this.options.container?f.appendTo(this.options.container):f.insertAfter(this.$element),this.$element.trigger("inserted.bs."+this.type);var k=this.getPosition(),l=f[0].offsetWidth,m=f[0].offsetHeight;if(j){var n=h,o=this.getPosition(this.$viewport);h="bottom"==h&&k.bottom+m>o.bottom?"top":"top"==h&&k.top-m<o.top?"bottom":"right"==h&&k.right+l>o.width?"left":"left"==h&&k.left-l<o.left?"right":h,f.removeClass(n).addClass(h)}var p=this.getCalculatedOffset(h,k,l,m);this.applyPlacement(p,h);var q=function(){var 
a=e.hoverState;e.$element.trigger("shown.bs."+e.type),e.hoverState=null,"out"==a&&e.leave(e)};a.support.transition&&this.$tip.hasClass("fade")?f.one("bsTransitionEnd",q).emulateTransitionEnd(c.TRANSITION_DURATION):q()}},c.prototype.applyPlacement=function(b,c){var d=this.tip(),e=d[0].offsetWidth,f=d[0].offsetHeight,g=parseInt(d.css("margin-top"),10),h=parseInt(d.css("margin-left"),10);isNaN(g)&&(g=0),isNaN(h)&&(h=0),b.top+=g,b.left+=h,a.offset.setOffset(d[0],a.extend({using:function(a){d.css({top:Math.round(a.top),left:Math.round(a.left)})}},b),0),d.addClass("in");var i=d[0].offsetWidth,j=d[0].offsetHeight;"top"==c&&j!=f&&(b.top=b.top+f-j);var k=this.getViewportAdjustedDelta(c,b,i,j);k.left?b.left+=k.left:b.top+=k.top;var l=/top|bottom/.test(c),m=l?2*k.left-e+i:2*k.top-f+j,n=l?"offsetWidth":"offsetHeight";d.offset(b),this.replaceArrow(m,d[0][n],l)},c.prototype.replaceArrow=function(a,b,c){this.arrow().css(c?"left":"top",50*(1-a/b)+"%").css(c?"top":"left","")},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle();a.find(".tooltip-inner")[this.options.html?"html":"text"](b),a.removeClass("fade in top bottom left right")},c.prototype.hide=function(b){function d(){"in"!=e.hoverState&&f.detach(),e.$element&&e.$element.removeAttr("aria-describedby").trigger("hidden.bs."+e.type),b&&b()}var e=this,f=a(this.$tip),g=a.Event("hide.bs."+this.type);if(this.$element.trigger(g),!g.isDefaultPrevented())return f.removeClass("in"),a.support.transition&&f.hasClass("fade")?f.one("bsTransitionEnd",d).emulateTransitionEnd(c.TRANSITION_DURATION):d(),this.hoverState=null,this},c.prototype.fixTitle=function(){var a=this.$element;(a.attr("title")||"string"!=typeof a.attr("data-original-title"))&&a.attr("data-original-title",a.attr("title")||"").attr("title","")},c.prototype.hasContent=function(){return this.getTitle()},c.prototype.getPosition=function(b){b=b||this.$element;var c=b[0],d="BODY"==c.tagName,e=c.getBoundingClientRect();null==e.width&&(e=a.extend({},e,{width:e.right-e.left,height:e.bottom-e.top}));var f=window.SVGElement&&c instanceof window.SVGElement,g=d?{top:0,left:0}:f?null:b.offset(),h={scroll:d?document.documentElement.scrollTop||document.body.scrollTop:b.scrollTop()},i=d?{width:a(window).width(),height:a(window).height()}:null;return a.extend({},e,h,i,g)},c.prototype.getCalculatedOffset=function(a,b,c,d){return"bottom"==a?{top:b.top+b.height,left:b.left+b.width/2-c/2}:"top"==a?{top:b.top-d,left:b.left+b.width/2-c/2}:"left"==a?{top:b.top+b.height/2-d/2,left:b.left-c}:{top:b.top+b.height/2-d/2,left:b.left+b.width}},c.prototype.getViewportAdjustedDelta=function(a,b,c,d){var e={top:0,left:0};if(!this.$viewport)return e;var f=this.options.viewport&&this.options.viewport.padding||0,g=this.getPosition(this.$viewport);if(/right|left/.test(a)){var h=b.top-f-g.scroll,i=b.top+f-g.scroll+d;h<g.top?e.top=g.top-h:i>g.top+g.height&&(e.top=g.top+g.height-i)}else{var j=b.left-f,k=b.left+f+c;j<g.left?e.left=g.left-j:k>g.right&&(e.left=g.left+g.width-k)}return e},c.prototype.getTitle=function(){var a,b=this.$element,c=this.options;return a=b.attr("data-original-title")||("function"==typeof c.title?c.title.call(b[0]):c.title)},c.prototype.getUID=function(a){do a+=~~(1e6*Math.random());while(document.getElementById(a));return a},c.prototype.tip=function(){if(!this.$tip&&(this.$tip=a(this.options.template),1!=this.$tip.length))throw new Error(this.type+" `template` option must consist of exactly 1 top-level element!");return this.$tip},c.prototype.arrow=function(){return 
this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},c.prototype.enable=function(){this.enabled=!0},c.prototype.disable=function(){this.enabled=!1},c.prototype.toggleEnabled=function(){this.enabled=!this.enabled},c.prototype.toggle=function(b){var c=this;b&&(c=a(b.currentTarget).data("bs."+this.type),c||(c=new this.constructor(b.currentTarget,this.getDelegateOptions()),a(b.currentTarget).data("bs."+this.type,c))),b?(c.inState.click=!c.inState.click,c.isInStateTrue()?c.enter(c):c.leave(c)):c.tip().hasClass("in")?c.leave(c):c.enter(c)},c.prototype.destroy=function(){var a=this;clearTimeout(this.timeout),this.hide(function(){a.$element.off("."+a.type).removeData("bs."+a.type),a.$tip&&a.$tip.detach(),a.$tip=null,a.$arrow=null,a.$viewport=null,a.$element=null})};var d=a.fn.tooltip;a.fn.tooltip=b,a.fn.tooltip.Constructor=c,a.fn.tooltip.noConflict=function(){return a.fn.tooltip=d,this}}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.popover"),f="object"==typeof b&&b;!e&&/destroy|hide/.test(b)||(e||d.data("bs.popover",e=new c(this,f)),"string"==typeof b&&e[b]())})}var c=function(a,b){this.init("popover",a,b)};if(!a.fn.tooltip)throw new Error("Popover requires tooltip.js");c.VERSION="3.3.7",c.DEFAULTS=a.extend({},a.fn.tooltip.Constructor.DEFAULTS,{placement:"right",trigger:"click",content:"",template:'<div class="popover" role="tooltip"><div class="arrow"></div><h3 class="popover-title"></h3><div class="popover-content"></div></div>'}),c.prototype=a.extend({},a.fn.tooltip.Constructor.prototype),c.prototype.constructor=c,c.prototype.getDefaults=function(){return c.DEFAULTS},c.prototype.setContent=function(){var a=this.tip(),b=this.getTitle(),c=this.getContent();a.find(".popover-title")[this.options.html?"html":"text"](b),a.find(".popover-content").children().detach().end()[this.options.html?"string"==typeof c?"html":"append":"text"](c),a.removeClass("fade top bottom left right in"),a.find(".popover-title").html()||a.find(".popover-title").hide()},c.prototype.hasContent=function(){return this.getTitle()||this.getContent()},c.prototype.getContent=function(){var a=this.$element,b=this.options;return a.attr("data-content")||("function"==typeof b.content?b.content.call(a[0]):b.content)},c.prototype.arrow=function(){return this.$arrow=this.$arrow||this.tip().find(".arrow")};var d=a.fn.popover;a.fn.popover=b,a.fn.popover.Constructor=c,a.fn.popover.noConflict=function(){return a.fn.popover=d,this}}(jQuery),+function(a){"use strict";function b(c,d){this.$body=a(document.body),this.$scrollElement=a(a(c).is(document.body)?window:c),this.options=a.extend({},b.DEFAULTS,d),this.selector=(this.options.target||"")+" .nav li > a",this.offsets=[],this.targets=[],this.activeTarget=null,this.scrollHeight=0,this.$scrollElement.on("scroll.bs.scrollspy",a.proxy(this.process,this)),this.refresh(),this.process()}function c(c){return this.each(function(){var d=a(this),e=d.data("bs.scrollspy"),f="object"==typeof c&&c;e||d.data("bs.scrollspy",e=new b(this,f)),"string"==typeof c&&e[c]()})}b.VERSION="3.3.7",b.DEFAULTS={offset:10},b.prototype.getScrollHeight=function(){return this.$scrollElement[0].scrollHeight||Math.max(this.$body[0].scrollHeight,document.documentElement.scrollHeight)},b.prototype.refresh=function(){var b=this,c="offset",d=0;this.offsets=[],this.targets=[],this.scrollHeight=this.getScrollHeight(),a.isWindow(this.$scrollElement[0])||(c="position",d=this.$scrollElement.scrollTop()),this.$body.find(this.selector).map(function(){var 
b=a(this),e=b.data("target")||b.attr("href"),f=/^#./.test(e)&&a(e);return f&&f.length&&f.is(":visible")&&[[f[c]().top+d,e]]||null}).sort(function(a,b){return a[0]-b[0]}).each(function(){b.offsets.push(this[0]),b.targets.push(this[1])})},b.prototype.process=function(){var a,b=this.$scrollElement.scrollTop()+this.options.offset,c=this.getScrollHeight(),d=this.options.offset+c-this.$scrollElement.height(),e=this.offsets,f=this.targets,g=this.activeTarget;if(this.scrollHeight!=c&&this.refresh(),b>=d)return g!=(a=f[f.length-1])&&this.activate(a);if(g&&b<e[0])return this.activeTarget=null,this.clear();for(a=e.length;a--;)g!=f[a]&&b>=e[a]&&(void 0===e[a+1]||b<e[a+1])&&this.activate(f[a])},b.prototype.activate=function(b){
this.activeTarget=b,this.clear();var c=this.selector+'[data-target="'+b+'"],'+this.selector+'[href="'+b+'"]',d=a(c).parents("li").addClass("active");d.parent(".dropdown-menu").length&&(d=d.closest("li.dropdown").addClass("active")),d.trigger("activate.bs.scrollspy")},b.prototype.clear=function(){a(this.selector).parentsUntil(this.options.target,".active").removeClass("active")};var d=a.fn.scrollspy;a.fn.scrollspy=c,a.fn.scrollspy.Constructor=b,a.fn.scrollspy.noConflict=function(){return a.fn.scrollspy=d,this},a(window).on("load.bs.scrollspy.data-api",function(){a('[data-spy="scroll"]').each(function(){var b=a(this);c.call(b,b.data())})})}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.tab");e||d.data("bs.tab",e=new c(this)),"string"==typeof b&&e[b]()})}var c=function(b){this.element=a(b)};c.VERSION="3.3.7",c.TRANSITION_DURATION=150,c.prototype.show=function(){var b=this.element,c=b.closest("ul:not(.dropdown-menu)"),d=b.data("target");if(d||(d=b.attr("href"),d=d&&d.replace(/.*(?=#[^\s]*$)/,"")),!b.parent("li").hasClass("active")){var e=c.find(".active:last a"),f=a.Event("hide.bs.tab",{relatedTarget:b[0]}),g=a.Event("show.bs.tab",{relatedTarget:e[0]});if(e.trigger(f),b.trigger(g),!g.isDefaultPrevented()&&!f.isDefaultPrevented()){var h=a(d);this.activate(b.closest("li"),c),this.activate(h,h.parent(),function(){e.trigger({type:"hidden.bs.tab",relatedTarget:b[0]}),b.trigger({type:"shown.bs.tab",relatedTarget:e[0]})})}}},c.prototype.activate=function(b,d,e){function f(){g.removeClass("active").find("> .dropdown-menu > .active").removeClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!1),b.addClass("active").find('[data-toggle="tab"]').attr("aria-expanded",!0),h?(b[0].offsetWidth,b.addClass("in")):b.removeClass("fade"),b.parent(".dropdown-menu").length&&b.closest("li.dropdown").addClass("active").end().find('[data-toggle="tab"]').attr("aria-expanded",!0),e&&e()}var g=d.find("> .active"),h=e&&a.support.transition&&(g.length&&g.hasClass("fade")||!!d.find("> .fade").length);g.length&&h?g.one("bsTransitionEnd",f).emulateTransitionEnd(c.TRANSITION_DURATION):f(),g.removeClass("in")};var d=a.fn.tab;a.fn.tab=b,a.fn.tab.Constructor=c,a.fn.tab.noConflict=function(){return a.fn.tab=d,this};var e=function(c){c.preventDefault(),b.call(a(this),"show")};a(document).on("click.bs.tab.data-api",'[data-toggle="tab"]',e).on("click.bs.tab.data-api",'[data-toggle="pill"]',e)}(jQuery),+function(a){"use strict";function b(b){return this.each(function(){var d=a(this),e=d.data("bs.affix"),f="object"==typeof b&&b;e||d.data("bs.affix",e=new c(this,f)),"string"==typeof b&&e[b]()})}var c=function(b,d){this.options=a.extend({},c.DEFAULTS,d),this.$target=a(this.options.target).on("scroll.bs.affix.data-api",a.proxy(this.checkPosition,this)).on("click.bs.affix.data-api",a.proxy(this.checkPositionWithEventLoop,this)),this.$element=a(b),this.affixed=null,this.unpin=null,this.pinnedOffset=null,this.checkPosition()};c.VERSION="3.3.7",c.RESET="affix affix-top affix-bottom",c.DEFAULTS={offset:0,target:window},c.prototype.getState=function(a,b,c,d){var e=this.$target.scrollTop(),f=this.$element.offset(),g=this.$target.height();if(null!=c&&"top"==this.affixed)return e<c&&"top";if("bottom"==this.affixed)return null!=c?!(e+this.unpin<=f.top)&&"bottom":!(e+g<=a-d)&&"bottom";var h=null==this.affixed,i=h?e:f.top,j=h?g:b;return null!=c&&e<=c?"top":null!=d&&i+j>=a-d&&"bottom"},c.prototype.getPinnedOffset=function(){if(this.pinnedOffset)return 
this.pinnedOffset;this.$element.removeClass(c.RESET).addClass("affix");var a=this.$target.scrollTop(),b=this.$element.offset();return this.pinnedOffset=b.top-a},c.prototype.checkPositionWithEventLoop=function(){setTimeout(a.proxy(this.checkPosition,this),1)},c.prototype.checkPosition=function(){if(this.$element.is(":visible")){var b=this.$element.height(),d=this.options.offset,e=d.top,f=d.bottom,g=Math.max(a(document).height(),a(document.body).height());"object"!=typeof d&&(f=e=d),"function"==typeof e&&(e=d.top(this.$element)),"function"==typeof f&&(f=d.bottom(this.$element));var h=this.getState(g,b,e,f);if(this.affixed!=h){null!=this.unpin&&this.$element.css("top","");var i="affix"+(h?"-"+h:""),j=a.Event(i+".bs.affix");if(this.$element.trigger(j),j.isDefaultPrevented())return;this.affixed=h,this.unpin="bottom"==h?this.getPinnedOffset():null,this.$element.removeClass(c.RESET).addClass(i).trigger(i.replace("affix","affixed")+".bs.affix")}"bottom"==h&&this.$element.offset({top:g-b-f})}};var d=a.fn.affix;a.fn.affix=b,a.fn.affix.Constructor=c,a.fn.affix.noConflict=function(){return a.fn.affix=d,this},a(window).on("load",function(){a('[data-spy="affix"]').each(function(){var c=a(this),d=c.data();d.offset=d.offset||{},null!=d.offsetBottom&&(d.offset.bottom=d.offsetBottom),null!=d.offsetTop&&(d.offset.top=d.offsetTop),b.call(c,d)})})}(jQuery);
/**
* bootbox.js 5.5.2
*
* http://bootboxjs.com/license.txt
*/
!function(t,e){'use strict';'function'==typeof define&&define.amd?define(['jquery'],e):'object'==typeof exports?module.exports=e(require('jquery')):t.bootbox=e(t.jQuery)}(this,function e(u,p){'use strict';var r,n,i,l;Object.keys||(Object.keys=(r=Object.prototype.hasOwnProperty,n=!{toString:null}.propertyIsEnumerable('toString'),l=(i=['toString','toLocaleString','valueOf','hasOwnProperty','isPrototypeOf','propertyIsEnumerable','constructor']).length,function(t){if('function'!=typeof t&&('object'!=typeof t||null===t))throw new TypeError('Object.keys called on non-object');var e,o,a=[];for(e in t)r.call(t,e)&&a.push(e);if(n)for(o=0;o<l;o++)r.call(t,i[o])&&a.push(i[o]);return a}));var d={};d.VERSION='5.5.2';var b={ar:{OK:'موافق',CANCEL:'الغاء',CONFIRM:'تأكيد'},bg_BG:{OK:'Ок',CANCEL:'Отказ',CONFIRM:'Потвърждавам'},br:{OK:'OK',CANCEL:'Cancelar',CONFIRM:'Sim'},cs:{OK:'OK',CANCEL:'Zrušit',CONFIRM:'Potvrdit'},da:{OK:'OK',CANCEL:'Annuller',CONFIRM:'Accepter'},de:{OK:'OK',CANCEL:'Abbrechen',CONFIRM:'Akzeptieren'},el:{OK:'Εντάξει',CANCEL:'Ακύρωση',CONFIRM:'Επιβεβαίωση'},en:{OK:'OK',CANCEL:'Cancel',CONFIRM:'OK'},es:{OK:'OK',CANCEL:'Cancelar',CONFIRM:'Aceptar'},eu:{OK:'OK',CANCEL:'Ezeztatu',CONFIRM:'Onartu'},et:{OK:'OK',CANCEL:'Katkesta',CONFIRM:'OK'},fa:{OK:'قبول',CANCEL:'لغو',CONFIRM:'تایید'},fi:{OK:'OK',CANCEL:'Peruuta',CONFIRM:'OK'},fr:{OK:'OK',CANCEL:'Annuler',CONFIRM:'Confirmer'},he:{OK:'אישור',CANCEL:'ביטול',CONFIRM:'אישור'},hu:{OK:'OK',CANCEL:'Mégsem',CONFIRM:'Megerősít'},hr:{OK:'OK',CANCEL:'Odustani',CONFIRM:'Potvrdi'},id:{OK:'OK',CANCEL:'Batal',CONFIRM:'OK'},it:{OK:'OK',CANCEL:'Annulla',CONFIRM:'Conferma'},ja:{OK:'OK',CANCEL:'キャンセル',CONFIRM:'確認'},ka:{OK:'OK',CANCEL:'გაუქმება',CONFIRM:'დადასტურება'},ko:{OK:'OK',CANCEL:'취소',CONFIRM:'확인'},lt:{OK:'Gerai',CANCEL:'Atšaukti',CONFIRM:'Patvirtinti'},lv:{OK:'Labi',CANCEL:'Atcelt',CONFIRM:'Apstiprināt'},nl:{OK:'OK',CANCEL:'Annuleren',CONFIRM:'Accepteren'},no:{OK:'OK',CANCEL:'Avbryt',CONFIRM:'OK'},pl:{OK:'OK',CANCEL:'Anuluj',CONFIRM:'Potwierdź'},pt:{OK:'OK',CANCEL:'Cancelar',CONFIRM:'Confirmar'},ru:{OK:'OK',CANCEL:'Отмена',CONFIRM:'Применить'},sk:{OK:'OK',CANCEL:'Zrušiť',CONFIRM:'Potvrdiť'},sl:{OK:'OK',CANCEL:'Prekliči',CONFIRM:'Potrdi'},sq:{OK:'OK',CANCEL:'Anulo',CONFIRM:'Prano'},sv:{OK:'OK',CANCEL:'Avbryt',CONFIRM:'OK'},sw:{OK:'Sawa',CANCEL:'Ghairi',CONFIRM:'Thibitisha'},ta:{OK:'சரி',CANCEL:'ரத்து செய்',CONFIRM:'உறுதி செய்'},th:{OK:'ตกลง',CANCEL:'ยกเลิก',CONFIRM:'ยืนยัน'},tr:{OK:'Tamam',CANCEL:'İptal',CONFIRM:'Onayla'},uk:{OK:'OK',CANCEL:'Відміна',CONFIRM:'Прийняти'},vi:{OK:'OK',CANCEL:'Hủy bỏ',CONFIRM:'Xác nhận'},zh_CN:{OK:'OK',CANCEL:'取消',CONFIRM:'确认'},zh_TW:{OK:'OK',CANCEL:'取消',CONFIRM:'確認'}},f={dialog:"<div class=\"bootbox modal\" tabindex=\"-1\" role=\"dialog\" aria-hidden=\"true\"><div class=\"modal-dialog\"><div class=\"modal-content\"><div class=\"modal-body\"><div class=\"bootbox-body\"></div></div></div></div></div>",header:"<div class=\"modal-header\"><h5 class=\"modal-title\"></h5></div>",footer:'<div class="modal-footer"></div>',closeButton:'<button type="button" class="bootbox-close-button close" aria-hidden="true">×</button>',form:'<form class="bootbox-form"></form>',button:'<button type="button" class="btn"></button>',option:'<option></option>',promptMessage:'<div class="bootbox-prompt-message"></div>',inputs:{text:'<input class="bootbox-input bootbox-input-text form-control" autocomplete="off" type="text" />',textarea:'<textarea class="bootbox-input bootbox-input-textarea form-control"></textarea>',email:'<input class="bootbox-input 
bootbox-input-email form-control" autocomplete="off" type="email" />',select:'<select class="bootbox-input bootbox-input-select form-control"></select>',checkbox:'<div class="form-check checkbox"><label class="form-check-label"><input class="form-check-input bootbox-input bootbox-input-checkbox" type="checkbox" /></label></div>',radio:'<div class="form-check radio"><label class="form-check-label"><input class="form-check-input bootbox-input bootbox-input-radio" type="radio" name="bootbox-radio" /></label></div>',date:'<input class="bootbox-input bootbox-input-date form-control" autocomplete="off" type="date" />',time:'<input class="bootbox-input bootbox-input-time form-control" autocomplete="off" type="time" />',number:'<input class="bootbox-input bootbox-input-number form-control" autocomplete="off" type="number" />',password:'<input class="bootbox-input bootbox-input-password form-control" autocomplete="off" type="password" />',range:'<input class="bootbox-input bootbox-input-range form-control-range" autocomplete="off" type="range" />'}},m={locale:'en',backdrop:'static',animate:!0,className:null,closeButton:!0,show:!0,container:'body',value:'',inputType:'text',swapButtonOrder:!1,centerVertical:!1,multiple:!1,scrollable:!1,reusable:!1};function c(t,e,o){return u.extend(!0,{},t,function(t,e){var o=t.length,a={};if(o<1||2<o)throw new Error('Invalid argument length');return 2===o||'string'==typeof t[0]?(a[e[0]]=t[0],a[e[1]]=t[1]):a=t[0],a}(e,o))}function h(t,e,o,a){var r;a&&a[0]&&(r=a[0].locale||m.locale,(a[0].swapButtonOrder||m.swapButtonOrder)&&(e=e.reverse()));var n,i,l,s={className:'bootbox-'+t,buttons:function(t,e){for(var o={},a=0,r=t.length;a<r;a++){var n=t[a],i=n.toLowerCase(),l=n.toUpperCase();o[i]={label:(s=l,c=e,u=b[c],u?u[s]:b.en[s])}}var s,c,u;return o}(e,r)};return n=c(s,a,o),l={},O(i=e,function(t,e){l[e]=!0}),O(n.buttons,function(t){if(l[t]===p)throw new Error('button key "'+t+'" is not allowed (options are '+i.join(' ')+')')}),n}function C(t){return Object.keys(t).length}function O(t,o){var a=0;u.each(t,function(t,e){o(t,e,a++)})}function w(t){t.data.dialog.find('.bootbox-accept').first().trigger('focus')}function v(t){t.target===t.data.dialog[0]&&t.data.dialog.remove()}function g(t){t.target===t.data.dialog[0]&&(t.data.dialog.off('escape.close.bb'),t.data.dialog.off('click'))}function N(t,e,o){t.stopPropagation(),t.preventDefault(),u.isFunction(o)&&!1===o.call(e,t)||e.modal('hide')}function y(t){return/([01][0-9]|2[0-3]):[0-5][0-9]?:[0-5][0-9]/.test(t)}function x(t){return/(\d{4})-(\d{2})-(\d{2})/.test(t)}return d.locales=function(t){return t?b[t]:b},d.addLocale=function(t,o){return u.each(['OK','CANCEL','CONFIRM'],function(t,e){if(!o[e])throw new Error('Please supply a translation for "'+e+'"')}),b[t]={OK:o.OK,CANCEL:o.CANCEL,CONFIRM:o.CONFIRM},d},d.removeLocale=function(t){if('en'===t)throw new Error('"en" is used as the default and fallback locale and cannot be removed.');return delete b[t],d},d.setLocale=function(t){return d.setDefaults('locale',t)},d.setDefaults=function(){var t={};return 2===arguments.length?t[arguments[0]]=arguments[1]:t=arguments[0],u.extend(m,t),d},d.hideAll=function(){return u('.bootbox').modal('hide'),d},d.init=function(t){return e(t||u)},d.dialog=function(t){if(u.fn.modal===p)throw new Error("\"$.fn.modal\" is not defined; please double check you have included the Bootstrap JavaScript library. 
See https://getbootstrap.com/docs/4.4/getting-started/javascript/ for more details.");if(t=function(r){var n,i;if('object'!=typeof r)throw new Error('Please supply an object of options');if(!r.message)throw new Error('"message" option must not be null or an empty string.');(r=u.extend({},m,r)).backdrop?r.backdrop='string'!=typeof r.backdrop||'static'!==r.backdrop.toLowerCase()||'static':r.backdrop=!1!==r.backdrop&&0!==r.backdrop&&'static';r.buttons||(r.buttons={});return n=r.buttons,i=C(n),O(n,function(t,e,o){if(u.isFunction(e)&&(e=n[t]={callback:e}),'object'!==u.type(e))throw new Error('button with key "'+t+'" must be an object');if(e.label||(e.label=t),!e.className){var a=!1;a=r.swapButtonOrder?0===o:o===i-1,e.className=i<=2&&a?'btn-primary':'btn-secondary btn-default'}}),r}(t),u.fn.modal.Constructor.VERSION){t.fullBootstrapVersion=u.fn.modal.Constructor.VERSION;var e=t.fullBootstrapVersion.indexOf('.');t.bootstrap=t.fullBootstrapVersion.substring(0,e)}else t.bootstrap='2',t.fullBootstrapVersion='2.3.2',console.warn('Bootbox will *mostly* work with Bootstrap 2, but we do not officially support it. Please upgrade, if possible.');var o=u(f.dialog),a=o.find('.modal-dialog'),r=o.find('.modal-body'),n=u(f.header),i=u(f.footer),l=t.buttons,s={onEscape:t.onEscape};if(r.find('.bootbox-body').html(t.message),0<C(t.buttons)&&(O(l,function(t,e){var o=u(f.button);switch(o.data('bb-handler',t),o.addClass(e.className),t){case'ok':case'confirm':o.addClass('bootbox-accept');break;case'cancel':o.addClass('bootbox-cancel')}o.html(e.label),i.append(o),s[t]=e.callback}),r.after(i)),!0===t.animate&&o.addClass('fade'),t.className&&o.addClass(t.className),t.size)switch(t.fullBootstrapVersion.substring(0,3)<'3.1'&&console.warn('"size" requires Bootstrap 3.1.0 or higher. You appear to be using '+t.fullBootstrapVersion+'. Please upgrade to use this option.'),t.size){case'small':case'sm':a.addClass('modal-sm');break;case'large':case'lg':a.addClass('modal-lg');break;case'extra-large':case'xl':a.addClass('modal-xl'),t.fullBootstrapVersion.substring(0,3)<'4.2'&&console.warn('Using size "xl"/"extra-large" requires Bootstrap 4.2.0 or higher. You appear to be using '+t.fullBootstrapVersion+'. Please upgrade to use this option.')}if(t.scrollable&&(a.addClass('modal-dialog-scrollable'),t.fullBootstrapVersion.substring(0,3)<'4.3'&&console.warn('Using "scrollable" requires Bootstrap 4.3.0 or higher. You appear to be using '+t.fullBootstrapVersion+'. Please upgrade to use this option.')),t.title&&(r.before(n),o.find('.modal-title').html(t.title)),t.closeButton){var c=u(f.closeButton);t.title?3<t.bootstrap?o.find('.modal-header').append(c):o.find('.modal-header').prepend(c):c.prependTo(r)}if(t.centerVertical&&(a.addClass('modal-dialog-centered'),t.fullBootstrapVersion<'4.0.0'&&console.warn('"centerVertical" requires Bootstrap 4.0.0-beta.3 or higher. You appear to be using '+t.fullBootstrapVersion+'. 
Please upgrade to use this option.')),t.reusable||o.one('hide.bs.modal',{dialog:o},g),t.onHide){if(!u.isFunction(t.onHide))throw new Error('Argument supplied to "onHide" must be a function');o.on('hide.bs.modal',t.onHide)}if(t.reusable||o.one('hidden.bs.modal',{dialog:o},v),t.onHidden){if(!u.isFunction(t.onHidden))throw new Error('Argument supplied to "onHidden" must be a function');o.on('hidden.bs.modal',t.onHidden)}if(t.onShow){if(!u.isFunction(t.onShow))throw new Error('Argument supplied to "onShow" must be a function');o.on('show.bs.modal',t.onShow)}if(o.one('shown.bs.modal',{dialog:o},w),t.onShown){if(!u.isFunction(t.onShown))throw new Error('Argument supplied to "onShown" must be a function');o.on('shown.bs.modal',t.onShown)}return!0===t.backdrop&&o.on('click.dismiss.bs.modal',function(t){o.children('.modal-backdrop').length&&(t.currentTarget=o.children('.modal-backdrop').get(0)),t.target===t.currentTarget&&o.trigger('escape.close.bb')}),o.on('escape.close.bb',function(t){s.onEscape&&N(t,o,s.onEscape)}),o.on('click','.modal-footer button:not(.disabled)',function(t){var e=u(this).data('bb-handler');e!==p&&N(t,o,s[e])}),o.on('click','.bootbox-close-button',function(t){N(t,o,s.onEscape)}),o.on('keyup',function(t){27===t.which&&o.trigger('escape.close.bb')}),u(t.container).append(o),o.modal({backdrop:t.backdrop,keyboard:!1,show:!1}),t.show&&o.modal('show'),o},d.alert=function(){var t;if((t=h('alert',['ok'],['message','callback'],arguments)).callback&&!u.isFunction(t.callback))throw new Error('alert requires the "callback" property to be a function when provided');return t.buttons.ok.callback=t.onEscape=function(){return!u.isFunction(t.callback)||t.callback.call(this)},d.dialog(t)},d.confirm=function(){var t;if(t=h('confirm',['cancel','confirm'],['message','callback'],arguments),!u.isFunction(t.callback))throw new Error('confirm requires a callback');return t.buttons.cancel.callback=t.onEscape=function(){return t.callback.call(this,!1)},t.buttons.confirm.callback=function(){return t.callback.call(this,!0)},d.dialog(t)},d.prompt=function(){var r,e,t,n,o,a;if(t=u(f.form),(r=h('prompt',['cancel','confirm'],['title','callback'],arguments)).value||(r.value=m.value),r.inputType||(r.inputType=m.inputType),o=r.show===p?m.show:r.show,r.show=!1,r.buttons.cancel.callback=r.onEscape=function(){return r.callback.call(this,null)},r.buttons.confirm.callback=function(){var t;if('checkbox'===r.inputType)t=n.find('input:checked').map(function(){return u(this).val()}).get();else if('radio'===r.inputType)t=n.find('input:checked').val();else{if(n[0].checkValidity&&!n[0].checkValidity())return!1;t='select'===r.inputType&&!0===r.multiple?n.find('option:selected').map(function(){return u(this).val()}).get():n.val()}return r.callback.call(this,t)},!r.title)throw new Error('prompt requires a title');if(!u.isFunction(r.callback))throw new Error('prompt requires a callback');if(!f.inputs[r.inputType])throw new Error('Invalid prompt 
type');switch(n=u(f.inputs[r.inputType]),r.inputType){case'text':case'textarea':case'email':case'password':n.val(r.value),r.placeholder&&n.attr('placeholder',r.placeholder),r.pattern&&n.attr('pattern',r.pattern),r.maxlength&&n.attr('maxlength',r.maxlength),r.required&&n.prop({required:!0}),r.rows&&!isNaN(parseInt(r.rows))&&'textarea'===r.inputType&&n.attr({rows:r.rows});break;case'date':case'time':case'number':case'range':if(n.val(r.value),r.placeholder&&n.attr('placeholder',r.placeholder),r.pattern&&n.attr('pattern',r.pattern),r.required&&n.prop({required:!0}),'date'!==r.inputType&&r.step){if(!('any'===r.step||!isNaN(r.step)&&0<parseFloat(r.step)))throw new Error('"step" must be a valid positive number or the value "any". See https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input#attr-step for more information.');n.attr('step',r.step)}!function(t,e,o){var a=!1,r=!0,n=!0;if('date'===t)e===p||(r=x(e))?o===p||(n=x(o))||console.warn('Browsers which natively support the "date" input type expect date values to be of the form "YYYY-MM-DD" (see ISO-8601 https://www.iso.org/iso-8601-date-and-time-format.html). Bootbox does not enforce this rule, but your max value may not be enforced by this browser.'):console.warn('Browsers which natively support the "date" input type expect date values to be of the form "YYYY-MM-DD" (see ISO-8601 https://www.iso.org/iso-8601-date-and-time-format.html). Bootbox does not enforce this rule, but your min value may not be enforced by this browser.');else if('time'===t){if(e!==p&&!(r=y(e)))throw new Error('"min" is not a valid time. See https://www.w3.org/TR/2012/WD-html-markup-20120315/datatypes.html#form.data.time for more information.');if(o!==p&&!(n=y(o)))throw new Error('"max" is not a valid time. See https://www.w3.org/TR/2012/WD-html-markup-20120315/datatypes.html#form.data.time for more information.')}else{if(e!==p&&isNaN(e))throw r=!1,new Error('"min" must be a valid number. See https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input#attr-min for more information.');if(o!==p&&isNaN(o))throw n=!1,new Error('"max" must be a valid number. See https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input#attr-max for more information.')}if(r&&n){if(o<=e)throw new Error('"max" must be greater than "min". 
See https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input#attr-max for more information.');a=!0}return a}(r.inputType,r.min,r.max)||(r.min!==p&&n.attr('min',r.min),r.max!==p&&n.attr('max',r.max));break;case'select':var i={};if(a=r.inputOptions||[],!u.isArray(a))throw new Error('Please pass an array of input options');if(!a.length)throw new Error('prompt with "inputType" set to "select" requires at least one option');r.placeholder&&n.attr('placeholder',r.placeholder),r.required&&n.prop({required:!0}),r.multiple&&n.prop({multiple:!0}),O(a,function(t,e){var o=n;if(e.value===p||e.text===p)throw new Error('each option needs a "value" property and a "text" property');e.group&&(i[e.group]||(i[e.group]=u('<optgroup />').attr('label',e.group)),o=i[e.group]);var a=u(f.option);a.attr('value',e.value).text(e.text),o.append(a)}),O(i,function(t,e){n.append(e)}),n.val(r.value);break;case'checkbox':var l=u.isArray(r.value)?r.value:[r.value];if(!(a=r.inputOptions||[]).length)throw new Error('prompt with "inputType" set to "checkbox" requires at least one option');n=u('<div class="bootbox-checkbox-list"></div>'),O(a,function(t,o){if(o.value===p||o.text===p)throw new Error('each option needs a "value" property and a "text" property');var a=u(f.inputs[r.inputType]);a.find('input').attr('value',o.value),a.find('label').append('\n'+o.text),O(l,function(t,e){e===o.value&&a.find('input').prop('checked',!0)}),n.append(a)});break;case'radio':if(r.value!==p&&u.isArray(r.value))throw new Error('prompt with "inputType" set to "radio" requires a single, non-array value for "value"');if(!(a=r.inputOptions||[]).length)throw new Error('prompt with "inputType" set to "radio" requires at least one option');n=u('<div class="bootbox-radiobutton-list"></div>');var s=!0;O(a,function(t,e){if(e.value===p||e.text===p)throw new Error('each option needs a "value" property and a "text" property');var o=u(f.inputs[r.inputType]);o.find('input').attr('value',e.value),o.find('label').append('\n'+e.text),r.value!==p&&e.value===r.value&&(o.find('input').prop('checked',!0),s=!1),n.append(o)}),s&&n.find('input[type="radio"]').first().prop('checked',!0)}if(t.append(n),t.on('submit',function(t){t.preventDefault(),t.stopPropagation(),e.find('.bootbox-accept').trigger('click')}),''!==u.trim(r.message)){var c=u(f.promptMessage).html(r.message);t.prepend(c),r.message=t}else r.message=t;return(e=d.dialog(r)).off('shown.bs.modal',w),e.on('shown.bs.modal',function(){n.focus()}),!0===o&&e.modal('show'),e},d});
/*! jQuery Mobile v1.5.0-alpha.1 | Copyright jQuery Foundation, Inc. | jquery.org/license */
(function(e,t,n){typeof define=="function"&&define.amd?define(["jquery"],function(r){return n(r,e,t),r.mobile}):n(e.jQuery,e,t)})(this,document,function(e,t,n,r){(function(t){typeof define=="function"&&define.amd?define("vmouse",["jquery"],t):t(e)})(function(e){function T(e){while(e&&typeof e.originalEvent!="undefined")e=e.originalEvent;return e}function N(t,n){var i=t.type,o,a,l,c,h,p,d,v,m;t=e.Event(t),t.type=n,o=t.originalEvent,a=u,i.search(/^(mouse|click)/)>-1&&(a=f);if(o)for(d=a.length;d;)c=a[--d],t[c]=o[c];i.search(/mouse(down|up)|click/)>-1&&!t.which&&(t.which=1);if(i.search(/^touch/)!==-1){l=T(o),i=l.touches,h=l.changedTouches,p=i&&i.length?i[0]:h&&h.length?h[0]:r;if(p)for(v=0,m=s.length;v<m;v++)c=s[v],t[c]=p[c]}return t}function C(n){var r={},i,s;while(n){i=e.data(n,t);for(s in i)i[s]&&(r[s]=r.hasVirtualBinding=!0);n=n.parentNode}return r}function k(n,r){var i;while(n){i=e.data(n,t);if(i&&(!r||i[r]))return n;n=n.parentNode}return null}function L(){g=!1}function A(){g=!0}function O(){E=0,v.length=0,m=!1,A()}function M(){L()}function _(){c&&(clearTimeout(c),c=0)}function D(){_(),c=setTimeout(function(){c=0,O()},e.vmouse.resetTimerDuration)}function P(t,n,r){var i;if(r&&r[t]||!r&&k(n.target,t))i=N(n,t),e(n.target).trigger(i);return i}function H(t){var n=e.data(t.target,i),r;t.type==="click"&&e.data(t.target,"lastTouchType")==="touchstart"&&setTimeout(function(){e.data(t.target,"lastTouchType")==="touchstart"&&(O(),delete e.data(t
|
target).lastTouchType,H(t))},e.vmouse.maximumTimeBetweenTouches),!m&&(!E||E!==n)&&(r=P("v"+t.type,t),r&&(r.isDefaultPrevented()&&t.preventDefault(),r.isPropagationStopped()&&t.stopPropagation(),r.isImmediatePropagationStopped()&&t.stopImmediatePropagation()))}function B(t){var n=T(t).touches,r,s,o;n&&n.length===1&&(r=t.target,s=C(r),e.data(t.target,"lastTouchType",t.type),s.hasVirtualBinding&&(E=w++,e.data(r,i,E),_(),M(),d=!1,o=T(t).touches[0],h=o.pageX,p=o.pageY,P("vmouseover",t,s),P("vmousedown",t,s)))}function j(t){if(g)return;d||P("vmousecancel",t,C(t.target)),e.data(t.target,"lastTouchType",t.type),d=!0,D()}function F(t){if(g)return;var n=T(t).touches[0],r=d,i=e.vmouse.moveDistanceThreshold,s=C(t.target);e.data(t.target,"lastTouchType",t.type),d=d||Math.abs(n.pageX-h)>i||Math.abs(n.pageY-p)>i,d&&!r&&P("vmousecancel",t,s),P("vmousemove",t,s),D()}function I(t){if(g||e.data(t.target,"lastTouchType")===r)return;A(),delete e.data(t.target).lastTouchType;var n=C(t.target),i,s;P("vmouseup",t,n),d||(i=P("vclick",t,n),i&&i.isDefaultPrevented()&&(s=T(t).changedTouches[0],v.push({touchID:E,x:s.clientX,y:s.clientY}),m=!0)),P("vmouseout",t,n),d=!1,D()}function q(n){var r=e.data(n,t),i;if(r)for(i in r)if(r[i])return!0;return!1}function R(){}function U(n){var r=n.substr(1);return{setup:function(){q(this)||e.data(this,t,{});var i=e.data(this,t);i[n]=!0,l[n]=(l[n]||0)+1,l[n]===1&&b.bind(r,H),e(this).bind(r,R),y&&(l.touchstart=(l.touchstart||0)+1,l.touchstart===1&&b.bind("touchstart",B).bind("touchend",I).bind("touchmove",F).bind("scroll",j))},teardown:function(){--l[n],l[n]||b.unbind(r,H),y&&(--l.touchstart,l.touchstart||b.unbind("touchstart",B).unbind("touchmove",F).unbind("touchend",I).unbind("scroll",j));var i=e(this),s=e.data(this,t);s&&(s[n]=!1),i.unbind(r,R),q(this)||i.removeData(t)}}}var t="virtualMouseBindings",i="virtualTouchID",s="clientX clientY pageX pageY screenX screenY".split(" "),o="vmouseover vmousedown vmousemove vmouseup vclick vmouseout vmousecancel".split(" "),u="altKey bubbles cancelable ctrlKey currentTarget detail eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),a=e.event.mouseHooks?e.event.mouseHooks.props:[],f=u.concat(a),l={},c=0,h=0,p=0,d=!1,v=[],m=!1,g=!1,y="addEventListener"in n,b=e(n),w=1,E=0,S,x;e.vmouse={moveDistanceThreshold:10,clickDistanceThreshold:10,resetTimerDuration:1500,maximumTimeBetweenTouches:100};for(x=0;x<o.length;x++)e.event.special[o[x]]=U(o[x]);y&&n.addEventListener("click",function(t){var n=v.length,r=t.target,s,o,u,a,f,l;if(n){s=t.clientX,o=t.clientY,S=e.vmouse.clickDistanceThreshold,u=r;while(u){for(a=0;a<n;a++){f=v[a],l=0;if(u===r&&Math.abs(f.x-s)<S&&Math.abs(f.y-o)<S||e.data(u,i)===f.touchID){t.preventDefault(),t.stopPropagation();return}}u=u.parentNode}}},!0)}),function(t){typeof define=="function"&&define.amd?define("ns",["jquery"],t):t(e)}(function(e){return e.mobile={version:"@VERSION"},e.mobile}),function(t){typeof define=="function"&&define.amd?define("support/touch",["jquery","../ns"],t):t(e)}(function(e){var t={touch:"ontouchend"in n};return e.mobile.support=e.mobile.support||{},e.extend(e.support,t),e.extend(e.mobile.support,t),e.support}),function(t){typeof define=="function"&&define.amd?define("events/touch",["jquery","../vmouse","../support/touch"],t):t(e)}(function(e){function f(t,n,i,s){var o=i.type;i.type=n,s?e.event.trigger(i,r,t):e.event.dispatch.call(t,i),i.type=o}var i=e(n),s=e.mobile.support.touch,o=s?"touchstart":"mousedown",u=s?"touchend":"mouseup",a=s?"touchmove":"mousemove";return 
e.each("touchstart touchmove touchend tap taphold swipe swipeleft swiperight".split(" "),function(t,n){e.fn[n]=function(e){return e?this.bind(n,e):this.trigger(n)},e.attrFn&&(e.attrFn[n]=!0)}),e.event.special.tap={tapholdThreshold:750,emitTapOnTaphold:!0,setup:function(){var t=this,n=e(t),r=!1;n.bind("vmousedown",function(s){function l(){u&&(n.bind("vclick",a),clearTimeout(u))}function c(){l(),n.unbind("vclick",a).unbind("vmouseup",l),i.unbind("vmousecancel",c)}r=!1;if(s.which&&s.which!==1)return!0;var o=s.target,u,a;a=function(e){c(),!r&&o===e.target?f(t,"tap",e):r&&e.preventDefault()},n.bind("vmouseup",l),i.bind("vmousecancel",c),u=setTimeout(function(){e.event.special.tap.emitTapOnTaphold||(r=!0),u=0,f(t,"taphold",e.Event("taphold",{target:o}))},e.event.special.tap.tapholdThreshold)})},teardown:function(){e(this).unbind("vmousedown").unbind("vclick").unbind("vmouseup"),i.unbind("vmousecancel")}},e.event.special.swipe={scrollSupressionThreshold:30,durationThreshold:1e3,horizontalDistanceThreshold:t.devicePixelRatio>=2?15:30,verticalDistanceThreshold:t.devicePixelRatio>=2?15:30,getLocation:function(e){var n=t.pageXOffset,r=t.pageYOffset,i=e.clientX,s=e.clientY;if(e.pageY===0&&Math.floor(s)>Math.floor(e.pageY)||e.pageX===0&&Math.floor(i)>Math.floor(e.pageX))i-=n,s-=r;else if(s<e.pageY-r||i<e.pageX-n)i=e.pageX-n,s=e.pageY-r;return{x:i,y:s}},start:function(t){var n=t.originalEvent.touches?t.originalEvent.touches[0]:t,r=e.event.special.swipe.getLocation(n);return{time:(new Date).getTime(),coords:[r.x,r.y],origin:e(t.target)}},stop:function(t){var n=t.originalEvent.touches?t.originalEvent.touches[0]:t,r=e.event.special.swipe.getLocation(n);return{time:(new Date).getTime(),coords:[r.x,r.y]}},handleSwipe:function(t,n,r,i){if(n.time-t.time<e.event.special.swipe.durationThreshold&&Math.abs(t.coords[0]-n.coords[0])>e.event.special.swipe.horizontalDistanceThreshold&&Math.abs(t.coords[1]-n.coords[1])<e.event.special.swipe.verticalDistanceThreshold){var s=t.coords[0]>n.coords[0]?"swipeleft":"swiperight";return f(r,"swipe",e.Event("swipe",{target:i,swipestart:t,swipestop:n}),!0),f(r,s,e.Event(s,{target:i,swipestart:t,swipestop:n}),!0),!0}return!1},eventInProgress:!1,setup:function(){var t,n=this,r=e(n),s={};t=e.data(this,"mobile-events"),t||(t={length:0},e.data(this,"mobile-events",t)),t.length++,t.swipe=s,s.start=function(t){if(e.event.special.swipe.eventInProgress)return;e.event.special.swipe.eventInProgress=!0;var r,o=e.event.special.swipe.start(t),f=t.target,l=!1;s.move=function(t){if(!o||t.isDefaultPrevented())return;r=e.event.special.swipe.stop(t),l||(l=e.event.special.swipe.handleSwipe(o,r,n,f),l&&(e.event.special.swipe.eventInProgress=!1)),Math.abs(o.coords[0]-r.coords[0])>e.event.special.swipe.scrollSupressionThreshold&&t.preventDefault()},s.stop=function(){l=!0,e.event.special.swipe.eventInProgress=!1,i.off(a,s.move),s.move=null},i.on(a,s.move).one(u,s.stop)},r.on(o,s.start)},teardown:function(){var t,n;t=e.data(this,"mobile-events"),t&&(n=t.swipe,delete t.swipe,t.length--,t.length===0&&e.removeData(this,"mobile-events")),n&&(n.start&&e(this).off(o,n.start),n.move&&i.off(a,n.move),n.stop&&i.off(u,n.stop))}},e.each({taphold:"tap",swipeleft:"swipe.left",swiperight:"swipe.right"},function(t,n){e.event.special[t]={setup:function(){e(this).bind(n,e.noop)},teardown:function(){e(this).unbind(n)}}}),e.event.special})});
|
.
|
FIDCompoundMetric.py
|
from src.metric.CompoundMetric import CompoundMetric
from src.metric.SampleMetricManager import SampleMetricManager
from src.core.Setupable import SetupMode
from src.metric.CompoundMetricManager import CompoundMetricManager
from typing import Any
from cleanfid import fid
from src.population.Population import Population
from src.dataset.FFHQDataset import FFHQDataset
import numpy as np
from pathlib import Path
FID_NAME = "FID"
FID_CALC_MODES = ["clean", "legacy_tensorflow", "legacy_pytorch"]
FID_DEFAULT_CALC_MODE = "clean"
# TODO: Fix this:
#! Only works when equal to zero, gets pickle error otherwise
NUM_WORKERS = 0
class FIDCompoundMetric(CompoundMetric):
def __init__(
self,
cmm: CompoundMetricManager,
smm: SampleMetricManager = None,
):
"""
Constructor for FIDCompoundMetric class, subclass of the CompoundMetric class.
Args:
cmm (CompoundMetricManager): Manager used by metrics. Population and dataset are derived
from this manager.
smm (SampleMetricManager, optional): Not used for this metric. Defaults to None.
"""
super(FIDCompoundMetric, self).__init__(FID_NAME, cmm, smm)
# Init storage structure for this metric
self._fid = dict()
def reg_setup_modes(self) -> dict[str, SetupMode]:
ds = self.get_dataset()
return {
f"statistics_{fcm}_{ds.get_name(ds.get_resolution())}": SetupMode(
True,
lambda _, fcm=fcm: self._setup(fcm),
lambda fcm=fcm: self._is_ready(fcm),
)
for fcm in FID_CALC_MODES
}
def _setup(self, calc_mode: str = FID_DEFAULT_CALC_MODE) -> None:
"""
Setup the needed statistics to calculate the metric.
Note that each dataset and `calc_mode` combination needs a calculated statistic.
For more information regarding `calc_mode`, see the documentation for `calc()`.
Args:
calc_mode (str, optional): Calc mode determines FID implementation, different statistics
needed for different implementations. See documentation on `calc()` for more information.
Defaults to `FID_DEFAULT_CALC_MODE` ("clean").
Raises:
ValueError: Error when non-valid `calc_mode`, valid modes are defined by `FID_CALC_MODES`.
"""
# Check calc_mode
if calc_mode not in FID_CALC_MODES:
raise ValueError(
f"{calc_mode} not supported, supported modes: {FID_CALC_MODES}"
)
# Calculate custom statistics
ds = self.get_dataset()
fid.make_custom_stats(
ds.get_name(ds.get_resolution()), str(ds.get_image_dir()), mode=calc_mode
)
def _is_ready(self, calc_mode=FID_DEFAULT_CALC_MODE) -> bool:
"""
Checks if compound metric is ready for calculations.
Args:
calc_mode (str, optional): Calc mode determines FID implementation, different statistics
needed for different implementations. See documentation on `calc()` for more information.
Defaults to `FID_DEFAULT_CALC_MODE` ("clean").
Raises:
ValueError: Error when non-valid `calc_mode`, valid modes are defined by `FID_CALC_MODES`.
Returns:
bool: True if the compound metric is ready for calculations.
"""
# Check calc_mode
if calc_mode not in FID_CALC_MODES:
raise ValueError(
f"{calc_mode} not supported, supported modes: {FID_CALC_MODES}"
)
ds = self.get_dataset()
if type(ds).get_resolution_invariant_name() == "FFHQ" and (
ds.get_resolution() == 256 or ds.get_resolution() == 1024
):
# pre-computed statistic by clean-fid
return True
else:
return fid.test_stats_exists(ds.get_name(), calc_mode)
def _move_filtered_files(self, source_files: list[Path]) -> Path:
# Look for temp name not taken
while True:
target = (
Population.POPULATION_ROOT_DIR
/ f"temp_filtered_population{np.random.randint(10000,99999)}"
)
if not (target.is_file() or target.is_dir()):
break
# Create temp directory
Path.mkdir(target)
# Move filtered files to temp directory
for image_file in source_files:
image_file.rename(target.joinpath(image_file.name))
return target
def _move_filtered_files_back(self, source: Path) -> None:
target = Population.POPULATION_ROOT_DIR / self.get_population().get_name()
# Check if source directory exists
if not source.exists():
raise FileNotFoundError(f"Could not find file: '{source.absolute()}'")
# Check if target directory exists
if not target.exists():
raise FileNotFoundError(f"Could not find file: '{target.absolute()}'")
# Move filtered files back to population directory
for image_file in source.glob("*"):
image_file.rename(target.joinpath(image_file.name))
# Remove temp directory
source.rmdir()
def calc(self, filter_bit: int = 1, **parameters: Any) -> Any:
"""
Calculates the FID given the dataset and the population.
No setup is needed for FFHQ 256/1024; for other custom datasets, `calc()`
requires the user to run `setup()` first.
Args:
filter_bit (int, optional): Filter bit used to select a subset of the
population. Defaults to 1 (IdentityFilter).
calc_mode (str, optional): Either "clean", "legacy_tensorflow, or "legacy_pytorch".
This decides how the FID score should be calculated, i.e., using clean-fid,
regular tensorflow implementation, or pytorch implementation. Default is "clean" (clean-fid).
Raises:
ValueError: Error when non-valid `calc_mode`, valid modes are defined by `FID_CALC_MODES`.
ValueError: Error when the name of the dataset in conjunction with the
specified `calc_mode` doesn't have a pre-computed statistic.
RuntimeError: When the clean-fid library raises an error.
FileNotFoundError: When an incorrect path is provided while moving files.
Returns:
Any: The FID value.
"""
# Fetch parameters
calc_mode = self._check_calc_mode(parameters)
# Move files to temp folder
uris = self._population.get_filtered_data(filter_bit)[
self._population.COLUMN_URI
]
uris = [Path(str_path) for str_path in list(uris)]
pop_path = self._move_filtered_files(uris)
# Get variables for use in FID
ds = self.get_dataset()
resolution = ds.get_resolution()
fid_score = None
if type(ds).get_resolution_invariant_name() == "FFHQ" and (
ds.get_resolution() == 256 or ds.get_resolution() == 1024
):
# Use pre-computed statistic by clean-fid
try:
fid_score = fid.compute_fid(
str(pop_path),
dataset_name=type(ds).get_resolution_invariant_name(),
dataset_res=resolution,
mode=calc_mode,
dataset_split="trainval70k",
num_workers=NUM_WORKERS,
)
except Exception as error:
self._move_filtered_files_back(pop_path)
print("Something went wrong when calculating FID using clean-fid.")
print(repr(error))
raise
else:
# Use custom pre-computed statistic
dataset_name = ds.get_name(resolution)
# Check if statistic exists
if fid.test_stats_exists(dataset_name, calc_mode):
try:
fid_score = fid.compute_fid(
str(pop_path),
dataset_name=dataset_name,
mode=calc_mode,
dataset_split="custom",
num_workers=NUM_WORKERS,
)
except Exception as error:
# Move back files
self._move_filtered_files_back(pop_path)
print("Something went when calculating FID using clean-fid.")
print(repr(error))
raise
else:
# Move back files
self._move_filtered_files_back(pop_path)
raise ValueError(
f"Statistic named '{dataset_name}' with `calc_mode` '{calc_mode}'"
" has no statistic. Double check `calc_mode` or run 'setup()'"
)
# Move back files
self._move_filtered_files_back(pop_path)
# Save result
self._fid[calc_mode] = fid_score
return fid_score
def get(self, calc_if_missing: bool = False, **parameters: Any) -> Any:
# Check parameters
calc_mode = self._check_calc_mode(parameters)
# Check if metric already calculated
if calc_mode in self._fid.keys() and self._fid[calc_mode] is not None:
return self._fid[calc_mode]
# Check if calculate when missing
elif calc_if_missing:
return self.calc(**parameters)
else:
return None
def
|
(self) -> None:
for calc_mode, fid_score in self._fid.items():
print(calc_mode + " FID: ", fid_score)
def plot_result(self) -> None:
pass
def _check_calc_mode(self, parameters) -> str:
# Fetch parameters
if "calc_mode" in parameters.keys():
calc_mode = parameters["calc_mode"]
# Check calc_mode
if calc_mode not in FID_CALC_MODES:
raise ValueError(
f"{calc_mode} not supported, supported modes: {FID_CALC_MODES}"
)
else:
calc_mode = FID_DEFAULT_CALC_MODE
return calc_mode
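# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file). It illustrates how the
# metric might be driven end to end, assuming a CompoundMetricManager `cmm`
# that is already wired to a population and a dataset; constructing that
# manager is project-specific and omitted here, and the function name below is
# only illustrative.
# ---------------------------------------------------------------------------
def _example_fid_usage(cmm: CompoundMetricManager):
    metric = FIDCompoundMetric(cmm)
    # Custom datasets need pre-computed statistics before `calc()`; FFHQ
    # 256/1024 ships with clean-fid statistics and can skip this step.
    if not metric._is_ready(calc_mode="clean"):
        metric._setup(calc_mode="clean")
    # FID for the identity-filtered population (filter_bit=1) using clean-fid.
    return metric.calc(filter_bit=1, calc_mode="clean")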
|
print_result
|
counter_add.go
|
// Copyright 2018 Nuno Preguica, NOVA LINCS, FCT, Universidade NOVA de Lisboa.
// All rights reserved.
// Use of this source code is governed by Apache 2.0
// license that can be found in the LICENSE file.
package opcrdts
import (
"encoding/binary"
"rockscrdtdb/utils"
)
// Counter operation
type CounterOpAdd struct {
Delta int64
}
func (m *CounterOpAdd) GetCRDTType() byte {
return CRDT_COUNTER
}
func (m *CounterOpAdd) GetType() byte {
return CRDT_COUNTER__INC
}
func (m *CounterOpAdd) Serialize() ([]byte, bool) {
buf := make([]byte, 8)
binary.BigEndian.PutUint64(buf, uint64(m.Delta))
return buf, true
}
func
|
(b []byte) (CRDTOperation, bool) {
return &CounterOpAdd{int64(binary.BigEndian.Uint64(b))},true
}
func (leftOp *CounterOpAdd) Merge(otherOp CRDTOperation) (CRDTOperation, bool) {
rightOp, ok := otherOp.(*CounterOpAdd)
if !ok {
return leftOp, false
}
leftOp.Delta += rightOp.Delta
return leftOp, true
}
func (cntOp *CounterOpAdd) Apply(obj CRDT) bool {
cnt, ok := (obj).(*Counter)
if !ok {
return false
}
cnt.Val = cnt.Val + cntOp.Delta
return true
}
func NewCounterOpAdd(ts *utils.Timestamp, vv *utils.VersionVector, delta int64) *CounterOpAdd {
return &CounterOpAdd{ delta}
}
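// Hedged usage sketch (not part of the original file). It shows how two add
// operations could be merged and applied locally; the function name is only
// illustrative, and the Counter type (with its int64 Val field) is assumed to
// be the CRDT defined elsewhere in this package.
func exampleCounterOpAddUsage() int64 {
	left := &CounterOpAdd{Delta: 3}
	right := &CounterOpAdd{Delta: 4}
	// Merge folds the right-hand delta into the left-hand operation.
	left.Merge(right) // left.Delta == 7
	cnt := &Counter{}
	left.Apply(cnt) // cnt.Val == 7
	return cnt.Val
}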
|
UnserializeCounterOpAdd
|
identify3_ui.go
|
// Auto-generated to Go types and interfaces using avdl-compiler v1.4.2 (https://github.com/keybase/node-avdl-compiler)
// Input file: avdl/keybase1/identify3_ui.avdl
package keybase1
import (
"github.com/keybase/go-framed-msgpack-rpc/rpc"
context "golang.org/x/net/context"
)
type Identify3RowState int
const (
Identify3RowState_CHECKING Identify3RowState = 1
Identify3RowState_VALID Identify3RowState = 2
Identify3RowState_ERROR Identify3RowState = 3
Identify3RowState_WARNING Identify3RowState = 4
Identify3RowState_REVOKED Identify3RowState = 5
)
func (o Identify3RowState) DeepCopy() Identify3RowState { return o }
var Identify3RowStateMap = map[string]Identify3RowState{
"CHECKING": 1,
"VALID": 2,
"ERROR": 3,
"WARNING": 4,
"REVOKED": 5,
}
var Identify3RowStateRevMap = map[Identify3RowState]string{
1: "CHECKING",
2: "VALID",
3: "ERROR",
4: "WARNING",
5: "REVOKED",
}
func (e Identify3RowState) String() string {
if v, ok := Identify3RowStateRevMap[e]; ok {
return v
}
return ""
}
type Identify3RowColor int
const (
Identify3RowColor_BLUE Identify3RowColor = 1
Identify3RowColor_RED Identify3RowColor = 2
Identify3RowColor_BLACK Identify3RowColor = 3
Identify3RowColor_GREEN Identify3RowColor = 4
Identify3RowColor_GRAY Identify3RowColor = 5
Identify3RowColor_YELLOW Identify3RowColor = 6
Identify3RowColor_ORANGE Identify3RowColor = 7
)
func (o Identify3RowColor) DeepCopy() Identify3RowColor { return o }
var Identify3RowColorMap = map[string]Identify3RowColor{
"BLUE": 1,
"RED": 2,
"BLACK": 3,
"GREEN": 4,
"GRAY": 5,
"YELLOW": 6,
"ORANGE": 7,
}
var Identify3RowColorRevMap = map[Identify3RowColor]string{
1: "BLUE",
2: "RED",
3: "BLACK",
4: "GREEN",
5: "GRAY",
6: "YELLOW",
7: "ORANGE",
}
func (e Identify3RowColor) String() string {
if v, ok := Identify3RowColorRevMap[e]; ok {
return v
}
return ""
}
type Identify3ResultType int
const (
Identify3ResultType_OK Identify3ResultType = 0
Identify3ResultType_BROKEN Identify3ResultType = 1
Identify3ResultType_NEEDS_UPGRADE Identify3ResultType = 2
Identify3ResultType_CANCELED Identify3ResultType = 3
)
func (o Identify3ResultType) DeepCopy() Identify3ResultType { return o }
var Identify3ResultTypeMap = map[string]Identify3ResultType{
"OK": 0,
"BROKEN": 1,
"NEEDS_UPGRADE": 2,
"CANCELED": 3,
}
var Identify3ResultTypeRevMap = map[Identify3ResultType]string{
0: "OK",
1: "BROKEN",
2: "NEEDS_UPGRADE",
3: "CANCELED",
}
func (e Identify3ResultType) String() string {
if v, ok := Identify3ResultTypeRevMap[e]; ok {
return v
}
return ""
}
type Identify3RowMeta struct {
Color Identify3RowColor `codec:"color" json:"color"`
Label string `codec:"label" json:"label"`
}
func (o Identify3RowMeta) DeepCopy() Identify3RowMeta {
return Identify3RowMeta{
Color: o.Color.DeepCopy(),
Label: o.Label,
}
}
type Identify3Row struct {
GuiID Identify3GUIID `codec:"guiID" json:"guiID"`
Key string `codec:"key" json:"key"`
Value string `codec:"value" json:"value"`
Priority int `codec:"priority" json:"priority"`
SiteURL string `codec:"siteURL" json:"siteURL"`
SiteIcon []SizedImage `codec:"siteIcon" json:"siteIcon"`
SiteIconFull []SizedImage `codec:"siteIconFull" json:"siteIconFull"`
ProofURL string `codec:"proofURL" json:"proofURL"`
SigID SigID `codec:"sigID" json:"sigID"`
Ctime Time `codec:"ctime" json:"ctime"`
State Identify3RowState `codec:"state" json:"state"`
Metas []Identify3RowMeta `codec:"metas" json:"metas"`
Color Identify3RowColor `codec:"color" json:"color"`
Kid *KID `codec:"kid,omitempty" json:"kid,omitempty"`
}
func (o Identify3Row) DeepCopy() Identify3Row {
return Identify3Row{
GuiID: o.GuiID.DeepCopy(),
Key: o.Key,
Value: o.Value,
Priority: o.Priority,
SiteURL: o.SiteURL,
SiteIcon: (func(x []SizedImage) []SizedImage {
if x == nil {
return nil
}
ret := make([]SizedImage, len(x))
for i, v := range x {
vCopy := v.DeepCopy()
ret[i] = vCopy
}
return ret
})(o.SiteIcon),
SiteIconFull: (func(x []SizedImage) []SizedImage {
if x == nil {
return nil
}
ret := make([]SizedImage, len(x))
for i, v := range x {
vCopy := v.DeepCopy()
ret[i] = vCopy
}
return ret
})(o.SiteIconFull),
ProofURL: o.ProofURL,
SigID: o.SigID.DeepCopy(),
Ctime: o.Ctime.DeepCopy(),
State: o.State.DeepCopy(),
Metas: (func(x []Identify3RowMeta) []Identify3RowMeta {
if x == nil {
return nil
}
ret := make([]Identify3RowMeta, len(x))
for i, v := range x {
vCopy := v.DeepCopy()
ret[i] = vCopy
}
return ret
})(o.Metas),
Color: o.Color.DeepCopy(),
Kid: (func(x *KID) *KID {
if x == nil {
return nil
}
tmp := (*x).DeepCopy()
return &tmp
})(o.Kid),
}
}
type Identify3ShowTrackerArg struct {
GuiID Identify3GUIID `codec:"guiID" json:"guiID"`
Assertion Identify3Assertion `codec:"assertion" json:"assertion"`
Reason IdentifyReason `codec:"reason" json:"reason"`
ForceDisplay bool `codec:"forceDisplay" json:"forceDisplay"`
}
type Identify3UpdateRowArg struct {
Row Identify3Row `codec:"row" json:"row"`
}
type Identify3UserResetArg struct {
GuiID Identify3GUIID `codec:"guiID" json:"guiID"`
}
type Identify3UpdateUserCardArg struct {
GuiID Identify3GUIID `codec:"guiID" json:"guiID"`
Card UserCard `codec:"card" json:"card"`
}
type Identify3TrackerTimedOutArg struct {
GuiID Identify3GUIID `codec:"guiID" json:"guiID"`
}
type Identify3ResultArg struct {
GuiID Identify3GUIID `codec:"guiID" json:"guiID"`
Result Identify3ResultType `codec:"result" json:"result"`
}
type Identify3UiInterface interface {
Identify3ShowTracker(context.Context, Identify3ShowTrackerArg) error
Identify3UpdateRow(context.Context, Identify3Row) error
Identify3UserReset(context.Context, Identify3GUIID) error
Identify3UpdateUserCard(context.Context, Identify3UpdateUserCardArg) error
Identify3TrackerTimedOut(context.Context, Identify3GUIID) error
Identify3Result(context.Context, Identify3ResultArg) error
}
func
|
(i Identify3UiInterface) rpc.Protocol {
return rpc.Protocol{
Name: "keybase.1.identify3Ui",
Methods: map[string]rpc.ServeHandlerDescription{
"identify3ShowTracker": {
MakeArg: func() interface{} {
var ret [1]Identify3ShowTrackerArg
return &ret
},
Handler: func(ctx context.Context, args interface{}) (ret interface{}, err error) {
typedArgs, ok := args.(*[1]Identify3ShowTrackerArg)
if !ok {
err = rpc.NewTypeError((*[1]Identify3ShowTrackerArg)(nil), args)
return
}
err = i.Identify3ShowTracker(ctx, typedArgs[0])
return
},
},
"identify3UpdateRow": {
MakeArg: func() interface{} {
var ret [1]Identify3UpdateRowArg
return &ret
},
Handler: func(ctx context.Context, args interface{}) (ret interface{}, err error) {
typedArgs, ok := args.(*[1]Identify3UpdateRowArg)
if !ok {
err = rpc.NewTypeError((*[1]Identify3UpdateRowArg)(nil), args)
return
}
err = i.Identify3UpdateRow(ctx, typedArgs[0].Row)
return
},
},
"identify3UserReset": {
MakeArg: func() interface{} {
var ret [1]Identify3UserResetArg
return &ret
},
Handler: func(ctx context.Context, args interface{}) (ret interface{}, err error) {
typedArgs, ok := args.(*[1]Identify3UserResetArg)
if !ok {
err = rpc.NewTypeError((*[1]Identify3UserResetArg)(nil), args)
return
}
err = i.Identify3UserReset(ctx, typedArgs[0].GuiID)
return
},
},
"identify3UpdateUserCard": {
MakeArg: func() interface{} {
var ret [1]Identify3UpdateUserCardArg
return &ret
},
Handler: func(ctx context.Context, args interface{}) (ret interface{}, err error) {
typedArgs, ok := args.(*[1]Identify3UpdateUserCardArg)
if !ok {
err = rpc.NewTypeError((*[1]Identify3UpdateUserCardArg)(nil), args)
return
}
err = i.Identify3UpdateUserCard(ctx, typedArgs[0])
return
},
},
"identify3TrackerTimedOut": {
MakeArg: func() interface{} {
var ret [1]Identify3TrackerTimedOutArg
return &ret
},
Handler: func(ctx context.Context, args interface{}) (ret interface{}, err error) {
typedArgs, ok := args.(*[1]Identify3TrackerTimedOutArg)
if !ok {
err = rpc.NewTypeError((*[1]Identify3TrackerTimedOutArg)(nil), args)
return
}
err = i.Identify3TrackerTimedOut(ctx, typedArgs[0].GuiID)
return
},
},
"identify3Result": {
MakeArg: func() interface{} {
var ret [1]Identify3ResultArg
return &ret
},
Handler: func(ctx context.Context, args interface{}) (ret interface{}, err error) {
typedArgs, ok := args.(*[1]Identify3ResultArg)
if !ok {
err = rpc.NewTypeError((*[1]Identify3ResultArg)(nil), args)
return
}
err = i.Identify3Result(ctx, typedArgs[0])
return
},
},
},
}
}
type Identify3UiClient struct {
Cli rpc.GenericClient
}
func (c Identify3UiClient) Identify3ShowTracker(ctx context.Context, __arg Identify3ShowTrackerArg) (err error) {
err = c.Cli.Call(ctx, "keybase.1.identify3Ui.identify3ShowTracker", []interface{}{__arg}, nil)
return
}
func (c Identify3UiClient) Identify3UpdateRow(ctx context.Context, row Identify3Row) (err error) {
__arg := Identify3UpdateRowArg{Row: row}
err = c.Cli.Notify(ctx, "keybase.1.identify3Ui.identify3UpdateRow", []interface{}{__arg})
return
}
func (c Identify3UiClient) Identify3UserReset(ctx context.Context, guiID Identify3GUIID) (err error) {
__arg := Identify3UserResetArg{GuiID: guiID}
err = c.Cli.Notify(ctx, "keybase.1.identify3Ui.identify3UserReset", []interface{}{__arg})
return
}
func (c Identify3UiClient) Identify3UpdateUserCard(ctx context.Context, __arg Identify3UpdateUserCardArg) (err error) {
err = c.Cli.Notify(ctx, "keybase.1.identify3Ui.identify3UpdateUserCard", []interface{}{__arg})
return
}
func (c Identify3UiClient) Identify3TrackerTimedOut(ctx context.Context, guiID Identify3GUIID) (err error) {
__arg := Identify3TrackerTimedOutArg{GuiID: guiID}
err = c.Cli.Notify(ctx, "keybase.1.identify3Ui.identify3TrackerTimedOut", []interface{}{__arg})
return
}
func (c Identify3UiClient) Identify3Result(ctx context.Context, __arg Identify3ResultArg) (err error) {
err = c.Cli.Notify(ctx, "keybase.1.identify3Ui.identify3Result", []interface{}{__arg})
return
}
|
Identify3UiProtocol
|
data-transfer-object.ts
|
import type {ApiCurrency, ApiBot, ApiGetManyDto, ApiTransaction} from '../types/api';
import type {Currency, Bot} from '../types/discoin';
/**
* Convert an API currency to a regular currency.
* @param currency - API currency to convert
* @returns The parsed currency
* @internal
*/
export function apiCurrencyToCurrency(currency: ApiCurrency): Currency {
const {reserve, wid, ...rest} = currency;
return {...rest, reserve: Number(reserve), wid: Number(wid)};
}
/**
* Convert an API bot to a regular bot.
* Wrapper around `apiCurrencyToCurrency()`.
* @param bot - The API bot to convert
* @internal
*/
export function
|
(bot: ApiBot): Bot {
const {currencies, ...rest} = bot;
return {...rest, currencies: currencies.map(currency => apiCurrencyToCurrency(currency))};
}
type GetManyResponse = ApiBot[] | ApiCurrency[] | ApiTransaction[];
/**
* Check if a `getMany` response from the API is a DTO.
* @param getManyResponse - The `getMany` response to check
* @returns Boolean of whether or not the provided response is a DTO
* @internal
*/
export function getManyResponseIsDto<T>(getManyResponse: GetManyResponse | ApiGetManyDto<T>): getManyResponse is ApiGetManyDto<T> {
return !Array.isArray(getManyResponse) && Object.prototype.hasOwnProperty.call(getManyResponse, 'data');
}
/**
* Check if a currency object (`APICurrency` or `Currency`) is an `APICurrency`.
* @param currency - Currency object to check
* @returns Boolean of whether or not the provided currency is an API currency.
* @internal
*/
export function currencyIsApiCurrency(currency: ApiCurrency | Currency): currency is ApiCurrency {
return typeof currency.reserve === 'string' && typeof currency.value === 'string';
}
|
apiBotToBot
|
doc.go
|
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package runtime includes helper functions for working with API objects
|
// 1. Your code refers to an internal set of API objects.
//
// 2. In a separate package, you have an external set of API objects.
//
// 3. The external set is considered to be versioned, and no breaking
// changes are ever made to it (fields may be added but not changed
// or removed).
//
// 4. As your api evolves, you'll make an additional versioned package
// with every major change.
//
// 5. Versioned packages have conversion functions which convert to
// and from the internal version.
//
// 6. You'll continue to support older versions according to your
// deprecation policy, and you can easily provide a program/library
// to update old versions into new versions because of 5.
//
// 7. All of your serializations and deserializations are handled in a
// centralized place.
//
// Package runtime provides a conversion helper to make 5 easy, and the
// Encode/Decode/DecodeInto trio to accomplish 7. You can also register
// additional "codecs" which use a version of your choice. It's
// recommended that you register your types with runtime in your
// package's init function.
//
// As a bonus, a few common types useful from all api objects and versions
// are provided in types.go.
package runtime // import "k8s.io/apimachinery/pkg/runtime"
|
// that follow the kubernetes API object conventions, which are:
//
// 0. Your API objects have a common metadata struct member, TypeMeta.
//
|
ping-pong.py
|
from pygame import *
from random import randint, uniform
from time import time as timer
class GameSprite(sprite.Sprite):
def __init__(self, player_image, player_x, player_y, w, h, player_speed):
super().__init__()
self.image = transform.scale(image.load(player_image), (w, h))
self.speed = player_speed
self.rect = self.image.get_rect()
self.rect.x = player_x
self.rect.y = player_y
self.killed = 0
def reset(self):
window.blit(self.image, (self.rect.x, self.rect.y))
class Player(GameSprite):
def update_right(self):
keys_pressed = key.get_pressed()
if keys_pressed[K_w] and self.rect.y > 0: self.rect.y -= self.speed
if keys_pressed[K_s] and self.rect.y < 400: self.rect.y += self.speed
def update_left(self):
keys_pressed = key.get_pressed()
if keys_pressed[K_UP] and self.rect.y > 0: self.rect.y -= self.speed
if keys_pressed[K_DOWN] and self.rect.y < 400: self.rect.y += self.speed
killed = 0
lost = 0
lives = 3
clock = time.Clock()
FPS = 60
window = display.set_mode((700, 500))
display.set_caption("ping-pong")
background = transform.scale(image.load("galaxy.jpg"), (700, 500))
player = Player('racket.png',10, 100, 75, 100, 15)
player2 = Player('racket.png',620, 100, 75, 100, 15)
ball = GameSprite('tenis_ball.png', 350 , 250, 40, 40, 2)
speed_y = 3
speed_x = 3
font.init()
font1 = font.SysFont('Arial', 36)
p1lose_text = font1.render('left player lost', 1, (255,255,255))
p2lose_text = font1.render('right player lost', 1, (255,255,255))
game = True
finish = False
while game:
for e in event.get():
if e.type == QUIT: game = False
if finish != True:
window.fill((0,255,0))
player.reset()
player.update_right()
player2.reset()
player2.update_left()
ball.reset()
ball.rect.x += speed_x
ball.rect.y += speed_y
        if ball.rect.x <= 0:
            finish = True
            window.blit(p1lose_text, (250,400))
        if ball.rect.x >= 700:
            finish = True
            window.blit(p2lose_text, (250,400))
if ball.rect.y <= 0 :
speed_y *= -1
if ball.rect.y > 460 :
speed_y *= -1
if
|
de_rect(ball, player2):
speed_x *= -1
if sprite.collide_rect(ball, player):
speed_x *= -1
clock.tick(FPS)
display.update()
|
sprite.colli
|
dfa.py
|
from enum import unique, Enum
class DFA:
def __init__(self, source_data):
if type(source_data) != dict:
            raise TypeError('Argument 1 expects type {arg_type_expect}, but received type {arg_type}'.format(
arg_type_expect='dict', arg_type=str(type(source_data))
))
if type(source_data.get('type')) != Token:
            raise TypeError('The "type" field of argument 1 expects type {arg_type_expect}, but received type {arg_type}'.format(
arg_type_expect='Token', arg_type=str(type(source_data.get('type')))
))
self.token_type = source_data.get('type')
if type(source_data.get('as_set')) != set:
            raise TypeError('The "as_set" field of argument 1 expects type {arg_type_expect}, but received type {arg_type}'.format(
arg_type_expect='set', arg_type=str(type(source_data.get('as_set')))
))
self.as_set = source_data.get('as_set')
if type(source_data.get('stm')) != dict:
            raise TypeError('The "stm" field of argument 1 expects type {arg_type_expect}, but received type {arg_type}'.format(
arg_type_expect='dict', arg_type=str(type(source_data.get('stm')))
))
self.stm = source_data.get('stm')
self.state = 0
    # Clear the state (return to the initial state)
def clear(self):
self.state = 0
    # State transition function
    # Returns a bool: True if the transition succeeds, otherwise False
def move(self, ch):
        # Transition on a specific character
if self.stm.get(ch) is not None:
if self.stm.get(ch)[self.state] is not None:
self.state = self.stm.get(ch)[self.state]
else:
return False
        # Transition on a special character set
elif self.stm.get(SpecificCharSet.BLANK) is not None \
and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.BLANK):
if self.stm.get(SpecificCharSet.BLANK)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.BLANK)[self.state]
else:
return False
elif self.stm.get(SpecificCharSet.NONZERO_DIGIT) is not None \
and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.NONZERO_DIGIT):
if self.stm.get(SpecificCharSet.NONZERO_DIGIT)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.NONZERO_DIGIT)[self.state]
else:
return False
elif self.stm.get(SpecificCharSet.DIGIT) is not None \
and ch in SpecificCharSet.CHARSET_MAP.get(SpecificCharSet.DIGIT):
if self.stm.get(SpecificCharSet.DIGIT)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.DIGIT)[self.state]
else:
return False
        # Wildcard transition (any character)
elif self.stm.get(SpecificCharSet.ANY) is not None:
if self.stm.get(SpecificCharSet.ANY)[self.state] is not None:
self.state = self.stm.get(SpecificCharSet.ANY)[self.state]
else:
return False
        # Character is not in any accepted character set
else:
return False
return True
    # Whether the DFA is currently in an accepting state
def is_access(self):
return self.state in self.as_set
@unique
class Token(Enum):
    # Reserved words
ORIGIN = 1
SCALE = 2
ROT = 3
IS = 4
TO = 5
STEP = 6
DRAW = 7
FOR = 8
FROM = 9
COLOR = 10
BACKGROUND = 11
    # Separators
    SEMICOLON = 21 # semicolon
    L_BRACKET = 22 # left parenthesis
    R_BRACKET = 23 # right parenthesis
    COMMA = 24 # comma
    # Operators
    PLUS = 35 # plus sign
    MINUS = 36 # minus sign
    MUL = 37 # multiplication sign
    DIV = 38 # division sign
    POWER = 39 # exponentiation sign
    # Others
    FUNC = 51 # function
    NUM = 52 # numeric literal
    CONST_ID = 53 # named constant
    T = 54 # parameter
    COMMENT = 61 # comment
    NON_TOKEN = 62 # empty token (end of source program)
    ERR_TOKEN = 63 # error token
class SpecificCharSet(object):
NONZERO_DIGIT = 'NONZERO_DIGIT'
DIGIT = 'DIGIT'
BLANK = 'BLANK'
ANY = 'ANY'
CHARSET_MAP = {
'NONZERO_DIGIT': {'1', '2', '3', '4', '5', '6', '7', '8', '9'},
'DIGIT': {'0', '1', '2', '3', '4', '5', '6', '7', '8', '9'},
'BLANK': {'\n', ' '}
}
# DFAs for recognizing the various tokens
# type is the type of token the DFA recognizes
# as_set is the DFA's set of accepting states, the access s
|
is the DFA's state transition matrix; state 0 is the start state
DFA_DATA = (
    # Reserved words
{
'type': Token.ORIGIN,
'as_set': {7, },
'stm': {
'o': (1, None, None, None, None, None, None, None),
'r': (None, 2, None, None, None, None, None, None),
'i': (None, None, 3, None, 5, None, None, None),
'g': (None, None, None, 4, None, None, None, None),
'n': (None, None, None, None, None, 6, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, None, 7, None)
}
}, {
'type': Token.SCALE,
'as_set': {6, },
'stm': {
's': (1, None, None, None, None, None, None),
'c': (None, 2, None, None, None, None, None),
'a': (None, None, 3, None, None, None, None),
'l': (None, None, None, 4, None, None, None),
'e': (None, None, None, None, 5, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, 6, None)
}
}, {
'type': Token.ROT,
'as_set': {4, },
'stm': {
'r': (1, None, None, None, None),
'o': (None, 2, None, None, None),
't': (None, None, 3, None, None),
SpecificCharSet.BLANK: (None, None, None, 4, None)
}
}, {
'type': Token.IS,
'as_set': {3, },
'stm': {
'i': (1, None, None, None),
's': (None, 2, None, None),
SpecificCharSet.BLANK: (None, None, 3, None)
}
}, {
'type': Token.TO,
'as_set': {3, },
'stm': {
't': (1, None, None, None),
'o': (None, 2, None, None),
SpecificCharSet.BLANK: (None, None, 3, None)
}
}, {
'type': Token.STEP,
'as_set': {5, },
'stm': {
's': (1, None, None, None, None, None),
't': (None, 2, None, None, None, None),
'e': (None, None, 3, None, None, None),
'p': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None),
}
}, {
'type': Token.DRAW,
'as_set': {5, },
'stm': {
'd': (1, None, None, None, None, None),
'r': (None, 2, None, None, None, None),
'a': (None, None, 3, None, None, None),
'w': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None),
}
}, {
'type': Token.FOR,
'as_set': {4, },
'stm': {
'f': (1, None, None, None, None),
'o': (None, 2, None, None, None),
'r': (None, None, 3, None, None),
SpecificCharSet.BLANK: (None, None, None, 4, None)
}
}, {
'type': Token.FROM,
'as_set': {5, },
'stm': {
'f': (1, None, None, None, None, None),
'r': (None, 2, None, None, None, None),
'o': (None, None, 3, None, None, None),
'm': (None, None, None, 4, None, None),
SpecificCharSet.BLANK: (None, None, None, None, 5, None)
}
}, {
'type': Token.COLOR,
'as_set': {6, },
'stm': {
'c': (1, None, None, None, None, None, None),
'o': (None, 2, None, 4, None, None, None),
'l': (None, None, 3, None, None, None, None),
'r': (None, None, None, None, 5, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, 6, None)
}
}, {
'type': Token.BACKGROUND,
'as_set': {11, },
'stm': {
'b': (1, None, None, None, None, None, None, None, None, None, None, None),
'a': (None, 2, None, None, None, None, None, None, None, None, None, None),
'c': (None, None, 3, None, None, None, None, None, None, None, None, None),
'k': (None, None, None, 4, None, None, None, None, None, None, None, None),
'g': (None, None, None, None, 5, None, None, None, None, None, None, None),
'r': (None, None, None, None, None, 6, None, None, None, None, None, None),
'o': (None, None, None, None, None, None, 7, None, None, None, None, None),
'u': (None, None, None, None, None, None, None, 8, None, None, None, None),
'n': (None, None, None, None, None, None, None, None, 9, None, None, None),
'd': (None, None, None, None, None, None, None, None, None, 10, None, None),
SpecificCharSet.BLANK: (None, None, None, None, None, None, None, None, None, None, 11, None)
}
},
    # Separators
{
'type': Token.SEMICOLON,
'as_set': {1, },
'stm': {
';': (1, None)
}
}, {
'type': Token.L_BRACKET,
'as_set': {1, },
'stm': {
'(': (1, None)
}
}, {
'type': Token.R_BRACKET,
'as_set': {1, },
'stm': {
')': (1, None)
}
}, {
'type': Token.COMMA,
'as_set': {1, },
'stm': {
',': (1, None)
}
},
    # Operators
{
'type': Token.PLUS,
'as_set': {1, },
'stm': {
'+': (1, None)
}
}, {
'type': Token.MINUS,
'as_set': {1, },
'stm': {
'-': (1, None)
}
}, {
'type': Token.MUL,
'as_set': {1, },
'stm': {
'*': (1, None)
}
}, {
'type': Token.DIV,
'as_set': {1, },
'stm': {
'/': (1, None)
}
}, {
'type': Token.POWER,
'as_set': {1, },
'stm': {
'^': (1, None)
}
},
    # Others
{
'type': Token.FUNC,
'as_set': {10, },
'stm': {
'a': (None, 6, None, None, None, None, None, None, None, None, None),
'c': (3, None, None, None, None, None, None, None, None, None, None),
'e': (4, None, None, None, None, None, None, None, None, None, None),
'i': (None, None, 6, None, None, None, None, None, None, None, None),
'l': (6, None, None, None, None, None, None, None, None, None, None),
'n': (None, None, None, None, None, None, 10, None, None, None, None),
'o': (None, None, None, 8, None, None, None, None, None, None, None),
'p': (None, None, None, None, None, None, None, None, None, 10, None),
'q': (None, None, 5, None, None, None, None, None, None, None, None),
'r': (None, None, None, None, None, 7, None, None, None, None, None),
's': (2, None, None, None, None, None, None, None, 10, None, None),
't': (1, None, None, None, None, None, None, 10, None, None, None),
'x': (None, None, None, None, 9, None, None, None, None, None, None)
}
}, {
'type': Token.NUM,
'as_set': {2, 3, 4},
'stm': {
SpecificCharSet.NONZERO_DIGIT: (3, 4, None, 3, 4),
'0': (2, 4, None, 3, 4),
'.': (1, None, 4, 4, None)
}
}, {
'type': Token.CONST_ID,
'as_set': {2, },
'stm': {
'e': (2, None, None),
'p': (1, None, None),
'i': (None, 2, None),
}
}, {
'type': Token.T,
'as_set': {1, },
'stm': {
't': (1, None)
}
}, {
'type': Token.COMMENT,
'as_set': {3, },
'stm': {
SpecificCharSet.ANY: (None, None, 2, None),
'/': (1, 2, 2, None),
'\n': (None, None, 3, None),
}
}, {
'type': Token.ERR_TOKEN,
'as_set': {0, 1},
'stm': {
SpecificCharSet.ANY: (1, 1),
SpecificCharSet.BLANK: (None, None)
}
}
)
|
tate set
# stm
|
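# A minimal usage sketch for the lexer DFAs above, assuming the DFA class, the
# Token enum and the DFA_DATA tuple are importable from dfa.py exactly as shown.
# Reserved-word DFAs only reach their accepting state after a trailing blank, so
# the probe string ends with a space.
from dfa import DFA, Token, DFA_DATA

is_dfa = next(DFA(d) for d in DFA_DATA if d['type'] == Token.IS)
for ch in "is ":
    if not is_dfa.move(ch):
        break
print(is_dfa.is_access())  # True: "is " is accepted by the IS-token DFA
is_dfa.clear()             # back to state 0 before scanning the next lexeme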
rwlock.py
|
# -*- coding: utf-8 -*-
""" rwlock.py
A class to implement read-write locks on top of the standard threading
library.
This is implemented with two mutexes (threading.Lock instances) as per this
wikipedia pseudocode:
https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock#Using_two_mutexes
Code written by Tyler Neylon at Unbox Research.
This file is public domain.
Modified to add a w_demote function to convert a writer lock to a reader lock
"""
# _______________________________________________________________________
# Imports
from contextlib import contextmanager
from threading import Lock
# _______________________________________________________________________
# Class
class RWLock(object):
""" RWLock class; this is meant to allow an object to be read from by
multiple threads, but only written to by a single thread at a time. See:
https://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock
Usage:
from rwlock import RWLock
my_obj_rwlock = RWLock()
# When reading from my_obj:
with my_obj_rwlock.r_locked():
do_read_only_things_with(my_obj)
# When writing to my_obj:
with my_obj_rwlock.w_locked():
mutate(my_obj)
"""
def __init__(self):
self.w_lock = Lock()
self.num_r_lock = Lock()
self.num_r = 0
# The d_lock is needed to handle the demotion case,
# so that the writer can become a reader without releasing the w_lock.
# the d_lock is held by the writer, and prevents any other thread from taking the
# num_r_lock during that time, which means the writer thread is able to take the
# num_r_lock to update the num_r.
self.d_lock = Lock()
# ___________________________________________________________________
# Reading methods.
|
self.num_r_lock.acquire()
self.num_r += 1
if self.num_r == 1:
self.w_lock.acquire()
self.num_r_lock.release()
self.d_lock.release()
def r_release(self):
assert self.num_r > 0
self.num_r_lock.acquire()
self.num_r -= 1
if self.num_r == 0:
self.w_lock.release()
self.num_r_lock.release()
@contextmanager
def r_locked(self):
""" This method is designed to be used via the `with` statement. """
try:
self.r_acquire()
yield
finally:
self.r_release()
# ___________________________________________________________________
# Writing methods.
def w_acquire(self):
self.d_lock.acquire()
self.w_lock.acquire()
def w_acquire_non_blocking(self):
# if d_lock and w_lock can be acquired without blocking, acquire and return True,
# else immediately return False.
if self.d_lock.acquire(blocking=False):
if self.w_lock.acquire(blocking=False):
return True
else:
self.d_lock.release()
return False
def w_release(self):
self.w_lock.release()
self.d_lock.release()
def w_demote(self):
"""demote a writer lock to a reader lock"""
# the d_lock is already held from w_acquire.
# releasing the d_lock at the end of this function allows multiple readers.
# incrementing num_r makes this thread one of those readers.
self.num_r_lock.acquire()
self.num_r += 1
self.num_r_lock.release()
self.d_lock.release()
@contextmanager
def w_locked(self):
""" This method is designed to be used via the `with` statement. """
try:
self.w_acquire()
yield
finally:
self.w_release()
|
def r_acquire(self):
self.d_lock.acquire()
|
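# A minimal usage sketch of the writer-to-reader demotion described above,
# assuming the RWLock class is importable from rwlock.py as shown: the thread
# takes the write lock, mutates the shared object, demotes itself to a reader
# without giving up exclusivity in between, and finally releases the read slot
# it acquired through w_demote().
from rwlock import RWLock

shared = []
lock = RWLock()

lock.w_acquire()
shared.append("written under the write lock")
lock.w_demote()           # become one of possibly many concurrent readers
snapshot = list(shared)   # read-only access; other readers may join now
lock.r_release()          # the last reader out releases the underlying w_lock
print(snapshot)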
server.py
|
# Copyright 2014-2021 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import itertools
import logging
import os.path
import re
import urllib.parse
from textwrap import indent
from typing import Any, Dict, Iterable, List, Optional, Set, Tuple, Union
import attr
import yaml
from netaddr import AddrFormatError, IPNetwork, IPSet
from twisted.conch.ssh.keys import Key
from synapse.api.room_versions import KNOWN_ROOM_VERSIONS
from synapse.types import JsonDict
from synapse.util.module_loader import load_module
from synapse.util.stringutils import parse_and_validate_server_name
from ._base import Config, ConfigError
from ._util import validate_config
logger = logging.getLogger(__name__)
# by default, we attempt to listen on both '::' *and* '0.0.0.0' because some OSes
# (Windows, macOS, other BSD/Linux where net.ipv6.bindv6only is set) will only listen
# on IPv6 when '::' is set.
#
# We later check for errors when binding to 0.0.0.0 and ignore them if :: is also in
# in the list.
DEFAULT_BIND_ADDRESSES = ["::", "0.0.0.0"]
def _6to4(network: IPNetwork) -> IPNetwork:
"""Convert an IPv4 network into a 6to4 IPv6 network per RFC 3056."""
# 6to4 networks consist of:
# * 2002 as the first 16 bits
# * The first IPv4 address in the network hex-encoded as the next 32 bits
# * The new prefix length needs to include the bits from the 2002 prefix.
hex_network = hex(network.first)[2:]
hex_network = ("0" * (8 - len(hex_network))) + hex_network
return IPNetwork(
"2002:%s:%s::/%d"
% (
hex_network[:4],
hex_network[4:],
16 + network.prefixlen,
)
)
def generate_ip_set(
ip_addresses: Optional[Iterable[str]],
extra_addresses: Optional[Iterable[str]] = None,
config_path: Optional[Iterable[str]] = None,
) -> IPSet:
"""
Generate an IPSet from a list of IP addresses or CIDRs.
Additionally, for each IPv4 network in the list of IP addresses, also
includes the corresponding IPv6 networks.
This includes:
* IPv4-Compatible IPv6 Address (see RFC 4291, section 2.5.5.1)
* IPv4-Mapped IPv6 Address (see RFC 4291, section 2.5.5.2)
* 6to4 Address (see RFC 3056, section 2)
Args:
ip_addresses: An iterable of IP addresses or CIDRs.
extra_addresses: An iterable of IP addresses or CIDRs.
config_path: The path in the configuration for error messages.
Returns:
A new IP set.
"""
result = IPSet()
for ip in itertools.chain(ip_addresses or (), extra_addresses or ()):
try:
network = IPNetwork(ip)
except AddrFormatError as e:
raise ConfigError(
"Invalid IP range provided: %s." % (ip,), config_path
) from e
result.add(network)
# It is possible that these already exist in the set, but that's OK.
if ":" not in str(network):
result.add(IPNetwork(network).ipv6(ipv4_compatible=True))
result.add(IPNetwork(network).ipv6(ipv4_compatible=False))
result.add(_6to4(network))
return result
# IP ranges that are considered private / unroutable / don't make sense.
DEFAULT_IP_RANGE_BLACKLIST = [
# Localhost
"127.0.0.0/8",
# Private networks.
"10.0.0.0/8",
"172.16.0.0/12",
"192.168.0.0/16",
# Carrier grade NAT.
"100.64.0.0/10",
# Address registry.
"192.0.0.0/24",
# Link-local networks.
"169.254.0.0/16",
# Formerly used for 6to4 relay.
"192.88.99.0/24",
# Testing networks.
"198.18.0.0/15",
"192.0.2.0/24",
"198.51.100.0/24",
"203.0.113.0/24",
# Multicast.
"224.0.0.0/4",
# Localhost
"::1/128",
# Link-local addresses.
"fe80::/10",
# Unique local addresses.
"fc00::/7",
# Testing networks.
"2001:db8::/32",
# Multicast.
"ff00::/8",
# Site-local addresses
"fec0::/10",
]
DEFAULT_ROOM_VERSION = "9"
ROOM_COMPLEXITY_TOO_GREAT = (
"Your homeserver is unable to join rooms this large or complex. "
"Please speak to your server administrator, or upgrade your instance "
"to join this room."
)
METRICS_PORT_WARNING = """\
The metrics_port configuration option is deprecated in Synapse 0.31 in favour of
a listener. Please see
https://matrix-org.github.io/synapse/latest/metrics-howto.html
on how to configure the new listener.
--------------------------------------------------------------------------------"""
KNOWN_LISTENER_TYPES = {
"http",
"metrics",
"manhole",
"replication",
}
KNOWN_RESOURCES = {
"client",
"consent",
"federation",
"keys",
"media",
"metrics",
"openid",
"replication",
"static",
}
@attr.s(frozen=True)
class HttpResourceConfig:
names: List[str] = attr.ib(
factory=list,
validator=attr.validators.deep_iterable(attr.validators.in_(KNOWN_RESOURCES)),
)
compress: bool = attr.ib(
default=False,
validator=attr.validators.optional(attr.validators.instance_of(bool)), # type: ignore[arg-type]
)
@attr.s(slots=True, frozen=True, auto_attribs=True)
class HttpListenerConfig:
"""Object describing the http-specific parts of the config of a listener"""
x_forwarded: bool = False
resources: List[HttpResourceConfig] = attr.Factory(list)
additional_resources: Dict[str, dict] = attr.Factory(dict)
tag: Optional[str] = None
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ListenerConfig:
"""Object describing the configuration of a single listener."""
port: int = attr.ib(validator=attr.validators.instance_of(int))
bind_addresses: List[str]
type: str = attr.ib(validator=attr.validators.in_(KNOWN_LISTENER_TYPES))
tls: bool = False
# http_options is only populated if type=http
http_options: Optional[HttpListenerConfig] = None
@attr.s(slots=True, frozen=True, auto_attribs=True)
class ManholeConfig:
"""Object describing the configuration of the manhole"""
username: str = attr.ib(validator=attr.validators.instance_of(str))
password: str = attr.ib(validator=attr.validators.instance_of(str))
priv_key: Optional[Key]
pub_key: Optional[Key]
@attr.s(frozen=True)
class LimitRemoteRoomsConfig:
enabled: bool = attr.ib(validator=attr.validators.instance_of(bool), default=False)
complexity: Union[float, int] = attr.ib(
validator=attr.validators.instance_of((float, int)), # noqa
default=1.0,
)
complexity_error: str = attr.ib(
validator=attr.validators.instance_of(str),
default=ROOM_COMPLEXITY_TOO_GREAT,
)
admins_can_join: bool = attr.ib(
validator=attr.validators.instance_of(bool), default=False
)
class ServerConfig(Config):
section = "server"
def read_config(self, config: JsonDict, **kwargs: Any) -> None:
self.server_name = config["server_name"]
self.server_context = config.get("server_context", None)
try:
parse_and_validate_server_name(self.server_name)
except ValueError as e:
raise ConfigError(str(e))
self.pid_file = self.abspath(config.get("pid_file"))
self.soft_file_limit = config.get("soft_file_limit", 0)
self.daemonize = bool(config.get("daemonize"))
self.print_pidfile = bool(config.get("print_pidfile"))
self.user_agent_suffix = config.get("user_agent_suffix")
self.use_frozen_dicts = config.get("use_frozen_dicts", False)
self.serve_server_wellknown = config.get("serve_server_wellknown", False)
# Whether we should serve a "client well-known":
# (a) at .well-known/matrix/client on our client HTTP listener
# (b) in the response to /login
#
# ... which together help ensure that clients use our public_baseurl instead of
# whatever they were told by the user.
#
# For the sake of backwards compatibility with existing installations, this is
# True if public_baseurl is specified explicitly, and otherwise False. (The
# reasoning here is that we have no way of knowing that the default
# public_baseurl is actually correct for existing installations - many things
# will not work correctly, but that's (probably?) better than sending clients
# to a completely broken URL.
self.serve_client_wellknown = False
public_baseurl = config.get("public_baseurl")
if public_baseurl is None:
public_baseurl = f"https://{self.server_name}/"
logger.info("Using default public_baseurl %s", public_baseurl)
else:
self.serve_client_wellknown = True
if public_baseurl[-1] != "/":
public_baseurl += "/"
self.public_baseurl = public_baseurl
# check that public_baseurl is valid
try:
splits = urllib.parse.urlsplit(self.public_baseurl)
except Exception as e:
raise ConfigError(f"Unable to parse URL: {e}", ("public_baseurl",))
if splits.scheme not in ("https", "http"):
raise ConfigError(
f"Invalid scheme '{splits.scheme}': only https and http are supported"
)
if splits.query or splits.fragment:
raise ConfigError(
"public_baseurl cannot contain query parameters or a #-fragment"
)
# Whether to enable user presence.
presence_config = config.get("presence") or {}
self.use_presence = presence_config.get("enabled")
if self.use_presence is None:
self.use_presence = config.get("use_presence", True)
# Custom presence router module
# This is the legacy way of configuring it (the config should now be put in the modules section)
self.presence_router_module_class = None
self.presence_router_config = None
presence_router_config = presence_config.get("presence_router")
if presence_router_config:
(
self.presence_router_module_class,
self.presence_router_config,
) = load_module(presence_router_config, ("presence", "presence_router"))
# whether to enable the media repository endpoints. This should be set
# to false if the media repository is running as a separate endpoint;
# doing so ensures that we will not run cache cleanup jobs on the
# master, potentially causing inconsistency.
self.enable_media_repo = config.get("enable_media_repo", True)
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API.
self.require_auth_for_profile_requests = config.get(
"require_auth_for_profile_requests", False
)
# Whether to require sharing a room with a user to retrieve their
# profile data
self.limit_profile_requests_to_users_who_share_rooms = config.get(
"limit_profile_requests_to_users_who_share_rooms",
False,
)
# Whether to retrieve and display profile data for a user when they
# are invited to a room
self.include_profile_data_on_invite = config.get(
"include_profile_data_on_invite", True
)
if "restrict_public_rooms_to_local_users" in config and (
"allow_public_rooms_without_auth" in config
or "allow_public_rooms_over_federation" in config
):
raise ConfigError(
"Can't use 'restrict_public_rooms_to_local_users' if"
" 'allow_public_rooms_without_auth' and/or"
" 'allow_public_rooms_over_federation' is set."
)
# Check if the legacy "restrict_public_rooms_to_local_users" flag is set. This
# flag is now obsolete but we need to check it for backward-compatibility.
if config.get("restrict_public_rooms_to_local_users", False):
self.allow_public_rooms_without_auth = False
self.allow_public_rooms_over_federation = False
else:
# If set to 'true', removes the need for authentication to access the server's
# public rooms directory through the client API, meaning that anyone can
# query the room directory. Defaults to 'false'.
self.allow_public_rooms_without_auth = config.get(
"allow_public_rooms_without_auth", False
)
# If set to 'true', allows any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'false'.
self.allow_public_rooms_over_federation = config.get(
"allow_public_rooms_over_federation", False
)
default_room_version = config.get("default_room_version", DEFAULT_ROOM_VERSION)
# Ensure room version is a str
default_room_version = str(default_room_version)
if default_room_version not in KNOWN_ROOM_VERSIONS:
raise ConfigError(
"Unknown default_room_version: %s, known room versions: %s"
% (default_room_version, list(KNOWN_ROOM_VERSIONS.keys()))
)
# Get the actual room version object rather than just the identifier
self.default_room_version = KNOWN_ROOM_VERSIONS[default_room_version]
# whether to enable search. If disabled, new entries will not be inserted
# into the search tables and they will not be indexed. Users will receive
# errors when attempting to search for messages.
self.enable_search = config.get("enable_search", True)
self.filter_timeline_limit = config.get("filter_timeline_limit", 100)
# Whether we should block invites sent to users on this server
# (other than those sent by local server admins)
self.block_non_admin_invites = config.get("block_non_admin_invites", False)
# Options to control access by tracking MAU
self.limit_usage_by_mau = config.get("limit_usage_by_mau", False)
self.max_mau_value = 0
if self.limit_usage_by_mau:
self.max_mau_value = config.get("max_mau_value", 0)
self.mau_stats_only = config.get("mau_stats_only", False)
self.mau_limits_reserved_threepids = config.get(
"mau_limit_reserved_threepids", []
)
self.mau_trial_days = config.get("mau_trial_days", 0)
self.mau_appservice_trial_days = config.get("mau_appservice_trial_days", {})
self.mau_limit_alerting = config.get("mau_limit_alerting", True)
# How long to keep redacted events in the database in unredacted form
# before redacting them.
redaction_retention_period = config.get("redaction_retention_period", "7d")
if redaction_retention_period is not None:
self.redaction_retention_period: Optional[int] = self.parse_duration(
redaction_retention_period
)
else:
self.redaction_retention_period = None
# How long to keep entries in the `users_ips` table.
user_ips_max_age = config.get("user_ips_max_age", "28d")
if user_ips_max_age is not None:
self.user_ips_max_age: Optional[int] = self.parse_duration(user_ips_max_age)
else:
self.user_ips_max_age = None
# Options to disable HS
self.hs_disabled = config.get("hs_disabled", False)
self.hs_disabled_message = config.get("hs_disabled_message", "")
# Admin uri to direct users at should their instance become blocked
# due to resource constraints
self.admin_contact = config.get("admin_contact", None)
ip_range_blacklist = config.get(
"ip_range_blacklist", DEFAULT_IP_RANGE_BLACKLIST
)
# Attempt to create an IPSet from the given ranges
# Always blacklist 0.0.0.0, ::
self.ip_range_blacklist = generate_ip_set(
ip_range_blacklist, ["0.0.0.0", "::"], config_path=("ip_range_blacklist",)
)
self.ip_range_whitelist = generate_ip_set(
config.get("ip_range_whitelist", ()), config_path=("ip_range_whitelist",)
)
# The federation_ip_range_blacklist is used for backwards-compatibility
# and only applies to federation and identity servers.
if "federation_ip_range_blacklist" in config:
# Always blacklist 0.0.0.0, ::
self.federation_ip_range_blacklist = generate_ip_set(
config["federation_ip_range_blacklist"],
["0.0.0.0", "::"],
config_path=("federation_ip_range_blacklist",),
)
# 'federation_ip_range_whitelist' was never a supported configuration option.
self.federation_ip_range_whitelist = None
else:
            # No backwards-compatibility required, as federation_ip_range_blacklist
# is not given. Default to ip_range_blacklist and ip_range_whitelist.
self.federation_ip_range_blacklist = self.ip_range_blacklist
self.federation_ip_range_whitelist = self.ip_range_whitelist
# (undocumented) option for torturing the worker-mode replication a bit,
# for testing. The value defines the number of milliseconds to pause before
# sending out any replication updates.
self.replication_torture_level = config.get("replication_torture_level")
# Whether to require a user to be in the room to add an alias to it.
# Defaults to True.
self.require_membership_for_aliases = config.get(
"require_membership_for_aliases", True
)
# Whether to allow per-room membership profiles through the send of membership
# events with profile information that differ from the target's global profile.
self.allow_per_room_profiles = config.get("allow_per_room_profiles", True)
# The maximum size an avatar can have, in bytes.
self.max_avatar_size = config.get("max_avatar_size")
if self.max_avatar_size is not None:
self.max_avatar_size = self.parse_size(self.max_avatar_size)
# The MIME types allowed for an avatar.
self.allowed_avatar_mimetypes = config.get("allowed_avatar_mimetypes")
if self.allowed_avatar_mimetypes and not isinstance(
self.allowed_avatar_mimetypes,
list,
):
raise ConfigError("allowed_avatar_mimetypes must be a list")
self.listeners = [parse_listener_def(x) for x in config.get("listeners", [])]
# no_tls is not really supported any more, but let's grandfather it in
# here.
if config.get("no_tls", False):
l2 = []
for listener in self.listeners:
if listener.tls:
logger.info(
"Ignoring TLS-enabled listener on port %i due to no_tls",
listener.port,
)
else:
l2.append(listener)
self.listeners = l2
self.web_client_location = config.get("web_client_location", None)
# Non-HTTP(S) web client location is not supported.
if self.web_client_location and not (
self.web_client_location.startswith("http://")
or self.web_client_location.startswith("https://")
):
raise ConfigError("web_client_location must point to a HTTP(S) URL.")
self.gc_thresholds = read_gc_thresholds(config.get("gc_thresholds", None))
self.gc_seconds = self.read_gc_intervals(config.get("gc_min_interval", None))
self.limit_remote_rooms = LimitRemoteRoomsConfig(
**(config.get("limit_remote_rooms") or {})
)
bind_port = config.get("bind_port")
if bind_port:
if config.get("no_tls", False):
raise ConfigError("no_tls is incompatible with bind_port")
self.listeners = []
bind_host = config.get("bind_host", "")
gzip_responses = config.get("gzip_responses", True)
http_options = HttpListenerConfig(
resources=[
HttpResourceConfig(names=["client"], compress=gzip_responses),
HttpResourceConfig(names=["federation"]),
],
)
self.listeners.append(
ListenerConfig(
port=bind_port,
bind_addresses=[bind_host],
tls=True,
type="http",
http_options=http_options,
)
)
unsecure_port = config.get("unsecure_port", bind_port - 400)
if unsecure_port:
self.listeners.append(
ListenerConfig(
port=unsecure_port,
bind_addresses=[bind_host],
tls=False,
type="http",
http_options=http_options,
)
)
manhole = config.get("manhole")
if manhole:
self.listeners.append(
ListenerConfig(
port=manhole,
bind_addresses=["127.0.0.1"],
type="manhole",
)
)
manhole_settings = config.get("manhole_settings") or {}
validate_config(
_MANHOLE_SETTINGS_SCHEMA, manhole_settings, ("manhole_settings",)
)
manhole_username = manhole_settings.get("username", "matrix")
manhole_password = manhole_settings.get("password", "rabbithole")
manhole_priv_key_path = manhole_settings.get("ssh_priv_key_path")
manhole_pub_key_path = manhole_settings.get("ssh_pub_key_path")
manhole_priv_key = None
if manhole_priv_key_path is not None:
try:
manhole_priv_key = Key.fromFile(manhole_priv_key_path)
except Exception as e:
raise ConfigError(
f"Failed to read manhole private key file {manhole_priv_key_path}"
) from e
manhole_pub_key = None
if manhole_pub_key_path is not None:
try:
manhole_pub_key = Key.fromFile(manhole_pub_key_path)
except Exception as e:
raise ConfigError(
f"Failed to read manhole public key file {manhole_pub_key_path}"
) from e
self.manhole_settings = ManholeConfig(
username=manhole_username,
password=manhole_password,
priv_key=manhole_priv_key,
pub_key=manhole_pub_key,
)
metrics_port = config.get("metrics_port")
if metrics_port:
logger.warning(METRICS_PORT_WARNING)
self.listeners.append(
ListenerConfig(
port=metrics_port,
bind_addresses=[config.get("metrics_bind_host", "127.0.0.1")],
type="http",
http_options=HttpListenerConfig(
resources=[HttpResourceConfig(names=["metrics"])]
),
)
)
self.cleanup_extremities_with_dummy_events = config.get(
"cleanup_extremities_with_dummy_events", True
)
# The number of forward extremities in a room needed to send a dummy event.
self.dummy_events_threshold = config.get("dummy_events_threshold", 10)
self.enable_ephemeral_messages = config.get("enable_ephemeral_messages", False)
# Inhibits the /requestToken endpoints from returning an error that might leak
# information about whether an e-mail address is in use or not on this
# homeserver, and instead return a 200 with a fake sid if this kind of error is
# met, without sending anything.
# This is a compromise between sending an email, which could be a spam vector,
# and letting the client know which email address is bound to an account and
# which one isn't.
self.request_token_inhibit_3pid_errors = config.get(
"request_token_inhibit_3pid_errors",
False,
)
# Whitelist of domain names that given next_link parameters must have
next_link_domain_whitelist: Optional[List[str]] = config.get(
"next_link_domain_whitelist"
)
self.next_link_domain_whitelist: Optional[Set[str]] = None
if next_link_domain_whitelist is not None:
if not isinstance(next_link_domain_whitelist, list):
raise ConfigError("'next_link_domain_whitelist' must be a list")
# Turn the list into a set to improve lookup speed.
self.next_link_domain_whitelist = set(next_link_domain_whitelist)
templates_config = config.get("templates") or {}
if not isinstance(templates_config, dict):
raise ConfigError("The 'templates' section must be a dictionary")
self.custom_template_directory: Optional[str] = templates_config.get(
"custom_template_directory"
)
if self.custom_template_directory is not None and not isinstance(
self.custom_template_directory, str
):
raise ConfigError("'custom_template_directory' must be a string")
self.use_account_validity_in_account_status: bool = (
config.get("use_account_validity_in_account_status") or False
)
self.rooms_to_exclude_from_sync: List[str] = (
config.get("exclude_rooms_from_sync") or []
)
def has_tls_listener(self) -> bool:
return any(listener.tls for listener in self.listeners)
def generate_config_section(
self,
config_dir_path: str,
data_dir_path: str,
server_name: str,
open_private_ports: bool,
listeners: Optional[List[dict]],
**kwargs: Any,
) -> str:
|
def read_arguments(self, args: argparse.Namespace) -> None:
if args.manhole is not None:
self.manhole = args.manhole
if args.daemonize is not None:
self.daemonize = args.daemonize
if args.print_pidfile is not None:
self.print_pidfile = args.print_pidfile
@staticmethod
def add_arguments(parser: argparse.ArgumentParser) -> None:
server_group = parser.add_argument_group("server")
server_group.add_argument(
"-D",
"--daemonize",
action="store_true",
default=None,
help="Daemonize the homeserver",
)
server_group.add_argument(
"--print-pidfile",
action="store_true",
default=None,
help="Print the path to the pidfile just before daemonizing",
)
server_group.add_argument(
"--manhole",
metavar="PORT",
dest="manhole",
type=int,
help="Turn on the twisted telnet manhole service on the given port.",
)
def read_gc_intervals(self, durations: Any) -> Optional[Tuple[float, float, float]]:
"""Reads the three durations for the GC min interval option, returning seconds."""
if durations is None:
return None
try:
if len(durations) != 3:
raise ValueError()
return (
self.parse_duration(durations[0]) / 1000,
self.parse_duration(durations[1]) / 1000,
self.parse_duration(durations[2]) / 1000,
)
except Exception:
raise ConfigError(
"Value of `gc_min_interval` must be a list of three durations if set"
)
def is_threepid_reserved(
reserved_threepids: List[JsonDict], threepid: JsonDict
) -> bool:
"""Check the threepid against the reserved threepid config
Args:
reserved_threepids: List of reserved threepids
threepid: The threepid to test for
Returns:
        True if the threepid under test is a reserved user
"""
for tp in reserved_threepids:
if threepid["medium"] == tp["medium"] and threepid["address"] == tp["address"]:
return True
return False
def read_gc_thresholds(
thresholds: Optional[List[Any]],
) -> Optional[Tuple[int, int, int]]:
"""Reads the three integer thresholds for garbage collection. Ensures that
the thresholds are integers if thresholds are supplied.
"""
if thresholds is None:
return None
try:
assert len(thresholds) == 3
return int(thresholds[0]), int(thresholds[1]), int(thresholds[2])
except Exception:
raise ConfigError(
"Value of `gc_threshold` must be a list of three integers if set"
)
def parse_listener_def(listener: Any) -> ListenerConfig:
"""parse a listener config from the config file"""
listener_type = listener["type"]
port = listener.get("port")
if not isinstance(port, int):
raise ConfigError("Listener configuration is lacking a valid 'port' option")
tls = listener.get("tls", False)
bind_addresses = listener.get("bind_addresses", [])
bind_address = listener.get("bind_address")
# if bind_address was specified, add it to the list of addresses
if bind_address:
bind_addresses.append(bind_address)
# if we still have an empty list of addresses, use the default list
if not bind_addresses:
if listener_type == "metrics":
# the metrics listener doesn't support IPv6
bind_addresses.append("0.0.0.0")
else:
bind_addresses.extend(DEFAULT_BIND_ADDRESSES)
http_config = None
if listener_type == "http":
try:
resources = [
HttpResourceConfig(**res) for res in listener.get("resources", [])
]
except ValueError as e:
raise ConfigError("Unknown listener resource") from e
http_config = HttpListenerConfig(
x_forwarded=listener.get("x_forwarded", False),
resources=resources,
additional_resources=listener.get("additional_resources", {}),
tag=listener.get("tag"),
)
return ListenerConfig(port, bind_addresses, listener_type, tls, http_config)
_MANHOLE_SETTINGS_SCHEMA = {
"type": "object",
"properties": {
"username": {"type": "string"},
"password": {"type": "string"},
"ssh_priv_key_path": {"type": "string"},
"ssh_pub_key_path": {"type": "string"},
},
}
|
ip_range_blacklist = "\n".join(
" # - '%s'" % ip for ip in DEFAULT_IP_RANGE_BLACKLIST
)
_, bind_port = parse_and_validate_server_name(server_name)
if bind_port is not None:
unsecure_port = bind_port - 400
else:
bind_port = 8448
unsecure_port = 8008
pid_file = os.path.join(data_dir_path, "homeserver.pid")
# Bring DEFAULT_ROOM_VERSION into the local-scope for use in the
# default config string
default_room_version = DEFAULT_ROOM_VERSION
secure_listeners = []
unsecure_listeners = []
private_addresses = ["::1", "127.0.0.1"]
if listeners:
for listener in listeners:
if listener["tls"]:
secure_listeners.append(listener)
else:
# If we don't want open ports we need to bind the listeners
# to some address other than 0.0.0.0. Here we chose to use
# localhost.
# If the addresses are already bound we won't overwrite them
# however.
if not open_private_ports:
listener.setdefault("bind_addresses", private_addresses)
unsecure_listeners.append(listener)
secure_http_bindings = indent(
yaml.dump(secure_listeners), " " * 10
).lstrip()
unsecure_http_bindings = indent(
yaml.dump(unsecure_listeners), " " * 10
).lstrip()
if not unsecure_listeners:
unsecure_http_bindings = (
"""- port: %(unsecure_port)s
tls: false
type: http
x_forwarded: true"""
% locals()
)
if not open_private_ports:
unsecure_http_bindings += (
"\n bind_addresses: ['::1', '127.0.0.1']"
)
unsecure_http_bindings += """
resources:
- names: [client, federation]
compress: false"""
if listeners:
# comment out this block
unsecure_http_bindings = "#" + re.sub(
"\n {10}",
lambda match: match.group(0) + "#",
unsecure_http_bindings,
)
if not secure_listeners:
secure_http_bindings = (
"""#- port: %(bind_port)s
# type: http
# tls: true
# resources:
# - names: [client, federation]"""
% locals()
)
return (
"""\
## Server ##
# The public-facing domain of the server
#
# The server_name name will appear at the end of usernames and room addresses
# created on this server. For example if the server_name was example.com,
# usernames on this server would be in the format @user:example.com
#
# In most cases you should avoid using a matrix specific subdomain such as
# matrix.example.com or synapse.example.com as the server_name for the same
# reasons you wouldn't use [email protected] as your email address.
# See https://matrix-org.github.io/synapse/latest/delegate.html
# for information on how to host Synapse on a subdomain while preserving
# a clean server_name.
#
# The server_name cannot be changed later so it is important to
# configure this correctly before you start Synapse. It should be all
# lowercase and may contain an explicit port.
# Examples: matrix.org, localhost:8080
#
server_name: "%(server_name)s"
# When running as a daemon, the file to store the pid in
#
pid_file: %(pid_file)s
# The absolute URL to the web client which / will redirect to.
#
#web_client_location: https://riot.example.com/
# The public-facing base URL that clients use to access this Homeserver (not
# including _matrix/...). This is the same URL a user might enter into the
# 'Custom Homeserver URL' field on their client. If you use Synapse with a
# reverse proxy, this should be the URL to reach Synapse via the proxy.
# Otherwise, it should be the URL to reach Synapse's client HTTP listener (see
# 'listeners' below).
#
# Defaults to 'https://<server_name>/'.
#
#public_baseurl: https://example.com/
# Uncomment the following to tell other servers to send federation traffic on
# port 443.
#
# By default, other servers will try to reach our server on port 8448, which can
# be inconvenient in some environments.
#
# Provided 'https://<server_name>/' on port 443 is routed to Synapse, this
# option configures Synapse to serve a file at
# 'https://<server_name>/.well-known/matrix/server'. This will tell other
# servers to send traffic to port 443 instead.
#
# See https://matrix-org.github.io/synapse/latest/delegate.html for more
# information.
#
# Defaults to 'false'.
#
#serve_server_wellknown: true
# Set the soft limit on the number of file descriptors synapse can use
# Zero is used to indicate synapse should set the soft limit to the
# hard limit.
#
#soft_file_limit: 0
# Presence tracking allows users to see the state (e.g online/offline)
# of other local and remote users.
#
presence:
# Uncomment to disable presence tracking on this homeserver. This option
# replaces the previous top-level 'use_presence' option.
#
#enabled: false
# Whether to require authentication to retrieve profile data (avatars,
# display names) of other users through the client API. Defaults to
# 'false'. Note that profile data is also available via the federation
# API, unless allow_profile_lookup_over_federation is set to false.
#
#require_auth_for_profile_requests: true
# Uncomment to require a user to share a room with another user in order
# to retrieve their profile information. Only checked on Client-Server
# requests. Profile requests from other servers should be checked by the
# requesting server. Defaults to 'false'.
#
#limit_profile_requests_to_users_who_share_rooms: true
# Uncomment to prevent a user's profile data from being retrieved and
# displayed in a room until they have joined it. By default, a user's
# profile data is included in an invite event, regardless of the values
# of the above two settings, and whether or not the users share a server.
# Defaults to 'true'.
#
#include_profile_data_on_invite: false
# If set to 'true', removes the need for authentication to access the server's
# public rooms directory through the client API, meaning that anyone can
# query the room directory. Defaults to 'false'.
#
#allow_public_rooms_without_auth: true
# If set to 'true', allows any other homeserver to fetch the server's public
# rooms directory via federation. Defaults to 'false'.
#
#allow_public_rooms_over_federation: true
# The default room version for newly created rooms.
#
# Known room versions are listed here:
# https://spec.matrix.org/latest/rooms/#complete-list-of-room-versions
#
# For example, for room version 1, default_room_version should be set
# to "1".
#
#default_room_version: "%(default_room_version)s"
# The GC threshold parameters to pass to `gc.set_threshold`, if defined
#
#gc_thresholds: [700, 10, 10]
# The minimum time in seconds between each GC for a generation, regardless of
# the GC thresholds. This ensures that we don't do GC too frequently.
#
# A value of `[1s, 10s, 30s]` indicates that a second must pass between consecutive
# generation 0 GCs, etc.
#
# Defaults to `[1s, 10s, 30s]`.
#
#gc_min_interval: [0.5s, 30s, 1m]
# Set the limit on the returned events in the timeline in the get
# and sync operations. The default value is 100. -1 means no upper limit.
#
# Uncomment the following to increase the limit to 5000.
#
#filter_timeline_limit: 5000
# Whether room invites to users on this server should be blocked
# (except those sent by local server admins). The default is False.
#
#block_non_admin_invites: true
# Room searching
#
# If disabled, new messages will not be indexed for searching and users
# will receive errors when searching for messages. Defaults to enabled.
#
#enable_search: false
# Prevent outgoing requests from being sent to the following blacklisted IP address
# CIDR ranges. If this option is not specified then it defaults to private IP
# address ranges (see the example below).
#
# The blacklist applies to the outbound requests for federation, identity servers,
# push servers, and for checking key validity for third-party invite events.
#
# (0.0.0.0 and :: are always blacklisted, whether or not they are explicitly
# listed here, since they correspond to unroutable addresses.)
#
# This option replaces federation_ip_range_blacklist in Synapse v1.25.0.
#
# Note: The value is ignored when an HTTP proxy is in use
#
#ip_range_blacklist:
%(ip_range_blacklist)s
# List of IP address CIDR ranges that should be allowed for federation,
# identity servers, push servers, and for checking key validity for
# third-party invite events. This is useful for specifying exceptions to
# wide-ranging blacklisted target IP ranges - e.g. for communication with
# a push server only visible in your network.
#
# This whitelist overrides ip_range_blacklist and defaults to an empty
# list.
#
#ip_range_whitelist:
# - '192.168.1.1'
# List of ports that Synapse should listen on, their purpose and their
# configuration.
#
# Options for each listener include:
#
# port: the TCP port to bind to
#
# bind_addresses: a list of local addresses to listen on. The default is
# 'all local interfaces'.
#
# type: the type of listener. Normally 'http', but other valid options are:
# 'manhole' (see https://matrix-org.github.io/synapse/latest/manhole.html),
# 'metrics' (see https://matrix-org.github.io/synapse/latest/metrics-howto.html),
# 'replication' (see https://matrix-org.github.io/synapse/latest/workers.html).
#
# tls: set to true to enable TLS for this listener. Will use the TLS
# key/cert specified in tls_private_key_path / tls_certificate_path.
#
# x_forwarded: Only valid for an 'http' listener. Set to true to use the
# X-Forwarded-For header as the client IP. Useful when Synapse is
# behind a reverse-proxy.
#
# resources: Only valid for an 'http' listener. A list of resources to host
# on this port. Options for each resource are:
#
# names: a list of names of HTTP resources. See below for a list of
# valid resource names.
#
# compress: set to true to enable HTTP compression for this resource.
#
# additional_resources: Only valid for an 'http' listener. A map of
# additional endpoints which should be loaded via dynamic modules.
#
# Valid resource names are:
#
# client: the client-server API (/_matrix/client), and the synapse admin
# API (/_synapse/admin). Also implies 'media' and 'static'.
#
# consent: user consent forms (/_matrix/consent).
# See https://matrix-org.github.io/synapse/latest/consent_tracking.html.
#
# federation: the server-server API (/_matrix/federation). Also implies
# 'media', 'keys', 'openid'
#
# keys: the key discovery API (/_matrix/key).
#
# media: the media API (/_matrix/media).
#
# metrics: the metrics interface.
# See https://matrix-org.github.io/synapse/latest/metrics-howto.html.
#
# openid: OpenID authentication.
#
# replication: the HTTP replication API (/_synapse/replication).
# See https://matrix-org.github.io/synapse/latest/workers.html.
#
# static: static resources under synapse/static (/_matrix/static). (Mostly
# useful for 'fallback authentication'.)
#
listeners:
# TLS-enabled listener: for when matrix traffic is sent directly to synapse.
#
# Disabled by default. To enable it, uncomment the following. (Note that you
# will also need to give Synapse a TLS key and certificate: see the TLS section
# below.)
#
%(secure_http_bindings)s
# Unsecure HTTP listener: for when matrix traffic passes through a reverse proxy
# that unwraps TLS.
#
# If you plan to use a reverse proxy, please see
# https://matrix-org.github.io/synapse/latest/reverse_proxy.html.
#
%(unsecure_http_bindings)s
# example additional_resources:
#
#additional_resources:
# "/_matrix/my/custom/endpoint":
# module: my_module.CustomRequestHandler
# config: {}
# Turn on the twisted ssh manhole service on localhost on the given
# port.
#
#- port: 9000
# bind_addresses: ['::1', '127.0.0.1']
# type: manhole
# Connection settings for the manhole
#
manhole_settings:
# The username for the manhole. This defaults to 'matrix'.
#
#username: manhole
# The password for the manhole. This defaults to 'rabbithole'.
#
#password: mypassword
# The private and public SSH key pair used to encrypt the manhole traffic.
# If these are left unset, then hardcoded and non-secret keys are used,
# which could allow traffic to be intercepted if sent over a public network.
#
#ssh_priv_key_path: %(config_dir_path)s/id_rsa
#ssh_pub_key_path: %(config_dir_path)s/id_rsa.pub
# Forward extremities can build up in a room due to networking delays between
# homeservers. Once this happens in a large room, calculation of the state of
# that room can become quite expensive. To mitigate this, once the number of
# forward extremities reaches a given threshold, Synapse will send an
# org.matrix.dummy_event event, which will reduce the forward extremities
# in the room.
#
# This setting defines the threshold (i.e. number of forward extremities in the
# room) at which dummy events are sent. The default value is 10.
#
#dummy_events_threshold: 5
## Homeserver blocking ##
# How to reach the server admin, used in ResourceLimitError
#
#admin_contact: 'mailto:[email protected]'
# Global blocking
#
#hs_disabled: false
#hs_disabled_message: 'Human readable reason for why the HS is blocked'
# Monthly Active User Blocking
#
# Used in cases where the admin or server owner wants to limit to the
# number of monthly active users.
#
# 'limit_usage_by_mau' disables/enables monthly active user blocking. When
# enabled and a limit is reached the server returns a 'ResourceLimitError'
# with error type Codes.RESOURCE_LIMIT_EXCEEDED
#
# 'max_mau_value' is the hard limit of monthly active users above which
# the server will start blocking user actions.
#
# 'mau_trial_days' is a means to add a grace period for active users. It
# means that users must be active for this number of days before they
# can be considered active and guards against the case where lots of users
# sign up in a short space of time never to return after their initial
# session.
#
# The option `mau_appservice_trial_days` is similar to `mau_trial_days`, but
# applies a different trial number if the user was registered by an appservice.
# A value of 0 means no trial days are applied. Appservices not listed in this
# dictionary use the value of `mau_trial_days` instead.
#
# 'mau_limit_alerting' is a means of limiting client side alerting
# should the mau limit be reached. This is useful for small instances
# where the admin has 5 mau seats (say) for 5 specific people and no
# interest in increasing the mau limit further. Defaults to True, which
# means that alerting is enabled.
#
#limit_usage_by_mau: false
#max_mau_value: 50
#mau_trial_days: 2
#mau_limit_alerting: false
#mau_appservice_trial_days:
# "appservice-id": 1
# If enabled, the metrics for the number of monthly active users will
# be populated; however, no one will be limited. If limit_usage_by_mau
# is true, this is implied to be true.
#
#mau_stats_only: false
# Sometimes the server admin will want to ensure certain accounts are
# never blocked by mau checking. These accounts are specified here.
#
#mau_limit_reserved_threepids:
# - medium: 'email'
# address: '[email protected]'
# Used by phonehome stats to group together related servers.
#server_context: context
# Resource-constrained homeserver settings
#
# When this is enabled, the room "complexity" will be checked before a user
# joins a new remote room. If it is above the complexity limit, the server will
# disallow joining, or will instantly leave.
#
# Room complexity is an arbitrary measure based on factors such as the number of
# users in the room.
#
limit_remote_rooms:
# Uncomment to enable room complexity checking.
#
#enabled: true
# the limit above which rooms cannot be joined. The default is 1.0.
#
#complexity: 0.5
# override the error which is returned when the room is too complex.
#
#complexity_error: "This room is too complex."
# allow server admins to join complex rooms. Default is false.
#
#admins_can_join: true
# Whether to require a user to be in the room to add an alias to it.
# Defaults to 'true'.
#
#require_membership_for_aliases: false
# Whether to allow per-room membership profiles through the send of membership
# events with profile information that differ from the target's global profile.
# Defaults to 'true'.
#
#allow_per_room_profiles: false
# The largest allowed file size for a user avatar. Defaults to no restriction.
#
# Note that user avatar changes will not work if this is set without
# using Synapse's media repository.
#
#max_avatar_size: 10M
# The MIME types allowed for user avatars. Defaults to no restriction.
#
# Note that user avatar changes will not work if this is set without
# using Synapse's media repository.
#
#allowed_avatar_mimetypes: ["image/png", "image/jpeg", "image/gif"]
# How long to keep redacted events in unredacted form in the database. After
# this period redacted events get replaced with their redacted form in the DB.
#
# Defaults to `7d`. Set to `null` to disable.
#
#redaction_retention_period: 28d
# How long to track users' last seen time and IPs in the database.
#
# Defaults to `28d`. Set to `null` to disable clearing out of old rows.
#
#user_ips_max_age: 14d
# Inhibits the /requestToken endpoints from returning an error that might leak
# information about whether an e-mail address is in use or not on this
# homeserver.
# Note that for some endpoints the error is that the e-mail address is already
# in use, while for others the error is that the address is not in use.
# If this option is enabled, instead of returning an error, these endpoints will
# act as if no error happened and return a fake session ID ('sid') to clients.
#
#request_token_inhibit_3pid_errors: true
# A list of domains that the domain portion of 'next_link' parameters
# must match.
#
# This parameter is optionally provided by clients while requesting
# validation of an email or phone number, and maps to a link that
# users will be automatically redirected to after validation
# succeeds. Clients can make use of this parameter to aid the validation
# process.
#
# The whitelist is applied whether the homeserver or an
# identity server is handling validation.
#
# The default value is no whitelist functionality; all domains are
# allowed. Setting this value to an empty list will instead disallow
# all domains.
#
#next_link_domain_whitelist: ["matrix.org"]
# Templates to use when generating email or HTML page contents.
#
templates:
# Directory in which Synapse will try to find template files to use to generate
# email or HTML page contents.
# If not set, or a file is not found within the template directory, a default
# template from within the Synapse package will be used.
#
# See https://matrix-org.github.io/synapse/latest/templates.html for more
# information about using custom templates.
#
#custom_template_directory: /path/to/custom/templates/
# List of rooms to exclude from sync responses. This is useful for server
# administrators wishing to group users into a room without these users being able
# to see it from their client.
#
# By default, no room is excluded.
#
#exclude_rooms_from_sync:
# - !foo:example.com
"""
% locals()
)
|
ptspodrequest.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated Thu Dec 13 17:56:06 2018 by generateDS.py version 2.29.5.
# Python 3.6.5 (default, May 19 2018, 11:27:13) [GCC 4.2.1 Compatible Apple LLVM 9.0.0 (clang-900.0.39.2)]
#
# Command line options:
# ('-o', '../python/PTSPodRequest.xsd.py')
#
# Command line arguments:
# PTSPodRequest.xsd
#
# Command line:
# /Users/danielkobina/Documents/Open/bin/generateDS -o "../python/PTSPodRequest.xsd.py" PTSPodRequest.xsd
#
# Current working directory (os.getcwd()):
# schemas
#
import sys
import re as re_
import base64
import datetime as datetime_
import warnings as warnings_
try:
from lxml import etree as etree_
except ImportError:
from xml.etree import ElementTree as etree_
Validate_simpletypes_ = True
if sys.version_info.major == 2:
BaseStrType_ = basestring
else:
BaseStrType_ = str
def parsexml_(infile, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
doc = etree_.parse(infile, parser=parser, **kwargs)
return doc
def parsexmlstring_(instring, parser=None, **kwargs):
if parser is None:
# Use the lxml ElementTree compatible parser so that, e.g.,
# we ignore comments.
try:
parser = etree_.ETCompatXMLParser()
except AttributeError:
# fallback to xml.etree
parser = etree_.XMLParser()
element = etree_.fromstring(instring, parser=parser, **kwargs)
return element
#
# Namespace prefix definition table (and other attributes, too)
#
# The module generatedsnamespaces, if it is importable, must contain
# a dictionary named GeneratedsNamespaceDefs. This Python dictionary
# should map element type names (strings) to XML schema namespace prefix
# definitions. The export method for any class for which there is
# a namespace prefix definition, will export that definition in the
# XML representation of that element. See the export method of
# any generated element type class for a example of the use of this
# table.
# A sample table is:
#
# # File: generatedsnamespaces.py
#
# GenerateDSNamespaceDefs = {
# "ElementtypeA": "http://www.xxx.com/namespaceA",
# "ElementtypeB": "http://www.xxx.com/namespaceB",
# }
#
try:
from generatedsnamespaces import GenerateDSNamespaceDefs as GenerateDSNamespaceDefs_
except ImportError:
GenerateDSNamespaceDefs_ = {}
#
# The root super-class for element type classes
#
# Calls to the methods in these classes are generated by generateDS.py.
# You can replace these methods by re-implementing the following class
# in a module named generatedssuper.py.
try:
from generatedssuper import GeneratedsSuper
except ImportError as exp:
class GeneratedsSuper(object):
tzoff_pattern = re_.compile(r'(\+|-)((0\d|1[0-3]):[0-5]\d|14:00)$')
class _FixedOffsetTZ(datetime_.tzinfo):
def __init__(self, offset, name):
self.__offset = datetime_.timedelta(minutes=offset)
self.__name = name
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return None
def gds_format_string(self, input_data, input_name=''):
return input_data
def gds_validate_string(self, input_data, node=None, input_name=''):
if not input_data:
return ''
else:
return input_data
def gds_format_base64(self, input_data, input_name=''):
return base64.b64encode(input_data)
def gds_validate_base64(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer(self, input_data, input_name=''):
return '%d' % input_data
def gds_validate_integer(self, input_data, node=None, input_name=''):
return input_data
def gds_format_integer_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_integer_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
int(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of integers')
return values
def gds_format_float(self, input_data, input_name=''):
return ('%.15f' % input_data).rstrip('0')
def gds_validate_float(self, input_data, node=None, input_name=''):
return input_data
def gds_format_float_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_float_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of floats')
return values
def gds_format_double(self, input_data, input_name=''):
return '%e' % input_data
def gds_validate_double(self, input_data, node=None, input_name=''):
return input_data
def gds_format_double_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_double_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
try:
float(value)
except (TypeError, ValueError):
raise_parse_error(node, 'Requires sequence of doubles')
return values
def gds_format_boolean(self, input_data, input_name=''):
return ('%s' % input_data).lower()
def gds_validate_boolean(self, input_data, node=None, input_name=''):
return input_data
def gds_format_boolean_list(self, input_data, input_name=''):
return '%s' % ' '.join(input_data)
def gds_validate_boolean_list(
self, input_data, node=None, input_name=''):
values = input_data.split()
for value in values:
if value not in ('true', '1', 'false', '0', ):
raise_parse_error(
node,
'Requires sequence of booleans '
'("true", "1", "false", "0")')
return values
def gds_validate_datetime(self, input_data, node=None, input_name=''):
return input_data
def gds_format_datetime(self, input_data, input_name=''):
if input_data.microsecond == 0:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%04d-%02d-%02dT%02d:%02d:%02d.%s' % (
input_data.year,
input_data.month,
input_data.day,
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
@classmethod
def gds_parse_datetime(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
time_parts = input_data.split('.')
if len(time_parts) > 1:
micro_seconds = int(float('0.' + time_parts[1]) * 1000000)
input_data = '%s.%s' % (time_parts[0], micro_seconds, )
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(
input_data, '%Y-%m-%dT%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt
def gds_validate_date(self, input_data, node=None, input_name=''):
return input_data
def gds_format_date(self, input_data, input_name=''):
_svalue = '%04d-%02d-%02d' % (
input_data.year,
input_data.month,
input_data.day,
)
try:
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(
hours, minutes)
except AttributeError:
pass
return _svalue
@classmethod
def gds_parse_date(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
dt = datetime_.datetime.strptime(input_data, '%Y-%m-%d')
dt = dt.replace(tzinfo=tz)
return dt.date()
def gds_validate_time(self, input_data, node=None, input_name=''):
return input_data
def gds_format_time(self, input_data, input_name=''):
|
def gds_validate_simple_patterns(self, patterns, target):
# pat is a list of lists of strings/patterns. We should:
# - AND the outer elements
# - OR the inner elements
found1 = True
for patterns1 in patterns:
found2 = False
for patterns2 in patterns1:
if re_.search(patterns2, target) is not None:
found2 = True
break
if not found2:
found1 = False
break
return found1
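        # Illustrative note (not from the generated code): with, e.g.,
        #     patterns = [['^[0-9]+$', '^[A-Z]+$'], ['^.{1,5}$']]
        # a target matches only if it is all digits OR all capitals (inner OR)
        # AND is at most five characters long (outer AND).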
@classmethod
def gds_parse_time(cls, input_data):
tz = None
if input_data[-1] == 'Z':
tz = GeneratedsSuper._FixedOffsetTZ(0, 'UTC')
input_data = input_data[:-1]
else:
results = GeneratedsSuper.tzoff_pattern.search(input_data)
if results is not None:
tzoff_parts = results.group(2).split(':')
tzoff = int(tzoff_parts[0]) * 60 + int(tzoff_parts[1])
if results.group(1) == '-':
tzoff *= -1
tz = GeneratedsSuper._FixedOffsetTZ(
tzoff, results.group(0))
input_data = input_data[:-6]
if len(input_data.split('.')) > 1:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S.%f')
else:
dt = datetime_.datetime.strptime(input_data, '%H:%M:%S')
dt = dt.replace(tzinfo=tz)
return dt.time()
def gds_str_lower(self, instring):
return instring.lower()
def get_path_(self, node):
path_list = []
self.get_path_list_(node, path_list)
path_list.reverse()
path = '/'.join(path_list)
return path
Tag_strip_pattern_ = re_.compile(r'\{.*\}')
def get_path_list_(self, node, path_list):
if node is None:
return
tag = GeneratedsSuper.Tag_strip_pattern_.sub('', node.tag)
if tag:
path_list.append(tag)
self.get_path_list_(node.getparent(), path_list)
def get_class_obj_(self, node, default_class=None):
class_obj1 = default_class
if 'xsi' in node.nsmap:
classname = node.get('{%s}type' % node.nsmap['xsi'])
if classname is not None:
names = classname.split(':')
if len(names) == 2:
classname = names[1]
class_obj2 = globals().get(classname)
if class_obj2 is not None:
class_obj1 = class_obj2
return class_obj1
def gds_build_any(self, node, type_name=None):
return None
@classmethod
def gds_reverse_node_mapping(cls, mapping):
return dict(((v, k) for k, v in mapping.iteritems()))
@staticmethod
def gds_encode(instring):
if sys.version_info.major == 2:
return instring.encode(ExternalEncoding)
else:
return instring
@staticmethod
def convert_unicode(instring):
if isinstance(instring, str):
result = quote_xml(instring)
elif sys.version_info.major == 2 and isinstance(instring, unicode):
result = quote_xml(instring).encode('utf8')
else:
result = GeneratedsSuper.gds_encode(str(instring))
return result
def __eq__(self, other):
if type(self) != type(other):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def getSubclassFromModule_(module, class_):
'''Get the subclass of a class from a specific module.'''
name = class_.__name__ + 'Sub'
if hasattr(module, name):
return getattr(module, name)
else:
return None
#
# If you have installed IPython you can uncomment and use the following.
# IPython is available from http://ipython.scipy.org/.
#
## from IPython.Shell import IPShellEmbed
## args = ''
## ipshell = IPShellEmbed(args,
## banner = 'Dropping into IPython',
## exit_msg = 'Leaving Interpreter, back to program.')
# Then use the following line where and when you want to drop into the
# IPython shell:
# ipshell('<some message> -- Entering ipshell.\nHit Ctrl-D to exit')
#
# Globals
#
ExternalEncoding = 'utf-8'
Tag_pattern_ = re_.compile(r'({.*})?(.*)')
String_cleanup_pat_ = re_.compile(r"[\n\r\s]+")
Namespace_extract_pat_ = re_.compile(r'{(.*)}(.*)')
CDATA_pattern_ = re_.compile(r"<!\[CDATA\[.*?\]\]>", re_.DOTALL)
# Change this to redirect the generated superclass module to use a
# specific subclass module.
CurrentSubclassModule_ = None
#
# Support/utility functions.
#
def showIndent(outfile, level, pretty_print=True):
if pretty_print:
for idx in range(level):
outfile.write(' ')
def quote_xml(inStr):
"Escape markup chars, but do not modify CDATA sections."
if not inStr:
return ''
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s2 = ''
pos = 0
matchobjects = CDATA_pattern_.finditer(s1)
for mo in matchobjects:
s3 = s1[pos:mo.start()]
s2 += quote_xml_aux(s3)
s2 += s1[mo.start():mo.end()]
pos = mo.end()
s3 = s1[pos:]
s2 += quote_xml_aux(s3)
return s2
def quote_xml_aux(inStr):
s1 = inStr.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
return s1
def quote_attrib(inStr):
s1 = (isinstance(inStr, BaseStrType_) and inStr or '%s' % inStr)
s1 = s1.replace('&', '&')
s1 = s1.replace('<', '<')
s1 = s1.replace('>', '>')
if '"' in s1:
if "'" in s1:
s1 = '"%s"' % s1.replace('"', """)
else:
s1 = "'%s'" % s1
else:
s1 = '"%s"' % s1
return s1
def quote_python(inStr):
s1 = inStr
if s1.find("'") == -1:
if s1.find('\n') == -1:
return "'%s'" % s1
else:
return "'''%s'''" % s1
else:
if s1.find('"') != -1:
s1 = s1.replace('"', '\\"')
if s1.find('\n') == -1:
return '"%s"' % s1
else:
return '"""%s"""' % s1
def get_all_text_(node):
if node.text is not None:
text = node.text
else:
text = ''
for child in node:
if child.tail is not None:
text += child.tail
return text
def find_attr_value_(attr_name, node):
attrs = node.attrib
attr_parts = attr_name.split(':')
value = None
if len(attr_parts) == 1:
value = attrs.get(attr_name)
elif len(attr_parts) == 2:
prefix, name = attr_parts
namespace = node.nsmap.get(prefix)
if namespace is not None:
value = attrs.get('{%s}%s' % (namespace, name, ))
return value
class GDSParseError(Exception):
pass
def raise_parse_error(node, msg):
msg = '%s (element %s/line %d)' % (msg, node.tag, node.sourceline, )
raise GDSParseError(msg)
class MixedContainer:
# Constants for category:
CategoryNone = 0
CategoryText = 1
CategorySimple = 2
CategoryComplex = 3
# Constants for content_type:
TypeNone = 0
TypeText = 1
TypeString = 2
TypeInteger = 3
TypeFloat = 4
TypeDecimal = 5
TypeDouble = 6
TypeBoolean = 7
TypeBase64 = 8
def __init__(self, category, content_type, name, value):
self.category = category
self.content_type = content_type
self.name = name
self.value = value
def getCategory(self):
return self.category
def getContenttype(self, content_type):
return self.content_type
def getValue(self):
return self.value
def getName(self):
return self.name
def export(self, outfile, level, name, namespace,
pretty_print=True):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
outfile.write(self.value)
elif self.category == MixedContainer.CategorySimple:
self.exportSimple(outfile, level, name)
else: # category == MixedContainer.CategoryComplex
self.value.export(
outfile, level, namespace, name,
pretty_print=pretty_print)
def exportSimple(self, outfile, level, name):
if self.content_type == MixedContainer.TypeString:
outfile.write('<%s>%s</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeInteger or \
self.content_type == MixedContainer.TypeBoolean:
outfile.write('<%s>%d</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeFloat or \
self.content_type == MixedContainer.TypeDecimal:
outfile.write('<%s>%f</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeDouble:
outfile.write('<%s>%g</%s>' % (
self.name, self.value, self.name))
elif self.content_type == MixedContainer.TypeBase64:
outfile.write('<%s>%s</%s>' % (
self.name,
base64.b64encode(self.value),
self.name))
def to_etree(self, element):
if self.category == MixedContainer.CategoryText:
# Prevent exporting empty content as empty lines.
if self.value.strip():
if len(element) > 0:
if element[-1].tail is None:
element[-1].tail = self.value
else:
element[-1].tail += self.value
else:
if element.text is None:
element.text = self.value
else:
element.text += self.value
elif self.category == MixedContainer.CategorySimple:
subelement = etree_.SubElement(
element, '%s' % self.name)
subelement.text = self.to_etree_simple()
else: # category == MixedContainer.CategoryComplex
self.value.to_etree(element)
def to_etree_simple(self):
if self.content_type == MixedContainer.TypeString:
text = self.value
elif (self.content_type == MixedContainer.TypeInteger or
self.content_type == MixedContainer.TypeBoolean):
text = '%d' % self.value
elif (self.content_type == MixedContainer.TypeFloat or
self.content_type == MixedContainer.TypeDecimal):
text = '%f' % self.value
elif self.content_type == MixedContainer.TypeDouble:
text = '%g' % self.value
elif self.content_type == MixedContainer.TypeBase64:
text = '%s' % base64.b64encode(self.value)
return text
def exportLiteral(self, outfile, level, name):
if self.category == MixedContainer.CategoryText:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
elif self.category == MixedContainer.CategorySimple:
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s", "%s"),\n' % (
self.category, self.content_type,
self.name, self.value))
else: # category == MixedContainer.CategoryComplex
showIndent(outfile, level)
outfile.write(
'model_.MixedContainer(%d, %d, "%s",\n' % (
self.category, self.content_type, self.name,))
self.value.exportLiteral(outfile, level + 1)
showIndent(outfile, level)
outfile.write(')\n')
class MemberSpec_(object):
def __init__(self, name='', data_type='', container=0,
optional=0, child_attrs=None, choice=None):
self.name = name
self.data_type = data_type
self.container = container
self.child_attrs = child_attrs
self.choice = choice
self.optional = optional
def set_name(self, name): self.name = name
def get_name(self): return self.name
def set_data_type(self, data_type): self.data_type = data_type
def get_data_type_chain(self): return self.data_type
def get_data_type(self):
if isinstance(self.data_type, list):
if len(self.data_type) > 0:
return self.data_type[-1]
else:
return 'xs:string'
else:
return self.data_type
def set_container(self, container): self.container = container
def get_container(self): return self.container
def set_child_attrs(self, child_attrs): self.child_attrs = child_attrs
def get_child_attrs(self): return self.child_attrs
def set_choice(self, choice): self.choice = choice
def get_choice(self): return self.choice
def set_optional(self, optional): self.optional = optional
def get_optional(self): return self.optional
def _cast(typ, value):
if typ is None or value is None:
return value
return typ(value)
#
# Data representation classes.
#
class PTSPodRequest(GeneratedsSuper):
subclass = None
superclass = None
def __init__(self, USERID=None, TrackId=None, ClientIp=None, MpSuffix=None, MpDate=None, RequestType=None, FirstName=None, LastName=None, Email1=None, Email2=None, Email3=None, FaxNumber=None, AddressLine1=None, AddressLine2=None, City=None, State=None, Zip=None, VerifyAddress=None, TableCode=None, CustRegID=None):
self.original_tagname_ = None
self.USERID = _cast(None, USERID)
self.TrackId = TrackId
self.ClientIp = ClientIp
self.MpSuffix = MpSuffix
self.MpDate = MpDate
self.RequestType = RequestType
self.FirstName = FirstName
self.LastName = LastName
self.Email1 = Email1
self.Email2 = Email2
self.Email3 = Email3
self.FaxNumber = FaxNumber
self.AddressLine1 = AddressLine1
self.AddressLine2 = AddressLine2
self.City = City
self.State = State
self.Zip = Zip
self.VerifyAddress = VerifyAddress
self.TableCode = TableCode
self.CustRegID = CustRegID
def factory(*args_, **kwargs_):
if CurrentSubclassModule_ is not None:
subclass = getSubclassFromModule_(
CurrentSubclassModule_, PTSPodRequest)
if subclass is not None:
return subclass(*args_, **kwargs_)
if PTSPodRequest.subclass:
return PTSPodRequest.subclass(*args_, **kwargs_)
else:
return PTSPodRequest(*args_, **kwargs_)
factory = staticmethod(factory)
def get_TrackId(self): return self.TrackId
def set_TrackId(self, TrackId): self.TrackId = TrackId
def get_ClientIp(self): return self.ClientIp
def set_ClientIp(self, ClientIp): self.ClientIp = ClientIp
def get_MpSuffix(self): return self.MpSuffix
def set_MpSuffix(self, MpSuffix): self.MpSuffix = MpSuffix
def get_MpDate(self): return self.MpDate
def set_MpDate(self, MpDate): self.MpDate = MpDate
def get_RequestType(self): return self.RequestType
def set_RequestType(self, RequestType): self.RequestType = RequestType
def get_FirstName(self): return self.FirstName
def set_FirstName(self, FirstName): self.FirstName = FirstName
def get_LastName(self): return self.LastName
def set_LastName(self, LastName): self.LastName = LastName
def get_Email1(self): return self.Email1
def set_Email1(self, Email1): self.Email1 = Email1
def get_Email2(self): return self.Email2
def set_Email2(self, Email2): self.Email2 = Email2
def get_Email3(self): return self.Email3
def set_Email3(self, Email3): self.Email3 = Email3
def get_FaxNumber(self): return self.FaxNumber
def set_FaxNumber(self, FaxNumber): self.FaxNumber = FaxNumber
def get_AddressLine1(self): return self.AddressLine1
def set_AddressLine1(self, AddressLine1): self.AddressLine1 = AddressLine1
def get_AddressLine2(self): return self.AddressLine2
def set_AddressLine2(self, AddressLine2): self.AddressLine2 = AddressLine2
def get_City(self): return self.City
def set_City(self, City): self.City = City
def get_State(self): return self.State
def set_State(self, State): self.State = State
def get_Zip(self): return self.Zip
def set_Zip(self, Zip): self.Zip = Zip
def get_VerifyAddress(self): return self.VerifyAddress
def set_VerifyAddress(self, VerifyAddress): self.VerifyAddress = VerifyAddress
def get_TableCode(self): return self.TableCode
def set_TableCode(self, TableCode): self.TableCode = TableCode
def get_CustRegID(self): return self.CustRegID
def set_CustRegID(self, CustRegID): self.CustRegID = CustRegID
def get_USERID(self): return self.USERID
def set_USERID(self, USERID): self.USERID = USERID
def hasContent_(self):
if (
self.TrackId is not None or
self.ClientIp is not None or
self.MpSuffix is not None or
self.MpDate is not None or
self.RequestType is not None or
self.FirstName is not None or
self.LastName is not None or
self.Email1 is not None or
self.Email2 is not None or
self.Email3 is not None or
self.FaxNumber is not None or
self.AddressLine1 is not None or
self.AddressLine2 is not None or
self.City is not None or
self.State is not None or
self.Zip is not None or
self.VerifyAddress is not None or
self.TableCode is not None or
self.CustRegID is not None
):
return True
else:
return False
def export(self, outfile, level, namespace_='', name_='PTSPodRequest', namespacedef_='', pretty_print=True):
imported_ns_def_ = GenerateDSNamespaceDefs_.get('PTSPodRequest')
if imported_ns_def_ is not None:
namespacedef_ = imported_ns_def_
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.original_tagname_ is not None:
name_ = self.original_tagname_
showIndent(outfile, level, pretty_print)
outfile.write('<%s%s%s' % (namespace_, name_, namespacedef_ and ' ' + namespacedef_ or '', ))
already_processed = set()
self.exportAttributes(outfile, level, already_processed, namespace_, name_='PTSPodRequest')
if self.hasContent_():
outfile.write('>%s' % (eol_, ))
self.exportChildren(outfile, level + 1, namespace_='', name_='PTSPodRequest', pretty_print=pretty_print)
showIndent(outfile, level, pretty_print)
outfile.write('</%s%s>%s' % (namespace_, name_, eol_))
else:
outfile.write('/>%s' % (eol_, ))
def exportAttributes(self, outfile, level, already_processed, namespace_='', name_='PTSPodRequest'):
if self.USERID is not None and 'USERID' not in already_processed:
already_processed.add('USERID')
outfile.write(' USERID=%s' % (self.gds_encode(self.gds_format_string(quote_attrib(self.USERID), input_name='USERID')), ))
def exportChildren(self, outfile, level, namespace_='', name_='PTSPodRequest', fromsubclass_=False, pretty_print=True):
if pretty_print:
eol_ = '\n'
else:
eol_ = ''
if self.TrackId is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<TrackId>%s</TrackId>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.TrackId), input_name='TrackId')), eol_))
if self.ClientIp is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<ClientIp>%s</ClientIp>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.ClientIp), input_name='ClientIp')), eol_))
if self.MpSuffix is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<MpSuffix>%s</MpSuffix>%s' % (self.gds_format_integer(self.MpSuffix, input_name='MpSuffix'), eol_))
if self.MpDate is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<MpDate>%s</MpDate>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.MpDate), input_name='MpDate')), eol_))
if self.RequestType is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<RequestType>%s</RequestType>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.RequestType), input_name='RequestType')), eol_))
if self.FirstName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FirstName>%s</FirstName>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.FirstName), input_name='FirstName')), eol_))
if self.LastName is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<LastName>%s</LastName>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.LastName), input_name='LastName')), eol_))
if self.Email1 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Email1>%s</Email1>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.Email1), input_name='Email1')), eol_))
if self.Email2 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Email2>%s</Email2>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.Email2), input_name='Email2')), eol_))
if self.Email3 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Email3>%s</Email3>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.Email3), input_name='Email3')), eol_))
if self.FaxNumber is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<FaxNumber>%s</FaxNumber>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.FaxNumber), input_name='FaxNumber')), eol_))
if self.AddressLine1 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<AddressLine1>%s</AddressLine1>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.AddressLine1), input_name='AddressLine1')), eol_))
if self.AddressLine2 is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<AddressLine2>%s</AddressLine2>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.AddressLine2), input_name='AddressLine2')), eol_))
if self.City is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<City>%s</City>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.City), input_name='City')), eol_))
if self.State is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<State>%s</State>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.State), input_name='State')), eol_))
if self.Zip is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<Zip>%s</Zip>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.Zip), input_name='Zip')), eol_))
if self.VerifyAddress is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<VerifyAddress>%s</VerifyAddress>%s' % (self.gds_format_boolean(self.VerifyAddress, input_name='VerifyAddress'), eol_))
if self.TableCode is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<TableCode>%s</TableCode>%s' % (self.gds_encode(self.gds_format_string(quote_xml(self.TableCode), input_name='TableCode')), eol_))
if self.CustRegID is not None:
showIndent(outfile, level, pretty_print)
outfile.write('<CustRegID>%s</CustRegID>%s' % (self.gds_format_integer(self.CustRegID, input_name='CustRegID'), eol_))
def build(self, node):
already_processed = set()
self.buildAttributes(node, node.attrib, already_processed)
for child in node:
nodeName_ = Tag_pattern_.match(child.tag).groups()[-1]
self.buildChildren(child, node, nodeName_)
return self
def buildAttributes(self, node, attrs, already_processed):
value = find_attr_value_('USERID', node)
if value is not None and 'USERID' not in already_processed:
already_processed.add('USERID')
self.USERID = value
def buildChildren(self, child_, node, nodeName_, fromsubclass_=False):
if nodeName_ == 'TrackId':
TrackId_ = child_.text
TrackId_ = self.gds_validate_string(TrackId_, node, 'TrackId')
self.TrackId = TrackId_
elif nodeName_ == 'ClientIp':
ClientIp_ = child_.text
ClientIp_ = self.gds_validate_string(ClientIp_, node, 'ClientIp')
self.ClientIp = ClientIp_
elif nodeName_ == 'MpSuffix':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'MpSuffix')
self.MpSuffix = ival_
elif nodeName_ == 'MpDate':
MpDate_ = child_.text
MpDate_ = self.gds_validate_string(MpDate_, node, 'MpDate')
self.MpDate = MpDate_
elif nodeName_ == 'RequestType':
RequestType_ = child_.text
RequestType_ = self.gds_validate_string(RequestType_, node, 'RequestType')
self.RequestType = RequestType_
elif nodeName_ == 'FirstName':
FirstName_ = child_.text
FirstName_ = self.gds_validate_string(FirstName_, node, 'FirstName')
self.FirstName = FirstName_
elif nodeName_ == 'LastName':
LastName_ = child_.text
LastName_ = self.gds_validate_string(LastName_, node, 'LastName')
self.LastName = LastName_
elif nodeName_ == 'Email1':
Email1_ = child_.text
Email1_ = self.gds_validate_string(Email1_, node, 'Email1')
self.Email1 = Email1_
elif nodeName_ == 'Email2':
Email2_ = child_.text
Email2_ = self.gds_validate_string(Email2_, node, 'Email2')
self.Email2 = Email2_
elif nodeName_ == 'Email3':
Email3_ = child_.text
Email3_ = self.gds_validate_string(Email3_, node, 'Email3')
self.Email3 = Email3_
elif nodeName_ == 'FaxNumber':
FaxNumber_ = child_.text
FaxNumber_ = self.gds_validate_string(FaxNumber_, node, 'FaxNumber')
self.FaxNumber = FaxNumber_
elif nodeName_ == 'AddressLine1':
AddressLine1_ = child_.text
AddressLine1_ = self.gds_validate_string(AddressLine1_, node, 'AddressLine1')
self.AddressLine1 = AddressLine1_
elif nodeName_ == 'AddressLine2':
AddressLine2_ = child_.text
AddressLine2_ = self.gds_validate_string(AddressLine2_, node, 'AddressLine2')
self.AddressLine2 = AddressLine2_
elif nodeName_ == 'City':
City_ = child_.text
City_ = self.gds_validate_string(City_, node, 'City')
self.City = City_
elif nodeName_ == 'State':
State_ = child_.text
State_ = self.gds_validate_string(State_, node, 'State')
self.State = State_
elif nodeName_ == 'Zip':
Zip_ = child_.text
Zip_ = self.gds_validate_string(Zip_, node, 'Zip')
self.Zip = Zip_
elif nodeName_ == 'VerifyAddress':
sval_ = child_.text
if sval_ in ('true', '1'):
ival_ = True
elif sval_ in ('false', '0'):
ival_ = False
else:
raise_parse_error(child_, 'requires boolean')
ival_ = self.gds_validate_boolean(ival_, node, 'VerifyAddress')
self.VerifyAddress = ival_
elif nodeName_ == 'TableCode':
TableCode_ = child_.text
TableCode_ = self.gds_validate_string(TableCode_, node, 'TableCode')
self.TableCode = TableCode_
elif nodeName_ == 'CustRegID':
sval_ = child_.text
try:
ival_ = int(sval_)
except (TypeError, ValueError) as exp:
raise_parse_error(child_, 'requires integer: %s' % exp)
ival_ = self.gds_validate_integer(ival_, node, 'CustRegID')
self.CustRegID = ival_
# end class PTSPodRequest
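# Illustrative usage sketch (hypothetical values, not generated by generateDS):
#   req = PTSPodRequest(USERID='XXXXXXXX', TrackId='EJ123456780US',
#                       RequestType='PE', Email1='user@example.com')
#   req.export(sys.stdout, 0, name_='PTSPodRequest')
# or parse an existing document with parse('request.xml') / parseString(xml_text).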
GDSClassesMapping = {
}
USAGE_TEXT = """
Usage: python <Parser>.py [ -s ] <in_xml_file>
"""
def usage():
print(USAGE_TEXT)
sys.exit(1)
def get_root_tag(node):
tag = Tag_pattern_.match(node.tag).groups()[-1]
rootClass = GDSClassesMapping.get(tag)
if rootClass is None:
rootClass = globals().get(tag)
return tag, rootClass
def parse(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'PTSPodRequest'
rootClass = PTSPodRequest
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='',
pretty_print=True)
return rootObj
def parseEtree(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'PTSPodRequest'
rootClass = PTSPodRequest
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
mapping = {}
rootElement = rootObj.to_etree(None, name_=rootTag, mapping_=mapping)
reverse_mapping = rootObj.gds_reverse_node_mapping(mapping)
if not silence:
content = etree_.tostring(
rootElement, pretty_print=True,
xml_declaration=True, encoding="utf-8")
sys.stdout.write(content)
sys.stdout.write('\n')
return rootObj, rootElement, mapping, reverse_mapping
def parseString(inString, silence=False):
'''Parse a string, create the object tree, and export it.
Arguments:
- inString -- A string. This XML fragment should not start
with an XML declaration containing an encoding.
- silence -- A boolean. If False, export the object.
Returns -- The root object in the tree.
'''
parser = None
    rootNode = parsexmlstring_(inString, parser)
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'PTSPodRequest'
rootClass = PTSPodRequest
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
if not silence:
sys.stdout.write('<?xml version="1.0" ?>\n')
rootObj.export(
sys.stdout, 0, name_=rootTag,
namespacedef_='')
return rootObj
def parseLiteral(inFileName, silence=False):
parser = None
doc = parsexml_(inFileName, parser)
rootNode = doc.getroot()
rootTag, rootClass = get_root_tag(rootNode)
if rootClass is None:
rootTag = 'PTSPodRequest'
rootClass = PTSPodRequest
rootObj = rootClass.factory()
rootObj.build(rootNode)
# Enable Python to collect the space used by the DOM.
doc = None
if not silence:
sys.stdout.write('#from PTSPodRequest.xsd import *\n\n')
sys.stdout.write('import PTSPodRequest.xsd as model_\n\n')
sys.stdout.write('rootObj = model_.rootClass(\n')
rootObj.exportLiteral(sys.stdout, 0, name_=rootTag)
sys.stdout.write(')\n')
return rootObj
def main():
args = sys.argv[1:]
if len(args) == 1:
parse(args[0])
else:
usage()
if __name__ == '__main__':
#import pdb; pdb.set_trace()
main()
__all__ = [
"PTSPodRequest"
]
|
if input_data.microsecond == 0:
_svalue = '%02d:%02d:%02d' % (
input_data.hour,
input_data.minute,
input_data.second,
)
else:
_svalue = '%02d:%02d:%02d.%s' % (
input_data.hour,
input_data.minute,
input_data.second,
('%f' % (float(input_data.microsecond) / 1000000))[2:],
)
if input_data.tzinfo is not None:
tzoff = input_data.tzinfo.utcoffset(input_data)
if tzoff is not None:
total_seconds = tzoff.seconds + (86400 * tzoff.days)
if total_seconds == 0:
_svalue += 'Z'
else:
if total_seconds < 0:
_svalue += '-'
total_seconds *= -1
else:
_svalue += '+'
hours = total_seconds // 3600
minutes = (total_seconds - (hours * 3600)) // 60
_svalue += '{0:02d}:{1:02d}'.format(hours, minutes)
return _svalue
|
lib.rs
|
#![cfg_attr(not(feature = "std"), no_std)]
mod mock;
use frame_benchmarking::{account, benchmarks, whitelisted_caller};
use frame_support::{sp_runtime::SaturatedConversion, traits::Currency};
use frame_system::RawOrigin;
use pallet_server::{AdminKey, Config as ServerConfig, Pallet as Server};
use pallet_tipping::{Call, Config as TippingConfig, Pallet as Tipping, TipsBalanceInfo};
const SEED: u32 = 0;
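// Benchmarks for the tipping pallet extrinsics (send_tip, claim_reference and
// claim_tip). Each case sets up the required state (funded accounts where
// needed and a server registered through pallet_server) before dispatching
// the call being measured.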
pub struct
|
<T: Config>(Tipping<T>);
pub trait Config: TippingConfig + ServerConfig {}
benchmarks! {
send_tip {
let s in 1 .. 100;
let balance = 1000000000000000000000u128.saturated_into();
let caller: T::AccountId = whitelisted_caller();
let _ = <T as TippingConfig>::Currency::deposit_creating(&caller, balance);
let admin: T::AccountId = AdminKey::<T>::get();
let server_account: T::AccountId = account("server_account", 0, SEED);
let server_name = "myriad".as_bytes().to_vec();
let admin_origin = <T as frame_system::Config>::Origin::from(RawOrigin::Signed(admin));
let server_id = "server".as_bytes().to_vec();
let server_api_url = "https://api.dev.myriad.social".as_bytes().to_vec();
let server_web_url = "https://app.dev.myriad.social".as_bytes().to_vec();
let _server = Server::<T>::register(
admin_origin,
server_account,
server_id.clone(),
server_name,
server_api_url,
server_web_url
);
let reference_id = "people_id".as_bytes().to_vec();
let reference_type = "people".as_bytes().to_vec();
let ft_identifier = "native".as_bytes().to_vec();
let tips_balance_info = TipsBalanceInfo::new(
&server_id,
&reference_type,
&reference_id,
&ft_identifier
);
}: _(RawOrigin::Signed(caller), tips_balance_info, s.into())
claim_reference {
let caller: T::AccountId = whitelisted_caller();
let admin: T::AccountId = AdminKey::<T>::get();
let admin_origin = <T as frame_system::Config>::Origin::from(RawOrigin::Signed(admin));
let server_name = "myriad".as_bytes().to_vec();
let server_id = "server".as_bytes().to_vec();
let server_api_url = "https://api.dev.myriad.social".as_bytes().to_vec();
let server_web_url = "https://app.dev.myriad.social".as_bytes().to_vec();
let _server = Server::<T>::register(
admin_origin,
caller.clone(),
server_id.clone(),
server_name,
server_api_url,
server_web_url
);
let reference_id = "people_id".as_bytes().to_vec();
let reference_type = "people".as_bytes().to_vec();
let ft_identifier = "native".as_bytes().to_vec();
let tips_balance_info = TipsBalanceInfo::new(
&server_id,
&reference_type,
&reference_id,
&ft_identifier
);
}: _(RawOrigin::Signed(caller), tips_balance_info, "user".as_bytes().to_vec(), "user_id".as_bytes().to_vec(), None)
claim_tip {
let caller: T::AccountId = whitelisted_caller();
let tipping_account_id = Tipping::<T>::tipping_account_id();
let balance = 1000000000000000000000u128.saturated_into();
let _ = <T as TippingConfig>::Currency::deposit_creating(&caller, balance);
let _ = <T as TippingConfig>::Currency::deposit_creating(&tipping_account_id, balance);
// Register Server
// Server admin => server_account
let admin: T::AccountId = AdminKey::<T>::get();
let server_account: T::AccountId = account("server_account", 0, SEED);
let admin_origin = <T as frame_system::Config>::Origin::from(RawOrigin::Signed(admin));
let server_origin = <T as frame_system::Config>::Origin::from(RawOrigin::Signed(server_account.clone()));
let server_id = "server".as_bytes().to_vec();
let server_api_url = "https://api.dev.myriad.social".as_bytes().to_vec();
let server_web_url = "https://app.dev.myriad.social".as_bytes().to_vec();
let _ = Server::<T>::register(
admin_origin,
server_account,
server_id.clone(),
"myriad".as_bytes().to_vec(),
server_api_url,
server_web_url
);
// Send Tipping
let account_1: T::AccountId = account("account", 0, SEED);
let account_1_origin = <T as frame_system::Config>::Origin::from(RawOrigin::Signed(account_1.clone()));
let tips_balance_info = TipsBalanceInfo::new(
&server_id,
"people".as_bytes(),
"people_id".as_bytes(),
"native".as_bytes()
);
let balance_2 = 10000000000000000000u128.saturated_into();
let _ = <T as TippingConfig>::Currency::deposit_creating(&account_1, balance);
let _ = Tipping::<T>::send_tip(account_1_origin, tips_balance_info.clone(), balance_2);
// Claim Reference
let tips_balance_info_user = TipsBalanceInfo::new(
&server_id,
"user".as_bytes(),
"user_id".as_bytes(),
"native".as_bytes()
);
let _ = Tipping::<T>::claim_reference(
server_origin.clone(),
tips_balance_info_user.clone(),
"user".as_bytes().to_vec(),
"user_id".as_bytes().to_vec(),
Some(caller.clone())
);
let _ = Tipping::<T>::claim_reference(
server_origin,
tips_balance_info,
"user".as_bytes().to_vec(),
"user_id".as_bytes().to_vec(),
None
);
}: _(RawOrigin::Signed(caller), tips_balance_info_user)
}
|
Pallet
|
p4.js
|
/*
Author: Adam Carlton
Project #4
This JS lets a user move a sprite around the canvas and plays a video when the sprite reaches a certain location in the canvas
*/
//my sprite sheet and video source
const mySprite = "ogre.png"
let videoPlaying = false
let infiniteFun = false
//canvasInfo
let ci = {
canvas: null,
ctx: null,
numOfFrames: 4,
img: null,
imgWidth: 0,
imgHeight: 0,
frameIndex: 0,
frameWidth: 0,
x: 600,
y: 290,
vid: null,
vidX: null,
vidY: null,
vidW: null,
vidH: null
}
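//note on the sprite sheet layout (inferred from the draw calls below): each
//direction uses 4 frames in a single row of the sheet, offset by 0 (down),
//4 (right), 8 (up) and 12 (left) frame widths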
//draw initial background elements
function setBackground(){
ci.canvas.style.backgroundColor = "Cyan"
ci.ctx.beginPath()
ci.ctx.moveTo(0, 350)
ci.ctx.lineTo(1250, 350)
ci.ctx.closePath()
ci.ctx.stroke()
ci.ctx.beginPath()
ci.ctx.moveTo(0, 50)
ci.ctx.lineTo(30, 50)
ci.ctx.closePath()
ci.ctx.strokeStyle = "Red"
ci.ctx.lineWidth = 5
ci.ctx.stroke()
ci.ctx.beginPath()
ci.ctx.moveTo(1220, 50)
ci.ctx.lineTo(1250, 50)
ci.ctx.closePath()
ci.ctx.strokeStyle = "Red"
ci.ctx.lineWidth = 5
ci.ctx.stroke()
}
//handles the arrow keys being pressed down
function handleKeyDown(event){
if(videoPlaying){
videoPlaying = false
stopAndHideVideo()
}
switch(event.keyCode){
case 37:
//left
moveLeft()
break
case 38:
//up
moveUp()
break
case 39:
//right
moveRight()
break
case 40:
//down
moveDown()
break
}
}
//goes back to standing still facing the user
function handleKeyUp(event){
standStill()
}
//you'll see
function rekt(){
ci.ctx.font = "50px Arial"
ci.ctx.fillStyle = "Red"
ci.ctx.fillText("AIN'T NO BRAKES ON THE RICK ROLL TRAIN", 100, 100)
}
//moves left at a rate of 5 x values at a time. Also detects if the user is in the correct spot for a video to begin playing
function moveLeft(){
    let frameHeight = ci.imgHeight
ci.ctx.clearRect(ci.x, ci.y, ci.frameWidth, frameHeight)
ci.ctx.drawImage(ci.img, (ci.frameIndex+12) * ci.frameWidth, 0, ci.frameWidth, frameHeight, ci.x, ci.y, ci.frameWidth, frameHeight)
ci.frameIndex++
ci.x-=5
if(ci.frameIndex >= ci.numOfFrames){
ci.frameIndex = 0
}
if(ci.x <= 1){
ci.x = 2
}
if (ci.x <= 2){
ci.vid.play()
if (!infiniteFun) {
videoPlaying = true
}
else {
rekt()
}
}
}
//moves right at a rate of 5 x values at a time. Also detects if the user is in the correct spot for a video to begin playing
function moveRight(){
    let frameHeight = ci.imgHeight
ci.ctx.clearRect(ci.x, ci.y, ci.frameWidth, frameHeight)
ci.ctx.drawImage(ci.img, (ci.frameIndex + 4) * ci.frameWidth, 0, ci.frameWidth, frameHeight, ci.x, ci.y, ci.frameWidth, frameHeight)
ci.frameIndex++
ci.x += 5
if (ci.frameIndex >= ci.numOfFrames) {
ci.frameIndex = 0
}
if(ci.x >= 1200){
ci.x=1199
}
if(ci.x >= 1199){
ci.vid.play()
if(!infiniteFun){
videoPlaying = true
}
else{
rekt()
}
}
}
//moves upward at a rate of 1 y value. Higher values would leave trails from the sprite
function moveUp(){
    let frameHeight = ci.imgHeight
ci.ctx.clearRect(ci.x, ci.y, ci.frameWidth, frameHeight)
ci.ctx.drawImage(ci.img, (ci.frameIndex + 8) * ci.frameWidth, 0, ci.frameWidth, frameHeight, ci.x, ci.y, ci.frameWidth, frameHeight)
ci.frameIndex++
ci.y -= 1
if (ci.frameIndex >= ci.numOfFrames) {
ci.frameIndex = 0
}
if (ci.y <= 10){
ci.y = 11
}
}
//moves downward at a rate of 1 y value. The problem here was that higher values would leave trails from the sprite.
function moveDown(){
    let frameHeight = ci.imgHeight
ci.ctx.clearRect(ci.x, ci.y, ci.frameWidth, frameHeight)
ci.ctx.drawImage(ci.img, ci.frameIndex * ci.frameWidth, 0, ci.frameWidth, frameHeight, ci.x, ci.y, ci.frameWidth, frameHeight)
ci.frameIndex++
ci.y += 1
if (ci.frameIndex >= ci.numOfFrames) {
ci.frameIndex = 0
}
if(ci.y >= 291){
ci.y = 290
}
}
//makes the sprite stand still on the first animation of the sprite
function
|
(){
let frameWidth = ci.frameWidth
let frameHeight = ci.imgHeight
ci.ctx.clearRect(ci.x, ci.y, frameWidth, frameHeight)
ci.ctx.drawImage(ci.img, 0, 0, frameWidth, frameHeight, ci.x, ci.y, frameWidth, frameHeight)
}
//will play the video inside the canvas
function playVideo(){
(function loop(){
        if (!ci.vid.paused && !ci.vid.ended){
ci.ctx.drawImage(ci.vid, ci.vidX, ci.vidY, ci.vidW, ci.vidH)
setTimeout(loop, 1000/60)
}
})()
}
//will pause and hide the video
function stopAndHideVideo(){
ci.vid.pause()
ci.ctx.clearRect(ci.vidX, ci.vidY, ci.vidW, ci.vidH)
}
//function to load up canvas information as well as set up event handlers
window.onload = function(){
ci.canvas = document.querySelector("#draw")
ci.ctx = ci.canvas.getContext("2d")
setBackground()
ci.img = new Image()
ci.img.onload = function(){
ci.imgWidth = this.width/4
ci.imgHeight = this.height/4.5
ci.frameWidth = ci.imgWidth/ci.numOfFrames
standStill()
}
ci.img.src = mySprite
ci.vid = document.querySelector('#video')
ci.vidX = 0
ci.vidY = 351
ci.vidH = 199
ci.vidW = 1250
document.addEventListener("keydown", handleKeyDown)
document.addEventListener("keyup", handleKeyUp)
document.querySelector("#fun").addEventListener("click", function(){
infiniteFun = true
})
ci.vid.addEventListener("play", playVideo)
}
|
standStill
|
masked_language_model.py
|
from typing import Dict
from overrides import overrides
import torch
from allennlp.common.checks import check_dimensions_match
from allennlp.data import TextFieldTensors, Vocabulary
from allennlp.models.model import Model
from allennlp.modules import Seq2SeqEncoder, TextFieldEmbedder
from allennlp.nn import util, InitializerApplicator
from allennlp.training.metrics import Perplexity
from allennlp_models.lm.modules.language_model_heads import LanguageModelHead
@Model.register("masked_language_model")
class MaskedLanguageModel(Model):
"""
The `MaskedLanguageModel` embeds some input tokens (including some which are masked),
contextualizes them, then predicts targets for the masked tokens, computing a loss against
known targets.
NOTE: This was developed for use in a demo, not for training. It's possible that it will still
work for training a masked LM, but it is very likely that some other code would be much more
efficient for that. This `does` compute correct gradients of the loss, because we use that in
our demo, so in principle it should be able to train a model, we just don't necessarily endorse
that use.
# Parameters
vocab : `Vocabulary`
text_field_embedder : `TextFieldEmbedder`
Used to embed the indexed tokens we get in `forward`.
language_model_head : `LanguageModelHead`
The `torch.nn.Module` that goes from the hidden states output by the contextualizer to
logits over some output vocabulary.
contextualizer : `Seq2SeqEncoder`, optional (default=`None`)
Used to "contextualize" the embeddings. This is optional because the contextualization
might actually be done in the text field embedder.
target_namespace : `str`, optional (default=`'bert'`)
Namespace to use to convert predicted token ids to strings in
`Model.make_output_human_readable`.
dropout : `float`, optional (default=`0.0`)
If specified, dropout is applied to the contextualized embeddings before computation of
the softmax. The contextualized embeddings themselves are returned without dropout.
"""
def __init__(
self,
vocab: Vocabulary,
text_field_embedder: TextFieldEmbedder,
language_model_head: LanguageModelHead,
contextualizer: Seq2SeqEncoder = None,
target_namespace: str = "bert",
dropout: float = 0.0,
initializer: InitializerApplicator = None,
**kwargs,
) -> None:
super().__init__(vocab, **kwargs)
self._text_field_embedder = text_field_embedder
self._contextualizer = contextualizer
if contextualizer:
check_dimensions_match(
text_field_embedder.get_output_dim(),
contextualizer.get_input_dim(),
"text field embedder output",
"contextualizer input",
)
self._language_model_head = language_model_head
self._target_namespace = target_namespace
self._perplexity = Perplexity()
self._dropout = torch.nn.Dropout(dropout)
if initializer is not None:
initializer(self)
def forward( # type: ignore
self,
tokens: TextFieldTensors,
mask_positions: torch.BoolTensor,
target_ids: TextFieldTensors = None,
) -> Dict[str, torch.Tensor]:
"""
# Parameters
tokens : `TextFieldTensors`
The output of `TextField.as_tensor()` for a batch of sentences.
mask_positions : `torch.LongTensor`
The positions in `tokens` that correspond to [MASK] tokens that we should try to fill
in. Shape should be (batch_size, num_masks).
target_ids : `TextFieldTensors`
This is a list of token ids that correspond to the mask positions we're trying to fill.
It is the output of a `TextField`, purely for convenience, so we can handle wordpiece
tokenizers and such without having to do crazy things in the dataset reader. We assume
that there is exactly one entry in the dictionary, and that it has a shape identical to
`mask_positions` - one target token per mask position.
"""
targets = None
if target_ids is not None:
# A bit of a hack to get the right targets out of the TextField output...
if len(target_ids) != 1:
targets = target_ids["bert"]["token_ids"]
else:
targets = list(target_ids.values())[0]["tokens"]
mask_positions = mask_positions.squeeze(-1)
batch_size, num_masks = mask_positions.size()
if targets is not None and targets.size() != mask_positions.size():
raise ValueError(
f"Number of targets ({targets.size()}) and number of masks "
f"({mask_positions.size()}) are not equal"
)
# Shape: (batch_size, num_tokens, embedding_dim)
embeddings = self._text_field_embedder(tokens)
# Shape: (batch_size, num_tokens, encoding_dim)
if self._contextualizer:
            # The padding mask comes from the token ids, not the embedded tensor.
            mask = util.get_text_field_mask(tokens)
contextual_embeddings = self._contextualizer(embeddings, mask)
else:
contextual_embeddings = embeddings
# Does advanced indexing to get the embeddings of just the mask positions, which is what
# we're trying to predict.
batch_index = torch.arange(0, batch_size).long().unsqueeze(1)
mask_embeddings = contextual_embeddings[batch_index, mask_positions]
target_logits = self._language_model_head(self._dropout(mask_embeddings))
vocab_size = target_logits.size(-1)
probs = torch.nn.functional.softmax(target_logits, dim=-1)
k = min(vocab_size, 5) # min here largely because tests use small vocab
top_probs, top_indices = probs.topk(k=k, dim=-1)
output_dict = {"probabilities": top_probs, "top_indices": top_indices}
output_dict["token_ids"] = util.get_token_ids_from_text_field_tensors(tokens)
if targets is not None:
target_logits = target_logits.view(batch_size * num_masks, vocab_size)
targets = targets.view(batch_size * num_masks)
loss = torch.nn.functional.cross_entropy(target_logits, targets)
self._perplexity(loss)
output_dict["loss"] = loss
return output_dict
def get_metrics(self, reset: bool = False):
return {"perplexity": self._perplexity.get_metric(reset=reset)}
@overrides
def make_output_human_readable(
self, output_dict: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
top_words = []
for instance_indices in output_dict["top_indices"]:
top_words.append(
[
[
self.vocab.get_token_from_index(
index.item(), namespace=self._target_namespace
)
for index in mask_positions
]
for mask_positions in instance_indices
|
output_dict["words"] = top_words
tokens = []
for instance_tokens in output_dict["token_ids"]:
tokens.append(
[
self.vocab.get_token_from_index(
token_id.item(), namespace=self._target_namespace
)
for token_id in instance_tokens
]
)
output_dict["tokens"] = tokens
return output_dict
default_predictor = "masked_language_model"
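# Illustrative sketch (not part of the model above): how the advanced indexing
# in forward() picks out one embedding per [MASK] position. The tensors below
# are hypothetical and only meant to show the shapes involved.
#
#   import torch
#   contextual_embeddings = torch.randn(2, 4, 3)      # (batch, num_tokens, dim)
#   mask_positions = torch.tensor([[1, 3], [0, 2]])   # (batch, num_masks)
#   batch_index = torch.arange(0, 2).long().unsqueeze(1)
#   mask_embeddings = contextual_embeddings[batch_index, mask_positions]
#   # mask_embeddings has shape (2, 2, 3): one vector per mask per instance.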
|
]
)
|
panic.rs
|
#![cfg(not(feature = "std"))]
extern "C" {
fn panic(payload_ptr: *const u8, payload_len: u32) -> !;
}
/// Overrides the default panic_fmt
#[cfg(not(feature = "panic_with_msg"))]
#[no_mangle]
#[panic_handler]
pub fn panic_fmt(_info: &crate::core::panic::PanicInfo) -> ! {
unsafe {
panic(crate::core::ptr::null(), 0u32);
}
}
/// Overrides the default panic_fmt
#[cfg(feature = "panic_with_msg")]
#[no_mangle]
#[panic_handler]
pub fn
|
(info: &crate::core::panic::PanicInfo) -> ! {
use crate::Vec;
use byteorder::{LittleEndian, ByteOrder};
struct Sink {
buf: Vec<u8>,
pos: usize
}
impl Sink {
#[inline(always)]
fn new(capacity: usize) -> Sink {
let mut buf = Vec::with_capacity(capacity);
buf.resize(capacity, 0);
Sink {
buf: buf,
pos: 0,
}
}
#[inline(always)]
fn reserve(&mut self, len: usize) -> &mut [u8] {
let dst = &mut self.buf[self.pos..self.pos+len];
self.pos += len;
dst
}
#[inline(always)]
fn write_u32(&mut self, val: u32) {
LittleEndian::write_u32(self.reserve(4), val);
}
#[inline(always)]
fn write_str(&mut self, bytes: &[u8]) {
self.write_u32(bytes.len() as u32);
self.reserve(bytes.len()).copy_from_slice(bytes)
}
}
impl crate::core::ops::Deref for Sink {
type Target = [u8];
fn deref(&self) -> &[u8] {
&self.buf
}
}
let msg = if let Some(fmt) = info.message() {
format!("{}", fmt)
} else {
Default::default()
};
let (file, line, col) = if let Some(loc) = info.location() {
(loc.file(), loc.line(), loc.column())
} else {
("", 0, 0)
};
let mut sink = Sink::new(
4 + msg.as_bytes().len() + // len + [msg]
4 + file.as_bytes().len() + // len + [file]
4 + // line
4 // col
);
sink.write_str(msg.as_bytes());
sink.write_str(file.as_bytes());
sink.write_u32(line);
sink.write_u32(col);
unsafe {
panic(sink.as_ptr(), sink.len() as u32)
}
}
#[lang = "eh_personality"]
extern "C" fn eh_personality() {}
/// Overrides the default oom
#[lang = "oom"]
#[no_mangle]
pub extern fn oom(_: crate::core::alloc::Layout) -> ! {
unsafe { crate::core::intrinsics::abort() }
}
|
panic_fmt
|
bokeh-plot-7affb552d313492d8b953f707e22c798.js
|
(function() {
var fn = function() {
(function(root) {
function now() {
return new Date();
}
var force = false;
if (typeof (root._bokeh_onload_callbacks) === "undefined" || force === true) {
root._bokeh_onload_callbacks = [];
root._bokeh_is_loading = undefined;
}
function run_callbacks() {
try {
root._bokeh_onload_callbacks.forEach(function(callback) { callback() });
}
finally {
delete root._bokeh_onload_callbacks
}
console.info("Bokeh: all callbacks have finished");
}
function load_libs(js_urls, callback) {
root._bokeh_onload_callbacks.push(callback);
if (root._bokeh_is_loading > 0) {
console.log("Bokeh: BokehJS is being loaded, scheduling callback at", now());
return null;
}
if (js_urls == null || js_urls.length === 0) {
run_callbacks();
return null;
}
console.log("Bokeh: BokehJS not loaded, scheduling load and callback at", now());
root._bokeh_is_loading = js_urls.length;
for (var i = 0; i < js_urls.length; i++) {
var url = js_urls[i];
var s = document.createElement('script');
s.src = url;
s.async = false;
s.onreadystatechange = s.onload = function() {
root._bokeh_is_loading--;
if (root._bokeh_is_loading === 0) {
console.log("Bokeh: all BokehJS libraries loaded");
run_callbacks()
}
};
s.onerror = function() {
console.warn("failed to load library " + url);
};
console.log("Bokeh: injecting script tag for BokehJS library: ", url);
document.getElementsByTagName("head")[0].appendChild(s);
}
};var element = document.getElementById("efaf503e-e285-4df4-b593-bca44f8c4e38");
if (element == null) {
console.log("Bokeh: ERROR: autoload.js configured with elementid 'efaf503e-e285-4df4-b593-bca44f8c4e38' but no matching script tag was found. ")
return false;
}
var js_urls = ["https://cdn.bokeh.org/bokeh/release/bokeh-1.0.0.min.js", "https://cdn.bokeh.org/bokeh/release/bokeh-widgets-1.0.0.min.js", "https://cdn.bokeh.org/bokeh/release/bokeh-tables-1.0.0.min.js", "https://cdn.bokeh.org/bokeh/release/bokeh-gl-1.0.0.min.js"];
var inline_js = [
function(Bokeh) {
Bokeh.set_log_level("info");
},
function(Bokeh) {
},
function(Bokeh) {
(function() {
var fn = function() {
Bokeh.safely(function() {
(function(root) {
function embed_document(root) {
var docs_json = '{"9429c435-1d14-4a44-ba18-14c523013f30":{"roots":{"references":[{"attributes":{"formatter":{"id":"13163","type":"BasicTickFormatter"},"plot":{"id":"13112","subtype":"Figure","type":"Plot"},"ticker":{"id":"13127","type":"BasicTicker"}},"id":"13126","type":"LinearAxis"},{"attributes":{},"id":"13127","type":"BasicTicker"},{"attributes":{},"id":"13161","type":"BasicTickFormatter"},{"attributes":{},"id":"13163","type":"BasicTickFormatter"},{"attributes":{"data_source":{"id":"13146","type":"ColumnDataSource"},"glyph":{"id":"13147","type":"Circle"},"hover_glyph":null,"muted_glyph":null,"nonselection_glyph":{"id":"13148","type":"Circle"},"selection_glyph":null,"view":{"id":"13150","type":"CDSView"}},"id":"13149","type":"GlyphRenderer"},{"attributes":{},"id":"13164","type":"BasicTicker"},{"attributes":{"fill_color":{"value":"red"},"line_color":{"value":"red"},"x":{"field":"x"},"y":{"field":"y"}},"id":"13147","type":"Circle"},{"attributes":{},"id":"13168","type":"UnionRenderers"},{"attributes":{"plot":null,"text":""},"id":"13159","type":"Title"},{"attributes":{"callback":null,"data":{"x":{"__ndarray__":"GC1EVPshGcCyxt3tlLsYwExgd4cuVRjA5vkQIcjuF8CAk6q6YYgXwBotRFT7IRfAtMbd7ZS7FsBOYHeHLlUWwOj5ECHI7hXAgpOqumGIFcAcLURU+yEVwLbG3e2UuxTAUGB3hy5VFMDq+RAhyO4TwISTqrphiBPAHi1EVPshE8C4xt3tlLsSwFJgd4cuVRLA7PkQIcjuEcCGk6q6YYgRwCAtRFT7IRHAusbd7ZS7EMBUYHeHLlUQwNzzIUKQ3Q/AECdVdcMQD8BEWoio9kMOwHiNu9spdw3ArMDuDl2qDMDg8yFCkN0LwBQnVXXDEAvASFqIqPZDCsB8jbvbKXcJwLDA7g5dqgjA5PMhQpDdB8AYJ1V1wxAHwExaiKj2QwbAgI272yl3BcC0wO4OXaoEwOjzIUKQ3QPAHCdVdcMQA8BQWoio9kMCwISNu9spdwHAuMDuDl2qAMDY50OEILv/v0BOquqGIf6/qLQQUe2H/L8QG3e3U+76v3iB3R26VPm/4OdDhCC7979ITqrqhiH2v7C0EFHth/S/GBt3t1Pu8r+Agd0dulTxv9DPhwhBdu+/oJxU1Q1D7L9waSGi2g/pv0A27m6n3OW/EAO7O3Sp4r/Anw8Rguzev2A5qaobhti/ANNCRLUf0r9A2bi7nXLHvwAZ2N2hS7W/AAIG7943kT8AGltVkee9P8BZeneVwMs/QJMjIrFG1D+g+YmIF63aPwAwePe+ieA/MGOrKvK84z9glt5dJfDmP5DJEZFYI+o/wPxExItW7T/4F7x730TwP5CxVRV53vE/KEvvrhJ48z/A5IhIrBH1P1h+IuJFq/Y/8Be8e99E+D+IsVUVed75PyBL764SePs/uOSISKwR/T9QfiLiRav+P/QL3r1vIgBAwNiqijzvAECMpXdXCbwBQFhyRCTWiAJAJD8R8aJVA0DwC969byIEQLzYqoo87wRAiKV3Vwm8BUBUckQk1ogGQCA/EfGiVQdA7AvevW8iCEC42KqKPO8IQISld1cJvAlAUHJEJNaICkAcPxHxolULQOgL3r1vIgxAtNiqijzvDECApXdXCbwNQExyRCTWiA5AGD8R8aJVD0DyBe/eNxEQQFhsVUWedxBAvtK7qwTeEEAkOSISa0QRQIqfiHjRqhFA8AXv3jcREkBWbFVFnncSQLzSu6sE3hJAIjkiEmtEE0CIn4h40aoTQO4F7943ERRAVGxVRZ53FEC60rurBN4UQCA5IhJrRBVAhp+IeNGqFUDsBe/eNxEWQFJsVUWedxZAuNK7qwTeFkAeOSISa0QXQISfiHjRqhdA6gXv3jcRGEBQbFVFnncYQLbSu6sE3hhA","dtype":"float64","shape":[126]},"y":{"__ndarray__":"B1wUMyamsTwky4vLro65P6zSPSP/bck/Jbq6lc3p0j9XZyvpOuzYP9gFS3Tort4/S0GlF40R4j+pGUZpbp3kP7z+v8KU9OY/MKhdmAMR6T/eDAmPVO3qPyhjLUzFhOw/t1UaokPT7T9BHsX5d9XuP/vgRN/NiO8/iG0sm3rr7z/HQuDHgfzvP72YL9e3u+8//RW9gcIp7z9qOBofF0juP2C00er2GO0/HHPrP2mf6z8x16rZM9/pP1MvWC3R3Oc/0tHD9WSd5T/vys8NrybjPwGGoLv8fuA/p6ckJDFa2z9p5l0jcnDVPz8ySNa4n84/z9a2bTgQwj9JFpiCD0qlP9gm6Dk3462/9Qq3ygIxxL+gS/8QyVrQv1Jhun47c9a/JdHwQ0FS3L/dGi7savTgv0DA7P9WlOO/crMfLS4C5r9cHtzduTfov3DI4npTL+q/lhLQ3/Lj67/viQE6O1Htv8QoRTGGc+6/Y0DHPe1H77+p7F0TUczvv1HsNw9f/++/DT4MmpTg77/B6yJ2QHDvv2CZ4PWBr+6/9yLYHEag7b8lJbqyQkXsv5cSvFTvoeq/3gUyl3y66L/SsApOyZPmv0H2lhdWM+S/OMh6STef4b8a4Y7HCrzdv3u+C2SY7de/LaSwivHh0b9m/jPXGVHHv0bTvttYRbW/xSXLwqk3kT/JIzUvK9a9PxjIxkANics//AfpL0vw0z91hzWkD+nZP4cP6pqNn98/CxrYG5SC4j9hCW3CCAblP+5iZOG2U+c/hMjjJbpl6T/Mcx3cxjbrP44pBXs3wuw/3a38hhgE7j/R9xCtMvnuPzNU7vwSn+8/IbeALBH07z+1XzrVU/fvP6DPKKDSqO8/pbRJW1YJ7z+JM+n3dhruP8RhK3eX3uw/Jvgtz99Y6z+f2l/YNI3pP+WxtFcugOc/zaIuPws35T/F08pCpLfiP3g4I+RcCOA/mTEMNiZg2j8025+JG2zUPwSg1R2oh8w/qzXOvj7cvz+ZZ0Soul6ZPxMepJQaPbO/RLIV
CFdQxr8Pn7GuhmTRv7Nrs+BkdNe/O7QgoURI3b9ITvP1nmjhv9zlVSmUAOS/XGXQmV9l5r9ZNdHU4ZDov+OMb++Nfeq/d0LwuHcm7L9QJSxSYIftv8TXognBnO6/SRtzYNRj779HhCYhndrvv+yTLnbr/++/NaAP81/T77/KpXWIbFXvv+6vwmBTh+6/2k8AqCNr7b+D929HtAPsv7RWM6KcVOq/8K2gZSti6L8bWsKEWzHmvw8RMHvHx+O/52bX95or4b9I9bkoBsfcv2FS64I67da/RcPLJ8rY0L9F3NCChDLFvyPcIcx4+rC/","dtype":"float64","shape":[126]}},"selected":{"id":"13167","type":"Selection"},"selection_policy":{"id":"13168","type":"UnionRenderers"}},"id":"13146","type":"ColumnDataSource"},{"attributes":{},"id":"13131","type":"PanTool"},{"attributes":{},"id":"13135","type":"ResetTool"},{"attributes":{"data_source":{"id":"13152","type":"ColumnDataSource"},"glyph":{"id":"13153","type":"Circle"},"hover_glyph":null,"muted_glyph":null,"nonselection_glyph":{"id":"13154","type":"Circle"},"selection_glyph":null,"view":{"id":"13156","type":"CDSView"},"y_range_name":"foo"},"id":"13155","type":"GlyphRenderer"},{"attributes":{"source":{"id":"13152","type":"ColumnDataSource"}},"id":"13156","type":"CDSView"},{"attributes":{},"id":"13132","type":"WheelZoomTool"},{"attributes":{"formatter":{"id":"13165","type":"BasicTickFormatter"},"plot":{"id":"13112","subtype":"Figure","type":"Plot"},"ticker":{"id":"13164","type":"BasicTicker"},"y_range_name":"foo"},"id":"13157","type":"LinearAxis"},{"attributes":{},"id":"13167","type":"Selection"},{"attributes":{"overlay":{"id":"13139","type":"BoxAnnotation"}},"id":"13133","type":"BoxZoomTool"},{"attributes":{},"id":"13134","type":"SaveTool"},{"attributes":{},"id":"13165","type":"BasicTickFormatter"},{"attributes":{"below":[{"id":"13121","type":"LinearAxis"}],"extra_y_ranges":{"foo":{"id":"13151","type":"Range1d"}},"left":[{"id":"13126","type":"LinearAxis"},{"id":"13157","type":"LinearAxis"}],"renderers":[{"id":"13121","type":"LinearAxis"},{"id":"13125","type":"Grid"},{"id":"13126","type":"LinearAxis"},{"id":"13130","type":"Grid"},{"id":"13139","type":"BoxAnnotation"},{"id":"13149","type":"GlyphRenderer"},{"id":"13155","type":"GlyphRenderer"},{"id":"13157","type":"LinearAxis"}],"title":{"id":"13159","type":"Title"},"toolbar":{"id":"13137","type":"Toolbar"},"x_range":{"id":"13113","type":"Range1d"},"x_scale":{"id":"13117","type":"LinearScale"},"y_range":{"id":"13115","type":"Range1d"},"y_scale":{"id":"13119","type":"LinearScale"}},"id":"13112","subtype":"Figure","type":"Plot"},{"attributes":{"fill_alpha":{"value":0.1},"fill_color":{"value":"#1f77b4"},"line_alpha":{"value":0.1},"line_color":{"value":"#1f77b4"},"x":{"field":"x"},"y":{"field":"y"}},"id":"13148","type":"Circle"},{"attributes":{},"id":"13136","type":"HelpTool"},{"attributes":{"active_drag":"auto","active_inspect":"auto","active_multi":null,"active_scroll":"auto","active_tap":"auto","tools":[{"id":"13131","type":"PanTool"},{"id":"13132","type":"WheelZoomTool"},{"id":"13133","type":"BoxZoomTool"},{"id":"13134","type":"SaveTool"},{"id":"13135","type":"ResetTool"},{"id":"13136","type":"HelpTool"}]},"id":"13137","type":"Toolbar"},{"attributes":{"callback":null,"end":6.5,"start":-6.5},"id":"13113","type":"Range1d"},{"attributes":{},"id":"13170","type":"UnionRenderers"},{"attributes":{"callback":null,"end":1.1,"start":-1.1},"id":"13115","type":"Range1d"},{"attributes":{},"id":"13169","type":"Selection"},{"attributes":{"fill_alpha":{"value":0.1},"fill_color":{"value":"#1f77b4"},"line_alpha":{"value":0.1},"line_color":{"value":"#1f77b4"},"x":{"field":"x"},"y":{"field":"y"}},"id":"13154","type":"Circle"},{"attributes":{},"id":"13117","type":"LinearScale"},{"attributes":{"bottom_units":"screen","fill_alpha":{"value":0.
5},"fill_color":{"value":"lightgrey"},"left_units":"screen","level":"overlay","line_alpha":{"value":1.0},"line_color":{"value":"black"},"line_dash":[4,4],"line_width":{"value":2},"plot":null,"render_mode":"css","right_units":"screen","top_units":"screen"},"id":"13139","type":"BoxAnnotation"},{"attributes":{},"id":"13119","type":"LinearScale"},{"attributes":{"formatter":{"id":"13161","type":"BasicTickFormatter"},"plot":{"id":"13112","subtype":"Figure","type":"Plot"},"ticker":{"id":"13122","type":"BasicTicker"}},"id":"13121","type":"LinearAxis"},{"attributes":{"source":{"id":"13146","type":"ColumnDataSource"}},"id":"13150","type":"CDSView"},{"attributes":{"callback":null,"end":100},"id":"13151","type":"Range1d"},{"attributes":{"dimension":1,"plot":{"id":"13112","subtype":"Figure","type":"Plot"},"ticker":{"id":"13127","type":"BasicTicker"}},"id":"13130","type":"Grid"},{"attributes":{},"id":"13122","type":"BasicTicker"},{"attributes":{"callback":null,"data":{"x":{"__ndarray__":"GC1EVPshGcCyxt3tlLsYwExgd4cuVRjA5vkQIcjuF8CAk6q6YYgXwBotRFT7IRfAtMbd7ZS7FsBOYHeHLlUWwOj5ECHI7hXAgpOqumGIFcAcLURU+yEVwLbG3e2UuxTAUGB3hy5VFMDq+RAhyO4TwISTqrphiBPAHi1EVPshE8C4xt3tlLsSwFJgd4cuVRLA7PkQIcjuEcCGk6q6YYgRwCAtRFT7IRHAusbd7ZS7EMBUYHeHLlUQwNzzIUKQ3Q/AECdVdcMQD8BEWoio9kMOwHiNu9spdw3ArMDuDl2qDMDg8yFCkN0LwBQnVXXDEAvASFqIqPZDCsB8jbvbKXcJwLDA7g5dqgjA5PMhQpDdB8AYJ1V1wxAHwExaiKj2QwbAgI272yl3BcC0wO4OXaoEwOjzIUKQ3QPAHCdVdcMQA8BQWoio9kMCwISNu9spdwHAuMDuDl2qAMDY50OEILv/v0BOquqGIf6/qLQQUe2H/L8QG3e3U+76v3iB3R26VPm/4OdDhCC7979ITqrqhiH2v7C0EFHth/S/GBt3t1Pu8r+Agd0dulTxv9DPhwhBdu+/oJxU1Q1D7L9waSGi2g/pv0A27m6n3OW/EAO7O3Sp4r/Anw8Rguzev2A5qaobhti/ANNCRLUf0r9A2bi7nXLHvwAZ2N2hS7W/AAIG7943kT8AGltVkee9P8BZeneVwMs/QJMjIrFG1D+g+YmIF63aPwAwePe+ieA/MGOrKvK84z9glt5dJfDmP5DJEZFYI+o/wPxExItW7T/4F7x730TwP5CxVRV53vE/KEvvrhJ48z/A5IhIrBH1P1h+IuJFq/Y/8Be8e99E+D+IsVUVed75PyBL764SePs/uOSISKwR/T9QfiLiRav+P/QL3r1vIgBAwNiqijzvAECMpXdXCbwBQFhyRCTWiAJAJD8R8aJVA0DwC969byIEQLzYqoo87wRAiKV3Vwm8BUBUckQk1ogGQCA/EfGiVQdA7AvevW8iCEC42KqKPO8IQISld1cJvAlAUHJEJNaICkAcPxHxolULQOgL3r1vIgxAtNiqijzvDECApXdXCbwNQExyRCTWiA5AGD8R8aJVD0DyBe/eNxEQQFhsVUWedxBAvtK7qwTeEEAkOSISa0QRQIqfiHjRqhFA8AXv3jcREkBWbFVFnncSQLzSu6sE3hJAIjkiEmtEE0CIn4h40aoTQO4F7943ERRAVGxVRZ53FEC60rurBN4UQCA5IhJrRBVAhp+IeNGqFUDsBe/eNxEWQFJsVUWedxZAuNK7qwTeFkAeOSISa0QXQISfiHjRqhdA6gXv3jcRGEBQbFVFnncYQLbSu6sE3hhA","dtype":"float64","shape":[126]},"y":{"__ndarray__":"AAAAAAAAAACamZmZmZnpP5qZmZmZmfk/NDMzMzMzA0CamZmZmZkJQAAAAAAAABBANDMzMzMzE0BnZmZmZmYWQJqZmZmZmRlAzczMzMzMHEAAAAAAAAAgQJqZmZmZmSFANDMzMzMzI0DNzMzMzMwkQGdmZmZmZiZAAAAAAAAAKECamZmZmZkpQDQzMzMzMytAzczMzMzMLEBnZmZmZmYuQAAAAAAAADBAzczMzMzMMECamZmZmZkxQGdmZmZmZjJANDMzMzMzM0AAAAAAAAA0QM3MzMzMzDRAmpmZmZmZNUBnZmZmZmY2QDQzMzMzMzdAAAAAAAAAOEDNzMzMzMw4QJqZmZmZmTlAZ2ZmZmZmOkA0MzMzMzM7QAAAAAAAADxAzczMzMzMPECamZmZmZk9QGdmZmZmZj5ANDMzMzMzP0AAAAAAAABAQGdmZmZmZkBAzczMzMzMQEAzMzMzMzNBQJqZmZmZmUFAAAAAAAAAQkBnZmZmZmZCQM3MzMzMzEJANDMzMzMzQ0CamZmZmZlDQAAAAAAAAERAZ2ZmZmZmREDNzMzMzMxEQDQzMzMzM0VAmpmZmZmZRUAAAAAAAABGQGdmZmZmZkZAzczMzMzMRkA0MzMzMzNHQJqZmZmZmUdAAAAAAAAASEBnZmZmZmZIQM3MzMzMzEhANDMzMzMzSUCamZmZmZlJQAAAAAAAAEpAZ2ZmZmZmSkDNzMzMzMxKQDQzMzMzM0tAmpmZmZmZS0AAAAAAAABMQGdmZmZmZkxAzczMzMzMTEA0MzMzMzNNQJqZmZmZmU1AAAAAAAAATkBnZmZmZmZOQM3MzMzMzE5ANDMzMzMzT0CamZmZmZlPQAAAAAAAAFBAMzMzMzMzUEBnZmZmZmZQQJqZmZmZmVBAzczMzMzMUEAAAAAAAABRQDMzMzMzM1FAZ2ZmZmZmUUCamZmZmZlRQM3MzMzMzFFAAAAAAAAAUkAzMzMzMzNSQGdmZmZmZlJAmpmZmZmZUkDNzMzMzMxSQAAAAAAAAFNANDMzMzMzU0BnZmZmZmZTQJqZmZmZmVNAzczMzMzMU0AAAAAAAABUQDQzMzMzM1RAZ2ZmZmZmVECamZmZmZlUQM3MzMzMzFRAAAAAAAAAVUA0MzMzMzNVQGdmZmZmZlVAmpmZmZmZVUDNzMz
MzMxVQAAAAAAAAFZANDMzMzMzVkBnZmZmZmZWQJqZmZmZmVZAzczMzMzMVkAAAAAAAABXQDQzMzMzM1dAZ2ZmZmZmV0CamZmZmZlXQM3MzMzMzFdAAAAAAAAAWEA0MzMzMzNYQGdmZmZmZlhAmpmZmZmZWEDNzMzMzMxYQAAAAAAAAFlA","dtype":"float64","shape":[126]}},"selected":{"id":"13169","type":"Selection"},"selection_policy":{"id":"13170","type":"UnionRenderers"}},"id":"13152","type":"ColumnDataSource"},{"attributes":{"plot":{"id":"13112","subtype":"Figure","type":"Plot"},"ticker":{"id":"13122","type":"BasicTicker"}},"id":"13125","type":"Grid"},{"attributes":{"fill_color":{"value":"blue"},"line_color":{"value":"blue"},"x":{"field":"x"},"y":{"field":"y"}},"id":"13153","type":"Circle"}],"root_ids":["13112"]},"title":"Bokeh Application","version":"1.0.0"}}';
var render_items = [{"docid":"9429c435-1d14-4a44-ba18-14c523013f30","roots":{"13112":"efaf503e-e285-4df4-b593-bca44f8c4e38"}}];
root.Bokeh.embed.embed_items(docs_json, render_items);
}
if (root.Bokeh !== undefined) {
embed_document(root);
} else {
var attempts = 0;
var timer = setInterval(function(root) {
if (root.Bokeh !== undefined) {
embed_document(root);
clearInterval(timer);
}
attempts++;
if (attempts > 100) {
console.log("Bokeh: ERROR: Unable to run BokehJS code because BokehJS library is missing");
clearInterval(timer);
}
}, 10, root)
}
})(window);
});
};
if (document.readyState != "loading") fn();
else document.addEventListener("DOMContentLoaded", fn);
})();
},
function(Bokeh) {
console.log("Bokeh: injecting CSS: https://cdn.bokeh.org/bokeh/release/bokeh-1.0.0.min.css");
Bokeh.embed.inject_css("https://cdn.bokeh.org/bokeh/release/bokeh-1.0.0.min.css");
console.log("Bokeh: injecting CSS: https://cdn.bokeh.org/bokeh/release/bokeh-widgets-1.0.0.min.css");
Bokeh.embed.inject_css("https://cdn.bokeh.org/bokeh/release/bokeh-widgets-1.0.0.min.css");
console.log("Bokeh: injecting CSS: https://cdn.bokeh.org/bokeh/release/bokeh-tables-1.0.0.min.css");
Bokeh.embed.inject_css("https://cdn.bokeh.org/bokeh/release/bokeh-tables-1.0.0.min.css");
}
];
function run_inline_js() {
for (var i = 0; i < inline_js.length; i++) {
inline_js[i].call(root, root.Bokeh);
}
|
if (root._bokeh_is_loading === 0) {
console.log("Bokeh: BokehJS loaded, going straight to plotting");
run_inline_js();
} else {
load_libs(js_urls, function() {
console.log("Bokeh: BokehJS plotting callback run at", now());
run_inline_js();
});
}
}(window));
};
if (document.readyState != "loading") fn();
else document.addEventListener("DOMContentLoaded", fn);
})();
|
}
|
engine.rs
|
use crate::{ffi::*, sys::SwrEngine::*};
#[derive(Eq, PartialEq, Copy, Clone, Debug)]
pub enum Engine {
Software,
SoundExchange,
}
impl From<SwrEngine> for Engine {
fn from(value: SwrEngine) -> Engine {
match value {
SWR_ENGINE_SWR => Engine::Software,
SWR_ENGINE_SOXR => Engine::SoundExchange,
SWR_ENGINE_NB => Engine::Software,
}
}
}
impl Into<SwrEngine> for Engine {
fn into(self) -> SwrEngine {
|
}
}
}
|
match self {
Engine::Software => SWR_ENGINE_SWR,
Engine::SoundExchange => SWR_ENGINE_SOXR,
|
loadbalancernetworkinterfaces.go
|
package network
// Copyright (c) Microsoft and contributors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
//
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
import (
"context"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/Azure/go-autorest/tracing"
"net/http"
)
// LoadBalancerNetworkInterfacesClient is the network Client
type LoadBalancerNetworkInterfacesClient struct {
BaseClient
}
// NewLoadBalancerNetworkInterfacesClient creates an instance of the LoadBalancerNetworkInterfacesClient client.
func NewLoadBalancerNetworkInterfacesClient(subscriptionID string) LoadBalancerNetworkInterfacesClient {
return NewLoadBalancerNetworkInterfacesClientWithBaseURI(DefaultBaseURI, subscriptionID)
}
// NewLoadBalancerNetworkInterfacesClientWithBaseURI creates an instance of the LoadBalancerNetworkInterfacesClient
// client using a custom endpoint. Use this when interacting with an Azure cloud that uses a non-standard base URI
// (sovereign clouds, Azure stack).
func NewLoadBalancerNetworkInterfacesClientWithBaseURI(baseURI string, subscriptionID string) LoadBalancerNetworkInterfacesClient {
return LoadBalancerNetworkInterfacesClient{NewWithBaseURI(baseURI, subscriptionID)}
}
// List gets associated load balancer network interfaces.
// Parameters:
// resourceGroupName - the name of the resource group.
// loadBalancerName - the name of the load balancer.
func (client LoadBalancerNetworkInterfacesClient) List(ctx context.Context, resourceGroupName string, loadBalancerName string) (result InterfaceListResultPage, err error) {
if tracing.IsEnabled() {
ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancerNetworkInterfacesClient.List")
defer func() {
sc := -1
if result.ilr.Response.Response != nil {
sc = result.ilr.Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
result.fn = client.listNextResults
req, err := client.ListPreparer(ctx, resourceGroupName, loadBalancerName)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "List", nil, "Failure preparing request")
return
}
resp, err := client.ListSender(req)
if err != nil {
result.ilr.Response = autorest.Response{Response: resp}
err = autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "List", resp, "Failure sending request")
return
}
result.ilr, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "List", resp, "Failure responding to request")
}
if result.ilr.hasNextLink() && result.ilr.IsEmpty() {
err = result.NextWithContext(ctx)
}
return
}
// ListPreparer prepares the List request.
func (client LoadBalancerNetworkInterfacesClient) ListPreparer(ctx context.Context, resourceGroupName string, loadBalancerName string) (*http.Request, error) {
pathParameters := map[string]interface{}{
"loadBalancerName": autorest.Encode("path", loadBalancerName),
"resourceGroupName": autorest.Encode("path", resourceGroupName),
"subscriptionId": autorest.Encode("path", client.SubscriptionID),
}
const APIVersion = "2018-07-01"
queryParameters := map[string]interface{}{
"api-version": APIVersion,
}
preparer := autorest.CreatePreparer(
autorest.AsGet(),
autorest.WithBaseURL(client.BaseURI),
autorest.WithPathParameters("/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/loadBalancers/{loadBalancerName}/networkInterfaces", pathParameters),
autorest.WithQueryParameters(queryParameters))
return preparer.Prepare((&http.Request{}).WithContext(ctx))
}
// ListSender sends the List request. The method will close the
// http.Response Body if it receives an error.
func (client LoadBalancerNetworkInterfacesClient) ListSender(req *http.Request) (*http.Response, error) {
return client.Send(req, azure.DoRetryWithRegistration(client.Client))
}
// ListResponder handles the response to the List request. The method always
// closes the http.Response Body.
func (client LoadBalancerNetworkInterfacesClient) ListResponder(resp *http.Response) (result InterfaceListResult, err error) {
err = autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result),
autorest.ByClosing())
result.Response = autorest.Response{Response: resp}
return
}
// listNextResults retrieves the next set of results, if any.
func (client LoadBalancerNetworkInterfacesClient) listNextResults(ctx context.Context, lastResults InterfaceListResult) (result InterfaceListResult, err error) {
req, err := lastResults.interfaceListResultPreparer(ctx)
if err != nil {
return result, autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "listNextResults", nil, "Failure preparing next results request")
}
if req == nil {
return
}
resp, err := client.ListSender(req)
if err != nil {
result.Response = autorest.Response{Response: resp}
return result, autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "listNextResults", resp, "Failure sending next results request")
}
result, err = client.ListResponder(resp)
if err != nil {
err = autorest.NewErrorWithError(err, "network.LoadBalancerNetworkInterfacesClient", "listNextResults", resp, "Failure responding to next results request")
}
return
}
// ListComplete enumerates all values, automatically crossing page boundaries as required.
func (client LoadBalancerNetworkInterfacesClient) ListComplete(ctx context.Context, resourceGroupName string, loadBalancerName string) (result InterfaceListResultIterator, err error) {
if tracing.IsEnabled()
|
result.page, err = client.List(ctx, resourceGroupName, loadBalancerName)
return
}
|
{
ctx = tracing.StartSpan(ctx, fqdn+"/LoadBalancerNetworkInterfacesClient.List")
defer func() {
sc := -1
if result.Response().Response.Response != nil {
sc = result.page.Response().Response.Response.StatusCode
}
tracing.EndSpan(ctx, sc, err)
}()
}
|
connectivity_status.go
|
// Code generated by go-swagger; DO NOT EDIT.
// Copyright Authors of Cilium
// SPDX-License-Identifier: Apache-2.0
package models
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
import (
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag"
)
// ConnectivityStatus Connectivity status of a path
//
// swagger:model ConnectivityStatus
type ConnectivityStatus struct {
// Round trip time to node in nanoseconds
Latency int64 `json:"latency,omitempty"`
// Human readable status/error/warning message
Status string `json:"status,omitempty"`
}
// Validate validates this connectivity status
func (m *ConnectivityStatus) Validate(formats strfmt.Registry) error {
return nil
}
// MarshalBinary interface implementation
func (m *ConnectivityStatus) MarshalBinary() ([]byte, error) {
if m == nil {
|
return nil, nil
}
return swag.WriteJSON(m)
}
// UnmarshalBinary interface implementation
func (m *ConnectivityStatus) UnmarshalBinary(b []byte) error {
var res ConnectivityStatus
if err := swag.ReadJSON(b, &res); err != nil {
return err
}
*m = res
return nil
}
| |
create_character_database.py
|
""" Character database schema """
from sqlalchemy import Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
base = declarative_base()
class Character(base):
""" Character database schema """
__tablename__ = 'character'
id = Column(Integer, primary_key=True)
name = Column(String(250), nullable=False)
strength = Column(Integer)
inteligence = Column(Integer)
energy_projection = Column(Integer)
mental_power = Column(Integer)
fightning_ability = Column(Integer)
speed = Column(Integer)
wins = Column(Integer)
loses = Column(Integer)
overall_wins = Column(Integer)
overall_losses = Column(Integer)
image = Column(String(500))
@property
def
|
(self):
"""Return object data in easily serializeable format"""
return {
'name': self.name,
'id': self.id,
'strength': self.strength,
'inteligence': self.inteligence,
'energy_projection': self.energy_projection,
'mental_power': self.mental_power,
'fightning_ability': self.fightning_ability,
'speed': self.speed,
'wins': self.wins,
'loses': self.loses,
'overall_wins': self.overall_wins,
'overall_losses': self.overall_losses,
'image': self.image,
}
ENGINE = create_engine('sqlite:///characters.db')
base.metadata.create_all(ENGINE)
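# Minimal usage sketch (illustrative only; the name and stats are made up):
# inserting one row into the characters.db created above.
if __name__ == '__main__':
    from sqlalchemy.orm import sessionmaker
    Session = sessionmaker(bind=ENGINE)
    session = Session()
    session.add(Character(name='Example Hero', strength=5, speed=7))
    session.commit()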
|
serialize
|
HtmlReporter.spec.ts
|
import * as path from 'path';
import { mutationTestReportSchema } from '@stryker-mutator/api/report';
import { testInjector } from '@stryker-mutator/test-helpers';
import { expect } from 'chai';
import * as sinon from 'sinon';
import HtmlReporter from '../../../../src/reporters/html/HtmlReporter';
import * as HtmlReporterUtil from '../../../../src/reporters/html/HtmlReporterUtil';
import { bindMutationTestReport } from '../../../../src/reporters/html/templates/bindMutationTestReport';
describe(HtmlReporter.name, () => {
let copyFileStub: sinon.SinonStub;
let writeFileStub: sinon.SinonStub;
let mkdirStub: sinon.SinonStub;
let deleteDirStub: sinon.SinonStub;
let sut: HtmlReporter;
beforeEach(() => {
copyFileStub = sinon.stub(HtmlReporterUtil, 'copyFile');
writeFileStub = sinon.stub(HtmlReporterUtil, 'writeFile');
deleteDirStub = sinon.stub(HtmlReporterUtil, 'deleteDir');
mkdirStub = sinon.stub(HtmlReporterUtil, 'mkdir');
sut = testInjector.injector.injectClass(HtmlReporter);
});
describe('onMutationTestReportReady', () => {
it('should use configured base directory', async () => {
testInjector.options.htmlReporter = { baseDir: 'foo/bar' };
actReportReady();
await sut.wrapUp();
expect(testInjector.logger.debug).calledWith('Using configured output folder foo/bar');
expect(deleteDirStub).calledWith('foo/bar');
});
it('should use default base directory when no override is configured', async () => {
const expectedBaseDir = path.normalize('reports/mutation/html');
actReportReady();
await sut.wrapUp();
expect(testInjector.logger.debug).calledWith(
`No base folder configuration found (using configuration: htmlReporter: { baseDir: 'output/folder' }), using default ${expectedBaseDir}`
);
expect(deleteDirStub).calledWith(expectedBaseDir);
});
|
expect(deleteDirStub).calledWith(path.normalize('reports/mutation/html'));
expect(mkdirStub).calledWith(path.normalize('reports/mutation/html'));
expect(deleteDirStub).calledBefore(mkdirStub);
});
it('should copy the template files', async () => {
actReportReady();
await sut.wrapUp();
expect(copyFileStub).calledWith(
path.resolve(__dirname, '..', '..', '..', '..', 'src', 'reporters', 'html', 'templates', 'stryker-80x80.png'),
path.resolve('reports', 'mutation', 'html', 'stryker-80x80.png')
);
expect(copyFileStub).calledWith(
path.resolve(__dirname, '..', '..', '..', '..', 'src', 'reporters', 'html', 'templates', 'index.html'),
path.resolve('reports', 'mutation', 'html', 'index.html')
);
});
it('should write the mutation report to disk', async () => {
const report: mutationTestReportSchema.MutationTestResult = {
files: {},
schemaVersion: '1.0',
thresholds: {
high: 80,
low: 60
}
};
sut.onMutationTestReportReady(report);
await sut.wrapUp();
expect(writeFileStub).calledWith(path.resolve('reports', 'mutation', 'html', 'bind-mutation-test-report.js'), bindMutationTestReport(report));
});
});
describe('wrapUp', () => {
it('should resolve when everything is OK', () => {
actReportReady();
return expect(sut.wrapUp()).eventually.undefined;
});
it('should reject when "deleteDir" rejects', () => {
const expectedError = new Error('delete dir');
deleteDirStub.rejects(expectedError);
actReportReady();
return expect(sut.wrapUp()).rejectedWith(expectedError);
});
it('should reject when "mkdir" rejects', () => {
const expectedError = new Error('mkdir');
mkdirStub.rejects(expectedError);
actReportReady();
return expect(sut.wrapUp()).rejectedWith(expectedError);
});
it('should reject when "writeFile" rejects', () => {
const expectedError = new Error('writeFile');
writeFileStub.rejects(expectedError);
actReportReady();
return expect(sut.wrapUp()).rejectedWith(expectedError);
});
it('should reject when "copyFile" rejects', () => {
const expectedError = new Error('copyFile');
copyFileStub.rejects(expectedError);
actReportReady();
return expect(sut.wrapUp()).rejectedWith(expectedError);
});
});
function actReportReady() {
sut.onMutationTestReportReady({ files: {}, schemaVersion: '', thresholds: { high: 0, low: 0 } });
}
});
|
it('should clean the base directory', async () => {
actReportReady();
await sut.wrapUp();
|
make_tests.py
|
import numpy as np
import pprint
from keras.models import Sequential
from keras.layers import Convolution2D, Dense, Flatten, Activation, MaxPooling2D, Dropout
from keras.layers.recurrent import LSTM
from keras.layers.advanced_activations import ELU
from keras.layers.embeddings import Embedding
from kerasify import export_model
np.set_printoptions(precision=25, threshold=np.nan)
def c_array(a):
s = pprint.pformat(a.flatten())
s = s.replace('[', '{').replace(']', '}').replace('array(', '').replace(')', '').replace(', dtype=float32', '')
shape = ''
if a.shape == ():
s = '{%s}' % s
shape = '(1)'
else:
shape = repr(a.shape).replace(',)', ')')
return shape, s
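# Rough example of what c_array() produces (exact formatting depends on
# numpy's print options, so treat this as approximate):
#   c_array(np.array([1., 2.], dtype='f'))  ->  ('(2)', '{1., 2.}')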
TEST_CASE = '''
bool test_%s(double* load_time, double* apply_time)
{
printf("TEST %s\\n");
KASSERT(load_time, "Invalid double");
KASSERT(apply_time, "Invalid double");
Tensor in%s;
in.data_ = %s;
Tensor out%s;
out.data_ = %s;
KerasTimer load_timer;
load_timer.Start();
KerasModel model;
KASSERT(model.LoadModel("test_%s.model"), "Failed to load model");
*load_time = load_timer.Stop();
KerasTimer apply_timer;
apply_timer.Start();
Tensor predict = out;
KASSERT(model.Apply(&in, &out), "Failed to apply");
*apply_time = apply_timer.Stop();
for (int i = 0; i < out.dims_[0]; i++)
{
KASSERT_EQ(out(i), predict(i), %s);
}
return true;
}
'''
def output_testcase(model, test_x, test_y, name, eps):
print("Processing %s" % name)
model.compile(loss='mean_squared_error', optimizer='adamax')
model.fit(test_x, test_y, nb_epoch=1, verbose=False)
predict_y = model.predict(test_x).astype('f')
print(model.summary())
export_model(model, 'test_%s.model' % name)
with open('test_%s.h' % name, 'w') as f:
x_shape, x_data = c_array(test_x[0])
y_shape, y_data = c_array(predict_y[0])
f.write(TEST_CASE % (name, name, x_shape, x_data, y_shape, y_data, name, eps))
''' Dense 1x1 '''
test_x = np.arange(10)
test_y = test_x * 10 + 1
model = Sequential()
model.add(Dense(1, input_dim=1))
output_testcase(model, test_x, test_y, 'dense_1x1', '1e-6')
''' Dense 10x1 '''
test_x = np.random.rand(10, 10).astype('f')
test_y = np.random.rand(10).astype('f')
model = Sequential()
model.add(Dense(1, input_dim=10))
output_testcase(model, test_x, test_y, 'dense_10x1', '1e-6')
''' Dense 2x2 '''
|
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'dense_2x2', '1e-6')
''' Dense 10x10 '''
test_x = np.random.rand(10, 10).astype('f')
test_y = np.random.rand(10).astype('f')
model = Sequential()
model.add(Dense(10, input_dim=10))
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'dense_10x10', '1e-6')
''' Dense 10x10x10 '''
test_x = np.random.rand(10, 10).astype('f')
test_y = np.random.rand(10, 10).astype('f')
model = Sequential()
model.add(Dense(10, input_dim=10))
model.add(Dense(10))
output_testcase(model, test_x, test_y, 'dense_10x10x10', '1e-6')
''' Conv 2x2 '''
test_x = np.random.rand(10, 1, 2, 2).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2)))
model.add(Flatten())
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'conv_2x2', '1e-6')
''' Conv 3x3 '''
test_x = np.random.rand(10, 1, 3, 3).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(Convolution2D(1, 3, 3, input_shape=(1, 3, 3)))
model.add(Flatten())
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'conv_3x3', '1e-6')
''' Conv 3x3x3 '''
test_x = np.random.rand(10, 3, 10, 10).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(Convolution2D(3, 3, 3, input_shape=(3, 10, 10)))
model.add(Flatten())
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'conv_3x3x3', '1e-6')
''' Activation ELU '''
test_x = np.random.rand(1, 10).astype('f')
test_y = np.random.rand(1, 1).astype('f')
model = Sequential()
model.add(Dense(10, input_dim=10))
model.add(ELU(alpha=0.5))
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'elu_10', '1e-6')
''' Activation relu '''
test_x = np.random.rand(1, 10).astype('f')
test_y = np.random.rand(1, 10).astype('f')
model = Sequential()
model.add(Dense(10, input_dim=10))
model.add(Activation('relu'))
output_testcase(model, test_x, test_y, 'relu_10', '1e-6')
''' Dense relu '''
test_x = np.random.rand(1, 10).astype('f')
test_y = np.random.rand(1, 10).astype('f')
model = Sequential()
model.add(Dense(10, input_dim=10, activation='relu'))
model.add(Dense(10, input_dim=10, activation='relu'))
model.add(Dense(10, input_dim=10, activation='relu'))
output_testcase(model, test_x, test_y, 'dense_relu_10', '1e-6')
''' Dense tanh '''
test_x = np.random.rand(1, 10).astype('f')
test_y = np.random.rand(1, 10).astype('f')
model = Sequential()
model.add(Dense(10, input_dim=10, activation='tanh'))
model.add(Dense(10, input_dim=10, activation='tanh'))
model.add(Dense(10, input_dim=10, activation='tanh'))
output_testcase(model, test_x, test_y, 'dense_tanh_10', '1e-6')
''' Conv softplus '''
test_x = np.random.rand(10, 1, 2, 2).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2), activation='softplus'))
model.add(Flatten())
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'conv_softplus_2x2', '1e-6')
''' Conv hardsigmoid '''
test_x = np.random.rand(10, 1, 2, 2).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2), activation='hard_sigmoid'))
model.add(Flatten())
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'conv_hard_sigmoid_2x2', '1e-6')
''' Conv sigmoid '''
test_x = np.random.rand(10, 1, 2, 2).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(Convolution2D(1, 2, 2, input_shape=(1, 2, 2), activation='sigmoid'))
model.add(Flatten())
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'conv_sigmoid_2x2', '1e-6')
''' Maxpooling2D 1x1'''
test_x = np.random.rand(10, 1, 10, 10).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(MaxPooling2D(pool_size=(1, 1), input_shape=(1, 10, 10)))
model.add(Flatten())
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'maxpool2d_1x1', '1e-6')
''' Maxpooling2D 2x2'''
test_x = np.random.rand(10, 1, 10, 10).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(MaxPooling2D(pool_size=(2, 2), input_shape=(1, 10, 10)))
model.add(Flatten())
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'maxpool2d_2x2', '1e-6')
''' Maxpooling2D 3x2x2'''
test_x = np.random.rand(10, 3, 10, 10).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(MaxPooling2D(pool_size=(2, 2), input_shape=(3, 10, 10)))
model.add(Flatten())
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'maxpool2d_3x2x2', '1e-6')
''' Maxpooling2D 3x3x3'''
test_x = np.random.rand(10, 3, 10, 10).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(MaxPooling2D(pool_size=(3, 3), input_shape=(3, 10, 10)))
model.add(Flatten())
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'maxpool2d_3x3x3', '1e-6')
''' LSTM simple 7x20 '''
test_x = np.random.rand(10, 7, 20).astype('f')
test_y = np.random.rand(10, 3).astype('f')
model = Sequential()
model.add(LSTM(3, return_sequences=False, input_shape=(7, 20)))
output_testcase(model, test_x, test_y, 'lstm_simple_7x20', '1e-6')
''' LSTM simple stacked 20x9 '''
test_x = np.random.rand(10, 20, 9).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(LSTM(32, return_sequences=False, input_shape=(20, 9)))
model.add(Dense(3, input_dim=32, activation='tanh'))
model.add(Dense(1))
output_testcase(model, test_x, test_y, 'lstm_simple_stacked20x9', '1e-6')
''' LSTM stacked 150x83 '''
test_x = np.random.rand(10, 150, 83).astype('f')
test_y = np.random.rand(10, 1).astype('f')
model = Sequential()
model.add(LSTM(32, return_sequences=True, input_shape=(150, 83)))
model.add(LSTM(32, return_sequences=False))
model.add(Dense(1, activation='sigmoid'))
output_testcase(model, test_x, test_y, 'lstm_stacked150x83', '1e-6')
''' Embedding 64 '''
np.random.seed(10)
test_x = np.random.randint(100, size=(32, 10)).astype('f')
test_y = np.random.rand(32, 20).astype('f')
model = Sequential()
model.add(Embedding(100, 64, input_length=10))
model.add(Flatten())
#model.add(Dropout(0.5))
model.add(Dense(20, activation='sigmoid'))
output_testcase(model, test_x, test_y, 'embedding64', '1e-6')
''' Benchmark '''
test_x = np.random.rand(1, 3, 128, 128).astype('f')
test_y = np.random.rand(1, 10).astype('f')
model = Sequential()
model.add(Convolution2D(16, 7, 7, input_shape=(3, 128, 128), activation='relu'))
model.add(MaxPooling2D(pool_size=(3, 3)))
model.add(ELU())
model.add(Convolution2D(8, 3, 3))
model.add(Flatten())
model.add(Dense(1000, activation='relu'))
model.add(Dense(10))
output_testcase(model, test_x, test_y, 'benchmark', '1e-3')
|
test_x = np.random.rand(10, 2).astype('f')
test_y = np.random.rand(10).astype('f')
model = Sequential()
model.add(Dense(2, input_dim=2))
|
abstract.py
|
import abc
import logging
from connexion.operations.secure import SecureOperation
from ..decorators.metrics import UWSGIMetricsCollector
from ..decorators.parameter import parameter_to_arg
from ..decorators.produces import BaseSerializer, Produces
from ..decorators.response import ResponseValidator
from ..decorators.validation import ParameterValidator, RequestBodyValidator
from ..utils import all_json, is_nullable, make_type
logger = logging.getLogger('connexion.operations.abstract')
DEFAULT_MIMETYPE = 'application/json'
VALIDATOR_MAP = {
'parameter': ParameterValidator,
'body': RequestBodyValidator,
'response': ResponseValidator,
}
class AbstractOperation(SecureOperation, metaclass=abc.ABCMeta):
"""
An API routes requests to an Operation by a (path, method) pair.
The operation uses a resolver to resolve its handler function.
We use the provided spec to do a bunch of heavy lifting before
(and after) we call security_schemes handler.
The registered handler function ends up looking something like:
@secure_endpoint
@validate_inputs
@deserialize_function_inputs
@serialize_function_outputs
@validate_outputs
def user_provided_handler_function(important, stuff):
if important:
serious_business(stuff)
"""
def __init__(self, api, method, path, operation, resolver,
app_security=None, security_schemes=None,
validate_responses=False, strict_validation=False,
randomize_endpoint=None, validator_map=None,
format_converters=None, pythonic_params=False,
uri_parser_class=None, pass_context_arg_name=None):
"""
:param api: api that this operation is attached to
:type api: apis.AbstractAPI
:param method: HTTP method
:type method: str
:param path:
:type path: str
:param operation: swagger operation object
:type operation: dict
:param resolver: Callable that maps operationID to a function
:param app_produces: list of content types the application can return by default
:param app_security: list of security rules the application uses by default
:type app_security: list
:param security_schemes: `Security Definitions Object
<https://github.com/swagger-api/swagger-spec/blob/master/versions/2.0.md#security-definitions-object>`_
:type security_schemes: dict
:param validate_responses: True enables validation. Validation errors generate HTTP 500 responses.
:type validate_responses: bool
:param strict_validation: True enables validation on invalid request parameters
:type strict_validation: bool
:param randomize_endpoint: number of random characters to append to operation name
:type randomize_endpoint: integer
:param validator_map: Custom validators for the types "parameter", "body" and "response".
:type validator_map: dict
:param format_converters: Custom value converters based on the schema format of properties.
:type format_converters: dict
:param pythonic_params: When True CamelCase parameters are converted to snake_case and an underscore is appended
to any shadowed built-ins
:type pythonic_params: bool
        :param uri_parser_class: class to use for uri parsing
:type uri_parser_class: AbstractURIParser
:param pass_context_arg_name: If not None will try to inject the request context to the function using this
name.
:type pass_context_arg_name: str|None
"""
self._api = api
self._method = method
self._path = path
self._operation = operation
self._resolver = resolver
self._security = app_security
self._security_schemes = security_schemes
self._validate_responses = validate_responses
self._strict_validation = strict_validation
self._pythonic_params = pythonic_params
self._uri_parser_class = uri_parser_class
self._pass_context_arg_name = pass_context_arg_name
self._randomize_endpoint = randomize_endpoint
self._operation_id = self._operation.get("operationId")
self._resolution = resolver.resolve(self)
self._operation_id = self._resolution.operation_id
self._responses = self._operation.get("responses", {})
self._validator_map = dict(VALIDATOR_MAP)
self._validator_map.update(validator_map or {})
self._format_converters = format_converters or {}
@property
def method(self):
"""
The HTTP method for this operation (ex. GET, POST)
"""
return self._method
@property
def path(self):
"""
The path of the operation, relative to the API base path
"""
return self._path
@property
def responses(self):
"""
Returns the responses for this operation
"""
return self._responses
@property
def validator_map(self):
"""
Validators to use for parameter, body, and response validation
"""
return self._validator_map
@property
def format_converters(self):
"""
Converters to use to convert input type based on the schema format
attribute.
"""
return self._format_converters
@property
def operation_id(self):
"""
        The operation id used to identify the operation internally to the app
"""
return self._operation_id
@property
def randomize_endpoint(self):
"""
number of random digits to generate and append to the operation_id.
"""
return self._randomize_endpoint
@property
def router_controller(self):
"""
The router controller to use (python module where handler functions live)
"""
return self._router_controller
@property
def strict_validation(self):
"""
If True, validate all requests against the spec
"""
return self._strict_validation
@property
def pythonic_params(self):
"""
If True, convert CamelCase into pythonic_variable_names
"""
return self._pythonic_params
@property
def validate_responses(self):
"""
If True, check the response against the response schema, and return an
error if the response does not validate.
"""
return self._validate_responses
@staticmethod
def _get_file_arguments(files, arguments, has_kwargs=False):
return {k: v for k, v in files.items() if k in arguments or has_kwargs}
@abc.abstractmethod
def _get_val_from_param(self, value, query_defn):
"""
Convert input parameters into the correct type
"""
def _query_args_helper(self, query_defns, query_arguments,
function_arguments, has_kwargs, sanitize):
res = {}
for key, value in query_arguments.items():
key = sanitize(key)
if not has_kwargs and key not in function_arguments:
logger.debug("Query Parameter '%s' not in function arguments", key)
else:
logger.debug("Query Parameter '%s' in function arguments", key)
try:
query_defn = query_defns[key]
except KeyError: # pragma: no cover
logger.error("Function argument '{}' not defined in specification".format(key))
else:
logger.debug('%s is a %s', key, query_defn)
res.update({key: self._get_val_from_param(value, query_defn)})
return res
@abc.abstractmethod
def _get_query_arguments(self, query, arguments, has_kwargs, sanitize):
"""
extract handler function arguments from the query parameters
"""
@abc.abstractmethod
def _get_body_argument(self, body, arguments, has_kwargs, sanitize):
"""
extract handler function arguments from the request body
"""
def _get_path_arguments(self, path_params, sanitize):
"""
extract handler function arguments from path parameters
"""
kwargs = {}
path_defns = {p["name"]: p for p in self.parameters if p["in"] == "path"}
for key, value in path_params.items():
sanitized_key = sanitize(key)
if key in path_defns:
kwargs[sanitized_key] = self._get_val_from_param(value, path_defns[key])
else: # Assume path params mechanism used for injection
kwargs[sanitized_key] = value
return kwargs
@abc.abstractproperty
def parameters(self):
"""
Returns the parameters for this operation
"""
@abc.abstractproperty
def produces(self):
"""
Content-Types that the operation produces
"""
@abc.abstractproperty
def consumes(self):
"""
Content-Types that the operation consumes
"""
@abc.abstractproperty
def body_schema(self):
"""
The body schema definition for this operation.
"""
@abc.abstractproperty
def body_definition(self):
"""
The body definition for this operation.
:rtype: dict
"""
def get_arguments(self, path_params, query_params, body, files, arguments,
has_kwargs, sanitize):
"""
get arguments for handler function
"""
ret = {}
ret.update(self._get_path_arguments(path_params, sanitize))
ret.update(self._get_query_arguments(query_params, arguments,
has_kwargs, sanitize))
if self.method.upper() in ["PATCH", "POST", "PUT"]:
ret.update(self._get_body_argument(body, arguments,
has_kwargs, sanitize))
ret.update(self._get_file_arguments(files, arguments, has_kwargs))
return ret
def response_definition(self, status_code=None,
content_type=None):
"""
response definition for this endpoint
"""
content_type = content_type or self.get_mimetype()
response_definition = self.responses.get(
str(status_code),
self.responses.get("default", {})
)
return response_definition
@abc.abstractmethod
def response_schema(self, status_code=None, content_type=None):
"""
response schema for this endpoint
"""
@abc.abstractmethod
def example_response(self, status_code=None, content_type=None):
"""
Returns an example from the spec
"""
@abc.abstractmethod
def get_path_parameter_types(self):
"""
Returns the types for parameters in the path
"""
@abc.abstractmethod
def with_definitions(self, schema):
"""
Returns the given schema, but with the definitions from the spec
attached. This allows any remaining references to be resolved by a
validator (for example).
"""
def get_mimetype(self):
"""
If the endpoint has no 'produces' then the default is
'application/json'.
        :rtype: str
"""
if all_json(self.produces):
try:
return self.produces[0]
except IndexError:
return DEFAULT_MIMETYPE
elif len(self.produces) == 1:
return self.produces[0]
else:
return DEFAULT_MIMETYPE
@property
def _uri_parsing_decorator(self):
"""
Returns a decorator that parses request data and handles things like
array types, and duplicate parameter definitions.
"""
return self._uri_parser_class(self.parameters, self.body_definition)
@property
def function(self):
"""
Operation function with decorators
:rtype: types.FunctionType
"""
function = parameter_to_arg(
self, self._resolution.function, self.pythonic_params,
self._pass_context_arg_name
)
if self.validate_responses:
logger.debug('... Response validation enabled.')
response_decorator = self.__response_validation_decorator
logger.debug('... Adding response decorator (%r)', response_decorator)
function = response_decorator(function)
produces_decorator = self.__content_type_decorator
logger.debug('... Adding produces decorator (%r)', produces_decorator)
function = produces_decorator(function)
for validation_decorator in self.__validation_decorators:
function = validation_decorator(function)
uri_parsing_decorator = self._uri_parsing_decorator
function = uri_parsing_decorator(function)
# NOTE: the security decorator should be applied last to check auth before anything else :-)
security_decorator = self.security_decorator
logger.debug('... Adding security decorator (%r)', security_decorator)
function = security_decorator(function)
function = self._request_response_decorator(function)
if UWSGIMetricsCollector.is_available(): # pragma: no cover
decorator = UWSGIMetricsCollector(self.path, self.method)
function = decorator(function)
return function
@property
def __content_type_decorator(self):
"""
Get produces decorator.
If the operation mimetype format is json then the function return value is jsonified
From Swagger Specification:
**Produces**
A list of MIME types the operation can produce. This overrides the produces definition at the Swagger Object.
An empty value MAY be used to clear the global definition.
:rtype: types.FunctionType
"""
logger.debug('... Produces: %s', self.produces, extra=vars(self))
mimetype = self.get_mimetype()
if all_json(self.produces): # endpoint will return json
logger.debug('... Produces json', extra=vars(self))
# TODO: Refactor this.
return lambda f: f
elif len(self.produces) == 1:
logger.debug('... Produces %s', mimetype, extra=vars(self))
decorator = Produces(mimetype)
return decorator
else:
return BaseSerializer()
@property
def __validation_decorators(self):
|
@property
def __response_validation_decorator(self):
"""
Get a decorator for validating the generated Response.
:rtype: types.FunctionType
"""
ResponseValidator = self.validator_map['response']
return ResponseValidator(self, self.get_mimetype())
def convert_type(self, value, _type, _format=None):
"""
Convert the input value to the corresponding python type.
:param value: The raw input value from the HTTP request.
:param _type: The type of the property as defined in the schema.
:param _format: The optional format of the property as defined in the schema.
:return: The input value converted to the python type.
"""
typed_value = make_type(value, _type)
type_converters = self.format_converters.get(_type)
if not type_converters:
return typed_value
format_converter = type_converters.get(_format)
if not format_converter:
return typed_value
return format_converter(_type, _format, value)
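    # Illustrative sketch (not part of connexion itself): a format_converters
    # mapping that convert_type() above would consult. Keys are schema types,
    # then schema formats; each converter receives (_type, _format, value).
    #
    #   format_converters = {
    #       'string': {
    #           'date': lambda _type, _format, value: datetime.date.fromisoformat(value),
    #       },
    #   }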
def json_loads(self, data):
"""
A wrapper for calling the API specific JSON loader.
:param data: The JSON data in textual form.
:type data: bytes
"""
return self.api.json_loads(data)
|
"""
:rtype: types.FunctionType
"""
ParameterValidator = self.validator_map['parameter']
RequestBodyValidator = self.validator_map['body']
if self.parameters:
yield ParameterValidator(self.parameters,
self.api,
strict_validation=self.strict_validation)
if self.body_schema:
yield RequestBodyValidator(self.body_schema, self.consumes, self.api,
is_nullable(self.body_definition),
strict_validation=self.strict_validation)
|
monitoring.go
|
// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
// or more contributor license agreements. Licensed under the Elastic License;
// you may not use this file except in compliance with the Elastic License.
package beats
import (
"fmt"
"github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/agent/application/paths"
)
const (
// args: data path, pipeline name, application name
logFileFormat = "%s/logs/%s/%s-json.log"
	// args: data path, pipeline name, application name
logFileFormatWin = "%s\\logs\\%s\\%s-json.log"
	// args: pipeline name, application name, application name
mbEndpointFileFormat = "unix:///tmp/elastic-agent/%s/%s/%s.sock"
// args: pipeline name, application name
mbEndpointFileFormatWin = `npipe:///%s-%s`
)
func getMonitoringEndpoint(program, operatingSystem, pipelineID string) string {
if operatingSystem == "windows"
|
return fmt.Sprintf(mbEndpointFileFormat, pipelineID, program, program)
}
func getLoggingFile(program, operatingSystem, installPath, pipelineID string) string {
if operatingSystem == "windows" {
return fmt.Sprintf(logFileFormatWin, paths.Home(), pipelineID, program)
}
return fmt.Sprintf(logFileFormat, paths.Home(), pipelineID, program)
}
|
{
return fmt.Sprintf(mbEndpointFileFormatWin, pipelineID, program)
}
|
multicast.ts
|
import { Observable } from '../../internal/Observable';
import { multicast } from '../../internal/patching/operator/multicast';
Observable.prototype.multicast = <any>multicast;
declare module '../../internal/Observable' {
|
}
}
|
interface Observable<T> {
multicast: typeof multicast;
|
test_weyl.py
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
# pylint: disable=invalid-name
"""Tests for Weyl coorindate routines."""
import unittest
import numpy as np
from numpy.testing import assert_allclose
from qiskit.test import QiskitTestCase
from qiskit.quantum_info.random import random_unitary
from qiskit.quantum_info.synthesis.weyl import weyl_coordinates
from qiskit.quantum_info.synthesis.local_invariance import (two_qubit_local_invariants,
local_equivalence)
class TestWeyl(QiskitTestCase):
"""Test Weyl coordinate routines"""
def test_weyl_coordinates_simple(self):
"""Check Weyl coordinates against known cases.
"""
# Identity [0,0,0]
U = np.identity(4)
weyl = weyl_coordinates(U)
assert_allclose(weyl, [0, 0, 0])
# CNOT [pi/4, 0, 0]
U = np.array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0]], dtype=complex)
weyl = weyl_coordinates(U)
assert_allclose(weyl, [np.pi / 4, 0, 0], atol=1e-07)
# SWAP [pi/4, pi/4 ,pi/4]
U = np.array([[1, 0, 0, 0],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 0, 1]], dtype=complex)
weyl = weyl_coordinates(U)
assert_allclose(weyl, [np.pi / 4, np.pi / 4, np.pi / 4])
# SQRT ISWAP [pi/8, pi/8, 0]
U = np.array([[1, 0, 0, 0],
[0, 1 / np.sqrt(2), 1j / np.sqrt(2), 0],
[0, 1j / np.sqrt(2), 1 / np.sqrt(2), 0],
|
def test_weyl_coordinates_random(self):
"""Randomly check Weyl coordinates with local invariants.
"""
for _ in range(10):
U = random_unitary(4).data
weyl = weyl_coordinates(U)
local_equiv = local_equivalence(weyl)
local = two_qubit_local_invariants(U)
assert_allclose(local, local_equiv)
if __name__ == '__main__':
unittest.main()
|
[0, 0, 0, 1]], dtype=complex)
weyl = weyl_coordinates(U)
assert_allclose(weyl, [np.pi / 8, np.pi / 8, 0])
|
flickr_api.py
|
"""
flickr.py
Copyright 2004-2006 James Clarke <[email protected]>
Portions Copyright 2007-2008 Joshua Henderson <[email protected]>
THIS SOFTWARE IS SUPPLIED WITHOUT WARRANTY OF ANY KIND, AND MAY BE
COPIED, MODIFIED OR DISTRIBUTED IN ANY WAY, AS LONG AS THIS NOTICE
AND ACKNOWLEDGEMENT OF AUTHORSHIP REMAIN.
2007-12-17
For an up-to-date TODO list, please see:
http://code.google.com/p/flickrpy/wiki/TodoList
For information on how to use the Authentication
module, please see:
http://code.google.com/p/flickrpy/wiki/UserAuthentication
2006-12-19
Applied patches from Berco Beute and Wolfram Kriesing.
"""
__author__ = "James Clarke <[email protected]>"
__version__ = "$Rev$"
__date__ = "$Date$"
__copyright__ = "Copyright: 2004-2010 James Clarke; Portions: 2007-2008 Joshua Henderson; Portions: 2011 Andrei Vlad Vacariu"
from urllib import urlencode, urlopen
from xml.dom import minidom
import hashlib
import os
HOST = 'http://flickr.com'
API = '/services/rest'
# set these here or using flickr.API_KEY in your application
API_KEY = None
API_SECRET = None
email = None
password = None
AUTH = False
debug = False
# The next 2 variables are only important if authentication is used
# this can be set here or using flickr.tokenPath in your application
# this is the path to the folder containing tokenFile (default: token.txt)
tokenPath = ''
# this can be set here or using flickr.tokenFile in your application
# this is the name of the file containing the stored token.
tokenFile = 'token.txt'
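# A minimal usage sketch (illustrative only; it assumes this module is
# importable as `flickr` and that the key/secret values are valid credentials
# obtained from Flickr):
#
#   import flickr
#   flickr.API_KEY = 'your-api-key'
#   flickr.API_SECRET = 'your-api-secret'
#   photos = flickr.photos_search(tags='sunset', per_page='5')
#   for p in photos:
#       print p.title, p.getURL(size='Medium', urlType='url')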
class FlickrError(Exception): pass
class Photo(object):
"""Represents a Flickr Photo."""
__readonly = ['id', 'secret', 'server', 'farm', 'isfavorite', 'license', 'rotation',
'owner', 'dateposted', 'datetaken', 'takengranularity',
'title', 'description', 'ispublic', 'isfriend', 'isfamily',
'cancomment', 'canaddmeta', 'comments', 'tags', 'permcomment',
'permaddmeta', 'url', 'views']
#XXX: Hopefully None won't cause problems
def __init__(self, id, owner=None, dateuploaded=None, \
title=None, description=None, ispublic=None, \
isfriend=None, isfamily=None, cancomment=None, \
canaddmeta=None, comments=None, tags=None, secret=None, \
isfavorite=None, server=None, farm=None, license=None, \
rotation=None, url=None, views=None):
"""Must specify id, rest is optional."""
self.__loaded = False
self.__cancomment = cancomment
self.__canaddmeta = canaddmeta
self.__comments = comments
self.__dateuploaded = dateuploaded
self.__description = description
self.__id = id
self.__license = license
self.__isfamily = isfamily
self.__isfavorite = isfavorite
self.__isfriend = isfriend
self.__ispublic = ispublic
self.__owner = owner
self.__rotation = rotation
self.__secret = secret
self.__server = server
self.__farm = farm
self.__tags = tags
self.__title = title
self.__dateposted = None
self.__datetaken = None
self.__takengranularity = None
self.__permcomment = None
self.__permaddmeta = None
self.__url = None
self.__views = None
def __setattr__(self, key, value):
if key in self.__class__.__readonly:
raise AttributeError("The attribute %s is read-only." % key)
else:
super(Photo, self).__setattr__(key, value)
def _val(self, key):
if key in self.__class__.__readonly:
return super(Photo, self).__getattribute__("_%s__%s" % (self.__class__.__name__, key))
else:
return super(Photo, self).__getattribute__(key)
def __getattr__(self, key):
val = self._val(key)
if val == None and not self.__loaded:
self._load_properties()
val = self._val(key)
return val
def _load_properties(self):
"""Loads the properties from Flickr."""
self.__loaded = True
method = 'flickr.photos.getInfo'
data = _doget(method, photo_id=self.id)
photo = data.rsp.photo
self.__secret = photo.secret
self.__server = photo.server
self.__farm = photo.farm
self.__isfavorite = photo.isfavorite
self.__license = photo.license
self.__rotation = photo.rotation
owner = photo.owner
self.__owner = User(owner.nsid, username=owner.username,\
realname=owner.realname,\
location=owner.location)
self.__title = photo.title.text
self.__description = photo.description.text
self.__ispublic = photo.visibility.ispublic
self.__isfriend = photo.visibility.isfriend
self.__isfamily = photo.visibility.isfamily
self.__dateposted = photo.dates.posted
self.__datetaken = photo.dates.taken
self.__takengranularity = photo.dates.takengranularity
self.__cancomment = photo.editability.cancomment
self.__canaddmeta = photo.editability.canaddmeta
self.__comments = photo.comments.text
self.__url = photo.urls.url.text
self.__views = photo.views
try:
self.__permcomment = photo.permissions.permcomment
self.__permaddmeta = photo.permissions.permaddmeta
except AttributeError:
self.__permcomment = None
self.__permaddmeta = None
#TODO: Implement Notes?
if hasattr(photo.tags, "tag"):
if isinstance(photo.tags.tag, list):
self.__tags = [Tag(tag.id, User(tag.author), tag.raw, tag.text) \
for tag in photo.tags.tag]
else:
tag = photo.tags.tag
self.__tags = [Tag(tag.id, User(tag.author), tag.raw, tag.text)]
def __str__(self):
return '<Flickr Photo %s>' % self.id
def setTags(self, tags):
"""Set the tags for current photo to list tags.
(flickr.photos.settags)
"""
method = 'flickr.photos.setTags'
tags = uniq(tags)
_dopost(method, auth=True, photo_id=self.id, tags=tags)
self._load_properties()
def addTags(self, tags):
"""Adds the list of tags to current tags. (flickr.photos.addtags)
"""
method = 'flickr.photos.addTags'
if isinstance(tags, list):
tags = uniq(tags)
_dopost(method, auth=True, photo_id=self.id, tags=tags)
#load properties again
self._load_properties()
def removeTag(self, tag):
"""Remove the tag from the photo must be a Tag object.
(flickr.photos.removeTag)
"""
method = 'flickr.photos.removeTag'
tag_id = ''
try:
tag_id = tag.id
except AttributeError:
raise FlickrError, "Tag object expected"
_dopost(method, auth=True, photo_id=self.id, tag_id=tag_id)
self._load_properties()
def setMeta(self, title=None, description=None):
"""Set metadata for photo. (flickr.photos.setMeta)"""
method = 'flickr.photos.setMeta'
if title is None:
title = self.title
if description is None:
description = self.description
_dopost(method, auth=True, title=title, \
description=description, photo_id=self.id)
self.__title = title
self.__description = description
def getAllContexts(self):
"""Retrieves lists of the pools/sets the photo is in"""
method = 'flickr.photos.getAllContexts'
data = _doget(method, photo_id=self.id)
d = {'pools': [], 'sets': []}
if hasattr(data.rsp, "pool"):
if isinstance(data.rsp.pool, list):
for pool in data.rsp.pool:
d["pools"].append({"id": pool.id, "title": pool.title})
else:
d["pools"].append({"id": data.rsp.pool.id, "title": data.rsp.pool.title})
if hasattr(data.rsp, "set"):
if isinstance(data.rsp.set, list):
for theset in data.rsp.set:
d["sets"].append({"id": theset.id, "title": theset.title})
else:
d["sets"].append({"id": data.rsp.set.id, "title": data.rsp.set.title})
return d
def getPoolCount(self):
"""Retrieves a count of the pools the photo is in"""
d = self.getAllContexts()
return len( d["pools"] )
def getSetCount(self):
"""Retrieves a count of the pools the photo is in"""
d = self.getAllContexts()
return len( d["sets"] )
def getURL(self, size='Medium', urlType='url'):
"""Retrieves a url for the photo. (flickr.photos.getSizes)
urlType - 'url' or 'source'
'url' - flickr page of photo
'source' - image file
"""
method = 'flickr.photos.getSizes'
data = _doget(method, photo_id=self.id)
for psize in data.rsp.sizes.size:
if psize.label == size:
return getattr(psize, urlType)
raise FlickrError, "No URL found"
def getSizes(self):
"""
Get all the available sizes of the current image, and all available
data about them.
Returns: A list of dicts with the size data.
"""
method = 'flickr.photos.getSizes'
data = _doget(method, photo_id=self.id)
ret = []
        # The given props are those that we return, along with their types;
        # returning width and height as strings would make "75" > "100"
        # evaluate to True, which is error prone.
props = {'url':str,'width':int,'height':int,'label':str,'source':str,'text':str}
for psize in data.rsp.sizes.size:
d = {}
for prop,convert_to_type in props.items():
d[prop] = convert_to_type(getattr(psize, prop))
ret.append(d)
return ret
def getExif(self):
"""Retrieves EXIF metadata for the photo.
Example usage:
>>> exif = photo.getExif()
>>> print exif.camera
>>> for t in exif.tags:
... print '%s: %s' % (t.label, t.raw)
"""
return Exif.getExif(self.id)
def getLocation(self):
"""
        Return the [latitude, longitude] of the picture.
        Returns None if no location is given for this picture.
"""
method = 'flickr.photos.geo.getLocation'
try:
data = _doget(method, photo_id=self.id)
        except FlickrError: # Some other error might have occurred too!?
return None
loc = data.rsp.photo.location
return [loc.latitude, loc.longitude]
def getComments(self):
""""
get list of comments for photo
returns a list of comment objects
comment text is in return [item].text
"""
method = "flickr.photos.comments.getList"
try:
data = _doget(method, photo_id=self.id)
except FlickrError: # ???? what errors might there be????
return None
return data.rsp.comments
def _getDirectURL(self, size):
return "http://farm%s.static.flickr.com/%s/%s_%s_%s.jpg" % \
(self.farm, self.server, self.id, self.secret, size)
def getThumbnail(self):
"""
Return a string representation of the URL to the thumbnail
image (not the thumbnail image page).
"""
return self._getDirectURL('t')
def getSmallSquare(self):
"""
Return a string representation of the URL to the small square
image (not the small square image page).
"""
return self._getDirectURL('s')
def getSmall(self):
"""
Return a string representation of the URL to the small
image (not the small image page).
"""
return self._getDirectURL('m')
def getMedium(self):
"""
Return a string representation of the URL to the medium
image (not the medium image page).
"""
return self._getDirectURL('z')
def getLarge(self):
"""
Return a string representation of the URL to the large
image (not the large image page).
"""
return self._getDirectURL('b')
def getGalleryList(self, per_page='', page=''):
"""
get list of galleries which
contain the photo.
Galleries are returned sorted by
date which the photo was added
to the gallery
"""
if per_page > 500: # Max is 500
per_page = 500
method = "flickr.galleries.getListForPhoto"
try:
data = _doget(method, photo_id=self.id, per_page=per_page, \
page=page)
except FlickrError:
return None
return data.rsp.galleries.gallery
def getFavoriteCount(self):
"""
Return the number of favorites to the specific photo
"""
method = 'flickr.photos.getFavorites'
data = _doget(method, photo_id=self.id)
return data.rsp.photo.total
def getFavoriteUsers(self):
"""
Return the list of users who marked the specific photo as favorite
return format: { userid, username, date of marking favorite}
"""
method = 'flickr.photos.getFavorites'
data = _doget(method, photo_id=self.id)
u = []
try:
users = data.rsp.photo.person
except AttributeError:
return u # there are no favorite of this photo
try:
iter(users)
except TypeError:
            users = [users] # there is only one favorite, so make it a list
for user in users:
u.append({"id": user.nsid, "username": user.username, "favedate": user.favedate})
return u
class Photoset(object):
"""A Flickr photoset.
If constructed with just an ID, the rest of the data about the Photoset is
fetched from the API.
"""
def __init__(self, id, title=None, primary=None, photos=0, description='', \
secret='', server=''):
self.__id = id
if not title and not primary:
method = 'flickr.photosets.getInfo'
data = _doget(method, photoset_id=self.id)
title = data.rsp.photoset.title.text
primary = Photo(data.rsp.photoset.primary)
description = data.rsp.photoset.description.text
            photos = data.rsp.photoset.photos
self.__title = title
self.__primary = primary
self.__description = description
self.__count = photos
self.__secret = secret
self.__server = server
id = property(lambda self: self.__id)
title = property(lambda self: self.__title)
description = property(lambda self: self.__description)
primary = property(lambda self: self.__primary)
def __len__(self):
return self.__count
def __str__(self):
return '<Flickr Photoset %s>' % self.id
def getPhotos(self):
"""Returns list of Photos."""
method = 'flickr.photosets.getPhotos'
data = _doget(method, photoset_id=self.id)
photos = data.rsp.photoset.photo
p = []
# If there's only one photo in the set, the API returns a single photo,
# not a list
try:
iter(photos)
except TypeError:
photos = [photos]
for photo in photos:
p.append(Photo(photo.id, title=photo.title, secret=photo.secret, \
server=photo.server))
return p
def editPhotos(self, photos, primary=None):
"""Edit the photos in this set.
photos - photos for set
        primary - primary photo (if None, the current primary is used)
"""
method = 'flickr.photosets.editPhotos'
if primary is None:
primary = self.primary
ids = [photo.id for photo in photos]
if primary.id not in ids:
ids.append(primary.id)
_dopost(method, auth=True, photoset_id=self.id,\
primary_photo_id=primary.id,
photo_ids=ids)
self.__count = len(ids)
return True
def addPhoto(self, photo):
"""Add a photo to this set.
photo - the photo
"""
method = 'flickr.photosets.addPhoto'
_dopost(method, auth=True, photoset_id=self.id, photo_id=photo.id)
self.__count += 1
return True
def removePhoto(self, photo):
"""Remove the photo from this set.
photo - the photo
"""
method = 'flickr.photosets.removePhoto'
_dopost(method, auth=True, photoset_id=self.id, photo_id=photo.id)
self.__count = self.__count - 1
return True
def editMeta(self, title=None, description=None):
"""Set metadata for photo. (flickr.photos.setMeta)"""
method = 'flickr.photosets.editMeta'
if title is None:
title = self.title
if description is None:
description = self.description
_dopost(method, auth=True, title=title, \
description=description, photoset_id=self.id)
self.__title = title
self.__description = description
return True
#XXX: Delete isn't handled well as the python object will still exist
def delete(self):
"""Deletes the photoset.
"""
method = 'flickr.photosets.delete'
_dopost(method, auth=True, photoset_id=self.id)
return True
def create(cls, photo, title, description=''):
"""Create a new photoset.
photo - primary photo
"""
if not isinstance(photo, Photo):
raise TypeError, "Photo expected"
method = 'flickr.photosets.create'
data = _dopost(method, auth=True, title=title,\
description=description,\
primary_photo_id=photo.id)
set = Photoset(data.rsp.photoset.id, title, Photo(photo.id),
photos=1, description=description)
return set
create = classmethod(create)
class User(object):
"""A Flickr user."""
def __init__(self, id, username=None, isadmin=None, ispro=None, \
realname=None, location=None, firstdate=None, count=None):
"""id required, rest optional."""
self.__loaded = False #so we don't keep loading data
self.__id = id
self.__username = username
self.__isadmin = isadmin
self.__ispro = ispro
self.__realname = realname
self.__location = location
self.__photos_firstdate = firstdate
self.__photos_count = count
#property fu
id = property(lambda self: self._general_getattr('id'))
username = property(lambda self: self._general_getattr('username'))
isadmin = property(lambda self: self._general_getattr('isadmin'))
ispro = property(lambda self: self._general_getattr('ispro'))
realname = property(lambda self: self._general_getattr('realname'))
location = property(lambda self: self._general_getattr('location'))
photos_firstdate = property(lambda self: \
self._general_getattr('photos_firstdate'))
photos_firstdatetaken = property(lambda self: \
self._general_getattr\
('photos_firstdatetaken'))
photos_count = property(lambda self: \
self._general_getattr('photos_count'))
icon_server= property(lambda self: self._general_getattr('icon_server'))
icon_url= property(lambda self: self._general_getattr('icon_url'))
def _general_getattr(self, var):
"""Generic get attribute function."""
if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
and not self.__loaded:
self._load_properties()
return getattr(self, "_%s__%s" % (self.__class__.__name__, var))
def _load_properties(self):
"""Load User properties from Flickr."""
method = 'flickr.people.getInfo'
data = _doget(method, user_id=self.__id)
self.__loaded = True
person = data.rsp.person
self.__isadmin = person.isadmin
self.__ispro = person.ispro
self.__icon_server = person.iconserver
if int(person.iconserver) > 0:
self.__icon_url = 'http://photos%s.flickr.com/buddyicons/%s.jpg' \
% (person.iconserver, self.__id)
else:
self.__icon_url = 'http://www.flickr.com/images/buddyicon.jpg'
self.__username = person.username.text
self.__realname = getattr((getattr(person, 'realname', u'')), 'text', u'')
self.__location = getattr((getattr(person, 'location', u'')), 'text', u'')
self.__photos_count = getattr((getattr(getattr(person, 'photos', None), 'count', u'')), 'text', u'')
if self.__photos_count:
self.__photos_firstdate = person.photos.firstdate.text
self.__photos_firstdatetaken = person.photos.firstdatetaken.text
else:
self.__photos_firstdate = None
self.__photos_firstdatetaken = None
def __str__(self):
return '<Flickr User %s>' % self.id
def getPhotosets(self):
"""Returns a list of Photosets."""
method = 'flickr.photosets.getList'
data = _doget(method, user_id=self.id)
sets = []
if not getattr(data.rsp.photosets, 'photoset',None):
            return sets #N.B. returns an empty list
if isinstance(data.rsp.photosets.photoset, list):
for photoset in data.rsp.photosets.photoset:
sets.append(Photoset(photoset.id, photoset.title.text,\
Photo(photoset.primary),\
secret=photoset.secret, \
server=photoset.server, \
description=photoset.description.text,
photos=photoset.photos))
else:
photoset = data.rsp.photosets.photoset
sets.append(Photoset(photoset.id, photoset.title.text,\
Photo(photoset.primary),\
secret=photoset.secret, \
server=photoset.server, \
description=photoset.description.text,
photos=photoset.photos))
return sets
def getPublicFavorites(self, per_page='', page=''):
return favorites_getPublicList(user_id=self.id, per_page=per_page, \
page=page)
def getFavorites(self, per_page='', page=''):
return favorites_getList(user_id=self.id, per_page=per_page, \
page=page)
def getGalleries(self, per_page='', page=''):
return galleries_getList(user_id=self.id, per_page=per_page, \
page=page)
class Group(object):
"""Flickr Group Pool"""
def __init__(self, id, name=None, members=None, online=None,\
privacy=None, chatid=None, chatcount=None):
self.__loaded = False
self.__id = id
self.__name = name
self.__members = members
self.__online = online
self.__privacy = privacy
self.__chatid = chatid
self.__chatcount = chatcount
self.__url = None
id = property(lambda self: self._general_getattr('id'))
name = property(lambda self: self._general_getattr('name'))
members = property(lambda self: self._general_getattr('members'))
online = property(lambda self: self._general_getattr('online'))
privacy = property(lambda self: self._general_getattr('privacy'))
chatid = property(lambda self: self._general_getattr('chatid'))
chatcount = property(lambda self: self._general_getattr('chatcount'))
def _general_getattr(self, var):
"""Generic get attribute function."""
if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
and not self.__loaded:
self._load_properties()
return getattr(self, "_%s__%s" % (self.__class__.__name__, var))
def _load_properties(self):
"""Loads the properties from Flickr."""
method = 'flickr.groups.getInfo'
data = _doget(method, group_id=self.id)
self.__loaded = True
group = data.rsp.group
self.__name = group.name.text
self.__description = group.description.text
self.__members = group.members.text
self.__privacy = group.privacy.text
def __str__(self):
return '<Flickr Group %s>' % self.id
def getPhotos(self, tags='', per_page='', page=''):
"""Get a list of photo objects for this group"""
method = 'flickr.groups.pools.getPhotos'
data = _doget(method, group_id=self.id, tags=tags,\
per_page=per_page, page=page)
photos = []
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
return photos
def add(self, photo):
"""Adds a Photo to the group"""
method = 'flickr.groups.pools.add'
_dopost(method, auth=True, photo_id=photo.id, group_id=self.id)
return True
def remove(self, photo):
"""Remove a Photo from the group"""
method = 'flickr.groups.pools.remove'
_dopost(method, auth=True, photo_id=photo.id, group_id=self.id)
return True
class Tag(object):
def __init__(self, id, author, raw, text):
self.id = id
self.author = author
self.raw = raw
self.text = text
def __str__(self):
return '<Flickr Tag %s (%s)>' % (self.id, self.text)
class Exif(object):
def __init__(self, camera, tags):
self.camera = camera
self.tags = tags
def __str__(self):
return '<Flickr Exif>'
@staticmethod
def getExif(photo_id_):
method = 'flickr.photos.getExif'
data = _doget(method, photo_id=photo_id_)
return Exif.parse(data.rsp.photo)
@staticmethod
def parse(photo):
camera = getattr(photo, 'camera', '')
tags = []
if hasattr(photo, 'exif'):
if isinstance(photo.exif, list):
tags = [ExifTag.parse(e) for e in photo.exif]
else:
tags = [ExifTag.parse(photo.exif)]
return Exif(camera, tags)
class ExifTag(object):
def __init__(self, tagspace, tagspaceid, tag, label, raw, clean):
self.tagspace = tagspace
self.tagspaceid = tagspaceid
self.tag = tag
self.label = label
self.raw = raw
self.clean = clean
def __str__(self):
return '<Flickr ExifTag %s (%s)>' % (self.tag, self.label)
@staticmethod
def parse(exif):
raw = ''
if hasattr(exif, 'raw'):
raw = exif.raw.text
clean = ''
if hasattr(exif, 'clean'):
clean = exif.clean.text
return ExifTag(exif.tagspace, exif.tagspaceid, exif.tag, exif.label,
raw, clean)
class Gallery(object):
"""Represents a Flickr Gallery.
Takes gallery_id as argument.
"""
# There are other attributes a Gallery could have,
# but defining them here might create errors.
# Might be useful to define them here, though,
# if the user wants to change them when creating
# an instance.
def __init__(self, id, owner=None, title=None, description=None, \
date_create=None, date_update=None, count_photos=None, \
count_videos=None, primary_photo_id=None, \
primary_photo_server=None, primary_photo_farm=None, \
primary_photo_secret=None):
self.__loaded = False
self.__url = None
self.__id = id
self.__owner = owner
self.__title = title
self.__description = description
self.__date_create = date_create
self.__date_update = date_update
self.__count_photos = count_photos
self.__count_videos = count_videos
self.__primary_photo_id = primary_photo_id
self.__primary_photo_server = primary_photo_server
self.__primary_photo_farm = primary_photo_farm
self.__primary_photo_secret = primary_photo_secret
id = property(lambda self: self._general_getattr('id'))
url = property(lambda self: self._general_getattr('url'))
owner = property(lambda self: self._general_getattr('owner'))
title = property(lambda self: self._general_getattr('title'))
description = property(lambda self: self._general_getattr('description'))
date_create = property(lambda self: self._general_getattr('date_create'))
date_update = property(lambda self: self._general_getattr('date_update'))
count_photos = property(lambda self: self._general_getattr('count_photos'))
count_videos = property(lambda self: self._general_getattr('count_videos'))
primary_photo_id = property(lambda self: self._general_getattr('primary_photo_id'))
primary_photo_server = property(lambda self: self._general_getattr('primary_photo_server'))
primary_photo_farm = property(lambda self: self._general_getattr('primary_photo_farm'))
primary_photo_secret = property(lambda self: self._general_getattr('primary_photo_secret'))
def _general_getattr(self, var):
"""Generic get attribute function."""
if getattr(self, "_%s__%s" % (self.__class__.__name__, var)) is None \
and not self.__loaded:
self._load_properties()
return getattr(self, "_%s__%s" % (self.__class__.__name__, var))
def _load_properties(self):
"""Loads the properties from Flickr."""
method = 'flickr.galleries.getInfo'
data = _doget(method, gallery_id=self.id)
self.__loaded = True
gallery = data.rsp.gallery
self.__url = gallery.url
self.__owner = gallery.owner
self.__title = gallery.title.text
self.__description = gallery.description.text
self.__date_create = gallery.date_create
self.__date_update = gallery.date_update
self.__count_photos = gallery.count_photos
self.__count_videos = gallery.count_videos
self.__primary_photo_id = gallery.primary_photo_id
self.__primary_photo_server = gallery.primary_photo_server
self.__primary_photo_farm = gallery.primary_photo_farm
self.__primary_photo_secret = gallery.primary_photo_secret
def __str__(self):
return '<Flickr Gallery %s>' % self.id
def addPhoto(self, photo, comment=''):
"""Add a new Photo to the Gallery."""
method = 'flickr.galleries.addPhoto'
_dopost(method, auth=True, photo_id=photo.id, gallery_id=self.id, \
comment=comment)
return True
def editMeta(self, title='', description=''):
"""Modify the meta-data for a gallery.
        In the original API title is required; here, if it is not
        specified, the current title is used (so it is optional).
        Calling this function without any parameters will blank out the description.
"""
method = 'flickr.galleries.editMeta'
if title == '':
title = self.title
_dopost(method, auth=True, gallery_id=self.id, title=title, \
description=description)
return True
def editPhoto(self, photo, comment):
"""Change the comment for the given Photo."""
method = 'flickr.galleries.editPhoto'
_dopost(method, auth=True, gallery_id=self.id, photo_id=photo.id, \
comment=comment)
return True
def editPhotos(self, primary_photo, *photos):
"""Modify the photos in a gallery. Use this method to add,
remove and re-order photos."""
method = 'flickr.galleries.editPhotos'
photo_ids = ','.join([photo.id for photo in photos])
_dopost(method, auth=True, gallery_id=self.id, \
primary_photo_id=primary_photo.id, photo_ids=photo_ids)
return True
def getPhotos(self, per_page='', page='', **extras):
"""Return the list of photos for a gallery.
*extras (optional): A comma-delimited list of extra information
to fetch for each returned record. Currently supported fields are:
description, license, date_upload, date_taken, owner_name,
icon_server, original_format, last_update, geo, tags, machine_tags,
o_dims, views, media, path_alias, url_sq, url_t, url_s, url_m, url_o
"""
method = 'flickr.galleries.getPhotos'
extras = ','.join('%s=%s' % (i, v) for i, v in dict(extras).items())
data = _doget(method, gallery_id=self.id, per_page=per_page, \
page=page, extras=extras)
photos = {} # dict with photo instance as key and comment as value.
# if there's no comment, '' will be assigned.
for photo in data.rsp.photos.photo:
if photo.has_comment == '1':
photos[_parse_photo(photo)] = photo.comment.text
elif photo.has_comment == '0':
photos[_parse_photo(photo)] = ''
else: # Shouldn't EVER get here
raise FlickrError
return photos
#Flickr API methods
#see api docs http://www.flickr.com/services/api/
#for details of each param
#XXX: Could be Photo.search(cls)
def photos_search(user_id='', auth=False, tags='', tag_mode='', text='',\
min_upload_date='', max_upload_date='',\
min_taken_date='', max_taken_date='', \
license='', per_page='', page='', sort='',\
safe_search='', content_type='', **kwargs):
"""Returns a list of Photo objects.
If auth=True then will auth the user. Can see private etc
"""
method = 'flickr.photos.search'
data = _doget(method, auth=auth, user_id=user_id, tags=tags, text=text,\
min_upload_date=min_upload_date,\
max_upload_date=max_upload_date, \
min_taken_date=min_taken_date, \
max_taken_date=max_taken_date, \
license=license, per_page=per_page,\
page=page, sort=sort, safe_search=safe_search, \
content_type=content_type, \
tag_mode=tag_mode, **kwargs)
photos = []
if data.rsp.photos.__dict__.has_key('photo'):
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
def photos_search_pages(user_id='', auth=False, tags='', tag_mode='', text='',\
min_upload_date='', max_upload_date='',\
min_taken_date='', max_taken_date='', \
license='', per_page='', page='', sort=''):
"""Returns the number of pages for the previous function (photos_search())
"""
method = 'flickr.photos.search'
data = _doget(method, auth=auth, user_id=user_id, tags=tags, text=text,\
min_upload_date=min_upload_date,\
max_upload_date=max_upload_date, \
min_taken_date=min_taken_date, \
max_taken_date=max_taken_date, \
license=license, per_page=per_page,\
page=page, sort=sort)
return data.rsp.photos.pages
def photos_get_recent(extras='', per_page='', page=''):
"""http://www.flickr.com/services/api/flickr.photos.getRecent.html
"""
method = 'flickr.photos.getRecent'
data = _doget(method, extras=extras, per_page=per_page, page=page)
photos = []
if data.rsp.photos.__dict__.has_key('photo'):
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
#XXX: Could be class method in User
def people_findByEmail(email):
"""Returns User object."""
method = 'flickr.people.findByEmail'
data = _doget(method, find_email=email)
user = User(data.rsp.user.id, username=data.rsp.user.username.text)
return user
def people_findByUsername(username):
"""Returns User object."""
method = 'flickr.people.findByUsername'
data = _doget(method, username=username)
user = User(data.rsp.user.id, username=data.rsp.user.username.text)
return user
#XXX: Should probably be in User as a list User.public
def people_getPublicPhotos(user_id, per_page='', page=''):
"""Returns list of Photo objects."""
method = 'flickr.people.getPublicPhotos'
data = _doget(method, user_id=user_id, per_page=per_page, page=page)
photos = []
    if hasattr(data.rsp.photos, "photo"): # Check if there are photos at all (may have been paging too far).
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
#XXX: These are also called from User
def
|
(user_id='', per_page='', page=''):
"""Returns list of Photo objects."""
method = 'flickr.favorites.getList'
data = _doget(method, auth=True, user_id=user_id, per_page=per_page,\
page=page)
photos = []
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
def favorites_getPublicList(user_id, per_page='', page=''):
"""Returns list of Photo objects."""
method = 'flickr.favorites.getPublicList'
data = _doget(method, auth=False, user_id=user_id, per_page=per_page,\
page=page)
photos = []
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
def favorites_add(photo_id):
"""Add a photo to the user's favorites."""
method = 'flickr.favorites.add'
_dopost(method, auth=True, photo_id=photo_id)
return True
def favorites_remove(photo_id):
"""Remove a photo from the user's favorites."""
method = 'flickr.favorites.remove'
_dopost(method, auth=True, photo_id=photo_id)
return True
def groups_getPublicGroups():
"""Get a list of groups the auth'd user is a member of."""
method = 'flickr.groups.getPublicGroups'
data = _doget(method, auth=True)
groups = []
if isinstance(data.rsp.groups.group, list):
for group in data.rsp.groups.group:
groups.append(Group(group.id, name=group.name))
else:
group = data.rsp.groups.group
groups = [Group(group.id, name=group.name)]
return groups
def groups_pools_getGroups():
"""Get a list of groups the auth'd user can post photos to."""
method = 'flickr.groups.pools.getGroups'
data = _doget(method, auth=True)
groups = []
if isinstance(data.rsp.groups.group, list):
for group in data.rsp.groups.group:
groups.append(Group(group.id, name=group.name, \
privacy=group.privacy))
else:
group = data.rsp.groups.group
groups = [Group(group.id, name=group.name, privacy=group.privacy)]
return groups
def tags_getListUser(user_id=''):
"""Returns a list of tags for the given user (in string format)"""
method = 'flickr.tags.getListUser'
auth = user_id == ''
data = _doget(method, auth=auth, user_id=user_id)
if isinstance(data.rsp.tags.tag, list):
return [tag.text for tag in data.rsp.tags.tag]
else:
return [data.rsp.tags.tag.text]
def tags_getListUserPopular(user_id='', count=''):
"""Gets the popular tags for a user in dictionary form tag=>count"""
method = 'flickr.tags.getListUserPopular'
auth = user_id == ''
    data = _doget(method, auth=auth, user_id=user_id, count=count)
result = {}
if isinstance(data.rsp.tags.tag, list):
for tag in data.rsp.tags.tag:
result[tag.text] = tag.count
else:
result[data.rsp.tags.tag.text] = data.rsp.tags.tag.count
return result
def tags_getrelated(tag):
"""Gets the related tags for given tag."""
method = 'flickr.tags.getRelated'
data = _doget(method, auth=False, tag=tag)
if isinstance(data.rsp.tags.tag, list):
return [tag.text for tag in data.rsp.tags.tag]
else:
return [data.rsp.tags.tag.text]
def contacts_getPublicList(user_id):
"""Gets the contacts (Users) for the user_id"""
method = 'flickr.contacts.getPublicList'
data = _doget(method, auth=False, user_id=user_id)
try:
if isinstance(data.rsp.contacts.contact, list):
return [User(user.nsid, username=user.username) \
for user in data.rsp.contacts.contact]
except AttributeError:
return "No users in the list"
except:
return "Unknown error"
# else:
# user = data.rsp.contacts.contact
# return [User(user.nsid, username=user.username)]
def interestingness():
method = 'flickr.interestingness.getList'
data = _doget(method)
photos = []
if isinstance(data.rsp.photos.photo , list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos
def galleries_create(title, description, primary_photo_id=None):
"""Create a new gallery."""
method = 'flickr.galleries.create'
    if primary_photo_id is not None:
        _dopost(method, auth=True, title=title, description=description,
                primary_photo_id=primary_photo_id)
    else:
        _dopost(method, auth=True, title=title, description=description)
def galleries_getList(user_id='', per_page='', page=''):
"""Returns list of Gallery objects."""
method = 'flickr.galleries.getList'
data = _doget(method, auth=False, user_id=user_id, per_page=per_page, \
page=page)
galleries = []
if isinstance(data.rsp.galleries.gallery, list):
for gallery in data.rsp.galleries.gallery:
galleries.append(_parse_gallery(gallery))
else:
galleries = [_parse_gallery(data.rsp.galleries.gallery)]
return galleries
def test_login():
method = 'flickr.test.login'
data = _doget(method, auth=True)
user = User(data.rsp.user.id, username=data.rsp.user.username.text)
return user
def test_echo():
method = 'flickr.test.echo'
data = _doget(method)
return data.rsp.stat
#useful methods
def _doget(method, auth=False, **params):
#uncomment to check you aren't killing the flickr server
#print "***** do get %s" % method
params = _prepare_params(params)
url = '%s%s/?api_key=%s&method=%s&%s%s'% \
(HOST, API, API_KEY, method, urlencode(params),
_get_auth_url_suffix(method, auth, params))
#another useful debug print statement
if debug:
print "_doget", url
return _get_data(minidom.parse(urlopen(url)))
def _dopost(method, auth=False, **params):
#uncomment to check you aren't killing the flickr server
#print "***** do post %s" % method
params = _prepare_params(params)
url = '%s%s/?api_key=%s%s'% \
(HOST, API, API_KEY, _get_auth_url_suffix(method, auth, params))
# There's no reason this can't be str(urlencode(params)). I just wanted to
# have it the same as the rest.
payload = '%s' % (urlencode(params))
#another useful debug print statement
if debug:
print "_dopost url", url
print "_dopost payload", payload
return _get_data(minidom.parse(urlopen(url, payload)))
def _prepare_params(params):
"""Convert lists to strings with ',' between items."""
for (key, value) in params.items():
if isinstance(value, list):
params[key] = ','.join([item for item in value])
return params
def _get_data(xml):
"""Given a bunch of XML back from Flickr, we turn it into a data structure
we can deal with (after checking for errors)."""
data = unmarshal(xml)
if not data.rsp.stat == 'ok':
msg = "ERROR [%s]: %s" % (data.rsp.err.code, data.rsp.err.msg)
raise FlickrError, msg
return data
def _get_api_sig(params):
"""Generate API signature."""
token = userToken()
parameters = ['api_key', 'auth_token']
for item in params.items():
parameters.append(item[0])
parameters.sort()
api_string = [API_SECRET]
for item in parameters:
for chocolate in params.items():
if item == chocolate[0]:
api_string.append(item)
api_string.append(str(chocolate[1]))
if item == 'api_key':
api_string.append('api_key')
api_string.append(API_KEY)
if item == 'auth_token':
api_string.append('auth_token')
api_string.append(token)
api_signature = hashlib.md5(''.join(api_string)).hexdigest()
return api_signature
def _get_auth_url_suffix(method, auth, params):
"""Figure out whether we want to authorize, and if so, construct a suitable
URL suffix to pass to the Flickr API."""
authentication = False
# auth may be passed in via the API, AUTH may be set globally (in the same
# manner as API_KEY, etc). We do a few more checks than may seem necessary
# because we allow the 'auth' parameter to actually contain the
# authentication token, not just True/False.
if auth or AUTH:
token = userToken()
authentication = True
elif auth != False:
token = auth
authentication = True
elif AUTH != False:
token = AUTH
authentication = True
# If we're not authenticating, no suffix is required.
if not authentication:
return ''
full_params = params
full_params['method'] = method
return '&auth_token=%s&api_sig=%s' % (token, _get_api_sig(full_params))
def _parse_photo(photo):
"""Create a Photo object from photo data."""
owner = User(photo.owner)
title = photo.title
ispublic = photo.ispublic
isfriend = photo.isfriend
isfamily = photo.isfamily
secret = photo.secret
server = photo.server
farm = photo.farm
p = Photo(photo.id, owner=owner, title=title, ispublic=ispublic,\
isfriend=isfriend, isfamily=isfamily, secret=secret, \
server=server, farm=farm)
return p
def _parse_gallery(gallery):
"""Create a Gallery object from gallery data."""
# This might not work!! NEEDS TESTING
url = gallery.url
owner = User(gallery.owner)
title = gallery.title.text
description = gallery.description.text
date_create = gallery.date_create
date_update = gallery.date_update
count_photos = gallery.count_photos
count_videos = gallery.count_videos
primary_photo_id = gallery.primary_photo_id
primary_photo_server = gallery.primary_photo_server
primary_photo_farm = gallery.primary_photo_farm
primary_photo_secret = gallery.primary_photo_secret
g = Gallery(gallery.id, owner=owner, title=title, description=description, \
date_create=date_create, date_update=date_update, \
count_photos=count_photos, count_videos=count_videos, \
primary_photo_id=primary_photo_id, \
primary_photo_server=primary_photo_server, \
primary_photo_farm=primary_photo_farm, \
primary_photo_secret=primary_photo_secret)
return g
#stolen methods
class Bag: pass
#unmarshal taken and modified from pyamazon.py
#makes the xml easy to work with
def unmarshal(element):
rc = Bag()
if isinstance(element, minidom.Element):
for key in element.attributes.keys():
setattr(rc, key, element.attributes[key].value)
childElements = [e for e in element.childNodes \
if isinstance(e, minidom.Element)]
if childElements:
for child in childElements:
key = child.tagName
if hasattr(rc, key):
if type(getattr(rc, key)) <> type([]):
setattr(rc, key, [getattr(rc, key)])
setattr(rc, key, getattr(rc, key) + [unmarshal(child)])
elif isinstance(child, minidom.Element) and \
(child.tagName == 'Details'):
# make the first Details element a key
setattr(rc,key,[unmarshal(child)])
#dbg: because otherwise 'hasattr' only tests
#dbg: on the second occurence: if there's a
#dbg: single return to a query, it's not a
#dbg: list. This module should always
#dbg: return a list of Details objects.
else:
setattr(rc, key, unmarshal(child))
else:
#jec: we'll have the main part of the element stored in .text
#jec: will break if tag <text> is also present
text = "".join([e.data for e in element.childNodes \
if isinstance(e, minidom.Text)])
setattr(rc, 'text', text)
return rc
#unique items from a list from the cookbook
def uniq(alist): # Fastest without order preserving
set = {}
map(set.__setitem__, alist, [])
return set.keys()
## Only the "getList" module is complete.
## Work in Progress; Nearly Finished
class Blogs():
def getList(self,auth=True):
"""blogs.getList requires READ authentication"""
# please read documentation on how to use this
method = 'flickr.blogs.getList'
if auth==True : data = _doget(method, auth=True)
if not auth==True : data = _doget(method, auth=False)
bID = []
bName = []
bNeedsPword = []
bURL = []
try:
for plog in data.rsp.blogs.blog:
bID.append(plog.id)
bName.append(plog.name)
bNeedsPword.append(plog.needspassword)
bURL.append(plog.url)
except TypeError:
try:
bID.append(data.rsp.blogs.blog.id)
bName.append(data.rsp.blogs.blog.name)
bNeedsPword.append(data.rsp.blogs.blog.needspassword)
bURL.append(data.rsp.blogs.blog.url)
except AttributeError:
return "AttributeError, unexplained!"
except:
return "Unknown error!"
except AttributeError:
return "There are no blogs!"
myReturn = [bID,bName,bNeedsPword,bURL]
return myReturn
def postPhoto(self, blogID, photoID, title, description, bpassword):
"""blogs.postPhoto requires WRITE authentication"""
method = 'flickr.blogs.postPhoto'
return None
class Urls():
    def getUserPhotosURL(self, userid):
"""Returns user URL in an array (to access, use array[1])"""
method = 'flickr.urls.getUserPhotos'
data = _doget(method, user_id=userid)
return [data.rsp.user.nsid,data.rsp.user.url]
class Auth():
def getFrob(self):
"""Returns a frob that is used in authentication"""
method = 'flickr.auth.getFrob'
sig_str = API_SECRET + 'api_key' + API_KEY + 'method' + method
signature_hash = hashlib.md5(sig_str).hexdigest()
data = _doget(method, auth=False, api_sig=signature_hash)
return data.rsp.frob.text
def loginLink(self, permission, frob):
"""Generates a link that the user should be sent to"""
myAuth = Auth()
sig_str = API_SECRET + 'api_key' + API_KEY + 'frob' + frob + 'perms' + permission
signature_hash = hashlib.md5(sig_str).hexdigest()
perms = permission
link = "http://flickr.com/services/auth/?api_key=%s&perms=%s&frob=%s&api_sig=%s" % (API_KEY, perms, frob, signature_hash)
return link
def getToken(self, frob):
"""This token is what needs to be used in future API calls"""
method = 'flickr.auth.getToken'
sig_str = API_SECRET + 'api_key' + API_KEY + 'frob' + frob + 'method' + method
signature_hash = hashlib.md5(sig_str).hexdigest()
data = _doget(method, auth=False, api_sig=signature_hash,
api_key=API_KEY, frob=frob)
return data.rsp.auth.token.text
def userToken():
    # This function allows flickr.py to retrieve the saved token:
    # once the token for a program has been obtained from Flickr it cannot
    # be obtained again, so flickr.py saves it in a file (token.txt by
    # default) under tokenPath.
if not tokenPath == '':
f = file(os.path.join(tokenPath,tokenFile),'r')
else:
f = file(tokenFile,'r')
token = f.read()
f.close()
return token
def getUserPhotosURL(userid):
"""Returns user URL in an array (to access, use array[1])"""
# This addition has been added upon request of
# nsteinmetz. It will be "cleaned up" at another
# time.
method = 'flickr.urls.getUserPhotos'
data = _doget(method, user_id=userid)
userurl = [data.rsp.user.nsid,data.rsp.user.url]
return userurl
if __name__ == '__main__':
print test_echo()
|
favorites_getList
|
MondoReport.py
|
#!/usr/bin/env python
"""
@@TR: This code is pretty much unsupported.
MondoReport.py -- Batching module for Python and Cheetah.
Version 2001-Nov-18. Doesn't do much practical yet, but the companion
testMondoReport.py passes all its tests.
-Mike Orr (Iron)
TODO: BatchRecord.prev/next/prev_batches/next_batches/query, prev.query,
next.query.
How about Report: .page(), .all(), .summary()? Or PageBreaker.
"""
import operator, types
try:
from Cheetah.NameMapper import valueForKey as lookup_func
except ImportError:
def lookup_func(obj, name):
if hasattr(obj, name):
return getattr(obj, name)
else:
return obj[name] # Raises KeyError.
########## CONSTANTS ##############################
True, False = (1==1), (1==0)
numericTypes = types.IntType, types.LongType, types.FloatType
########## PUBLIC GENERIC FUNCTIONS ##############################
class NegativeError(ValueError):
pass
def isNumeric(v):
return type(v) in numericTypes
def isNonNegative(v):
ret = isNumeric(v)
if ret and v < 0:
raise NegativeError(v)
def isNotNone(v):
return v is not None
def Roman(n):
n = int(n) # Raises TypeError.
if n < 1:
        raise ValueError("roman numeral for zero or negative undefined: " + str(n))
roman = ''
while n >= 1000:
n = n - 1000
roman = roman + 'M'
while n >= 500:
n = n - 500
roman = roman + 'D'
while n >= 100:
n = n - 100
roman = roman + 'C'
while n >= 50:
n = n - 50
roman = roman + 'L'
while n >= 10:
n = n - 10
roman = roman + 'X'
while n >= 5:
n = n - 5
roman = roman + 'V'
while n < 5 and n >= 1:
n = n - 1
roman = roman + 'I'
roman = roman.replace('DCCCC', 'CM')
roman = roman.replace('CCCC', 'CD')
roman = roman.replace('LXXXX', 'XC')
roman = roman.replace('XXXX', 'XL')
roman = roman.replace('VIIII', 'IX')
roman = roman.replace('IIII', 'IV')
return roman
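# For example, Roman(1998) first accumulates 'MDCCCCLXXXXVIII' and the
# replacement pass then rewrites it to 'MCMXCVIII'.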
def sum(lis):
return reduce(operator.add, lis, 0)
def mean(lis):
"""Always returns a floating-point number.
"""
lis_len = len(lis)
if lis_len == 0:
        return 0.00  # Avoid ZeroDivisionError for an empty list.
total = float( sum(lis) )
return total / lis_len
def median(lis):
lis = lis[:]
lis.sort()
return lis[int(len(lis)/2)]
def variance(lis):
raise NotImplementedError()
def variance_n(lis):
raise NotImplementedError()
def standardDeviation(lis):
raise NotImplementedError()
def standardDeviation_n(lis):
raise NotImplementedError()
class IndexFormats:
"""Eight ways to display a subscript index.
("Fifty ways to leave your lover....")
"""
def __init__(self, index, item=None):
self._index = index
self._number = index + 1
self._item = item
def index(self):
return self._index
__call__ = index
def number(self):
return self._number
def even(self):
return self._number % 2 == 0
def odd(self):
return not self.even()
def even_i(self):
return self._index % 2 == 0
def odd_i(self):
return not self.even_i()
def letter(self):
return self.Letter().lower()
def Letter(self):
n = ord('A') + self._index
return chr(n)
def roman(self):
return self.Roman().lower()
def Roman(self):
return Roman(self._number)
def item(self):
return self._item
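# A small illustration (hypothetical values): for fmt = IndexFormats(2, 'spam'),
# fmt.index() == 2, fmt.number() == 3, fmt.Letter() == 'C', fmt.Roman() == 'III',
# fmt.odd() is True (the 1-based number is 3) and fmt.item() == 'spam'.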
########## PRIVATE CLASSES ##############################
class ValuesGetterMixin:
def __init__(self, origList):
self._origList = origList
def _getValues(self, field=None, criteria=None):
if field:
ret = [lookup_func(elm, field) for elm in self._origList]
else:
ret = self._origList
if criteria:
ret = filter(criteria, ret)
return ret
class RecordStats(IndexFormats, ValuesGetterMixin):
"""The statistics that depend on the current record.
"""
def __init__(self, origList, index):
record = origList[index] # Raises IndexError.
IndexFormats.__init__(self, index, record)
ValuesGetterMixin.__init__(self, origList)
def length(self):
return len(self._origList)
def first(self):
return self._index == 0
def last(self):
return self._index >= len(self._origList) - 1
def _firstOrLastValue(self, field, currentIndex, otherIndex):
currentValue = self._origList[currentIndex] # Raises IndexError.
try:
otherValue = self._origList[otherIndex]
except IndexError:
return True
if field:
currentValue = lookup_func(currentValue, field)
otherValue = lookup_func(otherValue, field)
return currentValue != otherValue
def firstValue(self, field=None):
return self._firstOrLastValue(field, self._index, self._index - 1)
def lastValue(self, field=None):
return self._firstOrLastValue(field, self._index, self._index + 1)
# firstPage and lastPage not implemented. Needed?
def percentOfTotal(self, field=None, suffix='%', default='N/A', decimals=2):
rec = self._origList[self._index]
if field:
val = lookup_func(rec, field)
else:
val = rec
try:
lis = self._getValues(field, isNumeric)
except NegativeError:
return default
total = sum(lis)
if total == 0.00: # Avoid ZeroDivisionError.
return default
val = float(val)
try:
percent = (val / total) * 100
except ZeroDivisionError:
return default
if decimals == 0:
percent = int(percent)
else:
percent = round(percent, decimals)
if suffix:
return str(percent) + suffix # String.
else:
return percent # Numeric.
def __call__(self): # Overrides IndexFormats.__call__
"""This instance is not callable, so we override the super method.
"""
raise NotImplementedError()
def prev(self):
if self._index == 0:
return None
else:
length = self.length()
start = self._index - length
return PrevNextPage(self._origList, length, start)
def next(self):
if self._index + self.length() == self.length():
return None
else:
length = self.length()
start = self._index + length
return PrevNextPage(self._origList, length, start)
def prevPages(self):
raise NotImplementedError()
def nextPages(self):
raise NotImplementedError()
prev_batches = prevPages
next_batches = nextPages
def summary(self):
raise NotImplementedError()
|
if size < 1:
if start > 0 and end > 0 and end >= start:
size=end+1-start
else: size=7
if start > 0:
try: sequence[start-1]
except: start=len(sequence)
# if start > l: start=l
if end > 0:
if end < start: end=start
else:
end=start+size-1
try: sequence[end+orphan-1]
except: end=len(sequence)
# if l - end < orphan: end=l
elif end > 0:
try: sequence[end-1]
except: end=len(sequence)
# if end > l: end=l
start=end+1-size
if start - 1 < orphan: start=1
else:
start=1
end=start+size-1
try: sequence[end+orphan-1]
except: end=len(sequence)
# if l - end < orphan: end=l
return start,end,size
class Summary(ValuesGetterMixin):
"""The summary statistics, that don't depend on the current record.
"""
def __init__(self, origList):
ValuesGetterMixin.__init__(self, origList)
def sum(self, field=None):
lis = self._getValues(field, isNumeric)
return sum(lis)
total = sum
def count(self, field=None):
lis = self._getValues(field, isNotNone)
return len(lis)
def min(self, field=None):
lis = self._getValues(field, isNotNone)
return min(lis) # Python builtin function min.
def max(self, field=None):
lis = self._getValues(field, isNotNone)
return max(lis) # Python builtin function max.
def mean(self, field=None):
"""Always returns a floating point number.
"""
lis = self._getValues(field, isNumeric)
return mean(lis)
average = mean
def median(self, field=None):
lis = self._getValues(field, isNumeric)
return median(lis)
    def variance(self, field=None):
        raise NotImplementedError()
    def variance_n(self, field=None):
        raise NotImplementedError()
    def standardDeviation(self, field=None):
        raise NotImplementedError()
    def standardDeviation_n(self, field=None):
        raise NotImplementedError()
class PrevNextPage:
def __init__(self, origList, size, start):
end = start + size
self.start = IndexFormats(start, origList[start])
self.end = IndexFormats(end, origList[end])
self.length = size
########## MAIN PUBLIC CLASS ##############################
class MondoReport:
_RecordStatsClass = RecordStats
_SummaryClass = Summary
def __init__(self, origlist):
self._origList = origlist
def page(self, size, start, overlap=0, orphan=0):
"""Returns list of ($r, $a, $b)
"""
if overlap != 0:
raise NotImplementedError("non-zero overlap")
if orphan != 0:
raise NotImplementedError("non-zero orphan")
origList = self._origList
origList_len = len(origList)
start = max(0, start)
end = min( start + size, len(self._origList) )
mySlice = origList[start:end]
ret = []
for rel in range(size):
abs_ = start + rel
r = mySlice[rel]
a = self._RecordStatsClass(origList, abs_)
b = self._RecordStatsClass(mySlice, rel)
tup = r, a, b
ret.append(tup)
return ret
batch = page
def all(self):
origList_len = len(self._origList)
return self.page(origList_len, 0, 0, 0)
def summary(self):
return self._SummaryClass(self._origList)
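    # A usage sketch (hypothetical record list and field name): page() yields
    # (record, absoluteStats, relativeStats) triples whose stats objects expose
    # the IndexFormats/RecordStats helpers defined above.
    #
    #   report = MondoReport(records)
    #   for r, a, b in report.page(size=10, start=0):
    #       print a.number(), r, a.percentOfTotal('amount')
    #   totals = report.summary()
    #   print totals.sum('amount'), totals.mean('amount')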
"""
**********************************
Return a pageful of records from a sequence, with statistics.
in : origlist, list or tuple. The entire set of records. This is
usually a list of objects or a list of dictionaries.
page, int >= 0. Which page to display.
size, int >= 1. How many records per page.
widow, int >=0. Not implemented.
orphan, int >=0. Not implemented.
base, int >=0. Number of first page (usually 0 or 1).
out: list of (o, b) pairs. The records for the current page. 'o' is
the original element from 'origlist' unchanged. 'b' is a Batch
object containing meta-info about 'o'.
exc: IndexError if 'page' or 'size' is < 1. If 'origlist' is empty or
'page' is too high, it returns an empty list rather than raising
an error.
origlist_len = len(origlist)
start = (page + base) * size
end = min(start + size, origlist_len)
ret = []
# widow, orphan calculation: adjust 'start' and 'end' up and down,
# Set 'widow', 'orphan', 'first_nonwidow', 'first_nonorphan' attributes.
for i in range(start, end):
o = origlist[i]
b = Batch(origlist, size, i)
tup = o, b
ret.append(tup)
return ret
def prev(self):
# return a PrevNextPage or None
def next(self):
# return a PrevNextPage or None
def prev_batches(self):
# return a list of SimpleBatch for the previous batches
def next_batches(self):
# return a list of SimpleBatch for the next batches
########## PUBLIC MIXIN CLASS FOR CHEETAH TEMPLATES ##############
class MondoReportMixin:
def batch(self, origList, size=None, start=0, overlap=0, orphan=0):
bat = MondoReport(origList)
return bat.batch(size, start, overlap, orphan)
def batchstats(self, origList):
bat = MondoReport(origList)
return bat.stats()
"""
# vim: shiftwidth=4 tabstop=4 expandtab textwidth=79
|
def _prevNextHelper(self, start,end,size,orphan,sequence):
"""Copied from Zope's DT_InSV.py's "opt" function.
"""
|
main.rs
|
use fltk::{prelude::*, *};
|
surface: wgpu::Surface,
queue: wgpu::Queue,
render_pipeline: wgpu::RenderPipeline,
}
impl State {
pub async fn new(win: &window::Window) -> State {
let size = (win.w() as _, win.h() as _);
let instance = wgpu::Instance::new(wgpu::Backends::all());
let surface = unsafe { instance.create_surface(win) };
let adapter = instance
.request_adapter(&wgpu::RequestAdapterOptions {
power_preference: wgpu::PowerPreference::default(),
compatible_surface: Some(&surface),
})
.await
.expect("Failed to find an appropriate adapter");
let (device, queue) = adapter
.request_device(
&wgpu::DeviceDescriptor {
label: None,
features: wgpu::Features::empty(),
limits: wgpu::Limits::downlevel_defaults().using_resolution(adapter.limits()),
},
None,
)
.await
.expect("Failed to create device");
let shader = device.create_shader_module(&wgpu::ShaderModuleDescriptor {
label: None,
source: wgpu::ShaderSource::Wgsl(Cow::Borrowed(include_str!("shader.wgsl"))),
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: &[],
push_constant_ranges: &[],
});
let swapchain_format = surface.get_preferred_format(&adapter).unwrap();
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: None,
layout: Some(&pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[],
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[swapchain_format.into()],
}),
primitive: wgpu::PrimitiveState::default(),
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
});
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: swapchain_format,
width: size.0,
height: size.1,
present_mode: wgpu::PresentMode::Mailbox,
};
surface.configure(&device, &config);
State {
device, surface, queue, render_pipeline
}
}
}
fn main() {
let a = app::App::default();
let mut win = window::Window::default().with_size(400, 300);
win.end();
win.show();
let state: State = pollster::block_on(State::new(&win));
while a.wait() {
let frame = state.surface
.get_current_frame()
.expect("Failed to acquire next swap chain texture")
.output;
let view = frame
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder =
state.device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
{
let mut rpass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: None,
color_attachments: &[wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::GREEN),
store: true,
},
}],
depth_stencil_attachment: None,
});
rpass.set_pipeline(&state.render_pipeline);
rpass.draw(0..3, 0..1);
}
state.queue.submit(Some(encoder.finish()));
}
}
|
use std::borrow::Cow;
struct State {
device: wgpu::Device,
|
lenet.py
|
import torch.nn as nn
from collections import OrderedDict
class C1(nn.Module):
def __init__(self):
super(C1, self).__init__()
self.c1 = nn.Sequential(OrderedDict([
('c1', nn.Conv2d(1, 6, kernel_size=(5, 5))),
('relu1', nn.ReLU()),
('s2', nn.MaxPool2d(kernel_size=(2, 2), stride=2))
]))
def forward(self, img):
output = self.c1(img)
return output
class C3(nn.Module):
def
|
(self):
super(C3, self).__init__()
self.c3 = nn.Sequential(OrderedDict([
('c3', nn.Conv2d(6, 16, kernel_size=(5, 5))),
('relu2', nn.ReLU()),
('s4', nn.MaxPool2d(kernel_size=(2, 2), stride=2))
]))
def forward(self, img):
output = self.c3(img)
return output
class C5(nn.Module):
def __init__(self):
super(C5, self).__init__()
self.c5 = nn.Sequential(OrderedDict([
('c5', nn.Conv2d(16, 120, kernel_size=(5, 5))),
('relu3', nn.ReLU())
]))
def forward(self, img):
output = self.c5(img)
return output
class F6(nn.Module):
def __init__(self):
super(F6, self).__init__()
self.f6 = nn.Sequential(OrderedDict([
('f6', nn.Linear(120, 84)),
('relu4', nn.ReLU())
]))
def forward(self, img):
output = self.f6(img)
return output
class FCoutput(nn.Module):
def __init__(self):
super(FCoutput, self).__init__()
self.fcoutput = nn.Sequential(OrderedDict([
('fcoutput7', nn.Linear(84, 10)),
('sig1', nn.LogSoftmax(dim=-1))
]))
def forward(self, img):
output = self.fcoutput(img)
return output
class LeNet5(nn.Module):
"""
Input - 1x32x32
Output - 10
"""
def __init__(self):
super(LeNet5, self).__init__()
self.c1 = C1()
self.c3 = C3()
self.c5 = C5()
self.f6 = F6()
self.fcoutput = FCoutput()
def forward(self, img):
# Conv Layer(C1)
# - input: 32x32x1
# - output: 28x28x6
# - weights: (5x5x1 + 1)x6
# Sub-sampling(S2)
# - input: 28x28x6
# - output: 14x14x6
# - weights: 2x2x1
output = self.c1(img)
# Conv Layer(C3)
# - input: 14x14x6
# - output: 10x10x16
# - weights: (5x5x6 + 1)x16
# Sub-sampling(S4)
# - input: 10x10x16
# - output: 5x5x16
# - weights: 2x2x1
output = self.c3(output)
# Conv Layer(C5)
# - input: 5x5x16
# - output: 1x1x120
# - weights: (5x5x16 + 1)x120
output = self.c5(output)
# Flatten Layer
output = output.view(img.size(0), -1)
# Fully Connected Layer(F6)
# - input: 120
# - output: 84
output = self.f6(output)
# Fully Connected Layer(F7)
# - input: 84
# - output: 10
output = self.fcoutput(output)
return output
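

# Illustrative usage sketch (not part of the original file): pushes a dummy
# 1x32x32 grayscale batch through LeNet5 to show the expected output shape.
# The batch size of 4 is an arbitrary choice for the example.
if __name__ == '__main__':
    import torch
    model = LeNet5()
    dummy = torch.randn(4, 1, 32, 32)  # (batch, channels, height, width)
    logits = model(dummy)
    print(logits.shape)  # torch.Size([4, 10])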
|
__init__
|
oauth.py
|
# Databricks CLI
# Copyright 2021 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"), except
# that the use of services to which certain application programming
# interfaces (each, an "API") connect requires that the user first obtain
# a license for the use of the APIs from Databricks, Inc. ("Databricks"),
# by creating an account at www.databricks.com and agreeing to either (a)
# the Community Edition Terms of Service, (b) the Databricks Terms of
# Service, or (c) another written agreement between Licensee and Databricks
# for the use of the APIs.
#
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import hashlib
import json
import os
import webbrowser
from datetime import datetime, timedelta, tzinfo
import click
import jwt
from jwt import PyJWTError
import oauthlib.oauth2
from oauthlib.oauth2.rfc6749.errors import OAuth2Error
import requests
from requests.exceptions import RequestException
from databricks_cli.utils import error_and_quit
try:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
except ImportError:
from http.server import BaseHTTPRequestHandler, HTTPServer
# This could use 'import secrets' in Python 3
def token_urlsafe(nbytes=32):
tok = os.urandom(nbytes)
return base64.urlsafe_b64encode(tok).rstrip(b'=').decode('ascii')
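
# Hedged sketch of the Python 3 alternative mentioned above (assumes Python 3.6+,
# where the standard-library 'secrets' module is available). The name
# _token_urlsafe_py3 is illustrative and is not used elsewhere in this module.
def _token_urlsafe_py3(nbytes=32):
    import secrets
    return secrets.token_urlsafe(nbytes)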
# This could be datetime.timezone.utc in Python 3
class UTCTimeZone(tzinfo):
"""UTC"""
def utcoffset(self, dt):
#pylint: disable=unused-argument
return timedelta(0)
def tzname(self, dt):
#pylint: disable=unused-argument
return "UTC"
def dst(self, dt):
#pylint: disable=unused-argument
return timedelta(0)
# Some contant values
OIDC_REDIRECTOR_PATH = "oidc"
CLIENT_ID = "databricks-cli"
REDIRECT_PORT = 8020
UTC = UTCTimeZone()
def get_client(client_id=CLIENT_ID):
return oauthlib.oauth2.WebApplicationClient(client_id)
def get_redirect_url(port=REDIRECT_PORT):
return "http://localhost:{port}".format(port=port)
def fetch_well_known_config(idp_url):
known_config_url = "{idp_url}/.well-known/oauth-authorization-server".format(idp_url=idp_url)
try:
response = requests.request(method="GET", url=known_config_url)
except RequestException:
error_and_quit("Unable to fetch OAuth configuration from {idp_url}."
"Verify that OAuth is enabled on this account.".format(idp_url=idp_url))
if response.status_code != 200:
error_and_quit("Unable to fetch OAuth configuration from {idp_url}. "
"Verify that OAuth is enabled on this account.".format(idp_url=idp_url))
return json.loads(response.text)
def get_idp_url(host):
maybe_scheme = "https://" if not host.startswith("https://") else ""
maybe_trailing_slash = "/" if not host.endswith("/") else ""
return "{scheme}{host}{trailing}{path}".format(
scheme=maybe_scheme, host=host, trailing=maybe_trailing_slash, path=OIDC_REDIRECTOR_PATH)
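
# Worked example (hypothetical hostname): get_idp_url("example.cloud.databricks.com")
# returns "https://example.cloud.databricks.com/oidc"; an explicit scheme or
# trailing slash already present in the input is not duplicated.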
def get_challenge(verifier_string=token_urlsafe(32)):
digest = hashlib.sha256(verifier_string.encode('UTF-8')).digest()
challenge_string = base64.urlsafe_b64encode(digest).decode("UTF-8").replace('=', '')
return verifier_string, challenge_string
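
# Illustrative helper (not part of the original flow): for PKCE "S256" the
# challenge must equal the unpadded URL-safe base64 of SHA-256(verifier),
# which is exactly what get_challenge() computes above. The name
# _pkce_challenge_matches is hypothetical.
def _pkce_challenge_matches(verifier_string, challenge_string):
    digest = hashlib.sha256(verifier_string.encode('UTF-8')).digest()
    expected = base64.urlsafe_b64encode(digest).decode("UTF-8").replace('=', '')
    return expected == challenge_string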
# This is a janky global that is used to store the path of the single request the HTTP server
# will receive.
global_request_path = None
def set_request_path(path):
global global_request_path
global_request_path = path
class SingleRequestHandler(BaseHTTPRequestHandler):
RESPONSE_BODY = """<html>
<head>
<title>Close this Tab</title>
<style>
body {
font-family: "Barlow", Helvetica, Arial, sans-serif;
padding: 20px;
background-color: #f3f3f3;
}
</style>
</head>
<body>
<h1>Please close this tab.</h1>
<p>
The Databricks CLI received a response. You may close this tab.
</p>
</body>
</html>""".encode("utf-8")
def do_GET(self): # nopep8
self.send_response(200, "Success")
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(self.RESPONSE_BODY)
set_request_path(self.path)
def log_message(self, format, *args):
#pylint: disable=redefined-builtin
#pylint: disable=unused-argument
return
def get_authorization_code(client, auth_url, redirect_url, scope, state, challenge, port):
#pylint: disable=unused-variable
(auth_req_uri, headers, body) = client.prepare_authorization_request(
authorization_url=auth_url,
redirect_url=redirect_url,
scope=scope,
state=state,
code_challenge=challenge,
code_challenge_method="S256")
click.echo("Opening {uri}".format(uri=auth_req_uri))
with HTTPServer(("", port), SingleRequestHandler) as httpd:
webbrowser.open_new(auth_req_uri)
click.echo("Listening for OAuth authorization callback at {uri}"
.format(uri=redirect_url))
httpd.handle_request()
if not global_request_path:
error_and_quit("No path parameters were returned to the callback at {uri}"
.format(uri=redirect_url))
# This is a kludge because the parsing library expects https callbacks
# We should probably set it up using https
full_redirect_url = "https://localhost:{port}/{path}".format(
port=port, path=global_request_path)
try:
authorization_code_response = \
client.parse_request_uri_response(full_redirect_url, state=state)
except OAuth2Error as err:
error_and_quit("OAuth Token Request error {error}".format(error=err.description))
return authorization_code_response
def send_auth_code_token_request(client, token_request_url, redirect_url, code, verifier):
|
def send_token_request(token_request_url, data):
headers = {
"Accept": "application/json",
"Content-Type": "application/x-www-form-urlencoded"
}
response = requests.request(method="POST", url=token_request_url, data=data, headers=headers)
oauth_response = json.loads(response.text)
return oauth_response
def send_refresh_token_request(hostname, refresh_token):
idp_url = get_idp_url(hostname)
oauth_config = fetch_well_known_config(idp_url)
token_request_url = oauth_config['token_endpoint']
client = get_client()
token_request_body = client.prepare_refresh_body(
refresh_token=refresh_token, client_id=client.client_id)
return send_token_request(token_request_url, token_request_body)
def get_tokens_from_response(oauth_response):
access_token = oauth_response['access_token']
refresh_token = oauth_response['refresh_token'] if 'refresh_token' in oauth_response else None
return access_token, refresh_token
def check_and_refresh_access_token(hostname, access_token, refresh_token):
now = datetime.now(tz=UTC)
# If we can't decode an expiration time, this will be expired by default.
expiration_time = now
try:
# This token has already been verified and we are just parsing it.
# If it has been tampered with, it will be rejected on the server side.
# This avoids having to fetch the public key from the issuer and perform
# an unnecessary signature verification.
decoded = jwt.decode(access_token, options={"verify_signature": False})
expiration_time = datetime.fromtimestamp(decoded['exp'], tz=UTC)
except PyJWTError as err:
error_and_quit(err)
if expiration_time > now:
# The access token is fine. Just return it.
return access_token, refresh_token, False
if not refresh_token:
error_and_quit("OAuth access token expired on {expiration_time}."
.format(expiration_time=expiration_time))
# Try to refresh using the refresh token
click.echo("Attempting to refresh OAuth access token that expired on {expiration_time}"
.format(expiration_time=expiration_time))
oauth_response = send_refresh_token_request(hostname, refresh_token)
fresh_access_token, fresh_refresh_token = get_tokens_from_response(oauth_response)
return fresh_access_token, fresh_refresh_token, True
def get_tokens(hostname, scope=None):
idp_url = get_idp_url(hostname)
oauth_config = fetch_well_known_config(idp_url)
    # We are going to override oauth_config["authorization_endpoint"] and use the
    # /oidc redirector on the hostname, which may inject additional parameters.
auth_url = "{}oidc/v1/authorize".format(hostname)
state = token_urlsafe(16)
(verifier, challenge) = get_challenge()
client = get_client()
redirect_url = get_redirect_url()
try:
auth_response = get_authorization_code(
client,
auth_url,
redirect_url,
scope,
state,
challenge,
REDIRECT_PORT)
except OAuth2Error as err:
error_and_quit("OAuth Authorization Error: {error}".format(error=err.description))
token_request_url = oauth_config["token_endpoint"]
code = auth_response['code']
oauth_response = \
send_auth_code_token_request(client, token_request_url, redirect_url, code, verifier)
return get_tokens_from_response(oauth_response)
|
token_request_body = client.prepare_request_body(code=code, redirect_uri=redirect_url)
data = "{body}&code_verifier={verifier}".format(body=token_request_body, verifier=verifier)
return send_token_request(token_request_url, data)
|
log.go
|
package keyring
import (
pkglog "log"
)
var logger = NewLogger(ErrLevel)
// SetLogger sets logger for the package.
func SetLogger(l Logger) func() {
old := logger
logger = l
return func() {
logger = old
}
}
// Logger interface used in this package.
type Logger interface {
Debugf(format string, args ...interface{})
Infof(format string, args ...interface{})
Warningf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
}
// LogLevel ...
type LogLevel int
const (
// DebugLevel ...
DebugLevel LogLevel = 3
// InfoLevel ...
InfoLevel LogLevel = 2
// WarnLevel ...
WarnLevel LogLevel = 1
// ErrLevel ...
ErrLevel LogLevel = 0
	// NoLevel ...
NoLevel LogLevel = -1
)
// NewLogger ...
func NewLogger(lev LogLevel) Logger {
return &defaultLog{Level: lev}
}
func (l LogLevel) String() string {
switch l {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warn"
case ErrLevel:
return "err"
default:
return ""
}
}
func
|
() {
pkglog.SetFlags(pkglog.LstdFlags | pkglog.Lmicroseconds)
}
type defaultLog struct {
Level LogLevel
}
func (l defaultLog) Debugf(format string, args ...interface{}) {
if l.Level >= 3 {
pkglog.Printf("[DEBG] "+format+"\n", args...)
}
}
func (l defaultLog) Infof(format string, args ...interface{}) {
if l.Level >= 2 {
pkglog.Printf("[INFO] "+format+"\n", args...)
}
}
func (l defaultLog) Warningf(format string, args ...interface{}) {
if l.Level >= 1 {
pkglog.Printf("[WARN] "+format+"\n", args...)
}
}
func (l defaultLog) Errorf(format string, args ...interface{}) {
if l.Level >= 0 {
pkglog.Printf("[ERR] "+format+"\n", args...)
}
}
func (l defaultLog) Fatalf(format string, args ...interface{}) {
pkglog.Fatalf(format, args...)
}
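
// Illustrative sketch (not part of the original file): temporarily enabling
// debug-level logging for the package and restoring the previous logger when
// done. The function name exampleVerboseLogging is hypothetical.
func exampleVerboseLogging() {
	restore := SetLogger(NewLogger(DebugLevel))
	defer restore()
	logger.Debugf("debug logging enabled while this function runs")
}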
|
init
|
rpc_rawtransaction.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the rawtransaction RPCs.
Test the following RPCs:
- createrawtransaction
- signrawtransactionwithwallet
- sendrawtransaction
- decoderawtransaction
- getrawtransaction
"""
from collections import OrderedDict
from decimal import Decimal
from io import BytesIO
from test_framework.messages import CTransaction, ToHex
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error, bytes_to_hex_str, connect_nodes_bi, hex_str_to_bytes
class multidict(dict):
"""Dictionary that allows duplicate keys.
Constructed with a list of (key, value) tuples. When dumped by the json module,
will output invalid json with repeated keys, eg:
    >>> json.dumps(multidict([(1,2),(1,2)]))
'{"1": 2, "1": 2}'
Used to test calls to rpc methods with repeated keys in the json object."""
def __init__(self, x):
dict.__init__(self, x)
self.x = x
def
|
(self):
return self.x
# Create one-input, one-output, no-fee transaction:
class RawTransactionsTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [["-addresstype=legacy", "-txindex"], ["-addresstype=legacy", "-txindex"], ["-addresstype=legacy", "-txindex"]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def setup_network(self):
super().setup_network()
connect_nodes_bi(self.nodes, 0, 2)
def run_test(self):
self.log.info('prepare some coins for multiple *rawtransaction commands')
self.nodes[2].generate(1)
self.sync_all()
self.nodes[0].generate(101)
self.sync_all()
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.5)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),1.0)
self.nodes[0].sendtoaddress(self.nodes[2].getnewaddress(),5.0)
self.sync_all()
self.nodes[0].generate(5)
self.sync_all()
self.log.info('Test getrawtransaction on genesis block coinbase returns an error')
block = self.nodes[0].getblock(self.nodes[0].getblockhash(0))
assert_raises_rpc_error(-5, "The genesis block coinbase is not considered an ordinary transaction", self.nodes[0].getrawtransaction, block['merkleroot'])
self.log.info('Check parameter types and required parameters of createrawtransaction')
# Test `createrawtransaction` required parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction)
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [])
# Test `createrawtransaction` invalid extra parameters
assert_raises_rpc_error(-1, "createrawtransaction", self.nodes[0].createrawtransaction, [], {}, 0, False, 'foo')
# Test `createrawtransaction` invalid `inputs`
txid = '1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000'
assert_raises_rpc_error(-3, "Expected type array", self.nodes[0].createrawtransaction, 'foo', {})
assert_raises_rpc_error(-1, "JSON value is not an object as expected", self.nodes[0].createrawtransaction, ['foo'], {})
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].createrawtransaction, [{}], {})
assert_raises_rpc_error(-8, "txid must be of length 64 (not 3, for 'foo')", self.nodes[0].createrawtransaction, [{'txid': 'foo'}], {})
assert_raises_rpc_error(-8, "txid must be hexadecimal string (not 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844')", self.nodes[0].createrawtransaction, [{'txid': 'ZZZ7bb8b1697ea987f3b223ba7819250cae33efacb068d23dc24859824a77844'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid}], {})
assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 'foo'}], {})
assert_raises_rpc_error(-8, "Invalid parameter, vout must be positive", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': -1}], {})
assert_raises_rpc_error(-8, "Invalid parameter, sequence number is out of range", self.nodes[0].createrawtransaction, [{'txid': txid, 'vout': 0, 'sequence': -1}], {})
# Test `createrawtransaction` invalid `outputs`
address = self.nodes[0].getnewaddress()
address2 = self.nodes[0].getnewaddress()
assert_raises_rpc_error(-1, "JSON value is not an array as expected", self.nodes[0].createrawtransaction, [], 'foo')
self.nodes[0].createrawtransaction(inputs=[], outputs={}) # Should not throw for backwards compatibility
self.nodes[0].createrawtransaction(inputs=[], outputs=[])
assert_raises_rpc_error(-8, "Data must be hexadecimal string", self.nodes[0].createrawtransaction, [], {'data': 'foo'})
assert_raises_rpc_error(-5, "Invalid Motherofweeddaycoin address", self.nodes[0].createrawtransaction, [], {'foo': 0})
assert_raises_rpc_error(-3, "Invalid amount", self.nodes[0].createrawtransaction, [], {address: 'foo'})
assert_raises_rpc_error(-3, "Amount out of range", self.nodes[0].createrawtransaction, [], {address: -1})
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], multidict([(address, 1), (address, 1)]))
assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: %s" % address, self.nodes[0].createrawtransaction, [], [{address: 1}, {address: 1}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], [{"data": 'aa'}, {"data": "bb"}])
assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data", self.nodes[0].createrawtransaction, [], multidict([("data", 'aa'), ("data", "bb")]))
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair must contain exactly one key", self.nodes[0].createrawtransaction, [], [{'a': 1, 'b': 2}])
assert_raises_rpc_error(-8, "Invalid parameter, key-value pair not an object as expected", self.nodes[0].createrawtransaction, [], [['key-value pair1'], ['2']])
# Test `createrawtransaction` invalid `locktime`
assert_raises_rpc_error(-3, "Expected type number", self.nodes[0].createrawtransaction, [], {}, 'foo')
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, -1)
assert_raises_rpc_error(-8, "Invalid parameter, locktime out of range", self.nodes[0].createrawtransaction, [], {}, 4294967296)
# Test `createrawtransaction` invalid `replaceable`
assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo')
self.log.info('Check that createrawtransaction accepts an array and object as outputs')
tx = CTransaction()
# One output
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99}))))
assert_equal(len(tx.vout), 1)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]),
)
# Two outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)])))))
assert_equal(len(tx.vout), 2)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]),
)
# Multiple mixed outputs
tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')])))))
assert_equal(len(tx.vout), 3)
assert_equal(
bytes_to_hex_str(tx.serialize()),
self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}, {'data': '99'}]),
)
for type in ["bech32", "p2sh-segwit", "legacy"]:
addr = self.nodes[0].getnewaddress("", type)
addrinfo = self.nodes[0].getaddressinfo(addr)
pubkey = addrinfo["scriptPubKey"]
self.log.info('sendrawtransaction with missing prevtx info (%s)' %(type))
# Test `signrawtransactionwithwallet` invalid `prevtxs`
inputs = [ {'txid' : txid, 'vout' : 3, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
prevtx = dict(txid=txid, scriptPubKey=pubkey, vout=3, amount=1)
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type == "legacy":
del prevtx["amount"]
succ = self.nodes[0].signrawtransactionwithwallet(rawtx, [prevtx])
assert succ["complete"]
if type != "legacy":
assert_raises_rpc_error(-3, "Missing amount", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"vout": 3,
}
])
assert_raises_rpc_error(-3, "Missing vout", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"scriptPubKey": pubkey,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing txid", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"scriptPubKey": pubkey,
"vout": 3,
"amount": 1,
}
])
assert_raises_rpc_error(-3, "Missing scriptPubKey", self.nodes[0].signrawtransactionwithwallet, rawtx, [
{
"txid": txid,
"vout": 3,
"amount": 1
}
])
#########################################
# sendrawtransaction with missing input #
#########################################
self.log.info('sendrawtransaction with missing input')
        inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1}] # won't exist
outputs = { self.nodes[0].getnewaddress() : 4.998 }
rawtx = self.nodes[2].createrawtransaction(inputs, outputs)
rawtx = self.nodes[2].signrawtransactionwithwallet(rawtx)
# This will raise an exception since there are missing inputs
assert_raises_rpc_error(-25, "Missing inputs", self.nodes[2].sendrawtransaction, rawtx['hex'])
#####################################
# getrawtransaction with block hash #
#####################################
# make a tx by sending then generate 2 blocks; block1 has the tx in it
tx = self.nodes[2].sendtoaddress(self.nodes[1].getnewaddress(), 1)
block1, block2 = self.nodes[2].generate(2)
self.sync_all()
# We should be able to get the raw transaction by providing the correct block
gottx = self.nodes[0].getrawtransaction(tx, True, block1)
assert_equal(gottx['txid'], tx)
assert_equal(gottx['in_active_chain'], True)
# We should not have the 'in_active_chain' flag when we don't provide a block
gottx = self.nodes[0].getrawtransaction(tx, True)
assert_equal(gottx['txid'], tx)
assert 'in_active_chain' not in gottx
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[0].getrawtransaction, tx, True, block2)
# An invalid block hash should raise the correct errors
assert_raises_rpc_error(-1, "JSON value is not a string as expected", self.nodes[0].getrawtransaction, tx, True, True)
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 6, for 'foobar')", self.nodes[0].getrawtransaction, tx, True, "foobar")
assert_raises_rpc_error(-8, "parameter 3 must be of length 64 (not 8, for 'abcd1234')", self.nodes[0].getrawtransaction, tx, True, "abcd1234")
assert_raises_rpc_error(-8, "parameter 3 must be hexadecimal string (not 'ZZZ0000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].getrawtransaction, tx, True, "ZZZ0000000000000000000000000000000000000000000000000000000000000")
assert_raises_rpc_error(-5, "Block hash not found", self.nodes[0].getrawtransaction, tx, True, "0000000000000000000000000000000000000000000000000000000000000000")
# Undo the blocks and check in_active_chain
self.nodes[0].invalidateblock(block1)
gottx = self.nodes[0].getrawtransaction(txid=tx, verbose=True, blockhash=block1)
assert_equal(gottx['in_active_chain'], False)
self.nodes[0].reconsiderblock(block1)
assert_equal(self.nodes[0].getbestblockhash(), block2)
#########################
# RAW TX MULTISIG TESTS #
#########################
# 2of2 test
addr1 = self.nodes[2].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[2].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
# Tests for createmultisig and addmultisigaddress
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 1, ["01020304"])
self.nodes[0].createmultisig(2, [addr1Obj['pubkey'], addr2Obj['pubkey']]) # createmultisig can only take public keys
assert_raises_rpc_error(-5, "Invalid public key", self.nodes[0].createmultisig, 2, [addr1Obj['pubkey'], addr1]) # addmultisigaddress can take both pubkeys and addresses so long as they are in the wallet, which is tested here.
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr1])['address']
#use balance deltas instead of absolute values
bal = self.nodes[2].getbalance()
# send 1.2 BTC to msig adr
txId = self.nodes[0].sendtoaddress(mSigObj, 1.2)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal+Decimal('1.20000000')) #node2 has both keys of the 2of2 ms addr., tx should affect the balance
# 2of3 test from different nodes
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr3 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
addr3Obj = self.nodes[2].getaddressinfo(addr3)
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey'], addr3Obj['pubkey']])['address']
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
#THIS IS AN INCOMPLETE FEATURE
#NODE2 HAS TWO OF THREE KEY AND THE FUNDS SHOULD BE SPENDABLE AND COUNT AT BALANCE CALCULATION
assert_equal(self.nodes[2].getbalance(), bal) #for now, assume the funds of a 2of3 multisig tx are not marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned = self.nodes[1].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxPartialSigned['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxSigned = self.nodes[2].signrawtransactionwithwallet(rawTx, inputs)
assert_equal(rawTxSigned['complete'], True) #node2 can sign the tx compl., own two of three keys
self.nodes[2].sendrawtransaction(rawTxSigned['hex'])
rawTx = self.nodes[0].decoderawtransaction(rawTxSigned['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# 2of2 test for combining transactions
bal = self.nodes[2].getbalance()
addr1 = self.nodes[1].getnewaddress()
addr2 = self.nodes[2].getnewaddress()
addr1Obj = self.nodes[1].getaddressinfo(addr1)
addr2Obj = self.nodes[2].getaddressinfo(addr2)
self.nodes[1].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObj = self.nodes[2].addmultisigaddress(2, [addr1Obj['pubkey'], addr2Obj['pubkey']])['address']
mSigObjValid = self.nodes[2].getaddressinfo(mSigObj)
txId = self.nodes[0].sendtoaddress(mSigObj, 2.2)
decTx = self.nodes[0].gettransaction(txId)
rawTx2 = self.nodes[0].decoderawtransaction(decTx['hex'])
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[2].getbalance(), bal) # the funds of a 2of2 multisig tx should not be marked as spendable
txDetails = self.nodes[0].gettransaction(txId, True)
rawTx2 = self.nodes[0].decoderawtransaction(txDetails['hex'])
vout = False
for outpoint in rawTx2['vout']:
if outpoint['value'] == Decimal('2.20000000'):
vout = outpoint
break
bal = self.nodes[0].getbalance()
inputs = [{ "txid" : txId, "vout" : vout['n'], "scriptPubKey" : vout['scriptPubKey']['hex'], "redeemScript" : mSigObjValid['hex'], "amount" : vout['value']}]
outputs = { self.nodes[0].getnewaddress() : 2.19 }
rawTx2 = self.nodes[2].createrawtransaction(inputs, outputs)
rawTxPartialSigned1 = self.nodes[1].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned1)
assert_equal(rawTxPartialSigned1['complete'], False) #node1 only has one key, can't comp. sign the tx
rawTxPartialSigned2 = self.nodes[2].signrawtransactionwithwallet(rawTx2, inputs)
self.log.debug(rawTxPartialSigned2)
assert_equal(rawTxPartialSigned2['complete'], False) #node2 only has one key, can't comp. sign the tx
rawTxComb = self.nodes[2].combinerawtransaction([rawTxPartialSigned1['hex'], rawTxPartialSigned2['hex']])
self.log.debug(rawTxComb)
self.nodes[2].sendrawtransaction(rawTxComb)
rawTx2 = self.nodes[0].decoderawtransaction(rawTxComb)
self.sync_all()
self.nodes[0].generate(1)
self.sync_all()
assert_equal(self.nodes[0].getbalance(), bal+Decimal('50.00000000')+Decimal('2.19000000')) #block reward + tx
# decoderawtransaction tests
# witness transaction
encrawtx = "010000000001010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f50500000000000102616100000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, True) # decode as witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # force decode as non-witness transaction
# non-witness transaction
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
# getrawtransaction tests
# 1. valid parameters - only supply txid
txHash = rawTx["hash"]
assert_equal(self.nodes[0].getrawtransaction(txHash), rawTxSigned['hex'])
# 2. valid parameters - supply txid and 0 for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, 0), rawTxSigned['hex'])
# 3. valid parameters - supply txid and False for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, False), rawTxSigned['hex'])
# 4. valid parameters - supply txid and 1 for verbose.
# We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
assert_equal(self.nodes[0].getrawtransaction(txHash, 1)["hex"], rawTxSigned['hex'])
# 5. valid parameters - supply txid and True for non-verbose
assert_equal(self.nodes[0].getrawtransaction(txHash, True)["hex"], rawTxSigned['hex'])
# 6. invalid parameters - supply txid and string "Flase"
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, "Flase")
# 7. invalid parameters - supply txid and empty array
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, [])
# 8. invalid parameters - supply txid and empty dict
assert_raises_rpc_error(-1, "not a boolean", self.nodes[0].getrawtransaction, txHash, {})
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 1000}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 1000)
# 9. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : -1}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
# 10. invalid parameters - sequence number out of range
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967296}]
outputs = { self.nodes[0].getnewaddress() : 1 }
assert_raises_rpc_error(-8, 'Invalid parameter, sequence number is out of range', self.nodes[0].createrawtransaction, inputs, outputs)
inputs = [ {'txid' : "1d1d4e24ed99057e84c3f80fd8fbec79ed9e1acee37da269356ecea000000000", 'vout' : 1, 'sequence' : 4294967294}]
outputs = { self.nodes[0].getnewaddress() : 1 }
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
decrawtx= self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['vin'][0]['sequence'], 4294967294)
####################################
# TRANSACTION VERSION NUMBER TESTS #
####################################
# Test the minimum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = -0x80000000
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], -0x80000000)
# Test the maximum transaction version number that fits in a signed 32-bit integer.
tx = CTransaction()
tx.nVersion = 0x7fffffff
rawtx = ToHex(tx)
decrawtx = self.nodes[0].decoderawtransaction(rawtx)
assert_equal(decrawtx['version'], 0x7fffffff)
if __name__ == '__main__':
RawTransactionsTest().main()
|
items
|
helpers.py
|
from collections import OrderedDict
__author__ = 'kevin'
import socket
from threading import Lock
class LithiumHelper(object):
@staticmethod
def recv_all(sock):
read = ''
try:
data = sock.recv(1024)
read += data
except socket.error, e:
if isinstance(e.args, tuple):
if e[0] == socket.errno.EPIPE:
print "Detected remote disconnect"
raise e
else:
print "socket error ", e
return read
@staticmethod
def message_dict(msg):
map = dict()
head = msg.split(":")[0]
for line in msg.split("\n"):
split = line.split(":")
if len(split) >= 2:
map[split[0]] = split[1]
return (head, map)
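
    # Worked example (comment only): message_dict("STATUS:ok\nuser:kevin\n")
    # returns ("STATUS", {"STATUS": "ok", "user": "kevin"}) -- the text before
    # the first ":" is the head, and every "key:value" line fills the dict.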
@staticmethod
def revc_msg_dict(sock, count):
return LithiumHelper.message_dict(LithiumHelper.recv_line_num(sock, count))
@staticmethod
def recv_line_num(sock, count):
        out = ''
while count > 0:
line = LithiumHelper.recv_line(sock)
print "recv: %s" % (line)
out += line
count -= 1
return out
@staticmethod
def recv_text(sock):
read = ''
try:
chars = []
lst_char = ''
while True:
a = sock.recv(1)
if a != "\r":
if (a == "\n" and lst_char == "\n") or a == "":
return "".join(chars)
else:
chars.append(a)
lst_char = a
except socket.error, e:
if isinstance(e.args, tuple):
if e[0] == socket.errno.EPIPE:
print "Detected remote disconnect"
raise e
else:
print "socket error ", e
return read
|
def recv_line(sock):
read = ''
try:
chars = []
while True:
a = sock.recv(1)
if a != "\r":
chars.append(a)
if a == "\n" or a == "":
return "".join(chars)
except socket.error, e:
if isinstance(e.args, tuple):
if e[0] == socket.errno.EPIPE:
print "Detected remote disconnect"
raise e
else:
print "socket error ", e
return read
@staticmethod
def to_message_dict(dict):
if dict is None or len(dict) == 0:
return None
out = ""
for key, value in OrderedDict(dict).iteritems():
out += "%s:%s\n" % (str(key), str(value))
out += ""
print out
return out
class AtomicCount(object):
def __init__(self):
self.count = 0
self.lock = Lock()
def incr(self):
self._add_count(1)
def decr(self):
self._add_count(-1)
def _add_count(self, value):
self.lock.acquire()
self.count += value
self.lock.release()
|
@staticmethod
|
__init__.py
|
def get_dataset(dataset_name):
if dataset_name == 'cornell':
from .cornell_data import CornellDataset
return CornellDataset
elif dataset_name == 'jacquard':
from .jacquard_data import JacquardDataset
return JacquardDataset
elif dataset_name == 'rs_dir':
from .rs_dir_data import RsDirDataset
return RsDirDataset
elif dataset_name == 'rs':
from .rs_data import RsDataset
return RsDataset
else:
|
raise NotImplementedError('Dataset Type {} is Not implemented'.format(dataset_name))
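
# Hedged usage sketch: the factory returns a Dataset class, not an instance;
# constructor arguments depend on the chosen dataset and are not shown here.
#   Dataset = get_dataset('cornell')   # -> CornellDataset
#   train_set = Dataset(...)           # args omitted; dataset-specific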
|
|
s3.go
|
// Package s3 provides an interface to Amazon S3 object storage
package s3
// FIXME need to prevent anything but ListDir working for s3://
/*
Progress of port to aws-sdk
* Don't really need o.meta at all?
What happens if you CTRL-C a multipart upload
* get an incomplete upload
* disappears when you delete the bucket
*/
import (
"encoding/base64"
"encoding/hex"
"fmt"
"io"
"net/http"
"path"
"regexp"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/corehandlers"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
"github.com/aws/aws-sdk-go/aws/defaults"
"github.com/aws/aws-sdk-go/aws/ec2metadata"
"github.com/aws/aws-sdk-go/aws/request"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
"github.com/ncw/rclone/fs"
"github.com/ncw/rclone/fs/config/configmap"
"github.com/ncw/rclone/fs/config/configstruct"
"github.com/ncw/rclone/fs/fserrors"
"github.com/ncw/rclone/fs/fshttp"
"github.com/ncw/rclone/fs/hash"
"github.com/ncw/rclone/fs/walk"
"github.com/ncw/rclone/lib/pacer"
"github.com/ncw/rclone/lib/rest"
"github.com/ncw/swift"
"github.com/pkg/errors"
)
// Register with Fs
func init() {
fs.Register(&fs.RegInfo{
Name: "s3",
Description: "Amazon S3 Compliant Storage Provider (AWS, Alibaba, Ceph, Digital Ocean, Dreamhost, IBM COS, Minio, etc)",
NewFs: NewFs,
Options: []fs.Option{{
Name: fs.ConfigProvider,
Help: "Choose your S3 provider.",
Examples: []fs.OptionExample{{
Value: "AWS",
Help: "Amazon Web Services (AWS) S3",
}, {
Value: "Alibaba",
Help: "Alibaba Cloud Object Storage System (OSS) formerly Aliyun",
}, {
Value: "Ceph",
Help: "Ceph Object Storage",
}, {
Value: "DigitalOcean",
Help: "Digital Ocean Spaces",
}, {
Value: "Dreamhost",
Help: "Dreamhost DreamObjects",
}, {
Value: "IBMCOS",
Help: "IBM COS S3",
}, {
Value: "Minio",
Help: "Minio Object Storage",
}, {
Value: "Netease",
Help: "Netease Object Storage (NOS)",
}, {
Value: "Wasabi",
Help: "Wasabi Object Storage",
}, {
Value: "Other",
Help: "Any other S3 compatible provider",
}},
}, {
Name: "env_auth",
Help: "Get AWS credentials from runtime (environment variables or EC2/ECS meta data if no env vars).\nOnly applies if access_key_id and secret_access_key is blank.",
Default: false,
Examples: []fs.OptionExample{{
Value: "false",
Help: "Enter AWS credentials in the next step",
}, {
Value: "true",
Help: "Get AWS credentials from the environment (env vars or IAM)",
}},
}, {
Name: "access_key_id",
Help: "AWS Access Key ID.\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "secret_access_key",
Help: "AWS Secret Access Key (password)\nLeave blank for anonymous access or runtime credentials.",
}, {
Name: "region",
Help: "Region to connect to.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "us-east-1",
Help: "The default endpoint - a good choice if you are unsure.\nUS Region, Northern Virginia or Pacific Northwest.\nLeave location constraint empty.",
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region\nNeeds location constraint us-east-2.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region\nNeeds location constraint us-west-2.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region\nNeeds location constraint us-west-1.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region\nNeeds location constraint ca-central-1.",
}, {
Value: "eu-west-1",
Help: "EU (Ireland) Region\nNeeds location constraint EU or eu-west-1.",
}, {
Value: "eu-west-2",
Help: "EU (London) Region\nNeeds location constraint eu-west-2.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region\nNeeds location constraint eu-north-1.",
}, {
Value: "eu-central-1",
Help: "EU (Frankfurt) Region\nNeeds location constraint eu-central-1.",
}, {
Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore) Region\nNeeds location constraint ap-southeast-1.",
}, {
Value: "ap-southeast-2",
Help: "Asia Pacific (Sydney) Region\nNeeds location constraint ap-southeast-2.",
}, {
Value: "ap-northeast-1",
Help: "Asia Pacific (Tokyo) Region\nNeeds location constraint ap-northeast-1.",
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)\nNeeds location constraint ap-northeast-2.",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)\nNeeds location constraint ap-south-1.",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region\nNeeds location constraint sa-east-1.",
}},
}, {
Name: "region",
Help: "Region to connect to.\nLeave blank if you are using an S3 clone and you don't have a region.",
Provider: "!AWS,Alibaba",
Examples: []fs.OptionExample{{
Value: "",
Help: "Use this if unsure. Will use v4 signatures and an empty region.",
}, {
Value: "other-v2-signature",
Help: "Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nLeave blank if using AWS to use the default endpoint for the region.",
Provider: "AWS",
}, {
Name: "endpoint",
Help: "Endpoint for IBM COS S3 API.\nSpecify if using an IBM COS On Premise.",
Provider: "IBMCOS",
Examples: []fs.OptionExample{{
Value: "s3-api.us-geo.objectstorage.softlayer.net",
Help: "US Cross Region Endpoint",
}, {
Value: "s3-api.dal.us-geo.objectstorage.softlayer.net",
Help: "US Cross Region Dallas Endpoint",
}, {
Value: "s3-api.wdc-us-geo.objectstorage.softlayer.net",
Help: "US Cross Region Washington DC Endpoint",
}, {
Value: "s3-api.sjc-us-geo.objectstorage.softlayer.net",
Help: "US Cross Region San Jose Endpoint",
}, {
Value: "s3-api.us-geo.objectstorage.service.networklayer.com",
Help: "US Cross Region Private Endpoint",
}, {
Value: "s3-api.dal-us-geo.objectstorage.service.networklayer.com",
Help: "US Cross Region Dallas Private Endpoint",
}, {
Value: "s3-api.wdc-us-geo.objectstorage.service.networklayer.com",
Help: "US Cross Region Washington DC Private Endpoint",
}, {
Value: "s3-api.sjc-us-geo.objectstorage.service.networklayer.com",
Help: "US Cross Region San Jose Private Endpoint",
}, {
Value: "s3.us-east.objectstorage.softlayer.net",
Help: "US Region East Endpoint",
}, {
Value: "s3.us-east.objectstorage.service.networklayer.com",
Help: "US Region East Private Endpoint",
}, {
Value: "s3.us-south.objectstorage.softlayer.net",
Help: "US Region South Endpoint",
}, {
Value: "s3.us-south.objectstorage.service.networklayer.com",
Help: "US Region South Private Endpoint",
}, {
Value: "s3.eu-geo.objectstorage.softlayer.net",
Help: "EU Cross Region Endpoint",
}, {
Value: "s3.fra-eu-geo.objectstorage.softlayer.net",
Help: "EU Cross Region Frankfurt Endpoint",
}, {
Value: "s3.mil-eu-geo.objectstorage.softlayer.net",
Help: "EU Cross Region Milan Endpoint",
}, {
Value: "s3.ams-eu-geo.objectstorage.softlayer.net",
Help: "EU Cross Region Amsterdam Endpoint",
}, {
Value: "s3.eu-geo.objectstorage.service.networklayer.com",
Help: "EU Cross Region Private Endpoint",
}, {
Value: "s3.fra-eu-geo.objectstorage.service.networklayer.com",
Help: "EU Cross Region Frankfurt Private Endpoint",
}, {
Value: "s3.mil-eu-geo.objectstorage.service.networklayer.com",
Help: "EU Cross Region Milan Private Endpoint",
}, {
Value: "s3.ams-eu-geo.objectstorage.service.networklayer.com",
Help: "EU Cross Region Amsterdam Private Endpoint",
}, {
Value: "s3.eu-gb.objectstorage.softlayer.net",
Help: "Great Britain Endpoint",
}, {
Value: "s3.eu-gb.objectstorage.service.networklayer.com",
Help: "Great Britain Private Endpoint",
}, {
Value: "s3.ap-geo.objectstorage.softlayer.net",
Help: "APAC Cross Regional Endpoint",
}, {
Value: "s3.tok-ap-geo.objectstorage.softlayer.net",
Help: "APAC Cross Regional Tokyo Endpoint",
}, {
Value: "s3.hkg-ap-geo.objectstorage.softlayer.net",
Help: "APAC Cross Regional HongKong Endpoint",
}, {
Value: "s3.seo-ap-geo.objectstorage.softlayer.net",
Help: "APAC Cross Regional Seoul Endpoint",
}, {
Value: "s3.ap-geo.objectstorage.service.networklayer.com",
Help: "APAC Cross Regional Private Endpoint",
}, {
Value: "s3.tok-ap-geo.objectstorage.service.networklayer.com",
Help: "APAC Cross Regional Tokyo Private Endpoint",
}, {
Value: "s3.hkg-ap-geo.objectstorage.service.networklayer.com",
Help: "APAC Cross Regional HongKong Private Endpoint",
}, {
Value: "s3.seo-ap-geo.objectstorage.service.networklayer.com",
Help: "APAC Cross Regional Seoul Private Endpoint",
}, {
Value: "s3.mel01.objectstorage.softlayer.net",
Help: "Melbourne Single Site Endpoint",
}, {
Value: "s3.mel01.objectstorage.service.networklayer.com",
Help: "Melbourne Single Site Private Endpoint",
}, {
Value: "s3.tor01.objectstorage.softlayer.net",
Help: "Toronto Single Site Endpoint",
}, {
Value: "s3.tor01.objectstorage.service.networklayer.com",
Help: "Toronto Single Site Private Endpoint",
}},
}, {
// oss endpoints: https://help.aliyun.com/document_detail/31837.html
Name: "endpoint",
Help: "Endpoint for OSS API.",
Provider: "Alibaba",
Examples: []fs.OptionExample{{
Value: "oss-cn-hangzhou.aliyuncs.com",
Help: "East China 1 (Hangzhou)",
}, {
Value: "oss-cn-shanghai.aliyuncs.com",
Help: "East China 2 (Shanghai)",
}, {
Value: "oss-cn-qingdao.aliyuncs.com",
Help: "North China 1 (Qingdao)",
}, {
Value: "oss-cn-beijing.aliyuncs.com",
Help: "North China 2 (Beijing)",
}, {
Value: "oss-cn-zhangjiakou.aliyuncs.com",
Help: "North China 3 (Zhangjiakou)",
}, {
Value: "oss-cn-huhehaote.aliyuncs.com",
Help: "North China 5 (Huhehaote)",
}, {
Value: "oss-cn-shenzhen.aliyuncs.com",
Help: "South China 1 (Shenzhen)",
}, {
Value: "oss-cn-hongkong.aliyuncs.com",
Help: "Hong Kong (Hong Kong)",
}, {
Value: "oss-us-west-1.aliyuncs.com",
Help: "US West 1 (Silicon Valley)",
}, {
Value: "oss-us-east-1.aliyuncs.com",
Help: "US East 1 (Virginia)",
}, {
Value: "oss-ap-southeast-1.aliyuncs.com",
Help: "Southeast Asia Southeast 1 (Singapore)",
}, {
Value: "oss-ap-southeast-2.aliyuncs.com",
Help: "Asia Pacific Southeast 2 (Sydney)",
}, {
Value: "oss-ap-southeast-3.aliyuncs.com",
Help: "Southeast Asia Southeast 3 (Kuala Lumpur)",
}, {
Value: "oss-ap-southeast-5.aliyuncs.com",
Help: "Asia Pacific Southeast 5 (Jakarta)",
}, {
Value: "oss-ap-northeast-1.aliyuncs.com",
Help: "Asia Pacific Northeast 1 (Japan)",
}, {
Value: "oss-ap-south-1.aliyuncs.com",
Help: "Asia Pacific South 1 (Mumbai)",
}, {
Value: "oss-eu-central-1.aliyuncs.com",
Help: "Central Europe 1 (Frankfurt)",
}, {
Value: "oss-eu-west-1.aliyuncs.com",
Help: "West Europe (London)",
}, {
Value: "oss-me-east-1.aliyuncs.com",
Help: "Middle East 1 (Dubai)",
}},
}, {
Name: "endpoint",
Help: "Endpoint for S3 API.\nRequired when using an S3 clone.",
Provider: "!AWS,IBMCOS,Alibaba",
Examples: []fs.OptionExample{{
Value: "objects-us-east-1.dream.io",
Help: "Dream Objects endpoint",
Provider: "Dreamhost",
}, {
Value: "nyc3.digitaloceanspaces.com",
Help: "Digital Ocean Spaces New York 3",
Provider: "DigitalOcean",
}, {
Value: "ams3.digitaloceanspaces.com",
Help: "Digital Ocean Spaces Amsterdam 3",
Provider: "DigitalOcean",
}, {
Value: "sgp1.digitaloceanspaces.com",
Help: "Digital Ocean Spaces Singapore 1",
Provider: "DigitalOcean",
}, {
Value: "s3.wasabisys.com",
Help: "Wasabi US East endpoint",
Provider: "Wasabi",
}, {
Value: "s3.us-west-1.wasabisys.com",
Help: "Wasabi US West endpoint",
Provider: "Wasabi",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nUsed when creating buckets only.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Empty for US Region, Northern Virginia or Pacific Northwest.",
}, {
Value: "us-east-2",
Help: "US East (Ohio) Region.",
}, {
Value: "us-west-2",
Help: "US West (Oregon) Region.",
}, {
Value: "us-west-1",
Help: "US West (Northern California) Region.",
}, {
Value: "ca-central-1",
Help: "Canada (Central) Region.",
}, {
Value: "eu-west-1",
Help: "EU (Ireland) Region.",
}, {
Value: "eu-west-2",
Help: "EU (London) Region.",
}, {
Value: "eu-north-1",
Help: "EU (Stockholm) Region.",
}, {
Value: "EU",
Help: "EU Region.",
}, {
Value: "ap-southeast-1",
Help: "Asia Pacific (Singapore) Region.",
}, {
Value: "ap-southeast-2",
Help: "Asia Pacific (Sydney) Region.",
}, {
Value: "ap-northeast-1",
Help: "Asia Pacific (Tokyo) Region.",
}, {
Value: "ap-northeast-2",
Help: "Asia Pacific (Seoul)",
}, {
Value: "ap-south-1",
Help: "Asia Pacific (Mumbai)",
}, {
Value: "sa-east-1",
Help: "South America (Sao Paulo) Region.",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must match endpoint when using IBM Cloud Public.\nFor on-prem COS, do not make a selection from this list, hit enter",
Provider: "IBMCOS",
Examples: []fs.OptionExample{{
Value: "us-standard",
Help: "US Cross Region Standard",
}, {
Value: "us-vault",
Help: "US Cross Region Vault",
}, {
Value: "us-cold",
Help: "US Cross Region Cold",
}, {
Value: "us-flex",
Help: "US Cross Region Flex",
}, {
Value: "us-east-standard",
Help: "US East Region Standard",
}, {
Value: "us-east-vault",
Help: "US East Region Vault",
}, {
Value: "us-east-cold",
Help: "US East Region Cold",
}, {
Value: "us-east-flex",
Help: "US East Region Flex",
}, {
Value: "us-south-standard",
Help: "US South Region Standard",
}, {
Value: "us-south-vault",
Help: "US South Region Vault",
}, {
Value: "us-south-cold",
Help: "US South Region Cold",
}, {
Value: "us-south-flex",
Help: "US South Region Flex",
}, {
Value: "eu-standard",
Help: "EU Cross Region Standard",
}, {
Value: "eu-vault",
Help: "EU Cross Region Vault",
}, {
Value: "eu-cold",
Help: "EU Cross Region Cold",
}, {
Value: "eu-flex",
Help: "EU Cross Region Flex",
}, {
Value: "eu-gb-standard",
Help: "Great Britain Standard",
}, {
Value: "eu-gb-vault",
Help: "Great Britain Vault",
}, {
Value: "eu-gb-cold",
Help: "Great Britain Cold",
}, {
Value: "eu-gb-flex",
Help: "Great Britain Flex",
}, {
Value: "ap-standard",
Help: "APAC Standard",
}, {
Value: "ap-vault",
Help: "APAC Vault",
}, {
Value: "ap-cold",
Help: "APAC Cold",
}, {
Value: "ap-flex",
Help: "APAC Flex",
}, {
Value: "mel01-standard",
Help: "Melbourne Standard",
}, {
Value: "mel01-vault",
Help: "Melbourne Vault",
}, {
Value: "mel01-cold",
Help: "Melbourne Cold",
}, {
Value: "mel01-flex",
Help: "Melbourne Flex",
}, {
Value: "tor01-standard",
Help: "Toronto Standard",
}, {
Value: "tor01-vault",
Help: "Toronto Vault",
}, {
Value: "tor01-cold",
Help: "Toronto Cold",
}, {
Value: "tor01-flex",
Help: "Toronto Flex",
}},
}, {
Name: "location_constraint",
Help: "Location constraint - must be set to match the Region.\nLeave blank if not sure. Used when creating buckets only.",
Provider: "!AWS,IBMCOS,Alibaba",
}, {
Name: "acl",
Help: `Canned ACL used when creating buckets and storing or copying objects.
This ACL is used for creating objects and if bucket_acl isn't set, for creating buckets too.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied when server side copying objects as S3
doesn't copy the ACL from the source but rather writes a fresh one.`,
Examples: []fs.OptionExample{{
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
Provider: "!IBMCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
Provider: "!IBMCOS",
}, {
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
Provider: "!IBMCOS",
}, {
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
Provider: "!IBMCOS",
}, {
Value: "bucket-owner-read",
Help: "Object owner gets FULL_CONTROL. Bucket owner gets READ access.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
Provider: "!IBMCOS",
}, {
Value: "bucket-owner-full-control",
Help: "Both the object owner and the bucket owner get FULL_CONTROL over the object.\nIf you specify this canned ACL when creating a bucket, Amazon S3 ignores it.",
Provider: "!IBMCOS",
}, {
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default). This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise COS",
Provider: "IBMCOS",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access. This acl is available on IBM Cloud (Infra), IBM Cloud (Storage), On-Premise IBM COS",
Provider: "IBMCOS",
}, {
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access. This acl is available on IBM Cloud (Infra), On-Premise IBM COS",
Provider: "IBMCOS",
}, {
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access. Not supported on Buckets. This acl is available on IBM Cloud (Infra) and On-Premise IBM COS",
Provider: "IBMCOS",
}},
}, {
Name: "bucket_acl",
Help: `Canned ACL used when creating buckets.
For more info visit https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl
Note that this ACL is applied only when creating buckets. If it
isn't set then "acl" is used instead.`,
Advanced: true,
Examples: []fs.OptionExample{{
Value: "private",
Help: "Owner gets FULL_CONTROL. No one else has access rights (default).",
}, {
Value: "public-read",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ access.",
}, {
Value: "public-read-write",
Help: "Owner gets FULL_CONTROL. The AllUsers group gets READ and WRITE access.\nGranting this on a bucket is generally not recommended.",
}, {
Value: "authenticated-read",
Help: "Owner gets FULL_CONTROL. The AuthenticatedUsers group gets READ access.",
}},
}, {
Name: "server_side_encryption",
Help: "The server-side encryption algorithm used when storing this object in S3.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: "AES256",
Help: "AES256",
}, {
Value: "aws:kms",
Help: "aws:kms",
}},
}, {
Name: "sse_kms_key_id",
Help: "If using KMS ID you must provide the ARN of Key.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "None",
}, {
Value: "arn:aws:kms:us-east-1:*",
Help: "arn:aws:kms:*",
}},
}, {
Name: "storage_class",
Help: "The storage class to use when storing new objects in S3.",
Provider: "AWS",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "REDUCED_REDUNDANCY",
Help: "Reduced redundancy storage class",
}, {
Value: "STANDARD_IA",
Help: "Standard Infrequent Access storage class",
}, {
Value: "ONEZONE_IA",
Help: "One Zone Infrequent Access storage class",
}, {
Value: "GLACIER",
Help: "Glacier storage class",
}},
}, {
// Mapping from here: https://www.alibabacloud.com/help/doc-detail/64919.htm
Name: "storage_class",
Help: "The storage class to use when storing new objects in OSS.",
Provider: "Alibaba",
Examples: []fs.OptionExample{{
Value: "",
Help: "Default",
}, {
Value: "STANDARD",
Help: "Standard storage class",
}, {
Value: "GLACIER",
Help: "Archive storage mode.",
}, {
Value: "STANDARD_IA",
Help: "Infrequent access storage mode.",
}},
}, {
Name: "upload_cutoff",
Help: `Cutoff for switching to chunked upload
Any files larger than this will be uploaded in chunks of chunk_size.
The minimum is 0 and the maximum is 5GB.`,
Default: defaultUploadCutoff,
Advanced: true,
}, {
Name: "chunk_size",
Help: `Chunk size to use for uploading.
When uploading files larger than upload_cutoff they will be uploaded
as multipart uploads using this chunk size.
Note that "--s3-upload-concurrency" chunks of this size are buffered
in memory per transfer.
If you are transferring large files over high speed links and you have
enough memory, then increasing this will speed up the transfers.`,
Default: minChunkSize,
Advanced: true,
}, {
Name: "disable_checksum",
Help: "Don't store MD5 checksum with object metadata",
Default: false,
Advanced: true,
}, {
Name: "session_token",
Help: "An AWS session token",
Advanced: true,
}, {
Name: "upload_concurrency",
Help: `Concurrency for multipart uploads.
This is the number of chunks of the same file that are uploaded
concurrently.
If you are uploading small numbers of large files over high speed links
and these uploads do not fully utilize your bandwidth, then increasing
this may help to speed up the transfers.`,
Default: 4,
Advanced: true,
}, {
Name: "force_path_style",
	Help: `If true use path style access, if false use virtual hosted style.
If this is true (the default) then rclone will use path style access,
if false then rclone will use virtual path style. See [the AWS S3
docs](https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro)
for more info.
Some providers (eg Aliyun OSS or Netease COS) require this set to false.`,
Default: true,
Advanced: true,
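// Path style addresses the bucket in the URL path (eg https://s3.example.com/bucket/key),
// while virtual hosted style puts the bucket in the hostname
// (eg https://bucket.s3.example.com/key).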
}, {
Name: "v2_auth",
Help: `If true use v2 authentication.
If this is false (the default) then rclone will use v4 authentication.
If it is set then rclone will use v2 authentication.
Use this only if v4 signatures don't work, eg pre Jewel/v10 CEPH.`,
Default: false,
Advanced: true,
}},
})
}
// Constants
const (
metaMtime = "Mtime" // the meta key to store mtime in - eg X-Amz-Meta-Mtime
metaMD5Hash = "Md5chksum" // the meta key to store md5hash in
listChunkSize = 1000 // number of items to read at once
maxRetries = 10 // number of retries to make of operations
maxSizeForCopy = 5 * 1024 * 1024 * 1024 // The maximum size of object we can COPY
maxFileSize = 5 * 1024 * 1024 * 1024 * 1024 // largest possible upload file size
minChunkSize = fs.SizeSuffix(s3manager.MinUploadPartSize)
defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
minSleep = 10 * time.Millisecond // In case of error, start at 10ms sleep.
)
// Options defines the configuration for this backend
type Options struct {
Provider string `config:"provider"`
EnvAuth bool `config:"env_auth"`
AccessKeyID string `config:"access_key_id"`
SecretAccessKey string `config:"secret_access_key"`
Region string `config:"region"`
Endpoint string `config:"endpoint"`
LocationConstraint string `config:"location_constraint"`
ACL string `config:"acl"`
BucketACL string `config:"bucket_acl"`
ServerSideEncryption string `config:"server_side_encryption"`
SSEKMSKeyID string `config:"sse_kms_key_id"`
StorageClass string `config:"storage_class"`
UploadCutoff fs.SizeSuffix `config:"upload_cutoff"`
ChunkSize fs.SizeSuffix `config:"chunk_size"`
DisableChecksum bool `config:"disable_checksum"`
SessionToken string `config:"session_token"`
UploadConcurrency int `config:"upload_concurrency"`
ForcePathStyle bool `config:"force_path_style"`
V2Auth bool `config:"v2_auth"`
}
// Fs represents a remote s3 server
type Fs struct {
name string // the name of the remote
root string // root of the bucket - ignore all objects above this
opt Options // parsed options
features *fs.Features // optional features
c *s3.S3 // the connection to the s3 server
ses *session.Session // the s3 session
bucket string // the bucket we are working on
bucketOKMu sync.Mutex // mutex to protect bucket OK
bucketOK bool // true if we have created the bucket
bucketDeleted bool // true if we have deleted the bucket
pacer *fs.Pacer // To pace the API calls
srv *http.Client // a plain http client
}
// Object describes a s3 object
type Object struct {
// Will definitely have everything but meta which may be nil
//
// List will read everything but meta & mimeType - to fill
// that in you need to call readMetaData
fs *Fs // what this object is part of
remote string // The remote path
etag string // md5sum of the object
bytes int64 // size of the object
lastModified time.Time // Last modified
meta map[string]*string // The object metadata if known - may be nil
mimeType string // MimeType of object - may be ""
}
// ------------------------------------------------------------
// Name of the remote (as passed into NewFs)
func (f *Fs) Name() string {
return f.name
}
// Root of the remote (as passed into NewFs)
func (f *Fs) Root() string {
if f.root == "" {
return f.bucket
}
return f.bucket + "/" + f.root
}
// String converts this Fs to a string
func (f *Fs) String() string {
if f.root == "" {
return fmt.Sprintf("S3 bucket %s", f.bucket)
}
return fmt.Sprintf("S3 bucket %s path %s", f.bucket, f.root)
}
// Features returns the optional features of this Fs
func (f *Fs) Features() *fs.Features {
return f.features
}
// retryErrorCodes is a slice of error codes that we will retry
// See: https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html
var retryErrorCodes = []int{
// 409, // Conflict - various states that could be resolved on a retry
503, // Service Unavailable/Slow Down - "Reduce your request rate"
}
// S3 is pretty resilient, and the built-in retry handling is probably sufficient
// as it should notice closed connections and timeouts, which are the most likely
// sorts of failure modes
func (f *Fs) shouldRetry(err error) (bool, error) {
// If this is an awserr object, try and extract more useful information to determine if we should retry
if awsError, ok := err.(awserr.Error); ok {
// Simple case, check the original embedded error in case it's generically retryable
if fserrors.ShouldRetry(awsError.OrigErr()) {
return true, err
}
// Failing that, if it's a RequestFailure it's probably got an http status code we can check
if reqErr, ok := err.(awserr.RequestFailure); ok {
// 301 if wrong region for bucket
if reqErr.StatusCode() == http.StatusMovedPermanently {
urfbErr := f.updateRegionForBucket()
if urfbErr != nil {
fs.Errorf(f, "Failed to update region for bucket: %v", urfbErr)
return false, err
}
return true, err
}
for _, e := range retryErrorCodes {
if reqErr.StatusCode() == e {
return true, err
}
}
}
}
// Ok, not an awserr, check for generic failure conditions
return fserrors.ShouldRetry(err), err
}
// Pattern to match a s3 path
var matcher = regexp.MustCompile(`^/*([^/]*)(.*)$`)
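// For example the pattern splits "bucket/path/to/dir" into "bucket" and
// "/path/to/dir"; s3ParsePath below then trims the slashes off the directory.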
// s3ParsePath parses a s3 'url'
func s3ParsePath(path string) (bucket, directory string, err error)
|
// s3Connection makes a connection to s3
func s3Connection(opt *Options) (*s3.S3, *session.Session, error) {
// Make the auth
v := credentials.Value{
AccessKeyID: opt.AccessKeyID,
SecretAccessKey: opt.SecretAccessKey,
SessionToken: opt.SessionToken,
}
lowTimeoutClient := &http.Client{Timeout: 1 * time.Second} // low timeout to ec2 metadata service
def := defaults.Get()
def.Config.HTTPClient = lowTimeoutClient
// first provider to supply a credential set "wins"
providers := []credentials.Provider{
// use static credentials if they're present (checked by provider)
&credentials.StaticProvider{Value: v},
// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY
// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY
&credentials.EnvProvider{},
// A SharedCredentialsProvider retrieves credentials
// from the current user's home directory. It checks
// AWS_SHARED_CREDENTIALS_FILE and AWS_PROFILE too.
&credentials.SharedCredentialsProvider{},
// Pick up IAM role if we're in an ECS task
defaults.RemoteCredProvider(*def.Config, def.Handlers),
// Pick up IAM role in case we're on EC2
&ec2rolecreds.EC2RoleProvider{
Client: ec2metadata.New(session.New(), &aws.Config{
HTTPClient: lowTimeoutClient,
}),
ExpiryWindow: 3 * time.Minute,
},
}
cred := credentials.NewChainCredentials(providers)
switch {
case opt.EnvAuth:
// No need for empty checks if "env_auth" is true
case v.AccessKeyID == "" && v.SecretAccessKey == "":
// if no access key/secret and iam is explicitly disabled then fall back to anon interaction
cred = credentials.AnonymousCredentials
case v.AccessKeyID == "":
return nil, nil, errors.New("access_key_id not found")
case v.SecretAccessKey == "":
return nil, nil, errors.New("secret_access_key not found")
}
if opt.Region == "" && opt.Endpoint == "" {
opt.Endpoint = "https://s3.amazonaws.com/"
}
if opt.Region == "" {
opt.Region = "us-east-1"
}
if opt.Provider == "Alibaba" || opt.Provider == "Netease" {
opt.ForcePathStyle = false
}
awsConfig := aws.NewConfig().
WithMaxRetries(maxRetries).
WithCredentials(cred).
WithHTTPClient(fshttp.NewClient(fs.Config)).
WithS3ForcePathStyle(opt.ForcePathStyle)
if opt.Region != "" {
awsConfig.WithRegion(opt.Region)
}
if opt.Endpoint != "" {
awsConfig.WithEndpoint(opt.Endpoint)
}
// awsConfig.WithLogLevel(aws.LogDebugWithSigning)
awsSessionOpts := session.Options{
Config: *awsConfig,
}
if opt.EnvAuth && opt.AccessKeyID == "" && opt.SecretAccessKey == "" {
// Enable loading config options from ~/.aws/config (selected by AWS_PROFILE env)
awsSessionOpts.SharedConfigState = session.SharedConfigEnable
// The session constructor (aws/session/mergeConfigSrcs) will only use the user's preferred credential source
// (from the shared config file) if the passed-in Options.Config.Credentials is nil.
awsSessionOpts.Config.Credentials = nil
}
ses, err := session.NewSessionWithOptions(awsSessionOpts)
if err != nil {
return nil, nil, err
}
c := s3.New(ses)
if opt.V2Auth || opt.Region == "other-v2-signature" {
fs.Debugf(nil, "Using v2 auth")
signer := func(req *request.Request) {
// Ignore AnonymousCredentials object
if req.Config.Credentials == credentials.AnonymousCredentials {
return
}
sign(v.AccessKeyID, v.SecretAccessKey, req.HTTPRequest)
}
c.Handlers.Sign.Clear()
c.Handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler)
c.Handlers.Sign.PushBack(signer)
}
return c, ses, nil
}
func checkUploadChunkSize(cs fs.SizeSuffix) error {
if cs < minChunkSize {
return errors.Errorf("%s is less than %s", cs, minChunkSize)
}
return nil
}
func (f *Fs) setUploadChunkSize(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadChunkSize(cs)
if err == nil {
old, f.opt.ChunkSize = f.opt.ChunkSize, cs
}
return
}
func checkUploadCutoff(cs fs.SizeSuffix) error {
if cs > maxUploadCutoff {
return errors.Errorf("%s is greater than %s", cs, maxUploadCutoff)
}
return nil
}
func (f *Fs) setUploadCutoff(cs fs.SizeSuffix) (old fs.SizeSuffix, err error) {
err = checkUploadCutoff(cs)
if err == nil {
old, f.opt.UploadCutoff = f.opt.UploadCutoff, cs
}
return
}
// NewFs constructs an Fs from the path, bucket:path
func NewFs(name, root string, m configmap.Mapper) (fs.Fs, error) {
// Parse config into Options struct
opt := new(Options)
err := configstruct.Set(m, opt)
if err != nil {
return nil, err
}
err = checkUploadChunkSize(opt.ChunkSize)
if err != nil {
return nil, errors.Wrap(err, "s3: chunk size")
}
err = checkUploadCutoff(opt.UploadCutoff)
if err != nil {
return nil, errors.Wrap(err, "s3: upload cutoff")
}
bucket, directory, err := s3ParsePath(root)
if err != nil {
return nil, err
}
if opt.ACL == "" {
opt.ACL = "private"
}
if opt.BucketACL == "" {
opt.BucketACL = opt.ACL
}
c, ses, err := s3Connection(opt)
if err != nil {
return nil, err
}
f := &Fs{
name: name,
root: directory,
opt: *opt,
c: c,
bucket: bucket,
ses: ses,
pacer: fs.NewPacer(pacer.NewS3(pacer.MinSleep(minSleep))),
srv: fshttp.NewClient(fs.Config),
}
f.features = (&fs.Features{
ReadMimeType: true,
WriteMimeType: true,
BucketBased: true,
}).Fill(f)
if f.root != "" {
f.root += "/"
// Check to see if the object exists
req := s3.HeadObjectInput{
Bucket: &f.bucket,
Key: &directory,
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.HeadObject(&req)
return f.shouldRetry(err)
})
if err == nil {
f.root = path.Dir(directory)
if f.root == "." {
f.root = ""
} else {
f.root += "/"
}
// return an error with an fs which points to the parent
return f, fs.ErrorIsFile
}
}
// f.listMultipartUploads()
return f, nil
}
// Return an Object from a path
//
// If it can't be found it returns the error fs.ErrorObjectNotFound.
func (f *Fs) newObjectWithInfo(remote string, info *s3.Object) (fs.Object, error) {
o := &Object{
fs: f,
remote: remote,
}
if info != nil {
// Set info but not meta
if info.LastModified == nil {
fs.Logf(o, "Failed to read last modified")
o.lastModified = time.Now()
} else {
o.lastModified = *info.LastModified
}
o.etag = aws.StringValue(info.ETag)
o.bytes = aws.Int64Value(info.Size)
} else {
err := o.readMetaData() // reads info and meta, returning an error
if err != nil {
return nil, err
}
}
return o, nil
}
// NewObject finds the Object at remote. If it can't be found
// it returns the error fs.ErrorObjectNotFound.
func (f *Fs) NewObject(remote string) (fs.Object, error) {
return f.newObjectWithInfo(remote, nil)
}
// Gets the bucket location
func (f *Fs) getBucketLocation() (string, error) {
req := s3.GetBucketLocationInput{
Bucket: &f.bucket,
}
var resp *s3.GetBucketLocationOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.GetBucketLocation(&req)
return f.shouldRetry(err)
})
if err != nil {
return "", err
}
return s3.NormalizeBucketLocation(aws.StringValue(resp.LocationConstraint)), nil
}
// Updates the region for the bucket by reading the region from the
// bucket then updating the session.
func (f *Fs) updateRegionForBucket() error {
region, err := f.getBucketLocation()
if err != nil {
return errors.Wrap(err, "reading bucket location failed")
}
if aws.StringValue(f.c.Config.Endpoint) != "" {
return errors.Errorf("can't set region to %q as endpoint is set", region)
}
if aws.StringValue(f.c.Config.Region) == region {
return errors.Errorf("region is already %q - not updating", region)
}
// Make a new session with the new region
oldRegion := f.opt.Region
f.opt.Region = region
c, ses, err := s3Connection(&f.opt)
if err != nil {
return errors.Wrap(err, "creating new session failed")
}
f.c = c
f.ses = ses
fs.Logf(f, "Switched region to %q from %q", region, oldRegion)
return nil
}
// listFn is called from list to handle an object.
type listFn func(remote string, object *s3.Object, isDirectory bool) error
// list the objects into the function supplied
//
// dir is the starting directory, "" for root
//
// Set recurse to read sub directories
func (f *Fs) list(dir string, recurse bool, fn listFn) error {
root := f.root
if dir != "" {
root += dir + "/"
}
maxKeys := int64(listChunkSize)
delimiter := ""
if !recurse {
delimiter = "/"
}
var marker *string
for {
// FIXME need to implement ALL loop
req := s3.ListObjectsInput{
Bucket: &f.bucket,
Delimiter: &delimiter,
Prefix: &root,
MaxKeys: &maxKeys,
Marker: marker,
}
var resp *s3.ListObjectsOutput
var err error
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListObjects(&req)
return f.shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
if awsErr.StatusCode() == http.StatusNotFound {
err = fs.ErrorDirNotFound
}
}
return err
}
rootLength := len(f.root)
if !recurse {
for _, commonPrefix := range resp.CommonPrefixes {
if commonPrefix.Prefix == nil {
fs.Logf(f, "Nil common prefix received")
continue
}
remote := *commonPrefix.Prefix
if !strings.HasPrefix(remote, f.root) {
fs.Logf(f, "Odd name received %q", remote)
continue
}
remote = remote[rootLength:]
if strings.HasSuffix(remote, "/") {
remote = remote[:len(remote)-1]
}
err = fn(remote, &s3.Object{Key: &remote}, true)
if err != nil {
return err
}
}
}
for _, object := range resp.Contents {
key := aws.StringValue(object.Key)
if !strings.HasPrefix(key, f.root) {
fs.Logf(f, "Odd name received %q", key)
continue
}
remote := key[rootLength:]
// is this a directory marker?
if (strings.HasSuffix(remote, "/") || remote == "") && *object.Size == 0 {
if recurse && remote != "" {
// add a directory in if --fast-list since it will have no prefixes
remote = remote[:len(remote)-1]
err = fn(remote, &s3.Object{Key: &remote}, true)
if err != nil {
return err
}
}
continue // skip directory marker
}
err = fn(remote, object, false)
if err != nil {
return err
}
}
if !aws.BoolValue(resp.IsTruncated) {
break
}
// Use NextMarker if set, otherwise use last Key
if resp.NextMarker == nil || *resp.NextMarker == "" {
if len(resp.Contents) == 0 {
return errors.New("s3 protocol error: received listing with IsTruncated set, no NextMarker and no Contents")
}
marker = resp.Contents[len(resp.Contents)-1].Key
} else {
marker = resp.NextMarker
}
}
return nil
}
// Convert a list item into a DirEntry
func (f *Fs) itemToDirEntry(remote string, object *s3.Object, isDirectory bool) (fs.DirEntry, error) {
if isDirectory {
size := int64(0)
if object.Size != nil {
size = *object.Size
}
d := fs.NewDir(remote, time.Time{}).SetSize(size)
return d, nil
}
o, err := f.newObjectWithInfo(remote, object)
if err != nil {
return nil, err
}
return o, nil
}
// mark the bucket as being OK
func (f *Fs) markBucketOK() {
if f.bucket != "" {
f.bucketOKMu.Lock()
f.bucketOK = true
f.bucketDeleted = false
f.bucketOKMu.Unlock()
}
}
// listDir lists files and directories to out
func (f *Fs) listDir(dir string) (entries fs.DirEntries, err error) {
// List the objects and directories
err = f.list(dir, false, func(remote string, object *s3.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
if entry != nil {
entries = append(entries, entry)
}
return nil
})
if err != nil {
return nil, err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return entries, nil
}
// listBuckets lists the buckets to out
func (f *Fs) listBuckets(dir string) (entries fs.DirEntries, err error) {
if dir != "" {
return nil, fs.ErrorListBucketRequired
}
req := s3.ListBucketsInput{}
var resp *s3.ListBucketsOutput
err = f.pacer.Call(func() (bool, error) {
resp, err = f.c.ListBuckets(&req)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
for _, bucket := range resp.Buckets {
d := fs.NewDir(aws.StringValue(bucket.Name), aws.TimeValue(bucket.CreationDate))
entries = append(entries, d)
}
return entries, nil
}
// List the objects and directories in dir into entries. The
// entries can be returned in any order but should be for a
// complete directory.
//
// dir should be "" to list the root, and should not have
// trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
func (f *Fs) List(dir string) (entries fs.DirEntries, err error) {
if f.bucket == "" {
return f.listBuckets(dir)
}
return f.listDir(dir)
}
// ListR lists the objects and directories of the Fs starting
// from dir recursively into out.
//
// dir should be "" to start from the root, and should not
// have trailing slashes.
//
// This should return ErrDirNotFound if the directory isn't
// found.
//
// It should call callback for each tranche of entries read.
// These need not be returned in any particular order. If
// callback returns an error then the listing will stop
// immediately.
//
// Don't implement this unless you have a more efficient way
// of listing recursively than doing a directory traversal.
func (f *Fs) ListR(dir string, callback fs.ListRCallback) (err error) {
if f.bucket == "" {
return fs.ErrorListBucketRequired
}
list := walk.NewListRHelper(callback)
err = f.list(dir, true, func(remote string, object *s3.Object, isDirectory bool) error {
entry, err := f.itemToDirEntry(remote, object, isDirectory)
if err != nil {
return err
}
return list.Add(entry)
})
if err != nil {
return err
}
// bucket must be present if listing succeeded
f.markBucketOK()
return list.Flush()
}
// Put the Object into the bucket
func (f *Fs) Put(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
// Temporary Object under construction
fs := &Object{
fs: f,
remote: src.Remote(),
}
return fs, fs.Update(in, src, options...)
}
// PutStream uploads to the remote path with the modTime given of indeterminate size
func (f *Fs) PutStream(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) (fs.Object, error) {
return f.Put(in, src, options...)
}
// Check if the bucket exists
//
// NB this can return incorrect results if called immediately after bucket deletion
func (f *Fs) dirExists() (bool, error) {
req := s3.HeadBucketInput{
Bucket: &f.bucket,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.HeadBucket(&req)
return f.shouldRetry(err)
})
if err == nil {
return true, nil
}
if err, ok := err.(awserr.RequestFailure); ok {
if err.StatusCode() == http.StatusNotFound {
return false, nil
}
}
return false, err
}
// Mkdir creates the bucket if it doesn't exist
func (f *Fs) Mkdir(dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.bucketOK {
return nil
}
if !f.bucketDeleted {
exists, err := f.dirExists()
if err == nil {
f.bucketOK = exists
}
if err != nil || exists {
return err
}
}
req := s3.CreateBucketInput{
Bucket: &f.bucket,
ACL: &f.opt.BucketACL,
}
if f.opt.LocationConstraint != "" {
req.CreateBucketConfiguration = &s3.CreateBucketConfiguration{
LocationConstraint: &f.opt.LocationConstraint,
}
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.CreateBucket(&req)
return f.shouldRetry(err)
})
if err, ok := err.(awserr.Error); ok {
if err.Code() == "BucketAlreadyOwnedByYou" {
err = nil
}
}
if err == nil {
f.bucketOK = true
f.bucketDeleted = false
fs.Infof(f, "Bucket created with ACL %q", *req.ACL)
}
return err
}
// Rmdir deletes the bucket if the fs is at the root
//
// Returns an error if it isn't empty
func (f *Fs) Rmdir(dir string) error {
f.bucketOKMu.Lock()
defer f.bucketOKMu.Unlock()
if f.root != "" || dir != "" {
return nil
}
req := s3.DeleteBucketInput{
Bucket: &f.bucket,
}
err := f.pacer.Call(func() (bool, error) {
_, err := f.c.DeleteBucket(&req)
return f.shouldRetry(err)
})
if err == nil {
f.bucketOK = false
f.bucketDeleted = true
fs.Infof(f, "Bucket deleted")
}
return err
}
// Precision of the remote
func (f *Fs) Precision() time.Duration {
return time.Nanosecond
}
// pathEscape escapes s as for a URL path. It uses rest.URLPathEscape
// but also escapes '+' for S3 and Digital Ocean spaces compatibility
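// (eg a key containing "a+b" should come out as "a%2Bb" in the copy source)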
func pathEscape(s string) string {
return strings.Replace(rest.URLPathEscape(s), "+", "%2B", -1)
}
// Copy src to this remote using server side copy operations.
//
// This is stored with the remote path given
//
// It returns the destination Object and a possible error
//
// Will only be called if src.Fs().Name() == f.Name()
//
// If it isn't possible then return fs.ErrorCantCopy
func (f *Fs) Copy(src fs.Object, remote string) (fs.Object, error) {
err := f.Mkdir("")
if err != nil {
return nil, err
}
srcObj, ok := src.(*Object)
if !ok {
fs.Debugf(src, "Can't copy - not same remote type")
return nil, fs.ErrorCantCopy
}
srcFs := srcObj.fs
key := f.root + remote
source := pathEscape(srcFs.bucket + "/" + srcFs.root + srcObj.remote)
req := s3.CopyObjectInput{
Bucket: &f.bucket,
ACL: &f.opt.ACL,
Key: &key,
CopySource: &source,
MetadataDirective: aws.String(s3.MetadataDirectiveCopy),
}
if f.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &f.opt.ServerSideEncryption
}
if f.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &f.opt.SSEKMSKeyID
}
if f.opt.StorageClass != "" {
req.StorageClass = &f.opt.StorageClass
}
err = f.pacer.Call(func() (bool, error) {
_, err = f.c.CopyObject(&req)
return f.shouldRetry(err)
})
if err != nil {
return nil, err
}
return f.NewObject(remote)
}
// Hashes returns the supported hash sets.
func (f *Fs) Hashes() hash.Set {
return hash.Set(hash.MD5)
}
// ------------------------------------------------------------
// Fs returns the parent Fs
func (o *Object) Fs() fs.Info {
return o.fs
}
// Return a string version
func (o *Object) String() string {
if o == nil {
return "<nil>"
}
return o.remote
}
// Remote returns the remote path
func (o *Object) Remote() string {
return o.remote
}
var matchMd5 = regexp.MustCompile(`^[0-9a-f]{32}$`)
// Hash returns the Md5sum of an object returning a lowercase hex string
func (o *Object) Hash(t hash.Type) (string, error) {
if t != hash.MD5 {
return "", hash.ErrUnsupported
}
hash := strings.Trim(strings.ToLower(o.etag), `"`)
// Check the etag is a valid md5sum
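// Multipart uploads produce ETags of the form "<hex>-<number of parts>" which
// are not plain MD5 sums, so they fail this check and fall through to the
// metadata lookup below.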
if !matchMd5.MatchString(hash) {
err := o.readMetaData()
if err != nil {
return "", err
}
if md5sum, ok := o.meta[metaMD5Hash]; ok {
md5sumBytes, err := base64.StdEncoding.DecodeString(*md5sum)
if err != nil {
return "", err
}
hash = hex.EncodeToString(md5sumBytes)
} else {
hash = ""
}
}
return hash, nil
}
// Size returns the size of an object in bytes
func (o *Object) Size() int64 {
return o.bytes
}
// readMetaData gets the metadata if it hasn't already been fetched
//
// it also sets the info
func (o *Object) readMetaData() (err error) {
if o.meta != nil {
return nil
}
key := o.fs.root + o.remote
req := s3.HeadObjectInput{
Bucket: &o.fs.bucket,
Key: &key,
}
var resp *s3.HeadObjectOutput
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.HeadObject(&req)
return o.fs.shouldRetry(err)
})
if err != nil {
if awsErr, ok := err.(awserr.RequestFailure); ok {
if awsErr.StatusCode() == http.StatusNotFound {
return fs.ErrorObjectNotFound
}
}
return err
}
var size int64
// Ignore missing Content-Length assuming it is 0
// Some versions of ceph do this due to their apache proxies
if resp.ContentLength != nil {
size = *resp.ContentLength
}
o.etag = aws.StringValue(resp.ETag)
o.bytes = size
o.meta = resp.Metadata
if resp.LastModified == nil {
fs.Logf(o, "Failed to read last modified from HEAD: %v", err)
o.lastModified = time.Now()
} else {
o.lastModified = *resp.LastModified
}
o.mimeType = aws.StringValue(resp.ContentType)
return nil
}
// ModTime returns the modification time of the object
//
// It attempts to read the objects mtime and if that isn't present the
// LastModified returned in the http headers
func (o *Object) ModTime() time.Time {
if fs.Config.UseServerModTime {
return o.lastModified
}
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return time.Now()
}
// read mtime out of metadata if available
d, ok := o.meta[metaMtime]
if !ok || d == nil {
// fs.Debugf(o, "No metadata")
return o.lastModified
}
modTime, err := swift.FloatStringToTime(*d)
if err != nil {
fs.Logf(o, "Failed to read mtime from object: %v", err)
return o.lastModified
}
return modTime
}
// SetModTime sets the modification time of the local fs object
func (o *Object) SetModTime(modTime time.Time) error {
err := o.readMetaData()
if err != nil {
return err
}
o.meta[metaMtime] = aws.String(swift.TimeToFloatString(modTime))
if o.bytes >= maxSizeForCopy {
fs.Debugf(o, "SetModTime is unsupported for objects bigger than %v bytes", fs.SizeSuffix(maxSizeForCopy))
return nil
}
// Guess the content type
mimeType := fs.MimeType(o)
// Copy the object to itself to update the metadata
key := o.fs.root + o.remote
sourceKey := o.fs.bucket + "/" + key
directive := s3.MetadataDirectiveReplace // replace metadata with that passed in
req := s3.CopyObjectInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
ContentType: &mimeType,
CopySource: aws.String(pathEscape(sourceKey)),
Metadata: o.meta,
MetadataDirective: &directive,
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
err = o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.CopyObject(&req)
return o.fs.shouldRetry(err)
})
return err
}
// Storable returns a boolean indicating if this object is storable
func (o *Object) Storable() bool {
return true
}
// Open an object for read
func (o *Object) Open(options ...fs.OpenOption) (in io.ReadCloser, err error) {
key := o.fs.root + o.remote
req := s3.GetObjectInput{
Bucket: &o.fs.bucket,
Key: &key,
}
for _, option := range options {
switch option.(type) {
case *fs.RangeOption, *fs.SeekOption:
_, value := option.Header()
req.Range = &value
default:
if option.Mandatory() {
fs.Logf(o, "Unsupported mandatory option: %v", option)
}
}
}
var resp *s3.GetObjectOutput
err = o.fs.pacer.Call(func() (bool, error) {
var err error
resp, err = o.fs.c.GetObject(&req)
return o.fs.shouldRetry(err)
})
if err, ok := err.(awserr.RequestFailure); ok {
if err.Code() == "InvalidObjectState" {
return nil, errors.Errorf("Object in GLACIER, restore first: %v", key)
}
}
if err != nil {
return nil, err
}
return resp.Body, nil
}
// Update the Object from in with modTime and size
func (o *Object) Update(in io.Reader, src fs.ObjectInfo, options ...fs.OpenOption) error {
err := o.fs.Mkdir("")
if err != nil {
return err
}
modTime := src.ModTime()
size := src.Size()
multipart := size < 0 || size >= int64(o.fs.opt.UploadCutoff)
var uploader *s3manager.Uploader
if multipart {
uploader = s3manager.NewUploader(o.fs.ses, func(u *s3manager.Uploader) {
u.Concurrency = o.fs.opt.UploadConcurrency
u.LeavePartsOnError = false
u.S3 = o.fs.c
u.PartSize = int64(o.fs.opt.ChunkSize)
if size == -1 {
// Make parts as small as possible while still being able to upload to the
// S3 file size limit. Rounded up to nearest MB.
u.PartSize = (((maxFileSize / s3manager.MaxUploadParts) >> 20) + 1) << 20
return
}
// Adjust PartSize until the number of parts is small enough.
if size/u.PartSize >= s3manager.MaxUploadParts {
// Calculate partition size rounded up to the nearest MB
u.PartSize = (((size / s3manager.MaxUploadParts) >> 20) + 1) << 20
}
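// Worked example assuming the SDK's 10000 part limit: a stream of unknown
// size gets ((5 TiB / 10000) >> 20 + 1) << 20 = 525 MiB parts, while a known
// 1 TiB upload gets 105 MiB parts.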
})
}
// Set the mtime in the meta data
metadata := map[string]*string{
metaMtime: aws.String(swift.TimeToFloatString(modTime)),
}
// read the md5sum if available for non multipart uploads and if
// disable checksum isn't present.
var md5sum string
if !multipart || !o.fs.opt.DisableChecksum {
hash, err := src.Hash(hash.MD5)
if err == nil && matchMd5.MatchString(hash) {
hashBytes, err := hex.DecodeString(hash)
if err == nil {
md5sum = base64.StdEncoding.EncodeToString(hashBytes)
if multipart {
metadata[metaMD5Hash] = &md5sum
}
}
}
}
// Guess the content type
mimeType := fs.MimeType(src)
key := o.fs.root + o.remote
if multipart {
req := s3manager.UploadInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
Body: in,
ContentType: &mimeType,
Metadata: metadata,
//ContentLength: &size,
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
_, err = uploader.Upload(&req)
return o.fs.shouldRetry(err)
})
if err != nil {
return err
}
} else {
req := s3.PutObjectInput{
Bucket: &o.fs.bucket,
ACL: &o.fs.opt.ACL,
Key: &key,
ContentType: &mimeType,
Metadata: metadata,
}
if md5sum != "" {
req.ContentMD5 = &md5sum
}
if o.fs.opt.ServerSideEncryption != "" {
req.ServerSideEncryption = &o.fs.opt.ServerSideEncryption
}
if o.fs.opt.SSEKMSKeyID != "" {
req.SSEKMSKeyId = &o.fs.opt.SSEKMSKeyID
}
if o.fs.opt.StorageClass != "" {
req.StorageClass = &o.fs.opt.StorageClass
}
// Create the request
putObj, _ := o.fs.c.PutObjectRequest(&req)
// Sign it so we can upload using a presigned request.
//
// Note the SDK doesn't currently support streaming to
// PutObject so we'll use this work-around.
url, headers, err := putObj.PresignRequest(15 * time.Minute)
if err != nil {
return errors.Wrap(err, "s3 upload: sign request")
}
// Set the body to nil if empty so as not to use chunked encoding
if size == 0 {
in = nil
}
// create the vanilla http request
httpReq, err := http.NewRequest("PUT", url, in)
if err != nil {
return errors.Wrap(err, "s3 upload: new request")
}
// set the headers we signed and the length
httpReq.Header = headers
httpReq.ContentLength = size
err = o.fs.pacer.CallNoRetry(func() (bool, error) {
resp, err := o.fs.srv.Do(httpReq)
if err != nil {
return o.fs.shouldRetry(err)
}
body, err := rest.ReadBody(resp)
if err != nil {
return o.fs.shouldRetry(err)
}
if resp.StatusCode >= 200 && resp.StatusCode <= 299 {
return false, nil
}
err = errors.Errorf("s3 upload: %s: %s", resp.Status, body)
return fserrors.ShouldRetryHTTP(resp, retryErrorCodes), err
})
if err != nil {
return err
}
}
// Read the metadata from the newly created object
o.meta = nil // wipe old metadata
err = o.readMetaData()
return err
}
// Remove an object
func (o *Object) Remove() error {
key := o.fs.root + o.remote
req := s3.DeleteObjectInput{
Bucket: &o.fs.bucket,
Key: &key,
}
err := o.fs.pacer.Call(func() (bool, error) {
_, err := o.fs.c.DeleteObject(&req)
return o.fs.shouldRetry(err)
})
return err
}
// MimeType of an Object if known, "" otherwise
func (o *Object) MimeType() string {
err := o.readMetaData()
if err != nil {
fs.Logf(o, "Failed to read metadata: %v", err)
return ""
}
return o.mimeType
}
// Check the interfaces are satisfied
var (
_ fs.Fs = &Fs{}
_ fs.Copier = &Fs{}
_ fs.PutStreamer = &Fs{}
_ fs.ListRer = &Fs{}
_ fs.Object = &Object{}
_ fs.MimeTyper = &Object{}
)
|
{
parts := matcher.FindStringSubmatch(path)
if parts == nil {
err = errors.Errorf("couldn't parse bucket out of s3 path %q", path)
} else {
bucket, directory = parts[1], parts[2]
directory = strings.Trim(directory, "/")
}
return
}
|
0001_initial.py
|
# Generated by Django 2.2.6 on 2019-10-13 23:29
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
|
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Core_sample',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('global_id', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
('name', models.CharField(max_length=50, verbose_name='Name')),
('deposit', models.PositiveIntegerField(verbose_name='Deposit')),
('hole', models.PositiveIntegerField(verbose_name='Borehole')),
('top', models.FloatField(verbose_name='Top')),
('bottom', models.FloatField(verbose_name='Bottom')),
('status', models.IntegerField(choices=[(1, 'notAnalysed'), (2, 'analysed'), (3, 'inProcess'), (4, 'error')], default=1, verbose_name='Status')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='User')),
],
options={
'verbose_name': 'Core sample',
'verbose_name_plural': 'Core samples',
},
),
migrations.CreateModel(
name='Fragment',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('dl_src', models.FilePathField(verbose_name='Daylight image')),
('uv_src', models.FilePathField(verbose_name='UV image')),
('top', models.FloatField(verbose_name='Top')),
('bottom', models.FloatField(verbose_name='Bottom')),
('cs', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core_sample.Core_sample', verbose_name='Core sample')),
],
options={
'verbose_name': 'Core sample fragment',
'verbose_name_plural': 'Core sample fragments',
},
),
]
|
|
hasher.rs
|
use mina_hasher::{Fp, Hashable, Hasher, ROInput};
use o1_utils::FieldHelpers;
use serde::Deserialize;
use std::fs::File;
use std::path::PathBuf;
//
// Helpers for test vectors
//
#[derive(Debug, Deserialize)]
struct TestVectors {
test_vectors: Vec<TestVector>,
}
#[derive(Clone, Debug, Deserialize)]
struct TestVector {
input: Vec<String>,
output: String,
}
impl Hashable for TestVector {
type D = ();
fn to_roinput(&self) -> ROInput {
let mut roi = ROInput::new();
// For hashing we only care about the input part
for input in &self.input {
roi.append_field(Fp::from_hex(input).expect("failed to deserialize field element"));
}
roi
}
fn domain_string(_: Option<&Self>, _: Self::D) -> Option<String> {
None
}
}
fn test_vectors(test_vector_file: &str, hasher: &mut dyn Hasher<TestVector>) {
// read test vectors from given file
let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
path.push("../oracle/tests/test_vectors");
path.push(&test_vector_file);
let file = File::open(&path).expect("couldn't open test vector file");
let test_vectors: TestVectors =
serde_json::from_reader(file).expect("couldn't deserialize test vector file");
// execute test vectors
|
for test_vector in test_vectors.test_vectors {
let expected_output =
Fp::from_hex(&test_vector.output).expect("failed to deserialize field element");
// hash & check against expect output
let output = hasher.hash(&test_vector);
assert_eq!(output, expected_output);
}
}
//
// Tests
//
#[test]
fn hasher_test_vectors_legacy() {
let mut hasher = mina_hasher::create_legacy::<TestVector>(());
test_vectors("legacy.json", &mut hasher);
}
#[test]
fn hasher_test_vectors_kimchi() {
let mut hasher = mina_hasher::create_kimchi::<TestVector>(());
test_vectors("kimchi.json", &mut hasher);
}
| |
grdfilter.py
|
"""
grdfilter - Filter a grid in the space (or time) domain.
"""
from pygmt.clib import Session
from pygmt.helpers import (
GMTTempFile,
build_arg_string,
fmt_docstring,
kwargs_to_strings,
use_alias,
)
from pygmt.io import load_dataarray
|
D="distance",
F="filter",
G="outgrid",
I="spacing",
N="nans",
R="region",
T="toggle",
V="verbose",
f="coltypes",
r="registration",
)
@kwargs_to_strings(I="sequence", R="sequence")
def grdfilter(grid, **kwargs):
r"""
Filter a grid in the space (or time) domain.
Filter a grid file in the time domain using one of the selected convolution
or non-convolution isotropic or rectangular filters and compute distances
using Cartesian or Spherical geometries. The output grid file can
optionally be generated as a sub-region of the input (via ``region``)
and/or with new increment (via ``spacing``) or registration
(via ``toggle``). In this way, one may have "extra space" in the input
data so that the edges will not be used and the output can be within one
half-width of the input edges. If the filter is low-pass, then the output
may be less frequently sampled than the input.
Full option list at :gmt-docs:`grdfilter.html`
{aliases}
Parameters
----------
grid : str or xarray.DataArray
The file name of the input grid or the grid loaded as a DataArray.
outgrid : str or None
The name of the output netCDF file with extension .nc to store the grid
in.
filter : str
**b**\|\ **c**\|\ **g**\|\ **o**\|\ **m**\|\ **p**\|\ **h**\ *xwidth*\
[/*width2*\][*modifiers*].
Name of the filter type you wish to apply, followed by the width:
b: Box Car
c: Cosine Arch
g: Gaussian
o: Operator
m: Median
p: Maximum Likelihood probability
h: histogram
distance : str
Distance *flag* tells how grid (x,y) relates to filter width as
follows:
p: grid (px,py) with *width* an odd number of pixels; Cartesian
distances.
0: grid (x,y) same units as *width*, Cartesian distances.
1: grid (x,y) in degrees, *width* in kilometers, Cartesian distances.
2: grid (x,y) in degrees, *width* in km, dx scaled by cos(middle y),
Cartesian distances.
The above options are fastest because they allow weight matrix to be
computed only once. The next three options are slower because they
recompute weights for each latitude.
3: grid (x,y) in degrees, *width* in km, dx scaled by cosine(y),
Cartesian distance calculation.
4: grid (x,y) in degrees, *width* in km, Spherical distance
calculation.
5: grid (x,y) in Mercator ``projection='m1'`` img units, *width* in km,
Spherical distance calculation.
{I}
nans : str or float
**i**\|\ **p**\|\ **r**.
Determine how NaN-values in the input grid affect the filtered output.
{R}
toggle : bool
Toggle the node registration for the output grid so as to become the
opposite of the input grid. [Default gives the same registration as the
input grid].
{V}
{f}
{r}
Returns
-------
ret: xarray.DataArray or None
Return type depends on whether the ``outgrid`` parameter is set:
- :class:`xarray.DataArray` if ``outgrid`` is not set
- None if ``outgrid`` is set (grid output will be stored in file set by
``outgrid``)
Example
-------
>>> import os
>>> import pygmt
>>> # Apply a filter of 600km (full width) to the @earth_relief_30m file
>>> # and return a filtered field (saved as netcdf)
>>> pygmt.grdfilter(
... grid="@earth_relief_30m",
... filter="m600",
... distance="4",
... region=[150, 250, 10, 40],
... spacing=0.5,
... outgrid="filtered_pacific.nc",
... )
>>> os.remove("filtered_pacific.nc") # cleanup file
>>> # Apply a Gaussian smoothing filter of 600 km to the input data array,
>>> # and return a filtered data array with the smoothed field.
>>> grid = pygmt.datasets.load_earth_relief()
>>> smooth_field = pygmt.grdfilter(grid=grid, filter="g600", distance="4")
"""
with GMTTempFile(suffix=".nc") as tmpfile:
with Session() as lib:
file_context = lib.virtualfile_from_data(check_kind="raster", data=grid)
with file_context as infile:
if (outgrid := kwargs.get("G")) is None:
kwargs["G"] = outgrid = tmpfile.name # output to tmpfile
lib.call_module("grdfilter", build_arg_string(kwargs, infile=infile))
return load_dataarray(outgrid) if outgrid == tmpfile.name else None
|
@fmt_docstring
@use_alias(
|
serializers.py
|
from __future__ import unicode_literals
import ast
from collections import OrderedDict
import json
import logging
from django.contrib.auth import get_user_model
from rest_framework import fields, serializers
from rest_framework_bulk import BulkSerializerMixin, BulkListSerializer
from . import auth
from .. import exc, models, validators
from ..util import get_field_attr
log = logging.getLogger(__name__)
###############
# Custom Fields
###############
class JSONDataField(fields.Field):
"""
Base field used to represent attributes as JSON <-> ``field_type``.
It is an error if ``field_type`` is not defined in a subclass.
"""
field_type = None
def to_representation(self, value):
return value
def to_internal_value(self, data):
log.debug('JSONDictField.to_internal_value() data = %r', data)
if self.field_type is None:
raise NotImplementedError(
'You must subclass JSONDataField and define field_type'
)
if not data:
data = self.field_type()
if isinstance(data, self.field_type):
return data
# Try it as a regular JSON object
try:
return json.loads(data)
except ValueError:
# Or try it as a Python object
try:
return ast.literal_eval(data)
except (SyntaxError, ValueError) as err:
raise exc.ValidationError(err)
except Exception as err:
raise exc.ValidationError(err)
return data
class JSONDictField(JSONDataField):
"""Field used to represent attributes as JSON <-> Dict."""
field_type = dict
class JSONListField(JSONDataField):
"""Field used to represent attributes as JSON <-> List."""
field_type = list
class MACAddressField(fields.Field):
"""Field used to validate MAC address objects as integer or string."""
def to_representation(self, value):
return value
def to_internal_value(self, value):
return validators.validate_mac_address(value)
###################
# Base Serializer #
###################
class NsotSerializer(serializers.ModelSerializer):
"""Base serializer that logs change events."""
def to_internal_value(self, data):
"""Inject site_pk from view's kwargs if it's not already in data."""
kwargs = self.context['view'].kwargs
log.debug(
'NsotSerializer.to_internal_value() data [before] = %r', data
)
if 'site_id' not in data and 'site_pk' in kwargs:
data['site_id'] = kwargs['site_pk']
log.debug('NsotSerializer.to_internal_value() data [after] = %r', data)
return super(NsotSerializer, self).to_internal_value(data)
def to_representation(self, obj):
"""Always return the dict representation."""
if isinstance(obj, OrderedDict):
return obj
return obj.to_dict()
######
# User
######
class UserSerializer(serializers.ModelSerializer):
"""
UserProxy model serializer that takes optional `with_secret_key` argument
that controls whether the secret_key for the user should be displayed.
"""
def __init__(self, *args, **kwargs):
# Don't pass `with_secret_key` up to the superclass
self.with_secret_key = kwargs.pop('with_secret_key', None)
super(UserSerializer, self).__init__(*args, **kwargs)
# If we haven't passed `with_secret_key`, don't show the secret_key
# field.
if self.with_secret_key is None:
self.fields.pop('secret_key')
permissions = fields.ReadOnlyField(source='get_permissions')
class Meta:
model = get_user_model()
fields = ('id', 'email', 'permissions', 'secret_key')
######
# Site
######
class SiteSerializer(serializers.ModelSerializer):
class Meta:
model = models.Site
fields = '__all__'
#########
# Changes
#########
class ChangeSerializer(NsotSerializer):
"""Used for displaying Change events."""
class Meta:
model = models.Change
fields = '__all__'
###########
# Attribute
###########
class AttributeSerializer(NsotSerializer):
"""Used for GET, DELETE on Attributes."""
class Meta:
model = models.Attribute
fields = '__all__'
class AttributeCreateSerializer(AttributeSerializer):
"""Used for POST on Attributes."""
constraints = JSONDictField(
required=False,
label=get_field_attr(models.Attribute, 'constraints', 'verbose_name'),
help_text=get_field_attr(models.Attribute, 'constraints', 'help_text')
)
site_id = fields.IntegerField(
label=get_field_attr(models.Attribute, 'site', 'verbose_name'),
help_text=get_field_attr(models.Attribute, 'site', 'help_text')
)
class Meta:
model = models.Attribute
fields = ('name', 'description', 'resource_name', 'required',
'display', 'multi', 'constraints', 'site_id')
class AttributeUpdateSerializer(BulkSerializerMixin,
AttributeCreateSerializer):
"""
Used for PUT, PATCH, on Attributes.
Currently because Attributes have only one required field (name), and it
may not be updated, there is not much functional difference between PUT and
PATCH.
"""
class Meta:
model = models.Attribute
list_serializer_class = BulkListSerializer
fields = ('id', 'description', 'required', 'display', 'multi',
'constraints')
#######
# Value
#######
class ValueSerializer(serializers.ModelSerializer):
"""Used for GET, DELETE on Values."""
class Meta:
model = models.Value
fields = ('id', 'name', 'value', 'attribute', 'resource_name',
'resource_id')
# Not sure if we want to view an attribute value w/ so much context just
# yet.
# def to_representation(self, obj):
# return obj.to_dict()
class ValueCreateSerializer(ValueSerializer):
"""Used for POST on Values."""
class Meta:
model = models.Value
read_only_fields = ('id', 'name', 'resource_name')
fields = ('id', 'name', 'value', 'attribute', 'resource_name',
'resource_id')
###########
# Resources
###########
class ResourceSerializer(NsotSerializer):
"""For any object that can have attributes."""
attributes = JSONDictField(
required=False,
help_text='Dictionary of attributes to set.'
)
def create(self, validated_data, commit=True):
"""Create that is aware of attributes."""
# Remove the related fields before we write the object
attributes = validated_data.pop('attributes', {})
# Save the base object to the database.
obj = super(ResourceSerializer, self).create(validated_data)
# Try to populate the related fields and if there are any validation
# problems, delete the object and re-raise the error. If not, save the
# changes.
try:
obj.set_attributes(attributes)
except exc.ValidationError:
obj.delete()
raise
else:
if commit:
obj.save()
return obj
def update(self, instance, validated_data, commit=True):
"""
Update that is aware of attributes.
This will not set attributes if they are not provided during a partial
update.
"""
# Remove related fields before we write the object
attributes = validated_data.pop('attributes', None)
# Save the object to the database.
obj = super(ResourceSerializer, self).update(
instance, validated_data
)
# If attributes have been provided, populate them and save the object,
# allowing any validation errors to raise before saving.
obj.set_attributes(attributes, partial=self.partial)
if commit:
obj.save()
return obj
########
# Device
########
class DeviceSerializer(ResourceSerializer):
"""Used for GET, DELETE on Devices."""
class Meta:
model = models.Device
fields = '__all__'
class DeviceCreateSerializer(DeviceSerializer):
"""Used for POST on Devices."""
site_id = fields.IntegerField(
label=get_field_attr(models.Device, 'site', 'verbose_name'),
help_text=get_field_attr(models.Device, 'site', 'help_text')
)
class Meta:
model = models.Device
fields = ('hostname', 'attributes', 'site_id')
class DeviceUpdateSerializer(BulkSerializerMixin, DeviceCreateSerializer):
"""Used for PUT on Devices."""
attributes = JSONDictField(
required=True,
help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Device
list_serializer_class = BulkListSerializer
fields = ('id', 'hostname', 'attributes')
class DevicePartialUpdateSerializer(BulkSerializerMixin,
DeviceCreateSerializer):
"""Used for PATCH on Devices."""
class Meta:
model = models.Device
list_serializer_class = BulkListSerializer
fields = ('id', 'hostname', 'attributes')
#########
# Network
#########
class NetworkSerializer(ResourceSerializer):
"""Used for GET, DELETE on Networks."""
class Meta:
model = models.Network
fields = '__all__'
class NetworkCreateSerializer(NetworkSerializer):
"""Used for POST on Networks."""
cidr = fields.CharField(
write_only=True, required=False, label='CIDR',
help_text=(
'IPv4/IPv6 CIDR address. If provided, this overrides the value of '
'network_address & prefix_length. If not provided, '
'network_address & prefix_length are required.'
)
)
network_address = fields.ModelField(
model_field=models.Network._meta.get_field('network_address'),
required=False,
label=get_field_attr(
models.Network, 'network_address', 'verbose_name'
),
help_text=get_field_attr(
models.Network, 'network_address', 'help_text'
),
)
|
help_text=get_field_attr(models.Network, 'prefix_length', 'help_text'),
)
site_id = fields.IntegerField(
label=get_field_attr(models.Network, 'site', 'verbose_name'),
help_text=get_field_attr(models.Network, 'site', 'help_text')
)
class Meta:
model = models.Network
fields = ('cidr', 'network_address', 'prefix_length', 'attributes',
'state', 'site_id')
class NetworkUpdateSerializer(BulkSerializerMixin, NetworkCreateSerializer):
"""Used for PUT on Networks."""
attributes = JSONDictField(
required=True,
help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Network
list_serializer_class = BulkListSerializer
fields = ('id', 'attributes', 'state')
class NetworkPartialUpdateSerializer(BulkSerializerMixin,
NetworkCreateSerializer):
"""Used for PATCH on Networks."""
class Meta:
model = models.Network
list_serializer_class = BulkListSerializer
fields = ('id', 'attributes', 'state')
###########
# Interface
###########
class InterfaceSerializer(ResourceSerializer):
"""Used for GET, DELETE on Interfaces."""
parent_id = fields.IntegerField(
required=False, allow_null=True,
label=get_field_attr(models.Interface, 'parent', 'verbose_name'),
help_text=get_field_attr(models.Interface, 'parent', 'help_text'),
)
class Meta:
model = models.Interface
fields = '__all__'
def create(self, validated_data):
log.debug('InterfaceCreateSerializer.create() validated_data = %r',
validated_data)
# Remove the related fields before we write the object
addresses = validated_data.pop('addresses', [])
# Create the base object to the database, but don't save attributes
# yet.
obj = super(InterfaceSerializer, self).create(
validated_data, commit=False
)
# Try to populate the related fields and if there are any validation
# problems, delete the object and re-raise the error. If not, save the
# changes.
try:
obj.set_addresses(addresses)
except exc.ValidationError:
obj.delete()
raise
else:
obj.save()
return obj
def update(self, instance, validated_data):
log.debug('InterfaceUpdateSerializer.update() validated_data = %r',
validated_data)
# Remove related fields before we write the object. Attributes are
# handled by the parent.
addresses = validated_data.pop('addresses', None)
# Update the attributes in the database, but don't save them yet.
obj = super(InterfaceSerializer, self).update(
instance, validated_data, commit=False
)
# Assign the address objects to the Interface.
obj.set_addresses(addresses, overwrite=True, partial=self.partial)
obj.save()
return obj
class InterfaceCreateSerializer(InterfaceSerializer):
"""Used for POST on Interfaces."""
addresses = JSONListField(
required=False, help_text='List of host addresses to assign.'
)
mac_address = MACAddressField(
required=False, allow_null=True,
label=get_field_attr(models.Interface, 'mac_address', 'verbose_name'),
help_text=get_field_attr(models.Interface, 'mac_address', 'help_text'),
)
class Meta:
model = models.Interface
fields = ('device', 'name', 'description', 'type', 'mac_address',
'speed', 'parent_id', 'addresses', 'attributes')
class InterfaceUpdateSerializer(BulkSerializerMixin,
InterfaceCreateSerializer):
"Used for PUT on Interfaces."""
addresses = JSONListField(
required=True, help_text='List of host addresses to assign.'
)
attributes = JSONDictField(
required=True,
help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Interface
list_serializer_class = BulkListSerializer
fields = ('id', 'name', 'description', 'type', 'mac_address', 'speed',
'parent_id', 'addresses', 'attributes')
class InterfacePartialUpdateSerializer(BulkSerializerMixin,
InterfaceCreateSerializer):
"Used for PATCH on Interfaces."""
class Meta:
model = models.Interface
list_serializer_class = BulkListSerializer
fields = ('id', 'name', 'description', 'type', 'mac_address', 'speed',
'parent_id', 'addresses', 'attributes')
#########
# Circuit
#########
class CircuitSerializer(ResourceSerializer):
"""Used for GET, DELETE on Circuits"""
class Meta:
model = models.Circuit
fields = '__all__'
class CircuitCreateSerializer(CircuitSerializer):
"""Used for POST on Circuits."""
class Meta:
model = models.Circuit
# Display name and site are auto-generated, don't include them here
fields = ('endpoint_a', 'endpoint_z', 'name', 'attributes')
class CircuitUpdateSerializer(BulkSerializerMixin, CircuitCreateSerializer):
"""Used for PUT on Circuits."""
attributes = JSONDictField(
required=True, help_text='Dictionary of attributes to set.'
)
class Meta:
model = models.Circuit
list_serializer_class = BulkListSerializer
fields = ('id', 'endpoint_a', 'endpoint_z', 'name', 'attributes')
class CircuitPartialUpdateSerializer(BulkSerializerMixin,
CircuitCreateSerializer):
"""Used for PATCH on Circuits."""
class Meta:
model = models.Circuit
list_serializer_class = BulkListSerializer
fields = ('id', 'endpoint_a', 'endpoint_z', 'name', 'attributes')
###########
# AuthToken
###########
class AuthTokenSerializer(serializers.Serializer):
"""
AuthToken authentication serializer to validate username/secret_key inputs.
"""
email = serializers.CharField(help_text='Email address of the user.')
secret_key = serializers.CharField(
label='Secret Key', help_text='Secret key of the user.'
)
def validate(self, attrs):
email = attrs.get('email')
secret_key = attrs.get('secret_key')
if email and secret_key:
auth_func = auth.SecretKeyAuthentication().authenticate_credentials
user, secret_key = auth_func(email, secret_key)
if user:
if not user.is_active:
msg = 'User account is disabled.'
raise exc.ValidationError(msg)
attrs['user'] = user
return attrs
else:
msg = 'Unable to login with provided credentials.'
raise exc.ValidationError(msg)
else:
msg = 'Must include "email" and "secret_key"'
raise exc.ValidationError(msg)
|
prefix_length = fields.IntegerField(
required=False,
label=get_field_attr(models.Network, 'prefix_length', 'verbose_name'),
|
ex8.py
|
"""Demonstrates partial run when some input data not there.
"""
from remake import Remake, TaskRule
ex8 = Remake()
class CannotRun(TaskRule):
rule_inputs = {'in1': 'data/inputs/input_not_there.txt'}
rule_outputs = {'out': 'data/inputs/ex8_in1.txt'}
def
|
(self):
input_text = self.inputs['in1'].read_text()
self.outputs['out'].write_text(input_text + '\n')
class CanRun1(TaskRule):
rule_inputs = CannotRun.rule_outputs
rule_outputs = {'out1': 'data/outputs/ex8/out1.txt',
'out2': 'data/outputs/ex8/out2.txt'}
def rule_run(self):
for o in self.outputs.values():
o.write_text('out')
class CanRun2(TaskRule):
rule_inputs = {'in': 'data/outputs/ex8/out{i}.txt'}
rule_outputs = {'out1': 'data/outputs/ex8/out2.{i}.txt'}
var_matrix = {'i': [1, 2]}
def rule_run(self):
assert len(self.inputs) == len(self.outputs)
for i, o in zip(self.inputs.values(), self.outputs.values()):
o.write_text('\n'.join([f'f1 {line}' for line in i.read_text().split('\n')[:-1]]) + '\n')
if __name__ == '__main__':
ex8.finalize()
|
rule_run
|
TextUpdater.ts
|
module minerva.text {
export interface ITextUpdaterAssets extends ITextAssets {
fontFamily: string;
fontSize: number;
fontStretch: string;
fontStyle: string;
fontWeight: FontWeight;
textDecorations: TextDecorations;
language: string;
}
export class
|
{
assets: ITextUpdaterAssets = {
fontFamily: Font.DEFAULT_FAMILY,
fontSize: Font.DEFAULT_SIZE,
fontStretch: Font.DEFAULT_STRETCH,
fontStyle: Font.DEFAULT_STYLE,
fontWeight: Font.DEFAULT_WEIGHT,
textDecorations: TextDecorations.None,
language: "",
background: null,
selectionBackground: null,
foreground: null,
selectionForeground: null,
isUnderlined: false,
font: new Font(),
text: ""
};
private $$textlayout: ITextLayoutDef;
constructor () {
this.init();
}
init () {
this.setTextLayout();
}
/////// PREPARE TEXT LAYOUT
setTextLayout (tldef?: ITextLayoutDef): TextUpdater {
if (this.$$textlayout)
return this;
this.$$textlayout = tldef || new run.RunLayoutDef();
return this;
}
/////// TEXT LAYOUT
layout (docctx: IDocumentContext, docassets: IDocumentAssets): number {
this.$$textlayout.layout(docctx, docassets, this.assets);
return this.assets.text.length;
}
invalidateFont (): boolean {
var assets = this.assets;
return Font.mergeInto(assets.font, assets.fontFamily, assets.fontSize, assets.fontStretch, assets.fontStyle, assets.fontWeight);
}
}
}
|
TextUpdater
|
linked_queue_improved.py
|
from queue_interface import QueueInterface
from src.list.node import Node
class LinkedQueueImproved(QueueInterface):
""" implementation of a queue using a linked list """
def __init__(self):
|
def isEmpty(self):
""" check if the queue is empty """
return (self.length == 0)
def insert(self, cargo):
""" insert a new node a the end of the queue: O(1) """
node = Node(cargo)
node.next = None
if self.length == 0:
self.head = self.tail = node
else:
tail = self.tail
tail.next = node
self.tail = node
self.length = self.length + 1
def remove(self):
""" remove and return the node at the top of the queue: O(1) """
if self.isEmpty(): return
cargo = self.head.cargo
self.head = self.head.next
self.length = self.length - 1
if self.length == 0:
self.tail = None
return cargo
|
""" create an empty queue """
self.length = 0
self.head = None
self.tail = None
|
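A short usage sketch for the queue above, showing FIFO behaviour; it assumes LinkedQueueImproved is importable exactly as defined in the file.

q = LinkedQueueImproved()
for cargo in ('a', 'b', 'c'):
    q.insert(cargo)            # O(1) append at the tail
assert q.remove() == 'a'       # the oldest element comes out first (FIFO)
assert q.remove() == 'b'
assert not q.isEmpty()
assert q.remove() == 'c'
assert q.remove() is None      # removing from an empty queue returns None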
routing.py
|
from channels import route
from .consumers import ws_connect, ws_receive, ws_disconnect, chat_join, chat_leave, chat_send
websocket_routing = [
route("websocket.connect", ws_connect),
route("websocket.receive", ws_receive),
route("websocket.disconnect", ws_disconnect),
]
custom_routing = [
route("chat.receive", chat_join, command="^join$"),
route("chat.receive", chat_leave, command="^leave$"),
|
route("chat.receive", chat_send, command="^send$"),
]
|
|
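The routes above dispatch Channels 1.x messages to the imported function consumers. A hypothetical sketch of what a consumer such as ws_connect could look like; the body below is an assumption for illustration, not this project's actual implementation.

def ws_connect(message):
    # Channels 1.x consumers receive a Message object; accepting the WebSocket
    # handshake is done by replying on its reply_channel.
    message.reply_channel.send({'accept': True})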
reddit.go
|
package embedded
import (
"github.com/metamatex/metamate/asg/pkg/v0/asg/graph"
"github.com/metamatex/metamate/gen/v0/mql"
"github.com/metamatex/metamate/generic/pkg/v0/generic"
"github.com/metamatex/metamate/metamate/pkg/v0/types"
"github.com/metamatex/metamate/reddit-svc/pkg"
"github.com/metamatex/metamate/reddit-svc/pkg/communication"
"net/http"
)
func init()
|
{
handler[Reddit] = func(f generic.Factory, rn *graph.RootNode, c *http.Client, vSvc types.EmbeddedSvc) (h http.Handler, err error) {
client, err := communication.NewClient(communication.ClientOpts{Client: &http.Client{}, UserAgent: "mql"})
if err != nil {
return
}
svc := pkg.NewService(pkg.ServiceOpts{
Client: client,
})
h = mql.NewRedditServer(mql.RedditServerOpts{Service: svc})
return
}
}
|
|
summary_test.go
|
// Copyright 2014 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheus
import (
"math"
"math/rand"
"sort"
"sync"
"testing"
"testing/quick"
"time"
dto "github.com/coreos/etcd-starter/Godeps/_workspace/src/github.com/prometheus/client_model/go"
)
func benchmarkSummaryObserve(w int, b *testing.B) {
b.StopTimer()
wg := new(sync.WaitGroup)
wg.Add(w)
g := new(sync.WaitGroup)
g.Add(1)
s := NewSummary(SummaryOpts{})
for i := 0; i < w; i++ {
go func() {
g.Wait()
for i := 0; i < b.N; i++ {
s.Observe(float64(i))
}
wg.Done()
}()
}
b.StartTimer()
g.Done()
wg.Wait()
}
func BenchmarkSummaryObserve1(b *testing.B) {
benchmarkSummaryObserve(1, b)
}
func BenchmarkSummaryObserve2(b *testing.B) {
benchmarkSummaryObserve(2, b)
}
func BenchmarkSummaryObserve4(b *testing.B) {
benchmarkSummaryObserve(4, b)
}
func BenchmarkSummaryObserve8(b *testing.B) {
benchmarkSummaryObserve(8, b)
|
func benchmarkSummaryWrite(w int, b *testing.B) {
b.StopTimer()
wg := new(sync.WaitGroup)
wg.Add(w)
g := new(sync.WaitGroup)
g.Add(1)
s := NewSummary(SummaryOpts{})
for i := 0; i < 1000000; i++ {
s.Observe(float64(i))
}
for j := 0; j < w; j++ {
outs := make([]dto.Metric, b.N)
go func(o []dto.Metric) {
g.Wait()
for i := 0; i < b.N; i++ {
s.Write(&o[i])
}
wg.Done()
}(outs)
}
b.StartTimer()
g.Done()
wg.Wait()
}
func BenchmarkSummaryWrite1(b *testing.B) {
benchmarkSummaryWrite(1, b)
}
func BenchmarkSummaryWrite2(b *testing.B) {
benchmarkSummaryWrite(2, b)
}
func BenchmarkSummaryWrite4(b *testing.B) {
benchmarkSummaryWrite(4, b)
}
func BenchmarkSummaryWrite8(b *testing.B) {
benchmarkSummaryWrite(8, b)
}
func TestSummaryConcurrency(t *testing.T) {
rand.Seed(42)
it := func(n uint32) bool {
mutations := int(n%1e4 + 1e4)
concLevel := int(n%5 + 1)
total := mutations * concLevel
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
sum := NewSummary(SummaryOpts{
Name: "test_summary",
Help: "helpless",
})
allVars := make([]float64, total)
var sampleSum float64
for i := 0; i < concLevel; i++ {
vals := make([]float64, mutations)
for j := 0; j < mutations; j++ {
v := rand.NormFloat64()
vals[j] = v
allVars[i*mutations+j] = v
sampleSum += v
}
go func(vals []float64) {
start.Wait()
for _, v := range vals {
sum.Observe(v)
}
end.Done()
}(vals)
}
sort.Float64s(allVars)
start.Done()
end.Wait()
m := &dto.Metric{}
sum.Write(m)
if got, want := int(*m.Summary.SampleCount), total; got != want {
t.Errorf("got sample count %d, want %d", got, want)
}
if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 {
t.Errorf("got sample sum %f, want %f", got, want)
}
objectives := make([]float64, 0, len(DefObjectives))
for qu := range DefObjectives {
objectives = append(objectives, qu)
}
sort.Float64s(objectives)
for i, wantQ := range objectives {
ε := DefObjectives[wantQ]
gotQ := *m.Summary.Quantile[i].Quantile
gotV := *m.Summary.Quantile[i].Value
min, max := getBounds(allVars, wantQ, ε)
if gotQ != wantQ {
t.Errorf("got quantile %f, want %f", gotQ, wantQ)
}
if gotV < min || gotV > max {
t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max)
}
}
return true
}
if err := quick.Check(it, nil); err != nil {
t.Error(err)
}
}
func TestSummaryVecConcurrency(t *testing.T) {
rand.Seed(42)
objectives := make([]float64, 0, len(DefObjectives))
for qu := range DefObjectives {
objectives = append(objectives, qu)
}
sort.Float64s(objectives)
it := func(n uint32) bool {
mutations := int(n%1e4 + 1e4)
concLevel := int(n%7 + 1)
vecLength := int(n%3 + 1)
var start, end sync.WaitGroup
start.Add(1)
end.Add(concLevel)
sum := NewSummaryVec(
SummaryOpts{
Name: "test_summary",
Help: "helpless",
},
[]string{"label"},
)
allVars := make([][]float64, vecLength)
sampleSums := make([]float64, vecLength)
for i := 0; i < concLevel; i++ {
vals := make([]float64, mutations)
picks := make([]int, mutations)
for j := 0; j < mutations; j++ {
v := rand.NormFloat64()
vals[j] = v
pick := rand.Intn(vecLength)
picks[j] = pick
allVars[pick] = append(allVars[pick], v)
sampleSums[pick] += v
}
go func(vals []float64) {
start.Wait()
for i, v := range vals {
sum.WithLabelValues(string('A' + picks[i])).Observe(v)
}
end.Done()
}(vals)
}
for _, vars := range allVars {
sort.Float64s(vars)
}
start.Done()
end.Wait()
for i := 0; i < vecLength; i++ {
m := &dto.Metric{}
s := sum.WithLabelValues(string('A' + i))
s.Write(m)
if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want {
t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want)
}
if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 {
t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want)
}
for j, wantQ := range objectives {
ε := DefObjectives[wantQ]
gotQ := *m.Summary.Quantile[j].Quantile
gotV := *m.Summary.Quantile[j].Value
min, max := getBounds(allVars[i], wantQ, ε)
if gotQ != wantQ {
t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ)
}
if gotV < min || gotV > max {
t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max)
}
}
}
return true
}
if err := quick.Check(it, nil); err != nil {
t.Error(err)
}
}
func TestSummaryDecay(t *testing.T) {
sum := NewSummary(SummaryOpts{
Name: "test_summary",
Help: "helpless",
MaxAge: 100 * time.Millisecond,
Objectives: map[float64]float64{0.1: 0.001},
AgeBuckets: 10,
})
m := &dto.Metric{}
i := 0
tick := time.NewTicker(time.Millisecond)
for _ = range tick.C {
i++
sum.Observe(float64(i))
if i%10 == 0 {
sum.Write(m)
if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 {
t.Errorf("%d. got %f, want %f", i, got, want)
}
m.Reset()
}
if i >= 1000 {
break
}
}
tick.Stop()
}
func getBounds(vars []float64, q, ε float64) (min, max float64) {
// TODO: This currently tolerates an error of up to 2*ε. The error must
// be at most ε, but for some reason, it's sometimes slightly
// higher. That's a bug.
n := float64(len(vars))
lower := int((q - 2*ε) * n)
upper := int(math.Ceil((q + 2*ε) * n))
min = vars[0]
if lower > 1 {
min = vars[lower-1]
}
max = vars[len(vars)-1]
if upper < len(vars) {
max = vars[upper-1]
}
return
}
|
}
|
runfiles.go
|
// Copyright 2018 The Bazel Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bazel
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"path/filepath"
"runtime"
"sort"
"strings"
"sync"
)
const (
RUNFILES_MANIFEST_FILE = "RUNFILES_MANIFEST_FILE"
RUNFILES_DIR = "RUNFILES_DIR"
)
// Runfile returns an absolute path to the file named by "path", which
// should be a relative path from the workspace root to the file within
// the bazel workspace.
//
// Runfile may be called from tests invoked with 'bazel test' and
// binaries invoked with 'bazel run'. On Windows,
// only tests invoked with 'bazel test' are supported.
func Runfile(path string) (string, error) {
// Search in working directory
if _, err := os.Stat(path); err == nil {
return filepath.Abs(path)
}
if err := ensureRunfiles(); err != nil {
return "", err
}
// Search manifest if we have one.
if entry, ok := runfiles.index[path]; ok {
return entry.Path, nil
}
// Search the main workspace.
if runfiles.workspace != "" {
mainPath := filepath.Join(runfiles.dir, runfiles.workspace, path)
if _, err := os.Stat(mainPath); err == nil {
return mainPath, nil
}
}
// Search other workspaces.
for _, w := range runfiles.workspaces {
workPath := filepath.Join(runfiles.dir, w, path)
if _, err := os.Stat(workPath); err == nil {
return workPath, nil
}
}
return "", fmt.Errorf("Runfile %s: could not locate file", path)
}
// FindBinary returns an absolute path to the binary built from a go_binary
// rule in the given package with the given name. FindBinary is similar to
// Runfile, but it accounts for varying configurations and file extensions,
// which may cause the binary to have different paths on different platforms.
//
// FindBinary may be called from tests invoked with 'bazel test' and
// binaries invoked with 'bazel run'. On Windows,
// only tests invoked with 'bazel test' are supported.
func FindBinary(pkg, name string) (string, bool) {
if err := ensureRunfiles(); err != nil {
return "", false
}
// If we've gathered a list of runfiles, either by calling ListRunfiles or
// parsing the manifest on Windows, just use that instead of searching
// directories. Return the first match. The manifest on Windows may contain
// multiple entries for the same file.
if runfiles.list != nil {
if runtime.GOOS == "windows" {
name += ".exe"
}
for _, entry := range runfiles.list {
if path.Base(entry.ShortPath) != name {
continue
}
pkgDir := path.Dir(entry.ShortPath)
if pkgDir == "." {
pkgDir = ""
}
if pkgDir != pkg {
continue
}
return entry.Path, true
}
return "", false
}
dir, err := Runfile(pkg)
if err != nil {
return "", false
}
var found string
stopErr := errors.New("stop")
err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
return nil
}
base := filepath.Base(path)
stem := strings.TrimSuffix(base, ".exe")
if stem != name {
return nil
}
if runtime.GOOS != "windows" {
if st, err := os.Stat(path); err != nil {
return err
} else if st.Mode()&0111 == 0 {
return nil
}
}
if stem == name {
found = path
return stopErr
}
return nil
})
if err == stopErr {
return found, true
} else {
return "", false
}
}
// A RunfileEntry describes a runfile.
type RunfileEntry struct {
// Workspace is the bazel workspace the file came from. For example,
// this would be "io_bazel_rules_go" for a file in rules_go.
Workspace string
// ShortPath is a relative, slash-separated path from the workspace root
// to the file. For non-binary files, this may be passed to Runfile
// to locate a file.
ShortPath string
// Path is an absolute path to the file.
Path string
}
// ListRunfiles returns a list of available runfiles.
func
|
() ([]RunfileEntry, error) {
if err := ensureRunfiles(); err != nil {
return nil, err
}
if runfiles.list == nil && runfiles.dir != "" {
runfiles.listOnce.Do(func() {
var list []RunfileEntry
haveWorkspaces := strings.HasSuffix(runfiles.dir, ".runfiles") && runfiles.workspace != ""
err := filepath.Walk(runfiles.dir, func(path string, info os.FileInfo, err error) error {
if err != nil {
return err
}
rel, _ := filepath.Rel(runfiles.dir, path)
rel = filepath.ToSlash(rel)
if rel == "." {
return nil
}
var workspace, shortPath string
if haveWorkspaces {
if i := strings.IndexByte(rel, '/'); i < 0 {
return nil
} else {
workspace, shortPath = rel[:i], rel[i+1:]
}
} else {
workspace, shortPath = "", rel
}
list = append(list, RunfileEntry{Workspace: workspace, ShortPath: shortPath, Path: path})
return nil
})
if err != nil {
runfiles.err = err
return
}
runfiles.list = list
})
}
return runfiles.list, runfiles.err
}
// TestWorkspace returns the name of the Bazel workspace for this test.
// TestWorkspace returns an error if the TEST_WORKSPACE environment variable
// was not set or SetDefaultTestWorkspace was not called.
func TestWorkspace() (string, error) {
if err := ensureRunfiles(); err != nil {
return "", err
}
if runfiles.workspace != "" {
return runfiles.workspace, nil
}
return "", errors.New("TEST_WORKSPACE not set and SetDefaultTestWorkspace not called")
}
// SetDefaultTestWorkspace allows you to set a fake value for the
// environment variable TEST_WORKSPACE if it is not defined. This is useful
// when running tests on the command line and not through Bazel.
func SetDefaultTestWorkspace(w string) {
ensureRunfiles()
runfiles.workspace = w
}
// RunfilesPath return the path to the runfiles tree.
// It will return an error if there is no runfiles tree, for example because
// the executable is run on Windows or was not invoked with 'bazel test'
// or 'bazel run'.
func RunfilesPath() (string, error) {
if err := ensureRunfiles(); err != nil {
return "", err
}
if runfiles.dir == "" {
if runtime.GOOS == "windows" {
return "", errors.New("RunfilesPath: no runfiles directory on windows")
} else {
return "", errors.New("could not locate runfiles directory")
}
}
if runfiles.workspace == "" {
return "", errors.New("could not locate runfiles workspace")
}
return filepath.Join(runfiles.dir, runfiles.workspace), nil
}
// EnterRunfiles locates the directory under which a built binary can find its data dependencies
// using relative paths, and enters that directory.
//
// "workspace" indicates the name of the current project, "pkg" indicates the relative path to the
// build package that contains the binary target, "binary" indicates the basename of the binary
// searched for, and "cookie" indicates an arbitrary data file that we expect to find within the
// runfiles tree.
//
// DEPRECATED: use RunfilesPath instead.
func EnterRunfiles(workspace string, pkg string, binary string, cookie string) error {
runfiles, ok := findRunfiles(workspace, pkg, binary, cookie)
if !ok {
return fmt.Errorf("cannot find runfiles tree")
}
if err := os.Chdir(runfiles); err != nil {
return fmt.Errorf("cannot enter runfiles tree: %v", err)
}
return nil
}
var runfiles = struct {
once, listOnce sync.Once
// list is a list of known runfiles, either loaded from the manifest
// or discovered by walking the runfile directory.
list []RunfileEntry
// index maps runfile short paths to absolute paths.
index map[string]RunfileEntry
// dir is a path to the runfile directory. Typically this is a directory
// named <target>.runfiles, with a subdirectory for each workspace.
dir string
// workspace is workspace where the binary or test was built.
workspace string
// workspaces is a list of other workspace names.
workspaces []string
// err is set when there is an error loading runfiles, for example,
// parsing the manifest.
err error
}{}
func ensureRunfiles() error {
runfiles.once.Do(initRunfiles)
return runfiles.err
}
func initRunfiles() {
manifest := os.Getenv("RUNFILES_MANIFEST_FILE")
if manifest != "" {
// On Windows, Bazel doesn't create a symlink tree of runfiles because
// Windows doesn't support symbolic links by default. Instead, runfile
// locations are written to a manifest file.
runfiles.index = make(map[string]RunfileEntry)
data, err := ioutil.ReadFile(manifest)
if err != nil {
runfiles.err = err
return
}
lineno := 0
for len(data) > 0 {
i := bytes.IndexByte(data, '\n')
var line []byte
if i < 0 {
line = data
data = nil
} else {
line = data[:i]
data = data[i+1:]
}
lineno++
line = bytes.TrimSpace(line)
if len(line) == 0 {
continue
}
e := bytes.SplitN(line, []byte(" "), 2)
if len(e) < 2 {
runfiles.err = fmt.Errorf("error parsing runfiles manifest: %s:%d: no space", manifest, lineno)
return
}
entry := RunfileEntry{ShortPath: string(e[0]), Path: string(e[1])}
if i := strings.IndexByte(entry.ShortPath, '/'); i >= 0 {
entry.Workspace = entry.ShortPath[:i]
entry.ShortPath = entry.ShortPath[i+1:]
}
if strings.HasPrefix(entry.ShortPath, "external/") {
entry.ShortPath = entry.ShortPath[len("external/"):]
if i := strings.IndexByte(entry.ShortPath, '/'); i >= 0 {
entry.Workspace = entry.ShortPath[:i]
entry.ShortPath = entry.ShortPath[i+1:]
}
}
runfiles.list = append(runfiles.list, entry)
runfiles.index[entry.ShortPath] = entry
}
}
runfiles.workspace = os.Getenv("TEST_WORKSPACE")
if dir := os.Getenv("RUNFILES_DIR"); dir != "" {
runfiles.dir = dir
} else if dir = os.Getenv("TEST_SRCDIR"); dir != "" {
runfiles.dir = dir
} else if runtime.GOOS != "windows" {
dir, err := os.Getwd()
if err != nil {
runfiles.err = fmt.Errorf("error locating runfiles dir: %v", err)
return
}
parent := filepath.Dir(dir)
if strings.HasSuffix(parent, ".runfiles") {
runfiles.dir = parent
if runfiles.workspace == "" {
runfiles.workspace = filepath.Base(dir)
}
} else {
runfiles.err = errors.New("could not locate runfiles directory")
return
}
}
if runfiles.dir != "" {
fis, err := ioutil.ReadDir(runfiles.dir)
if err != nil {
runfiles.err = fmt.Errorf("could not open runfiles directory: %v", err)
return
}
for _, fi := range fis {
if fi.IsDir() {
runfiles.workspaces = append(runfiles.workspaces, fi.Name())
}
}
sort.Strings(runfiles.workspaces)
}
}
// getCandidates returns the list of all possible "prefix/suffix" paths where there might be an
// optional component in-between the two pieces.
//
// This function exists to cope with issues #1239 because we cannot tell where the built Go
// binaries are located upfront.
//
// DEPRECATED: only used by EnterRunfiles.
func getCandidates(prefix string, suffix string) []string {
candidates := []string{filepath.Join(prefix, suffix)}
if entries, err := ioutil.ReadDir(prefix); err == nil {
for _, entry := range entries {
candidate := filepath.Join(prefix, entry.Name(), suffix)
candidates = append(candidates, candidate)
}
}
return candidates
}
// findRunfiles locates the directory under which a built binary can find its data dependencies
// using relative paths.
//
// DEPRECATED: only used by EnterRunfiles.
func findRunfiles(workspace string, pkg string, binary string, cookie string) (string, bool) {
candidates := getCandidates(filepath.Join("bazel-bin", pkg), filepath.Join(binary+".runfiles", workspace))
candidates = append(candidates, ".")
for _, candidate := range candidates {
if _, err := os.Stat(filepath.Join(candidate, cookie)); err == nil {
return candidate, true
}
}
return "", false
}
|
ListRunfiles
|
create_native_styles.py
|
#!/usr/bin/env python
#
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Creates a native app install ad."""
import uuid
# Import appropriate modules from the client library.
from googleads import dfp
# This is the creative template ID for the system-defined native app install ad
# format, which we will create the native style from. Use
# CreativeTemplateService.getCreativeTemplateByStatement() and
# CreativeTemplate.isNativeEligible() to get other native ad formats available
# in your network.
CREATIVE_TEMPLATE_ID = 10004400
WIDTH = 300
HEIGHT = 345
HTML_SNIPPET = """<div id="adunit" style="overflow: hidden;">
<img src="[%Thirdpartyimpressiontracker%]" style="display:none">
<div class="attribution">Ad</div>
<div class="image">
<a class="image-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">
<img src="[%Image%]">
</a>
</div>
<div class="app-icon"><img src="[%Appicon%]"/></div>
<div class="title">
<a class="title-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">[%Headline%]</a>
</div>
<div class="reviews"></div>
<div class="body">
<a class="body-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">[%Body%]</a>
</div>
<div class="price">[%Price%]</div>
<div class="button">
<a class="button-link"
href="%%CLICK_URL_UNESC%%[%Thirdpartyclicktracker%]%%DEST_URL%%"
target="_top">[%Calltoaction%]</a>
</div>
</div>
"""
CSS_SNIPPET = """body {
background-color: rgba(255, 255, 255, 1);
font-family: "Roboto-Regular", sans-serif;
font-weight: normal;
font-size: 12px;
line-height: 14px;
}
.attribution {
background-color: rgba(236, 182, 0, 1);
color: rgba(255, 255, 255, 1);
font-size: 13px;
display: table;
margin: 4px 8px;
padding: 0 3px;
border-radius: 2px;
}
.image {
text-align: center;
margin: 8px;
}
.image img,
.image-link {
width: 100%;
}
.app-icon {
float: left;
margin: 0 8px 4px 8px;
height: 40px;
width: 40px;
background-color: transparent;
}
.app-icon img {
height: 100%;
width: 100%;
border-radius: 20%;
}
.title,
.promo-headline {
font-weight: bold;
font-size: 14px;
line-height: 20px;
margin: 8px 8px 4px 8px;
}
.title a,
.promo-headline {
color: rgba(112, 112, 112, 1);
text-decoration: none;
}
.reviews {
float: left;
}
.reviews svg {
fill: rgba(0, 0, 0, 0.7);
}
.body {
clear: left;
margin: 8px;
}
.body a {
color: rgba(110, 110, 110, 1);
text-decoration: none;
}
.price {
display: none;
}
.button {
font-size: 14px;
font-weight: bold;
float: right;
margin: 0px 16px 16px 0px;
white-space: nowrap;
}
.button a {
color: #2196F3;
text-decoration: none;
}
.button svg {
display: none;
}
"""
def
|
(client, html_snippet, css_snippet, creative_template_id, width,
height):
# Initialize appropriate service.
native_style_service = client.GetService('NativeStyleService',
version='v201802')
native_style = {
'name': 'Native style #%d' % uuid.uuid4(),
'htmlSnippet': html_snippet,
'cssSnippet': css_snippet,
'creativeTemplateId': creative_template_id,
'size': {
'width': width,
'height': height,
'isAspectRatio': False
}
}
# Create the native style on the server.
native_styles = native_style_service.createNativeStyles([native_style])
# Display results.
for native_style in native_styles:
print ('A Native style with ID "%s", name "%s", and creative template ID '
'"%d" was created.' % (native_style['id'], native_style['name'],
native_style['creativeTemplateId']))
if __name__ == '__main__':
# Initialize client object.
dfp_client = dfp.DfpClient.LoadFromStorage()
main(dfp_client, HTML_SNIPPET, CSS_SNIPPET, CREATIVE_TEMPLATE_ID, WIDTH,
HEIGHT)
|
main
|
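The comment near the top of the file above points at other native-eligible creative templates in the network. A hedged sketch of how one might list them with the same client; the service and method names follow that comment and the v201802 API, but treat this as an assumption rather than verified code.

template_service = dfp_client.GetService('CreativeTemplateService', version='v201802')
statement = dfp.StatementBuilder()
page = template_service.getCreativeTemplatesByStatement(statement.ToStatement())
for template in page.get('results', []):
    # Only templates flagged as native-eligible can back a native style.
    if template['isNativeEligible']:
        print(template['id'], template['name'])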
v1.ts
|
// Copyright 2020 Google LLC
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* eslint-disable @typescript-eslint/no-explicit-any */
/* eslint-disable @typescript-eslint/class-name-casing */
/* eslint-disable @typescript-eslint/no-unused-vars */
/* eslint-disable @typescript-eslint/no-empty-interface */
/* eslint-disable @typescript-eslint/no-namespace */
/* eslint-disable no-irregular-whitespace */
import {
OAuth2Client,
JWT,
Compute,
UserRefreshClient,
GaxiosPromise,
GoogleConfigurable,
createAPIRequest,
MethodOptions,
StreamMethodOptions,
GlobalOptions,
GoogleAuth,
BodyResponseCallback,
APIRequestContext,
} from 'googleapis-common';
import {Readable} from 'stream';
export namespace iap_v1 {
export interface Options extends GlobalOptions {
version: 'v1';
}
interface StandardParameters {
/**
* Auth client or API Key for the request
*/
auth?:
| string
| OAuth2Client
| JWT
| Compute
| UserRefreshClient
| GoogleAuth;
/**
* V1 error format.
*/
'$.xgafv'?: string;
/**
* OAuth access token.
*/
access_token?: string;
/**
* Data format for response.
*/
alt?: string;
/**
* JSONP
*/
callback?: string;
/**
* Selector specifying which fields to include in a partial response.
*/
fields?: string;
/**
* API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.
*/
key?: string;
/**
* OAuth 2.0 token for the current user.
*/
oauth_token?: string;
/**
* Returns response with indentations and line breaks.
*/
prettyPrint?: boolean;
/**
* Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters.
*/
quotaUser?: string;
/**
* Legacy upload protocol for media (e.g. "media", "multipart").
*/
uploadType?: string;
/**
* Upload protocol for media (e.g. "raw", "multipart").
*/
upload_protocol?: string;
}
/**
* Cloud Identity-Aware Proxy API
*
* Controls access to cloud applications running on Google Cloud Platform.
*
* @example
* ```js
* const {google} = require('googleapis');
* const iap = google.iap('v1');
* ```
*/
export class Iap {
context: APIRequestContext;
projects: Resource$Projects;
v1: Resource$V1;
constructor(options: GlobalOptions, google?: GoogleConfigurable) {
this.context = {
_options: options || {},
google,
};
this.projects = new Resource$Projects(this.context);
this.v1 = new Resource$V1(this.context);
}
}
/**
* Custom content configuration for access denied page. IAP allows customers to define a custom URI to use as the error page when access is denied to users. If IAP prevents access to this page, the default IAP error page will be displayed instead.
*/
export interface Schema$AccessDeniedPageSettings {
/**
* The URI to be redirected to when access is denied.
*/
accessDeniedPageUri?: string | null;
}
/**
* Access related settings for IAP protected apps.
*/
export interface Schema$AccessSettings {
/**
* Configuration to allow cross-origin requests via IAP.
*/
corsSettings?: Schema$CorsSettings;
/**
* GCIP claims and endpoint configurations for 3p identity providers.
*/
gcipSettings?: Schema$GcipSettings;
/**
* Settings to configure IAP's OAuth behavior.
*/
oauthSettings?: Schema$OAuthSettings;
/**
* Settings to configure Policy delegation for apps hosted in tenant projects. INTERNAL_ONLY.
*/
policyDelegationSettings?: Schema$PolicyDelegationSettings;
}
/**
* Wrapper over application specific settings for IAP.
*/
export interface Schema$ApplicationSettings {
/**
* Customization for Access Denied page.
*/
accessDeniedPageSettings?: Schema$AccessDeniedPageSettings;
/**
* The Domain value to set for cookies generated by IAP. This value is not validated by the API, but will be ignored at runtime if invalid.
*/
cookieDomain?: string | null;
/**
* Settings to configure IAP's behavior for a CSM mesh.
*/
csmSettings?: Schema$CsmSettings;
}
/**
* Associates `members` with a `role`.
*/
export interface Schema$Binding {
bindingId?: string | null;
/**
* The condition that is associated with this binding. If the condition evaluates to `true`, then this binding applies to the current request. If the condition evaluates to `false`, then this binding does not apply to the current request. However, a different role binding might grant the same role to one or more of the members in this binding. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
condition?: Schema$Expr;
/**
* Specifies the identities requesting access for a Cloud Platform resource. `members` can have the following values: * `allUsers`: A special identifier that represents anyone who is on the internet; with or without a Google account. * `allAuthenticatedUsers`: A special identifier that represents anyone who is authenticated with a Google account or a service account. * `user:{emailid\}`: An email address that represents a specific Google account. For example, `[email protected]` . * `serviceAccount:{emailid\}`: An email address that represents a service account. For example, `[email protected]`. * `group:{emailid\}`: An email address that represents a Google group. For example, `[email protected]`. * `deleted:user:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a user that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the user is recovered, this value reverts to `user:{emailid\}` and the recovered user retains the role in the binding. * `deleted:serviceAccount:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a service account that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the service account is undeleted, this value reverts to `serviceAccount:{emailid\}` and the undeleted service account retains the role in the binding. * `deleted:group:{emailid\}?uid={uniqueid\}`: An email address (plus unique identifier) representing a Google group that has been recently deleted. For example, `[email protected]?uid=123456789012345678901`. If the group is recovered, this value reverts to `group:{emailid\}` and the recovered group retains the role in the binding. * `domain:{domain\}`: The G Suite domain (primary) that represents all the users of that domain. For example, `google.com` or `example.com`.
*/
members?: string[] | null;
/**
* Role that is assigned to `members`. For example, `roles/viewer`, `roles/editor`, or `roles/owner`.
*/
role?: string | null;
}
/**
* OAuth brand data. NOTE: Only contains a portion of the data that describes a brand.
*/
export interface Schema$Brand {
/**
* Application name displayed on OAuth consent screen.
*/
applicationTitle?: string | null;
/**
* Output only. Identifier of the brand. NOTE: GCP project number achieves the same brand identification purpose as only one brand per project can be created.
*/
name?: string | null;
/**
* Output only. Whether the brand is intended for usage only inside the G Suite organization.
*/
orgInternalOnly?: boolean | null;
/**
* Support email displayed on the OAuth consent screen.
*/
supportEmail?: string | null;
}
/**
* Allows customers to configure HTTP request paths that'll allow HTTP OPTIONS call to bypass authentication and authorization.
*/
export interface Schema$CorsSettings {
/**
* Configuration to allow HTTP OPTIONS calls to skip authorization. If undefined, IAP will not apply any special logic to OPTIONS requests.
*/
allowHttpOptions?: boolean | null;
}
/**
* Configuration for RCTokens generated for CSM workloads protected by IAP. RCTokens are IAP generated JWTs that can be verified at the application. The RCToken is primarily used for ISTIO deployments, and can be scoped to a single mesh by configuring the audience field accordingly
*/
export interface Schema$CsmSettings {
/**
* Audience claim set in the generated RCToken. This value is not validated by IAP.
*/
rctokenAud?: string | null;
}
/**
* A generic empty message that you can re-use to avoid defining duplicated empty messages in your APIs. A typical example is to use it as the request or the response type of an API method. For instance: service Foo { rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); \} The JSON representation for `Empty` is empty JSON object `{\}`.
*/
export interface Schema$Empty {}
/**
* Represents a textual expression in the Common Expression Language (CEL) syntax. CEL is a C-like expression language. The syntax and semantics of CEL are documented at https://github.com/google/cel-spec. Example (Comparison): title: "Summary size limit" description: "Determines if a summary is less than 100 chars" expression: "document.summary.size() < 100" Example (Equality): title: "Requestor is owner" description: "Determines if requestor is the document owner" expression: "document.owner == request.auth.claims.email" Example (Logic): title: "Public documents" description: "Determine whether the document should be publicly visible" expression: "document.type != 'private' && document.type != 'internal'" Example (Data Manipulation): title: "Notification string" description: "Create a notification string with a timestamp." expression: "'New message received at ' + string(document.create_time)" The exact variables and functions that may be referenced within an expression are determined by the service that evaluates it. See the service documentation for additional information.
*/
export interface Schema$Expr {
/**
* Optional. Description of the expression. This is a longer text which describes the expression, e.g. when hovered over it in a UI.
*/
description?: string | null;
/**
* Textual representation of an expression in Common Expression Language syntax.
*/
expression?: string | null;
/**
* Optional. String indicating the location of the expression for error reporting, e.g. a file name and a position in the file.
*/
location?: string | null;
/**
* Optional. Title for the expression, i.e. a short string describing its purpose. This can be used e.g. in UIs which allow to enter the expression.
*/
title?: string | null;
}
/**
* Allows customers to configure tenant_id for GCIP instance per-app.
*/
export interface Schema$GcipSettings {
/**
* Login page URI associated with the GCIP tenants. Typically, all resources within the same project share the same login page, though it could be overridden at the sub resource level.
*/
loginPageUri?: string | null;
/**
* GCIP tenant ids that are linked to the IAP resource. tenant_ids could be a string beginning with a number character to indicate authenticating with GCIP tenant flow, or in the format of _ to indicate authenticating with GCIP agent flow. If agent flow is used, tenant_ids should only contain one single element, while for tenant flow, tenant_ids can contain multiple elements.
*/
tenantIds?: string[] | null;
}
/**
* Request message for `GetIamPolicy` method.
*/
export interface Schema$GetIamPolicyRequest {
/**
* OPTIONAL: A `GetPolicyOptions` object for specifying options to `GetIamPolicy`.
*/
options?: Schema$GetPolicyOptions;
}
/**
* Encapsulates settings provided to GetIamPolicy.
*/
export interface Schema$GetPolicyOptions {
/**
* Optional. The policy format version to be returned. Valid values are 0, 1, and 3. Requests specifying an invalid value will be rejected. Requests for policies with any conditional bindings must specify version 3. Policies without any conditional bindings may specify any valid value or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
requestedPolicyVersion?: number | null;
}
/**
* The IAP configurable settings.
*/
export interface Schema$IapSettings {
/**
* Top level wrapper for all access related setting in IAP
*/
accessSettings?: Schema$AccessSettings;
/**
* Top level wrapper for all application related settings in IAP
*/
applicationSettings?: Schema$ApplicationSettings;
/**
* Required. The resource name of the IAP protected resource.
*/
name?: string | null;
}
/**
* Contains the data that describes an Identity Aware Proxy owned client.
*/
export interface Schema$IdentityAwareProxyClient {
/**
* Human-friendly name given to the OAuth client.
*/
displayName?: string | null;
/**
* Output only. Unique identifier of the OAuth client.
*/
name?: string | null;
/**
* Output only. Client secret of the OAuth client.
*/
secret?: string | null;
}
/**
* Response message for ListBrands.
*/
export interface Schema$ListBrandsResponse {
/**
* Brands existing in the project.
*/
brands?: Schema$Brand[];
}
/**
* Response message for ListIdentityAwareProxyClients.
*/
export interface Schema$ListIdentityAwareProxyClientsResponse {
/**
* Clients existing in the brand.
*/
identityAwareProxyClients?: Schema$IdentityAwareProxyClient[];
/**
* A token, which can be sent as `page_token` to retrieve the next page. If this field is omitted, there are no subsequent pages.
*/
nextPageToken?: string | null;
}
/**
* Configuration for OAuth login&consent flow behavior as well as for OAuth Credentials.
*/
export interface Schema$OAuthSettings {
/**
* Domain hint to send as hd=? parameter in OAuth request flow. Enables redirect to primary IDP by skipping Google's login screen. https://developers.google.com/identity/protocols/OpenIDConnect#hd-param Note: IAP does not verify that the id token's hd claim matches this value since access behavior is managed by IAM policies.
*/
loginHint?: string | null;
}
/**
* An Identity and Access Management (IAM) policy, which specifies access controls for Google Cloud resources. A `Policy` is a collection of `bindings`. A `binding` binds one or more `members` to a single `role`. Members can be user accounts, service accounts, Google groups, and domains (such as G Suite). A `role` is a named list of permissions; each `role` can be an IAM predefined role or a user-created custom role. For some types of Google Cloud resources, a `binding` can also specify a `condition`, which is a logical expression that allows access to a resource only if the expression evaluates to `true`. A condition can add constraints based on attributes of the request, the resource, or both. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies). **JSON example:** { "bindings": [ { "role": "roles/resourcemanager.organizationAdmin", "members": [ "user:[email protected]", "group:[email protected]", "domain:google.com", "serviceAccount:[email protected]" ] \}, { "role": "roles/resourcemanager.organizationViewer", "members": [ "user:[email protected]" ], "condition": { "title": "expirable access", "description": "Does not grant access after Sep 2020", "expression": "request.time < timestamp('2020-10-01T00:00:00.000Z')", \} \} ], "etag": "BwWWja0YfJA=", "version": 3 \} **YAML example:** bindings: - members: - user:[email protected] - group:[email protected] - domain:google.com - serviceAccount:[email protected] role: roles/resourcemanager.organizationAdmin - members: - user:[email protected] role: roles/resourcemanager.organizationViewer condition: title: expirable access description: Does not grant access after Sep 2020 expression: request.time < timestamp('2020-10-01T00:00:00.000Z') - etag: BwWWja0YfJA= - version: 3 For a description of IAM and its features, see the [IAM documentation](https://cloud.google.com/iam/docs/).
*/
export interface Schema$Policy {
/**
* Associates a list of `members` to a `role`. Optionally, may specify a `condition` that determines how and when the `bindings` are applied. Each of the `bindings` must contain at least one member.
*/
bindings?: Schema$Binding[];
/**
* `etag` is used for optimistic concurrency control as a way to help prevent simultaneous updates of a policy from overwriting each other. It is strongly suggested that systems make use of the `etag` in the read-modify-write cycle to perform policy updates in order to avoid race conditions: An `etag` is returned in the response to `getIamPolicy`, and systems are expected to put that etag in the request to `setIamPolicy` to ensure that their change will be applied to the same version of the policy. **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost.
*/
etag?: string | null;
/**
* Specifies the format of the policy. Valid values are `0`, `1`, and `3`. Requests that specify an invalid value are rejected. Any operation that affects conditional role bindings must specify version `3`. This requirement applies to the following operations: * Getting a policy that includes a conditional role binding * Adding a conditional role binding to a policy * Changing a conditional role binding in a policy * Removing any role binding, with or without a condition, from a policy that includes conditions **Important:** If you use IAM Conditions, you must include the `etag` field whenever you call `setIamPolicy`. If you omit this field, then IAM allows you to overwrite a version `3` policy with a version `1` policy, and all of the conditions in the version `3` policy are lost. If a policy does not include any conditions, operations on that policy may specify any valid version or leave the field unset. To learn which resources support conditions in their IAM policies, see the [IAM documentation](https://cloud.google.com/iam/help/conditions/resource-policies).
*/
version?: number | null;
}
/**
* PolicyDelegationConfig allows google-internal teams to use IAP for apps hosted in a tenant project. Using these settings, the app can delegate permission check to happen against the linked customer project. This is only ever supposed to be used by google internal teams, hence the restriction on the proto.
*/
export interface Schema$PolicyDelegationSettings {
/**
* Permission to check in IAM.
*/
iamPermission?: string | null;
/**
* The DNS name of the service (e.g. "resourcemanager.googleapis.com"). This should be the domain name part of the full resource names (see https://aip.dev/122#full-resource-names), which is usually the same as IamServiceSpec.service of the service where the resource type is defined.
*/
iamServiceName?: string | null;
/**
* Policy name to be checked
*/
policyName?: Schema$PolicyName;
/**
* IAM resource to check permission on
*/
resource?: Schema$Resource;
}
export interface Schema$PolicyName {
id?: string | null;
/**
* For Cloud IAM: The location of the Policy. Must be empty or "global" for Policies owned by global IAM. Must name a region from prodspec/cloud-iam-cloudspec for Regional IAM Policies, see go/iam-faq#where-is-iam-currently-deployed. For Local IAM: This field should be set to "local".
*/
region?: string | null;
/**
* Valid values for type might be 'gce', 'gcs', 'project', 'account' etc.
*/
type?: string | null;
}
/**
* The request sent to ResetIdentityAwareProxyClientSecret.
*/
export interface Schema$ResetIdentityAwareProxyClientSecretRequest {}
export interface Schema$Resource {
/**
* The service defined labels of the resource on which the conditions will be evaluated. The semantics - including the key names - are vague to IAM. If the effective condition has a reference to a `resource.labels[foo]` construct, IAM consults with this map to retrieve the values associated with `foo` key for Conditions evaluation. If the provided key is not found in the labels map, the condition would evaluate to false. This field is in limited use. If your intended use case is not expected to express resource.labels attribute in IAM Conditions, leave this field empty. Before planning on using this attribute please: * Read go/iam-conditions-labels-comm and ensure your service can meet the data availability and management requirements. * Talk to iam-conditions-eng@ about your use case.
*/
labels?: {[key: string]: string} | null;
/**
* Name of the resource on which conditions will be evaluated. Must use the Relative Resource Name of the resource, which is the URI path of the resource without the leading "/". Examples are "projects/_/buckets/[BUCKET-ID]" for storage buckets or "projects/[PROJECT-ID]/global/firewalls/[FIREWALL-ID]" for a firewall. This field is required for evaluating conditions with rules on resource names. For a `list` permission check, the resource.name value must be set to the parent resource. If the parent resource is a project, this field should be left unset.
*/
name?: string | null;
/**
* The name of the service this resource belongs to. It is configured using the official_service_name of the Service as defined in service configurations under //configs/cloud/resourcetypes. For example, the official_service_name of cloud resource manager service is set as 'cloudresourcemanager.googleapis.com' according to //configs/cloud/resourcetypes/google/cloud/resourcemanager/prod.yaml
*/
service?: string | null;
/**
* The public resource type name of the resource on which conditions will be evaluated. It is configured using the official_name of the ResourceType as defined in service configurations under //configs/cloud/resourcetypes. For example, the official_name for GCP projects is set as 'cloudresourcemanager.googleapis.com/Project' according to //configs/cloud/resourcetypes/google/cloud/resourcemanager/prod.yaml For details see go/iam-conditions-integration-guide.
*/
type?: string | null;
}
|
export interface Schema$SetIamPolicyRequest {
/**
* REQUIRED: The complete policy to be applied to the `resource`. The size of the policy is limited to a few 10s of KB. An empty policy is a valid policy but certain Cloud Platform services (such as Projects) might reject them.
*/
policy?: Schema$Policy;
}
/**
* Request message for `TestIamPermissions` method.
*/
export interface Schema$TestIamPermissionsRequest {
/**
* The set of permissions to check for the `resource`. Permissions with wildcards (such as '*' or 'storage.*') are not allowed. For more information see [IAM Overview](https://cloud.google.com/iam/docs/overview#permissions).
*/
permissions?: string[] | null;
}
/**
* Response message for `TestIamPermissions` method.
*/
export interface Schema$TestIamPermissionsResponse {
/**
* A subset of `TestPermissionsRequest.permissions` that the caller is allowed.
*/
permissions?: string[] | null;
}
export class Resource$Projects {
context: APIRequestContext;
brands: Resource$Projects$Brands;
constructor(context: APIRequestContext) {
this.context = context;
this.brands = new Resource$Projects$Brands(this.context);
}
}
export class Resource$Projects$Brands {
context: APIRequestContext;
identityAwareProxyClients: Resource$Projects$Brands$Identityawareproxyclients;
constructor(context: APIRequestContext) {
this.context = context;
this.identityAwareProxyClients = new Resource$Projects$Brands$Identityawareproxyclients(
this.context
);
}
/**
* Constructs a new OAuth brand for the project if one does not exist. The created brand is "internal only", meaning that OAuth clients created under it only accept requests from users who belong to the same G Suite organization as the project. The brand is created in an un-reviewed status. NOTE: The "internal only" status can be manually changed in the Google Cloud console. Requires that a brand does not already exist for the project, and that the specified support email is owned by the caller.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
* // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.projects.brands.create({
* // Required. GCP Project number/id under which the brand is to be created. In the following format: projects/{project_number/id\}.
* parent: 'projects/my-project',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "applicationTitle": "my_applicationTitle",
* // "name": "my_name",
* // "orgInternalOnly": false,
* // "supportEmail": "my_supportEmail"
* // }
* },
* });
* console.log(res.data);
*
* // Example response
* // {
* // "applicationTitle": "my_applicationTitle",
* // "name": "my_name",
* // "orgInternalOnly": false,
* // "supportEmail": "my_supportEmail"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
create(
params: Params$Resource$Projects$Brands$Create,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
create(
params?: Params$Resource$Projects$Brands$Create,
options?: MethodOptions
): GaxiosPromise<Schema$Brand>;
create(
params: Params$Resource$Projects$Brands$Create,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
create(
params: Params$Resource$Projects$Brands$Create,
options: MethodOptions | BodyResponseCallback<Schema$Brand>,
callback: BodyResponseCallback<Schema$Brand>
): void;
create(
params: Params$Resource$Projects$Brands$Create,
callback: BodyResponseCallback<Schema$Brand>
): void;
create(callback: BodyResponseCallback<Schema$Brand>): void;
create(
paramsOrCallback?:
| Params$Resource$Projects$Brands$Create
| BodyResponseCallback<Schema$Brand>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$Brand>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$Brand>
| BodyResponseCallback<Readable>
): void | GaxiosPromise<Schema$Brand> | GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$Projects$Brands$Create;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Projects$Brands$Create;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+parent}/brands').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'POST',
},
options
),
params,
requiredParams: ['parent'],
pathParams: ['parent'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$Brand>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$Brand>(parameters);
}
}
/**
* Retrieves the OAuth brand of the project.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
* // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.projects.brands.get({
* // Required. Name of the brand to be fetched. In the following format: projects/{project_number/id\}/brands/{brand\}.
* name: 'projects/my-project/brands/my-brand',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "applicationTitle": "my_applicationTitle",
* // "name": "my_name",
* // "orgInternalOnly": false,
* // "supportEmail": "my_supportEmail"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
get(
params: Params$Resource$Projects$Brands$Get,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
get(
params?: Params$Resource$Projects$Brands$Get,
options?: MethodOptions
): GaxiosPromise<Schema$Brand>;
get(
params: Params$Resource$Projects$Brands$Get,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
get(
params: Params$Resource$Projects$Brands$Get,
options: MethodOptions | BodyResponseCallback<Schema$Brand>,
callback: BodyResponseCallback<Schema$Brand>
): void;
get(
params: Params$Resource$Projects$Brands$Get,
callback: BodyResponseCallback<Schema$Brand>
): void;
get(callback: BodyResponseCallback<Schema$Brand>): void;
get(
paramsOrCallback?:
| Params$Resource$Projects$Brands$Get
| BodyResponseCallback<Schema$Brand>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$Brand>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$Brand>
| BodyResponseCallback<Readable>
): void | GaxiosPromise<Schema$Brand> | GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$Projects$Brands$Get;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Projects$Brands$Get;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+name}').replace(/([^:]\/)\/+/g, '$1'),
method: 'GET',
},
options
),
params,
requiredParams: ['name'],
pathParams: ['name'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$Brand>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$Brand>(parameters);
}
}
/**
* Lists the existing brands for the project.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
     * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.projects.brands.list({
* // Required. GCP Project number/id. In the following format: projects/{project_number/id\}.
* parent: 'projects/my-project',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "brands": []
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
list(
params: Params$Resource$Projects$Brands$List,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
list(
params?: Params$Resource$Projects$Brands$List,
options?: MethodOptions
): GaxiosPromise<Schema$ListBrandsResponse>;
list(
params: Params$Resource$Projects$Brands$List,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
list(
params: Params$Resource$Projects$Brands$List,
options: MethodOptions | BodyResponseCallback<Schema$ListBrandsResponse>,
callback: BodyResponseCallback<Schema$ListBrandsResponse>
): void;
list(
params: Params$Resource$Projects$Brands$List,
callback: BodyResponseCallback<Schema$ListBrandsResponse>
): void;
list(callback: BodyResponseCallback<Schema$ListBrandsResponse>): void;
list(
paramsOrCallback?:
| Params$Resource$Projects$Brands$List
| BodyResponseCallback<Schema$ListBrandsResponse>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$ListBrandsResponse>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$ListBrandsResponse>
| BodyResponseCallback<Readable>
):
| void
| GaxiosPromise<Schema$ListBrandsResponse>
| GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$Projects$Brands$List;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Projects$Brands$List;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+parent}/brands').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'GET',
},
options
),
params,
requiredParams: ['parent'],
pathParams: ['parent'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$ListBrandsResponse>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$ListBrandsResponse>(parameters);
}
}
}
export interface Params$Resource$Projects$Brands$Create
extends StandardParameters {
/**
* Required. GCP Project number/id under which the brand is to be created. In the following format: projects/{project_number/id\}.
*/
parent?: string;
/**
* Request body metadata
*/
requestBody?: Schema$Brand;
}
export interface Params$Resource$Projects$Brands$Get
extends StandardParameters {
/**
* Required. Name of the brand to be fetched. In the following format: projects/{project_number/id\}/brands/{brand\}.
*/
name?: string;
}
export interface Params$Resource$Projects$Brands$List
extends StandardParameters {
/**
* Required. GCP Project number/id. In the following format: projects/{project_number/id\}.
*/
parent?: string;
}
export class Resource$Projects$Brands$Identityawareproxyclients {
context: APIRequestContext;
constructor(context: APIRequestContext) {
this.context = context;
}
/**
* Creates an Identity Aware Proxy (IAP) OAuth client. The client is owned by IAP. Requires that the brand for the project exists and that it is set for internal-only use.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
     * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.projects.brands.identityAwareProxyClients.create({
* // Required. Path to create the client in. In the following format: projects/{project_number/id\}/brands/{brand\}. The project must belong to a G Suite account.
* parent: 'projects/my-project/brands/my-brand',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "displayName": "my_displayName",
* // "name": "my_name",
* // "secret": "my_secret"
* // }
* },
* });
* console.log(res.data);
*
* // Example response
* // {
* // "displayName": "my_displayName",
* // "name": "my_name",
* // "secret": "my_secret"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
create(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Create,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
create(
params?: Params$Resource$Projects$Brands$Identityawareproxyclients$Create,
options?: MethodOptions
): GaxiosPromise<Schema$IdentityAwareProxyClient>;
create(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Create,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
create(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Create,
options:
| MethodOptions
| BodyResponseCallback<Schema$IdentityAwareProxyClient>,
callback: BodyResponseCallback<Schema$IdentityAwareProxyClient>
): void;
create(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Create,
callback: BodyResponseCallback<Schema$IdentityAwareProxyClient>
): void;
create(
callback: BodyResponseCallback<Schema$IdentityAwareProxyClient>
): void;
create(
paramsOrCallback?:
| Params$Resource$Projects$Brands$Identityawareproxyclients$Create
| BodyResponseCallback<Schema$IdentityAwareProxyClient>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$IdentityAwareProxyClient>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$IdentityAwareProxyClient>
| BodyResponseCallback<Readable>
):
| void
| GaxiosPromise<Schema$IdentityAwareProxyClient>
| GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$Projects$Brands$Identityawareproxyclients$Create;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Projects$Brands$Identityawareproxyclients$Create;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+parent}/identityAwareProxyClients').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'POST',
},
options
),
params,
requiredParams: ['parent'],
pathParams: ['parent'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$IdentityAwareProxyClient>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$IdentityAwareProxyClient>(parameters);
}
}
/**
* Deletes an Identity Aware Proxy (IAP) OAuth client. Useful for removing obsolete clients, managing the number of clients in a given project, and cleaning up after tests. Requires that the client is owned by IAP.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
     * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.projects.brands.identityAwareProxyClients.delete({
* // Required. Name of the Identity Aware Proxy client to be deleted. In the following format: projects/{project_number/id\}/brands/{brand\}/identityAwareProxyClients/{client_id\}.
* name:
* 'projects/my-project/brands/my-brand/identityAwareProxyClients/my-identityAwareProxyClient',
* });
* console.log(res.data);
*
* // Example response
* // {}
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
delete(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Delete,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
delete(
params?: Params$Resource$Projects$Brands$Identityawareproxyclients$Delete,
options?: MethodOptions
): GaxiosPromise<Schema$Empty>;
delete(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Delete,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
delete(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Delete,
options: MethodOptions | BodyResponseCallback<Schema$Empty>,
callback: BodyResponseCallback<Schema$Empty>
): void;
delete(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Delete,
callback: BodyResponseCallback<Schema$Empty>
): void;
delete(callback: BodyResponseCallback<Schema$Empty>): void;
delete(
paramsOrCallback?:
| Params$Resource$Projects$Brands$Identityawareproxyclients$Delete
| BodyResponseCallback<Schema$Empty>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$Empty>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$Empty>
| BodyResponseCallback<Readable>
): void | GaxiosPromise<Schema$Empty> | GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$Projects$Brands$Identityawareproxyclients$Delete;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Projects$Brands$Identityawareproxyclients$Delete;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+name}').replace(/([^:]\/)\/+/g, '$1'),
method: 'DELETE',
},
options
),
params,
requiredParams: ['name'],
pathParams: ['name'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$Empty>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$Empty>(parameters);
}
}
/**
* Retrieves an Identity Aware Proxy (IAP) OAuth client. Requires that the client is owned by IAP.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
     * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.projects.brands.identityAwareProxyClients.get({
* // Required. Name of the Identity Aware Proxy client to be fetched. In the following format: projects/{project_number/id\}/brands/{brand\}/identityAwareProxyClients/{client_id\}.
* name:
* 'projects/my-project/brands/my-brand/identityAwareProxyClients/my-identityAwareProxyClient',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "displayName": "my_displayName",
* // "name": "my_name",
* // "secret": "my_secret"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
get(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Get,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
get(
params?: Params$Resource$Projects$Brands$Identityawareproxyclients$Get,
options?: MethodOptions
): GaxiosPromise<Schema$IdentityAwareProxyClient>;
get(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Get,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
get(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Get,
options:
| MethodOptions
| BodyResponseCallback<Schema$IdentityAwareProxyClient>,
callback: BodyResponseCallback<Schema$IdentityAwareProxyClient>
): void;
get(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Get,
callback: BodyResponseCallback<Schema$IdentityAwareProxyClient>
): void;
get(callback: BodyResponseCallback<Schema$IdentityAwareProxyClient>): void;
get(
paramsOrCallback?:
| Params$Resource$Projects$Brands$Identityawareproxyclients$Get
| BodyResponseCallback<Schema$IdentityAwareProxyClient>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$IdentityAwareProxyClient>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$IdentityAwareProxyClient>
| BodyResponseCallback<Readable>
):
| void
| GaxiosPromise<Schema$IdentityAwareProxyClient>
| GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$Projects$Brands$Identityawareproxyclients$Get;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Projects$Brands$Identityawareproxyclients$Get;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+name}').replace(/([^:]\/)\/+/g, '$1'),
method: 'GET',
},
options
),
params,
requiredParams: ['name'],
pathParams: ['name'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$IdentityAwareProxyClient>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$IdentityAwareProxyClient>(parameters);
}
}
/**
* Lists the existing clients for the brand.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
     * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.projects.brands.identityAwareProxyClients.list({
* // The maximum number of clients to return. The service may return fewer than this value. If unspecified, at most 100 clients will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
* pageSize: 'placeholder-value',
* // A page token, received from a previous `ListIdentityAwareProxyClients` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListIdentityAwareProxyClients` must match the call that provided the page token.
* pageToken: 'placeholder-value',
* // Required. Full brand path. In the following format: projects/{project_number/id\}/brands/{brand\}.
* parent: 'projects/my-project/brands/my-brand',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "identityAwareProxyClients": [],
* // "nextPageToken": "my_nextPageToken"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
list(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$List,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
list(
params?: Params$Resource$Projects$Brands$Identityawareproxyclients$List,
options?: MethodOptions
): GaxiosPromise<Schema$ListIdentityAwareProxyClientsResponse>;
list(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$List,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
list(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$List,
options:
| MethodOptions
| BodyResponseCallback<Schema$ListIdentityAwareProxyClientsResponse>,
callback: BodyResponseCallback<Schema$ListIdentityAwareProxyClientsResponse>
): void;
list(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$List,
callback: BodyResponseCallback<Schema$ListIdentityAwareProxyClientsResponse>
): void;
list(
callback: BodyResponseCallback<Schema$ListIdentityAwareProxyClientsResponse>
): void;
list(
paramsOrCallback?:
| Params$Resource$Projects$Brands$Identityawareproxyclients$List
| BodyResponseCallback<Schema$ListIdentityAwareProxyClientsResponse>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$ListIdentityAwareProxyClientsResponse>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$ListIdentityAwareProxyClientsResponse>
| BodyResponseCallback<Readable>
):
| void
| GaxiosPromise<Schema$ListIdentityAwareProxyClientsResponse>
| GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$Projects$Brands$Identityawareproxyclients$List;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Projects$Brands$Identityawareproxyclients$List;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+parent}/identityAwareProxyClients').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'GET',
},
options
),
params,
requiredParams: ['parent'],
pathParams: ['parent'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$ListIdentityAwareProxyClientsResponse>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$ListIdentityAwareProxyClientsResponse>(
parameters
);
}
}
/**
* Resets an Identity Aware Proxy (IAP) OAuth client secret. Useful if the secret was compromised. Requires that the client is owned by IAP.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
     * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.projects.brands.identityAwareProxyClients.resetSecret({
     *     // Required. Name of the Identity Aware Proxy client that will have its secret reset. In the following format: projects/{project_number/id\}/brands/{brand\}/identityAwareProxyClients/{client_id\}.
* name:
* 'projects/my-project/brands/my-brand/identityAwareProxyClients/my-identityAwareProxyClient',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {}
* },
* });
* console.log(res.data);
*
* // Example response
* // {
* // "displayName": "my_displayName",
* // "name": "my_name",
* // "secret": "my_secret"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
resetSecret(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Resetsecret,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
resetSecret(
params?: Params$Resource$Projects$Brands$Identityawareproxyclients$Resetsecret,
options?: MethodOptions
): GaxiosPromise<Schema$IdentityAwareProxyClient>;
resetSecret(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Resetsecret,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
resetSecret(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Resetsecret,
options:
| MethodOptions
| BodyResponseCallback<Schema$IdentityAwareProxyClient>,
callback: BodyResponseCallback<Schema$IdentityAwareProxyClient>
): void;
resetSecret(
params: Params$Resource$Projects$Brands$Identityawareproxyclients$Resetsecret,
callback: BodyResponseCallback<Schema$IdentityAwareProxyClient>
): void;
resetSecret(
callback: BodyResponseCallback<Schema$IdentityAwareProxyClient>
): void;
resetSecret(
paramsOrCallback?:
| Params$Resource$Projects$Brands$Identityawareproxyclients$Resetsecret
| BodyResponseCallback<Schema$IdentityAwareProxyClient>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$IdentityAwareProxyClient>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$IdentityAwareProxyClient>
| BodyResponseCallback<Readable>
):
| void
| GaxiosPromise<Schema$IdentityAwareProxyClient>
| GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$Projects$Brands$Identityawareproxyclients$Resetsecret;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$Projects$Brands$Identityawareproxyclients$Resetsecret;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+name}:resetSecret').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'POST',
},
options
),
params,
requiredParams: ['name'],
pathParams: ['name'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$IdentityAwareProxyClient>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$IdentityAwareProxyClient>(parameters);
}
}
}
export interface Params$Resource$Projects$Brands$Identityawareproxyclients$Create
extends StandardParameters {
/**
* Required. Path to create the client in. In the following format: projects/{project_number/id\}/brands/{brand\}. The project must belong to a G Suite account.
*/
parent?: string;
/**
* Request body metadata
*/
requestBody?: Schema$IdentityAwareProxyClient;
}
export interface Params$Resource$Projects$Brands$Identityawareproxyclients$Delete
extends StandardParameters {
/**
* Required. Name of the Identity Aware Proxy client to be deleted. In the following format: projects/{project_number/id\}/brands/{brand\}/identityAwareProxyClients/{client_id\}.
*/
name?: string;
}
export interface Params$Resource$Projects$Brands$Identityawareproxyclients$Get
extends StandardParameters {
/**
* Required. Name of the Identity Aware Proxy client to be fetched. In the following format: projects/{project_number/id\}/brands/{brand\}/identityAwareProxyClients/{client_id\}.
*/
name?: string;
}
export interface Params$Resource$Projects$Brands$Identityawareproxyclients$List
extends StandardParameters {
/**
* The maximum number of clients to return. The service may return fewer than this value. If unspecified, at most 100 clients will be returned. The maximum value is 1000; values above 1000 will be coerced to 1000.
*/
pageSize?: number;
/**
* A page token, received from a previous `ListIdentityAwareProxyClients` call. Provide this to retrieve the subsequent page. When paginating, all other parameters provided to `ListIdentityAwareProxyClients` must match the call that provided the page token.
*/
pageToken?: string;
/**
* Required. Full brand path. In the following format: projects/{project_number/id\}/brands/{brand\}.
*/
parent?: string;
}
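  /*
   * Hedged sketch (not part of the generated client): paging through every IAP
   * OAuth client of a brand by following nextPageToken from the list() method
   * documented above.
   *
   *   let pageToken: string | undefined;
   *   do {
   *     const res = await iap.projects.brands.identityAwareProxyClients.list({
   *       parent: 'projects/my-project/brands/my-brand',
   *       pageSize: 100,
   *       pageToken,
   *     });
   *     for (const client of res.data.identityAwareProxyClients ?? []) {
   *       console.log(client.name);
   *     }
   *     pageToken = res.data.nextPageToken ?? undefined;
   *   } while (pageToken);
   */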
export interface Params$Resource$Projects$Brands$Identityawareproxyclients$Resetsecret
extends StandardParameters {
/**
     * Required. Name of the Identity Aware Proxy client that will have its secret reset. In the following format: projects/{project_number/id\}/brands/{brand\}/identityAwareProxyClients/{client_id\}.
*/
name?: string;
/**
* Request body metadata
*/
requestBody?: Schema$ResetIdentityAwareProxyClientSecretRequest;
}
export class Resource$V1 {
context: APIRequestContext;
constructor(context: APIRequestContext) {
this.context = context;
}
/**
* Gets the access control policy for an Identity-Aware Proxy protected resource. More information about managing access via IAP can be found at: https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
     * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.getIamPolicy({
* // REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.
* resource: '.*',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "options": {}
* // }
* },
* });
* console.log(res.data);
*
* // Example response
* // {
* // "bindings": [],
* // "etag": "my_etag",
* // "version": 0
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
getIamPolicy(
params: Params$Resource$V1$Getiampolicy,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
getIamPolicy(
params?: Params$Resource$V1$Getiampolicy,
options?: MethodOptions
): GaxiosPromise<Schema$Policy>;
getIamPolicy(
params: Params$Resource$V1$Getiampolicy,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
getIamPolicy(
params: Params$Resource$V1$Getiampolicy,
options: MethodOptions | BodyResponseCallback<Schema$Policy>,
callback: BodyResponseCallback<Schema$Policy>
): void;
getIamPolicy(
params: Params$Resource$V1$Getiampolicy,
callback: BodyResponseCallback<Schema$Policy>
): void;
getIamPolicy(callback: BodyResponseCallback<Schema$Policy>): void;
getIamPolicy(
paramsOrCallback?:
| Params$Resource$V1$Getiampolicy
| BodyResponseCallback<Schema$Policy>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$Policy>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$Policy>
| BodyResponseCallback<Readable>
): void | GaxiosPromise<Schema$Policy> | GaxiosPromise<Readable> {
let params = (paramsOrCallback || {}) as Params$Resource$V1$Getiampolicy;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$V1$Getiampolicy;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+resource}:getIamPolicy').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'POST',
},
options
),
params,
requiredParams: ['resource'],
pathParams: ['resource'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$Policy>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$Policy>(parameters);
}
}
/**
* Gets the IAP settings on a particular IAP protected resource.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
     * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.getIapSettings({
* // Required. The resource name for which to retrieve the settings. Authorization: Requires the `getSettings` permission for the associated resource.
* name: '.*',
* });
* console.log(res.data);
*
* // Example response
* // {
* // "accessSettings": {},
* // "applicationSettings": {},
* // "name": "my_name"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
getIapSettings(
params: Params$Resource$V1$Getiapsettings,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
getIapSettings(
params?: Params$Resource$V1$Getiapsettings,
options?: MethodOptions
): GaxiosPromise<Schema$IapSettings>;
getIapSettings(
params: Params$Resource$V1$Getiapsettings,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
getIapSettings(
params: Params$Resource$V1$Getiapsettings,
options: MethodOptions | BodyResponseCallback<Schema$IapSettings>,
callback: BodyResponseCallback<Schema$IapSettings>
): void;
getIapSettings(
params: Params$Resource$V1$Getiapsettings,
callback: BodyResponseCallback<Schema$IapSettings>
): void;
getIapSettings(callback: BodyResponseCallback<Schema$IapSettings>): void;
getIapSettings(
paramsOrCallback?:
| Params$Resource$V1$Getiapsettings
| BodyResponseCallback<Schema$IapSettings>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$IapSettings>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$IapSettings>
| BodyResponseCallback<Readable>
): void | GaxiosPromise<Schema$IapSettings> | GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$V1$Getiapsettings;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$V1$Getiapsettings;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+name}:iapSettings').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'GET',
},
options
),
params,
requiredParams: ['name'],
pathParams: ['name'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$IapSettings>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$IapSettings>(parameters);
}
}
/**
* Sets the access control policy for an Identity-Aware Proxy protected resource. Replaces any existing policy. More information about managing access via IAP can be found at: https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
     * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.setIamPolicy({
* // REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.
* resource: '.*',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "policy": {}
* // }
* },
* });
* console.log(res.data);
*
* // Example response
* // {
* // "bindings": [],
* // "etag": "my_etag",
* // "version": 0
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
setIamPolicy(
params: Params$Resource$V1$Setiampolicy,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
setIamPolicy(
params?: Params$Resource$V1$Setiampolicy,
options?: MethodOptions
): GaxiosPromise<Schema$Policy>;
setIamPolicy(
params: Params$Resource$V1$Setiampolicy,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
setIamPolicy(
params: Params$Resource$V1$Setiampolicy,
options: MethodOptions | BodyResponseCallback<Schema$Policy>,
callback: BodyResponseCallback<Schema$Policy>
): void;
setIamPolicy(
params: Params$Resource$V1$Setiampolicy,
callback: BodyResponseCallback<Schema$Policy>
): void;
setIamPolicy(callback: BodyResponseCallback<Schema$Policy>): void;
setIamPolicy(
paramsOrCallback?:
| Params$Resource$V1$Setiampolicy
| BodyResponseCallback<Schema$Policy>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$Policy>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$Policy>
| BodyResponseCallback<Readable>
): void | GaxiosPromise<Schema$Policy> | GaxiosPromise<Readable> {
let params = (paramsOrCallback || {}) as Params$Resource$V1$Setiampolicy;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$V1$Setiampolicy;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+resource}:setIamPolicy').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'POST',
},
options
),
params,
requiredParams: ['resource'],
pathParams: ['resource'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$Policy>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$Policy>(parameters);
}
}
/**
* Returns permissions that a caller has on the Identity-Aware Proxy protected resource. More information about managing access via IAP can be found at: https://cloud.google.com/iap/docs/managing-access#managing_access_via_the_api
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
     * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.testIamPermissions({
* // REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.
* resource: '.*',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "permissions": []
* // }
* },
* });
* console.log(res.data);
*
* // Example response
* // {
* // "permissions": []
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
testIamPermissions(
params: Params$Resource$V1$Testiampermissions,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
testIamPermissions(
params?: Params$Resource$V1$Testiampermissions,
options?: MethodOptions
): GaxiosPromise<Schema$TestIamPermissionsResponse>;
testIamPermissions(
params: Params$Resource$V1$Testiampermissions,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
testIamPermissions(
params: Params$Resource$V1$Testiampermissions,
options:
| MethodOptions
| BodyResponseCallback<Schema$TestIamPermissionsResponse>,
callback: BodyResponseCallback<Schema$TestIamPermissionsResponse>
): void;
testIamPermissions(
params: Params$Resource$V1$Testiampermissions,
callback: BodyResponseCallback<Schema$TestIamPermissionsResponse>
): void;
testIamPermissions(
callback: BodyResponseCallback<Schema$TestIamPermissionsResponse>
): void;
testIamPermissions(
paramsOrCallback?:
| Params$Resource$V1$Testiampermissions
| BodyResponseCallback<Schema$TestIamPermissionsResponse>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$TestIamPermissionsResponse>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$TestIamPermissionsResponse>
| BodyResponseCallback<Readable>
):
| void
| GaxiosPromise<Schema$TestIamPermissionsResponse>
| GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$V1$Testiampermissions;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$V1$Testiampermissions;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+resource}:testIamPermissions').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'POST',
},
options
),
params,
requiredParams: ['resource'],
pathParams: ['resource'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$TestIamPermissionsResponse>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$TestIamPermissionsResponse>(parameters);
}
}
/**
* Updates the IAP settings on a particular IAP protected resource. It replaces all fields unless the `update_mask` is set.
* @example
* ```js
* // Before running the sample:
* // - Enable the API at:
* // https://console.developers.google.com/apis/api/iap.googleapis.com
     * // - Log in to gcloud by running:
* // `$ gcloud auth application-default login`
* // - Install the npm module by running:
* // `$ npm install googleapis`
*
* const {google} = require('googleapis');
* const iap = google.iap('v1');
*
* async function main() {
* const auth = new google.auth.GoogleAuth({
* // Scopes can be specified either as an array or as a single, space-delimited string.
* scopes: ['https://www.googleapis.com/auth/cloud-platform'],
* });
*
* // Acquire an auth client, and bind it to all future calls
* const authClient = await auth.getClient();
* google.options({auth: authClient});
*
* // Do the magic
* const res = await iap.updateIapSettings({
* // Required. The resource name of the IAP protected resource.
* name: '.*',
     *     // The field mask specifying which IAP settings should be updated. If omitted, then all of the settings are updated. See https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
* updateMask: 'placeholder-value',
*
* // Request body metadata
* requestBody: {
* // request body parameters
* // {
* // "accessSettings": {},
* // "applicationSettings": {},
* // "name": "my_name"
* // }
* },
* });
* console.log(res.data);
*
* // Example response
* // {
* // "accessSettings": {},
* // "applicationSettings": {},
* // "name": "my_name"
* // }
* }
*
* main().catch(e => {
* console.error(e);
* throw e;
* });
*
* ```
*
* @param params - Parameters for request
* @param options - Optionally override request options, such as `url`, `method`, and `encoding`.
* @param callback - Optional callback that handles the response.
* @returns A promise if used with async/await, or void if used with a callback.
*/
updateIapSettings(
params: Params$Resource$V1$Updateiapsettings,
options: StreamMethodOptions
): GaxiosPromise<Readable>;
updateIapSettings(
params?: Params$Resource$V1$Updateiapsettings,
options?: MethodOptions
): GaxiosPromise<Schema$IapSettings>;
updateIapSettings(
params: Params$Resource$V1$Updateiapsettings,
options: StreamMethodOptions | BodyResponseCallback<Readable>,
callback: BodyResponseCallback<Readable>
): void;
updateIapSettings(
params: Params$Resource$V1$Updateiapsettings,
options: MethodOptions | BodyResponseCallback<Schema$IapSettings>,
callback: BodyResponseCallback<Schema$IapSettings>
): void;
updateIapSettings(
params: Params$Resource$V1$Updateiapsettings,
callback: BodyResponseCallback<Schema$IapSettings>
): void;
updateIapSettings(callback: BodyResponseCallback<Schema$IapSettings>): void;
updateIapSettings(
paramsOrCallback?:
| Params$Resource$V1$Updateiapsettings
| BodyResponseCallback<Schema$IapSettings>
| BodyResponseCallback<Readable>,
optionsOrCallback?:
| MethodOptions
| StreamMethodOptions
| BodyResponseCallback<Schema$IapSettings>
| BodyResponseCallback<Readable>,
callback?:
| BodyResponseCallback<Schema$IapSettings>
| BodyResponseCallback<Readable>
): void | GaxiosPromise<Schema$IapSettings> | GaxiosPromise<Readable> {
let params = (paramsOrCallback ||
{}) as Params$Resource$V1$Updateiapsettings;
let options = (optionsOrCallback || {}) as MethodOptions;
if (typeof paramsOrCallback === 'function') {
callback = paramsOrCallback;
params = {} as Params$Resource$V1$Updateiapsettings;
options = {};
}
if (typeof optionsOrCallback === 'function') {
callback = optionsOrCallback;
options = {};
}
const rootUrl = options.rootUrl || 'https://iap.googleapis.com/';
const parameters = {
options: Object.assign(
{
url: (rootUrl + '/v1/{+name}:iapSettings').replace(
/([^:]\/)\/+/g,
'$1'
),
method: 'PATCH',
},
options
),
params,
requiredParams: ['name'],
pathParams: ['name'],
context: this.context,
};
if (callback) {
createAPIRequest<Schema$IapSettings>(
parameters,
callback as BodyResponseCallback<unknown>
);
} else {
return createAPIRequest<Schema$IapSettings>(parameters);
}
}
}
export interface Params$Resource$V1$Getiampolicy extends StandardParameters {
/**
* REQUIRED: The resource for which the policy is being requested. See the operation documentation for the appropriate value for this field.
*/
resource?: string;
/**
* Request body metadata
*/
requestBody?: Schema$GetIamPolicyRequest;
}
export interface Params$Resource$V1$Getiapsettings
extends StandardParameters {
/**
* Required. The resource name for which to retrieve the settings. Authorization: Requires the `getSettings` permission for the associated resource.
*/
name?: string;
}
export interface Params$Resource$V1$Setiampolicy extends StandardParameters {
/**
* REQUIRED: The resource for which the policy is being specified. See the operation documentation for the appropriate value for this field.
*/
resource?: string;
/**
* Request body metadata
*/
requestBody?: Schema$SetIamPolicyRequest;
}
export interface Params$Resource$V1$Testiampermissions
extends StandardParameters {
/**
* REQUIRED: The resource for which the policy detail is being requested. See the operation documentation for the appropriate value for this field.
*/
resource?: string;
/**
* Request body metadata
*/
requestBody?: Schema$TestIamPermissionsRequest;
}
export interface Params$Resource$V1$Updateiapsettings
extends StandardParameters {
/**
* Required. The resource name of the IAP protected resource.
*/
name?: string;
/**
     * The field mask specifying which IAP settings should be updated. If omitted, then all of the settings are updated. See https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask
*/
updateMask?: string;
/**
* Request body metadata
*/
requestBody?: Schema$IapSettings;
}
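  /*
   * Hedged sketch (illustrative only; the resource name is a hypothetical
   * placeholder): updating just the access settings of an IAP-protected
   * resource by naming that field in updateMask, which leaves all other
   * settings untouched.
   *
   *   await iap.updateIapSettings({
   *     name: 'RESOURCE_NAME',
   *     updateMask: 'accessSettings',
   *     requestBody: {accessSettings: {}},
   *   });
   */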
}
kprototypes.py
"""
K-prototypes clustering for mixed categorical and numerical data
"""
# pylint: disable=super-on-old-class,unused-argument,attribute-defined-outside-init
from collections import defaultdict
import numpy as np
from scipy import sparse
from joblib import Parallel, delayed
from sklearn.utils import check_random_state
from sklearn.utils.validation import check_array
from . import kmodes
from .util import get_max_value_key, encode_features, get_unique_rows, decode_centroids
from .util.dissim import matching_dissim, euclidean_dissim
# Number of tries we give the initialization methods to find non-empty
# clusters before we switch to random initialization.
MAX_INIT_TRIES = 20
# Number of tries we give the initialization before we raise an
# initialization error.
RAISE_INIT_TRIES = 100
def move_point_num(point, to_clust, from_clust, cl_attr_sum, cl_memb_sum):
"""Move point between clusters, numerical attributes."""
# Update sum of attributes in cluster.
for iattr, curattr in enumerate(point):
cl_attr_sum[to_clust][iattr] += curattr
cl_attr_sum[from_clust][iattr] -= curattr
# Update sums of memberships in cluster
cl_memb_sum[to_clust] += 1
cl_memb_sum[from_clust] -= 1
return cl_attr_sum, cl_memb_sum
def _split_num_cat(X, categorical):
"""Extract numerical and categorical columns.
Convert to numpy arrays, if needed.
:param X: Feature matrix
:param categorical: Indices of categorical columns
"""
Xnum = np.asanyarray(X[:, [ii for ii in range(X.shape[1])
if ii not in categorical]]).astype(np.float64)
Xcat = np.asanyarray(X[:, categorical])
return Xnum, Xcat
def _labels_cost(Xnum, Xcat, centroids, num_dissim, cat_dissim, gamma, membship=None):
"""Calculate labels and cost function given a matrix of points and
a list of centroids for the k-prototypes algorithm.
"""
n_points = Xnum.shape[0]
Xnum = check_array(Xnum)
cost = 0.
labels = np.empty(n_points, dtype=np.uint8)
for ipoint in range(n_points):
# Numerical cost = sum of Euclidean distances
num_costs = num_dissim(centroids[0], Xnum[ipoint])
cat_costs = cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship)
# Gamma relates the categorical cost to the numerical cost.
tot_costs = num_costs + gamma * cat_costs
clust = np.argmin(tot_costs)
labels[ipoint] = clust
cost += tot_costs[clust]
return labels, cost
def _k_prototypes_iter(Xnum, Xcat, centroids, cl_attr_sum, cl_memb_sum, cl_attr_freq,
membship, num_dissim, cat_dissim, gamma, random_state):
"""Single iteration of the k-prototypes algorithm"""
moves = 0
for ipoint in range(Xnum.shape[0]):
clust = np.argmin(
num_dissim(centroids[0], Xnum[ipoint]) +
gamma * cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship)
)
if membship[clust, ipoint]:
# Point is already in its right place.
continue
# Move point, and update old/new cluster frequencies and centroids.
moves += 1
old_clust = np.argwhere(membship[:, ipoint])[0][0]
# Note that membship gets updated by kmodes.move_point_cat.
# move_point_num only updates things specific to the k-means part.
cl_attr_sum, cl_memb_sum = move_point_num(
Xnum[ipoint], clust, old_clust, cl_attr_sum, cl_memb_sum
)
cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat(
Xcat[ipoint], ipoint, clust, old_clust,
cl_attr_freq, membship, centroids[1]
)
# Update old and new centroids for numerical attributes using
# the means and sums of all values
for iattr in range(len(Xnum[ipoint])):
for curc in (clust, old_clust):
if cl_memb_sum[curc]:
centroids[0][curc, iattr] = cl_attr_sum[curc, iattr] / cl_memb_sum[curc]
else:
centroids[0][curc, iattr] = 0.
# In case of an empty cluster, reinitialize with a random point
# from largest cluster.
if not cl_memb_sum[old_clust]:
from_clust = membship.sum(axis=1).argmax()
choices = [ii for ii, ch in enumerate(membship[from_clust, :]) if ch]
rindx = random_state.choice(choices)
cl_attr_sum, cl_memb_sum = move_point_num(
Xnum[rindx], old_clust, from_clust, cl_attr_sum, cl_memb_sum
)
cl_attr_freq, membship, centroids[1] = kmodes.move_point_cat(
Xcat[rindx], rindx, old_clust, from_clust,
cl_attr_freq, membship, centroids[1]
)
return centroids, moves
def k_prototypes_single(Xnum, Xcat, nnumattrs, ncatattrs, n_clusters, n_points,
max_iter, num_dissim, cat_dissim, gamma, init, init_no,
verbose, random_state):
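    """Run a single initialization and clustering pass of k-prototypes."""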
    # For the numerical part of the initialization there is no guarantee that
    # every cluster ends up non-empty, so we retry until no cluster is empty.
random_state = check_random_state(random_state)
init_tries = 0
while True:
init_tries += 1
# _____ INIT _____
if verbose:
print("Init: initializing centroids")
if isinstance(init, str) and init.lower() == 'huang':
centroids = kmodes.init_huang(Xcat, n_clusters, cat_dissim, random_state)
elif isinstance(init, str) and init.lower() == 'cao':
centroids = kmodes.init_cao(Xcat, n_clusters, cat_dissim)
elif isinstance(init, str) and init.lower() == 'random':
seeds = random_state.choice(range(n_points), n_clusters)
centroids = Xcat[seeds]
elif isinstance(init, list):
# Make sure inits are 2D arrays.
init = [np.atleast_2d(cur_init).T if len(cur_init.shape) == 1
else cur_init
for cur_init in init]
assert init[0].shape[0] == n_clusters, \
"Wrong number of initial numerical centroids in init " \
"({}, should be {}).".format(init[0].shape[0], n_clusters)
assert init[0].shape[1] == nnumattrs, \
"Wrong number of numerical attributes in init ({}, should be {})." \
.format(init[0].shape[1], nnumattrs)
assert init[1].shape[0] == n_clusters, \
"Wrong number of initial categorical centroids in init ({}, " \
"should be {}).".format(init[1].shape[0], n_clusters)
assert init[1].shape[1] == ncatattrs, \
"Wrong number of categorical attributes in init ({}, should be {})." \
.format(init[1].shape[1], ncatattrs)
centroids = [np.asarray(init[0], dtype=np.float64),
np.asarray(init[1], dtype=np.uint8)]
else:
raise NotImplementedError("Initialization method not supported.")
if not isinstance(init, list):
            # Numerical centroids are initialized by drawing from a normal
            # distribution; categorical centroids follow the k-modes methods above.
meanx = np.mean(Xnum, axis=0)
stdx = np.std(Xnum, axis=0)
centroids = [
meanx + random_state.randn(n_clusters, nnumattrs) * stdx,
centroids
]
if verbose:
print("Init: initializing clusters")
membship = np.zeros((n_clusters, n_points), dtype=np.uint8)
# Keep track of the sum of attribute values per cluster so that we
# can do k-means on the numerical attributes.
cl_attr_sum = np.zeros((n_clusters, nnumattrs), dtype=np.float64)
# Same for the membership sum per cluster
cl_memb_sum = np.zeros(n_clusters, dtype=int)
# cl_attr_freq is a list of lists with dictionaries that contain
# the frequencies of values per cluster and attribute.
cl_attr_freq = [[defaultdict(int) for _ in range(ncatattrs)]
for _ in range(n_clusters)]
for ipoint in range(n_points):
# Initial assignment to clusters
clust = np.argmin(
num_dissim(centroids[0], Xnum[ipoint]) + gamma *
cat_dissim(centroids[1], Xcat[ipoint], X=Xcat, membship=membship)
)
membship[clust, ipoint] = 1
cl_memb_sum[clust] += 1
# Count attribute values per cluster.
for iattr, curattr in enumerate(Xnum[ipoint]):
cl_attr_sum[clust, iattr] += curattr
for iattr, curattr in enumerate(Xcat[ipoint]):
cl_attr_freq[clust][iattr][curattr] += 1
# If no empty clusters, then consider initialization finalized.
if membship.sum(axis=1).min() > 0:
break
if init_tries == MAX_INIT_TRIES:
# Could not get rid of empty clusters. Randomly
# initialize instead.
init = 'random'
elif init_tries == RAISE_INIT_TRIES:
raise ValueError(
"Clustering algorithm could not initialize. "
"Consider assigning the initial clusters manually."
)
# Perform an initial centroid update.
for ik in range(n_clusters):
for iattr in range(nnumattrs):
centroids[0][ik, iattr] = cl_attr_sum[ik, iattr] / cl_memb_sum[ik]
for iattr in range(ncatattrs):
centroids[1][ik, iattr] = get_max_value_key(cl_attr_freq[ik][iattr])
# _____ ITERATION _____
if verbose:
print("Starting iterations...")
itr = 0
labels = None
converged = False
cost = np.Inf
while itr <= max_iter and not converged:
itr += 1
centroids, moves = _k_prototypes_iter(Xnum, Xcat, centroids,
cl_attr_sum, cl_memb_sum, cl_attr_freq,
membship, num_dissim, cat_dissim, gamma,
random_state)
# All points seen in this iteration
labels, ncost = _labels_cost(Xnum, Xcat, centroids,
num_dissim, cat_dissim, gamma, membship)
converged = (moves == 0) or (ncost >= cost)
cost = ncost
if verbose:
print("Run: {}, iteration: {}/{}, moves: {}, ncost: {}"
.format(init_no + 1, itr, max_iter, moves, ncost))
return centroids, labels, cost, itr
def k_prototypes(X, categorical, n_clusters, max_iter, num_dissim, cat_dissim,
gamma, init, n_init, verbose, random_state, n_jobs):
"""k-prototypes algorithm"""
random_state = check_random_state(random_state)
if sparse.issparse(X):
raise TypeError("k-prototypes does not support sparse data.")
# Convert pandas objects to numpy arrays.
if 'pandas' in str(X.__class__):
X = X.values
if categorical is None or not categorical:
raise NotImplementedError(
"No categorical data selected, effectively doing k-means. "
"Present a list of categorical columns, or use scikit-learn's "
"KMeans instead."
)
if isinstance(categorical, int):
categorical = [categorical]
assert len(categorical) != X.shape[1], \
"All columns are categorical, use k-modes instead of k-prototypes."
assert max(categorical) < X.shape[1], \
"Categorical index larger than number of columns."
ncatattrs = len(categorical)
nnumattrs = X.shape[1] - ncatattrs
n_points = X.shape[0]
assert n_clusters <= n_points, "Cannot have more clusters ({}) " \
"than data points ({}).".format(n_clusters, n_points)
Xnum, Xcat = _split_num_cat(X, categorical)
Xnum, Xcat = check_array(Xnum), check_array(Xcat, dtype=None)
# Convert the categorical values in Xcat to integers for speed.
# Based on the unique values in Xcat, we can make a mapping to achieve this.
Xcat, enc_map = encode_features(Xcat)
# Are there more n_clusters than unique rows? Then set the unique
# rows as initial values and skip iteration.
unique = get_unique_rows(X)
n_unique = unique.shape[0]
if n_unique <= n_clusters:
max_iter = 0
n_init = 1
n_clusters = n_unique
init = list(_split_num_cat(unique, categorical))
init[1], _ = encode_features(init[1], enc_map)
# Estimate a good value for gamma, which determines the weighing of
# categorical values in clusters (see Huang [1997]).
if gamma is None:
gamma = 0.5 * Xnum.std()
results = []
seeds = random_state.randint(np.iinfo(np.int32).max, size=n_init)
if n_jobs == 1:
for init_no in range(n_init):
results.append(k_prototypes_single(Xnum, Xcat, nnumattrs, ncatattrs,
n_clusters, n_points, max_iter,
num_dissim, cat_dissim, gamma,
init, init_no, verbose, seeds[init_no]))
else:
results = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(k_prototypes_single)(Xnum, Xcat, nnumattrs, ncatattrs,
n_clusters, n_points, max_iter,
num_dissim, cat_dissim, gamma,
init, init_no, verbose, seed)
for init_no, seed in enumerate(seeds))
all_centroids, all_labels, all_costs, all_n_iters = zip(*results)
best = np.argmin(all_costs)
if n_init > 1 and verbose:
print("Best run was number {}".format(best + 1))
# Note: return gamma in case it was automatically determined.
return all_centroids[best], enc_map, all_labels[best], \
all_costs[best], all_n_iters[best], gamma
class KPrototypes(kmodes.KModes):
"""k-protoypes clustering algorithm for mixed numerical/categorical data.
Parameters
-----------
n_clusters : int, optional, default: 8
The number of clusters to form as well as the number of
centroids to generate.
    max_iter : int, default: 100
        Maximum number of iterations of the k-prototypes algorithm for a
        single run.
    num_dissim : func, default: euclidean_dissim
        Dissimilarity function used by the algorithm for numerical variables.
|
Defaults to the matching dissimilarity function.
    n_init : int, default: 10
        Number of times the k-prototypes algorithm will be run with different
        centroid seeds. The final results will be the best output of
        n_init consecutive runs in terms of cost.
    init : {'Huang', 'Cao', 'random' or a list of ndarrays}, default: 'Huang'
Method for initialization:
'Huang': Method in Huang [1997, 1998]
'Cao': Method in Cao et al. [2009]
'random': choose 'n_clusters' observations (rows) at random from
data for the initial centroids.
If a list of ndarrays is passed, it should be of length 2, with
shapes (n_clusters, n_features) for numerical and categorical
data respectively. These are the initial centroids.
gamma : float, default: None
Weighing factor that determines relative importance of numerical vs.
categorical attributes (see discussion in Huang [1997]). By default,
automatically calculated from data.
verbose : integer, optional
Verbosity mode.
random_state : int, RandomState instance or None, optional, default: None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
n_jobs : int, default: 1
The number of jobs to use for the computation. This works by computing
each of the n_init runs in parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
Attributes
----------
cluster_centroids_ : array, [n_clusters, n_features]
Categories of cluster centroids
labels_ :
Labels of each point
cost_ : float
Clustering cost, defined as the sum distance of all points to
their respective cluster centroids.
n_iter_ : int
The number of iterations the algorithm ran for.
gamma : float
The (potentially calculated) weighing factor.
Notes
-----
See:
Huang, Z.: Extensions to the k-modes algorithm for clustering large
data sets with categorical values, Data Mining and Knowledge
Discovery 2(3), 1998.
"""
def __init__(self, n_clusters=8, max_iter=100, num_dissim=euclidean_dissim,
cat_dissim=matching_dissim, init='Huang', n_init=10, gamma=None,
verbose=0, random_state=None, n_jobs=1):
super(KPrototypes, self).__init__(n_clusters, max_iter, cat_dissim,
init, n_init, verbose, random_state,
n_jobs)
self.num_dissim = num_dissim
self.gamma = gamma
def fit(self, X, y=None, categorical=None):
"""Compute k-prototypes clustering.
Parameters
----------
X : array-like, shape=[n_samples, n_features]
categorical : Index of columns that contain categorical data
"""
random_state = check_random_state(self.random_state)
# If self.gamma is None, gamma will be automatically determined from
# the data. The function below returns its value.
self._enc_cluster_centroids, self._enc_map, self.labels_, self.cost_,\
self.n_iter_, self.gamma = k_prototypes(X,
categorical,
self.n_clusters,
self.max_iter,
self.num_dissim,
self.cat_dissim,
self.gamma,
self.init,
self.n_init,
self.verbose,
random_state,
self.n_jobs)
return self
def predict(self, X, categorical=None):
"""Predict the closest cluster each sample in X belongs to.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
New data to predict.
categorical : Index of columns that contain categorical data
Returns
-------
labels : array, shape [n_samples,]
Index of the cluster each sample belongs to.
"""
assert hasattr(self, '_enc_cluster_centroids'), "Model not yet fitted."
Xnum, Xcat = _split_num_cat(X, categorical)
Xnum, Xcat = check_array(Xnum), check_array(Xcat, dtype=None)
Xcat, _ = encode_features(Xcat, enc_map=self._enc_map)
return _labels_cost(Xnum, Xcat, self._enc_cluster_centroids,
self.num_dissim, self.cat_dissim, self.gamma)[0]
@property
def cluster_centroids_(self):
if hasattr(self, '_enc_cluster_centroids'):
return [
self._enc_cluster_centroids[0],
decode_centroids(self._enc_cluster_centroids[1], self._enc_map)
]
else:
raise AttributeError("'{}' object has no attribute 'cluster_centroids_' "
"because the model is not yet fitted.")
|
        Defaults to the Euclidean dissimilarity function.
cat_dissim : func, default: matching_dissim
Dissimilarity function used by the kmodes algorithm for categorical variables.
|
coordinator.rs
|
// Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
chunk_request::{GetChunkRequest, TargetType},
chunk_response::{GetChunkResponse, ResponseLedgerInfo},
counters,
executor_proxy::ExecutorProxyTrait,
network::{StateSynchronizerEvents, StateSynchronizerMsg, StateSynchronizerSender},
peer_manager::{PeerManager, PeerScoreUpdateType},
PeerId, SynchronizerState,
};
use anyhow::{bail, ensure, format_err, Result};
use futures::{
channel::{mpsc, oneshot},
stream::{futures_unordered::FuturesUnordered, select_all},
StreamExt,
};
use libra_config::config::{RoleType, StateSyncConfig};
use libra_logger::prelude::*;
use libra_mempool::{CommitNotification, CommitResponse, CommittedTransaction};
use libra_types::{
contract_event::ContractEvent,
crypto_proxies::{LedgerInfoWithSignatures, ValidatorChangeProof},
event_subscription::EventSubscription,
transaction::{Transaction, TransactionListWithProof, Version},
validator_change::VerifierType,
waypoint::Waypoint,
};
use network::protocols::network::Event;
use std::{
collections::HashMap,
time::{Duration, SystemTime, UNIX_EPOCH},
};
use tokio::time::{interval, timeout};
pub(crate) struct SyncRequest {
// The Result value returned to the caller is Error in case the StateSynchronizer failed to
// reach the target (the LI in the storage remains unchanged as if nothing happened).
pub callback: oneshot::Sender<Result<()>>,
pub target: LedgerInfoWithSignatures,
}
pub(crate) struct EpochRetrievalRequest {
pub start_epoch: u64,
pub end_epoch: u64,
pub callback: oneshot::Sender<Result<ValidatorChangeProof>>,
}
/// message used by StateSyncClient for communication with Coordinator
pub(crate) enum CoordinatorMessage {
// used to initiate new sync
Request(SyncRequest),
// used to notify about new txn commit
Commit(
// committed transactions
Vec<Transaction>,
// reconfiguration events
Vec<ContractEvent>,
// callback for recipient to send response back to this sender
oneshot::Sender<Result<CommitResponse>>,
),
GetState(oneshot::Sender<SynchronizerState>),
// used to generate epoch proof
GetEpochProof(EpochRetrievalRequest),
// Receive a notification via a given channel when coordinator is initialized.
WaitInitialize(oneshot::Sender<Result<()>>),
}
#[derive(Clone, Debug, PartialEq, Eq)]
struct PendingRequestInfo {
expiration_time: SystemTime,
known_version: u64,
request_epoch: u64,
limit: u64,
}
/// Coordination of synchronization process is driven by SyncCoordinator, which `start()` function
/// runs an infinite event loop and triggers actions based on external / internal requests.
/// The coordinator can work in two modes:
/// * FullNode: infinite stream of ChunkRequests is sent to the predefined static peers
/// (the parent is going to reply with a ChunkResponse if its committed version becomes
/// higher within the timeout interval).
/// * Validator: the ChunkRequests are generated on demand for a specific target LedgerInfo to
/// synchronize to.
pub(crate) struct SyncCoordinator<T> {
// used to process client requests
client_events: mpsc::UnboundedReceiver<CoordinatorMessage>,
// used to send messages (e.g. notifications about newly committed txns) to mempool
state_sync_to_mempool_sender: mpsc::Sender<CommitNotification>,
// Current state of the storage, which includes both the latest committed transaction and the
// latest transaction covered by the LedgerInfo (see `SynchronizerState` documentation).
// The state is updated via syncing with the local storage.
local_state: SynchronizerState,
// duration with the same version before the next attempt to get the next chunk
retry_timeout: Duration,
// config
config: StateSyncConfig,
// role of node
role: RoleType,
    // An initial waypoint: for as long as the local version is less than a version determined by
    // the waypoint, a node is not considered initialized and keeps issuing waypoint-targeted
    // chunk requests instead of serving regular sync requests.
waypoint: Option<Waypoint>,
// peers used for synchronization
peer_manager: PeerManager,
// Optional sync request to be called when the target sync is reached
sync_request: Option<SyncRequest>,
// Option initialization listener to be called when the coordinator is caught up with
// its waypoint.
initialization_listener: Option<oneshot::Sender<Result<()>>>,
// queue of incoming long polling requests
// peer will be notified about new chunk of transactions if it's available before expiry time
subscriptions: HashMap<PeerId, PendingRequestInfo>,
reconfig_event_subscriptions: Vec<Box<dyn EventSubscription>>,
executor_proxy: T,
}
impl<T: ExecutorProxyTrait> SyncCoordinator<T> {
pub fn new(
client_events: mpsc::UnboundedReceiver<CoordinatorMessage>,
state_sync_to_mempool_sender: mpsc::Sender<CommitNotification>,
role: RoleType,
waypoint: Option<Waypoint>,
config: StateSyncConfig,
executor_proxy: T,
initial_state: SynchronizerState,
reconfig_event_subscriptions: Vec<Box<dyn EventSubscription>>,
) -> Self {
let upstream_peers = config.upstream_peers.upstream_peers.clone();
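        // A full node long-polls its upstream peers, so it waits out the long poll timeout
        // plus one tick before re-requesting; a validator simply retries after two ticks.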
let retry_timeout_val = match role {
RoleType::FullNode => config.tick_interval_ms + config.long_poll_timeout_ms,
RoleType::Validator => 2 * config.tick_interval_ms,
};
Self {
client_events,
state_sync_to_mempool_sender,
local_state: initial_state,
retry_timeout: Duration::from_millis(retry_timeout_val),
config,
role,
waypoint,
peer_manager: PeerManager::new(upstream_peers),
subscriptions: HashMap::new(),
sync_request: None,
initialization_listener: None,
reconfig_event_subscriptions,
executor_proxy,
}
}
    /// Main routine: starts the sync coordinator, which listens for `CoordinatorMessage`s,
    /// network events and periodic ticks.
pub async fn
|
(mut self, network: Vec<(StateSynchronizerSender, StateSynchronizerEvents)>) {
let mut interval = interval(Duration::from_millis(self.config.tick_interval_ms)).fuse();
let network_senders: Vec<StateSynchronizerSender> =
network.iter().map(|t| t.0.clone()).collect();
let events: Vec<_> = network
.into_iter()
.enumerate()
.map(|(idx, t)| t.1.map(move |e| (idx, e)))
.collect();
let mut network_events = select_all(events).fuse();
loop {
::futures::select! {
msg = self.client_events.select_next_some() => {
match msg {
CoordinatorMessage::Request(request) => {
if let Err(e) = self.request_sync(request).await {
error!("[state sync] request sync fail: {}", e);
}
}
CoordinatorMessage::Commit(txns, events, callback) => {
if let Err(e) = self.process_commit(txns, Some(callback)).await {
error!("[state sync] process commit fail: {}", e);
}
// TODO add per-subscription filter logic
for event in events {
for subscription in self.reconfig_event_subscriptions.iter_mut() {
subscription.publish(event.clone());
}
}
}
CoordinatorMessage::GetState(callback) => {
self.get_state(callback);
}
CoordinatorMessage::GetEpochProof(request) => {
self.get_epoch_proof(request).await;
}
CoordinatorMessage::WaitInitialize(cb_sender) => {
self.set_initialization_listener(cb_sender);
}
};
},
(idx, network_event) = network_events.select_next_some() => {
match network_event {
Ok(event) => {
match event {
Event::NewPeer(peer_id) => {
debug!("[state sync] new peer {}", peer_id);
self.peer_manager.enable_peer(peer_id, network_senders[idx].clone());
self.check_progress().await;
}
Event::LostPeer(peer_id) => {
debug!("[state sync] lost peer {}", peer_id);
self.peer_manager.disable_peer(&peer_id);
}
Event::Message((peer_id, mut message)) => self.process_one_message(peer_id, message).await,
_ => warn!("[state sync] unexpected event: {:?}", event),
}
},
Err(err) => { error!("[state sync] network error {}", err); },
}
},
_ = interval.select_next_some() => {
self.check_progress().await;
}
}
}
}
async fn process_one_message(&mut self, peer_id: PeerId, msg: StateSynchronizerMsg) {
match msg {
StateSynchronizerMsg::GetChunkRequest(request) => {
if let Err(err) = self.process_chunk_request(peer_id, *request).await {
error!("[state sync] failed to serve chunk request from {}, local LI version {}: {}", peer_id, self.local_state.highest_local_li.ledger_info().version(), err);
}
}
StateSynchronizerMsg::GetChunkResponse(response) => {
if let Err(err) = self.process_chunk_response(&peer_id, *response).await {
error!(
"[state sync] failed to process chunk response from {}: {}",
peer_id, err
);
counters::APPLY_CHUNK_FAILURE
.with_label_values(&[&*peer_id.to_string()])
.inc();
} else {
self.peer_manager
.update_score(&peer_id, PeerScoreUpdateType::Success);
counters::APPLY_CHUNK_SUCCESS
.with_label_values(&[&*peer_id.to_string()])
.inc();
}
}
}
}
/// Sync up coordinator state with the local storage.
async fn sync_state_with_local_storage(&mut self) -> Result<()> {
let new_state = self.executor_proxy.get_local_storage_state().await?;
if new_state.epoch() > self.local_state.epoch() {
debug!(
"[state sync] Trusted epoch moved from {} to {}",
self.local_state.epoch(),
new_state.epoch()
);
}
self.local_state = new_state;
Ok(())
}
/// In case waypoint is set verify that the local LI has reached the waypoint version.
fn is_initialized(&self) -> bool {
self.waypoint.as_ref().map_or(true, |w| {
w.version() <= self.local_state.highest_local_li.ledger_info().version()
})
}
fn set_initialization_listener(&mut self, cb_sender: oneshot::Sender<Result<()>>) {
if self.is_initialized() {
if cb_sender.send(Ok(())).is_err() {
error!("Error sending initialization notification");
}
} else {
self.initialization_listener = Some(cb_sender);
}
}
/// In case there has been another pending request it's going to be overridden.
/// The caller will be notified about request completion via request.callback oneshot:
/// at that moment it's guaranteed that the highest LI exposed by the storage is equal to the
/// target LI.
/// StateSynchronizer assumes that it's the only one modifying the storage (consensus is not
/// trying to commit transactions concurrently).
async fn request_sync(&mut self, request: SyncRequest) -> Result<()> {
self.sync_state_with_local_storage().await?;
ensure!(
self.is_initialized(),
"[state sync] Sync request but initialization is not complete!"
);
let highest_local_li = self.local_state.highest_local_li.ledger_info();
let target_version = request.target.ledger_info().version();
if target_version == highest_local_li.version() {
return request
.callback
.send(Ok(()))
.map_err(|_| format_err!("Callback error"));
}
if target_version < highest_local_li.version() {
request
.callback
.send(Err(format_err!("Sync request to old version")))
.map_err(|_| format_err!("Callback error"))?;
bail!(
"[state sync] Sync request for version {} < known version {}",
target_version,
highest_local_li.version()
);
}
counters::TARGET_VERSION.set(target_version as i64);
debug!(
"[state sync] sync requested. Known LI: {}, requested_version: {}",
highest_local_li, target_version
);
self.peer_manager
.set_peers(request.target.signatures().keys().copied().collect());
self.sync_request = Some(request);
self.send_chunk_request(
self.local_state.highest_version_in_local_storage(),
self.local_state.epoch(),
)
.await
}
/// The function is called after new txns have been applied to the local storage.
/// As a result it might:
/// 1) help remote subscribers with long poll requests, 2) finish local sync request
async fn process_commit(
&mut self,
transactions: Vec<Transaction>,
commit_callback: Option<oneshot::Sender<Result<CommitResponse>>>,
) -> Result<()> {
// We choose to re-sync the state with the storage as it's the simplest approach:
// in case the performance implications of re-syncing upon every commit are high,
// it's possible to manage some of the highest known versions in memory.
self.sync_state_with_local_storage().await?;
let local_version = self.local_state.highest_version_in_local_storage();
counters::COMMITTED_VERSION.set(local_version as i64);
let block_timestamp_usecs = self
.local_state
.highest_local_li
.ledger_info()
.timestamp_usecs();
// send notif to shared mempool
// filter for user transactions here
let mut committed_user_txns = vec![];
for txn in transactions {
if let Transaction::UserTransaction(signed_txn) = txn {
committed_user_txns.push(CommittedTransaction {
sender: signed_txn.sender(),
sequence_number: signed_txn.sequence_number(),
});
}
}
let (callback, callback_rcv) = oneshot::channel();
let req = CommitNotification {
transactions: committed_user_txns,
block_timestamp_usecs,
callback,
};
let mut mempool_channel = self.state_sync_to_mempool_sender.clone();
let mut msg = "";
if let Err(e) = mempool_channel.try_send(req) {
error!(
"[state sync] failed to send commit notif to shared mempool: {:?}",
e
);
msg = "state sync failed to send commit notif to shared mempool";
}
if let Err(e) = timeout(Duration::from_secs(1), callback_rcv).await {
error!(
"[state sync] did not receive ACK for commit notification sent to mempool: {:?}",
e
);
msg = "state sync did not receive ACK for commit notification sent to mempool";
}
if let Some(cb) = commit_callback {
// send back ACK to consensus
if let Err(e) = cb.send(Ok(CommitResponse {
msg: msg.to_string(),
})) {
error!(
"[state sync] failed to send commit ACK back to consensus: {:?}",
e
);
}
}
self.check_subscriptions().await;
self.peer_manager.remove_requests(local_version);
let sync_request_complete = self.sync_request.as_ref().map_or(false, |sync_req| {
// Each `ChunkResponse` is verified to make sure it never goes beyond the requested
// target version, hence, the local version should never go beyond sync req target.
assert!(local_version <= sync_req.target.ledger_info().version());
sync_req.target.ledger_info().version() == local_version
});
if sync_request_complete {
debug!(
"[state sync] synchronization to {} is finished",
local_version
);
if let Some(sync_request) = self.sync_request.take() {
sync_request
.callback
.send(Ok(()))
.map_err(|_| format_err!("Callback error"))?;
}
}
let initialization_complete = self
.initialization_listener
.as_ref()
.map_or(false, |_| self.is_initialized());
if initialization_complete {
debug!(
"[state sync] Finished initialization to waypoint. Current version: {}",
self.local_state.highest_local_li.ledger_info().version()
);
if let Some(listener) = self.initialization_listener.take() {
listener
.send(Ok(()))
.map_err(|_| format_err!("Error sending initialization notification"))?;
}
}
Ok(())
}
fn get_state(&self, callback: oneshot::Sender<SynchronizerState>) {
if callback.send(self.local_state.clone()).is_err() {
error!("[state sync] failed to send internal state");
}
}
/// There are two types of ChunkRequests:
/// 1) Validator chunk requests are for a specific target LI and don't ask for long polling.
/// 2) FullNode chunk requests don't specify a target LI and can allow long polling.
async fn process_chunk_request(
&mut self,
peer_id: PeerId,
request: GetChunkRequest,
) -> Result<()> {
self.sync_state_with_local_storage().await?;
debug!(
"[state sync] chunk request: peer_id: {}, local li version: {}, req: {}",
peer_id.short_str(),
self.local_state.highest_local_li.ledger_info().version(),
request,
);
let sender = self
.peer_manager
.get_network_sender(&peer_id)
.ok_or_else(|| format_err!("ChunkRequest from unknown peer {}", peer_id.short_str()))?;
match request.target().clone() {
TargetType::TargetLedgerInfo(li) => {
self.process_request_target_li(sender, peer_id, request, li)
.await
}
TargetType::HighestAvailable { timeout_ms } => {
self.process_request_highest_available(sender, peer_id, request, timeout_ms)
.await
}
TargetType::Waypoint(waypoint_version) => {
self.process_request_waypoint(sender, peer_id, request, waypoint_version)
.await
}
}
}
/// Processing requests with a specified target LedgerInfo.
    /// Assumes that the local state is up to date with storage.
async fn process_request_target_li(
&self,
sender: StateSynchronizerSender,
peer_id: PeerId,
request: GetChunkRequest,
target_li: LedgerInfoWithSignatures,
) -> Result<()> {
let limit = std::cmp::min(request.limit, self.config.max_chunk_limit);
let response_li = self
.choose_response_li(
request.known_version,
request.current_epoch,
Some(target_li),
)
.await?;
// In case known_version is lower than the requested ledger info an empty response might be
// sent.
self.deliver_chunk(
peer_id,
request.known_version,
ResponseLedgerInfo::VerifiableLedgerInfo(response_li),
limit,
sender,
)
.await
}
/// Processing requests with no target LedgerInfo (highest available) and potentially long
/// polling.
    /// Assumes that the local state is up to date with storage.
async fn process_request_highest_available(
&mut self,
sender: StateSynchronizerSender,
peer_id: PeerId,
request: GetChunkRequest,
timeout_ms: u64,
) -> Result<()> {
let limit = std::cmp::min(request.limit, self.config.max_chunk_limit);
let timeout = std::cmp::min(timeout_ms, self.config.max_timeout_ms);
let response_li = self
.choose_response_li(request.known_version, request.current_epoch, None)
.await?;
// If there is nothing a node can help with, and the request supports long polling,
// add it to the subscriptions.
if self.local_state.highest_local_li.ledger_info().version() <= request.known_version
&& timeout > 0
{
let expiration_time = SystemTime::now().checked_add(Duration::from_millis(timeout));
if let Some(time) = expiration_time {
let request_info = PendingRequestInfo {
expiration_time: time,
known_version: request.known_version,
request_epoch: request.current_epoch,
limit,
};
self.subscriptions.insert(peer_id, request_info);
}
return Ok(());
}
self.deliver_chunk(
peer_id,
request.known_version,
ResponseLedgerInfo::VerifiableLedgerInfo(response_li),
limit,
sender,
)
.await
}
async fn process_request_waypoint(
&self,
sender: StateSynchronizerSender,
peer_id: PeerId,
request: GetChunkRequest,
waypoint_version: Version,
) -> Result<()> {
let mut limit = std::cmp::min(request.limit, self.config.max_chunk_limit);
ensure!(
self.local_state.highest_local_li.ledger_info().version() >= waypoint_version,
"Local version {} < requested waypoint version {}.",
self.local_state.highest_local_li.ledger_info().version(),
waypoint_version
);
ensure!(
request.known_version < waypoint_version,
"Waypoint request version {} is not smaller than waypoint {}",
request.known_version,
waypoint_version
);
// Retrieve the waypoint LI.
let waypoint_li = self
.executor_proxy
.get_ledger_info(waypoint_version)
.await?;
// Txns are up to the end of request epoch with the proofs relative to the waypoint LI.
let end_of_epoch_li = if waypoint_li.ledger_info().epoch() > request.current_epoch {
Some(
self.executor_proxy
.get_epoch_proof(request.current_epoch, request.current_epoch + 1)
.await?
.ledger_info_with_sigs
.first()
.ok_or_else(|| {
format_err!(
"No end of epoch LedgerInfo found for epoch {}",
request.current_epoch
)
})?
.clone(),
)
} else {
None
};
if let Some(li) = end_of_epoch_li.as_ref() {
let num_txns_until_end_of_epoch = li.ledger_info().version() - request.known_version;
limit = std::cmp::min(limit, num_txns_until_end_of_epoch);
}
self.deliver_chunk(
peer_id,
request.known_version,
ResponseLedgerInfo::LedgerInfoForWaypoint {
waypoint_li,
end_of_epoch_li,
},
limit,
sender,
)
.await
}
/// Generate and send the ChunkResponse to the given peer.
/// The chunk response contains transactions from the local storage with the proofs relative to
/// the given target ledger info.
/// In case target is None, the ledger info is set to the local highest ledger info.
async fn deliver_chunk(
&self,
peer_id: PeerId,
known_version: u64,
response_li: ResponseLedgerInfo,
limit: u64,
mut network_sender: StateSynchronizerSender,
) -> Result<()> {
let txns = self
.executor_proxy
.get_chunk(known_version, limit, response_li.version())
.await?;
let chunk_response = GetChunkResponse::new(response_li, txns);
let msg = StateSynchronizerMsg::GetChunkResponse(Box::new(chunk_response));
if network_sender.send_to(peer_id, msg).is_err() {
error!("[state sync] failed to send p2p message");
}
Ok(())
}
/// The choice of the LedgerInfo in the response follows the following logic:
/// * response LI is either the requested target or the highest local LI if target is None.
/// * if the response LI would not belong to `request_epoch`, change
/// the response LI to the LI that is terminating `request_epoch`.
async fn choose_response_li(
&self,
known_version: u64,
request_epoch: u64,
target: Option<LedgerInfoWithSignatures>,
) -> Result<LedgerInfoWithSignatures> {
let mut target_li = target.unwrap_or_else(|| self.local_state.highest_local_li.clone());
if target_li.ledger_info().epoch() > request_epoch {
let end_of_epoch_li = self
.executor_proxy
.get_epoch_proof(request_epoch, request_epoch + 1)
.await?
.ledger_info_with_sigs
.first()
.ok_or_else(|| {
format_err!(
"[state sync] Fail to retrieve end of epoch LI for epoch {}",
request_epoch
)
})?
.clone();
debug!("[state sync] Chunk response for known_version = {} is limited to the last txn of epoch {} at version {}", known_version, request_epoch, end_of_epoch_li.ledger_info().version());
target_li = end_of_epoch_li;
}
Ok(target_li)
}
/// * Issue a request for the next chunk.
/// * Validate and execute the transactions.
/// * Notify the clients in case a sync request has been completed.
async fn process_chunk_response(
&mut self,
peer_id: &PeerId,
response: GetChunkResponse,
) -> Result<()> {
counters::RESPONSES_RECEIVED
.with_label_values(&[&*peer_id.to_string()])
.inc();
debug!("[state sync] Processing chunk response {}", response);
let txn_list_with_proof = response.txn_list_with_proof.clone();
let known_version = self.local_state.highest_version_in_local_storage();
let chunk_start_version =
txn_list_with_proof
.first_transaction_version
.ok_or_else(|| {
self.peer_manager
.update_score(&peer_id, PeerScoreUpdateType::EmptyChunk);
format_err!("[state sync] Empty chunk from {}", peer_id.short_str())
})?;
if chunk_start_version != known_version + 1 {
// Old / wrong chunk.
self.peer_manager
.update_score(&peer_id, PeerScoreUpdateType::ChunkVersionCannotBeApplied);
bail!(
"[state sync] Non sequential chunk from {}: known_version: {}, received: {}",
peer_id.short_str(),
known_version,
chunk_start_version
);
}
let chunk_size = txn_list_with_proof.len() as u64;
let new_version = known_version + chunk_size;
match response.response_li {
ResponseLedgerInfo::VerifiableLedgerInfo(li) => {
self.process_response_with_verifiable_li(txn_list_with_proof, li)
.await
}
ResponseLedgerInfo::LedgerInfoForWaypoint {
waypoint_li,
end_of_epoch_li,
} => {
self.process_response_with_waypoint_li(
txn_list_with_proof,
waypoint_li,
end_of_epoch_li,
)
.await
}
}
.map_err(|e| {
self.peer_manager
.update_score(peer_id, PeerScoreUpdateType::InvalidChunk);
format_err!("[state sync] failed to apply chunk: {}", e)
})?;
counters::STATE_SYNC_TXN_REPLAYED.inc_by(chunk_size as i64);
debug!(
"[state sync] applied chunk. Previous version: {}, new version: {}, chunk size: {}",
known_version, new_version, chunk_size
);
// The overall chunk processing duration is calculated starting from the very first attempt
// until the commit
if let Some(first_attempt_tst) = self.peer_manager.get_first_request_time(known_version + 1)
{
if let Ok(duration) = SystemTime::now().duration_since(first_attempt_tst) {
counters::SYNC_PROGRESS_DURATION.observe_duration(duration);
}
}
self.process_commit(response.txn_list_with_proof.transactions, None)
.await
}
/// Processing chunk responses that carry a LedgerInfo that should be verified using the
/// current local trusted validator set.
async fn process_response_with_verifiable_li(
&mut self,
txn_list_with_proof: TransactionListWithProof,
response_li: LedgerInfoWithSignatures,
) -> Result<()> {
ensure!(
self.is_initialized(),
"Response with a non-waypoint LI while still not initialized"
);
if let Some(sync_req) = self.sync_request.as_ref() {
// Valid responses should not exceed the LI version of the request.
if sync_req.target.ledger_info().version() < response_li.ledger_info().version() {
bail!(
"[state sync] Response has an LI version {} higher than requested version {}.",
response_li.ledger_info().version(),
sync_req.target.ledger_info().version(),
);
}
}
// Optimistically fetch the next chunk assuming the current chunk is going to be applied
// successfully.
let new_version =
self.local_state.highest_version_in_local_storage() + txn_list_with_proof.len() as u64;
let new_epoch = match response_li.ledger_info().next_validator_set() {
// This LI carries the validator set for the next epoch.
Some(_) => response_li.ledger_info().epoch() + 1,
None => response_li.ledger_info().epoch(),
};
self.send_chunk_request(new_version, new_epoch).await?;
let verifier = VerifierType::TrustedVerifier(self.local_state.trusted_epoch.clone());
verifier.verify(&response_li)?;
self.validate_and_store_chunk(txn_list_with_proof, response_li, None)
.await
}
/// Processing chunk responses that carry a LedgerInfo corresponding to the waypoint.
async fn process_response_with_waypoint_li(
&mut self,
txn_list_with_proof: TransactionListWithProof,
waypoint_li: LedgerInfoWithSignatures,
end_of_epoch_li: Option<LedgerInfoWithSignatures>,
) -> Result<()> {
ensure!(
!self.is_initialized(),
"Response with a waypoint LI but we're already initialized"
);
// Optimistically fetch the next chunk. The next chunk belongs to the next epoch if
// end_of_epoch_li is present.
let new_version =
self.local_state.highest_version_in_local_storage() + txn_list_with_proof.len() as u64;
let new_epoch = end_of_epoch_li
.as_ref()
.map_or(self.local_state.epoch(), |li| li.ledger_info().epoch() + 1);
if new_version < self.waypoint.as_ref().map_or(0, |w| w.version()) {
self.send_chunk_request(new_version, new_epoch).await?;
}
self.waypoint
.as_ref()
.ok_or_else(|| {
format_err!("No waypoint found to process a response with a waypoint LI")
})
.and_then(|w| w.verify(waypoint_li.ledger_info()))?;
self.validate_and_store_chunk(txn_list_with_proof, waypoint_li, end_of_epoch_li)
.await
}
// Assumes that the target LI has been already verified by the caller.
async fn validate_and_store_chunk(
&mut self,
txn_list_with_proof: TransactionListWithProof,
target: LedgerInfoWithSignatures,
intermediate_end_of_epoch_li: Option<LedgerInfoWithSignatures>,
) -> Result<()> {
let target_epoch_and_round = (target.ledger_info().epoch(), target.ledger_info().round());
let local_epoch_and_round = (
self.local_state.highest_local_li.ledger_info().epoch(),
self.local_state.highest_local_li.ledger_info().round(),
);
if target_epoch_and_round < local_epoch_and_round {
warn!(
"Ledger info is too old: local epoch/round: {:?}, epoch/round in request: {:?}.",
local_epoch_and_round, target_epoch_and_round,
);
return Ok(());
}
self.executor_proxy
.execute_chunk(
txn_list_with_proof,
target,
intermediate_end_of_epoch_li,
&mut self.local_state.synced_trees,
&mut self.reconfig_event_subscriptions,
)
.await?;
Ok(())
}
/// Ensures that StateSynchronizer is making progress:
/// issue a new request if too much time passed since requesting highest_committed_version + 1.
async fn check_progress(&mut self) {
if self.peer_manager.is_empty() {
return;
}
if self.role == RoleType::Validator && self.sync_request.is_none() && self.is_initialized()
{
return;
}
let known_version = self.local_state.highest_version_in_local_storage();
let last_request_tst = self
.peer_manager
.get_last_request_time(known_version + 1)
.unwrap_or(UNIX_EPOCH);
// if coordinator didn't make progress by expected time, issue new request
if let Some(tst) = last_request_tst.checked_add(self.retry_timeout) {
if SystemTime::now().duration_since(tst).is_ok() {
self.peer_manager
.process_timeout(known_version + 1, self.role == RoleType::Validator);
if let Err(e) = self
.send_chunk_request(known_version, self.local_state.epoch())
.await
{
error!("[state sync] Failed to send chunk request: {}", e);
}
counters::TIMEOUT.inc();
}
}
}
/// Sends a chunk request with a given `known_version` and `known_epoch`
/// (might be chosen optimistically).
/// The request includes a target for Validator and a non-zero timeout for a FullNode.
async fn send_chunk_request(&mut self, known_version: u64, known_epoch: u64) -> Result<()> {
let (peer_id, mut sender) = self
.peer_manager
.pick_peer()
.ok_or_else(|| format_err!("No peers found for chunk request."))?;
let target = if !self.is_initialized() {
let waypoint_version =
self.waypoint.as_ref().map(|w| w.version()).ok_or_else(|| {
format_err!("No waypoint found but coordinator is not initialized.")
})?;
TargetType::Waypoint(waypoint_version)
} else {
match self.sync_request.as_ref() {
None => TargetType::HighestAvailable {
timeout_ms: self.config.long_poll_timeout_ms,
},
Some(sync_req) => {
if sync_req.target.ledger_info().version() <= known_version {
debug!(
"[state sync] Reached version {}, no need to send more requests",
known_version
);
return Ok(());
}
TargetType::TargetLedgerInfo(sync_req.target.clone())
}
}
};
let req = GetChunkRequest::new(known_version, known_epoch, self.config.chunk_limit, target);
debug!(
"[state sync] request next chunk. peer_id: {}, chunk req: {}",
peer_id.short_str(),
req,
);
let msg = StateSynchronizerMsg::GetChunkRequest(Box::new(req));
self.peer_manager
.process_request(known_version + 1, peer_id);
sender.send_to(peer_id, msg)?;
counters::REQUESTS_SENT
.with_label_values(&[&*peer_id.to_string()])
.inc();
Ok(())
}
async fn deliver_subscription(
&self,
peer_id: PeerId,
sender: StateSynchronizerSender,
request_info: PendingRequestInfo,
) -> Result<()> {
let response_li = self
.choose_response_li(request_info.known_version, request_info.request_epoch, None)
.await?;
self.deliver_chunk(
peer_id,
request_info.known_version,
ResponseLedgerInfo::VerifiableLedgerInfo(response_li),
request_info.limit,
sender,
)
.await
}
/// The function is called after the local storage is updated with new transactions:
/// it might deliver chunks for the subscribers that have been waiting with the long polls.
///
/// Note that it is possible to help the subscribers only with the transactions that match
/// the highest ledger info in the local storage (some committed transactions are ahead of the
/// latest ledger info and are not going to be used for helping the remote subscribers).
/// The function assumes that the local state has been synced with storage.
async fn check_subscriptions(&mut self) {
let highest_li_version = self.local_state.highest_local_li.ledger_info().version();
let mut ready = vec![];
self.subscriptions.retain(|peer_id, request_info| {
// filter out expired peer requests
if SystemTime::now()
.duration_since(request_info.expiration_time.clone())
.is_ok()
{
return false;
}
if request_info.known_version < highest_li_version {
ready.push((*peer_id, request_info.clone()));
false
} else {
true
}
});
let mut futures = FuturesUnordered::new();
for (peer_id, request_info) in ready {
if let Some(sender) = self.peer_manager.get_network_sender(&peer_id) {
futures.push(self.deliver_subscription(peer_id, sender, request_info));
}
}
while let Some(res) = futures.next().await {
if let Err(err) = res {
error!("[state sync] failed to notify subscriber {}", err);
}
}
}
async fn get_epoch_proof(&self, request: EpochRetrievalRequest) {
if request
.callback
.send(
self.executor_proxy
.get_epoch_proof(request.start_epoch, request.end_epoch)
.await,
)
.is_err()
{
error!("[state sync] coordinator failed to send back epoch proof");
}
}
}
|
start
|
initable.rs
|
// Take a look at the license at the top of the repository in the LICENSE file.
use glib::object::Cast;
use glib::translate::*;
use glib::Error;
use glib::subclass::prelude::*;
use std::ptr;
use crate::Cancellable;
use crate::Initable;
pub trait InitableImpl: ObjectImpl {
fn init(&self, initable: &Self::Type, cancellable: Option<&Cancellable>) -> Result<(), Error>;
}
pub trait InitableImplExt: ObjectSubclass {
fn parent_init(
&self,
initable: &Self::Type,
cancellable: Option<&Cancellable>,
) -> Result<(), Error>;
}
impl<T: InitableImpl> InitableImplExt for T {
fn parent_init(
&self,
initable: &Self::Type,
cancellable: Option<&Cancellable>,
) -> Result<(), Error> {
unsafe {
let type_data = Self::type_data();
let parent_iface =
type_data.as_ref().parent_interface::<Initable>() as *const ffi::GInitableIface;
let func = (*parent_iface)
.init
.expect("no parent \"init\" implementation");
let mut err = ptr::null_mut();
func(
initable.unsafe_cast_ref::<Initable>().to_glib_none().0,
cancellable.to_glib_none().0,
&mut err,
);
if err.is_null() {
Ok(())
} else {
Err(from_glib_full(err))
}
}
}
}
unsafe impl<T: InitableImpl> IsImplementable<T> for Initable {
fn interface_init(iface: &mut glib::Interface<Self>) {
let iface = iface.as_mut();
iface.init = Some(initable_init::<T>);
}
fn instance_init(_instance: &mut glib::subclass::InitializingObject<T>) {}
}
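// C trampoline installed as `GInitableIface.init`: it forwards the call to the Rust
// `InitableImpl::init` implementation and converts the returned `Result` into a
// gboolean, filling `error` on failure.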
unsafe extern "C" fn initable_init<T: InitableImpl>(
initable: *mut ffi::GInitable,
cancellable: *mut ffi::GCancellable,
error: *mut *mut glib::ffi::GError,
) -> glib::ffi::gboolean {
let instance = &*(initable as *mut T::Instance);
let imp = instance.impl_();
match imp.init(
from_glib_borrow::<_, Initable>(initable).unsafe_cast_ref(),
Option::<Cancellable>::from_glib_borrow(cancellable)
.as_ref()
.as_ref(),
) {
Ok(()) => glib::ffi::GTRUE,
Err(e) => {
if !error.is_null() {
*error = e.into_raw();
}
glib::ffi::GFALSE
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::prelude::*;
use crate::traits::InitableExt;
use crate::{Cancellable, Initable};
pub mod imp {
use super::*;
use crate::Cancellable;
use crate::Initable;
use std::cell::Cell;
pub struct InitableTestType(pub Cell<u64>);
#[glib::object_subclass]
impl ObjectSubclass for InitableTestType {
const NAME: &'static str = "InitableTestType";
type Type = super::InitableTestType;
type ParentType = glib::Object;
type Interfaces = (Initable,);
fn new() -> Self {
Self(Cell::new(0))
}
}
impl InitableImpl for InitableTestType {
fn init(
&self,
initable: &Self::Type,
_cancellable: Option<&Cancellable>,
) -> Result<(), glib::Error> {
InitableTestType::from_instance(initable)
.0
.set(0x123456789abcdef);
Ok(())
}
}
impl ObjectImpl for InitableTestType {}
}
pub mod ffi {
use super::*;
pub type InitableTestType = <imp::InitableTestType as ObjectSubclass>::Instance;
#[no_mangle]
pub unsafe extern "C" fn initable_test_type_get_type() -> glib::ffi::GType {
imp::InitableTestType::type_().into_glib()
}
#[no_mangle]
pub unsafe extern "C" fn initable_test_type_get_value(this: *mut InitableTestType) -> u64 {
let this = super::InitableTestType::from_glib_borrow(this);
imp::InitableTestType::from_instance(&this).0.get()
}
}
glib::wrapper! {
pub struct InitableTestType(ObjectSubclass<imp::InitableTestType>)
@implements Initable;
}
#[allow(clippy::new_without_default)]
impl InitableTestType {
pub fn new() -> Self {
Initable::new(&[], Option::<&Cancellable>::None)
.expect("Failed creation/initialization of InitableTestType object")
}
pub fn new_uninit() -> Self {
// This creates an uninitialized InitableTestType object, for testing
// purposes. In real code, using Initable::new (like the new() method
// does) is recommended.
glib::Object::new(&[]).expect("Failed creation of InitableTestType object")
}
pub fn value(&self) -> u64
|
}
#[test]
fn test_initable_with_init() {
let test = InitableTestType::new_uninit();
assert_ne!(0x123456789abcdef, test.value());
let result = unsafe { test.init(Option::<&Cancellable>::None) };
assert!(result.is_ok());
assert_eq!(0x123456789abcdef, test.value());
}
#[test]
fn test_initable_with_initable_new() {
let test = InitableTestType::new();
assert_eq!(0x123456789abcdef, test.value());
}
#[test]
fn test_initable_new_failure() {
let value: u32 = 2;
match Initable::new::<InitableTestType, Cancellable>(
&[("invalid-property", &value)],
Option::<&Cancellable>::None,
) {
Err(InitableError::NewObjectFailed(_)) => (),
v => panic!("expected InitableError::NewObjectFailed, got {:?}", v),
}
}
#[test]
fn test_initable_through_ffi() {
unsafe {
let test = InitableTestType::new_uninit();
let test: *mut ffi::InitableTestType = test.as_ptr();
let mut error: *mut glib::ffi::GError = std::ptr::null_mut();
assert_ne!(0x123456789abcdef, ffi::initable_test_type_get_value(test));
let result = crate::ffi::g_initable_init(
test as *mut crate::ffi::GInitable,
std::ptr::null_mut(),
&mut error,
);
assert_eq!(glib::ffi::GTRUE, result);
assert_eq!(error, ptr::null_mut());
assert_eq!(0x123456789abcdef, ffi::initable_test_type_get_value(test));
}
}
}
|
{
imp::InitableTestType::from_instance(self).0.get()
}
|
Display.ts
|
/*
* Copyright (C) 2019-2021 HERE Europe B.V.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* SPDX-License-Identifier: Apache-2.0
* License-Filename: LICENSE
*/
import Renderer from './Canvas';
import DisplayTilePool from './CanvasTileBucket';
import BasicDisplay from '../BasicDisplay';
import {layers} from '@here/xyz-maps-core';
import CanvasTile from './CanvasTile';
import CanvasRenderer from './Canvas';
type TileLayer = layers.TileLayer;
let DISPLAY_CFG_PR = {
'1': [
512, // DISPLAY_TILE_CACHE_SIZE
3, // PREVIEW_LOOK_AHEAD_LEVELS
512 // DEFAULT_CTX_COUNT
],
'2': [
128,
2,
128
],
'3': [
64,
1,
128
]
};
const DEFAULT_TILE_SIZE = 256;
const PRIORITY_RENDER_TASK = 3;
const PRIORITY_GROUP_TASK = 4;
let UNDEF;
const rotate = (x: number, y: number, originX: number, originY: number, alpha: number): [number, number] => {
const sin = Math.sin(alpha);
const cos = Math.cos(alpha);
const dx = x - originX;
const dy = y - originY;
return [
cos * dx - sin * dy + originX,
sin * dx + cos * dy + originY
];
};
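// rotate() is shared by project() and unproject() below: rotating by -alpha about the same
// origin undoes a previous rotation by alpha, which is how unproject inverts project.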
class
|
{
features = [];
styles = [];
add(feature, style) {
this.features.push(feature);
this.styles.push(style);
}
}
class CanvasDisplay extends BasicDisplay {
static zoomBehavior:'fixed'|'float' = 'fixed';
buckets: DisplayTilePool;
render: CanvasRenderer;
constructor(mapEl, tileSize, devicePixelRatio, renderOptions?: {}) {
tileSize = tileSize || DEFAULT_TILE_SIZE;
devicePixelRatio = BasicDisplay.getPixelRatio(devicePixelRatio) ^ 0;
const PREVIEW_LOOK_AHEAD_LEVELS = DISPLAY_CFG_PR[devicePixelRatio][1];
// tileSize, devicePixelRatio, layerSetup, mapInternal
const tileRenderer = new Renderer(tileSize * devicePixelRatio, devicePixelRatio);
const DISPLAY_TILE_CACHE_SIZE = DISPLAY_CFG_PR[devicePixelRatio][0];
const buckets = new DisplayTilePool(DISPLAY_TILE_CACHE_SIZE, tileSize * devicePixelRatio);
tileRenderer.setBuckets(buckets);
super(mapEl, tileSize, devicePixelRatio, buckets, tileRenderer, PREVIEW_LOOK_AHEAD_LEVELS);
tileRenderer.init(this.canvas);
};
preview(displayTile: CanvasTile, layer: TileLayer, index: number): any[][] {
const previewData = super.preview(displayTile, layer, index);
if (previewData) {
this.render.preview(displayTile, previewData, layer, index);
}
return previewData;
}
prepareTile(tile, data, layer: TileLayer, dTile: CanvasTile, onDone) {
let display = this;
let renderer = display.render;
if (tile.type == 'image') {
const index = dTile.index(layer);
dTile.dirty(index, data);
onDone(dTile, layer);
} else if (!data.length) {
const index = dTile.index(layer);
// make sure there is no data from previous states left that needs to be cleared!
dTile.destroy(index);
dTile.dirty(index); // displayTile._c = false;
onDone(dTile, layer);
} else {
dTile.addTask(
display.cluster.spawn(PRIORITY_GROUP_TASK, layer, tile, data, {}, RenderBucket, (INSTRUCTIONS, task) => {
dTile.removeTask(task, layer);
if (dTile == null) {
debugger;
dTile = UNDEF;
}
dTile.addTask(renderer.prepare(INSTRUCTIONS, tile, layer, display, dTile, (canvas, task) => {
// tileMultiCanvas.ready = true;
dTile.removeTask(task, layer);
// if (onDone) {
// clear preview to enable preview creation for next render iteration
// dTile.p[dTile.index(layer)] = false;
onDone(dTile, layer);
// }
}), layer);
})
, layer);
}
}
protected viewport(dirty?: boolean) {
const display = this;
const tiles = display.tiles;
const render = display.render;
const layers = display.layers;
const layerCount = layers.length;
const bucket = this.buckets;
let screenTile;
let dTile;
let lastTileUpdateTs;
if (this.dirty || dirty) {
this.render.clear();
this.dirty = false;
}
for (let tileSize in tiles) {
if (tileSize == '512') continue;
const vpTiles = tiles[tileSize];
const length = vpTiles.length;
for (let screenTile of vpTiles) {
dTile = screenTile.tile;
lastTileUpdateTs = dTile.luTs;
if (screenTile.lrTs != lastTileUpdateTs || dirty) {
screenTile.lrTs = lastTileUpdateTs;
render.tile(dTile, screenTile.x, screenTile.y);
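                    // a layer is flagged "ready" once all of its visible tiles have been rendered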
for (var l = 0, layer; l < layerCount; l++) {
layer = layers[l]; // {ready: false, layer: Layer, cnt: 0, visible: true}
if (
!layer.ready && dTile.ready(l) &&
++layer.cnt == length
) {
layer.ready = true;
// layers[l].__C_RENDER && layers[l].__C_RENDER(tileRenderer.ctx,mapscale,rotationZ, display.setTransform );
}
}
}
// }
// i++;
}
}
}
addLayer(layer: TileLayer, styles, index) {
        // Workaround: the canvas display only supports 256 pixel tile rendering
layer.tileSize = 256;
const added = super.addLayer(layer, styles, index);
if (added) {
this.setupTilePool();
}
return added;
}
removeLayer(layer: TileLayer) {
const index = super.removeLayer(layer);
if (index !== -1) {
this.setupTilePool();
}
return index;
}
setSize(w: number, h: number) {
const display = this;
super.setSize(w, h);
display.setupTilePool();
// force render to re-apply transforms because changing canvas size clears the context.
// force update although scale/rotation did not change.
// @ts-ignore
display.render._s = UNDEF;
display.setTransform(display.s, display.rz, display.rx);
}
destroy() {
super.destroy();
this.buckets.clear();
}
// canvas impl only
setupTilePool() {
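        // Size the tile cache and context pool to cover the viewport (plus one extra row and
        // column for partially visible tiles), but never below the per-pixel-ratio minimums
        // defined in DISPLAY_CFG_PR.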
let display = this;
const w = Math.ceil(display.w / DEFAULT_TILE_SIZE) + 1;
const h = Math.ceil(display.h / DEFAULT_TILE_SIZE) + 1;
const size = w * h;
let tiles = size;
const deviceSetup = DISPLAY_CFG_PR[display.dpr];
let ctxLength = size * display.getLayers().length;
if (tiles < deviceSetup[0]) {
tiles = deviceSetup[0];
}
display.buckets.setSize(tiles);
if (ctxLength < deviceSetup[2]) {
ctxLength = deviceSetup[2];
}
display.buckets.ctxCache.max = ctxLength;
}
update(dirty ?: boolean) {
// dirty flag can be ignored for canvas because of tile blit.
// the tile canvas is already updated and also acting as screen clearer.
super.update();
}
project(x: number, y: number): [number, number] {
const displ = this;
const scale = displ.s;
// apply scale
x = x * scale;
y = y * scale;
// apply current rotation
const pixel = rotate(x, y, displ.w / 2, displ.h / 2, displ.rz);
return pixel;
}
unproject(x: number, y: number): [number, number] {
let displ = this;
let scale = displ.s;
const cx = displ.w / 2;
const cy = displ.h / 2;
let p = rotate(x, y, cx, cy, -displ.rz);
p[0] = (p[0] - cx) / scale + cx;
p[1] = (p[1] - cy) / scale + cy;
return p;
// const s = this.s;
// const rz = -this.rz;
// const sin = Math.sin(rz);
// const cos = Math.cos(rz);
// const cx = this.w / 2;
// const cy = this.h / 2;
// const dx = x - cx;
// const dy = y - cy;
//
// return [
// (((cos * dx - sin * dy + cx) - cx) / s) + cx,
// (((sin * dx + cos * dy + cy) - cy) / s) + cy
// ];
}
// unproject(x: number, y: number, screenOffsetX?: number, screenOffsetY?: number): [number, number] {
// let displ = this;
// let scale = displ.s;
// let p = rotate(x, y, displ.w / 2, displ.h / 2, -displ.rz);
//
// p[0] /= scale;
// p[1] /= scale;
//
// p[0] -= screenOffsetX;
// p[1] -= screenOffsetY;
//
// return p;
// }
}
export default CanvasDisplay;
|
RenderBucket
|
xcresulttool.go
|
package xcresult3
import (
"encoding/json"
"fmt"
"os/exec"
"github.com/bitrise-io/go-utils/command"
"github.com/bitrise-io/go-utils/errorutil"
)
func
|
() bool {
if _, err := exec.LookPath("xcrun"); err != nil {
return false
}
return command.New("xcrun", "--find", "xcresulttool").Run() == nil
}
// xcresulttoolGet performs xcrun xcresulttool get with --id flag defined if id provided and marshals the output into v.
func xcresulttoolGet(xcresultPth, id string, v interface{}) error {
args := []string{"xcresulttool", "get", "--format", "json", "--path", xcresultPth}
if id != "" {
args = append(args, "--id", id)
}
cmd := command.New("xcrun", args...)
out, err := cmd.RunAndReturnTrimmedCombinedOutput()
if err != nil {
if errorutil.IsExitStatusError(err) {
return fmt.Errorf("%s failed: %s", cmd.PrintableCommandArgs(), out)
}
return fmt.Errorf("%s failed: %s", cmd.PrintableCommandArgs(), err)
}
if err := json.Unmarshal([]byte(out), v); err != nil {
return err
}
return nil
}
// xcresulttoolExport exports a file with the given id at the given output path.
func xcresulttoolExport(xcresultPth, id, outputPth string) error {
args := []string{"xcresulttool", "export", "--path", xcresultPth, "--id", id, "--output-path", outputPth, "--type", "file"}
cmd := command.New("xcrun", args...)
out, err := cmd.RunAndReturnTrimmedCombinedOutput()
if err != nil {
if errorutil.IsExitStatusError(err) {
return fmt.Errorf("%s failed: %s", cmd.PrintableCommandArgs(), out)
}
return fmt.Errorf("%s failed: %s", cmd.PrintableCommandArgs(), err)
}
return nil
}
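// --- Illustrative usage sketch (not part of the original file; the path below is made up). ---
// Shows how xcresulttoolGet might be called to read the root object of an .xcresult bundle
// into a generic map: an empty id omits the --id flag, which returns the root record.
func exampleReadRoot() (map[string]interface{}, error) {
	var root map[string]interface{}
	if err := xcresulttoolGet("/path/to/Test.xcresult", "", &root); err != nil {
		return nil, err
	}
	return root, nil
}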
|
isXcresulttoolAvailable
|
model.rs
|
use nalgebra_glm as glm;
use vk_tracer::{
ash::vk::ShaderStageFlags,
prelude::*,
shaderc::{OptimizationLevel, ShaderKind},
utils::{Camera, FpsLimiter, ShaderCompiler},
};
use winit::{
event::{ElementState, Event, KeyboardInput, VirtualKeyCode, WindowEvent},
event_loop::{ControlFlow, EventLoop},
window::WindowBuilder,
};
fn main() -> anyhow::Result<()> {
env_logger::init();
// Compile shaders
let (vertex_shader, fragment_shader) = {
let mut compiler = ShaderCompiler::new()?;
compiler.set_optimization_level(OptimizationLevel::Performance);
(
compiler.compile_and_return_file(
"vk_tracer/examples/shaders/model.vert.glsl".into(),
ShaderKind::Vertex,
"main",
)?,
compiler.compile_and_return_file(
"vk_tracer/examples/shaders/model.frag.glsl".into(),
ShaderKind::Fragment,
"main",
)?,
)
};
let event_loop = EventLoop::new();
let window = WindowBuilder::new()
.with_title("API Mockup")
.with_resizable(true)
.build(&event_loop)?;
let mut graphics = VkTracerApp::builder()
.pick_best_physical_device()
.with_app_info("API Mockup".into(), (1, 0, 0))
.with_debug_utils()
.build(Some((&window, window.inner_size().into())))?;
//
// let (gltf, buffers, textures) = gltf::import("vk_tracer/examples/models/cube.gltf")?;
//
// for mesh in gltf.meshes() {
// debug!("Mesh {}:", mesh.index());
// for primitive in mesh.primitives() {
// debug!(" - Primitive {}:", primitive.index());
// for (sem, accessor) in primitive.attributes() {
// debug!(" - Attribute: {:?}", sem);
// debug!(" Type: {:?}", accessor.data_type());
// debug!(" Count: {}, Size: {}, Offset: {}", accessor.count(), accessor.size(), accessor.offset());
// }
//
// debug!(" - Vertex positions:");
// let reader = primitive.reader(|buffer| Some(&buffers[buffer.index()]));
// reader.read_positions().unwrap()
// .for_each(|pos| debug!(" - {:?}", pos));
//
// debug!(" - Indices:");
// let reader = primitive.reader(|buffer| Some(&buffers[buffer.index()]));
// reader.read_indices().unwrap().into_u32().for_each(|index| debug!(" {}", index));
// }
// }
let swapchain = graphics.create_swapchain_with_surface()?;
let suzanne =
graphics.load_first_mesh::<VertexXyzUvNorm>("vk_tracer/examples/models/suzanne.glb")?;
#[derive(Copy, Clone, Uniform)]
struct CameraUbo {
mvp: glsl_layout::mat4,
light_position: glsl_layout::vec3,
}
let mut camera = Camera::new_perspective(glm::vec3(5.0, 4.0, 4.0), glm::zero(), 1.0, 70.0);
camera.aspect_auto(window.inner_size().into());
fn get_camera_ubo(camera: &Camera) -> CameraUbo {
CameraUbo {
mvp: camera.compute_mvp(&glm::identity()).into(),
light_position: glm::vec3(-7.0, 5.0, 5.0).into(),
}
}
let camera_ubo = graphics.create_ubo([get_camera_ubo(&camera).std140()])?;
let swapchain_images = graphics.get_images_from_swapchain(swapchain)?;
let depth_image = graphics.create_depth_texture(swapchain)?;
let render_plan = graphics
.new_render_plan()
.add_subpass(
SubpassBuilder::new()
.graphics()
.color_attachments([0])
.depth_stencil_attachment(1),
Some(
SubpassDependency::builder()
.src_subpass(SUBPASS_EXTERNAL)
.dst_subpass(0)
.src_stage_mask(
PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT
| PipelineStageFlags::EARLY_FRAGMENT_TESTS,
)
.src_access_mask(AccessFlags::empty())
.dst_stage_mask(
PipelineStageFlags::COLOR_ATTACHMENT_OUTPUT
| PipelineStageFlags::EARLY_FRAGMENT_TESTS,
)
.dst_access_mask(
AccessFlags::COLOR_ATTACHMENT_WRITE
| AccessFlags::DEPTH_STENCIL_ATTACHMENT_WRITE,
)
.build(),
),
)
.add_color_attachment_present(swapchain_images[0])?
.set_clear_color(0, [0.1, 0.1, 0.2, 1.0])
.add_depth_attachment(depth_image)?
.set_clear_depth_stencil(1, 1.0, 0)
.build()?;
let render_targets = swapchain_images
.into_iter()
.map(|image| graphics.allocate_render_target(render_plan, &[image, depth_image]))
.collect::<Result<Vec<_>>>()?;
let descriptor_set = graphics
.new_descriptor_sets()
.new_set(
DescriptorSetBuilder::new()
.ubo(0, ShaderStageFlags::VERTEX | ShaderStageFlags::FRAGMENT),
)
.build()?[0];
graphics.write_descriptor_set_ubo(descriptor_set, 0, camera_ubo)?;
let pipeline = graphics.create_forward_pipeline(
render_plan,
0,
&[descriptor_set],
vertex_shader,
fragment_shader,
suzanne,
)?;
let renderers = render_targets
.iter()
.copied()
|
.build()
})
.collect::<Result<Vec<_>>>()?;
let mut fps_limiter = FpsLimiter::new(60.0);
event_loop.run(move |event, _, control| {
*control = ControlFlow::Poll;
if fps_limiter.should_render() {
fps_limiter.new_frame();
let (render_target_index, should_recreate_swapchain) = graphics
.get_next_swapchain_render_target_index(swapchain)
.unwrap();
let should_recreate_swapchain = graphics
.render_and_present(
renderers[render_target_index as usize],
swapchain,
render_target_index,
)
.unwrap()
|| should_recreate_swapchain;
if should_recreate_swapchain {
recreate_swapchain(
&mut graphics,
window.inner_size().into(),
swapchain,
render_plan,
&render_targets,
&renderers,
)
.unwrap();
}
}
match event {
Event::WindowEvent {
event: WindowEvent::CloseRequested,
..
} => *control = ControlFlow::Exit,
Event::WindowEvent {
event:
WindowEvent::KeyboardInput {
input:
KeyboardInput {
state: ElementState::Pressed,
virtual_keycode: Some(VirtualKeyCode::Escape),
..
},
..
},
..
} => *control = ControlFlow::Exit,
Event::WindowEvent {
event: WindowEvent::Resized(new_size),
..
} => {
recreate_swapchain(
&mut graphics,
new_size.into(),
swapchain,
render_plan,
&render_targets,
&renderers,
)
.unwrap();
camera.aspect_auto(window.inner_size().into());
graphics
.update_ubo(camera_ubo, [get_camera_ubo(&camera).std140()])
.unwrap();
}
_ => (),
}
})
}
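/// Rebuilds every swapchain-dependent resource (swapchain, render targets and
/// renderers) after a window resize or when presentation reports the swapchain
/// as out of date.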
fn recreate_swapchain(
graphics: &mut VkTracerApp,
new_size: (u32, u32),
swapchain: SwapchainHandle,
render_plan: RenderPlanHandle,
render_targets: &[RenderTargetHandle],
renderers: &[RendererHandle],
) -> anyhow::Result<()> {
graphics.recreate_swapchain(swapchain, new_size)?;
let swapchain_images = graphics.get_images_from_swapchain(swapchain)?;
for (render_target, image) in render_targets.iter().zip(swapchain_images.into_iter()) {
graphics.recreate_render_target(render_plan, new_size, *render_target, [image])?;
}
for (renderer, render_target) in renderers
.iter()
.copied()
.zip(render_targets.iter().copied())
{
graphics.recreate_renderer(renderer, render_target)?;
}
Ok(())
}
|
.map(|render_target| {
graphics
.new_renderer_from_plan(render_plan, render_target)
.execute_pipeline(pipeline.into())
|
wallet_test.go
|
package wallet
import (
"testing"
"time"
)
// TestLocateBirthdayBlock ensures we can properly map a block in the chain to a
// timestamp.
func TestLocateBirthdayBlock(t *testing.T)
|
{
t.Parallel()
// We'll use test chains of 30 blocks with a duration between two
// consecutive blocks being slightly greater than the largest margin
// allowed by locateBirthdayBlock. Doing so lets us test the method more
// effectively as there is only one block within the chain that can map
// to a timestamp (this does not apply to the first and last blocks,
// which can map to many timestamps beyond either end of chain).
const (
numBlocks = 30
blockInterval = birthdayBlockDelta + 1
)
genesisTimestamp := chainParams.GenesisBlock.Header.Timestamp
testCases := []struct {
name string
birthday time.Time
birthdayHeight int32
}{
{
name: "left-right-left-left",
birthday: genesisTimestamp.Add(8 * blockInterval),
birthdayHeight: 8,
},
{
name: "right-right-right-left",
birthday: genesisTimestamp.Add(27 * blockInterval),
birthdayHeight: 27,
},
{
name: "before start height",
birthday: genesisTimestamp.Add(-blockInterval),
birthdayHeight: 0,
},
{
name: "start height",
birthday: genesisTimestamp,
birthdayHeight: 0,
},
{
name: "end height",
birthday: genesisTimestamp.Add(numBlocks * blockInterval),
birthdayHeight: numBlocks - 1,
},
{
name: "after end height",
birthday: genesisTimestamp.Add(2 * numBlocks * blockInterval),
birthdayHeight: numBlocks - 1,
},
}
for _, testCase := range testCases {
success := t.Run(testCase.name, func(t *testing.T) {
chainConn := createMockChainConn(
chainParams.GenesisBlock, numBlocks, blockInterval,
)
birthdayBlock, err := locateBirthdayBlock(
chainConn, testCase.birthday,
)
if err != nil {
t.Fatalf("unable to locate birthday block: %v",
err)
}
if birthdayBlock.Height != testCase.birthdayHeight {
t.Fatalf("expected birthday block with height "+
"%d, got %d", testCase.birthdayHeight,
birthdayBlock.Height)
}
})
if !success {
break
}
}
}
|
|
tests.rs
|
// This file is part of Metaverse.Network & Bit.Country.
// Copyright (C) 2020-2022 Metaverse.Network & Bit.Country .
// SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg(test)]
use frame_support::{assert_err, assert_noop, assert_ok};
use sp_runtime::traits::BadOrigin;
use mock::{Event, *};
use super::*;
#[test]
fn mint_land_should_reject_non_root() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::mint_land(Origin::signed(ALICE), BENEFICIARY_ID, METAVERSE_ID, COORDINATE_IN_1),
BadOrigin
);
});
}
#[test]
fn mint_land_should_work_with_one_coordinate() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_1
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::NewLandUnitMinted(
OWNER_LAND_ASSET_ID,
METAVERSE_ID,
COORDINATE_IN_1,
))
);
assert_eq!(EstateModule::all_land_units_count(), 1);
});
}
#[test]
fn mint_land_token_should_work_have_correct_owner() {
ExtBuilder::default().build().execute_with(|| {
assert_eq!(EstateModule::get_land_units(METAVERSE_ID, COORDINATE_IN_1), None);
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_1
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::NewLandUnitMinted(
OWNER_LAND_ASSET_ID,
METAVERSE_ID,
COORDINATE_IN_1,
))
);
assert_eq!(EstateModule::all_land_units_count(), 1);
assert_eq!(
EstateModule::get_land_units(METAVERSE_ID, COORDINATE_IN_1),
Some(OWNER_LAND_ASSET_ID)
);
});
}
#[test]
fn mint_land_should_reject_with_duplicate_coordinates() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_1
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::NewLandUnitMinted(
OWNER_LAND_ASSET_ID,
METAVERSE_ID,
COORDINATE_IN_1,
))
);
assert_eq!(EstateModule::all_land_units_count(), 1);
assert_noop!(
EstateModule::mint_land(Origin::root(), BENEFICIARY_ID, METAVERSE_ID, COORDINATE_IN_1),
Error::<Runtime>::LandUnitIsNotAvailable
);
});
}
#[test]
fn mint_lands_should_reject_with_duplicate_coordinates() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_lands(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::NewLandsMinted(
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2],
))
);
assert_eq!(EstateModule::all_land_units_count(), 2);
assert_noop!(
EstateModule::mint_lands(Origin::root(), BENEFICIARY_ID, METAVERSE_ID, vec![COORDINATE_IN_1]),
Error::<Runtime>::LandUnitIsNotAvailable
);
});
}
#[test]
fn mint_land_should_work_with_different_coordinate() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_1
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::NewLandUnitMinted(
OWNER_LAND_ASSET_ID,
METAVERSE_ID,
COORDINATE_IN_1,
))
);
assert_eq!(EstateModule::all_land_units_count(), 1);
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_2
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::NewLandUnitMinted(
OWNER_LAND_ASSET_ID,
METAVERSE_ID,
COORDINATE_IN_2,
))
);
assert_eq!(EstateModule::all_land_units_count(), 2);
});
}
#[test]
fn mint_lands_should_reject_non_root() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::mint_lands(
Origin::signed(ALICE),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
),
BadOrigin
);
});
}
#[test]
fn mint_lands_should_work_with_one_coordinate() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_lands(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1]
));
assert_eq!(
EstateModule::get_user_land_units(&BENEFICIARY_ID, &METAVERSE_ID).len(),
1
);
assert_eq!(
last_event(),
Event::Estate(crate::Event::NewLandsMinted(
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1],
))
);
assert_eq!(EstateModule::all_land_units_count(), 1);
});
}
#[test]
fn mint_lands_should_work_with_more_than_one_coordinate() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_lands(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::NewLandsMinted(
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2],
))
);
assert_eq!(EstateModule::all_land_units_count(), 2);
});
}
#[test]
fn transfer_land_token_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_1
));
assert_eq!(
EstateModule::get_land_units(METAVERSE_ID, COORDINATE_IN_1),
Some(OWNER_LAND_ASSET_ID)
);
assert_ok!(EstateModule::transfer_land(
Origin::signed(BENEFICIARY_ID),
ALICE,
METAVERSE_ID,
COORDINATE_IN_1
));
assert_eq!(
EstateModule::get_land_units(METAVERSE_ID, COORDINATE_IN_1),
Some(OWNER_LAND_ASSET_ID)
);
assert_eq!(
last_event(),
Event::Estate(crate::Event::TransferredLandUnit(
METAVERSE_ID,
COORDINATE_IN_1,
BENEFICIARY_ID,
ALICE,
))
);
});
}
#[test]
fn transfer_land_should_reject_no_permission() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_1
));
assert_eq!(
EstateModule::get_land_units(METAVERSE_ID, COORDINATE_IN_1),
Some(OWNER_LAND_ASSET_ID)
);
assert_noop!(
EstateModule::transfer_land(Origin::signed(BOB), ALICE, METAVERSE_ID, COORDINATE_IN_1),
Error::<Runtime>::NoPermission
);
});
}
#[test]
fn transfer_land_should_do_fail_for_same_account() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_1
));
assert_eq!(
EstateModule::get_land_units(METAVERSE_ID, COORDINATE_IN_1),
Some(OWNER_LAND_ASSET_ID)
);
assert_noop!(
EstateModule::transfer_land(
Origin::signed(BENEFICIARY_ID),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_1
),
Error::<Runtime>::AlreadyOwnTheLandUnit
);
assert_eq!(
EstateModule::get_land_units(METAVERSE_ID, COORDINATE_IN_1),
Some(OWNER_LAND_ASSET_ID)
);
});
}
#[test]
fn transfer_land_should_do_fail_for_already_in_auction() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::transfer_land(
Origin::signed(BENEFICIARY_ID),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_AUCTION
),
Error::<Runtime>::LandUnitAlreadyInAuction
);
});
}
#[test]
fn mint_estate_should_reject_non_root() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::mint_estate(
Origin::signed(ALICE),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
),
BadOrigin
);
});
}
#[test]
fn mint_estate_should_fail_for_minted_land() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_1
));
assert_err!(
EstateModule::mint_estate(Origin::root(), BENEFICIARY_ID, METAVERSE_ID, vec![COORDINATE_IN_1]),
Error::<Runtime>::LandUnitIsNotAvailable
);
});
}
#[test]
fn dissolve_estate_should_work() {
ExtBuilder::default().build().execute_with(|| {
// Mint estate
assert_ok!(EstateModule::mint_estate(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
let estate_id: u64 = 0;
assert_eq!(EstateModule::all_estates_count(), 1);
assert_eq!(EstateModule::next_estate_id(), 1);
assert_eq!(
EstateModule::get_estates(estate_id),
Some(EstateInfo {
metaverse_id: METAVERSE_ID,
land_units: vec![COORDINATE_IN_1, COORDINATE_IN_2]
})
);
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
assert_eq!(
EstateModule::get_user_land_units(&BENEFICIARY_ID, &METAVERSE_ID).len(),
2
);
// Destroy estate
assert_ok!(EstateModule::dissolve_estate(Origin::signed(BENEFICIARY_ID), estate_id,));
assert_eq!(EstateModule::all_estates_count(), 0);
assert_eq!(EstateModule::get_estates(estate_id), None);
assert_eq!(EstateModule::get_estate_owner(estate_id), None);
assert_eq!(
EstateModule::get_user_land_units(&BENEFICIARY_ID, &METAVERSE_ID).len(),
2
);
});
}
#[test]
fn dissolve_estate_should_reject_non_owner() {
ExtBuilder::default().build().execute_with(|| {
// Mint estate
assert_ok!(EstateModule::mint_estate(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
assert_err!(
EstateModule::dissolve_estate(Origin::signed(ALICE), 0),
Error::<Runtime>::NoPermission
);
});
}
#[test]
fn add_land_unit_to_estate_should_reject_non_owner() {
ExtBuilder::default().build().execute_with(|| {
// Mint estate
assert_ok!(EstateModule::mint_estate(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_2]
));
assert_err!(
EstateModule::add_land_unit_to_estate(Origin::signed(ALICE), 0, vec![COORDINATE_IN_2]),
Error::<Runtime>::NoPermission
);
});
}
#[test]
fn add_land_unit_to_estate_should_work() {
ExtBuilder::default().build().execute_with(|| {
// Mint estate
assert_ok!(EstateModule::mint_estate(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1]
));
let estate_id: u64 = 0;
assert_eq!(EstateModule::all_estates_count(), 1);
assert_eq!(EstateModule::next_estate_id(), 1);
assert_eq!(
EstateModule::get_estates(estate_id),
Some(EstateInfo {
metaverse_id: METAVERSE_ID,
land_units: vec![COORDINATE_IN_1]
})
);
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
assert_eq!(
EstateModule::get_user_land_units(&BENEFICIARY_ID, &METAVERSE_ID).len(),
1
);
assert_eq!(EstateModule::all_land_units_count(), 1);
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_2
));
// Update estate
assert_ok!(EstateModule::add_land_unit_to_estate(
Origin::signed(BENEFICIARY_ID),
estate_id,
vec![COORDINATE_IN_2]
));
assert_eq!(
EstateModule::get_estates(estate_id),
Some(EstateInfo {
metaverse_id: METAVERSE_ID,
land_units: vec![COORDINATE_IN_1, COORDINATE_IN_2]
})
);
assert_eq!(
EstateModule::get_user_land_units(&BENEFICIARY_ID, &METAVERSE_ID).len(),
2
);
assert_eq!(EstateModule::all_land_units_count(), 2);
});
}
#[test]
fn remove_land_unit_from_estate_should_reject_non_owner() {
ExtBuilder::default().build().execute_with(|| {
// Mint estate
assert_ok!(EstateModule::mint_estate(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
assert_err!(
EstateModule::remove_land_unit_from_estate(Origin::signed(ALICE), 0, vec![COORDINATE_IN_2]),
Error::<Runtime>::NoPermission
);
});
}
#[test]
fn remove_land_unit_from_estate_should_work() {
ExtBuilder::default().build().execute_with(|| {
// Mint estate
assert_ok!(EstateModule::mint_estate(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
let estate_id: u64 = 0;
assert_eq!(EstateModule::all_estates_count(), 1);
assert_eq!(EstateModule::next_estate_id(), 1);
assert_eq!(
EstateModule::get_estates(estate_id),
Some(EstateInfo {
metaverse_id: METAVERSE_ID,
land_units: vec![COORDINATE_IN_1, COORDINATE_IN_2]
})
);
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
assert_eq!(
EstateModule::get_user_land_units(&BENEFICIARY_ID, &METAVERSE_ID).len(),
2
);
assert_eq!(EstateModule::all_land_units_count(), 2);
// Update estate
assert_ok!(EstateModule::remove_land_unit_from_estate(
Origin::signed(BENEFICIARY_ID),
estate_id,
vec![COORDINATE_IN_2]
));
assert_eq!(
EstateModule::get_estates(estate_id),
Some(EstateInfo {
metaverse_id: METAVERSE_ID,
land_units: vec![COORDINATE_IN_1]
})
);
assert_eq!(
EstateModule::get_user_land_units(&BENEFICIARY_ID, &METAVERSE_ID).len(),
2
);
assert_eq!(EstateModule::all_land_units_count(), 2);
});
}
#[test]
fn mint_estate_and_land_should_return_correct_total_land_unit() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_estate(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
let estate_id: u64 = 0;
assert_eq!(EstateModule::all_estates_count(), 1);
assert_eq!(EstateModule::next_estate_id(), 1);
assert_eq!(
EstateModule::get_estates(estate_id),
Some(EstateInfo {
metaverse_id: METAVERSE_ID,
land_units: vec![COORDINATE_IN_1, COORDINATE_IN_2]
})
);
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
assert_eq!(
EstateModule::get_user_land_units(&BENEFICIARY_ID, &METAVERSE_ID).len(),
2
);
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
(-6, 6)
));
assert_eq!(
EstateModule::get_user_land_units(&BENEFICIARY_ID, &METAVERSE_ID).len(),
3
);
});
}
#[test]
fn mint_estate_should_return_none_for_non_exist_estate() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_estate(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
let estate_id: u64 = 0;
assert_eq!(EstateModule::all_estates_count(), 1);
assert_eq!(EstateModule::next_estate_id(), 1);
assert_eq!(
EstateModule::get_estates(estate_id),
Some(EstateInfo {
metaverse_id: METAVERSE_ID,
land_units: vec![COORDINATE_IN_1, COORDINATE_IN_2]
})
);
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
let estate_id_non_exists: u64 = 999;
assert_eq!(EstateModule::get_estates(estate_id_non_exists), None);
assert_eq!(EstateModule::get_estate_owner(estate_id_non_exists), None);
});
}
#[test]
fn transfer_estate_token_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_estate(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
let estate_id: u64 = 0;
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
assert_ok!(EstateModule::transfer_estate(
Origin::signed(BENEFICIARY_ID),
ALICE,
estate_id
));
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
assert_eq!(
last_event(),
Event::Estate(crate::Event::TransferredEstate(estate_id, BENEFICIARY_ID, ALICE))
);
});
}
#[test]
fn transfer_estate_should_reject_no_permission() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_estate(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
let estate_id: u64 = 0;
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
assert_noop!(
EstateModule::transfer_estate(Origin::signed(BOB), ALICE, estate_id),
Error::<Runtime>::NoPermission
);
});
}
#[test]
fn transfer_estate_should_reject_already_in_auction() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::transfer_estate(Origin::signed(BOB), ALICE, ESTATE_IN_AUCTION),
Error::<Runtime>::EstateAlreadyInAuction
);
});
}
#[test]
fn transfer_estate_should_fail_with_same_account() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_estate(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
let estate_id: u64 = 0;
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
assert_noop!(
EstateModule::transfer_estate(Origin::signed(BENEFICIARY_ID), BENEFICIARY_ID, estate_id),
Error::<Runtime>::AlreadyOwnTheEstate
);
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
});
}
#[test]
fn create_estate_token_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_lands(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
assert_ok!(EstateModule::create_estate(
Origin::signed(BENEFICIARY_ID),
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
let estate_id: u64 = 0;
assert_eq!(EstateModule::all_estates_count(), 1);
assert_eq!(EstateModule::next_estate_id(), 1);
assert_eq!(
EstateModule::get_estates(estate_id),
Some(EstateInfo {
metaverse_id: METAVERSE_ID,
land_units: vec![COORDINATE_IN_1, COORDINATE_IN_2]
})
);
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
});
}
#[test]
fn create_estate_token_after_minting_account_and_token_based_lands_should_give_correct_total_user_land_units() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_1
));
assert_ok!(EstateModule::mint_land(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
COORDINATE_IN_2
));
assert_ok!(EstateModule::create_estate(
Origin::signed(BENEFICIARY_ID),
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
let estate_id: u64 = 0;
assert_eq!(EstateModule::all_estates_count(), 1);
assert_eq!(EstateModule::next_estate_id(), 1);
assert_eq!(
EstateModule::get_estates(estate_id),
Some(EstateInfo {
metaverse_id: METAVERSE_ID,
land_units: vec![COORDINATE_IN_1, COORDINATE_IN_2]
})
);
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
assert_eq!(
EstateModule::get_user_land_units(&BENEFICIARY_ID, &METAVERSE_ID).len(),
2
);
assert_eq!(EstateModule::all_land_units_count(), 2);
});
}
#[test]
fn create_estate_should_return_none_for_non_exist_estate() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::mint_lands(
Origin::root(),
BENEFICIARY_ID,
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
assert_ok!(EstateModule::create_estate(
Origin::signed(BENEFICIARY_ID),
METAVERSE_ID,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
let estate_id: u64 = 0;
assert_eq!(EstateModule::all_estates_count(), 1);
assert_eq!(EstateModule::next_estate_id(), 1);
assert_eq!(
EstateModule::get_estates(estate_id),
Some(EstateInfo {
metaverse_id: METAVERSE_ID,
land_units: vec![COORDINATE_IN_1, COORDINATE_IN_2]
})
);
assert_eq!(EstateModule::get_estate_owner(estate_id), Some(OWNER_ESTATE_ASSET_ID));
let estate_id_non_exists: u64 = 999;
assert_eq!(EstateModule::get_estates(estate_id_non_exists), None);
assert_eq!(EstateModule::get_estate_owner(estate_id_non_exists), None);
});
}
#[test]
fn issue_land_block_should_fail_if_not_root() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::issue_undeployed_land_blocks(
Origin::signed(ALICE),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
),
BadOrigin
);
});
}
#[test]
fn issue_land_block_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::UndeployedLandBlockIssued(BOB, 0))
);
assert_eq!(EstateModule::get_undeployed_land_block_owner(BOB, 0), Some(()));
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(0);
match issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 20);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::BoundToAddress);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
});
}
#[test]
fn issue_two_land_block_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
|
UndeployedLandBlockType::BoundToAddress
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::UndeployedLandBlockIssued(BOB, 0))
);
assert_eq!(EstateModule::get_undeployed_land_block_owner(BOB, 0), Some(()));
let first_issued_undeployed_land_block = EstateModule::get_undeployed_land_block(0);
match first_issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 20);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::BoundToAddress);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
ALICE,
1,
30,
UndeployedLandBlockType::Transferable
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::UndeployedLandBlockIssued(ALICE, 1))
);
assert_eq!(EstateModule::get_undeployed_land_block_owner(ALICE, 1), Some(()));
let second_issued_undeployed_land_block = EstateModule::get_undeployed_land_block(1);
match second_issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, ALICE);
assert_eq!(a.number_land_units, 30);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::Transferable);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
});
}
#[test]
fn freeze_undeployed_land_block_should_fail_if_not_root() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::freeze_undeployed_land_blocks(Origin::signed(ALICE), 0),
BadOrigin
);
});
}
#[test]
fn freeze_undeployed_land_block_should_fail_not_found() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::freeze_undeployed_land_blocks(Origin::root(), 0),
Error::<Runtime>::UndeployedLandBlockNotFound
);
});
}
#[test]
fn freeze_undeployed_land_block_should_fail_if_already_in_auction() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
1,
UndeployedLandBlockType::Transferable,
));
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
21,
UndeployedLandBlockType::Transferable,
));
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(UNDEPLOYED_LAND_BLOCK_IN_AUCTION);
match issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 21);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::Transferable);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_noop!(
EstateModule::freeze_undeployed_land_blocks(Origin::root(), UNDEPLOYED_LAND_BLOCK_IN_AUCTION),
Error::<Runtime>::UndeployedLandBlockAlreadyInAuction
);
});
}
#[test]
fn freeze_undeployed_land_block_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(0);
match issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 20);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::BoundToAddress);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_ok!(EstateModule::freeze_undeployed_land_blocks(Origin::root(), 0));
assert_eq!(last_event(), Event::Estate(crate::Event::UndeployedLandBlockFreezed(0)));
assert_eq!(EstateModule::get_undeployed_land_block_owner(BOB, 0), Some(()));
let frozen_undeployed_land_block = EstateModule::get_undeployed_land_block(0);
match frozen_undeployed_land_block {
Some(a) => {
assert_eq!(a.is_locked, true);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
});
}
#[test]
fn freeze_undeployed_land_block_should_fail_already_freezed() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
assert_ok!(EstateModule::freeze_undeployed_land_blocks(Origin::root(), 0));
assert_eq!(last_event(), Event::Estate(crate::Event::UndeployedLandBlockFreezed(0)));
assert_noop!(
EstateModule::freeze_undeployed_land_blocks(Origin::root(), 0),
Error::<Runtime>::UndeployedLandBlockAlreadyFreezed
);
});
}
#[test]
fn unfreeze_undeployed_land_block_should_fail_if_not_root() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::unfreeze_undeployed_land_blocks(Origin::signed(ALICE), 0),
BadOrigin
);
});
}
#[test]
fn unfreeze_undeployed_land_block_should_fail_not_found() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::unfreeze_undeployed_land_blocks(Origin::root(), 0),
Error::<Runtime>::UndeployedLandBlockNotFound
);
});
}
#[test]
fn unfreeze_undeployed_land_block_should_fail_if_already_in_auction() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
1,
UndeployedLandBlockType::Transferable,
));
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
21,
UndeployedLandBlockType::Transferable,
));
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(1);
match issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 21);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::Transferable);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_noop!(
EstateModule::unfreeze_undeployed_land_blocks(Origin::root(), UNDEPLOYED_LAND_BLOCK_IN_AUCTION),
Error::<Runtime>::UndeployedLandBlockAlreadyInAuction
);
});
}
#[test]
fn unfreeze_undeployed_land_block_should_fail_not_frozen() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
assert_noop!(
EstateModule::unfreeze_undeployed_land_blocks(Origin::root(), 0),
Error::<Runtime>::UndeployedLandBlockNotFrozen
);
});
}
#[test]
fn unfreeze_undeployed_land_block_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
assert_ok!(EstateModule::freeze_undeployed_land_blocks(Origin::root(), 0));
let freezed_undeployed_land_block = EstateModule::get_undeployed_land_block(0);
match freezed_undeployed_land_block {
Some(a) => {
assert_eq!(a.is_locked, true);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_ok!(EstateModule::unfreeze_undeployed_land_blocks(Origin::root(), 0));
assert_eq!(
last_event(),
Event::Estate(crate::Event::UndeployedLandBlockUnfreezed(0))
);
let unfreezed_undeployed_land_block = EstateModule::get_undeployed_land_block(0);
match unfreezed_undeployed_land_block {
Some(a) => {
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
});
}
#[test]
fn transfer_undeployed_land_block_should_fail_if_not_found() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::transfer_undeployed_land_blocks(Origin::signed(ALICE), BOB, 0),
Error::<Runtime>::UndeployedLandBlockNotFound
);
});
}
#[test]
fn transfer_undeployed_land_block_should_fail_if_not_owner() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
assert_noop!(
EstateModule::transfer_undeployed_land_blocks(Origin::signed(ALICE), BOB, undeployed_land_block_id),
Error::<Runtime>::NoPermission
);
});
}
#[test]
fn transfer_undeployed_land_block_should_fail_if_freezed() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
assert_ok!(EstateModule::freeze_undeployed_land_blocks(
Origin::root(),
undeployed_land_block_id
));
assert_noop!(
EstateModule::transfer_undeployed_land_blocks(Origin::signed(BOB), ALICE, undeployed_land_block_id),
Error::<Runtime>::UndeployedLandBlockAlreadyFreezed
);
});
}
#[test]
fn transfer_undeployed_land_block_should_fail_if_not_transferable() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
assert_noop!(
EstateModule::transfer_undeployed_land_blocks(Origin::signed(BOB), ALICE, undeployed_land_block_id),
Error::<Runtime>::UndeployedLandBlockIsNotTransferable
);
});
}
#[test]
fn transfer_undeployed_land_block_should_fail_if_already_in_auction() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
1,
UndeployedLandBlockType::Transferable,
));
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
21,
UndeployedLandBlockType::Transferable,
));
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(1);
match issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 21);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::Transferable);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_noop!(
EstateModule::transfer_undeployed_land_blocks(Origin::signed(BOB), ALICE, UNDEPLOYED_LAND_BLOCK_IN_AUCTION),
Error::<Runtime>::UndeployedLandBlockAlreadyInAuction
);
});
}
#[test]
fn transfer_undeployed_land_block_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::Transferable
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(undeployed_land_block_id);
match issued_undeployed_land_block {
Some(a) => {
assert_eq!(a.owner, BOB);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_eq!(
EstateModule::get_undeployed_land_block_owner(BOB, undeployed_land_block_id),
Some(())
);
assert_ok!(EstateModule::transfer_undeployed_land_blocks(
Origin::signed(BOB),
ALICE,
undeployed_land_block_id
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::UndeployedLandBlockTransferred(
BOB,
ALICE,
undeployed_land_block_id,
))
);
let transferred_issued_undeployed_land_block =
EstateModule::get_undeployed_land_block(undeployed_land_block_id);
match transferred_issued_undeployed_land_block {
Some(a) => {
assert_eq!(a.owner, ALICE);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_eq!(
EstateModule::get_undeployed_land_block_owner(BOB, undeployed_land_block_id),
None
);
assert_eq!(
EstateModule::get_undeployed_land_block_owner(ALICE, undeployed_land_block_id),
Some(())
);
});
}
#[test]
fn deploy_undeployed_land_block_should_fail_if_not_found() {
ExtBuilder::default().build().execute_with(|| {
let undeployed_land_block_id: UndeployedLandBlockId = 0;
assert_noop!(
EstateModule::deploy_land_block(
Origin::signed(ALICE),
undeployed_land_block_id,
METAVERSE_ID,
LANDBLOCK_COORDINATE,
vec![COORDINATE_IN_1]
),
Error::<Runtime>::UndeployedLandBlockNotFound
);
});
}
#[test]
fn deploy_undeployed_land_block_should_fail_if_not_owner() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
assert_noop!(
EstateModule::deploy_land_block(
Origin::signed(ALICE),
undeployed_land_block_id,
METAVERSE_ID,
LANDBLOCK_COORDINATE,
vec![COORDINATE_IN_1]
),
Error::<Runtime>::NoPermission
);
});
}
#[test]
fn deploy_undeployed_land_block_should_fail_if_freezed() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
assert_ok!(EstateModule::freeze_undeployed_land_blocks(
Origin::root(),
undeployed_land_block_id
));
assert_noop!(
EstateModule::deploy_land_block(
Origin::signed(BOB),
undeployed_land_block_id,
METAVERSE_ID,
LANDBLOCK_COORDINATE,
vec![COORDINATE_IN_1]
),
Error::<Runtime>::UndeployedLandBlockFreezed
);
});
}
#[test]
fn deploy_undeployed_land_block_should_fail_if_already_in_auction() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
1,
UndeployedLandBlockType::Transferable,
));
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
21,
UndeployedLandBlockType::Transferable,
));
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(1);
match issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 21);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::Transferable);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_noop!(
EstateModule::deploy_land_block(
Origin::signed(BOB),
UNDEPLOYED_LAND_BLOCK_IN_AUCTION,
METAVERSE_ID,
LANDBLOCK_COORDINATE,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
),
Error::<Runtime>::UndeployedLandBlockAlreadyInAuction
);
});
}
#[test]
fn deploy_undeployed_land_block_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
2,
UndeployedLandBlockType::BoundToAddress
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
let undeployed_land_block = EstateModule::get_undeployed_land_block(undeployed_land_block_id);
match undeployed_land_block {
Some(a) => {
assert_eq!(a.number_land_units, 2);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_ok!(EstateModule::deploy_land_block(
Origin::signed(BOB),
undeployed_land_block_id,
METAVERSE_ID,
LANDBLOCK_COORDINATE,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::LandBlockDeployed(
BOB,
METAVERSE_ID,
undeployed_land_block_id,
vec![COORDINATE_IN_1, COORDINATE_IN_2],
))
);
let updated_undeployed_land_block = EstateModule::get_undeployed_land_block(undeployed_land_block_id);
assert_eq!(updated_undeployed_land_block, None);
assert_eq!(EstateModule::all_land_units_count(), 2);
});
}
#[test]
fn approve_undeployed_land_block_should_fail_if_not_found() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::approve_undeployed_land_blocks(Origin::signed(ALICE), BOB, 0),
Error::<Runtime>::UndeployedLandBlockNotFound
);
});
}
#[test]
fn approve_undeployed_land_block_should_fail_if_not_owner() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
assert_noop!(
EstateModule::approve_undeployed_land_blocks(Origin::signed(ALICE), BOB, undeployed_land_block_id),
Error::<Runtime>::NoPermission
);
});
}
#[test]
fn approve_undeployed_land_block_should_fail_if_freezed() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
assert_ok!(EstateModule::freeze_undeployed_land_blocks(
Origin::root(),
undeployed_land_block_id
));
assert_noop!(
EstateModule::approve_undeployed_land_blocks(Origin::signed(BOB), ALICE, undeployed_land_block_id),
Error::<Runtime>::UndeployedLandBlockAlreadyFreezed
);
});
}
#[test]
fn approve_undeployed_land_block_should_fail_if_already_in_auction() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
1,
UndeployedLandBlockType::Transferable,
));
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
21,
UndeployedLandBlockType::Transferable,
));
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(1);
match issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 21);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::Transferable);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_noop!(
EstateModule::approve_undeployed_land_blocks(Origin::signed(BOB), ALICE, UNDEPLOYED_LAND_BLOCK_IN_AUCTION),
Error::<Runtime>::UndeployedLandBlockAlreadyInAuction
);
});
}
#[test]
fn approve_undeployed_land_block_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::Transferable
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(undeployed_land_block_id);
match issued_undeployed_land_block {
Some(a) => {
assert_eq!(a.owner, BOB);
assert_eq!(a.approved, None);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_eq!(
EstateModule::get_undeployed_land_block_owner(BOB, undeployed_land_block_id),
Some(())
);
assert_ok!(EstateModule::approve_undeployed_land_blocks(
Origin::signed(BOB),
ALICE,
undeployed_land_block_id
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::UndeployedLandBlockApproved(
BOB,
ALICE,
undeployed_land_block_id,
))
);
let transferred_issued_undeployed_land_block =
EstateModule::get_undeployed_land_block(undeployed_land_block_id);
match transferred_issued_undeployed_land_block {
Some(a) => {
assert_eq!(a.owner, BOB);
assert_eq!(a.approved, Some(ALICE));
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_eq!(
EstateModule::get_undeployed_land_block_owner(BOB, undeployed_land_block_id),
Some(())
);
});
}
#[test]
fn unapprove_undeployed_land_block_should_fail_if_not_found() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::unapprove_undeployed_land_blocks(Origin::signed(ALICE), 0),
Error::<Runtime>::UndeployedLandBlockNotFound
);
});
}
#[test]
fn unapprove_undeployed_land_block_should_fail_if_not_owner() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
assert_noop!(
EstateModule::unapprove_undeployed_land_blocks(Origin::signed(ALICE), undeployed_land_block_id),
Error::<Runtime>::NoPermission
);
});
}
#[test]
fn unapprove_undeployed_land_block_should_fail_if_freezed() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
assert_ok!(EstateModule::freeze_undeployed_land_blocks(
Origin::root(),
undeployed_land_block_id
));
assert_noop!(
EstateModule::unapprove_undeployed_land_blocks(Origin::signed(BOB), undeployed_land_block_id),
Error::<Runtime>::UndeployedLandBlockAlreadyFreezed
);
});
}
#[test]
fn unapprove_undeployed_land_block_should_fail_if_already_in_auction() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
1,
UndeployedLandBlockType::Transferable,
));
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
21,
UndeployedLandBlockType::Transferable,
));
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(1);
match issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 21);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::Transferable);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_noop!(
EstateModule::unapprove_undeployed_land_blocks(Origin::signed(BOB), UNDEPLOYED_LAND_BLOCK_IN_AUCTION),
Error::<Runtime>::UndeployedLandBlockAlreadyInAuction
);
});
}
#[test]
fn unapprove_undeployed_land_block_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::Transferable
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(undeployed_land_block_id);
match issued_undeployed_land_block {
Some(a) => {
assert_eq!(a.owner, BOB);
assert_eq!(a.approved, None);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_eq!(
EstateModule::get_undeployed_land_block_owner(BOB, undeployed_land_block_id),
Some(())
);
assert_ok!(EstateModule::approve_undeployed_land_blocks(
Origin::signed(BOB),
ALICE,
undeployed_land_block_id
));
let approved_issued_undeployed_land_block = EstateModule::get_undeployed_land_block(undeployed_land_block_id);
match approved_issued_undeployed_land_block {
Some(a) => {
assert_eq!(a.owner, BOB);
assert_eq!(a.approved, Some(ALICE));
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_ok!(EstateModule::unapprove_undeployed_land_blocks(
Origin::signed(BOB),
undeployed_land_block_id
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::UndeployedLandBlockUnapproved(undeployed_land_block_id))
);
let unapproved_issued_undeployed_land_block = EstateModule::get_undeployed_land_block(undeployed_land_block_id);
match unapproved_issued_undeployed_land_block {
Some(a) => {
assert_eq!(a.owner, BOB);
assert_eq!(a.approved, None);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
});
}
#[test]
fn burn_undeployed_land_block_should_fail_if_not_root() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::burn_undeployed_land_blocks(Origin::signed(ALICE), 0),
BadOrigin
);
});
}
#[test]
fn burn_undeployed_land_block_should_fail_not_found() {
ExtBuilder::default().build().execute_with(|| {
assert_noop!(
EstateModule::burn_undeployed_land_blocks(Origin::root(), 0),
Error::<Runtime>::UndeployedLandBlockNotFound
);
});
}
#[test]
fn burn_undeployed_land_block_should_fail_if_already_in_auction() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
1,
UndeployedLandBlockType::Transferable,
));
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
21,
UndeployedLandBlockType::Transferable,
));
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(1);
match issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 21);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::Transferable);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_noop!(
EstateModule::burn_undeployed_land_blocks(Origin::root(), UNDEPLOYED_LAND_BLOCK_IN_AUCTION),
Error::<Runtime>::UndeployedLandBlockAlreadyInAuction
);
});
}
#[test]
fn burn_undeployed_land_block_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
20,
UndeployedLandBlockType::BoundToAddress
));
let undeployed_land_block_id: UndeployedLandBlockId = 0;
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(undeployed_land_block_id);
match issued_undeployed_land_block {
Some(a) => {
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 20);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::BoundToAddress);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
assert_eq!(
EstateModule::get_undeployed_land_block_owner(BOB, undeployed_land_block_id),
Some(())
);
assert_ok!(EstateModule::burn_undeployed_land_blocks(
Origin::root(),
undeployed_land_block_id
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::UndeployedLandBlockBurnt(undeployed_land_block_id))
);
assert_eq!(
EstateModule::get_undeployed_land_block_owner(BOB, undeployed_land_block_id),
None
);
assert_eq!(EstateModule::get_undeployed_land_block(undeployed_land_block_id), None)
});
}
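// The two checks below call `verify_land_unit_in_bound` directly. Judging from
// the fixtures, a land unit is in bound when it lies within roughly half a land
// block (about 50 units per axis) of the block coordinate.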
#[test]
fn ensure_land_unit_within_land_block_bound_should_work() {
let coordinates: Vec<(i32, i32)> = vec![(-49, 0), (-48, 0), (-47, 0), (0, 50)];
assert_eq!(EstateModule::verify_land_unit_in_bound(&(0, 0), &coordinates), true);
let second_coordinates: Vec<(i32, i32)> = vec![(-249, 2), (-248, 2), (-150, 2), (-150, 6)];
assert_eq!(
EstateModule::verify_land_unit_in_bound(&(-200, 2), &second_coordinates),
true
);
}
#[test]
fn ensure_land_unit_out_of_land_block_bound_should_fail() {
let coordinates: Vec<(i32, i32)> = vec![(-51, 0), (-48, 0), (-47, 0), (0, 51)];
assert_eq!(EstateModule::verify_land_unit_in_bound(&(0, 0), &coordinates), false);
let second_coordinates: Vec<(i32, i32)> = vec![(-250, 2), (-248, 2), (-150, 2), (-151, 6)];
assert_eq!(
EstateModule::verify_land_unit_in_bound(&(-200, 2), &second_coordinates),
false
);
}
#[test]
fn issue_land_block_and_create_estate_should_work() {
ExtBuilder::default().build().execute_with(|| {
assert_ok!(EstateModule::issue_undeployed_land_blocks(
Origin::root(),
BOB,
1,
2,
UndeployedLandBlockType::BoundToAddress
));
assert_eq!(
last_event(),
Event::Estate(crate::Event::UndeployedLandBlockIssued(BOB, 0))
);
assert_eq!(EstateModule::get_undeployed_land_block_owner(BOB, 0), Some(()));
let issued_undeployed_land_block = EstateModule::get_undeployed_land_block(0);
match issued_undeployed_land_block {
Some(a) => {
// Verify details of UndeployedLandBlock
assert_eq!(a.owner, BOB);
assert_eq!(a.number_land_units, 2);
assert_eq!(a.undeployed_land_block_type, UndeployedLandBlockType::BoundToAddress);
assert_eq!(a.is_locked, false);
}
_ => {
// Should fail test
assert_eq!(0, 1);
}
}
// Bob can deploy raw land block to his metaverse
assert_ok!(EstateModule::deploy_land_block(
Origin::signed(BOB),
0,
METAVERSE_ID,
LANDBLOCK_COORDINATE,
vec![COORDINATE_IN_1, COORDINATE_IN_2]
));
assert_eq!(
EstateModule::get_land_units(METAVERSE_ID, COORDINATE_IN_1),
Some(OwnerId::Token(2))
);
assert_eq!(
EstateModule::get_land_units(METAVERSE_ID, COORDINATE_IN_2),
Some(OwnerId::Token(2))
);
});
}
| |
aop.js
|
/** @license MIT License (c) copyright B Cavalier & J Hann */
/**
* wire/aop plugin
* Provides AOP for components created via wire, including Decorators,
* Introductions (mixins), and Pointcut-based Aspect Weaving.
*
* wire is part of the cujo.js family of libraries (http://cujojs.com/)
*
* Licensed under the MIT License at:
* http://www.opensource.org/licenses/mit-license.php
*/
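/*
 * Rough usage sketch (component names below are hypothetical; the exact spec
 * syntax is whatever wire's connection parser accepts):
 *
 *   {
 *     plugins: [{ module: 'wire/aop' }],
 *     logger:  { create: 'app/Logger' },
 *     service: {
 *       create: 'app/Service',
 *       // run logger.logArgs before every call to service.doWork
 *       before: { doWork: 'logger.logArgs' }
 *     }
 *   }
 */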
(function(define) { 'use strict';
define(function(require) {
var meld, when, sequence, connection, adviceTypes, adviceStep, undef;
meld = require('meld');
when = require('when');
sequence = require('when/sequence');
connection = require('./lib/connection');
// "after" is not included in these standard advice types because
// it is created as promise-aware advice.
adviceTypes = ['before', 'around', 'afterReturning', 'afterThrowing'];
adviceStep = 'connect:before';
//
// Decoration
//
function applyDecorator(target, Decorator, args) {
args = args ? [target].concat(args) : [target];
Decorator.apply(null, args);
}
function makeDecorator(decorator, args, wire) {
return function(target) {
function apply(Decorator) {
return args
? when(wire(args), function (resolvedArgs) {
applyDecorator(target, Decorator, resolvedArgs);
})
: applyDecorator(target, Decorator);
}
return when(wire.resolveRef(decorator), apply);
};
}
function decorateFacet(resolver, facet, wire) {
var target, options, tasks;
target = facet.target;
options = facet.options;
tasks = [];
for(var decoratorRefName in options) {
tasks.push(makeDecorator(decoratorRefName, options[decoratorRefName], wire));
}
resolver.resolve(sequence(tasks, target));
}
//
// Simple advice
//
function addSingleAdvice(addAdviceFunc, advices, proxy, advice, options, wire) {
function handleAopConnection(srcObject, srcMethod, adviceHandler) {
checkAdvisable(srcObject, srcMethod);
advices.push(addAdviceFunc(srcObject, srcMethod, adviceHandler));
}
return connection.parse(proxy, advice, options, wire, handleAopConnection);
}
function checkAdvisable(source, method) {
if (!(typeof method == 'function' || typeof source[method] == 'function')) {
|
throw new TypeError('Cannot add advice to non-method: ' + method);
}
}
function makeSingleAdviceAdd(adviceType) {
return function (source, sourceMethod, advice) {
return meld[adviceType](source, sourceMethod, advice);
};
}
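	// The three helpers below add promise-aware "after" advice: the advice runs
	// once the promise returned by the advised method fulfills, rejects, or
	// settles (either outcome), respectively.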
function addAfterFulfillingAdvice(source, sourceMethod, advice) {
return meld.afterReturning(source, sourceMethod, function(promise) {
return when(promise, advice);
});
}
function addAfterRejectingAdvice(source, sourceMethod, advice) {
return meld.afterReturning(source, sourceMethod, function(promise) {
return when(promise, null, advice);
});
}
function addAfterPromiseAdvice(source, sourceMethod, advice) {
return meld.after(source, sourceMethod, function(promise) {
return when(promise, advice, advice);
});
}
function makeAdviceFacet(addAdviceFunc, advices) {
return function(resolver, facet, wire) {
var advice, target, advicesToAdd, promises;
target = facet;
advicesToAdd = facet.options;
promises = [];
for(advice in advicesToAdd) {
promises.push(addSingleAdvice(addAdviceFunc, advices,
target, advice, advicesToAdd[advice], wire));
}
resolver.resolve(when.all(promises));
};
}
//
// Aspect Weaving
//
function applyAspectCombined(target, aspect, wire, add) {
return when(wire.resolveRef(aspect), function (aspect) {
var pointcut = aspect.pointcut;
if (pointcut) {
add(target, pointcut, aspect);
}
return target;
});
}
function applyAspectSeparate(target, aspect, wire, add) {
var pointcut, advice;
pointcut = aspect.pointcut;
advice = aspect.advice;
function applyAdvice(pointcut) {
return when(wire.resolveRef(advice), function (aspect) {
add(target, pointcut, aspect);
return target;
});
}
return typeof pointcut === 'string'
			? when(wire.resolveRef(pointcut), applyAdvice)
: applyAdvice(pointcut);
}
function weave(resolver, proxy, wire, options, add) {
// TODO: Refactor weaving to use proxy.invoke
var target, path, aspects, applyAdvice;
aspects = options.aspects;
path = proxy.path;
if (!aspects || path === undef) {
resolver.resolve();
return;
}
target = proxy.target;
applyAdvice = applyAspectCombined;
// Reduce will preserve order of aspects being applied
resolver.resolve(when.reduce(aspects, function(target, aspect) {
var aspectPath;
if (aspect.advice) {
aspectPath = aspect.advice;
applyAdvice = applyAspectSeparate;
} else {
aspectPath = aspect;
}
return typeof aspectPath === 'string' && aspectPath !== path
? applyAdvice(target, aspect, wire, add)
: target;
}, target));
}
/**
* Creates wire/aop plugin instances.
*
* @param options {Object} options passed to the plugin
*/
return function(options) {
// Track aspects so they can be removed when the context is destroyed
var woven, plugin, i, len, adviceType;
woven = [];
/**
* Function to add an aspect and remember it in the current context
* so that it can be removed when the context is destroyed.
* @param target
* @param pointcut
* @param aspect
*/
function add(target, pointcut, aspect) {
woven.push(meld.add(target, pointcut, aspect));
}
function makeFacet(step, callback) {
var facet = {};
facet[step] = function(resolver, proxy, wire) {
callback(resolver, proxy, wire);
};
return facet;
}
// Plugin
plugin = {
context: {
destroy: function(resolver) {
woven.forEach(function(aspect) {
aspect.remove();
});
resolver.resolve();
}
},
facets: {
decorate: makeFacet('configure:after', decorateFacet),
afterFulfilling: makeFacet(adviceStep, makeAdviceFacet(addAfterFulfillingAdvice, woven)),
afterRejecting: makeFacet(adviceStep, makeAdviceFacet(addAfterRejectingAdvice, woven)),
after: makeFacet(adviceStep, makeAdviceFacet(addAfterPromiseAdvice, woven))
}
};
if(options.aspects) {
plugin.create = function(resolver, proxy, wire) {
weave(resolver, proxy, wire, options, add);
};
}
// Add all regular single advice facets
for(i = 0, len = adviceTypes.length; i<len; i++) {
adviceType = adviceTypes[i];
plugin.facets[adviceType] = makeFacet(adviceStep, makeAdviceFacet(makeSingleAdviceAdd(adviceType), woven));
}
return plugin;
};
});
})(typeof define == 'function'
// use define for AMD if available
? define
: function(factory) { module.exports = factory(require); }
);
| |
do_exit.rs
|
use std::intrinsics::atomic_store;
use super::do_futex::futex_wake;
use super::process::ChildProcessFilter;
use super::{table, ThreadRef};
use crate::prelude::*;
pub fn do_exit(exit_status: i32) {
let thread = current!();
let num_remaining_threads = thread.exit(exit_status);
// Notify a thread, if any, that waits on ctid. See set_tid_address(2) for more info.
if let Some(ctid_ptr) = thread.clear_ctid() {
unsafe {
atomic_store(ctid_ptr.as_ptr(), 0);
}
futex_wake(ctid_ptr.as_ptr() as *const i32, 1);
}
// Keep the main thread's tid available as long as the process is not destroyed.
// This is important as the user space may still attempt to access the main
// thread's ThreadRef through the process's pid after the process has become
// a zombie.
if thread.tid() != thread.process().pid() {
table::del_thread(thread.tid()).expect("tid must be in the table");
}
// If this thread is the last thread, then exit the process
if num_remaining_threads == 0 {
do_exit_process(&thread, exit_status);
}
}
fn
|
(thread: &ThreadRef, exit_status: i32) {
let process = thread.process();
// If the parent process is the idle process, we can release the process directly.
if process.parent().pid() == 0 {
// Deadlock note: Always lock parent then child.
let mut parent_inner = super::IDLE.process().inner();
let mut process_inner = process.inner();
table::del_thread(thread.tid()).expect("tid must be in the table");
table::del_process(process.pid()).expect("pid must be in the table");
process_inner.exit(exit_status);
parent_inner.remove_zombie_child(process.pid());
return;
}
// Otherwise, we need to notify the parent process
// Lock the parent process to ensure that parent's wait4 cannot miss the current
// process's exit.
// Deadlock note: Always lock parent then child.
let parent = process.parent();
let mut parent_inner = parent.inner();
process.inner().exit(exit_status);
// Wake up the parent if it is waiting on this child
let waiting_children = parent_inner.waiting_children_mut().unwrap();
waiting_children.del_and_wake_one_waiter(|waiter_data| -> Option<pid_t> {
match waiter_data {
ChildProcessFilter::WithAnyPid => {}
ChildProcessFilter::WithPid(required_pid) => {
if process.pid() != *required_pid {
return None;
}
}
ChildProcessFilter::WithPgid(required_pgid) => {
if process.pgid() != *required_pgid {
return None;
}
}
}
Some(process.pid())
});
}
|
do_exit_process
|
test_data.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_geocodio
----------------------------------
Tests for `geocodio.data` module.
"""
import json
import os
import unittest
from geocodio.data import Address
from geocodio.data import Location
from geocodio.data import LocationCollection
class TestDataTypes(unittest.TestCase):
def setUp(self):
"""
Read the test data from JSON files which are modified from actual
service response only for formatting. This makes this file much easier
to read, the data easier to inspect, and ensures that the data matches
what the service actually replies with.
"""
fixtures = os.path.join(os.path.dirname(os.path.abspath(__file__)), "response/")
with open(os.path.join(fixtures, "single.json"), "r") as single_json:
self.single_response = json.loads(single_json.read())
with open(os.path.join(fixtures, "batch.json"), "r") as batch_json:
self.batch_response = json.loads(batch_json.read())
with open(os.path.join(fixtures, "address.json"), "r") as address_json:
self.address_response = json.loads(address_json.read())
with open(os.path.join(fixtures, "missing_results.json"), "r") as missing_json:
self.missing_results = json.loads(missing_json.read())
with open(
os.path.join(fixtures, "batch_reverse.json"), "r"
) as batch_reverse_json:
self.batch_reverse_response = json.loads(batch_reverse_json.read())
def test_address_coords(self):
"""Ensure Address.coords property returns None when no location"""
x = Address(self.address_response)
self.assertEqual(None, x.coords)
def test_address_accuracy(self):
"""Ensure Address.accuracy property returns None when no location"""
x = Address(self.address_response)
self.assertEqual(None, x.accuracy)
def test_location_coords(self):
|
def test_location_results_missing(self):
"""Ensure empty results are processed as a missing address"""
bad_results = Location(self.missing_results)
self.assertEqual(bad_results.coords, None)
def test_collection(self):
"""Ensure that the LocationCollection stores as a list of Locations"""
self.assertTrue(isinstance(self.batch_response, dict))
locations = LocationCollection(self.batch_response["results"])
self.assertTrue(isinstance(locations[0], Location))
locations = LocationCollection(self.batch_reverse_response["results"])
self.assertTrue(isinstance(locations[0], Location))
def test_collection_coords(self):
"""Ensure the coords property returns a list of suitable tuples"""
locations = LocationCollection(self.batch_response["results"])
self.assertEqual(
locations.coords,
[
(37.560890255102, -77.477400571429),
(37.554895702703, -77.457561054054),
None,
],
)
# Do the same with the order changed
locations = LocationCollection(self.batch_response["results"], order="lng")
self.assertEqual(
locations.coords,
[
(-77.477400571429, 37.560890255102),
(-77.457561054054, 37.554895702703),
None,
],
)
def test_collection_addresses(self):
"""Ensure that formatted addresses are returned"""
locations = LocationCollection(self.batch_response["results"])
self.assertEqual(
locations.formatted_addresses,
[
"3101 Patterson Ave, Richmond VA, 23221",
"1657 W Broad St, Richmond VA, 23220",
"",
],
)
def test_collection_get(self):
"""Ensure 'get' performs a key based lookup"""
locations = LocationCollection(self.batch_response["results"])
self.assertEqual(
locations.get("3101 patterson ave, richmond, va").coords,
(37.560890255102, -77.477400571429),
)
# Case sensitive on the specific query
self.assertRaises(KeyError, locations.get, "3101 Patterson Ave, richmond, va")
locations = LocationCollection(self.batch_reverse_response["results"])
        # The rendered query string value is acceptable
self.assertEqual(
locations.get("37.538758,-77.433594").coords, (37.538758, -77.433594)
)
# A tuple of floats is acceptable
self.assertEqual(
locations.get((37.538758, -77.433594)).coords, (37.538758, -77.433594)
)
# If it can be coerced to a float it is acceptable
self.assertEqual(
locations.get(("37.538758", "-77.433594")).coords, (37.538758, -77.433594)
)
# This is unacceptable
self.assertRaises(ValueError, locations.get, ("37.538758 N", "-77.433594 W"))
if __name__ == "__main__":
unittest.main()
|
"""Ensure Location.coords property returns a suitable tuple"""
x = Location(self.single_response)
self.assertEqual(x.coords, (37.554895702703, -77.457561054054))
# Do the same with the order changed
x = Location(self.single_response, order="lng")
self.assertEqual(x.coords, (-77.457561054054, 37.554895702703))
|
configmaps.rs
|
use std::collections::BTreeMap;
use k8s_openapi::{api::core::v1::ConfigMap, chrono::Utc};
use super::utils;
#[derive(Clone)]
pub struct KubeConfigMap {
pub name: String,
pub namespace: String,
pub data: BTreeMap<String, String>,
pub age: String,
}
impl KubeConfigMap {
pub fn from_api(cm: &ConfigMap) -> Self {
let data = match cm.data.as_ref() {
Some(d) => d.to_owned(),
_ => BTreeMap::new(),
};
KubeConfigMap {
name: cm.metadata.name.clone().unwrap_or_default(),
namespace: cm.metadata.namespace.clone().unwrap_or_default(),
age: utils::to_age(cm.metadata.creation_timestamp.as_ref(), Utc::now()),
data,
}
}
|
}
|
|
default.tsx
|
export default defaultLocale;
|
import defaultLocale from './zh_CN'
|
|
http_redirects_operations.py
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.pipeline import ClientRawResponse
from .. import models
class HttpRedirectsOperations(object):
"""HttpRedirectsOperations operations.
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self.config = config
def head300(
self, custom_headers=None, raw=False, **operation_config):
"""Return 300 status code and redirect to /http/success/200.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/300'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 300]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def get300(
self, custom_headers=None, raw=False, **operation_config):
"""Return 300 status code and redirect to /http/success/200.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: list of str
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/300'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 300]:
raise models.ErrorException(self._deserialize, response)
deserialized = None
header_dict = {}
if response.status_code == 300:
deserialized = self._deserialize('[str]', response)
header_dict = {
'Location': 'str',
}
if raw:
client_raw_response = ClientRawResponse(deserialized, response)
client_raw_response.add_headers(header_dict)
return client_raw_response
return deserialized
def head301(
self, custom_headers=None, raw=False, **operation_config):
"""Return 301 status code and redirect to /http/success/200.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/301'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 301]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def get301(
self, custom_headers=None, raw=False, **operation_config):
"""Return 301 status code and redirect to /http/success/200.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/301'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 301]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def put301(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Put true Boolean value in request returns 301. This request should
not be automatically redirected, but should return the received 301
to the caller for evaluation.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/301'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [301]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def head302(
self, custom_headers=None, raw=False, **operation_config):
"""Return 302 status code and redirect to /http/success/200.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/302'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 302]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def get302(
self, custom_headers=None, raw=False, **operation_config):
"""Return 302 status code and redirect to /http/success/200.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/302'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 302]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def patch302(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Patch true Boolean value in request returns 302. This request should
not be automatically redirected, but should return the received 302
to the caller for evaluation.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/302'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [302]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def post303(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Post true Boolean value in request returns 303. This request should
        be automatically redirected using a get, ultimately returning a 200
status code.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/303'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 303]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def head307(
self, custom_headers=None, raw=False, **operation_config):
"""Redirect with 307, resulting in a 200 success.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/307'
# Construct parameters
query_parameters = {}
|
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.head(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 307]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def get307(
self, custom_headers=None, raw=False, **operation_config):
"""Redirect get with 307, resulting in a 200 success.
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/307'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct and send request
request = self._client.get(url, query_parameters)
response = self._client.send(request, header_parameters, **operation_config)
if response.status_code not in [200, 307]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def put307(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Put redirected with 307, resulting in a 200 after redirect.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/307'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.put(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 307]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def patch307(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Patch redirected with 307, resulting in a 200 after redirect.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/307'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.patch(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 307]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def post307(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Post redirected with 307, resulting in a 200 after redirect.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/307'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 307]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
def delete307(
self, boolean_value=None, custom_headers=None, raw=False, **operation_config):
"""Delete redirected with 307, resulting in a 200 after redirect.
:param boolean_value: Simple boolean value true
:type boolean_value: bool
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:rtype: None
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises:
:class:`ErrorException<Fixtures.AcceptanceTestsHttp.models.ErrorException>`
"""
# Construct URL
url = '/http/redirect/307'
# Construct parameters
query_parameters = {}
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
if boolean_value is not None:
body_content = self._serialize.body(boolean_value, 'bool')
else:
body_content = None
# Construct and send request
request = self._client.delete(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, **operation_config)
if response.status_code not in [200, 307]:
raise models.ErrorException(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
client_raw_response.add_headers({
'Location': 'str',
})
return client_raw_response
| |
lib.rs
|
//! @brief Example Rust-based BPF program that panics
#[cfg(all(feature = "custom-panic", target_arch = "bpf"))]
#[no_mangle]
fn
|
(info: &core::panic::PanicInfo<'_>) {
// Note: Full panic reporting is included here for testing purposes
solana_program::msg!("program custom panic enabled");
solana_program::msg!(&format!("{}", info));
}
extern crate solana_program;
use solana_program::{
account_info::AccountInfo, entrypoint, entrypoint::ProgramResult, pubkey::Pubkey,
};
entrypoint!(process_instruction);
fn process_instruction(
_program_id: &Pubkey,
_accounts: &[AccountInfo],
_instruction_data: &[u8],
) -> ProgramResult {
assert_eq!(1, 2);
Ok(())
}
|
custom_panic
|
index.js
|
import React, { Component } from 'react'
import firebase from 'firebase'
import 'isomorphic-fetch'
import clientCredentials from '../credentials/client'
export default class Index extends Component {
static async getInitialProps ({req, query}) {
const user = req && req.session ? req.session.decodedToken : null
const snap = await req.firebaseServer.database().ref('messages').once('value')
return { user, messages: snap.val() }
}
constructor (props) {
super(props)
this.state = {
user: this.props.user,
value: '',
messages: this.props.messages
}
this.addDbListener = this.addDbListener.bind(this)
this.handleChange = this.handleChange.bind(this)
this.handleSubmit = this.handleSubmit.bind(this)
}
componentDidMount () {
firebase.initializeApp(clientCredentials)
if (this.state.user) this.addDbListener()
firebase.auth().onAuthStateChanged(user => {
if (user) {
this.setState({ user: user })
return user.getToken()
.then((token) => {
// eslint-disable-next-line no-undef
return fetch('/api/login', {
method: 'POST',
// eslint-disable-next-line no-undef
headers: new Headers({ 'Content-Type': 'application/json' }),
credentials: 'same-origin',
body: JSON.stringify({ token })
})
}).then((res) => this.addDbListener())
} else {
this.setState({ user: null })
// eslint-disable-next-line no-undef
fetch('/api/logout', {
method: 'POST',
credentials: 'same-origin'
}).then(() => firebase.database().ref('messages').off())
}
})
|
addDbListener () {
firebase.database().ref('messages').on('value', snap => {
const messages = snap.val()
if (messages) this.setState({ messages })
})
}
handleChange (event) {
this.setState({ value: event.target.value })
}
handleSubmit (event) {
event.preventDefault()
const date = new Date().getTime()
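    // The timestamp doubles as the message id and the database key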
firebase.database().ref(`messages/${date}`).set({
id: date,
text: this.state.value
})
this.setState({ value: '' })
}
handleLogin () {
firebase.auth().signInWithPopup(new firebase.auth.GoogleAuthProvider())
}
handleLogout () {
firebase.auth().signOut()
}
render () {
const { user, value, messages } = this.state
return <div>
{
user
? <button onClick={this.handleLogout}>Logout</button>
: <button onClick={this.handleLogin}>Login</button>
}
{
user &&
<div>
<form onSubmit={this.handleSubmit}>
<input
type={'text'}
onChange={this.handleChange}
placeholder={'add message'}
value={value}
/>
</form>
<ul>
{
messages &&
Object.keys(messages).map(key => <li key={key}>{messages[key].text}</li>)
}
</ul>
</div>
}
</div>
}
}
|
}
|
main.rs
|
#![forbid(unsafe_code)]
#![deny(
clippy::all,
clippy::as_conversions,
clippy::float_arithmetic,
clippy::integer_arithmetic,
clippy::must_use_candidate
)]
#![warn(clippy::todo, clippy::dbg_macro)]
use epkv_server::config::Config;
use epkv_server::Server;
use epkv_utils::config::read_config_file;
use epkv_utils::tracing::setup_tracing;
use anyhow::Result;
use camino::Utf8PathBuf;
use clap::StructOpt;
use tracing::debug;
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[derive(Debug, clap::Parser)]
struct Opt {
#[clap(long)]
config: Utf8PathBuf,
}
fn main() -> Result<()> {
let opt = Opt::parse();
setup_tracing();
let config: Config = read_config_file(&opt.config)?;
debug!(?config);
run(config)
}
#[tokio::main]
async fn run(config: Config) -> Result<()>
|
{
Server::run(config).await
}
|
|
subs_test.go
|
package main
import (
"fmt"
)
func ExampleSubs() {
s := "GATATATGCATATACTT"
t := "ATAT"
	// Print all locations with indices starting at 1.
locs := Subs(s, t)
for i, loc := range locs {
fmt.Print(loc + 1)
if i != len(locs)-1 {
|
}
}
fmt.Println()
// Output: 2 4 10
}
|
fmt.Print(" ")
|
async_task.rs
|
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures::future::BoxFuture;
use futures::{AsyncRead, AsyncWrite, FutureExt};
use pin_project_lite::pin_project;
use crate::jvm::direct::PacketWriter;
use crate::jvm::AsyncJvmTask;
use crate::protocol::fncall::JvmInterface;
use crate::protocol::streams::AnonPipe;
/// A spawned JVM task.
pub struct Task {
pub(super) id: u32,
pub(super) stdout: Option<Stdout>,
pub(super) stderr: Option<Stdout>,
pub(super) stdin: Option<Stdin>,
pub(super) interface: JvmInterface<PacketWriter>,
}
impl AsyncJvmTask for Task {
type Stdout = Stdout;
type Stderr = Stdout;
type Stdin = Stdin;
type ExitStatus = ();
/// Wait for the task to exit.
fn wait(&mut self) -> BoxFuture<'_, io::Result<Self::ExitStatus>>
|
/// The standard output stream, if the process was spawned with
/// [`Stdio::Piped`], else None.
fn stdout(&mut self) -> &mut Option<Self::Stdout> {
&mut self.stdout
}
/// The standard error stream, if the process was spawned with
/// [`Stdio::Piped`], else None.
fn stderr(&mut self) -> &mut Option<Self::Stderr> {
&mut self.stderr
}
/// The standard input stream, if the process was spawned with
/// [`Stdio::Piped`], else None.
fn stdin(&mut self) -> &mut Option<Self::Stdin> {
&mut self.stdin
}
}
pin_project! {
pub struct Stdin {
#[pin]
pub inner: AnonPipe,
}
}
pin_project! {
pub struct Stdout {
#[pin]
pub inner: AnonPipe,
}
}
impl AsyncWrite for Stdin {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
let this = self.project();
this.inner.poll_write(cx, buf)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let this = self.project();
this.inner.poll_flush(cx)
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
let this = self.project();
this.inner.poll_close(cx)
}
}
impl AsyncRead for Stdout {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
let this = self.project();
this.inner.poll_read(cx, buf)
}
}
|
{
self.interface
.wait(self.id, None)
.map(|_timeout| Ok(()))
.boxed()
}
|
util.ts
|
import type { RawBody } from "@sveltejs/kit"
import type { StrictBody } from "@sveltejs/kit/types/hooks"
// LOGGING UTILITIES
export type LogLevel = 'ERROR' | 'INFO' | 'DEBUG'
const LOG_LEVEL = (process.env.LOG_LEVEL?.toUpperCase() || 'INFO') as LogLevel
const logLevels: { [k: string]: number } = {
ERROR: 3,
INFO: 2,
DEBUG: 1,
}
export function log(level: LogLevel, msg: string, data: any): void {
if (logLevels[level] && logLevels[level] >= logLevels[LOG_LEVEL]) {
console.log(JSON.stringify({ level, msg, data }))
}
}
// REQUEST TRANSFORMATION UTILITIES
export interface BodyInfo {
data: string
encoding: 'base64' | 'text'
}
const encoder = new TextEncoder()
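// Convert a BodyInfo back into the raw body SvelteKit expects: base64 payloads
// become a Uint8Array, plain text is UTF-8 encoded.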
export function
|
(body: BodyInfo): RawBody {
return body.encoding === 'base64'
? new Uint8Array(Buffer.from(body.data, 'base64'))
: encoder.encode(body.data)
}
export function fromStrictBody(body: StrictBody): BodyInfo {
if (body instanceof Uint8Array) {
return {
data: Buffer.from(body).toString('base64'),
encoding: 'base64',
}
}
return {
data: body,
encoding: 'text'
}
}
|
toRawBody
|
record.rs
|
// Copyright 2019 Palantir Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::Level;
use conjure_error::Error;
use erased_serde::Serialize;
/// Metadata of a log record.
#[derive(Clone)]
pub struct Metadata<'a> {
level: Level,
target: &'a str,
}
impl<'a> Metadata<'a> {
/// Returns a builder used to create new `Metadata` values.
#[inline]
pub fn builder() -> MetadataBuilder<'a> {
MetadataBuilder::new()
}
/// Returns the verbosity level of the metadata.
#[inline]
pub fn level(&self) -> Level {
self.level
}
/// Returns the target of the metadata.
#[inline]
pub fn target(&self) -> &'a str {
self.target
}
}
/// A builder for `Metadata` values.
pub struct MetadataBuilder<'a>(Metadata<'a>);
impl<'a> Default for MetadataBuilder<'a> {
fn default() -> MetadataBuilder<'a> {
MetadataBuilder::new()
}
}
impl<'a> MetadataBuilder<'a> {
/// Creates a new `MetadataBuilder` initialized to default values.
#[inline]
pub fn new() -> MetadataBuilder<'a> {
MetadataBuilder(Metadata {
level: Level::Info,
target: "",
})
}
/// Sets the builder's verbosity level.
///
/// Defaults to `Info`.
#[inline]
pub fn level(&mut self, level: Level) -> &mut MetadataBuilder<'a> {
self.0.level = level;
self
}
/// Sets the builder's target.
///
/// Defaults to `""`.
#[inline]
pub fn target(&mut self, target: &'a str) -> &mut MetadataBuilder<'a> {
self.0.target = target;
self
}
/// Builds a `Metadata` value.
#[inline]
pub fn build(&self) -> Metadata<'a> {
self.0.clone()
}
}
/// A log record.
#[derive(Clone)]
pub struct Record<'a> {
metadata: Metadata<'a>,
file: Option<&'a str>,
line: Option<u32>,
message: &'static str,
safe_params: &'a [(&'static str, &'a dyn Serialize)],
unsafe_params: &'a [(&'static str, &'a dyn Serialize)],
error: Option<&'a Error>,
}
impl<'a> Record<'a> {
/// Returns a `RecordBuilder` initialized to default values.
#[inline]
pub fn builder() -> RecordBuilder<'a> {
RecordBuilder::new()
}
/// Returns the record's metadata.
#[inline]
pub fn metadata(&self) -> &Metadata<'a> {
&self.metadata
}
/// Returns the record's verbosity level.
#[inline]
pub fn level(&self) -> Level {
self.metadata.level
}
/// Returns the record's target.
#[inline]
pub fn target(&self) -> &'a str {
self.metadata.target
}
/// Returns the file containing the code that created the record.
#[inline]
pub fn file(&self) -> Option<&'a str> {
self.file
}
/// Returns the line of the code that created the record.
#[inline]
pub fn line(&self) -> Option<u32> {
self.line
}
/// Returns the record's message.
#[inline]
pub fn message(&self) -> &'static str {
self.message
}
/// Returns the record's safe-loggable parameters.
#[inline]
pub fn safe_params(&self) -> &'a [(&'static str, &dyn Serialize)] {
self.safe_params
}
/// Returns the record's unsafe-loggable parameters.
#[inline]
pub fn unsafe_params(&self) -> &'a [(&'static str, &dyn Serialize)] {
self.unsafe_params
}
/// Returns the error associated with the record.
#[inline]
pub fn error(&self) -> Option<&'a Error> {
self.error
}
}
/// A builder for `Record` values.
pub struct RecordBuilder<'a>(Record<'a>);
impl<'a> Default for RecordBuilder<'a> {
fn default() -> RecordBuilder<'a> {
RecordBuilder::new()
}
}
impl<'a> RecordBuilder<'a> {
/// Creates a `RecordBuilder` initialized to default values.
#[inline]
pub fn new() -> RecordBuilder<'a> {
RecordBuilder(Record {
metadata: Metadata::builder().build(),
file: None,
line: None,
message: "",
safe_params: &[],
unsafe_params: &[],
error: None,
})
}
/// Sets the record's verbosity level.
///
/// Defaults to `Info`.
#[inline]
pub fn level(&mut self, level: Level) -> &mut RecordBuilder<'a> {
self.0.metadata.level = level;
self
}
/// Sets the record's target.
///
/// Defaults to `""`.
#[inline]
pub fn target(&mut self, target: &'a str) -> &mut RecordBuilder<'a> {
self.0.metadata.target = target;
self
}
/// Sets the record's source file.
///
/// Defaults to `None`.
#[inline]
pub fn file(&mut self, file: Option<&'a str>) -> &mut RecordBuilder<'a> {
self.0.file = file;
self
}
/// Sets the record's line.
///
/// Defaults to `None`.
#[inline]
pub fn line(&mut self, line: Option<u32>) -> &mut RecordBuilder<'a> {
self.0.line = line;
self
}
/// Sets the record's message.
///
/// Defaults to `""`.
#[inline]
pub fn message(&mut self, message: &'static str) -> &mut RecordBuilder<'a> {
self.0.message = message;
self
}
/// Sets the record's safe parameters.
///
/// Defaults to `[]`.
#[inline]
pub fn safe_params(
&mut self,
safe_params: &'a [(&'static str, &dyn Serialize)],
) -> &mut RecordBuilder<'a> {
self.0.safe_params = safe_params;
self
}
/// Sets the record's unsafe parameters.
#[inline]
pub fn unsafe_params(
&mut self,
unsafe_params: &'a [(&'static str, &dyn Serialize)],
) -> &mut RecordBuilder<'a> {
self.0.unsafe_params = unsafe_params;
self
}
/// Sets the record's error.
///
/// Defaults to `None`.
#[inline]
pub fn error(&mut self, error: Option<&'a Error>) -> &mut RecordBuilder<'a> {
self.0.error = error;
self
}
/// Creates a `Record`.
#[inline]
pub fn build(&self) -> Record<'a>
|
}
|
{
self.0.clone()
}
|
models.py
|
from django.db import models
class Person(models.Model):
|
class Document(models.Model):
myfile = models.FileField(upload_to="uploads")
|
age = models.IntegerField()
name = models.CharField(max_length=100)
|
fetch.rs
|
use proc_macro::TokenStream;
use proc_macro2::{Ident, Span};
use quote::{quote, ToTokens};
use syn::{
parse::{Parse, ParseStream},
parse_quote,
punctuated::Punctuated,
Attribute, Data, DataStruct, DeriveInput, Field, Fields,
};
use crate::bevy_ecs_path;
#[derive(Default)]
struct FetchStructAttributes {
pub is_mutable: bool,
pub derive_args: Punctuated<syn::NestedMeta, syn::token::Comma>,
}
static MUTABLE_ATTRIBUTE_NAME: &str = "mutable";
static DERIVE_ATTRIBUTE_NAME: &str = "derive";
mod field_attr_keywords {
syn::custom_keyword!(ignore);
}
pub static WORLD_QUERY_ATTRIBUTE_NAME: &str = "world_query";
pub fn derive_world_query_impl(ast: DeriveInput) -> TokenStream {
let visibility = ast.vis;
let mut fetch_struct_attributes = FetchStructAttributes::default();
for attr in &ast.attrs {
if !attr
.path
.get_ident()
.map_or(false, |ident| ident == WORLD_QUERY_ATTRIBUTE_NAME)
{
continue;
}
attr.parse_args_with(|input: ParseStream| {
let meta = input.parse_terminated::<syn::Meta, syn::token::Comma>(syn::Meta::parse)?;
for meta in meta {
let ident = meta.path().get_ident().unwrap_or_else(|| {
panic!(
"Unrecognized attribute: `{}`",
meta.path().to_token_stream()
)
});
if ident == MUTABLE_ATTRIBUTE_NAME {
if let syn::Meta::Path(_) = meta {
fetch_struct_attributes.is_mutable = true;
} else {
panic!(
"The `{}` attribute is expected to have no value or arguments",
MUTABLE_ATTRIBUTE_NAME
);
}
} else if ident == DERIVE_ATTRIBUTE_NAME {
if let syn::Meta::List(meta_list) = meta {
fetch_struct_attributes
.derive_args
.extend(meta_list.nested.iter().cloned());
} else {
panic!(
"Expected a structured list within the `{}` attribute",
DERIVE_ATTRIBUTE_NAME
);
}
} else {
panic!(
"Unrecognized attribute: `{}`",
meta.path().to_token_stream()
);
}
}
Ok(())
})
.unwrap_or_else(|_| panic!("Invalid `{}` attribute format", WORLD_QUERY_ATTRIBUTE_NAME));
}
let user_generics = ast.generics.clone();
let (user_impl_generics, user_ty_generics, user_where_clauses) = user_generics.split_for_impl();
let user_generics_with_world = {
let mut generics = ast.generics.clone();
generics.params.insert(0, parse_quote!('__w));
generics
};
let (user_impl_generics_with_world, user_ty_generics_with_world, user_where_clauses_with_world) =
user_generics_with_world.split_for_impl();
let struct_name = ast.ident.clone();
let item_struct_name = Ident::new(&format!("{}Item", struct_name), Span::call_site());
let read_only_item_struct_name = if fetch_struct_attributes.is_mutable {
Ident::new(&format!("{}ReadOnlyItem", struct_name), Span::call_site())
} else {
item_struct_name.clone()
};
let fetch_struct_name = Ident::new(&format!("{}Fetch", struct_name), Span::call_site());
let read_only_fetch_struct_name = if fetch_struct_attributes.is_mutable {
Ident::new(&format!("{}ReadOnlyFetch", struct_name), Span::call_site())
} else {
fetch_struct_name.clone()
};
let state_struct_name = Ident::new(&format!("{}State", struct_name), Span::call_site());
let fetch_type_alias = Ident::new("QueryFetch", Span::call_site());
let read_only_fetch_type_alias = Ident::new("ROQueryFetch", Span::call_site());
let item_type_alias = Ident::new("QueryItem", Span::call_site());
let read_only_item_type_alias = Ident::new("ROQueryItem", Span::call_site());
let fields = match &ast.data {
Data::Struct(DataStruct {
fields: Fields::Named(fields),
..
}) => &fields.named,
_ => panic!("Expected a struct with named fields"),
};
let mut ignored_field_attrs = Vec::new();
let mut ignored_field_visibilities = Vec::new();
let mut ignored_field_idents = Vec::new();
let mut ignored_field_types = Vec::new();
let mut field_attrs = Vec::new();
let mut field_visibilities = Vec::new();
let mut field_idents = Vec::new();
let mut field_types = Vec::new();
for field in fields.iter() {
let WorldQueryFieldInfo { is_ignored, attrs } = read_world_query_field_info(field);
let field_ident = field.ident.as_ref().unwrap().clone();
if is_ignored {
ignored_field_attrs.push(attrs);
ignored_field_visibilities.push(field.vis.clone());
ignored_field_idents.push(field_ident.clone());
ignored_field_types.push(field.ty.clone());
} else {
field_attrs.push(attrs);
field_visibilities.push(field.vis.clone());
field_idents.push(field_ident.clone());
field_types.push(field.ty.clone());
}
}
let derive_args = &fetch_struct_attributes.derive_args;
// `#[derive()]` is valid syntax
let derive_macro_call = quote! { #[derive(#derive_args)] };
let path = bevy_ecs_path();
let impl_fetch = |is_readonly: bool, fetch_struct_name: Ident, item_struct_name: Ident| {
let fetch_type_alias = if is_readonly {
&read_only_fetch_type_alias
} else
|
;
let item_type_alias = if is_readonly {
&read_only_item_type_alias
} else {
&item_type_alias
};
quote! {
#derive_macro_call
#[automatically_derived]
#visibility struct #item_struct_name #user_impl_generics_with_world #user_where_clauses_with_world {
#(#(#field_attrs)* #field_visibilities #field_idents: #path::query::#item_type_alias<'__w, #field_types>,)*
#(#(#ignored_field_attrs)* #ignored_field_visibilities #ignored_field_idents: #ignored_field_types,)*
}
#[doc(hidden)]
#visibility struct #fetch_struct_name #user_impl_generics_with_world #user_where_clauses_with_world {
#(#field_idents: #path::query::#fetch_type_alias::<'__w, #field_types>,)*
#(#ignored_field_idents: #ignored_field_types,)*
}
impl #user_impl_generics_with_world #path::query::Fetch<'__w>
for #fetch_struct_name #user_ty_generics_with_world #user_where_clauses_with_world {
type Item = #item_struct_name #user_ty_generics_with_world;
type State = #state_struct_name #user_ty_generics;
unsafe fn init(_world: &'__w #path::world::World, state: &Self::State, _last_change_tick: u32, _change_tick: u32) -> Self {
Self {
#(#field_idents:
#path::query::#fetch_type_alias::<'__w, #field_types>::init(
_world,
&state.#field_idents,
_last_change_tick,
_change_tick
),
)*
#(#ignored_field_idents: Default::default(),)*
}
}
const IS_DENSE: bool = true #(&& #path::query::#fetch_type_alias::<'__w, #field_types>::IS_DENSE)*;
const IS_ARCHETYPAL: bool = true #(&& #path::query::#fetch_type_alias::<'__w, #field_types>::IS_ARCHETYPAL)*;
/// SAFETY: we call `set_archetype` for each member that implements `Fetch`
#[inline]
unsafe fn set_archetype(
&mut self,
_state: &Self::State,
_archetype: &'__w #path::archetype::Archetype,
_tables: &'__w #path::storage::Tables
) {
#(self.#field_idents.set_archetype(&_state.#field_idents, _archetype, _tables);)*
}
/// SAFETY: we call `set_table` for each member that implements `Fetch`
#[inline]
unsafe fn set_table(&mut self, _state: &Self::State, _table: &'__w #path::storage::Table) {
#(self.#field_idents.set_table(&_state.#field_idents, _table);)*
}
/// SAFETY: we call `table_fetch` for each member that implements `Fetch`.
#[inline]
unsafe fn table_fetch(&mut self, _table_row: usize) -> Self::Item {
Self::Item {
#(#field_idents: self.#field_idents.table_fetch(_table_row),)*
#(#ignored_field_idents: Default::default(),)*
}
}
/// SAFETY: we call `archetype_fetch` for each member that implements `Fetch`.
#[inline]
unsafe fn archetype_fetch(&mut self, _archetype_index: usize) -> Self::Item {
Self::Item {
#(#field_idents: self.#field_idents.archetype_fetch(_archetype_index),)*
#(#ignored_field_idents: Default::default(),)*
}
}
#[allow(unused_variables)]
#[inline]
unsafe fn table_filter_fetch(&mut self, _table_row: usize) -> bool {
true #(&& self.#field_idents.table_filter_fetch(_table_row))*
}
#[allow(unused_variables)]
#[inline]
unsafe fn archetype_filter_fetch(&mut self, _archetype_index: usize) -> bool {
true #(&& self.#field_idents.archetype_filter_fetch(_archetype_index))*
}
}
}
};
let fetch_impl = impl_fetch(false, fetch_struct_name.clone(), item_struct_name.clone());
let state_impl = quote! {
#[doc(hidden)]
#visibility struct #state_struct_name #user_impl_generics #user_where_clauses {
#(#field_idents: <#field_types as #path::query::WorldQuery>::State,)*
#(#ignored_field_idents: #ignored_field_types,)*
}
// SAFETY: `update_component_access` and `update_archetype_component_access` are called for each item in the struct
unsafe impl #user_impl_generics #path::query::FetchState for #state_struct_name #user_ty_generics #user_where_clauses {
fn init(world: &mut #path::world::World) -> Self {
#state_struct_name {
#(#field_idents: <<#field_types as #path::query::WorldQuery>::State as #path::query::FetchState>::init(world),)*
#(#ignored_field_idents: Default::default(),)*
}
}
fn update_component_access(&self, _access: &mut #path::query::FilteredAccess<#path::component::ComponentId>) {
#(self.#field_idents.update_component_access(_access);)*
}
fn update_archetype_component_access(&self, _archetype: &#path::archetype::Archetype, _access: &mut #path::query::Access<#path::archetype::ArchetypeComponentId>) {
#(self.#field_idents.update_archetype_component_access(_archetype, _access);)*
}
fn matches_archetype(&self, _archetype: &#path::archetype::Archetype) -> bool {
true #(&& self.#field_idents.matches_archetype(_archetype))*
}
fn matches_table(&self, _table: &#path::storage::Table) -> bool {
true #(&& self.#field_idents.matches_table(_table))*
}
}
};
let read_only_fetch_impl = if fetch_struct_attributes.is_mutable {
impl_fetch(
true,
read_only_fetch_struct_name.clone(),
read_only_item_struct_name,
)
} else {
quote! {}
};
let read_only_asserts = if fetch_struct_attributes.is_mutable {
quote! {
// Double-check that the data fetched by `ROQueryFetch` is read-only.
// This is technically unnecessary as `<_ as WorldQueryGats<'world>>::ReadOnlyFetch: ReadOnlyFetch`
// but to protect against future mistakes we assert the assoc type implements `ReadOnlyFetch` anyway
#( assert_readonly::<#path::query::ROQueryFetch<'__w, #field_types>>(); )*
}
} else {
quote! {
// Statically checks that the safety guarantee of `ReadOnlyFetch` for `$fetch_struct_name` actually holds true.
// We need this to make sure that we don't compile `ReadOnlyFetch` if our struct contains nested `WorldQuery`
// members that don't implement it. I.e.:
// ```
// #[derive(WorldQuery)]
// pub struct Foo { a: &'static mut MyComponent }
// ```
#( assert_readonly::<#path::query::QueryFetch<'__w, #field_types>>(); )*
}
};
let tokens = TokenStream::from(quote! {
#fetch_impl
#state_impl
#read_only_fetch_impl
impl #user_impl_generics #path::query::WorldQuery for #struct_name #user_ty_generics #user_where_clauses {
type State = #state_struct_name #user_ty_generics;
fn shrink<'__wlong: '__wshort, '__wshort>(item: #path::query::#item_type_alias<'__wlong, Self>)
-> #path::query::#item_type_alias<'__wshort, Self> {
#item_struct_name {
#(
#field_idents : < #field_types as #path::query::WorldQuery> :: shrink( item.#field_idents ),
)*
#(
#ignored_field_idents: item.#ignored_field_idents,
)*
}
}
}
impl #user_impl_generics_with_world #path::query::WorldQueryGats<'__w> for #struct_name #user_ty_generics #user_where_clauses {
type Fetch = #fetch_struct_name #user_ty_generics_with_world;
type ReadOnlyFetch = #read_only_fetch_struct_name #user_ty_generics_with_world;
type _State = #state_struct_name #user_ty_generics;
}
/// SAFETY: each item in the struct is read only
unsafe impl #user_impl_generics_with_world #path::query::ReadOnlyFetch
for #read_only_fetch_struct_name #user_ty_generics_with_world #user_where_clauses_with_world {}
#[allow(dead_code)]
const _: () = {
fn assert_readonly<T>()
where
T: #path::query::ReadOnlyFetch,
{
}
// We generate a readonly assertion for every struct member.
fn assert_all #user_impl_generics_with_world () #user_where_clauses_with_world {
#read_only_asserts
}
};
// The original struct will most likely be left unused. As we don't want our users having
// to specify `#[allow(dead_code)]` for their custom queries, we are using this cursed
// workaround.
#[allow(dead_code)]
const _: () = {
fn dead_code_workaround #user_impl_generics (q: #struct_name #user_ty_generics) #user_where_clauses {
#(q.#field_idents;)*
#(q.#ignored_field_idents;)*
}
};
});
tokens
}
struct WorldQueryFieldInfo {
/// Has `#[fetch(ignore)]` or `#[filter_fetch(ignore)]` attribute.
is_ignored: bool,
/// All field attributes except for `world_query` ones.
attrs: Vec<Attribute>,
}
fn read_world_query_field_info(field: &Field) -> WorldQueryFieldInfo {
let is_ignored = field
.attrs
.iter()
.find(|attr| {
attr.path
.get_ident()
.map_or(false, |ident| ident == WORLD_QUERY_ATTRIBUTE_NAME)
})
.map_or(false, |attr| {
let mut is_ignored = false;
attr.parse_args_with(|input: ParseStream| {
if input
.parse::<Option<field_attr_keywords::ignore>>()?
.is_some()
{
is_ignored = true;
}
Ok(())
})
.unwrap_or_else(|_| {
panic!("Invalid `{}` attribute format", WORLD_QUERY_ATTRIBUTE_NAME)
});
is_ignored
});
let attrs = field
.attrs
.iter()
.filter(|attr| {
attr.path
.get_ident()
.map_or(true, |ident| ident != WORLD_QUERY_ATTRIBUTE_NAME)
})
.cloned()
.collect();
WorldQueryFieldInfo { is_ignored, attrs }
}
|
{
&fetch_type_alias
}
|
202104.py
|
from math import ceil
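# Split lst into consecutive chunks of length `size`; the last chunk may be shorter.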
def chunk(lst, size):
return list(
map(lambda x: lst[x * size:x * size + size],
list(range(0, ceil(len(lst) / size)))))
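# Return the index in str1 of the first character of str2 that also occurs in str1.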
def find_str_index(str1, str2):
if not str2:
return "str2 not none"
for x in str2:
if x in str1:
return str1.index(x)
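# Collect the start indices in s where the concatenation of words (in the given
# order and in reversed order) occurs as a substring.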
def find_sub_string(s, words):
if not words:
return []
tmp = []
str1 = ''
str2 = ''
for x in words:
str1 += x
if str1 in s:
tmp.append(s.index(str1))
words.reverse()
for x in words:
str2 += x
if str2 in s:
tmp.append(s.index(str2))
return tmp
def longest_valid_parentheses(s: str) -> int:
|
    for x in range(len(s)):
        if s[x] == left:
            stack.append(x)
        else:
            stack.pop()
            if not stack:
                stack.append(x)
            if stack:
                n = max(n, x - stack[-1])
    return n
def search(nums, target) -> int:
if target in nums:
return nums.index(target)
else:
return -1
def search_range(nums, target):
indices = [i for i, x in enumerate(nums) if x == target]
if not indices:
return [-1, -1]
return indices
def binary_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if l[mid] == item:
return mid
if l[mid] > item:
high = mid - 1
else:
low = mid + 1
return None
def bin_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if item == l[mid]:
return mid
if item > l[mid]:
low = mid + 1
else:
high = mid - 1
return None
def b_search(l, item):
low = 0
high = len(l) - 1
while low <= high:
mid = (low + high) // 2
if item == l[mid]:
return mid
if item > l[mid]:
low = mid + 1
else:
high = mid - 1
return None
def b_sort(l):
for index in range(len(l) - 1):
for k in range(len(l) - 1):
if l[k] > l[k + 1]:
l[k], l[k + 1] = l[k + 1], l[k]
return l
# Insertion sort
def i_sort():
pass
# Selection sort
def s_sort(l):
    for low in range(len(l) - 1):
        # swap the smallest remaining element into position `low`
        min_index = l.index(min(l[low:]), low)
        l[low], l[min_index] = l[min_index], l[low]
    return l
# Quick sort
def q_sort(l):
pass
# Recursive algorithm
# Match parentheses
def find_k(strings):
stack = []
count = 0
for s in strings:
if s == '(':
stack.append(s)
elif len(stack) > 0 and s == ')':
stack.pop()
count += 1
else:
return 0
return count * 2
def insert_index(l, target):
l.append(target)
l.sort()
return l.index(target)
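# Note: insert_index above appends `target` and re-sorts the list in place. For
# an already-sorted `l` (the usual "search insert position" setting) the
# standard-library bisect module gives the same index without mutating the
# input; a small sketch, not part of the original solution:
#
#   from bisect import bisect_left
#   def insert_index_bisect(l, target):
#       return bisect_left(l, target)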
def multiply(n1, n2):
return f"{eval(f'{n1}*{n2}')}"
if __name__ == '__main__':
a = find_str_index('hello', 'l')
b = find_sub_string("barfoothefoobarman", ["foo", "bar", 'aaa'])
l = [1, 2, 3, 4, 5, 6]
k = "(()()())()"
# c = longest_valid_parentheses("(()()()))")
# print(c)
nums = [4, 5, 6, 7, 0, 1, 2]
# target = 6
# s = search(nums,target)
# ss = search_range([5, 7, 7, 8, 8, 10], 18)
# print(ss)
# x = [1, 3, 5, 7, 8, 9]
# # bs = binary_search(x, 9)
# bs = bin_search(l, 4)
# b = b_search(x, 9)
# print(bs, b)
s = b_sort(nums)
print(s)
f = find_k(k)
print(f)
select = s_sort(nums)
print(select)
print(multiply("12", "12"))
# t = [1, 3, 5, 6]
# st = insert_index(t, 7)
# print(st)
|
left = '('
right = ')'
n = 0
stack = [-1]
|
AdaBoost.py
|
"""
@Filename: AdaptiveBoost.py
@Author: Diogo Ribeiro
@Create Date: 2019-05-03
@Update Date: 2019-05-03
@Description: Implement of Adaptive Boosting
"""
import numpy as np
import preProcess
import pickle
import random
import SVM
import math
class Adaboost:
def
|
(self, norm_type="Normalization", iterations=5, base_classifier="SVM"):
self.iterations = iterations
self.norm_type = norm_type
self.base_classifier = SVM.SVMClassifier()
self.prediction = None
self.probability = None
self.classifier_set = None
'''
Function: baseClassifier
Description: generate weak classifier
Input: train_data dataType: ndarray description: train_data
train_label dataType: ndarray description: train_label
w dataType: ndarray description: weight
Output: clf dataType: object description: weak classifier
weighted_error dataType: float description: weighted error
base_predictions dataType: object description: base predictions
'''
def baseClassifier(self, train_data, train_label, w):
sample_num = len(train_data)
error_index = np.ones([sample_num, 1])
clf = self.base_classifier
clf.train(train_data, train_label)
base_predictions = np.sign(clf.predict(train_data))
for i in range(sample_num):
if base_predictions[i] == train_label[i]:
error_index[i] = 0
weighted_error = np.dot(w.T, error_index)
return clf, weighted_error, base_predictions
'''
    Function:  updateAlpha
    Description: update alpha
Input: error dataType: float description: weighted error
Output: new_alpha dataType: float description: new alpha
'''
def updateAlpha(self, error):
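        # Standard AdaBoost coefficient: alpha_m = 1/2 * ln((1 - e_m) / e_m),
        # where e_m is the weighted error; the max(..., 10e-6) below guards
        # against division by zero when the weak classifier makes no errors.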
temp = (1.0 - error)/max(error, 10e-6)
new_alpha = 1/2 * math.log(temp, math.e)
return new_alpha
'''
Function: train
Description: train the model
Input: train_data dataType: ndarray description: features
train_label dataType: ndarray description: labels
Output: clf_set dataType: list description: classifiers set
'''
def train(self, train_data, train_label):
if self.norm_type == "Standardization":
train_data = preProcess.Standardization(train_data)
else:
train_data = preProcess.Normalization(train_data)
train_label = np.expand_dims(train_label, axis=1)
sample_num = len(train_data)
weak_classifier = []
# initialize weights
w = np.ones([sample_num, 1])
w = w/sample_num
# predictions
agg_predicts = np.zeros([sample_num, 1]) # aggregate value of prediction
# start train
for i in range(self.iterations):
base_clf, error, base_prediction = self.baseClassifier(train_data, train_label, w)
alpha = self.updateAlpha(error)
weak_classifier.append((alpha, base_clf))
# update parameters in page of 139 Eq.(8.4)
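            # i.e. w_{m+1,i} = w_{m,i} * exp(-alpha_m * y_i * G_m(x_i)) / Z_m,
            # where Z_m is the normalization term (the w.sum() below) and the
            # labels y_i are assumed to be in {-1, +1}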
expon = np.multiply(-1 * alpha * train_label, base_prediction)
w = np.multiply(w, np.exp(expon))
w = w/w.sum()
# calculate the total error rate
agg_predicts += alpha*base_prediction
error_rate = np.multiply(np.sign(agg_predicts) != train_label, np.ones([sample_num, 1]))
error_rate = error_rate.sum()/sample_num
if error_rate == 0:
break
self.classifier_set = weak_classifier
return weak_classifier
'''
Function: predict
Description: predict the testing set
    Input:  test_data        dataType: ndarray   description: features
            prob             dataType: bool      description: return probability of label
Output: prediction dataType: ndarray description: the prediction results for testing set
'''
    def predict(self, test_data, prob=False):
# Normalization
if self.norm_type == "Standardization":
test_data = preProcess.Standardization(test_data)
else:
test_data = preProcess.Normalization(test_data)
test_num = test_data.shape[0]
prediction = np.zeros([test_num, 1])
probability = np.zeros([test_num, 1])
for classifier in self.classifier_set:
alpha = classifier[0]
clf = classifier[1]
base_prediction = alpha * clf.predict(test_data)
probability += base_prediction
        prediction = np.sign(probability)
        self.prediction = prediction
        self.probability = probability
        if prob:
            return probability
        else:
            return prediction
'''
Function: accuracy
Description: show detection result
Input: test_label dataType: ndarray description: labels of test data
    Output: accuracy        dataType: float     description: detection accuracy
'''
def accuarcy(self, test_label):
test_label = np.expand_dims(test_label, axis=1)
prediction = self.prediction
accuarcy = sum(prediction == test_label)/len(test_label)
return accuarcy
'''
Function: save
Description: save the model as pkl
Input: filename dataType: str description: the path to save model
'''
    def save(self, filename):
        # pickle needs a binary file handle
        with open(filename, 'wb') as f:
            pickle.dump(self.classifier_set, f)
'''
Function: load
Description: load the model
Input: filename dataType: str description: the path to save model
Output: self dataType: obj description: the trained model
'''
    def load(self, filename):
        with open(filename, 'rb') as f:
            self.classifier_set = pickle.load(f)
        return self
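# A minimal usage sketch (an illustration, not part of the original module); it
# assumes the local `preProcess` and `SVM` modules expose the interfaces used
# above (Normalization/Standardization and an SVMClassifier with train/predict)
# and that labels are encoded as {-1, +1}:
#
#   import numpy as np
#   from AdaBoost import Adaboost
#
#   X_train = np.random.randn(100, 5)
#   y_train = np.sign(np.random.randn(100))   # random {-1, +1} labels
#   X_test = np.random.randn(20, 5)
#
#   model = Adaboost(iterations=5)
#   model.train(X_train, y_train)
#   predictions = model.predict(X_test)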
|
__init__
|
bots.rs
|
//=============================================================================
//
// WARNING: This file is AUTO-GENERATED
//
// Do not make changes directly to this file.
//
// If you would like to make a change to the library, please update the schema
// definitions at https://github.com/slack-rs/slack-api-schemas
//
// If you would like to make a change how the library was generated,
// please edit https://github.com/slack-rs/slack-rs-api/tree/master/codegen
//
//=============================================================================
pub use crate::mod_types::bots_types::*;
use crate::requests::SlackWebRequestSender;
/// Gets information about a bot user.
///
/// Wraps https://api.slack.com/methods/bots.info
pub async fn
|
<R>(
client: &R,
token: &str,
request: &InfoRequest<'_>,
) -> Result<InfoResponse, InfoError<R::Error>>
where
R: SlackWebRequestSender,
{
let params = vec![Some(("token", token)), request.bot.map(|bot| ("bot", bot))];
let params = params.into_iter().filter_map(|x| x).collect::<Vec<_>>();
let url = crate::get_slack_url_for_method("bots.info");
client
.send(&url, ¶ms[..])
.await
.map_err(InfoError::Client)
.and_then(|result| {
serde_json::from_str::<InfoResponse>(&result)
.map_err(|e| InfoError::MalformedResponse(result, e))
})
.and_then(|o| o.into())
}
|
info
|
index.js
|
export { useElapsedTime } from './useElapsedTime'
|
export { useIsomorphicLayoutEffect } from './useIsomorphicLayoutEffect'
|
|
lifecycle.js
|
/* @flow */
import config from '../config'
import Watcher from '../observer/watcher'
import { mark, measure } from '../util/perf'
import { createEmptyVNode } from '../vdom/vnode'
import { updateComponentListeners } from './events'
import { resolveSlots } from './render-helpers/resolve-slots'
import { toggleObserving } from '../observer/index'
import { pushTarget, popTarget } from '../observer/dep'
import {
warn,
noop,
remove,
emptyObject,
validateProp,
invokeWithErrorHandling
} from '../util/index'
export let activeInstance: any = null
export let isUpdatingChildComponent: boolean = false
export function setActiveInstance(vm: Component) {
const prevActiveInstance = activeInstance
activeInstance = vm
return () => {
activeInstance = prevActiveInstance
}
}
export function initLifecycle (vm: Component) {
  // at initialization time, only the root component and its parent exist
const options = vm.$options
// locate first non-abstract parent
let parent = options.parent
if (parent && !options.abstract) {
while (parent.$options.abstract && parent.$parent) {
parent = parent.$parent
}
parent.$children.push(vm)
}
  // define vm.$parent
  vm.$parent = parent
  // define vm.$root
  vm.$root = parent ? parent.$root : vm
  // define vm.$children
  vm.$children = []
  // define vm.$refs
  vm.$refs = {}
vm._watcher = null
vm._inactive = null
vm._directInactive = false
vm._isMounted = false
vm._isDestroyed = false
vm._isBeingDestroyed = false
}
export function lifecycleMixin (Vue: Class<Component>) {
  // _update -> converts the virtual DOM into the real DOM
Vue.prototype._update = function (vnode: VNode, hydrating?: boolean) {
const vm: Component = this
const prevEl = vm.$el
const prevVnode = vm._vnode
const restoreActiveInstance = setActiveInstance(vm)
vm._vnode = vnode
// Vue.prototype.__patch__ is injected in entry points
// based on the rendering backend used.
if (!prevVnode) {
// initial render
vm.$el = vm.__patch__(vm.$el, vnode, hydrating, false /* removeOnly */)
} else {
// updates
vm.$el = vm.__patch__(prevVnode, vnode)
}
restoreActiveInstance()
// update __vue__ reference
if (prevEl) {
prevEl.__vue__ = null
}
if (vm.$el) {
vm.$el.__vue__ = vm
}
// if parent is an HOC, update its $el as well
if (vm.$vnode && vm.$parent && vm.$vnode === vm.$parent._vnode) {
vm.$parent.$el = vm.$el
}
// updated hook is called by the scheduler to ensure that children are
// updated in a parent's updated hook.
}
  // force an update
Vue.prototype.$forceUpdate = function () {
const vm: Component = this
if (vm._watcher) {
      // re-run the _watcher (render watcher) update
vm._watcher.update()
}
}
  // destroy the instance
Vue.prototype.$destroy = function () {
const vm: Component = this
if (vm._isBeingDestroyed) {
return
}
callHook(vm, 'beforeDestroy')
vm._isBeingDestroyed = true
// remove self from parent
const parent = vm.$parent
if (parent && !parent._isBeingDestroyed && !vm.$options.abstract) {
remove(parent.$children, vm)
}
// teardown watchers
if (vm._watcher) {
vm._watcher.teardown()
}
let i = vm._watchers.length
while (i--) {
vm._watchers[i].teardown()
}
// remove reference from data ob
// frozen object may not have observer.
if (vm._data.__ob__) {
vm._data.__ob__.vmCount--
}
// call the last hook...
vm._isDestroyed = true
// invoke destroy hooks on current rendered tree
vm.__patch__(vm._vnode, null)
// fire destroyed hook
callHook(vm, 'destroyed')
// turn off all instance listeners.
vm.$off()
// remove __vue__ reference
if (vm.$el) {
vm.$el.__vue__ = null
}
// release circular reference (#6759)
if (vm.$vnode) {
vm.$vnode.parent = null
}
}
}
export function mountComponent (
vm: Component,
el: ?Element,
hydrating?: boolean
): Component {
vm.$el = el
if (!vm.$options.render) {
vm.$options.render = createEmptyVNode
if (process.env.NODE_ENV !== 'production') {
/* istanbul ignore if */
if ((vm.$options.template && vm.$options.template.charAt(0) !== '#') ||
vm.$options.el || el) {
warn(
'You are using the runtime-only build of Vue where the template ' +
'compiler is not available. Either pre-compile the templates into ' +
'render functions, or use the compiler-included build.',
vm
)
} else {
warn(
'Failed to mount component: template or render function not defined.',
vm
)
}
}
}
callHook(vm, 'beforeMount')
let updateComponent
|
updateComponent = () => {
const name = vm._name
const id = vm._uid
const startTag = `vue-perf-start:${id}`
const endTag = `vue-perf-end:${id}`
mark(startTag)
const vnode = vm._render()
mark(endTag)
measure(`vue ${name} render`, startTag, endTag)
mark(startTag)
vm._update(vnode, hydrating)
mark(endTag)
measure(`vue ${name} patch`, startTag, endTag)
}
} else {
updateComponent = () => {
vm._update(vm._render(), hydrating)
}
}
// we set this to vm._watcher inside the watcher's constructor
// since the watcher's initial patch may call $forceUpdate (e.g. inside child
// component's mounted hook), which relies on vm._watcher being already defined
new Watcher(vm, updateComponent, noop, {
before () {
if (vm._isMounted && !vm._isDestroyed) {
callHook(vm, 'beforeUpdate')
}
}
}, true /* isRenderWatcher */)
hydrating = false
// manually mounted instance, call mounted on self
// mounted is called for render-created child components in its inserted hook
if (vm.$vnode == null) {
vm._isMounted = true
callHook(vm, 'mounted')
}
return vm
}
export function updateChildComponent (
vm: Component,
propsData: ?Object,
listeners: ?Object,
parentVnode: MountedComponentVNode,
renderChildren: ?Array<VNode>
) {
if (process.env.NODE_ENV !== 'production') {
isUpdatingChildComponent = true
}
// determine whether component has slot children
// we need to do this before overwriting $options._renderChildren.
// check if there are dynamic scopedSlots (hand-written or compiled but with
// dynamic slot names). Static scoped slots compiled from template has the
// "$stable" marker.
const newScopedSlots = parentVnode.data.scopedSlots
const oldScopedSlots = vm.$scopedSlots
const hasDynamicScopedSlot = !!(
(newScopedSlots && !newScopedSlots.$stable) ||
(oldScopedSlots !== emptyObject && !oldScopedSlots.$stable) ||
(newScopedSlots && vm.$scopedSlots.$key !== newScopedSlots.$key)
)
// Any static slot children from the parent may have changed during parent's
// update. Dynamic scoped slots may also have changed. In such cases, a forced
// update is necessary to ensure correctness.
const needsForceUpdate = !!(
renderChildren || // has new static slots
vm.$options._renderChildren || // has old static slots
hasDynamicScopedSlot
)
vm.$options._parentVnode = parentVnode
vm.$vnode = parentVnode // update vm's placeholder node without re-render
if (vm._vnode) { // update child tree's parent
vm._vnode.parent = parentVnode
}
vm.$options._renderChildren = renderChildren
// update $attrs and $listeners hash
// these are also reactive so they may trigger child update if the child
// used them during render
vm.$attrs = parentVnode.data.attrs || emptyObject
vm.$listeners = listeners || emptyObject
// update props
if (propsData && vm.$options.props) {
toggleObserving(false)
const props = vm._props
const propKeys = vm.$options._propKeys || []
for (let i = 0; i < propKeys.length; i++) {
const key = propKeys[i]
const propOptions: any = vm.$options.props // wtf flow?
props[key] = validateProp(key, propOptions, propsData, vm)
}
toggleObserving(true)
// keep a copy of raw propsData
vm.$options.propsData = propsData
}
// update listeners
listeners = listeners || emptyObject
const oldListeners = vm.$options._parentListeners
vm.$options._parentListeners = listeners
updateComponentListeners(vm, listeners, oldListeners)
// resolve slots + force update if has children
if (needsForceUpdate) {
vm.$slots = resolveSlots(renderChildren, parentVnode.context)
vm.$forceUpdate()
}
if (process.env.NODE_ENV !== 'production') {
isUpdatingChildComponent = false
}
}
function isInInactiveTree (vm) {
while (vm && (vm = vm.$parent)) {
if (vm._inactive) return true
}
return false
}
export function activateChildComponent (vm: Component, direct?: boolean) {
if (direct) {
vm._directInactive = false
if (isInInactiveTree(vm)) {
return
}
} else if (vm._directInactive) {
return
}
if (vm._inactive || vm._inactive === null) {
vm._inactive = false
for (let i = 0; i < vm.$children.length; i++) {
activateChildComponent(vm.$children[i])
}
callHook(vm, 'activated')
}
}
export function deactivateChildComponent (vm: Component, direct?: boolean) {
if (direct) {
vm._directInactive = true
if (isInInactiveTree(vm)) {
return
}
}
if (!vm._inactive) {
vm._inactive = true
for (let i = 0; i < vm.$children.length; i++) {
deactivateChildComponent(vm.$children[i])
}
callHook(vm, 'deactivated')
}
}
export function callHook (vm: Component, hook: string) {
// #7573 disable dep collection when invoking lifecycle hooks
pushTarget()
const handlers = vm.$options[hook]
const info = `${hook} hook`
if (handlers) {
for (let i = 0, j = handlers.length; i < j; i++) {
invokeWithErrorHandling(handlers[i], vm, null, vm, info)
}
}
if (vm._hasHookEvent) {
vm.$emit('hook:' + hook)
}
popTarget()
}
|
/* istanbul ignore if */
if (process.env.NODE_ENV !== 'production' && config.performance && mark) {
|
test_share_manage.py
|
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from tempest import config
from tempest.lib.common.utils import data_utils
import testtools
from testtools import testcase as tc
from manila_tempest_tests.common import constants
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
CONF = config.CONF
@ddt.ddt
class ManageNFSShareTest(base.BaseSharesAdminTest):
protocol = 'nfs'
# NOTE(vponomaryov): be careful running these tests using generic driver
# because cinder volumes will stay attached to service Nova VM and
# won't be deleted.
@classmethod
@testtools.skipUnless(
CONF.share.run_manage_unmanage_tests,
"Manage/unmanage tests are disabled.")
def resource_setup(cls):
if cls.protocol not in CONF.share.enable_protocols:
message = "%s tests are disabled" % cls.protocol
raise cls.skipException(message)
utils.skip_if_manage_not_supported_for_version()
super(ManageNFSShareTest, cls).resource_setup()
# Create share type
cls.st_name = data_utils.rand_name("manage-st-name")
cls.extra_specs = {
'storage_protocol': CONF.share.capability_storage_protocol,
'driver_handles_share_servers': CONF.share.multitenancy_enabled,
}
cls.st = cls.create_share_type(
name=cls.st_name,
cleanup_in_class=True,
extra_specs=cls.extra_specs)
def
|
(self, is_public=False,
version=CONF.share.max_api_microversion,
check_manage=False):
utils.skip_if_manage_not_supported_for_version(version)
share = self._create_share_for_manage()
name = "Name for 'managed' share that had ID %s" % share['id']
description = "Description for 'managed' share"
# Unmanage share
self._unmanage_share_and_wait(share)
if check_manage:
# After 'unmanage' operation, share instance should be deleted.
# Assert not related to 'manage' test, but placed here for
# resource optimization.
share_instance_list = self.shares_v2_client.list_share_instances()
share_ids = [si['share_id'] for si in share_instance_list]
self.assertNotIn(share['id'], share_ids)
# Manage share
manage_params = {
'service_host': share['host'],
'export_path': share['export_locations'][0],
'protocol': share['share_proto'],
'share_type_id': self.st['share_type']['id'],
'name': name,
'description': description,
'is_public': is_public,
'version': version,
}
if CONF.share.multitenancy_enabled:
manage_params['share_server_id'] = share['share_server_id']
managed_share = self.shares_v2_client.manage_share(**manage_params)
# Add managed share to cleanup queue
self.method_resources.insert(
0, {'type': 'share', 'id': managed_share['id'],
'client': self.shares_client})
# Wait for success
self.shares_v2_client.wait_for_share_status(managed_share['id'],
constants.STATUS_AVAILABLE)
# Verify data of managed share
self.assertEqual(name, managed_share['name'])
self.assertEqual(description, managed_share['description'])
self.assertEqual(share['host'], managed_share['host'])
self.assertEqual(share['share_proto'], managed_share['share_proto'])
if utils.is_microversion_ge(version, "2.6"):
self.assertEqual(self.st['share_type']['id'],
managed_share['share_type'])
else:
self.assertEqual(self.st['share_type']['name'],
managed_share['share_type'])
if utils.is_microversion_ge(version, "2.8"):
self.assertEqual(is_public, managed_share['is_public'])
else:
self.assertFalse(managed_share['is_public'])
if utils.is_microversion_ge(version, "2.16"):
self.assertEqual(share['user_id'], managed_share['user_id'])
else:
self.assertNotIn('user_id', managed_share)
# Delete share
self._delete_share_and_wait(managed_share)
# Delete share server, since it can't be "auto-deleted"
if (CONF.share.multitenancy_enabled and
not CONF.share.share_network_id):
# For a pre-configured share_network_id, we don't
# delete the share server.
self._delete_share_server_and_wait(
managed_share['share_server_id'])
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_not_supported("2.5")
def test_manage_with_os_share_manage_url(self):
self._test_manage(version="2.5")
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_not_supported("2.8")
def test_manage_with_is_public_True(self):
self._test_manage(is_public=True, version="2.8")
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
@base.skip_if_microversion_not_supported("2.16")
def test_manage_show_user_id(self):
self._test_manage(version="2.16")
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
def test_manage(self):
self._test_manage(check_manage=True)
class ManageCIFSShareTest(ManageNFSShareTest):
protocol = 'cifs'
class ManageGLUSTERFSShareTest(ManageNFSShareTest):
protocol = 'glusterfs'
class ManageHDFSShareTest(ManageNFSShareTest):
protocol = 'hdfs'
class ManageCephFSShareTest(ManageNFSShareTest):
protocol = 'cephfs'
class ManageMapRFSShareTest(ManageNFSShareTest):
protocol = 'maprfs'
|
_test_manage
|
kendo.treelist.min.js
|
/**
* Kendo UI v2016.3.914 (http://www.telerik.com/kendo-ui)
* Copyright 2016 Telerik AD. All rights reserved.
*
* Kendo UI commercial licenses may be obtained at
* http://www.telerik.com/purchase/license-agreement/kendo-ui-complete
* If you do not own a commercial license, this file shall be governed by the trial license terms.
|
*/
!function(e,define){define("kendo.treelist.min",["kendo.dom.min","kendo.data.min","kendo.columnsorter.min","kendo.editable.min","kendo.window.min","kendo.filtermenu.min","kendo.selectable.min","kendo.resizable.min","kendo.treeview.draganddrop.min"],e)}(function(){return function(e,t){function i(e){return function(t){return t[e]}}function n(e){return function(t){return!e(t)}}function r(){return"none"!==this.style.display}function o(i){var n,r=i.find(">tr:not(.k-filter-row)"),o=function(){var t=e(this);return!t.hasClass("k-group-cell")&&!t.hasClass("k-hierarchy-cell")},s=e();return r.length>1&&(s=r.find("th").filter(o).filter(function(){return this.rowSpan>1})),s=s.add(r.last().find("th").filter(o)),n=kendo.attr("index"),s.sort(function(i,r){var o,s;return i=e(i),r=e(r),o=i.attr(n),s=r.attr(n),o===t&&(o=e(i).index()),s===t&&(s=e(r).index()),o=parseInt(o,10),s=parseInt(s,10),o>s?1:s>o?-1:0}),s}function s(e){var t,i,n=[],r=e.className;for(t=0,i=e.level;i>t;t++)n.push(m("span",{className:r}));return n}function a(e){var t,i,n,r=0;for(i=0,n=e.length;n>i;i++)t=e[i].style.width,t&&-1==t.indexOf("%")&&(r+=parseInt(t,10));return r}function d(e,t){var i,n,r,o;e=e[0],t=t[0],e.rows.length!==t.rows.length&&(i=e.offsetHeight,n=t.offsetHeight,i>n?(r=t.rows[t.rows.length-1],o=i-n):(r=e.rows[e.rows.length-1],o=n-i),r.style.height=r.offsetHeight+o+"px")}var l,h,c,u,f=kendo.data,p=e.extend,g=kendo.dom,m=g.element,_=g.text,k=g.html,b=kendo.ui,v=b.DataBoundWidget,w=f.DataSource,C=f.ObservableArray,y=f.Query,T=f.Model,H=e.proxy,x=e.map,I=e.grep,S=e.inArray,L=e.isPlainObject,N=Array.prototype.push,F="string",R="change",z="error",W="progress",E=".",M=".kendoTreeList",P="click",A="mousedown",D="edit",q="save",j="expand",B="collapse",O="remove",V="dataBinding",$="dataBound",U="cancel",J="filterMenuInit",Q="columnHide",G="columnShow",K="th.k-header",X="columnReorder",Y="columnResize",Z="columnMenuInit",ee="columnLock",te="columnUnlock",ie="parentId",ne="dragstart",re="drag",oe="drop",se="dragend",ae={wrapper:"k-treelist k-grid k-widget",header:"k-header",button:"k-button",alt:"k-alt",editCell:"k-edit-cell",group:"k-treelist-group",gridToolbar:"k-grid-toolbar",gridHeader:"k-grid-header",gridHeaderWrap:"k-grid-header-wrap",gridContent:"k-grid-content",gridContentWrap:"k-grid-content",gridFilter:"k-grid-filter",footerTemplate:"k-footer-template",loading:"k-i-loading",refresh:"k-i-refresh",retry:"k-request-retry",selected:"k-state-selected",status:"k-status",link:"k-link",withIcon:"k-with-icon",filterable:"k-filterable",icon:"k-icon",iconFilter:"k-i-filter",iconCollapse:"k-i-collapse",iconExpand:"k-i-expand",iconHidden:"k-i-none",iconPlaceHolder:"k-icon k-i-none",input:"k-input",dropPositions:"k-i-insert-top k-i-insert-bottom k-i-add k-i-insert-middle",dropTop:"k-i-insert-top",dropBottom:"k-i-insert-bottom",dropAdd:"k-i-add",dropMiddle:"k-i-insert-middle",dropDenied:"k-i-denied",dragStatus:"k-drag-status",dragClue:"k-drag-clue",dragClueText:"k-clue-text"},de={create:{imageClass:"k-i-add",className:"k-grid-add",methodName:"addRow"},createchild:{imageClass:"k-i-add",className:"k-grid-add",methodName:"addRow"},destroy:{imageClass:"k-i-delete",className:"k-grid-delete",methodName:"removeRow"},edit:{imageClass:"k-i-edit",className:"k-grid-edit",methodName:"editRow"},update:{imageClass:"k-i-update",className:"k-primary 
k-grid-update",methodName:"saveRow"},canceledit:{imageClass:"k-i-cancel",className:"k-grid-cancel",methodName:"_cancelEdit"},excel:{imageClass:"k-i-excel",className:"k-grid-excel",methodName:"saveAsExcel"},pdf:{imageClass:"k-i-pdf",className:"k-grid-pdf",methodName:"saveAsPDF"}},le=T.define({id:"id",parentId:ie,fields:{id:{type:"number"},parentId:{type:"number",nullable:!0}},init:function(e){T.fn.init.call(this,e),this._loaded=!1,this.parentIdField||(this.parentIdField=ie),this.parentId=this.get(this.parentIdField)},accept:function(e){T.fn.accept.call(this,e),this.parentId=this.get(this.parentIdField)},set:function(e,t,i){e==ie&&this.parentIdField!=ie&&(this[this.parentIdField]=t),T.fn.set.call(this,e,t,i),e==this.parentIdField&&(this.parentId=this.get(this.parentIdField))},loaded:function(e){return e===t?this._loaded:(this._loaded=e,t)},shouldSerialize:function(e){return T.fn.shouldSerialize.call(this,e)&&"_loaded"!==e&&"_error"!=e&&"_edit"!=e&&!("parentId"!==this.parentIdField&&"parentId"===e)}});le.parentIdField=ie,le.define=function(e,i){var n,r;return i===t&&(i=e,e=le),n=i.parentId||ie,i.parentIdField=n,r=T.define(e,i),n&&(r.parentIdField=n),r},l=w.extend({init:function(e){w.fn.init.call(this,p(!0,{},{schema:{modelBase:le,model:le}},e))},_createNewModel:function(e){var t={},i=e instanceof T;return i&&(t=e),t=w.fn._createNewModel.call(this,t),i||(e.parentId&&(e[t.parentIdField]=e.parentId),t.accept(e)),t},_shouldWrap:function(){return!0},_push:function(e,t){var i=w.fn._readData.call(this,e);i||(i=e),this[t](i)},_readData:function(e){var t=this.data();return e=w.fn._readData.call(this,e),this._replaceData(t.toJSON().concat(e),t),e instanceof C?e:t},_replaceData:function(e,t){var i,n=e.length;for(i=0;n>i;i++)t[i]=e[i];t.length=n},_readAggregates:function(e){var t=p(this._aggregateResult,this.reader.aggregates(e));return""in t&&(t[this._defaultParentId()]=t[""],delete t[""]),t},remove:function(e){var t=this._subtree(this._childrenMap(this.data()),e.id);this._removeItems(t),w.fn.remove.call(this,e)},_filterCallback:function(e){var t,i,n={},r=[],o=e.toArray();for(t=0;o.length>t;t++)for(i=o[t];i&&(n[i.id]||(n[i.id]=!0,r.push(i)),!n[i.parentId]);)n[i.parentId]=!0,i=this.parentNode(i),i&&r.push(i);return new y(r)},_subtree:function(e,t){var i,n,r=e[t]||[],o=this._defaultParentId();for(i=0,n=r.length;n>i;i++)r[i].id!==o&&(r=r.concat(this._subtree(e,r[i].id)));return r},_childrenMap:function(e){var t,i,n,r,o={};for(e=this._observeView(e),t=0;e.length>t;t++)i=e[t],n=i.id,r=i.parentId,o[n]=o[n]||[],o[r]=o[r]||[],o[r].push(i);return o},_calculateAggregates:function(e,t){var i,n,r,o,s,a;for(t=t||{},i={},s=t.filter,s&&(e=y.process(e,{filter:s,filterCallback:H(this._filterCallback,this)}).data),a=this._childrenMap(e),i[this._defaultParentId()]=new y(this._subtree(a,this._defaultParentId())).aggregate(t.aggregate),o=0;e.length>o;o++)n=e[o],r=this._subtree(a,n.id),i[n.id]=new y(r).aggregate(t.aggregate);return i},_queryProcess:function(e,t){var i,n,r,o,s,a,d;for(t=t||{},t.filterCallback=H(this._filterCallback,this),i=this._defaultParentId(),n=y.process(e,t),r=this._childrenMap(n.data),e=r[i]||[],s=0;e.length>s;s++)a=e[s],a.id!==i&&(d=r[a.id],o=!(!d||!d.length),a.loaded()||a.loaded(o||!a.hasChildren),(a.loaded()||a.hasChildren!==!0)&&(a.hasChildren=o),o&&(e=e.slice(0,s+1).concat(d,e.slice(s+1))));return n.data=e,n},_queueRequest:function(e,t){t.call(this)},_modelLoaded:function(e){var 
t=this.get(e);t.loaded(!0),t.hasChildren=this.childNodes(t).length>0},_modelError:function(e,t){this.get(e)._error=t},success:function(e,i){return i&&t!==i.id||(this._data=this._observe([])),w.fn.success.call(this,e,i)},load:function(t){var i="_query",n=this.options.serverSorting||this.options.serverPaging||this.options.serverFiltering||this.options.serverGrouping||this.options.serverAggregates,r=e.Deferred().resolve().promise();if(t.loaded()){if(n)return r}else t.hasChildren&&(i="read");return this[i]({id:t.id}).then(H(this._modelLoaded,this,t.id),H(this._modelError,this,t.id))},contains:function(e,t){for(var i=e.id;t;){if(t.parentId===i)return!0;t=this.parentNode(t)}return!1},_byParentId:function(e,t){var i,n,r=[],o=this.view();if(e===t)return[];for(n=0;o.length>n;n++)i=o.at(n),i.parentId==e&&r.push(i);return r},_defaultParentId:function(){return this.reader.model.fn.defaults[this.reader.model.parentIdField]},childNodes:function(e){return this._byParentId(e.id,this._defaultParentId())},rootNodes:function(){return this._byParentId(this._defaultParentId())},parentNode:function(e){return this.get(e.parentId)},level:function(e){var t=-1;e instanceof le||(e=this.get(e));do e=this.parentNode(e),t++;while(e);return t},filter:function(e){var i=w.fn.filter;return e===t?i.call(this,e):(i.call(this,e),t)}}),l.create=function(t){return e.isArray(t)?t={data:t}:t instanceof C&&(t={data:t.toJSON()}),t instanceof l?t:new l(t)},h=kendo.Observable.extend({init:function(e,t){kendo.Observable.fn.init.call(this),t=this.options=p(!0,{},this.options,t),this.element=e,this.bind(this.events,t),this.model=this.options.model,this.fields=this._fields(this.options.columns),this._initContainer(),this.createEditable()},events:[],_initContainer:function(){this.wrapper=this.element},createEditable:function(){var e=this.options;this.editable=new b.Editable(this.wrapper,{fields:this.fields,target:e.target,clearContainer:e.clearContainer,model:this.model})},_isEditable:function(e){return e.field&&this.model.editable(e.field)},_fields:function(e){var t,i,n,r=[];for(t=0,i=e.length;i>t;t++)n=e[t],this._isEditable(n)&&r.push({field:n.field,format:n.format,editor:n.editor});return r},end:function(){return this.editable.end()},close:function(){this.destroy()},destroy:function(){this.editable.destroy(),this.editable.element.find("["+kendo.attr("container-for")+"]").empty().end().removeAttr(kendo.attr("role")),this.model=this.wrapper=this.element=this.columns=this.editable=null}}),c=h.extend({init:function(e,t){h.fn.init.call(this,e,t),this._attachHandlers(),kendo.cycleForm(this.wrapper),this.open()},events:[U,q],options:{window:{modal:!0,resizable:!1,draggable:!0,title:"Edit",visible:!1}},_initContainer:function(){var t=this.options,i=[];this.wrapper=e('<div class="k-popup-edit-form"/>').attr(kendo.attr("uid"),this.model.uid).append('<div class="k-edit-form-container"/>'),t.template?(this._appendTemplate(i),this.fields=[]):this._appendFields(i),this._appendButtons(i),new g.Tree(this.wrapper.children()[0]).render(i),this.wrapper.appendTo(t.appendTo),this.window=new b.Window(this.wrapper,t.window)},_appendTemplate:function(e){var t=this.options.template;typeof t===F&&(t=window.unescape(t)),t=kendo.template(t)(this.model),e.push(k(t))},_appendFields:function(e){var t,i,n,r=this.options.columns;for(t=0,i=r.length;i>t;t++)n=r[t],n.command||(e.push(k('<div class="k-edit-label"><label for="'+n.field+'">'+(n.title||n.field||"")+"</label></div>")),e.push(this._isEditable(n)?k("<div "+kendo.attr("container-for")+'="'+n.field+'" 
class="k-edit-field"></div>'):m("div",{"class":"k-edit-field"},[this.options.fieldRenderer(n,this.model)])))},_appendButtons:function(e){e.push(m("div",{"class":"k-edit-buttons k-state-default"},this.options.commandRenderer()))},_attachHandlers:function(){var e=this._cancelProxy=H(this._cancel,this);this.wrapper.on(P+M,".k-grid-cancel",this._cancelProxy),this._saveProxy=H(this._save,this),this.wrapper.on(P+M,".k-grid-update",this._saveProxy),this.window.bind("close",function(t){t.userTriggered&&e(t)})},_dettachHandlers:function(){this._cancelProxy=null,this._saveProxy=null,this.wrapper.off(M)},_cancel:function(e){this.trigger(U,e)},_save:function(){this.trigger(q)},open:function(){this.window.center().open()},close:function(){this.window.bind("deactivate",H(this.destroy,this)).close()},destroy:function(){this.window.destroy(),this.window=null,this._dettachHandlers(),h.fn.destroy.call(this)}}),u=v.extend({init:function(t,i){if(v.fn.init.call(this,t,i),this._dataSource(this.options.dataSource),this._columns(),this._layout(),this._selectable(),this._sortable(),this._resizable(),this._filterable(),this._attachEvents(),this._toolbar(),this._scrollable(),this._reorderable(),this._columnMenu(),this._minScreenSupport(),this._draggable(),this.options.autoBind&&this.dataSource.fetch(),this._hasLockedColumns){var n=this;this.wrapper.addClass("k-grid-lockedcolumns"),this._resizeHandler=function(){n.resize()},e(window).on("resize"+M,this._resizeHandler)}kendo.notify(this)},_draggable:function(){var t=this.options.editable;t&&t.move&&(this._dragging=new kendo.ui.HierarchicalDragAndDrop(this.wrapper,{$angular:this.$angular,autoScroll:!0,filter:"tbody>tr",itemSelector:"tr",allowedContainers:this.wrapper,hintText:function(t){var i=function(){return e(this).text()},n="<span class='k-header k-drag-separator' />";return t.children("td").map(i).toArray().join(n)},contains:H(function(e,t){var i=this.dataItem(t),n=this.dataItem(e);return n==i||this.dataSource.contains(n,i)},this),itemFromTarget:function(e){var t=e.closest("tr");return{item:t,content:t}},dragstart:H(function(e){this.wrapper.addClass("k-treelist-dragging");var t=this.dataItem(e);return this.trigger(ne,{source:t})},this),drag:H(function(e){e.source=this.dataItem(e.source),this.trigger(re,e)},this),drop:H(function(e){return e.source=this.dataItem(e.source),e.destination=this.dataItem(e.destination),this.wrapper.removeClass("k-treelist-dragging"),this.trigger(oe,e)},this),dragend:H(function(e){var t=this.dataItem(e.destination),i=this.dataItem(e.source);i.set("parentId",t?t.id:null),e.source=i,e.destination=t,this.trigger(se,e)},this),reorderable:!1,dropHintContainer:function(e){return e.children("td:eq(1)")},dropPositionFrom:function(e){return e.prevAll(".k-i-none").length>0?"after":"before"}}))},itemFor:function(e){return"number"==typeof e&&(e=this.dataSource.get(e)),this.tbody.find("["+kendo.attr("uid")+"="+e.uid+"]")},_scrollable:function(){var t,i,n;this.options.scrollable&&(t=this.thead.closest(".k-grid-header-wrap"),i=e(this.lockedContent).bind("DOMMouseScroll"+M+" mousewheel"+M,H(this._wheelScroll,this)),this.content.bind("scroll"+M,function(){t.scrollLeft(this.scrollLeft),i.scrollTop(this.scrollTop)}),n=kendo.touchScroller(this.content),n&&n.movable&&(this._touchScroller=n,n.movable.bind("change",function(e){t.scrollLeft(-e.sender.x),i&&i.scrollTop(-e.sender.y)})))},_wheelScroll:function(t){if(!t.ctrlKey){var 
i=kendo.wheelDeltaY(t);i&&(t.preventDefault(),e(t.currentTarget).one("wheel"+M,!1),this.content.scrollTop(this.content.scrollTop()+-i))}},_progress:function(){var e=this.options.messages;this.tbody.find("tr").length||this._showStatus(kendo.template("<span class='#= className #' /> #: messages.loading #")({className:ae.icon+" "+ae.loading,messages:e}))},_error:function(e){this.dataSource.rootNodes().length||this._render({error:e})},refresh:function(e){e=e||{},"itemchange"==e.action&&this.editor||this.trigger(V)||(this._cancelEditor(),this._render(),this._adjustHeight(),this.trigger($))},_angularFooters:function(e){var t,i,n,r=this.dataSource.aggregates(),o=this._footerItems();for(t=0;o.length>t;t++)i=o.eq(t),n=r[i.attr("data-parentId")],this._angularFooter(e,i.find("td").get(),n)},_angularFooter:function(e,t,i){var n=this.columns;this.angular(e,function(){return{elements:t,data:x(n,function(e){return{column:e,aggregate:i&&i[e.field]}})}})},items:function(){return this._hasLockedColumns?this._items(this.tbody).add(this._items(this.lockedTable)):this._items(this.tbody)},_items:function(t){return t.find("tr").filter(function(){return!e(this).hasClass(ae.footerTemplate)})},_footerItems:function(){var t=this.tbody;return this._hasLockedColumns&&(t=t.add(this.lockedTable)),t.find("tr").filter(function(){return e(this).hasClass(ae.footerTemplate)})},dataItems:function(){var e,t,i,n=kendo.ui.DataBoundWidget.fn.dataItems.call(this);if(this._hasLockedColumns){for(e=n.length,t=Array(2*e),i=e;--i>=0;)t[i]=t[i+e]=n[i];n=t}return n},_showStatus:function(t){var i=this.element.find(".k-status"),n=e(this.content).add(this.lockedContent);i.length||(i=e("<div class='k-status' />").appendTo(this.element)),this._contentTree.render([]),this._hasLockedColumns&&this._lockedContentTree.render([]),n.hide(),i.html(t)},_hideStatus:function(){this.element.find(".k-status").remove(),e(this.content).add(this.lockedContent).show()},_adjustHeight:function(){var e,t,i=this.element,n=i.find(E+ae.gridContentWrap),r=i.find(E+ae.gridHeader),o=i.find(E+ae.gridToolbar),s=kendo.support.scrollbar();i.height(this.options.height),t=function(e){var t,i;return e[0].style.height?!0:(t=e.height(),e.height("auto"),i=e.height(),e.height(""),t!=i)},t(i)&&(e=i.height()-r.outerHeight()-o.outerHeight(),n.height(e),this._hasLockedColumns&&(s=this.table[0].offsetWidth>this.table.parent()[0].clientWidth?s:0,this.lockedContent.height(e-s)))},_resize:function(){this._applyLockedContainersWidth(),this._adjustHeight()},_minScreenSupport:function(){var t=this.hideMinScreenCols();t&&(this.minScreenResizeHandler=H(this.hideMinScreenCols,this),e(window).on("resize",this.minScreenResizeHandler))},hideMinScreenCols:function(){var e,i,n,r=this.columns,o=!1,s=window.innerWidth>0?window.innerWidth:screen.width;for(e=0;r.length>e;e++)i=r[e],n=i.minScreenWidth,n!==t&&null!==n&&(o=!0,n>s?this.hideColumn(i):this.showColumn(i));return o},destroy:function(){v.fn.destroy.call(this);var 
t=this.dataSource;t.unbind(R,this._refreshHandler),t.unbind(z,this._errorHandler),t.unbind(W,this._progressHandler),this._resizeHandler&&e(window).off("resize"+M,this._resizeHandler),this._dragging&&(this._dragging.destroy(),this._dragging=null),this.resizable&&(this.resizable.destroy(),this.resizable=null),this.reorderable&&(this.reorderable.destroy(),this.reorderable=null),this._draggableInstance&&this._draggableInstance.element&&(this._draggableInstance.destroy(),this._draggableInstance=null),this.minScreenResizeHandler&&e(window).off("resize",this.minScreenResizeHandler),this._destroyEditor(),this.element.off(M),this._touchScroller&&this._touchScroller.destroy(),this._autoExpandable=null,this._refreshHandler=this._errorHandler=this._progressHandler=this._dataSourceFetchProxy=null,this.thead=this.content=this.tbody=this.table=this.element=this.lockedHeader=this.lockedContent=null,this._statusTree=this._headerTree=this._contentTree=this._lockedHeaderColsTree=this._lockedContentColsTree=this._lockedHeaderTree=this._lockedContentTree=null},options:{name:"TreeList",columns:[],autoBind:!0,scrollable:!0,selectable:!1,sortable:!1,toolbar:null,height:null,columnMenu:!1,messages:{noRows:"No records to display",loading:"Loading...",requestFailed:"Request failed.",retry:"Retry",commands:{edit:"Edit",update:"Update",canceledit:"Cancel",create:"Add new record",createchild:"Add child record",destroy:"Delete",excel:"Export to Excel",pdf:"Export to PDF"}},excel:{hierarchy:!0},resizable:!1,filterable:!1,editable:!1,reorderable:!1},events:[R,D,q,O,j,B,V,$,U,ne,re,oe,se,J,Q,G,X,Y,Z,ee,te],_toggle:function(i,n){var r=e.Deferred().resolve().promise(),o=i.loaded();return i._error&&(i.expanded=!1,i._error=t),!o&&i.expanded?r:(t===n&&(n=!i.expanded),i.expanded=n,o||(r=this.dataSource.load(i).always(H(function(){this._render(),this._syncLockedContentHeight()},this))),this._render(),this._syncLockedContentHeight(),r)},expand:function(e){return this._toggle(this.dataItem(e),!0)},collapse:function(e){return this._toggle(this.dataItem(e),!1)},_toggleChildren:function(t){var i=e(t.currentTarget),n=this.dataItem(i),r=n.expanded?B:j;this.trigger(r,{model:n})||this._toggle(n),t.preventDefault()},_attachEvents:function(){var e=E+ae.iconCollapse+", ."+ae.iconExpand+", ."+ae.refresh,t=E+ae.retry;this.element.on(A+M,e,H(this._toggleChildren,this)).on(P+M,t,this._dataSourceFetchProxy).on(P+M,".k-button[data-command]",H(this._commandClick,this))},_commandByName:function(t){var i,n,r,o,s=this.columns,a=e.isArray(this.options.toolbar)?this.options.toolbar:[];if(t=t.toLowerCase(),de[t])return de[t];for(i=0;s.length>i;i++)if(r=s[i].command)for(n=0;r.length>n;n++)if(o=r[n].name,o&&o.toLowerCase()==t)return r[n];for(i=0;a.length>i;i++)if(o=a[i].name,o&&o.toLowerCase()==t)return a[i]},_commandClick:function(i){var n=e(i.currentTarget),r=n.attr("data-command"),o=this._commandByName(r),s=n.parentsUntil(this.wrapper,"tr");s=s.length?s:t,o&&(o.methodName?this[o.methodName](s):o.click&&o.click.call(this,i),i.preventDefault())},_ensureExpandableColumn:function(){var e,t;this._autoExpandable&&delete this._autoExpandable.expandable,e=I(this.columns,n(i("hidden"))),t=I(e,i("expandable")),this.columns.length&&!t.length&&(this._autoExpandable=e[0],e[0].expandable=!0)},_columns:function(){var e,t=this.options.columns||[];this.columns=x(t,function(e){return e="string"==typeof 
e?{field:e}:e,p({encoded:!0},e)}),e=this._lockedColumns(),e.length>0&&(this._hasLockedColumns=!0,this.columns=e.concat(this._nonLockedColumns())),this._ensureExpandableColumn(),this._columnTemplates(),this._columnAttributes()},_columnTemplates:function(){var e,t,i,n=this.columns;for(e=0,t=n.length;t>e;e++)i=n[e],i.template&&(i.template=kendo.template(i.template)),i.headerTemplate&&(i.headerTemplate=kendo.template(i.headerTemplate)),i.footerTemplate&&(i.footerTemplate=kendo.template(i.footerTemplate))},_columnAttributes:function(){function t(t){var i,n,r,o;if(t&&t.style)for(i=t.style.split(";"),t.style={},n=0;i.length>n;n++)r=i[n].split(":"),o=e.trim(r[0]),o&&(t.style[e.camelCase(o)]=e.trim(r[1]))}var i,n,r=this.columns;for(i=0,n=r.length;n>i;i++)t(r[i].attributes),t(r[i].headerAttributes)},_layout:function(){var e,t,i,n=this.columns,r=this.element,o="";this.wrapper=r.addClass(ae.wrapper),o="<div class='#= gridHeader #'>",this._hasLockedColumns&&(o+="<div class='k-grid-header-locked'><table role='grid'><colgroup></colgroup><thead role='rowgroup' /></table></div>"),o+="<div class='#= gridHeaderWrap #'><table role='grid'><colgroup></colgroup><thead role='rowgroup' /></table></div></div>",this._hasLockedColumns&&(o+="<div class='k-grid-content-locked'><table role='treegrid' tabindex='0'><colgroup></colgroup><tbody /></table></div>"),o+="<div class='#= gridContentWrap # k-auto-scrollable'><table role='treegrid' tabindex='0'><colgroup></colgroup><tbody /></table></div>",this.options.scrollable||(o="<table role='treegrid' tabindex='0'><colgroup></colgroup><thead class='#= gridHeader #' role='rowgroup' /><tbody /></table>"),this.options.toolbar&&(o="<div class='#= header # #= gridToolbar #' />"+o),r.append(kendo.template(o)(ae)+"<div class='k-status' />"),this.toolbar=r.find(E+ae.gridToolbar),e=r.find(E+ae.gridHeader).find("thead").addBack().filter("thead"),this.thead=e.last(),this.options.scrollable&&(t=kendo.support.isRtl(r),r.find("div."+ae.gridHeader).css(t?"padding-left":"padding-right",kendo.support.scrollbar())),i=r.find(E+ae.gridContentWrap),i.length?this.content=i:i=r,this.table=i.find(">table"),this.tbody=this.table.find(">tbody"),this._hasLockedColumns&&(this.lockedHeader=e.first().closest(".k-grid-header-locked"),this.lockedContent=r.find(".k-grid-content-locked"),this.lockedTable=this.lockedContent.children()),this._initVirtualTrees(),this._renderCols(),this._renderHeader(),this.angular("compile",function(){return{elements:e.find("th.k-header").get(),data:x(n,function(e){return{column:e}})}})},_initVirtualTrees:function(){this._headerColsTree=new g.Tree(this.thead.prev()[0]),this._contentColsTree=new g.Tree(this.tbody.prev()[0]),this._headerTree=new g.Tree(this.thead[0]),this._contentTree=new g.Tree(this.tbody[0]),this._statusTree=new g.Tree(this.element.children(".k-status")[0]),this.lockedHeader&&(this._lockedHeaderColsTree=new g.Tree(this.lockedHeader.find("colgroup")[0]),this._lockedContentColsTree=new g.Tree(this.lockedTable.find(">colgroup")[0]),this._lockedHeaderTree=new g.Tree(this.lockedHeader.find("thead")[0]),this._lockedContentTree=new g.Tree(this.lockedTable.find(">tbody")[0]))},_toolbar:function(){var t,i=this.options.toolbar,n=this.toolbar;i&&(e.isArray(i)?(t=this._buildCommands(i),new g.Tree(n[0]).render(t)):n.append(kendo.template(i)({})),this.angular("compile",function(){return{elements:n.get()}}))},_lockedColumns:function(){return I(this.columns,i("locked"))},_nonLockedColumns:function(){return I(this.columns,n(i("locked")))},_templateColumns:function(){return 
I(this.columns,i("template"))},_flushCache:function(){this.options.$angular&&this._templateColumns().length&&(this._contentTree.render([]),this._hasLockedColumns&&this._lockedContentTree.render([]))},_render:function(t){var i,n,r,o;t=t||{},i=this.options.messages,n=this.dataSource.rootNodes(),r=kendo.attr("uid"),o=this.select().removeClass("k-state-selected").map(function(t,i){return e(i).attr(r)}),this._absoluteIndex=0,this._angularItems("cleanup"),this._angularFooters("cleanup"),this._flushCache(),t.error?this._showStatus(kendo.template("#: messages.requestFailed # <button class='#= buttonClass #'>#: messages.retry #</button>")({buttonClass:[ae.button,ae.retry].join(" "),messages:i})):n.length?(this._hideStatus(),this._contentTree.render(this._trs({columns:this._nonLockedColumns(),aggregates:t.aggregates,selected:o,data:n,visible:!0,level:0})),this._hasLockedColumns&&(this._absoluteIndex=0,this._lockedContentTree.render(this._trs({columns:this._lockedColumns(),aggregates:t.aggregates,selected:o,data:n,visible:!0,level:0})))):this._showStatus(kendo.htmlEncode(i.noRows)),this._touchScroller&&this._touchScroller.contentResized(),this._muteAngularRebind(function(){this._angularItems("compile"),this._angularFooters("compile")}),this.items().filter(function(){return e.inArray(e(this).attr(r),o)>=0}).addClass("k-state-selected"),this._adjustRowsHeight()},_adjustRowsHeight:function(){var e,t,i,n,r,o,s,a,d,l,h,c,u,f;if(this._hasLockedColumns){for(e=this.table,t=this.lockedTable,i=e[0].rows,n=i.length,o=t[0].rows,s=e.add(t),a=s.length,d=[],l=this.lockedHeader.find("tr"),h=this.thead.find("tr"),l.add(h).height("auto").height(Math.max(l.height(),h.height())),r=0;n>r&&o[r];r++)i[r].style.height&&(i[r].style.height=o[r].style.height="");for(r=0;n>r&&o[r];r++)c=i[r].offsetHeight,u=o[r].offsetHeight,f=0,c>u?f=c:u>c&&(f=u),d.push(f);for(r=0;a>r;r++)s[r].style.display="none";for(r=0;n>r;r++)d[r]&&(i[r].style.height=o[r].style.height=d[r]+1+"px");for(r=0;a>r;r++)s[r].style.display=""}},_ths:function(e){var t,i,n,r,o,s,a,d,l=[];for(a=0,d=e.length;d>a;a++)t=e[a],n=[],r=[ae.header],i=t.headerTemplate?t.headerTemplate({}):t.title||t.field||"",s=t.headerTemplate?k(i):_(i),n.push(t.sortable?m("a",{href:"#",className:ae.link},[s]):s),o={"data-field":t.field,"data-title":t.title,style:t.hidden===!0?{display:"none"}:{},className:r.join(" "),role:"columnheader"},o=p(!0,{},o,t.headerAttributes),l.push(m("th",o,n));return l},_cols:function(e){var t,i,n,r=[];for(n=0;e.length>n;n++)e[n].hidden!==!0&&(t=e[n].width,i={},t&&0!==parseInt(t,10)&&(i.style={width:"string"==typeof t?t:t+"px"}),r.push(m("col",i)));return r},_renderCols:function(){var e=this._nonLockedColumns();this._headerColsTree.render(this._cols(e)),this.options.scrollable&&this._contentColsTree.render(this._cols(e)),this._hasLockedColumns&&(e=this._lockedColumns(),this._lockedHeaderColsTree.render(this._cols(e)),this._lockedContentColsTree.render(this._cols(e)))},_renderHeader:function(){var e=this._nonLockedColumns();this._headerTree.render([m("tr",{role:"row"},this._ths(e))]),this._hasLockedColumns&&(e=this._lockedColumns(),this._lockedHeaderTree.render([m("tr",{role:"row"},this._ths(e))]),this._applyLockedContainersWidth())},_applyLockedContainersWidth:function(){var 
e,t,i,n,r,o;this._hasLockedColumns&&(e=a(this.lockedHeader.find(">table>colgroup>col")),t=this.thead.parent(),i=a(t.find(">colgroup>col")),n=this.wrapper[0].clientWidth,r=kendo.support.scrollbar(),e>=n&&(e=n-3*r),this.lockedHeader.add(this.lockedContent).width(e),t.add(this.table).width(i),o=n-e-2,this.content.width(o),t.parent().width(o-r))},_trs:function(t){var i,n,r,o,s,a,d,l=[],h=t.level,c=t.data,u=this.dataSource,f=u.aggregates()||{},p=t.columns;for(a=0,d=c.length;d>a;a++)r=[],i=c[a],s=i.loaded()&&u.childNodes(i),o=s&&s.length,n={role:"row"},n[kendo.attr("uid")]=i.uid,o&&(n["aria-expanded"]=!!i.expanded),t.visible?(this._absoluteIndex%2!==0&&r.push(ae.alt),this._absoluteIndex++):n.style={display:"none"},e.inArray(i.uid,t.selected)>=0&&r.push(ae.selected),o&&r.push(ae.group),i._edit&&r.push("k-grid-edit-row"),n.className=r.join(" "),l.push(this._tds({model:i,attr:n,level:h},p,H(this._td,this))),o&&(l=l.concat(this._trs({columns:p,aggregates:f,selected:t.selected,visible:t.visible&&!!i.expanded,data:s,level:h+1})));return this._hasFooterTemplate()&&(n={className:ae.footerTemplate,"data-parentId":i.parentId},t.visible||(n.style={display:"none"}),l.push(this._tds({model:f[i.parentId],attr:n,level:h},p,this._footerTd))),l},_footerTd:function(t){var i=[],n=t.column,r=t.column.footerTemplate||e.noop,o=t.model[n.field]||{},a={role:"gridcell",style:n.hidden===!0?{display:"none"}:{}};return n.expandable&&(i=i.concat(s({level:t.level+1,className:ae.iconPlaceHolder}))),n.attributes&&p(a,n.attributes),i.push(k(r(o)||"")),m("td",a,i)},_hasFooterTemplate:function(){return!!I(this.columns,function(e){return e.footerTemplate}).length},_tds:function(e,t,i){var n,r,o,s=[];for(r=0,o=t.length;o>r;r++)n=t[r],s.push(i({model:e.model,column:n,level:e.level}));return m("tr",e.attr,s)},_td:function(e){var t,i=[],n=e.model,r=e.column,o={role:"gridcell",style:r.hidden===!0?{display:"none"}:{}};return n._edit&&r.field&&n.editable(r.field)?o[kendo.attr("container-for")]=r.field:(r.expandable&&(i=s({level:e.level,className:ae.iconPlaceHolder}),t=[ae.icon],t.push(n.hasChildren?n.expanded?ae.iconCollapse:ae.iconExpand:ae.iconHidden),n._error?t.push(ae.refresh):!n.loaded()&&n.expanded&&t.push(ae.loading),i.push(m("span",{className:t.join(" ")})),o.style["white-space"]="nowrap"),r.attributes&&p(!0,o,r.attributes),r.command?i=this._buildCommands(n._edit?["update","canceledit"]:r.command):i.push(this._cellContent(r,n))),m("td",o,i)},_cellContent:function(e,i){var n;return e.template?n=e.template(i):e.field&&(n=i.get(e.field),null!==n&&e.format&&(n=kendo.format(e.format,n))),null!==n&&t!==n||(n=""),e.template||!e.encoded?k(n):_(n)},_buildCommands:function(e){var t,i=[];for(t=0;e.length>t;t++)i.push(this._button(e[t]));return i},_button:function(e){var t=(e.name||e).toLowerCase(),i=this.options.messages.commands[t],n=[];return e=p({},de[t],{text:i},e),e.imageClass&&n.push(m("span",{className:["k-icon",e.imageClass].join(" ")})),m("button",{type:"button","data-command":t,className:["k-button k-button-icontext",e.className].join(" ")},n.concat([_(e.text||e.name)]))},_positionResizeHandle:function(i){var n,r,o,s=e(i.currentTarget),a=this.resizeHandle,d=s.position(),l=d.left,h=s.outerWidth(),c=s.closest("div"),u=i.clientX+e(window).scrollLeft(),f=this.options.columnResizeHandleWidth||3;return l+=c.scrollLeft(),a||(a=this.resizeHandle=e('<div class="k-resize-handle"><div class="k-resize-handle-inner" 
/></div>')),n=s.offset().left+h,(r=u>n-f&&n+f>u)?(c.append(a),a.show().css({top:d.top,left:l+h-f-1,height:s.outerHeight(),width:3*f}).data("th",s),o=this,a.off("dblclick"+M).on("dblclick"+M,function(){var t=s.index();e.contains(o.thead[0],s[0])&&(t+=I(o.columns,function(e){return e.locked&&!e.hidden}).length),o.autoFitColumn(t)}),t):(a.hide(),t)},autoFitColumn:function(t){var i,n,s,a,d,l,h,c,u,f,p,g,m,_,k,b,v,w=this,C=w.options,y=w.columns,T=kendo.support.browser,H=w.lockedHeader?o(w.lockedHeader.find(">table>thead")).filter(r).length:0;if(t="number"==typeof t?y[t]:L(t)?I(y,function(e){return e===t})[0]:I(y,function(e){return e.field===t})[0],t&&!t.hidden){for(i=S(t,y),a=t.locked,s=a?w.lockedHeader.children("table"):w.thead.parent(),n=s.find("[data-index='"+i+"']"),l=a?w.lockedTable:w.table,h=w.footer||e(),w.footer&&w.lockedContent&&(h=w.footer.children(a?".k-grid-footer-locked":".k-grid-footer-wrap")),c=h.find("table").first(),w.lockedHeader&&H>=i&&!a&&(i-=H),u=0;y.length>u&&y[u]!==t;u++)y[u].hidden&&i--;if(d=C.scrollable?s.find("col:not(.k-group-col):not(.k-hierarchy-col):eq("+i+")").add(l.children("colgroup").find("col:not(.k-group-col):not(.k-hierarchy-col):eq("+i+")")).add(c.find("colgroup").find("col:not(.k-group-col):not(.k-hierarchy-col):eq("+i+")")):l.children("colgroup").find("col:not(.k-group-col):not(.k-hierarchy-col):eq("+i+")"),f=s.add(l).add(c),p=n.outerWidth(),d.width(""),f.css("table-layout","fixed"),d.width("auto"),f.addClass("k-autofitting"),f.css("table-layout",""),g=Math.ceil(Math.max(n.outerWidth(),l.find("tr").eq(0).children("td:visible").eq(i).outerWidth(),c.find("tr").eq(0).children("td:visible").eq(i).outerWidth())),d.width(g),t.width=g,C.scrollable){for(m=s.find("col"),
k=0,b=0,v=m.length;v>b;b+=1){if(_=m[b].style.width,!_||-1!=_.indexOf("%")){k=0;break}k+=parseInt(_,10)}k&&f.each(function(){this.style.width=k+"px"})}T.msie&&8==T.version&&(f.css("display","inline-table"),setTimeout(function(){f.css("display","table")},1)),f.removeClass("k-autofitting"),w.trigger(Y,{column:t,oldWidth:p,newWidth:g}),w._applyLockedContainersWidth(),w._syncLockedContentHeight(),w._syncLockedHeaderHeight()}},_adjustLockedHorizontalScrollBar:function(){var e=this.table,t=e.parent(),i=e[0].offsetWidth>t[0].clientWidth?kendo.support.scrollbar():0;this.lockedContent.height(t.height()-i)},_syncLockedContentHeight:function(){this.lockedTable&&(this._touchScroller||this._adjustLockedHorizontalScrollBar(),this._adjustRowsHeight(this.table,this.lockedTable))},_syncLockedHeaderHeight:function(){var e,t;this.lockedHeader&&(e=this.lockedHeader.children("table"),t=this.thead.parent(),this._adjustRowsHeight(e,t),d(e,t))},_resizable:function(){if(this.options.resizable){this.resizable&&this.resizable.destroy();var t=this;e(this.lockedHeader).find("thead").add(this.thead).on("mousemove"+M,"th",e.proxy(this._positionResizeHandle,this)),this.resizable=new kendo.ui.Resizable(this.wrapper,{handle:".k-resize-handle",start:function(i){var n,r,o=e(i.currentTarget).data("th"),s="col:eq("+e.inArray(o[0],o.parent().children().filter(":visible"))+")";t.wrapper.addClass("k-grid-column-resizing"),t.lockedHeader&&e.contains(t.lockedHeader[0],o[0])?(n=t.lockedHeader,r=t.lockedTable):(n=t.thead.parent(),r=t.table),this.col=r.children("colgroup").find(s).add(n.find(s)),this.th=o,this.startLocation=i.x.location,this.columnWidth=o.outerWidth(),this.table=this.col.closest("table"),this.totalWidth=this.table.width()},resize:function(e){var t=11,i=e.x.location-this.startLocation;t>this.columnWidth+i&&(i=t-this.columnWidth),this.table.width(this.totalWidth+i),this.col.width(this.columnWidth+i)},resizeend:function(){var e,i,n;t.wrapper.removeClass("k-grid-column-resizing"),e=this.th.attr("data-field"),i=I(t.columns,function(t){return t.field==e}),n=Math.floor(this.th.outerWidth()),i[0].width=n,t._resize(),t._adjustRowsHeight(),t.trigger(Y,{column:i,oldWidth:this.columnWidth,newWidth:n}),this.table=this.col=this.th=null}})}},_sortable:function(){var t,i,n,r,o,s=this.columns,a=e(this.lockedHeader).add(this.thead).find("th"),d=kendo.attr("field"),l=this.options.sortable;if(l)for(r=0,o=a.length;o>r;r++)t=s[r],t.sortable!==!1&&!t.command&&t.field&&(n=a.eq(r),i=n.data("kendoColumnSorter"),i&&i.destroy(),n.attr(d,t.field).kendoColumnSorter(p({},l,t.sortable,{dataSource:this.dataSource})))},_filterable:function(){var t,i,n,r,o,s,a=e(this.lockedHeader).add(this.thead).find("th"),d=this.options.filterable;if(d&&!this.options.columnMenu)for(s=H(function(e){this.trigger(J,{field:e.field,container:e.container})},this),t=0,i=a.length;i>t;t++)n=this.columns[t],r=a.eq(t),o=r.data("kendoFilterMenu"),o&&o.destroy(),n.command||n.filterable===!1||r.kendoFilterMenu(p(!0,{},d,n.filterable,{dataSource:this.dataSource,init:s}))},_change:function(){this.trigger(R)},_selectable:function(){var e,i,n=this.options.selectable,r=this.table;n&&(n=kendo.ui.Selectable.parseOptions(n),this._hasLockedColumns&&(r=r.add(this.lockedTable),i=n.multiple&&n.cell),e=">tbody>tr:not(.k-footer-template)",n.cell&&(e+=">td"),this.selectable=new 
kendo.ui.Selectable(r,{filter:e,aria:!0,multiple:n.multiple,change:H(this._change,this),useAllItems:i,continuousItems:H(this._continuousItems,this,e,n.cell),relatedTarget:!n.cell&&this._hasLockedColumns?H(this._selectableTarget,this):t}))},_continuousItems:function(t,i){var n,r,o,s,a,d;if(this.lockedContent){for(n=e(t,this.lockedTable),r=e(t,this.table),o=i?this._lockedColumns().length:1,s=i?this.columns.length-o:1,a=[],d=0;n.length>d;d+=o)N.apply(a,n.slice(d,d+o)),N.apply(a,r.splice(0,s));return a}},_selectableTarget:function(t){var i,n,r,o=e();for(n=0,r=t.length;r>n;n++)i=this._relatedRow(t[n]),S(i[0],t)<0&&(o=o.add(i));return o},_relatedRow:function(t){var i,n,r=this.lockedTable;return t=e(t),r?(i=t.closest(this.table.add(this.lockedTable)),n=i.find(">tbody>tr").index(t),i=i[0]===this.table[0]?r:this.table,i.find(">tbody>tr").eq(n)):t},select:function(i){var n=this.selectable;return n?(t!==i&&(n.options.multiple||(n.clear(),i=i.first()),this._hasLockedColumns&&(i=i.add(e.map(i,H(this._relatedRow,this))))),n.value(i)):e()},clearSelection:function(){var e=this.select();e.length&&(this.selectable.clear(),this.trigger(R))},_dataSource:function(e){var t=this.dataSource;t&&(t.unbind(R,this._refreshHandler),t.unbind(z,this._errorHandler),t.unbind(W,this._progressHandler)),this._refreshHandler=H(this.refresh,this),this._errorHandler=H(this._error,this),this._progressHandler=H(this._progress,this),t=this.dataSource=l.create(e),t.bind(R,this._refreshHandler),t.bind(z,this._errorHandler),t.bind(W,this._progressHandler),this._dataSourceFetchProxy=H(function(){this.dataSource.fetch()},this)},setDataSource:function(e){this._dataSource(e),this._sortable(),this._filterable(),this._contentTree.render([]),this.options.autoBind&&this.dataSource.fetch()},dataItem:function(t){var i,n;return t instanceof le?t:(i=e(t).closest("tr"),n=this.dataSource.getByUid(i.attr(kendo.attr("uid"))))},editRow:function(e){var t;typeof e===F&&(e=this.tbody.find(e)),t=this.dataItem(e),t&&("popup"!=this._editMode()&&(t._edit=!0),this._cancelEditor(),this._render(),this._createEditor(t),this.trigger(D,{container:this.editor.wrapper,model:t}))},_cancelEdit:function(e){e=p(e,{container:this.editor.wrapper,model:this.editor.model}),this.trigger(U,e)||this.cancelRow()},cancelRow:function(){this._cancelEditor(),this._render()},saveRow:function(){var e,t=this.editor;t&&(e={model:t.model,container:t.wrapper},t.end()&&!this.trigger(q,e)&&this.dataSource.sync())},addRow:function(e){var i=this.editor,n=0,r={};if(!i||i.end())return e?(e instanceof le||(e=this.dataItem(e)),r[e.parentIdField]=e.id,n=this.dataSource.indexOf(e)+1,this.expand(e).then(H(this._insertAt,this,r,n)),t):(this._insertAt(r,n),t)},_insertAt:function(e,t){e=this.dataSource.insert(t,e);var i=this.itemFor(e);this.editRow(i)},removeRow:function(e){var t=this.dataItem(e),i={model:t,row:e};t&&!this.trigger(O,i)&&(this.dataSource.remove(t),this.dataSource.sync())},_cancelEditor:function(){var e,t=this.editor;t&&(e=t.model,this._destroyEditor(),this.dataSource.cancelChanges(e),e._edit=!1)},_destroyEditor:function(){this.editor&&(this.editor.close(),this.editor=null)},_createEditor:function(e){var t,i,n=this.itemFor(e);n=n.add(this._relatedRow(n)),t=this._editMode(),i={columns:this.columns,model:e,target:this,clearContainer:!1,template:this.options.editable.template},"inline"==t?this.editor=new h(n,i):(p(i,{window:this.options.editable.window,commandRenderer:H(function(){return 
this._buildCommands(["update","canceledit"])},this),fieldRenderer:this._cellContent,save:H(this.saveRow,this),cancel:H(this._cancelEdit,this),appendTo:this.wrapper}),this.editor=new c(n,i))},_editMode:function(){var e="inline",t=this.options.editable;return t!==!0&&(e="string"==typeof t?t:t.mode||e),e.toLowerCase()},hideColumn:function(e){this._toggleColumnVisibility(e,!0)},showColumn:function(e){this._toggleColumnVisibility(e,!1)},_toggleColumnVisibility:function(e,t){e=this._findColumn(e),e&&e.hidden!==t&&(e.hidden=t,this._ensureExpandableColumn(),this._renderCols(),this._renderHeader(),this._render(),this._adjustTablesWidth(),this.trigger(t?Q:G,{column:e}),t||e.width||this.table.add(this.thead.closest("table")).width(""))},_findColumn:function(e){return e="number"==typeof e?this.columns[e]:L(e)?I(this.columns,function(t){return t===e})[0]:I(this.columns,function(t){return t.field===e})[0]},_adjustTablesWidth:function(){var e,t,i,n=this.thead.prev().children(),r=0;for(e=0,t=n.length;t>e;e++){if(i=n[e].style.width,!i||-1!=i.indexOf("%")){r=0;break}r+=parseInt(i,10)}r&&this.table.add(this.thead.closest("table")).width(r)},_reorderable:function(){var t,i,n;this.options.reorderable&&(t=this.options.scrollable===!0,i=(t?".k-grid-header:first ":"table:first>.k-grid-header ")+K,n=this,this._draggableInstance=new b.Draggable(this.wrapper,{group:kendo.guid(),filter:i,hint:function(t){return e('<div class="k-header k-drag-clue" />').css({width:t.width(),paddingLeft:t.css("paddingLeft"),paddingRight:t.css("paddingRight"),lineHeight:t.height()+"px",paddingTop:t.css("paddingTop"),paddingBottom:t.css("paddingBottom")}).html(t.attr(kendo.attr("title"))||t.attr(kendo.attr("field"))||t.text()).prepend('<span class="k-icon k-drag-status k-i-denied" />')}}),this.reorderable=new b.Reorderable(this.wrapper,{draggable:this._draggableInstance,dragOverContainers:H(this._allowDragOverContainers,this),inSameContainer:function(t){return e(t.source).parent()[0]===e(t.target).parent()[0]},change:function(e){var t=e.newIndex,i=e.oldIndex,r="before"===e.position,o=n.columns[i];n.trigger(X,{newIndex:t,oldIndex:i,column:o}),n.reorderColumn(t,o,r)}}))},_allowDragOverContainers:function(e){return this.columns[e].lockable!==!1},reorderColumn:function(i,n,r){var o,s,a,d=this.columns,l=S(n,d),h=d[i],c=!!h.locked,u=this._nonLockedColumns().length;l!==i&&(c&&!n.locked&&1==u||!c&&n.locked&&d.length-u==1||(r===t&&(r=l>i),o=!!n.locked,o=o!=c,n.locked=c,d.splice(r?i:i+1,0,n),d.splice(i>l?l:l+1,1),this._renderCols(),s=e(this.lockedHeader).add(this.thead).find("th"),s.eq(l)[r?"insertBefore":"insertAfter"](s.eq(i)),a=this._headerTree.children[0].children,this._hasLockedColumns&&(a=this._lockedHeaderTree.children[0].children.concat(a)),a.splice(r?i:i+1,0,a[l]),a.splice(i>l?l:l+1,1),this._hasLockedColumns&&(this._lockedHeaderTree.children[0].children=a.splice(0,this._lockedColumns().length),this._headerTree.children[0].children=a),this._applyLockedContainersWidth(),this.refresh(),o&&(c?this.trigger(ee,{column:n}):this.trigger(te,{column:n}))))},lockColumn:function(e){var t,i=this.columns;e="number"==typeof e?i[e]:I(i,function(t){return t.field===e})[0],e&&!e.hidden&&(t=this._lockedColumns().length-1,this.reorderColumn(t,e,!1))},unlockColumn:function(e){var t,i=this.columns;e="number"==typeof e?i[e]:I(i,function(t){return t.field===e})[0],e&&!e.hidden&&(t=this._lockedColumns().length,this.reorderColumn(t,e,!0))},_columnMenu:function(){var 
t,i,n,r,o,s,a=e(this.lockedHeader).add(this.thead).find("th"),d=this.columns,l=this.options,h=l.columnMenu,c=H(this._columnMenuInit,this),u=this._lockedColumns().length;if(h)for("boolean"==typeof h&&(h={}),s=0;a.length>s;s++)t=d[s],t.field&&(i=a.eq(s).data("kendoColumnMenu"),i&&i.destroy(),r=!1,t.sortable!==!1&&h.sortable!==!1&&l.sortable!==!1&&(r=p({},l.sortable,{compare:(t.sortable||{}).compare})),o=!1,l.filterable&&t.filterable!==!1&&h.filterable!==!1&&(o=p({pane:this.pane},t.filterable,l.filterable)),n={dataSource:this.dataSource,values:t.values,columns:h.columns,sortable:r,filterable:o,messages:h.messages,owner:this,closeCallback:e.noop,init:c,pane:this.pane,lockedColumns:t.lockable!==!1&&u>0},l.$angular&&(n.$angular=l.$angular),a.eq(s).kendoColumnMenu(n))},_columnMenuInit:function(e){this.trigger(Z,{field:e.field,container:e.container})}}),kendo.ExcelMixin&&kendo.ExcelMixin.extend(u.prototype),kendo.PDFMixin&&(kendo.PDFMixin.extend(u.prototype),u.fn._drawPDF=function(t){var i=new e.Deferred;return this._drawPDFShadow({width:this.wrapper.width()},{avoidLinks:this.options.pdf.avoidLinks}).done(function(e){var n={page:e,pageNumber:1,progress:1,totalPages:1};t.notify(n),i.resolve(n.page)}).fail(function(e){i.reject(e)}),i}),p(!0,kendo.data,{TreeListDataSource:l,TreeListModel:le}),p(!0,kendo.ui,{TreeList:u}),b.plugin(u)}(window.kendo.jQuery),window.kendo},"function"==typeof define&&define.amd?define:function(e,t,i){(i||t)()});
//# sourceMappingURL=kendo.treelist.min.js.map
| |
main.rs
|
use grape_ml::math::Matrix;
fn
|
() {
let matrix_size = 2048;
let mut m1 = Matrix::new(matrix_size, matrix_size);
m1.fill_rand(0.0, 2.0);
let mut m2 = Matrix::new(matrix_size, matrix_size);
m2.fill_rand(0.0, 2.0);
let _r = m1.element_wise_mul(m2);
//assert!(r.len() > 0);
}
|
main
|
model.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=fixme, invalid-name, too-many-arguments, too-many-locals, too-many-lines
# pylint: disable=too-many-branches, too-many-statements
"""MXNet model module"""
from __future__ import absolute_import, print_function
import os
import time
import logging
import warnings
from collections import namedtuple
import numpy as np
from . import io
from . import ndarray as nd
from . import symbol as sym
from . import optimizer as opt
from . import metric
from . import kvstore as kvs
from .context import Context, cpu
from .initializer import Uniform
from .optimizer import get_updater
from .executor_manager import DataParallelExecutorManager, _check_arguments, _load_data
from .io import DataDesc
from .base import mx_real_t
BASE_ESTIMATOR = object
try:
from sklearn.base import BaseEstimator
BASE_ESTIMATOR = BaseEstimator
except ImportError:
SKLEARN_INSTALLED = False
# Parameter to pass to batch_end_callback
BatchEndParam = namedtuple('BatchEndParams',
['epoch',
'nbatch',
'eval_metric',
'locals'])
def _create_sparse_kvstore(kvstore):
"""Create kvstore assuming some parameters' storage types are row_sparse.
Parameters
----------
kvstore : KVStore or str
The kvstore.
Returns
-------
kvstore : KVStore
update_on_kvstore : bool. Always True.
"""
# always update on kvstore
update_on_kvstore = True
if isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
kv = kvs.create(kvstore)
else:
raise TypeError("Cannot create '%s' KVStore with row_sparse parameters. "
"The type must be KVStore or str." % kvstore)
return (kv, update_on_kvstore)
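# A minimal usage sketch (illustrative comment, not part of the original module):
# both a kvstore type string and an existing KVStore instance are accepted, and
# update_on_kvstore is always True for row_sparse parameters.
#
#     kv, update_on_kvstore = _create_sparse_kvstore('device')
#     assert update_on_kvstore
#     kv, update_on_kvstore = _create_sparse_kvstore(kv)  # re-passing a KVStore is fine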
def _create_kvstore(kvstore, num_device, arg_params):
"""Create kvstore
    This function selects and creates a proper kvstore when given the kvstore type.
Parameters
----------
kvstore : KVStore or str
The kvstore.
num_device : int
The number of devices
arg_params : dict of str to `NDArray`.
Model parameter, dict of name to `NDArray` of net's weights.
"""
update_on_kvstore = bool(int(os.getenv('MXNET_UPDATE_ON_KVSTORE', "1")))
if kvstore is None:
kv = None
elif isinstance(kvstore, kvs.KVStore):
kv = kvstore
elif isinstance(kvstore, str):
# create kvstore using the string type
if num_device == 1 and 'dist' not in kvstore:
# no need to use kv for single device and single machine
kv = None
else:
kv = kvs.create(kvstore)
if kvstore == 'local':
# automatically select a proper local
max_size = max(np.prod(param.shape) for param in
arg_params.values())
if max_size > 1024 * 1024 * 16:
update_on_kvstore = False
else:
raise TypeError('kvstore must be KVStore, str or None')
if kv is None:
update_on_kvstore = False
return (kv, update_on_kvstore)
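# Illustrative sketch of the selection logic above (hypothetical values, added as a
# comment only):
#
#     kv, update_on_kvstore = _create_kvstore('local', 1, arg_params)
#     # single device and a non-'dist' store -> kv is None, update_on_kvstore is False
#     kv, update_on_kvstore = _create_kvstore('dist_sync', 1, arg_params)
#     # a 'dist' store is always created, even for a single device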
def _initialize_kvstore(kvstore, param_arrays, arg_params, param_names, update_on_kvstore):
"""Initialize kvstore"""
for idx, param_on_devs in enumerate(param_arrays):
name = param_names[idx]
kvstore.init(name, arg_params[name])
if update_on_kvstore:
kvstore.pull(name, param_on_devs, priority=-idx)
def _update_params_on_kvstore_nccl(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on NCCL kvstore."""
valid_indices = [index for index, grad_list in
enumerate(grad_arrays) if grad_list[0] is not None]
valid_grad_arrays = [grad_arrays[i] for i in valid_indices]
valid_param_arrays = [param_arrays[i] for i in valid_indices]
valid_param_names = [param_names[i] for i in valid_indices]
size = len(valid_grad_arrays)
start = 0
# Use aggregation by default only with NCCL
default_batch = '16'
batch = int(os.getenv('MXNET_UPDATE_AGGREGATION_SIZE', default_batch))
while start < size:
end = start + batch if start + batch < size else size
# push gradient, priority is negative index
kvstore.push(valid_param_names[start:end], valid_grad_arrays[start:end], priority=-start)
# pull back the weights
kvstore.pull(valid_param_names[start:end], valid_param_arrays[start:end], priority=-start)
start = end
def _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names):
"""Perform update of param_arrays from grad_arrays on kvstore."""
for index, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the weights
kvstore.pull(name, arg_list, priority=-index)
def _update_params(param_arrays, grad_arrays, updater, num_device,
kvstore=None, param_names=None):
"""Perform update of param_arrays from grad_arrays not on kvstore."""
updates = [[] for _ in range(num_device)]
for i, pair in enumerate(zip(param_arrays, grad_arrays)):
arg_list, grad_list = pair
if grad_list[0] is None:
continue
index = i
if kvstore:
name = param_names[index]
# push gradient, priority is negative index
kvstore.push(name, grad_list, priority=-index)
# pull back the sum gradients, to the same locations.
kvstore.pull(name, grad_list, priority=-index)
for k, p in enumerate(zip(arg_list, grad_list)):
# faked an index here, to make optimizer create diff
# state for the same index but on diff devs, TODO(mli)
# use a better solution later
w, g = p
updates[k].append((index*num_device+k, g, w))
for dev_updates in updates:
# update params if param_arrays and grad_arrays are not empty
if dev_updates:
i, w, g = zip(*dev_updates)
updater(i, w, g)
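# Sketch of how the two update paths are dispatched (simplified from
# _train_multi_device below; the NCCL variant is omitted). With update_on_kvstore
# the store aggregates gradients and applies the optimizer itself; otherwise the
# store only sums gradients and the local `updater` applies the optimizer:
#
#     if update_on_kvstore:
#         _update_params_on_kvstore(param_arrays, grad_arrays, kvstore, param_names)
#     else:
#         _update_params(param_arrays, grad_arrays, updater=get_updater(optimizer),
#                        num_device=len(ctx), kvstore=kvstore, param_names=param_names)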
def _multiple_callbacks(callbacks, *args, **kwargs):
"""Sends args and kwargs to any configured callbacks.
This handles the cases where the 'callbacks' variable
is ``None``, a single function, or a list.
"""
if isinstance(callbacks, list):
for cb in callbacks:
cb(*args, **kwargs)
return
if callbacks:
callbacks(*args, **kwargs)
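# Illustrative calls (hypothetical callables, comment only): all three forms are
# handled by the helper above.
#
#     _multiple_callbacks(None, batch_end_params)                       # no-op
#     _multiple_callbacks(log_speed, batch_end_params)                  # single callable
#     _multiple_callbacks([log_speed, save_every_n], batch_end_params)  # list of callables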
def _train_multi_device(symbol, ctx, arg_names, param_names, aux_names,
arg_params, aux_params,
begin_epoch, end_epoch, epoch_size, optimizer,
kvstore, update_on_kvstore,
train_data, eval_data=None, eval_metric=None,
epoch_end_callback=None, batch_end_callback=None,
logger=None, work_load_list=None, monitor=None,
eval_end_callback=None,
eval_batch_end_callback=None, sym_gen=None):
"""Internal training function on multiple devices.
This function will also work for single device as well.
Parameters
----------
symbol : Symbol
The network configuration.
ctx : list of Context
The training devices.
arg_names: list of str
Name of all arguments of the network.
param_names: list of str
Name of all trainable parameters of the network.
aux_names: list of str
Name of all auxiliary states of the network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
begin_epoch : int
        The beginning training epoch.
end_epoch : int
The end training epoch.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : Optimizer
The optimization algorithm
train_data : DataIter
Training data iterator.
eval_data : DataIter
Validation data iterator.
eval_metric : EvalMetric
An evaluation function or a list of evaluation functions.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback : callable(BatchEndParams)
A callback that is invoked at end of each batch.
        This can be used to measure speed, get results from the evaluation metric, etc.
kvstore : KVStore
The KVStore.
update_on_kvstore : bool
        Whether or not to perform weight updating on kvstore.
logger : logging logger
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as ``ctx``.
monitor : Monitor, optional
Monitor installed to executor,
for monitoring outputs, weights, and gradients for debugging.
Notes
-----
- This function will inplace update the NDArrays in `arg_params` and `aux_states`.
"""
if logger is None:
logger = logging
executor_manager = DataParallelExecutorManager(symbol=symbol,
sym_gen=sym_gen,
ctx=ctx,
train_data=train_data,
param_names=param_names,
arg_names=arg_names,
aux_names=aux_names,
work_load_list=work_load_list,
logger=logger)
if monitor:
executor_manager.install_monitor(monitor)
executor_manager.set_params(arg_params, aux_params)
if not update_on_kvstore:
updater = get_updater(optimizer)
else:
kvstore.set_optimizer(optimizer)
if kvstore:
_initialize_kvstore(kvstore=kvstore,
param_arrays=executor_manager.param_arrays,
arg_params=arg_params,
param_names=executor_manager.param_names,
update_on_kvstore=update_on_kvstore)
# Now start training
train_data.reset()
for epoch in range(begin_epoch, end_epoch):
# Training phase
tic = time.time()
eval_metric.reset()
nbatch = 0
# Iterate over training data.
while True:
do_reset = True
for data_batch in train_data:
executor_manager.load_data_batch(data_batch)
if monitor is not None:
monitor.tic()
executor_manager.forward(is_train=True)
executor_manager.backward()
if update_on_kvstore:
if 'nccl' in kvstore.type:
_update_params_on_kvstore_nccl(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params_on_kvstore(executor_manager.param_arrays,
executor_manager.grad_arrays,
kvstore, executor_manager.param_names)
else:
_update_params(executor_manager.param_arrays,
executor_manager.grad_arrays,
updater=updater,
num_device=len(ctx),
kvstore=kvstore,
param_names=executor_manager.param_names)
if monitor is not None:
monitor.toc_print()
# evaluate at end, so we can lazy copy
executor_manager.update_metric(eval_metric, data_batch.label)
nbatch += 1
# batch callback (for print purpose)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=nbatch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
# this epoch is done possibly earlier
if epoch_size is not None and nbatch >= epoch_size:
do_reset = False
break
if do_reset:
logger.info('Epoch[%d] Resetting Data Iterator', epoch)
train_data.reset()
# this epoch is done
if epoch_size is None or nbatch >= epoch_size:
break
toc = time.time()
logger.info('Epoch[%d] Time cost=%.3f', epoch, (toc - tic))
if epoch_end_callback or epoch + 1 == end_epoch:
executor_manager.copy_to(arg_params, aux_params)
_multiple_callbacks(epoch_end_callback, epoch, symbol, arg_params, aux_params)
# evaluation
if eval_data:
eval_metric.reset()
eval_data.reset()
total_num_batch = 0
for i, eval_batch in enumerate(eval_data):
executor_manager.load_data_batch(eval_batch)
executor_manager.forward(is_train=False)
executor_manager.update_metric(eval_metric, eval_batch.label)
if eval_batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=epoch,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_batch_end_callback, batch_end_params)
total_num_batch += 1
if eval_end_callback is not None:
eval_end_params = BatchEndParam(epoch=epoch,
nbatch=total_num_batch,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(eval_end_callback, eval_end_params)
eval_data.reset()
# end of all epochs
def save_checkpoint(prefix, epoch, symbol, arg_params, aux_params, remove_amp_cast=True):
"""Checkpoint the model data into file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
The epoch number of the model.
symbol : Symbol
The input Symbol.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
remove_amp_cast : bool, optional
Whether to remove the amp_cast and amp_multicast operators, before saving the model.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if symbol is not None:
symbol.save('%s-symbol.json' % prefix, remove_amp_cast=remove_amp_cast)
save_dict = {('arg:%s' % k) : v.as_in_context(cpu()) for k, v in arg_params.items()}
save_dict.update({('aux:%s' % k) : v.as_in_context(cpu()) for k, v in aux_params.items()})
param_name = '%s-%04d.params' % (prefix, epoch)
nd.save(param_name, save_dict)
logging.info('Saved checkpoint to \"%s\"', param_name)
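# Example of the on-disk layout produced above (illustrative comment; 'mymodel' is a
# hypothetical prefix):
#
#     save_checkpoint('mymodel', 3, symbol, arg_params, aux_params)
#     # writes: mymodel-symbol.json  (network definition)
#     #         mymodel-0003.params  (arg:* and aux:* NDArrays)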
def load_params(prefix, epoch):
"""Load params from a file
"""
save_dict = nd.load("%s-%04d.params" % (prefix, epoch))
arg_params = {}
aux_params = {}
if not save_dict:
logging.warning("Params file '%s' is empty", '%s-%04d.params' % (prefix, epoch))
return (arg_params, aux_params)
for k, v in save_dict.items():
tp, name = k.split(":", 1)
if tp == "arg":
arg_params[name] = v
if tp == "aux":
aux_params[name] = v
return (arg_params, aux_params)
def load_checkpoint(prefix, epoch):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
Epoch number of model we would like to load.
Returns
-------
symbol : Symbol
The symbol configuration of computation network.
arg_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray
Model parameter, dict of name to NDArray of net's auxiliary states.
Notes
-----
- Symbol will be loaded from ``prefix-symbol.json``.
- Parameters will be loaded from ``prefix-epoch.params``.
"""
symbol = sym.load('%s-symbol.json' % prefix)
arg_params, aux_params = load_params(prefix, epoch)
return (symbol, arg_params, aux_params)
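# Round-trip sketch (illustrative comment): the same prefix/epoch used for saving is
# all that is needed to restore the symbol and parameters.
#
#     net, arg_params, aux_params = load_checkpoint('mymodel', 3)
#     # reads mymodel-symbol.json and mymodel-0003.params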
from .callback import LogValidationMetricsCallback # pylint: disable=wrong-import-position
class FeedForward(BASE_ESTIMATOR):
"""Model class of MXNet for training and predicting feedforward nets.
This class is designed for a single-data single output supervised network.
Parameters
----------
symbol : Symbol
The symbol configuration of computation network.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi GPU training, pass in a list of gpu contexts.
num_epoch : int, optional
        Training parameter, number of training epochs.
epoch_size : int, optional
        Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
Training parameter, name or optimizer object for training.
initializer : initializer function, optional
Training parameter, the initialization scheme used.
numpy_batch_size : int, optional
The batch size of training data.
Only needed when input array is numpy.
arg_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's weights.
aux_params : dict of str to NDArray, optional
Model parameter, dict of name to NDArray of net's auxiliary states.
allow_extra_params : boolean, optional
        Whether to allow extra parameters that are not needed by the symbol
        to be passed by ``aux_params`` and ``arg_params``.
If this is True, no error will be thrown when ``aux_params`` and ``arg_params``
contain more parameters than needed.
begin_epoch : int, optional
        The beginning training epoch.
kwargs : dict
The additional keyword arguments passed to optimizer.
"""
def __init__(self, symbol, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd',
initializer=Uniform(0.01),
numpy_batch_size=128,
arg_params=None, aux_params=None,
allow_extra_params=False,
begin_epoch=0,
**kwargs):
warnings.warn(
'\033[91mmxnet.model.FeedForward has been deprecated. ' + \
'Please use mxnet.mod.Module instead.\033[0m',
DeprecationWarning, stacklevel=2)
if isinstance(symbol, sym.Symbol):
self.symbol = symbol
self.sym_gen = None
else:
assert(callable(symbol))
self.symbol = None
self.sym_gen = symbol
# model parameters
self.arg_params = arg_params
self.aux_params = aux_params
self.allow_extra_params = allow_extra_params
self.argument_checked = False
if self.sym_gen is None:
self._check_arguments()
# basic configuration
if ctx is None:
ctx = [cpu()]
elif isinstance(ctx, Context):
ctx = [ctx]
self.ctx = ctx
# training parameters
self.num_epoch = num_epoch
self.epoch_size = epoch_size
self.kwargs = kwargs.copy()
self.optimizer = optimizer
self.initializer = initializer
self.numpy_batch_size = numpy_batch_size
# internal helper state
self._pred_exec = None
self.begin_epoch = begin_epoch
def _check_arguments(self):
"""verify the argument of the default symbol and user provided parameters"""
if self.argument_checked:
return
assert(self.symbol is not None)
self.argument_checked = True
# check if symbol contain duplicated names.
_check_arguments(self.symbol)
# rematch parameters to delete useless ones
if self.allow_extra_params:
if self.arg_params:
arg_names = set(self.symbol.list_arguments())
self.arg_params = {k : v for k, v in self.arg_params.items()
if k in arg_names}
if self.aux_params:
aux_names = set(self.symbol.list_auxiliary_states())
self.aux_params = {k : v for k, v in self.aux_params.items()
if k in aux_names}
@staticmethod
def _is_data_arg(name):
"""Check if name is a data argument."""
return name.endswith('data') or name.endswith('label')
def _init_params(self, inputs, overwrite=False):
"""Initialize weight parameters and auxiliary states."""
inputs = [x if isinstance(x, DataDesc) else DataDesc(*x) for x in inputs]
input_shapes = {item.name: item.shape for item in inputs}
arg_shapes, _, aux_shapes = self.symbol.infer_shape(**input_shapes)
assert arg_shapes is not None
input_dtypes = {item.name: item.dtype for item in inputs}
arg_dtypes, _, aux_dtypes = self.symbol.infer_type(**input_dtypes)
assert arg_dtypes is not None
arg_names = self.symbol.list_arguments()
input_names = input_shapes.keys()
param_names = [key for key in arg_names if key not in input_names]
aux_names = self.symbol.list_auxiliary_states()
param_name_attrs = [x for x in zip(arg_names, arg_shapes, arg_dtypes)
if x[0] in param_names]
arg_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in param_name_attrs}
aux_name_attrs = [x for x in zip(aux_names, aux_shapes, aux_dtypes)
if x[0] in aux_names]
aux_params = {k : nd.zeros(shape=s, dtype=t)
for k, s, t in aux_name_attrs}
for k, v in arg_params.items():
if self.arg_params and k in self.arg_params and (not overwrite):
arg_params[k][:] = self.arg_params[k][:]
else:
self.initializer(k, v)
for k, v in aux_params.items():
if self.aux_params and k in self.aux_params and (not overwrite):
aux_params[k][:] = self.aux_params[k][:]
else:
self.initializer(k, v)
self.arg_params = arg_params
self.aux_params = aux_params
return (arg_names, list(param_names), aux_names)
def __getstate__(self):
this = self.__dict__.copy()
this['_pred_exec'] = None
return this
def __setstate__(self, state):
self.__dict__.update(state)
def _init_predictor(self, input_shapes, type_dict=None):
"""Initialize the predictor module for running prediction."""
shapes = {name: self.arg_params[name].shape for name in self.arg_params}
shapes.update(dict(input_shapes))
if self._pred_exec is not None:
arg_shapes, _, _ = self.symbol.infer_shape(**shapes)
assert arg_shapes is not None, "Incomplete input shapes"
pred_shapes = [x.shape for x in self._pred_exec.arg_arrays]
if arg_shapes == pred_shapes:
return
# for now only use the first device
pred_exec = self.symbol.simple_bind(
self.ctx[0], grad_req='null', type_dict=type_dict, **shapes)
pred_exec.copy_params_from(self.arg_params, self.aux_params)
_check_arguments(self.symbol)
self._pred_exec = pred_exec
def _init_iter(self, X, y, is_train):
"""Initialize the iterator given input."""
if isinstance(X, (np.ndarray, nd.NDArray)):
if y is None:
if is_train:
raise ValueError('y must be specified when X is numpy.ndarray')
y = np.zeros(X.shape[0])
if not isinstance(y, (np.ndarray, nd.NDArray)):
raise TypeError('y must be ndarray when X is numpy.ndarray')
if X.shape[0] != y.shape[0]:
raise ValueError("The numbers of data points and labels not equal")
if y.ndim == 2 and y.shape[1] == 1:
y = y.flatten()
if y.ndim != 1:
raise ValueError("Label must be 1D or 2D (with 2nd dimension being 1)")
if is_train:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size),
shuffle=is_train, last_batch_handle='roll_over')
else:
return io.NDArrayIter(X, y, min(X.shape[0], self.numpy_batch_size), shuffle=False)
if not isinstance(X, io.DataIter):
raise TypeError('X must be DataIter, NDArray or numpy.ndarray')
return X
def _init_eval_iter(self, eval_data):
"""Initialize the iterator given eval_data."""
if eval_data is None:
return eval_data
if isinstance(eval_data, (tuple, list)) and len(eval_data) == 2:
if eval_data[0] is not None:
if eval_data[1] is None and isinstance(eval_data[0], io.DataIter):
return eval_data[0]
input_data = (np.array(eval_data[0]) if isinstance(eval_data[0], list)
else eval_data[0])
input_label = (np.array(eval_data[1]) if isinstance(eval_data[1], list)
else eval_data[1])
return self._init_iter(input_data, input_label, is_train=True)
else:
raise ValueError("Eval data is NONE")
if not isinstance(eval_data, io.DataIter):
raise TypeError('Eval data must be DataIter, or ' \
'NDArray/numpy.ndarray/list pair (i.e. tuple/list of length 2)')
return eval_data
def predict(self, X, num_batch=None, return_data=False, reset=True):
"""Run the prediction, always only use one device.
Parameters
----------
X : mxnet.DataIter
num_batch : int or None
The number of batch to run. Go though all batches if ``None``.
Returns
-------
y : numpy.ndarray or a list of numpy.ndarray if the network has multiple outputs.
The predicted value of the output.
"""
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
batch_size = X.batch_size
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
output_list = [[] for _ in range(len(self._pred_exec.outputs))]
if return_data:
data_list = [[] for _ in X.provide_data]
label_list = [[] for _ in X.provide_label]
i = 0
for batch in X:
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
padded = batch.pad
real_size = batch_size - padded
for o_list, o_nd in zip(output_list, self._pred_exec.outputs):
o_list.append(o_nd[0:real_size].asnumpy())
if return_data:
for j, x in enumerate(batch.data):
data_list[j].append(x[0:real_size].asnumpy())
for j, x in enumerate(batch.label):
label_list[j].append(x[0:real_size].asnumpy())
i += 1
if num_batch is not None and i == num_batch:
break
outputs = [np.concatenate(x) for x in output_list]
if len(outputs) == 1:
outputs = outputs[0]
if return_data:
data = [np.concatenate(x) for x in data_list]
label = [np.concatenate(x) for x in label_list]
if len(data) == 1:
data = data[0]
if len(label) == 1:
label = label[0]
return outputs, data, label
else:
return outputs
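    # Usage sketch (illustrative comment; assumes `model` is a fitted FeedForward and
    # `val_iter` is a DataIter):
    #
    #     y_prob = model.predict(val_iter)
    #     y_prob, data, label = model.predict(val_iter, return_data=True)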
def score(self, X, eval_metric='acc', num_batch=None, batch_end_callback=None, reset=True):
"""Run the model given an input and calculate the score
as assessed by an evaluation metric.
Parameters
----------
X : mxnet.DataIter
eval_metric : metric.metric
The metric for calculating score.
num_batch : int or None
The number of batches to run. Go though all batches if ``None``.
Returns
-------
s : float
The final score.
"""
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
X = self._init_iter(X, None, is_train=False)
if reset:
X.reset()
data_shapes = X.provide_data
data_names = [x[0] for x in data_shapes]
type_dict = dict((key, value.dtype) for (key, value) in self.arg_params.items())
for x in X.provide_data:
if isinstance(x, DataDesc):
type_dict[x.name] = x.dtype
else:
type_dict[x[0]] = mx_real_t
self._init_predictor(data_shapes, type_dict)
data_arrays = [self._pred_exec.arg_dict[name] for name in data_names]
for i, batch in enumerate(X):
if num_batch is not None and i == num_batch:
break
_load_data(batch, data_arrays)
self._pred_exec.forward(is_train=False)
eval_metric.update(batch.label, self._pred_exec.outputs)
if batch_end_callback is not None:
batch_end_params = BatchEndParam(epoch=0,
nbatch=i,
eval_metric=eval_metric,
locals=locals())
_multiple_callbacks(batch_end_callback, batch_end_params)
return eval_metric.get()[1]
def fit(self, X, y=None, eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None, kvstore='local', logger=None,
work_load_list=None, monitor=None, eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None):
"""Fit the model.
Parameters
----------
X : DataIter, or numpy.ndarray/NDArray
Training data. If `X` is a `DataIter`, the name or (if name not available)
the position of its outputs should match the corresponding variable
names defined in the symbolic graph.
y : numpy.ndarray/NDArray, optional
Training set label.
If X is ``numpy.ndarray`` or `NDArray`, `y` is required to be set.
While y can be 1D or 2D (with 2nd dimension as 1), its first dimension must be
the same as `X`, i.e. the number of data points and labels should be equal.
eval_data : DataIter or numpy.ndarray/list/NDArray pair
If eval_data is numpy.ndarray/list/NDArray pair,
it should be ``(valid_data, valid_label)``.
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. This could be the name of evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for purposes of printing.
kvstore: KVStore or str, optional
The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'
            Defaults to 'local'; often no need to change for a single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
Note
----
KVStore behavior
        - 'local', multiple devices on a single machine, will automatically choose the best type.
- 'dist_sync', multiple machines communicating via BSP.
- 'dist_async', multiple machines with asynchronous communication.
"""
data = self._init_iter(X, y, is_train=True)
eval_data = self._init_eval_iter(eval_data)
if self.sym_gen:
self.symbol = self.sym_gen(data.default_bucket_key) # pylint: disable=no-member
self._check_arguments()
self.kwargs["sym"] = self.symbol
arg_names, param_names, aux_names = \
self._init_params(data.provide_data+data.provide_label)
# setup metric
if not isinstance(eval_metric, metric.EvalMetric):
eval_metric = metric.create(eval_metric)
# create kvstore
(kvstore, update_on_kvstore) = _create_kvstore(
kvstore, len(self.ctx), self.arg_params)
param_idx2name = {}
if update_on_kvstore:
|
else:
for i, n in enumerate(param_names):
for k in range(len(self.ctx)):
param_idx2name[i*len(self.ctx)+k] = n
self.kwargs["param_idx2name"] = param_idx2name
        # init optimizer
if isinstance(self.optimizer, str):
batch_size = data.batch_size
if kvstore and 'dist' in kvstore.type and '_async' not in kvstore.type:
batch_size *= kvstore.num_workers
optimizer = opt.create(self.optimizer,
rescale_grad=(1.0/batch_size),
**(self.kwargs))
        elif isinstance(self.optimizer, opt.Optimizer):
            # bind the user-supplied optimizer before reading idx2name from it
            optimizer = self.optimizer
            if not optimizer.idx2name:
                optimizer.idx2name = param_idx2name.copy()
# do training
_train_multi_device(self.symbol, self.ctx, arg_names, param_names, aux_names,
self.arg_params, self.aux_params,
begin_epoch=self.begin_epoch, end_epoch=self.num_epoch,
epoch_size=self.epoch_size,
optimizer=optimizer,
train_data=data, eval_data=eval_data,
eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore, update_on_kvstore=update_on_kvstore,
logger=logger, work_load_list=work_load_list, monitor=monitor,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback,
sym_gen=self.sym_gen)
def save(self, prefix, epoch=None, remove_amp_cast=True):
"""Checkpoint the model checkpoint into file.
You can also use `pickle` to do the job if you only work on Python.
The advantage of `load` and `save` (as compared to `pickle`) is that
the resulting file can be loaded from other MXNet language bindings.
        One can also directly `load`/`save` from/to cloud storage (S3, HDFS).
Parameters
----------
prefix : str
Prefix of model name.
remove_amp_cast : bool, optional
Whether to remove the amp_cast and amp_multicast operators, before saving the model.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
if epoch is None:
epoch = self.num_epoch
assert epoch is not None
save_checkpoint(prefix, epoch, self.symbol, self.arg_params, self.aux_params, remove_amp_cast=remove_amp_cast)
@staticmethod
def load(prefix, epoch, ctx=None, **kwargs):
"""Load model checkpoint from file.
Parameters
----------
prefix : str
Prefix of model name.
epoch : int
epoch number of model we would like to load.
ctx : Context or list of Context, optional
The device context of training and prediction.
kwargs : dict
Other parameters for model, including `num_epoch`, optimizer and `numpy_batch_size`.
Returns
-------
model : FeedForward
The loaded model that can be used for prediction.
Notes
-----
- ``prefix-symbol.json`` will be saved for symbol.
- ``prefix-epoch.params`` will be saved for parameters.
"""
symbol, arg_params, aux_params = load_checkpoint(prefix, epoch)
return FeedForward(symbol, ctx=ctx,
arg_params=arg_params, aux_params=aux_params,
begin_epoch=epoch,
**kwargs)
@staticmethod
def create(symbol, X, y=None, ctx=None,
num_epoch=None, epoch_size=None, optimizer='sgd', initializer=Uniform(0.01),
eval_data=None, eval_metric='acc',
epoch_end_callback=None, batch_end_callback=None,
kvstore='local', logger=None, work_load_list=None,
eval_end_callback=LogValidationMetricsCallback(),
eval_batch_end_callback=None, **kwargs):
"""Functional style to create a model.
This function is more consistent with functional
languages such as R, where mutation is not allowed.
Parameters
----------
symbol : Symbol
The symbol configuration of a computation network.
X : DataIter
Training data.
y : numpy.ndarray, optional
If `X` is a ``numpy.ndarray``, `y` must be set.
ctx : Context or list of Context, optional
The device context of training and prediction.
To use multi-GPU training, pass in a list of GPU contexts.
num_epoch : int, optional
            The number of training epochs.
epoch_size : int, optional
            Number of batches in an epoch. By default, it is set to
``ceil(num_train_examples / batch_size)``.
optimizer : str or Optimizer, optional
The name of the chosen optimizer, or an optimizer object, used for training.
initializer : initializer function, optional
The initialization scheme used.
eval_data : DataIter or numpy.ndarray pair
If `eval_set` is ``numpy.ndarray`` pair, it should
be (`valid_data`, `valid_label`).
eval_metric : metric.EvalMetric or str or callable
The evaluation metric. Can be the name of an evaluation metric
or a custom evaluation function that returns statistics
based on a minibatch.
epoch_end_callback : callable(epoch, symbol, arg_params, aux_states)
A callback that is invoked at end of each epoch.
This can be used to checkpoint model each epoch.
batch_end_callback: callable(epoch)
A callback that is invoked at end of each batch for print purposes.
kvstore: KVStore or str, optional
            The KVStore or a string kvstore type: 'local', 'dist_sync', 'dist_async'.
Defaults to 'local', often no need to change for single machine.
logger : logging logger, optional
When not specified, default logger will be used.
work_load_list : list of float or int, optional
The list of work load for different devices,
in the same order as `ctx`.
"""
model = FeedForward(symbol, ctx=ctx, num_epoch=num_epoch,
epoch_size=epoch_size,
optimizer=optimizer, initializer=initializer, **kwargs)
model.fit(X, y, eval_data=eval_data, eval_metric=eval_metric,
epoch_end_callback=epoch_end_callback,
batch_end_callback=batch_end_callback,
kvstore=kvstore,
logger=logger,
work_load_list=work_load_list,
eval_end_callback=eval_end_callback,
eval_batch_end_callback=eval_batch_end_callback)
return model
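    # Functional-style sketch (illustrative comment; `softmax`, `train_iter` and
    # `val_iter` are hypothetical):
    #
    #     model = FeedForward.create(softmax, X=train_iter, eval_data=val_iter,
    #                                ctx=[cpu()], num_epoch=10, optimizer='sgd',
    #                                learning_rate=0.1)
    #     # equivalent to constructing FeedForward(...) and then calling fit(...)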
|
param_idx2name.update(enumerate(param_names))
|
atomic_u8.rs
|
// Copyright 2019 Twitter, Inc.
// Licensed under the Apache License, Version 2.0
// http://www.apache.org/licenses/LICENSE-2.0
use crate::{AtomicPrimitive, Ordering};
#[cfg(feature = "serde")]
use serde::{de::Visitor, Deserialize, Deserializer, Serialize, Serializer};
/// An integer type which can be safely shared between threads.
pub struct AtomicU8 {
pub(crate) inner: core::sync::atomic::AtomicU8,
}
impl AtomicPrimitive for AtomicU8 {
type Primitive = u8;
fn new(value: Self::Primitive) -> Self {
Self {
inner: core::sync::atomic::AtomicU8::new(value),
}
}
fn get_mut(&mut self) -> &mut Self::Primitive {
self.inner.get_mut()
}
fn into_inner(self) -> Self::Primitive {
self.inner.into_inner()
}
fn load(&self, order: Ordering) -> Self::Primitive {
self.inner.load(order)
}
fn store(&self, value: Self::Primitive, order: Ordering) {
self.inner.store(value, order);
}
fn swap(&self, value: Self::Primitive, order: Ordering) -> Self::Primitive {
self.inner.swap(value, order)
}
fn compare_and_swap(
&self,
current: Self::Primitive,
new: Self::Primitive,
order: Ordering,
) -> Self::Primitive {
self.inner.compare_and_swap(current, new, order)
}
fn compare_exchange(
&self,
current: Self::Primitive,
new: Self::Primitive,
success: Ordering,
failure: Ordering,
) -> Result<Self::Primitive, Self::Primitive> {
self.inner.compare_exchange(current, new, success, failure)
}
fn compare_exchange_weak(
&self,
current: Self::Primitive,
new: Self::Primitive,
success: Ordering,
failure: Ordering,
) -> Result<Self::Primitive, Self::Primitive> {
self.inner
.compare_exchange_weak(current, new, success, failure)
}
}
impl Default for AtomicU8 {
fn default() -> Self {
Self::new(Default::default())
}
}
impl PartialEq for AtomicU8 {
fn eq(&self, other: &Self) -> bool {
self.load(Ordering::SeqCst) == other.load(Ordering::SeqCst)
}
}
impl Eq for AtomicU8 {}
impl std::fmt::Debug for AtomicU8 {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:?}", self.inner)
}
}
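// Illustrative sketch (comment only, not part of the original file): sharing an
// `AtomicU8` between threads, with `AtomicPrimitive` in scope as at the top of
// this file.
//
//     use std::sync::Arc;
//     let flag = Arc::new(AtomicU8::new(0));
//     let handle = {
//         let flag = flag.clone();
//         std::thread::spawn(move || flag.store(1, Ordering::SeqCst))
//     };
//     handle.join().unwrap();
//     assert_eq!(flag.load(Ordering::SeqCst), 1);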
#[cfg(feature = "serde")]
struct AtomicU8Visitor;
#[cfg(feature = "serde")]
impl<'de> Visitor<'de> for AtomicU8Visitor {
type Value = AtomicU8;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
formatter.write_str("an unsigned 8bit integer")
}
fn visit_i8<E>(self, value: i8) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
use std::convert::TryFrom;
if let Ok(value) = u8::try_from(value) {
Ok(Self::Value::new(value))
} else {
Err(E::custom(format!("u8 is out of range: {}", value)))
}
}
fn
|
<E>(self, value: i16) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
use std::convert::TryFrom;
if let Ok(value) = u8::try_from(value) {
Ok(Self::Value::new(value))
} else {
Err(E::custom(format!("u8 is out of range: {}", value)))
}
}
fn visit_i32<E>(self, value: i32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
use std::convert::TryFrom;
if let Ok(value) = u8::try_from(value) {
Ok(Self::Value::new(value))
} else {
Err(E::custom(format!("u8 is out of range: {}", value)))
}
}
fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
use std::convert::TryFrom;
if let Ok(value) = u8::try_from(value) {
Ok(Self::Value::new(value))
} else {
Err(E::custom(format!("u8 is out of range: {}", value)))
}
}
fn visit_u8<E>(self, value: u8) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(Self::Value::new(value))
}
fn visit_u16<E>(self, value: u16) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
use std::convert::TryFrom;
if let Ok(value) = u8::try_from(value) {
Ok(Self::Value::new(value))
} else {
Err(E::custom(format!("u8 is out of range: {}", value)))
}
}
fn visit_u32<E>(self, value: u32) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
use std::convert::TryFrom;
if let Ok(value) = u8::try_from(value) {
Ok(Self::Value::new(value))
} else {
Err(E::custom(format!("u8 is out of range: {}", value)))
}
}
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
use std::convert::TryFrom;
if let Ok(value) = u8::try_from(value) {
Ok(Self::Value::new(value))
} else {
Err(E::custom(format!("u8 is out of range: {}", value)))
}
}
}
#[cfg(feature = "serde")]
impl<'de> Deserialize<'de> for AtomicU8 {
fn deserialize<D>(deserializer: D) -> Result<AtomicU8, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_any(AtomicU8Visitor)
}
}
#[cfg(feature = "serde")]
impl Serialize for AtomicU8 {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_some(&self.load(Ordering::SeqCst))
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn load() {
let atomic = AtomicU8::new(0);
assert_eq!(atomic.load(Ordering::SeqCst), 0);
}
#[test]
fn store() {
let atomic = AtomicU8::new(0);
atomic.store(1, Ordering::SeqCst);
assert_eq!(atomic.into_inner(), 1);
}
#[test]
fn swap() {
let atomic = AtomicU8::new(0);
assert_eq!(atomic.swap(1, Ordering::SeqCst), 0);
}
#[test]
fn compare_and_swap() {
let atomic = AtomicU8::new(0);
assert_eq!(atomic.compare_and_swap(0, 1, Ordering::SeqCst), 0);
assert_eq!(atomic.compare_and_swap(0, 2, Ordering::SeqCst), 1);
}
#[test]
fn compare_exchange() {
let atomic = AtomicU8::new(0);
assert_eq!(
atomic.compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst),
Ok(0)
);
assert_eq!(
atomic.compare_exchange(0, 2, Ordering::SeqCst, Ordering::SeqCst),
Err(1)
);
}
#[test]
fn compare_exchange_weak() {
let atomic = AtomicU8::new(0);
loop {
if atomic
.compare_exchange(0, 1, Ordering::SeqCst, Ordering::SeqCst)
.is_ok()
{
break;
}
}
assert_eq!(atomic.into_inner(), 1);
}
}
|
visit_i16
|
kuberuntime_manager.go
|
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kuberuntime
import (
"errors"
"fmt"
"os"
"sync"
"time"
cadvisorapi "github.com/google/cadvisor/info/v1"
"k8s.io/klog"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
utilversion "k8s.io/apimachinery/pkg/util/version"
"k8s.io/client-go/tools/record"
ref "k8s.io/client-go/tools/reference"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/credentialprovider"
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/cm"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/events"
"k8s.io/kubernetes/pkg/kubelet/images"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
proberesults "k8s.io/kubernetes/pkg/kubelet/prober/results"
"k8s.io/kubernetes/pkg/kubelet/runtimeclass"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/cache"
"k8s.io/kubernetes/pkg/kubelet/util/format"
)
const (
// The api version of kubelet runtime api
kubeRuntimeAPIVersion = "0.1.0"
// The root directory for pod logs
podLogsRootDirectory = "/var/log/pods"
// A minimal shutdown window for avoiding unnecessary SIGKILLs
minimumGracePeriodInSeconds = 2
// The expiration time of version cache.
versionCacheTTL = 60 * time.Second
// How frequently to report identical errors
identicalErrorDelay = 1 * time.Minute
)
var (
// ErrVersionNotSupported is returned when the api version of runtime interface is not supported
ErrVersionNotSupported = errors.New("Runtime api version is not supported")
)
// podStateProvider can determine if a pod is deleted or terminated
type podStateProvider interface {
IsPodDeleted(kubetypes.UID) bool
IsPodTerminated(kubetypes.UID) bool
}
type kubeGenericRuntimeManager struct {
runtimeName string
recorder record.EventRecorder
osInterface kubecontainer.OSInterface
containerRefManager *kubecontainer.RefManager
// machineInfo contains the machine information.
machineInfo *cadvisorapi.MachineInfo
// Container GC manager
containerGC *containerGC
// Keyring for pulling images
keyring credentialprovider.DockerKeyring
// Runner of lifecycle events.
runner kubecontainer.HandlerRunner
// RuntimeHelper that wraps kubelet to generate runtime container options.
runtimeHelper kubecontainer.RuntimeHelper
// Health check results.
livenessManager proberesults.Manager
// If true, enforce container cpu limits with CFS quota support
cpuCFSQuota bool
// CPUCFSQuotaPeriod sets the CPU CFS quota period value, cpu.cfs_period_us, defaults to 100ms
cpuCFSQuotaPeriod metav1.Duration
// wrapped image puller.
imagePuller images.ImageManager
// gRPC service clients
runtimeService internalapi.RuntimeService
imageService internalapi.ImageManagerService
// The version cache of runtime daemon.
versionCache *cache.ObjectCache
// The directory path for seccomp profiles.
seccompProfileRoot string
// Internal lifecycle event handlers for container resource management.
internalLifecycle cm.InternalContainerLifecycle
// A shim to legacy functions for backward compatibility.
legacyLogProvider LegacyLogProvider
// Manage RuntimeClass resources.
runtimeClassManager *runtimeclass.Manager
// Cache last per-container error message to reduce log spam
lastError map[string]string
// Time last per-container error message was printed
errorPrinted map[string]time.Time
errorMapLock sync.Mutex
}
// KubeGenericRuntime is an interface that contains interfaces for container runtime and command.
type KubeGenericRuntime interface {
kubecontainer.Runtime
kubecontainer.StreamingRuntime
kubecontainer.ContainerCommandRunner
}
// LegacyLogProvider gives the ability to use unsupported docker log drivers (e.g. journald)
type LegacyLogProvider interface {
// Get the last few lines of the logs for a specific container.
GetContainerLogTail(uid kubetypes.UID, name, namespace string, containerID kubecontainer.ContainerID) (string, error)
}
// NewKubeGenericRuntimeManager creates a new kubeGenericRuntimeManager
func NewKubeGenericRuntimeManager(
recorder record.EventRecorder,
livenessManager proberesults.Manager,
seccompProfileRoot string,
containerRefManager *kubecontainer.RefManager,
machineInfo *cadvisorapi.MachineInfo,
podStateProvider podStateProvider,
osInterface kubecontainer.OSInterface,
runtimeHelper kubecontainer.RuntimeHelper,
httpClient types.HttpGetter,
imageBackOff *flowcontrol.Backoff,
serializeImagePulls bool,
imagePullQPS float32,
imagePullBurst int,
cpuCFSQuota bool,
cpuCFSQuotaPeriod metav1.Duration,
runtimeService internalapi.RuntimeService,
imageService internalapi.ImageManagerService,
internalLifecycle cm.InternalContainerLifecycle,
legacyLogProvider LegacyLogProvider,
runtimeClassManager *runtimeclass.Manager,
) (KubeGenericRuntime, error) {
kubeRuntimeManager := &kubeGenericRuntimeManager{
recorder: recorder,
cpuCFSQuota: cpuCFSQuota,
cpuCFSQuotaPeriod: cpuCFSQuotaPeriod,
seccompProfileRoot: seccompProfileRoot,
livenessManager: livenessManager,
containerRefManager: containerRefManager,
machineInfo: machineInfo,
osInterface: osInterface,
runtimeHelper: runtimeHelper,
runtimeService: newInstrumentedRuntimeService(runtimeService),
imageService: newInstrumentedImageManagerService(imageService),
keyring: credentialprovider.NewDockerKeyring(),
internalLifecycle: internalLifecycle,
legacyLogProvider: legacyLogProvider,
runtimeClassManager: runtimeClassManager,
lastError: make(map[string]string),
errorPrinted: make(map[string]time.Time),
}
typedVersion, err := kubeRuntimeManager.runtimeService.Version(kubeRuntimeAPIVersion)
if err != nil {
klog.Errorf("Get runtime version failed: %v", err)
return nil, err
}
// Only matching kubeRuntimeAPIVersion is supported now
// TODO: Runtime API machinery is under discussion at https://github.com/kubernetes/kubernetes/issues/28642
if typedVersion.Version != kubeRuntimeAPIVersion {
klog.Errorf("Runtime api version %s is not supported, only %s is supported now",
typedVersion.Version,
kubeRuntimeAPIVersion)
return nil, ErrVersionNotSupported
}
kubeRuntimeManager.runtimeName = typedVersion.RuntimeName
klog.Infof("Container runtime %s initialized, version: %s, apiVersion: %s",
typedVersion.RuntimeName,
typedVersion.RuntimeVersion,
typedVersion.RuntimeApiVersion)
// If the container logs directory does not exist, create it.
// TODO: create podLogsRootDirectory at kubelet.go when kubelet is refactored to
// new runtime interface
if _, err := osInterface.Stat(podLogsRootDirectory); os.IsNotExist(err) {
if err := osInterface.MkdirAll(podLogsRootDirectory, 0755); err != nil {
klog.Errorf("Failed to create directory %q: %v", podLogsRootDirectory, err)
}
}
kubeRuntimeManager.imagePuller = images.NewImageManager(
kubecontainer.FilterEventRecorder(recorder),
kubeRuntimeManager,
imageBackOff,
serializeImagePulls,
imagePullQPS,
imagePullBurst)
kubeRuntimeManager.runner = lifecycle.NewHandlerRunner(httpClient, kubeRuntimeManager, kubeRuntimeManager)
kubeRuntimeManager.containerGC = newContainerGC(runtimeService, podStateProvider, kubeRuntimeManager)
kubeRuntimeManager.versionCache = cache.NewObjectCache(
func() (interface{}, error) {
return kubeRuntimeManager.getTypedVersion()
},
versionCacheTTL,
)
return kubeRuntimeManager, nil
}
// Type returns the type of the container runtime.
func (m *kubeGenericRuntimeManager) Type() string {
return m.runtimeName
}
func newRuntimeVersion(version string) (*utilversion.Version, error) {
if ver, err := utilversion.ParseSemantic(version); err == nil {
return ver, err
}
return utilversion.ParseGeneric(version)
}
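// Illustrative sketch (comment only): a strict semantic version parses directly,
// while runtime-specific strings fall back to ParseGeneric.
//
//	ver, _ := newRuntimeVersion("1.13.1") // parsed by ParseSemantic
//	ver, _ = newRuntimeVersion("1.2")     // not full semver, parsed by ParseGeneric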
func (m *kubeGenericRuntimeManager) getTypedVersion() (*runtimeapi.VersionResponse, error) {
typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion)
if err != nil {
klog.Errorf("Get remote runtime typed version failed: %v", err)
return nil, err
}
return typedVersion, nil
}
// Version returns the version information of the container runtime.
func (m *kubeGenericRuntimeManager) Version() (kubecontainer.Version, error) {
typedVersion, err := m.runtimeService.Version(kubeRuntimeAPIVersion)
if err != nil {
klog.Errorf("Get remote runtime version failed: %v", err)
return nil, err
}
return newRuntimeVersion(typedVersion.RuntimeVersion)
}
// APIVersion returns the cached API version information of the container
// runtime. Implementation is expected to update this cache periodically.
// This may be different from the runtime engine's version.
func (m *kubeGenericRuntimeManager) APIVersion() (kubecontainer.Version, error) {
versionObject, err := m.versionCache.Get(m.machineInfo.MachineID)
if err != nil {
return nil, err
}
typedVersion := versionObject.(*runtimeapi.VersionResponse)
return newRuntimeVersion(typedVersion.RuntimeApiVersion)
}
// Status returns the status of the runtime. An error is returned if the Status
// function itself fails, nil otherwise.
func (m *kubeGenericRuntimeManager) Status() (*kubecontainer.RuntimeStatus, error) {
status, err := m.runtimeService.Status()
if err != nil {
return nil, err
}
return toKubeRuntimeStatus(status), nil
}
// GetPods returns a list of containers grouped by pods. The boolean parameter
// specifies whether the runtime returns all containers including those already
// exited and dead containers (used for garbage collection).
func (m *kubeGenericRuntimeManager) GetPods(all bool) ([]*kubecontainer.Pod, error) {
pods := make(map[kubetypes.UID]*kubecontainer.Pod)
sandboxes, err := m.getKubeletSandboxes(all)
if err != nil {
return nil, err
}
for i := range sandboxes {
s := sandboxes[i]
if s.Metadata == nil {
klog.V(4).Infof("Sandbox does not have metadata: %+v", s)
continue
}
podUID := kubetypes.UID(s.Metadata.Uid)
if _, ok := pods[podUID]; !ok {
pods[podUID] = &kubecontainer.Pod{
ID: podUID,
Name: s.Metadata.Name,
Namespace: s.Metadata.Namespace,
}
}
p := pods[podUID]
converted, err := m.sandboxToKubeContainer(s)
if err != nil {
klog.V(4).Infof("Convert %q sandbox %v of pod %q failed: %v", m.runtimeName, s, podUID, err)
continue
}
p.Sandboxes = append(p.Sandboxes, converted)
}
containers, err := m.getKubeletContainers(all)
if err != nil {
return nil, err
}
for i := range containers {
c := containers[i]
if c.Metadata == nil {
klog.V(4).Infof("Container does not have metadata: %+v", c)
continue
}
labelledInfo := getContainerInfoFromLabels(c.Labels)
pod, found := pods[labelledInfo.PodUID]
if !found {
pod = &kubecontainer.Pod{
ID: labelledInfo.PodUID,
Name: labelledInfo.PodName,
Namespace: labelledInfo.PodNamespace,
}
pods[labelledInfo.PodUID] = pod
}
converted, err := m.toKubeContainer(c)
if err != nil {
klog.V(4).Infof("Convert %s container %v of pod %q failed: %v", m.runtimeName, c, labelledInfo.PodUID, err)
continue
}
pod.Containers = append(pod.Containers, converted)
}
// Convert map to list.
var result []*kubecontainer.Pod
for _, pod := range pods {
result = append(result, pod)
}
return result, nil
}
// containerToKillInfo contains necessary information to kill a container.
type containerToKillInfo struct {
// The spec of the container.
container *v1.Container
// The name of the container.
name string
// The message indicates why the container will be killed.
message string
}
// podActions keeps the information about what to do for a pod.
type podActions struct {
// Stop all running (regular and init) containers and the sandbox for the pod.
KillPod bool
// Whether a new sandbox needs to be created. If the pod needs to be killed and a
// new pod sandbox created, all init containers need to be purged (i.e., removed).
CreateSandbox bool
// The id of existing sandbox. It is used for starting containers in ContainersToStart.
SandboxID string
// The attempt number of creating sandboxes for the pod.
Attempt uint32
// The next init container to start.
NextInitContainerToStart *v1.Container
// ContainersToStart keeps a list of indexes for the containers to start,
// where the index is the index of the specific container in the pod spec
// (pod.Spec.Containers).
ContainersToStart []int
// ContainersToKill keeps a map of containers that need to be killed, note that
// the key is the container ID of the container, while
// the value contains necessary information to kill a container.
ContainersToKill map[kubecontainer.ContainerID]containerToKillInfo
}
// podSandboxChanged checks whether the spec of the pod is changed and returns
// (changed, new attempt, original sandboxID if it exists).
func (m *kubeGenericRuntimeManager) podSandboxChanged(pod *v1.Pod, podStatus *kubecontainer.PodStatus) (bool, uint32, string) {
if len(podStatus.SandboxStatuses) == 0 {
klog.V(2).Infof("No sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
return true, 0, ""
}
readySandboxCount := 0
for _, s := range podStatus.SandboxStatuses {
if s.State == runtimeapi.PodSandboxState_SANDBOX_READY {
readySandboxCount++
}
}
// Needs to create a new sandbox when readySandboxCount > 1 or the ready sandbox is not the latest one.
sandboxStatus := podStatus.SandboxStatuses[0]
if readySandboxCount > 1 {
klog.V(2).Infof("More than 1 sandboxes for pod %q are ready. Need to reconcile them", format.Pod(pod))
return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
}
if sandboxStatus.State != runtimeapi.PodSandboxState_SANDBOX_READY {
klog.V(2).Infof("No ready sandbox for pod %q can be found. Need to start a new one", format.Pod(pod))
return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
}
// Needs to create a new sandbox when network namespace changed.
if sandboxStatus.GetLinux().GetNamespaces().GetOptions().GetNetwork() != networkNamespaceForPod(pod) {
klog.V(2).Infof("Sandbox for pod %q has changed. Need to start a new one", format.Pod(pod))
return true, sandboxStatus.Metadata.Attempt + 1, ""
}
// Needs to create a new sandbox when the sandbox does not have an IP address.
if !kubecontainer.IsHostNetworkPod(pod) && sandboxStatus.Network.Ip == "" {
klog.V(2).Infof("Sandbox for pod %q has no IP address. Need to start a new one", format.Pod(pod))
return true, sandboxStatus.Metadata.Attempt + 1, sandboxStatus.Id
}
return false, sandboxStatus.Metadata.Attempt, sandboxStatus.Id
}
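// containerChanged returns the expected hash of the container spec, the hash recorded
// in the container status, and whether the two differ.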
func containerChanged(container *v1.Container, containerStatus *kubecontainer.ContainerStatus) (uint64, uint64, bool) {
expectedHash := kubecontainer.HashContainer(container)
return expectedHash, containerStatus.Hash, containerStatus.Hash != expectedHash
}
func shouldRestartOnFailure(pod *v1.Pod) bool
|
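// containerSucceeded reports whether the named container has exited with a zero exit code.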
func containerSucceeded(c *v1.Container, podStatus *kubecontainer.PodStatus) bool {
cStatus := podStatus.FindContainerStatusByName(c.Name)
if cStatus == nil || cStatus.State == kubecontainer.ContainerStateRunning {
return false
}
return cStatus.ExitCode == 0
}
// computePodActions checks whether the pod spec has changed and returns the changes if true.
func (m *kubeGenericRuntimeManager) computePodActions(pod *v1.Pod, podStatus *kubecontainer.PodStatus) podActions {
klog.V(5).Infof("Syncing Pod %q: %+v", format.Pod(pod), pod)
createPodSandbox, attempt, sandboxID := m.podSandboxChanged(pod, podStatus)
changes := podActions{
KillPod: createPodSandbox,
CreateSandbox: createPodSandbox,
SandboxID: sandboxID,
Attempt: attempt,
ContainersToStart: []int{},
ContainersToKill: make(map[kubecontainer.ContainerID]containerToKillInfo),
}
// If we need to (re-)create the pod sandbox, everything will need to be
// killed and recreated, and init containers should be purged.
if createPodSandbox {
if !shouldRestartOnFailure(pod) && attempt != 0 {
// Should not restart the pod, just return.
// we should not create a sandbox for a pod if it is already done.
// if all containers are done and should not be started, there is no need to create a new sandbox.
// this stops confusing logs on pods whose containers all have exit codes, but we recreate a sandbox before terminating it.
changes.CreateSandbox = false
return changes
}
if len(pod.Spec.InitContainers) != 0 {
// Pod has init containers, return the first one.
changes.NextInitContainerToStart = &pod.Spec.InitContainers[0]
return changes
}
// Start all containers by default but exclude the ones that succeeded if
// RestartPolicy is OnFailure.
for idx, c := range pod.Spec.Containers {
if containerSucceeded(&c, podStatus) && pod.Spec.RestartPolicy == v1.RestartPolicyOnFailure {
continue
}
changes.ContainersToStart = append(changes.ContainersToStart, idx)
}
return changes
}
// Check initialization progress.
initLastStatus, next, done := findNextInitContainerToRun(pod, podStatus)
if !done {
if next != nil {
initFailed := initLastStatus != nil && isContainerFailed(initLastStatus)
if initFailed && !shouldRestartOnFailure(pod) {
changes.KillPod = true
} else {
changes.NextInitContainerToStart = next
}
}
// Initialization failed or still in progress. Skip inspecting non-init
// containers.
return changes
}
// Number of running containers to keep.
keepCount := 0
// check the status of containers.
for idx, container := range pod.Spec.Containers {
containerStatus := podStatus.FindContainerStatusByName(container.Name)
// Call internal container post-stop lifecycle hook for any non-running container so that any
// allocated cpus are released immediately. If the container is restarted, cpus will be re-allocated
// to it.
if containerStatus != nil && containerStatus.State != kubecontainer.ContainerStateRunning {
if err := m.internalLifecycle.PostStopContainer(containerStatus.ID.ID); err != nil {
klog.Errorf("internal container post-stop lifecycle hook failed for container %v in pod %v with error %v",
container.Name, pod.Name, err)
}
}
// If container does not exist, or is not running, check whether we
// need to restart it.
if containerStatus == nil || containerStatus.State != kubecontainer.ContainerStateRunning {
if kubecontainer.ShouldContainerBeRestarted(&container, pod, podStatus) {
message := fmt.Sprintf("Container %+v is dead, but RestartPolicy says that we should restart it.", container)
klog.V(3).Infof(message)
changes.ContainersToStart = append(changes.ContainersToStart, idx)
}
continue
}
// The container is running, but kill the container if any of the following condition is met.
var message string
restart := shouldRestartOnFailure(pod)
if _, _, changed := containerChanged(&container, containerStatus); changed {
message = fmt.Sprintf("Container %s definition changed", container.Name)
// Restart regardless of the restart policy because the container
// spec changed.
restart = true
} else if liveness, found := m.livenessManager.Get(containerStatus.ID); found && liveness == proberesults.Failure {
// If the container failed the liveness probe, we should kill it.
message = fmt.Sprintf("Container %s failed liveness probe", container.Name)
} else {
// Keep the container.
keepCount++
continue
}
// We need to kill the container, but if we also want to restart the
// container afterwards, make the intent clear in the message. Also do
// not kill the entire pod since we expect the container to be running eventually.
if restart {
message = fmt.Sprintf("%s, will be restarted", message)
changes.ContainersToStart = append(changes.ContainersToStart, idx)
}
changes.ContainersToKill[containerStatus.ID] = containerToKillInfo{
name: containerStatus.Name,
container: &pod.Spec.Containers[idx],
message: message,
}
klog.V(2).Infof("Container %q (%q) of pod %s: %s", container.Name, containerStatus.ID, format.Pod(pod), message)
}
if keepCount == 0 && len(changes.ContainersToStart) == 0 {
changes.KillPod = true
}
return changes
}
// SyncPod syncs the running pod into the desired pod by executing following steps:
//
// 1. Compute sandbox and container changes.
// 2. Kill pod sandbox if necessary.
// 3. Kill any containers that should not be running.
// 4. Create sandbox if necessary.
// 5. Create init containers.
// 6. Create normal containers.
func (m *kubeGenericRuntimeManager) SyncPod(pod *v1.Pod, podStatus *kubecontainer.PodStatus, pullSecrets []v1.Secret, backOff *flowcontrol.Backoff) (result kubecontainer.PodSyncResult) {
// Step 1: Compute sandbox and container changes.
podContainerChanges := m.computePodActions(pod, podStatus)
klog.V(3).Infof("computePodActions got %+v for pod %q", podContainerChanges, format.Pod(pod))
if podContainerChanges.CreateSandbox {
ref, err := ref.GetReference(legacyscheme.Scheme, pod)
if err != nil {
klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), err)
}
if podContainerChanges.SandboxID != "" {
m.recorder.Eventf(ref, v1.EventTypeNormal, events.SandboxChanged, "Pod sandbox changed, it will be killed and re-created.")
} else {
klog.V(4).Infof("SyncPod received new pod %q, will create a sandbox for it", format.Pod(pod))
}
}
// Step 2: Kill the pod if the sandbox has changed.
if podContainerChanges.KillPod {
if podContainerChanges.CreateSandbox {
klog.V(4).Infof("Stopping PodSandbox for %q, will start new one", format.Pod(pod))
} else {
klog.V(4).Infof("Stopping PodSandbox for %q because all other containers are dead.", format.Pod(pod))
}
killResult := m.killPodWithSyncResult(pod, kubecontainer.ConvertPodStatusToRunningPod(m.runtimeName, podStatus), nil)
result.AddPodSyncResult(killResult)
if killResult.Error() != nil {
klog.Errorf("killPodWithSyncResult failed: %v", killResult.Error())
return
}
if podContainerChanges.CreateSandbox {
m.purgeInitContainers(pod, podStatus)
}
} else {
// Step 3: kill any running containers in this pod which are not to keep.
for containerID, containerInfo := range podContainerChanges.ContainersToKill {
klog.V(3).Infof("Killing unwanted container %q(id=%q) for pod %q", containerInfo.name, containerID, format.Pod(pod))
killContainerResult := kubecontainer.NewSyncResult(kubecontainer.KillContainer, containerInfo.name)
result.AddSyncResult(killContainerResult)
if err := m.killContainer(pod, containerID, containerInfo.name, containerInfo.message, nil); err != nil {
killContainerResult.Fail(kubecontainer.ErrKillContainer, err.Error())
klog.Errorf("killContainer %q(id=%q) for pod %q failed: %v", containerInfo.name, containerID, format.Pod(pod), err)
return
}
}
}
// Keep terminated init containers fairly aggressively controlled
// This is an optimization because container removals are typically handled
// by container garbage collector.
m.pruneInitContainersBeforeStart(pod, podStatus)
// We pass the value of the podIP down to generatePodSandboxConfig and
// generateContainerConfig, which in turn passes it to various other
// functions, in order to facilitate functionality that requires this
// value (hosts file and downward API) and avoid races determining
// the pod IP in cases where a container requires restart but the
// podIP isn't in the status manager yet.
//
// We default to the IP in the passed-in pod status, and overwrite it if the
// sandbox needs to be (re)started.
podIP := ""
if podStatus != nil {
podIP = podStatus.IP
}
// Step 4: Create a sandbox for the pod if necessary.
podSandboxID := podContainerChanges.SandboxID
if podContainerChanges.CreateSandbox {
var msg string
var err error
klog.V(4).Infof("Creating sandbox for pod %q", format.Pod(pod))
createSandboxResult := kubecontainer.NewSyncResult(kubecontainer.CreatePodSandbox, format.Pod(pod))
result.AddSyncResult(createSandboxResult)
podSandboxID, msg, err = m.createPodSandbox(pod, podContainerChanges.Attempt)
if err != nil {
createSandboxResult.Fail(kubecontainer.ErrCreatePodSandbox, msg)
klog.Errorf("createPodSandbox for pod %q failed: %v", format.Pod(pod), err)
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
if referr != nil {
klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr)
}
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedCreatePodSandBox, "Failed create pod sandbox: %v", err)
return
}
klog.V(4).Infof("Created PodSandbox %q for pod %q", podSandboxID, format.Pod(pod))
podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
if err != nil {
ref, referr := ref.GetReference(legacyscheme.Scheme, pod)
if referr != nil {
klog.Errorf("Couldn't make a ref to pod %q: '%v'", format.Pod(pod), referr)
}
m.recorder.Eventf(ref, v1.EventTypeWarning, events.FailedStatusPodSandBox, "Unable to get pod sandbox status: %v", err)
klog.Errorf("Failed to get pod sandbox status: %v; Skipping pod %q", err, format.Pod(pod))
result.Fail(err)
return
}
// If we ever allow updating a pod from non-host-network to
// host-network, we may use a stale IP.
if !kubecontainer.IsHostNetworkPod(pod) {
// Overwrite the podIP passed in the pod status, since we just started the pod sandbox.
podIP = m.determinePodSandboxIP(pod.Namespace, pod.Name, podSandboxStatus)
klog.V(4).Infof("Determined the ip %q for pod %q after sandbox changed", podIP, format.Pod(pod))
}
}
// Get podSandboxConfig for containers to start.
configPodSandboxResult := kubecontainer.NewSyncResult(kubecontainer.ConfigPodSandbox, podSandboxID)
result.AddSyncResult(configPodSandboxResult)
podSandboxConfig, err := m.generatePodSandboxConfig(pod, podContainerChanges.Attempt)
if err != nil {
message := fmt.Sprintf("GeneratePodSandboxConfig for pod %q failed: %v", format.Pod(pod), err)
klog.Error(message)
configPodSandboxResult.Fail(kubecontainer.ErrConfigPodSandbox, message)
return
}
// Step 5: start the init container.
if container := podContainerChanges.NextInitContainerToStart; container != nil {
// Start the next init container.
startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
result.AddSyncResult(startContainerResult)
isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff)
if isInBackOff {
startContainerResult.Fail(err, msg)
klog.V(4).Infof("Backing Off restarting init container %+v in pod %v", container, format.Pod(pod))
return
}
klog.V(4).Infof("Creating init container %+v in pod %v", container, format.Pod(pod))
if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
startContainerResult.Fail(err, msg)
utilruntime.HandleError(fmt.Errorf("init container start failed: %v: %s", err, msg))
return
}
// Successfully started the init container.
klog.V(4).Infof("Completed init container %q for pod %q", container.Name, format.Pod(pod))
}
// Step 6: start containers in podContainerChanges.ContainersToStart.
for _, idx := range podContainerChanges.ContainersToStart {
container := &pod.Spec.Containers[idx]
startContainerResult := kubecontainer.NewSyncResult(kubecontainer.StartContainer, container.Name)
result.AddSyncResult(startContainerResult)
isInBackOff, msg, err := m.doBackOff(pod, container, podStatus, backOff)
if isInBackOff {
startContainerResult.Fail(err, msg)
klog.V(4).Infof("Backing Off restarting container %+v in pod %v", container, format.Pod(pod))
continue
}
klog.V(4).Infof("Creating container %+v in pod %v", container, format.Pod(pod))
if msg, err := m.startContainer(podSandboxID, podSandboxConfig, container, pod, podStatus, pullSecrets, podIP); err != nil {
startContainerResult.Fail(err, msg)
// known errors that are logged in other places are logged at higher levels here to avoid
// repetitive log spam
switch {
case err == images.ErrImagePullBackOff:
klog.V(3).Infof("container start failed: %v: %s", err, msg)
default:
utilruntime.HandleError(fmt.Errorf("container start failed: %v: %s", err, msg))
}
continue
}
}
return
}
// If a container is still in backoff, the function will return a brief backoff error and
// a detailed error message.
func (m *kubeGenericRuntimeManager) doBackOff(pod *v1.Pod, container *v1.Container, podStatus *kubecontainer.PodStatus, backOff *flowcontrol.Backoff) (bool, string, error) {
var cStatus *kubecontainer.ContainerStatus
for _, c := range podStatus.ContainerStatuses {
if c.Name == container.Name && c.State == kubecontainer.ContainerStateExited {
cStatus = c
break
}
}
if cStatus == nil {
return false, "", nil
}
klog.V(3).Infof("checking backoff for container %q in pod %q", container.Name, format.Pod(pod))
// Use the finished time of the latest exited container as the start point to calculate whether to do back-off.
ts := cStatus.FinishedAt
// backOff requires a unique key to identify the container.
key := getStableKey(pod, container)
if backOff.IsInBackOffSince(key, ts) {
if ref, err := kubecontainer.GenerateContainerRef(pod, container); err == nil {
m.recorder.Eventf(ref, v1.EventTypeWarning, events.BackOffStartContainer, "Back-off restarting failed container")
}
err := fmt.Errorf("Back-off %s restarting failed container=%s pod=%s", backOff.Get(key), container.Name, format.Pod(pod))
klog.V(3).Infof("%s", err.Error())
return true, err.Error(), kubecontainer.ErrCrashLoopBackOff
}
backOff.Next(key, ts)
return false, "", nil
}
// KillPod kills all the containers of a pod. Pod may be nil, running pod must not be.
// gracePeriodOverride if specified allows the caller to override the pod default grace period.
// only hard kill paths are allowed to specify a gracePeriodOverride in the kubelet in order to not corrupt user data.
// it is useful when doing SIGKILL for hard eviction scenarios, or max grace period during soft eviction scenarios.
func (m *kubeGenericRuntimeManager) KillPod(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) error {
err := m.killPodWithSyncResult(pod, runningPod, gracePeriodOverride)
return err.Error()
}
// killPodWithSyncResult kills a runningPod and returns SyncResult.
// Note: The pod passed in could be *nil* when kubelet restarted.
func (m *kubeGenericRuntimeManager) killPodWithSyncResult(pod *v1.Pod, runningPod kubecontainer.Pod, gracePeriodOverride *int64) (result kubecontainer.PodSyncResult) {
killContainerResults := m.killContainersWithSyncResult(pod, runningPod, gracePeriodOverride)
for _, containerResult := range killContainerResults {
result.AddSyncResult(containerResult)
}
// stop sandbox, the sandbox will be removed in GarbageCollect
killSandboxResult := kubecontainer.NewSyncResult(kubecontainer.KillPodSandbox, runningPod.ID)
result.AddSyncResult(killSandboxResult)
// Stop all sandboxes belonging to the same pod
for _, podSandbox := range runningPod.Sandboxes {
if err := m.runtimeService.StopPodSandbox(podSandbox.ID.ID); err != nil {
killSandboxResult.Fail(kubecontainer.ErrKillPodSandbox, err.Error())
klog.Errorf("Failed to stop sandbox %q", podSandbox.ID)
}
}
return
}
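// cleanupErrorTimeouts drops cached error entries once they are older than
// identicalErrorDelay so that identical errors can be logged again.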
func (m *kubeGenericRuntimeManager) cleanupErrorTimeouts() {
m.errorMapLock.Lock()
defer m.errorMapLock.Unlock()
for name, timeout := range m.errorPrinted {
if time.Now().Sub(timeout) >= identicalErrorDelay {
delete(m.errorPrinted, name)
delete(m.lastError, name)
}
}
}
// GetPodStatus retrieves the status of the pod, including the
// information of all containers in the pod that are visible in Runtime.
func (m *kubeGenericRuntimeManager) GetPodStatus(uid kubetypes.UID, name, namespace string) (*kubecontainer.PodStatus, error) {
// Now we retain restart count of container as a container label. Each time a container
// restarts, pod will read the restart count from the registered dead container, increment
// it to get the new restart count, and then add a label with the new restart count on
// the newly started container.
// However, there are some limitations of this method:
// 1. When all dead containers were garbage collected, the container status could
// not get the historical value and would be *inaccurate*. Fortunately, the chance
// is really slim.
// 2. When working with old version containers which have no restart count label,
// we can only assume their restart count is 0.
// Anyhow, we only promised "best-effort" restart count reporting, we can just ignore
// these limitations now.
// TODO: move this comment to SyncPod.
podSandboxIDs, err := m.getSandboxIDByPodUID(uid, nil)
if err != nil {
return nil, err
}
podFullName := format.Pod(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: namespace,
UID: uid,
},
})
klog.V(4).Infof("getSandboxIDByPodUID got sandbox IDs %q for pod %q", podSandboxIDs, podFullName)
sandboxStatuses := make([]*runtimeapi.PodSandboxStatus, len(podSandboxIDs))
podIP := ""
for idx, podSandboxID := range podSandboxIDs {
podSandboxStatus, err := m.runtimeService.PodSandboxStatus(podSandboxID)
if err != nil {
klog.Errorf("PodSandboxStatus of sandbox %q for pod %q error: %v", podSandboxID, podFullName, err)
return nil, err
}
sandboxStatuses[idx] = podSandboxStatus
// Only get pod IP from latest sandbox
if idx == 0 && podSandboxStatus.State == runtimeapi.PodSandboxState_SANDBOX_READY {
podIP = m.determinePodSandboxIP(namespace, name, podSandboxStatus)
}
}
// Get statuses of all containers visible in the pod.
containerStatuses, err := m.getPodContainerStatuses(uid, name, namespace)
m.errorMapLock.Lock()
defer m.errorMapLock.Unlock()
if err != nil {
lastMsg, ok := m.lastError[podFullName]
if !ok || err.Error() != lastMsg || time.Now().Sub(m.errorPrinted[podFullName]) >= identicalErrorDelay {
klog.Errorf("getPodContainerStatuses for pod %q failed: %v", podFullName, err)
m.errorPrinted[podFullName] = time.Now()
m.lastError[podFullName] = err.Error()
}
return nil, err
}
delete(m.errorPrinted, podFullName)
delete(m.lastError, podFullName)
return &kubecontainer.PodStatus{
ID: uid,
Name: name,
Namespace: namespace,
IP: podIP,
SandboxStatuses: sandboxStatuses,
ContainerStatuses: containerStatuses,
}, nil
}
// GarbageCollect removes dead containers using the specified container gc policy.
func (m *kubeGenericRuntimeManager) GarbageCollect(gcPolicy kubecontainer.ContainerGCPolicy, allSourcesReady bool, evictNonDeletedPods bool) error {
return m.containerGC.GarbageCollect(gcPolicy, allSourcesReady, evictNonDeletedPods)
}
// UpdatePodCIDR is just a passthrough method to update the runtimeConfig of the shim
// with the podCIDR supplied by the kubelet.
func (m *kubeGenericRuntimeManager) UpdatePodCIDR(podCIDR string) error {
// TODO(#35531): do we really want to write a method on this manager for each
// field of the config?
klog.Infof("updating runtime config through cri with podcidr %v", podCIDR)
return m.runtimeService.UpdateRuntimeConfig(
&runtimeapi.RuntimeConfig{
NetworkConfig: &runtimeapi.NetworkConfig{
PodCidr: podCIDR,
},
})
}
|
{
return pod.Spec.RestartPolicy != v1.RestartPolicyNever
}
|
cell.ts
|
import { extend, Property, ChildProperty, Complex } from '@syncfusion/ej2-base';
import { SheetModel } from './index';
import { CellStyleModel, HyperlinkModel, CellStyle, wrapEvent, ValidationModel } from '../common/index';
import { getRow } from './row';
import { RowModel } from './row-model';
import { CellModel } from './cell-model';
import { Workbook } from './workbook';
import { getSheet } from './sheet';
/**
* Represents the cell.
*/
export class Cell extends ChildProperty<RowModel> {
/**
* Defines the value of the cell which can be text or number.
* @default ''
*/
@Property('')
public value: string;
/**
* Defines the formula or expression of the cell.
* @default ''
*/
@Property('')
public formula: string;
/**
* Specifies the index of the cell.
* @default 0
* @asptype int
*/
@Property(0)
public index: number;
/**
* Specifies the number format code to display value in specified number format.
* @default 'General'
*/
@Property('General')
public format: string;
/**
* Specifies the cell style options.
* ```html
* <div id='Spreadsheet'></div>
* ```
* ```typescript
* let spreadsheet: Spreadsheet = new Spreadsheet({
* sheets: [{
* ...
* rows: [{
* cells: [{ value: '12', index: 2, style: { fontWeight: 'bold', fontSize: 12, fontStyle: 'italic', textIndent: '2pt',
* backgroundColor: '#4b5366', color: '#ffffff' } }]
* }]
* }]
* });
* spreadsheet.appendTo('#Spreadsheet');
* ```
* @default {}
*/
@Complex<CellStyleModel>({}, CellStyle)
public style: CellStyleModel;
|
* @default ''
*/
@Property('')
public hyperlink: string | HyperlinkModel;
/**
* Wraps the cell text to the next line, if the text width exceeds the column width.
* @default false
*/
@Property(false)
public wrap: boolean;
/**
* Specifies whether the cell is locked or not; used by the allow-edit-range option when the sheet is protected.
* @default true
*/
@Property(true)
public isLocked: boolean;
/**
* Specifies the validation of the cell.
* @default ''
*/
@Property('')
public validation: ValidationModel;
/**
* Specifies the column-wise cell merge count.
* @default 1
* @asptype int
*/
@Property(1)
public colSpan: number;
/**
* Specifies the row-wise cell merge count.
* @default 1
* @asptype int
*/
@Property(1)
public rowSpan: number;
}
/**
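* Returns the cell model at the given row and column of the sheet; when isInitRow is set, a missing row or cells array is initialized first.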
* @hidden
*/
export function getCell(rowIndex: number, colIndex: number, sheet: SheetModel, isInitRow?: boolean): CellModel {
let row: RowModel = getRow(sheet, rowIndex);
if (!row || !row.cells) {
if (isInitRow) {
if (!row) {
sheet.rows[rowIndex] = { cells: [] };
} else {
sheet.rows[rowIndex].cells = [];
}
} else {
return null;
}
}
return sheet.rows[rowIndex].cells[colIndex] || null;
}
/**
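* Sets the cell model at the given row and column, creating the row when needed and merging with any existing cell when isExtend is true.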
* @hidden
*/
export function setCell(rowIndex: number, colIndex: number, sheet: SheetModel, cell: CellModel, isExtend?: boolean): void {
if (!sheet.rows[rowIndex]) {
sheet.rows[rowIndex] = { cells: [] };
} else if (!sheet.rows[rowIndex].cells) {
sheet.rows[rowIndex].cells = [];
}
if (isExtend && sheet.rows[rowIndex].cells[colIndex]) {
extend(sheet.rows[rowIndex].cells[colIndex], cell, null, true);
} else {
sheet.rows[rowIndex].cells[colIndex] = cell;
}
}
/** @hidden */
export function skipDefaultValue(style: CellStyleModel, defaultKey?: boolean): CellStyleModel {
let defaultProps: CellStyleModel = { fontFamily: 'Calibri', verticalAlign: 'bottom', textIndent: '0pt', backgroundColor: '#ffffff',
color: '#000000', textAlign: 'left', fontSize: '11pt', fontWeight: 'normal', fontStyle: 'normal', textDecoration: 'none',
border: '', borderLeft: '', borderTop: '', borderRight: '', borderBottom: '' };
let changedProps: CellStyleModel = {};
Object.keys(defaultKey ? defaultProps : style).forEach((propName: string): void => {
if (style[propName] !== defaultProps[propName]) {
changedProps[propName] = style[propName];
}
});
return changedProps;
}
/** @hidden */
export function wrap(address: string, wrap: boolean = true, context?: Workbook): void {
let addressInfo: { sheetIndex: number, indices: number[] } = context.getAddressInfo(address);
let rng: number[] = addressInfo.indices;
let sheet: SheetModel = getSheet(context, addressInfo.sheetIndex);
for (let i: number = rng[0]; i <= rng[2]; i++) {
for (let j: number = rng[1]; j <= rng[3]; j++) {
setCell(i, j, sheet, { wrap: wrap }, true);
}
}
context.setProperties({ sheets: context.sheets }, true);
context.notify(wrapEvent, { range: rng, wrap: wrap, sheet: sheet });
}
|
/**
* Specifies the hyperlink of the cell.
|
sdr.go
|
/*
Copyright (c) 2015-2016 Christopher Young
Distributable under the terms of The "BSD New" License
that can be found in the LICENSE file, herein included
as part of this header.
sdr.go: SDR monitoring, SDR management, data input from UAT/1090ES channels.
*/
package main
import (
"bufio"
"log"
"os/exec"
"regexp"
"strconv"
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"../godump978"
rtl "github.com/jpoirier/gortlsdr"
)
// Device holds per dongle values and attributes
type Device struct {
dev *rtl.Context
wg *sync.WaitGroup
closeCh chan int
indexID int
ppm int
serial string
idSet bool
}
// UAT is a 978 MHz device
type UAT Device
// ES is a 1090 MHz device
type ES Device
// OGN is an 868 MHz device
type OGN Device
// UATDev holds a 978 MHz dongle object
var UATDev *UAT
// ESDev holds a 1090 MHz dongle object
var ESDev *ES
// OGNDev holds an 868 MHz dongle object
var OGNDev *OGN
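// Dump1090TermMessage carries one chunk of dump1090 terminal output together with its source stream (stdout or stderr).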
type Dump1090TermMessage struct {
Text string
Source string
}
func (e *ES) read() {
defer e.wg.Done()
log.Println("Entered ES read() ...")
cmd := exec.Command("/usr/bin/dump1090", "--oversample", "--net-stratux-port", "30006", "--net", "--device-index", strconv.Itoa(e.indexID), "--ppm", strconv.Itoa(e.ppm))
log.Printf("Running command:", "/usr/bin/dump1090", "--oversample", "--net-stratux-port", "30006", "--net", "--device-index", strconv.Itoa(e.indexID), "--ppm", strconv.Itoa(e.ppm))
stdout, _ := cmd.StdoutPipe()
stderr, _ := cmd.StderrPipe()
err := cmd.Start()
if err != nil {
log.Printf("Error executing /usr/bin/dump1090: %s\n", err)
// don't return immediately, use the proper shutdown procedure
shutdownES = true
for {
select {
case <-e.closeCh:
return
default:
time.Sleep(1 * time.Second)
}
}
}
log.Println("Executed /usr/bin/dump1090 successfully...")
done := make(chan bool)
go func() {
for {
select {
case <-done:
return
case <-e.closeCh:
log.Println("ES read(): shutdown msg received, calling cmd.Process.Kill() ...")
err := cmd.Process.Kill()
if err == nil {
log.Println("kill successful...")
}
return
default:
time.Sleep(1 * time.Second)
}
}
}()
stdoutBuf := make([]byte, 1024)
stderrBuf := make([]byte, 1024)
go func() {
for {
select {
case <-done:
return
default:
n, err := stdout.Read(stdoutBuf)
if err == nil && n > 0 {
m := Dump1090TermMessage{Text: string(stdoutBuf[:n]), Source: "stdout"}
logDump1090TermMessage(m)
}
}
}
}()
go func() {
for {
select {
case <-done:
return
default:
n, err := stderr.Read(stderrBuf)
if err == nil && n > 0 {
m := Dump1090TermMessage{Text: string(stderrBuf[:n]), Source: "stderr"}
logDump1090TermMessage(m)
}
}
}
}()
cmd.Wait()
// we get here if A) the dump1090 process died
// on its own or B) cmd.Process.Kill() was called
// from within the goroutine, either way close
// the "done" channel, which ensures we don't leak
// goroutines...
close(done)
}
func (u *UAT) read() {
defer u.wg.Done()
log.Println("Entered UAT read() ...")
var buffer = make([]uint8, rtl.DefaultBufLength)
for {
select {
default:
nRead, err := u.dev.ReadSync(buffer, rtl.DefaultBufLength)
if err != nil {
if globalSettings.DEBUG {
log.Printf("\tReadSync Failed - error: %s\n", err)
}
if !shutdownUAT {
shutdownUAT = true
}
break
}
if nRead > 0 {
buf := buffer[:nRead]
godump978.InChan <- buf
}
case <-u.closeCh:
log.Println("UAT read(): shutdown msg received...")
return
}
}
}
func (f *OGN) read() {
defer f.wg.Done()
log.Println("Entered OGN read() ...")
cmd := exec.Command("/usr/bin/ogn-rx-eu", "-d", strconv.Itoa(f.indexID), "-p", strconv.Itoa(f.ppm), "-L/var/log/")
stdout, _ := cmd.StdoutPipe()
stderr, _ := cmd.StderrPipe()
autoRestart := true // automatically restart crashing child process
err := cmd.Start()
if err != nil {
log.Printf("OGN: Error executing ogn-rx-eu: %s\n", err)
// don't return immediately, use the proper shutdown procedure
shutdownOGN = true
for {
select {
case <-f.closeCh:
return
default:
time.Sleep(1 * time.Second)
}
}
}
log.Println("OGN: Executed ogn-rx-eu successfully...")
done := make(chan bool)
go func() {
for {
select {
case <-done:
return
case <-f.closeCh:
log.Println("OGN read(): shutdown msg received, calling cmd.Process.Kill() ...")
autoRestart = false
err := cmd.Process.Kill()
if err == nil {
log.Println("kill successful...")
}
return
default:
time.Sleep(1 * time.Second)
}
}
}()
go func() {
reader := bufio.NewReader(stdout)
for {
select {
case <-done:
return
default:
line, err := reader.ReadString('\n')
line = strings.TrimSpace(line)
if err == nil && len(line) > 0 /* && globalSettings.DEBUG */ {
log.Println("OGN: ogn-rx-eu stdout: ", line)
}
}
}
}()
go func() {
reader := bufio.NewReader(stderr)
for {
select {
case <-done:
return
default:
line, err := reader.ReadString('\n')
if err == nil {
log.Println("OGN: ogn-rx-eu stderr: ", strings.TrimSpace(line))
}
}
}
}()
cmd.Wait()
log.Println("OGN: ogn-rx-eu terminated...")
// we get here if A) the ogn-rx-eu process died
// on its own or B) cmd.Process.Kill() was called
// from within the goroutine, either way close
// the "done" channel, which ensures we don't leak
// goroutines...
close(done)
if autoRestart && !shutdownOGN {
time.Sleep(5 * time.Second)
log.Println("OGN: restarting crashed ogn-rx-eu")
f.wg.Add(1)
go f.read()
}
}
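// getPPM extracts the PPM frequency correction encoded in a dongle serial tag such as
// "stratux:978:-3", falling back to the global PPM setting when none is present.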
func getPPM(serial string) int {
r, err := regexp.Compile("str?a?t?u?x:\\d+:?(-?\\d*)")
if err != nil {
return globalSettings.PPM
}
arr := r.FindStringSubmatch(serial)
if arr == nil {
return globalSettings.PPM
}
ppm, err := strconv.Atoi(arr[1])
if err != nil {
return globalSettings.PPM
}
return ppm
}
func (e *ES) sdrConfig() (err error) {
e.ppm = getPPM(e.serial)
log.Printf("===== ES Device Serial: %s PPM %d =====\n", e.serial, e.ppm)
return
}
func (f *OGN) sdrConfig() (err error) {
f.ppm = getPPM(f.serial)
log.Printf("===== OGN Device Serial: %s PPM %d =====\n", f.serial, f.ppm)
return
}
// 978 UAT configuration settings
const (
TunerGain = 480
SampleRate = 2083334
NewRTLFreq = 28800000
NewTunerFreq = 28800000
CenterFreq = 978000000
Bandwidth = 1000000
)
func (u *UAT) sdrConfig() (err error) {
log.Printf("===== UAT Device Name : %s =====\n", rtl.GetDeviceName(u.indexID))
log.Printf("===== UAT Device Serial: %s=====\n", u.serial)
if u.dev, err = rtl.Open(u.indexID); err != nil {
log.Printf("\tUAT Open Failed...\n")
return
}
log.Printf("\tGetTunerType: %s\n", u.dev.GetTunerType())
//---------- Set Tuner Gain ----------
err = u.dev.SetTunerGainMode(true)
if err != nil {
u.dev.Close()
log.Printf("\tSetTunerGainMode Failed - error: %s\n", err)
return
}
log.Printf("\tSetTunerGainMode Successful\n")
err = u.dev.SetTunerGain(TunerGain)
if err != nil {
u.dev.Close()
log.Printf("\tSetTunerGain Failed - error: %s\n", err)
return
}
log.Printf("\tSetTunerGain Successful\n")
tgain := u.dev.GetTunerGain()
log.Printf("\tGetTunerGain: %d\n", tgain)
//---------- Get/Set Sample Rate ----------
err = u.dev.SetSampleRate(SampleRate)
if err != nil {
u.dev.Close()
log.Printf("\tSetSampleRate Failed - error: %s\n", err)
return
}
log.Printf("\tSetSampleRate - rate: %d\n", SampleRate)
log.Printf("\tGetSampleRate: %d\n", u.dev.GetSampleRate())
//---------- Get/Set Xtal Freq ----------
rtlFreq, tunerFreq, err := u.dev.GetXtalFreq()
if err != nil {
u.dev.Close()
log.Printf("\tGetXtalFreq Failed - error: %s\n", err)
return
}
log.Printf("\tGetXtalFreq - Rtl: %d, Tuner: %d\n", rtlFreq, tunerFreq)
err = u.dev.SetXtalFreq(NewRTLFreq, NewTunerFreq)
if err != nil {
u.dev.Close()
log.Printf("\tSetXtalFreq Failed - error: %s\n", err)
return
}
log.Printf("\tSetXtalFreq - Center freq: %d, Tuner freq: %d\n",
NewRTLFreq, NewTunerFreq)
//---------- Get/Set Center Freq ----------
err = u.dev.SetCenterFreq(CenterFreq)
if err != nil {
u.dev.Close()
log.Printf("\tSetCenterFreq 978MHz Failed, error: %s\n", err)
return
}
log.Printf("\tSetCenterFreq 978MHz Successful\n")
log.Printf("\tGetCenterFreq: %d\n", u.dev.GetCenterFreq())
//---------- Set Bandwidth ----------
log.Printf("\tSetting Bandwidth: %d\n", Bandwidth)
if err = u.dev.SetTunerBw(Bandwidth); err != nil {
u.dev.Close()
log.Printf("\tSetTunerBw %d Failed, error: %s\n", Bandwidth, err)
return
}
log.Printf("\tSetTunerBw %d Successful\n", Bandwidth)
if err = u.dev.ResetBuffer(); err != nil {
u.dev.Close()
log.Printf("\tResetBuffer Failed - error: %s\n", err)
return
}
log.Printf("\tResetBuffer Successful\n")
//---------- Get/Set Freq Correction ----------
freqCorr := u.dev.GetFreqCorrection()
log.Printf("\tGetFreqCorrection: %d\n", freqCorr)
u.ppm = getPPM(u.serial)
err = u.dev.SetFreqCorrection(u.ppm)
if err != nil {
u.dev.Close()
log.Printf("\tSetFreqCorrection %d Failed, error: %s\n", u.ppm, err)
return
}
log.Printf("\tSetFreqCorrection %d Successful\n", u.ppm)
return
}
// Read from the godump978 channel - on or off.
func uatReader() {
log.Println("Entered uatReader() ...")
for {
uat := <-godump978.OutChan
o, msgtype := parseInput(uat)
if o != nil && msgtype != 0 {
relayMessage(msgtype, o)
}
}
}
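// writeID stamps the dongle hardware info with the "stratux:978" serial so the device
// can be identified as the UAT receiver on later scans.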
func (u *UAT) writeID() error {
info, err := u.dev.GetHwInfo()
if err != nil {
return err
}
info.Serial = "stratux:978"
return u.dev.SetHwInfo(info)
}
func (e *ES) writeID() error {
info, err := e.dev.GetHwInfo()
if err != nil {
return err
}
info.Serial = "stratux:1090"
return e.dev.SetHwInfo(info)
}
func (f *OGN) writeID() error {
info, err := f.dev.GetHwInfo()
if err != nil {
return err
}
info.Serial = "stratux:868"
return f.dev.SetHwInfo(info)
}
func (u *UAT) shutdown() {
log.Println("Entered UAT shutdown() ...")
close(u.closeCh) // signal to shutdown
log.Println("UAT shutdown(): calling u.wg.Wait() ...")
u.wg.Wait() // Wait for the goroutine to shutdown
log.Println("UAT shutdown(): u.wg.Wait() returned...")
log.Println("UAT shutdown(): closing device ...")
u.dev.Close() // preempt the blocking ReadSync call
log.Println("UAT shutdown() complete ...")
}
func (e *ES) shutdown() {
log.Println("Entered ES shutdown() ...")
close(e.closeCh) // signal to shutdown
log.Println("ES shutdown(): calling e.wg.Wait() ...")
e.wg.Wait() // Wait for the goroutine to shutdown
log.Println("ES shutdown() complete ...")
}
func (f *OGN) shutdown() {
log.Println("Entered OGN shutdown() ...")
close(f.closeCh) // signal to shutdown
log.Println("signal shutdown(): calling f.wg.Wait() ...")
f.wg.Wait() // Wait for the goroutine to shutdown
log.Println("signal shutdown() complete ...")
}
var sdrShutdown bool
func sdrKill() {
// Send signal to shutdown to sdrWatcher().
sdrShutdown = true
// Spin until all devices have been de-initialized.
for UATDev != nil || ESDev != nil || OGNDev != nil {
time.Sleep(1 * time.Second)
}
}
func reCompile(s string) *regexp.Regexp {
// note: regexp.Compile returns a nil pointer on error
r, _ := regexp.Compile(s)
return r
}
type regexUAT regexp.Regexp
type regexES regexp.Regexp
type regexOGN regexp.Regexp
var rUAT = (*regexUAT)(reCompile("str?a?t?u?x:978"))
var rES = (*regexES)(reCompile("str?a?t?u?x:1090"))
var rOGN = (*regexES)(reCompile("str?a?t?u?x:868"))
func (r *regexUAT) hasID(serial string) bool {
if r == nil {
return strings.HasPrefix(serial, "stratux:978")
}
return (*regexp.Regexp)(r).MatchString(serial)
}
func (r *regexES) hasID(serial string) bool {
if r == nil {
return strings.HasPrefix(serial, "stratux:1090")
}
return (*regexp.Regexp)(r).MatchString(serial)
}
func (r *regexOGN) hasID(serial string) bool {
if r == nil {
return strings.HasPrefix(serial, "stratux:868")
}
return (*regexp.Regexp)(r).MatchString(serial)
}
func createUATDev(id int, serial string, idSet bool) error
|
func createESDev(id int, serial string, idSet bool) error {
ESDev = &ES{indexID: id, serial: serial}
if err := ESDev.sdrConfig(); err != nil {
log.Printf("ESDev.sdrConfig() failed: %s\n", err)
ESDev = nil
return err
}
ESDev.wg = &sync.WaitGroup{}
ESDev.idSet = idSet
ESDev.closeCh = make(chan int)
ESDev.wg.Add(1)
go ESDev.read()
return nil
}
func createOGNDev(id int, serial string, idSet bool) error {
OGNDev = &OGN{indexID: id, serial: serial}
if err := OGNDev.sdrConfig(); err != nil {
log.Printf("OGNDev.sdrConfig() failed: %s\n", err)
OGNDev = nil
return err
}
OGNDev.wg = &sync.WaitGroup{}
OGNDev.idSet = idSet
OGNDev.closeCh = make(chan int)
OGNDev.wg.Add(1)
go OGNDev.read()
return nil
}
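// configDevices assigns detected dongles to the UAT, ES and OGN roles: serial-tagged
// dongles are claimed first, then any remaining anonymous dongles fill the still-unassigned roles.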
func configDevices(count int, esEnabled, uatEnabled, ognEnabled bool) {
// once the tagged dongles have been assigned, explicitly range over
// the remaining IDs and assign them to any anonymous dongles
unusedIDs := make(map[int]string)
// loop 1: assign tagged dongles
for i := 0; i < count; i++ {
_, _, s, err := rtl.GetDeviceUsbStrings(i)
if err == nil {
//FIXME: Trim NULL from the serial. Best done in gortlsdr, but putting this here for now.
s = strings.Trim(s, "\x00")
// no need to check if createXDev returned an error; if it
// failed to config the error is logged and we can ignore
// it here so it doesn't get queued up again
if uatEnabled && UATDev == nil && rUAT.hasID(s) {
createUATDev(i, s, true)
} else if esEnabled && ESDev == nil && rES.hasID(s) {
createESDev(i, s, true)
} else if ognEnabled && OGNDev == nil && rOGN.hasID(s) {
createOGNDev(i, s, true)
} else {
unusedIDs[i] = s
}
} else {
log.Printf("rtl.GetDeviceUsbStrings id %d: %s\n", i, err)
}
}
// loop 2: assign anonymous dongles but sanity check the serial ids
// so we don't cross config for dual assigned dongles. e.g. when two
// dongles are set to the same stratux id and the unconsumed,
// non-anonymous, dongle makes it to this loop.
for i, s := range unusedIDs {
if uatEnabled && !globalStatus.UATRadio_connected && UATDev == nil && !rES.hasID(s) && !rOGN.hasID(s) {
createUATDev(i, s, false)
} else if esEnabled && ESDev == nil && !rUAT.hasID(s) && !rOGN.hasID(s) {
createESDev(i, s, false)
} else if ognEnabled && OGNDev == nil {
createOGNDev(i, s, false)
}
}
}
// to keep our sync primitives synchronized, only exit a read
// method's goroutine via the close flag channel check, to
// include catastrophic dongle failures
var shutdownES bool
var shutdownUAT bool
var shutdownOGN bool
// Watch for config/device changes.
func sdrWatcher() {
prevCount := 0
prevUATEnabled := false
prevESEnabled := false
prevOGNEnabled := false
// Get the system (RPi) uptime.
info := syscall.Sysinfo_t{}
err := syscall.Sysinfo(&info)
if err == nil {
// Got system uptime. Delay if and only if the system uptime is less than 120 seconds. This should be plenty of time
// for the RPi to come up and start Stratux. Keeps the delay from happening if the daemon is auto-restarted from systemd.
if info.Uptime < 120 {
time.Sleep(90 * time.Second)
} else if globalSettings.DeveloperMode {
// Throw a "critical error" if developer mode is enabled. Alerts the developer that the daemon was restarted (possibly)
// unexpectedly.
addSingleSystemErrorf("restart-warn", "System uptime %d seconds. Daemon was restarted.\n", info.Uptime)
}
}
for {
time.Sleep(1 * time.Second)
if sdrShutdown {
if UATDev != nil {
UATDev.shutdown()
UATDev = nil
}
if ESDev != nil {
ESDev.shutdown()
ESDev = nil
}
if OGNDev != nil {
OGNDev.shutdown()
OGNDev = nil
}
return
}
// true when a ReadSync call fails
if shutdownUAT {
if UATDev != nil {
UATDev.shutdown()
UATDev = nil
}
shutdownUAT = false
}
// true when dump1090 could not be started
if shutdownES {
if ESDev != nil {
ESDev.shutdown()
ESDev = nil
}
shutdownES = false
}
// true when ogn-rx-eu could not be started
if shutdownOGN {
if OGNDev != nil {
OGNDev.shutdown()
OGNDev = nil
}
shutdownOGN = false
}
// capture current state
esEnabled := globalSettings.ES_Enabled
uatEnabled := globalSettings.UAT_Enabled
ognEnabled := globalSettings.OGN_Enabled
count := rtl.GetDeviceCount()
interfaceCount := count
if globalStatus.UATRadio_connected {
interfaceCount++
}
atomic.StoreUint32(&globalStatus.Devices, uint32(interfaceCount))
// support up to three dongles
if count > 3 {
count = 3
}
if count == prevCount && prevESEnabled == esEnabled && prevUATEnabled == uatEnabled && prevOGNEnabled == ognEnabled {
continue
}
// the device count or the global settings have changed, reconfig
if UATDev != nil {
UATDev.shutdown()
UATDev = nil
}
if ESDev != nil {
ESDev.shutdown()
ESDev = nil
}
if OGNDev != nil {
OGNDev.shutdown()
OGNDev = nil
}
configDevices(count, esEnabled, uatEnabled, ognEnabled)
prevCount = count
prevUATEnabled = uatEnabled
prevESEnabled = esEnabled
prevOGNEnabled = ognEnabled
}
}
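// sdrInit starts the SDR watcher, the UAT message reader and the godump978 data-processing goroutines.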
func sdrInit() {
go sdrWatcher()
go uatReader()
go godump978.ProcessDataFromChannel()
}
|
{
UATDev = &UAT{indexID: id, serial: serial}
if err := UATDev.sdrConfig(); err != nil {
log.Printf("UATDev.sdrConfig() failed: %s\n", err)
UATDev = nil
return err
}
UATDev.wg = &sync.WaitGroup{}
UATDev.idSet = idSet
UATDev.closeCh = make(chan int)
UATDev.wg.Add(1)
go UATDev.read()
return nil
}
|
positions.py
|
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
"""
Entities for Positions
"""
def
|
():
pass
if __name__ == '__main__':
main()
|
main
|
config.js
|
// development environment config variables
const dev = {
hostName: 'https://zhweyzgnzg.execute-api.us-east-2.amazonaws.com/prod',
// hostName: 'http://localhost:5001',
};
// production environment config variables
const prod = {
|
};
const config = process.env.REACT_APP_STAGE === 'production' ? prod : dev;
// export the default configuration
export default {
...config,
};
|
hostName: 'https://zhweyzgnzg.execute-api.us-east-2.amazonaws.com/prod',
// hostName: 'http://localhost:5001',
|
handler.go
|
package httphandling
import (
"encoding/json"
"net/http"
"github.com/jcmturner/authenvoy/config"
)
// WrapCommonHandler wraps the handler in the authentication handler if required
// and the accessLogger wrapper.
func WrapCommonHandler(inner http.Handler, c *config.Config) http.Handler
|
func setHeaders(w http.ResponseWriter) http.ResponseWriter {
w.Header().Set("Cache-Control", "no-store")
//OWASP recommended headers
w.Header().Set("X-Content-Type-Options", "nosniff")
w.Header().Set("X-Frame-Options", "deny")
return w
}
// JSONGenericResponse is a generic JSON response structure
type JSONGenericResponse struct {
Message string
HTTPCode int
}
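// respondGeneric writes a JSONGenericResponse carrying the given HTTP status code and message.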
func respondGeneric(w http.ResponseWriter, httpCode int, message string) {
e := JSONGenericResponse{
Message: message,
HTTPCode: httpCode,
}
respondWithJSON(w, httpCode, e)
}
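// respondWithJSON marshals the payload to JSON and writes it with the given HTTP status code.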
func respondWithJSON(w http.ResponseWriter, httpCode int, payload interface{}) {
response, _ := json.Marshal(payload)
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(httpCode)
w.Write(response)
}
|
{
//Wrap with access logger
inner = accessLogger(inner, c)
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w = setHeaders(w)
inner.ServeHTTP(w, r)
return
})
}
|