dlg-sty-tags.js

(function( $ ) {
    var productTaggedAs = '.woocommerce-page.single-product div.product .product_meta>span.tagged_as';

    // Method for Control's event handlers: sh_pp_sty_tag_typography.
    wp.customize('mk_cz[sh_pp_sty_tag_typography]', function (value) {
        $(productTaggedAs).css(
            mkPreviewTypography(value(), true, ['weight'])
        );
        $(productTaggedAs).find('a').css(
            mkPreviewTypography(value(), false)
        );
        value.bind(function (to) {
            $(productTaggedAs).css(
                mkPreviewTypography(to, false, ['weight'])
            );
            $(productTaggedAs).find('a').css(
                mkPreviewTypography(to, false)
            );
        });
    });

    // Method for Control's event handlers: sh_pp_sty_tag_box_model.
    wp.customize('mk_cz[sh_pp_sty_tag_box_model]', function (value) {
        $(productTaggedAs).css(
            mkPreviewBoxModel(value())
        );
        value.bind(function (to) {
            $(productTaggedAs).css(
                mkPreviewBoxModel(to)
            );
        });
    });
} )( jQuery );
youtube.ts

import fetch from 'node-fetch';
import YoutubeElement from './youtubeElement';
import * as Credentials from '../config/local.env';

const apiKey = Credentials.googleYoutubeAPIKey;
const ytSearchURL = 'https://www.googleapis.com/youtube/v3/search?part=snippet&maxResults=2&key=' + apiKey + '&q=';

export default class Youtube {
    constructor() {
    }

    static extractLinkForFirstVideo(res: any): YoutubeElement {
        let item = res.items[0];
        if (item.snippet.channelTitle.indexOf('VEVO') !== -1) {
            item = res.items[1];
        }
        return new YoutubeElement(item);
    }

    static search(terms: string, callback: (item: YoutubeElement) => void): void {
        fetch(ytSearchURL + encodeURIComponent(terms))
            .then((res) => res.json())
            .then((res) => {
                callback(Youtube.extractLinkForFirstVideo(res));
            })
            .catch((err) => {
                console.log(err);
            });
    }
}
example.go

package main

import (
    "fmt"
    "math/big"
    "math/rand"
    "time"

    "github.com/c2h5oh/hide"
)

func main() {
    rand.Seed(time.Now().UnixNano())
    prime := big.NewInt(47)

    fmt.Println("Random IDs")
    for i := 0; i < 10; i++ {
        v := rand.Int31n(1000000)
        o := hide.Int32Obfuscate(v, prime, nil)
        fmt.Printf("%8d -> %10d\n", v, o)
    }

    fmt.Println("\nConsecutive IDs")
    start := rand.Int31n(1000000)
    for i := start; i < start+10; i++ {
        o := hide.Int32Obfuscate(i, prime, nil)
        fmt.Printf("%8d -> %10d\n", i, o)
    }
}
control_flow.py

# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from ..wrapped_decorator import signature_safe_contextmanager
from .layer_function_generator import autodoc, templatedoc
from .tensor import assign, cast, fill_constant
from .. import core
from ..framework import Program, Variable, Operator
from ..layer_helper import LayerHelper, unique_name
from ..initializer import force_init_on_cpu
from .nn import logical_and, logical_not, logical_or
from .utils import assert_same_structure, map_structure
import numpy
import warnings
import six
from functools import reduce, partial
from ..data_feeder import convert_dtype, check_type_and_dtype
from ... import compat as cpt
from ..backward import _infer_var_data_type_shape_
__all__ = [
'While', 'Switch', 'increment', 'array_write', 'create_array', 'less_than',
'less_equal', 'greater_than', 'greater_equal', 'equal', 'not_equal',
'array_read', 'array_length', 'cond', 'IfElse', 'DynamicRNN', 'StaticRNN',
'reorder_lod_tensor_by_rank', 'Print', 'is_empty', 'case', 'switch_case',
'while_loop'
]
def select_output(input, outputs, mask):
"""
**select_output**
    This API takes one input, multiple output variables, and an integer mask. It
    selects the output specified by the mask and copies the input to the selected
    output. It is useful in control flow.
Args:
input(Variable): The input variable
outputs(tuple|list): The output variables
mask(Variable): A tensor containing 1 integer number selecting which
output to be copied with input
Returns:
        list(Variable): The output variables.
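    Examples:
        A minimal usage sketch (added; not from the original docs). It assumes
        ``select_output`` is imported from this module, since it is not listed
        in ``__all__``:

        .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers.control_flow import select_output

            x = fluid.layers.fill_constant(shape=[1], dtype='float32', value=3.0)
            out0 = fluid.layers.create_tensor(dtype='float32')
            out1 = fluid.layers.create_tensor(dtype='float32')
            # mask holds one integer: the index of the output that receives x
            mask = fluid.layers.fill_constant(shape=[1], dtype='int32', value=1)
            select_output(x, [out0, out1], mask)  # copies x into out1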
"""
helper = LayerHelper('select_output', **locals())
helper.append_op(
type='select_output',
inputs={'X': input,
'Mask': mask},
outputs={'Out': outputs})
return outputs
def select_input(inputs, mask):
"""
**select_input**
This API takes in multiple inputs and uses an integer mask to select one
input to output. It is useful in control flow.
Args:
inputs(tuple|list): The input variables
mask(Variable): A tensor containing 1 integer number selecting which
input to output
Returns:
Variable: The selected input variable
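    Examples:
        A minimal usage sketch (added; not from the original docs). It assumes
        ``select_input`` is imported from this module, since it is not listed
        in ``__all__``:

        .. code-block:: python

            import paddle.fluid as fluid
            from paddle.fluid.layers.control_flow import select_input

            x0 = fluid.layers.fill_constant(shape=[1], dtype='float32', value=1.0)
            x1 = fluid.layers.fill_constant(shape=[1], dtype='float32', value=2.0)
            # mask holds one integer: the index of the input to return
            mask = fluid.layers.fill_constant(shape=[1], dtype='int32', value=0)
            out = select_input([x0, x1], mask)  # out gets the value of x0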
"""
helper = LayerHelper('select_input', **locals())
if isinstance(inputs, list) or isinstance(inputs, tuple):
input_dtype = inputs[0].dtype
input_shape = inputs[0].shape
else:
input_dtype = inputs.dtype
input_shape = inputs.shape
out = helper.create_variable(dtype=input_dtype, shape=input_shape)
helper.append_op(
type='select_input',
inputs={'X': inputs,
'Mask': mask},
outputs={'Out': out})
return out
def split_lod_tensor(input, mask, level=0):
"""
This function takes in an input that contains the complete lod information,
and takes in a mask which is used to mask certain parts of the input.
The output is the true branch and the false branch with the mask applied to
the input at a certain level in the tensor. Mainly used in IfElse to split
data into two parts.
Args:
input(tuple|list|None): The input tensor that contains complete
lod information needed to construct the output.
mask(list): A bool column vector which masks the input.
level(int): The specific lod level to split.
Returns:
tuple(Variable, Variable):
The true branch of tensor as per the mask applied to input.
The false branch of tensor as per the mask applied to input.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[1])
x.persistable = True
y = fluid.layers.data(name='y', shape=[1])
y.persistable = True
            out_true, out_false = fluid.layers.split_lod_tensor(
                input=x, mask=y, level=0)
"""
helper = LayerHelper('split_lod_tensor', **locals())
out_true = helper.create_variable_for_type_inference(dtype=input.dtype)
out_false = helper.create_variable_for_type_inference(dtype=input.dtype)
helper.append_op(
type='split_lod_tensor',
inputs={
'X': input,
'Mask': mask,
},
outputs={'OutTrue': out_true,
'OutFalse': out_false},
attrs={'level': level})
return out_true, out_false
def merge_lod_tensor(in_true, in_false, x, mask, level=0):
"""
**merge_lod_tensor**
This function takes in an input :math:`x`, the True branch, the False
branch and a binary :math:`mask`. Using this information, this function
merges the True and False branches of the tensor into a single tensor as
output at a certain lod level indicated by :math:`level`. Used in IfElse
    to merge the outputs of the True block and the False block.
Args:
in_true(tuple|list|None): The True branch to be merged.
in_false(tuple|list|None): The False branch to be merged.
x(tuple|list|None): The input tensor that contains complete
lod information needed to construct the output.
mask(list): A bool column vector which masks the input.
level(int): The specific lod level to merge.
Returns:
Variable: The merged output tensor.
Examples:
.. code-block:: python
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
x = layers.data(
name='x', shape=[1], dtype='float32', stop_gradient=False)
y = layers.data(
name='y', shape=[1], dtype='bool', stop_gradient=False)
level = 0
out_true, out_false = layers.split_lod_tensor(
input=x, mask=y, level=level)
out = layers.merge_lod_tensor(
in_true=out_true, in_false=out_false, mask=y, x=x, level=level)
"""
helper = LayerHelper('merge_lod_tensor', **locals())
out = helper.create_variable_for_type_inference(dtype=in_true.dtype)
helper.append_op(
type='merge_lod_tensor',
inputs={'X': x,
'Mask': mask,
'InTrue': in_true,
'InFalse': in_false},
outputs={'Out': out},
attrs={'level': level})
return out
def Print(input,
first_n=-1,
message=None,
summarize=20,
print_tensor_name=True,
print_tensor_type=True,
print_tensor_shape=True,
print_tensor_lod=True,
print_phase='both'):
'''
**Print operator**
    This creates a print op that prints when a tensor is accessed.
    It wraps the tensor passed in so that whenever the tensor is accessed,
    the message `message` is printed, along with the current value of the
    tensor.
Args:
input (Variable): A Tensor to print.
        summarize (int): Number of elements in the tensor to be printed. If its
            value is -1, then all elements in the tensor will be printed.
message (str): A string message to print as a prefix.
first_n (int): Only log `first_n` number of times.
print_tensor_name (bool, optional): Print the tensor name. Default: True.
        print_tensor_type (bool, optional): Print the tensor type. Default: True.
print_tensor_shape (bool, optional): Print the tensor shape. Default: True.
print_tensor_lod (bool, optional): Print the tensor lod. Default: True.
        print_phase (str): Which phase to print, from 'forward',
            'backward' and 'both'. Default: 'both'. If set to 'backward', this OP
            only prints the gradients of the input tensor; if set to 'both', it
            prints both the input tensor itself and the gradients of the input tensor.
Returns:
Variable: Output tensor.
    NOTES:
        The input and output are two different Variables. In subsequent
        uses you should work with the output Variable rather than the input;
        otherwise, the print layer will have no backward pass.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.layers.fill_constant(shape=[10,2], value=3, dtype='int64')
input = fluid.layers.Print(input, message="The content of input layer:")
main_program = fluid.default_main_program()
exe = fluid.Executor(fluid.CPUPlace())
exe.run(main_program)
Output at runtime:
.. code-block:: bash
The content of input layer: The place is:CPUPlace
Tensor[fill_constant_0.tmp_0]
shape: [10,2,]
dtype: x
data: 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,
'''
check_type_and_dtype(input, 'input', Variable,
['float32', 'float64', 'int32', 'int64', 'bool'],
'fluid.layers.Print')
helper = LayerHelper('print' + "_" + input.name, **locals())
output = helper.create_variable_for_type_inference(input.dtype)
helper.append_op(
type='print',
inputs={'In': input},
outputs={'Out': output},
attrs={
'first_n': first_n,
'summarize': summarize,
'message': message or "",
'print_tensor_name': print_tensor_name,
'print_tensor_type': print_tensor_type,
'print_tensor_shape': print_tensor_shape,
'print_tensor_lod': print_tensor_lod,
'print_phase': print_phase.upper()
})
return output
class BlockGuard(object):
"""
BlockGuard class.
BlockGuard class is used to create a sub-block in a program by
using the Python `with` keyword.
"""
def __init__(self, main_program):
if not isinstance(main_program, Program):
raise TypeError("BlockGuard takes a program")
self.main_program = main_program
def __enter__(self):
self.main_program._create_block()
def __exit__(self, exc_type, exc_val, exc_tb):
self.main_program._rollback()
if exc_type is not None:
return False # re-raise exception
return True
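# Illustrative sketch (added comment, not part of the original source):
# BlockGuard is normally used indirectly through subclasses such as
# WhileGuard and ConditionalBlockGuard below, e.g.
#
#     prog = fluid.Program()
#     with BlockGuard(prog):
#         pass  # ops appended here land in a fresh sub-block of prog
#
# On exit the guard rolls the program back to the parent block.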
class BlockGuardWithCompletion(BlockGuard):
"""
BlockGuardWithCompletion class.
BlockGuardWithCompletion class is used to create an op with a block in a program.
"""
def __init__(self, rnn):
if not isinstance(rnn, StaticRNN):
raise TypeError("BlockGuardWithCompletion takes a StaticRNN")
super(BlockGuardWithCompletion, self).__init__(rnn.helper.main_program)
self.rnn = rnn
def __enter__(self):
self.rnn.status = StaticRNN.IN_RNN_BLOCK
return super(BlockGuardWithCompletion, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
self.rnn.status = StaticRNN.AFTER_RNN_BLOCK
self.rnn._complete_op()
return super(BlockGuardWithCompletion, self).__exit__(exc_type, exc_val,
exc_tb)
class StaticRNNMemoryLink(object):
"""
StaticRNNMemoryLink class.
StaticRNNMemoryLink class is used to create a link between two
memory cells of a StaticRNN.
    NOTE: This is an internal data structure of a very low-level API.
Please use StaticRNN instead.
Args:
init(Variable): the initial variable for Memory.
pre_mem(Variable): the memory variable in previous time step.
mem(Variable): the memory variable in current time step.
"""
def __init__(self, init, pre_mem, mem=None):
self.init = init
self.pre_mem = pre_mem
self.mem = mem
class StaticRNN(object):
"""
StaticRNN class.
    The StaticRNN can process a batch of sequence data. The first dimension of the
    input represents the sequence length, and the lengths of all input sequences
    must be equal. StaticRNN will unfold the sequence into time steps; the user
    needs to define how to process each time step inside the :code:`with` block.
Args:
name (str, optional): Please refer to :ref:`api_guide_Name`, Default None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# create prev memory parameter, batch size comes from word
prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
# use hidden to update prev
rnn.update_memory(prev, hidden)
# mark hidden as output
rnn.step_output(hidden)
            # get StaticRNN final output
result = rnn()
"""
BEFORE_RNN_BLOCK = 0
IN_RNN_BLOCK = 1
AFTER_RNN_BLOCK = 2
def __init__(self, name=None):
self.helper = LayerHelper("static_rnn", name=name)
self.memories = {} # memory map, from pre_mem.name --> MemoryLink
self.inputs = [] # input variable list in current block
self.outputs = [] # output variable list in parent block
self.status = StaticRNN.BEFORE_RNN_BLOCK # status flag.
        # sequence length; since this is a static RNN, the sequence length is fixed.
self.seq_len = None
def step(self):
"""
        Define operators for each step. ``step`` is used in a :code:`with` block, and the OPs in
        the :code:`with` block will be executed sequence_len times (sequence_len is the length of the input)
"""
return BlockGuardWithCompletion(self)
def _assert_in_rnn_block_(self, method):
if self.status != StaticRNN.IN_RNN_BLOCK:
raise ValueError("You must invoke {0} in rnn block".format(method))
def memory(self,
init=None,
shape=None,
batch_ref=None,
init_value=0.0,
init_batch_dim_idx=0,
ref_batch_dim_idx=1):
"""
Create a memory variable for static rnn.
        If :code:`init` is not None, the memory will be initialized by
        this Variable. If :code:`init` is None, :code:`shape` and :code:`batch_ref`
        must be set, and this function will create a new variable from shape and
        batch_ref to serve as the initial :code:`init` Variable.
Args:
init(Variable, optional): Tensor used to init memory. If it is not set,
:code:`shape` and :code:`batch_ref` must be provided.
Default: None.
shape(list|tuple): When :code:`init` is None use this arg to initialize memory shape.
NOTE the shape does not contain batch_size. Default: None.
batch_ref(Variable, optional): When :code:`init` is None, memory's batch size will
be set as batch_ref's ref_batch_dim_idx value. Default: None.
init_value(float, optional): When :code:`init` is None, used to init memory's value. Default: 0.0.
init_batch_dim_idx(int, optional): the batch_size axis of the :code:`init` Variable. Default: 0.
ref_batch_dim_idx(int, optional): the batch_size axis of the :code:`batch_ref` Variable. Default: 1.
Returns:
Variable: The memory variable.
Examples 1:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# create prev memory parameter, batch size comes from word
prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
# use hidden to update prev
rnn.update_memory(prev, hidden)
Examples 2:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
boot_memory = fluid.layers.data(name='boot', shape=[hidden_size], dtype='float32', lod_level=1)
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# init memory
prev = rnn.memory(init=boot_memory)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
                # use hidden to update prev
rnn.update_memory(prev, hidden)
"""
self._assert_in_rnn_block_('memory')
if init is None:
if shape is None or batch_ref is None:
raise ValueError(
"if init is None, memory at least need shape and batch_ref")
parent_block = self._parent_block()
var_name = unique_name.generate_with_ignorable_key("@".join(
[self.helper.name, "memory_boot"]))
boot_var = parent_block.create_var(
name=var_name,
shape=shape,
dtype=batch_ref.dtype,
persistable=False)
parent_block.append_op(
type="fill_constant_batch_size_like",
inputs={'Input': [batch_ref]},
outputs={'Out': [boot_var]},
attrs={
'value': init_value,
'shape': boot_var.shape,
'dtype': boot_var.dtype,
'input_dim_idx': ref_batch_dim_idx,
'output_dim_idx': init_batch_dim_idx
})
return self.memory(init=boot_var)
else:
pre_mem = self.helper.create_variable(
name=unique_name.generate_with_ignorable_key("@".join(
[self.helper.name, "mem"])),
dtype=init.dtype,
shape=init.shape)
self.memories[pre_mem.name] = StaticRNNMemoryLink(
init=init, pre_mem=pre_mem)
return pre_mem
def step_input(self, x):
"""
Mark a sequence as a StaticRNN input.
Args:
x(Variable): The input sequence, the shape of x
should be [seq_len, ...].
Returns:
Variable: The current time step data in the input sequence.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# create prev memory parameter, batch size comes from word
prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
# use hidden to update prev
rnn.update_memory(prev, hidden)
"""
self._assert_in_rnn_block_('step_input')
if not isinstance(x, Variable):
raise TypeError("step input takes a Variable")
if self.seq_len is None:
self.seq_len = x.shape[0]
elif x.shape[0] != -1 and self.seq_len != x.shape[0]:
raise ValueError("Static RNN only take fix seq_len input")
ipt = self.helper.create_variable(
name=x.name, dtype=x.dtype, shape=list(x.shape[1:]), type=x.type)
self.inputs.append(ipt)
return ipt
def step_output(self, o):
"""
Mark a sequence as a StaticRNN output.
Args:
o(Variable): The output sequence.
Returns:
None.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# create prev memory parameter, batch size comes from word
prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
# use hidden to update prev
rnn.update_memory(prev, hidden)
rnn.step_output(hidden)
result = rnn()
"""
self._assert_in_rnn_block_('step_output')
if not isinstance(o, Variable):
raise TypeError("step output takes a Variable")
tmp_o = self.helper.create_variable_for_type_inference(dtype=o.dtype)
self.helper.append_op(
type='rnn_memory_helper',
inputs={'X': [o]},
outputs={'Out': tmp_o},
attrs={'dtype': o.dtype})
out_var = self._parent_block().create_var(
name=tmp_o.name,
shape=[self.seq_len] + list(tmp_o.shape),
dtype=tmp_o.dtype)
self.outputs.append(out_var)
def output(self, *outputs):
"""
Mark the StaticRNN output variables.
Args:
            outputs: The output Tensors; multiple variables can be marked as outputs.
Returns:
None
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
vocab_size, hidden_size=10000, 200
x = fluid.data(name="x", shape=[None, 1, 1], dtype='int64')
# create word sequence
x_emb = layers.embedding(
input=x,
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False)
# transform batch size to dim 1
x_emb = layers.transpose(x_emb, perm=[1, 0, 2])
rnn = fluid.layers.StaticRNN()
with rnn.step():
# mark created x_emb as input, each step process a word
word = rnn.step_input(x_emb)
# create prev memory parameter, batch size comes from word
prev = rnn.memory(shape=[-1, hidden_size], batch_ref = word)
hidden = fluid.layers.fc(input=[word, prev], size=hidden_size, act='relu')
# use hidden to update prev
rnn.update_memory(prev, hidden)
# mark each step's hidden and word as output
rnn.output(hidden, word)
result = rnn()
"""
for each in outputs:
self.step_output(each)
def update_memory(self, mem, var):
"""
Update the memory from :code:`mem` to :code:`var`.
Args:
mem(Variable): the memory variable.
var(Variable): the plain variable generated in RNN block, used to update memory.
                ``var`` and ``mem`` should have the same shape and data type.
Returns:
None
"""
if not isinstance(mem, Variable) or not isinstance(var, Variable):
raise TypeError("update memory should take variables")
self.memories[mem.name].mem = var
def _parent_block(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def __call__(self, *args, **kwargs):
if self.status != StaticRNN.AFTER_RNN_BLOCK:
raise ValueError("RNN output can only be retrieved after rnn block")
if len(self.outputs) == 0:
raise ValueError("RNN has no output")
elif len(self.outputs) == 1:
return self.outputs[0]
else:
return self.outputs
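    # Internal (comment added for clarity): invoked by BlockGuardWithCompletion
    # when the rnn.step() block exits. It gathers the sub-block's external
    # inputs, parameters and memory links, then appends the enclosing
    # 'recurrent' op to the parent block.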
def _complete_op(self):
main_program = self.helper.main_program
rnn_block = main_program.current_block()
parent_block = self._parent_block()
local_inputs = set()
for op in rnn_block.ops:
assert isinstance(op, Operator)
for oname in op.output_names:
for out_var_name in op.output(oname):
local_inputs.add(out_var_name)
for var in self.inputs:
local_inputs.add(var.name)
for m in self.memories:
local_inputs.add(m)
# NOTE(zcd): the params have two categories of variables.
        # - the variables that are the outputs of StaticRNN.
# - the variables that are the parameters of some layers, for example, conv2d.
params = list()
for op in rnn_block.ops:
assert isinstance(op, Operator)
for iname in op.input_names:
for in_var_name in op.input(iname):
if in_var_name not in local_inputs:
params.append(in_var_name)
parameters = [parent_block.var(name) for name in set(params)]
step_scope = parent_block.create_var(
type=core.VarDesc.VarType.STEP_SCOPES)
inlinks = [parent_block.var(i.name) for i in self.inputs]
outlinks = self.outputs
# NOTE(zcd): the states maybe empty in some case.
boot_memories = []
pre_memories = []
memories = []
for _, mem in six.iteritems(self.memories):
boot_memories.append(mem.init)
pre_memories.append(mem.pre_mem.name)
assert mem.mem is not None, "%s should be updated in every step." % (
mem.init.name)
mem_var = rnn_block.var(mem.mem.name)
assert isinstance(mem_var, Variable)
new_mem = self.helper.create_variable_for_type_inference(
dtype=mem_var.dtype)
rnn_block.append_op(
type='rnn_memory_helper',
inputs={'X': [mem_var]},
outputs={'Out': [new_mem]},
attrs={'dtype': mem_var.dtype})
memories.append(new_mem.name)
parent_block.append_op(
type='recurrent',
inputs={
'inputs': inlinks,
'initial_states': boot_memories,
'parameters': parameters
},
outputs={'outputs': outlinks,
'step_scopes': [step_scope]},
attrs={
'has_states': len(pre_memories) > 0,
'ex_states': pre_memories,
'states': memories,
'sub_block': rnn_block
})
class WhileGuard(BlockGuard):
def __init__(self, while_op):
if not isinstance(while_op, While):
raise TypeError("WhileGuard takes a while op")
super(WhileGuard, self).__init__(while_op.helper.main_program)
self.while_op = while_op
def __enter__(self):
self.while_op.status = While.IN_WHILE_BLOCK
return super(WhileGuard, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is not None:
return False
self.while_op.status = While.AFTER_WHILE_BLOCK
self.while_op._complete()
return super(WhileGuard, self).__exit__(exc_type, exc_val, exc_tb)
class While(object):
"""
    while loop control flow. Repeats the loop body until ``cond`` is False.
Note:
A new OP :ref:`api_fluid_layers_while_loop` is highly recommended instead of ``While`` if the shape of parameter ``cond`` is [1].
OP :ref:`api_fluid_layers_while_loop` is easier to use and is called with less code but does the same thing as ``While`` .
Args:
cond(Variable): A Tensor whose data type is bool controlling whether to continue looping.
is_test(bool, optional): A flag indicating whether execution is in test phase. Default value is False.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0) # loop counter
loop_len = fluid.layers.fill_constant(shape=[1],dtype='int64', value=10) # loop length
cond = fluid.layers.less_than(x=i, y=loop_len)
while_op = fluid.layers.While(cond=cond)
with while_op.block():
i = fluid.layers.increment(x=i, value=1, in_place=True)
fluid.layers.less_than(x=i, y=loop_len, cond=cond)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
res = exe.run(fluid.default_main_program(), feed={}, fetch_list=[i])
print(res) # [array([10])]
"""
BEFORE_WHILE_BLOCK = 0
IN_WHILE_BLOCK = 1
AFTER_WHILE_BLOCK = 2
def __init__(self, cond, is_test=False, name=None):
self.helper = LayerHelper("while", name=name)
self.status = While.BEFORE_WHILE_BLOCK
if not isinstance(cond, Variable):
raise TypeError("condition should be a variable")
assert isinstance(cond, Variable)
if cond.dtype != core.VarDesc.VarType.BOOL:
raise TypeError("condition should be a boolean variable")
if reduce(lambda a, b: a * b, cond.shape, 1) != 1:
raise TypeError(
"condition expected shape as [], but given shape as {0}.".
format(list(cond.shape)))
self.cond_var = cond
self.is_test = is_test
def block(self):
return WhileGuard(self)
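    # Internal (comment added for clarity): called when the WhileGuard exits.
    # It scans the ops recorded in the while sub-block, collects external
    # inputs and produced outputs, and appends the enclosing 'while' op to
    # the parent block.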
def _complete(self):
main_program = self.helper.main_program
while_block = main_program.current_block()
parent_block = main_program.block(main_program.current_block()
.parent_idx)
inner_outputs = {self.cond_var.name}
x_name_list = set()
for op in while_block.ops:
for iname in op.input_names:
for in_var_name in op.input(iname):
if in_var_name not in inner_outputs:
x_name_list.add(in_var_name)
for oname in op.output_names:
for out_var_name in op.output(oname):
inner_outputs.add(out_var_name)
out_vars = []
for inner_out_name in inner_outputs:
inner_var = parent_block._find_var_recursive(inner_out_name)
if inner_var:
out_vars.append(inner_var)
step_scope = parent_block.create_var(
type=core.VarDesc.VarType.STEP_SCOPES)
parent_block.append_op(
type='while',
inputs={
'X': [
parent_block._var_recursive(x_name)
for x_name in x_name_list
],
'Condition': [self.cond_var]
},
outputs={'Out': out_vars,
'StepScopes': [step_scope]},
attrs={'sub_block': while_block,
"is_test": self.is_test})
def while_loop(cond, body, loop_vars, is_test=False, name=None):
"""
    while_loop is one of the control flow OPs. It repeats ``body`` until ``cond`` returns False.
Args:
cond(Callable): A callable returning a boolean tensor controlling whether to continue looping.
body(Callable): A callable returning a tuple or list of tensors of the same arity (length and structure)
            and types as ``loop_vars`` .
loop_vars(list|tuple): A list or tuple of tensors that is passed to both ``cond`` and ``body`` .
is_test(bool, optional): A flag indicating whether execution is in test phase. Default value is False.
name(str, optional): Normally there is no need for users to set this property. For more information, please
refer to :ref:`api_guide_Name`. Default is None.
Returns:
        A list or tuple of tensors returned by ``body`` .
    Return type:
list(Variable)|tuple(Variable).
Raises:
TypeError: If the type of ``cond`` is not callable.
TypeError: If the type of ``body`` is not callable.
TypeError: If the type of ``loop_vars`` is not list or tuple.
        TypeError: If ``cond`` does not return a Variable.
        TypeError: If ``cond`` does not return a boolean Variable.
        TypeError: If the Variable returned by ``cond`` does not hold exactly one element.
        ValueError: If ``loop_vars`` is empty.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
def cond(i):
return layers.less_than(i, ten)
def body(i):
return layers.increment(x=i, value=1, in_place=True)
main_program = fluid.default_main_program()
startup_program = fluid.default_startup_program()
with fluid.program_guard(main_program, startup_program):
i = layers.fill_constant(shape=[1], dtype='int64', value=0) # loop counter
ten = layers.fill_constant(shape=[1], dtype='int64', value=10) # loop length
out = layers.while_loop(cond, body, [i])
exe = fluid.Executor(fluid.CPUPlace())
res = exe.run(main_program, feed={}, fetch_list=out)
print(res) # [array([10])]
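        A further sketch (added; not from the original docs): ``while_loop``
        with two loop variables, where ``body`` must return the same arity as
        ``loop_vars``:

        .. code-block:: python

            import paddle.fluid as fluid
            import paddle.fluid.layers as layers

            def cond(i, s):
                return layers.less_than(i, ten)

            def body(i, s):
                i = layers.increment(x=i, value=1, in_place=True)
                s = layers.elementwise_add(x=s, y=i)
                return [i, s]

            main_program = fluid.default_main_program()
            startup_program = fluid.default_startup_program()
            with fluid.program_guard(main_program, startup_program):
                i = layers.fill_constant(shape=[1], dtype='int64', value=0)    # loop counter
                s = layers.fill_constant(shape=[1], dtype='int64', value=0)    # running sum
                ten = layers.fill_constant(shape=[1], dtype='int64', value=10) # loop length
                i_out, s_out = layers.while_loop(cond, body, [i, s])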
"""
helper = LayerHelper('while_loop', **locals())
if not callable(cond):
raise TypeError("cond in while_loop should be callable")
if not callable(body):
raise TypeError("body in while_loop should be callable")
if not isinstance(loop_vars, (list, tuple)):
raise TypeError("loop_vars in while_loop should be a list or tuple")
if len(loop_vars) == 0:
raise ValueError("loop_vars in while_loop should not be empty")
pre_cond = cond(*loop_vars)
if not isinstance(pre_cond, Variable):
raise TypeError("cond in while_loop should return a variable")
if pre_cond.dtype != core.VarDesc.VarType.BOOL:
raise TypeError("cond in while_loop should return a boolean variable")
if reduce(lambda a, b: a * b, pre_cond.shape, 1) != 1:
raise TypeError(
"the shape of the variable returned by cond should be [],"
"but given shape as {0}.".format(list(pre_cond.shape)))
while_loop_block = While(pre_cond, is_test, name)
with while_loop_block.block():
output_vars = body(*loop_vars)
if len(loop_vars) == 1:
assign(output_vars, loop_vars[0])
now_cond = cond(output_vars)
else:
for i in range(len(output_vars)):
assign(output_vars[i], loop_vars[i])
now_cond = cond(*output_vars)
assign(now_cond, pre_cond)
return loop_vars
def lod_rank_table(x, level=0):
"""
LoD Rank Table Operator. Given an input variable **x** and a level number
of LoD, this layer creates a LodRankTable object. A LoDRankTable object
contains a list of bi-element tuples. Each tuple consists of an index and
    a length, both of which are int type. Referring to the specified level of
    LoD, the index is the sequence index number and the length represents the
sequence length. Please note that the list is ranked in descending order by
the length. The following is an example:
.. code-block:: text
x is a LoDTensor:
x.lod = [[2, 1],
[5, 1, 1]]
x.data = [a, b, c, d, e, f, g]
1. set level to 0:
Create lod rank table:
lod_rank_table_obj = lod_rank_table(x, level=0)
Get:
lod_rank_table_obj.items() = [(0, 2), (1, 1)]
2. set level to 1:
Create lod rank table:
lod_rank_table_obj = lod_rank_table(x, level=1)
Get:
lod_rank_table_obj.items() = [(0, 5), (1, 1), (2, 1)]
Args:
x (Variable): Input variable, a LoDTensor based which to create the lod
rank table.
level (int): Specify the LoD level, on which to create the lod rank
table.
Returns:
Variable: The created LoDRankTable object.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[10],
dtype='float32', lod_level=1)
            out = fluid.layers.lod_rank_table(x=x, level=0)
"""
helper = LayerHelper("lod_rank_table", **locals())
table = helper.create_variable(
type=core.VarDesc.VarType.LOD_RANK_TABLE,
name=unique_name.generate("lod_rank_table"))
helper.append_op(
type='lod_rank_table',
inputs={'X': x},
outputs={'Out': table},
attrs={'level': level})
return table
@templatedoc()
def max_sequence_len(rank_table):
"""
${comment}
>>> import paddle.fluid as fluid
>>> x = fluid.layers.data(name='x', shape=[10], dtype='float32',
>>> lod_level=1)
>>> rank_table = layers.lod_rank_table(x=x, level=0)
>>> max_seq_len = layers.max_sequence_len(rank_table)
Args:
rank_table(${rank_table_type}): ${rank_table_comment}.
Returns:
${out_comment}.
"""
helper = LayerHelper("max_seqence_len", **locals())
res = helper.create_variable_for_type_inference(dtype="int64")
helper.append_op(
type="max_sequence_len",
inputs={"RankTable": rank_table},
outputs={"Out": res})
return res
def lod_tensor_to_array(x, table):
"""
Convert a LoDTensor to a LoDTensorArray.
    This function splits a LoDTensor into a LoDTensorArray according to its LoD
information. LoDTensorArray is an alias of C++ std::vector<LoDTensor> in
PaddlePaddle. The generated LoDTensorArray of this function can be further read
or written by `read_from_array()` and `write_to_array()` operators. However,
this function is generally an internal component of PaddlePaddle `DynamicRNN`.
Users should not use it directly.
Args:
x (Variable|list): The LoDTensor to be converted to a LoDTensorArray.
table (ParamAttr|list): The variable that stores the level of lod
which is ordered by sequence length in
descending order. It is generally generated
by `layers.lod_rank_table()` API.
Returns:
Variable: The LoDTensorArray that has been converted from the input tensor.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[10])
table = fluid.layers.lod_rank_table(x, level=0)
array = fluid.layers.lod_tensor_to_array(x, table)
"""
helper = LayerHelper("lod_tensor_to_array", **locals())
array = helper.create_variable(
name=unique_name.generate("lod_tensor_to_array"),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.dtype)
helper.append_op(
type='lod_tensor_to_array',
inputs={'X': x,
'RankTable': table},
outputs={'Out': array})
return array
def array_to_lod_tensor(x, table):
"""Convert a LoD_Tensor_Aarry to an LoDTensor.
Args:
x (Variable|list): The lod tensor array to be converted to a tensor.
table (ParamAttr|list): The variable that stores the level of lod
which is ordered by sequence length in
descending order.
Returns:
Variable: The variable of type tensor that has been converted
from an array.
Examples:
.. code-block:: python
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[10])
table = fluid.layers.lod_rank_table(x, level=0)
array = fluid.layers.lod_tensor_to_array(x, table)
lod_tensor = fluid.layers.array_to_lod_tensor(array, table)
"""
helper = LayerHelper("array_to_lod_tensor", **locals())
tmp = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type="array_to_lod_tensor",
inputs={'X': x,
'RankTable': table},
outputs={'Out': tmp})
return tmp
def increment(x, value=1.0, in_place=True):
"""
The OP is usually used for control flow to increment the data of :attr:`x` by an amount :attr:`value`.
Notice that the number of elements in :attr:`x` must be equal to 1.
Parameters:
        x (Variable): A tensor that must always contain only one element; its data type supports
float32, float64, int32 and int64.
value (float, optional): The amount to increment the data of :attr:`x`. Default: 1.0.
in_place (bool, optional): Whether the OP should be performed in-place. Default: True.
Returns:
Variable: The elementwise-incremented tensor with the same shape and data type as :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
counter = fluid.layers.zeros(shape=[1], dtype='float32') # [0.]
fluid.layers.increment(counter) # [1.]
"""
helper = LayerHelper("increment", **locals())
if not in_place:
out = helper.create_variable_for_type_inference(dtype=x.dtype)
else:
out = x
helper.append_op(
type='increment',
inputs={'X': [x]},
outputs={'Out': [out]},
attrs={'step': float(value)})
return out
def array_write(x, i, array=None):
    """
    This OP writes the input ``x`` into the ``i``-th position of the
    ``array`` ( :ref:`api_fluid_LoDTensorArray` ) and returns the modified array.
    If ``array`` is None, a new LoDTensorArray will be created and returned.
    This OP is often used together with :ref:`api_fluid_layers_array_read` OP.
    """
    helper = LayerHelper('array_write', **locals())
    if array is None:
        array = helper.create_variable(
            name="{0}.out".format(helper.name),
            type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
            dtype=x.dtype)
    helper.append_op(
        type='write_to_array',
        inputs={'X': [x],
                'I': [i]},
        outputs={'Out': [array]})
    return array
def create_array(dtype):
"""
This OP creates an LOD_TENSOR_ARRAY. It is used as
the input of :ref:`api_fluid_layers_array_read` and
:ref:`api_fluid_layers_array_write`. Also it can be used
    with :ref:`api_fluid_layers_While` to create an RNN network.
Args:
dtype (str): The data type of the elements in the lod_tensor_array.
Support data type: float32, float64, int32, int64.
Returns:
Variable: The empty lod_tensor_array. The data type of elements in Tensor is ``dtype``.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data = fluid.layers.create_array(dtype='float32') # Create a float32 LoDTensorArray.
"""
helper = LayerHelper("array", **locals())
return helper.create_variable(
name="{0}.out".format(helper.name),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=dtype)
@templatedoc()
def less_than(x, y, force_cpu=None, cond=None):
"""
${comment}
Args:
x(${x_type}): ${x_comment}.
y(${y_type}): ${y_comment}.
force_cpu(${force_cpu_type}): ${force_cpu_comment}.
cond(Variable|None): Optional output variable to store the result of *less_than*
Returns:
${out_comment}.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
# Graph Organizing
x = fluid.layers.data(name='x', shape=[2], dtype='float64')
y = fluid.layers.data(name='y', shape=[2], dtype='float64')
result = fluid.layers.less_than(x=x, y=y)
# The comment lists another available method.
# result = fluid.layers.fill_constant(shape=[2], dtype='float64', value=0)
# fluid.layers.less_than(x=x, y=y, cond=result)
# Create an executor using CPU as example
exe = fluid.Executor(fluid.CPUPlace())
# Execute
x_i = np.array([[1, 2], [3, 4]]).astype(np.float64)
y_i = np.array([[2, 2], [1, 3]]).astype(np.float64)
result_value, = exe.run(fluid.default_main_program(), feed={'x':x_i, 'y':y_i}, fetch_list=[result])
print(result_value) # [[True, False], [False, False]]
"""
helper = LayerHelper("less_than", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True
attrs = dict()
if force_cpu is not None:
attrs['force_cpu'] = force_cpu
elif force_init_on_cpu():
attrs['force_cpu'] = force_init_on_cpu()
helper.append_op(
type='less_than',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [cond]},
attrs=attrs)
return cond
@templatedoc()
def less_equal(x, y, cond=None):
"""
This OP returns the truth value of :math:`x <= y` elementwise, which is equivalent function to the overloaded operator `<=`.
Args:
x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        cond(Variable, optional): If it is :attr:`None`, the op will create a variable as output tensor; the shape and data type of \
            this tensor are the same as input :attr:`x`. If it is not :attr:`None`, the op will set the variable as output tensor; the shape \
            and data type of this tensor should be the same as input :attr:`x`. Default value is :attr:`None`.
Returns:
        Variable: The tensor variable storing the output; the output data type is bool and the output shape is the same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
label = fluid.layers.assign(np.array([1, 3], dtype='int32'))
limit = fluid.layers.assign(np.array([1, 2], dtype='int32'))
out = fluid.layers.less_equal(x=label, y=limit) #out=[True, False]
out1 = label<= limit #out1=[True, False]
"""
helper = LayerHelper("less_equal", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True
attrs = dict()
if force_init_on_cpu():
attrs['force_cpu'] = force_init_on_cpu()
helper.append_op(
type='less_equal',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [cond]},
attrs=attrs)
return cond
@templatedoc()
def greater_than(x, y, cond=None):
"""
This OP returns the truth value of :math:`x > y` elementwise, which is equivalent function to the overloaded operator `>`.
Args:
x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        cond(Variable, optional): If it is :attr:`None`, the op will create a variable as output tensor; the shape and data type of this \
            tensor are the same as input :attr:`x` . If it is not :attr:`None`, the op will set the variable as output tensor; the shape and data type \
            of this tensor should be the same as input :attr:`x` . Default value is :attr:`None`.
Returns:
        Variable: The tensor variable storing the output; the output data type is bool and the output shape is the same as input :attr:`x` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
label = fluid.layers.assign(np.array([2, 3], dtype='int32'))
limit = fluid.layers.assign(np.array([3, 2], dtype='int32'))
out = fluid.layers.greater_than(x=label, y=limit) #out=[False, True]
out1 = label > limit #out1=[False, True]
"""
helper = LayerHelper("greater_than", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True
attrs = dict()
if force_init_on_cpu():
attrs['force_cpu'] = force_init_on_cpu()
helper.append_op(
type='greater_than',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [cond]},
attrs=attrs)
return cond
@templatedoc()
def greater_equal(x, y, cond=None):
"""
This OP returns the truth value of :math:`x >= y` elementwise, which is equivalent function to the overloaded operator `>=`.
Args:
x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        cond(Variable, optional): If it is :attr:`None` , the op will create a variable as output tensor; the shape and data type of this \
            tensor are the same as input :attr:`x`. If it is not :attr:`None` , the op will set the variable as output tensor; the shape and data \
            type of this tensor are the same as input :attr:`x`. Default value is :attr:`None`.
Returns:
        Variable: The tensor variable storing the output; the output data type is bool and the output shape is the same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
label = fluid.layers.assign(np.array([2, 2], dtype='int32'))
limit = fluid.layers.assign(np.array([2, 3], dtype='int32'))
out = fluid.layers.greater_equal(x=label, y=limit) #out=[True, False]
out_1 = label >= limit #out1=[True, False]
"""
helper = LayerHelper("greater_equal", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True
attrs = dict()
if force_init_on_cpu():
attrs['force_cpu'] = force_init_on_cpu()
helper.append_op(
type='greater_equal',
inputs={'X': [x],
'Y': [y]},
outputs={'Out': [cond]},
attrs=attrs)
return cond
def equal(x, y, cond=None):
"""
This layer returns the truth value of :math:`x == y` elementwise.
Args:
x(Variable): Tensor, data type is float32, float64, int32, int64.
y(Variable): Tensor, data type is float32, float64, int32, int64.
cond(Variable, optional): Optional output which can be any created
Variable that meets the requirements to store the result of *equal*.
            If cond is None, a new Variable will be created to store the result.
Returns:
Variable: output Tensor, it's shape is the same as the input's Tensor,
and the data type is bool.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import numpy as np
out_cond =fluid.data(name="input1", shape=[2], dtype='bool')
label = fluid.layers.assign(np.array([3, 3], dtype="int32"))
limit = fluid.layers.assign(np.array([3, 2], dtype="int32"))
label_cond = fluid.layers.assign(np.array([1, 2], dtype="int32"))
out1 = fluid.layers.equal(x=label,y=limit) #out1=[True, False]
out2 = fluid.layers.equal(x=label_cond,y=limit, cond=out_cond) #out2=[False, True] out_cond=[False, True]
"""
helper = LayerHelper("equal", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True
helper.append_op(
type='equal', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [cond]})
return cond
def not_equal(x, y, cond=None):
"""
This OP returns the truth value of :math:`x != y` elementwise, which is equivalent function to the overloaded operator `!=`.
Args:
x(Variable): First input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
y(Variable): Second input to compare which is N-D tensor. The input data type should be float32, float64, int32, int64.
        cond(Variable, optional): If it is :attr:`None`, the op will create a variable as output tensor; the shape and data type of this \
            tensor are the same as input :attr:`x`. If it is not :attr:`None`, the op will set the variable as output tensor; the shape and data \
            type of this tensor should be the same as input :attr:`x`. Default value is :attr:`None`.
Returns:
        Variable: The tensor variable storing the output; the output data type is bool and the output shape is the same as input :attr:`x`.
Examples:
.. code-block:: python
import paddle.fluid as fluid
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
limit = fluid.layers.fill_constant(shape=[1], value=1, dtype='int64')
out = fluid.layers.not_equal(x=label, y=limit)
"""
helper = LayerHelper("not_equal", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True
helper.append_op(
type='not_equal', inputs={'X': [x],
'Y': [y]}, outputs={'Out': [cond]})
return cond
def array_read(array, i):
"""
This OP is used to read data at the specified position from the input array
:ref:`api_fluid_LoDTensorArray` . ``array`` is the input array and ``i``
is the specified read position. This OP is often used together with
:ref:`api_fluid_layers_array_write` OP.
Case 1:
::
Input:
            The shapes of the first three tensors are [1], and that of the last one is [1,2]:
array = ([0.6], [0.1], [0.3], [0.4, 0.2])
And:
i = [3]
Output:
output = [0.4, 0.2]
Args:
array (LoDTensorArray): The input LoDTensorArray.
i (Variable): 1-D Tensor, whose shape is [1] and dtype is int64. It represents the
specified read position of ``array``.
Returns:
Variable: The LoDTensor or Tensor that is read at the specified position of ``array``.
Examples:
.. code-block:: python
# First we're going to create a LoDTensorArray, then we're going to write the Tensor into
# the specified position, and finally we're going to read the Tensor at that position.
import paddle.fluid as fluid
arr = fluid.layers.create_array(dtype='float32')
tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
# tmp is the Tensor with shape [3,2], and if we write it into the position with subscript 10
# of the empty-array: arr, then the length of arr becomes 11.
arr = fluid.layers.array_write(tmp, i, array=arr)
# Read the data of the position with subscript 10.
item = fluid.layers.array_read(arr, i)
# You can print out the data via executor.
input = fluid.layers.Print(item, message="The LoDTensor of the i-th position:")
main_program = fluid.default_main_program()
exe = fluid.Executor(fluid.CPUPlace())
exe.run(main_program)
# The printed result is:
# 1569588169 The LoDTensor of the i-th position: The place is:CPUPlace
# Tensor[array_read_0.tmp_0]
# shape: [3,2,]
# dtype: l
# data: 5,5,5,5,5,5,
# the output is 2-D Tensor with shape [3,2].
# dtype is the corresponding C++ data type, which may vary in different environments.
# Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
# so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
# and '__int64' on Windows. They both represent 64-bit integer variables.
"""
helper = LayerHelper('array_read', **locals())
if not isinstance(
array,
Variable) or array.type != core.VarDesc.VarType.LOD_TENSOR_ARRAY:
raise TypeError("array should be tensor array vairable")
out = helper.create_variable_for_type_inference(dtype=array.dtype)
helper.append_op(
type='read_from_array',
inputs={'X': [array],
'I': [i]},
outputs={'Out': [out]})
return out
def shrink_memory(x, i, table):
"""
This function creates an operator to shrink rnn memory using the RankTable
as mentioned in the input parameter.
    NOTE: This is a very low-level API. It is used by DynamicRNN only.
    Since DynamicRNN implements the RNN in a no-padding way, the sequences
    will be sorted by length, and the length of the valid memory will be
    shrunk after each time step.
Args:
x(Variable): The memory object in the previous time step.
        i(Variable): The step count variable. An int scalar as LoDTensor.
table(Variable): The RNNRankTable object.
Returns:
        Variable: The memory variable after shrinking.
Examples:
        Since this is a very low-level API, no example is provided. Please
        refer to the implementation of the DynamicRNN class for detailed
        usage.
"""
helper = LayerHelper('shrink_memory', **locals())
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='shrink_rnn_memory',
inputs={'X': [x],
'I': [i],
'RankTable': [table]},
outputs={'Out': [out]},
attrs={})
return out
def array_length(array):
"""
This OP is used to get the length of the input array :ref:`api_fluid_LoDTensorArray` .
It can be used together with :ref:`api_fluid_layers_array_read` , :ref:`api_fluid_layers_array_write` ,
    :ref:`api_fluid_layers_While` OP to traverse, read and write LoDTensorArray.
Args:
array (LoDTensorArray): The input array that will be used to compute the length.
Returns:
Variable: 1-D Tensor with shape [1], which is the length of array. Datatype: int64.
Examples:
.. code-block:: python
import paddle.fluid as fluid
tmp = fluid.layers.zeros(shape=[10], dtype='int32')
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
# tmp is 1-D Tensor with shape [10]. We write tmp into arr on subscript 10,
# then the length of arr becomes 11.
arr = fluid.layers.array_write(tmp, i=i)
# return the length of arr
arr_len = fluid.layers.array_length(arr)
# You can use executor to print out the length of LoDTensorArray.
input = fluid.layers.Print(arr_len, message="The length of LoDTensorArray:")
main_program = fluid.default_main_program()
exe = fluid.Executor(fluid.CPUPlace())
exe.run(main_program)
# The printed result is:
# 1569576542 The length of LoDTensorArray: The place is:CPUPlace
# Tensor[array_length_0.tmp_0]
# shape: [1,]
# dtype: l
# data: 11,
# 1-D Tensor with shape [1], whose value is 11. It means that the length of LoDTensorArray
# is 11.
# dtype is the corresponding C++ data type, which may vary in different environments.
# Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
# so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
# and '__int64' on Windows. They both represent 64-bit integer variables.
"""
helper = LayerHelper('array_length', **locals())
tmp = helper.create_variable_for_type_inference(dtype='int64')
tmp.stop_gradient = True
helper.append_op(
type='lod_array_length', inputs={'X': [array]}, outputs={'Out': [tmp]})
return tmp
class ConditionalBlockGuard(BlockGuard):
"""
    ConditionalBlockGuard is derived from BlockGuard. It is dedicated to
    holding a ConditionalBlock, and helps users enter and exit the
    ConditionalBlock via Python's 'with' keyword. However, ConditionalBlockGuard
    is generally an internal component of IfElse, and users should not use it directly.
"""
def __init__(self, block):
if not isinstance(block, ConditionalBlock):
raise TypeError("block should be conditional block")
super(ConditionalBlockGuard, self).__init__(block.helper.main_program)
self.block = block
def __enter__(self):
return super(ConditionalBlockGuard, self).__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
self.block.complete()
return super(ConditionalBlockGuard, self).__exit__(exc_type, exc_val,
exc_tb)
class ConditionalBlock(object):
'''
**ConditionalBlock**
    ConditionalBlock is an operator that binds a block to a specific condition.
    If the condition matches, the corresponding block will be executed.
Args:
        inputs (list(Variable)): bool conditions.
        is_scalar_condition (bool): whether the branch is controlled by a scalar.
name(str): name of this ConditionalBlock.
Examples:
.. code-block:: python
            import paddle.fluid as fluid
            import paddle.fluid.layers as layers
            cond = layers.less_than(x=label, y=limit)
            true_image, false_image = layers.split_lod_tensor(
                input=image, mask=cond)
            true_cond = layers.ConditionalBlock([true_image])
            false_cond = layers.ConditionalBlock([false_image])
with true_cond.block():
...
with false_cond.block():
...
'''
def __init__(self, inputs, is_scalar_condition=False, name=None):
for each_input in inputs:
if not isinstance(each_input, Variable):
raise TypeError("Each input should be variable")
self.inputs = inputs
self.is_scalar_condition = is_scalar_condition
self.helper = LayerHelper('conditional_block', name=name)
def block(self):
return ConditionalBlockGuard(self)
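    # Internal (comment added for clarity): called by ConditionalBlockGuard on
    # exit. It collects the sub-block's external inputs and produced outputs,
    # then appends the 'conditional_block' op (and, when a backward block
    # exists, its grad op) to the parent block.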
def complete(self):
inside_block = self.helper.main_program.current_block()
parent_block = self.helper.main_program.block(inside_block.parent_idx)
intermediate = set()
params = set()
for each_op in inside_block.ops:
assert isinstance(each_op, Operator)
for iname in each_op.input_names:
for in_var_name in each_op.input(iname):
if in_var_name not in intermediate:
params.add(in_var_name)
for oname in each_op.output_names:
for out_var_name in each_op.output(oname):
intermediate.add(out_var_name)
input_set = set([ipt.name for ipt in self.inputs])
# Todo(liym27) Here assume that all params are in recursive parent block
# but when minimize() called in control flow, some params may be in
# conditional grad block
param_list = [
parent_block._var_recursive(each_name) for each_name in params
]
out_list = []
for inner_out_name in intermediate:
inner_var = parent_block._find_var_recursive(inner_out_name)
if inner_var:
out_list.append(inner_var)
step_scope = parent_block.create_var(
type=core.VarDesc.VarType.STEP_SCOPES)
conditional_block_op = parent_block.append_op(
type='conditional_block',
inputs={
'Cond': self.inputs,
'Input': param_list,
},
outputs={'Out': out_list,
'Scope': [step_scope]},
attrs={
'sub_block': inside_block,
'is_scalar_condition': self.is_scalar_condition
})
if self.need_append_conditional_block_grad(inside_block):
self.append_conditional_block_grad(parent_block, inside_block,
conditional_block_op)
def need_append_conditional_block_grad(self, inside_block):
grad_sub_block_idx = inside_block.backward_block_idx
return grad_sub_block_idx != -1
def append_conditional_block_grad(self, parent_block, inside_block,
conditional_block_op):
'''
Append op `conditional_block_grad` manually.
        When `optimizer.minimize/append_backward` is called in Paddle control flow,
        grad ops are appended before the op `conditional_block` is appended, so the
        op `conditional_block_grad` cannot be appended at that time. Therefore,
        after appending op `conditional_block`, `conditional_block_grad` is
        appended manually.
Args:
            parent_block (Block): The block that `conditional_block_op` belongs to.
inside_block (Block): The sub block of `conditional_block_op`.
conditional_block_op (Operator): The forward op conditional_block.
'''
grad_sub_block_idx = inside_block.backward_block_idx
grad_sub_block = self.helper.main_program.block(grad_sub_block_idx)
intermediate = set()
params = set()
for each_op in grad_sub_block.ops:
assert isinstance(each_op, Operator)
for iname in each_op.input_names:
for in_var_name in each_op.input(iname):
if in_var_name not in intermediate:
params.add(in_var_name)
for oname in each_op.output_names:
for out_var_name in each_op.output(oname):
intermediate.add(out_var_name)
param_list = []
for inner_input_name in params:
inner_var = parent_block._find_var_recursive(inner_input_name)
if inner_var:
param_list.append(cpt.to_text(inner_var.name))
grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
conditional_block_op.desc,
cpt.to_text(set()), [grad_sub_block.desc])
# append op_desc in grad_op_descs to target_block
op_role_attr_name = core.op_proto_and_checker_maker.kOpRoleAttrName()
backward = core.op_proto_and_checker_maker.OpRole.Backward
new_op_desc = parent_block.desc.append_op()
new_op_desc.copy_from(grad_op_desc[0])
new_op_desc._set_attr(op_role_attr_name, backward)
# set input and output manually
new_op_desc.set_input('Input', param_list)
new_op_desc.set_output('Input@GRAD',
[param + "@GRAD" for param in param_list])
new_vars = set()
for grad_var_name in new_op_desc.output_arg_names():
if grad_sub_block.desc.has_var_recursive(
cpt.to_bytes(grad_var_name)
) or grad_var_name == core.empty_var_name():
continue
grad_sub_block.desc.var(cpt.to_bytes(grad_var_name))
new_vars.add(grad_var_name)
if grad_var_name not in op_grad_to_var:
continue
# infer_shape and infer_type
new_op_desc.infer_var_type(grad_sub_block.desc)
new_op_desc.infer_shape(grad_sub_block.desc)
for arg in new_op_desc.output_arg_names():
if arg in new_vars:
_infer_var_data_type_shape_(arg, grad_sub_block)
self.helper.main_program._sync_with_cpp()
def copy_var_to_parent_block(var, layer_helper):
if var is None:
return None
prog = layer_helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0, "Got wrong parent block index when assigning var to parent scope in control_flow"
parent_block = prog.block(parent_idx)
parent_block_var = parent_block.create_var(
dtype=var.dtype, shape=var.shape, type=var.type)
assign(var, parent_block_var)
return parent_block_var
def cond(pred, true_fn=None, false_fn=None, name=None):
"""
    This API returns ``true_fn()`` if the predicate ``pred`` is true, else
    ``false_fn()`` . Users could also set ``true_fn`` or ``false_fn`` to
    ``None``; in that case this API treats that branch as simply returning
    ``None`` .
    ``true_fn`` and ``false_fn`` should return the same nested structure of tensors,
    or both return ``None`` if the user doesn't want to return anything. A nested
    structure of tensors in PaddlePaddle is tensor(s), or tuple of tensors, or
    list of tensors.
Note:
        1. The tuples or lists returned by ``true_fn`` and ``false_fn`` must have
           the same structure because of the dataflow model of PaddlePaddle, while
           the tensors in the tuples or the lists can have different shapes.
2. Any tensors or operations created outside of ``true_fn`` and
``false_fn`` will be executed regardless of which branch is selected at
           runtime. This has frequently surprised users who expected lazy
           semantics. For example:
.. code-block:: python
import paddle.fluid as fluid
a = fluid.data(name='a', shape=[-1, 1], dtype='float32')
b = fluid.data(name='b', shape=[-1, 1], dtype='float32')
c = a * b
out = fluid.layers.cond(a < b, lambda: a + c, lambda: b * b)
No matter whether ``a < b`` , ``c = a * b`` will run.
Args:
pred(Variable): A boolean tensor whose numel should be 1. The boolean
value determines whether to return the result of ``true_fn`` or
``false_fn`` .
true_fn(callable, optional): A callable to be performed if ``pred`` is
true. The default value is ``None`` .
false_fn(callable, optional): A callable to be performed if ``pred`` is
false. The default value is ``None`` .
name(str, optional): The default value is ``None`` . Normally users
don't have to set this parameter. For more information, please
refer to :ref:`api_guide_Name` .
Returns:
Variable|list(Variable)|tuple(Variable): returns ``true_fn()`` if the
predicate ``pred`` is true else ``false_fn()`` .
Raises:
TypeError: if ``true_fn`` or ``false_fn`` is not callable.
ValueError: if ``true_fn`` and ``false_fn`` don't return the same nest
structure of tensors.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
from paddle.fluid.executor import Executor
from paddle.fluid.framework import Program, program_guard
#
# pseudocode:
# if 0.1 < 0.23:
# return 1, True
# else:
# return 3, 2
#
def true_func():
return layers.fill_constant(
shape=[1, 2], dtype='int32', value=1), layers.fill_constant(
shape=[2, 3], dtype='bool', value=True)
def false_func():
return layers.fill_constant(
shape=[3, 4], dtype='float32', value=3), layers.fill_constant(
shape=[4, 5], dtype='int64', value=2)
main_program = Program()
startup_program = Program()
with program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
y = layers.fill_constant(shape=[1], dtype='float32', value=0.23)
pred = layers.less_than(x, y)
out = layers.cond(pred, true_func, false_func)
# out is a tuple containing 2 tensors
place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
) else fluid.CPUPlace()
exe = fluid.Executor(place)
ret = exe.run(main_program, fetch_list=out)
# ret[0] = [[1 1]]
# ret[1] = [[ True True True]
# [ True True True]]
"""
helper = LayerHelper('cond', **locals())
true_output = None
false_output = None
copy_to_parent_func = lambda var: copy_var_to_parent_block(var, helper)
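    # Outputs created inside each branch live in that ConditionalBlock's
    # sub-block, so they are copied into the parent block (via assign in
    # copy_var_to_parent_block) before the two branches are merged below.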
if true_fn is not None:
if not callable(true_fn):
raise TypeError("The true_fn in cond must be callable")
true_cond_block = ConditionalBlock([pred], is_scalar_condition=True)
with true_cond_block.block():
origin_true_output = true_fn()
if origin_true_output is not None:
true_output = map_structure(copy_to_parent_func,
origin_true_output)
if false_fn is not None:
if not callable(false_fn):
raise TypeError("The false_fn in cond must be callable")
false_cond_block = ConditionalBlock(
[logical_not(pred)], is_scalar_condition=True)
with false_cond_block.block():
origin_false_output = false_fn()
if origin_false_output is not None:
false_output = map_structure(copy_to_parent_func,
origin_false_output)
if true_output is None and false_output is None:
return None
if true_output is None:
raise ValueError(
"Incompatible return values of true_fn and false_fn in cond: "
"true_fn returns None while false_fn returns non-None")
if false_output is None:
raise ValueError(
"Incompatible return values of true_fn and false_fn in cond: "
"true_fn returns non-None while false_fn returns None")
    # Merge true and false outputs if they are not None
try:
assert_same_structure(true_output, false_output, check_types=False)
except ValueError as e:
raise ValueError(
"Incompatible return values of true_fn and false_fn in cond: {}".
format(e))
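    # The two branch outputs are merged with select_input: pred is cast to an
    # int32 mask so that 0 selects the false branch output and 1 selects the
    # true branch output.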
mask = cast(pred, dtype='int32')
    merge_func = lambda false_var, true_var: select_input([false_var, true_var], mask)
merged_output = map_structure(merge_func, false_output, true_output)
return merged_output
def _error_message(what, arg_name, op_name, right_value, error_value):
error_message = "{what} of '{arg_name}' in Op({op_name}) must be " \
"{right_value}, but received: {error_value}.".format(
what=what,
arg_name=arg_name,
op_name=op_name,
right_value=right_value,
error_value=error_value)
return error_message
def case(pred_fn_pairs, default=None, name=None):
'''
This operator works like an if-elif-elif-else chain.
Args:
pred_fn_pairs(list|tuple): A list or tuple of (pred, fn) pairs. ``pred`` is a boolean Tensor with shape [1], ``fn`` is a callable. All callables return the same structure of Tensors.
default(callable, optional): Callable that returns a structure of Tensors.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable|list(Variable): Tensors returned by the callable from the first pair whose pred is True,
or Tensors returned by ``default`` if no pred in ``pred_fn_pairs`` is True and ``default`` is not None,
or Tensors returned by the last callable in ``pred_fn_pairs`` if no pred in ``pred_fn_pairs`` is True and ``default`` is None.
Raises:
TypeError: If the type of ``pred_fn_pairs`` is not list or tuple.
TypeError: If the type of elements in ``pred_fn_pairs`` is not tuple.
TypeError: If the size of tuples in ``pred_fn_pairs`` is not 2.
TypeError: If the first element of 2-tuple in ``pred_fn_pairs`` is not Variable.
TypeError: If the second element of 2-tuple in ``pred_fn_pairs`` is not callable.
TypeError: If ``default`` is not None but it is not callable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
def fn_1():
return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)
def fn_2():
return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)
def fn_3():
return layers.fill_constant(shape=[3], dtype='int32', value=3)
            main_program = fluid.default_main_program()
            startup_program = fluid.default_startup_program()
with fluid.program_guard(main_program, startup_program):
x = layers.fill_constant(shape=[1], dtype='float32', value=0.3)
y = layers.fill_constant(shape=[1], dtype='float32', value=0.1)
z = layers.fill_constant(shape=[1], dtype='float32', value=0.2)
pred_1 = layers.less_than(z, x) # true: 0.2 < 0.3
pred_2 = layers.less_than(x, y) # false: 0.3 < 0.1
pred_3 = layers.equal(x, y) # false: 0.3 == 0.1
# Call fn_1 because pred_1 is True
out_1 = layers.case(
pred_fn_pairs=[(pred_1, fn_1), (pred_2, fn_2)], default=fn_3)
# Argument default is None and no pred in pred_fn_pairs is True. fn_3 will be called.
# because fn_3 is the last callable in pred_fn_pairs.
out_2 = layers.case(pred_fn_pairs=[(pred_2, fn_2), (pred_3, fn_3)])
exe = fluid.Executor(fluid.CPUPlace())
res_1, res_2 = exe.run(main_program, fetch_list=[out_1, out_2])
print(res_1) # [[1. 1.]]
print(res_2) # [3 3 3]
'''
helper = LayerHelper('case', **locals())
def _case_check_args(pred_fn_pairs, default):
'''
Check arguments pred_fn_pairs and default. Return canonical pre_fn_pairs and default.
'''
if not isinstance(pred_fn_pairs, (list, tuple)):
raise TypeError(
_error_message("The type", "pred_fn_pairs", "case",
"list or tuple", type(pred_fn_pairs)))
for pred_fn in pred_fn_pairs:
if not isinstance(pred_fn, tuple):
raise TypeError(
_error_message("The elements' type", "pred_fn_pairs",
"case", "tuple", type(pred_fn)))
if len(pred_fn) != 2:
raise TypeError(
_error_message("The tuple's size", "pred_fn_pairs", "case",
"2", str(len(pred_fn)) + "-tuple"))
pred, fn = pred_fn
if not isinstance(pred, Variable):
raise TypeError(
_error_message("The pred's type", "pred_fn_pairs", "case",
"boolean Variable", type(pred)))
if not callable(fn):
raise TypeError(
"The fn for {} of pred_fn_pairs in Op(case) must"
" be callable.".format(pred.name))
if default is None:
default_index = len(pred_fn_pairs) - 1 # pick the last one
default = pred_fn_pairs[default_index][1]
pred_fn_pairs = pred_fn_pairs[:default_index]
elif not callable(default):
raise TypeError("The default in Op(case) must be callable.")
return pred_fn_pairs, default
pred_fn_pairs, default = _case_check_args(pred_fn_pairs, default)
false_fn = default
for pred, true_fn in reversed(pred_fn_pairs):
false_fn = partial(cond, pred=pred, true_fn=true_fn, false_fn=false_fn)
final_fn = false_fn
return final_fn()
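# A sketch of how `case` unrolls (illustrative names, not part of this module):
# with pred_fn_pairs=[(p1, f1), (p2, f2)] and default=f3, the reversed fold
# above builds cond(p1, true_fn=f1, false_fn=lambda: cond(p2, true_fn=f2,
# false_fn=f3)), so the pairs are checked in order like an if/elif/else chain.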
class Switch(object):
"""
This class is used to implement Switch branch control function.
Switch branch contains several case branches and one default branch.
Switch control flow checks whether the case branch conditions are satisfied in turn,
and only executes the statement after the first case branch that satisfies the conditions.
If there is no case branch that satisfies the condition,
only the statement following the default branch is executed.
Note:
A new OP :ref:`api_fluid_layers_case` is highly recommended instead of ``Switch`` if the shape of parameter ``cond`` is [1].
OP :ref:`api_fluid_layers_case` is easier to use and is called with less code but does the same thing as ``Switch`` .
Member Functions:
        case(cond): The case branch of Switch whose parameter cond is a scalar Variable of bool type. Only if the cond of the current case branch is True and the cond of every previous case branch is False will the statements after this case branch be executed; the statements after the other case branches will not be executed.
default(): The default branch of Switch. When cond of all case branches is False, the statement after default branch is executed.
Case and default functions can only be used inside the scope of Switch, as shown below:
.. code-block:: python
'''
with fluid.layers.Switch() as switch:
with switch.case(cond1):
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=1)
with switch.case(cond2):
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=2)
with switch.default():
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=0)
'''
Args:
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
lr = fluid.layers.create_global_var(
shape=[1],
value=0.0,
dtype='float32',
persistable=True,
name="learning_rate")
zero_var = fluid.layers.fill_constant(
shape=[1], dtype='float32', value=0.0)
one_var = fluid.layers.fill_constant(
shape=[1], dtype='float32', value=1.0)
two_var = fluid.layers.fill_constant(
shape=[1], dtype='float32', value=2.0)
global_step = fluid.layers.autoincreased_step_counter(counter_name='@LR_DECAY_COUNTER@', begin=0, step=1)
with fluid.layers.control_flow.Switch() as switch:
with switch.case(global_step == zero_var):
fluid.layers.assign(input=one_var, output=lr)
with switch.default():
fluid.layers.assign(input=two_var, output=lr)
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
res = exe.run(fluid.default_main_program(), feed={}, fetch_list=[lr])
print(res) # [array([1.], dtype=float32)]
"""
def __init__(self, name=None):
self.helper = LayerHelper('switch', name=name)
self.inside_scope = False
self.pre_not_conditions = []
def case(self, condition):
if not self.inside_scope:
raise ValueError("case should be called inside with")
if len(self.pre_not_conditions) == 0:
cond_block = ConditionalBlock([condition], is_scalar_condition=True)
not_cond = logical_not(x=condition)
self.pre_not_conditions.append(not_cond)
else:
pre_cond_num = len(self.pre_not_conditions)
pre_not_cond = self.pre_not_conditions[pre_cond_num - 1]
new_not_cond = logical_and(
x=pre_not_cond, y=logical_not(x=condition))
self.pre_not_conditions.append(new_not_cond)
cond_block = ConditionalBlock(
[logical_and(
x=pre_not_cond, y=condition)],
is_scalar_condition=True)
return ConditionalBlockGuard(cond_block)
def default(self):
pre_cond_num = len(self.pre_not_conditions)
if pre_cond_num == 0:
raise ValueError("there should be at least one condition")
cond_block = ConditionalBlock(
[self.pre_not_conditions[pre_cond_num - 1]],
is_scalar_condition=True)
return ConditionalBlockGuard(cond_block)
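    # Predicate bookkeeping sketch: for case conditions c1, c2, ..., case i runs
    # under (not c1) and ... and (not c_{i-1}) and c_i, and default() runs under
    # the conjunction of all negations, i.e. the last pre_not_conditions entry.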
def __enter__(self):
"""
        Set the flag that the program is now inside the Switch scope.
:return:
"""
self.inside_scope = True
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.inside_scope = False
if exc_type is not None:
return False # re-raise exception
return True
class IfElseBlockGuard(object):
def __init__(self, is_true, ifelse):
if not isinstance(ifelse, IfElse):
raise TypeError("ifelse must be an instance of IfElse class")
if ifelse.status != IfElse.OUT_IF_ELSE_BLOCKS:
raise ValueError("You cannot invoke IfElse.block() inside a block")
self.is_true = is_true
self.ie = ifelse
if is_true:
self.cond_block = ifelse.conditional_true_block
else:
self.cond_block = ifelse.conditional_false_block
if not isinstance(self.cond_block, ConditionalBlock):
raise TypeError("Unexpected situation")
self.cond_block = self.cond_block.block()
def __enter__(self):
self.ie.status = IfElse.IN_IF_ELSE_TRUE_BLOCKS if self.is_true else IfElse.IN_IF_ELSE_FALSE_BLOCKS
self.cond_block.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
if not self.cond_block.__exit__(exc_type, exc_val, exc_tb):
# re-raise inside exception
return False
if len(self.ie.output_table[1 if self.is_true else 0]) == 0:
raise ValueError("Must set output inside block")
self.ie.status = IfElse.OUT_IF_ELSE_BLOCKS
class IfElse(object):
"""
This class is used to implement IfElse branch control function. IfElse contains two blocks, true_block and false_block. IfElse will put data satisfying True or False conditions into different blocks to run.
Cond is a 2-D Tensor with shape [N, 1] and data type bool, representing the execution conditions of the corresponding part of the input data.
    Note:
        A new OP :ref:`api_fluid_layers_cond` is highly recommended instead of ``IfElse`` if the shape of parameter ``cond`` is [1].
        OP :ref:`api_fluid_layers_cond` is easier to use and is called with less code but does the same thing as ``IfElse`` .
    IfElse OP is different from other OPs in usage, which may cause some confusion for users. Here is a simple example to illustrate this OP.
.. code-block:: python
# The following code completes the function: subtract 10 from the data greater than 0 in x, add 10 to the data less than 0 in x, and sum all the data.
import numpy as np
import paddle.fluid as fluid
x = fluid.layers.data(name='x', shape=[4, 1], dtype='float32', append_batch_size=False)
y = fluid.layers.data(name='y', shape=[4, 1], dtype='float32', append_batch_size=False)
x_d = np.array([[3], [1], [-2], [-3]]).astype(np.float32)
y_d = np.zeros((4, 1)).astype(np.float32)
# Compare the size of x, y pairs of elements, output cond, cond is shape [4, 1], data type bool 2-D tensor.
# Based on the input data x_d, y_d, it can be inferred that the data in cond are [[true], [true], [false], [false]].
cond = fluid.layers.greater_than(x, y)
# Unlike other common OPs, ie below returned by the OP is an IfElse OP object
ie = fluid.layers.IfElse(cond)
with ie.true_block():
# In this block, according to cond condition, the data corresponding to true dimension in X is obtained and subtracted by 10.
out_1 = ie.input(x)
out_1 = out_1 - 10
ie.output(out_1)
with ie.false_block():
# In this block, according to cond condition, get the data of the corresponding condition in X as false dimension, and add 10
out_1 = ie.input(x)
out_1 = out_1 + 10
ie.output(out_1)
# According to cond condition, the data processed in the two blocks are merged. The output here is output, the type is List, and the element type in List is Variable.
output = ie() # [array([[-7.], [-9.], [ 8.], [ 7.]], dtype=float32)]
# Get the first Variable in the output List and add all elements.
out = fluid.layers.reduce_sum(output[0])
exe = fluid.Executor(fluid.CPUPlace())
exe.run(fluid.default_startup_program())
res = exe.run(fluid.default_main_program(), feed={"x":x_d, "y":y_d}, fetch_list=[out])
        print(res)
# [array([-1.], dtype=float32)]
Args:
cond (Variable): cond is a 2-D Tensor with shape [N, 1] and data type bool, representing the corresponding execution conditions of N input data. The data type is bool.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name` .
Returns:
Unlike other common OPs, the OP call returns an IfElse OP object (e.g. ie in the example), which branches the input data by calling the internal functions of the object ``true_block ()``, ``false_block ()``, ``input ()``, ``output ()``, and integrates the data processed by different branches as the overall output by calling the internal ``call ()`` function. The output type is a list, and the type of each element in the list is Variable.
Internal Functions:
        The block is constructed by calling the ``with ie.true_block()`` function in the object, and the computational logic under condition true is put into the block. If no corresponding block is constructed, the input data in the corresponding conditional dimension is unchanged.
        The block is constructed by calling the ``with ie.false_block()`` function in the object, and the computational logic under condition false is put into the block. If no corresponding block is constructed, the input data in the corresponding conditional dimension is unchanged.
        ``out = ie.input(x)`` will take out the data of the corresponding conditional dimension in x and put it into out, supporting multiple inputs inside each block.
        ``ie.output(out)`` writes the result to the output of the corresponding condition.
        There is a ``__call__()`` function inside the object, that is, by calling ``output = ie()``, all the outputs of the true and false blocks are fused as the overall output. The output type is a list, and the type of each element in the list is Variable.
"""
OUT_IF_ELSE_BLOCKS = 0
IN_IF_ELSE_TRUE_BLOCKS = 1
IN_IF_ELSE_FALSE_BLOCKS = 2
def __init__(self, cond, name=None):
if not isinstance(cond, Variable):
raise TypeError("cond must be a Variable")
self.helper = LayerHelper('ifelse', name=name)
self.cond = cond
self.input_table = {}
self.status = IfElse.OUT_IF_ELSE_BLOCKS
self.conditional_true_block = ConditionalBlock(inputs=[self.cond])
self.conditional_false_block = ConditionalBlock(inputs=[self.cond])
        self.output_table = ([], [])  # (false_outs, true_outs)
def input(self, x):
if self.status == IfElse.OUT_IF_ELSE_BLOCKS:
raise ValueError("input must in true/false blocks")
if id(x) not in self.input_table:
parent_block = self._parent_block()
out_true = parent_block.create_var(
name=unique_name.generate_with_ignorable_key('ifelse_input' +
self.helper.name),
dtype=x.dtype)
out_false = parent_block.create_var(
name=unique_name.generate_with_ignorable_key('ifelse_input' +
self.helper.name),
dtype=x.dtype)
parent_block.append_op(
type='split_lod_tensor',
inputs={
'X': x,
'Mask': self.cond,
},
outputs={'OutTrue': out_true,
'OutFalse': out_false},
attrs={'level': 0})
self.input_table[id(x)] = (out_true, out_false)
else:
out_true, out_false = self.input_table[id(x)]
if self.status == IfElse.IN_IF_ELSE_TRUE_BLOCKS:
return out_true
else:
return out_false
def _parent_block(self):
current_block = self.helper.main_program.current_block()
return self.helper.main_program.block(current_block.parent_idx)
def true_block(self):
return IfElseBlockGuard(True, self)
def false_block(self):
return IfElseBlockGuard(False, self)
def output(self, *outs):
if self.status == self.OUT_IF_ELSE_BLOCKS:
raise ValueError("output can only be invoked in the sub-block")
out_table = self.output_table[1 if self.status ==
self.IN_IF_ELSE_TRUE_BLOCKS else 0]
parent_block = self._parent_block()
for each_out in outs:
if not isinstance(each_out, Variable):
raise TypeError("Each output should be a variable")
# create outside tensor
outside_out = parent_block.create_var(
name=unique_name.generate_with_ignorable_key("_".join(
[self.helper.name, 'output'])),
dtype=each_out.dtype)
out_table.append(outside_out)
# assign local var to outside
assign(input=each_out, output=outside_out)
def __call__(self):
if self.status != self.OUT_IF_ELSE_BLOCKS:
raise ValueError("IfElse::__call__ must be out of sub-block")
false_len, true_len = list(map(len, self.output_table))
if false_len == 0 and true_len == 0:
raise ValueError("Must invoke true_block/false_block before "
"__call__")
elif false_len != true_len and false_len != 0 and true_len != 0:
raise ValueError("The output side must be same")
elif false_len == 0 or true_len == 0:
return self.output_table[0 if false_len != 0 else 1]
# else none of false_len/true_len is zero
# merge together
rlist = []
for false_var, true_var in zip(*self.output_table):
rlist.append(
merge_lod_tensor(
in_true=true_var,
in_false=false_var,
mask=self.cond,
x=self.cond,
level=0))
return rlist
class DynamicRNN(object):
"""
**Note: the input of this class should be LoDTensor which holds the
information of variable-length sequences. If the input is fixed-length Tensor,
please use StaticRNN (fluid.layers.** :ref:`api_fluid_layers_StaticRNN` **) for
better performance.**
DynamicRNN can process a minibatch of variable-length sequences.
The length of each sample can be different and is recorded in LoD.
In DynamicRNN, an input sequence will be unfolded into time steps and users
can define how to process each time step in :code:`block()` .
The total number of time steps is determined by the longest sequence.
DynamicRNN will not pad all sequences to the same length, instead it will
sort the sequences internally by the sequence length in descending order.
    The input sequences will be shrunk because only sequences whose length is
    larger than the time step will participate in the remaining calculation.
If defined :code:`drnn = DynamicRNN()`, then users can call :code:`drnn()`
to obtain the result sequences. It is a LoDTensor gained by merging all
    time steps' outputs. When RNN's input sequence x meets :code:`x.lod_level == 1`,
the output LoDTensor will have the same LoD with x. The result of :code:`drnn()`
includes RNN's outputs of all time steps, users can call
:ref:`api_fluid_layers_sequence_last_step` to extract the data of the last time step.
Warning:
Currently it is not supported to set :code:`is_sparse = True` of any
layers defined within DynamicRNN's :code:`block` function.
Args:
name (str, optional): The default value is None. Normally there is no
need for user to set this property. For more information,
please refer to :ref:`api_guide_Name` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
encoder_proj = fluid.data(name='encoder_proj', shape=[None, 32], dtype='float32', lod_level=1)
decoder_boot = fluid.data(name='boot', shape=[None, 10], dtype='float32')
drnn = fluid.layers.DynamicRNN()
with drnn.block():
# Set sentence as RNN's input, each time step processes a word from the sentence
current_word = drnn.step_input(sentence)
# Set encode_proj as RNN's static input
encoder_word = drnn.static_input(encoder_proj)
# Initialize memory with boot_memory, which need reorder according to RNN's input sequences
memory = drnn.memory(init=decoder_boot, need_reorder=True)
fc_1 = fluid.layers.fc(input=encoder_word, size=30)
fc_2 = fluid.layers.fc(input=current_word, size=30)
decoder_inputs = fc_1 + fc_2
hidden, _, _ = fluid.layers.gru_unit(input=decoder_inputs, hidden=memory, size=30)
# Update memory with hidden
drnn.update_memory(ex_mem=memory, new_mem=hidden)
out = fluid.layers.fc(input=hidden, size=10, bias_attr=True, act='softmax')
# Set hidden and out as RNN's outputs
drnn.output(hidden, out)
# Get RNN's result
hidden, out = drnn()
# Get RNN's result of the last time step
last = fluid.layers.sequence_last_step(out)
"""
BEFORE_RNN = 0
IN_RNN = 1
AFTER_RNN = 2
def __init__(self, name=None):
self.helper = LayerHelper('dynamic_rnn', name=name)
self.status = DynamicRNN.BEFORE_RNN
self.lod_rank_table = None
self.max_seq_len = None
self.step_idx = None
self.zero_idx = None
self.mem_dict = dict()
self.output_array = []
self.outputs = []
self.cond = self.helper.create_variable_for_type_inference(dtype='bool')
self.cond.stop_gradient = False
self.while_op = While(self.cond)
self.input_array = []
self.mem_link = []
def step_input(self, x, level=0):
"""
This function is used to set sequence x as DynamicRNN's input.
The maximum sequence length in x determines the number of time steps
the RNN unit will be executed. DynamicRNN can take multiple inputs.
When all inputs' :code:`lod_level` are 1, all inputs should hold the
same LoD. When :code:`x.lod_level >= 2` , the input sequence will be
unfold along specified level, and the slice of each time step is a
LoDTensor whose lod_level is :code:`x.lod_level - level - 1` .
In this case, the specified LoD level of multiple inputs should be the same.
- Case 1:
.. code-block:: text
# input, where Si is slice data of shape [1, N]
level = 0
x.lod = [[2, 1, 3]]
x.shape = [6, N]
x.data = [[S0],
[S0],
[S1],
[S2],
[S2],
[S2]]
# output
# step 0, time step data of 3 sequences
out.lod = [[]]
out.shape = [3, N]
out.data = [[S2],
[S0],
[S1]]
# step 1, time step data of 2 sequences
out.lod = [[]]
out.shape = [2, N]
out.data = [[S2],
[S0]]
# step 2, time step data of 1 sequences
out.lod = [[]]
out.shape = [1, N]
out.data = [[S2]]
Args:
x (Variable): The input LoDTensor which holds information of a
minibatch of variable-length sequences and should meet :code:`x.lod_level >= 1` .
When RNN has multiple inputs, the first dimension should match
across all inputs, but other shape components may differ.
Optional data types are: bool, float16, float32, float64, int8, int16, int32, int64, uint8.
level (int, optional): The level of lod used to split steps.
It should be in range :math:`[0, x.lod\_level)` . The default value is 0.
Returns:
Variable: The current time step in the input sequence. If there are :code:`num_sequences` \
sequences in x whose length is larger than :code:`step_idx` , the returned Variable \
will only hold the :code:`step_idx` -th time step of those `num_sequences` sequences. \
The data type is the same as input. If :code:`x.lod_level == 1` , the return value is \
a Tensor of shape :math:`\{num\_sequences, x.shape[1], ...\}` , or it will \
be a variable-length LoDTensor.
Raises:
ValueError: When :code:`step_input()` is called outside :code:`block()` .
TypeError: When x is not a Variable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
sentence = fluid.data(name='sentence', shape=[None, 1], dtype='int64', lod_level=1)
embedding = fluid.layers.embedding(input=sentence, size=[65536, 32], is_sparse=True)
drnn = fluid.layers.DynamicRNN()
with drnn.block():
# Set embedding as RNN's input, each time step processes a word from the sentence
word = drnn.step_input(embedding)
# Initialize memory to a Tensor whose value is 0, shape=[batch_size, 200],
# where batch_size is the number of sequences in embedding.
memory = drnn.memory(shape=[200])
hidden = fluid.layers.fc(input=[word, memory], size=200, act='relu')
# Update memory to hidden
drnn.update_memory(ex_mem=memory, new_mem=hidden)
# Set hidden as RNN's output
drnn.output(hidden)
# Get RNN's result
rnn_output = drnn()
"""
self._assert_in_rnn_block_("step_input")
if not isinstance(x, Variable):
raise TypeError(
"step_input() can only take a Variable as its input.")
parent_block = self._parent_block_()
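        # The first call to step_input builds the machinery shared by all
        # inputs: a LoD rank table (sequences sorted by length, descending), the
        # max sequence length, and the loop condition step_idx < max_seq_len.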
if self.lod_rank_table is None:
self.lod_rank_table = parent_block.create_var(
name=unique_name.generate('lod_rank_table'),
type=core.VarDesc.VarType.LOD_RANK_TABLE)
self.lod_rank_table.stop_gradient = True
parent_block.append_op(
type='lod_rank_table',
inputs={"X": x},
outputs={"Out": self.lod_rank_table},
attrs={"level": level})
self.max_seq_len = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_max_seq_len'),
dtype='int64')
self.max_seq_len.stop_gradient = False
parent_block.append_op(
type='max_sequence_len',
inputs={'RankTable': self.lod_rank_table},
outputs={"Out": self.max_seq_len})
self.cond.stop_gradient = True
parent_block.append_op(
type='less_than',
inputs={'X': self.step_idx,
'Y': self.max_seq_len},
outputs={'Out': self.cond},
attrs={'force_cpu': True})
input_array = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_input_array'),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.dtype)
self.input_array.append((input_array, x.dtype))
parent_block.append_op(
type='lod_tensor_to_array',
inputs={'X': x,
'RankTable': self.lod_rank_table},
outputs={'Out': input_array})
return array_read(array=input_array, i=self.step_idx)
def static_input(self, x):
"""
This function is used to set x as DynamicRNN's static input. It is optional.
- Case 1, set static input with LoD
.. code-block:: text
# RNN's input is the same as the case listed in step_input
# static input, where Si is slice data of shape [1, M]
x.lod = [[3, 1, 2]]
x.shape = [6, M]
x.data = [[S0],
[S0],
[S0],
[S1],
[S2],
[S2]]
# step 0, batch data corresponding to the 3 input sequences
out.lod = [[2, 3, 1]]
out.shape = [6, M]
out.data = [[S2],
[S2],
[S0],
[S0],
[S0],
[S1]]
# step 1, batch data corresponding to the 2 input sequences
out.lod = [[2, 3]]
out.shape = [5, M]
out.data = [[S2],
[S2],
[S0],
[S0],
[S0]]
# step 2, batch data corresponding to the 1 input sequences
out.lod = [[2]]
out.shape = [2, M]
out.data = [[S2],
[S2]]
- Case 2, set static input without LoD
.. code-block:: text
# RNN's input is the same as the case listed in step_input
# static input, where Si is slice data of shape [1, M]
x.lod = [[]]
x.shape = [3, M]
x.data = [[S0],
[S1],
[S2]]
# step 0, batch data corresponding to the 3 input sequences
out.lod = [[]]
out.shape = [3, M]
out.data = [[S2],
[S0],
[S1]]
# step 1, batch data corresponding to the 2 input sequences
out.lod = [[]]
out.shape = [2, M]
out.data = [[S2],
[S0]]
# step 2, batch data corresponding to the 1 input sequences
out.lod = [[]]
out.shape = [1, M]
out.data = [[S2]]
Args:
x (Variable): The static input LoDTensor which should hold the same number of sequences
as RNN's input (the input LoDTensor set by :code:`step_input()` ). If the LoD is None,
the input x will be treated as a minibatch with :code:`x.shape[0]` sequences of length 1.
Optional data types are: bool, float16, float32, float64, int8, int16, int32, int64, uint8.
Returns:
            Variable: The input LoDTensor after being sorted and shrunk. If there are :code:`num_sequences` \
sequences in RNN's input LoDTensor whose length is larger than :code:`step_idx` , \
the static input Tensor will be sorted to the same order as RNN's input and \
will only retain data corresponding to those :code:`num_sequences` sequences. \
The data type is the same as input. If :code:`x.lod == None` , the return value is \
a Tensor of shape :math:`\{num\_sequences, x.shape[1], ...\}` , or it will \
be a variable-length LoDTensor.
Raises:
ValueError: When :code:`static_input()` is called outside :code:`block()` .
TypeError: When x is not a Variable.
RuntimeError: When :code:`static_input()` is called before :code:`step_input()` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
encoder_proj = fluid.data(name='encoder_proj', shape=[None, 32], dtype='float32', lod_level=1)
decoder_boot = fluid.data(name='boot', shape=[None, 10], dtype='float32')
drnn = fluid.layers.DynamicRNN()
with drnn.block():
# Set sentence as RNN's input, each time step processes a word from the sentence
current_word = drnn.step_input(sentence)
# Set encode_proj as RNN's static input
encoder_word = drnn.static_input(encoder_proj)
# Initialize memory with boot_memory, which need reorder according to RNN's input sequences
memory = drnn.memory(init=decoder_boot, need_reorder=True)
fc_1 = fluid.layers.fc(input=encoder_word, size=30)
fc_2 = fluid.layers.fc(input=current_word, size=30)
decoder_inputs = fc_1 + fc_2
hidden, _, _ = fluid.layers.gru_unit(input=decoder_inputs, hidden=memory, size=30)
# Update memory with hidden
drnn.update_memory(ex_mem=memory, new_mem=hidden)
out = fluid.layers.fc(input=hidden, size=10, bias_attr=True, act='softmax')
# Set out as RNN's output
drnn.output(out)
# Get RNN's result
rnn_output = drnn()
"""
self._assert_in_rnn_block_("static_input")
if not isinstance(x, Variable):
raise TypeError(
"static_input() can only take a Variable as its input")
if self.lod_rank_table is None:
raise RuntimeError(
"static_input() must be called after step_input().")
parent_block = self._parent_block_()
x_reordered = parent_block.create_var(
name=unique_name.generate("dynamic_rnn_static_input_reordered"),
type=core.VarDesc.VarType.LOD_TENSOR,
dtype=x.dtype)
parent_block.append_op(
type='reorder_lod_tensor_by_rank',
inputs={'X': [x],
'RankTable': [self.lod_rank_table]},
outputs={'Out': [x_reordered]})
return shrink_memory(x_reordered, self.step_idx, self.lod_rank_table)
@signature_safe_contextmanager
def block(self):
"""
The function is used to list the operations executed during
each time step in RNN. The operation list will be executed :code:`max_sequence_len`
times (where :code:`max_sequence_len` is the maximum length of RNN's input sequences).
Raises:
ValueError: When :code:`block()` is called multi-times.
"""
if self.status != DynamicRNN.BEFORE_RNN:
            raise ValueError("rnn.block() can only be invoked once")
self.step_idx = fill_constant(
shape=[1], dtype='int64', value=0, force_cpu=True)
self.step_idx.stop_gradient = False
self.status = DynamicRNN.IN_RNN
with self.while_op.block():
yield
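            # After the user's per-step ops run: advance step_idx, persist the
            # updated memories, and refresh the while condition for the next step.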
increment(x=self.step_idx, value=1.0, in_place=True)
for new_mem, mem_array in self.mem_link:
array_write(x=new_mem, i=self.step_idx, array=mem_array)
less_than(
x=self.step_idx,
y=self.max_seq_len,
force_cpu=True,
cond=self.cond)
self.status = DynamicRNN.AFTER_RNN
for each_array in self.output_array:
self.outputs.append(
array_to_lod_tensor(
x=each_array, table=self.lod_rank_table))
def __call__(self, *args, **kwargs):
"""
        This function is used to get the output sequences of DynamicRNN.
Args:
None
Returns:
Variable or Variable list: RNN's output sequences.
Raises:
ValueError: When :code:`__call__()` is called before :code:`block()` .
"""
if self.status != DynamicRNN.AFTER_RNN:
raise ValueError(("Output of the dynamic RNN can only be visited "
"outside the rnn block."))
if len(self.outputs) == 1:
return self.outputs[0]
else:
return self.outputs
def memory(self,
init=None,
shape=None,
value=0.0,
need_reorder=False,
dtype='float32'):
"""
        Create a memory Variable for DynamicRNN to deliver data across time steps.
It can be initialized by an existing Tensor or a constant Tensor of given
dtype and shape.
Args:
init (Variable, optional): LoDTensor used to initialize the memory.
If init is not None, it should hold the same number of sequences
as RNN's input (the input LoDTensor set by :code:`step_input()` )
and the memory will be initialized to it. If init's LoD is None,
it will be treated as a minibatch with :code:`init.shape[0]` sequences
of length 1. The default value is None.
shape (list|tuple, optional): When init is None, it is used to specify
the memory's shape. Note that the shape does not include the batch_size.
If setting shape to :math:`\{D_1, D_2, ...\}` , the shape of memory Tensor
will be :math:`\{batch\_size, D_1, D_2, ...\}` , where batch_size is
determined by RNN's input sequences. The default value is None.
            value (float, optional): When init is None, it is used as the initialized value
                of memory. The default value is 0.0.
            need_reorder (bool, optional): When init is not None, it determines whether
                the memory needs to be reordered like the RNN's input sequences. It should be
                set to True when the initialized memory depends on the order of input samples.
                The default value is False.
dtype (str|numpy.dtype, optional): When init is None, it is used to set the
data type of memory. The default value is "float32". Optional data types
are: "float32", "float64", "int32", "int64".
Returns:
            Variable: The memory LoDTensor after being shrunk. If there are :code:`num_sequences` \
                sequences in RNN's input LoDTensor whose length is larger than :code:`step_idx` , \
                the memory Tensor also needs to be shrunk and will only retain data \
                corresponding to those :code:`num_sequences` sequences.
Raises:
ValueError: When :code:`memory()` is called outside :code:`block()` .
TypeError: When init is set and is not a Variable.
ValueError: When :code:`memory()` is called before :code:`step_input()` .
Examples:
.. code-block:: python
import paddle.fluid as fluid
sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
boot_memory = fluid.data(name='boot', shape=[None, 10], dtype='float32')
drnn = fluid.layers.DynamicRNN()
with drnn.block():
# Set sentence as RNN's input, each time step processes a word from the sentence
word = drnn.step_input(sentence)
# Initialize memory with boot_memory, which need reorder according to RNN's input sequences
memory = drnn.memory(init=boot_memory, need_reorder=True)
hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
# Update memory with hidden
drnn.update_memory(ex_mem=memory, new_mem=hidden)
# Set hidden as RNN's output
drnn.output(hidden)
# Get RNN's result
rnn_output = drnn()
Examples:
.. code-block:: python
import paddle.fluid as fluid
sentence = fluid.data(name='sentence', shape=[None, 32], dtype='float32', lod_level=1)
drnn = fluid.layers.DynamicRNN()
with drnn.block():
# Set sentence as RNN's input, each time step processes a word from the sentence
word = drnn.step_input(sentence)
# Initialize memory to a Tensor whose value is 0, shape=[batch_size, 10],
# where batch_size is the number of sequences in sentence.
memory = drnn.memory(shape=[10], dtype='float32', value=0)
hidden = fluid.layers.fc(input=[word, memory], size=10, act='tanh')
# Update memory with hidden
drnn.update_memory(ex_mem=memory, new_mem=hidden)
# Set hidden as RNN's output
drnn.output(hidden)
# Get RNN's result
rnn_output = drnn()
"""
self._assert_in_rnn_block_('memory')
self._init_zero_idx_()
if init is not None:
if not isinstance(init, Variable):
raise TypeError(
"The input arg `init` of memory() must be a Variable")
parent_block = self._parent_block_()
init_tensor = init
            if need_reorder:
                if self.lod_rank_table is None:
                    raise ValueError(
                        'If need_reorder is set to True, make sure step_input is '
                        'invoked before '
                        'memory(init=init, need_reorder=True, ...).')
init_reordered = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_mem_init_reordered'),
type=core.VarDesc.VarType.LOD_TENSOR,
dtype=init.dtype)
parent_block.append_op(
type='reorder_lod_tensor_by_rank',
inputs={
'X': [init_tensor],
'RankTable': [self.lod_rank_table]
},
outputs={'Out': [init_reordered]})
init_tensor = init_reordered
mem_array = parent_block.create_var(
name=unique_name.generate('dynamic_rnn_mem_array'),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=init.dtype)
parent_block.append_op(
type='write_to_array',
inputs={'X': init_tensor,
'I': self.zero_idx},
outputs={'Out': mem_array})
retv = array_read(array=mem_array, i=self.step_idx)
retv = shrink_memory(
x=retv, i=self.step_idx, table=self.lod_rank_table)
self.mem_dict[retv.name] = mem_array
return retv
else:
if len(self.input_array) == 0:
raise ValueError(
"step_input should be invoked before memory(shape=..., value=...)"
)
parent_block = self._parent_block_()
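            # No init given: synthesize one with the requested shape/value whose
            # batch dimension follows the (already shrunk) first step input,
            # then delegate to the init branch above.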
init = parent_block.create_var(
name=unique_name.generate('mem_init'), dtype=dtype)
arr, dtype = self.input_array[0]
in0 = parent_block.create_var(
name=unique_name.generate('in0'), dtype=dtype)
parent_block.append_op(
type='read_from_array',
inputs={'X': [arr],
'I': [self.zero_idx]},
outputs={'Out': [in0]})
parent_block.append_op(
type='fill_constant_batch_size_like',
inputs={'Input': [in0]},
outputs={'Out': [init]},
attrs={
'shape': [-1] + shape,
'value': float(value),
'dtype': init.dtype
})
return self.memory(init=init)
def update_memory(self, ex_mem, new_mem):
"""
        Update the memory which needs to be delivered across time steps.
Args:
ex_mem (Variable): The memory data of previous time step.
new_mem (Variable): The new memory data produced in current time step.
The shape and data type of ex_mem and new_mem should be the same.
Returns:
None
Raises:
ValueError: When :code:`update_memory()` is called outside :code:`block()` .
TypeError: When :code:`ex_mem` or :code:`new_mem` is not a Variable.
            ValueError: When :code:`ex_mem` is not a memory Variable returned by :code:`memory()` .
ValueError: When :code:`update_memory()` is called before :code:`step_input()` .
"""
self._assert_in_rnn_block_('update_memory')
if not isinstance(ex_mem, Variable):
raise TypeError("The input arg `ex_mem` of update_memory() must "
"be a Variable")
if not isinstance(new_mem, Variable):
raise TypeError("The input arg `new_mem` of update_memory() must "
"be a Variable")
mem_array = self.mem_dict.get(ex_mem.name, None)
if mem_array is None:
raise ValueError("Please invoke memory before update_memory")
if self.lod_rank_table is None:
raise ValueError("Please invoke step_input before update_memory")
self.mem_link.append((new_mem, mem_array))
def output(self, *outputs):
"""
This function is used to set :code:`outputs` as RNN's output.
Args:
*outputs (Variable ...): The output Tensor. DynamicRNN can mark multiple
Variables as its output.
Returns:
None
Raises:
ValueError: When :code:`output()` is called outside :code:`block()` .
"""
self._assert_in_rnn_block_('output')
parent_block = self._parent_block_()
for each in outputs:
outside_array = parent_block.create_var(
name=unique_name.generate_with_ignorable_key("_".join(
[self.helper.name, "output_array", each.name])),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=each.dtype)
array_write(x=each, i=self.step_idx, array=outside_array)
self.output_array.append(outside_array)
def _init_zero_idx_(self):
if self.zero_idx is None:
parent_block = self._parent_block_()
self.zero_idx = parent_block.create_var(
name=unique_name.generate('zero_idx'), dtype='int64')
parent_block.append_op(
type='fill_constant',
inputs={},
outputs={'Out': [self.zero_idx]},
attrs={
'shape': [1],
'dtype': self.zero_idx.dtype,
'value': float(0),
'force_cpu': True
})
def _parent_block_(self):
prog = self.helper.main_program
parent_idx = prog.current_block().parent_idx
assert parent_idx >= 0
parent_block = prog.block(parent_idx)
return parent_block
def _assert_in_rnn_block_(self, method):
if self.status != DynamicRNN.IN_RNN:
raise ValueError("{0} can only be invoked inside rnn block.".format(
method))
def switch_case(branch_index, branch_fns, default=None, name=None):
'''
This operator is like a C++ switch/case statement.
Args:
branch_index(Variable): A Tensor with shape [1] to specify which branch to execute. The data type is ``int32``, ``int64`` or ``uint8``.
branch_fns(dict|list|tuple): If it's a list or tuple, the elements in it could be pairs of (int, callable) or simple callables whose actual index will be used as the index of callable. If it's a dict, its key is a python integer and the value is a callable. All callables return the same structure of Tensors.
default(callable, optional): Callable that returns a structure of Tensors.
name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`.
Returns:
Variable|list(Variable): Tensors returned by the callable specified by ``branch_index`` in ``branch_fns``,
or Tensors returned by ``default`` if ``default`` is not None and no index matches in ``branch_fns``,
or Tensors returned by the callable with the max index in ``branch_fns`` if ``default`` is None and no index matches in ``branch_fns``.
Raises:
TypeError: If the type of ``branch_index`` is not Variable.
TypeError: If the data type of ``branch_index`` is not ``int32``, ``int64`` or ``uint8``.
TypeError: If the type of ``branch_fns`` is not dict, list or tuple.
TypeError: If the elements of ``branch_fns`` is not 2-tuple.
TypeError: If the first element of 2-tuple in ``branch_fns`` is not integer.
ValueError: If the first element of 2-tuple in ``branch_fns`` is not unique.
TypeError: If the second element of 2-tuple in ``branch_fns`` is not callable.
TypeError: If ``default`` is not None but it is not callable.
Examples:
.. code-block:: python
import paddle.fluid as fluid
import paddle.fluid.layers as layers
def fn_1():
return layers.fill_constant(shape=[1, 2], dtype='float32', value=1)
def fn_2():
return layers.fill_constant(shape=[2, 2], dtype='int32', value=2)
def fn_3():
return layers.fill_constant(shape=[3], dtype='int32', value=3)
            main_program = fluid.default_main_program()
            startup_program = fluid.default_startup_program()
with fluid.program_guard(main_program, startup_program):
index_1 = layers.fill_constant(shape=[1], dtype='int32', value=1)
index_2 = layers.fill_constant(shape=[1], dtype='int32', value=2)
out_1 = layers.switch_case(
branch_index=index_1,
branch_fns={1: fn_1, 2: fn_2},
default=fn_3)
out_2 = layers.switch_case(
branch_index=index_2,
branch_fns=[(1, fn_1), (2, fn_2)],
default=fn_3)
# Argument default is None and no index matches. fn_3 will be called because of the max index 7.
out_3 = layers.switch_case(
branch_index=index_2,
branch_fns=[(0, fn_1), (4, fn_2), (7, fn_3)])
exe = fluid.Executor(fluid.CPUPlace())
res_1, res_2, res_3 = exe.run(main_program,
fetch_list=[out_1, out_2, out_3])
print(res_1) # [[1. 1.]]
print(res_2) # [[2 2] [2 2]]
print(res_3) # [3 3 3]
'''
helper = LayerHelper('switch_case', **locals())
def _check_args(branch_index, branch_fns, default):
if not isinstance(branch_index, Variable):
raise TypeError(
_error_message("The type", "branch_index", "switch_case",
"Variable", type(branch_index)))
if convert_dtype(branch_index.dtype) not in ["uint8", "int32", "int64"]:
raise TypeError(
_error_message("The data type", "branch_index", "switch_case",
"uint8, int32 or int64",
convert_dtype(branch_index.dtype)))
if convert_dtype(branch_index.dtype) != "int64":
branch_index = cast(branch_index, "int64")
if not isinstance(branch_fns, (list, tuple, dict)):
raise TypeError(
_error_message("The type", "branch_fns", "switch_case",
"dict, tuple or list", type(branch_fns)))
branch_fns = branch_fns.items() if isinstance(branch_fns,
dict) else branch_fns
branch_fns = list(enumerate(branch_fns)) if all(
callable(fn) for fn in branch_fns) else branch_fns
keys_of_fns = []
for index_fn_pair in branch_fns:
if not isinstance(index_fn_pair, tuple):
raise TypeError(
_error_message("The elements' type", "branch_fns",
"switch_case", "tuple", type(branch_fns)))
if len(index_fn_pair) != 2:
raise TypeError(
_error_message("The tuple's size", "branch_fns",
"switch_case", "2",
str(len(index_fn_pair)) + "-tuple"))
key, fn = index_fn_pair
if not isinstance(key, int):
raise TypeError(
_error_message("The key's type", "branch_fns",
"switch_case", "int", type(key)))
if key in keys_of_fns:
raise ValueError(
"The key in 'branch_fns' must be unique, but '{}' appears more than once.".
format(key))
else:
keys_of_fns.append(key)
if not callable(fn):
raise TypeError(
_error_message("The type of function for key {}".format(
key), "branch_fns", "switch_case", "callable", type(
fn)))
if default is None:
default = sorted(branch_fns)[-1][1]
branch_fns = sorted(branch_fns)[:-1]
elif not callable(default):
            raise TypeError("The default in Op(switch_case) must be callable.")
pred_fn_pairs = []
for index, fn in branch_fns:
new_index = fill_constant(shape=[1], dtype="int64", value=index)
pred = equal(branch_index, new_index)
pred_fn_pairs.append((pred, fn))
return pred_fn_pairs, default
pred_fn_pairs, default = _check_args(branch_index, branch_fns, default)
false_fn = default
for pred, true_fn in pred_fn_pairs:
false_fn = partial(cond, pred=pred, true_fn=true_fn, false_fn=false_fn)
final_fn = false_fn
return final_fn()
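# Like `case`, `switch_case` lowers to nested cond() calls: each branch key is
# compared against branch_index with equal(), and the resulting (pred, fn)
# pairs are folded with `default` as the innermost false branch; since keys are
# unique, at most one pred is true, so the fold order does not matter.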
@templatedoc()
def reorder_lod_tensor_by_rank(x, rank_table):
"""
${comment}
Args:
x(${x_type}): ${x_comment}.
rank_table(${rank_table_type}): ${rank_table_comment}.
Returns:
out(${out_type}): ${out_comment}.
Examples:
.. code-block:: python
import paddle.fluid as fluid
data_desc = (['input', [9], 0], ['ref', [5], 1])
data = fluid.layers.data(name=data_desc[0][0], shape=data_desc[0][1])
rank_data = fluid.layers.data(name=data_desc[1][0], shape=data_desc[1][1])
table = fluid.layers.control_flow.lod_rank_table(rank_data)
new_data = fluid.layers.reorder_lod_tensor_by_rank(
x=data, rank_table=table)
"""
helper = LayerHelper('reorder_lod_tensor_by_rank', **locals())
helper.is_instance('x', Variable)
helper.is_instance('rank_table', Variable)
out = helper.create_variable_for_type_inference(dtype=x.dtype)
helper.append_op(
type='reorder_lod_tensor_by_rank',
inputs={'X': [x],
'RankTable': [rank_table]},
outputs={'Out': [out]})
return out
def is_empty(x, cond=None):
"""
Test whether a Variable is empty.
Args:
x (Variable): The Variable to be tested.
cond (Variable, optional): Output parameter. Default: None. If this parameter is given, it
saves the test result of given 'x'.
Returns:
Variable: A bool scalar. True if 'x' is an empty Variable.
Raises:
TypeError: If input cond is not a variable, or cond's dtype is
not bool.
Examples:
.. code-block:: python
import paddle.fluid as fluid
input = fluid.layers.data(name="input", shape=[4, 32, 32], dtype="float32")
res = fluid.layers.is_empty(x=input)
# or:
# fluid.layers.is_empty(x=input, cond=res)
"""
helper = LayerHelper("is_empty", **locals())
if cond is None:
cond = helper.create_variable_for_type_inference(dtype='bool')
cond.stop_gradient = True
elif not isinstance(cond, Variable):
raise TypeError("cond takes a variable")
    elif convert_dtype(cond.dtype) != 'bool':
        raise TypeError("The data type of cond must be bool")
helper.append_op(
type='is_empty', inputs={'X': [x]}, outputs={'Out': [cond]})
return cond
def array_write(x, i, array=None):
    """
This OP writes the input ``x`` into the i-th position of the ``array``
:ref:`api_fluid_LoDTensorArray` and returns the modified array.
If ``array`` is none, a new LoDTensorArray will be created and returned.
This OP is often used together with :ref:`api_fluid_layers_array_read` OP.
Args:
x (Variable): The input data to be written into array. It's multi-dimensional
Tensor or LoDTensor. Data type: float32, float64, int32, int64.
i (Variable): 1-D Tensor with shape [1], which represents the position into which
``x`` is written. Data type: int64.
array (LoDTensorArray, optional): The LoDTensorArray into which ``x`` is written.
The default value is None, when a new LoDTensorArray will be created and returned
as a result.
Returns:
Variable: The input ``array`` after ``x`` is written into.
Examples:
.. code-block:: python
import paddle.fluid as fluid
tmp = fluid.layers.fill_constant(shape=[3, 2], dtype='int64', value=5)
i = fluid.layers.fill_constant(shape=[1], dtype='int64', value=10)
# Write tmp into the position of arr with subscript 10 and return arr.
arr = fluid.layers.array_write(tmp, i=i)
# Now, arr is a LoDTensorArray with length 11. We can use array_read OP to read
# the data at subscript 10 and print it out.
item = fluid.layers.array_read(arr, i=i)
input = fluid.layers.Print(item, message="The content of i-th LoDTensor:")
main_program = fluid.default_main_program()
exe = fluid.Executor(fluid.CPUPlace())
exe.run(main_program)
# The printed result is:
# 1570533133 The content of i-th LoDTensor: The place is:CPUPlace
# Tensor[array_read_0.tmp_0]
# shape: [3,2,]
# dtype: l
# data: 5,5,5,5,5,5,
# the output is 2-D Tensor with shape [3,2], which is tmp above.
# dtype is the corresponding C++ data type, which may vary in different environments.
# Eg: if the data type of tensor is int64, then the corresponding C++ data type is int64_t,
# so the dtype value is typeid(int64_t).Name(), which is 'x' on MacOS, 'l' on Linux,
        # and '__int64' on Windows. They all represent 64-bit integer variables.
"""
helper = LayerHelper('array_write', **locals())
if array is None:
array = helper.create_variable(
name="{0}.out".format(helper.name),
type=core.VarDesc.VarType.LOD_TENSOR_ARRAY,
dtype=x.dtype)
helper.append_op(
type='write_to_array',
inputs={'X': [x],
'I': [i]},
outputs={'Out': [array]})
    return array
Heading.js
import { Node } from 'tiptap'
import { setBlockType, textblockTypeInputRule, toggleBlockType } from 'tiptap-commands'
import { ParagraphNodeSpec, getParagraphNodeAttrs, toParagraphDOM } from 'src/extentions/Paragraph'
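// Build a short (6 hex chars) id from a UUID-v4-style random string; it is
// used below to give each heading a stable DOM id.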
function getUuid () {
const s = []
const hexDigits = '0123456789abcdef'
for (let i = 0; i < 36; i++) {
s[i] = hexDigits.substr(Math.floor(Math.random() * 0x10), 1)
}
s[14] = '4' // bits 12-15 of the time_hi_and_version field to 0010
s[19] = hexDigits.substr((s[19] & 0x3) | 0x8, 1) // bits 6-7 of the clock_seq_hi_and_reserved to 01
s[8] = s[13] = s[18] = s[23] = '' // -
return s.join('').substr(0, 6)
}
function getAttrs (dom) {
const attrs = getParagraphNodeAttrs(dom)
const id = dom.getAttribute('id')
const level = parseInt(dom.getAttribute('level'), 10) || 0
attrs.id = id
attrs.level = level
return attrs
}
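// toDOM reuses the paragraph serializer, swapping the tag to h{level} and
// persisting a generated id on the node so the heading keeps a stable anchor.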
function toDOM (node) {
const dom = toParagraphDOM(node)
const id = node.attrs.id || getUuid()
const level = node.attrs.level || 1
dom[0] = 'h'.concat(node.attrs.level)
dom[1].id = id
dom[1].level = level
node.attrs.id = id
return dom
}
export default class Heading extends Node {
get name () {
return 'heading'
}
get defaultOptions () {
return {
levels: [1, 2, 3, 4, 5, 6],
}
}
get schema () {
return {
...ParagraphNodeSpec,
attrs: {
        ...ParagraphNodeSpec.attrs,
        level: {
          default: 1,
        },
        id: {
default: ''
}
},
content: 'inline*',
group: 'block',
defining: true,
draggable: false,
parseDOM: this.options.levels
.map(level => ({
tag: `h${level}`,
getAttrs
})),
toDOM
}
}
commands ({ type, schema }) {
return attrs => toggleBlockType(type, schema.nodes.paragraph, attrs)
}
keys ({ type }) {
return this.options.levels.reduce((items, level) => ({
...items,
...{
[`Shift-Ctrl-${level}`]: setBlockType(type, { level }),
},
}), {})
}
inputRules ({ type }) {
return this.options.levels.map(level => textblockTypeInputRule(
new RegExp(`^(#{1,${level}})\\s$`),
type,
() => ({ level }),
))
}
} | level: {
default: 1,
}, |
demoing.js | var gulp = require('gulp');
const ghPages = require('gulp-gh-pages');
gulp.task('pages', function() { | return gulp.src(["./index.html","./demo/*","./test/*", "./bower_components/**/*"],{base: '.'})
.pipe(ghPages());
}); |
|
theme.js | import { extendTheme } from "@chakra-ui/react";
const config = {
initialColorMode: "dark",
useSystemColorMode: false,
};
const theme = extendTheme({
config,
colors: {
purple: {
50: "#EEE6FE",
100: "#CFBAFC",
200: "#B18EFB",
300: "#9262F9",
400: "#7436F7",
500: "#5509F6",
600: "#4408C4",
700: "#330693",
800: "#220462",
900: "#110231",
},
blue: {
50: "#E7EDFE",
100: "#BCCBFB",
200: "#91AAF8",
300: "#6589F5",
400: "#3A68F3",
500: "#0F47F0", | 900: "#030E30",
},
pink: {
50: "#FCE9F6",
100: "#F5C1E7",
200: "#EF99D7",
300: "#E972C8",
400: "#E34AB8",
500: "#DD22A9",
600: "#B11B87",
700: "#851465",
800: "#580E44",
900: "#2C0722",
},
"green": {
"50": "#ECF8F6",
"100": "#CBECE5",
"200": "#AADFD5",
"300": "#88D3C4",
"400": "#67C6B4",
"500": "#45BAA3",
"600": "#389482",
"700": "#2A6F62",
"800": "#1C4A41",
"900": "#0E2521"
},
yellow: {
50: "#FEFBE7",
100: "#FBF3BC",
200: "#F9EB90",
300: "#F6E365",
400: "#F3DB39",
500: "#F1D30E",
600: "#C1A80B",
700: "#917E08",
800: "#605406",
900: "#302A03",
},
orange: {
50: "#FEF0E6",
100: "#FCD5BA",
200: "#FBB98E",
300: "#F99E62",
400: "#F88335",
500: "#F66809",
600: "#C55307",
700: "#943E05",
800: "#622A04",
900: "#311502",
},
red: {
50: "#FBEAEE",
100: "#F3C3D0",
200: "#EB9DB2",
300: "#E47794",
400: "#DC5176",
500: "#D52A58",
600: "#AA2246",
700: "#801935",
800: "#551123",
900: "#2B0812",
}
},
});
export default theme; | 600: "#0C39C0",
700: "#092A90",
800: "#061C60", |
write_buffer.go | package clickhouse
import "io"
import "sync"
const WriteBufferInitialSize = 256 * 1024
// chunkPool recycles buffer chunks between writeBuffer instances so later writes can reuse prior allocations
var chunkPool = sync.Pool{}
func wb(initSize int) *writeBuffer |
type writeBuffer struct{ chunks [][]byte }
func (wb *writeBuffer) Write(data []byte) (int, error) {
var (
chunkIdx = len(wb.chunks) - 1
dataSize = len(data)
)
for {
freeSize := cap(wb.chunks[chunkIdx]) - len(wb.chunks[chunkIdx])
if freeSize >= len(data) {
wb.chunks[chunkIdx] = append(wb.chunks[chunkIdx], data...)
return dataSize, nil
}
wb.chunks[chunkIdx] = append(wb.chunks[chunkIdx], data[:freeSize]...)
data = data[freeSize:]
wb.addChunk(0, wb.calcCap(len(data)))
chunkIdx++
}
}
func (wb *writeBuffer) alloc(size int) []byte {
var (
chunkIdx = len(wb.chunks) - 1
chunkLen = len(wb.chunks[chunkIdx])
)
if (cap(wb.chunks[chunkIdx]) - chunkLen) < size {
wb.addChunk(size, wb.calcCap(size))
return wb.chunks[chunkIdx+1]
}
wb.chunks[chunkIdx] = wb.chunks[chunkIdx][:chunkLen+size]
return wb.chunks[chunkIdx][chunkLen : chunkLen+size]
}
func (wb *writeBuffer) addChunk(size, capacity int) {
var chunk []byte
if c, ok := chunkPool.Get().([]byte); ok && cap(c) >= size {
chunk = c[:size]
} else {
chunk = make([]byte, size, capacity)
}
wb.chunks = append(wb.chunks, chunk)
}
func (wb *writeBuffer) writeTo(w io.Writer) error {
for _, chunk := range wb.chunks {
if _, err := w.Write(chunk); err != nil {
wb.free()
return err
}
}
wb.free()
return nil
}
func (wb *writeBuffer) bytes() []byte {
if len(wb.chunks) == 1 {
return wb.chunks[0]
}
bytes := make([]byte, 0, wb.len())
for _, chunk := range wb.chunks {
bytes = append(bytes, chunk...)
}
return bytes
}
func (wb *writeBuffer) len() int {
var v int
for _, chunk := range wb.chunks {
v += len(chunk)
}
return v
}
func (wb *writeBuffer) calcCap(dataSize int) int {
dataSize = max(dataSize, 64)
if len(wb.chunks) == 0 {
return dataSize
}
// Always double the size of the last chunk
return max(dataSize, cap(wb.chunks[len(wb.chunks)-1])*2)
}
func (wb *writeBuffer) free() {
if len(wb.chunks) == 0 {
return
}
// Recycle all chunks except the last one
chunkSizeThreshold := cap(wb.chunks[0])
for _, chunk := range wb.chunks[:len(wb.chunks)-1] {
// Recycle chunks at least as large as the running threshold; a smaller chunk lowers the threshold and is left to the GC
if cap(chunk) >= chunkSizeThreshold {
chunkPool.Put(chunk[:0])
} else {
chunkSizeThreshold = cap(chunk)
}
}
// Keep only the last chunk (the largest, due to capacity doubling) for reuse
wb.chunks[0] = wb.chunks[len(wb.chunks)-1][:0]
wb.chunks = wb.chunks[:1]
}
func max(a, b int) int {
if b > a {
return b
}
return a
}
| {
wb := &writeBuffer{}
wb.addChunk(0, initSize)
return wb
} |
validation-middleware.ts | import { FoundResource, ConnectorMiddleware } from '../../contracts/connector'
import { Headers, IResponse, Payload } from '../../contracts/http'
import { IValidation } from '../../contracts/validation'
import { Injectable } from '../../container'
@Injectable()
export class ValidationMiddleware implements ConnectorMiddleware {
/**
* Name of form used in given request.
* @protected
*/
protected formName: string = ''
/**
* Injected validation service.
* @protected
*/
protected validationService: IValidation
/**
* Class Constructor.
* @param validationService
*/
constructor (validationService: IValidation) {
this.validationService = validationService
}
/**
* Method to be called after call execution.
* It handles side effects.
*/ | public afterCall (response: IResponse): void {
if (response.status === 422 && response.errors !== null) {
this.validationService.pushErrors(
this.formName,
typeof response.errors.errors === 'object'
? response.errors.errors : {},
typeof response.errors.message === 'string' ? response.errors.message : null
)
}
}
/**
* Method to be called before call execution.
* It can transform headers and body for a given resource.
*/
public beforeCall (resource: FoundResource, headers: Headers, body: Payload) {
this.formName = resource.shorthand as string
this.validationService.clearForm(this.formName)
return { headers, body }
}
} | |
public-api.ts | export * from './search.component';
export * from './search.module'; |
||
index.js | import $ from 'jquery'
import throttle from 'lodash.throttle'
import '../scss/index.scss'
// allow hot reload for html files by requiring them here
if(process.env.NODE_ENV === 'development') {
// pages
require('../views/pages/index.pug')
require('../views/pages/courses.pug')
require('../views/pages/home-health-aide.pug')
require('../views/pages/medical-assistant.pug')
require('../views/pages/medical-coding-specialist.pug')
require('../views/pages/patient-care-technician.pug')
require('../views/pages/pharmacy-technician.pug')
// partials
require('../views/partials/footer.pug')
require('../views/partials/contact-modal.pug')
require('../views/partials/partners.pug')
require('../views/partials/cta.pug')
require('../views/partials/header.pug')
require('../views/partials/layout.pug')
require('../views/partials/head.pug')
}
// cache DOM
const $body = $('body')
const $navContainer = $('.nav-container')
const $mainNav = $('.main-nav')
const homeIcon1 = { element: $('.benefits-icon.icon1'), position: null }
const homeIcon2 = { element: $('.benefits-icon.icon2'), position: null }
const homeIcon3 = { element: $('.benefits-icon.icon3'), position: null }
const testimonial1 = { element: $('.testimonial-img.img1'), position: null }
const testimonial2 = { element: $('.testimonial-img.img2'), position: null }
const testimonial3 = { element: $('.testimonial-img.img3'), position: null }
// Mobile Menu functionality
function mobileMenu() {
if(+window.innerWidth < 768) {
$body.click((e) => {
if(!$(e.target).is('.nav-button, .nav-button .line')) {
$mainNav.slideUp()
}
})
$('.nav-button').click(() => {
$mainNav.slideToggle(200)
})
$('.nav-list-li-a').click(() => {
$mainNav.slideUp()
})
}
}
function fixMenu() {
if($body[0].scrollTop > 40) {
$navContainer.addClass('fixed')
if(+window.innerWidth < 768) {
$mainNav.css({top: '53px'})
}
}
else {
$navContainer.removeClass('fixed')
if(+window.innerWidth < 768) {
$mainNav.css({top: '73px'})
}
}
}
// contact modal and form
function contactForm() {
const $contactModal = $('.contact-modal')
$('.contact-modal .overlay, .contact-modal .close').click(() => {
$contactModal.fadeOut(200)
})
$('.contact-btn').click(e => {
e.preventDefault()
$contactModal.show(0)
})
$('#contact-form').submit(e => {
e.preventDefault()
$.ajax({
url: 'http://fvi-grad.com:4004/email',
method: 'POST',
data: $('#contact-form').serialize()
})
.done(data => {
$('#contact-form').hide()
$('.thank-you').show()
$('#contact-form')[0].reset()
})
.fail(err => {
console.log(err);
})
})
}
// animations
const supportPageOffset = window.pageXOffset !== undefined;
const isCSS1Compat = ((document.compatMode || "") === "CSS1Compat");
const windowScroll = supportPageOffset ? window.pageYOffset : isCSS1Compat ? document.documentElement.scrollTop : document.body.scrollTop;
let windowHeight = null
const animElements = [ homeIcon1, homeIcon2, homeIcon3, testimonial1, testimonial2, testimonial3 ]
const offset = 230
function playAnimations() {
animElements.forEach(el => {
const triggerPoint = el.position - windowHeight + offset
if(window.scrollY > triggerPoint)
el.element.addClass('visible')
else
el.element.removeClass('visible')
})
}
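// Worked example: an element whose top sits 1200px down the page, viewed in an
// 800px-tall window with the 230px offset above, gains the 'visible' class once
// window.scrollY exceeds 1200 - 800 + 230 = 630.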
// scroll handler
function | () {
fixMenu()
playAnimations()
}
$(document).ready(() => {
// wait for page height to be set
setTimeout(function() {
windowHeight = $(window).height()
homeIcon1.position = $('.benefits-icon.icon1').offset().top
homeIcon2.position = $('.benefits-icon.icon2').offset().top
homeIcon3.position = $('.benefits-icon.icon3').offset().top
testimonial1.position = $('.testimonial-img.img1').offset().top
testimonial2.position = $('.testimonial-img.img2').offset().top
testimonial3.position = $('.testimonial-img.img3').offset().top
}, 1000)
mobileMenu()
contactForm()
$(document).scroll(throttle(onScroll, 100))
})
| onScroll |
urls.go | package services
| )
func listURL(c *gophercloud.ServiceClient) string {
return c.ServiceURL("os-services")
}
func updateURL(c *gophercloud.ServiceClient, id string) string {
return c.ServiceURL("os-services", id)
}
func deleteURL(c *gophercloud.ServiceClient, id string) string {
return c.ServiceURL("os-services", id)
} | import (
"github.com/lxdcc/gophercloud" |
client_deviceadm.go | // Copyright 2016 Mender Software AS
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package main
import (
"bytes"
"encoding/json"
"net/http"
"time"
"github.com/mendersoftware/deviceauth/log"
"github.com/mendersoftware/deviceauth/requestid"
"github.com/mendersoftware/deviceauth/utils"
"github.com/pkg/errors"
)
const (
// devices endpoint
DevAdmDevicesUri = "/api/0.1.0/devices/"
// default request timeout (10s)
defaultDevAdmReqTimeout = time.Duration(10) * time.Second
)
type DevAdmClientConfig struct {
// device add URL
DevAdmAddr string
// request timeout
Timeout time.Duration
}
type DevAdmClientI interface {
AddDevice(dev *Device, client requestid.ApiRequester) error
log.ContextLogger
}
type DevAdmClient struct {
log *log.Logger
conf DevAdmClientConfig
}
func (d *DevAdmClient) AddDevice(dev *Device, client requestid.ApiRequester) error {
d.log.Debugf("add device %s for admission", dev.Id)
AdmReqJson, err := json.Marshal(AdmReq{
IdData: dev.IdData,
PubKey: dev.PubKey,
})
if err != nil {
return errors.Wrapf(err, "failed to prepare device admission request")
}
contentReader := bytes.NewReader(AdmReqJson)
req, err := http.NewRequest(
http.MethodPut,
utils.JoinURL(d.conf.DevAdmAddr, DevAdmDevicesUri+dev.Id),
contentReader)
if err != nil {
return errors.Wrapf(err, "failed to create request")
}
req.Header.Set("Content-Type", "application/json")
rsp, err := client.Do(req)
if err != nil {
return errors.Wrapf(err, "failed to add device")
}
defer rsp.Body.Close()
if rsp.StatusCode != http.StatusNoContent {
return errors.Errorf(
"device add request failed with status %v", rsp.Status)
}
return nil
}
func (d *DevAdmClient) UseLog(l *log.Logger) {
d.log = l.F(log.Ctx{})
}
func GetDevAdmClient(c DevAdmClientConfig, l *log.Logger) *DevAdmClient |
func NewDevAdmClient(c DevAdmClientConfig) *DevAdmClient {
if c.Timeout == 0 {
c.Timeout = defaultDevAdmReqTimeout
}
return &DevAdmClient{
log: log.New(log.Ctx{}),
conf: c,
}
}
| {
l = l.F(log.Ctx{})
client := NewDevAdmClient(c)
client.UseLog(l)
return client
} |
vega-scenegraph.js | (function (global, factory) {
typeof exports === 'object' && typeof module !== 'undefined' ? factory(exports, require('vega-util'), require('vega-canvas'), require('vega-loader'), require('d3-shape'), require('d3-path')) :
typeof define === 'function' && define.amd ? define(['exports', 'vega-util', 'vega-canvas', 'vega-loader', 'd3-shape', 'd3-path'], factory) :
(factory((global.vega = {}),global.vega,global.vega,global.vega,global.d3,global.d3));
}(this, (function (exports,vegaUtil,vegaCanvas,vegaLoader,d3Shape,d3Path) { 'use strict';
function Bounds(b) {
this.clear();
if (b) this.union(b);
}
var prototype = Bounds.prototype;
prototype.clone = function() {
return new Bounds(this);
};
prototype.clear = function() {
this.x1 = +Number.MAX_VALUE;
this.y1 = +Number.MAX_VALUE;
this.x2 = -Number.MAX_VALUE;
this.y2 = -Number.MAX_VALUE;
return this;
};
prototype.empty = function() {
return (
this.x1 === +Number.MAX_VALUE &&
this.y1 === +Number.MAX_VALUE &&
this.x2 === -Number.MAX_VALUE &&
this.y2 === -Number.MAX_VALUE
);
};
prototype.set = function(x1, y1, x2, y2) {
if (x2 < x1) {
this.x2 = x1;
this.x1 = x2;
} else {
this.x1 = x1;
this.x2 = x2;
}
if (y2 < y1) {
this.y2 = y1;
this.y1 = y2;
} else {
this.y1 = y1;
this.y2 = y2;
}
return this;
};
prototype.add = function(x, y) {
if (x < this.x1) this.x1 = x;
if (y < this.y1) this.y1 = y;
if (x > this.x2) this.x2 = x;
if (y > this.y2) this.y2 = y;
return this;
};
prototype.expand = function(d) {
this.x1 -= d;
this.y1 -= d;
this.x2 += d;
this.y2 += d;
return this;
};
prototype.round = function() {
this.x1 = Math.floor(this.x1);
this.y1 = Math.floor(this.y1);
this.x2 = Math.ceil(this.x2);
this.y2 = Math.ceil(this.y2);
return this;
};
prototype.translate = function(dx, dy) {
this.x1 += dx;
this.x2 += dx;
this.y1 += dy;
this.y2 += dy;
return this;
};
prototype.rotate = function(angle, x, y) {
var cos = Math.cos(angle),
sin = Math.sin(angle),
cx = x - x*cos + y*sin,
cy = y - x*sin - y*cos,
x1 = this.x1, x2 = this.x2,
y1 = this.y1, y2 = this.y2;
return this.clear()
.add(cos*x1 - sin*y1 + cx, sin*x1 + cos*y1 + cy)
.add(cos*x1 - sin*y2 + cx, sin*x1 + cos*y2 + cy)
.add(cos*x2 - sin*y1 + cx, sin*x2 + cos*y1 + cy)
.add(cos*x2 - sin*y2 + cx, sin*x2 + cos*y2 + cy);
};
prototype.union = function(b) {
if (b.x1 < this.x1) this.x1 = b.x1;
if (b.y1 < this.y1) this.y1 = b.y1;
if (b.x2 > this.x2) this.x2 = b.x2;
if (b.y2 > this.y2) this.y2 = b.y2;
return this;
};
prototype.intersect = function(b) {
if (b.x1 > this.x1) this.x1 = b.x1;
if (b.y1 > this.y1) this.y1 = b.y1;
if (b.x2 < this.x2) this.x2 = b.x2;
if (b.y2 < this.y2) this.y2 = b.y2;
return this;
};
prototype.encloses = function(b) {
return b && (
this.x1 <= b.x1 &&
this.x2 >= b.x2 &&
this.y1 <= b.y1 &&
this.y2 >= b.y2
);
};
prototype.alignsWith = function(b) {
return b && (
this.x1 == b.x1 ||
this.x2 == b.x2 ||
this.y1 == b.y1 ||
this.y2 == b.y2
);
};
prototype.intersects = function(b) {
return b && !(
this.x2 < b.x1 ||
this.x1 > b.x2 ||
this.y2 < b.y1 ||
this.y1 > b.y2
);
};
prototype.contains = function(x, y) {
return !(
x < this.x1 ||
x > this.x2 ||
y < this.y1 ||
y > this.y2
);
};
prototype.width = function() {
return this.x2 - this.x1;
};
prototype.height = function() {
return this.y2 - this.y1;
};
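// Usage sketch: a Bounds accumulates points and reports extents.
// var b = new Bounds().add(1, 2).add(5, -3);
// b.width(); // 4
// b.height(); // 5
// b.contains(3, 0); // true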
var gradient_id = 0;
function Gradient(p0, p1) {
var stops = [], gradient;
return gradient = {
id: 'gradient_' + (gradient_id++),
x1: p0 ? p0[0] : 0,
y1: p0 ? p0[1] : 0,
x2: p1 ? p1[0] : 1,
y2: p1 ? p1[1] : 0,
stops: stops,
stop: function(offset, color) {
stops.push({offset: offset, color: color});
return gradient;
}
};
}
function Item(mark) {
this.mark = mark;
this.bounds = (this.bounds || new Bounds());
}
function GroupItem(mark) {
Item.call(this, mark);
this.items = (this.items || []);
}
vegaUtil.inherits(GroupItem, Item);
function ResourceLoader(customLoader) {
this._pending = 0;
this._loader = customLoader || vegaLoader.loader();
}
var prototype$1 = ResourceLoader.prototype;
prototype$1.pending = function() {
return this._pending;
};
function increment(loader) {
loader._pending += 1;
}
function decrement(loader) {
loader._pending -= 1;
}
prototype$1.sanitizeURL = function(uri) {
var loader = this;
increment(loader);
return loader._loader.sanitize(uri, {context:'href'})
.then(function(opt) {
decrement(loader);
return opt;
})
.catch(function() {
decrement(loader);
return null;
});
};
prototype$1.loadImage = function(uri) {
var loader = this,
Image = vegaCanvas.image();
increment(loader);
return loader._loader
.sanitize(uri, {context: 'image'})
.then(function(opt) {
var url = opt.href;
if (!url || !Image) throw {url: url};
var img = new Image();
img.onload = function() {
decrement(loader);
img.loaded = true;
};
img.onerror = function() {
decrement(loader);
img.loaded = false;
};
img.src = url;
return img;
})
.catch(function(e) {
decrement(loader);
return {loaded: false, width: 0, height: 0, src: e && e.url || ''};
});
};
prototype$1.ready = function() {
var loader = this;
return new Promise(function(accept) {
function poll(value) {
if (!loader.pending()) accept(value);
else setTimeout(function() { poll(true); }, 10);
}
poll(false);
});
};
var lookup = {
'basis': {
curve: d3Shape.curveBasis
},
'basis-closed': {
curve: d3Shape.curveBasisClosed
},
'basis-open': {
curve: d3Shape.curveBasisOpen
},
'bundle': {
curve: d3Shape.curveBundle,
tension: 'beta',
value: 0.85
},
'cardinal': {
curve: d3Shape.curveCardinal,
tension: 'tension',
value: 0
},
'cardinal-open': {
curve: d3Shape.curveCardinalOpen,
tension: 'tension',
value: 0
},
'cardinal-closed': {
curve: d3Shape.curveCardinalClosed,
tension: 'tension',
value: 0
},
'catmull-rom': {
curve: d3Shape.curveCatmullRom,
tension: 'alpha',
value: 0.5
},
'catmull-rom-closed': {
curve: d3Shape.curveCatmullRomClosed,
tension: 'alpha',
value: 0.5
},
'catmull-rom-open': {
curve: d3Shape.curveCatmullRomOpen,
tension: 'alpha',
value: 0.5
},
'linear': {
curve: d3Shape.curveLinear
},
'linear-closed': {
curve: d3Shape.curveLinearClosed
},
'monotone': {
horizontal: d3Shape.curveMonotoneY,
vertical: d3Shape.curveMonotoneX
},
'natural': {
curve: d3Shape.curveNatural
},
'step': {
curve: d3Shape.curveStep
},
'step-after': {
curve: d3Shape.curveStepAfter
},
'step-before': {
curve: d3Shape.curveStepBefore
}
};
function curves(type, orientation, tension) {
var entry = lookup.hasOwnProperty(type) && lookup[type],
curve = null;
if (entry) {
curve = entry.curve || entry[orientation || 'vertical'];
if (entry.tension && tension != null) {
curve = curve[entry.tension](tension);
}
}
return curve;
}
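// Examples: curves('cardinal', 'vertical', 0.5) returns
// d3Shape.curveCardinal.tension(0.5); curves('monotone', 'horizontal') resolves
// to curveMonotoneY; an unknown type name yields null.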
// Path parsing and rendering code adapted from fabric.js -- Thanks!
var cmdlen = { m:2, l:2, h:1, v:1, c:6, s:4, q:4, t:2, a:7 },
regexp = [/([MLHVCSQTAZmlhvcsqtaz])/g, /###/, /(\d)([-+])/g, /\s|,|###/];
function pathParse(pathstr) {
var result = [],
path,
curr,
chunks,
parsed, param,
cmd, len, i, j, n, m;
// First, break path into command sequence
path = pathstr
.slice()
.replace(regexp[0], '###$1')
.split(regexp[1])
.slice(1);
// Next, parse each command in turn
for (i=0, n=path.length; i<n; ++i) {
curr = path[i];
chunks = curr
.slice(1)
.trim()
.replace(regexp[2],'$1###$2')
.split(regexp[3]);
cmd = curr.charAt(0);
parsed = [cmd];
for (j=0, m=chunks.length; j<m; ++j) {
if ((param = +chunks[j]) === param) { // not NaN
parsed.push(param);
}
}
len = cmdlen[cmd.toLowerCase()];
if (parsed.length-1 > len) {
for (j=1, m=parsed.length; j<m; j+=len) {
result.push([cmd].concat(parsed.slice(j, j+len)));
}
}
else {
result.push(parsed);
}
}
return result;
}
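// Example: pathParse('M1 2 L3 4') -> [['M', 1, 2], ['L', 3, 4]]. Commands that
// carry extra coordinate pairs are split into repeated commands, e.g.
// pathParse('l1 2 3 4') -> [['l', 1, 2], ['l', 3, 4]].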
var segmentCache = {};
var bezierCache = {};
var join = [].join;
// Copied from Inkscape svgtopdf, thanks!
function segments(x, y, rx, ry, large, sweep, rotateX, ox, oy) {
var key = join.call(arguments);
if (segmentCache[key]) {
return segmentCache[key];
}
var th = rotateX * (Math.PI/180);
var sin_th = Math.sin(th);
var cos_th = Math.cos(th);
rx = Math.abs(rx);
ry = Math.abs(ry);
var px = cos_th * (ox - x) * 0.5 + sin_th * (oy - y) * 0.5;
var py = cos_th * (oy - y) * 0.5 - sin_th * (ox - x) * 0.5;
var pl = (px*px) / (rx*rx) + (py*py) / (ry*ry);
if (pl > 1) {
pl = Math.sqrt(pl);
rx *= pl;
ry *= pl;
}
var a00 = cos_th / rx;
var a01 = sin_th / rx;
var a10 = (-sin_th) / ry;
var a11 = (cos_th) / ry;
var x0 = a00 * ox + a01 * oy;
var y0 = a10 * ox + a11 * oy;
var x1 = a00 * x + a01 * y;
var y1 = a10 * x + a11 * y;
var d = (x1-x0) * (x1-x0) + (y1-y0) * (y1-y0);
var sfactor_sq = 1 / d - 0.25;
if (sfactor_sq < 0) sfactor_sq = 0;
var sfactor = Math.sqrt(sfactor_sq);
if (sweep == large) sfactor = -sfactor;
var xc = 0.5 * (x0 + x1) - sfactor * (y1-y0);
var yc = 0.5 * (y0 + y1) + sfactor * (x1-x0);
var th0 = Math.atan2(y0-yc, x0-xc);
var th1 = Math.atan2(y1-yc, x1-xc);
var th_arc = th1-th0;
if (th_arc < 0 && sweep === 1){
th_arc += 2 * Math.PI;
} else if (th_arc > 0 && sweep === 0) {
th_arc -= 2 * Math.PI;
}
var segs = Math.ceil(Math.abs(th_arc / (Math.PI * 0.5 + 0.001)));
var result = [];
for (var i=0; i<segs; ++i) {
var th2 = th0 + i * th_arc / segs;
var th3 = th0 + (i+1) * th_arc / segs;
result[i] = [xc, yc, th2, th3, rx, ry, sin_th, cos_th];
}
return (segmentCache[key] = result);
}
function bezier(params) {
var key = join.call(params);
if (bezierCache[key]) {
return bezierCache[key];
}
var cx = params[0],
cy = params[1],
th0 = params[2],
th1 = params[3],
rx = params[4],
ry = params[5],
sin_th = params[6],
cos_th = params[7];
var a00 = cos_th * rx;
var a01 = -sin_th * ry;
var a10 = sin_th * rx;
var a11 = cos_th * ry;
var cos_th0 = Math.cos(th0);
var sin_th0 = Math.sin(th0);
var cos_th1 = Math.cos(th1);
var sin_th1 = Math.sin(th1);
var th_half = 0.5 * (th1 - th0);
var sin_th_h2 = Math.sin(th_half * 0.5);
var t = (8/3) * sin_th_h2 * sin_th_h2 / Math.sin(th_half);
var x1 = cx + cos_th0 - t * sin_th0;
var y1 = cy + sin_th0 + t * cos_th0;
var x3 = cx + cos_th1;
var y3 = cy + sin_th1;
var x2 = x3 + t * sin_th1;
var y2 = y3 - t * cos_th1;
return (bezierCache[key] = [
a00 * x1 + a01 * y1, a10 * x1 + a11 * y1,
a00 * x2 + a01 * y2, a10 * x2 + a11 * y2,
a00 * x3 + a01 * y3, a10 * x3 + a11 * y3
]);
}
var temp = ['l', 0, 0, 0, 0, 0, 0, 0];
function scale(current, s) {
var c = (temp[0] = current[0]);
if (c === 'a' || c === 'A') {
temp[1] = s * current[1];
temp[2] = s * current[2];
temp[6] = s * current[6];
temp[7] = s * current[7];
} else {
for (var i=1, n=current.length; i<n; ++i) {
temp[i] = s * current[i];
}
}
return temp;
}
function pathRender(context, path, l, t, s) {
var current, // current instruction
previous = null,
x = 0, // current x
y = 0, // current y
controlX = 0, // current control point x
controlY = 0, // current control point y
tempX,
tempY,
tempControlX,
tempControlY;
if (l == null) l = 0;
if (t == null) t = 0;
if (s == null) s = 1;
if (context.beginPath) context.beginPath();
for (var i=0, len=path.length; i<len; ++i) {
current = path[i];
if (s !== 1) current = scale(current, s);
switch (current[0]) { // first letter
case 'l': // lineto, relative
x += current[1];
y += current[2];
context.lineTo(x + l, y + t);
break;
case 'L': // lineto, absolute
x = current[1];
y = current[2];
context.lineTo(x + l, y + t);
break;
case 'h': // horizontal lineto, relative
x += current[1];
context.lineTo(x + l, y + t);
break;
case 'H': // horizontal lineto, absolute
x = current[1];
context.lineTo(x + l, y + t);
break;
case 'v': // vertical lineto, relative
y += current[1];
context.lineTo(x + l, y + t);
break;
case 'V': // vertical lineto, absolute
y = current[1];
context.lineTo(x + l, y + t);
break;
case 'm': // moveTo, relative
x += current[1];
y += current[2];
context.moveTo(x + l, y + t);
break;
case 'M': // moveTo, absolute
x = current[1];
y = current[2];
context.moveTo(x + l, y + t);
break;
case 'c': // bezierCurveTo, relative
tempX = x + current[5];
tempY = y + current[6];
controlX = x + current[3];
controlY = y + current[4];
context.bezierCurveTo(
x + current[1] + l, // x1
y + current[2] + t, // y1
controlX + l, // x2
controlY + t, // y2
tempX + l,
tempY + t
);
x = tempX;
y = tempY;
break;
case 'C': // bezierCurveTo, absolute
x = current[5];
y = current[6];
controlX = current[3];
controlY = current[4];
context.bezierCurveTo(
current[1] + l,
current[2] + t,
controlX + l,
controlY + t,
x + l,
y + t
);
break;
case 's': // shorthand cubic bezierCurveTo, relative
// transform to absolute x,y
tempX = x + current[3];
tempY = y + current[4];
// calculate reflection of previous control points
controlX = 2 * x - controlX;
controlY = 2 * y - controlY;
context.bezierCurveTo(
controlX + l,
controlY + t,
x + current[1] + l,
y + current[2] + t,
tempX + l,
tempY + t
);
// set control point to 2nd one of this command
// the first control point is assumed to be the reflection of
// the second control point on the previous command relative
// to the current point.
controlX = x + current[1];
controlY = y + current[2];
x = tempX;
y = tempY;
break;
case 'S': // shorthand cubic bezierCurveTo, absolute
tempX = current[3];
tempY = current[4];
// calculate reflection of previous control points
controlX = 2*x - controlX;
controlY = 2*y - controlY;
context.bezierCurveTo(
controlX + l,
controlY + t,
current[1] + l,
current[2] + t,
tempX + l,
tempY + t
);
x = tempX;
y = tempY;
// set control point to 2nd one of this command
// the first control point is assumed to be the reflection of
// the second control point on the previous command relative
// to the current point.
controlX = current[1];
controlY = current[2];
break;
case 'q': // quadraticCurveTo, relative
// transform to absolute x,y
tempX = x + current[3];
tempY = y + current[4];
controlX = x + current[1];
controlY = y + current[2];
context.quadraticCurveTo(
controlX + l,
controlY + t,
tempX + l,
tempY + t
);
x = tempX;
y = tempY;
break;
case 'Q': // quadraticCurveTo, absolute
tempX = current[3];
tempY = current[4];
context.quadraticCurveTo(
current[1] + l,
current[2] + t,
tempX + l,
tempY + t
);
x = tempX;
y = tempY;
controlX = current[1];
controlY = current[2];
break;
case 't': // shorthand quadraticCurveTo, relative
// transform to absolute x,y
tempX = x + current[1];
tempY = y + current[2];
if (previous === null || previous[0].match(/[QqTt]/) === null) {
// If there is no previous command or if the previous command was not a Q, q, T or t,
// assume the control point is coincident with the current point
controlX = x;
controlY = y;
}
else if (previous[0] === 't') {
// calculate reflection of previous control points for t
controlX = 2 * x - tempControlX;
controlY = 2 * y - tempControlY;
}
else if (previous[0] === 'q') {
// calculate reflection of previous control points for q
controlX = 2 * x - controlX;
controlY = 2 * y - controlY;
}
tempControlX = controlX;
tempControlY = controlY;
context.quadraticCurveTo(
controlX + l,
controlY + t,
tempX + l,
tempY + t
);
x = tempX;
y = tempY;
controlX = x + current[1];
controlY = y + current[2];
break;
case 'T':
tempX = current[1];
tempY = current[2];
// calculate reflection of previous control points
controlX = 2 * x - controlX;
controlY = 2 * y - controlY;
context.quadraticCurveTo(
controlX + l,
controlY + t,
tempX + l,
tempY + t
);
x = tempX;
y = tempY;
break;
case 'a':
drawArc(context, x + l, y + t, [
current[1],
current[2],
current[3],
current[4],
current[5],
current[6] + x + l,
current[7] + y + t
]);
x += current[6];
y += current[7];
break;
case 'A':
drawArc(context, x + l, y + t, [
current[1],
current[2],
current[3],
current[4],
current[5],
current[6] + l,
current[7] + t
]);
x = current[6];
y = current[7];
break;
case 'z':
case 'Z':
context.closePath();
break;
}
previous = current;
}
}
function drawArc(context, x, y, coords) {
var seg = segments(
coords[5], // end x
coords[6], // end y
coords[0], // radius x
coords[1], // radius y
coords[3], // large flag
coords[4], // sweep flag
coords[2], // rotation
x, y
);
for (var i=0; i<seg.length; ++i) {
var bez = bezier(seg[i]);
context.bezierCurveTo(bez[0], bez[1], bez[2], bez[3], bez[4], bez[5]);
}
}
var tau = 2 * Math.PI,
halfSqrt3 = Math.sqrt(3) / 2;
var builtins = {
'circle': {
draw: function(context, size) {
var r = Math.sqrt(size) / 2;
context.moveTo(r, 0);
context.arc(0, 0, r, 0, tau);
}
},
'cross': {
draw: function(context, size) {
var r = Math.sqrt(size) / 2,
s = r / 2.5;
context.moveTo(-r, -s);
context.lineTo(-r, s);
context.lineTo(-s, s);
context.lineTo(-s, r);
context.lineTo(s, r);
context.lineTo(s, s);
context.lineTo(r, s);
context.lineTo(r, -s);
context.lineTo(s, -s);
context.lineTo(s, -r);
context.lineTo(-s, -r);
context.lineTo(-s, -s);
context.closePath();
}
},
'diamond': {
draw: function(context, size) {
var r = Math.sqrt(size) / 2;
context.moveTo(-r, 0);
context.lineTo(0, -r);
context.lineTo(r, 0);
context.lineTo(0, r);
context.closePath();
}
},
'square': {
draw: function(context, size) {
var w = Math.sqrt(size),
x = -w / 2;
context.rect(x, x, w, w);
}
},
'triangle-up': {
draw: function(context, size) {
var r = Math.sqrt(size) / 2,
h = halfSqrt3 * r;
context.moveTo(0, -h);
context.lineTo(-r, h);
context.lineTo(r, h);
context.closePath();
}
},
'triangle-down': {
draw: function(context, size) {
var r = Math.sqrt(size) / 2,
h = halfSqrt3 * r;
context.moveTo(0, h);
context.lineTo(-r, -h);
context.lineTo(r, -h);
context.closePath();
}
},
'triangle-right': {
draw: function(context, size) {
var r = Math.sqrt(size) / 2,
h = halfSqrt3 * r;
context.moveTo(h, 0);
context.lineTo(-h, -r);
context.lineTo(-h, r);
context.closePath();
}
},
'triangle-left': {
draw: function(context, size) {
var r = Math.sqrt(size) / 2,
h = halfSqrt3 * r;
context.moveTo(-h, 0);
context.lineTo(h, -r);
context.lineTo(h, r);
context.closePath();
}
}
};
function symbols(_) {
return builtins.hasOwnProperty(_) ? builtins[_] : customSymbol(_);
}
var custom = {};
function customSymbol(path) {
if (!custom.hasOwnProperty(path)) {
var parsed = pathParse(path);
custom[path] = {
draw: function(context, size) {
pathRender(context, parsed, 0, 0, Math.sqrt(size) / 2);
}
};
}
return custom[path];
}
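// Sketch: symbols('circle').draw(context, 64) traces a circle of radius
// Math.sqrt(64) / 2 = 4, while any name missing from `builtins` is treated as an
// SVG path string, parsed once, and cached in `custom`.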
function rectangleX(d) {
return d.x;
}
function rectangleY(d) {
return d.y;
}
function rectangleWidth(d) {
return d.width;
}
function rectangleHeight(d) {
return d.height;
}
function constant(_) {
return function() { return _; };
}
function vg_rect() {
var x = rectangleX,
y = rectangleY,
width = rectangleWidth,
height = rectangleHeight,
cornerRadius = constant(0),
context = null;
function rectangle(_, x0, y0) {
var buffer,
x1 = x0 != null ? x0 : +x.call(this, _),
y1 = y0 != null ? y0 : +y.call(this, _),
w = +width.call(this, _),
h = +height.call(this, _),
cr = +cornerRadius.call(this, _);
if (!context) context = buffer = d3Path.path();
if (cr <= 0) {
context.rect(x1, y1, w, h);
} else {
var x2 = x1 + w,
y2 = y1 + h;
context.moveTo(x1 + cr, y1);
context.lineTo(x2 - cr, y1);
context.quadraticCurveTo(x2, y1, x2, y1 + cr);
context.lineTo(x2, y2 - cr);
context.quadraticCurveTo(x2, y2, x2 - cr, y2);
context.lineTo(x1 + cr, y2);
context.quadraticCurveTo(x1, y2, x1, y2 - cr);
context.lineTo(x1, y1 + cr);
context.quadraticCurveTo(x1, y1, x1 + cr, y1);
context.closePath();
}
if (buffer) {
context = null;
return buffer + '' || null;
}
}
rectangle.x = function(_) {
if (arguments.length) {
x = typeof _ === 'function' ? _ : constant(+_);
return rectangle;
} else {
return x;
}
};
rectangle.y = function(_) {
if (arguments.length) {
y = typeof _ === 'function' ? _ : constant(+_);
return rectangle;
} else {
return y;
}
};
rectangle.width = function(_) {
if (arguments.length) {
width = typeof _ === 'function' ? _ : constant(+_);
return rectangle;
} else {
return width;
}
};
rectangle.height = function(_) {
if (arguments.length) {
height = typeof _ === 'function' ? _ : constant(+_);
return rectangle;
} else {
return height;
}
};
rectangle.cornerRadius = function(_) {
if (arguments.length) {
cornerRadius = typeof _ === 'function' ? _ : constant(+_);
return rectangle;
} else {
return cornerRadius;
}
};
rectangle.context = function(_) {
if (arguments.length) {
context = _ == null ? null : _;
return rectangle;
} else {
return context;
}
};
return rectangle;
}
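// Usage sketch: with no context set, the generator returns an SVG path string
// built via d3-path.
// var roundedRect = vg_rect().cornerRadius(5);
// var d = roundedRect({x: 0, y: 0, width: 30, height: 20});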
var pi = Math.PI;
function vg_trail() {
var x,
y,
size,
defined,
context = null,
ready, x1, y1, r1;
function point(x2, y2, w2) {
var r2 = w2 / 2;
if (ready) {
var ux = y1 - y2,
uy = x2 - x1;
if (ux || uy) {
// get normal vector
var ud = Math.sqrt(ux * ux + uy * uy),
rx = (ux /= ud) * r1,
ry = (uy /= ud) * r1,
t = Math.atan2(uy, ux);
// draw segment
context.moveTo(x1 - rx, y1 - ry);
context.lineTo(x2 - ux * r2, y2 - uy * r2);
context.arc(x2, y2, r2, t - pi, t);
context.lineTo(x1 + rx, y1 + ry);
context.arc(x1, y1, r1, t, t + pi);
} else {
context.arc(x2, y2, r2, 0, 2*pi);
}
context.closePath();
} else {
ready = 1;
}
x1 = x2;
y1 = y2;
r1 = r2;
}
function trail(data) {
var i,
n = data.length,
d,
defined0 = false,
buffer;
if (context == null) context = buffer = d3Path.path();
for (i = 0; i <= n; ++i) {
if (!(i < n && defined(d = data[i], i, data)) === defined0) {
if (defined0 = !defined0) ready = 0;
}
if (defined0) point(+x(d, i, data), +y(d, i, data), +size(d, i, data));
}
if (buffer) {
context = null;
return buffer + '' || null;
}
}
trail.x = function(_) {
if (arguments.length) {
x = _;
return trail;
} else {
return x;
}
};
trail.y = function(_) {
if (arguments.length) {
y = _;
return trail;
} else {
return y;
}
};
trail.size = function(_) {
if (arguments.length) {
size = _;
return trail;
} else {
return size;
}
};
trail.defined = function(_) {
if (arguments.length) {
defined = _;
return trail;
} else {
return defined;
}
};
trail.context = function(_) {
if (arguments.length) {
if (_ == null) {
context = null;
} else {
context = _;
}
return trail;
} else {
return context;
}
};
return trail;
}
function x(item) { return item.x || 0; }
function y(item) { return item.y || 0; }
function w(item) { return item.width || 0; }
function ts(item) { return item.size || 1; }
function h(item) { return item.height || 0; }
function xw(item) { return (item.x || 0) + (item.width || 0); }
function yh(item) { return (item.y || 0) + (item.height || 0); }
function sa(item) { return item.startAngle || 0; }
function ea(item) { return item.endAngle || 0; }
function pa(item) { return item.padAngle || 0; }
function ir(item) { return item.innerRadius || 0; }
function or(item) { return item.outerRadius || 0; }
function cr(item) { return item.cornerRadius || 0; }
function def(item) { return !(item.defined === false); }
function size(item) { return item.size == null ? 64 : item.size; }
function type(item) { return symbols(item.shape || 'circle'); }
var arcShape = d3Shape.arc().startAngle(sa).endAngle(ea).padAngle(pa)
.innerRadius(ir).outerRadius(or).cornerRadius(cr),
areavShape = d3Shape.area().x(x).y1(y).y0(yh).defined(def),
areahShape = d3Shape.area().y(y).x1(x).x0(xw).defined(def),
lineShape = d3Shape.line().x(x).y(y).defined(def),
rectShape = vg_rect().x(x).y(y).width(w).height(h).cornerRadius(cr),
symbolShape = d3Shape.symbol().type(type).size(size),
trailShape = vg_trail().x(x).y(y).defined(def).size(ts);
function arc(context, item) {
return arcShape.context(context)(item);
}
function area(context, items) {
var item = items[0],
interp = item.interpolate || 'linear';
return (item.orient === 'horizontal' ? areahShape : areavShape)
.curve(curves(interp, item.orient, item.tension))
.context(context)(items);
}
function line(context, items) {
var item = items[0],
interp = item.interpolate || 'linear';
return lineShape.curve(curves(interp, item.orient, item.tension))
.context(context)(items);
}
function rectangle(context, item, x, y) {
return rectShape.context(context)(item, x, y);
}
function shape(context, item) {
return (item.mark.shape || item.shape)
.context(context)(item);
}
function symbol(context, item) {
return symbolShape.context(context)(item);
}
function trail(context, items) {
return trailShape.context(context)(items);
}
function boundStroke(bounds, item) {
if (item.stroke && item.opacity !== 0 && item.strokeOpacity !== 0) {
bounds.expand(item.strokeWidth != null ? +item.strokeWidth : 1);
}
return bounds;
}
var bounds,
tau$1 = Math.PI * 2,
halfPi = tau$1 / 4,
circleThreshold = tau$1 - 1e-8;
function context(_) {
bounds = _;
return context;
}
function noop() {}
function add(x, y) { bounds.add(x, y); }
context.beginPath = noop;
context.closePath = noop;
context.moveTo = add;
context.lineTo = add;
context.rect = function(x, y, w, h) {
add(x, y);
add(x + w, y + h);
};
context.quadraticCurveTo = function(x1, y1, x2, y2) {
add(x1, y1);
add(x2, y2);
};
context.bezierCurveTo = function(x1, y1, x2, y2, x3, y3) {
add(x1, y1);
add(x2, y2);
add(x3, y3);
};
context.arc = function(cx, cy, r, sa, ea, ccw) {
if (Math.abs(ea - sa) > circleThreshold) {
add(cx - r, cy - r);
add(cx + r, cy + r);
return;
}
var xmin = Infinity, xmax = -Infinity,
ymin = Infinity, ymax = -Infinity,
s, i, x, y;
function update(a) {
x = r * Math.cos(a);
y = r * Math.sin(a);
if (x < xmin) xmin = x;
if (x > xmax) xmax = x;
if (y < ymin) ymin = y;
if (y > ymax) ymax = y;
}
// Sample end points and interior points aligned with 90 degrees
update(sa);
update(ea);
if (ea !== sa) {
sa = sa % tau$1; if (sa < 0) sa += tau$1;
ea = ea % tau$1; if (ea < 0) ea += tau$1;
if (ea < sa) {
ccw = !ccw; // flip direction
s = sa; sa = ea; ea = s; // swap end-points
}
if (ccw) {
ea -= tau$1;
s = sa - (sa % halfPi);
for (i=0; i<4 && s>ea; ++i, s-=halfPi) update(s);
} else {
s = sa - (sa % halfPi) + halfPi;
for (i=0; i<4 && s<ea; ++i, s=s+halfPi) update(s);
}
}
add(cx + xmin, cy + ymin);
add(cx + xmax, cy + ymax);
};
function gradient(context, gradient, bounds) {
var w = bounds.width(),
h = bounds.height(),
x1 = bounds.x1 + gradient.x1 * w,
y1 = bounds.y1 + gradient.y1 * h,
x2 = bounds.x1 + gradient.x2 * w,
y2 = bounds.y1 + gradient.y2 * h,
stop = gradient.stops,
i = 0,
n = stop.length,
linearGradient = context.createLinearGradient(x1, y1, x2, y2);
for (; i<n; ++i) {
linearGradient.addColorStop(stop[i].offset, stop[i].color);
}
return linearGradient;
}
function color(context, item, value) {
return (value.id) ?
gradient(context, value, item.bounds) :
value;
}
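// Sketch: plain CSS color values pass straight through, e.g.
// color(context, item, 'steelblue') -> 'steelblue', while Gradient objects
// (recognized by their `id`) are turned into canvas gradients scaled to the
// item's bounds.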
function fill(context, item, opacity) {
opacity *= (item.fillOpacity==null ? 1 : item.fillOpacity);
if (opacity > 0) {
context.globalAlpha = opacity;
context.fillStyle = color(context, item, item.fill);
return true;
} else {
return false;
}
}
var Empty = [];
function stroke(context, item, opacity) {
var lw = (lw = item.strokeWidth) != null ? lw : 1;
if (lw <= 0) return false;
opacity *= (item.strokeOpacity==null ? 1 : item.strokeOpacity);
if (opacity > 0) {
context.globalAlpha = opacity;
context.strokeStyle = color(context, item, item.stroke);
context.lineWidth = lw;
context.lineCap = item.strokeCap || 'butt';
context.lineJoin = item.strokeJoin || 'miter';
context.miterLimit = item.strokeMiterLimit || 10;
if (context.setLineDash) {
context.setLineDash(item.strokeDash || Empty);
context.lineDashOffset = item.strokeDashOffset || 0;
}
return true;
} else {
return false;
}
}
function compare(a, b) {
return a.zindex - b.zindex || a.index - b.index;
}
function zorder(scene) {
if (!scene.zdirty) return scene.zitems;
var items = scene.items,
output = [], item, i, n;
for (i=0, n=items.length; i<n; ++i) {
item = items[i];
item.index = i;
if (item.zindex) output.push(item);
}
scene.zdirty = false;
return scene.zitems = output.sort(compare);
}
function visit(scene, visitor) {
var items = scene.items, i, n;
if (!items || !items.length) return;
var zitems = zorder(scene);
if (zitems && zitems.length) {
for (i=0, n=items.length; i<n; ++i) {
if (!items[i].zindex) visitor(items[i]);
}
items = zitems;
}
for (i=0, n=items.length; i<n; ++i) {
visitor(items[i]);
}
}
function pickVisit(scene, visitor) {
var items = scene.items, hit, i;
if (!items || !items.length) return null;
var zitems = zorder(scene);
if (zitems && zitems.length) items = zitems;
for (i=items.length; --i >= 0;) {
if (hit = visitor(items[i])) return hit;
}
if (items === zitems) {
for (items=scene.items, i=items.length; --i >= 0;) {
if (!items[i].zindex) {
if (hit = visitor(items[i])) return hit;
}
}
}
return null;
}
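// Note: pickVisit walks items in reverse order so the top-most (last drawn) item
// wins, testing z-indexed items first and then the remaining unordered items.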
function drawAll(path) {
return function(context, scene, bounds) {
visit(scene, function(item) {
if (!bounds || bounds.intersects(item.bounds)) {
drawPath(path, context, item, item);
}
});
};
}
function drawOne(path) {
return function(context, scene, bounds) {
if (scene.items.length && (!bounds || bounds.intersects(scene.bounds))) {
drawPath(path, context, scene.items[0], scene.items);
}
};
}
function drawPath(path, context, item, items) {
var opacity = item.opacity == null ? 1 : item.opacity;
if (opacity === 0) return;
if (path(context, items)) return;
if (item.fill && fill(context, item, opacity)) {
context.fill();
}
if (item.stroke && stroke(context, item, opacity)) {
context.stroke();
}
}
var trueFunc = function() { return true; };
function pick(test) {
if (!test) test = trueFunc;
return function(context, scene, x, y, gx, gy) {
x *= context.pixelRatio;
y *= context.pixelRatio;
return pickVisit(scene, function(item) {
var b = item.bounds;
// first hit test against bounding box
if ((b && !b.contains(gx, gy)) || !b) return;
// if in bounding box, perform more careful test
if (test(context, item, x, y, gx, gy)) return item;
});
};
}
function hitPath(path, filled) {
return function(context, o, x, y) {
var item = Array.isArray(o) ? o[0] : o,
fill = (filled == null) ? item.fill : filled,
stroke = item.stroke && context.isPointInStroke, lw, lc;
if (stroke) {
lw = item.strokeWidth;
lc = item.strokeCap;
context.lineWidth = lw != null ? lw : 1;
context.lineCap = lc != null ? lc : 'butt';
}
return path(context, o) ? false :
(fill && context.isPointInPath(x, y)) ||
(stroke && context.isPointInStroke(x, y));
};
}
function pickPath(path) {
return pick(hitPath(path));
}
function translate(x, y) {
return 'translate(' + x + ',' + y + ')';
}
function translateItem(item) {
return translate(item.x || 0, item.y || 0);
}
function markItemPath(type, shape) {
function attr(emit, item) {
emit('transform', translateItem(item));
emit('d', shape(null, item));
}
function bound(bounds, item) {
shape(context(bounds), item);
return boundStroke(bounds, item)
.translate(item.x || 0, item.y || 0);
}
function draw(context$$1, item) {
var x = item.x || 0,
y = item.y || 0;
context$$1.translate(x, y);
context$$1.beginPath();
shape(context$$1, item);
context$$1.translate(-x, -y);
}
return {
type: type,
tag: 'path',
nested: false,
attr: attr,
bound: bound,
draw: drawAll(draw),
pick: pickPath(draw)
};
}
var arc$1 = markItemPath('arc', arc);
function pickArea(a, p) {
var v = a[0].orient === 'horizontal' ? p[1] : p[0],
z = a[0].orient === 'horizontal' ? 'y' : 'x',
lo = 0,
hi = a.length;
if (hi === 1) return a[0];
while (lo < hi) {
var mid = lo + hi >>> 1;
if (a[mid][z] < v) lo = mid + 1;
else hi = mid;
}
lo = Math.max(0, lo - 1);
hi = Math.min(a.length - 1, hi);
return (v - a[lo][z]) < (a[hi][z] - v) ? a[lo] : a[hi];
}
function pickLine(a, p) {
var t = Math.pow(a[0].strokeWidth || 1, 2),
i = a.length, dx, dy, dd;
while (--i >= 0) {
if (a[i].defined === false) continue;
dx = a[i].x - p[0];
dy = a[i].y - p[1];
dd = dx * dx + dy * dy;
if (dd < t) return a[i];
}
return null;
}
function pickTrail(a, p) {
var i = a.length, dx, dy, dd;
while (--i >= 0) {
if (a[i].defined === false) continue;
dx = a[i].x - p[0];
dy = a[i].y - p[1];
dd = dx * dx + dy * dy;
dx = a[i].size || 1;
if (dd < dx*dx) return a[i];
}
return null;
}
function markMultiItemPath(type, shape, tip) {
function attr(emit, item) {
var items = item.mark.items;
if (items.length) emit('d', shape(null, items));
}
function bound(bounds, mark) {
var items = mark.items;
if (items.length === 0) {
return bounds;
} else {
shape(context(bounds), items);
return boundStroke(bounds, items[0]);
}
}
function draw(context$$1, items) {
context$$1.beginPath();
shape(context$$1, items);
}
var hit = hitPath(draw);
function pick$$1(context$$1, scene, x, y, gx, gy) {
var items = scene.items,
b = scene.bounds;
if (!items || !items.length || b && !b.contains(gx, gy)) {
return null;
}
x *= context$$1.pixelRatio;
y *= context$$1.pixelRatio;
return hit(context$$1, items, x, y) ? items[0] : null;
}
return {
type: type,
tag: 'path',
nested: true,
attr: attr,
bound: bound,
draw: drawOne(draw),
pick: pick$$1,
tip: tip
};
}
var area$1 = markMultiItemPath('area', area, pickArea);
var clip_id = 1;
function resetSVGClipId() {
clip_id = 1;
}
function clip(renderer, item, size) {
var clip = item.clip,
defs = renderer._defs,
id = item.clip_id || (item.clip_id = 'clip' + clip_id++),
c = defs.clipping[id] || (defs.clipping[id] = {id: id});
if (vegaUtil.isFunction(clip)) {
c.path = clip(null);
} else {
c.width = size.width || 0;
c.height = size.height || 0;
}
return 'url(#' + id + ')';
}
var StrokeOffset = 0.5;
function attr(emit, item) {
emit('transform', translateItem(item));
}
function background(emit, item) {
var offset = item.stroke ? StrokeOffset : 0;
emit('class', 'background');
emit('d', rectangle(null, item, offset, offset));
}
function foreground(emit, item, renderer) {
var url = item.clip ? clip(renderer, item, item) : null;
emit('clip-path', url);
}
function bound(bounds, group) {
if (!group.clip && group.items) {
var items = group.items;
for (var j=0, m=items.length; j<m; ++j) {
bounds.union(items[j].bounds);
}
}
if ((group.clip || group.width || group.height) && !group.noBound) {
bounds.add(0, 0).add(group.width || 0, group.height || 0);
}
boundStroke(bounds, group);
return bounds.translate(group.x || 0, group.y || 0);
}
function backgroundPath(context, group) {
var offset = group.stroke ? StrokeOffset : 0;
context.beginPath();
rectangle(context, group, offset, offset);
}
var hitBackground = hitPath(backgroundPath);
function draw(context, scene, bounds) {
var renderer = this;
visit(scene, function(group) {
var gx = group.x || 0,
gy = group.y || 0,
w = group.width || 0,
h = group.height || 0,
opacity;
// setup graphics context
context.save();
context.translate(gx, gy);
// draw group background
if (group.stroke || group.fill) {
opacity = group.opacity == null ? 1 : group.opacity;
if (opacity > 0) {
backgroundPath(context, group);
if (group.fill && fill(context, group, opacity)) {
context.fill();
}
if (group.stroke && stroke(context, group, opacity)) {
context.stroke();
}
}
}
// set clip and bounds
if (group.clip) {
context.beginPath();
context.rect(0, 0, w, h);
context.clip();
}
if (bounds) bounds.translate(-gx, -gy);
// draw group contents
visit(group, function(item) {
renderer.draw(context, item, bounds);
});
// restore graphics context
if (bounds) bounds.translate(gx, gy);
context.restore();
});
}
function pick$1(context, scene, x, y, gx, gy) {
if (scene.bounds && !scene.bounds.contains(gx, gy) || !scene.items) {
return null;
}
var handler = this,
cx = x * context.pixelRatio,
cy = y * context.pixelRatio;
return pickVisit(scene, function(group) {
var hit, dx, dy, b;
// first hit test against bounding box
// if a group is clipped, that should be handled by the bounds check.
b = group.bounds;
if (b && !b.contains(gx, gy)) return;
// passed bounds check, so test sub-groups
dx = (group.x || 0);
dy = (group.y || 0);
context.save();
context.translate(dx, dy);
dx = gx - dx;
dy = gy - dy;
// hit test against contained marks
hit = pickVisit(group, function(mark) {
return pickMark(mark, dx, dy)
? handler.pick(mark, x, y, dx, dy)
: null;
});
// hit test against group background
if (!hit && scene.interactive !== false
&& (group.fill || group.stroke)
&& hitBackground(context, group, cx, cy)) {
hit = group;
}
context.restore();
return hit || null;
});
}
function pickMark(mark, x, y) {
return (mark.interactive !== false || mark.marktype === 'group')
&& mark.bounds && mark.bounds.contains(x, y);
}
var group = {
type: 'group',
tag: 'g',
nested: false,
attr: attr,
bound: bound,
draw: draw,
pick: pick$1,
background: background,
foreground: foreground
};
function getImage(item, renderer) {
var image = item.image;
if (!image || image.url !== item.url) {
image = {loaded: false, width: 0, height: 0};
renderer.loadImage(item.url).then(function(image) {
item.image = image;
item.image.url = item.url;
});
}
return image;
}
function imageXOffset(align, w) {
return align === 'center' ? w / 2 : align === 'right' ? w : 0;
}
function imageYOffset(baseline, h) {
return baseline === 'middle' ? h / 2 : baseline === 'bottom' ? h : 0;
}
function attr$1(emit, item, renderer) {
var image = getImage(item, renderer),
x = item.x || 0,
y = item.y || 0,
w = (item.width != null ? item.width : image.width) || 0,
h = (item.height != null ? item.height : image.height) || 0,
a = item.aspect === false ? 'none' : 'xMidYMid';
x -= imageXOffset(item.align, w);
y -= imageYOffset(item.baseline, h);
emit('href', image.src || '', 'http://www.w3.org/1999/xlink', 'xlink:href');
emit('transform', translate(x, y));
emit('width', w);
emit('height', h);
emit('preserveAspectRatio', a);
}
function bound$1(bounds, item) {
var image = item.image,
x = item.x || 0,
y = item.y || 0,
w = (item.width != null ? item.width : (image && image.width)) || 0,
h = (item.height != null ? item.height : (image && image.height)) || 0;
x -= imageXOffset(item.align, w);
y -= imageYOffset(item.baseline, h);
return bounds.set(x, y, x + w, y + h);
}
function draw$1(context, scene, bounds) {
var renderer = this;
visit(scene, function(item) {
if (bounds && !bounds.intersects(item.bounds)) return; // bounds check
var image = getImage(item, renderer),
x = item.x || 0,
y = item.y || 0,
w = (item.width != null ? item.width : image.width) || 0,
h = (item.height != null ? item.height : image.height) || 0,
opacity, ar0, ar1, t;
x -= imageXOffset(item.align, w);
y -= imageYOffset(item.baseline, h);
if (item.aspect !== false) {
ar0 = image.width / image.height;
ar1 = item.width / item.height;
if (ar0 === ar0 && ar1 === ar1 && ar0 !== ar1) {
if (ar1 < ar0) {
t = w / ar0;
y += (h - t) / 2;
h = t;
} else {
t = h * ar0;
x += (w - t) / 2;
w = t;
}
}
}
if (image.loaded) {
context.globalAlpha = (opacity = item.opacity) != null ? opacity : 1;
context.drawImage(image, x, y, w, h);
}
});
}
var image = {
type: 'image',
tag: 'image',
nested: false,
attr: attr$1,
bound: bound$1,
draw: draw$1,
pick: pick(),
get: getImage,
xOffset: imageXOffset,
yOffset: imageYOffset
};
var line$1 = markMultiItemPath('line', line, pickLine);
function attr$2(emit, item) {
emit('transform', translateItem(item));
emit('d', item.path);
}
function path(context$$1, item) {
var path = item.path;
if (path == null) return true;
var cache = item.pathCache;
if (!cache || cache.path !== path) {
(item.pathCache = cache = pathParse(path)).path = path;
}
pathRender(context$$1, cache, item.x, item.y);
}
function bound$2(bounds, item) {
return path(context(bounds), item)
? bounds.set(0, 0, 0, 0)
: boundStroke(bounds, item);
}
var path$1 = {
type: 'path',
tag: 'path',
nested: false,
attr: attr$2,
bound: bound$2,
draw: drawAll(path),
pick: pickPath(path)
};
function attr$3(emit, item) {
emit('d', rectangle(null, item));
}
function bound$3(bounds, item) {
var x, y;
return boundStroke(bounds.set(
x = item.x || 0,
y = item.y || 0,
(x + item.width) || 0,
(y + item.height) || 0
), item);
}
function draw$2(context, item) {
context.beginPath();
rectangle(context, item);
}
var rect = {
type: 'rect',
tag: 'path',
nested: false,
attr: attr$3,
bound: bound$3,
draw: drawAll(draw$2),
pick: pickPath(draw$2)
};
function attr$4(emit, item) {
emit('transform', translateItem(item));
emit('x2', item.x2 != null ? item.x2 - (item.x||0) : 0);
emit('y2', item.y2 != null ? item.y2 - (item.y||0) : 0);
}
function bound$4(bounds, item) {
var x1, y1;
return boundStroke(bounds.set(
x1 = item.x || 0,
y1 = item.y || 0,
item.x2 != null ? item.x2 : x1,
item.y2 != null ? item.y2 : y1
), item);
}
function path$2(context, item, opacity) {
var x1, y1, x2, y2;
if (item.stroke && stroke(context, item, opacity)) {
x1 = item.x || 0;
y1 = item.y || 0;
x2 = item.x2 != null ? item.x2 : x1;
y2 = item.y2 != null ? item.y2 : y1;
context.beginPath();
context.moveTo(x1, y1);
context.lineTo(x2, y2);
return true;
}
return false;
}
function draw$3(context, scene, bounds) {
visit(scene, function(item) {
if (bounds && !bounds.intersects(item.bounds)) return; // bounds check
var opacity = item.opacity == null ? 1 : item.opacity;
if (opacity && path$2(context, item, opacity)) {
context.stroke();
}
});
}
function hit(context, item, x, y) {
if (!context.isPointInStroke) return false;
return path$2(context, item, 1) && context.isPointInStroke(x, y);
}
var rule = {
type: 'rule',
tag: 'line',
nested: false,
attr: attr$4,
bound: bound$4,
draw: draw$3,
pick: pick(hit)
};
var shape$1 = markItemPath('shape', shape);
var symbol$1 = markItemPath('symbol', symbol);
var context$1,
currFontHeight;
var textMetrics = {
height: fontSize,
measureWidth: measureWidth,
estimateWidth: estimateWidth,
width: estimateWidth,
canvas: useCanvas
};
useCanvas(true);
// make dumb, simple estimate if no canvas is available
function estimateWidth(item) {
currFontHeight = fontSize(item);
return estimate(textValue(item));
}
function estimate(text) {
return ~~(0.8 * text.length * currFontHeight);
}
// measure text width if canvas is available
function measureWidth(item) {
context$1.font = font(item);
return measure(textValue(item));
}
function measure(text) {
return context$1.measureText(text).width;
}
function fontSize(item) {
return item.fontSize != null ? item.fontSize : 11;
}
function useCanvas(use) {
context$1 = use && (context$1 = vegaCanvas.canvas(1,1)) ? context$1.getContext('2d') : null;
textMetrics.width = context$1 ? measureWidth : estimateWidth;
}
function textValue(item) {
var s = item.text;
if (s == null) {
return '';
} else {
return item.limit > 0 ? truncate(item) : s + '';
}
}
function truncate(item) {
var limit = +item.limit,
text = item.text + '',
width;
if (context$1) {
context$1.font = font(item);
width = measure;
} else {
currFontHeight = fontSize(item);
width = estimate;
}
if (width(text) < limit) return text;
var ellipsis = item.ellipsis || '\u2026',
rtl = item.dir === 'rtl',
lo = 0,
hi = text.length, mid;
limit -= width(ellipsis);
if (rtl) {
while (lo < hi) {
mid = (lo + hi >>> 1);
if (width(text.slice(mid)) > limit) lo = mid + 1;
else hi = mid;
}
return ellipsis + text.slice(lo);
} else {
while (lo < hi) {
mid = 1 + (lo + hi >>> 1);
if (width(text.slice(0, mid)) < limit) lo = mid;
else hi = mid - 1;
}
return text.slice(0, lo) + ellipsis;
}
}
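// Sketch: truncate reserves width for the ellipsis, then binary-searches the
// longest fitting prefix (or suffix when item.dir === 'rtl'). For instance,
// truncate({text: 'categorical data', limit: 40, fontSize: 11}) might return
// something like 'catego…', depending on the measured font.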
function fontFamily(item, quote) {
var font = item.font;
return (quote && font
? String(font).replace(/"/g, '\'')
: font) || 'sans-serif';
}
function font(item, quote) {
return '' +
(item.fontStyle ? item.fontStyle + ' ' : '') +
(item.fontVariant ? item.fontVariant + ' ' : '') +
(item.fontWeight ? item.fontWeight + ' ' : '') +
fontSize(item) + 'px ' +
fontFamily(item, quote);
}
function offset(item) {
// perform our own font baseline calculation
// why? not all browsers support SVG 1.1 'alignment-baseline' :(
var baseline = item.baseline,
h = fontSize(item);
return Math.round(
baseline === 'top' ? 0.79*h :
baseline === 'middle' ? 0.30*h :
baseline === 'bottom' ? -0.21*h : 0
);
}
var textAlign = {
'left': 'start',
'center': 'middle',
'right': 'end'
};
var tempBounds = new Bounds();
function attr$5(emit, item) {
var dx = item.dx || 0,
dy = (item.dy || 0) + offset(item),
x = item.x || 0,
y = item.y || 0,
a = item.angle || 0,
r = item.radius || 0, t;
if (r) {
t = (item.theta || 0) - Math.PI/2;
x += r * Math.cos(t);
y += r * Math.sin(t);
}
emit('text-anchor', textAlign[item.align] || 'start');
if (a) {
t = translate(x, y) + ' rotate('+a+')';
if (dx || dy) t += ' ' + translate(dx, dy);
} else {
t = translate(x + dx, y + dy);
}
emit('transform', t);
}
function bound$5(bounds, item, noRotate) {
var h = textMetrics.height(item),
a = item.align,
r = item.radius || 0,
x = item.x || 0,
y = item.y || 0,
dx = item.dx || 0,
dy = (item.dy || 0) + offset(item) - Math.round(0.8*h), // use 4/5 offset
w, t;
if (r) {
t = (item.theta || 0) - Math.PI/2;
x += r * Math.cos(t);
y += r * Math.sin(t);
}
// horizontal alignment
w = textMetrics.width(item);
if (a === 'center') {
dx -= (w / 2);
} else if (a === 'right') {
dx -= w;
}
bounds.set(dx+=x, dy+=y, dx+w, dy+h);
if (item.angle && !noRotate) {
bounds.rotate(item.angle*Math.PI/180, x, y);
}
return bounds.expand(noRotate || !w ? 0 : 1);
}
function draw$4(context, scene, bounds) {
visit(scene, function(item) {
var opacity, x, y, r, t, str;
if (bounds && !bounds.intersects(item.bounds)) return; // bounds check
if (!(str = textValue(item))) return; // get text string
opacity = item.opacity == null ? 1 : item.opacity;
if (opacity === 0) return;
context.font = font(item);
context.textAlign = item.align || 'left';
x = item.x || 0;
y = item.y || 0;
if ((r = item.radius)) {
t = (item.theta || 0) - Math.PI/2;
x += r * Math.cos(t);
y += r * Math.sin(t);
}
if (item.angle) {
context.save();
context.translate(x, y);
context.rotate(item.angle * Math.PI/180);
x = y = 0; // reset x, y
}
x += (item.dx || 0);
y += (item.dy || 0) + offset(item);
if (item.fill && fill(context, item, opacity)) {
context.fillText(str, x, y);
}
if (item.stroke && stroke(context, item, opacity)) {
context.strokeText(str, x, y);
}
if (item.angle) context.restore();
});
}
function hit$1(context, item, x, y, gx, gy) {
if (item.fontSize <= 0) return false;
if (!item.angle) return true; // bounds sufficient if no rotation
// project point into space of unrotated bounds
var b = bound$5(tempBounds, item, true),
a = -item.angle * Math.PI / 180,
cos = Math.cos(a),
sin = Math.sin(a),
ix = item.x,
iy = item.y,
px = cos*gx - sin*gy + (ix - ix*cos + iy*sin),
py = sin*gx + cos*gy + (iy - ix*sin - iy*cos);
return b.contains(px, py);
}
var text = {
type: 'text',
tag: 'text',
nested: false,
attr: attr$5,
bound: bound$5,
draw: draw$4,
pick: pick(hit$1)
};
var trail$1 = markMultiItemPath('trail', trail, pickTrail);
var marks = {
arc: arc$1,
area: area$1,
group: group,
image: image,
line: line$1,
path: path$1,
rect: rect,
rule: rule,
shape: shape$1,
symbol: symbol$1,
text: text,
trail: trail$1
};
function boundItem(item, func, opt) {
var type = marks[item.mark.marktype],
bound = func || type.bound;
if (type.nested) item = item.mark;
return bound(item.bounds || (item.bounds = new Bounds()), item, opt);
}
var DUMMY = {mark: null};
function boundMark(mark, bounds, opt) {
var type = marks[mark.marktype],
bound = type.bound,
items = mark.items,
hasItems = items && items.length,
i, n, item, b;
if (type.nested) {
if (hasItems) {
item = items[0];
} else {
// no items, fake it
DUMMY.mark = mark;
item = DUMMY;
}
b = boundItem(item, bound, opt);
bounds = bounds && bounds.union(b) || b;
return bounds;
}
bounds = bounds
|| mark.bounds && mark.bounds.clear()
|| new Bounds();
if (hasItems) {
for (i=0, n=items.length; i<n; ++i) {
bounds.union(boundItem(items[i], bound, opt));
}
}
return mark.bounds = bounds;
}
var keys = [
'marktype', 'name', 'role', 'interactive', 'clip', 'items', 'zindex',
'x', 'y', 'width', 'height', 'align', 'baseline', // layout
'fill', 'fillOpacity', 'opacity', // fill
'stroke', 'strokeOpacity', 'strokeWidth', 'strokeCap', // stroke
'strokeDash', 'strokeDashOffset', // stroke dash
'startAngle', 'endAngle', 'innerRadius', 'outerRadius', // arc
'cornerRadius', 'padAngle', // arc, rect
'interpolate', 'tension', 'orient', 'defined', // area, line
'url', // image
'path', // path
'x2', 'y2', // rule
'size', 'shape', // symbol
'text', 'angle', 'theta', 'radius', 'dx', 'dy', // text
'font', 'fontSize', 'fontWeight', 'fontStyle', 'fontVariant' // font
];
function sceneToJSON(scene, indent) {
return JSON.stringify(scene, keys, indent);
}
function sceneFromJSON(json) {
var scene = (typeof json === 'string' ? JSON.parse(json) : json);
return initialize(scene);
}
function initialize(scene) {
var type = scene.marktype,
items = scene.items,
parent, i, n;
if (items) {
for (i=0, n=items.length; i<n; ++i) {
parent = type ? 'mark' : 'group';
items[i][parent] = scene;
if (items[i].zindex) items[i][parent].zdirty = true;
if ('group' === (type || parent)) initialize(items[i]);
}
}
if (type) boundMark(scene);
return scene;
}
function Scenegraph(scene) {
if (arguments.length) {
this.root = sceneFromJSON(scene);
} else {
this.root = createMark({
marktype: 'group',
name: 'root',
role: 'frame'
});
this.root.items = [new GroupItem(this.root)];
}
}
var prototype$2 = Scenegraph.prototype;
prototype$2.toJSON = function(indent) {
return sceneToJSON(this.root, indent || 0);
};
prototype$2.mark = function(markdef, group, index) {
group = group || this.root.items[0];
var mark = createMark(markdef, group);
group.items[index] = mark;
if (mark.zindex) mark.group.zdirty = true;
return mark;
};
function createMark(def, group) {
return {
bounds: new Bounds(),
clip: !!def.clip,
group: group,
interactive: def.interactive === false ? false : true,
items: [],
marktype: def.marktype,
name: def.name || undefined,
role: def.role || undefined,
zindex: def.zindex || 0
};
}
// create a new DOM element
function domCreate(doc, tag, ns) {
if (!doc && typeof document !== 'undefined' && document.createElement) {
doc = document;
}
return doc
? (ns ? doc.createElementNS(ns, tag) : doc.createElement(tag))
: null;
}
// find first child element with matching tag
function domFind(el, tag) {
tag = tag.toLowerCase();
var nodes = el.childNodes, i = 0, n = nodes.length;
for (; i<n; ++i) if (nodes[i].tagName.toLowerCase() === tag) {
return nodes[i];
}
}
// retrieve child element at given index
// create & insert if doesn't exist or if tags do not match
function domChild(el, index, tag, ns) {
var a = el.childNodes[index], b;
if (!a || a.tagName.toLowerCase() !== tag.toLowerCase()) {
b = a || null;
a = domCreate(el.ownerDocument, tag, ns);
el.insertBefore(a, b);
}
return a;
}
// remove all child elements at or above the given index
function domClear(el, index) {
var nodes = el.childNodes,
curr = nodes.length;
while (curr > index) el.removeChild(nodes[--curr]);
return el;
}
// generate css class name for mark
function cssClass(mark) {
return 'mark-' + mark.marktype
+ (mark.role ? ' role-' + mark.role : '')
+ (mark.name ? ' ' + mark.name : '');
}
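// Illustrative sketch (not part of the original bundle): a mark such as
// {marktype: 'rect', role: 'mark', name: 'bars'} yields the class string
// 'mark-rect role-mark bars'. The helper name is hypothetical and is never
// invoked by the library code.
function exampleCssClass() {
  return cssClass({marktype: 'rect', role: 'mark', name: 'bars'});
}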
function point(event, el) {
var rect = el.getBoundingClientRect();
return [
event.clientX - rect.left - (el.clientLeft || 0),
event.clientY - rect.top - (el.clientTop || 0)
];
}
function resolveItem(item, event, el, origin) {
var mark = item && item.mark,
mdef, p;
if (mark && (mdef = marks[mark.marktype]).tip) {
p = point(event, el);
p[0] -= origin[0];
p[1] -= origin[1];
while (item = item.mark.group) {
p[0] -= item.x || 0;
p[1] -= item.y || 0;
}
item = mdef.tip(mark.items, p);
}
return item;
}
/**
* Create a new Handler instance.
* @param {object} [customLoader] - Optional loader instance for
* href URL sanitization. If not specified, a standard loader
* instance will be generated.
* @param {function} [customTooltip] - Optional tooltip handler
* function for custom tooltip display.
* @constructor
*/
function Handler(customLoader, customTooltip) {
this._active = null;
this._handlers = {};
this._loader = customLoader || vegaLoader.loader();
this._tooltip = customTooltip || defaultTooltip;
}
// The default tooltip display handler.
// Sets the HTML title attribute on the visualization container.
function defaultTooltip(handler, event, item, value) {
handler.element().setAttribute('title', value || '');
}
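// Illustrative sketch (not part of the original bundle): a custom tooltip
// handler with the same (handler, event, item, value) signature consumed
// above, logging instead of setting the title attribute. The name is
// hypothetical; it would be passed as the second Handler argument, e.g.
// new Handler(null, exampleLogTooltip).
function exampleLogTooltip(handler, event, item, value) {
  if (value != null) console.log('tooltip:', value);
}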
var prototype$3 = Handler.prototype;
/**
* Initialize a new Handler instance.
* @param {DOMElement} el - The containing DOM element for the display.
* @param {Array<number>} origin - The origin of the display, in pixels.
* The coordinate system will be translated to this point.
* @param {object} [obj] - Optional context object that should serve as
* the "this" context for event callbacks.
* @return {Handler} - This handler instance.
*/
prototype$3.initialize = function(el, origin, obj) {
this._el = el;
this._obj = obj || null;
return this.origin(origin);
};
/**
* Returns the parent container element for a visualization.
* @return {DOMElement} - The containing DOM element.
*/
prototype$3.element = function() {
return this._el;
};
/**
* Returns the scene element (e.g., canvas or SVG) of the visualization
* Subclasses must override if the first child is not the scene element.
* @return {DOMElement} - The scene (e.g., canvas or SVG) element.
*/
prototype$3.canvas = function() {
return this._el && this._el.firstChild;
};
/**
* Get / set the origin coordinates of the visualization.
*/
prototype$3.origin = function(origin) {
if (arguments.length) {
this._origin = origin || [0, 0];
return this;
} else {
return this._origin.slice();
}
};
/**
* Get / set the scenegraph root.
*/
prototype$3.scene = function(scene) {
if (!arguments.length) return this._scene;
this._scene = scene;
return this;
};
/**
* Add an event handler. Subclasses should override this method.
*/
prototype$3.on = function(/*type, handler*/) {};
/**
* Remove an event handler. Subclasses should override this method.
*/
prototype$3.off = function(/*type, handler*/) {};
/**
* Utility method for finding the array index of an event handler.
* @param {Array} h - An array of registered event handlers.
* @param {string} type - The event type.
* @param {function} handler - The event handler instance to find.
* @return {number} - The handler's array index or -1 if not registered.
*/
prototype$3._handlerIndex = function(h, type, handler) {
for (var i = h ? h.length : 0; --i>=0;) {
if (h[i].type === type && (!handler || h[i].handler === handler)) {
return i;
}
}
return -1;
};
/**
* Returns an array with registered event handlers.
* @param {string} [type] - The event type to query. Any annotations
* are ignored; for example, for the argument "click.foo", ".foo" will
* be ignored and the method returns all "click" handlers. If type is
* null or unspecified, this method returns handlers for all types.
* @return {Array} - A new array containing all registered event handlers.
*/
prototype$3.handlers = function(type) {
var h = this._handlers, a = [], k;
if (type) {
a.push.apply(a, h[this.eventName(type)]);
} else {
for (k in h) { a.push.apply(a, h[k]); }
}
return a;
};
/**
* Parses an event name string to return the specific event type.
* For example, given "click.foo" returns "click"
* @param {string} name - The input event type string.
* @return {string} - A string with the event type only.
*/
prototype$3.eventName = function(name) {
var i = name.indexOf('.');
return i < 0 ? name : name.slice(0,i);
};
/**
* Handle hyperlink navigation in response to an item.href value.
* @param {Event} event - The event triggering hyperlink navigation.
* @param {Item} item - The scenegraph item.
* @param {string} href - The URL to navigate to.
*/
prototype$3.handleHref = function(event, item, href) {
this._loader
.sanitize(href, {context:'href'})
.then(function(opt) {
var e = new MouseEvent(event.type, event),
a = domCreate(null, 'a');
for (var name in opt) a.setAttribute(name, opt[name]);
a.dispatchEvent(e);
})
.catch(function() { /* do nothing */ });
};
/**
* Handle tooltip display in response to an item.tooltip value.
* @param {Event} event - The event triggering tooltip display.
* @param {Item} item - The scenegraph item.
* @param {boolean} show - A boolean flag indicating whether
* to show or hide a tooltip for the given item.
*/
prototype$3.handleTooltip = function(event, item, show) {
if (item && item.tooltip != null) {
item = resolveItem(item, event, this.canvas(), this._origin);
var value = (show && item && item.tooltip) || null;
this._tooltip.call(this._obj, this, event, item, value);
}
};
/**
* Returns the size of a scenegraph item and its position relative
* to the viewport.
* @param {Item} item - The scenegraph item.
* @return {object} - A bounding box object (compatible with the
 * DOMRect type) consisting of x, y, width, height, top, left,
* right, and bottom properties.
*/
prototype$3.getItemBoundingClientRect = function(item) {
  var el = this.canvas();
  if (!el) return;
  var rect = el.getBoundingClientRect(),
origin = this._origin,
itemBounds = item.bounds,
x = itemBounds.x1 + origin[0] + rect.left,
y = itemBounds.y1 + origin[1] + rect.top,
w = itemBounds.width(),
h = itemBounds.height();
// translate coordinate for each parent group
while (item.mark && (item = item.mark.group)) {
x += item.x || 0;
y += item.y || 0;
}
// return DOMRect-compatible bounding box
return {
x: x,
y: y,
width: w,
height: h,
left: x,
top: y,
right: x + w,
bottom: y + h
};
};
/**
* Create a new Renderer instance.
* @param {object} [loader] - Optional loader instance for
* image and href URL sanitization. If not specified, a
* standard loader instance will be generated.
* @constructor
*/
function Renderer(loader) {
this._el = null;
this._bgcolor = null;
this._loader = new ResourceLoader(loader);
}
var prototype$4 = Renderer.prototype;
/**
* Initialize a new Renderer instance.
* @param {DOMElement} el - The containing DOM element for the display.
* @param {number} width - The coordinate width of the display, in pixels.
* @param {number} height - The coordinate height of the display, in pixels.
* @param {Array<number>} origin - The origin of the display, in pixels.
* The coordinate system will be translated to this point.
* @param {number} [scaleFactor=1] - Optional scaleFactor by which to multiply
* the width and height to determine the final pixel size.
* @return {Renderer} - This renderer instance.
*/
prototype$4.initialize = function(el, width, height, origin, scaleFactor) {
this._el = el;
return this.resize(width, height, origin, scaleFactor);
};
/**
* Returns the parent container element for a visualization.
* @return {DOMElement} - The containing DOM element.
*/
prototype$4.element = function() {
return this._el;
};
/**
* Returns the scene element (e.g., canvas or SVG) of the visualization
* Subclasses must override if the first child is not the scene element.
* @return {DOMElement} - The scene (e.g., canvas or SVG) element.
*/
prototype$4.canvas = function() {
return this._el && this._el.firstChild;
};
/**
* Get / set the background color.
*/
prototype$4.background = function(bgcolor) {
if (arguments.length === 0) return this._bgcolor;
this._bgcolor = bgcolor;
return this;
};
/**
* Resize the display.
* @param {number} width - The new coordinate width of the display, in pixels.
* @param {number} height - The new coordinate height of the display, in pixels.
* @param {Array<number>} origin - The new origin of the display, in pixels.
* The coordinate system will be translated to this point.
* @param {number} [scaleFactor=1] - Optional scaleFactor by which to multiply
* the width and height to determine the final pixel size.
 * @return {Renderer} - This renderer instance.
*/
prototype$4.resize = function(width, height, origin, scaleFactor) {
this._width = width;
this._height = height;
this._origin = origin || [0, 0];
this._scale = scaleFactor || 1;
return this;
};
/**
* Report a dirty item whose bounds should be redrawn.
* This base class method does nothing. Subclasses that perform
 * incremental rendering should implement this method.
* @param {Item} item - The dirty item whose bounds should be redrawn.
*/
prototype$4.dirty = function(/*item*/) {
};
/**
* Render an input scenegraph, potentially with a set of dirty items.
* This method will perform an immediate rendering with available resources.
 * The renderer may also need to load images in order to produce a complete
 * render. This process can lead to asynchronous re-rendering of the scene
* after this method returns. To receive notification when rendering is
* complete, use the renderAsync method instead.
* @param {object} scene - The root mark of a scenegraph to render.
* @return {Renderer} - This renderer instance.
*/
prototype$4.render = function(scene) {
var r = this;
// bind arguments into a render call, and cache it
// this function may be subsequently called for async redraw
r._call = function() { r._render(scene); };
// invoke the renderer
r._call();
// clear the cached call for garbage collection
// async redraws will stash their own copy
r._call = null;
return r;
};
/**
* Internal rendering method. Renderer subclasses should override this
* method to actually perform rendering.
* @param {object} scene - The root mark of a scenegraph to render.
*/
prototype$4._render = function(/*scene*/) {
// subclasses to override
};
/**
* Asynchronous rendering method. Similar to render, but returns a Promise
* that resolves when all rendering is completed. Sometimes a renderer must
* perform image loading to get a complete rendering. The returned
* Promise will not resolve until this process completes.
* @param {object} scene - The root mark of a scenegraph to render.
* @return {Promise} - A Promise that resolves when rendering is complete.
*/
prototype$4.renderAsync = function(scene) {
var r = this.render(scene);
return this._ready
? this._ready.then(function() { return r; })
: Promise.resolve(r);
};
/**
* Internal method for asynchronous resource loading.
* Proxies method calls to the ImageLoader, and tracks loading
* progress to invoke a re-render once complete.
* @param {string} method - The method name to invoke on the ImageLoader.
* @param {string} uri - The URI for the requested resource.
* @return {Promise} - A Promise that resolves to the requested resource.
*/
prototype$4._load = function(method, uri) {
var r = this,
p = r._loader[method](uri);
if (!r._ready) {
// re-render the scene when loading completes
var call = r._call;
r._ready = r._loader.ready()
.then(function(redraw) {
if (redraw) call();
r._ready = null;
});
}
return p;
};
/**
* Sanitize a URL to include as a hyperlink in the rendered scene.
* This method proxies a call to ImageLoader.sanitizeURL, but also tracks
* image loading progress and invokes a re-render once complete.
* @param {string} uri - The URI string to sanitize.
* @return {Promise} - A Promise that resolves to the sanitized URL.
*/
prototype$4.sanitizeURL = function(uri) {
return this._load('sanitizeURL', uri);
};
/**
* Requests an image to include in the rendered scene.
* This method proxies a call to ImageLoader.loadImage, but also tracks
* image loading progress and invokes a re-render once complete.
* @param {string} uri - The URI string of the image.
* @return {Promise} - A Promise that resolves to the loaded Image.
*/
prototype$4.loadImage = function(uri) {
return this._load('loadImage', uri);
};
var Events = [
'keydown',
'keypress',
'keyup',
'dragenter',
'dragleave',
'dragover',
'mousedown',
'mouseup',
'mousemove',
'mouseout',
'mouseover',
'click',
'dblclick',
'wheel',
'mousewheel',
'touchstart',
'touchmove',
'touchend'
];
var TooltipShowEvent = 'mousemove';
var TooltipHideEvent = 'mouseout';
var HrefEvent = 'click';
function CanvasHandler(loader, tooltip) {
Handler.call(this, loader, tooltip);
this._down = null;
this._touch = null;
this._first = true;
}
var prototype$5 = vegaUtil.inherits(CanvasHandler, Handler);
prototype$5.initialize = function(el, origin, obj) {
// add event listeners
var canvas = this._canvas = el && domFind(el, 'canvas');
if (canvas) {
var that = this;
this.events.forEach(function(type) {
canvas.addEventListener(type, function(evt) {
if (prototype$5[type]) {
prototype$5[type].call(that, evt);
} else {
that.fire(type, evt);
}
});
});
}
return Handler.prototype.initialize.call(this, el, origin, obj);
};
// return the backing canvas instance
prototype$5.canvas = function() {
return this._canvas;
};
// retrieve the current canvas context
prototype$5.context = function() {
return this._canvas.getContext('2d');
};
// supported events
prototype$5.events = Events;
// to keep old versions of firefox happy
prototype$5.DOMMouseScroll = function(evt) {
this.fire('mousewheel', evt);
};
function move(moveEvent, overEvent, outEvent) {
return function(evt) {
var a = this._active,
p = this.pickEvent(evt);
if (p === a) {
// active item and picked item are the same
this.fire(moveEvent, evt); // fire move
} else {
// active item and picked item are different
if (!a || !a.exit) {
// fire out for prior active item
// suppress if active item was removed from scene
this.fire(outEvent, evt);
}
this._active = p; // set new active item
this.fire(overEvent, evt); // fire over for new active item
this.fire(moveEvent, evt); // fire move for new active item
}
};
}
function inactive(type) {
return function(evt) {
this.fire(type, evt);
this._active = null;
};
}
prototype$5.mousemove = move('mousemove', 'mouseover', 'mouseout');
prototype$5.dragover = move('dragover', 'dragenter', 'dragleave');
prototype$5.mouseout = inactive('mouseout');
prototype$5.dragleave = inactive('dragleave');
prototype$5.mousedown = function(evt) {
this._down = this._active;
this.fire('mousedown', evt);
};
prototype$5.click = function(evt) {
if (this._down === this._active) {
this.fire('click', evt);
this._down = null;
}
};
prototype$5.touchstart = function(evt) {
this._touch = this.pickEvent(evt.changedTouches[0]);
if (this._first) {
this._active = this._touch;
this._first = false;
}
this.fire('touchstart', evt, true);
};
prototype$5.touchmove = function(evt) {
this.fire('touchmove', evt, true);
};
prototype$5.touchend = function(evt) {
this.fire('touchend', evt, true);
this._touch = null;
};
// fire an event
prototype$5.fire = function(type, evt, touch) {
var a = touch ? this._touch : this._active,
h = this._handlers[type], i, len;
// set event type relative to scenegraph items
evt.vegaType = type;
// handle hyperlinks and tooltips first
if (type === HrefEvent && a && a.href) {
this.handleHref(evt, a, a.href);
} else if (type === TooltipShowEvent || type === TooltipHideEvent) {
this.handleTooltip(evt, a, type !== TooltipHideEvent);
}
// invoke all registered handlers
if (h) {
for (i=0, len=h.length; i<len; ++i) {
h[i].handler.call(this._obj, evt, a);
}
}
};
// add an event handler
prototype$5.on = function(type, handler) {
var name = this.eventName(type),
h = this._handlers,
i = this._handlerIndex(h[name], type, handler);
if (i < 0) {
(h[name] || (h[name] = [])).push({
type: type,
handler: handler
});
}
return this;
};
// remove an event handler
prototype$5.off = function(type, handler) {
var name = this.eventName(type),
h = this._handlers[name],
i = this._handlerIndex(h, type, handler);
if (i >= 0) {
h.splice(i, 1);
}
return this;
};
prototype$5.pickEvent = function(evt) {
var p = point(evt, this._canvas),
o = this._origin;
return this.pick(this._scene, p[0], p[1], p[0] - o[0], p[1] - o[1]);
};
// find the scenegraph item at the current mouse position
// x, y -- the absolute x, y mouse coordinates on the canvas element
// gx, gy -- the relative coordinates within the current group
prototype$5.pick = function(scene, x, y, gx, gy) {
var g = this.context(),
mark = marks[scene.marktype];
return mark.pick.call(this, g, scene, x, y, gx, gy);
};
function clip$1(context, scene) {
var clip = scene.clip;
context.save();
context.beginPath();
if (vegaUtil.isFunction(clip)) {
clip(context);
} else {
var group = scene.group;
context.rect(0, 0, group.width || 0, group.height || 0);
}
context.clip();
}
function devicePixelRatio() {
return typeof window !== 'undefined' ? window.devicePixelRatio || 1 : 1;
}
var pixelRatio = devicePixelRatio();
function resize(canvas, width, height, origin, scaleFactor) {
var inDOM = typeof HTMLElement !== 'undefined'
&& canvas instanceof HTMLElement
&& canvas.parentNode != null;
var context = canvas.getContext('2d'),
ratio = inDOM ? pixelRatio : scaleFactor;
canvas.width = width * ratio;
canvas.height = height * ratio;
if (inDOM && ratio !== 1) {
canvas.style.width = width + 'px';
canvas.style.height = height + 'px';
}
context.pixelRatio = ratio;
context.setTransform(
ratio, 0, 0, ratio,
ratio * origin[0],
ratio * origin[1]
);
return canvas;
}
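// Illustrative note (not part of the original bundle): on a display with
// devicePixelRatio 2, resize(canvas, 400, 300, [0, 0]) for an in-DOM canvas
// allocates an 800x600 pixel buffer, styles it to 400x300 CSS pixels, and
// scales the context transform by 2, so callers keep drawing in logical units.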
function CanvasRenderer(loader) {
Renderer.call(this, loader);
this._redraw = false;
this._dirty = new Bounds();
}
var prototype$6 = vegaUtil.inherits(CanvasRenderer, Renderer),
base = Renderer.prototype,
tempBounds$1 = new Bounds();
prototype$6.initialize = function(el, width, height, origin, scaleFactor) {
this._canvas = vegaCanvas.canvas(1, 1); // instantiate a small canvas
if (el) {
domClear(el, 0).appendChild(this._canvas);
this._canvas.setAttribute('class', 'marks');
}
// this method will invoke resize to size the canvas appropriately
return base.initialize.call(this, el, width, height, origin, scaleFactor);
};
prototype$6.resize = function(width, height, origin, scaleFactor) {
base.resize.call(this, width, height, origin, scaleFactor);
resize(this._canvas, this._width, this._height, this._origin, this._scale);
this._redraw = true;
return this;
};
prototype$6.canvas = function() {
return this._canvas;
};
prototype$6.context = function() {
return this._canvas ? this._canvas.getContext('2d') : null;
};
prototype$6.dirty = function(item) {
var b = translate$1(item.bounds, item.mark.group);
this._dirty.union(b);
};
function clipToBounds(g, b, origin) {
// expand bounds by 1 pixel, then round to pixel boundaries
b.expand(1).round();
  // to avoid artifacts, translate if the origin has fractional pixels
b.translate(-(origin[0] % 1), -(origin[1] % 1));
// set clipping path
g.beginPath();
g.rect(b.x1, b.y1, b.width(), b.height());
g.clip();
return b;
}
function | (bounds, group) {
if (group == null) return bounds;
var b = tempBounds$1.clear().union(bounds);
for (; group != null; group = group.mark.group) {
b.translate(group.x || 0, group.y || 0);
}
return b;
}
prototype$6._render = function(scene) {
var g = this.context(),
o = this._origin,
w = this._width,
h = this._height,
b = this._dirty;
// setup
g.save();
if (this._redraw || b.empty()) {
this._redraw = false;
b = null;
} else {
b = clipToBounds(g, b, o);
}
this.clear(-o[0], -o[1], w, h);
// render
this.draw(g, scene, b);
// takedown
g.restore();
this._dirty.clear();
return this;
};
prototype$6.draw = function(ctx, scene, bounds) {
var mark = marks[scene.marktype];
if (scene.clip) clip$1(ctx, scene);
mark.draw.call(this, ctx, scene, bounds);
if (scene.clip) ctx.restore();
};
prototype$6.clear = function(x, y, w, h) {
var g = this.context();
g.clearRect(x, y, w, h);
if (this._bgcolor != null) {
g.fillStyle = this._bgcolor;
g.fillRect(x, y, w, h);
}
};
function SVGHandler(loader, tooltip) {
Handler.call(this, loader, tooltip);
var h = this;
h._hrefHandler = listener(h, function(evt, item) {
if (item && item.href) h.handleHref(evt, item, item.href);
});
h._tooltipHandler = listener(h, function(evt, item) {
h.handleTooltip(evt, item, evt.type !== TooltipHideEvent);
});
}
var prototype$7 = vegaUtil.inherits(SVGHandler, Handler);
prototype$7.initialize = function(el, origin, obj) {
var svg = this._svg;
if (svg) {
svg.removeEventListener(HrefEvent, this._hrefHandler);
svg.removeEventListener(TooltipShowEvent, this._tooltipHandler);
svg.removeEventListener(TooltipHideEvent, this._tooltipHandler);
}
this._svg = svg = el && domFind(el, 'svg');
if (svg) {
svg.addEventListener(HrefEvent, this._hrefHandler);
svg.addEventListener(TooltipShowEvent, this._tooltipHandler);
svg.addEventListener(TooltipHideEvent, this._tooltipHandler);
}
return Handler.prototype.initialize.call(this, el, origin, obj);
};
prototype$7.canvas = function() {
return this._svg;
};
// wrap an event listener for the SVG DOM
function listener(context, handler) {
return function(evt) {
var target = evt.target,
item = target.__data__;
evt.vegaType = evt.type;
item = Array.isArray(item) ? item[0] : item;
handler.call(context._obj, evt, item);
};
}
// add an event handler
prototype$7.on = function(type, handler) {
var name = this.eventName(type),
h = this._handlers,
i = this._handlerIndex(h[name], type, handler);
if (i < 0) {
var x = {
type: type,
handler: handler,
listener: listener(this, handler)
};
(h[name] || (h[name] = [])).push(x);
if (this._svg) {
this._svg.addEventListener(name, x.listener);
}
}
return this;
};
// remove an event handler
prototype$7.off = function(type, handler) {
var name = this.eventName(type),
h = this._handlers[name],
i = this._handlerIndex(h, type, handler);
if (i >= 0) {
if (this._svg) {
this._svg.removeEventListener(name, h[i].listener);
}
h.splice(i, 1);
}
return this;
};
// generate string for an opening xml tag
// tag: the name of the xml tag
// attr: hash of attribute name-value pairs to include
// raw: additional raw string to include in tag markup
function openTag(tag, attr, raw) {
var s = '<' + tag, key, val;
if (attr) {
for (key in attr) {
val = attr[key];
if (val != null) {
s += ' ' + key + '="' + val + '"';
}
}
}
if (raw) s += ' ' + raw;
return s + '>';
}
// generate string for closing xml tag
// tag: the name of the xml tag
function closeTag(tag) {
return '</' + tag + '>';
}
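// Illustrative sketch (not part of the original bundle): composing the two
// helpers above. The function name is hypothetical and is never invoked.
function exampleOpenCloseTag() {
  // openTag('rect', {width: 10, height: 5}) -> '<rect width="10" height="5">'
  return openTag('rect', {width: 10, height: 5}) + closeTag('rect');
}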
var metadata = {
'version': '1.1',
'xmlns': 'http://www.w3.org/2000/svg',
'xmlns:xlink': 'http://www.w3.org/1999/xlink'
};
var styles = {
'fill': 'fill',
'fillOpacity': 'fill-opacity',
'stroke': 'stroke',
'strokeOpacity': 'stroke-opacity',
'strokeWidth': 'stroke-width',
'strokeCap': 'stroke-linecap',
'strokeJoin': 'stroke-linejoin',
'strokeDash': 'stroke-dasharray',
'strokeDashOffset': 'stroke-dashoffset',
'strokeMiterLimit': 'stroke-miterlimit',
'opacity': 'opacity'
};
var styleProperties = Object.keys(styles);
var ns = metadata.xmlns;
function SVGRenderer(loader) {
Renderer.call(this, loader);
this._dirtyID = 1;
this._dirty = [];
this._svg = null;
this._root = null;
this._defs = null;
}
var prototype$8 = vegaUtil.inherits(SVGRenderer, Renderer);
var base$1 = Renderer.prototype;
prototype$8.initialize = function(el, width, height, padding) {
if (el) {
this._svg = domChild(el, 0, 'svg', ns);
this._svg.setAttribute('class', 'marks');
domClear(el, 1);
// set the svg root group
this._root = domChild(this._svg, 0, 'g', ns);
domClear(this._svg, 1);
}
// create the svg definitions cache
this._defs = {
gradient: {},
clipping: {}
};
// set background color if defined
this.background(this._bgcolor);
return base$1.initialize.call(this, el, width, height, padding);
};
prototype$8.background = function(bgcolor) {
if (arguments.length && this._svg) {
this._svg.style.setProperty('background-color', bgcolor);
}
return base$1.background.apply(this, arguments);
};
prototype$8.resize = function(width, height, origin, scaleFactor) {
base$1.resize.call(this, width, height, origin, scaleFactor);
if (this._svg) {
this._svg.setAttribute('width', this._width * this._scale);
this._svg.setAttribute('height', this._height * this._scale);
this._svg.setAttribute('viewBox', '0 0 ' + this._width + ' ' + this._height);
this._root.setAttribute('transform', 'translate(' + this._origin + ')');
}
this._dirty = [];
return this;
};
prototype$8.canvas = function() {
return this._svg;
};
prototype$8.svg = function() {
if (!this._svg) return null;
var attr = {
class: 'marks',
width: this._width * this._scale,
height: this._height * this._scale,
viewBox: '0 0 ' + this._width + ' ' + this._height
};
for (var key in metadata) {
attr[key] = metadata[key];
}
var bg = !this._bgcolor ? ''
: (openTag('rect', {
width: this._width,
height: this._height,
style: 'fill: ' + this._bgcolor + ';'
}) + closeTag('rect'));
return openTag('svg', attr) + bg + this._svg.innerHTML + closeTag('svg');
};
// -- Render entry point --
prototype$8._render = function(scene) {
// perform spot updates and re-render markup
if (this._dirtyCheck()) {
if (this._dirtyAll) this._resetDefs();
this.draw(this._root, scene);
domClear(this._root, 1);
}
this.updateDefs();
this._dirty = [];
++this._dirtyID;
return this;
};
// -- Manage SVG definitions ('defs') block --
prototype$8.updateDefs = function() {
var svg = this._svg,
defs = this._defs,
el = defs.el,
index = 0, id;
for (id in defs.gradient) {
if (!el) defs.el = (el = domChild(svg, 0, 'defs', ns));
updateGradient(el, defs.gradient[id], index++);
}
for (id in defs.clipping) {
if (!el) defs.el = (el = domChild(svg, 0, 'defs', ns));
updateClipping(el, defs.clipping[id], index++);
}
// clean-up
if (el) {
if (index === 0) {
svg.removeChild(el);
defs.el = null;
} else {
domClear(el, index);
}
}
};
function updateGradient(el, grad, index) {
var i, n, stop;
el = domChild(el, index, 'linearGradient', ns);
el.setAttribute('id', grad.id);
el.setAttribute('x1', grad.x1);
el.setAttribute('x2', grad.x2);
el.setAttribute('y1', grad.y1);
el.setAttribute('y2', grad.y2);
for (i=0, n=grad.stops.length; i<n; ++i) {
stop = domChild(el, i, 'stop', ns);
stop.setAttribute('offset', grad.stops[i].offset);
stop.setAttribute('stop-color', grad.stops[i].color);
}
domClear(el, i);
}
function updateClipping(el, clip$$1, index) {
var mask;
el = domChild(el, index, 'clipPath', ns);
el.setAttribute('id', clip$$1.id);
if (clip$$1.path) {
mask = domChild(el, 0, 'path', ns);
mask.setAttribute('d', clip$$1.path);
} else {
mask = domChild(el, 0, 'rect', ns);
mask.setAttribute('x', 0);
mask.setAttribute('y', 0);
mask.setAttribute('width', clip$$1.width);
mask.setAttribute('height', clip$$1.height);
}
}
prototype$8._resetDefs = function() {
var def = this._defs;
def.gradient = {};
def.clipping = {};
};
// -- Manage rendering of items marked as dirty --
prototype$8.dirty = function(item) {
if (item.dirty !== this._dirtyID) {
item.dirty = this._dirtyID;
this._dirty.push(item);
}
};
prototype$8.isDirty = function(item) {
return this._dirtyAll
|| !item._svg
|| item.dirty === this._dirtyID;
};
prototype$8._dirtyCheck = function() {
this._dirtyAll = true;
var items = this._dirty;
if (!items.length) return true;
var id = ++this._dirtyID,
item, mark, type, mdef, i, n, o;
for (i=0, n=items.length; i<n; ++i) {
item = items[i];
mark = item.mark;
if (mark.marktype !== type) {
// memoize mark instance lookup
type = mark.marktype;
mdef = marks[type];
}
if (mark.zdirty && mark.dirty !== id) {
this._dirtyAll = false;
dirtyParents(item, id);
mark.items.forEach(function(i) { i.dirty = id; });
}
if (mark.zdirty) continue; // handle in standard drawing pass
if (item.exit) { // EXIT
if (mdef.nested && mark.items.length) {
// if nested mark with remaining points, update instead
o = mark.items[0];
if (o._svg) this._update(mdef, o._svg, o);
} else if (item._svg) {
// otherwise remove from DOM
o = item._svg.parentNode;
if (o) o.removeChild(item._svg);
}
item._svg = null;
continue;
}
item = (mdef.nested ? mark.items[0] : item);
if (item._update === id) continue; // already visited
if (!item._svg || !item._svg.ownerSVGElement) {
// ENTER
this._dirtyAll = false;
dirtyParents(item, id);
} else {
// IN-PLACE UPDATE
this._update(mdef, item._svg, item);
}
item._update = id;
}
return !this._dirtyAll;
};
function dirtyParents(item, id) {
for (; item && item.dirty !== id; item=item.mark.group) {
item.dirty = id;
if (item.mark && item.mark.dirty !== id) {
item.mark.dirty = id;
} else return;
}
}
// -- Construct & maintain scenegraph to SVG mapping ---
// Draw a mark container.
prototype$8.draw = function(el, scene, prev) {
if (!this.isDirty(scene)) return scene._svg;
var renderer = this,
svg = this._svg,
mdef = marks[scene.marktype],
events = scene.interactive === false ? 'none' : null,
isGroup = mdef.tag === 'g',
sibling = null,
i = 0,
parent;
parent = bind(scene, el, prev, 'g', svg);
parent.setAttribute('class', cssClass(scene));
if (!isGroup) {
parent.style.setProperty('pointer-events', events);
}
if (scene.clip) {
parent.setAttribute('clip-path', clip(renderer, scene, scene.group));
} else {
parent.removeAttribute('clip-path');
}
function process(item) {
var dirty = renderer.isDirty(item),
node = bind(item, parent, sibling, mdef.tag, svg);
if (dirty) {
renderer._update(mdef, node, item);
if (isGroup) recurse(renderer, node, item);
}
sibling = node;
++i;
}
if (mdef.nested) {
if (scene.items.length) process(scene.items[0]);
} else {
visit(scene, process);
}
domClear(parent, i);
return parent;
};
// Recursively process group contents.
function recurse(renderer, el, group) {
el = el.lastChild;
var prev, idx = 0;
visit(group, function(item) {
prev = renderer.draw(el, item, prev);
++idx;
});
// remove any extraneous DOM elements
domClear(el, 1 + idx);
}
// Bind a scenegraph item to an SVG DOM element.
// Create new SVG elements as needed.
function bind(item, el, sibling, tag, svg) {
var node = item._svg, doc;
// create a new dom node if needed
if (!node) {
doc = el.ownerDocument;
node = domCreate(doc, tag, ns);
item._svg = node;
if (item.mark) {
node.__data__ = item;
node.__values__ = {fill: 'default'};
// if group, create background and foreground elements
if (tag === 'g') {
var bg = domCreate(doc, 'path', ns);
bg.setAttribute('class', 'background');
node.appendChild(bg);
bg.__data__ = item;
var fg = domCreate(doc, 'g', ns);
node.appendChild(fg);
fg.__data__ = item;
}
}
}
// (re-)insert if (a) not contained in SVG or (b) sibling order has changed
if (node.ownerSVGElement !== svg || hasSiblings(item) && node.previousSibling !== sibling) {
el.insertBefore(node, sibling ? sibling.nextSibling : el.firstChild);
}
return node;
}
function hasSiblings(item) {
var parent = item.mark || item.group;
return parent && parent.items.length > 1;
}
// -- Set attributes & styles on SVG elements ---
var element = null, // temp var for current SVG element
values = null; // temp var for current values hash
// Extra configuration for certain mark types
var mark_extras = {
group: function(mdef, el, item) {
values = el.__values__; // use parent's values hash
element = el.childNodes[1];
mdef.foreground(emit, item, this);
element = el.childNodes[0];
mdef.background(emit, item, this);
var value = item.mark.interactive === false ? 'none' : null;
if (value !== values.events) {
element.style.setProperty('pointer-events', value);
values.events = value;
}
},
text: function(mdef, el, item) {
var value;
value = textValue(item);
if (value !== values.text) {
el.textContent = value;
values.text = value;
}
setStyle(el, 'font-family', fontFamily(item));
setStyle(el, 'font-size', fontSize(item) + 'px');
setStyle(el, 'font-style', item.fontStyle);
setStyle(el, 'font-variant', item.fontVariant);
setStyle(el, 'font-weight', item.fontWeight);
}
};
function setStyle(el, name, value) {
if (value !== values[name]) {
if (value == null) {
el.style.removeProperty(name);
} else {
el.style.setProperty(name, value + '');
}
values[name] = value;
}
}
prototype$8._update = function(mdef, el, item) {
// set dom element and values cache
// provides access to emit method
element = el;
values = el.__values__;
// apply svg attributes
mdef.attr(emit, item, this);
// some marks need special treatment
var extra = mark_extras[mdef.type];
if (extra) extra.call(this, mdef, el, item);
// apply svg css styles
// note: element may be modified by 'extra' method
this.style(element, item);
};
function emit(name, value, ns) {
// early exit if value is unchanged
if (value === values[name]) return;
if (value != null) {
// if value is provided, update DOM attribute
if (ns) {
element.setAttributeNS(ns, name, value);
} else {
element.setAttribute(name, value);
}
} else {
// else remove DOM attribute
if (ns) {
element.removeAttributeNS(ns, name);
} else {
element.removeAttribute(name);
}
}
// note current value for future comparison
values[name] = value;
}
prototype$8.style = function(el, o) {
if (o == null) return;
var i, n, prop, name, value;
for (i=0, n=styleProperties.length; i<n; ++i) {
prop = styleProperties[i];
value = o[prop];
if (prop === 'font') {
value = fontFamily(o);
}
if (value === values[prop]) continue;
name = styles[prop];
if (value == null) {
if (name === 'fill') {
el.style.setProperty(name, 'none');
} else {
el.style.removeProperty(name);
}
} else {
if (value.id) {
// ensure definition is included
this._defs.gradient[value.id] = value;
value = 'url(' + href() + '#' + value.id + ')';
}
el.style.setProperty(name, value + '');
}
values[prop] = value;
}
};
function href() {
var loc;
return typeof window === 'undefined' ? ''
: (loc = window.location).hash ? loc.href.slice(0, -loc.hash.length)
: loc.href;
}
function SVGStringRenderer(loader) {
Renderer.call(this, loader);
this._text = {
head: '',
bg: '',
root: '',
foot: '',
defs: '',
body: ''
};
this._defs = {
gradient: {},
clipping: {}
};
}
var prototype$9 = vegaUtil.inherits(SVGStringRenderer, Renderer);
var base$2 = Renderer.prototype;
prototype$9.resize = function(width, height, origin, scaleFactor) {
base$2.resize.call(this, width, height, origin, scaleFactor);
var o = this._origin,
t = this._text;
var attr = {
class: 'marks',
width: this._width * this._scale,
height: this._height * this._scale,
viewBox: '0 0 ' + this._width + ' ' + this._height
};
for (var key in metadata) {
attr[key] = metadata[key];
}
t.head = openTag('svg', attr);
var bg = this._bgcolor;
if (bg === 'transparent' || bg === 'none') bg = null;
if (bg) {
t.bg = openTag('rect', {
width: this._width,
height: this._height,
style: 'fill: ' + bg + ';'
}) + closeTag('rect');
} else {
t.bg = '';
}
t.root = openTag('g', {
transform: 'translate(' + o + ')'
});
t.foot = closeTag('g') + closeTag('svg');
return this;
};
prototype$9.background = function() {
var rv = base$2.background.apply(this, arguments);
if (arguments.length && this._text.head) {
this.resize(this._width, this._height, this._origin, this._scale);
}
return rv;
};
prototype$9.svg = function() {
var t = this._text;
return t.head + t.bg + t.defs + t.root + t.body + t.foot;
};
prototype$9._render = function(scene) {
this._text.body = this.mark(scene);
this._text.defs = this.buildDefs();
return this;
};
prototype$9.buildDefs = function() {
var all = this._defs,
defs = '',
i, id, def, stops;
for (id in all.gradient) {
def = all.gradient[id];
stops = def.stops;
defs += openTag('linearGradient', {
id: id,
x1: def.x1,
x2: def.x2,
y1: def.y1,
y2: def.y2
});
for (i=0; i<stops.length; ++i) {
defs += openTag('stop', {
offset: stops[i].offset,
'stop-color': stops[i].color
}) + closeTag('stop');
}
defs += closeTag('linearGradient');
}
for (id in all.clipping) {
def = all.clipping[id];
defs += openTag('clipPath', {id: id});
if (def.path) {
defs += openTag('path', {
d: def.path
}) + closeTag('path');
} else {
defs += openTag('rect', {
x: 0,
y: 0,
width: def.width,
height: def.height
}) + closeTag('rect');
}
defs += closeTag('clipPath');
}
return (defs.length > 0) ? openTag('defs') + defs + closeTag('defs') : '';
};
var object;
function emit$1(name, value, ns, prefixed) {
object[prefixed || name] = value;
}
prototype$9.attributes = function(attr, item) {
object = {};
attr(emit$1, item, this);
return object;
};
prototype$9.href = function(item) {
var that = this,
href = item.href,
attr;
if (href) {
if (attr = that._hrefs && that._hrefs[href]) {
return attr;
} else {
that.sanitizeURL(href).then(function(attr) {
// rewrite to use xlink namespace
// note that this will be deprecated in SVG 2.0
attr['xlink:href'] = attr.href;
attr.href = null;
(that._hrefs || (that._hrefs = {}))[href] = attr;
});
}
}
return null;
};
prototype$9.mark = function(scene) {
var renderer = this,
mdef = marks[scene.marktype],
tag = mdef.tag,
defs = this._defs,
str = '',
style;
if (tag !== 'g' && scene.interactive === false) {
style = 'style="pointer-events: none;"';
}
// render opening group tag
str += openTag('g', {
'class': cssClass(scene),
'clip-path': scene.clip ? clip(renderer, scene, scene.group) : null
}, style);
// render contained elements
function process(item) {
var href = renderer.href(item);
if (href) str += openTag('a', href);
style = (tag !== 'g') ? applyStyles(item, scene, tag, defs) : null;
str += openTag(tag, renderer.attributes(mdef.attr, item), style);
if (tag === 'text') {
str += escape_text(textValue(item));
} else if (tag === 'g') {
str += openTag('path', renderer.attributes(mdef.background, item),
applyStyles(item, scene, 'bgrect', defs)) + closeTag('path');
str += openTag('g', renderer.attributes(mdef.foreground, item))
+ renderer.markGroup(item)
+ closeTag('g');
}
str += closeTag(tag);
if (href) str += closeTag('a');
}
if (mdef.nested) {
if (scene.items && scene.items.length) process(scene.items[0]);
} else {
visit(scene, process);
}
// render closing group tag
return str + closeTag('g');
};
prototype$9.markGroup = function(scene) {
var renderer = this,
str = '';
visit(scene, function(item) {
str += renderer.mark(item);
});
return str;
};
function applyStyles(o, mark, tag, defs) {
if (o == null) return '';
var i, n, prop, name, value, s = '';
if (tag === 'bgrect' && mark.interactive === false) {
s += 'pointer-events: none; ';
}
if (tag === 'text') {
s += 'font-family: ' + fontFamily(o) + '; ';
s += 'font-size: ' + fontSize(o) + 'px; ';
if (o.fontStyle) s += 'font-style: ' + o.fontStyle + '; ';
if (o.fontVariant) s += 'font-variant: ' + o.fontVariant + '; ';
if (o.fontWeight) s += 'font-weight: ' + o.fontWeight + '; ';
}
for (i=0, n=styleProperties.length; i<n; ++i) {
prop = styleProperties[i];
name = styles[prop];
value = o[prop];
if (value == null) {
if (name === 'fill') {
s += 'fill: none; ';
}
} else if (value === 'transparent' && (name === 'fill' || name === 'stroke')) {
// transparent is not a legal SVG value, so map to none instead
s += name + ': none; ';
} else {
if (value.id) {
// ensure definition is included
defs.gradient[value.id] = value;
value = 'url(#' + value.id + ')';
}
s += name + ': ' + value + '; ';
}
}
return s ? 'style="' + s.trim() + '"' : null;
}
function escape_text(s) {
  return s.replace(/&/g, '&amp;')
          .replace(/</g, '&lt;')
          .replace(/>/g, '&gt;');
}
var Canvas = 'canvas';
var PNG = 'png';
var SVG = 'svg';
var None = 'none';
var RenderType = {
Canvas: Canvas,
PNG: PNG,
SVG: SVG,
None: None
};
var modules = {};
modules[Canvas] = modules[PNG] = {
renderer: CanvasRenderer,
headless: CanvasRenderer,
handler: CanvasHandler
};
modules[SVG] = {
renderer: SVGRenderer,
headless: SVGStringRenderer,
handler: SVGHandler
};
modules[None] = {};
function renderModule(name, _) {
name = String(name || '').toLowerCase();
if (arguments.length > 1) {
modules[name] = _;
return this;
} else {
return modules[name];
}
}
var clipBounds = new Bounds();
function boundClip(mark) {
var clip = mark.clip;
if (vegaUtil.isFunction(clip)) {
clip(context(clipBounds.clear()));
} else if (clip) {
clipBounds.set(0, 0, mark.group.width, mark.group.height);
} else return;
mark.bounds.intersect(clipBounds);
}
var TOLERANCE = 1e-9;
function sceneEqual(a, b, key) {
return (a === b) ? true
: (key === 'path') ? pathEqual(a, b)
: (a instanceof Date && b instanceof Date) ? +a === +b
: (vegaUtil.isNumber(a) && vegaUtil.isNumber(b)) ? Math.abs(a - b) <= TOLERANCE
: (!a || !b || !vegaUtil.isObject(a) && !vegaUtil.isObject(b)) ? a == b
: (a == null || b == null) ? false
: objectEqual(a, b);
}
function pathEqual(a, b) {
return sceneEqual(pathParse(a), pathParse(b));
}
function objectEqual(a, b) {
var ka = Object.keys(a),
kb = Object.keys(b),
key, i;
if (ka.length !== kb.length) return false;
ka.sort();
kb.sort();
for (i = ka.length - 1; i >= 0; i--) {
if (ka[i] != kb[i]) return false;
}
for (i = ka.length - 1; i >= 0; i--) {
key = ka[i];
if (!sceneEqual(a[key], b[key], key)) return false;
}
return typeof a === typeof b;
}
exports.Bounds = Bounds;
exports.Gradient = Gradient;
exports.GroupItem = GroupItem;
exports.ResourceLoader = ResourceLoader;
exports.Item = Item;
exports.Scenegraph = Scenegraph;
exports.Handler = Handler;
exports.Renderer = Renderer;
exports.CanvasHandler = CanvasHandler;
exports.CanvasRenderer = CanvasRenderer;
exports.SVGHandler = SVGHandler;
exports.SVGRenderer = SVGRenderer;
exports.SVGStringRenderer = SVGStringRenderer;
exports.RenderType = RenderType;
exports.renderModule = renderModule;
exports.Marks = marks;
exports.boundClip = boundClip;
exports.boundContext = context;
exports.boundStroke = boundStroke;
exports.boundItem = boundItem;
exports.boundMark = boundMark;
exports.pathCurves = curves;
exports.pathSymbols = symbols;
exports.pathRectangle = vg_rect;
exports.pathTrail = vg_trail;
exports.pathParse = pathParse;
exports.pathRender = pathRender;
exports.point = point;
exports.domCreate = domCreate;
exports.domFind = domFind;
exports.domChild = domChild;
exports.domClear = domClear;
exports.openTag = openTag;
exports.closeTag = closeTag;
exports.font = font;
exports.fontFamily = fontFamily;
exports.fontSize = fontSize;
exports.textMetrics = textMetrics;
exports.resetSVGClipId = resetSVGClipId;
exports.sceneEqual = sceneEqual;
exports.pathEqual = pathEqual;
exports.sceneToJSON = sceneToJSON;
exports.sceneFromJSON = sceneFromJSON;
exports.sceneZOrder = zorder;
exports.sceneVisit = visit;
exports.scenePickVisit = pickVisit;
Object.defineProperty(exports, '__esModule', { value: true });
})));
| translate$1 |
replication.go | package consts
const (
// N.B. This needs to be excluded from replication despite the name; it's
// merely saying that this is cluster information for the replicated
// cluster.
CoreReplicatedClusterPrefix = "core/cluster/replicated/"
CoreReplicatedClusterPrefixDR = "core/cluster/replicated-dr/"
CoreReplicatedClusterInfoPath = CoreReplicatedClusterPrefix + "info"
CoreReplicatedClusterSecondariesPrefix = CoreReplicatedClusterPrefix + "secondaries/"
CoreReplicatedClusterInfoPathDR = CoreReplicatedClusterPrefixDR + "info"
CoreReplicatedClusterSecondariesPrefixDR = CoreReplicatedClusterPrefixDR + "secondaries/"
// This is an identifier for the current secondary in the replicated paths
// manager. It should contain a character that is not allowed in secondary
// ids to ensure it doesn't collide.
CurrentReplicatedSecondaryIdentifier = ".current"
CoreFeatureFlagPath = "core/cluster/feature-flags"
)
type ReplicationState uint32
const (
_ ReplicationState = iota
OldReplicationPrimary
OldReplicationSecondary
OldReplicationBootstrapping
// Don't add anything here. Adding anything to this Old block would cause
// the rest of the values to change below. This was done originally to
// ensure no overlap between old and new values.
ReplicationUnknown ReplicationState = 0
ReplicationPerformancePrimary ReplicationState = 1 << iota // Note -- iota is 5 here!
ReplicationPerformanceSecondary
OldSplitReplicationBootstrapping
ReplicationDRPrimary
ReplicationDRSecondary
ReplicationPerformanceBootstrapping
ReplicationDRBootstrapping
ReplicationPerformanceDisabled
ReplicationDRDisabled
ReplicationPerformanceStandby
)
// We verify no changes to the above values are made
func init() {
if OldReplicationBootstrapping != 3 {
panic("Replication Constants have changed")
}
if ReplicationPerformancePrimary != 1<<5 {
panic("Replication Constants have changed")
}
}
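// Illustrative sketch (not part of the original file): with iota resuming at
// 5 above, ReplicationPerformancePrimary is 1<<5 = 32 and each subsequent
// state doubles it, so one value can carry several states at once. The
// function name is hypothetical and exists only as a usage example.
func exampleReplicationStateUsage() []string {
	var s ReplicationState
	s.AddState(ReplicationPerformancePrimary | ReplicationDRSecondary)
	return s.StateStrings() // -> ["perf-primary", "dr-secondary"]
}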
func (r ReplicationState) string() string {
switch r {
case ReplicationPerformanceSecondary:
return "secondary"
case ReplicationPerformancePrimary:
return "primary"
case ReplicationPerformanceBootstrapping:
return "bootstrapping"
case ReplicationPerformanceDisabled:
return "disabled"
case ReplicationDRPrimary:
return "primary"
case ReplicationDRSecondary:
return "secondary"
case ReplicationDRBootstrapping:
return "bootstrapping"
case ReplicationDRDisabled:
return "disabled"
}
return "unknown"
}
func (r ReplicationState) StateStrings() []string {
var ret []string
if r.HasState(ReplicationPerformanceSecondary) {
ret = append(ret, "perf-secondary")
}
if r.HasState(ReplicationPerformancePrimary) {
ret = append(ret, "perf-primary")
}
if r.HasState(ReplicationPerformanceBootstrapping) {
ret = append(ret, "perf-bootstrapping")
}
if r.HasState(ReplicationPerformanceDisabled) {
ret = append(ret, "perf-disabled")
}
if r.HasState(ReplicationDRPrimary) {
ret = append(ret, "dr-primary")
}
if r.HasState(ReplicationDRSecondary) |
if r.HasState(ReplicationDRBootstrapping) {
ret = append(ret, "dr-bootstrapping")
}
if r.HasState(ReplicationDRDisabled) {
ret = append(ret, "dr-disabled")
}
if r.HasState(ReplicationPerformanceStandby) {
ret = append(ret, "perfstandby")
}
return ret
}
func (r ReplicationState) GetDRString() string {
switch {
case r.HasState(ReplicationDRBootstrapping):
return ReplicationDRBootstrapping.string()
case r.HasState(ReplicationDRPrimary):
return ReplicationDRPrimary.string()
case r.HasState(ReplicationDRSecondary):
return ReplicationDRSecondary.string()
case r.HasState(ReplicationDRDisabled):
return ReplicationDRDisabled.string()
default:
return "unknown"
}
}
func (r ReplicationState) GetPerformanceString() string {
switch {
case r.HasState(ReplicationPerformanceBootstrapping):
return ReplicationPerformanceBootstrapping.string()
case r.HasState(ReplicationPerformancePrimary):
return ReplicationPerformancePrimary.string()
case r.HasState(ReplicationPerformanceSecondary):
return ReplicationPerformanceSecondary.string()
case r.HasState(ReplicationPerformanceDisabled):
return ReplicationPerformanceDisabled.string()
default:
return "unknown"
}
}
func (r ReplicationState) IsPrimaryState() bool {
return r.HasState(ReplicationPerformancePrimary | ReplicationDRPrimary)
}
func (r ReplicationState) HasState(flag ReplicationState) bool { return r&flag != 0 }
func (r *ReplicationState) AddState(flag ReplicationState) { *r |= flag }
func (r *ReplicationState) ClearState(flag ReplicationState) { *r &= ^flag }
func (r *ReplicationState) ToggleState(flag ReplicationState) { *r ^= flag }
type HAState uint32
const (
_ HAState = iota
Standby
PerfStandby
Active
)
| {
ret = append(ret, "dr-secondary")
} |
test_stdconsole.py | #####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Microsoft Public License. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Microsoft Public License, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Microsoft Public License.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
from iptest.assert_util import *
skiptest("silverlight")
skiptest("win32")
from iptest.console_util import IronPythonInstance
import sys
import nt
import re
from System import *
import os
# Test that IronPython console behaves as expected (command line argument processing etc.).
# Get a temporary directory in which the tests can scribble.
tmpdir = Environment.GetEnvironmentVariable("TEMP")
tmpdir = IO.Path.Combine(tmpdir, "IronPython")
if not os.path.exists(tmpdir):
nt.mkdir(tmpdir)
# Name of a temporary file used to capture console output.
tmpfile = IO.Path.Combine(tmpdir, "tmp_output.txt")
# Name of a batch file used to execute the console, to work around the fact that we have no way to redirect stdout
# from nt.spawnl.
batfile = IO.Path.Combine(tmpdir, "__runconsole.bat")
f = file(batfile, "w")
f.write("@" + sys.executable + " >" + tmpfile + " 2>&1 %*\n")
f.close()
############################################################
# Runs the console with the given tuple of arguments and verifies that the output and exit code are as
# specified. The expected_output argument can be specified in various ways:
# None : No output comparison is performed
# a string : Full output is compared (remember to include newlines where appropriate)
# a tuple : A tuple of the form (optionstring, valuestring), valid optionstrings are:
# "firstline" : valuestring is compared against the first line of the output
# "lastline" : valuestring is compared against the last line of the output
# "regexp" : valuestring is a regular expression compared against the entire output
def TestCommandLine(args, expected_output, expected_exitcode = 0):
realargs = [batfile]
realargs.extend(args)
exitcode = nt.spawnv(0, batfile, realargs)
cmdline = "ipy " + ' '.join(args)
print ''
print ' ', cmdline
Assert(exitcode == expected_exitcode, "'" + cmdline + "' generated unexpected exit code " + str(exitcode))
if (expected_output != None):
f = file(tmpfile)
if isinstance(expected_output, str): | else:
output = f.readlines()
f.close()
# normalize \r\n to \n
if type(output) == list:
for i in range(len(output)):
output[i] = output[i].replace('\r\n', '\n')
else:
output = output.replace('\r\n', '\n')
# then check the output
if isinstance(expected_output, str):
Assert(output == expected_output, "'" + cmdline + "' generated unexpected output:\n" + output)
elif isinstance(expected_output, tuple):
if expected_output[0] == "firstline":
Assert(output[0] == expected_output[1], "'" + cmdline + "' generated unexpected first line of output:\n" + repr(output[0]))
elif expected_output[0] == "lastline":
Assert(output[-1] == expected_output[1], "'" + cmdline + "' generated unexpected last line of output:\n" + repr(output[-1]))
elif expected_output[0] == "regexp":
output = ''.join(output)
Assert(re.match(expected_output[1], output, re.M | re.S), "'" + cmdline + "' generated unexpected output:\n" + repr(output))
else:
Assert(False, "Invalid type for expected_output")
else:
Assert(False, "Invalid type for expected_output")
############################################################
# Runs the console with the given argument string with the expectation that it should enter interactive mode.
# In particular, the arguments must not include a -c parameter. This is useful for catching certain argument parsing errors.
def TestInteractive(args, expected_exitcode = 0):
ipi = IronPythonInstance(sys.executable, sys.exec_prefix, args)
AreEqual(ipi.Start(), True)
    # Verify basic behavior
AreEqual("4", ipi.ExecuteLine("2+2"))
ipi.End()
############################################################
def TestScript(commandLineArgs, script, expected_output, expected_exitcode = 0):
scriptFileName = "script_" + str(hash(script)) + ".py"
tmpscript = IO.Path.Combine(tmpdir, scriptFileName)
f = file(tmpscript, "w")
f.write(script)
f.close()
args = commandLineArgs + (tmpscript,)
TestCommandLine(args, expected_output, expected_exitcode)
############################################################
def test_exit():
# Test exit code with sys.exit(int)
TestCommandLine(("-c", "import sys; sys.exit(0)"), "", 0)
TestCommandLine(("-c", "import sys; sys.exit(200)"), "", 200)
TestScript((), "import sys\nclass C(int): pass\nc = C(200)\nsys.exit(c)\n", "", 200)
# Test exit code with sys.exit(non-int)
TestCommandLine(("-c", "import sys; sys.exit(None)"), "", 0)
TestCommandLine(("-c", "import sys; sys.exit('goodbye')"), "goodbye\n",1)
TestCommandLine(("-c", "import sys; sys.exit(200L)"), "200\n", 1)
############################################################
def test_nt__exit():
TestCommandLine(("-c", "import nt; nt._exit(0)"), "", 0)
TestCommandLine(("-c", "import nt; nt._exit(200)"), "", 200)
TestScript((), "import nt\nclass C(int): pass\nc = C(200)\nnt._exit(c)\n", "", 200)
############################################################
@disabled("TODO: this test spawns UI about ipy.exe failing abnormally")
def test_nt_abort():
# Positive
TestCommandLine(("-c", "import nt; nt.abort()"), "", 1)
TestScript((), "import nt\nnt.abort()", "", 1)
############################################################
# Test the -c (command as string) option.
def test_c():
TestCommandLine(("-c", "print 'foo'"), "foo\n")
TestCommandLine(("-c", "raise Exception('foo')"), ("lastline", "Exception: foo"), 1)
TestCommandLine(("-c", "import sys; sys.exit(123)"), "", 123)
TestCommandLine(("-c", "import sys; print sys.argv", "foo", "bar", "baz"), "['-c', 'foo', 'bar', 'baz']\n")
TestCommandLine(("-c",), "Argument expected for the -c option.\n", -1)
############################################################
# Test the -S (suppress site initialization) option.
def test_S():
    # Create a local site.py that sets some global context. Do this in a temporary directory to avoid accidentally
# overwriting a real site.py or creating confusion. Use the IRONPYTHONPATH environment variable to point
# IronPython at this version of site.py.
f = file(tmpdir + "\\site.py", "w")
f.write("import sys\nsys.foo = 123\n")
f.close()
Environment.SetEnvironmentVariable("IRONPYTHONPATH", tmpdir)
# Verify that the file gets loaded by default.
TestCommandLine(("-c", "import sys; print sys.foo"), "123\n")
# CP778 - verify 'site' does not show up in dir()
TestCommandLine(("-c", "print 'site' in dir()"), "False\n")
# Verify that Lib remains in sys.path.
TestCommandLine(("-S", "-c", "import sys; print str(sys.exec_prefix + '\\lib').lower() in [x.lower() for x in sys.path]"), "True\n")
# Now check that we can suppress this with -S.
TestCommandLine(("-S", "-c", "import sys; print sys.foo"), ("lastline", "AttributeError: 'module' object has no attribute 'foo'"), 1)
def test_cp24720():
f = file(nt.getcwd() + "\\site.py", "w")
f.write("import sys\nsys.foo = 456\n")
f.close()
orig_ipy_path = Environment.GetEnvironmentVariable("IRONPYTHONPATH")
try:
Environment.SetEnvironmentVariable("IRONPYTHONPATH", "")
TestCommandLine(("-c", "import site;import sys;print hasattr(sys, 'foo')"), "False\n")
Environment.SetEnvironmentVariable("IRONPYTHONPATH", ".")
TestCommandLine(("-c", "import site;import sys;print hasattr(sys, 'foo')"), "True\n")
finally:
Environment.SetEnvironmentVariable("IRONPYTHONPATH", orig_ipy_path)
nt.remove(nt.getcwd() + "\\site.py")
def test_V():
# Test the -V (print version and exit) option.
TestCommandLine(("-V",), ("regexp", "IronPython ([0-9.]+)(.*) on .NET ([0-9.]+)\n"))
############################################################
# Test the -OO (suppress doc string optimization) option.
def test_OO():
foo_doc = "def foo():\n\t'OK'\nprint foo.__doc__\n"
TestScript((), foo_doc, "OK\n")
TestScript(("-OO",), foo_doc, "None\n")
############################################################
# Test the -t and -tt (warnings/errors on inconsistent tab usage) options.
def test_t():
    # Write a script containing inconsistent use of tabs.
tmpscript = tmpdir + "\\tabs.py"
f = file(tmpscript, "w")
f.write("if (1):\n\tpass\n pass\nprint 'OK'\n")
f.close()
TestCommandLine((tmpscript, ), "OK\n")
msg = "inconsistent use of tabs and spaces in indentation"
TestCommandLine(("-t", tmpscript), ("firstline", "%s:3: SyntaxWarning: %s\n" % (tmpscript, msg, )), 0)
TestCommandLine(("-tt", tmpscript), ("lastline", "TabError: " + msg + "\n"), 1)
tmpscript = tmpdir + "\\funcdef.py"
f = file(tmpscript, "w")
f.write("""def f(a,
b,
c): pass""")
f.close()
TestCommandLine(("-tt", tmpscript, ), "")
def test_E():
# Test the -E (suppress use of environment variables) option.
# Re-use the generated site.py from above and verify that we can stop it being picked up from IRONPYTHONPATH
# using -E.
TestCommandLine(("-E", "-c", "import sys; print sys.foo"), ("lastline", "AttributeError: 'module' object has no attribute 'foo'"), 1)
# Create an override startup script that exits right away
tmpscript = tmpdir + "\\startupdie.py"
f = file(tmpscript, "w")
f.write("from System import Environment\nprint 'Boo!'\nEnvironment.Exit(27)\n")
f.close()
Environment.SetEnvironmentVariable("IRONPYTHONSTARTUP", tmpscript)
TestCommandLine((), None, 27)
tmpscript2 = tmpdir + "\\something.py"
f = file(tmpscript2, "w")
f.write("print 2+2\n")
f.close()
TestCommandLine(('-E', tmpscript2), "4\n")
tmpscript3 = tmpdir + "\\startupdie.py"
f = file(tmpscript3, "w")
f.write("import sys\nprint 'Boo!'\nsys.exit(42)\n")
f.close()
Environment.SetEnvironmentVariable("IRONPYTHONSTARTUP", tmpscript3)
TestCommandLine((), None, 42)
Environment.SetEnvironmentVariable("IRONPYTHONSTARTUP", "")
nt.unlink(tmpscript)
nt.unlink(tmpscript2)
# Test -W (set warning filters) option.
def test_W():
TestCommandLine(("-c", "import sys; print sys.warnoptions"), "[]\n")
TestCommandLine(("-W", "foo", "-c", "import sys; print sys.warnoptions"), "['foo']\n")
TestCommandLine(("-W", "foo", "-W", "bar", "-c", "import sys; print sys.warnoptions"), "['foo', 'bar']\n")
TestCommandLine(("-W",), "Argument expected for the -W option.\n", -1)
# Test -?
# regexp for the output of PrintUsage
# usageRegex = "Usage.*"
# TestCommandLine(("-?",), ("regexp", usageRegex))
# Test -X:FastEval
def test_X_Interpret():
TestCommandLine(("-X:Interpret", "-c", "2+2"), "")
TestCommandLine(("-X:Interpret", "-c", "eval('2+2')"), "")
TestCommandLine(("-X:Interpret", "-c", "x = 3; eval('x+2')"), "")
# Test -X:TrackPerformance
def test_X_TrackPerformance():
if not is_debug: return #Mode not supported in Release
TestCommandLine(("-X:TrackPerformance", "-c", "2+2"), "")
# Test -u (Unbuffered stdout & stderr): only test this can be passed in
def test_u():
TestCommandLine(('-u', '-c', 'print 2+2'), "4\n")
# Test -X:MaxRecursion
def test_X_MaxRecursion():
TestCommandLine(("-X:MaxRecursion", "10", "-c", "2+2"), "")
TestCommandLine(("-X:MaxRecursion", "3.14159265", "-c", "2+2"), "The argument for the -X:MaxRecursion option must be an integer >= 10.\n", -1)
TestCommandLine(("-X:MaxRecursion",), "Argument expected for the -X:MaxRecursion option.\n", -1)
TestCommandLine(("-X:MaxRecursion", "2"), "The argument for the -X:MaxRecursion option must be an integer >= 10.\n", -1)
# Test -x (ignore first line)
def test_x():
tmpxoptscript = tmpdir + '\\xopt.py'
f = file(tmpxoptscript, "w")
f.write("first line is garbage\nprint 2+2\n")
f.close()
TestCommandLine(('-x', tmpxoptscript), "4\n")
nt.unlink(tmpxoptscript)
def test_nonexistent_file():
# Test invocation of a nonexistent file
try:
nt.unlink("nonexistent.py")
except OSError:
pass
TestCommandLine(("nonexistent.py",), "File nonexistent.py does not exist.\n", 1)
# Test -X:MTA
def test_MTA():
TestCommandLine(("-X:MTA", "-c", "print 'OK'"), "OK\n")
TestInteractive("-X:MTA")
# Test -Q
def test_Q():
TestCommandLine(("-Q", "warnall", "-c", "print 3/2.0"), """<string>:1: DeprecationWarning: classic float division\n1.5\n""")
TestCommandLine(("-Q", "warn", "-c", "print 3/2.0"), "1.5\n")
TestCommandLine(("-Q", "warn", "-c", "print 3j/2.0"), "1.5j\n")
TestCommandLine(("-Q", "warnall", "-c", "print 3/2.0"), "<string>:1: DeprecationWarning: classic float division\n1.5\n")
TestCommandLine(("-Q", "warnall", "-c", "print 3L/2.0"), "<string>:1: DeprecationWarning: classic float division\n1.5\n")
TestCommandLine(("-Q", "warnall", "-c", "print 3.0/2L"), "<string>:1: DeprecationWarning: classic float division\n1.5\n")
TestCommandLine(("-Q", "warnall", "-c", "print 3j/2.0"), "<string>:1: DeprecationWarning: classic complex division\n1.5j\n")
TestCommandLine(("-Q", "warnall", "-c", "print 3j/2"), "<string>:1: DeprecationWarning: classic complex division\n1.5j\n")
TestCommandLine(("-Q", "warnall", "-c", "print 3j/2L"), "<string>:1: DeprecationWarning: classic complex division\n1.5j\n")
TestCommandLine(("-Q", "warnall", "-c", "print 3.0/2j"), "<string>:1: DeprecationWarning: classic complex division\n-1.5j\n")
TestCommandLine(("-Q", "warnall", "-c", "print 3/2j"), "<string>:1: DeprecationWarning: classic complex division\n-1.5j\n")
TestCommandLine(("-Q", "warnall", "-c", "print 3L/2j"), "<string>:1: DeprecationWarning: classic complex division\n-1.5j\n")
TestCommandLine(("-Qwarn", "-c", "print 3/2L"), "<string>:1: DeprecationWarning: classic long division\n1\n")
TestCommandLine(("-Qwarnall", "-c", "print 3/2L"), "<string>:1: DeprecationWarning: classic long division\n1\n")
TestCommandLine(("-Qwarn", "-c", "print 3L/2"), "<string>:1: DeprecationWarning: classic long division\n1\n")
TestCommandLine(("-Qwarnall", "-c", "print 3L/2"), "<string>:1: DeprecationWarning: classic long division\n1\n")
TestCommandLine(("-Qnew", "-c", "print 3/2"), "1.5\n")
TestCommandLine(("-Qold", "-c", "print 3/2"), "1\n")
TestCommandLine(("-Qwarn", "-c", "print 3/2"), "<string>:1: DeprecationWarning: classic int division\n1\n")
TestCommandLine(("-Qwarnall", "-c", "print 3/2"), "<string>:1: DeprecationWarning: classic int division\n1\n")
TestCommandLine(("-Q", "new", "-c", "print 3/2"), "1.5\n")
TestCommandLine(("-Q", "old", "-c", "print 3/2"), "1\n")
TestCommandLine(("-Q", "warn", "-c", "print 3/2"), "<string>:1: DeprecationWarning: classic int division\n1\n")
TestCommandLine(("-Q", "warnall", "-c", "print 3/2"), "<string>:1: DeprecationWarning: classic int division\n1\n")
def test_doc():
TestCommandLine(("", "-c", "print __doc__"), "None\n", 0)
def test_cp11922():
TestCommandLine(("-c", "assert False"), '''Traceback (most recent call last):
File "<string>", line 1, in <module>
AssertionError''',
1)
def test_cp798():
TestCommandLine(("", "-c", "dir();print '_' in dir()"), "False\n", 0)
def test_logo():
i = IronPythonInstance(sys.executable, sys.exec_prefix, "")
AreEqual(i.proc.Start(), True)
i.reader = i.proc.StandardOutput
x = i.EatToPrompt()
Assert(x.find('\r\r\n') == -1)
run_test(__name__) | output = f.read() |
yamlFormatter.d.ts | import { TextEdit, FormattingOptions } from 'vscode-languageserver-types';
import { CustomFormatterOptions, LanguageSettings } from '../yamlLanguageService';
import { TextDocument } from 'vscode-languageserver-textdocument';
export declare class YAMLFormatter {
private formatterEnabled;
configure(shouldFormat: LanguageSettings): void;
format(document: TextDocument, options: FormattingOptions & CustomFormatterOptions): TextEdit[];
} | ||
vec-collect-vec-i-filtered-with-linked-list-collect-vec.rs | extern crate lolbench;

#[test]
fn end_to_end() {
    lolbench::end_to_end_test(
        "rayon_1_0_0",
        "vec_collect::vec_i_filtered::with_linked_list_collect_vec",
    );
}
spg.api.js |
var AzureApi = (function () {
function AzureApi() {
}
var apiPath = "";
var fileUploadUrl = "";
var returnData = "";
var resultType = 0;
AzureApi.getResultType = function ()
{
return resultType;
}
AzureApi.getResultData = function ()
{
return returnData;
}
AzureApi.retrieveSharePointFiles = function (url, queryString) {
var urlPath = url;
urlPath += queryString;
$.ajax({
url: urlPath,
type: "GET",
dataType: "json",
async: false,
crossDomain: true,
success: function (data, textStatus, xhr) {
var jsData = data,
jsonStr = JSHelper.toJson(jsData);
jsData = JSHelper.fromJson(jsonStr);
resultType = 1;
returnData = jsData;
},
error: function () {
resultType = 2;
}
})
.done(function (data, status, jqxhr) {
});
}
AzureApi.setFileAttributes = function (url, data) {
var rc = false;
$.ajax({
url: url,
type: "POST",
data: data,
async: false,
beforeSend: function (xhr) {
xhr.setRequestHeader("Content-Type", "application/x-www-form-urlencoded");
},
success: function (data, textStatus, xhr) {
rc = true;
},
        error: function () {
            rc = false;
        },
})
.done(function (data, status, jqxhr) {
});
        return rc;
    }
AzureApi.buildFileUploadUrl = function (entityName, masterId, masterNumber, masterName)
{
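        // Example result (illustrative values):
        //   /api/Library/UploadDocument?EntityName=Order&MasterId=7&MasterNumber=N-7&MasterName=Contoso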
var uploadUrl = AzureApi.buildAPIPath(AzureApi.getRoute('UploadDocument'));
uploadUrl = uploadUrl.replace("{0}", entityName);
uploadUrl = uploadUrl.replace("{1}", masterId);
uploadUrl = uploadUrl.replace("{2}", masterNumber);
uploadUrl = uploadUrl.replace("{3}", masterName);
fileUploadUrl = uploadUrl;
return fileUploadUrl;
};
AzureApi.uploadFile = function (formData) {
$.ajax({
url: fileUploadUrl,
type: "POST",
contentType: false,
processData: false,
data: formData,
async: false,
beforeSend: function (xhr) {
// disableButton(fileUploadButtonId, fileCancelButtonId);
},
success: function (data, textStatus, xhr) {
var key = data.Key;
if (key == "FileId") {
resultType = 1;
returnData = data.Value; // fileId
}
else
resultType = 2;
},
error: function (jqxhr, status, error) {
resultType = 2;
returnData = error;
}
});
}
AzureApi.retrieveCategories = function () {
var urlPath = AzureApi.buildAPIPath(AzureApi.getRoute('GetDocumentTypes'));
$.ajax({
url: urlPath,
type: "GET",
dataType: "json",
async: false,
crossDomain: true,
success: function (data, textStatus, xhr) {
var jsData = data,
jsonStr = JSHelper.toJson(jsData);
jsData = JSHelper.fromJson(jsonStr);
resultType = 1;
returnData = jsData;
},
error: function () {
resultType = 2;
}
})
.done(function (data, status, jqxhr) {
});
}
AzureApi.deleteFile = function (fileId) {
var urlPath = AzureApi.buildAPIPath(AzureApi.getRoute('DeleteFile') + fileId);
$.ajax({
url: urlPath,
type: "DELETE",
crossDomain: true,
success: function (data, textStatus, xhr) {
resultType = 1;
returnData = "File Deleted";
},
error: function () {
resultType = 2;
returnData = "File could not be Deleted";
}
});
}
AzureApi.buildAPIPath = function (path) {
return apiPath + path;
}
AzureApi.getAzureApi = function () {
return apiPath;
}
AzureApi.setAzureApi = function (path) {
apiPath = path;
}
AzureApi.getRoute = function (name) {
var route = '';
switch (name) {
case 'GetDocumentTypes':
route = "/api/Lookup/GetAllListItems/DocumentTypes";
break;
case 'DownloadFile':
route = "/api/Library/DownloadFile/";
break;
case 'GetByMasterId':
route = "/api/Library/GetByMasterId/";
break;
case 'GetByMasterNumber':
route = "/api/Library/GetByMasterNumber/";
break;
case 'GetByAlternateField':
route = "/api/Library/GetByAlternateField?";
break;
case 'UploadDocument':
route = "/api/Library/UploadDocument?EntityName={0}&MasterId={1}&MasterNumber={2}&MasterName={3}";
| break;
case 'SetFileAttributes':
route = "/api/Library/SetFileAttributes";
break;
case 'DeleteFile':
                route = "/api/Library/DeleteFile/";
break;
default:
break;
}
return route;
}
return AzureApi;
})(); | |
main.go | package main
import (
"log"
"math"
"math/rand"
"os"
"strconv"
"time"
"github.com/3lvia/gs2"
)
const outputFile = "testdata/testFile.gs2"
const maxVal = 1000
func main() {
file, err := os.Create(outputFile)
if err != nil {
log.Fatal(err)
}
rand.Seed(time.Now().Unix())
startMessage := getStartMessage()
meterReadings := generateMeterReadings(0)
timeSeries := generateTimeSeries(1)
endMessage := getEndMessage(len(meterReadings) + len(timeSeries) + 2)
g := gs2.GS2{
StartMessage: startMessage,
MeterReadings: meterReadings,
TimeSeries: timeSeries,
EndMessage: endMessage,
}
if err := gs2.NewEncoder(file).Encode(&g); err != nil {
log.Fatal(err)
}
}
func getStartMessage() gs2.StartMessage {
now := time.Now()
_, offset := now.Zone()
return gs2.StartMessage{
ID: "someId",
MessageType: "Settlement-data",
Version: "1.2",
Time: now,
To: "recipient",
From: "sender",
GMTReference: offset / 60 / 60,
}
}
func getEndMessage(noOfObjects int) gs2.EndMessage {
return gs2.EndMessage{
ID: "someId",
NumberOfObjects: noOfObjects,
}
}
func generateMeterReadings(numberOfReadings int) []gs2.MeterReading {
var meterReadings []gs2.MeterReading
for i := 0; i < numberOfReadings; i++ {
meterReadings = append(meterReadings, generateMeterReading(i))
}
return meterReadings
}
func generateMeterReading(n int) gs2.MeterReading {
itoa := strconv.Itoa(n)
return gs2.MeterReading{
Reference: "meterpoint" + itoa,
Time: time.Time{},
Unit: "kWh",
Value: generateTriplet(),
MeterLocation: "location" + itoa,
Meter: "meter" + itoa,
Description: "description for entry " + itoa,
}
}
func generateTimeSeries(numberofSeries int) []gs2.TimeSeries {
var timeSeries []gs2.TimeSeries
for i := 0; i < numberofSeries; i++ {
timeSeries = append(timeSeries, generateTimeSerie(i))
}
return timeSeries
}
func generateTimeSerie(n int) gs2.TimeSeries {
itoa := strconv.Itoa(n)
numberOfValues := 24
triplets := generateTriplets(numberOfValues)
var sum float64
for _, triplet := range triplets {
sum += triplet.Value
}
return gs2.TimeSeries{
Reference: "meterpoint" + itoa,
Start: time.Time{},
Stop: time.Time{},
Step: time.Hour,
Unit: "kWh",
TypeOfValue: "interval",
DirectionOfFlow: "out",
Value: triplets,
NoOfValues: numberOfValues,
Sum: sum,
MeterLocation: "location" + itoa,
Meter: "meter" + itoa,
Description: "description for entry " + itoa,
}
}
func generateTriplets(numberOfTriplet int) []gs2.Triplet {
var triplets []gs2.Triplet
for i := 0; i < numberOfTriplet; i++ {
triplets = append(triplets, generateTriplet())
}
return triplets
}
func generateTriplet() gs2.Triplet {
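	// Truncate the random value to four decimal places so generated readings
	// stay below maxVal with fixed precision.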
return gs2.Triplet{
Value: math.Trunc(rand.Float64()*maxVal*10000) / 10000,
}
}
app.ts | import express, { Application } from "express";
import path from "path";
import cors from "cors";
import bodyParser from "body-parser";
import { createRouter } from "./router";
import { errorHandler, notFoundHandler } from "./middlewares";
import { Services } from "./services";
const rootPath = path.resolve(__dirname, "..");
export function createApp(services: Services): Application {
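  // Middleware order matters here: CORS and JSON body parsing run first,
  // then static assets and the /v1 API router, and the 404/error handlers
  // only fire when nothing earlier handled the request.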
const app = express();
app.use(cors());
app.use(bodyParser.json());
app.use(express.static(`${rootPath}/public`));
app.use("/v1", createRouter(services));
app.get("*", notFoundHandler());
app.use(errorHandler());
return app;
}
produce_images.py | from clothmanip.utils.utils import get_variant, argsparser, get_randomized_env, dump_commit_hashes, get_keys_and_dims, dump_goal
from clothmanip.envs.cloth import ClothEnvPickled as ClothEnv
import numpy as np
from rlkit.torch.sac.policies import TanhGaussianPolicy, MakeDeterministic, TanhScriptPolicy, CustomScriptPolicy, CustomTanhScriptPolicy, ScriptPolicy
import cv2
import os
from rlkit.envs.wrappers import NormalizedBoxEnv
def main(variant):
variant['save_folder'] = "/home/julius/robotics/clothmanip/experiments/paper_images"
env = ClothEnv(**variant['env_kwargs'], has_viewer=True, save_folder=variant['save_folder'])
env = NormalizedBoxEnv(env)
env = get_randomized_env(env, variant)
keys, dims = get_keys_and_dims(variant, env)
demo_path = variant['demo_paths'][0]
predefined_actions = np.genfromtxt(demo_path, delimiter=',')
iter_folder = os.path.join(variant['save_folder'], "close_no_corners", "0")
os.makedirs(os.path.join(iter_folder, "corners_images"), exist_ok=True)
#os.makedirs(os.path.join(iter_folder, "env_images"), exist_ok=True)
#os.makedirs(os.path.join(iter_folder, "cnn_images"), exist_ok=True)
#os.makedirs(os.path.join(iter_folder, "cnn_color_images"), exist_ok=True)
#os.makedirs(os.path.join(iter_folder, "cnn_color_full_images"), exist_ok=True)
policy = TanhScriptPolicy(
output_size=dims['action_dim'],
added_fc_input_size=dims['added_fc_input_size'],
aux_output_size=9,
**variant['policy_kwargs'],
)
eval_policy = MakeDeterministic(policy)
for step_number, delta in enumerate(predefined_actions):
print(step_number)
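        # Scale the recorded demo delta into the normalized [-1, 1] action
        # range expected by the wrapped env.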
a = delta/env.output_max
a = np.clip(a, -1, 1)
corner_image, eval_image, cnn_color_image_full, cnn_color_image, cnn_image = env.capture_images(None, mask_type=None)
cv2.imwrite(f'{iter_folder}/corners_images/{str(step_number).zfill(3)}.png', corner_image)
#cv2.imwrite(f'{iter_folder}/env_images/{str(step_number).zfill(3)}.png', eval_image)
#cv2.imwrite(f'{iter_folder}/cnn_images/{str(step_number).zfill(3)}.png', corner_image)
#cv2.imwrite(f'{iter_folder}/cnn_color_images/{str(step_number).zfill(3)}.png', cnn_color_image)
#cv2.imwrite(f'{iter_folder}/cnn_color_full_images/{str(step_number).zfill(3)}.png', cnn_color_image_full)
o, r, d, env_info = env.step(a)
if __name__ == "__main__":
args = argsparser()
variant, arg_str = get_variant(args)
    main(variant)
lib.rs | // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support code for rustc's built in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.
// NB: this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
// this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
// cargo) to detect this crate.
#![crate_name = "test"]
#![unstable(feature = "test", issue = "27812")]
#![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
html_root_url = "https://doc.rust-lang.org/nightly/",
test(attr(deny(warnings))))]
#![deny(warnings)]
#![feature(asm)]
#![feature(fnbox)]
#![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
#![feature(set_stdio)]
#![feature(panic_unwind)]
#![feature(staged_api)]
extern crate getopts;
extern crate term;
#[cfg(any(unix, target_os = "cloudabi"))]
extern crate libc;
extern crate panic_unwind;
pub use self::TestFn::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::any::Any;
use std::boxed::FnBox;
use std::cmp;
use std::collections::BTreeMap;
use std::env;
use std::fmt;
use std::fs::File;
use std::io::prelude::*;
use std::io;
use std::iter::repeat;
use std::path::PathBuf;
use std::sync::mpsc::{channel, Sender};
use std::sync::{Arc, Mutex};
use std::thread;
use std::time::{Instant, Duration};
const TEST_WARN_TIMEOUT_S: u64 = 60;
const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
// to be used by rustc to compile tests in libtest
pub mod test {
pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
TrFailedMsg, TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName,
DynTestName, DynTestFn, run_test, test_main, test_main_static, filter_tests,
parse_opts, StaticBenchFn, ShouldPanic, Options};
}
pub mod stats;
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum TestName {
StaticTestName(&'static str),
DynTestName(String),
}
impl TestName {
fn as_slice(&self) -> &str {
match *self {
StaticTestName(s) => s,
DynTestName(ref s) => s,
}
}
}
impl fmt::Display for TestName {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self.as_slice(), f)
}
}
#[derive(Clone, Copy, PartialEq, Eq)]
pub enum NamePadding {
PadNone,
PadOnRight,
}
impl TestDesc {
fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
let mut name = String::from(self.name.as_slice());
let fill = column_count.saturating_sub(name.len());
let pad = repeat(" ").take(fill).collect::<String>();
match align {
PadNone => name,
PadOnRight => {
name.push_str(&pad);
name
}
}
}
}
/// Represents a benchmark function.
pub trait TDynBenchFn: Send {
fn run(&self, harness: &mut Bencher);
}
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into threads.
pub enum TestFn {
StaticTestFn(fn()),
StaticBenchFn(fn(&mut Bencher)),
DynTestFn(Box<FnBox() + Send>),
DynBenchFn(Box<TDynBenchFn + 'static>),
}
impl TestFn {
fn padding(&self) -> NamePadding {
match *self {
StaticTestFn(..) => PadNone,
StaticBenchFn(..) => PadOnRight,
DynTestFn(..) => PadNone,
DynBenchFn(..) => PadOnRight,
}
}
}
impl fmt::Debug for TestFn {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match *self {
StaticTestFn(..) => "StaticTestFn(..)",
StaticBenchFn(..) => "StaticBenchFn(..)",
DynTestFn(..) => "DynTestFn(..)",
DynBenchFn(..) => "DynBenchFn(..)",
})
}
}
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Clone)]
pub struct Bencher {
mode: BenchMode,
summary: Option<stats::Summary>,
pub bytes: u64,
}
#[derive(Clone, PartialEq, Eq)]
pub enum BenchMode {
Auto,
Single,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
No,
Yes,
YesWithMessage(&'static str),
}
// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestDesc {
pub name: TestName,
pub ignore: bool,
pub should_panic: ShouldPanic,
pub allow_fail: bool,
}
#[derive(Debug)]
pub struct TestDescAndFn {
pub desc: TestDesc,
pub testfn: TestFn,
}
#[derive(Clone, PartialEq, Debug, Copy)]
pub struct Metric {
value: f64,
noise: f64,
}
impl Metric {
pub fn new(value: f64, noise: f64) -> Metric {
Metric {
value,
noise,
}
}
}
/// In case we want to add other options as well, just add them in this struct.
#[derive(Copy, Clone, Debug)]
pub struct Options {
display_output: bool,
}
impl Options {
pub fn new() -> Options {
Options {
display_output: false,
}
}
pub fn display_output(mut self, display_output: bool) -> Options {
self.display_output = display_output;
self
}
}
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
let mut opts = match parse_opts(args) {
Some(Ok(o)) => o,
Some(Err(msg)) => panic!("{:?}", msg),
None => return,
};
opts.options = options;
if opts.list {
if let Err(e) = list_tests_console(&opts, tests) {
panic!("io error when listing tests: {:?}", e);
}
} else {
match run_tests_console(&opts, tests) {
Ok(true) => {}
Ok(false) => std::process::exit(101),
Err(e) => panic!("io error when running tests: {:?}", e),
}
}
}
// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a Vec<TestDescAndFn> is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a Vec<>
// rather than a &[].
pub fn test_main_static(tests: &[TestDescAndFn]) {
let args = env::args().collect::<Vec<_>>();
let owned_tests = tests.iter()
.map(|t| {
match t.testfn {
StaticTestFn(f) => {
TestDescAndFn {
testfn: StaticTestFn(f),
desc: t.desc.clone(),
}
}
StaticBenchFn(f) => {
TestDescAndFn {
testfn: StaticBenchFn(f),
desc: t.desc.clone(),
}
}
_ => panic!("non-static tests passed to test::test_main_static"),
}
})
.collect();
test_main(&args, owned_tests, Options::new())
}
#[derive(Copy, Clone, Debug)]
pub enum ColorConfig {
AutoColor,
AlwaysColor,
NeverColor,
}
#[derive(Debug)]
pub struct TestOpts {
pub list: bool,
pub filter: Option<String>,
pub filter_exact: bool,
pub run_ignored: bool,
pub run_tests: bool,
pub bench_benchmarks: bool,
pub logfile: Option<PathBuf>,
pub nocapture: bool,
pub color: ColorConfig,
pub quiet: bool,
pub test_threads: Option<usize>,
pub skip: Vec<String>,
pub options: Options,
}
impl TestOpts {
#[cfg(test)]
fn new() -> TestOpts {
TestOpts {
list: false,
filter: None,
filter_exact: false,
run_ignored: false,
run_tests: false,
bench_benchmarks: false,
logfile: None,
nocapture: false,
color: AutoColor,
quiet: false,
test_threads: None,
skip: vec![],
options: Options::new(),
}
}
}
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;
fn optgroups() -> getopts::Options {
let mut opts = getopts::Options::new();
opts.optflag("", "ignored", "Run ignored tests")
.optflag("", "test", "Run tests and not benchmarks")
.optflag("", "bench", "Run benchmarks instead of tests")
.optflag("", "list", "List all tests and benchmarks")
.optflag("h", "help", "Display this message (longer with --help)")
.optopt("", "logfile", "Write logs to the specified file instead \
of stdout", "PATH")
.optflag("", "nocapture", "don't capture stdout/stderr of each \
task, allow printing directly")
.optopt("", "test-threads", "Number of threads used for running tests \
in parallel", "n_threads")
.optmulti("", "skip", "Skip tests whose names contain FILTER (this flag can \
be used multiple times)","FILTER")
.optflag("q", "quiet", "Display one character per test instead of one line")
.optflag("", "exact", "Exactly match filters rather than by substring")
.optopt("", "color", "Configure coloring of output:
auto = colorize if stdout is a tty and tests are run on serially (default);
always = always colorize output;
never = never colorize output;", "auto|always|never");
return opts
}
fn usage(binary: &str, options: &getopts::Options) {
let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
println!(r#"{usage}
The FILTER string is tested against the name of all tests, and only those
tests whose names contain the filter are run.
By default, all tests are run in parallel. This can be altered with the
--test-threads flag or the RUST_TEST_THREADS environment variable when running
tests (set it to 1).
All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
environment variable to a value other than "0". Logging is not captured by default.
Test Attributes:
#[test] - Indicates a function is a test to be run. This function
takes no arguments.
#[bench] - Indicates a function is a benchmark to be run. This
function takes one argument (test::Bencher).
#[should_panic] - This function (also labeled with #[test]) will only pass if
the code causes a panic (an assertion failure or panic!)
A message may be provided, which the failure string must
contain: #[should_panic(expected = "foo")].
#[ignore] - When applied to a function which is already attributed as a
test, then the test runner will ignore these tests during
normal test runs. Running with --ignored will run these
tests."#,
usage = options.usage(&message));
}
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
let opts = optgroups();
let args = args.get(1..).unwrap_or(args);
let matches = match opts.parse(args) {
Ok(m) => m,
Err(f) => return Some(Err(f.to_string())),
};
if matches.opt_present("h") {
usage(&args[0], &opts);
return None;
}
let filter = if !matches.free.is_empty() {
Some(matches.free[0].clone())
} else {
None
};
let run_ignored = matches.opt_present("ignored");
let quiet = matches.opt_present("quiet");
let exact = matches.opt_present("exact");
let list = matches.opt_present("list");
let logfile = matches.opt_str("logfile");
let logfile = logfile.map(|s| PathBuf::from(&s));
let bench_benchmarks = matches.opt_present("bench");
let run_tests = !bench_benchmarks || matches.opt_present("test");
let mut nocapture = matches.opt_present("nocapture");
if !nocapture {
nocapture = match env::var("RUST_TEST_NOCAPTURE") {
Ok(val) => &val != "0",
Err(_) => false
};
}
let test_threads = match matches.opt_str("test-threads") {
Some(n_str) =>
match n_str.parse::<usize>() {
Ok(0) =>
return Some(Err(format!("argument for --test-threads must not be 0"))),
Ok(n) => Some(n),
Err(e) =>
return Some(Err(format!("argument for --test-threads must be a number > 0 \
(error: {})", e)))
},
None =>
None,
};
let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
Some("auto") | None => AutoColor,
Some("always") => AlwaysColor,
Some("never") => NeverColor,
Some(v) => {
return Some(Err(format!("argument for --color must be auto, always, or never (was \
{})",
v)))
}
};
let test_opts = TestOpts {
list,
filter,
filter_exact: exact,
run_ignored,
run_tests,
bench_benchmarks,
logfile,
nocapture,
color,
quiet,
test_threads,
skip: matches.opt_strs("skip"),
options: Options::new(),
};
Some(Ok(test_opts))
}
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
ns_iter_summ: stats::Summary,
mb_s: usize,
}
#[derive(Clone, PartialEq)]
pub enum TestResult {
TrOk,
TrFailed,
TrFailedMsg(String),
TrIgnored,
TrAllowedFail,
TrBench(BenchSamples),
}
unsafe impl Send for TestResult {}
enum OutputLocation<T> {
Pretty(Box<term::StdoutTerminal>),
Raw(T),
}
struct ConsoleTestState<T> {
log_out: Option<File>,
out: OutputLocation<T>,
use_color: bool,
quiet: bool,
total: usize,
passed: usize,
failed: usize,
ignored: usize,
allowed_fail: usize,
filtered_out: usize,
measured: usize,
metrics: MetricMap,
failures: Vec<(TestDesc, Vec<u8>)>,
not_failures: Vec<(TestDesc, Vec<u8>)>,
max_name_len: usize, // number of columns to fill when aligning names
options: Options,
}
impl<T: Write> ConsoleTestState<T> {
pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
let log_out = match opts.logfile {
Some(ref path) => Some(File::create(path)?),
None => None,
};
let out = match term::stdout() {
None => Raw(io::stdout()),
Some(t) => Pretty(t),
};
Ok(ConsoleTestState {
out,
log_out,
use_color: use_color(opts),
quiet: opts.quiet,
total: 0,
passed: 0,
failed: 0,
ignored: 0,
allowed_fail: 0,
filtered_out: 0,
measured: 0,
metrics: MetricMap::new(),
failures: Vec::new(),
not_failures: Vec::new(),
max_name_len: 0,
options: opts.options,
})
}
pub fn write_ok(&mut self) -> io::Result<()> {
self.write_short_result("ok", ".", term::color::GREEN)
}
pub fn write_failed(&mut self) -> io::Result<()> {
self.write_short_result("FAILED", "F", term::color::RED)
}
pub fn write_ignored(&mut self) -> io::Result<()> {
self.write_short_result("ignored", "i", term::color::YELLOW)
}
pub fn write_allowed_fail(&mut self) -> io::Result<()> {
self.write_short_result("FAILED (allowed)", "a", term::color::YELLOW)
}
pub fn write_bench(&mut self) -> io::Result<()> {
self.write_pretty("bench", term::color::CYAN)
}
pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
-> io::Result<()> {
if self.quiet {
self.write_pretty(quiet, color)?;
if self.current_test_count() % QUIET_MODE_MAX_COLUMN == QUIET_MODE_MAX_COLUMN - 1 {
// we insert a new line every 100 dots in order to flush the
// screen when dealing with line-buffered output (e.g. piping to
// `stamp` in the rust CI).
self.write_plain("\n")?;
}
Ok(())
} else {
self.write_pretty(verbose, color)?;
self.write_plain("\n")
}
}
pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
match self.out {
Pretty(ref mut term) => {
if self.use_color {
term.fg(color)?;
}
term.write_all(word.as_bytes())?;
if self.use_color {
term.reset()?;
}
term.flush()
}
Raw(ref mut stdout) => {
stdout.write_all(word.as_bytes())?;
stdout.flush()
}
}
}
pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
let s = s.as_ref();
match self.out {
Pretty(ref mut term) => {
term.write_all(s.as_bytes())?;
term.flush()
}
Raw(ref mut stdout) => {
stdout.write_all(s.as_bytes())?;
stdout.flush()
}
}
}
pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
self.total = len;
let noun = if len != 1 {
"tests"
} else {
"test"
};
self.write_plain(&format!("\nrunning {} {}\n", len, noun))
}
pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
if self.quiet && align != PadOnRight {
Ok(())
} else {
let name = test.padded_name(self.max_name_len, align);
self.write_plain(&format!("test {} ... ", name))
}
}
pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
match *result {
TrOk => self.write_ok(),
TrFailed | TrFailedMsg(_) => self.write_failed(),
TrIgnored => self.write_ignored(),
TrAllowedFail => self.write_allowed_fail(),
TrBench(ref bs) => {
self.write_bench()?;
self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
}
}
}
pub fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
self.write_plain(&format!("test {} has been running for over {} seconds\n",
desc.name,
TEST_WARN_TIMEOUT_S))
}
pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
let msg = msg.as_ref();
match self.log_out {
None => Ok(()),
Some(ref mut o) => o.write_all(msg.as_bytes()),
}
}
pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
self.write_log(
format!("{} {}\n",
match *result {
TrOk => "ok".to_owned(),
TrFailed => "failed".to_owned(),
TrFailedMsg(ref msg) => format!("failed: {}", msg),
TrIgnored => "ignored".to_owned(),
TrAllowedFail => "failed (allowed)".to_owned(),
TrBench(ref bs) => fmt_bench_samples(bs),
},
test.name))
}
pub fn write_failures(&mut self) -> io::Result<()> {
self.write_plain("\nfailures:\n")?;
let mut failures = Vec::new();
let mut fail_out = String::new();
for &(ref f, ref stdout) in &self.failures {
failures.push(f.name.to_string());
if !stdout.is_empty() {
fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
let output = String::from_utf8_lossy(stdout);
fail_out.push_str(&output);
fail_out.push_str("\n");
}
}
if !fail_out.is_empty() {
self.write_plain("\n")?;
self.write_plain(&fail_out)?;
}
self.write_plain("\nfailures:\n")?;
failures.sort();
for name in &failures {
self.write_plain(&format!(" {}\n", name))?;
}
Ok(())
}
pub fn write_outputs(&mut self) -> io::Result<()> {
self.write_plain("\nsuccesses:\n")?;
let mut successes = Vec::new();
let mut stdouts = String::new();
for &(ref f, ref stdout) in &self.not_failures {
successes.push(f.name.to_string());
if !stdout.is_empty() {
stdouts.push_str(&format!("---- {} stdout ----\n\t", f.name));
let output = String::from_utf8_lossy(stdout);
stdouts.push_str(&output);
stdouts.push_str("\n");
}
}
if !stdouts.is_empty() {
self.write_plain("\n")?;
self.write_plain(&stdouts)?;
}
self.write_plain("\nsuccesses:\n")?;
successes.sort();
for name in &successes {
self.write_plain(&format!(" {}\n", name))?;
}
Ok(())
}
fn current_test_count(&self) -> usize {
self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
}
pub fn write_run_finish(&mut self) -> io::Result<bool> {
assert!(self.current_test_count() == self.total);
if self.options.display_output {
self.write_outputs()?;
}
let success = self.failed == 0;
if !success {
self.write_failures()?;
}
self.write_plain("\ntest result: ")?;
if success {
// There's no parallelism at this point so it's safe to use color
self.write_pretty("ok", term::color::GREEN)?;
} else {
self.write_pretty("FAILED", term::color::RED)?;
}
let s = if self.allowed_fail > 0 {
format!(
". {} passed; {} failed ({} allowed); {} ignored; {} measured; {} filtered out\n\n",
self.passed,
self.failed + self.allowed_fail,
self.allowed_fail,
self.ignored,
self.measured,
self.filtered_out)
} else {
format!(
". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n",
self.passed,
self.failed,
self.ignored,
self.measured,
self.filtered_out)
};
self.write_plain(&s)?;
return Ok(success);
}
}
// Format a number with thousands separators
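// e.g. fmt_thousands_sep(1250000, ',') yields "1,250,000" (illustrative).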
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
use std::fmt::Write;
let mut output = String::new();
let mut trailing = false;
for &pow in &[9, 6, 3, 0] {
let base = 10_usize.pow(pow);
if pow == 0 || trailing || n / base != 0 {
if !trailing {
output.write_fmt(format_args!("{}", n / base)).unwrap();
} else {
output.write_fmt(format_args!("{:03}", n / base)).unwrap();
}
if pow != 0 {
output.push(sep);
}
trailing = true;
}
n %= base;
}
output
}
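// Renders a line such as `      12,345 ns/iter (+/- 678) = 10 MB/s`
// (illustrative values; the MB/s part is omitted when `mb_s` is zero).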
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
use std::fmt::Write;
let mut output = String::new();
let median = bs.ns_iter_summ.median as usize;
let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
fmt_thousands_sep(median, ','),
fmt_thousands_sep(deviation, ',')))
.unwrap();
if bs.mb_s != 0 {
output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
}
output
}
// List the tests to console, and optionally to logfile. Filters are honored.
pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
let mut ntest = 0;
let mut nbench = 0;
for test in filter_tests(&opts, tests) {
use TestFn::*;
let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test;
let fntype = match testfn {
StaticTestFn(..) | DynTestFn(..) => { ntest += 1; "test" },
StaticBenchFn(..) | DynBenchFn(..) => { nbench += 1; "benchmark" },
};
st.write_plain(format!("{}: {}\n", name, fntype))?;
st.write_log(format!("{} {}\n", fntype, name))?;
}
fn plural(count: u32, s: &str) -> String {
match count {
1 => format!("{} {}", 1, s),
n => format!("{} {}s", n, s),
}
}
if !opts.quiet {
if ntest != 0 || nbench != 0 {
st.write_plain("\n")?;
}
st.write_plain(format!("{}, {}\n",
plural(ntest, "test"),
plural(nbench, "benchmark")))?;
}
Ok(())
}
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
match (*event).clone() {
TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
TeWait(ref test, padding) => st.write_test_start(test, padding),
TeTimeout(ref test) => st.write_timeout(test),
TeResult(test, result, stdout) => {
st.write_log_result(&test, &result)?;
st.write_result(&result)?;
match result {
TrOk => {
st.passed += 1;
st.not_failures.push((test, stdout));
}
TrIgnored => st.ignored += 1,
TrAllowedFail => st.allowed_fail += 1,
TrBench(bs) => {
st.metrics.insert_metric(test.name.as_slice(),
bs.ns_iter_summ.median,
bs.ns_iter_summ.max - bs.ns_iter_summ.min);
st.measured += 1
}
TrFailed => {
st.failed += 1;
st.failures.push((test, stdout));
}
TrFailedMsg(msg) => {
st.failed += 1;
let mut stdout = stdout;
stdout.extend_from_slice(
format!("note: {}", msg).as_bytes()
);
st.failures.push((test, stdout));
}
}
Ok(())
}
}
}
let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
fn len_if_padded(t: &TestDescAndFn) -> usize {
match t.testfn.padding() {
PadNone => 0,
PadOnRight => t.desc.name.as_slice().len(),
}
}
if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) {
let n = t.desc.name.as_slice();
st.max_name_len = n.len();
}
run_tests(opts, tests, |x| callback(&x, &mut st))?;
return st.write_run_finish();
}
#[test]
fn should_sort_failures_before_printing_them() {
let test_a = TestDesc {
name: StaticTestName("a"),
ignore: false,
should_panic: ShouldPanic::No,
allow_fail: false,
};
let test_b = TestDesc {
name: StaticTestName("b"),
ignore: false,
should_panic: ShouldPanic::No,
allow_fail: false,
};
let mut st = ConsoleTestState {
log_out: None,
out: Raw(Vec::new()),
use_color: false,
quiet: false,
total: 0,
passed: 0,
failed: 0,
ignored: 0,
allowed_fail: 0,
filtered_out: 0,
measured: 0,
max_name_len: 10,
metrics: MetricMap::new(),
failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
options: Options::new(),
not_failures: Vec::new(),
};
st.write_failures().unwrap();
let s = match st.out {
Raw(ref m) => String::from_utf8_lossy(&m[..]),
Pretty(_) => unreachable!(),
};
let apos = s.find("a").unwrap();
let bpos = s.find("b").unwrap();
assert!(apos < bpos);
}
fn use_color(opts: &TestOpts) -> bool {
match opts.color {
AutoColor => !opts.nocapture && stdout_isatty(),
AlwaysColor => true,
NeverColor => false,
}
}
#[cfg(any(target_os = "cloudabi",
target_os = "redox",
all(target_arch = "wasm32", not(target_os = "emscripten"))))]
fn stdout_isatty() -> bool {
    // No terminal support on these platforms; assume stdout is not a tty.
    false
}
#[cfg(unix)]
fn stdout_isatty() -> bool {
unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
}
#[cfg(windows)]
fn stdout_isatty() -> bool {
type DWORD = u32;
type BOOL = i32;
type HANDLE = *mut u8;
type LPDWORD = *mut u32;
const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
extern "system" {
fn GetStdHandle(which: DWORD) -> HANDLE;
fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
}
unsafe {
let handle = GetStdHandle(STD_OUTPUT_HANDLE);
let mut out = 0;
GetConsoleMode(handle, &mut out) != 0
}
}
#[derive(Clone)]
pub enum TestEvent {
TeFiltered(Vec<TestDesc>),
TeWait(TestDesc, NamePadding),
TeResult(TestDesc, TestResult, Vec<u8>),
TeTimeout(TestDesc),
TeFilteredOut(usize),
}
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
where F: FnMut(TestEvent) -> io::Result<()>
{
use std::collections::HashMap;
use std::sync::mpsc::RecvTimeoutError;
let tests_len = tests.len();
let mut filtered_tests = filter_tests(opts, tests);
if !opts.bench_benchmarks {
filtered_tests = convert_benchmarks_to_tests(filtered_tests);
}
let filtered_out = tests_len - filtered_tests.len();
callback(TeFilteredOut(filtered_out))?;
let filtered_descs = filtered_tests.iter()
.map(|t| t.desc.clone())
.collect();
callback(TeFiltered(filtered_descs))?;
let (filtered_tests, filtered_benchs): (Vec<_>, _) =
filtered_tests.into_iter().partition(|e| {
match e.testfn {
StaticTestFn(_) | DynTestFn(_) => true,
_ => false,
}
});
let concurrency = match opts.test_threads {
Some(n) => n,
None => get_concurrency(),
};
let mut remaining = filtered_tests;
remaining.reverse();
let mut pending = 0;
let (tx, rx) = channel::<MonitorMsg>();
let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
let now = Instant::now();
let timed_out = running_tests.iter()
.filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone())} else { None })
.collect();
for test in &timed_out {
running_tests.remove(test);
}
timed_out
};
fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
running_tests.values().min().map(|next_timeout| {
let now = Instant::now();
if *next_timeout >= now {
*next_timeout - now
} else {
Duration::new(0, 0)
}})
};
if concurrency == 1 {
while !remaining.is_empty() {
let test = remaining.pop().unwrap();
callback(TeWait(test.desc.clone(), test.testfn.padding()))?;
run_test(opts, !opts.run_tests, test, tx.clone());
let (test, result, stdout) = rx.recv().unwrap();
callback(TeResult(test, result, stdout))?;
}
} else {
while pending > 0 || !remaining.is_empty() {
while pending < concurrency && !remaining.is_empty() {
let test = remaining.pop().unwrap();
let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
running_tests.insert(test.desc.clone(), timeout);
run_test(opts, !opts.run_tests, test, tx.clone());
pending += 1;
}
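            // Block until a result arrives, emitting TeTimeout events for any
            // test that has been running longer than TEST_WARN_TIMEOUT_S.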
let mut res;
loop {
if let Some(timeout) = calc_timeout(&running_tests) {
res = rx.recv_timeout(timeout);
for test in get_timed_out_tests(&mut running_tests) {
callback(TeTimeout(test))?;
}
if res != Err(RecvTimeoutError::Timeout) {
break;
}
} else {
res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
break;
}
}
let (desc, result, stdout) = res.unwrap();
running_tests.remove(&desc);
callback(TeWait(desc.clone(), PadNone))?;
callback(TeResult(desc, result, stdout))?;
pending -= 1;
}
}
if opts.bench_benchmarks {
// All benchmarks run at the end, in serial.
for b in filtered_benchs {
callback(TeWait(b.desc.clone(), b.testfn.padding()))?;
run_test(opts, false, b, tx.clone());
let (test, result, stdout) = rx.recv().unwrap();
callback(TeResult(test, result, stdout))?;
}
}
Ok(())
}
#[allow(deprecated)]
fn get_concurrency() -> usize {
return match env::var("RUST_TEST_THREADS") {
Ok(s) => {
let opt_n: Option<usize> = s.parse().ok();
match opt_n {
Some(n) if n > 0 => n,
_ => {
panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
s)
}
}
}
Err(..) => num_cpus(),
};
#[cfg(windows)]
#[allow(bad_style)]
fn num_cpus() -> usize {
#[repr(C)]
struct SYSTEM_INFO {
wProcessorArchitecture: u16,
wReserved: u16,
dwPageSize: u32,
lpMinimumApplicationAddress: *mut u8,
lpMaximumApplicationAddress: *mut u8,
dwActiveProcessorMask: *mut u8,
dwNumberOfProcessors: u32,
dwProcessorType: u32,
dwAllocationGranularity: u32,
wProcessorLevel: u16,
wProcessorRevision: u16,
}
extern "system" {
fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
}
unsafe {
let mut sysinfo = std::mem::zeroed();
GetSystemInfo(&mut sysinfo);
sysinfo.dwNumberOfProcessors as usize
}
}
#[cfg(target_os = "redox")]
fn num_cpus() -> usize {
// FIXME: Implement num_cpus on Redox
1
}
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
fn num_cpus() -> usize {
1
}
#[cfg(any(target_os = "android",
target_os = "cloudabi",
target_os = "emscripten",
target_os = "fuchsia",
target_os = "ios",
target_os = "linux",
target_os = "macos",
target_os = "solaris"))]
fn num_cpus() -> usize {
unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
}
#[cfg(any(target_os = "freebsd",
target_os = "dragonfly",
target_os = "bitrig",
target_os = "netbsd"))]
fn num_cpus() -> usize {
use std::ptr;
let mut cpus: libc::c_uint = 0;
let mut cpus_size = std::mem::size_of_val(&cpus);
unsafe {
cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
}
if cpus < 1 {
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
unsafe {
libc::sysctl(mib.as_mut_ptr(),
2,
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
ptr::null_mut(),
0);
}
if cpus < 1 {
cpus = 1;
}
}
cpus as usize
}
#[cfg(target_os = "openbsd")]
fn num_cpus() -> usize {
use std::ptr;
let mut cpus: libc::c_uint = 0;
let mut cpus_size = std::mem::size_of_val(&cpus);
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
unsafe {
libc::sysctl(mib.as_mut_ptr(),
2,
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
ptr::null_mut(),
0);
}
if cpus < 1 {
cpus = 1;
}
cpus as usize
}
#[cfg(target_os = "haiku")]
fn num_cpus() -> usize {
// FIXME: implement
1
}
}
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
let mut filtered = tests;
// Remove tests that don't match the test filter
filtered = match opts.filter {
None => filtered,
Some(ref filter) => {
filtered.into_iter()
.filter(|test| {
if opts.filter_exact {
test.desc.name.as_slice() == &filter[..]
} else {
test.desc.name.as_slice().contains(&filter[..])
}
})
.collect()
}
};
// Skip tests that match any of the skip filters
filtered = filtered.into_iter()
.filter(|t| !opts.skip.iter().any(|sf| {
if opts.filter_exact {
t.desc.name.as_slice() == &sf[..]
} else {
t.desc.name.as_slice().contains(&sf[..])
}
}))
.collect();
// Maybe pull out the ignored test and unignore them
filtered = if !opts.run_ignored {
filtered
} else {
fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
if test.desc.ignore {
let TestDescAndFn {desc, testfn} = test;
Some(TestDescAndFn {
desc: TestDesc { ignore: false, ..desc },
testfn,
})
} else {
None
}
}
filtered.into_iter().filter_map(filter).collect()
};
// Sort the tests alphabetically
filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
filtered
}
pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
// convert benchmarks to tests, if we're not benchmarking them
tests.into_iter().map(|x| {
let testfn = match x.testfn {
DynBenchFn(bench) => {
DynTestFn(Box::new(move || {
bench::run_once(|b| {
__rust_begin_short_backtrace(|| bench.run(b))
})
}))
}
StaticBenchFn(benchfn) => {
DynTestFn(Box::new(move || {
bench::run_once(|b| {
__rust_begin_short_backtrace(|| benchfn(b))
})
}))
}
f => f,
};
TestDescAndFn {
desc: x.desc,
testfn,
}
}).collect()
}
pub fn run_test(opts: &TestOpts,
force_ignore: bool,
test: TestDescAndFn,
monitor_ch: Sender<MonitorMsg>) {
let TestDescAndFn {desc, testfn} = test;
let ignore_because_panic_abort =
cfg!(target_arch = "wasm32") &&
!cfg!(target_os = "emscripten") &&
desc.should_panic != ShouldPanic::No;
if force_ignore || desc.ignore || ignore_because_panic_abort {
monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
return;
}
fn run_test_inner(desc: TestDesc,
monitor_ch: Sender<MonitorMsg>,
nocapture: bool,
testfn: Box<FnBox() + Send>) {
struct Sink(Arc<Mutex<Vec<u8>>>);
impl Write for Sink {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
Write::write(&mut *self.0.lock().unwrap(), data)
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
// Buffer for capturing standard I/O
let data = Arc::new(Mutex::new(Vec::new()));
let data2 = data.clone();
let name = desc.name.clone();
let runtest = move || {
let oldio = if !nocapture {
Some((
io::set_print(Some(Box::new(Sink(data2.clone())))),
io::set_panic(Some(Box::new(Sink(data2))))
))
} else {
None
};
let result = catch_unwind(AssertUnwindSafe(testfn));
if let Some((printio, panicio)) = oldio {
io::set_print(printio);
io::set_panic(panicio);
};
let test_result = calc_result(&desc, result);
let stdout = data.lock().unwrap().to_vec();
monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
};
// If the platform is single-threaded we're just going to run
// the test synchronously, regardless of the concurrency
// level.
let supports_threads =
!cfg!(target_os = "emscripten") &&
!cfg!(target_arch = "wasm32");
if supports_threads {
let cfg = thread::Builder::new().name(match name {
DynTestName(ref name) => name.clone(),
StaticTestName(name) => name.to_owned(),
});
cfg.spawn(runtest).unwrap();
} else {
runtest();
}
}
match testfn {
DynBenchFn(bencher) => {
let bs = ::bench::benchmark(|harness| bencher.run(harness));
monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
return;
}
StaticBenchFn(benchfn) => {
let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
return;
}
DynTestFn(f) => {
let cb = move || {
__rust_begin_short_backtrace(f)
};
run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
}
StaticTestFn(f) =>
run_test_inner(desc, monitor_ch, opts.nocapture,
Box::new(move || __rust_begin_short_backtrace(f))),
}
}
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
f()
}
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
match (&desc.should_panic, task_result) {
(&ShouldPanic::No, Ok(())) |
(&ShouldPanic::Yes, Err(_)) => TrOk,
(&ShouldPanic::YesWithMessage(msg), Err(ref err)) =>
if err.downcast_ref::<String>()
.map(|e| &**e)
.or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
.map(|e| e.contains(msg))
.unwrap_or(false) {
TrOk
} else {
if desc.allow_fail {
TrAllowedFail
} else {
TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
}
},
_ if desc.allow_fail => TrAllowedFail,
_ => TrFailed,
}
}
#[derive(Clone, PartialEq)]
pub struct MetricMap(BTreeMap<String, Metric>);
impl MetricMap {
pub fn new() -> MetricMap {
MetricMap(BTreeMap::new())
}
/// Insert a named `value` (+/- `noise`) metric into the map. The value
/// must be non-negative. The `noise` indicates the uncertainty of the
/// metric, which doubles as the "noise range" of acceptable
/// pairwise-regressions on this named value, when comparing from one
/// metric to the next using `compare_to_old`.
///
/// If `noise` is positive, then it means this metric is of a value
/// you want to see grow smaller, so a change larger than `noise` in the
/// positive direction represents a regression.
///
/// If `noise` is negative, then it means this metric is of a value
/// you want to see grow larger, so a change larger than `noise` in the
/// negative direction represents a regression.
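///
/// A minimal sketch of recording both directions (the metric names and
/// numbers below are illustrative, not taken from any real suite):
///
/// ```ignore
/// let mut mm = MetricMap::new();
/// mm.insert_metric("alloc-bytes", 1024.0, 64.0);  // smaller is better
/// mm.insert_metric("throughput", 900.0, -50.0);   // larger is better
/// println!("{}", mm.fmt_metrics());
/// ```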
pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
let m = Metric {
value,
noise,
};
self.0.insert(name.to_owned(), m);
}
pub fn fmt_metrics(&self) -> String {
let v = self.0
.iter()
.map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
.collect::<Vec<_>>();
v.join(", ")
}
}
// Benchmarking
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
#[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
pub fn black_box<T>(dummy: T) -> T {
// we need to "use" the argument in some way LLVM can't
// introspect.
unsafe { asm!("" : : "r"(&dummy)) }
dummy
}
#[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
#[inline(never)]
pub fn black_box<T>(dummy: T) -> T {
dummy
}
impl Bencher {
/// Callback for benchmark functions to run in their body.
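///
/// A sketch of typical usage (the workload is made up):
///
/// ```ignore
/// fn bench_sum(b: &mut Bencher) {
///     let data: Vec<u64> = (0..1000).collect();
///     // The harness already routes the closure's result through
///     // `black_box`, so returning the sum is enough to keep it live.
///     b.iter(|| data.iter().sum::<u64>());
/// }
/// ```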
pub fn iter<T, F>(&mut self, mut inner: F)
where F: FnMut() -> T
{
if self.mode == BenchMode::Single {
ns_iter_inner(&mut inner, 1);
return;
}
self.summary = Some(iter(&mut inner));
}
pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
where F: FnMut(&mut Bencher)
{
f(self);
return self.summary;
}
}
fn ns_from_dur(dur: Duration) -> u64 {
dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
}
fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
where F: FnMut() -> T
{
let start = Instant::now();
for _ in 0..k {
black_box(inner());
}
return ns_from_dur(start.elapsed());
}
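/// Repeatedly sample `inner`, scaling the per-sample iteration count
/// automatically, until the median stabilizes (after at least 100ms of
/// measurement) or the 3s time budget is exhausted.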
pub fn iter<T, F>(inner: &mut F) -> stats::Summary
where F: FnMut() -> T
{
// Initial bench run to get ballpark figure.
let ns_single = ns_iter_inner(inner, 1);
// Try to estimate iter count for 1ms falling back to 1m
// iterations if first run took < 1ns.
let ns_target_total = 1_000_000; // 1ms
let mut n = ns_target_total / cmp::max(1, ns_single);
// if the first run took more than 1ms we don't want to just
// be left doing 0 iterations on every loop. The unfortunate
// side effect of not being able to do as many runs is
// automatically handled by the statistical analysis below
// (i.e. larger error bars).
n = cmp::max(1, n);
let mut total_run = Duration::new(0, 0);
let samples: &mut [f64] = &mut [0.0_f64; 50];
loop {
let loop_start = Instant::now();
for p in &mut *samples {
*p = ns_iter_inner(inner, n) as f64 / n as f64;
}
stats::winsorize(samples, 5.0);
let summ = stats::Summary::new(samples);
for p in &mut *samples {
let ns = ns_iter_inner(inner, 5 * n);
*p = ns as f64 / (5 * n) as f64;
}
stats::winsorize(samples, 5.0);
let summ5 = stats::Summary::new(samples);
let loop_run = loop_start.elapsed();
// If we've run for 100ms and seem to have converged to a
// stable median.
if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
summ.median - summ5.median < summ5.median_abs_dev {
return summ5;
}
total_run = total_run + loop_run;
// Longest we ever run for is 3s.
if total_run > Duration::from_secs(3) {
return summ5;
}
// If we overflow here just return the results so far. We check a
// multiplier of 10 because we're about to multiply by 2 and the
// next iteration of the loop will also multiply by 5 (to calculate
// the summ5 result)
n = match n.checked_mul(10) {
Some(_) => n * 2,
None => {
return summ5;
}
};
}
}
pub mod bench {
use std::cmp;
use stats;
use super::{Bencher, BenchSamples, BenchMode};
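/// Drive `f` through the auto-tuned measurement loop; if the bencher
/// recorded a byte count, an approximate MB/s figure is derived from the
/// median iteration time.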
pub fn benchmark<F>(f: F) -> BenchSamples
where F: FnMut(&mut Bencher)
{
let mut bs = Bencher {
mode: BenchMode::Auto,
summary: None,
bytes: 0,
};
return match bs.bench(f) {
Some(ns_iter_summ) => {
let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
let mb_s = bs.bytes * 1000 / ns_iter;
BenchSamples {
ns_iter_summ,
mb_s: mb_s as usize,
}
}
None => {
// iter not called, so no data.
// FIXME: error in this case?
let samples: &mut [f64] = &mut [0.0_f64; 1];
BenchSamples {
ns_iter_summ: stats::Summary::new(samples),
mb_s: 0,
}
}
};
}
pub fn run_once<F>(f: F)
where F: FnMut(&mut Bencher)
{
let mut bs = Bencher {
mode: BenchMode::Single,
summary: None,
bytes: 0,
};
bs.bench(f);
}
}
#[cfg(test)]
mod tests {
use test::{TrFailed, TrFailedMsg, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc,
TestDescAndFn, TestOpts, run_test, MetricMap, StaticTestName, DynTestName,
DynTestFn, ShouldPanic};
use std::sync::mpsc::channel;
use bench;
use Bencher;
#[test]
pub fn do_not_run_ignored_tests() {
fn f() {
panic!();
}
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: true,
should_panic: ShouldPanic::No,
allow_fail: false,
},
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res != TrOk);
}
#[test]
pub fn ignored_tests_result_in_ignored() {
fn f() {}
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: true,
should_panic: ShouldPanic::No,
allow_fail: false,
},
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrIgnored);
}
#[test]
fn test_should_panic() {
fn f() {
panic!();
}
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_panic: ShouldPanic::Yes,
allow_fail: false,
},
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrOk);
}
#[test]
fn test_should_panic_good_message() {
fn f() {
panic!("an error message");
}
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_panic: ShouldPanic::YesWithMessage("error message"),
allow_fail: false,
},
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrOk);
}
#[test]
fn test_should_panic_bad_message() {
fn f() {
panic!("an error message");
}
let expected = "foobar";
let failed_msg = "Panic did not include expected string";
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_panic: ShouldPanic::YesWithMessage(expected),
allow_fail: false,
},
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
}
#[test]
fn test_should_panic_but_succeeds() {
fn f() {}
let desc = TestDescAndFn {
desc: TestDesc {
name: StaticTestName("whatever"),
ignore: false,
should_panic: ShouldPanic::Yes,
allow_fail: false,
},
testfn: DynTestFn(Box::new(f)),
};
let (tx, rx) = channel();
run_test(&TestOpts::new(), false, desc, tx);
let (_, res, _) = rx.recv().unwrap();
assert!(res == TrFailed);
}
#[test]
fn parse_ignored_flag() {
let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
let opts = match parse_opts(&args) {
Some(Ok(o)) => o,
_ => panic!("Malformed arg in parse_ignored_flag"),
};
assert!((opts.run_ignored));
}
#[test]
pub fn filter_for_ignored_option() {
// When we run ignored tests, the test filter should filter out all the
// unignored tests and flip the ignore flag on the rest to false.
let mut opts = TestOpts::new();
opts.run_tests = true;
opts.run_ignored = true;
let tests = vec![TestDescAndFn {
desc: TestDesc {
name: StaticTestName("1"),
ignore: true,
should_panic: ShouldPanic::No,
allow_fail: false,
},
testfn: DynTestFn(Box::new(move || {})),
},
TestDescAndFn {
desc: TestDesc {
name: StaticTestName("2"),
ignore: false,
should_panic: ShouldPanic::No,
allow_fail: false,
},
testfn: DynTestFn(Box::new(move || {})),
}];
let filtered = filter_tests(&opts, tests);
assert_eq!(filtered.len(), 1);
assert_eq!(filtered[0].desc.name.to_string(), "1");
assert!(!filtered[0].desc.ignore);
}
#[test]
pub fn exact_filter_match() {
fn tests() -> Vec<TestDescAndFn> {
vec!["base",
"base::test",
"base::test1",
"base::test2",
].into_iter()
.map(|name| TestDescAndFn {
desc: TestDesc {
name: StaticTestName(name),
ignore: false,
should_panic: ShouldPanic::No,
allow_fail: false,
},
testfn: DynTestFn(Box::new(move || {}))
})
.collect()
}
let substr = filter_tests(&TestOpts {
filter: Some("base".into()),
..TestOpts::new()
}, tests());
assert_eq!(substr.len(), 4);
let substr = filter_tests(&TestOpts {
filter: Some("bas".into()),
..TestOpts::new()
}, tests());
assert_eq!(substr.len(), 4);
let substr = filter_tests(&TestOpts {
filter: Some("::test".into()),
..TestOpts::new()
}, tests());
assert_eq!(substr.len(), 3);
let substr = filter_tests(&TestOpts {
filter: Some("base::test".into()),
..TestOpts::new()
}, tests());
assert_eq!(substr.len(), 3);
let exact = filter_tests(&TestOpts {
filter: Some("base".into()),
filter_exact: true, ..TestOpts::new()
}, tests());
assert_eq!(exact.len(), 1);
let exact = filter_tests(&TestOpts {
filter: Some("bas".into()),
filter_exact: true,
..TestOpts::new()
}, tests());
assert_eq!(exact.len(), 0);
let exact = filter_tests(&TestOpts {
filter: Some("::test".into()),
filter_exact: true,
..TestOpts::new()
}, tests());
assert_eq!(exact.len(), 0);
let exact = filter_tests(&TestOpts {
filter: Some("base::test".into()),
filter_exact: true,
..TestOpts::new()
}, tests());
assert_eq!(exact.len(), 1);
}
#[test]
pub fn sort_tests() {
let mut opts = TestOpts::new();
opts.run_tests = true;
let names = vec!["sha1::test".to_string(),
"isize::test_to_str".to_string(),
"isize::test_pow".to_string(),
"test::do_not_run_ignored_tests".to_string(),
"test::ignored_tests_result_in_ignored".to_string(),
"test::first_free_arg_should_be_a_filter".to_string(),
"test::parse_ignored_flag".to_string(),
"test::filter_for_ignored_option".to_string(),
"test::sort_tests".to_string()];
let tests = {
fn testfn() {}
let mut tests = Vec::new();
for name in &names {
let test = TestDescAndFn {
desc: TestDesc {
name: DynTestName((*name).clone()),
ignore: false,
should_panic: ShouldPanic::No,
allow_fail: false,
},
testfn: DynTestFn(Box::new(testfn)),
};
tests.push(test);
}
tests
};
let filtered = filter_tests(&opts, tests);
let expected = vec!["isize::test_pow".to_string(),
"isize::test_to_str".to_string(),
"sha1::test".to_string(),
"test::do_not_run_ignored_tests".to_string(),
"test::filter_for_ignored_option".to_string(),
"test::first_free_arg_should_be_a_filter".to_string(),
"test::ignored_tests_result_in_ignored".to_string(),
"test::parse_ignored_flag".to_string(),
"test::sort_tests".to_string()];
for (a, b) in expected.iter().zip(filtered) {
assert!(*a == b.desc.name.to_string());
}
}
#[test]
pub fn test_metricmap_compare() {
let mut m1 = MetricMap::new();
let mut m2 = MetricMap::new();
m1.insert_metric("in-both-noise", 1000.0, 200.0);
m2.insert_metric("in-both-noise", 1100.0, 200.0);
m1.insert_metric("in-first-noise", 1000.0, 2.0);
m2.insert_metric("in-second-noise", 1000.0, 2.0);
m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
}
#[test]
pub fn test_bench_once_no_iter() {
fn f(_: &mut Bencher) {}
bench::run_once(f);
}
#[test]
pub fn test_bench_once_iter() {
fn f(b: &mut Bencher) {
b.iter(|| {
})
}
bench::run_once(f);
}
#[test]
pub fn test_bench_no_iter() {
fn f(_: &mut Bencher) {}
bench::benchmark(f);
}
#[test]
pub fn test_bench_iter() {
fn f(b: &mut Bencher) {
b.iter(|| {
})
}
bench::benchmark(f);
}
}
| {
// FIXME: Implement isatty on Redox
false
} |
webhook.go | /*
Copyright 2021 The KubeSphere Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// AnnotationKeyGitRepos is the annotation key that references the target git repositories
const AnnotationKeyGitRepos = "devops.kubesphere.io/git-repositories"
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Webhook is the Schema for the webhook API
// +k8s:openapi-gen=true
// +kubebuilder:printcolumn:name="Server",type="string",JSONPath=".spec.server"
// +kubebuilder:printcolumn:name="SkipVerify",type="boolean",JSONPath=".spec.skipVerify"
type Webhook struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec WebhookSpec `json:"spec,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// WebhookList contains a list of Webhook
type WebhookList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []Webhook `json:"items"`
}
// WebhookSpec represents the desired state of a Webhook
type WebhookSpec struct {
Server string `json:"server"`
Secret *v1.SecretReference `json:"secret,omitempty"`
Events []string `json:"events,omitempty"`
SkipVerify bool `json:"skipVerify"`
}
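// A minimal sketch of constructing a Webhook in Go; the name, server URL,
// and event list below are illustrative only:
//
//	wh := Webhook{
//	    ObjectMeta: metav1.ObjectMeta{Name: "gitlab-push"},
//	    Spec: WebhookSpec{
//	        Server:     "https://hooks.example.com/trigger",
//	        Events:     []string{"push"},
//	        SkipVerify: false,
//	    },
//	}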
func | () {
SchemeBuilder.Register(&Webhook{}, &WebhookList{})
}
| init |
StandardRouteWaypointModel.js | import _isNil from 'lodash/isNil';
import BaseModel from '../../base/BaseModel';
import FixCollection from '../Fix/FixCollection';
import WaypointModel from '../../aircraft/FlightManagementSystem/WaypointModel';
import { REGEX } from '../../constants/globalConstants';
import {
FLY_OVER_WAYPOINT_PREFIX,
VECTOR_WAYPOINT_PREFIX
} from '../../constants/navigation/routeConstants';
import {
ALTITUDE_RESTRICTION_PREFIX,
DECIMAL_RADIX,
FL_TO_THOUSANDS_MULTIPLIER,
NAME_INDEX,
RESTRICTION_INDEX,
RESTRICTION_SEPARATOR,
SPEED_RESTRICTION_PREFIX
} from '../../constants/navigation/waypointConstants';
/**
* A route waypoint describes a `fixName` and any altitude or speed restrictions for that fix.
*
* @class StandardRouteWaypointModel
* @extends BaseModel
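*
* @example
* // A hedged sketch; the fix name and restriction string are illustrative:
* const waypoint = new StandardRouteWaypointModel(['FRAWG', 'A80+|S210']);
* waypoint.fix;  // ['FRAWG', 'A80+|S210']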
*/
export default class StandardRouteWaypointModel extends BaseModel {
/**
* Expects `routeWaypoint` to be in one of these forms:
* - ["FRAWG", "A80+|S210+"]
* - ["FRAWG", "A80-|S210"]
* - ["FRAWG", "A80"]
* - ["FRAWG", "S210"]
* - "FRAWG"
*
* @constructor
* @param routeWaypoint {array|string}
*/
constructor(routeWaypoint) {
super();
if (typeof routeWaypoint === 'undefined') {
return this;
}
/**
* Name of the fix
*
* @property name
* @type {string}
* @default ''
* @private
*/
this.name = '';
/**
* Any restrictions for a given fix
*
* ex:
* - "A80+|S210"
* - "A80-"
* - "S230"
*
* Using null here to match the current API; if restrictions don't exist for a given waypoint,
* the consumers are expecting this to be null.
*
* @property _restrictions
* @type {string|null}
* @default null
* @private
*/
this._restrictions = null;
/**
* Required altitude for a waypoint
*
* @property _altitude (optional)
* @type {number}
* @default -1
* @private
*/
this._altitude = -1;
/**
* Flag used to determine if the waypoint must be flown over before the
* aircraft may proceed to the next fix on their route.
*
* @for StandardRouteWaypointModel
* @property _isFlyOverWaypoint
* @type {boolean}
* @default false
*/
this._isFlyOverWaypoint = false;
/**
* Required speed for a waypoint
*
* @property _speed (optional)
* @type {number}
* @default -1
* @private
*/
this._speed = -1;
// TODO: This will need to be implemented in the future as an enumeration. Something to the effect of: {BELOW|AT|ABOVE}
/**
* NOT IN USE
*
* Altitude constraint, if any, for a waypoint.
*
* @property _altitudeConstraint (options)
* @type {string}
* @default ''
* @private
*/
this._altitudeConstraint = '';
/**
* NOT IN USE
*
* Speed constraint, if any, for a waypoint.
*
* @property _speedConstraint (optional)
* @type {string}
* @default null
* @private
*/
this._speedConstraint = '';
/**
* Position information for the current waypoint
*
* Specific bits of this property are exposed via public getters.
* This property should never be modified by an external method.
*
* @property _positionModel
* @type {StaticPositionModel}
* @default null
* @private
*/
this._positionModel = null;
/**
* Distance in nm from the previous waypoint.
*
* This property is set externally by the `StandardRouteModel` and used only when called via
* `ArrivalBase.preSpawn()`.
*
* This value is mutable and is not intended to be re-used after its initial use.
*
* @property distanceFromPreviousWaypoint
* @type {number}
* @default -1
*/
this.distanceFromPreviousWaypoint = -1;
/**
* Name of the previous `StandardWaypointModel` object in a route
*
* This property is set externally by the `StandardRouteModel` and used only when called via
* `ArrivalBase.preSpawn()`.
*
* This value is mutable and is not intended to be re-used after its initial use.
*
* @property previousStandardWaypointName
* @type {string}
* @default ''
*/
this.previousStandardWaypointName = '';
return this._init(routeWaypoint)
.clonePositionFromFix();
}
/**
* Return this waypoint's `gps` position property
*
* @property gps
* @return {array}
*/
get gps() {
return this._positionModel.gps;
}
/**
* Return this waypoint's `gpsXY` position property
*
* @property gpsXY
* @return {array}
*/
get gpsXY() {
return this._positionModel.gpsXY;
}
/**
* This will return a normalized fix in the shape of `[FIXNAME, FIX_RESTRICTIONS]`.
*
* Fixes without restrictions are brought into the application as a single string; however, all
* fixes are consumed as an array. `_restrictions` is initialized as null, thus if there are
* no restrictions for a fix this getter will return `[FIXNAME, null]`.
*
* @for StandardRouteWaypointModel
* @property fix
* @return {array}
*/
get fix() {
return [this.name, this._restrictions];
}
/**
* Provide read-only public access to this._positionModel
*
* @for StandardRouteWaypointModel
* @property positionModel
* @type {StaticPositionModel}
*/
get positionModel() {
return this._positionModel;
}
/**
* Facade to access the relative position
*
* @for StandardRouteWaypointModel
* @property relativePosition
* @type {array<number>} [kilometersNorth, kilometersEast]
*/
get relativePosition() {
return this._positionModel.relativePosition;
}
/**
* Lifecycle method. Should be run only once on instantiation.
*
* @for StandardRouteWaypointModel
* @method _init
* @param routeWaypoint {array|string}
* @chainable
* @private
*/
_init(routeWaypoint) { | this.name = routeWaypoint.replace(FLY_OVER_WAYPOINT_PREFIX, '');
this._isVector = routeWaypoint.indexOf(VECTOR_WAYPOINT_PREFIX) !== -1;
this._isFlyOverWaypoint = routeWaypoint.indexOf(FLY_OVER_WAYPOINT_PREFIX) !== -1;
return this;
}
this.name = routeWaypoint[NAME_INDEX].replace(FLY_OVER_WAYPOINT_PREFIX, '');
this._isVector = routeWaypoint[NAME_INDEX].indexOf(VECTOR_WAYPOINT_PREFIX) !== -1;
this._isFlyOverWaypoint = routeWaypoint[NAME_INDEX].indexOf(FLY_OVER_WAYPOINT_PREFIX) !== -1;
// Temporary property; should end up as a getter that wraps private methods.
this._restrictions = routeWaypoint[RESTRICTION_INDEX];
this._parseWaypointRestrictions(routeWaypoint[RESTRICTION_INDEX]);
return this;
}
/**
* Reset the current model instance
*
* @for StandardRouteWaypointModel
* @method reset
*/
reset() {
this.name = '';
this._restrictions = null;
this._altitude = -1;
this._altitudeConstraint = '';
this._speed = -1;
this._speedConstraint = '';
return this;
}
/**
* Find the matching fix from the `FixCollection` and clone its `StaticPositionModel` into this `_positionModel`
*
* @for StandardRouteWaypointModel
* @method clonePositionFromFix
* @private
*/
clonePositionFromFix() {
const fixModel = FixCollection.findFixByName(this.name);
if (!fixModel) {
console.warn(`The following fix was not found in the list of fixes for this Airport: ${this.name}`);
return this;
}
this._positionModel = fixModel.clonePosition();
return this;
}
/**
* Build a new `WaypointModel` from the current instance.
*
* This method provides a way to create a `WaypointModel` with the current
* properties of a `StandardRouteWaypointModel` instance.
*
* This is used by `LegModel` when building a flight plan from a `routeString`. A `procedureRouteString`
* will result in finding a list of `StandardRouteWaypointModel`s. From those `StandardRouteWaypointModel`
* we need to be able to create `WaypointModel`s that the Fms can consume.
*
* There is a method of the same name in the `FixModel` that does this same thing
* but will be used only for `directRouteStrings`.
*
* @for StandardRouteWaypointModel
* @method toWaypointModel
* @return {WaypointModel}
*/
toWaypointModel() {
const waypointProps = {
altitudeRestriction: this._altitude,
isFlyOverWaypoint: this._isFlyOverWaypoint,
isVector: this._isVector,
name: this.name,
positionModel: this.positionModel,
speedRestriction: this._speed
};
return new WaypointModel(waypointProps);
}
/**
* Parse any waypoint restrictions
*
* Parse a single string into:
* - `this._altitude` = expressed in feet
* - `this._altitudeConstraint` = {BELOW|AT|ABOVE}
* - `this._speed` = expressed in kts
*
* Examples:
* - "A80+|S210"
* - "A80-|S210"
* - "A80"
* - "S210"
*
* @for StandardRouteWaypointModel
* @method _parseWaypointRestrictions
* @param waypointRestrictions {string}
* @private
*/
_parseWaypointRestrictions(waypointRestrictions) {
if (_isNil(waypointRestrictions)) {
return;
}
const restrictionPieces = this._extractRestrictionPieces(waypointRestrictions);
for (let i = 0; i < restrictionPieces.length; i++) {
const restriction = restrictionPieces[i];
// looking at the first letter of a restrictionPiece here.
if (restriction[0] === ALTITUDE_RESTRICTION_PREFIX) {
this._setAltitudeRestriction(restriction);
} else if (restriction[0] === SPEED_RESTRICTION_PREFIX) {
this._setSpeedRestriction(restriction);
}
}
}
/**
* @for StandardRouteWaypointModel
* @method _setAltitudeRestriction
* @param rawAltitudeStr {string}
* @private
*/
_setAltitudeRestriction(rawAltitudeStr) {
const altitudeRestriction = rawAltitudeStr.replace(REGEX.ALT_SPEED_RESTRICTION, '');
this._altitude = parseInt(altitudeRestriction, DECIMAL_RADIX) * FL_TO_THOUSANDS_MULTIPLIER;
}
/**
* @for StandardRouteWaypointModel
* @method _setSpeedRestriction
* @param rawSpeedRestrictionStr {string}
* @private
*/
_setSpeedRestriction(rawSpeedRestrictionStr) {
const speedRestriction = rawSpeedRestrictionStr.replace(REGEX.ALT_SPEED_RESTRICTION, '');
this._speed = parseInt(speedRestriction, DECIMAL_RADIX);
}
/**
* @for StandardRouteWaypointModel
* @method _extractRestrictionPieces
* @param waypointRestrictions {string}
* @return {array<string>}
* @private
*/
_extractRestrictionPieces(waypointRestrictions) {
return waypointRestrictions.split(RESTRICTION_SEPARATOR);
}
} | // if we receive a string, this fix doesn't have any restrictions so we only need to set `name`
if (typeof routeWaypoint === 'string') { |
serve.go | package plugin
import (
"log"
hclog "github.com/hashicorp/go-hclog"
"github.com/hashicorp/go-plugin"
"google.golang.org/grpc"
"github.com/hashicorp/terraform-plugin-go/tfprotov5"
tf5server "github.com/hashicorp/terraform-plugin-go/tfprotov5/tf5server"
"github.com/hashicorp/terraform-plugin-go/tfprotov6"
tf6server "github.com/hashicorp/terraform-plugin-go/tfprotov6/tf6server"
"github.com/hashicorp/terraform-plugin-sdk/v2/helper/schema"
)
const (
// The constants below are the names of the plugins that can be dispensed
// from the plugin server.
ProviderPluginName = "provider"
)
// Handshake is the HandshakeConfig used to configure clients and servers.
var Handshake = plugin.HandshakeConfig{
// The magic cookie values should NEVER be changed.
MagicCookieKey: "TF_PLUGIN_MAGIC_COOKIE",
MagicCookieValue: "d602bf8f470bc67ca7faa0386276bbdd4330efaf76d1a219cb4d6991ca9872b2",
}
type ProviderFunc func() *schema.Provider
type GRPCProviderFunc func() tfprotov5.ProviderServer
type GRPCProviderV6Func func() tfprotov6.ProviderServer
// ServeOpts are the configurations to serve a plugin.
type ServeOpts struct {
ProviderFunc ProviderFunc
// Wrapped versions of the above plugins will automatically be shimmed
// and added to the GRPC functions when possible.
GRPCProviderFunc GRPCProviderFunc
GRPCProviderV6Func GRPCProviderV6Func
// Logger is the logger that go-plugin will use.
Logger hclog.Logger
// TestConfig should only be set when the provider is being tested; it
// will opt out of go-plugin's lifecycle management and other features,
// and will use the supplied configuration options to control the
// plugin's lifecycle and communicate connection information. See the
// go-plugin GoDoc for more information.
TestConfig *plugin.ServeTestConfig
// Set NoLogOutputOverride to not override the log output with an hclog
// adapter. This should only be used when running the plugin in
// acceptance tests.
NoLogOutputOverride bool
}
// Serve serves a plugin. This function never returns and should be the final
// function called in the main function of the plugin.
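//
// A minimal sketch of a provider's main function (the New constructor is
// illustrative, not part of this package):
//
//	func main() {
//	    Serve(&ServeOpts{
//	        ProviderFunc: func() *schema.Provider { return New() },
//	    })
//	}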
func Serve(opts *ServeOpts) | {
if !opts.NoLogOutputOverride {
// In order to allow go-plugin to correctly pass log-levels through to
// terraform, we need to use an hclog.Logger with JSON output. We can
// inject this into the std `log` package here, so existing providers will
// make use of it automatically.
logger := hclog.New(&hclog.LoggerOptions{
// We send all output to terraform. Go-plugin will take the output and
// pass it through another hclog.Logger on the client side where it can
// be filtered.
Level: hclog.Trace,
JSONFormat: true,
})
log.SetOutput(logger.StandardWriter(&hclog.StandardLoggerOptions{InferLevels: true}))
}
// Since the plugins may not yet be aware of the new protocol, we
// automatically wrap the plugins in the gRPC shims.
if opts.GRPCProviderFunc == nil && opts.ProviderFunc != nil {
opts.GRPCProviderFunc = func() tfprotov5.ProviderServer {
return schema.NewGRPCProviderServer(opts.ProviderFunc())
}
}
serveConfig := plugin.ServeConfig{
HandshakeConfig: Handshake,
GRPCServer: func(opts []grpc.ServerOption) *grpc.Server {
return grpc.NewServer(opts...)
},
Logger: opts.Logger,
Test: opts.TestConfig,
}
// assume we have either a v5 or a v6 provider
if opts.GRPCProviderFunc != nil {
provider := opts.GRPCProviderFunc()
serveConfig.VersionedPlugins = map[int]plugin.PluginSet{
5: {
ProviderPluginName: &tf5server.GRPCProviderPlugin{
GRPCProvider: func() tfprotov5.ProviderServer {
return provider
},
},
},
}
} else if opts.GRPCProviderV6Func != nil {
provider := opts.GRPCProviderV6Func()
serveConfig.VersionedPlugins = map[int]plugin.PluginSet{
6: {
ProviderPluginName: &tf6server.GRPCProviderPlugin{
GRPCProvider: func() tfprotov6.ProviderServer {
return provider
},
},
},
}
}
plugin.Serve(&serveConfig)
} |
|
databases_table_test.rs | // Copyright 2020-2021 The Datafuse Authors.
//
// SPDX-License-Identifier: Apache-2.0.
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
async fn test_databases_table() -> anyhow::Result<()> | {
use common_planners::*;
use futures::TryStreamExt;
use crate::datasources::system::*;
use crate::datasources::*;
let ctx = crate::tests::try_create_context()?;
let table = DatabasesTable::create();
table.read_plan(ctx.clone(), &ScanPlan::empty())?;
let stream = table.read(ctx).await?;
let result = stream.try_collect::<Vec<_>>().await?;
let block = &result[0];
assert_eq!(block.num_columns(), 1);
let expected = vec![
"+----------+",
"| name |",
"+----------+",
"| default |",
"| for_test |",
"| local |",
"| system |",
"+----------+",
];
common_datablocks::assert_blocks_sorted_eq(expected, result.as_slice());
Ok(())
} |
|
exists.go | package file_exists
// Fake ...
func Fake() | {
} |
|
convertertoapi.go | package api
import (
"github.com/Azure/acs-engine/pkg/api/common"
"github.com/Azure/acs-engine/pkg/api/v20160330"
"github.com/Azure/acs-engine/pkg/api/v20160930"
"github.com/Azure/acs-engine/pkg/api/v20170131"
"github.com/Azure/acs-engine/pkg/api/v20170701"
"github.com/Azure/acs-engine/pkg/api/vlabs"
"github.com/Azure/acs-engine/pkg/helpers"
)
///////////////////////////////////////////////////////////
// The converter exposes functions to convert the top level
// ContainerService resource
//
// All other functions are internal helper functions used
// for converting.
///////////////////////////////////////////////////////////
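//
// A hedged sketch of invoking one of the converters (field values are
// illustrative only):
//
//	versioned := &vlabs.ContainerService{
//	    Name:       "demo",
//	    Location:   "EastUS",
//	    Properties: &vlabs.Properties{},
//	}
//	unversioned := ConvertVLabsContainerService(versioned)
//	// unversioned.Location is normalized via helpers.NormalizeAzureRegion.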
// ConvertV20160930ContainerService converts a v20160930 ContainerService to an unversioned ContainerService
func ConvertV20160930ContainerService(v20160930 *v20160930.ContainerService) *ContainerService {
c := &ContainerService{}
c.ID = v20160930.ID
c.Location = helpers.NormalizeAzureRegion(v20160930.Location)
c.Name = v20160930.Name
if v20160930.Plan != nil {
c.Plan = &ResourcePurchasePlan{}
convertV20160930ResourcePurchasePlan(v20160930.Plan, c.Plan)
}
c.Tags = map[string]string{}
for k, v := range v20160930.Tags {
c.Tags[k] = v
}
c.Type = v20160930.Type
c.Properties = &Properties{}
convertV20160930Properties(v20160930.Properties, c.Properties)
return c
}
// ConvertV20160330ContainerService converts a v20160330 ContainerService to an unversioned ContainerService
func ConvertV20160330ContainerService(v20160330 *v20160330.ContainerService) *ContainerService {
c := &ContainerService{}
c.ID = v20160330.ID
c.Location = helpers.NormalizeAzureRegion(v20160330.Location)
c.Name = v20160330.Name
if v20160330.Plan != nil {
c.Plan = &ResourcePurchasePlan{}
convertV20160330ResourcePurchasePlan(v20160330.Plan, c.Plan)
}
c.Tags = map[string]string{}
for k, v := range v20160330.Tags {
c.Tags[k] = v
}
c.Type = v20160330.Type
c.Properties = &Properties{}
convertV20160330Properties(v20160330.Properties, c.Properties)
return c
}
// ConvertV20170131ContainerService converts a v20170131 ContainerService to an unversioned ContainerService
func ConvertV20170131ContainerService(v20170131 *v20170131.ContainerService) *ContainerService {
c := &ContainerService{}
c.ID = v20170131.ID
c.Location = helpers.NormalizeAzureRegion(v20170131.Location)
c.Name = v20170131.Name
if v20170131.Plan != nil {
c.Plan = &ResourcePurchasePlan{}
convertV20170131ResourcePurchasePlan(v20170131.Plan, c.Plan)
}
c.Tags = map[string]string{}
for k, v := range v20170131.Tags {
c.Tags[k] = v
}
c.Type = v20170131.Type
c.Properties = &Properties{}
convertV20170131Properties(v20170131.Properties, c.Properties)
return c
}
// ConvertV20170701ContainerService converts a v20170701 ContainerService to an unversioned ContainerService
func ConvertV20170701ContainerService(v20170701 *v20170701.ContainerService) *ContainerService {
c := &ContainerService{}
c.ID = v20170701.ID
c.Location = helpers.NormalizeAzureRegion(v20170701.Location)
c.Name = v20170701.Name
if v20170701.Plan != nil {
c.Plan = &ResourcePurchasePlan{}
convertV20170701ResourcePurchasePlan(v20170701.Plan, c.Plan)
}
c.Tags = map[string]string{}
for k, v := range v20170701.Tags {
c.Tags[k] = v
}
c.Type = v20170701.Type
c.Properties = &Properties{}
convertV20170701Properties(v20170701.Properties, c.Properties)
return c
}
// ConvertVLabsContainerService converts a vlabs ContainerService to an unversioned ContainerService
func ConvertVLabsContainerService(vlabs *vlabs.ContainerService) *ContainerService {
c := &ContainerService{}
c.ID = vlabs.ID
c.Location = helpers.NormalizeAzureRegion(vlabs.Location)
c.Name = vlabs.Name
if vlabs.Plan != nil {
c.Plan = &ResourcePurchasePlan{}
convertVLabsResourcePurchasePlan(vlabs.Plan, c.Plan)
}
c.Tags = map[string]string{}
for k, v := range vlabs.Tags {
c.Tags[k] = v
}
c.Type = vlabs.Type
c.Properties = &Properties{}
convertVLabsProperties(vlabs.Properties, c.Properties)
return c
}
// convertV20160930ResourcePurchasePlan converts a v20160930 ResourcePurchasePlan to an unversioned ResourcePurchasePlan
func convertV20160930ResourcePurchasePlan(v20160930 *v20160930.ResourcePurchasePlan, api *ResourcePurchasePlan) {
api.Name = v20160930.Name
api.Product = v20160930.Product
api.PromotionCode = v20160930.PromotionCode
api.Publisher = v20160930.Publisher
}
// convertV20160330ResourcePurchasePlan converts a v20160330 ResourcePurchasePlan to an unversioned ResourcePurchasePlan
func convertV20160330ResourcePurchasePlan(v20160330 *v20160330.ResourcePurchasePlan, api *ResourcePurchasePlan) {
api.Name = v20160330.Name
api.Product = v20160330.Product
api.PromotionCode = v20160330.PromotionCode
api.Publisher = v20160330.Publisher
}
// convertV20170131ResourcePurchasePlan converts a v20170131 ResourcePurchasePlan to an unversioned ResourcePurchasePlan
func convertV20170131ResourcePurchasePlan(v20170131 *v20170131.ResourcePurchasePlan, api *ResourcePurchasePlan) {
api.Name = v20170131.Name
api.Product = v20170131.Product
api.PromotionCode = v20170131.PromotionCode
api.Publisher = v20170131.Publisher
}
// convertV20170701ResourcePurchasePlan converts a v20170701 ResourcePurchasePlan to an unversioned ResourcePurchasePlan
func convertV20170701ResourcePurchasePlan(v20170701 *v20170701.ResourcePurchasePlan, api *ResourcePurchasePlan) {
api.Name = v20170701.Name
api.Product = v20170701.Product
api.PromotionCode = v20170701.PromotionCode
api.Publisher = v20170701.Publisher
}
// convertVLabsResourcePurchasePlan converts a vlabs ResourcePurchasePlan to an unversioned ResourcePurchasePlan
func convertVLabsResourcePurchasePlan(vlabs *vlabs.ResourcePurchasePlan, api *ResourcePurchasePlan) {
api.Name = vlabs.Name
api.Product = vlabs.Product
api.PromotionCode = vlabs.PromotionCode
api.Publisher = vlabs.Publisher
}
func convertV20160930Properties(v20160930 *v20160930.Properties, api *Properties) {
api.ProvisioningState = ProvisioningState(v20160930.ProvisioningState)
if v20160930.OrchestratorProfile != nil {
api.OrchestratorProfile = &OrchestratorProfile{}
convertV20160930OrchestratorProfile(v20160930.OrchestratorProfile, api.OrchestratorProfile)
}
if v20160930.MasterProfile != nil {
api.MasterProfile = &MasterProfile{}
convertV20160930MasterProfile(v20160930.MasterProfile, api.MasterProfile)
}
api.AgentPoolProfiles = []*AgentPoolProfile{}
for _, p := range v20160930.AgentPoolProfiles {
apiProfile := &AgentPoolProfile{}
// api.OrchestratorProfile should already be filled in correctly
if api.OrchestratorProfile.IsKubernetes() {
// we only allow AvailabilitySet for Kubernetes agent pools
convertV20160930AgentPoolProfile(p, AvailabilitySet, apiProfile)
} else {
// other orchestrators all use VMSS
convertV20160930AgentPoolProfile(p, VirtualMachineScaleSets, apiProfile)
}
api.AgentPoolProfiles = append(api.AgentPoolProfiles, apiProfile)
}
if v20160930.LinuxProfile != nil {
api.LinuxProfile = &LinuxProfile{}
convertV20160930LinuxProfile(v20160930.LinuxProfile, api.LinuxProfile)
}
if v20160930.WindowsProfile != nil {
api.WindowsProfile = &WindowsProfile{}
convertV20160930WindowsProfile(v20160930.WindowsProfile, api.WindowsProfile)
}
if v20160930.DiagnosticsProfile != nil {
api.DiagnosticsProfile = &DiagnosticsProfile{}
convertV20160930DiagnosticsProfile(v20160930.DiagnosticsProfile, api.DiagnosticsProfile)
}
if v20160930.JumpboxProfile != nil {
api.JumpboxProfile = &JumpboxProfile{}
convertV20160930JumpboxProfile(v20160930.JumpboxProfile, api.JumpboxProfile)
}
if v20160930.ServicePrincipalProfile != nil {
api.ServicePrincipalProfile = &ServicePrincipalProfile{}
convertV20160930ServicePrincipalProfile(v20160930.ServicePrincipalProfile, api.ServicePrincipalProfile)
}
if v20160930.CustomProfile != nil {
api.CustomProfile = &CustomProfile{}
convertV20160930CustomProfile(v20160930.CustomProfile, api.CustomProfile)
}
if api.OrchestratorProfile.IsDCOS() && len(api.AgentPoolProfiles) == 1 {
addDCOSPublicAgentPool(api)
}
}
func convertV20160330Properties(v20160330 *v20160330.Properties, api *Properties) {
api.ProvisioningState = ProvisioningState(v20160330.ProvisioningState)
if v20160330.OrchestratorProfile != nil {
api.OrchestratorProfile = &OrchestratorProfile{}
convertV20160330OrchestratorProfile(v20160330.OrchestratorProfile, api.OrchestratorProfile)
}
if v20160330.MasterProfile != nil {
api.MasterProfile = &MasterProfile{}
convertV20160330MasterProfile(v20160330.MasterProfile, api.MasterProfile)
}
api.AgentPoolProfiles = []*AgentPoolProfile{}
for _, p := range v20160330.AgentPoolProfiles {
apiProfile := &AgentPoolProfile{}
convertV20160330AgentPoolProfile(p, apiProfile)
api.AgentPoolProfiles = append(api.AgentPoolProfiles, apiProfile)
}
if v20160330.LinuxProfile != nil {
api.LinuxProfile = &LinuxProfile{}
convertV20160330LinuxProfile(v20160330.LinuxProfile, api.LinuxProfile)
}
if v20160330.WindowsProfile != nil {
api.WindowsProfile = &WindowsProfile{}
convertV20160330WindowsProfile(v20160330.WindowsProfile, api.WindowsProfile)
}
if v20160330.DiagnosticsProfile != nil {
api.DiagnosticsProfile = &DiagnosticsProfile{}
convertV20160330DiagnosticsProfile(v20160330.DiagnosticsProfile, api.DiagnosticsProfile)
}
if v20160330.JumpboxProfile != nil {
api.JumpboxProfile = &JumpboxProfile{}
convertV20160330JumpboxProfile(v20160330.JumpboxProfile, api.JumpboxProfile)
}
if api.OrchestratorProfile.IsDCOS() && len(api.AgentPoolProfiles) == 1 {
addDCOSPublicAgentPool(api)
}
}
func convertV20170131Properties(v20170131 *v20170131.Properties, api *Properties) {
api.ProvisioningState = ProvisioningState(v20170131.ProvisioningState)
if v20170131.OrchestratorProfile != nil {
api.OrchestratorProfile = &OrchestratorProfile{}
convertV20170131OrchestratorProfile(v20170131.OrchestratorProfile, api.OrchestratorProfile, v20170131.HasWindows())
}
if v20170131.MasterProfile != nil {
api.MasterProfile = &MasterProfile{}
convertV20170131MasterProfile(v20170131.MasterProfile, api.MasterProfile)
}
api.AgentPoolProfiles = []*AgentPoolProfile{}
for _, p := range v20170131.AgentPoolProfiles {
apiProfile := &AgentPoolProfile{}
// api.OrchestratorProfile should already be filled in correctly
if api.OrchestratorProfile.IsKubernetes() {
// we only allow AvailabilitySet for Kubernetes agent pools
convertV20170131AgentPoolProfile(p, AvailabilitySet, apiProfile)
} else {
// other orchestrators all use VMSS
convertV20170131AgentPoolProfile(p, VirtualMachineScaleSets, apiProfile)
}
api.AgentPoolProfiles = append(api.AgentPoolProfiles, apiProfile)
}
if v20170131.LinuxProfile != nil {
api.LinuxProfile = &LinuxProfile{}
convertV20170131LinuxProfile(v20170131.LinuxProfile, api.LinuxProfile)
}
if v20170131.WindowsProfile != nil {
api.WindowsProfile = &WindowsProfile{}
convertV20170131WindowsProfile(v20170131.WindowsProfile, api.WindowsProfile)
}
if v20170131.DiagnosticsProfile != nil {
api.DiagnosticsProfile = &DiagnosticsProfile{}
convertV20170131DiagnosticsProfile(v20170131.DiagnosticsProfile, api.DiagnosticsProfile)
}
if v20170131.JumpboxProfile != nil {
api.JumpboxProfile = &JumpboxProfile{}
convertV20170131JumpboxProfile(v20170131.JumpboxProfile, api.JumpboxProfile)
}
if v20170131.ServicePrincipalProfile != nil {
api.ServicePrincipalProfile = &ServicePrincipalProfile{}
convertV20170131ServicePrincipalProfile(v20170131.ServicePrincipalProfile, api.ServicePrincipalProfile)
}
if v20170131.CustomProfile != nil {
api.CustomProfile = &CustomProfile{}
convertV20170131CustomProfile(v20170131.CustomProfile, api.CustomProfile)
}
if api.OrchestratorProfile.IsDCOS() && len(api.AgentPoolProfiles) == 1 {
addDCOSPublicAgentPool(api)
}
}
func convertV20170701Properties(v20170701 *v20170701.Properties, api *Properties) {
api.ProvisioningState = ProvisioningState(v20170701.ProvisioningState)
if v20170701.OrchestratorProfile != nil {
api.OrchestratorProfile = &OrchestratorProfile{}
convertV20170701OrchestratorProfile(v20170701.OrchestratorProfile, api.OrchestratorProfile, v20170701.HasWindows())
}
if v20170701.MasterProfile != nil {
api.MasterProfile = &MasterProfile{}
convertV20170701MasterProfile(v20170701.MasterProfile, api.MasterProfile)
}
api.AgentPoolProfiles = []*AgentPoolProfile{}
for _, p := range v20170701.AgentPoolProfiles {
apiProfile := &AgentPoolProfile{}
// api.OrchestratorProfile should already be filled in correctly
if api.OrchestratorProfile.IsKubernetes() {
// we only allow AvailabilitySet for Kubernetes agent pools
convertV20170701AgentPoolProfile(p, AvailabilitySet, apiProfile)
} else {
// other orchestrators all use VMSS
convertV20170701AgentPoolProfile(p, VirtualMachineScaleSets, apiProfile)
// By default, use managed disks for all orchestrators except Kubernetes, as it has encryption at rest.
if len(p.StorageProfile) == 0 {
apiProfile.StorageProfile = ManagedDisks
}
}
api.AgentPoolProfiles = append(api.AgentPoolProfiles, apiProfile)
}
if v20170701.LinuxProfile != nil {
api.LinuxProfile = &LinuxProfile{}
convertV20170701LinuxProfile(v20170701.LinuxProfile, api.LinuxProfile)
}
if v20170701.WindowsProfile != nil {
api.WindowsProfile = &WindowsProfile{}
convertV20170701WindowsProfile(v20170701.WindowsProfile, api.WindowsProfile)
}
if v20170701.ServicePrincipalProfile != nil {
api.ServicePrincipalProfile = &ServicePrincipalProfile{}
convertV20170701ServicePrincipalProfile(v20170701.ServicePrincipalProfile, api.ServicePrincipalProfile)
}
if v20170701.CustomProfile != nil {
api.CustomProfile = &CustomProfile{}
convertV20170701CustomProfile(v20170701.CustomProfile, api.CustomProfile)
}
}
func convertVLabsProperties(vlabs *vlabs.Properties, api *Properties) {
api.ProvisioningState = ProvisioningState(vlabs.ProvisioningState)
if vlabs.OrchestratorProfile != nil {
api.OrchestratorProfile = &OrchestratorProfile{}
convertVLabsOrchestratorProfile(vlabs, api.OrchestratorProfile)
}
if vlabs.MasterProfile != nil {
api.MasterProfile = &MasterProfile{}
convertVLabsMasterProfile(vlabs.MasterProfile, api.MasterProfile)
}
api.AgentPoolProfiles = []*AgentPoolProfile{}
for _, p := range vlabs.AgentPoolProfiles {
apiProfile := &AgentPoolProfile{}
convertVLabsAgentPoolProfile(p, apiProfile)
// By default, vlabs will use managed disks for all orchestrators except Kubernetes, as it has encryption at rest.
if !api.OrchestratorProfile.IsKubernetes() && !api.OrchestratorProfile.IsOpenShift() {
if len(p.StorageProfile) == 0 {
apiProfile.StorageProfile = ManagedDisks
}
}
api.AgentPoolProfiles = append(api.AgentPoolProfiles, apiProfile)
}
if vlabs.LinuxProfile != nil {
api.LinuxProfile = &LinuxProfile{}
convertVLabsLinuxProfile(vlabs.LinuxProfile, api.LinuxProfile)
}
api.ExtensionProfiles = []*ExtensionProfile{}
for _, p := range vlabs.ExtensionProfiles {
apiExtensionProfile := &ExtensionProfile{}
convertVLabsExtensionProfile(p, apiExtensionProfile)
api.ExtensionProfiles = append(api.ExtensionProfiles, apiExtensionProfile)
}
if vlabs.WindowsProfile != nil {
api.WindowsProfile = &WindowsProfile{}
convertVLabsWindowsProfile(vlabs.WindowsProfile, api.WindowsProfile)
}
if vlabs.ServicePrincipalProfile != nil {
api.ServicePrincipalProfile = &ServicePrincipalProfile{}
convertVLabsServicePrincipalProfile(vlabs.ServicePrincipalProfile, api.ServicePrincipalProfile)
}
if vlabs.CertificateProfile != nil {
api.CertificateProfile = &CertificateProfile{}
convertVLabsCertificateProfile(vlabs.CertificateProfile, api.CertificateProfile)
}
if vlabs.AADProfile != nil {
api.AADProfile = &AADProfile{}
convertVLabsAADProfile(vlabs.AADProfile, api.AADProfile)
}
if vlabs.AzProfile != nil {
api.AzProfile = &AzProfile{}
convertVLabsAZProfile(vlabs.AzProfile, api.AzProfile)
}
}
func convertVLabsAZProfile(vlabs *vlabs.AzProfile, api *AzProfile) {
api.Location = vlabs.Location
api.ResourceGroup = vlabs.ResourceGroup
api.SubscriptionID = vlabs.SubscriptionID
api.TenantID = vlabs.TenantID
}
func convertV20160930LinuxProfile(obj *v20160930.LinuxProfile, api *LinuxProfile) {
api.AdminUsername = obj.AdminUsername
api.SSH.PublicKeys = []PublicKey{}
for _, d := range obj.SSH.PublicKeys {
api.SSH.PublicKeys = append(api.SSH.PublicKeys,
PublicKey{KeyData: d.KeyData})
}
}
func convertV20160330LinuxProfile(v20160330 *v20160330.LinuxProfile, api *LinuxProfile) {
api.AdminUsername = v20160330.AdminUsername
api.SSH.PublicKeys = []PublicKey{}
for _, d := range v20160330.SSH.PublicKeys {
api.SSH.PublicKeys = append(api.SSH.PublicKeys,
PublicKey{KeyData: d.KeyData})
}
}
func convertV20170131LinuxProfile(v20170131 *v20170131.LinuxProfile, api *LinuxProfile) {
api.AdminUsername = v20170131.AdminUsername
api.SSH.PublicKeys = []PublicKey{}
for _, d := range v20170131.SSH.PublicKeys {
api.SSH.PublicKeys = append(api.SSH.PublicKeys, PublicKey{KeyData: d.KeyData})
}
}
func convertVLabsExtensionProfile(vlabs *vlabs.ExtensionProfile, api *ExtensionProfile) {
api.Name = vlabs.Name
api.Version = vlabs.Version
api.ExtensionParameters = vlabs.ExtensionParameters
if vlabs.ExtensionParametersKeyVaultRef != nil {
api.ExtensionParametersKeyVaultRef = &KeyvaultSecretRef{
VaultID: vlabs.ExtensionParametersKeyVaultRef.VaultID,
SecretName: vlabs.ExtensionParametersKeyVaultRef.SecretName,
SecretVersion: vlabs.ExtensionParametersKeyVaultRef.SecretVersion,
}
}
api.RootURL = vlabs.RootURL
api.Script = vlabs.Script
api.URLQuery = vlabs.URLQuery
}
func convertVLabsExtension(vlabs *vlabs.Extension, api *Extension) {
api.Name = vlabs.Name
api.SingleOrAll = vlabs.SingleOrAll
api.Template = vlabs.Template
}
func convertV20170701LinuxProfile(v20170701 *v20170701.LinuxProfile, api *LinuxProfile) {
api.AdminUsername = v20170701.AdminUsername
api.SSH.PublicKeys = []PublicKey{}
for _, d := range v20170701.SSH.PublicKeys {
api.SSH.PublicKeys = append(api.SSH.PublicKeys,
PublicKey{KeyData: d.KeyData})
}
}
func convertVLabsLinuxProfile(vlabs *vlabs.LinuxProfile, api *LinuxProfile) {
api.AdminUsername = vlabs.AdminUsername
api.SSH.PublicKeys = []PublicKey{}
for _, d := range vlabs.SSH.PublicKeys {
api.SSH.PublicKeys = append(api.SSH.PublicKeys,
PublicKey{KeyData: d.KeyData})
}
api.Secrets = []KeyVaultSecrets{}
for _, s := range vlabs.Secrets {
secret := &KeyVaultSecrets{}
convertVLabsKeyVaultSecrets(&s, secret)
api.Secrets = append(api.Secrets, *secret)
}
api.ScriptRootURL = vlabs.ScriptRootURL
if vlabs.CustomSearchDomain != nil {
api.CustomSearchDomain = &CustomSearchDomain{}
api.CustomSearchDomain.Name = vlabs.CustomSearchDomain.Name
api.CustomSearchDomain.RealmUser = vlabs.CustomSearchDomain.RealmUser
api.CustomSearchDomain.RealmPassword = vlabs.CustomSearchDomain.RealmPassword
}
if vlabs.CustomNodesDNS != nil {
api.CustomNodesDNS = &CustomNodesDNS{}
api.CustomNodesDNS.DNSServer = vlabs.CustomNodesDNS.DNSServer
}
}
func convertV20160930WindowsProfile(v20160930 *v20160930.WindowsProfile, api *WindowsProfile) {
api.AdminUsername = v20160930.AdminUsername
api.AdminPassword = v20160930.AdminPassword
}
func convertV20160330WindowsProfile(v20160330 *v20160330.WindowsProfile, api *WindowsProfile) {
api.AdminUsername = v20160330.AdminUsername
api.AdminPassword = v20160330.AdminPassword
}
func convertV20170131WindowsProfile(v20170131 *v20170131.WindowsProfile, api *WindowsProfile) {
api.AdminUsername = v20170131.AdminUsername
api.AdminPassword = v20170131.AdminPassword
}
func convertV20170701WindowsProfile(v20170701 *v20170701.WindowsProfile, api *WindowsProfile) {
api.AdminUsername = v20170701.AdminUsername
api.AdminPassword = v20170701.AdminPassword
}
func convertVLabsWindowsProfile(vlabs *vlabs.WindowsProfile, api *WindowsProfile) {
api.AdminUsername = vlabs.AdminUsername
api.AdminPassword = vlabs.AdminPassword
api.ImageVersion = vlabs.ImageVersion
api.WindowsImageSourceURL = vlabs.WindowsImageSourceURL
api.WindowsPublisher = vlabs.WindowsPublisher
api.WindowsOffer = vlabs.WindowsOffer
api.WindowsSku = vlabs.WindowsSku
api.Secrets = []KeyVaultSecrets{}
for _, s := range vlabs.Secrets {
secret := &KeyVaultSecrets{}
convertVLabsKeyVaultSecrets(&s, secret)
api.Secrets = append(api.Secrets, *secret)
}
}
func convertV20160930OrchestratorProfile(v20160930 *v20160930.OrchestratorProfile, api *OrchestratorProfile) {
api.OrchestratorType = v20160930.OrchestratorType
if api.OrchestratorType == Kubernetes {
api.OrchestratorVersion = "1.6.9"
} else if api.OrchestratorType == DCOS {
api.OrchestratorVersion = common.DCOSVersion1Dot9Dot0
}
}
func convertV20160330OrchestratorProfile(v20160330 *v20160330.OrchestratorProfile, api *OrchestratorProfile) {
api.OrchestratorType = v20160330.OrchestratorType
if api.OrchestratorType == DCOS {
api.OrchestratorVersion = common.DCOSVersion1Dot9Dot0
}
}
func convertV20170131OrchestratorProfile(v20170131 *v20170131.OrchestratorProfile, api *OrchestratorProfile, hasWindows bool) {
api.OrchestratorType = v20170131.OrchestratorType
if api.OrchestratorType == Kubernetes {
api.OrchestratorVersion = common.GetSupportedKubernetesVersion("", hasWindows)
} else if api.OrchestratorType == DCOS {
api.OrchestratorVersion = common.DCOSVersion1Dot9Dot0
}
}
func convertV20170701OrchestratorProfile(v20170701cs *v20170701.OrchestratorProfile, api *OrchestratorProfile, hasWindows bool) {
if v20170701cs.OrchestratorType == v20170701.DockerCE {
api.OrchestratorType = SwarmMode
} else {
api.OrchestratorType = v20170701cs.OrchestratorType
}
switch api.OrchestratorType {
case Kubernetes:
api.OrchestratorVersion = common.GetSupportedKubernetesVersion(v20170701cs.OrchestratorVersion, hasWindows)
case DCOS:
switch v20170701cs.OrchestratorVersion {
case common.DCOSVersion1Dot10Dot0, common.DCOSVersion1Dot9Dot0, common.DCOSVersion1Dot8Dot8:
api.OrchestratorVersion = v20170701cs.OrchestratorVersion
default:
api.OrchestratorVersion = common.DCOSVersion1Dot9Dot0
}
default:
break
}
}
func convertVLabsOrchestratorProfile(vp *vlabs.Properties, api *OrchestratorProfile) {
vlabscs := vp.OrchestratorProfile
api.OrchestratorType = vlabscs.OrchestratorType
switch api.OrchestratorType {
case OpenShift:
if vlabscs.OpenShiftConfig != nil {
api.OpenShiftConfig = &OpenShiftConfig{}
convertVLabsOpenShiftConfig(vlabscs.OpenShiftConfig, api.OpenShiftConfig)
}
// Set api.KubernetesConfig to api.OpenShiftConfig.KubernetesConfig so
// acs-engine can reuse the same code used for generating parameters from
// KubernetesConfig for OpenShiftConfig.
if api.OpenShiftConfig != nil && api.OpenShiftConfig.KubernetesConfig != nil {
api.KubernetesConfig = api.OpenShiftConfig.KubernetesConfig
}
if vlabscs.OrchestratorVersion != common.OpenShiftVersionUnstable {
api.OrchestratorVersion = common.RationalizeReleaseAndVersion(
vlabscs.OrchestratorType,
vlabscs.OrchestratorRelease,
vlabscs.OrchestratorVersion,
false)
} else {
api.OrchestratorVersion = vlabscs.OrchestratorVersion
}
case Kubernetes:
if vlabscs.KubernetesConfig != nil {
api.KubernetesConfig = &KubernetesConfig{}
convertVLabsKubernetesConfig(vlabscs.KubernetesConfig, api.KubernetesConfig)
}
setVlabsKubernetesDefaults(vp, api)
api.OrchestratorVersion = common.RationalizeReleaseAndVersion(
vlabscs.OrchestratorType,
vlabscs.OrchestratorRelease,
vlabscs.OrchestratorVersion,
vp.HasWindows())
case DCOS:
if vlabscs.DcosConfig != nil {
api.DcosConfig = &DcosConfig{}
convertVLabsDcosConfig(vlabscs.DcosConfig, api.DcosConfig)
}
api.OrchestratorVersion = common.RationalizeReleaseAndVersion(
vlabscs.OrchestratorType,
vlabscs.OrchestratorRelease,
vlabscs.OrchestratorVersion,
false)
}
}
func convertVLabsDcosConfig(vlabs *vlabs.DcosConfig, api *DcosConfig) {
api.DcosBootstrapURL = vlabs.DcosBootstrapURL
api.DcosWindowsBootstrapURL = vlabs.DcosWindowsBootstrapURL
if len(vlabs.Registry) > 0 {
api.Registry = vlabs.Registry
}
if len(vlabs.RegistryUser) > 0 {
api.RegistryUser = vlabs.RegistryUser
}
if len(vlabs.RegistryPass) > 0 {
api.RegistryPass = vlabs.RegistryPass
}
api.DcosRepositoryURL = vlabs.DcosRepositoryURL
api.DcosClusterPackageListID = vlabs.DcosClusterPackageListID
api.DcosProviderPackageID = vlabs.DcosProviderPackageID
if vlabs.BootstrapProfile != nil {
api.BootstrapProfile = &BootstrapProfile{
VMSize: vlabs.BootstrapProfile.VMSize,
OSDiskSizeGB: vlabs.BootstrapProfile.OSDiskSizeGB,
OAuthEnabled: vlabs.BootstrapProfile.OAuthEnabled,
StaticIP: vlabs.BootstrapProfile.StaticIP,
Subnet: vlabs.BootstrapProfile.Subnet,
}
}
}
func convertVLabsOpenShiftConfig(vlabs *vlabs.OpenShiftConfig, api *OpenShiftConfig) {
// NOTE: This is a hack to avoid breaking the rest of the acs-engine
// code when KubernetesConfig is accessed for various things. We don't
// use anything from it today. Maybe do something cleaner here.
api.KubernetesConfig = &KubernetesConfig{}
if vlabs.KubernetesConfig != nil {
convertVLabsKubernetesConfig(vlabs.KubernetesConfig, api.KubernetesConfig)
}
api.ClusterUsername = vlabs.ClusterUsername
api.ClusterPassword = vlabs.ClusterPassword
api.EnableAADAuthentication = vlabs.EnableAADAuthentication
api.ConfigBundles = vlabs.ConfigBundles
}
func convertVLabsKubernetesConfig(vlabs *vlabs.KubernetesConfig, api *KubernetesConfig) {
api.KubernetesImageBase = vlabs.KubernetesImageBase
api.ClusterSubnet = vlabs.ClusterSubnet
api.DNSServiceIP = vlabs.DNSServiceIP
api.ServiceCIDR = vlabs.ServiceCidr
api.NetworkPlugin = vlabs.NetworkPlugin
api.ContainerRuntime = vlabs.ContainerRuntime
api.MaxPods = vlabs.MaxPods
api.DockerBridgeSubnet = vlabs.DockerBridgeSubnet
api.CloudProviderBackoff = vlabs.CloudProviderBackoff
api.CloudProviderBackoffDuration = vlabs.CloudProviderBackoffDuration
api.CloudProviderBackoffExponent = vlabs.CloudProviderBackoffExponent
api.CloudProviderBackoffJitter = vlabs.CloudProviderBackoffJitter
api.CloudProviderBackoffRetries = vlabs.CloudProviderBackoffRetries
api.CloudProviderRateLimit = vlabs.CloudProviderRateLimit
api.CloudProviderRateLimitBucket = vlabs.CloudProviderRateLimitBucket
api.CloudProviderRateLimitQPS = vlabs.CloudProviderRateLimitQPS
api.UseManagedIdentity = vlabs.UseManagedIdentity
api.CustomHyperkubeImage = vlabs.CustomHyperkubeImage
api.DockerEngineVersion = vlabs.DockerEngineVersion
api.CustomCcmImage = vlabs.CustomCcmImage
api.UseCloudControllerManager = vlabs.UseCloudControllerManager
api.CustomWindowsPackageURL = vlabs.CustomWindowsPackageURL
api.UseInstanceMetadata = vlabs.UseInstanceMetadata
api.LoadBalancerSku = vlabs.LoadBalancerSku
api.ExcludeMasterFromStandardLB = vlabs.ExcludeMasterFromStandardLB
api.EnableRbac = vlabs.EnableRbac
api.EnableSecureKubelet = vlabs.EnableSecureKubelet
api.EnableAggregatedAPIs = vlabs.EnableAggregatedAPIs
api.EnableDataEncryptionAtRest = vlabs.EnableDataEncryptionAtRest
api.EnableEncryptionWithExternalKms = vlabs.EnableEncryptionWithExternalKms
api.EnablePodSecurityPolicy = vlabs.EnablePodSecurityPolicy
api.GCHighThreshold = vlabs.GCHighThreshold
api.GCLowThreshold = vlabs.GCLowThreshold
api.EtcdVersion = vlabs.EtcdVersion
api.EtcdDiskSizeGB = vlabs.EtcdDiskSizeGB
api.EtcdEncryptionKey = vlabs.EtcdEncryptionKey
convertAddonsToAPI(vlabs, api)
convertKubeletConfigToAPI(vlabs, api)
convertControllerManagerConfigToAPI(vlabs, api)
convertCloudControllerManagerConfigToAPI(vlabs, api)
convertAPIServerConfigToAPI(vlabs, api)
convertSchedulerConfigToAPI(vlabs, api)
convertPrivateClusterToAPI(vlabs, api)
}
func setVlabsKubernetesDefaults(vp *vlabs.Properties, api *OrchestratorProfile) {
if api.KubernetesConfig == nil {
api.KubernetesConfig = &KubernetesConfig{}
}
if vp.OrchestratorProfile.KubernetesConfig != nil {
// Included here for backwards compatibility with deprecated NetworkPolicy usage patterns
if vp.OrchestratorProfile.KubernetesConfig.NetworkPlugin == "" &&
vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyAzure {
api.KubernetesConfig.NetworkPlugin = vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy
api.KubernetesConfig.NetworkPolicy = "" // no-op but included for emphasis
} else if vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy == NetworkPolicyNone {
api.KubernetesConfig.NetworkPlugin = NetworkPluginKubenet
api.KubernetesConfig.NetworkPolicy = "" // no-op but included for emphasis
} else {
api.KubernetesConfig.NetworkPlugin = vp.OrchestratorProfile.KubernetesConfig.NetworkPlugin
api.KubernetesConfig.NetworkPolicy = vp.OrchestratorProfile.KubernetesConfig.NetworkPolicy
}
}
if api.KubernetesConfig.NetworkPlugin == "" {
if vp.HasWindows() {
api.KubernetesConfig.NetworkPlugin = vlabs.DefaultNetworkPluginWindows
} else {
api.KubernetesConfig.NetworkPlugin = vlabs.DefaultNetworkPlugin
}
}
}
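// Summary note (added for this document; it only restates the function above):
// setVlabsKubernetesDefaults resolves the deprecated NetworkPolicy field as follows:
//
//	NetworkPlugin == "" && NetworkPolicy == NetworkPolicyAzure => plugin takes the policy value, policy cleared
//	NetworkPolicy == NetworkPolicyNone                         => plugin = NetworkPluginKubenet, policy cleared
//	anything else                                              => plugin and policy copied through unchanged
//	plugin still "" after that                                 => DefaultNetworkPluginWindows (Windows pools) or DefaultNetworkPlugin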
func convertAddonsToAPI(v *vlabs.KubernetesConfig, a *KubernetesConfig) {
a.Addons = []KubernetesAddon{}
for i := range v.Addons {
a.Addons = append(a.Addons, KubernetesAddon{
Name: v.Addons[i].Name,
Enabled: v.Addons[i].Enabled,
Config: map[string]string{},
})
for j := range v.Addons[i].Containers {
a.Addons[i].Containers = append(a.Addons[i].Containers, KubernetesContainerSpec{
Name: v.Addons[i].Containers[j].Name,
Image: v.Addons[i].Containers[j].Image,
CPURequests: v.Addons[i].Containers[j].CPURequests,
MemoryRequests: v.Addons[i].Containers[j].MemoryRequests,
CPULimits: v.Addons[i].Containers[j].CPULimits,
MemoryLimits: v.Addons[i].Containers[j].MemoryLimits,
})
}
if v.Addons[i].Config != nil {
for key, val := range v.Addons[i].Config {
a.Addons[i].Config[key] = val
}
}
}
}
func convertCustomFilesToAPI(v *vlabs.MasterProfile, a *MasterProfile) {
if v.CustomFiles != nil {
a.CustomFiles = &[]CustomFile{}
for i := range *v.CustomFiles {
*a.CustomFiles = append(*a.CustomFiles, CustomFile{
Dest: (*v.CustomFiles)[i].Dest,
Source: (*v.CustomFiles)[i].Source,
})
}
}
}
func convertKubeletConfigToAPI(v *vlabs.KubernetesConfig, a *KubernetesConfig) {
a.KubeletConfig = map[string]string{}
for key, val := range v.KubeletConfig {
a.KubeletConfig[key] = val
}
}
func convertControllerManagerConfigToAPI(v *vlabs.KubernetesConfig, a *KubernetesConfig) {
a.ControllerManagerConfig = map[string]string{}
for key, val := range v.ControllerManagerConfig {
a.ControllerManagerConfig[key] = val
}
}
func convertCloudControllerManagerConfigToAPI(v *vlabs.KubernetesConfig, a *KubernetesConfig) {
a.CloudControllerManagerConfig = map[string]string{}
for key, val := range v.CloudControllerManagerConfig {
a.CloudControllerManagerConfig[key] = val
}
}
func convertAPIServerConfigToAPI(v *vlabs.KubernetesConfig, a *KubernetesConfig) {
a.APIServerConfig = map[string]string{}
for key, val := range v.APIServerConfig {
a.APIServerConfig[key] = val
}
}
func convertSchedulerConfigToAPI(v *vlabs.KubernetesConfig, a *KubernetesConfig) {
a.SchedulerConfig = map[string]string{}
for key, val := range v.SchedulerConfig {
a.SchedulerConfig[key] = val
}
}
func convertPrivateClusterToAPI(v *vlabs.KubernetesConfig, a *KubernetesConfig) {
if v.PrivateCluster != nil {
a.PrivateCluster = &PrivateCluster{}
a.PrivateCluster.Enabled = v.PrivateCluster.Enabled
if v.PrivateCluster.JumpboxProfile != nil {
a.PrivateCluster.JumpboxProfile = &PrivateJumpboxProfile{}
convertPrivateJumpboxProfileToAPI(v.PrivateCluster.JumpboxProfile, a.PrivateCluster.JumpboxProfile)
}
}
}
func convertPrivateJumpboxProfileToAPI(v *vlabs.PrivateJumpboxProfile, a *PrivateJumpboxProfile) {
a.Name = v.Name
a.OSDiskSizeGB = v.OSDiskSizeGB
a.VMSize = v.VMSize
a.PublicKey = v.PublicKey
a.Username = v.Username
a.StorageProfile = v.StorageProfile
}
func convertV20160930MasterProfile(v20160930 *v20160930.MasterProfile, api *MasterProfile) {
api.Count = v20160930.Count
api.DNSPrefix = v20160930.DNSPrefix
api.FQDN = v20160930.FQDN
api.Subnet = v20160930.GetSubnet()
// Set default VMSize
api.VMSize = "Standard_D2_v2"
}
func convertV20160330MasterProfile(v20160330 *v20160330.MasterProfile, api *MasterProfile) {
api.Count = v20160330.Count
api.DNSPrefix = v20160330.DNSPrefix
api.FQDN = v20160330.FQDN
api.Subnet = v20160330.GetSubnet()
// Set default VMSize
api.VMSize = "Standard_D2_v2"
}
func convertV20170131MasterProfile(v20170131 *v20170131.MasterProfile, api *MasterProfile) {
api.Count = v20170131.Count
api.DNSPrefix = v20170131.DNSPrefix
api.FQDN = v20170131.FQDN
api.Subnet = v20170131.GetSubnet()
// Set default VMSize
// TODO: Use azureconst.go to set
api.VMSize = "Standard_D2_v2"
}
func convertV20170701MasterProfile(v20170701 *v20170701.MasterProfile, api *MasterProfile) {
api.Count = v20170701.Count
api.DNSPrefix = v20170701.DNSPrefix
api.FQDN = v20170701.FQDN
api.Subnet = v20170701.GetSubnet()
api.VMSize = v20170701.VMSize
api.OSDiskSizeGB = v20170701.OSDiskSizeGB
api.VnetSubnetID = v20170701.VnetSubnetID
api.FirstConsecutiveStaticIP = v20170701.FirstConsecutiveStaticIP
api.StorageProfile = v20170701.StorageProfile
// by default 20170701 will use managed disks as it has encryption at rest
if len(api.StorageProfile) == 0 {
api.StorageProfile = ManagedDisks
}
}
func convertVLabsMasterProfile(vlabs *vlabs.MasterProfile, api *MasterProfile) {
api.Count = vlabs.Count
api.DNSPrefix = vlabs.DNSPrefix
api.SubjectAltNames = vlabs.SubjectAltNames
api.VMSize = vlabs.VMSize
api.OSDiskSizeGB = vlabs.OSDiskSizeGB
api.VnetSubnetID = vlabs.VnetSubnetID
api.FirstConsecutiveStaticIP = vlabs.FirstConsecutiveStaticIP
api.VnetCidr = vlabs.VnetCidr
api.Subnet = vlabs.GetSubnet()
api.IPAddressCount = vlabs.IPAddressCount
api.FQDN = vlabs.FQDN
api.StorageProfile = vlabs.StorageProfile
api.HTTPSourceAddressPrefix = vlabs.HTTPSourceAddressPrefix
api.OAuthEnabled = vlabs.OAuthEnabled
// by default vlabs will use managed disks as it has encryption at rest
if len(api.StorageProfile) == 0 {
api.StorageProfile = ManagedDisks
}
if vlabs.PreProvisionExtension != nil {
apiExtension := &Extension{}
convertVLabsExtension(vlabs.PreProvisionExtension, apiExtension)
api.PreprovisionExtension = apiExtension
}
api.Extensions = []Extension{}
for _, extension := range vlabs.Extensions {
apiExtension := &Extension{}
convertVLabsExtension(&extension, apiExtension)
api.Extensions = append(api.Extensions, *apiExtension)
}
api.Distro = Distro(vlabs.Distro)
if vlabs.KubernetesConfig != nil {
api.KubernetesConfig = &KubernetesConfig{}
convertVLabsKubernetesConfig(vlabs.KubernetesConfig, api.KubernetesConfig)
}
if vlabs.ImageRef != nil {
api.ImageRef = &ImageReference{}
api.ImageRef.Name = vlabs.ImageRef.Name
api.ImageRef.ResourceGroup = vlabs.ImageRef.ResourceGroup
}
convertCustomFilesToAPI(vlabs, api)
}
func convertV20160930AgentPoolProfile(v20160930 *v20160930.AgentPoolProfile, availabilityProfile string, api *AgentPoolProfile) {
api.Name = v20160930.Name
api.Count = v20160930.Count
api.VMSize = v20160930.VMSize
api.DNSPrefix = v20160930.DNSPrefix
if api.DNSPrefix != "" {
// Set default Ports when DNSPrefix specified
api.Ports = []int{80, 443, 8080}
}
api.FQDN = v20160930.FQDN
api.OSType = OSType(v20160930.OSType)
api.Subnet = v20160930.GetSubnet()
api.AvailabilityProfile = availabilityProfile
}
func convertV20160330AgentPoolProfile(v20160330 *v20160330.AgentPoolProfile, api *AgentPoolProfile) {
api.Name = v20160330.Name
api.Count = v20160330.Count
api.VMSize = v20160330.VMSize
api.DNSPrefix = v20160330.DNSPrefix
if api.DNSPrefix != "" |
api.FQDN = v20160330.FQDN
api.OSType = OSType(v20160330.OSType)
api.Subnet = v20160330.GetSubnet()
}
func convertV20170131AgentPoolProfile(v20170131 *v20170131.AgentPoolProfile, availabilityProfile string, api *AgentPoolProfile) {
api.Name = v20170131.Name
api.Count = v20170131.Count
api.VMSize = v20170131.VMSize
api.DNSPrefix = v20170131.DNSPrefix
if api.DNSPrefix != "" {
// Set default Ports when DNSPrefix specified
api.Ports = []int{80, 443, 8080}
}
api.FQDN = v20170131.FQDN
api.OSType = OSType(v20170131.OSType)
api.Subnet = v20170131.GetSubnet()
api.AvailabilityProfile = availabilityProfile
}
func convertV20170701AgentPoolProfile(v20170701 *v20170701.AgentPoolProfile, availabilityProfile string, api *AgentPoolProfile) {
api.Name = v20170701.Name
api.Count = v20170701.Count
api.VMSize = v20170701.VMSize
api.OSDiskSizeGB = v20170701.OSDiskSizeGB
api.DNSPrefix = v20170701.DNSPrefix
api.OSType = OSType(v20170701.OSType)
api.Ports = []int{}
api.Ports = append(api.Ports, v20170701.Ports...)
api.StorageProfile = v20170701.StorageProfile
api.VnetSubnetID = v20170701.VnetSubnetID
api.Subnet = v20170701.GetSubnet()
api.FQDN = v20170701.FQDN
api.AvailabilityProfile = availabilityProfile
}
func convertVLabsAgentPoolProfile(vlabs *vlabs.AgentPoolProfile, api *AgentPoolProfile) {
api.Name = vlabs.Name
api.Count = vlabs.Count
api.VMSize = vlabs.VMSize
api.OSDiskSizeGB = vlabs.OSDiskSizeGB
api.DNSPrefix = vlabs.DNSPrefix
api.OSType = OSType(vlabs.OSType)
api.Ports = []int{}
api.Ports = append(api.Ports, vlabs.Ports...)
api.AvailabilityProfile = vlabs.AvailabilityProfile
api.ScaleSetPriority = vlabs.ScaleSetPriority
api.ScaleSetEvictionPolicy = vlabs.ScaleSetEvictionPolicy
api.StorageProfile = vlabs.StorageProfile
api.DiskSizesGB = []int{}
api.DiskSizesGB = append(api.DiskSizesGB, vlabs.DiskSizesGB...)
api.VnetSubnetID = vlabs.VnetSubnetID
api.Subnet = vlabs.GetSubnet()
api.IPAddressCount = vlabs.IPAddressCount
api.FQDN = vlabs.FQDN
api.AcceleratedNetworkingEnabled = vlabs.AcceleratedNetworkingEnabled
api.CustomNodeLabels = map[string]string{}
for k, v := range vlabs.CustomNodeLabels {
api.CustomNodeLabels[k] = v
}
if vlabs.PreProvisionExtension != nil {
apiExtension := &Extension{}
convertVLabsExtension(vlabs.PreProvisionExtension, apiExtension)
api.PreprovisionExtension = apiExtension
}
api.Extensions = []Extension{}
for _, extension := range vlabs.Extensions {
apiExtension := &Extension{}
convertVLabsExtension(&extension, apiExtension)
api.Extensions = append(api.Extensions, *apiExtension)
}
api.Distro = Distro(vlabs.Distro)
if vlabs.KubernetesConfig != nil {
api.KubernetesConfig = &KubernetesConfig{}
convertVLabsKubernetesConfig(vlabs.KubernetesConfig, api.KubernetesConfig)
}
if vlabs.ImageRef != nil {
api.ImageRef = &ImageReference{}
api.ImageRef.Name = vlabs.ImageRef.Name
api.ImageRef.ResourceGroup = vlabs.ImageRef.ResourceGroup
}
api.Role = AgentPoolProfileRole(vlabs.Role)
}
func convertVLabsKeyVaultSecrets(vlabs *vlabs.KeyVaultSecrets, api *KeyVaultSecrets) {
api.SourceVault = &KeyVaultID{ID: vlabs.SourceVault.ID}
api.VaultCertificates = []KeyVaultCertificate{}
for _, c := range vlabs.VaultCertificates {
cert := KeyVaultCertificate{}
cert.CertificateStore = c.CertificateStore
cert.CertificateURL = c.CertificateURL
api.VaultCertificates = append(api.VaultCertificates, cert)
}
}
func convertV20160930DiagnosticsProfile(v20160930 *v20160930.DiagnosticsProfile, api *DiagnosticsProfile) {
if v20160930.VMDiagnostics != nil {
api.VMDiagnostics = &VMDiagnostics{}
convertV20160930VMDiagnostics(v20160930.VMDiagnostics, api.VMDiagnostics)
}
}
func convertV20160930VMDiagnostics(v20160930 *v20160930.VMDiagnostics, api *VMDiagnostics) {
api.Enabled = v20160930.Enabled
api.StorageURL = v20160930.StorageURL
}
func convertV20160330DiagnosticsProfile(v20160330 *v20160330.DiagnosticsProfile, api *DiagnosticsProfile) {
if v20160330.VMDiagnostics != nil {
api.VMDiagnostics = &VMDiagnostics{}
convertV20160330VMDiagnostics(v20160330.VMDiagnostics, api.VMDiagnostics)
}
}
func convertV20160330VMDiagnostics(v20160330 *v20160330.VMDiagnostics, api *VMDiagnostics) {
api.Enabled = v20160330.Enabled
api.StorageURL = v20160330.StorageURL
}
func convertV20170131DiagnosticsProfile(v20170131 *v20170131.DiagnosticsProfile, api *DiagnosticsProfile) {
if v20170131.VMDiagnostics != nil {
api.VMDiagnostics = &VMDiagnostics{}
convertV20170131VMDiagnostics(v20170131.VMDiagnostics, api.VMDiagnostics)
}
}
func convertV20170131VMDiagnostics(v20170131 *v20170131.VMDiagnostics, api *VMDiagnostics) {
api.Enabled = v20170131.Enabled
api.StorageURL = v20170131.StorageURL
}
func convertV20160930JumpboxProfile(v20160930 *v20160930.JumpboxProfile, api *JumpboxProfile) {
api.OSType = OSType(v20160930.OSType)
api.DNSPrefix = v20160930.DNSPrefix
api.FQDN = v20160930.FQDN
}
func convertV20160330JumpboxProfile(v20160330 *v20160330.JumpboxProfile, api *JumpboxProfile) {
api.OSType = OSType(v20160330.OSType)
api.DNSPrefix = v20160330.DNSPrefix
api.FQDN = v20160330.FQDN
}
func convertV20170131JumpboxProfile(v20170131 *v20170131.JumpboxProfile, api *JumpboxProfile) {
api.OSType = OSType(v20170131.OSType)
api.DNSPrefix = v20170131.DNSPrefix
api.FQDN = v20170131.FQDN
}
func convertV20160930ServicePrincipalProfile(v20160930 *v20160930.ServicePrincipalProfile, api *ServicePrincipalProfile) {
api.ClientID = v20160930.ClientID
api.Secret = v20160930.Secret
api.ObjectID = v20160930.ObjectID
}
func convertV20170131ServicePrincipalProfile(v20170131 *v20170131.ServicePrincipalProfile, api *ServicePrincipalProfile) {
api.ClientID = v20170131.ClientID
api.Secret = v20170131.Secret
api.ObjectID = v20170131.ObjectID
}
func convertV20170701ServicePrincipalProfile(v20170701 *v20170701.ServicePrincipalProfile, api *ServicePrincipalProfile) {
api.ClientID = v20170701.ClientID
api.Secret = v20170701.Secret
api.ObjectID = v20170701.ObjectID
if v20170701.KeyvaultSecretRef != nil {
api.KeyvaultSecretRef = &KeyvaultSecretRef{
VaultID: v20170701.KeyvaultSecretRef.VaultID,
SecretName: v20170701.KeyvaultSecretRef.SecretName,
SecretVersion: v20170701.KeyvaultSecretRef.SecretVersion,
}
}
}
func convertVLabsServicePrincipalProfile(vlabs *vlabs.ServicePrincipalProfile, api *ServicePrincipalProfile) {
api.ClientID = vlabs.ClientID
api.Secret = vlabs.Secret
api.ObjectID = vlabs.ObjectID
if vlabs.KeyvaultSecretRef != nil {
api.KeyvaultSecretRef = &KeyvaultSecretRef{
VaultID: vlabs.KeyvaultSecretRef.VaultID,
SecretName: vlabs.KeyvaultSecretRef.SecretName,
SecretVersion: vlabs.KeyvaultSecretRef.SecretVersion,
}
}
}
func convertV20160930CustomProfile(v20160930 *v20160930.CustomProfile, api *CustomProfile) {
api.Orchestrator = v20160930.Orchestrator
}
func convertV20170131CustomProfile(v20170131 *v20170131.CustomProfile, api *CustomProfile) {
api.Orchestrator = v20170131.Orchestrator
}
func convertV20170701CustomProfile(v20170701 *v20170701.CustomProfile, api *CustomProfile) {
api.Orchestrator = v20170701.Orchestrator
}
func convertVLabsCertificateProfile(vlabs *vlabs.CertificateProfile, api *CertificateProfile) {
api.CaCertificate = vlabs.CaCertificate
api.CaPrivateKey = vlabs.CaPrivateKey
api.APIServerCertificate = vlabs.APIServerCertificate
api.APIServerPrivateKey = vlabs.APIServerPrivateKey
api.ClientCertificate = vlabs.ClientCertificate
api.ClientPrivateKey = vlabs.ClientPrivateKey
api.KubeConfigCertificate = vlabs.KubeConfigCertificate
api.KubeConfigPrivateKey = vlabs.KubeConfigPrivateKey
api.EtcdServerCertificate = vlabs.EtcdServerCertificate
api.EtcdServerPrivateKey = vlabs.EtcdServerPrivateKey
api.EtcdClientCertificate = vlabs.EtcdClientCertificate
api.EtcdClientPrivateKey = vlabs.EtcdClientPrivateKey
api.EtcdPeerCertificates = vlabs.EtcdPeerCertificates
api.EtcdPeerPrivateKeys = vlabs.EtcdPeerPrivateKeys
}
func convertVLabsAADProfile(vlabs *vlabs.AADProfile, api *AADProfile) {
api.ClientAppID = vlabs.ClientAppID
api.ServerAppID = vlabs.ServerAppID
api.TenantID = vlabs.TenantID
api.AdminGroupID = vlabs.AdminGroupID
api.Authenticator = OIDC
}
func addDCOSPublicAgentPool(api *Properties) {
publicPool := &AgentPoolProfile{}
// tag this agent pool with a known suffix string
publicPool.Name = api.AgentPoolProfiles[0].Name + publicAgentPoolSuffix
// move DNS prefix to public pool
publicPool.DNSPrefix = api.AgentPoolProfiles[0].DNSPrefix
api.AgentPoolProfiles[0].DNSPrefix = ""
publicPool.VMSize = api.AgentPoolProfiles[0].VMSize // - use same VMsize for public pool
publicPool.OSType = api.AgentPoolProfiles[0].OSType // - use same OSType for public pool
api.AgentPoolProfiles[0].Ports = nil
for _, port := range [3]int{80, 443, 8080} {
publicPool.Ports = append(publicPool.Ports, port)
}
// - VM Count for public agents is based on the following:
// 1 master => 1 VM
	// 3 or 5 masters => 3 VMs
if api.MasterProfile.Count == 1 {
publicPool.Count = 1
} else {
publicPool.Count = 3
}
api.AgentPoolProfiles = append(api.AgentPoolProfiles, publicPool)
}
| {
// Set default Ports when DNSPrefix specified
api.Ports = []int{80, 443, 8080}
} |
runtime.js | define(
["./utils","./exception","./base","exports"],
function(__dependency1__, __dependency2__, __dependency3__, __exports__) {
"use strict";
var Utils = __dependency1__;
var Exception = __dependency2__["default"];
var COMPILER_REVISION = __dependency3__.COMPILER_REVISION;
var REVISION_CHANGES = __dependency3__.REVISION_CHANGES;
function checkRevision(compilerInfo) {
var compilerRevision = compilerInfo && compilerInfo[0] || 1,
currentRevision = COMPILER_REVISION;
| if (compilerRevision < currentRevision) {
var runtimeVersions = REVISION_CHANGES[currentRevision],
compilerVersions = REVISION_CHANGES[compilerRevision];
throw new Exception("Template was precompiled with an older version of Handlebars than the current runtime. "+
"Please update your precompiler to a newer version ("+runtimeVersions+") or downgrade your runtime to an older version ("+compilerVersions+").");
} else {
// Use the embedded version info since the runtime doesn't know about this revision yet
throw new Exception("Template was precompiled with a newer version of Handlebars than the current runtime. "+
"Please update your runtime to a newer version ("+compilerInfo[1]+").");
}
}
}
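  // Usage note (illustrative, not part of the original file): compilerInfo is
  // the revision tuple a precompiled template spec carries, e.g. [4, '>= 1.0.0']
  // under Handlebars 1.x conventions. A matching revision returns undefined;
  // any mismatch throws one of the two Exceptions above:
  //
  //   checkRevision([COMPILER_REVISION]); // ok
  //   checkRevision([1]);                 // throws: precompiled with an older version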
__exports__.checkRevision = checkRevision;// TODO: Remove this line and break up compilePartial
function template(templateSpec, env) {
if (!env) {
throw new Exception("No environment passed to template");
}
// Note: Using env.VM references rather than local var references throughout this section to allow
  // for external users to override these as pseudo-supported APIs.
var invokePartialWrapper = function(partial, name, context, helpers, partials, data) {
var result = env.VM.invokePartial.apply(this, arguments);
if (result != null) { return result; }
if (env.compile) {
var options = { helpers: helpers, partials: partials, data: data };
partials[name] = env.compile(partial, { data: data !== undefined }, env);
return partials[name](context, options);
} else {
throw new Exception("The partial " + name + " could not be compiled when running in runtime-only mode");
}
};
// Just add water
var container = {
escapeExpression: Utils.escapeExpression,
invokePartial: invokePartialWrapper,
programs: [],
program: function(i, fn, data) {
var programWrapper = this.programs[i];
if(data) {
programWrapper = program(i, fn, data);
} else if (!programWrapper) {
programWrapper = this.programs[i] = program(i, fn);
}
return programWrapper;
},
merge: function(param, common) {
var ret = param || common;
if (param && common && (param !== common)) {
ret = {};
Utils.extend(ret, common);
Utils.extend(ret, param);
}
return ret;
},
programWithDepth: env.VM.programWithDepth,
noop: env.VM.noop,
compilerInfo: null
};
return function(context, options) {
options = options || {};
var namespace = options.partial ? options : env,
helpers,
partials;
if (!options.partial) {
helpers = options.helpers;
partials = options.partials;
}
var result = templateSpec.call(
container,
namespace, context,
helpers,
partials,
options.data);
if (!options.partial) {
env.VM.checkRevision(container.compilerInfo);
}
return result;
};
}
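  // Usage sketch (assumed call site; in the packaged build this function is
  // exposed as Handlebars.template, which supplies the environment itself):
  //
  //   var render = template(templateSpec, env);
  //   var html = render(context, { helpers: extraHelpers, partials: extraPartials });
  //
  // Per-call helpers/partials are layered over the environment's own
  // collections via container.merge, and container.compilerInfo is verified on
  // each non-partial render.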
__exports__.template = template;function programWithDepth(i, fn, data /*, $depth */) {
var args = Array.prototype.slice.call(arguments, 3);
var prog = function(context, options) {
options = options || {};
return fn.apply(this, [context, options.data || data].concat(args));
};
prog.program = i;
prog.depth = args.length;
return prog;
}
__exports__.programWithDepth = programWithDepth;function program(i, fn, data) {
var prog = function(context, options) {
options = options || {};
return fn(context, options.data || data);
};
prog.program = i;
prog.depth = 0;
return prog;
}
__exports__.program = program;function invokePartial(partial, name, context, helpers, partials, data) {
var options = { partial: true, helpers: helpers, partials: partials, data: data };
if(partial === undefined) {
throw new Exception("The partial " + name + " could not be found");
} else if(partial instanceof Function) {
return partial(context, options);
}
}
__exports__.invokePartial = invokePartial;function noop() { return ""; }
__exports__.noop = noop;
}); | if (compilerRevision !== currentRevision) { |
driver_linux.go | package libvirt
import (
"github.com/densityops/machine/libmachine/drivers"
)
type Driver struct {
*drivers.VMDriver
// Driver specific configuration
Network string
CacheMode string
IOMode string
VSock bool
StoragePool string
}
const (
defaultMemory = 8192
defaultCPU = 4
defaultCacheMode = "default"
defaultIOMode = "threads"
)
func NewDriver(hostName, storePath string) *Driver | {
return &Driver{
VMDriver: &drivers.VMDriver{
BaseDriver: &drivers.BaseDriver{
MachineName: hostName,
StorePath: storePath,
},
Memory: defaultMemory,
CPU: defaultCPU,
},
CacheMode: defaultCacheMode,
IOMode: defaultIOMode,
}
} |
|
wordList.ts | import {
GQLCOMMENT_FLAG_REASON,
GQLCOMMENT_STATUS,
} from "coral-server/graph/tenant/schema/__generated__/types";
import { ACTION_TYPE } from "coral-server/models/action/comment"; | IntermediateModerationPhase,
IntermediatePhaseResult,
} from "coral-server/services/comments/pipeline";
import { containsMatchingPhraseMemoized } from "coral-server/services/comments/pipeline/wordList";
// This phase checks the comment against the wordList.
export const wordList: IntermediateModerationPhase = ({
tenant,
comment,
}): IntermediatePhaseResult | void => {
// If there isn't a body, there can't be a bad word!
if (!comment.body) {
return;
}
// Decide the status based on whether or not the current story/settings
// has pre-mod enabled or not. If the comment was rejected based on the
// wordList, then reject it, otherwise if the moderation setting is
// premod, set it to `premod`.
if (containsMatchingPhraseMemoized(tenant.wordList.banned, comment.body)) {
// Add the flag related to Trust to the comment.
return {
status: GQLCOMMENT_STATUS.REJECTED,
actions: [
{
userID: null,
actionType: ACTION_TYPE.FLAG,
reason: GQLCOMMENT_FLAG_REASON.COMMENT_DETECTED_BANNED_WORD,
},
],
};
}
// If the comment has a suspect word or a link, we need to add a
// flag to it to indicate that it needs to be looked at.
// Otherwise just return the new comment.
// If the wordList has matched the suspect word filter and we haven't disabled
// auto-flagging suspect words, then we should flag the comment!
if (containsMatchingPhraseMemoized(tenant.wordList.suspect, comment.body)) {
return {
actions: [
{
userID: null,
actionType: ACTION_TYPE.FLAG,
reason: GQLCOMMENT_FLAG_REASON.COMMENT_DETECTED_SUSPECT_WORD,
},
],
};
}
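  // Note (added comment): reaching this point without a return means the
  // comment matched neither word list; the phase yields void and later
  // pipeline phases decide the final status.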
}; | import { |
Resnet50_test.py | import tensorflow as tf
from networks.network import Network
from ..fast_rcnn.config import cfg
import pdb
n_classes = 21
_feat_stride = [16,]
anchor_scales = [2,4,8,16,32]
class Resnet50_test(Network):
def __init__(self, trainable=True):
self.inputs = []
self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3])
self.im_info = tf.placeholder(tf.float32, shape=[None, 3])
self.scene = tf.placeholder(tf.float32, shape=[1, 205])
self.keep_prob = tf.placeholder(tf.float32)
self.layers = dict({'data':self.data, 'im_info':self.im_info})
self.trainable = trainable
self.setup()
def setup(self):
n_classes = cfg.NCLASSES
#anchor_scales = cfg.ANCHOR_SCALES
_feat_stride = [16, ]
(self.feed('data')
.conv(7, 7, 64, 2, 2, relu=False, name='conv1')
.batch_normalization(relu=True, name='bn_conv1',is_training=False)
.max_pool(3, 3, 2, 2, padding='VALID',name='pool1')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1')
.batch_normalization(name='bn2a_branch1',is_training=False,relu=False))
(self.feed('pool1')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a')
.batch_normalization(relu=True, name='bn2a_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b')
.batch_normalization(relu=True, name='bn2a_branch2b',is_training=False) |
(self.feed('bn2a_branch1',
'bn2a_branch2c')
.add(name='res2a')
.relu(name='res2a_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a')
.batch_normalization(relu=True, name='bn2b_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b')
.batch_normalization(relu=True, name='bn2b_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c')
.batch_normalization(name='bn2b_branch2c',is_training=False,relu=False))
(self.feed('res2a_relu',
'bn2b_branch2c')
.add(name='res2b')
.relu(name='res2b_relu')
.conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a')
.batch_normalization(relu=True, name='bn2c_branch2a',is_training=False)
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b')
.batch_normalization(relu=True, name='bn2c_branch2b',is_training=False)
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c')
.batch_normalization(name='bn2c_branch2c',is_training=False,relu=False))
(self.feed('res2b_relu',
'bn2c_branch2c')
.add(name='res2c')
.relu(name='res2c_relu')
.conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1', padding='VALID')
.batch_normalization(name='bn3a_branch1',is_training=False,relu=False))
(self.feed('res2c_relu')
.conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a', padding='VALID')
.batch_normalization(relu=True, name='bn3a_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b')
.batch_normalization(relu=True, name='bn3a_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c')
.batch_normalization(name='bn3a_branch2c',is_training=False,relu=False))
(self.feed('bn3a_branch1',
'bn3a_branch2c')
.add(name='res3a')
.relu(name='res3a_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b_branch2a')
.batch_normalization(relu=True, name='bn3b_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b_branch2b')
.batch_normalization(relu=True, name='bn3b_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b_branch2c')
.batch_normalization(name='bn3b_branch2c',is_training=False,relu=False))
(self.feed('res3a_relu',
'bn3b_branch2c')
.add(name='res3b')
.relu(name='res3b_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3c_branch2a')
.batch_normalization(relu=True, name='bn3c_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3c_branch2b')
.batch_normalization(relu=True, name='bn3c_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3c_branch2c')
.batch_normalization(name='bn3c_branch2c',is_training=False,relu=False))
(self.feed('res3b_relu',
'bn3c_branch2c')
.add(name='res3c')
.relu(name='res3c_relu')
.conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3d_branch2a')
.batch_normalization(relu=True, name='bn3d_branch2a',is_training=False)
.conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3d_branch2b')
.batch_normalization(relu=True, name='bn3d_branch2b',is_training=False)
.conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3d_branch2c')
.batch_normalization(name='bn3d_branch2c',is_training=False,relu=False))
(self.feed('res3c_relu',
'bn3d_branch2c')
.add(name='res3d')
.relu(name='res3d_relu')
.conv(1, 1, 1024, 2, 2, biased=False, relu=False, name='res4a_branch1', padding='VALID')
.batch_normalization(name='bn4a_branch1',is_training=False,relu=False))
(self.feed('res3d_relu')
.conv(1, 1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a', padding='VALID')
.batch_normalization(relu=True, name='bn4a_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b')
.batch_normalization(relu=True, name='bn4a_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c')
.batch_normalization(name='bn4a_branch2c',is_training=False,relu=False))
(self.feed('bn4a_branch1',
'bn4a_branch2c')
.add(name='res4a')
.relu(name='res4a_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b_branch2a')
.batch_normalization(relu=True, name='bn4b_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b_branch2b')
.batch_normalization(relu=True, name='bn4b_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b_branch2c')
.batch_normalization(name='bn4b_branch2c',is_training=False,relu=False))
(self.feed('res4a_relu',
'bn4b_branch2c')
.add(name='res4b')
.relu(name='res4b_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4c_branch2a')
.batch_normalization(relu=True, name='bn4c_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4c_branch2b')
.batch_normalization(relu=True, name='bn4c_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4c_branch2c')
.batch_normalization(name='bn4c_branch2c',is_training=False,relu=False))
(self.feed('res4b_relu',
'bn4c_branch2c')
.add(name='res4c')
.relu(name='res4c_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4d_branch2a')
.batch_normalization(relu=True, name='bn4d_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4d_branch2b')
.batch_normalization(relu=True, name='bn4d_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4d_branch2c')
.batch_normalization(name='bn4d_branch2c',is_training=False,relu=False))
(self.feed('res4c_relu',
'bn4d_branch2c')
.add(name='res4d')
.relu(name='res4d_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4e_branch2a')
.batch_normalization(relu=True, name='bn4e_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4e_branch2b')
.batch_normalization(relu=True, name='bn4e_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4e_branch2c')
.batch_normalization(name='bn4e_branch2c',is_training=False,relu=False))
(self.feed('res4d_relu',
'bn4e_branch2c')
.add(name='res4e')
.relu(name='res4e_relu')
.conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4f_branch2a')
.batch_normalization(relu=True, name='bn4f_branch2a',is_training=False)
.conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4f_branch2b')
.batch_normalization(relu=True, name='bn4f_branch2b',is_training=False)
.conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4f_branch2c')
.batch_normalization(name='bn4f_branch2c',is_training=False,relu=False))
(self.feed('res4e_relu',
'bn4f_branch2c')
.add(name='res4f')
.relu(name='res4f_relu'))
#========= RPN ============
(self.feed('res4f_relu')
.conv(3,3,512,1,1,name='rpn_conv/3x3')
.conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, name='rpn_cls_score'))
(self.feed('rpn_conv/3x3')
.conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))
#========= RoI Proposal ============
(self.feed('rpn_cls_score')
.spatial_reshape_layer(2, name = 'rpn_cls_score_reshape')
.spatial_softmax(name='rpn_cls_prob'))
(self.feed('rpn_cls_prob')
.spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape'))
(self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')
.proposal_layer(_feat_stride, anchor_scales, 'TEST',name = 'rois'))
(self.feed('rois', 'im_info')
.union_box_layer(name='whole_box'))
        (self.feed('res4f_relu', 'whole_box')  # 'conv5_3' is a VGG layer name; this ResNet graph defines no such layer
.roi_pool(7, 7, 1.0/16, name='whole_pool'))
#==========================================#
(self.feed('res4f_relu', 'rois')
.roi_pool(7, 7, 1.0/16, name='pool_5'))
(self.feed('pool_5','whole_pool')
.concat(axis=0, name='concat')
.fc(4096, name='fc6'))
(self.feed('rois', 'fc6')
.edge_box_layer(n_boxes=256,fc_dim=64,feat_dim=4096,dim=(4096, 4096, 4096),group=64, index=1,name='edges'))
(self.feed('fc6', 'edges')
.structure_inference_spmm(boxes=256, name='inference')
.fc(n_classes, relu=False, name='cls_score')
.softmax(name='cls_prob'))
(self.feed('inference')
.fc(n_classes*4, relu=False, name='bbox_pred')) | .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c')
.batch_normalization(name='bn2a_branch2c',is_training=False,relu=False)) |
generalized_pareto_test.py | # Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for Generalized Pareto distribution."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# Dependency imports
import hypothesis as hp
import hypothesis.strategies as hps
import numpy as np
from scipy import stats as sp_stats
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import hypothesis_testlib as tfp_hps
from tensorflow_probability.python.internal import test_util
tfd = tfp.distributions
# Pylint doesn't understand hps.composite.
# pylint: disable=no-value-for-parameter
@hps.composite
def generalized_paretos(draw, batch_shape=None):
if batch_shape is None:
batch_shape = draw(tfp_hps.shapes())
constraints = dict(
loc=tfp_hps.identity_fn,
scale=tfp_hps.softplus_plus_eps(),
concentration=lambda x: tf.math.tanh(x) * 0.24) # <.25==safe for variance
params = draw(
tfp_hps.broadcasting_params(
batch_shape,
params_event_ndims=dict(loc=0, scale=0, concentration=0),
constraint_fn_for=constraints.get))
dist = tfd.GeneralizedPareto(validate_args=draw(hps.booleans()), **params)
if dist.batch_shape != batch_shape:
raise AssertionError('batch_shape mismatch: expect {} but got {}'.format(
batch_shape, dist))
return dist
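# Note on the 0.24 bound above (standard Generalized Pareto moment facts): with
# concentration c, the mean is loc + scale / (1 - c) (finite for c < 1) and the
# variance is scale**2 / ((1 - c)**2 * (1 - 2 * c)) (finite only for c < 1/2),
# so constraining |c| <= 0.24 keeps the first two moments of every sampled
# distribution well defined for the tests below.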
@test_util.test_all_tf_execution_regimes
class GeneralizedParetoTest(test_util.TestCase):
@hp.given(generalized_paretos())
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testShape(self, dist):
# batch_shape == dist.batch_shape asserted in generalized_paretos()
self.assertEqual(dist.batch_shape, self.evaluate(dist.batch_shape_tensor()))
self.assertEqual(tf.TensorShape([]), dist.event_shape)
self.assertAllEqual([], self.evaluate(dist.event_shape_tensor()))
@hp.given(generalized_paretos(batch_shape=[]))
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testLogPDF(self, dist):
xs = self.evaluate(dist.sample())
logp = dist.log_prob(xs)
self.assertEqual(dist.batch_shape, logp.shape)
p = dist.prob(xs)
self.assertEqual(dist.batch_shape, p.shape)
loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
expected_logp = sp_stats.genpareto(conc, loc=loc, scale=scale).logpdf(xs)
actual_logp = self.evaluate(logp)
self.assertAllClose(expected_logp, actual_logp, rtol=1e-5)
self.assertAllClose(np.exp(expected_logp), self.evaluate(p), rtol=1e-5)
def testLogPDFBoundary(self):
# When loc = concentration = 0, we have an exponential distribution. Check
# that at 0 we have finite log prob.
scale = np.array([0.1, 0.5, 1., 2., 5., 10.], dtype=np.float32)
dist = tfd.GeneralizedPareto(loc=0, scale=scale, concentration=0)
log_pdf = dist.log_prob(0.)
self.assertAllClose(-np.log(scale), self.evaluate(log_pdf), rtol=1e-5)
@hp.given(generalized_paretos(batch_shape=[]))
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testCDF(self, dist):
xs = self.evaluate(dist.sample())
cdf = dist.cdf(xs)
self.assertEqual(dist.batch_shape, cdf.shape)
loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
expected_cdf = sp_stats.genpareto(conc, loc=loc, scale=scale).cdf(xs)
self.assertAllClose(expected_cdf, self.evaluate(cdf), rtol=5e-5)
@hp.given(generalized_paretos(batch_shape=[]))
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testMean(self, dist):
loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
self.assertEqual(dist.batch_shape, dist.mean().shape)
if np.abs(conc) < 1e-5 and conc != 0:
return # scipy does badly at small nonzero concentrations.
expected = sp_stats.genpareto(conc, loc=loc, scale=scale).mean()
actual = self.evaluate(dist.mean())
self.assertAllClose(expected, actual, rtol=5e-4)
@hp.given(generalized_paretos(batch_shape=[]))
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testVariance(self, dist):
loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
self.assertEqual(dist.batch_shape, dist.variance().shape)
expected = sp_stats.genpareto(conc, loc=loc, scale=scale).var()
if np.abs(conc) < 1e-4 and conc != 0:
return # scipy does badly at small nonzero concentrations.
if expected <= 0:
return # scipy sometimes returns nonsense zero or negative variances.
actual = self.evaluate(dist.variance())
print('var', loc, scale, conc, expected, actual, file=sys.stderr)
self.assertAllClose(expected, actual, rtol=.01)
@hp.given(generalized_paretos(batch_shape=[]))
@tfp_hps.tfp_hp_settings(default_max_examples=5)
def testEntropy(self, dist):
loc, scale, conc = self.evaluate([dist.loc, dist.scale, dist.concentration])
self.assertEqual(dist.batch_shape, dist.entropy().shape)
expected = sp_stats.genpareto.entropy(conc, loc=loc, scale=scale)
actual = self.evaluate(dist.entropy())
self.assertAllClose(expected, actual)
def testSample(self):
loc = np.float32(-7.5)
scale = np.float32(3.5)
conc = np.float32(0.07)
n = 100000
dist = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=conc)
samples = dist.sample(n, seed=test_util.test_seed())
sample_values = self.evaluate(samples)
self.assertEqual((n,), samples.shape)
self.assertEqual((n,), sample_values.shape)
self.assertTrue(self._kstest(loc, scale, conc, sample_values))
self.assertAllClose(
sp_stats.genpareto.mean(conc, loc=loc, scale=scale),
sample_values.mean(),
rtol=.005)
self.assertAllClose(
sp_stats.genpareto.var(conc, loc=loc, scale=scale),
sample_values.var(),
rtol=.01)
def testFullyReparameterized(self):
loc = tf.constant(4.0)
scale = tf.constant(3.0)
conc = tf.constant(2.0)
_, grads = tfp.math.value_and_gradient(
lambda *args: tfd.GeneralizedPareto(*args).sample(100),
[loc, scale, conc])
self.assertLen(grads, 3)
self.assertAllNotNone(grads)
def testSampleKolmogorovSmirnovMultiDimensional(self):
loc = np.linspace(-10, 10, 3).reshape(3, 1, 1)
scale = np.linspace(1e-6, 7, 5).reshape(5, 1)
conc = np.linspace(-1.3, 1.3, 7)
dist = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=conc) | sample_values = self.evaluate(samples)
self.assertEqual((n, 3, 5, 7), samples.shape)
self.assertEqual((n, 3, 5, 7), sample_values.shape)
fails = 0
trials = 0
for li, l in enumerate(loc.reshape(-1)):
for si, s in enumerate(scale.reshape(-1)):
for ci, c in enumerate(conc.reshape(-1)):
samps = sample_values[:, li, si, ci]
trials += 1
fails += 0 if self._kstest(l, s, c, samps) else 1
self.assertLess(fails, trials * 0.01)
def _kstest(self, loc, scale, conc, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
ks, _ = sp_stats.kstest(samples,
sp_stats.genpareto(conc, loc=loc, scale=scale).cdf)
# Return True when the test passes.
return ks < 0.02
def testPdfOfSampleMultiDims(self):
dist = tfd.GeneralizedPareto(
loc=0, scale=[[2.], [3.]], concentration=[-.37, .11])
num = 50000
samples = dist.sample(num, seed=test_util.test_seed())
pdfs = dist.prob(samples)
sample_vals, pdf_vals = self.evaluate([samples, pdfs])
self.assertEqual((num, 2, 2), samples.shape)
self.assertEqual((num, 2, 2), pdfs.shape)
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testNonPositiveInitializationParamsRaises(self):
scale = tf.constant(0.0, name='scale')
with self.assertRaisesOpError('Argument `scale` must be positive.'):
dist = tfd.GeneralizedPareto(
loc=0, scale=scale, concentration=1, validate_args=True)
self.evaluate(dist.mean())
def testGradientThroughConcentration(self):
concentration = tf.Variable(3.)
d = tfd.GeneralizedPareto(loc=0, scale=1, concentration=concentration)
with tf.GradientTape() as tape:
loss = -d.log_prob([1., 2., 4.])
grad = tape.gradient(loss, d.trainable_variables)
self.assertLen(grad, 1)
self.assertAllNotNone(grad)
def testAssertsPositiveScale(self):
scale = tf.Variable([1., 2., -3.])
self.evaluate(scale.initializer)
with self.assertRaisesOpError('Argument `scale` must be positive.'):
d = tfd.GeneralizedPareto(
loc=0, scale=scale, concentration=1, validate_args=True)
self.evaluate(d.sample())
def testAssertsPositiveScaleAfterMutation(self):
scale = tf.Variable([1., 2., 3.])
self.evaluate(scale.initializer)
d = tfd.GeneralizedPareto(
loc=0, scale=scale, concentration=0.25, validate_args=True)
self.evaluate(d.mean())
with self.assertRaisesOpError('Argument `scale` must be positive.'):
with tf.control_dependencies([scale.assign([1., 2., -3.])]):
self.evaluate(d.sample())
def testGradientThroughLocScale(self):
loc = tf.Variable(1.)
scale = tf.Variable(2.5)
d = tfd.GeneralizedPareto(loc=loc, scale=scale, concentration=.15)
with tf.GradientTape() as tape:
loss = -d.log_prob([1., 2., 4.])
grads = tape.gradient(loss, d.trainable_variables)
self.assertLen(grads, 2)
self.assertAllNotNone(grads)
if __name__ == '__main__':
tf.test.main() | n = 10000
samples = dist.sample(n, seed=test_util.test_seed()) |
generate-seeds.py | #!/usr/bin/python
# Copyright (c) 2014 Wladimir J. van der Laan
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
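# Example invocation (assumed usage; the exact paths are illustrative):
#
#   python generate-seeds.py . > ../../src/chainparamsseeds.h
#
# where the directory argument contains nodes_main.txt and nodes_test.txt.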
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
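# Illustrative results (derived from the branches above; bytes shown abridged):
#   name_to_ipv6('1.2.3.4')    == pchIPv4 + bytearray([1, 2, 3, 4])
#   name_to_ipv6('::1')        == fifteen zero bytes followed by 0x01
#   name_to_ipv6('0x04030201') == pchIPv4 + bytearray([1, 2, 3, 4])  # little-endian pnSeeds form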
def parse_spec(s, defaultport):
    match = re.match(r'\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
elif s.count(':') > 1: # ipv6, no port
host = s
port = ''
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line: | g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('#define BITCOIN_CHAINPARAMSSEEDS_H\n')
g.write('/**\n')
g.write(' * List of fixed seed nodes for the bitcoin network\n')
g.write(' * AUTOGENERATED by contrib/seeds/generate-seeds.py\n')
g.write(' *\n')
g.write(' * Each line contains a 16-byte IPv6 address and a port.\n')
g.write(' * IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
g.write(' */\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8778)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 18777)
g.write('#endif // BITCOIN_CHAINPARAMSSEEDS_H\n')
if __name__ == '__main__':
main() | continue
if not first: |
pixbuf_format.rs | // This file was generated by gir (https://github.com/gtk-rs/gir)
// from gir-files (https://github.com/gtk-rs/gir-files)
// DO NOT EDIT
use glib::translate::*;
glib::wrapper! {
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PixbufFormat(Boxed<ffi::GdkPixbufFormat>);
match fn {
copy => |ptr| ffi::gdk_pixbuf_format_copy(ptr),
free => |ptr| ffi::gdk_pixbuf_format_free(ptr),
type_ => || ffi::gdk_pixbuf_format_get_type(),
}
}
impl PixbufFormat {
#[doc(alias = "gdk_pixbuf_format_get_description")]
pub fn description(&self) -> Option<glib::GString> {
unsafe {
from_glib_full(ffi::gdk_pixbuf_format_get_description(mut_override(
self.to_glib_none().0,
)))
}
}
#[doc(alias = "gdk_pixbuf_format_get_extensions")]
pub fn extensions(&self) -> Vec<glib::GString> {
unsafe {
FromGlibPtrContainer::from_glib_full(ffi::gdk_pixbuf_format_get_extensions(
mut_override(self.to_glib_none().0),
))
}
}
#[doc(alias = "gdk_pixbuf_format_get_license")]
pub fn | (&self) -> Option<glib::GString> {
unsafe {
from_glib_full(ffi::gdk_pixbuf_format_get_license(mut_override(
self.to_glib_none().0,
)))
}
}
#[doc(alias = "gdk_pixbuf_format_get_mime_types")]
pub fn mime_types(&self) -> Vec<glib::GString> {
unsafe {
FromGlibPtrContainer::from_glib_full(ffi::gdk_pixbuf_format_get_mime_types(
mut_override(self.to_glib_none().0),
))
}
}
#[doc(alias = "gdk_pixbuf_format_get_name")]
pub fn name(&self) -> Option<glib::GString> {
unsafe {
from_glib_full(ffi::gdk_pixbuf_format_get_name(mut_override(
self.to_glib_none().0,
)))
}
}
#[doc(alias = "gdk_pixbuf_format_is_disabled")]
pub fn is_disabled(&self) -> bool {
unsafe {
from_glib(ffi::gdk_pixbuf_format_is_disabled(mut_override(
self.to_glib_none().0,
)))
}
}
#[cfg(any(feature = "v2_36", feature = "dox"))]
#[cfg_attr(feature = "dox", doc(cfg(feature = "v2_36")))]
#[doc(alias = "gdk_pixbuf_format_is_save_option_supported")]
pub fn is_save_option_supported(&self, option_key: &str) -> bool {
unsafe {
from_glib(ffi::gdk_pixbuf_format_is_save_option_supported(
mut_override(self.to_glib_none().0),
option_key.to_glib_none().0,
))
}
}
#[doc(alias = "gdk_pixbuf_format_is_scalable")]
pub fn is_scalable(&self) -> bool {
unsafe {
from_glib(ffi::gdk_pixbuf_format_is_scalable(mut_override(
self.to_glib_none().0,
)))
}
}
#[doc(alias = "gdk_pixbuf_format_is_writable")]
pub fn is_writable(&self) -> bool {
unsafe {
from_glib(ffi::gdk_pixbuf_format_is_writable(mut_override(
self.to_glib_none().0,
)))
}
}
#[doc(alias = "gdk_pixbuf_format_set_disabled")]
pub fn set_disabled(&mut self, disabled: bool) {
unsafe {
ffi::gdk_pixbuf_format_set_disabled(self.to_glib_none_mut().0, disabled.to_glib());
}
}
}
| license |
usbpullup.rs | #[doc = "Register `USBPULLUP` reader"]
pub struct R(crate::R<USBPULLUP_SPEC>);
impl core::ops::Deref for R {
type Target = crate::R<USBPULLUP_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target { | }
}
impl core::convert::From<crate::R<USBPULLUP_SPEC>> for R {
fn from(reader: crate::R<USBPULLUP_SPEC>) -> Self {
R(reader)
}
}
#[doc = "Register `USBPULLUP` writer"]
pub struct W(crate::W<USBPULLUP_SPEC>);
impl core::ops::Deref for W {
type Target = crate::W<USBPULLUP_SPEC>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl core::ops::DerefMut for W {
#[inline(always)]
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
impl core::convert::From<crate::W<USBPULLUP_SPEC>> for W {
fn from(writer: crate::W<USBPULLUP_SPEC>) -> Self {
W(writer)
}
}
#[doc = "Control of the USB pull-up on the D+ line\n\nValue on reset: 0"]
#[derive(Clone, Copy, Debug, PartialEq)]
pub enum CONNECT_A {
#[doc = "0: Pull-up is disconnected"]
DISABLED = 0,
#[doc = "1: Pull-up is connected to D+"]
ENABLED = 1,
}
impl From<CONNECT_A> for bool {
#[inline(always)]
fn from(variant: CONNECT_A) -> Self {
variant as u8 != 0
}
}
#[doc = "Field `CONNECT` reader - Control of the USB pull-up on the D+ line"]
pub struct CONNECT_R(crate::FieldReader<bool, CONNECT_A>);
impl CONNECT_R {
pub(crate) fn new(bits: bool) -> Self {
CONNECT_R(crate::FieldReader::new(bits))
}
#[doc = r"Get enumerated values variant"]
#[inline(always)]
pub fn variant(&self) -> CONNECT_A {
match self.bits {
false => CONNECT_A::DISABLED,
true => CONNECT_A::ENABLED,
}
}
#[doc = "Checks if the value of the field is `DISABLED`"]
#[inline(always)]
pub fn is_disabled(&self) -> bool {
**self == CONNECT_A::DISABLED
}
#[doc = "Checks if the value of the field is `ENABLED`"]
#[inline(always)]
pub fn is_enabled(&self) -> bool {
**self == CONNECT_A::ENABLED
}
}
impl core::ops::Deref for CONNECT_R {
type Target = crate::FieldReader<bool, CONNECT_A>;
#[inline(always)]
fn deref(&self) -> &Self::Target {
&self.0
}
}
#[doc = "Field `CONNECT` writer - Control of the USB pull-up on the D+ line"]
pub struct CONNECT_W<'a> {
w: &'a mut W,
}
impl<'a> CONNECT_W<'a> {
#[doc = r"Writes `variant` to the field"]
#[inline(always)]
pub fn variant(self, variant: CONNECT_A) -> &'a mut W {
self.bit(variant.into())
}
#[doc = "Pull-up is disconnected"]
#[inline(always)]
pub fn disabled(self) -> &'a mut W {
self.variant(CONNECT_A::DISABLED)
}
#[doc = "Pull-up is connected to D+"]
#[inline(always)]
pub fn enabled(self) -> &'a mut W {
self.variant(CONNECT_A::ENABLED)
}
#[doc = r"Sets the field bit"]
#[inline(always)]
pub fn set_bit(self) -> &'a mut W {
self.bit(true)
}
#[doc = r"Clears the field bit"]
#[inline(always)]
pub fn clear_bit(self) -> &'a mut W {
self.bit(false)
}
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub fn bit(self, value: bool) -> &'a mut W {
self.w.bits = (self.w.bits & !0x01) | ((value as u32) & 0x01);
self.w
}
}
impl R {
#[doc = "Bit 0 - Control of the USB pull-up on the D+ line"]
#[inline(always)]
pub fn connect(&self) -> CONNECT_R {
CONNECT_R::new((self.bits & 0x01) != 0)
}
}
impl W {
#[doc = "Bit 0 - Control of the USB pull-up on the D+ line"]
#[inline(always)]
pub fn connect(&mut self) -> CONNECT_W {
CONNECT_W { w: self }
}
#[doc = "Writes raw bits to the register."]
pub unsafe fn bits(&mut self, bits: u32) -> &mut Self {
self.0.bits(bits);
self
}
}
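// Usage sketch (assumed svd2rust-style call site; `usbd` is a hypothetical
// handle to the peripheral that owns this register):
//
//     usbd.usbpullup.write(|w| w.connect().enabled()); // connect the pull-up to D+
//     let on = usbd.usbpullup.read().connect().is_enabled();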
#[doc = "Control of the USB pull-up\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [usbpullup](index.html) module"]
pub struct USBPULLUP_SPEC;
impl crate::RegisterSpec for USBPULLUP_SPEC {
type Ux = u32;
}
#[doc = "`read()` method returns [usbpullup::R](R) reader structure"]
impl crate::Readable for USBPULLUP_SPEC {
type Reader = R;
}
#[doc = "`write(|w| ..)` method takes [usbpullup::W](W) writer structure"]
impl crate::Writable for USBPULLUP_SPEC {
type Writer = W;
}
#[doc = "`reset()` method sets USBPULLUP to value 0"]
impl crate::Resettable for USBPULLUP_SPEC {
#[inline(always)]
fn reset_value() -> Self::Ux {
0
}
} | &self.0 |
version.go | package compute
import "github.com/Azure/azure-sdk-for-go/version"
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
//
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
// UserAgent returns the UserAgent string to use when sending http.Requests.
func UserAgent() string |
// Version returns the semantic version (see http://semver.org) of the client.
func Version() string {
return version.Number
}
| {
return "Azure-SDK-For-Go/" + Version() + " compute/2016-04-30-preview"
} |
img.rs | use crate::extensions::{Context, Extension, ExtensionVariant};
use crate::translator::OutputFormat;
/// **Native extension**: add an image
#[derive(Clone)]
pub struct Img;
// TODO: make the html variant take the same args as the latex variant
impl Extension for Img {
fn name(&self) -> String {
"Img".to_string()
}
fn description(&self) -> String {
"Add an image.\n\
\n\
Usage:\n\
|img, filepath, [alt, width, label]|\n\
\n\
or as a block...\n\
---- img, filepath [width, label] ----\n\
alt text\n\
----"
.to_string()
}
fn version(&self) -> String {
"1".to_string()
}
fn call(&self, mut ctx: Context) -> Option<String> {
match ctx.output_format {
OutputFormat::LambdaNote => todo!(),
OutputFormat::Html => self.html(&mut ctx),
OutputFormat::Latex => self.latex(&mut ctx),
}
}
fn supports_block(&self) -> bool {
true
}
fn supports_inline(&self) -> bool {
true
}
fn interests(&self) -> Vec<String> {
vec![]
}
}
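// Worked example (traced from latex() below): the inline call
// `|img, cat.png, A cat, 0.5, fig:cat|` targeting LaTeX output registers
// \usepackage{graphicx} and renders, modulo whitespace:
//
//     \begin{figure}[h]
//     \caption{A cat}
//     \label{fig:cat}
//     \centering
//     \includegraphics[width=0.5\textwidth]{cat.png}
//     \end{figure}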
impl Img {
fn html(&self, ctx: &mut Context) -> Option<String> {
let alt: Option<&String>;
let src: Option<&String>;
match ctx.variant {
ExtensionVariant::Block => {
alt = ctx.arguments.get(0);
src = ctx.arguments.get(1);
}
ExtensionVariant::Inline => {
src = ctx.arguments.get(0);
alt = ctx.arguments.get(1);
}
}
if src.is_none() {
self.add_error("Img: no path to the image was given", ctx);
return None;
}
Some(format!(
"<img src=\"{filename}\" {alt}>",
filename = src.map_or_else(|| String::from(""), |s| s.to_owned()),
alt = alt.map_or_else(|| String::from(""), |s| format!("alt=\"{}\"", s))
))
}
// | src, [alt, width, label] |
fn latex(&self, ctx: &mut Context) -> Option<String> {
ctx.document.import("\\usepackage{graphicx}");
let alt: Option<&String>;
let src: Option<&String>;
match ctx.variant {
ExtensionVariant::Block => {
alt = ctx.arguments.get(0); | ExtensionVariant::Inline => {
src = ctx.arguments.get(0);
alt = ctx.arguments.get(1);
}
}
Some(format!(
"\\begin{{figure}}[h]
{caption}
{label}
\\centering
\\includegraphics[width={width}\\textwidth]{{{src}}}
\\end{{figure}}",
src = src.unwrap_or(&String::from("")),
caption = alt.map_or_else(
|| String::from(""),
|text| format!("\\caption{{{}}}", text.trim().replace("\n", r#"\\"#))
),
width = ctx.arguments.get(2).unwrap_or(&String::from("1")),
label = ctx.arguments.get(3).map_or_else(
|| String::from(""),
|label| format!("\\label{{{}}}", label.trim())
)
))
}
} | src = ctx.arguments.get(1);
}
|
apps.py | from django.apps import AppConfig
class ShortenerConfig(AppConfig):
| default_auto_field = "django.db.models.BigAutoField"
name = "manti_by.apps.shortener" |
|
import-from-rename.rs | // Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// error-pattern:expected
use baz = foo::{bar};
mod foo {
pub fn bar() {}
}
fn main() { | } | |
graph.py | from collections import deque
class Vertex:
def __init__(self,value):
self.value = value
class Edge:
def __init__(self,vertex,weight):
self.vertex = vertex
self.weight = weight
class Queue:
def __init__(self):
|
def enqueue(self, value):
self.dq.appendleft(value)
def dequeue(self):
return self.dq.pop()
def __len__(self):
return len(self.dq)
class Graph:
def __init__(self):
self._adjacency_list = {}
def add_node(self, value):
node = Vertex(value)
self._adjacency_list[node] = []
return node
def size(self):
return len(self._adjacency_list)
def add_edge(self, start_node, end_node, weight=1):
        if start_node not in self._adjacency_list:
            raise KeyError('start_node does not exist in the graph.')
        if end_node not in self._adjacency_list:
            raise KeyError('end_node does not exist in the graph.')
        adjacencies = self._adjacency_list[start_node]
        adjacencies.append(Edge(end_node, weight))
def get_nodes(self):
return self._adjacency_list.keys()
def get_neighbors(self, vertex):
return self._adjacency_list.get(vertex, [])
def breadth_first_search(self, start_vertex, action=(lambda x: None)):
queue = Queue()
visited = set()
queue.enqueue(start_vertex)
visited.add(start_vertex)
while len(queue):
current_vertex = queue.dequeue()
action(current_vertex)
neighbors = self.get_neighbors(current_vertex)
for edge in neighbors:
neighbor_vertex = edge.vertex
if neighbor_vertex in visited:
continue
else:
visited.add(neighbor_vertex)
queue.enqueue(neighbor_vertex)
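# --- Editor's usage sketch (an addition, not part of the original file) ---
# Minimal walk-through of the Graph API above; every name used here
# (Graph, add_node, add_edge, breadth_first_search) is defined in this module.
if __name__ == '__main__':
    g = Graph()
    a = g.add_node('a')
    b = g.add_node('b')
    c = g.add_node('c')
    g.add_edge(a, b)
    g.add_edge(a, c, weight=2)
    # Visits a, then its neighbors b and c in insertion order.
    g.breadth_first_search(a, action=lambda v: print(v.value))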
| self.dq = deque() |
get_management_group.py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetManagementGroupResult',
'AwaitableGetManagementGroupResult',
'get_management_group',
]
@pulumi.output_type
class GetManagementGroupResult:
"""
The management group details.
"""
def __init__(__self__, children=None, details=None, display_name=None, id=None, name=None, path=None, roles=None, tenant_id=None, type=None):
if children and not isinstance(children, list):
raise TypeError("Expected argument 'children' to be a list")
pulumi.set(__self__, "children", children)
if details and not isinstance(details, dict):
raise TypeError("Expected argument 'details' to be a dict")
pulumi.set(__self__, "details", details)
if display_name and not isinstance(display_name, str):
raise TypeError("Expected argument 'display_name' to be a str")
pulumi.set(__self__, "display_name", display_name)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if path and not isinstance(path, list):
raise TypeError("Expected argument 'path' to be a list")
pulumi.set(__self__, "path", path)
if roles and not isinstance(roles, list):
raise TypeError("Expected argument 'roles' to be a list")
pulumi.set(__self__, "roles", roles)
if tenant_id and not isinstance(tenant_id, str):
raise TypeError("Expected argument 'tenant_id' to be a str")
pulumi.set(__self__, "tenant_id", tenant_id)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def children(self) -> Optional[Sequence['outputs.ManagementGroupChildInfoResponse']]:
"""
The list of children.
"""
return pulumi.get(self, "children")
@property
@pulumi.getter
def details(self) -> Optional['outputs.ManagementGroupDetailsResponse']:
"""
The details of a management group.
"""
return pulumi.get(self, "details")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
The friendly name of the management group.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def id(self) -> str:
|
@property
@pulumi.getter
def name(self) -> str:
"""
The name of the management group. For example, 00000000-0000-0000-0000-000000000000
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def path(self) -> Optional[Sequence['outputs.ManagementGroupPathElementResponse']]:
"""
The hierarchical path from the root group to the current group.
"""
return pulumi.get(self, "path")
@property
@pulumi.getter
def roles(self) -> Optional[Sequence[str]]:
"""
The role definitions associated with the management group.
"""
return pulumi.get(self, "roles")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> Optional[str]:
"""
The AAD Tenant ID associated with the management group. For example, 00000000-0000-0000-0000-000000000000
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the resource. For example, Microsoft.Management/managementGroups
"""
return pulumi.get(self, "type")
class AwaitableGetManagementGroupResult(GetManagementGroupResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetManagementGroupResult(
children=self.children,
details=self.details,
display_name=self.display_name,
id=self.id,
name=self.name,
path=self.path,
roles=self.roles,
tenant_id=self.tenant_id,
type=self.type)
def get_management_group(expand: Optional[str] = None,
filter: Optional[str] = None,
group_id: Optional[str] = None,
recurse: Optional[bool] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagementGroupResult:
"""
The management group details.
:param str expand: The $expand=children query string parameter allows clients to request inclusion of children in the response payload. $expand=path includes the path from the root group to the current group.
:param str filter: A filter which allows the exclusion of subscriptions from results (i.e. '$filter=children.childType ne Subscription')
:param str group_id: Management Group ID.
:param bool recurse: The $recurse=true query string parameter allows clients to request inclusion of entire hierarchy in the response payload. Note that $expand=children must be passed up if $recurse is set to true.
"""
__args__ = dict()
__args__['expand'] = expand
__args__['filter'] = filter
__args__['groupId'] = group_id
__args__['recurse'] = recurse
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:management/v20191101:getManagementGroup', __args__, opts=opts, typ=GetManagementGroupResult).value
return AwaitableGetManagementGroupResult(
children=__ret__.children,
details=__ret__.details,
display_name=__ret__.display_name,
id=__ret__.id,
name=__ret__.name,
path=__ret__.path,
roles=__ret__.roles,
tenant_id=__ret__.tenant_id,
type=__ret__.type)
| """
The fully qualified ID for the management group. For example, /providers/Microsoft.Management/managementGroups/0000000-0000-0000-0000-000000000000
"""
return pulumi.get(self, "id") |
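# Editor's usage sketch (hypothetical IDs; not part of the generated file):
#
#     result = get_management_group(
#         group_id="00000000-0000-0000-0000-000000000000",
#         expand="children",
#         recurse=True)
#     print(result.display_name, result.tenant_id)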
page.py |
from typing import Any

class Page(object):
start: int
end: int
domain: str
all_urls: Any
m3u8_dict: dict
__slots__ = ("start", "end", "domain", "all_urls", "m3u8_dict")
def __init__(self, start, end, domain, all_urls=None, **m3u8_dict):
# super().__init__()
| self.start = start
self.end = end
self.domain = domain
self.all_urls = all_urls if all_urls is not None else []  # avoid the shared mutable-default pitfall
self.m3u8_dict = m3u8_dict |
|
encode.go | // Copyright (c) 2018 The Ecosystem Authors
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php
// Copyright 2011 The Snappy-Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package snappy
import (
"encoding/binary"
"errors"
"io"
)
// Encode returns the encoded form of src. The returned slice may be a sub-
// slice of dst if dst was large enough to hold the entire encoded block.
// Otherwise, a newly allocated slice will be returned.
//
// The dst and src must not overlap. It is valid to pass a nil dst.
func Encode(dst, src []byte) []byte {
if n := MaxEncodedLen(len(src)); n < 0 {
panic(ErrTooLarge)
} else if len(dst) < n {
dst = make([]byte, n)
}
// The block starts with the varint-encoded length of the decompressed bytes.
d := binary.PutUvarint(dst, uint64(len(src)))
for len(src) > 0 {
p := src
src = nil
if len(p) > maxBlockSize {
p, src = p[:maxBlockSize], p[maxBlockSize:]
}
if len(p) < minNonLiteralBlockSize {
d += emitLiteral(dst[d:], p)
} else {
d += encodeBlock(dst[d:], p)
}
}
return dst[:d]
}
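// Editor's sketch (an addition, not part of the original file): round-tripping
// one block; Decode is this package's matching function from decode.go.
//
//	compressed := Encode(nil, []byte("hello hello hello hello"))
//	original, err := Decode(nil, compressed) // original == input, err == nil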
// inputMargin is the minimum number of extra input bytes to keep, inside
// encodeBlock's inner loop. On some architectures, this margin lets us
// implement a fast path for emitLiteral, where the copy of short (<= 16 byte)
// literals can be implemented as a single load to and store from a 16-byte
// register. That literal's actual length can be as short as 1 byte, so this
// can copy up to 15 bytes too much, but that's OK as subsequent iterations of
// the encoding loop will fix up the copy overrun, and this inputMargin ensures
// that we don't overrun the dst and src buffers.
const inputMargin = 16 - 1
// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that
// could be encoded with a copy tag. This is the minimum with respect to the
// algorithm used by encodeBlock, not a minimum enforced by the file format.
//
// The encoded output must start with at least a 1 byte literal, as there are
// no previous bytes to copy. A minimal (1 byte) copy after that, generated
// from an emitCopy call in encodeBlock's main loop, would require at least
// another inputMargin bytes, for the reason above: we want any emitLiteral
// calls inside encodeBlock's main loop to use the fast path if possible, which
// requires being able to overrun by inputMargin bytes. Thus,
// minNonLiteralBlockSize equals 1 + 1 + inputMargin.
//
// The C++ code doesn't use this exact threshold, but it could, as discussed at
// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion
// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an
// optimization. It should not affect the encoded form. This is tested by
// TestSameEncodingAsCppShortCopies.
const minNonLiteralBlockSize = 1 + 1 + inputMargin
// MaxEncodedLen returns the maximum length of a snappy block, given its
// uncompressed length.
//
// It will return a negative value if srcLen is too large to encode.
func MaxEncodedLen(srcLen int) int {
n := uint64(srcLen)
if n > 0xffffffff {
return -1
}
// Compressed data can be defined as:
// compressed := item* literal*
// item := literal* copy
//
// The trailing literal sequence has a space blowup of at most 62/60
// since a literal of length 60 needs one tag byte + one extra byte
// for length information.
//
// Item blowup is trickier to measure. Suppose the "copy" op copies
// 4 bytes of data. Because of a special check in the encoding code,
// we produce a 4-byte copy only if the offset is < 65536. Therefore
// the copy op takes 3 bytes to encode, and this type of item leads
// to at most the 62/60 blowup for representing literals.
//
// Suppose the "copy" op copies 5 bytes of data. If the offset is big
// enough, it will take 5 bytes to encode the copy op. Therefore the
// worst case here is a one-byte literal followed by a five-byte copy.
// That is, 6 bytes of input turn into 7 bytes of "compressed" data.
//
// This last factor dominates the blowup, so the final estimate is:
n = 32 + n + n/6
if n > 0xffffffff {
return -1
}
return int(n)
}
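// Editor's note (an addition, not part of the original file): the bound above
// works out to exactly 32 + n + n/6 for any encodable length, for example:
//
//	MaxEncodedLen(60) == 32 + 60 + 10 == 102
//	MaxEncodedLen(1 << 33) == -1 // larger than 0xffffffff, cannot be encoded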
var errClosed = errors.New("snappy: Writer is closed")
// NewWriter returns a new Writer that compresses to w.
//
// The Writer returned does not buffer writes. There is no need to Flush or
// Close such a Writer.
//
// Deprecated: the Writer returned is not suitable for many small writes, only
// for few large writes. Use NewBufferedWriter instead, which is efficient
// regardless of the frequency and shape of the writes, and remember to Close
// that Writer when done.
func NewWriter(w io.Writer) *Writer { |
// NewBufferedWriter returns a new Writer that compresses to w, using the
// framing format described at
// https://github.com/google/snappy/blob/master/framing_format.txt
//
// The Writer returned buffers writes. Users must call Close to guarantee all
// data has been forwarded to the underlying io.Writer. They may also call
// Flush zero or more times before calling Close.
func NewBufferedWriter(w io.Writer) *Writer {
return &Writer{
w: w,
ibuf: make([]byte, 0, maxBlockSize),
obuf: make([]byte, obufLen),
}
}
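// Editor's sketch (an addition, not part of the original file): typical
// buffered usage as described above - Close flushes the final frames.
//
//	w := NewBufferedWriter(dst) // dst is any io.Writer
//	if _, err := w.Write(data); err != nil {
//		// handle err
//	}
//	if err := w.Close(); err != nil {
//		// handle err
//	}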
// Writer is an io.Writer that can write Snappy-compressed bytes.
type Writer struct {
w io.Writer
err error
// ibuf is a buffer for the incoming (uncompressed) bytes.
//
// Its use is optional. For backwards compatibility, Writers created by the
// NewWriter function have ibuf == nil, do not buffer incoming bytes, and
// therefore do not need to be Flush'ed or Close'd.
ibuf []byte
// obuf is a buffer for the outgoing (compressed) bytes.
obuf []byte
// wroteStreamHeader is whether we have written the stream header.
wroteStreamHeader bool
}
// Reset discards the writer's state and switches the Snappy writer to write to
// w. This permits reusing a Writer rather than allocating a new one.
func (w *Writer) Reset(writer io.Writer) {
w.w = writer
w.err = nil
if w.ibuf != nil {
w.ibuf = w.ibuf[:0]
}
w.wroteStreamHeader = false
}
// Write satisfies the io.Writer interface.
func (w *Writer) Write(p []byte) (nRet int, errRet error) {
if w.ibuf == nil {
// Do not buffer incoming bytes. This does not perform or compress well
// if the caller of Writer.Write writes many small slices. This
// behavior is therefore deprecated, but still supported for backwards
// compatibility with code that doesn't explicitly Flush or Close.
return w.write(p)
}
// The remainder of this method is based on bufio.Writer.Write from the
// standard library.
for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil {
var n int
if len(w.ibuf) == 0 {
// Large write, empty buffer.
// Write directly from p to avoid copy.
n, _ = w.write(p)
} else {
n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
w.Flush()
}
nRet += n
p = p[n:]
}
if w.err != nil {
return nRet, w.err
}
n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p)
w.ibuf = w.ibuf[:len(w.ibuf)+n]
nRet += n
return nRet, nil
}
func (w *Writer) write(p []byte) (nRet int, errRet error) {
if w.err != nil {
return 0, w.err
}
for len(p) > 0 {
obufStart := len(magicChunk)
if !w.wroteStreamHeader {
w.wroteStreamHeader = true
copy(w.obuf, magicChunk)
obufStart = 0
}
var uncompressed []byte
if len(p) > maxBlockSize {
uncompressed, p = p[:maxBlockSize], p[maxBlockSize:]
} else {
uncompressed, p = p, nil
}
checksum := crc(uncompressed)
// Compress the buffer, discarding the result if the improvement
// isn't at least 12.5%.
compressed := Encode(w.obuf[obufHeaderLen:], uncompressed)
chunkType := uint8(chunkTypeCompressedData)
chunkLen := 4 + len(compressed)
obufEnd := obufHeaderLen + len(compressed)
if len(compressed) >= len(uncompressed)-len(uncompressed)/8 {
chunkType = chunkTypeUncompressedData
chunkLen = 4 + len(uncompressed)
obufEnd = obufHeaderLen
}
// Fill in the per-chunk header that comes before the body.
w.obuf[len(magicChunk)+0] = chunkType
w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0)
w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8)
w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16)
w.obuf[len(magicChunk)+4] = uint8(checksum >> 0)
w.obuf[len(magicChunk)+5] = uint8(checksum >> 8)
w.obuf[len(magicChunk)+6] = uint8(checksum >> 16)
w.obuf[len(magicChunk)+7] = uint8(checksum >> 24)
if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil {
w.err = err
return nRet, err
}
if chunkType == chunkTypeUncompressedData {
if _, err := w.w.Write(uncompressed); err != nil {
w.err = err
return nRet, err
}
}
nRet += len(uncompressed)
}
return nRet, nil
}
// Flush flushes the Writer to its underlying io.Writer.
func (w *Writer) Flush() error {
if w.err != nil {
return w.err
}
if len(w.ibuf) == 0 {
return nil
}
w.write(w.ibuf)
w.ibuf = w.ibuf[:0]
return w.err
}
// Close calls Flush and then closes the Writer.
func (w *Writer) Close() error {
w.Flush()
ret := w.err
if w.err == nil {
w.err = errClosed
}
return ret
}
|
return &Writer{
w: w,
obuf: make([]byte, obufLen),
}
}
|
expressions_library.py | """
A library of expressions that can be composed of existing expressions.
"""
from .udf import Function
import raco.expression
from raco.expression import *
def is_defined(function_name):
return function_name.lower() in EXPRESSIONS
def | (function_name, num_args):
func = EXPRESSIONS.get(function_name.lower())
if hasattr(func, '__call__'):
return func(num_args)
if isinstance(func, dict):
return func.get(num_args)
return func
def create_nested_binary(num_args, func):
if num_args < 2:
return None
var = ["x{i}".format(i=i + 1) for i in xrange(num_args)]
var_refs = [NamedAttributeRef(vstr) for vstr in var]
return Function(var, reduce(func, var_refs))
def create_variable_length_function(num_args, func):
var = ["x{i}".format(i=i + 1) for i in xrange(num_args)]
var_refs = [NamedAttributeRef(vstr) for vstr in var]
return Function(var, func(var_refs))
# mapping from name -> dict or Function
# the dict is a mapping from arity -> Function
EXPRESSIONS_CASE = {
'SafeDiv': {
2: Function(['n', 'd'], Case(
[(EQ(NamedAttributeRef('d'), NumericLiteral(0)),
NumericLiteral(0.0))],
DIVIDE(NamedAttributeRef('n'), NamedAttributeRef('d')))),
3: Function(['n', 'd', 'default'], Case(
[(EQ(NamedAttributeRef('d'), NumericLiteral(0)),
CAST(types.DOUBLE_TYPE, NamedAttributeRef('default')))],
DIVIDE(NamedAttributeRef('n'), NamedAttributeRef('d'))))
},
'TheAnswerToLifeTheUniverseAndEverything': Function(
[], NumericLiteral(42)),
'greatest': lambda num_args: create_nested_binary(num_args, GREATER),
'least': lambda num_args: create_nested_binary(num_args, LESSER),
'greater': create_nested_binary(2, GREATER),
'lexmin': lambda num_args:
create_variable_length_function(num_args, LEXMIN),
'lesser': create_nested_binary(2, LESSER),
'substr': Function(['str', 'begin', 'end'],
SUBSTR([NamedAttributeRef('str'),
NamedAttributeRef('begin'),
NamedAttributeRef('end')
])),
'byterange': Function(['bytes', 'begin', 'end'],
BYTERANGE([NamedAttributeRef('bytes'),
NamedAttributeRef('begin'),
NamedAttributeRef('end')
])),
'head': Function(['str', 'length'],
SUBSTR([NamedAttributeRef('str'),
NumericLiteral(0),
LESSER(LEN(NamedAttributeRef('str')),
NamedAttributeRef('length'))
])),
'tail': Function(['str', 'length'],
SUBSTR([NamedAttributeRef('str'),
GREATER(MINUS(LEN(NamedAttributeRef('str')),
NamedAttributeRef('length')),
NumericLiteral(0)),
LEN(NamedAttributeRef('str'))
])),
'flip': Function(['p'], LT(RANDOM(), NamedAttributeRef('p')))
}
def get_arity(func_class):
"""Return the arity of built-in Myria expressions."""
if issubclass(func_class, ZeroaryOperator):
return 0
elif issubclass(func_class, UnaryOperator):
return 1
elif issubclass(func_class, BinaryOperator):
return 2
else:
# Don't handle n-ary functions automatically
assert False
def one_to_one_function(func_name):
"""Emit a Function object that wraps a Myria built-in expression."""
func_class = getattr(raco.expression, func_name)
arity = get_arity(func_class)
function_args = ['arg%d' % i for i in range(arity)]
expression_args = [NamedAttributeRef(x) for x in function_args]
return Function(function_args, func_class(*expression_args))
# Simple functions that map to a single Myria expression; the names here
# must match the corresponding function class in raco.expression.function
ONE_TO_ONE_FUNCS = ['ABS', 'CEIL', 'COS', 'FLOOR', 'LOG', 'SIN', 'SQRT',
'TAN', 'LEN', 'POW', 'MAX', 'MIN', 'SUM', 'AVG', 'STDEV',
'COUNTALL', 'MD5', 'RANDOM', 'YEAR', 'MONTH', 'DAY',
'SPLIT', 'SEQUENCE', 'NGRAM', 'BITSET', 'CONCAT']
ONE_TO_ONE_EXPRS = {k.lower(): one_to_one_function(k) for k in ONE_TO_ONE_FUNCS} # noqa
EXPRESSIONS = {k.lower(): v for k, v in EXPRESSIONS_CASE.items()}
EXPRESSIONS.update(ONE_TO_ONE_EXPRS)
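# Editor's usage sketch (an addition, not part of the original module; `lookup`
# is the name-and-arity accessor defined above):
#
#     assert is_defined('SafeDiv')
#     safediv2 = lookup('safediv', 2)    # Function with args ['n', 'd']
#     greatest3 = lookup('greatest', 3)  # nested GREATER over x1, x2, x3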
| lookup |
lib.rs | //! Macro implementations for `salvia` framework.
//!
//! Don't use this crate directly, use `salvia` crate instead.
use proc_macro::TokenStream;
mod input;
mod query;
/// Turn `async fn` into `salvia`'s query.
///
/// Queries cache their value when executed and are able to avoid recalculation when the
/// queries/inputs they depend on don't change.
///
/// # Contents
///
/// * [How it works](#how-it-works)
/// * [Basic usage](#basic-usage)
/// * [Advanced usage](#advanced-usage)
/// * [`&self`](#self)
/// * [`&'static self`](#static-self)
/// * [Recursion](#recursion)
/// * [Traits](#traits)
/// * [Unsupported features](#not-supported)
/// * [Configuration](#configuration)
///
/// # How it works
///
/// The macro hoists the provided function and replaces it with a query *executor*.
///
/// The *executor* is responsible for locating/spawning the correct *node* (a looped task serving
/// as cache storage) and communicating with it.
/// It also registers the discovered node as a dependency of the parent query.
///
/// The attribute will expand the following
///
/// ```
/// # use salvia::{QueryContext, query};
/// #[query]
/// async fn my_query(n: i32, cx: &QueryContext) -> bool {
/// // do some work
/// # unimplemented!()
/// }
/// ```
///
/// roughly into this:
///
/// ```ignore
/// # use salvia::{QueryContext, query};
/// async fn my_query(n: i32, cx: &QueryContext) -> bool {
/// async fn inner(n: i32, cx: &QueryContext) -> bool {
/// // do some work
/// # unimplemented!()
/// };
///
/// // conjure correct executor by magic
/// query_executor(inner, n, cx).await
/// }
/// ```
///
/// # Basic usage
///
/// `#[query]` supports nearly every function feature you may desire.
///
/// * In its most basic form attribute can be applied to a **freestanding** async function:
///
/// ```no_run
/// # use std::sync::Arc;
/// # use salvia::{QueryContext, query};
/// #[query]
/// async fn freestanding(n: i32, strings: Arc<Vec<&'static str>>, cx: &QueryContext) -> bool {
/// unimplemented!()
/// }
/// ```
///
/// It can accept any number of arguments, but the last one should always be `&QueryContext`.
///
/// * Function can be **generic** too:
///
/// ```no_run
/// # use std::hash::Hash;
/// # use salvia::{QueryContext, query, Stashable};
/// #[query]
/// async fn generic<T, U>(t: T, cx: &QueryContext) -> U
/// where T: Stashable + Hash, U: Stashable
/// {
/// unimplemented!()
/// }
/// ```
///
/// You will need to specify trait bounds, especially `Stashable`.
///
/// * You can use it **in function scope**:
///
/// ```no_run
/// # use salvia::{QueryContext, query};
/// async fn outer(n: i32, cx: &QueryContext) -> bool {
/// #[query]
/// async fn inner(n: i32, cx: &QueryContext) -> bool {
/// unimplemented!()
/// }
///
/// inner(n, cx).await
/// }
/// ```
///
/// This specific arrangement is also referred to as "inner function trick" in this documentation.
///
/// * Query can be in **associated function** position:
///
/// ```no_run
/// # use salvia::{QueryContext, query};
/// # struct Widget;
/// #
/// impl Widget {
/// #[query]
/// async fn associated(n: i32, cx: &QueryContext) -> bool {
/// unimplemented!()
/// }
/// }
/// ```
///
/// * Also it can appear as **associated function in generic impl** block:
///
/// ```no_run
/// # use std::marker::PhantomData;
/// # use std::hash::Hash;
/// # use salvia::{QueryContext, query, Stashable};
/// # struct GenericWidget<T, U>(PhantomData<T>, PhantomData<U>);
/// #
/// impl<T, U> GenericWidget<T, U>
/// where T: Stashable + Hash, U: Stashable
/// {
/// #[query]
/// async fn generic_associated(t: T, cx: &QueryContext) -> U {
/// unimplemented!()
/// }
/// }
/// ```
///
/// * Before you ask, yes, **`self` argument** is supported:
///
/// ```no_run
/// # use salvia::{QueryContext, query};
/// # #[derive(Clone, Eq, PartialEq, Hash)]
/// # struct Widget;
/// #
/// impl Widget {
/// #[query]
/// async fn take_and_calculate(self, n: i32, cx: &QueryContext) -> bool {
/// unimplemented!()
/// }
/// }
/// ```
///
/// Note that you *have* to pass self by value: it is treated just like any other argument
/// and must satisfy `Stashable`!
/// This in particular implies `'static`, so trying to pass in a reference will fail to compile.
///
/// See [advanced usage](#self) techniques for ways to conjure desirable API.
///
/// # Advanced usage
///
/// Recipes which work but require some boilerplate or external dependencies.
///
/// ## `&self`
///
/// Sadly there is no way to directly support references in queries:
///
/// ```compile_fail
/// # use salvia::{QueryContext, query};
/// # #[derive(Clone, Eq, PartialEq, Hash)]
/// # struct Widget;
/// #
/// impl Widget {
/// #[query] // references are not allowed :(
/// async fn calculate(&self, n: i32, cx: &QueryContext) -> bool {
/// unimplemented!()
/// }
/// }
/// ```
///
/// This is especially needed for non-`Copy` types where `self`-by-value can lead
/// to cumbersome API.
/// However it is trivial to produce desired functions by delegating to consuming variant:
///
/// ```no_run
/// # use salvia::{QueryContext, query};
/// # #[derive(Clone, Eq, PartialEq, Hash)]
/// # struct Widget;
/// #
/// impl Widget {
/// # #[query]
/// # async fn take_and_calculate(self, n: i32, cx: &QueryContext) -> bool {
/// # unimplemented!()
/// # }
/// #
/// async fn calculate(&self, n: i32, cx: &QueryContext) -> bool {
/// self.clone().take_and_calculate(n, cx).await
/// }
/// }
/// ```
///
/// Or in case you want to hide consuming variant inner function trick works too:
///
/// ```no_run
/// # use salvia::{QueryContext, query};
/// # #[derive(Clone, Eq, PartialEq, Hash)]
/// # struct Widget;
/// #
/// impl Widget {
/// async fn calculate(&self, n: i32, cx: &QueryContext) -> bool {
/// #[query]
/// async fn inner(this: Widget, n: i32, cx: &QueryContext) -> bool {
/// unimplemented!()
/// }
///
/// inner(self.clone(), n, cx).await
/// }
/// }
/// ```
///
/// ## `&'static self`
///
/// One case where accepting `self` by reference is valid is when that reference is alive
/// for `'static`:
///
/// ```no_run
/// # use salvia::{QueryContext, query};
/// # #[derive(Clone, Eq, PartialEq, Hash)]
/// # struct Widget;
/// impl Widget {
/// #[query]
/// async fn calculate(&'static self, cx: &QueryContext) -> bool {
/// unimplemented!()
/// }
/// }
/// ```
///
/// ## Recursion
///
/// Recursion is possible, but you will need `async_recursion` crate:
///
/// ```
/// # use tokio::join;
/// # use async_recursion::async_recursion;
/// # use salvia::{QueryContext, query, Runtime};
/// // notice the order of attributes
/// #[query] // <- comes first
/// #[async_recursion] // <- comes last
/// async fn fibonacci(n: u64, cx: &QueryContext) -> u64 {
/// match n {
/// 0 => 0,
/// 1 => 1,
/// n => {
/// let (p1, p2) = join!(fibonacci(n - 1, cx), fibonacci(n - 2, cx));
///
/// p1 + p2
/// }
/// }
/// }
///
/// # #[tokio::main]
/// # async fn main() {
/// # let rt = Runtime::new().await;
/// #
/// # rt.query(|cx| async move {
/// assert_eq!(fibonacci(10, &cx).await, 55);
/// # }).await;
/// # }
/// ```
///
/// *Theoretically* it shouldn't be needed: the executor part communicates with the node through
/// a channel, so even though the node calls the executor (and must embed its future into itself)
/// there is no dependency the other way - no cycle.
/// However `rustc` refuses to recognize it, so here we are.
///
/// This works because each node corresponds to a query + set of arguments, so multiple calls to
/// the same query with different arguments correspond to different nodes, which run concurrently.
/// From the perspective of a node, other nodes are completely opaque: all it really cares about is
/// whether their values are valid for its current calculation.
///
/// Beware that infinite recursion will deadlock, as the node ends up awaiting itself.
///
/// ## Traits
///
/// Async in traits is hard, but it can be done with the help of `async_trait` crate.
///
/// ### Defaulted functions
///
/// Unfortunately naive attempt to use the two together doesn't work:
///
/// ```compile_fail
/// # use salvia::{QueryContext, query};
/// # use async_trait::async_trait;
/// #[async_trait]
/// trait Rendered {
/// #[query] // :(
/// async fn my_query(self, n: i32, cx: &QueryContext) -> bool {
/// unimplemented!()
/// }
/// }
/// ```
///
/// The reason behind it is order in which macros are expanded:
/// `#[async_trait]` starts first and transforms `my_query` into normal `fn` which returns boxed
/// future, but that is unsupported by `#[query]` for good reasons.
///
/// The way to work around the issue is to use inner function trick:
///
/// ```no_run
/// # use salvia::{QueryContext, query};
/// # use async_trait::async_trait;
/// #[async_trait]
/// trait Rendered {
/// async fn my_query(n: i32, cx: &QueryContext) -> bool {
/// #[query]
/// async fn inner(n: i32, cx: &QueryContext) -> bool {
/// unimplemented!()
/// }
///
/// inner(n, cx).await
/// }
/// }
/// ```
///
/// This gives a different target for each macro to expand on and lets them work together.
///
/// It gets a little bit more complicated if you want to operate on `self`:
///
/// * You will need `Stashable + Hash` as supertraits.
/// * Inner query needs to accept `Self` type as generic parameter because it cannot implicitly
/// capture it.
/// * See also note on dyn trait/object safety in `async_trait` crate.
///
/// ```no_run
/// # use std::hash::Hash;
/// # use salvia::{QueryContext, Stashable, query};
/// # use async_trait::async_trait;
/// #[async_trait]
/// trait Rendered: Stashable + Hash {
/// // ^^^^^^^^^ ^^^^
/// async fn my_query(self, n: i32, cx: &QueryContext) -> bool {
/// #[query]
/// async fn inner<SelfT>(this: SelfT, n: i32, cx: &QueryContext) -> bool
/// // ^^^^^
/// where SelfT: Stashable + Hash
/// {
/// unimplemented!()
/// }
///
/// inner(self, n, cx).await
/// }
/// }
/// ```
///
/// ### Non-defaulted functions
///
/// Applying `#[query]` to non-defaulted item is conceptually faulty.
/// On the one hand attribute wants to replace the function with a query executor, but there is
/// only a signature.
/// On the other hand this is the override point for implementors so when a function is provided
/// it will shadow query executor.
///
/// The way to solve the conundrum is to separate the concerns and make *two* functions:
/// one as override point and another as query executor.
/// This leads to a somewhat awkwardly looking "sandwich":
///
/// ```no_run
/// # use std::hash::Hash;
/// # use async_trait::async_trait;
/// # use salvia::{QueryContext, Stashable, query};
/// #[async_trait]
/// trait Rendered: Stashable + Hash {
/// async fn override_me(self, n: i32, cx: &QueryContext) -> bool;
///
/// async fn call_me(self, n: i32, cx: &QueryContext) -> bool {
/// #[query]
/// async fn inner<SelfT>(this: SelfT, n: i32, cx: &QueryContext) -> bool
/// where SelfT: Rendered
/// {
/// this.override_me(n, cx).await
/// }
///
/// inner(self, n, cx).await
/// }
/// }
/// ```
///
/// # Unsupported features
///
/// ## `impl Trait`
///
/// `impl Trait` represents an unnameable type which currently causes issues with macro
/// expansion.
///
/// ## Normal `fn` returning `Future` object
///
/// Most compelling reason:
/// *executor's `Future` type is different from original function*.
/// This is an obvious fact, but it also means there is no way to preserve the original signature.
/// Such behavior is not transparent to users and might be unexpected.
///
/// There are other technical difficulties that makes support for this feature problematic.
#[proc_macro_attribute]
pub fn | (_: TokenStream, item: TokenStream) -> TokenStream {
use syn::{parse_macro_input, Item};
let item = parse_macro_input!(item as Item);
query::query_impl(item).unwrap_or_else(|e| e.to_compile_error().into())
}
/// Implement `get` and `set` functions on input's host type.
///
/// Output of the macro looks like this:
///
/// ```
/// # use std::hash::Hash;
/// # use salvia::{QueryContext, InputContext, Input, Stashable};
/// # #[derive(Eq, PartialEq, Copy, Clone, Hash)]
/// # struct Host;
/// impl Host {
/// async fn get<T>(self, cx: &QueryContext) -> T
/// where
/// Self: Input<T>,
/// T: Stashable
/// {
/// // ...
/// # unimplemented!()
/// }
///
/// async fn set<T>(self, value: T, cx: &QueryContext)
/// where
/// Self: Input<T>,
/// T: Stashable
/// {
/// // ...
/// # unimplemented!()
/// }
/// }
/// ```
#[proc_macro_derive(InputAccess)]
pub fn input_access(input: TokenStream) -> TokenStream {
use syn::{parse_macro_input, DeriveInput};
let data = parse_macro_input!(input as DeriveInput);
input::hash_impl(data).unwrap_or_else(|e| e.to_compile_error().into())
}
#[cfg(test)]
mod test;
| query |
deserializer_tests.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
file_format::{CompiledModule, CompiledScript},
file_format_common::*,
};
use move_core_types::vm_status::StatusCode;
#[test]
fn malformed_simple() {
// empty binary
let mut binary = vec![];
let mut res = CompiledScript::deserialize(&binary);
assert_eq!(
res.expect_err("Expected malformed binary").major_status,
StatusCode::BAD_MAGIC
);
// under-sized binary
binary = vec![0u8, 0u8, 0u8];
res = CompiledScript::deserialize(&binary);
assert_eq!(
res.expect_err("Expected malformed binary").major_status,
StatusCode::BAD_MAGIC
);
// bad magic
binary = vec![0u8; 15];
res = CompiledScript::deserialize(&binary);
assert_eq!(
res.expect_err("Expected bad magic").major_status,
StatusCode::BAD_MAGIC
);
// only magic
binary = BinaryConstants::LIBRA_MAGIC.to_vec();
res = CompiledScript::deserialize(&binary);
assert_eq!(
res.expect_err("Expected malformed binary").major_status,
StatusCode::MALFORMED
);
// bad major version
binary = BinaryConstants::LIBRA_MAGIC.to_vec();
binary.push(2); // major version
binary.push(0); // minor version
binary.push(10); // table count
binary.push(0); // rest of binary ;)
res = CompiledScript::deserialize(&binary);
assert_eq!(
res.expect_err("Expected unknown version").major_status,
StatusCode::UNKNOWN_VERSION
);
// bad minor version
binary = BinaryConstants::LIBRA_MAGIC.to_vec();
binary.push(1); // major version
binary.push(1); // minor version
binary.push(10); // table count
binary.push(0); // rest of binary ;)
let res1 = CompiledModule::deserialize(&binary);
assert_eq!(
res1.expect_err("Expected unknown version").major_status,
StatusCode::UNKNOWN_VERSION
);
}
// Ensure that we can deserialize a script from disk
static EMPTY_SCRIPT: &[u8] = include_bytes!("../../../../types/src/test_helpers/empty_script.mv");
#[test]
fn deserialize_file() | {
CompiledScript::deserialize(EMPTY_SCRIPT).expect("script should deserialize properly");
} |
|
incrust.rs | use std::borrow::Cow;
use std::collections::HashMap;
use abc::*;
use loader::GroupLoader;
use {Args, Arg, ex, Stack, VarContext, Template};
#[derive(Debug)]
pub struct Incrust {
pub loaders: GroupLoader,
pub filters: HashMap<Cow<'static, str>, Box<Filter>>,
top_context: HashMap<Cow<'static, str>, Arg<'static>>,
}
impl Default for Incrust {
fn default() -> Self {
use ::filter::*;
let mut filters: HashMap<Cow<'static, str>, Box<Filter>> = HashMap::new();
filters.insert("e".into(), box Escape);
filters.insert("escape".into(), box Escape);
filters.insert("unescape".into(), box Unescape);
filters.insert("html_escape".into(), box Escape);
filters.insert("html_unescape".into(), box Unescape);
filters.insert("url_escape".into(), box UrlEscape);
filters.insert("url_unescape".into(), box UrlUnescape);
filters.insert("nl2spc".into(), box NewlineToSpace);
filters.insert("newline_to_space".into(), box NewlineToSpace);
let env = hashmap!{
"True".into() => ex(true),
"true".into() => ex(true),
"False".into() => ex(false),
"false".into() => ex(false),
};
Incrust {
loaders: Vec::new(),
filters,
top_context: env,
}
}
}
impl Incrust {
pub fn new() -> Self { Incrust::default() }
pub fn no_default() -> Self {
Incrust {
loaders: Vec::new(),
filters: HashMap::new(),
top_context: HashMap::new(),
}
}
pub fn top_context(&self) -> &HashMap<Cow<'static, str>, Arg> {
&self.top_context
}
pub fn load(&self, name: &str) -> LoadResult {
for loader in &self.loaders {
if let Ok(template) = loader.load(name) {
return Ok(template)
}
}
Err(LoadError::NotFound)
}
pub fn filter<'s>(&'s self, id: &str, context: &'s VarContext<'s>, value: Option<Arg<'s>>) -> FilterResult<Arg<'s>> {
match self.filters.get(id) {
Some(filter) => filter.filter(context, value),
None => Err(FilterError::UnknownFormatter(id.to_owned().into()))
}
}
pub fn parse(&self, template: &str) -> TemplateParseResult<Template> {
Template::parse(template)
}
pub fn get_template(&self, name: &str) -> RenderResult<Template> {
Ok(self.parse(self.load(name)?.as_ref())?)
}
pub fn render<'r>(&self, name: &str, args: &'r Args<'r>) -> RenderResult<String> {
self.render_text(&self.load(name)?, args)
}
pub fn render_text<'r>(&self, text: &str, args: &'r Args<'r>) -> RenderResult<String> {
self.render_parsed(&self.parse(text)?, args)
}
pub fn render_parsed<'r>(&self, template: &Template, args: &'r Args<'r>) -> RenderResult<String> {
let global = self.create_global_context(template, args)?;
let local = global.top_scope();
self.render_prepared(&local)
}
pub fn render_prepared<'r>(&self, context: &'r VarContext<'r>) -> RenderResult<String> {
::renderer::text(context)
.map_err(|err| {
debug!("Render error: {:?}", err);
err
})
}
pub fn create_global_context<'s>(&'s self, template: &'s Template, args: &'s Args<'s>) -> RenderResult<Stack<'s>> {
Stack::new(self, Cow::Borrowed(template), args)
}
}
#[cfg(test)]
mod tests {
#![allow(clippy::used_underscore_binding)]
use super::*;
#[test]
fn text() {
let incrust = Incrust::new();
let templ = "Hello, World!";
let expected = "Hello, World!";
let result = incrust.render_text(templ, &Default::default()).unwrap();
assert_eq!(result, expected);
}
#[test]
fn comments() {
let incrust = Incrust::new();
let templ = incrust.parse("<p>Visible {# partially #} paragraph</p>").unwrap();
let expected = "<p>Visible paragraph</p>";
let result = incrust.render_parsed(&templ, &Default::default()).unwrap();
assert_eq!(result, expected);
}
#[test]
fn mustache() {
let templ = "Hello, {{name}}!";
let expected = "Hello, World!";
let incrust = Incrust::new();
let args = hashmap!{ "name".into() => ex("World") };
let result = incrust.render_text(templ, &args).unwrap();
assert_eq!(result, expected);
}
#[test]
fn filter() {
let templ = "<textarea>{{ html | e }}</textarea>";
let args: Args = hashmap!{ "html".into() => ex("<h1>Hello, World!</h1>") };
let expected = "<textarea><h1>Hello, World!</h1></textarea>";
let incrust = Incrust::new();
let result = incrust.render_text(templ, &args).unwrap();
assert_eq!(result, expected);
}
#[test]
fn literal() |
#[test]
fn expression() {
let incrust = Incrust::new();
let args = hashmap!{
"what".into() => ex("Hello"),
"who".into() => ex("World")
};
assert_eq!(r#"Say: "Hello, World!""#, incrust.render_text(r#"Say: "{{ what + ", " + who }}!""#, &args).unwrap());
let args = hashmap!{
"alpha".into() => ex(6_i64),
"omega".into() => ex(7_f64)
};
assert_eq!("The answer is 42", incrust.render_text(r#"The answer is {{ alpha * omega }}"#, &args).unwrap());
let args = hashmap!{ "amount".into() => ex(6_i64) };
assert_eq!("1 + 1 = 2", incrust.render_text(r#"1 + 1 = {{ 1 + 1 }}"#, &args).unwrap());
assert_eq!("Amount: 6 pcs", incrust.render_text(r#"Amount: {{ amount and ("" + amount + " pcs") or "-" }}"#, &args).unwrap());
let args = hashmap!{ "amount".into() => ex(0_i64) };
assert_eq!("Amount: -", incrust.render_text(r#"Amount: {{ amount and ("" + amount + " pcs") or "-" }}"#, &args).unwrap());
}
#[test]
fn if_statement() {
let incrust = Incrust::new();
let test = |expected, template| assert_eq!(expected, incrust.render_text(template, &Default::default()).unwrap());
test("Mode: on", r#"Mode: {% if True %}on{% endif %}"#);
test("String is empty", r#"String {% if "" %}has chars{% else %}is empty{% endif %}"#);
test("String is true", r#"String {% if "" %}has chars{% elif True %}is true{% else %}is empty{% endif %}"#);
}
#[test]
fn for_statement() {
let incrust = Incrust::new();
let args = hashmap!{ "fruits".into() => ex(vec![ex("Orange"), ex("Apple"), ex("Banana")]) };
let tpl = r#"
<ul>
{%- for fruit in fruits %}
<li>{{ loop.index }}. {{ fruit | e }}</li>
{%- endfor %}
</ul>
"#;
let expected = r#"
<ul>
<li>1. Orange</li>
<li>2. Apple</li>
<li>3. Banana</li>
</ul>
"#;
assert_eq!(expected, incrust.render_text(tpl, &args).unwrap());
}
#[test]
fn block_statement() {
let incrust = Incrust::new();
let args = hashmap!{ "fruits".into() => ex(vec![ex("Orange"), ex("Apple"), ex("Banana")]) };
let tpl = r#"
<body>
<h1>{% block title %}Default title{% endblock %}</h1>
</body>
"#;
let expected = r#"
<body>
<h1>Default title</h1>
</body>
"#;
assert_eq!(expected, incrust.render_text(tpl, &args).unwrap());
}
#[test]
fn inheritance() {
let base = r#"
<body>
<h1>{% block title %}Default title{% endblock %}</h1>
<main>
{%- block body %}
<p>Default body<p>
{%- endblock %}
</main>
</body>
"#;
let tpl = r#"
{% extends parent_layout %}
{% block title -%}
New title
{%- endblock %}
"#;
let expected = r#"
<body>
<h1>New title</h1>
<main>
<p>Default body<p>
</main>
</body>
"#;
let mut incrust = Incrust::new();
incrust.loaders.push(box hashmap!{
"base".into() => base.into(),
"tpl".into() => tpl.into(),
});
let args = hashmap!{ "parent_layout".into() => ex("base") };
assert_eq!(expected, incrust.render("tpl", &args).unwrap());
}
#[test]
fn include() {
let default_menu = r#"
<ul>
<li><a href="/">Home</a></li>
<li><a href="/about">About Us</a></li>
</ul>
"#;
let tpl = r#"
<nav>
{%- include menu -%}
</nav>
<h1>Body</h1>
"#;
let expected = r#"
<nav>
<ul>
<li><a href="/">Home</a></li>
<li><a href="/about">About Us</a></li>
</ul>
</nav>
<h1>Body</h1>
"#;
let mut incrust = Incrust::new();
incrust.loaders.push(box hashmap!{
"default_menu".into() => default_menu.into(),
"tpl".into() => tpl.into(),
});
let args = hashmap!{ "menu".into() => ex("default_menu") };
assert_eq!(expected, incrust.render("tpl", &args).unwrap());
}
}
| {
let incrust = Incrust::new();
let args = Default::default();
assert_eq!("Braces: {{", incrust.render_text(r#"Braces: {{ "{{" }}"#, &args).unwrap());
assert_eq!("The answer: 42", incrust.render_text(r#"The answer: {{ 42 }}"#, &args).unwrap());
assert_eq!("Pi: 3.1415926", incrust.render_text(r#"Pi: {{ 3.1415926 }}"#, &args).unwrap());
} |
zz_generated_gallerysharingprofile_client.go | //go:build go1.16
// +build go1.16
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See License.txt in the project root for license information.
// Code generated by Microsoft (R) AutoRest Code Generator.
// Changes may cause incorrect behavior and will be lost if the code is regenerated.
package armcompute
import (
"context"
"errors"
"github.com/Azure/azure-sdk-for-go/sdk/azcore"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/arm"
armruntime "github.com/Azure/azure-sdk-for-go/sdk/azcore/arm/runtime"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/policy"
"github.com/Azure/azure-sdk-for-go/sdk/azcore/runtime"
"net/http"
"net/url"
"strings"
)
// GallerySharingProfileClient contains the methods for the GallerySharingProfile group.
// Don't use this type directly, use NewGallerySharingProfileClient() instead.
type GallerySharingProfileClient struct {
host string
subscriptionID string
pl runtime.Pipeline
}
// NewGallerySharingProfileClient creates a new instance of GallerySharingProfileClient with the specified values.
// subscriptionID - Subscription credentials which uniquely identify Microsoft Azure subscription. The subscription ID forms
// part of the URI for every service call.
// credential - used to authorize requests. Usually a credential from azidentity.
// options - pass nil to accept the default values.
func | (subscriptionID string, credential azcore.TokenCredential, options *arm.ClientOptions) *GallerySharingProfileClient {
if options == nil {
options = &arm.ClientOptions{}
}
ep := options.Endpoint
if len(ep) == 0 {
ep = arm.AzurePublicCloud
}
client := &GallerySharingProfileClient{
subscriptionID: subscriptionID,
host: string(ep),
pl: armruntime.NewPipeline(moduleName, moduleVersion, credential, runtime.PipelineOptions{}, options),
}
return client
}
// BeginUpdate - Update sharing profile of a gallery.
// If the operation fails it returns an *azcore.ResponseError type.
// resourceGroupName - The name of the resource group.
// galleryName - The name of the Shared Image Gallery.
// sharingUpdate - Parameters supplied to the update gallery sharing profile.
// options - GallerySharingProfileClientBeginUpdateOptions contains the optional parameters for the GallerySharingProfileClient.BeginUpdate
// method.
func (client *GallerySharingProfileClient) BeginUpdate(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate, options *GallerySharingProfileClientBeginUpdateOptions) (GallerySharingProfileClientUpdatePollerResponse, error) {
resp, err := client.update(ctx, resourceGroupName, galleryName, sharingUpdate, options)
if err != nil {
return GallerySharingProfileClientUpdatePollerResponse{}, err
}
result := GallerySharingProfileClientUpdatePollerResponse{
RawResponse: resp,
}
pt, err := armruntime.NewPoller("GallerySharingProfileClient.Update", "", resp, client.pl)
if err != nil {
return GallerySharingProfileClientUpdatePollerResponse{}, err
}
result.Poller = &GallerySharingProfileClientUpdatePoller{
pt: pt,
}
return result, nil
}
// Update - Update sharing profile of a gallery.
// If the operation fails it returns an *azcore.ResponseError type.
func (client *GallerySharingProfileClient) update(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate, options *GallerySharingProfileClientBeginUpdateOptions) (*http.Response, error) {
req, err := client.updateCreateRequest(ctx, resourceGroupName, galleryName, sharingUpdate, options)
if err != nil {
return nil, err
}
resp, err := client.pl.Do(req)
if err != nil {
return nil, err
}
if !runtime.HasStatusCode(resp, http.StatusOK, http.StatusAccepted) {
return nil, runtime.NewResponseError(resp)
}
return resp, nil
}
// updateCreateRequest creates the Update request.
func (client *GallerySharingProfileClient) updateCreateRequest(ctx context.Context, resourceGroupName string, galleryName string, sharingUpdate SharingUpdate, options *GallerySharingProfileClientBeginUpdateOptions) (*policy.Request, error) {
urlPath := "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Compute/galleries/{galleryName}/share"
if client.subscriptionID == "" {
return nil, errors.New("parameter client.subscriptionID cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{subscriptionId}", url.PathEscape(client.subscriptionID))
if resourceGroupName == "" {
return nil, errors.New("parameter resourceGroupName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{resourceGroupName}", url.PathEscape(resourceGroupName))
if galleryName == "" {
return nil, errors.New("parameter galleryName cannot be empty")
}
urlPath = strings.ReplaceAll(urlPath, "{galleryName}", url.PathEscape(galleryName))
req, err := runtime.NewRequest(ctx, http.MethodPost, runtime.JoinPaths(client.host, urlPath))
if err != nil {
return nil, err
}
reqQP := req.Raw().URL.Query()
reqQP.Set("api-version", "2021-10-01")
req.Raw().URL.RawQuery = reqQP.Encode()
req.Raw().Header.Set("Accept", "application/json")
return req, runtime.MarshalAsJSON(req, sharingUpdate)
}
| NewGallerySharingProfileClient |
structs.rs | use crate::*;
use azure_kinect_sys::k4a::*;
use std::fmt::{Display, Formatter};
#[derive(Copy, Clone, Debug, Default)]
pub struct Dimension {
pub width: i32,
pub height: i32,
}
#[derive(Copy, Clone, Debug, Default)]
pub struct Range<T> {
pub min: T,
pub max: T,
}
#[derive(Copy, Clone, Debug, Default)]
pub struct Version {
pub(crate) value: k4a_version_t,
}
impl Version {
#[doc = "< Major version; represents a breaking change."]
pub fn major(&self) -> u32 {
self.value.major
}
#[doc = "< Minor version; represents additional features, no regression from lower versions with same major version."]
pub fn minor(&self) -> u32 {
self.value.minor
}
#[doc = "< Reserved."]
pub fn iteration(&self) -> u32 {
self.value.iteration
}
}
impl Display for Version {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "{}.{}.{}", self.major(), self.minor(), self.iteration())
}
}
#[derive(Copy, Clone, Debug, Default)]
pub struct HardwareVersion {
pub(crate) value: k4a_hardware_version_t,
}
impl HardwareVersion {
#[doc = "< Color camera firmware version."]
pub fn rgb(&self) -> Version {
Version {
value: self.value.rgb,
}
}
#[doc = "< Depth camera firmware version."]
pub fn depth(&self) -> Version {
Version {
value: self.value.depth,
}
}
#[doc = "< Audio device firmware version."]
pub fn audio(&self) -> Version {
Version {
value: self.value.audio,
}
}
#[doc = "< Depth sensor firmware version."]
pub fn depth_sensor(&self) -> Version {
Version {
value: self.value.depth_sensor,
}
}
#[doc = "< Build type reported by the firmware."]
pub fn firmware_build(&self) -> FirmwareBuildType {
FirmwareBuildType::from_primitive(self.value.firmware_build)
}
#[doc = "< Signature type of the firmware."]
pub fn firmware_signature(&self) -> FirmwareSignatureType |
}
impl Display for HardwareVersion {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "rgb: {}, depth: {}, audio: {}, depth_sensor: {}, firmware_build: {:?}, firmware_signature: {:?}",
self.rgb(), self.depth(), self.audio(), self.depth_sensor(), self.firmware_build(), self.firmware_signature())
}
}
| {
FirmwareSignatureType::from_primitive(self.value.firmware_signature)
} |
inwx.go | // Package inwx implements a DNS provider for solving the DNS-01 challenge using inwx dom robot
package inwx
import (
"errors"
"fmt"
"time"
"github.com/go-acme/lego/v3/challenge/dns01"
"github.com/go-acme/lego/v3/log"
"github.com/go-acme/lego/v3/platform/config/env"
"github.com/nrdcg/goinwx"
)
// Environment variables names.
const (
envNamespace = "INWX_"
EnvUsername = envNamespace + "USERNAME"
EnvPassword = envNamespace + "PASSWORD"
EnvSandbox = envNamespace + "SANDBOX"
EnvTTL = envNamespace + "TTL"
EnvPropagationTimeout = envNamespace + "PROPAGATION_TIMEOUT"
EnvPollingInterval = envNamespace + "POLLING_INTERVAL"
)
// Config is used to configure the creation of the DNSProvider
type Config struct {
Username string
Password string
Sandbox bool
PropagationTimeout time.Duration
PollingInterval time.Duration
TTL int
}
// NewDefaultConfig returns a default configuration for the DNSProvider
func NewDefaultConfig() *Config {
return &Config{
TTL: env.GetOrDefaultInt(EnvTTL, 300),
PropagationTimeout: env.GetOrDefaultSecond(EnvPropagationTimeout, dns01.DefaultPropagationTimeout),
PollingInterval: env.GetOrDefaultSecond(EnvPollingInterval, dns01.DefaultPollingInterval),
Sandbox: env.GetOrDefaultBool(EnvSandbox, false),
}
}
// DNSProvider is an implementation of the challenge.Provider interface
type DNSProvider struct {
config *Config
client *goinwx.Client
}
// NewDNSProvider returns a DNSProvider instance configured for INWX.
// Credentials must be passed in the environment variables:
// INWX_USERNAME and INWX_PASSWORD.
func | () (*DNSProvider, error) {
values, err := env.Get(EnvUsername, EnvPassword)
if err != nil {
return nil, fmt.Errorf("inwx: %w", err)
}
config := NewDefaultConfig()
config.Username = values[EnvUsername]
config.Password = values[EnvPassword]
return NewDNSProviderConfig(config)
}
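// Editor's sketch (an addition, not part of the original file): typical wiring,
// with credentials read from INWX_USERNAME / INWX_PASSWORD as documented above.
//
//	provider, err := NewDNSProvider()
//	if err != nil {
//		// handle err
//	}
//	err = provider.Present("example.com", "token", "_acme-challenge.example.com.", "value")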
// NewDNSProviderConfig returns a DNSProvider instance configured for INWX.
func NewDNSProviderConfig(config *Config) (*DNSProvider, error) {
if config == nil {
return nil, errors.New("inwx: the configuration of the DNS provider is nil")
}
if config.Username == "" || config.Password == "" {
return nil, errors.New("inwx: credentials missing")
}
if config.Sandbox {
log.Infof("inwx: sandbox mode is enabled")
}
client := goinwx.NewClient(config.Username, config.Password, &goinwx.ClientOptions{Sandbox: config.Sandbox})
return &DNSProvider{config: config, client: client}, nil
}
// Present creates a TXT record using the specified parameters
func (d *DNSProvider) Present(domain, token, fqdn, value string) error {
authZone, err := dns01.FindZoneByFqdn(fqdn)
if err != nil {
return fmt.Errorf("inwx: %w", err)
}
err = d.client.Account.Login()
if err != nil {
return fmt.Errorf("inwx: %w", err)
}
defer func() {
errL := d.client.Account.Logout()
if errL != nil {
log.Infof("inwx: failed to logout: %v", errL)
}
}()
var request = &goinwx.NameserverRecordRequest{
Domain: dns01.UnFqdn(authZone),
Name: dns01.UnFqdn(fqdn),
Type: "TXT",
Content: value,
TTL: d.config.TTL,
}
_, err = d.client.Nameservers.CreateRecord(request)
if err != nil {
switch er := err.(type) {
case *goinwx.ErrorResponse:
if er.Message == "Object exists" {
return nil
}
return fmt.Errorf("inwx: %w", err)
default:
return fmt.Errorf("inwx: %w", err)
}
}
return nil
}
// CleanUp removes the TXT record matching the specified parameters
func (d *DNSProvider) CleanUp(domain, token, fqdn, value string) error {
authZone, err := dns01.FindZoneByFqdn(fqdn)
if err != nil {
return fmt.Errorf("inwx: %w", err)
}
err = d.client.Account.Login()
if err != nil {
return fmt.Errorf("inwx: %w", err)
}
defer func() {
errL := d.client.Account.Logout()
if errL != nil {
log.Infof("inwx: failed to logout: %v", errL)
}
}()
response, err := d.client.Nameservers.Info(&goinwx.NameserverInfoRequest{
Domain: dns01.UnFqdn(authZone),
Name: dns01.UnFqdn(fqdn),
Type: "TXT",
})
if err != nil {
return fmt.Errorf("inwx: %w", err)
}
var lastErr error
for _, record := range response.Records {
err = d.client.Nameservers.DeleteRecord(record.ID)
if err != nil {
lastErr = fmt.Errorf("inwx: %w", err)
}
}
return lastErr
}
// Timeout returns the timeout and interval to use when checking for DNS propagation.
// Adjusting here to cope with spikes in propagation times.
func (d *DNSProvider) Timeout() (timeout, interval time.Duration) {
return d.config.PropagationTimeout, d.config.PollingInterval
}
| NewDNSProvider |
commentparser.rs | use nom::types::CompleteStr;
pub fn parse(text: &str) -> Option<Vec<Instruction>> {
let instructions: Vec<Instruction> = text
.lines()
.flat_map(|s| match parse_line(s) {
Some(instructions) => instructions.into_iter(),
None => Vec::new().into_iter(),
})
.collect();
if instructions.is_empty() {
None
} else {
Some(instructions)
}
}
named!(normal_token(CompleteStr) -> CompleteStr,
verify!(take_while1!(|c: char| c.is_ascii_graphic()),
|s: CompleteStr| !s.0.eq_ignore_ascii_case("@grahamcofborg"))
);
named!(parse_line_impl(CompleteStr) -> Option<Vec<Instruction>>, alt!(
do_parse!(
res: ws!(many1!(ws!(preceded!(
alt!(tag_no_case!("@grahamcofborg") | tag_no_case!("@ofborg")),
alt!(
ws!(do_parse!(
tag!("build") >>
pkgs: ws!(many1!(map!(normal_token, |s| s.0.to_owned()))) >>
(Some(Instruction::Build(Subset::Nixpkgs, pkgs)))
)) |
ws!(do_parse!(
tag!("test") >>
tests: ws!(many1!(map!(normal_token, |s| format!("tests.{}", s.0)))) >>
(Some(Instruction::Build(Subset::NixOS, tests)))
)) |
value!(Some(Instruction::Eval), tag!("eval")) |
// TODO: Currently keeping previous behaviour of ignoring unknown commands. Maybe
// it would be better to return an error so that the caller would know one of the
// commands couldn't be handled?
value!(None, many_till!(take!(1), tag_no_case!("@grahamcofborg")))
)
)))) >>
eof!() >>
(Some(res.into_iter().filter_map(|x| x).collect()))
) |
value!(None)
));
pub fn parse_line(text: &str) -> Option<Vec<Instruction>> {
match parse_line_impl(CompleteStr(text)) {
Ok((_, res)) => res,
Err(e) => {
// This should likely never happen thanks to the | value!(None), but well...
warn!("Failed parsing string ‘{}’: result was {:?}", text, e);
None
}
}
}
#[derive(PartialEq, Debug)]
pub enum Instruction {
Build(Subset, Vec<String>),
Eval,
}
#[derive(Serialize, Deserialize, Debug, PartialEq)]
pub enum Subset {
Nixpkgs,
NixOS,
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn parse_empty() {
assert_eq!(None, parse(""));
}
#[test]
fn valid_trailing_instruction() {
assert_eq!(
Some(vec![Instruction::Eval]),
parse(
"/cc @grahamc for ^^
@GrahamcOfBorg eval",
)
);
}
#[test]
fn bogus_comment() {
assert_eq!(None, parse(":) :) :) @grahamcofborg build hi"));
}
#[test]
fn bogus_build_comment_empty_list() {
assert_eq!(None, parse("@grahamcofborg build"));
}
#[test]
fn eval_comment() {
assert_eq!(Some(vec![Instruction::Eval]), parse("@grahamcofborg eval"));
}
#[test]
fn eval_and_build_comment() {
assert_eq!(
Some(vec![
Instruction::Eval,
Instruction::Build(Subset::Nixpkgs, vec![String::from("foo")]),
]),
parse("@grahamcofborg eval @grahamcofborg build foo")
);
}
#[test]
fn build_and_eval_and_build_comment() {
assert_eq!(
Some(vec![
Instruction::Build(Subset::Nixpkgs, vec![String::from("bar")]),
Instruction::Eval,
Instruction::Build(Subset::Nixpkgs, vec![String::from("foo")]),
]),
parse(
"
@grahamcofborg build bar
@ofborg eval
@grahamcofborg build foo",
)
);
}
#[test]
fn complex_comment_with_paragraphs() {
assert_eq!(
Some(vec![
Instruction::Build(Subset::Nixpkgs, vec![String::from("bar")]),
Instruction::Eval,
Instruction::Build(Subset::Nixpkgs, vec![String::from("foo")]),
]),
parse(
"
I like where you're going with this PR, so let's try it out!
@grahamcofborg build bar
I noticed though that the target branch was broken, which should be fixed. Let's eval again.
@grahamcofborg eval
Also, just in case, let's try foo
@grahamcofborg build foo",
)
);
}
#[test]
fn build_and_eval_comment() {
assert_eq!(
Some(vec![
Instruction::Build(Subset::Nixpkgs, vec![String::from("foo")]),
Instruction::Eval,
]),
parse("@grahamcofborg build foo @grahamcofborg eval")
);
}
#[test]
fn build_comment() {
assert_eq!(
Some(vec![Instruction::Build(
Subset::Nixpkgs,
vec![String::from("foo"), String::from("bar")]
),]),
parse(
"@OfBorg build foo bar
baz",
)
);
}
#[test]
fn test_comment() {
assert_eq!(
Some(vec![Instruction::Build(
Subset::NixOS,
vec![
String::from("tests.foo"),
String::from("tests.bar"),
String::from("tests.baz"),
]
),]),
parse("@GrahamCOfBorg test foo bar baz")
);
}
#[test]
fn build_comment_newlines() {
assert_eq!(
Some(vec![Instruction::Build(
Subset::Nixpkgs,
vec![
String::from("foo"),
String::from("bar"),
String::from("baz"),
]
),]),
parse("@OfBorg build foo bar baz")
);
}
#[test]
fn build_comment_lower() {
assert_eq!(
Some(vec![Instruction::Build(
Subset::Nixpkgs,
vec![
String::from("foo"),
String::from("bar"),
String::from("baz"),
]
),]),
parse("@grahamcofborg build foo bar baz")
);
}
#[test]
fn build_comment_lower_package_case_retained() {
assert_eq!(
Some(vec![Instruction::Build(
Subset::Nixpkgs,
vec![
String::from("foo"),
String::from("bar"),
String::from("baz.Baz"),
]
),]),
parse("@ofborg build foo bar baz.Baz")
);
}
}
move_semantics1.rs
// move_semantics1.rs
// Make me compile! Scroll down for hints :)
fn main() {
let vec0 = Vec::new();
let mut vec1 = fill_vec(vec0);
println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1);
vec1.push(88);
println!("{} has length {} content `{:?}`", "vec1", vec1.len(), vec1);
}
fn fill_vec(vec: Vec<i32>) -> Vec<i32> {
let mut vec = vec;
vec.push(22);
vec.push(44);
vec.push(66);
vec
}
// So you've got the "cannot borrow immutable local variable `vec1` as mutable" error on line 11,
// right? The fix for this is going to be adding one keyword, and the addition is NOT on line 11
// where the error is.
weatherbot.py
# -*- coding: utf-8 -*-
from weatherScraper.items import TempData
from weatherScraper.items import InputData
import scrapy
class WeatherbotSpider(scrapy.Spider):
name = 'weatherbot'
allowed_domains = ['www.wunderground.com']
start_urls = ['http://www.wunderground.com/history/']
def __init__(self, code='', month='', day='', year='', *args, **kwargs): # this will allow spider arguments
super(WeatherbotSpider, self).__init__(*args, **kwargs)
global user_input
user_input = InputData()
user_input['code'] = code
user_input['month'] = month
user_input['day'] = day
user_input['year'] = year
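# Illustrative invocation (Scrapy passes spider arguments via its -a flag; the
# values here are hypothetical):
#   scrapy crawl weatherbot -a code=KSFO -a month=1 -a day=15 -a year=2016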
def parse(self, response):
return scrapy.FormRequest.from_response(
response,
formnumber=1, # formnumber set to 1 because location and date are the second form on history page
formdata={'code': user_input['code'],
'month': user_input['month'],
'day': user_input['day'],
'year': user_input['year']},
callback=self.after_post
)
def after_post(self, response):
# check input successful before moving on
if "location you entered was not found" in response.body:
self.logger.error("Location not valid")
return
temperatures = TempData()
# Extract each temperature needed using corresponding css tags
temperatures['actual_mean_temp'] = response.css('#historyTable tr:nth-child(2) .wx-value::text').extract()
temperatures['avg_mean_temp'] = response.css('tr:nth-child(2) .indent~ td+ td .wx-value::text').extract()
temperatures['actual_max_temp'] = response.css('tr:nth-child(3) .indent+ td .wx-value::text').extract()
temperatures['avg_max_temp'] = response.css('#historyTable tr:nth-child(3) td:nth-child(3) .wx-value::text')\
.extract()
temperatures['record_max_temp'] = response.css('tr:nth-child(3) td:nth-child(4) .wx-value::text').extract()
temperatures['actual_min_temp'] = response.css('tr:nth-child(4) .indent+ td .wx-value::text').extract()
temperatures['avg_min_temp'] = response.css('#historyTable tr:nth-child(4) td:nth-child(3) .wx-value::text')\
.extract()
temperatures['record_min_temp'] = response.css('#historyTable tr:nth-child(4) td:nth-child(4) .wx-value::text')\
.extract()
# Check if Fahrenheit or Celsius, then append correct unit
if 'C' in response.css('tr:nth-child(3) .indent+ td .wx-unit::text'):
for key, value in temperatures.iteritems():
value.append('C')
else:
for key, value in temperatures.iteritems():
value.append('F')
yield temperatures
cast.rs
//! Code for type-checking cast expressions.
//!
//! A cast `e as U` is valid if one of the following holds:
//! * `e` has type `T` and `T` coerces to `U`; *coercion-cast*
//! * `e` has type `*T`, `U` is `*U_0`, and either `U_0: Sized` or
//! pointer_kind(`T`) = pointer_kind(`U_0`); *ptr-ptr-cast*
//! * `e` has type `*T` and `U` is a numeric type, while `T: Sized`; *ptr-addr-cast*
//! * `e` is an integer and `U` is `*U_0`, while `U_0: Sized`; *addr-ptr-cast*
//! * `e` has type `T` and `T` and `U` are any numeric types; *numeric-cast*
//! * `e` is a C-like enum and `U` is an integer type; *enum-cast*
//! * `e` has type `bool` or `char` and `U` is an integer; *prim-int-cast*
//! * `e` has type `u8` and `U` is `char`; *u8-char-cast*
//! * `e` has type `&[T; n]` and `U` is `*const T`; *array-ptr-cast*
//! * `e` is a function pointer type and `U` has type `*T`,
//! while `T: Sized`; *fptr-ptr-cast*
//! * `e` is a function pointer type and `U` is an integer; *fptr-addr-cast*
//!
//! where `&.T` and `*T` are references of either mutability,
//! and where pointer_kind(`T`) is the kind of the unsize info
//! in `T` - the vtable for a trait definition (e.g., `fmt::Display` or
//! `Iterator`, not `Iterator<Item=u8>`) or a length (or `()` if `T: Sized`).
//!
//! Note that lengths are not adjusted when casting raw slices -
//! `T: *const [u16] as *const [u8]` creates a slice that only includes
//! half of the original memory.
//!
//! Casting is not transitive, that is, even if `e as U1 as U2` is a valid
//! expression, `e as U2` is not necessarily so (in fact it will only be valid if
//! `U1` coerces to `U2`).
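//!
//! A few concrete examples of these rules (illustrative sketch, not part of
//! the checker itself):
//!
//! ```
//! let _ = 65u8 as char;          // u8-char-cast: allowed
//! let arr = &[0u8; 4];
//! let p = arr as *const u8;      // array-ptr-cast: allowed
//! let _ = p as usize;            // ptr-addr-cast: allowed (`u8: Sized`)
//! // let _ = 1u8 as bool;        // rejected (E0054): compare with zero instead
//! ```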
use super::FnCtxt;
use crate::hir::def_id::DefId;
use crate::type_error_struct;
use rustc_errors::{struct_span_err, Applicability, DiagnosticBuilder, ErrorReported};
use rustc_hir as hir;
use rustc_hir::lang_items::LangItem;
use rustc_middle::ty::adjustment::AllowTwoPhase;
use rustc_middle::ty::cast::{CastKind, CastTy};
use rustc_middle::ty::error::TypeError;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{self, Ty, TypeAndMut, TypeFoldable};
use rustc_session::lint;
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_span::Span;
use rustc_trait_selection::traits;
use rustc_trait_selection::traits::error_reporting::report_object_safety_error;
/// Reifies a cast check to be checked once we have full type information for
/// a function context.
pub struct CastCheck<'tcx> {
expr: &'tcx hir::Expr<'tcx>,
expr_ty: Ty<'tcx>,
cast_ty: Ty<'tcx>,
cast_span: Span,
span: Span,
}
/// The kind of pointer and associated metadata (thin, length or vtable) - we
/// only allow casts between fat pointers if their metadata have the same
/// kind.
#[derive(Copy, Clone, PartialEq, Eq)]
enum PointerKind<'tcx> {
/// No metadata attached, ie pointer to sized type or foreign type
Thin,
/// A trait object
Vtable(Option<DefId>),
/// Slice
Length,
/// The unsize info of this projection
OfProjection(&'tcx ty::ProjectionTy<'tcx>),
/// The unsize info of this opaque ty
OfOpaque(DefId, SubstsRef<'tcx>),
/// The unsize info of this parameter
OfParam(&'tcx ty::ParamTy),
}
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
/// Returns the kind of unsize information of t, or None
/// if t is unknown.
fn pointer_kind(
&self,
t: Ty<'tcx>,
span: Span,
) -> Result<Option<PointerKind<'tcx>>, ErrorReported> {
debug!("pointer_kind({:?}, {:?})", t, span);
let t = self.resolve_vars_if_possible(t);
if t.references_error() {
return Err(ErrorReported);
}
if self.type_is_known_to_be_sized_modulo_regions(t, span) {
return Ok(Some(PointerKind::Thin));
}
Ok(match *t.kind() {
ty::Slice(_) | ty::Str => Some(PointerKind::Length),
ty::Dynamic(ref tty, ..) => Some(PointerKind::Vtable(tty.principal_def_id())),
ty::Adt(def, substs) if def.is_struct() => match def.non_enum_variant().fields.last() {
None => Some(PointerKind::Thin),
Some(f) => {
let field_ty = self.field_ty(span, f, substs);
self.pointer_kind(field_ty, span)?
}
},
ty::Tuple(fields) => match fields.last() {
None => Some(PointerKind::Thin),
Some(f) => self.pointer_kind(f.expect_ty(), span)?,
},
// Pointers to foreign types are thin, despite being unsized
ty::Foreign(..) => Some(PointerKind::Thin),
// We should really try to normalize here.
ty::Projection(ref pi) => Some(PointerKind::OfProjection(pi)),
ty::Opaque(def_id, substs) => Some(PointerKind::OfOpaque(def_id, substs)),
ty::Param(ref p) => Some(PointerKind::OfParam(p)),
// Insufficient type information.
ty::Placeholder(..) | ty::Bound(..) | ty::Infer(_) => None,
ty::Bool
| ty::Char
| ty::Int(..)
| ty::Uint(..)
| ty::Float(_)
| ty::Array(..)
| ty::GeneratorWitness(..)
| ty::RawPtr(_)
| ty::Ref(..)
| ty::FnDef(..)
| ty::FnPtr(..)
| ty::Closure(..)
| ty::Generator(..)
| ty::Adt(..)
| ty::Never
| ty::Error(_) => {
self.tcx
.sess
.delay_span_bug(span, &format!("`{:?}` should be sized but is not?", t));
return Err(ErrorReported);
}
})
}
}
#[derive(Copy, Clone)]
pub enum CastError {
ErrorReported,
CastToBool,
CastToChar,
DifferingKinds,
/// Cast of thin to fat raw ptr (e.g., `*const () as *const [u8]`).
SizedUnsizedCast,
IllegalCast,
NeedDeref,
NeedViaPtr,
NeedViaThinPtr,
NeedViaInt,
NonScalar,
UnknownExprPtrKind,
UnknownCastPtrKind,
}
impl From<ErrorReported> for CastError {
fn from(ErrorReported: ErrorReported) -> Self {
CastError::ErrorReported
}
}
fn make_invalid_casting_error<'a, 'tcx>(
sess: &'a Session,
span: Span,
expr_ty: Ty<'tcx>,
cast_ty: Ty<'tcx>,
fcx: &FnCtxt<'a, 'tcx>,
) -> DiagnosticBuilder<'a> {
type_error_struct!(
sess,
span,
expr_ty,
E0606,
"casting `{}` as `{}` is invalid",
fcx.ty_to_string(expr_ty),
fcx.ty_to_string(cast_ty)
)
}
impl<'a, 'tcx> CastCheck<'tcx> {
pub fn new(
fcx: &FnCtxt<'a, 'tcx>,
expr: &'tcx hir::Expr<'tcx>,
expr_ty: Ty<'tcx>,
cast_ty: Ty<'tcx>,
cast_span: Span,
span: Span,
) -> Result<CastCheck<'tcx>, ErrorReported> {
let check = CastCheck { expr, expr_ty, cast_ty, cast_span, span };
// For better error messages, check for some obviously unsized
// cases now. We do a more thorough check at the end, once
// inference is more completely known.
match cast_ty.kind() {
ty::Dynamic(..) | ty::Slice(..) => {
check.report_cast_to_unsized_type(fcx);
Err(ErrorReported)
}
_ => Ok(check),
}
}
fn report_cast_error(&self, fcx: &FnCtxt<'a, 'tcx>, e: CastError) {
match e {
CastError::ErrorReported => {
// an error has already been reported
}
CastError::NeedDeref => {
let error_span = self.span;
let mut err = make_invalid_casting_error(
fcx.tcx.sess,
self.span,
self.expr_ty,
self.cast_ty,
fcx,
);
let cast_ty = fcx.ty_to_string(self.cast_ty);
err.span_label(
error_span,
format!("cannot cast `{}` as `{}`", fcx.ty_to_string(self.expr_ty), cast_ty),
);
if let Ok(snippet) = fcx.sess().source_map().span_to_snippet(self.expr.span) {
err.span_suggestion(
self.expr.span,
"dereference the expression",
format!("*{}", snippet),
Applicability::MaybeIncorrect,
);
} else {
err.span_help(self.expr.span, "dereference the expression with `*`");
}
err.emit();
}
CastError::NeedViaThinPtr | CastError::NeedViaPtr => {
let mut err = make_invalid_casting_error(
fcx.tcx.sess,
self.span,
self.expr_ty,
self.cast_ty,
fcx,
);
if self.cast_ty.is_integral() {
err.help(&format!(
"cast through {} first",
match e {
CastError::NeedViaPtr => "a raw pointer",
CastError::NeedViaThinPtr => "a thin pointer",
_ => bug!(),
}
));
}
err.emit();
}
CastError::NeedViaInt => {
make_invalid_casting_error(
fcx.tcx.sess,
self.span,
self.expr_ty,
self.cast_ty,
fcx,
)
.help(&format!(
"cast through {} first",
match e {
CastError::NeedViaInt => "an integer",
_ => bug!(),
}
))
.emit();
}
CastError::IllegalCast => {
make_invalid_casting_error(
fcx.tcx.sess,
self.span,
self.expr_ty,
self.cast_ty,
fcx,
)
.emit();
}
CastError::DifferingKinds => {
make_invalid_casting_error(
fcx.tcx.sess,
self.span,
self.expr_ty,
self.cast_ty,
fcx,
)
.note("vtable kinds may not match")
.emit();
}
CastError::CastToBool => {
let mut err =
struct_span_err!(fcx.tcx.sess, self.span, E0054, "cannot cast as `bool`");
if self.expr_ty.is_numeric() {
match fcx.tcx.sess.source_map().span_to_snippet(self.expr.span) {
Ok(snippet) => {
err.span_suggestion(
self.span,
"compare with zero instead",
format!("{} != 0", snippet),
Applicability::MachineApplicable,
);
}
Err(_) => {
err.span_help(self.span, "compare with zero instead");
}
}
} else {
err.span_label(self.span, "unsupported cast");
}
err.emit();
}
CastError::CastToChar => {
type_error_struct!(
fcx.tcx.sess,
self.span,
self.expr_ty,
E0604,
"only `u8` can be cast as `char`, not `{}`",
self.expr_ty
)
.span_label(self.span, "invalid cast")
.emit();
}
CastError::NonScalar => {
let mut err = type_error_struct!(
fcx.tcx.sess,
self.span,
self.expr_ty,
E0605,
"non-primitive cast: `{}` as `{}`",
self.expr_ty,
fcx.ty_to_string(self.cast_ty)
);
let mut sugg = None;
if let ty::Ref(reg, _, mutbl) = *self.cast_ty.kind() {
if fcx
.try_coerce(
self.expr,
fcx.tcx.mk_ref(reg, TypeAndMut { ty: self.expr_ty, mutbl }),
self.cast_ty,
AllowTwoPhase::No,
)
.is_ok()
{
sugg = Some(format!("&{}", mutbl.prefix_str()));
}
}
if let Some(sugg) = sugg {
err.span_label(self.span, "invalid cast");
err.span_suggestion_verbose(
self.expr.span.shrink_to_lo(),
"borrow the value for the cast to be valid",
sugg,
Applicability::MachineApplicable,
);
} else if !matches!(
self.cast_ty.kind(),
ty::FnDef(..) | ty::FnPtr(..) | ty::Closure(..)
) {
let mut label = true;
// Check `impl From<self.expr_ty> for self.cast_ty {}` for accurate suggestion:
if let Ok(snippet) = fcx.tcx.sess.source_map().span_to_snippet(self.expr.span) {
if let Some(from_trait) = fcx.tcx.get_diagnostic_item(sym::from_trait) {
let ty = fcx.resolve_vars_if_possible(self.cast_ty);
// Erase regions to avoid panic in `prove_value` when calling
// `type_implements_trait`.
let ty = fcx.tcx.erase_regions(ty);
let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty);
let expr_ty = fcx.tcx.erase_regions(expr_ty);
let ty_params = fcx.tcx.mk_substs_trait(expr_ty, &[]);
// Check for infer types because cases like `Option<{integer}>` would
// panic otherwise.
if !expr_ty.has_infer_types()
&& !ty.has_infer_types()
&& fcx.tcx.type_implements_trait((
from_trait,
ty,
ty_params,
fcx.param_env,
))
{
label = false;
err.span_suggestion(
self.span,
"consider using the `From` trait instead",
format!("{}::from({})", self.cast_ty, snippet),
Applicability::MaybeIncorrect,
);
}
}
}
let msg = "an `as` expression can only be used to convert between primitive \
types or to coerce to a specific trait object";
if label {
err.span_label(self.span, msg);
} else {
err.note(msg);
}
} else {
err.span_label(self.span, "invalid cast");
}
err.emit();
}
CastError::SizedUnsizedCast => {
use crate::structured_errors::{SizedUnsizedCast, StructuredDiagnostic};
SizedUnsizedCast {
sess: &fcx.tcx.sess,
span: self.span,
expr_ty: self.expr_ty,
cast_ty: fcx.ty_to_string(self.cast_ty),
}
.diagnostic()
.emit();
}
CastError::UnknownCastPtrKind | CastError::UnknownExprPtrKind => {
let unknown_cast_to = match e {
CastError::UnknownCastPtrKind => true,
CastError::UnknownExprPtrKind => false,
_ => bug!(),
};
let mut err = struct_span_err!(
fcx.tcx.sess,
if unknown_cast_to { self.cast_span } else { self.span },
E0641,
"cannot cast {} a pointer of an unknown kind",
if unknown_cast_to { "to" } else { "from" }
);
if unknown_cast_to {
err.span_label(self.cast_span, "needs more type information");
err.note(
"the type information given here is insufficient to check whether \
the pointer cast is valid",
);
} else {
err.span_label(
self.span,
"the type information given here is insufficient to check whether \
the pointer cast is valid",
);
}
err.emit();
}
}
}
fn report_cast_to_unsized_type(&self, fcx: &FnCtxt<'a, 'tcx>) {
if self.cast_ty.references_error() || self.expr_ty.references_error() {
return;
}
let tstr = fcx.ty_to_string(self.cast_ty);
let mut err = type_error_struct!(
fcx.tcx.sess,
self.span,
self.expr_ty,
E0620,
"cast to unsized type: `{}` as `{}`",
fcx.resolve_vars_if_possible(self.expr_ty),
tstr
);
match self.expr_ty.kind() {
ty::Ref(_, _, mt) => {
let mtstr = mt.prefix_str();
if self.cast_ty.is_trait() {
match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) {
Ok(s) => {
err.span_suggestion(
self.cast_span,
"try casting to a reference instead",
format!("&{}{}", mtstr, s),
Applicability::MachineApplicable,
);
}
Err(_) => {
let msg = &format!("did you mean `&{}{}`?", mtstr, tstr);
err.span_help(self.cast_span, msg);
}
}
} else {
let msg = &format!(
"consider using an implicit coercion to `&{}{}` instead",
mtstr, tstr
);
err.span_help(self.span, msg);
}
}
ty::Adt(def, ..) if def.is_box() => {
match fcx.tcx.sess.source_map().span_to_snippet(self.cast_span) {
Ok(s) => {
err.span_suggestion(
self.cast_span,
"you can cast to a `Box` instead",
format!("Box<{}>", s),
Applicability::MachineApplicable,
);
}
Err(_) => {
err.span_help(
self.cast_span,
&format!("you might have meant `Box<{}>`", tstr),
);
}
}
}
_ => {
err.span_help(self.expr.span, "consider using a box or reference as appropriate");
}
}
err.emit();
}
fn trivial_cast_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
let t_cast = self.cast_ty;
let t_expr = self.expr_ty;
let type_asc_or =
if fcx.tcx.features().type_ascription { "type ascription or " } else { "" };
let (adjective, lint) = if t_cast.is_numeric() && t_expr.is_numeric() {
("numeric ", lint::builtin::TRIVIAL_NUMERIC_CASTS)
} else {
("", lint::builtin::TRIVIAL_CASTS)
};
fcx.tcx.struct_span_lint_hir(lint, self.expr.hir_id, self.span, |err| {
err.build(&format!(
"trivial {}cast: `{}` as `{}`",
adjective,
fcx.ty_to_string(t_expr),
fcx.ty_to_string(t_cast)
))
.help(&format!(
"cast can be replaced by coercion; this might \
require {}a temporary variable",
type_asc_or
))
.emit();
});
}
pub fn check(mut self, fcx: &FnCtxt<'a, 'tcx>) {
self.expr_ty = fcx.structurally_resolved_type(self.span, self.expr_ty);
self.cast_ty = fcx.structurally_resolved_type(self.span, self.cast_ty);
debug!("check_cast({}, {:?} as {:?})", self.expr.hir_id, self.expr_ty, self.cast_ty);
if !fcx.type_is_known_to_be_sized_modulo_regions(self.cast_ty, self.span) {
self.report_cast_to_unsized_type(fcx);
} else if self.expr_ty.references_error() || self.cast_ty.references_error() {
// No sense in giving duplicate error messages
} else {
match self.try_coercion_cast(fcx) {
Ok(()) => {
self.trivial_cast_lint(fcx);
debug!(" -> CoercionCast");
fcx.typeck_results.borrow_mut().set_coercion_cast(self.expr.hir_id.local_id);
}
Err(ty::error::TypeError::ObjectUnsafeCoercion(did)) => {
self.report_object_unsafe_cast(&fcx, did);
}
Err(_) => {
match self.do_check(fcx) {
Ok(k) => {
debug!(" -> {:?}", k);
}
Err(e) => self.report_cast_error(fcx, e),
};
}
};
}
}
fn report_object_unsafe_cast(&self, fcx: &FnCtxt<'a, 'tcx>, did: DefId) {
let violations = fcx.tcx.object_safety_violations(did);
let mut err = report_object_safety_error(fcx.tcx, self.cast_span, did, violations);
err.note(&format!("required by cast to type '{}'", fcx.ty_to_string(self.cast_ty)));
err.emit();
}
/// Checks a cast, and reports an error if one exists. In some cases, this
/// can return Ok and create type errors in the fcx rather than returning
/// directly. coercion-cast is handled in check instead of here.
pub fn do_check(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result<CastKind, CastError> {
use rustc_middle::ty::cast::CastTy::*;
use rustc_middle::ty::cast::IntTy::*;
let (t_from, t_cast) = match (CastTy::from_ty(self.expr_ty), CastTy::from_ty(self.cast_ty))
{
(Some(t_from), Some(t_cast)) => (t_from, t_cast),
// Function item types may need to be reified before casts.
(None, Some(t_cast)) => {
match *self.expr_ty.kind() {
ty::FnDef(..) => {
// Attempt a coercion to a fn pointer type.
let f = fcx.normalize_associated_types_in(
self.expr.span,
self.expr_ty.fn_sig(fcx.tcx),
);
let res = fcx.try_coerce(
self.expr,
self.expr_ty,
fcx.tcx.mk_fn_ptr(f),
AllowTwoPhase::No,
);
if let Err(TypeError::IntrinsicCast) = res {
return Err(CastError::IllegalCast);
}
if res.is_err() {
return Err(CastError::NonScalar);
}
(FnPtr, t_cast)
}
// Special case some errors for references, and check for
// array-ptr-casts. `Ref` is not a CastTy because the cast
// is split into a coercion to a pointer type, followed by
// a cast.
ty::Ref(_, inner_ty, mutbl) => {
return match t_cast {
Int(_) | Float => match *inner_ty.kind() {
ty::Int(_)
| ty::Uint(_)
| ty::Float(_)
| ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(_)) => {
Err(CastError::NeedDeref)
}
_ => Err(CastError::NeedViaPtr),
},
// array-ptr-cast
Ptr(mt) => {
self.check_ref_cast(fcx, TypeAndMut { mutbl, ty: inner_ty }, mt)
}
_ => Err(CastError::NonScalar),
};
}
_ => return Err(CastError::NonScalar),
}
}
_ => return Err(CastError::NonScalar),
};
match (t_from, t_cast) {
// These types have invariants! can't cast into them.
(_, Int(CEnum) | FnPtr) => Err(CastError::NonScalar),
// * -> Bool
(_, Int(Bool)) => Err(CastError::CastToBool),
// * -> Char
(Int(U(ty::UintTy::U8)), Int(Char)) => Ok(CastKind::U8CharCast), // u8-char-cast
(_, Int(Char)) => Err(CastError::CastToChar),
// prim -> float,ptr
(Int(Bool) | Int(CEnum) | Int(Char), Float) => Err(CastError::NeedViaInt),
(Int(Bool) | Int(CEnum) | Int(Char) | Float, Ptr(_)) | (Ptr(_) | FnPtr, Float) => {
Err(CastError::IllegalCast)
}
// ptr -> *
(Ptr(m_e), Ptr(m_c)) => self.check_ptr_ptr_cast(fcx, m_e, m_c), // ptr-ptr-cast
(Ptr(m_expr), Int(_)) => self.check_ptr_addr_cast(fcx, m_expr), // ptr-addr-cast
(FnPtr, Int(_)) => Ok(CastKind::FnPtrAddrCast),
// * -> ptr
(Int(_), Ptr(mt)) => self.check_addr_ptr_cast(fcx, mt), // addr-ptr-cast
(FnPtr, Ptr(mt)) => self.check_fptr_ptr_cast(fcx, mt),
// prim -> prim
(Int(CEnum), Int(_)) => {
self.cenum_impl_drop_lint(fcx);
Ok(CastKind::EnumCast)
}
(Int(Char) | Int(Bool), Int(_)) => Ok(CastKind::PrimIntCast),
(Int(_) | Float, Int(_) | Float) => Ok(CastKind::NumericCast),
}
}
fn check_ptr_ptr_cast(
&self,
fcx: &FnCtxt<'a, 'tcx>,
m_expr: ty::TypeAndMut<'tcx>,
m_cast: ty::TypeAndMut<'tcx>,
) -> Result<CastKind, CastError> {
debug!("check_ptr_ptr_cast m_expr={:?} m_cast={:?}", m_expr, m_cast);
// ptr-ptr cast. vtables must match.
let expr_kind = fcx.pointer_kind(m_expr.ty, self.span)?;
let cast_kind = fcx.pointer_kind(m_cast.ty, self.span)?;
let cast_kind = match cast_kind {
// We can't cast if target pointer kind is unknown
None => return Err(CastError::UnknownCastPtrKind),
Some(cast_kind) => cast_kind,
};
// Cast to thin pointer is OK
if cast_kind == PointerKind::Thin {
return Ok(CastKind::PtrPtrCast);
}
let expr_kind = match expr_kind {
// We can't cast to fat pointer if source pointer kind is unknown
None => return Err(CastError::UnknownExprPtrKind),
Some(expr_kind) => expr_kind,
};
// thin -> fat? report invalid cast (don't complain about vtable kinds)
if expr_kind == PointerKind::Thin {
return Err(CastError::SizedUnsizedCast);
}
// vtable kinds must match
if cast_kind == expr_kind {
Ok(CastKind::PtrPtrCast)
} else {
Err(CastError::DifferingKinds)
}
}
fn check_fptr_ptr_cast(
&self,
fcx: &FnCtxt<'a, 'tcx>,
m_cast: ty::TypeAndMut<'tcx>,
) -> Result<CastKind, CastError> {
// fptr-ptr cast. must be to thin ptr
match fcx.pointer_kind(m_cast.ty, self.span)? {
None => Err(CastError::UnknownCastPtrKind),
Some(PointerKind::Thin) => Ok(CastKind::FnPtrPtrCast),
_ => Err(CastError::IllegalCast),
}
}
fn check_ptr_addr_cast(
&self,
fcx: &FnCtxt<'a, 'tcx>,
m_expr: ty::TypeAndMut<'tcx>,
) -> Result<CastKind, CastError> {
// ptr-addr cast. must be from thin ptr
match fcx.pointer_kind(m_expr.ty, self.span)? {
None => Err(CastError::UnknownExprPtrKind),
Some(PointerKind::Thin) => Ok(CastKind::PtrAddrCast),
_ => Err(CastError::NeedViaThinPtr),
}
}
fn check_ref_cast(
&self,
fcx: &FnCtxt<'a, 'tcx>,
m_expr: ty::TypeAndMut<'tcx>,
m_cast: ty::TypeAndMut<'tcx>,
) -> Result<CastKind, CastError> {
// array-ptr-cast.
if m_expr.mutbl == hir::Mutability::Not && m_cast.mutbl == hir::Mutability::Not {
if let ty::Array(ety, _) = m_expr.ty.kind() {
// Due to the limitations of LLVM global constants,
// region pointers end up pointing at copies of
// vector elements instead of the original values.
// To allow raw pointers to work correctly, we
// need to special-case obtaining a raw pointer
// from a region pointer to a vector.
// Coerce to a raw pointer so that we generate AddressOf in MIR.
let array_ptr_type = fcx.tcx.mk_ptr(m_expr);
fcx.try_coerce(self.expr, self.expr_ty, array_ptr_type, AllowTwoPhase::No)
.unwrap_or_else(|_| {
bug!(
"could not cast from reference to array to pointer to array ({:?} to {:?})",
self.expr_ty,
array_ptr_type,
)
});
// this will report a type mismatch if needed
fcx.demand_eqtype(self.span, ety, m_cast.ty);
return Ok(CastKind::ArrayPtrCast);
}
}
Err(CastError::IllegalCast)
}
fn check_addr_ptr_cast(
&self,
fcx: &FnCtxt<'a, 'tcx>,
m_cast: TypeAndMut<'tcx>,
) -> Result<CastKind, CastError> {
// addr-ptr cast. target pointer must be thin.
match fcx.pointer_kind(m_cast.ty, self.span)? {
None => Err(CastError::UnknownCastPtrKind),
Some(PointerKind::Thin) => Ok(CastKind::AddrPtrCast),
_ => Err(CastError::IllegalCast),
}
}
fn try_coercion_cast(&self, fcx: &FnCtxt<'a, 'tcx>) -> Result<(), ty::error::TypeError<'_>> {
match fcx.try_coerce(self.expr, self.expr_ty, self.cast_ty, AllowTwoPhase::No) {
Ok(_) => Ok(()),
Err(err) => Err(err),
}
}
fn cenum_impl_drop_lint(&self, fcx: &FnCtxt<'a, 'tcx>) {
if let ty::Adt(d, _) = self.expr_ty.kind() {
if d.has_dtor(fcx.tcx) {
fcx.tcx.struct_span_lint_hir(
lint::builtin::CENUM_IMPL_DROP_CAST,
self.expr.hir_id,
self.span,
|err| {
err.build(&format!(
"cannot cast enum `{}` into integer `{}` because it implements `Drop`",
self.expr_ty, self.cast_ty
))
.emit();
},
);
}
}
}
}
impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
fn type_is_known_to_be_sized_modulo_regions(&self, ty: Ty<'tcx>, span: Span) -> bool {
let lang_item = self.tcx.require_lang_item(LangItem::Sized, None);
traits::type_known_to_meet_bound_modulo_regions(self, self.param_env, ty, lang_item, span)
}
}
datastructures.py
"""
This module defines a data structure for manipulating HTTP headers.
"""
from typing import (
Any,
Dict,
Iterable,
Iterator,
List,
Mapping,
MutableMapping,
Tuple,
Union,
)
__all__ = ["Headers", "MultipleValuesError"]
class MultipleValuesError(LookupError):
"""
Exception raised when :class:`Headers` has more than one value for a key.
"""
def __str__(self) -> str:
# Implement the same logic as KeyError_str in Objects/exceptions.c.
if len(self.args) == 1:
return repr(self.args[0])
return super().__str__()
class Headers(MutableMapping[str, str]):
"""
Efficient data structure for manipulating HTTP headers.
A :class:`list` of ``(name, values)`` is inefficient for lookups.
A :class:`dict` doesn't suffice because header names are case-insensitive
and multiple occurrences of headers with the same name are possible.
:class:`Headers` stores HTTP headers in a hybrid data structure to provide
efficient insertions and lookups while preserving the original data.
In order to account for multiple values with minimal hassle,
:class:`Headers` follows this logic:
- When getting a header with ``headers[name]``:
- if there's no value, :exc:`KeyError` is raised;
- if there's exactly one value, it's returned;
- if there's more than one value, :exc:`MultipleValuesError` is raised.
- When setting a header with ``headers[name] = value``, the value is
appended to the list of values for that header.
- When deleting a header with ``del headers[name]``, all values for that
header are removed (this is slow).
Other methods for manipulating headers are consistent with this logic.
As long as no header occurs multiple times, :class:`Headers` behaves like
:class:`dict`, except keys are lower-cased to provide case-insensitivity.
Two methods support manipulating multiple values explicitly:
- :meth:`get_all` returns a list of all values for a header;
- :meth:`raw_items` returns an iterator of ``(name, value)`` pairs.
"""
__slots__ = ["_dict", "_list"]
def __init__(self, *args: Any, **kwargs: str) -> None:
self._dict: Dict[str, List[str]] = {}
self._list: List[Tuple[str, str]] = []
# MutableMapping.update calls __setitem__ for each (name, value) pair.
self.update(*args, **kwargs)
def __str__(self) -> str:
return "".join(f"{key}: {value}\r\n" for key, value in self._list) + "\r\n"
def __repr__(self) -> str:
return f"{self.__class__.__name__}({self._list!r})"
def copy(self) -> "Headers":
copy = self.__class__()
copy._dict = self._dict.copy()
copy._list = self._list.copy()
return copy
def serialize(self) -> bytes:
# Headers only contain ASCII characters.
return str(self).encode()
# Collection methods
def __contains__(self, key: object) -> bool:
return isinstance(key, str) and key.lower() in self._dict
def __iter__(self) -> Iterator[str]:
return iter(self._dict)
def __len__(self) -> int:
return len(self._dict)
# MutableMapping methods
def __getitem__(self, key: str) -> str:
value = self._dict[key.lower()]
if len(value) == 1:
return value[0]
else:
raise MultipleValuesError(key)
def __setitem__(self, key: str, value: str) -> None:
self._dict.setdefault(key.lower(), []).append(value)
self._list.append((key, value))
def __delitem__(self, key: str) -> None:
key_lower = key.lower()
self._dict.__delitem__(key_lower)
# This is inefficient. Fortunately deleting HTTP headers is uncommon.
self._list = [(k, v) for k, v in self._list if k.lower() != key_lower]
def __eq__(self, other: Any) -> bool:
if not isinstance(other, Headers):
return NotImplemented
return self._list == other._list
def clear(self) -> None:
"""
Remove all headers.
"""
self._dict = {}
self._list = []
# Methods for handling multiple values
def get_all(self, key: str) -> List[str]:
"""
Return the (possibly empty) list of all values for a header.
:param key: header name
"""
return self._dict.get(key.lower(), [])
def raw_items(self) -> Iterator[Tuple[str, str]]:
"""
Return an iterator of all values as ``(name, value)`` pairs.
"""
return iter(self._list)
HeadersLike = Union[Headers, Mapping[str, str], Iterable[Tuple[str, str]]]
csr.go
/*
Copyright 2019 The Jetstack cert-manager contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package pki
import (
"bytes"
"crypto"
"crypto/rand"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"math/big"
"net"
"time"
apiutil "github.com/jetstack/cert-manager/pkg/api/util"
"github.com/jetstack/cert-manager/pkg/apis/certmanager/v1alpha1"
)
// CommonNameForCertificate returns the common name that should be used for the
// given Certificate resource, by inspecting the CommonName and DNSNames fields.
func CommonNameForCertificate(crt *v1alpha1.Certificate) string {
if crt.Spec.CommonName != "" {
return crt.Spec.CommonName
}
if len(crt.Spec.DNSNames) == 0 {
return ""
}
return crt.Spec.DNSNames[0]
}
// DNSNamesForCertificate returns the DNS names that should be used for the
// given Certificate resource, by inspecting the CommonName and DNSNames fields.
func DNSNamesForCertificate(crt *v1alpha1.Certificate) []string {
if len(crt.Spec.DNSNames) == 0 {
if crt.Spec.CommonName == "" {
return []string{}
}
return []string{crt.Spec.CommonName}
}
if crt.Spec.CommonName != "" {
return removeDuplicates(append([]string{crt.Spec.CommonName}, crt.Spec.DNSNames...))
}
return crt.Spec.DNSNames
}
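// For example (illustrative values): with Spec.CommonName == "" and
// Spec.DNSNames == ["a.example.com", "b.example.com"], CommonNameForCertificate
// returns "a.example.com" and DNSNamesForCertificate returns the list as-is;
// with only Spec.CommonName set, both helpers yield just that single name.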
func IPAddressesForCertificate(crt *v1alpha1.Certificate) []net.IP {
var ipAddresses []net.IP
var ip net.IP
for _, ipName := range crt.Spec.IPAddresses {
ip = net.ParseIP(ipName)
if ip != nil {
ipAddresses = append(ipAddresses, ip)
}
}
return ipAddresses
}
func IPAddressesToString(ipAddresses []net.IP) []string {
var ipNames []string
for _, ip := range ipAddresses {
ipNames = append(ipNames, ip.String())
}
return ipNames
}
func removeDuplicates(in []string) []string {
var found []string
Outer:
for _, i := range in {
for _, i2 := range found {
if i2 == i {
continue Outer
}
}
found = append(found, i)
}
return found
}
const defaultOrganization = "cert-manager"
// OrganizationForCertificate will return the Organization to set for the
// Certificate resource.
// If an Organization is not specifically set, a default will be used.
func OrganizationForCertificate(crt *v1alpha1.Certificate) []string {
if len(crt.Spec.Organization) == 0 {
return []string{defaultOrganization}
}
return crt.Spec.Organization
}
var serialNumberLimit = new(big.Int).Lsh(big.NewInt(1), 128)
func buildUsages(usages []v1alpha1.KeyUsage, isCA bool) (ku x509.KeyUsage, eku []x509.ExtKeyUsage, err error) {
var unk []v1alpha1.KeyUsage
if isCA {
ku |= x509.KeyUsageCertSign
}
if len(usages) == 0 {
usages = append(usages, v1alpha1.DefaultKeyUsages()...)
}
for _, u := range usages {
if kuse, ok := apiutil.KeyUsageType(u); ok {
ku |= kuse
} else if ekuse, ok := apiutil.ExtKeyUsageType(u); ok {
eku = append(eku, ekuse)
} else {
unk = append(unk, u)
}
}
if len(unk) > 0 {
err = fmt.Errorf("unknown key usages: %v", unk)
}
return
}
// SubjectForCertificate will return the Subject from the Certificate resource or an empty one if it is not set
func SubjectForCertificate(crt *v1alpha1.Certificate) v1alpha1.X509Subject {
if crt.Spec.Subject == nil {
return v1alpha1.X509Subject{}
}
return *crt.Spec.Subject
}
// GenerateCSR will generate a new *x509.CertificateRequest template to be used
// by issuers that utilise CSRs to obtain Certificates.
// The CSR will not be signed, and should be passed to either EncodeCSR or
// to the x509.CreateCertificateRequest function.
func GenerateCSR(crt *v1alpha1.Certificate) (*x509.CertificateRequest, error) {
commonName := CommonNameForCertificate(crt)
dnsNames := DNSNamesForCertificate(crt)
iPAddresses := IPAddressesForCertificate(crt)
organization := OrganizationForCertificate(crt)
subject := SubjectForCertificate(crt)
if len(commonName) == 0 && len(dnsNames) == 0 {
return nil, fmt.Errorf("no domains specified on certificate")
}
pubKeyAlgo, sigAlgo, err := SignatureAlgorithm(crt)
if err != nil {
return nil, err
}
return &x509.CertificateRequest{
Version: 3,
SignatureAlgorithm: sigAlgo,
PublicKeyAlgorithm: pubKeyAlgo,
Subject: pkix.Name{
Country: subject.Countries,
Organization: organization,
OrganizationalUnit: subject.OrganizationalUnits,
Locality: subject.Localities,
Province: subject.Provinces,
StreetAddress: subject.StreetAddresses,
PostalCode: subject.PostalCodes,
SerialNumber: subject.SerialNumber,
CommonName: commonName,
},
DNSNames: dnsNames,
IPAddresses: iPAddresses,
// TODO: work out how best to handle extensions/key usages here
ExtraExtensions: []pkix.Extension{},
}, nil
}
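// Illustrative flow (assumes crt is a populated Certificate resource and key is
// a crypto.Signer; error handling elided):
//   template, _ := GenerateCSR(crt)
//   csrDER, _ := EncodeCSR(template, key) // DER-encoded, signed CSR (see below)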
// GenerateTemplate will create a x509.Certificate for the given Certificate resource.
// This should create a Certificate template that is equivalent to the CertificateRequest
// generated by GenerateCSR.
// The PublicKey field must be populated by the caller.
func GenerateTemplate(crt *v1alpha1.Certificate) (*x509.Certificate, error) {
commonName := CommonNameForCertificate(crt)
dnsNames := DNSNamesForCertificate(crt)
ipAddresses := IPAddressesForCertificate(crt)
organization := OrganizationForCertificate(crt)
subject := SubjectForCertificate(crt)
keyUsages, extKeyUsages, err := buildUsages(crt.Spec.Usages, crt.Spec.IsCA)
if err != nil {
return nil, err
}
if len(commonName) == 0 && len(dnsNames) == 0 {
return nil, fmt.Errorf("no domains specified on certificate")
}
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, fmt.Errorf("failed to generate serial number: %s", err.Error())
}
certDuration := apiutil.DefaultCertDuration(crt.Spec.Duration)
pubKeyAlgo, _, err := SignatureAlgorithm(crt)
if err != nil {
return nil, err
}
return &x509.Certificate{
Version: 3,
BasicConstraintsValid: true,
SerialNumber: serialNumber,
PublicKeyAlgorithm: pubKeyAlgo,
IsCA: crt.Spec.IsCA,
Subject: pkix.Name{
Country: subject.Countries,
Organization: organization,
OrganizationalUnit: subject.OrganizationalUnits,
Locality: subject.Localities,
Province: subject.Provinces,
StreetAddress: subject.StreetAddresses,
PostalCode: subject.PostalCodes,
SerialNumber: subject.SerialNumber,
CommonName: commonName,
},
NotBefore: time.Now(),
NotAfter: time.Now().Add(certDuration),
// see http://golang.org/pkg/crypto/x509/#KeyUsage
KeyUsage: keyUsages,
ExtKeyUsage: extKeyUsages,
DNSNames: dnsNames,
IPAddresses: ipAddresses,
}, nil
}
// GenerateTemplateFromCertificateRequest will create an x509.Certificate for
// the given CertificateRequest resource
func GenerateTemplateFromCertificateRequest(cr *v1alpha1.CertificateRequest) (*x509.Certificate, error) {
certDuration := apiutil.DefaultCertDuration(cr.Spec.Duration)
keyUsage, extKeyUsage, err := buildUsages(cr.Spec.Usages, cr.Spec.IsCA)
if err != nil {
return nil, err
}
return GenerateTemplateFromCSRPEMWithUsages(cr.Spec.CSRPEM, certDuration, cr.Spec.IsCA, keyUsage, extKeyUsage)
}
func GenerateTemplateFromCSRPEM(csrPEM []byte, duration time.Duration, isCA bool) (*x509.Certificate, error) {
var (
ku x509.KeyUsage
eku []x509.ExtKeyUsage
)
return GenerateTemplateFromCSRPEMWithUsages(csrPEM, duration, isCA, ku, eku)
}
func GenerateTemplateFromCSRPEMWithUsages(csrPEM []byte, duration time.Duration, isCA bool, keyUsage x509.KeyUsage, extKeyUsage []x509.ExtKeyUsage) (*x509.Certificate, error) {
block, _ := pem.Decode(csrPEM)
if block == nil {
return nil, errors.New("failed to decode csr")
}
csr, err := x509.ParseCertificateRequest(block.Bytes)
if err != nil {
return nil, err
}
if err := csr.CheckSignature(); err != nil {
return nil, err
}
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, fmt.Errorf("failed to generate serial number: %s", err.Error())
}
return &x509.Certificate{
Version: csr.Version,
BasicConstraintsValid: true,
SerialNumber: serialNumber,
PublicKeyAlgorithm: csr.PublicKeyAlgorithm,
PublicKey: csr.PublicKey,
IsCA: isCA,
Subject: csr.Subject,
NotBefore: time.Now(),
NotAfter: time.Now().Add(duration),
// see http://golang.org/pkg/crypto/x509/#KeyUsage
KeyUsage: keyUsage,
ExtKeyUsage: extKeyUsage,
DNSNames: csr.DNSNames,
IPAddresses: csr.IPAddresses,
URIs: csr.URIs,
}, nil
}
// SignCertificate returns a signed x509.Certificate object created from the
// given certificate template and issuer certificate.
// publicKey is the public key of the signee, and signerKey is the private
// key of the signer.
// It returns a PEM encoded copy of the Certificate as well as a *x509.Certificate
// which can be used for reading the encoded values.
func SignCertificate(template *x509.Certificate, issuerCert *x509.Certificate, publicKey crypto.PublicKey, signerKey interface{}) ([]byte, *x509.Certificate, error) {
derBytes, err := x509.CreateCertificate(rand.Reader, template, issuerCert, publicKey, signerKey)
if err != nil {
return nil, nil, fmt.Errorf("error creating x509 certificate: %s", err.Error())
}
cert, err := x509.ParseCertificate(derBytes)
if err != nil {
return nil, nil, fmt.Errorf("error decoding DER certificate bytes: %s", err.Error())
}
pemBytes := bytes.NewBuffer([]byte{})
err = pem.Encode(pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: derBytes})
if err != nil {
return nil, nil, fmt.Errorf("error encoding certificate PEM: %s", err.Error())
}
return pemBytes.Bytes(), cert, err
}
// SignCSRTemplate signs a certificate template usually based upon a CSR. This
// function expects all fields to be present in the certificate template,
// including its public key.
// It returns the certificate data followed by the CA data, encoded in PEM format.
func SignCSRTemplate(caCerts []*x509.Certificate, caKey crypto.Signer, template *x509.Certificate) ([]byte, []byte, error) {
if len(caCerts) == 0 {
return nil, nil, errors.New("no CA certificates given to sign CSR template")
}
caCert := caCerts[0]
certPem, _, err := SignCertificate(template, caCert, template.PublicKey, caKey)
if err != nil {
return nil, nil, err
}
chainPem, err := EncodeX509Chain(caCerts)
if err != nil {
return nil, nil, err
}
certPem = append(certPem, chainPem...)
// encode the CA certificate to be bundled in the output
caPem, err := EncodeX509(caCerts[0])
if err != nil {
return nil, nil, err
}
return certPem, caPem, nil
}
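// Illustrative call (assumes caCerts/caKey come from an issuer and template
// from one of the GenerateTemplate* helpers; error handling elided):
//   certPEM, caPEM, _ := SignCSRTemplate(caCerts, caKey, template)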
// EncodeCSR calls x509.CreateCertificateRequest to sign the given CSR template.
// It returns a DER encoded signed CSR.
func EncodeCSR(template *x509.CertificateRequest, key crypto.Signer) ([]byte, error) {
derBytes, err := x509.CreateCertificateRequest(rand.Reader, template, key)
if err != nil {
return nil, fmt.Errorf("error creating x509 certificate: %s", err.Error())
}
return derBytes, nil
}
// EncodeX509 will encode a *x509.Certificate into PEM format.
func EncodeX509(cert *x509.Certificate) ([]byte, error) {
caPem := bytes.NewBuffer([]byte{})
err := pem.Encode(caPem, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
if err != nil {
return nil, err
}
return caPem.Bytes(), nil
}
// EncodeX509Chain will encode an *x509.Certificate chain into PEM format.
func EncodeX509Chain(certs []*x509.Certificate) ([]byte, error) {
caPem := bytes.NewBuffer([]byte{})
for _, cert := range certs {
if bytes.Equal(cert.RawIssuer, cert.RawSubject) {
// Don't include self-signed certificate
continue
}
err := pem.Encode(caPem, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
if err != nil {
return nil, err
}
}
return caPem.Bytes(), nil
}
// SignatureAlgorithm will determine the appropriate signature algorithm for
// the given certificate.
// Adapted from https://github.com/cloudflare/cfssl/blob/master/csr/csr.go#L102
func SignatureAlgorithm(crt *v1alpha1.Certificate) (x509.PublicKeyAlgorithm, x509.SignatureAlgorithm, error) {
var sigAlgo x509.SignatureAlgorithm
var pubKeyAlgo x509.PublicKeyAlgorithm
switch crt.Spec.KeyAlgorithm {
case v1alpha1.KeyAlgorithm(""):
// If keyAlgorithm is not specified, we default to rsa with keysize 2048
pubKeyAlgo = x509.RSA
sigAlgo = x509.SHA256WithRSA
case v1alpha1.RSAKeyAlgorithm:
pubKeyAlgo = x509.RSA
switch {
case crt.Spec.KeySize >= 4096:
sigAlgo = x509.SHA512WithRSA
case crt.Spec.KeySize >= 3072:
sigAlgo = x509.SHA384WithRSA
case crt.Spec.KeySize >= 2048:
sigAlgo = x509.SHA256WithRSA
// 0 == not set
case crt.Spec.KeySize == 0:
sigAlgo = x509.SHA256WithRSA
default:
return x509.UnknownPublicKeyAlgorithm, x509.UnknownSignatureAlgorithm, fmt.Errorf("unsupported rsa keysize specified: %d. min keysize %d", crt.Spec.KeySize, MinRSAKeySize)
}
case v1alpha1.ECDSAKeyAlgorithm:
pubKeyAlgo = x509.ECDSA
switch crt.Spec.KeySize {
case 521:
sigAlgo = x509.ECDSAWithSHA512
case 384:
sigAlgo = x509.ECDSAWithSHA384
case 256:
sigAlgo = x509.ECDSAWithSHA256
case 0:
sigAlgo = x509.ECDSAWithSHA256
default:
return x509.UnknownPublicKeyAlgorithm, x509.UnknownSignatureAlgorithm, fmt.Errorf("unsupported ecdsa keysize specified: %d", crt.Spec.KeySize)
}
default:
return x509.UnknownPublicKeyAlgorithm, x509.UnknownSignatureAlgorithm, fmt.Errorf("unsupported algorithm specified: %s. should be either 'ecdsa' or 'rsa'", crt.Spec.KeyAlgorithm)
}
return pubKeyAlgo, sigAlgo, nil
}
jquery-qrcode-0.14.0.min.js
/*! jquery-qrcode v0.14.0 - https://larsjung.de/jquery-qrcode/ */
n,o,a=r.size,i=r.background,u=Math.floor,f=t.moduleCount,c=u(a/f),l=u(.5*(a-c*f)),g={position:"relative",left:0,top:0,padding:0,margin:0,width:a,height:a},s={position:"absolute",padding:0,margin:0,width:c,height:c,"background-color":r.fill},v=w("<div/>").data("qrcode",t).css(g);for(i&&v.css("background-color",i),n=0;f>n;n+=1)for(o=0;f>o;o+=1)t.isDark(n,o)&&w("<div/>").css(s).css({left:l+o*c,top:l+n*c}).appendTo(v);return v}function d(r){return m&&"canvas"===r.render?s(r):m&&"image"===r.render?v(r):h(r)}var w=window.jQuery,m=function(){var r=document.createElement("canvas");return!(!r.getContext||!r.getContext("2d"))}(),y={render:"canvas",minVersion:1,maxVersion:40,ecLevel:"L",left:0,top:0,size:200,fill:"#000",background:null,text:"no text",radius:0,quiet:0,mode:0,mSize:.1,mPosX:.5,mPosY:.5,label:"no label",fontname:"sans",fontcolor:"#000",image:null};w.fn.qrcode=function(r){var t=w.extend({},y,r);return this.each(function(r,e){"canvas"===e.nodeName.toLowerCase()?g(e,t):w(e).append(d(t))})}}(function(){var r=function(){function r(t,e){if("undefined"==typeof t.length)throw new Error(t.length+"/"+e);var n=function(){for(var r=0;r<t.length&&0==t[r];)r+=1;for(var n=new Array(t.length-r+e),o=0;o<t.length-r;o+=1)n[o]=t[o+r];return n}(),o={};return o.getAt=function(r){return n[r]},o.getLength=function(){return n.length},o.multiply=function(t){for(var e=new Array(o.getLength()+t.getLength()-1),n=0;n<o.getLength();n+=1)for(var a=0;a<t.getLength();a+=1)e[n+a]^=i.gexp(i.glog(o.getAt(n))+i.glog(t.getAt(a)));return r(e,0)},o.mod=function(t){if(o.getLength()-t.getLength()<0)return o;for(var e=i.glog(o.getAt(0))-i.glog(t.getAt(0)),n=new Array(o.getLength()),a=0;a<o.getLength();a+=1)n[a]=o.getAt(a);for(var a=0;a<t.getLength();a+=1)n[a]^=i.gexp(i.glog(t.getAt(a))+e);return r(n,0).mod(t)},o}var t=function(t,e){var o=236,i=17,l=t,g=n[e],s=null,v=0,d=null,w=new Array,m={},y=function(r,t){v=4*l+17,s=function(r){for(var t=new Array(r),e=0;r>e;e+=1){t[e]=new Array(r);for(var n=0;r>n;n+=1)t[e][n]=null}return t}(v),T(0,0),T(v-7,0),T(0,v-7),A(),B(),k(r,t),l>=7&&E(r),null==d&&(d=D(l,g,w)),M(d,t)},T=function(r,t){for(var e=-1;7>=e;e+=1)if(!(-1>=r+e||r+e>=v))for(var n=-1;7>=n;n+=1)-1>=t+n||t+n>=v||(e>=0&&6>=e&&(0==n||6==n)||n>=0&&6>=n&&(0==e||6==e)||e>=2&&4>=e&&n>=2&&4>=n?s[r+e][t+n]=!0:s[r+e][t+n]=!1)},p=function(){for(var r=0,t=0,e=0;8>e;e+=1){y(!0,e);var n=a.getLostPoint(m);(0==e||r>n)&&(r=n,t=e)}return t},B=function(){for(var r=8;v-8>r;r+=1)null==s[r][6]&&(s[r][6]=r%2==0);for(var t=8;v-8>t;t+=1)null==s[6][t]&&(s[6][t]=t%2==0)},A=function(){for(var r=a.getPatternPosition(l),t=0;t<r.length;t+=1)for(var e=0;e<r.length;e+=1){var n=r[t],o=r[e];if(null==s[n][o])for(var i=-2;2>=i;i+=1)for(var u=-2;2>=u;u+=1)-2==i||2==i||-2==u||2==u||0==i&&0==u?s[n+i][o+u]=!0:s[n+i][o+u]=!1}},E=function(r){for(var t=a.getBCHTypeNumber(l),e=0;18>e;e+=1){var n=!r&&1==(t>>e&1);s[Math.floor(e/3)][e%3+v-8-3]=n}for(var e=0;18>e;e+=1){var n=!r&&1==(t>>e&1);s[e%3+v-8-3][Math.floor(e/3)]=n}},k=function(r,t){for(var e=g<<3|t,n=a.getBCHTypeInfo(e),o=0;15>o;o+=1){var i=!r&&1==(n>>o&1);6>o?s[o][8]=i:8>o?s[o+1][8]=i:s[v-15+o][8]=i}for(var o=0;15>o;o+=1){var i=!r&&1==(n>>o&1);8>o?s[8][v-o-1]=i:9>o?s[8][15-o-1+1]=i:s[8][15-o-1]=i}s[v-8][8]=!r},M=function(r,t){for(var e=-1,n=v-1,o=7,i=0,u=a.getMaskFunction(t),f=v-1;f>0;f-=2)for(6==f&&(f-=1);;){for(var c=0;2>c;c+=1)if(null==s[n][f-c]){var l=!1;i<r.length&&(l=1==(r[i]>>>o&1));var g=u(n,f-c);g&&(l=!l),s[n][f-c]=l,o-=1,-1==o&&(i+=1,o=7)}if(n+=e,0>n||n>=v){n-=e,e=-e;break}}},C=function(t,e){for(var 
n=0,o=0,i=0,u=new Array(e.length),f=new Array(e.length),c=0;c<e.length;c+=1){var l=e[c].dataCount,g=e[c].totalCount-l;o=Math.max(o,l),i=Math.max(i,g),u[c]=new Array(l);for(var s=0;s<u[c].length;s+=1)u[c][s]=255&t.getBuffer()[s+n];n+=l;var v=a.getErrorCorrectPolynomial(g),h=r(u[c],v.getLength()-1),d=h.mod(v);f[c]=new Array(v.getLength()-1);for(var s=0;s<f[c].length;s+=1){var w=s+d.getLength()-f[c].length;f[c][s]=w>=0?d.getAt(w):0}}for(var m=0,s=0;s<e.length;s+=1)m+=e[s].totalCount;for(var y=new Array(m),T=0,s=0;o>s;s+=1)for(var c=0;c<e.length;c+=1)s<u[c].length&&(y[T]=u[c][s],T+=1);for(var s=0;i>s;s+=1)for(var c=0;c<e.length;c+=1)s<f[c].length&&(y[T]=f[c][s],T+=1);return y},D=function(r,t,e){for(var n=u.getRSBlocks(r,t),c=f(),l=0;l<e.length;l+=1){var g=e[l];c.put(g.getMode(),4),c.put(g.getLength(),a.getLengthInBits(g.getMode(),r)),g.write(c)}for(var s=0,l=0;l<n.length;l+=1)s+=n[l].dataCount;if(c.getLengthInBits()>8*s)throw new Error("code length overflow. ("+c.getLengthInBits()+">"+8*s+")");for(c.getLengthInBits()+4<=8*s&&c.put(0,4);c.getLengthInBits()%8!=0;)c.putBit(!1);for(;;){if(c.getLengthInBits()>=8*s)break;if(c.put(o,8),c.getLengthInBits()>=8*s)break;c.put(i,8)}return C(c,n)};return m.addData=function(r){var t=c(r);w.push(t),d=null},m.isDark=function(r,t){if(0>r||r>=v||0>t||t>=v)throw new Error(r+","+t);return s[r][t]},m.getModuleCount=function(){return v},m.make=function(){y(!1,p())},m.createTableTag=function(r,t){r=r||2,t="undefined"==typeof t?4*r:t;var e="";e+='<table style="',e+=" border-width: 0px; border-style: none;",e+=" border-collapse: collapse;",e+=" padding: 0px; margin: "+t+"px;",e+='">',e+="<tbody>";for(var n=0;n<m.getModuleCount();n+=1){e+="<tr>";for(var o=0;o<m.getModuleCount();o+=1)e+='<td style="',e+=" border-width: 0px; border-style: none;",e+=" border-collapse: collapse;",e+=" padding: 0px; margin: 0px;",e+=" width: "+r+"px;",e+=" height: "+r+"px;",e+=" background-color: ",e+=m.isDark(n,o)?"#000000":"#ffffff",e+=";",e+='"/>';e+="</tr>"}return e+="</tbody>",e+="</table>"},m.createImgTag=function(r,t){r=r||2,t="undefined"==typeof t?4*r:t;var e=m.getModuleCount()*r+2*t,n=t,o=e-t;return h(e,e,function(t,e){if(t>=n&&o>t&&e>=n&&o>e){var a=Math.floor((t-n)/r),i=Math.floor((e-n)/r);return m.isDark(i,a)?0:1}return 1})},m};t.stringToBytes=function(r){for(var t=new Array,e=0;e<r.length;e+=1){var n=r.charCodeAt(e);t.push(255&n)}return t},t.createStringToBytes=function(r,t){var e=function(){for(var e=s(r),n=function(){var r=e.read();if(-1==r)throw new Error;return r},o=0,a={};;){var i=e.read();if(-1==i)break;var u=n(),f=n(),c=n(),l=String.fromCharCode(i<<8|u),g=f<<8|c;a[l]=g,o+=1}if(o!=t)throw new Error(o+" != "+t);return a}(),n="?".charCodeAt(0);return function(r){for(var t=new Array,o=0;o<r.length;o+=1){var a=r.charCodeAt(o);if(128>a)t.push(a);else{var i=e[r.charAt(o)];"number"==typeof i?(255&i)==i?t.push(i):(t.push(i>>>8),t.push(255&i)):t.push(n)}}return t}};var e={MODE_NUMBER:1,MODE_ALPHA_NUM:2,MODE_8BIT_BYTE:4,MODE_KANJI:8},n={L:1,M:0,Q:3,H:2},o={PATTERN000:0,PATTERN001:1,PATTERN010:2,PATTERN011:3,PATTERN100:4,PATTERN101:5,PATTERN110:6,PATTERN111:7},a=function(){var 
t=[[],[6,18],[6,22],[6,26],[6,30],[6,34],[6,22,38],[6,24,42],[6,26,46],[6,28,50],[6,30,54],[6,32,58],[6,34,62],[6,26,46,66],[6,26,48,70],[6,26,50,74],[6,30,54,78],[6,30,56,82],[6,30,58,86],[6,34,62,90],[6,28,50,72,94],[6,26,50,74,98],[6,30,54,78,102],[6,28,54,80,106],[6,32,58,84,110],[6,30,58,86,114],[6,34,62,90,118],[6,26,50,74,98,122],[6,30,54,78,102,126],[6,26,52,78,104,130],[6,30,56,82,108,134],[6,34,60,86,112,138],[6,30,58,86,114,142],[6,34,62,90,118,146],[6,30,54,78,102,126,150],[6,24,50,76,102,128,154],[6,28,54,80,106,132,158],[6,32,58,84,110,136,162],[6,26,54,82,110,138,166],[6,30,58,86,114,142,170]],n=1335,a=7973,u=21522,f={},c=function(r){for(var t=0;0!=r;)t+=1,r>>>=1;return t};return f.getBCHTypeInfo=function(r){for(var t=r<<10;c(t)-c(n)>=0;)t^=n<<c(t)-c(n);return(r<<10|t)^u},f.getBCHTypeNumber=function(r){for(var t=r<<12;c(t)-c(a)>=0;)t^=a<<c(t)-c(a);return r<<12|t},f.getPatternPosition=function(r){return t[r-1]},f.getMaskFunction=function(r){switch(r){case o.PATTERN000:return function(r,t){return(r+t)%2==0};case o.PATTERN001:return function(r,t){return r%2==0};case o.PATTERN010:return function(r,t){return t%3==0};case o.PATTERN011:return function(r,t){return(r+t)%3==0};case o.PATTERN100:return function(r,t){return(Math.floor(r/2)+Math.floor(t/3))%2==0};case o.PATTERN101:return function(r,t){return r*t%2+r*t%3==0};case o.PATTERN110:return function(r,t){return(r*t%2+r*t%3)%2==0};case o.PATTERN111:return function(r,t){return(r*t%3+(r+t)%2)%2==0};default:throw new Error("bad maskPattern:"+r)}},f.getErrorCorrectPolynomial=function(t){for(var e=r([1],0),n=0;t>n;n+=1)e=e.multiply(r([1,i.gexp(n)],0));return e},f.getLengthInBits=function(r,t){if(t>=1&&10>t)switch(r){case e.MODE_NUMBER:return 10;case e.MODE_ALPHA_NUM:return 9;case e.MODE_8BIT_BYTE:return 8;case e.MODE_KANJI:return 8;default:throw new Error("mode:"+r)}else if(27>t)switch(r){case e.MODE_NUMBER:return 12;case e.MODE_ALPHA_NUM:return 11;case e.MODE_8BIT_BYTE:return 16;case e.MODE_KANJI:return 10;default:throw new Error("mode:"+r)}else{if(!(41>t))throw new Error("type:"+t);switch(r){case e.MODE_NUMBER:return 14;case e.MODE_ALPHA_NUM:return 13;case e.MODE_8BIT_BYTE:return 16;case e.MODE_KANJI:return 12;default:throw new Error("mode:"+r)}}},f.getLostPoint=function(r){for(var t=r.getModuleCount(),e=0,n=0;t>n;n+=1)for(var o=0;t>o;o+=1){for(var a=0,i=r.isDark(n,o),u=-1;1>=u;u+=1)if(!(0>n+u||n+u>=t))for(var f=-1;1>=f;f+=1)0>o+f||o+f>=t||(0!=u||0!=f)&&i==r.isDark(n+u,o+f)&&(a+=1);a>5&&(e+=3+a-5)}for(var n=0;t-1>n;n+=1)for(var o=0;t-1>o;o+=1){var c=0;r.isDark(n,o)&&(c+=1),r.isDark(n+1,o)&&(c+=1),r.isDark(n,o+1)&&(c+=1),r.isDark(n+1,o+1)&&(c+=1),(0==c||4==c)&&(e+=3)}for(var n=0;t>n;n+=1)for(var o=0;t-6>o;o+=1)r.isDark(n,o)&&!r.isDark(n,o+1)&&r.isDark(n,o+2)&&r.isDark(n,o+3)&&r.isDark(n,o+4)&&!r.isDark(n,o+5)&&r.isDark(n,o+6)&&(e+=40);for(var o=0;t>o;o+=1)for(var n=0;t-6>n;n+=1)r.isDark(n,o)&&!r.isDark(n+1,o)&&r.isDark(n+2,o)&&r.isDark(n+3,o)&&r.isDark(n+4,o)&&!r.isDark(n+5,o)&&r.isDark(n+6,o)&&(e+=40);for(var l=0,o=0;t>o;o+=1)for(var n=0;t>n;n+=1)r.isDark(n,o)&&(l+=1);var g=Math.abs(100*l/t/t-50)/5;return e+=10*g},f}(),i=function(){for(var r=new Array(256),t=new Array(256),e=0;8>e;e+=1)r[e]=1<<e;for(var e=8;256>e;e+=1)r[e]=r[e-4]^r[e-5]^r[e-6]^r[e-8];for(var e=0;255>e;e+=1)t[r[e]]=e;var n={};return n.glog=function(r){if(1>r)throw new Error("glog("+r+")");return t[r]},n.gexp=function(t){for(;0>t;)t+=255;for(;t>=256;)t-=255;return r[t]},n}(),u=function(){var 
r=[[1,26,19],[1,26,16],[1,26,13],[1,26,9],[1,44,34],[1,44,28],[1,44,22],[1,44,16],[1,70,55],[1,70,44],[2,35,17],[2,35,13],[1,100,80],[2,50,32],[2,50,24],[4,25,9],[1,134,108],[2,67,43],[2,33,15,2,34,16],[2,33,11,2,34,12],[2,86,68],[4,43,27],[4,43,19],[4,43,15],[2,98,78],[4,49,31],[2,32,14,4,33,15],[4,39,13,1,40,14],[2,121,97],[2,60,38,2,61,39],[4,40,18,2,41,19],[4,40,14,2,41,15],[2,146,116],[3,58,36,2,59,37],[4,36,16,4,37,17],[4,36,12,4,37,13],[2,86,68,2,87,69],[4,69,43,1,70,44],[6,43,19,2,44,20],[6,43,15,2,44,16],[4,101,81],[1,80,50,4,81,51],[4,50,22,4,51,23],[3,36,12,8,37,13],[2,116,92,2,117,93],[6,58,36,2,59,37],[4,46,20,6,47,21],[7,42,14,4,43,15],[4,133,107],[8,59,37,1,60,38],[8,44,20,4,45,21],[12,33,11,4,34,12],[3,145,115,1,146,116],[4,64,40,5,65,41],[11,36,16,5,37,17],[11,36,12,5,37,13],[5,109,87,1,110,88],[5,65,41,5,66,42],[5,54,24,7,55,25],[11,36,12,7,37,13],[5,122,98,1,123,99],[7,73,45,3,74,46],[15,43,19,2,44,20],[3,45,15,13,46,16],[1,135,107,5,136,108],[10,74,46,1,75,47],[1,50,22,15,51,23],[2,42,14,17,43,15],[5,150,120,1,151,121],[9,69,43,4,70,44],[17,50,22,1,51,23],[2,42,14,19,43,15],[3,141,113,4,142,114],[3,70,44,11,71,45],[17,47,21,4,48,22],[9,39,13,16,40,14],[3,135,107,5,136,108],[3,67,41,13,68,42],[15,54,24,5,55,25],[15,43,15,10,44,16],[4,144,116,4,145,117],[17,68,42],[17,50,22,6,51,23],[19,46,16,6,47,17],[2,139,111,7,140,112],[17,74,46],[7,54,24,16,55,25],[34,37,13],[4,151,121,5,152,122],[4,75,47,14,76,48],[11,54,24,14,55,25],[16,45,15,14,46,16],[6,147,117,4,148,118],[6,73,45,14,74,46],[11,54,24,16,55,25],[30,46,16,2,47,17],[8,132,106,4,133,107],[8,75,47,13,76,48],[7,54,24,22,55,25],[22,45,15,13,46,16],[10,142,114,2,143,115],[19,74,46,4,75,47],[28,50,22,6,51,23],[33,46,16,4,47,17],[8,152,122,4,153,123],[22,73,45,3,74,46],[8,53,23,26,54,24],[12,45,15,28,46,16],[3,147,117,10,148,118],[3,73,45,23,74,46],[4,54,24,31,55,25],[11,45,15,31,46,16],[7,146,116,7,147,117],[21,73,45,7,74,46],[1,53,23,37,54,24],[19,45,15,26,46,16],[5,145,115,10,146,116],[19,75,47,10,76,48],[15,54,24,25,55,25],[23,45,15,25,46,16],[13,145,115,3,146,116],[2,74,46,29,75,47],[42,54,24,1,55,25],[23,45,15,28,46,16],[17,145,115],[10,74,46,23,75,47],[10,54,24,35,55,25],[19,45,15,35,46,16],[17,145,115,1,146,116],[14,74,46,21,75,47],[29,54,24,19,55,25],[11,45,15,46,46,16],[13,145,115,6,146,116],[14,74,46,23,75,47],[44,54,24,7,55,25],[59,46,16,1,47,17],[12,151,121,7,152,122],[12,75,47,26,76,48],[39,54,24,14,55,25],[22,45,15,41,46,16],[6,151,121,14,152,122],[6,75,47,34,76,48],[46,54,24,10,55,25],[2,45,15,64,46,16],[17,152,122,4,153,123],[29,74,46,14,75,47],[49,54,24,10,55,25],[24,45,15,46,46,16],[4,152,122,18,153,123],[13,74,46,32,75,47],[48,54,24,14,55,25],[42,45,15,32,46,16],[20,147,117,4,148,118],[40,75,47,7,76,48],[43,54,24,22,55,25],[10,45,15,67,46,16],[19,148,118,6,149,119],[18,75,47,31,76,48],[34,54,24,34,55,25],[20,45,15,61,46,16]],t=function(r,t){var e={};return e.totalCount=r,e.dataCount=t,e},e={},o=function(t,e){switch(e){case n.L:return r[4*(t-1)+0];case n.M:return r[4*(t-1)+1];case n.Q:return r[4*(t-1)+2];case n.H:return r[4*(t-1)+3];default:return}};return e.getRSBlocks=function(r,e){var n=o(r,e);if("undefined"==typeof n)throw new Error("bad rs block @ typeNumber:"+r+"/errorCorrectLevel:"+e);for(var a=n.length/3,i=new Array,u=0;a>u;u+=1)for(var f=n[3*u+0],c=n[3*u+1],l=n[3*u+2],g=0;f>g;g+=1)i.push(t(c,l));return i},e}(),f=function(){var r=new Array,t=0,e={};return e.getBuffer=function(){return r},e.getAt=function(t){var e=Math.floor(t/8);return 1==(r[e]>>>7-t%8&1)},e.put=function(r,t){for(var 
n=0;t>n;n+=1)e.putBit(1==(r>>>t-n-1&1))},e.getLengthInBits=function(){return t},e.putBit=function(e){var n=Math.floor(t/8);r.length<=n&&r.push(0),e&&(r[n]|=128>>>t%8),t+=1},e},c=function(r){var n=e.MODE_8BIT_BYTE,o=t.stringToBytes(r),a={};return a.getMode=function(){return n},a.getLength=function(r){return o.length},a.write=function(r){for(var t=0;t<o.length;t+=1)r.put(o[t],8)},a},l=function(){var r=new Array,t={};return t.writeByte=function(t){r.push(255&t)},t.writeShort=function(r){t.writeByte(r),t.writeByte(r>>>8)},t.writeBytes=function(r,e,n){e=e||0,n=n||r.length;for(var o=0;n>o;o+=1)t.writeByte(r[o+e])},t.writeString=function(r){for(var e=0;e<r.length;e+=1)t.writeByte(r.charCodeAt(e))},t.toByteArray=function(){return r},t.toString=function(){var t="";t+="[";for(var e=0;e<r.length;e+=1)e>0&&(t+=","),t+=r[e];return t+="]"},t},g=function(){var r=0,t=0,e=0,n="",o={},a=function(r){n+=String.fromCharCode(i(63&r))},i=function(r){if(0>r);else{if(26>r)return 65+r;if(52>r)return 97+(r-26);if(62>r)return 48+(r-52);if(62==r)return 43;if(63==r)return 47}throw new Error("n:"+r)};return o.writeByte=function(n){for(r=r<<8|255&n,t+=8,e+=1;t>=6;)a(r>>>t-6),t-=6},o.flush=function(){if(t>0&&(a(r<<6-t),r=0,t=0),e%3!=0)for(var o=3-e%3,i=0;o>i;i+=1)n+="="},o.toString=function(){return n},o},s=function(r){var t=r,e=0,n=0,o=0,a={};a.read=function(){for(;8>o;){if(e>=t.length){if(0==o)return-1;throw new Error("unexpected end of file./"+o)}var r=t.charAt(e);if(e+=1,"="==r)return o=0,-1;r.match(/^\s$/)||(n=n<<6|i(r.charCodeAt(0)),o+=6)}var a=n>>>o-8&255;return o-=8,a};var i=function(r){if(r>=65&&90>=r)return r-65;if(r>=97&&122>=r)return r-97+26;if(r>=48&&57>=r)return r-48+52;if(43==r)return 62;if(47==r)return 63;throw new Error("c:"+r)};return a},v=function(r,t){var e=r,n=t,o=new Array(r*t),a={};a.setPixel=function(r,t,n){o[t*e+r]=n},a.write=function(r){r.writeString("GIF87a"),r.writeShort(e),r.writeShort(n),r.writeByte(128),r.writeByte(0),r.writeByte(0),r.writeByte(0),r.writeByte(0),r.writeByte(0),r.writeByte(255),r.writeByte(255),r.writeByte(255),r.writeString(","),r.writeShort(0),r.writeShort(0),r.writeShort(e),r.writeShort(n),r.writeByte(0);var t=2,o=u(t);r.writeByte(t);for(var a=0;o.length-a>255;)r.writeByte(255),r.writeBytes(o,a,255),a+=255;r.writeByte(o.length-a),r.writeBytes(o,a,o.length-a),r.writeByte(0),r.writeString(";")};var i=function(r){var t=r,e=0,n=0,o={};return o.write=function(r,o){if(r>>>o!=0)throw new Error("length over");for(;e+o>=8;)t.writeByte(255&(r<<e|n)),o-=8-e,r>>>=8-e,n=0,e=0;n=r<<e|n,e+=o},o.flush=function(){e>0&&t.writeByte(n)},o},u=function(r){for(var t=1<<r,e=(1<<r)+1,n=r+1,a=f(),u=0;t>u;u+=1)a.add(String.fromCharCode(u));a.add(String.fromCharCode(t)),a.add(String.fromCharCode(e));var c=l(),g=i(c);g.write(t,n);var s=0,v=String.fromCharCode(o[s]);for(s+=1;s<o.length;){var h=String.fromCharCode(o[s]);s+=1,a.contains(v+h)?v+=h:(g.write(a.indexOf(v),n),a.size()<4095&&(a.size()==1<<n&&(n+=1),a.add(v+h)),v=h)}return g.write(a.indexOf(v),n),g.write(e,n),g.flush(),c.toByteArray()},f=function(){var r={},t=0,e={};return e.add=function(n){if(e.contains(n))throw new Error("dup key:"+n);r[n]=t,t+=1},e.size=function(){return t},e.indexOf=function(t){return r[t]},e.contains=function(t){return"undefined"!=typeof r[t]},e};return a},h=function(r,t,e,n){for(var o=v(r,t),a=0;t>a;a+=1)for(var i=0;r>i;i+=1)o.setPixel(i,a,e(i,a));var u=l();o.write(u);for(var f=g(),c=u.toByteArray(),s=0;s<c.length;s+=1)f.writeByte(c[s]);f.flush();var h="";return h+="<img",h+=' 
src="',h+="data:image/gif;base64,",h+=f,h+='"',h+=' width="',h+=r,h+='"',h+=' height="',h+=t,h+='"',n&&(h+=' alt="',h+=n,h+='"'),h+="/>"};return t}();return function(r){"function"==typeof define&&define.amd?define([],r):"object"==typeof exports&&(module.exports=r())}(function(){return r}),!function(r){r.stringToBytes=function(r){function t(r){for(var t=[],e=0;e<r.length;e++){var n=r.charCodeAt(e);128>n?t.push(n):2048>n?t.push(192|n>>6,128|63&n):55296>n||n>=57344?t.push(224|n>>12,128|n>>6&63,128|63&n):(e++,n=65536+((1023&n)<<10|1023&r.charCodeAt(e)),t.push(240|n>>18,128|n>>12&63,128|n>>6&63,128|63&n))}return t}return t(r)}}(r),r}()); |
|
Download_01.svelte.d.ts | /// <reference types="svelte" />
import { SvelteComponentTyped } from "svelte";
export interface Download_01Props
extends svelte.JSX.HTMLAttributes<HTMLElementTagNameMap["svg"]> {
/**
* Set a tabindex value
*/
tabindex?: string;
/**
* Set a custom SVG fill color | fill?: string;
}
export default class Download_01 extends SvelteComponentTyped<
Download_01Props,
{},
{}
> {} | * @default "currentColor"
*/ |
handshaker.go | /*
*
* Copyright 2018 gRPC authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
// Package handshaker provides ALTS handshaking functionality for GCP.
package handshaker
import (
"context"
"errors"
"fmt"
"io"
"net"
"sync"
grpc "google.golang.org/grpc"
"github.com/AmirSoleimani/grpc-go/codes"
"github.com/AmirSoleimani/grpc-go/credentials"
core "github.com/AmirSoleimani/grpc-go/credentials/alts/internal"
"github.com/AmirSoleimani/grpc-go/credentials/alts/internal/authinfo"
"github.com/AmirSoleimani/grpc-go/credentials/alts/internal/conn"
altsgrpc "github.com/AmirSoleimani/grpc-go/credentials/alts/internal/proto/grpc_gcp"
altspb "github.com/AmirSoleimani/grpc-go/credentials/alts/internal/proto/grpc_gcp"
)
const (
// The maximum byte size of receive frames.
frameLimit = 64 * 1024 // 64 KB
rekeyRecordProtocolName = "ALTSRP_GCM_AES128_REKEY"
// maxPendingHandshakes represents the maximum number of concurrent
// handshakes.
maxPendingHandshakes = 100
)
var (
hsProtocol = altspb.HandshakeProtocol_ALTS
appProtocols = []string{"grpc"}
recordProtocols = []string{rekeyRecordProtocolName}
keyLength = map[string]int{
rekeyRecordProtocolName: 44,
}
altsRecordFuncs = map[string]conn.ALTSRecordFunc{
// ALTS handshaker protocols.
rekeyRecordProtocolName: func(s core.Side, keyData []byte) (conn.ALTSRecordCrypto, error) {
return conn.NewAES128GCMRekey(s, keyData)
},
}
// Control the number of concurrently created (but not closed) handshakers.
mu sync.Mutex
concurrentHandshakes = int64(0)
// errDropped occurs when maxPendingHandshakes is reached.
errDropped = errors.New("maximum number of concurrent ALTS handshakes is reached")
)
func init() {
for protocol, f := range altsRecordFuncs {
if err := conn.RegisterProtocol(protocol, f); err != nil {
panic(err)
}
}
}
func acquire(n int64) bool {
mu.Lock()
success := maxPendingHandshakes-concurrentHandshakes >= n
if success {
concurrentHandshakes += n
}
mu.Unlock()
return success
}
func release(n int64) {
mu.Lock()
concurrentHandshakes -= n
if concurrentHandshakes < 0 {
mu.Unlock()
panic("bad release")
}
mu.Unlock()
}
// ClientHandshakerOptions contains the client handshaker options that can be
// provided by the caller.
type ClientHandshakerOptions struct {
// ClientIdentity is the handshaker client local identity.
ClientIdentity *altspb.Identity
// TargetName is the server service account name for secure name
// checking.
TargetName string
// TargetServiceAccounts contains a list of expected target service
// accounts. One of these accounts should match one of the accounts in
// the handshaker results. Otherwise, the handshake fails.
TargetServiceAccounts []string
// RPCVersions specifies the gRPC versions accepted by the client.
RPCVersions *altspb.RpcProtocolVersions
}
// ServerHandshakerOptions contains the server handshaker options that can be
// provided by the caller.
type ServerHandshakerOptions struct {
// RPCVersions specifies the gRPC versions accepted by the server.
RPCVersions *altspb.RpcProtocolVersions
}
// DefaultClientHandshakerOptions returns the default client handshaker options.
func DefaultClientHandshakerOptions() *ClientHandshakerOptions {
return &ClientHandshakerOptions{}
}
// DefaultServerHandshakerOptions returns the default server handshaker options.
func | () *ServerHandshakerOptions {
return &ServerHandshakerOptions{}
}
// TODO: add support for future local and remote endpoints in both client
// options and server options (the server options struct does not exist yet;
// it should be created once callers can provide endpoints).
// altsHandshaker is used to complete an ALTS handshake between client and
// server. This handshaker talks to the ALTS handshaker service in the metadata
// server.
type altsHandshaker struct {
// RPC stream used to access the ALTS Handshaker service.
stream altsgrpc.HandshakerService_DoHandshakeClient
// the connection to the peer.
conn net.Conn
// client handshake options.
clientOpts *ClientHandshakerOptions
// server handshake options.
serverOpts *ServerHandshakerOptions
// defines the side doing the handshake, client or server.
side core.Side
}
// NewClientHandshaker creates an ALTS handshaker for GCP which contains an RPC
// stub created using the passed conn and used to talk to the ALTS Handshaker
// service in the metadata server.
func NewClientHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ClientHandshakerOptions) (core.Handshaker, error) {
stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return &altsHandshaker{
stream: stream,
conn: c,
clientOpts: opts,
side: core.ClientSide,
}, nil
}
// NewServerHandshaker creates an ALTS handshaker for GCP which contains an RPC
// stub created using the passed conn and used to talk to the ALTS Handshaker
// service in the metadata server.
func NewServerHandshaker(ctx context.Context, conn *grpc.ClientConn, c net.Conn, opts *ServerHandshakerOptions) (core.Handshaker, error) {
stream, err := altsgrpc.NewHandshakerServiceClient(conn).DoHandshake(ctx, grpc.WaitForReady(true))
if err != nil {
return nil, err
}
return &altsHandshaker{
stream: stream,
conn: c,
serverOpts: opts,
side: core.ServerSide,
}, nil
}
// ClientHandshake starts and completes a client ALTS handshaking for GCP. Once
// done, ClientHandshake returns a secure connection.
func (h *altsHandshaker) ClientHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
if !acquire(1) {
return nil, nil, errDropped
}
defer release(1)
if h.side != core.ClientSide {
return nil, nil, errors.New("only handshakers created using NewClientHandshaker can perform a client handshake")
}
// Create target identities from service account list.
targetIdentities := make([]*altspb.Identity, 0, len(h.clientOpts.TargetServiceAccounts))
for _, account := range h.clientOpts.TargetServiceAccounts {
targetIdentities = append(targetIdentities, &altspb.Identity{
IdentityOneof: &altspb.Identity_ServiceAccount{
ServiceAccount: account,
},
})
}
req := &altspb.HandshakerReq{
ReqOneof: &altspb.HandshakerReq_ClientStart{
ClientStart: &altspb.StartClientHandshakeReq{
HandshakeSecurityProtocol: hsProtocol,
ApplicationProtocols: appProtocols,
RecordProtocols: recordProtocols,
TargetIdentities: targetIdentities,
LocalIdentity: h.clientOpts.ClientIdentity,
TargetName: h.clientOpts.TargetName,
RpcVersions: h.clientOpts.RPCVersions,
},
},
}
conn, result, err := h.doHandshake(req)
if err != nil {
return nil, nil, err
}
authInfo := authinfo.New(result)
return conn, authInfo, nil
}
// ServerHandshake starts and completes a server ALTS handshaking for GCP. Once
// done, ServerHandshake returns a secure connection.
func (h *altsHandshaker) ServerHandshake(ctx context.Context) (net.Conn, credentials.AuthInfo, error) {
if !acquire(1) {
return nil, nil, errDropped
}
defer release(1)
if h.side != core.ServerSide {
return nil, nil, errors.New("only handshakers created using NewServerHandshaker can perform a server handshake")
}
p := make([]byte, frameLimit)
n, err := h.conn.Read(p)
if err != nil {
return nil, nil, err
}
// Prepare server parameters.
// TODO: currently only ALTS parameters are provided. Might need to use
// more options in the future.
params := make(map[int32]*altspb.ServerHandshakeParameters)
params[int32(altspb.HandshakeProtocol_ALTS)] = &altspb.ServerHandshakeParameters{
RecordProtocols: recordProtocols,
}
req := &altspb.HandshakerReq{
ReqOneof: &altspb.HandshakerReq_ServerStart{
ServerStart: &altspb.StartServerHandshakeReq{
ApplicationProtocols: appProtocols,
HandshakeParameters: params,
InBytes: p[:n],
RpcVersions: h.serverOpts.RPCVersions,
},
},
}
conn, result, err := h.doHandshake(req)
if err != nil {
return nil, nil, err
}
authInfo := authinfo.New(result)
return conn, authInfo, nil
}
func (h *altsHandshaker) doHandshake(req *altspb.HandshakerReq) (net.Conn, *altspb.HandshakerResult, error) {
resp, err := h.accessHandshakerService(req)
if err != nil {
return nil, nil, err
}
// Check if the returned status is an error.
if resp.GetStatus() != nil {
if got, want := resp.GetStatus().Code, uint32(codes.OK); got != want {
return nil, nil, fmt.Errorf("%v", resp.GetStatus().Details)
}
}
var extra []byte
if req.GetServerStart() != nil {
extra = req.GetServerStart().GetInBytes()[resp.GetBytesConsumed():]
}
result, extra, err := h.processUntilDone(resp, extra)
if err != nil {
return nil, nil, err
}
// The handshaker returns a 128-byte key. It should be truncated based
// on the returned record protocol.
keyLen, ok := keyLength[result.RecordProtocol]
if !ok {
return nil, nil, fmt.Errorf("unknown resulted record protocol %v", result.RecordProtocol)
}
sc, err := conn.NewConn(h.conn, h.side, result.GetRecordProtocol(), result.KeyData[:keyLen], extra)
if err != nil {
return nil, nil, err
}
return sc, result, nil
}
func (h *altsHandshaker) accessHandshakerService(req *altspb.HandshakerReq) (*altspb.HandshakerResp, error) {
if err := h.stream.Send(req); err != nil {
return nil, err
}
resp, err := h.stream.Recv()
if err != nil {
return nil, err
}
return resp, nil
}
// processUntilDone processes the handshake until the handshaker service returns
// the results. The handshaker service takes care of frame parsing, so we read
// whatever is received from the network and send it to the handshaker service.
func (h *altsHandshaker) processUntilDone(resp *altspb.HandshakerResp, extra []byte) (*altspb.HandshakerResult, []byte, error) {
for {
if len(resp.OutFrames) > 0 {
if _, err := h.conn.Write(resp.OutFrames); err != nil {
return nil, nil, err
}
}
if resp.Result != nil {
return resp.Result, extra, nil
}
buf := make([]byte, frameLimit)
n, err := h.conn.Read(buf)
if err != nil && err != io.EOF {
return nil, nil, err
}
// If there is nothing to send to the handshaker service, and
// nothing is received from the peer, then we are stuck.
// This covers the case when the peer is not responding. Note
// that handshaker service connection issues are caught in
// accessHandshakerService before we even get here.
if len(resp.OutFrames) == 0 && n == 0 {
return nil, nil, core.PeerNotRespondingError
}
// Append extra bytes from the previous interaction with the
// handshaker service with the current buffer read from conn.
p := append(extra, buf[:n]...)
resp, err = h.accessHandshakerService(&altspb.HandshakerReq{
ReqOneof: &altspb.HandshakerReq_Next{
Next: &altspb.NextHandshakeMessageReq{
InBytes: p,
},
},
})
if err != nil {
return nil, nil, err
}
// Set extra based on handshaker service response.
if n == 0 {
extra = nil
} else {
extra = buf[resp.GetBytesConsumed():n]
}
}
}
// Close terminates the Handshaker. It should be called when the caller obtains
// the secure connection.
func (h *altsHandshaker) Close() {
h.stream.CloseSend()
}
| DefaultServerHandshakerOptions |
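// Usage sketch (illustrative, not part of the original file): wiring a client
// handshake over an existing TCP connection. The handshaker service address,
// dial options, and the `ctx`/`tcpConn` values below are assumptions made for
// demonstration only.
//
//	hsConn, err := grpc.Dial("handshaker.local:8080", grpc.WithInsecure())
//	if err != nil { /* handle dial error */ }
//	hs, err := NewClientHandshaker(ctx, hsConn, tcpConn, DefaultClientHandshakerOptions())
//	if err != nil { /* handle creation error */ }
//	defer hs.Close()
//	secureConn, authInfo, err := hs.ClientHandshake(ctx)
//	_, _, _ = secureConn, authInfo, err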
modules1.rs | // modules1.rs | mod sausage_factory {
// Don't let anybody outside of this module see this!
fn get_secret_recipe() -> String {
String::from("Ginger")
}
pub fn make_sausage() {
get_secret_recipe();
println!("sausage!");
}
}
fn main() {
sausage_factory::make_sausage();
} | // Make me compile! Execute `rustlings hint modules1` for hints :)
// I AM NOT DONE
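// Note on the code above: `make_sausage` is declared `pub`, so `main` can call
// `sausage_factory::make_sausage()`, while `get_secret_recipe` stays private to
// the module; this visibility rule is the point of the exercise.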
|
bankless_cli.rs | use bankless_primitives::DEFAULT_UNIT_CREATION_DELAY;
use finality_bankless::UnitCreationDelay;
use structopt::StructOpt;
#[derive(Debug, StructOpt, Clone)]
pub struct | {
#[structopt(long)]
pub unit_creation_delay: Option<u64>,
}
impl BanklessCli {
pub fn unit_creation_delay(&self) -> UnitCreationDelay {
UnitCreationDelay(
self.unit_creation_delay
.unwrap_or(DEFAULT_UNIT_CREATION_DELAY),
)
}
}
| BanklessCli |
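// Usage sketch (illustrative, not part of the original file): `BanklessCli`
// derives `StructOpt`, so it can be parsed straight from the process arguments;
// `unit_creation_delay()` then falls back to DEFAULT_UNIT_CREATION_DELAY when
// the flag is absent.
//
// let cli = BanklessCli::from_args(); // e.g. invoked with `--unit-creation-delay 500`
// let delay: UnitCreationDelay = cli.unit_creation_delay();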
devicemetric.py | # -*- coding: utf-8 -*-
"""
Profile: http://hl7.org/fhir/StructureDefinition/DeviceMetric
Release: STU3
Version: 3.0.2
Revision: 11917
Last updated: 2019-10-24T11:53:00+11:00
"""
import sys
from . import backboneelement, domainresource
class DeviceMetric(domainresource.DomainResource):
""" Measurement, calculation or setting capability of a medical device.
Describes a measurement, calculation or setting capability of a medical
device.
"""
resource_type = "DeviceMetric"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.calibration = None
""" Describes the calibrations that have been performed or that are
required to be performed.
List of `DeviceMetricCalibration` items (represented as `dict` in JSON). """
self.category = None
""" measurement | setting | calculation | unspecified.
Type `str`. """
self.color = None
""" black | red | green | yellow | blue | magenta | cyan | white.
Type `str`. """
self.identifier = None
""" Unique identifier of this DeviceMetric.
Type `Identifier` (represented as `dict` in JSON). """
self.measurementPeriod = None
""" Describes the measurement repetition time.
Type `Timing` (represented as `dict` in JSON). """
self.operationalStatus = None
""" on | off | standby | entered-in-error.
Type `str`. """
self.parent = None
""" Describes the link to the parent DeviceComponent.
Type `FHIRReference` referencing `['DeviceComponent']` (represented as `dict` in JSON). """
self.source = None
""" Describes the link to the source Device.
Type `FHIRReference` referencing `['Device']` (represented as `dict` in JSON). """
self.type = None
""" Identity of metric, for example Heart Rate or PEEP Setting.
Type `CodeableConcept` (represented as `dict` in JSON). """
self.unit = None
""" Unit of Measure for the Metric.
Type `CodeableConcept` (represented as `dict` in JSON). """
super(DeviceMetric, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DeviceMetric, self).elementProperties()
js.extend(
[
(
"calibration",
"calibration",
DeviceMetricCalibration,
"DeviceMetricCalibration",
True,
None,
False,
),
("category", "category", str, "code", False, None, True),
("color", "color", str, "code", False, None, False),
(
"identifier",
"identifier",
identifier.Identifier,
"Identifier",
False,
None,
True,
),
(
"measurementPeriod",
"measurementPeriod",
timing.Timing,
"Timing",
False,
None,
False,
),
(
"operationalStatus",
"operationalStatus",
str,
"code",
False,
None,
False,
),
(
"parent",
"parent",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
(
"source",
"source",
fhirreference.FHIRReference,
"Reference",
False,
None,
False,
),
(
"type",
"type",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
True,
),
(
"unit",
"unit",
codeableconcept.CodeableConcept,
"CodeableConcept",
False,
None,
False,
),
]
)
return js
class DeviceMetricCalibration(backboneelement.BackboneElement):
|
try:
from . import codeableconcept
except ImportError:
codeableconcept = sys.modules[__package__ + ".codeableconcept"]
try:
from . import fhirdate
except ImportError:
fhirdate = sys.modules[__package__ + ".fhirdate"]
try:
from . import fhirreference
except ImportError:
fhirreference = sys.modules[__package__ + ".fhirreference"]
try:
from . import identifier
except ImportError:
identifier = sys.modules[__package__ + ".identifier"]
try:
from . import timing
except ImportError:
timing = sys.modules[__package__ + ".timing"]
| """ Describes the calibrations that have been performed or that are required to
be performed.
"""
resource_type = "DeviceMetricCalibration"
def __init__(self, jsondict=None, strict=True):
""" Initialize all valid properties.
:raises: FHIRValidationError on validation errors, unless strict is False
:param dict jsondict: A JSON dictionary to use for initialization
:param bool strict: If True (the default), invalid variables will raise a TypeError
"""
self.state = None
""" not-calibrated | calibration-required | calibrated | unspecified.
Type `str`. """
self.time = None
""" Describes the time last calibration has been performed.
Type `FHIRDate` (represented as `str` in JSON). """
self.type = None
""" unspecified | offset | gain | two-point.
Type `str`. """
super(DeviceMetricCalibration, self).__init__(jsondict=jsondict, strict=strict)
def elementProperties(self):
js = super(DeviceMetricCalibration, self).elementProperties()
js.extend(
[
("state", "state", str, "code", False, None, False),
("time", "time", fhirdate.FHIRDate, "instant", False, None, False),
("type", "type", str, "code", False, None, False),
]
)
return js |
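# Usage sketch (illustrative, not part of the original file): constructing a
# DeviceMetric from a minimal JSON dictionary. The field values below are
# invented examples; the field names follow the FHIR STU3 definitions above.
#
# metric = DeviceMetric(jsondict={
#     "resourceType": "DeviceMetric",
#     "category": "measurement",
#     "identifier": {"value": "metric-001"},
#     "type": {"text": "Heart Rate"},
# })
# assert metric.category == "measurement"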
qrcodewidget.py | from PyQt4.QtGui import *
from PyQt4.QtCore import *
import PyQt4.QtGui as QtGui
import os
import qrcode
import electrum_ltc
from electrum_ltc.i18n import _
from util import WindowModalDialog
class QRCodeWidget(QWidget):
def __init__(self, data = None, fixedSize=False):
QWidget.__init__(self)
self.data = None
self.qr = None
self.fixedSize=fixedSize
if fixedSize:
self.setFixedSize(fixedSize, fixedSize)
self.setData(data)
def setData(self, data):
if self.data != data:
self.data = data
if self.data:
self.qr = qrcode.QRCode()
self.qr.add_data(self.data)
if not self.fixedSize:
k = len(self.qr.get_matrix())
self.setMinimumSize(k*5,k*5)
else:
self.qr = None
self.update()
def paintEvent(self, e):
if not self.data:
return
black = QColor(0, 0, 0, 255)
white = QColor(255, 255, 255, 255)
if not self.qr:
qp = QtGui.QPainter()
qp.begin(self)
qp.setBrush(white)
qp.setPen(white)
r = qp.viewport()
qp.drawRect(0, 0, r.width(), r.height())
qp.end()
return
matrix = self.qr.get_matrix()
k = len(matrix)
qp = QtGui.QPainter()
qp.begin(self)
r = qp.viewport()
margin = 10
framesize = min(r.width(), r.height())
boxsize = int( (framesize - 2*margin)/k )
size = k*boxsize
left = (r.width() - size)/2
top = (r.height() - size)/2
# Make a white margin around the QR in case of dark theme use
qp.setBrush(white)
qp.setPen(white)
qp.drawRect(left-margin, top-margin, size+(margin*2), size+(margin*2))
qp.setBrush(black)
qp.setPen(black)
for r in range(k):
for c in range(k):
if matrix[r][c]:
qp.drawRect(left+c*boxsize, top+r*boxsize, boxsize - 1, boxsize - 1)
qp.end()
class QRDialog(WindowModalDialog):
def __init__(self, data, parent=None, title = "", show_text=False):
WindowModalDialog.__init__(self, parent, title)
vbox = QVBoxLayout()
qrw = QRCodeWidget(data)
vbox.addWidget(qrw, 1)
if show_text:
text = QTextEdit()
text.setText(data)
text.setReadOnly(True)
vbox.addWidget(text)
hbox = QHBoxLayout()
hbox.addStretch(1)
config = electrum_ltc.get_config()
if config:
filename = os.path.join(config.path, "qrcode.png")
def | ():
p = QPixmap.grabWindow(qrw.winId())
p.save(filename, 'png')
self.show_message(_("QR code saved to file") + " " + filename)
def copy_to_clipboard():
p = QPixmap.grabWindow(qrw.winId())
p.save(filename, 'png')
QApplication.clipboard().setImage(QImage(filename))
self.show_message(_("QR code copied to clipboard"))
b = QPushButton(_("Copy"))
hbox.addWidget(b)
b.clicked.connect(copy_to_clipboard)
b = QPushButton(_("Save"))
hbox.addWidget(b)
b.clicked.connect(print_qr)
b = QPushButton(_("Close"))
hbox.addWidget(b)
b.clicked.connect(self.accept)
b.setDefault(True)
vbox.addLayout(hbox)
self.setLayout(vbox)
| print_qr |
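# Usage sketch (illustrative, not part of the original file): rendering
# arbitrary text as a QR code. A running QApplication is required, as for any
# Qt widget; the payload string below is an invented example.
#
# app = QApplication([])
# dialog = QRDialog("litecoin:EXAMPLE-ADDRESS", title="Receive", show_text=True)
# dialog.exec_()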
convert.py | # Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a frozen graph into a TFLite FlatBuffer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import enum # pylint: disable=g-bad-import-order
import os as _os
import platform as _platform
import subprocess as _subprocess
import tempfile as _tempfile
import six
from six.moves import map
from tensorflow.lite.python import lite_constants
from tensorflow.lite.python import util
from tensorflow.lite.python import wrap_toco
from tensorflow.lite.toco import model_flags_pb2 as _model_flags_pb2
from tensorflow.lite.toco import toco_flags_pb2 as _toco_flags_pb2
from tensorflow.lite.toco import types_pb2 as _types_pb2
from tensorflow.python.platform import resource_loader as _resource_loader
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export as _tf_export
# Find the toco_from_protos binary using the resource loader if using from
# bazel, otherwise we are in a pip where console_scripts already has
# the toco_from_protos tool.
if lite_constants.EXPERIMENTAL_USE_TOCO_API_DIRECTLY:
_toco_from_proto_bin = ""
else:
_toco_from_proto_bin = _resource_loader.get_path_to_datafile(
"../toco/python/toco_from_protos")
if _toco_from_proto_bin and not _os.path.exists(_toco_from_proto_bin):
_toco_from_proto_bin = "toco_from_protos"
def _try_convert_to_unicode(output):
if output is None:
return u""
if isinstance(output, bytes):
try:
return six.ensure_text(output)
except UnicodeDecodeError:
pass
return output
@_tf_export("lite.OpsSet")
class OpsSet(enum.Enum):
"""Enum class defining the sets of ops available to generate TFLite models.
WARNING: Experimental interface, subject to change.
"""
# Convert model using TensorFlow Lite builtin ops.
TFLITE_BUILTINS = "TFLITE_BUILTINS"
# Convert model using TensorFlow ops. Not all TensorFlow ops are available.
# WARNING: Experimental interface, subject to change.
SELECT_TF_OPS = "SELECT_TF_OPS"
# Convert model using only TensorFlow Lite quantized int8 operations.
# Specifying this will throw an error for operations that do not yet have
# quantized implementations.
TFLITE_BUILTINS_INT8 = "TFLITE_BUILTINS_INT8"
def __str__(self):
return self.value
@staticmethod
def get_options():
"""Returns a list of OpsSet options as a list of strings."""
return [str(option) for option in list(OpsSet)]
class ConverterError(Exception):
"""Raised when an error occurs during model conversion."""
pass
def toco_convert_protos(model_flags_str,
toco_flags_str,
input_data_str,
debug_info_str=None,
enable_mlir_converter=False):
"""Convert `input_data_str` according to model and toco parameters.
Unless you know what you are doing consider using
the more friendly `tf.compat.v1.lite.toco_convert`.
Args:
model_flags_str: Serialized proto describing model properties, see
`toco/model_flags.proto`.
toco_flags_str: Serialized proto describing conversion properties, see
`toco/toco_flags.proto`.
input_data_str: Input data in serialized form (e.g. a graphdef is common)
debug_info_str: Serialized `GraphDebugInfo` proto describing logging
information. (default None)
enable_mlir_converter: Enables MLIR-based conversion instead of the default
TOCO conversion. (default False)
Returns:
Converted model in serialized form (e.g. a TFLITE model is common).
Raises:
ConverterError: When conversion fails in TFLiteConverter, usually due to
ops not being supported.
RuntimeError: When conversion fails, an exception is raised with the error
message embedded.
"""
# TODO(aselle): When toco does not use fatal errors for failure, we can
# switch this on.
if not _toco_from_proto_bin:
try:
model_str = wrap_toco.wrapped_toco_convert(model_flags_str,
toco_flags_str, input_data_str,
debug_info_str,
enable_mlir_converter)
return model_str
except Exception as e:
raise ConverterError(str(e))
# Windows and TemporaryFile are not that useful together,
# since you cannot have two readers/writers. So we have to
# make the temporaries and close and delete them explicitly.
toco_filename, model_filename, input_filename, output_filename = (
None, None, None, None)
try:
# Build all input files
with _tempfile.NamedTemporaryFile(delete=False) as fp_toco, \
_tempfile.NamedTemporaryFile(delete=False) as fp_model, \
_tempfile.NamedTemporaryFile(delete=False) as fp_input, \
_tempfile.NamedTemporaryFile(delete=False) as fp_debug:
toco_filename = fp_toco.name
input_filename = fp_input.name
model_filename = fp_model.name
debug_filename = fp_debug.name
fp_model.write(model_flags_str)
fp_toco.write(toco_flags_str)
fp_input.write(six.ensure_binary(input_data_str))
debug_info_str = debug_info_str if debug_info_str else ""
# if debug_info_str contains a "string value", then the call to
# fp_debug.write(debug_info_str) will fail with the following error
#
# TypeError: a bytes-like object is required, not 'str'
#
# Some of the subtests within the "convert_test" unit-test fail
# with the error shown above. So watch out for that scenario and
# convert debug_info_str to bytes where needed
if not isinstance(debug_info_str, bytes):
fp_debug.write(debug_info_str.encode("utf-8"))
else:
fp_debug.write(debug_info_str)
# Reserve an output file
with _tempfile.NamedTemporaryFile(delete=False) as fp:
output_filename = fp.name
# Run
cmd = [
_toco_from_proto_bin,
model_filename,
toco_filename,
input_filename,
output_filename,
"--debug_proto_file={}".format(debug_filename),
]
if enable_mlir_converter:
cmd.append("--enable_mlir_converter")
cmdline = " ".join(cmd)
is_windows = _platform.system() == "Windows"
proc = _subprocess.Popen(
cmdline,
shell=True,
stdout=_subprocess.PIPE,
stderr=_subprocess.STDOUT,
close_fds=not is_windows)
stdout, stderr = proc.communicate()
exitcode = proc.returncode
if exitcode == 0:
with open(output_filename, "rb") as fp:
return fp.read()
else:
stdout = _try_convert_to_unicode(stdout)
stderr = _try_convert_to_unicode(stderr)
raise ConverterError("See console for info.\n%s\n%s\n" % (stdout, stderr))
finally:
# Must manually cleanup files.
for filename in [
toco_filename, input_filename, model_filename, output_filename]:
try:
_os.unlink(filename)
except (OSError, TypeError):
pass
def build_toco_convert_protos(input_tensors,
output_tensors,
inference_type=lite_constants.FLOAT,
inference_input_type=None,
input_format=lite_constants.TENSORFLOW_GRAPHDEF,
input_shapes=None,
output_format=lite_constants.TFLITE,
quantized_input_stats=None,
default_ranges_stats=None,
drop_control_dependency=True,
reorder_across_fake_quant=False,
allow_custom_ops=False,
custom_opdefs=None,
change_concat_input_ranges=False,
post_training_quantize=False,
quantize_to_float16=False,
dump_graphviz_dir=None,
dump_graphviz_video=False,
target_ops=None,
allow_nonexistent_arrays=False,
debug_info=None,
conversion_summary_dir=None):
"""Builds protocol buffers describing a conversion of a model using TOCO.
Typically this is to convert from TensorFlow GraphDef to TFLite, in which
case the default `input_format` and `output_format` are sufficient.
Args:
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
inference_type: Target data type of real-number arrays in the output file.
Must be `{tf.float32, tf.uint8}`. (default tf.float32)
inference_input_type: Target data type of real-number input arrays. Allows
for a different type for input arrays in the case of quantization.
Must be `{tf.float32, tf.uint8}`. (default `inference_type`)
input_format: Type of data to read. Currently must be
`{TENSORFLOW_GRAPHDEF}`. (default TENSORFLOW_GRAPHDEF)
input_shapes: Input array shape. It needs to be a list of the same length
as `input_tensors`, or None. (default None)
output_format: Output file format. Currently must be `{TFLITE,
GRAPHVIZ_DOT}`. (default TFLITE)
quantized_input_stats: List of tuples of floats representing the mean and
standard deviation. Each tuple maps to the corresponding input tensor.
Only need if `inference_input_type` is `QUANTIZED_UINT8`.
real_input_value = (quantized_input_value - mean_value) / std_dev_value.
(default None)
default_ranges_stats: Tuple of integers representing (min, max) range values
for all arrays without a specified range. Intended for experimenting with
quantization via "dummy quantization". (default None)
drop_control_dependency: Boolean indicating whether to drop control
dependencies silently. This is due to TFLite not supporting control
dependencies. (default True)
reorder_across_fake_quant: Boolean indicating whether to reorder FakeQuant
nodes in unexpected locations. Used when the location of the FakeQuant
nodes is preventing graph transformations necessary to convert the graph.
Results in a graph that differs from the quantized training graph,
potentially causing differing arithmetic behavior. (default False)
allow_custom_ops: Boolean indicating whether to allow custom operations.
When false any unknown operation is an error. When true, custom ops are
created for any op that is unknown. The developer will need to provide
these to the TensorFlow Lite runtime with a custom resolver.
(default False)
custom_opdefs: List of strings representing custom ops OpDefs that are
included in the GraphDef. Required when using custom operations with the
MLIR-based converter. (default None)
change_concat_input_ranges: Boolean to change behavior of min/max ranges for
inputs and outputs of the concat operator for quantized models. Changes
the ranges of concat operator overlap when true. (default False)
post_training_quantize: Boolean indicating whether to quantize the weights
of the converted float model. Model size will be reduced and there will be
latency improvements (at the cost of accuracy).
(default False)
quantize_to_float16: Boolean indicating whether to convert float buffers
to float16. (default False)
dump_graphviz_dir: Full filepath of folder to dump the graphs at various
stages of processing GraphViz .dot files. Preferred over
--output_format=GRAPHVIZ_DOT in order to keep the requirements of the
output file. (default None)
dump_graphviz_video: Boolean indicating whether to dump the graph after
every graph transformation. (default False)
target_ops: Experimental flag, subject to change. Set of OpsSet
options indicating which converter to use.
(default set([OpsSet.TFLITE_BUILTINS]))
allow_nonexistent_arrays: Allow specifying array names that don't exist
or are unused in the final graph. (default False)
debug_info: `GraphDebugInfo` proto containing the stack traces for the
original nodes referred by the converted graph.
conversion_summary_dir: A string, the path to the generated conversion logs.
Returns:
model_flags, toco_flags, debug_info: three protocol buffers describing the
conversion process and debug information.
Raises:
ValueError:
If the input tensor type is unknown
Missing mean_values or std_dev_values
RuntimeError: If TOCO fails to convert (in which case the runtime error's
error text will contain the TOCO error log)
"""
toco = _toco_flags_pb2.TocoFlags()
toco.input_format = input_format
toco.output_format = output_format
toco.inference_type = util.convert_dtype_to_tflite_type(inference_type)
if inference_input_type:
toco.inference_input_type = util.convert_dtype_to_tflite_type(
inference_input_type)
else:
toco.inference_input_type = toco.inference_type
toco.drop_control_dependency = drop_control_dependency
toco.reorder_across_fake_quant = reorder_across_fake_quant
toco.allow_custom_ops = allow_custom_ops
if custom_opdefs:
toco.custom_opdefs.extend(custom_opdefs)
toco.post_training_quantize = post_training_quantize
toco.quantize_to_float16 = quantize_to_float16
if default_ranges_stats:
toco.default_ranges_min = default_ranges_stats[0]
toco.default_ranges_max = default_ranges_stats[1]
if dump_graphviz_dir:
toco.dump_graphviz_dir = dump_graphviz_dir
toco.dump_graphviz_include_video = dump_graphviz_video
if conversion_summary_dir:
toco.conversion_summary_dir = conversion_summary_dir
if target_ops:
if set(target_ops) == set([OpsSet.TFLITE_BUILTINS, OpsSet.SELECT_TF_OPS]):
toco.enable_select_tf_ops = True
elif set(target_ops) == set([OpsSet.SELECT_TF_OPS]):
toco.enable_select_tf_ops = True
toco.force_select_tf_ops = True
model = _model_flags_pb2.ModelFlags()
model.change_concat_input_ranges = change_concat_input_ranges
for idx, input_tensor in enumerate(input_tensors):
input_array = model.input_arrays.add()
input_array.name = util.get_tensor_name(input_tensor)
input_array.data_type = util.convert_dtype_to_tflite_type(
input_tensor.dtype)
if toco.inference_input_type in \
[_types_pb2.QUANTIZED_UINT8, _types_pb2.INT8]:
if not quantized_input_stats:
raise ValueError("std_dev and mean must be defined when "
"inference_input_type is QUANTIZED_UINT8.")
input_array.mean_value, input_array.std_value = quantized_input_stats[idx]
if input_shapes is None:
shape = input_tensor.shape
else:
shape = input_shapes[idx]
input_array.shape.dims.extend(list(map(int, shape)))
for output_tensor in output_tensors:
model.output_arrays.append(util.get_tensor_name(output_tensor))
model.allow_nonexistent_arrays = allow_nonexistent_arrays
return model, toco, debug_info
def toco_convert_graph_def(input_data, input_arrays_with_shape, output_arrays,
enable_mlir_converter, *args, **kwargs):
""""Convert a model using TOCO.
This function is used to convert GraphDefs that cannot be loaded into
TensorFlow to TFLite. Conversion can be customized by providing arguments
that are forwarded to `build_toco_convert_protos` (see documentation for
details).
Args:
input_data: Input data (i.e. often `sess.graph_def`),
input_arrays_with_shape: Tuple of strings representing input tensor names
and list of integers representing input shapes
(e.g., [("foo" : [1, 16, 16, 3])]). Use only when graph cannot be loaded
into TensorFlow and when `input_tensors` is None. (default None)
output_arrays: List of output tensors to freeze graph with. Use only when
graph cannot be loaded into TensorFlow and when `output_tensors` is None.
(default None)
enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
conversion.
*args: See `build_toco_convert_protos`,
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
model_flags, toco_flags, _ = build_toco_convert_protos(
input_tensors=[], output_tensors=[], *args, **kwargs)
for idx, (name, shape) in enumerate(input_arrays_with_shape):
input_array = model_flags.input_arrays.add()
if toco_flags.inference_input_type == _types_pb2.QUANTIZED_UINT8:
if (("quantized_input_stats" not in kwargs) or
(not kwargs["quantized_input_stats"])):
raise ValueError("std_dev and mean must be defined when "
"inference_input_type is QUANTIZED_UINT8.")
input_array.mean_value, input_array.std_value = kwargs[
"quantized_input_stats"][idx]
input_array.name = name
input_array.shape.dims.extend(list(map(int, shape)))
for name in output_arrays:
model_flags.output_arrays.append(name)
data = toco_convert_protos(
model_flags.SerializeToString(),
toco_flags.SerializeToString(),
input_data.SerializeToString(),
enable_mlir_converter=enable_mlir_converter)
return data
def toco_convert_impl(input_data, input_tensors, output_tensors,
enable_mlir_converter, *args, **kwargs):
""""Convert a model using TOCO.
Typically this function is used to convert from TensorFlow GraphDef to TFLite.
Conversion can be customized by providing arguments that are forwarded to
`build_toco_convert_protos` (see documentation for details).
Args:
input_data: Input data (i.e. often `sess.graph_def`),
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
enable_mlir_converter: Enables MLIR-based conversion instead of TOCO
conversion.
*args: See `build_toco_convert_protos`,
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
model_flags, toco_flags, debug_info = build_toco_convert_protos(
input_tensors, output_tensors, *args, **kwargs)
debug_info_str = debug_info.SerializeToString() if debug_info else None
data = toco_convert_protos(
model_flags.SerializeToString(), | debug_info_str=debug_info_str,
enable_mlir_converter=enable_mlir_converter)
return data
@_tf_export(v1=["lite.toco_convert"])
@deprecation.deprecated(None, "Use `lite.TFLiteConverter` instead.")
def toco_convert(input_data, input_tensors, output_tensors, *args, **kwargs):
"""Convert a model using TOCO.
Typically this function is used to convert from TensorFlow GraphDef to TFLite.
Conversion can be customized by providing arguments that are forwarded to
`build_toco_convert_protos` (see documentation for details). This function has
been deprecated. Please use `lite.TFLiteConverter` instead.
Args:
input_data: Input data (i.e. often `sess.graph_def`),
input_tensors: List of input tensors. Type and shape are computed using
`foo.shape` and `foo.dtype`.
output_tensors: List of output tensors (only .name is used from this).
*args: See `build_toco_convert_protos`,
**kwargs: See `build_toco_convert_protos`.
Returns:
The converted data. For example if TFLite was the destination, then
this will be a tflite flatbuffer in a bytes array.
Raises:
Defined in `build_toco_convert_protos`.
"""
enable_mlir_converter = kwargs.get("enable_mlir_converter", False)
return toco_convert_impl(input_data, input_tensors, output_tensors,
enable_mlir_converter, *args, **kwargs) | toco_flags.SerializeToString(),
input_data.SerializeToString(), |
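# Usage sketch (illustrative, not part of the original file; TF 1.x graph
# mode): converting a trivial identity graph with the deprecated
# `toco_convert` wrapper defined above.
#
# import tensorflow.compat.v1 as tf
# tf.disable_eager_execution()
# with tf.Session() as sess:
#     inp = tf.placeholder(tf.float32, shape=[1, 4], name="input")
#     out = tf.identity(inp, name="output")
#     tflite_model = toco_convert(sess.graph_def, [inp], [out])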
market_data.rs | use crate::models::{AssetKind, Currency, Request};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct GetBookSummaryByCurrencyRequest {
pub currency: Currency,
#[serde(skip_serializing_if = "Option::is_none")]
pub kind: Option<AssetKind>,
}
impl GetBookSummaryByCurrencyRequest {
pub fn all(currency: Currency) -> Self {
Self {
currency,
kind: None,
}
}
pub fn futures(currency: Currency) -> Self {
Self {
currency,
kind: Some(AssetKind::Future),
}
}
pub fn options(currency: Currency) -> Self {
Self {
currency,
kind: Some(AssetKind::Option),
}
}
}
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct GetBookSummaryByCurrencyResponse {
pub ask_price: Option<f64>,
pub base_currency: Currency,
pub bid_price: Option<f64>,
pub creation_timestamp: u64,
pub current_funding: Option<f64>,
pub estimated_delivery_price: Option<f64>,
pub funding_8h: Option<f64>,
pub high: Option<f64>,
pub instrument_name: String,
pub interest_rate: Option<f64>,
pub last: Option<f64>,
pub low: Option<f64>,
pub mark_price: f64,
pub mid_price: Option<f64>,
pub open_interest: f64,
pub quote_currency: Currency,
pub underlying_index: Option<String>,
pub underlying_price: Option<f64>,
pub volume: f64,
pub volume_usd: Option<f64>,
}
impl Request for GetBookSummaryByCurrencyRequest {
const METHOD: &'static str = "public/get_book_summary_by_currency";
type Response = Vec<GetBookSummaryByCurrencyResponse>;
}
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct GetIndexRequest {
pub currency: Currency,
}
impl GetIndexRequest {
pub fn new(currency: Currency) -> Self {
Self { currency }
}
}
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct GetIndexResponse {
pub edp: f64,
#[serde(flatten)]
pub indices: HashMap<Currency, f64>,
}
impl Request for GetIndexRequest {
const METHOD: &'static str = "public/get_index";
type Response = GetIndexResponse;
}
#[derive(Deserialize, Serialize, Clone, Debug, Default)]
pub struct GetInstrumentsRequest {
pub currency: Currency,
#[serde(skip_serializing_if = "Option::is_none")]
pub kind: Option<AssetKind>,
#[serde(skip_serializing_if = "Option::is_none")]
pub expired: Option<bool>,
}
impl GetInstrumentsRequest {
pub fn new(currency: Currency) -> Self {
Self {
currency,
..Default::default()
}
}
pub fn expired(currency: Currency) -> Self |
pub fn futures(currency: Currency) -> Self {
Self::with_kind(currency, AssetKind::Future)
}
pub fn options(currency: Currency) -> Self {
Self::with_kind(currency, AssetKind::Option)
}
pub fn with_kind(currency: Currency, kind: AssetKind) -> Self {
Self {
currency,
kind: Some(kind),
..Default::default()
}
}
}
#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct GetInstrumentsResponse {
pub base_currency: String,
pub contract_size: f64,
pub creation_timestamp: u64,
pub expiration_timestamp: u64,
pub instrument_name: String,
pub is_active: bool,
pub kind: AssetKind,
pub min_trade_amount: f64,
pub option_type: Option<String>,
pub quote_currency: Option<Currency>,
pub settlement_period: String,
pub strike: Option<f64>,
pub tick_size: f64,
}
impl Request for GetInstrumentsRequest {
const METHOD: &'static str = "public/get_instruments";
type Response = Vec<GetInstrumentsResponse>;
}
| {
Self {
currency,
expired: Some(true),
..Default::default()
}
} |
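// Usage sketch (illustrative, not part of the original file): building typed
// requests. Only construction is shown because dispatch depends on a client
// defined elsewhere in this crate; a `Currency::BTC` variant is assumed to
// exist in `crate::models`.
//
// let all_btc = GetBookSummaryByCurrencyRequest::all(Currency::BTC);
// let btc_futures = GetInstrumentsRequest::futures(Currency::BTC);
// let expired_btc = GetInstrumentsRequest::expired(Currency::BTC);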
ChildTargetManager.js | let _lastAnonymousTargetId=0;export default class ChildTargetManager extends SDK.SDKModel{constructor(parentTarget){super(parentTarget);this._targetManager=parentTarget.targetManager();this._parentTarget=parentTarget;this._targetAgent=parentTarget.targetAgent();this._targetInfos=new Map();this._childTargets=new Map();this._parallelConnections=new Map();this._parentTargetId=null;parentTarget.registerTargetDispatcher(this);this._targetAgent.invoke_setAutoAttach({autoAttach:true,waitForDebuggerOnStart:true,flatten:true});if(!parentTarget.parentTarget()&&!Host.isUnderTest()){this._targetAgent.setDiscoverTargets(true);this._targetAgent.setRemoteLocations([{host:'localhost',port:9229}]);}}
static install(attachCallback){SDK.ChildTargetManager._attachCallback=attachCallback;SDK.SDKModel.register(SDK.ChildTargetManager,SDK.Target.Capability.Target,true);}
suspendModel(){return this._targetAgent.invoke_setAutoAttach({autoAttach:true,waitForDebuggerOnStart:false,flatten:true});}
resumeModel(){return this._targetAgent.invoke_setAutoAttach({autoAttach:true,waitForDebuggerOnStart:true,flatten:true});}
dispose(){for(const sessionId of this._childTargets.keys()){this.detachedFromTarget(sessionId,undefined);}}
targetCreated(targetInfo){this._targetInfos.set(targetInfo.targetId,targetInfo);this._fireAvailableTargetsChanged();}
targetInfoChanged(targetInfo){this._targetInfos.set(targetInfo.targetId,targetInfo);this._fireAvailableTargetsChanged();}
targetDestroyed(targetId){this._targetInfos.delete(targetId);this._fireAvailableTargetsChanged();}
targetCrashed(targetId,status,errorCode){}
_fireAvailableTargetsChanged(){SDK.targetManager.dispatchEventToListeners(SDK.TargetManager.Events.AvailableTargetsChanged,this._targetInfos.valuesArray());}
async _getParentTargetId(){if(!this._parentTargetId){this._parentTargetId=(await this._parentTarget.targetAgent().getTargetInfo()).targetId;}
return this._parentTargetId;}
attachedToTarget(sessionId,targetInfo,waitingForDebugger){if(this._parentTargetId===targetInfo.targetId){return;}
let targetName='';if(targetInfo.type==='worker'&&targetInfo.title&&targetInfo.title!==targetInfo.url){targetName=targetInfo.title;}else if(targetInfo.type!=='iframe'){const parsedURL=targetInfo.url.asParsedURL();targetName=parsedURL?parsedURL.lastPathComponentWithFragment():'#'+(++_lastAnonymousTargetId);}
let type=SDK.Target.Type.Browser;if(targetInfo.type==='iframe'){type=SDK.Target.Type.Frame;}
else if(targetInfo.type==='page'){type=SDK.Target.Type.Frame;}else if(targetInfo.type==='worker'){type=SDK.Target.Type.Worker;}else if(targetInfo.type==='service_worker'){type=SDK.Target.Type.ServiceWorker;} | async createParallelConnection(onMessage){const targetId=await this._getParentTargetId();const{connection,sessionId}=await this._createParallelConnectionAndSessionForTarget(this._parentTarget,targetId);connection.setOnMessage(onMessage);this._parallelConnections.set(sessionId,connection);return connection;}
async _createParallelConnectionAndSessionForTarget(target,targetId){const targetAgent=target.targetAgent();const targetRouter=target.router();const sessionId=(await targetAgent.attachToTarget(targetId,true));const connection=new SDK.ParallelConnection(targetRouter.connection(),sessionId);targetRouter.registerSession(target,sessionId,connection);connection.setOnDisconnect(()=>{targetAgent.detachFromTarget(sessionId);targetRouter.unregisterSession(sessionId);});return{connection,sessionId};}}
self.SDK=self.SDK||{};SDK=SDK||{};SDK.ChildTargetManager=ChildTargetManager;SDK.ChildTargetManager._attachCallback; | const target=this._targetManager.createTarget(targetInfo.targetId,targetName,type,this._parentTarget,sessionId);this._childTargets.set(sessionId,target);if(SDK.ChildTargetManager._attachCallback){SDK.ChildTargetManager._attachCallback({target,waitingForDebugger}).then(()=>{target.runtimeAgent().runIfWaitingForDebugger();});}else{target.runtimeAgent().runIfWaitingForDebugger();}}
detachedFromTarget(sessionId,childTargetId){if(this._parallelConnections.has(sessionId)){this._parallelConnections.delete(sessionId);}else{this._childTargets.get(sessionId).dispose('target terminated');this._childTargets.delete(sessionId);}}
receivedMessageFromTarget(sessionId,message,childTargetId){} |
edge.rs | //! Definition of the Edge component.
use crate::prelude::*;
use crate::component::node;
use enso_frp as frp;
use enso_frp;
use ensogl::application::Application;
use ensogl::data::color;
use ensogl::display::scene::Scene;
use ensogl::display::shape::*;
use ensogl::display::traits::*;
use ensogl::display;
use ensogl::gui::component::ShapeViewEvents;
use ensogl_theme as theme;
use nalgebra::Rotation2;
// =================
// === Constants ===
// =================
const LINE_SHAPE_WIDTH : f32 = LINE_WIDTH + 2.0 * PADDING;
const LINE_SIDE_OVERLAP : f32 = 1.0;
const LINE_SIDES_OVERLAP : f32 = 2.0 * LINE_SIDE_OVERLAP;
const LINE_WIDTH : f32 = 4.0;
const ARROW_SIZE_X : f32 = 20.0;
const ARROW_SIZE_Y : f32 = 20.0;
const HOVER_EXTENSION : f32 = 10.0;
const MOUSE_OFFSET : f32 = 2.0;
// It was node::SHADOW_SIZE; Should be moved to theme manager and linked to node::shadow.
const NODE_PADDING : f32 = 10.0;
// The padding needs to be large enough to accommodate the extended hover area without clipping it.
const PADDING : f32 = 4.0 + HOVER_EXTENSION;
const RIGHT_ANGLE : f32 = std::f32::consts::PI / 2.0;
const INFINITE : f32 = 99999.0;
/// The threshold for the y-distance between nodes at which we switch from using the y-distance
/// only to determine the closest port to using the full cartesian distance.
const MIN_SOURCE_TARGET_DIFFERENCE_FOR_Y_VALUE_DISCRIMINATION : f32 = 45.0;
const HOVER_COLOR : color::Rgba = color::Rgba::new(1.0,0.0,0.0,0.000_001);
// ===================
// === Vector Math ===
// ===================
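/// Reference direction used by `point_rotation` below; as written, this is the
/// unit vector along the x-axis of the shape coordinate system.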
fn up() -> Vector2<f32> {
Vector2(1.0,0.0)
}
fn point_rotation(point:Vector2<f32>) -> Rotation2<f32> {
Rotation2::rotation_between(&point,&up())
}
// =================
// === EdgeShape ===
// =================
/// Abstraction for all sub-shapes the edge shape is built from.
trait EdgeShape : display::Object {
// === Info ===
fn id (&self) -> display::object::Id { self.display_object().id() }
fn events (&self) -> &ShapeViewEvents;
fn set_color(&self, color:color::Rgba);
fn set_color_focus(&self, color:color::Rgba);
// === Hover ===
/// Set the center of the shape split on this shape. The coordinates must be in the shape local
/// coordinate system.
fn set_focus_split_center_local(&self, center:Vector2<f32>);
/// Set the angle of the half plane that will be focused. Rotation starts with the plane
/// focusing the left half plane.
fn set_focus_split_angle(&self, angle:f32);
/// Set the focus split for this shape. The `split` indicates where the shape should be
/// split and how the split should be rotated.
fn set_focus_split(&self, split:FocusSplit) {
let angle = self.global_to_local_rotation(split.angle);
let center = self.global_to_local_position(split.position);
self.set_focus_split_angle(angle);
self.set_focus_split_center_local(center);
}
/// Focus the whole edge.
fn focus_none(&self) {
// Set the focus split in the top right corner and focus everything to the right of it.
self.set_focus_split_center_local(Vector2(INFINITE,INFINITE));
self.set_focus_split_angle(RIGHT_ANGLE);
}
/// Do not focus any part of the edge.
fn focus_all(&self) {
// Set the focus split in the top right corner and focus everything below it.
self.set_focus_split_center_local(Vector2(INFINITE,INFINITE));
self.set_focus_split_angle(2.0 * RIGHT_ANGLE);
}
// === Snapping ===
/// Snaps the provided point to the closest location on the shape.
fn snap_local(&self, point:Vector2<f32>) -> Option<Vector2<f32>>;
/// Snaps the provided point to the closest location on the shape.
fn snap(&self, point:Vector2<f32>) -> Option<Vector2<f32>> {
let local = self.global_to_local_position(point);
let local_snapped = self.snap_local(local)?;
Some(self.local_to_global_position(local_snapped))
}
// === Shape Analysis ===
/// Return the angle perpendicular to the shape at the point given in the shape's
/// local coordinate system. Defaults to zero if not implemented.
fn normal_local(&self, _point:Vector2<f32>) -> Rotation2<f32>;
/// Return the angle perpendicular to the shape at the given point.
fn normal(&self, point:Vector2<f32>) -> Rotation2<f32> {
let local = self.global_to_local_position(point);
self.normal_local(local)
}
// === Metrics ===
/// Convert the angle to the local coordinate system.
fn global_to_local_rotation(&self, angle:f32) -> f32 {
angle + self.display_object().rotation().z
}
/// Convert the global position to the local coordinate system.
fn global_to_local_position(&self, point:Vector2<f32>) -> Vector2<f32> {
let base_rotation = self.display_object().rotation().z;
let local_unrotated = point - self.display_object().global_position().xy();
Rotation2::new(-base_rotation) * local_unrotated
}
/// Convert the local position to the global coordinate system.
fn local_to_global_position(&self, point:Vector2<f32>) -> Vector2<f32> {
let base_rotation = self.display_object().rotation().z;
let local_unrotated = Rotation2::new(base_rotation) * point;
local_unrotated + self.display_object().global_position().xy()
}
}
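// Note: `global_to_local_position` and `local_to_global_position` above are
// inverses of each other. One applies `Rotation2::new(-base_rotation)` after
// subtracting the global position, the other applies `Rotation2::new(base_rotation)`
// before adding it back, so a round trip returns the original point up to
// floating-point error.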
// ====================
// === AnyEdgeShape ===
// ====================
/// The AnyEdgeShape trait allows operations on a collection of `EdgeShape`.
trait AnyEdgeShape {
/// Return references to all `EdgeShape`s in this `AnyEdgeShape`.
fn shapes(&self) -> Vec<&dyn EdgeShape>;
/// Connect the given `ShapeViewEventsProxy` to the mouse events of all sub-shapes.
fn register_proxy_frp(&self, network:&frp::Network, frp:&ShapeViewEventsProxy) {
for shape in &self.shapes() {
let event = shape.events();
let id = shape.id();
frp::extend! { network
eval_ event.mouse_down (frp.on_mouse_down.emit(id));
eval_ event.mouse_over (frp.on_mouse_over.emit(id));
eval_ event.mouse_out (frp.on_mouse_out.emit(id));
}
}
}
}
// =======================
// === Hover Extension ===
// =======================
/// Add an invisible hover area to the provided shape. The base shape should already be colored,
/// otherwise coloring it later will also color the hover area.
fn hover_area(base_shape:AnyShape, size:Var<Pixels>) -> AnyShape {
let hover_area = base_shape.grow(size).fill(HOVER_COLOR);
(hover_area + base_shape).into()
}
// ==================
// === FocusSplit ===
// ==================
/// Holds the data required to split a shape into two focus visual groups.
#[derive(Clone,Copy,Debug)]
struct FocusSplit {
position : Vector2<f32>,
angle : f32
}
impl FocusSplit {
fn new(position:Vector2<f32>, angle:f32) -> Self {
FocusSplit {position,angle}
}
}
// ===================
// === FocusedEdge ===
// ===================
/// An edge split into two parts - focused and unfocused one.
struct FocusedEdge {
focused : AnyShape,
unfocused : AnyShape,
}
impl FocusedEdge {
/// Splits the shape in two at the line given by the `split_center` and `split_angle`.
fn new
( base_shape : impl Into<AnyShape>
, split_center : &Var<Vector2<Pixels>>
, split_angle : &Var<Radians>
) -> Self {
let base_shape = base_shape.into();
let split_mask = HalfPlane().rotate(split_angle).translate(split_center);
let focused = (&base_shape * &split_mask).into();
let unfocused = (&base_shape - &split_mask).into();
FocusedEdge {focused,unfocused}
}
/// Color the focused and unfocused parts with the provided colors.
fn fill<C:Into<Var<color::Rgba>>>(&self, focused_color:C, unfocused_color:C) -> AnyShape |
}
// ==================
// === SnapTarget ===
// ==================
/// `SnapTarget` is the result value of snapping operations on `AnyEdgeShape`. It holds the
/// shape that a hover position was snapped to and the snapped position on that shape. The snapped
/// position (a) lies on the visible part of the shape and (b) is the closest position on the shape
/// to the source position that was used to compute it.
#[derive(Clone,Debug)]
struct SnapTarget {
position : Vector2<f32>,
target_shape_id : display::object::Id,
}
impl SnapTarget {
fn new(position:Vector2<f32>, target_shape_id:display::object::Id) -> Self {
SnapTarget {position,target_shape_id}
}
}
// =========================
// === Shape Definitions ===
// =========================
/// Joint definition.
pub mod joint {
use super::*;
ensogl::define_shape_system! {
(color_rgba:Vector4<f32>) {
let radius = Var::<Pixels>::from("input_size.y");
let joint = Circle((radius-PADDING.px())/2.0);
let joint_color = Var::<color::Rgba>::from(color_rgba);
let joint_colored = joint.fill(joint_color);
joint_colored.into()
}
}
}
fn corner_base_shape
(radius:&Var<f32>, width:&Var<Pixels>, angle:&Var<f32>, start_angle:&Var<f32>) -> AnyShape {
let radius = 1.px() * radius;
let width2 = width / 2.0;
let radius_outer = &radius + &width2;
let radius_inner = &radius - &width2;
let ring = Circle(radius_outer) - Circle(radius_inner);
let right:Var<f32> = RIGHT_ANGLE.into();
let rot = right - angle/2.0 + start_angle;
let mask = Plane().cut_angle_fast(angle.clone()).rotate(rot);
let shape = ring * mask;
shape.into()
}
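// In effect, `corner_base_shape` produces a ring of thickness `width` centered on `radius`,
// masked down to an angular segment of size `angle` starting at `start_angle` (the
// `right - angle/2.0` term aligns the wedge cut with the ring's coordinate frame).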
// FIXME [WD]: The 2 following impls are almost the same. Should be merged. This task should be
// handled by Wojciech.
macro_rules! define_corner_start { () => {
/// Shape definition.
pub mod corner {
use super::*;
ensogl::define_shape_system! {
below = [joint];
( radius : f32
, angle : f32
, start_angle : f32
, pos : Vector2<f32>
, dim : Vector2<f32>
, focus_split_center : Vector2<f32>
, focus_split_angle : f32
, color_rgba:Vector4<f32>
, focus_color_rgba:Vector4<f32>
) {
let width = &LINE_WIDTH.px();
let shape = corner_base_shape(&radius,width,&angle,&start_angle);
let color = Var::<color::Rgba>::from(color_rgba);
let focus_color = Var::<color::Rgba>::from(focus_color_rgba);
let shadow_size = 10.px();
let node_radius = &shadow_size + 1.px() * dim.y();
let node_width = &shadow_size*2.0 + 2.px() * dim.x();
let node_height = &node_radius*2.0;
let node_shape = Rect((node_width,node_height)).corners_radius(node_radius);
let node_shape = node_shape.fill(color::Rgba::new(1.0,0.0,0.0,1.0));
let tx = - 1.px() * pos.x();
let ty = - 1.px() * pos.y();
let node_shape = node_shape.translate((tx,ty));
let shape = shape.difference(node_shape);
let split_shape = FocusedEdge::new(
shape,&focus_split_center.px(),&focus_split_angle.into());
let shape = split_shape.fill(&color, &focus_color);
let hover_width = width + HOVER_EXTENSION.px() * 2.0;
let hover_area = corner_base_shape(&radius,&hover_width,&angle,&start_angle);
let hover_area = hover_area.fill(HOVER_COLOR);
(hover_area + shape).into()
}
}
impl EdgeShape for View {
fn set_focus_split_center_local(&self, center:Vector2<f32>) {
self.focus_split_center.set(center);
}
fn set_focus_split_angle(&self, angle:f32) {
self.focus_split_angle.set(angle);
}
fn events(&self) -> &ShapeViewEvents {
&self.events
}
fn set_color(&self, color:color::Rgba) {
self.color_rgba.set(Vector4(color.red,color.green,color.blue,color.alpha));
}
fn set_color_focus(&self, color:color::Rgba) {
let color_vec = Vector4(color.red,color.green,color.blue,color.alpha);
self.focus_color_rgba.set(color_vec);
}
fn normal_local(&self, point:Vector2<f32>) -> Rotation2<f32> {
point_rotation(point)
}
fn snap_local(&self, point:Vector2<f32>) -> Option<Vector2<f32>> {
// FIXME: These bounds checks should not be required and should be removed once
// issue #689 is resolved.
let radius = self.radius.get();
let center = Vector2::zero();
let point_to_center = point.xy() - center;
let closest_point = center + point_to_center / point_to_center.magnitude() * radius;
let vector_angle = -Rotation2::rotation_between(&Vector2(0.0,1.0),&closest_point).angle();
let start_angle = self.start_angle.get();
let end_angle = start_angle + self.angle.get();
let upper_bound = start_angle.max(end_angle);
let lower_bound = start_angle.min(end_angle);
let correct_quadrant = lower_bound < vector_angle && upper_bound > vector_angle;
correct_quadrant.as_some(Vector2(closest_point.x, closest_point.y))
}
}
}
}}
macro_rules! define_corner_end { () => {
/// Shape definition.
pub mod corner {
use super::*;
ensogl::define_shape_system! {
below = [joint];
( radius:f32
, angle:f32
, start_angle:f32
, pos:Vector2<f32>
, dim:Vector2<f32>
, focus_split_center:Vector2<f32>
, focus_split_angle:f32
, color_rgba:Vector4<f32>
, focus_color_rgba:Vector4<f32>
) {
let width = &LINE_WIDTH.px();
let shape = corner_base_shape(&radius,width,&angle,&start_angle);
let color = Var::<color::Rgba>::from(color_rgba);
let focus_color = Var::<color::Rgba>::from(focus_color_rgba);
let shadow_size = 10.px() + 1.px();
let node_radius = &shadow_size + 1.px() * dim.y();
let node_shape = Rect((&shadow_size*2.0 + 2.px() * dim.x(),&node_radius*2.0));
let node_shape = node_shape.corners_radius(node_radius);
let node_shape = node_shape.fill(color::Rgba::new(1.0,0.0,0.0,1.0));
let tx = - 1.px() * pos.x();
let ty = - 1.px() * pos.y();
let node_shape = node_shape.translate((tx,ty));
let shape = shape.intersection(node_shape);
let split_shape = FocusedEdge::new(
shape,&focus_split_center.px(),&focus_split_angle.into());
let shape = split_shape.fill(&color,&focus_color);
let hover_width = width + HOVER_EXTENSION.px() * 2.0;
let hover_area = corner_base_shape(&radius,&hover_width,&angle,&start_angle);
let hover_area = hover_area.fill(HOVER_COLOR);
(hover_area + shape).into()
}
}
impl EdgeShape for View {
fn set_focus_split_center_local(&self, center:Vector2<f32>) {
self.focus_split_center.set(center);
}
fn set_focus_split_angle(&self, angle:f32) {
self.focus_split_angle.set(angle);
}
fn events(&self) -> &ShapeViewEvents {
&self.events
}
fn set_color(&self, color:color::Rgba) {
self.color_rgba.set(Vector4(color.red,color.green,color.blue,color.alpha));
}
fn set_color_focus(&self, color:color::Rgba) {
self.focus_color_rgba.set(Vector4(color.red,color.green,color.blue,color.alpha));
}
fn normal_local(&self, point:Vector2<f32>) -> Rotation2<f32> {
point_rotation(point)
}
fn snap_local(&self, point:Vector2<f32>) -> Option<Vector2<f32>> {
// FIXME: These bounds checks should not be required and should be removed once
// issue #689 is resolved.
let radius = self.radius.get();
let center = Vector2::zero();
let point_to_center = point.xy() - center;
let closest_point = center + point_to_center / point_to_center.magnitude() * radius;
let vector_angle = -Rotation2::rotation_between(&Vector2(0.0, 1.0),&closest_point).angle();
let start_angle = self.start_angle.get();
let end_angle = start_angle + self.angle.get();
let upper_bound = start_angle.max(end_angle);
let lower_bound = start_angle.min(end_angle);
let correct_quadrant = lower_bound < vector_angle && upper_bound > vector_angle;
if correct_quadrant {
Some(Vector2(closest_point.x, closest_point.y))
} else {
None
}
}
}
}
}}
macro_rules! define_line { () => {
/// Shape definition.
pub mod line {
use super::*;
ensogl::define_shape_system! {
below = [joint];
(focus_split_center:Vector2<f32>, focus_split_angle:f32, color_rgba:Vector4<f32>,
focus_color_rgba:Vector4<f32>) {
let width = LINE_WIDTH.px();
let height = Var::<Pixels>::from("input_size.y");
let shape = Rect((width.clone(),height));
let color = Var::<color::Rgba>::from(color_rgba);
let focus_color = Var::<color::Rgba>::from(focus_color_rgba);
let split_shape = FocusedEdge::new(
shape,&focus_split_center.px(),&focus_split_angle.into());
let shape = split_shape.fill(&color,&focus_color);
hover_area(shape,HOVER_EXTENSION.px()).into()
}
}
impl EdgeShape for View {
fn set_focus_split_center_local(&self, center:Vector2<f32>) {
self.focus_split_center.set(center);
}
fn set_focus_split_angle(&self, angle:f32) {
self.focus_split_angle.set(angle);
}
fn events(&self) -> &ShapeViewEvents {
&self.events
}
fn set_color(&self, color:color::Rgba) {
self.color_rgba.set(Vector4(color.red,color.green,color.blue,color.alpha));
}
fn set_color_focus(&self, color:color::Rgba) {
self.focus_color_rgba.set(Vector4(color.red,color.green,color.blue,color.alpha));
}
fn normal_local(&self, _:Vector2<f32>) -> Rotation2<f32> {
Rotation2::new(0.0)
}
fn snap_local(&self, point:Vector2<f32>) -> Option<Vector2<f32>> {
// FIXME: These bounds checks should not be required and should be removed once
// issue #689 is resolved.
let height = self.size.get().y;
let y = point.y.clamp(-height/2.0, height/2.0);
Some(Vector2(0.0, y))
}
}
}
}}
macro_rules! define_arrow { () => {
/// Shape definition.
pub mod arrow {
use super::*;
ensogl::define_shape_system! {
above = [joint];
(focus_split_center:Vector2<f32>, focus_split_angle:f32, color_rgba:Vector4<f32>,
focus_color_rgba:Vector4<f32>) {
let width : Var<Pixels> = "input_size.x".into();
let height : Var<Pixels> = "input_size.y".into();
let color = Var::<color::Rgba>::from(color_rgba);
let focus_color = Var::<color::Rgba>::from(focus_color_rgba);
let focus_split_angle = focus_split_angle.into();
let focus_split_center = focus_split_center.px();
let shape_padding = -1.px();
let shape = Triangle(width+&shape_padding,height+&shape_padding);
let shape = FocusedEdge::new(shape,&focus_split_center,&focus_split_angle);
let shape = shape.fill(&color, &focus_color);
shape.into()
}
}
impl EdgeShape for View {
fn set_focus_split_center_local(&self, center:Vector2<f32>) {
// We don't want the arrow to be half-focused. The focus split point is set to the
// closest edge (all or nothing).
let min = -Vector2(ARROW_SIZE_X,ARROW_SIZE_Y);
let max = Vector2(ARROW_SIZE_X,ARROW_SIZE_Y);
let mid = Vector2::<f32>::zero();
let x = if center.x < mid.x { min.x } else { max.x };
let y = if center.y < mid.y { min.y } else { max.y };
self.focus_split_center.set(Vector2(x,y));
}
fn set_focus_split_angle(&self, angle:f32) {
self.focus_split_angle.set(angle);
}
fn events(&self) -> &ShapeViewEvents {
&self.events
}
fn set_color(&self, color:color::Rgba) {
self.color_rgba.set(Vector4(color.red,color.green,color.blue,color.alpha));
}
fn set_color_focus(&self, color:color::Rgba) {
self.focus_color_rgba.set(Vector4(color.red,color.green,color.blue,color.alpha));
}
fn normal_local(&self, _:Vector2<f32>) -> Rotation2<f32> {
Rotation2::new(0.0)
}
fn normal(&self, _point:Vector2<f32>) -> Rotation2<f32> {
Rotation2::new(-RIGHT_ANGLE)
}
fn snap_local(&self, point:Vector2<f32>) -> Option<Vector2<f32>> {
Some(Vector2(0.0, point.y))
}
}
}
}}
// ========================
// === Shape Operations ===
// ========================
trait LayoutLine {
fn layout_v(&self,start:Vector2<f32>,len:f32);
fn layout_h(&self,start:Vector2<f32>,len:f32);
fn layout_v_no_overlap(&self,start:Vector2<f32>,len:f32);
fn layout_h_no_overlap(&self,start:Vector2<f32>,len:f32);
}
impl LayoutLine for front::line::View {
fn layout_v(&self, start:Vector2<f32>, len:f32) {
let pos = Vector2(start.x, start.y + len/2.0);
let size = Vector2(LINE_SHAPE_WIDTH, len.abs()+LINE_SIDES_OVERLAP);
self.size.set(size);
self.set_position_xy(pos);
}
fn layout_h(&self, start:Vector2<f32>, len:f32) {
let pos = Vector2(start.x + len/2.0, start.y);
let size = Vector2(LINE_SHAPE_WIDTH, len.abs()+LINE_SIDES_OVERLAP);
self.size.set(size);
self.set_position_xy(pos);
}
fn layout_v_no_overlap(&self, start:Vector2<f32>, len:f32) {
let pos = Vector2(start.x, start.y + len/2.0);
let size = Vector2(LINE_SHAPE_WIDTH, len.abs());
self.size.set(size);
self.set_position_xy(pos);
}
fn layout_h_no_overlap(&self, start:Vector2<f32>, len:f32) {
let pos = Vector2(start.x + len/2.0, start.y);
let size = Vector2(LINE_SHAPE_WIDTH, len.abs());
self.size.set(size);
self.set_position_xy(pos);
}
}
impl LayoutLine for back::line::View {
fn layout_v(&self, start:Vector2<f32>, len:f32) {
let pos = Vector2(start.x, start.y + len/2.0);
let size = Vector2(LINE_SHAPE_WIDTH, len.abs()+LINE_SIDES_OVERLAP);
self.size.set(size);
self.set_position_xy(pos);
}
fn layout_h(&self, start:Vector2<f32>, len:f32) {
let pos = Vector2(start.x + len/2.0, start.y);
let size = Vector2(LINE_SHAPE_WIDTH, len.abs()+LINE_SIDES_OVERLAP);
self.size.set(size);
self.set_position_xy(pos);
}
fn layout_v_no_overlap(&self, start:Vector2<f32>, len:f32) {
let pos = Vector2(start.x, start.y + len/2.0);
let size = Vector2(LINE_SHAPE_WIDTH, len.abs());
self.size.set(size);
self.set_position_xy(pos);
}
fn layout_h_no_overlap(&self, start:Vector2<f32>, len:f32) {
let pos = Vector2(start.x + len/2.0, start.y);
let size = Vector2(LINE_SHAPE_WIDTH, len.abs());
self.size.set(size);
self.set_position_xy(pos);
}
}
// ===========================
// === Front / Back Shapes ===
// ===========================
/// Shape definitions which will be rendered in the front layer (on top of nodes).
pub mod front {
use super::*;
define_corner_start!();
define_line!();
define_arrow!();
}
/// Shape definitions which will be rendered in the bottom layer (below nodes).
pub mod back {
use super::*;
define_corner_end!();
define_line!();
define_arrow!();
}
// ===========================
// === Front / Back Layers ===
// ===========================
macro_rules! define_components {
($name:ident {
$($field:ident : ($field_type:ty, $field_shape_type:expr)),* $(,)?
}) => {
#[derive(Debug,Clone,CloneRef)]
#[allow(missing_docs)]
pub struct $name {
pub logger : Logger,
pub display_object : display::object::Instance,
pub shape_view_events : Rc<Vec<ShapeViewEvents>>,
shape_type_map : Rc<HashMap<display::object::Id,ShapeRole>>,
$(pub $field : $field_type),*
}
impl $name {
/// Constructor.
pub fn new(logger:Logger) -> Self {
let display_object = display::object::Instance::new(&logger);
$(let $field = <$field_type>::new(Logger::new_sub(&logger,stringify!($field)));)*
$(display_object.add_child(&$field);)*
let mut shape_view_events:Vec<ShapeViewEvents> = Vec::default();
$(shape_view_events.push($field.events.clone_ref());)*
let shape_view_events = Rc::new(shape_view_events);
let mut shape_type_map:HashMap<display::object::Id,ShapeRole> = default();
$(shape_type_map.insert(EdgeShape::id(&$field), $field_shape_type);)*
let shape_type_map = Rc::new(shape_type_map);
Self {logger,display_object,shape_view_events,shape_type_map,$($field),*}
}
fn get_shape(&self, id:display::object::Id) -> Option<&dyn EdgeShape> {
match id {
$(id if id == EdgeShape::id(&self.$field) => Some(&self.$field),)*
_ => None,
}
}
fn get_shape_type(&self, id:display::object::Id) -> Option<ShapeRole> {
self.shape_type_map.get(&id).cloned()
}
}
impl display::Object for $name {
fn display_object(&self) -> &display::object::Instance {
&self.display_object
}
}
impl AnyEdgeShape for $name {
fn shapes(&self) -> Vec<&dyn EdgeShape> {
let mut output = Vec::<&dyn EdgeShape>::default();
$(output.push(&self.$field);)*
output
}
}
}
}
define_components!{
Front {
corner : (front::corner::View, ShapeRole::Corner),
corner2 : (front::corner::View, ShapeRole::Corner2),
corner3 : (front::corner::View, ShapeRole::Corner3),
side_line : (front::line::View, ShapeRole::SideLine),
side_line2 : (front::line::View, ShapeRole::SideLine2),
main_line : (front::line::View, ShapeRole::MainLine),
port_line : (front::line::View, ShapeRole::PortLine),
arrow : (front::arrow::View, ShapeRole::Arrow),
}
}
define_components!{
Back {
corner : (back::corner::View, ShapeRole::Corner),
corner2 : (back::corner::View, ShapeRole::Corner2),
corner3 : (back::corner::View, ShapeRole::Corner3),
side_line : (back::line::View, ShapeRole::SideLine),
side_line2 : (back::line::View, ShapeRole::SideLine2),
main_line : (back::line::View, ShapeRole::MainLine),
arrow : (back::arrow::View, ShapeRole::Arrow),
}
}
impl AnyEdgeShape for EdgeModelData {
fn shapes(&self) -> Vec<&dyn EdgeShape> {
let mut shapes_back = self.back.shapes();
let mut shapes_front = self.front.shapes();
shapes_front.append(&mut shapes_back);
shapes_front
}
}
// ===========================
// === Shape & State Enums ===
// ===========================
/// Indicates which role a shape plays within the overall edge.
#[derive(Clone,Copy,Debug,Eq,PartialEq)]
enum ShapeRole {
SideLine,
Corner,
MainLine,
Corner2,
SideLine2,
Corner3,
PortLine,
Arrow,
}
/// Indicates the state the shape layout is in. Can be used to adjust behaviour based on state
/// to address edge cases for specific layouts. The naming follows the direction of the
/// edge from `Output` to `Input`.
///
/// Each state represents a unique layout in terms of adjacency of shapes (some shapes may
/// disappear in some layouts) or the relative geometric position of shapes. For example, the
/// `TopCenterRightLoop` has the main line leaving the node right to left, while corner2 and
/// corner3 are left to right relative to each other. Compare the `UpRight`, which is almost the
/// same, but has the main line leave the source node left to right.
///
/// This list is not exhaustive and new constellations should be added as needed.
#[derive(Clone,Copy,Debug,Eq,PartialEq)]
enum LayoutState {
UpLeft,
UpRight,
DownLeft,
DownRight,
/// The edge goes right / up / left / down.
TopCenterRightLoop,
/// The edge goes left / up / right / down.
TopCenterLeftLoop,
}
impl LayoutState {
/// Indicates whether the `OutputPort` is below the `InputPort` in the current layout
/// configuration.
fn is_output_above_input(self) -> bool {
match self {
LayoutState::UpLeft => false,
LayoutState::UpRight => false,
LayoutState::TopCenterRightLoop => false,
LayoutState::TopCenterLeftLoop => false,
LayoutState::DownLeft => true,
LayoutState::DownRight => true,
}
}
fn is_input_above_output(self) -> bool {
!self.is_output_above_input()
}
}
// =====================
// === SemanticSplit ===
// =====================
/// The semantic split divides the sub-shapes according to their relative position from `OutputPort`
/// to `InputPort` and allows access to the three different groups of shapes:
/// - shapes that are on the input side of the split;
/// - shapes that are at the split;
/// - shapes that are on the output side of the split.
///
/// This allows us to apply special handling to these groups. This is required because a simple
/// geometric split based on a line can lead to double intersections with the edge. Thus we avoid
/// the geometric intersection for shapes away from the intersection point and instead color them
/// based on their position within the edge.
///
/// We have seven "slots" of shapes within the edge that can be ordered from output port to input
/// port: `SideLine` `Corner`, `MainLine`/`Arrow`, `Corner2`, `SideLine2`, `Corner3`, `PortLine`.
/// Note that it does not matter if we have multiple adjacent shapes in the same bucket (as can
/// be the case with back/front shapes), as long as no self-intersection is possible for these
/// shapes.
///
/// Example: We need to split on the `SideLine2` and focus the shapes closer to the
/// output port. That means we need to do the geometric split on `Corner2`, `SideLine2`, `Corner3`,
/// which we can access via `split_shapes` and apply the focusing to `SideLine` `Corner` and
/// `MainLine`/`Arrow`, which we can access via `output_side_shapes`. The remaining shapes that must
/// not be focused can be accessed via `input_side_shapes`.
#[derive(Clone,Debug)]
struct SemanticSplit {
/// Ids of the shapes in the order they appear in the edge. Shapes that fill the same "slot"
/// are binned into a sub-vector and can be handled together.
ordered_part_ids : Vec<Vec<display::object::Id>>,
/// The index in `ordered_part_ids` of the slot where the edge split occurs.
split_index : usize,
}
impl SemanticSplit {
fn new(edge_data:&EdgeModelData, split_shape:display::object::Id) -> Option<Self> {
let ordered_part_ids = Self::semantically_binned_edges(edge_data);
// Find the object id in our `ordered_part_ids`
let mut split_index = None;
for (index, shape_ids) in ordered_part_ids.iter().enumerate() {
if shape_ids.contains(&split_shape) {
split_index = Some(index);
break
}
}
let split_index = split_index?;
Some(SemanticSplit {ordered_part_ids,split_index})
}
/// Return an ordered vector that contains the ids of the shapes in the order they appear in the
/// edge. Shapes that are to be handled as occupying the same place are binned into a sub-vector.
/// This enables us to infer which parts are next to each other, and which ones are
/// "source-side"/"target-side".
///
/// In general, we treat the equivalent shape from front and back as the same, as they tend to
/// occupy the same space within the shape and thus need to be handled together. The arrow,
/// for example, also needs to be handled together with the main line.
///
/// The important information we create here is the rough adjacency of shapes. This is used to
/// determine which shapes are adjacent, so that we avoid rendering a split on a shape that can
/// instead be entirely focused or entirely unfocused.
fn semantically_binned_edges(edge_data:&EdgeModelData) -> Vec<Vec<display::object::Id>> {
let front = &edge_data.front;
let back = &edge_data.back;
vec![
vec![EdgeShape::id(&front.side_line), EdgeShape::id(&back.side_line) ],
vec![EdgeShape::id(&front.corner), EdgeShape::id(&back.corner) ],
vec![EdgeShape::id(&front.main_line), EdgeShape::id(&back.main_line),
EdgeShape::id(&front.arrow), EdgeShape::id(&back.arrow) ],
vec![EdgeShape::id(&front.corner2), EdgeShape::id(&back.corner2) ],
vec![EdgeShape::id(&front.side_line2), EdgeShape::id(&back.side_line2) ],
vec![EdgeShape::id(&front.corner3), EdgeShape::id(&back.corner3) ],
vec![EdgeShape::id(&front.port_line) ],
]
}
/// Return `Id`s that match the given index condition `cond`.
fn index_filtered_shapes<F:Fn(i32)-> bool>(&self, cond:F) -> Vec<display::object::Id> {
self.ordered_part_ids
.iter()
.enumerate()
.filter(|(index, _)| cond(*index as i32))
.flat_map(|(_index, ids)| ids.clone())
.collect()
}
/// Shapes that are on the output side of the split.
fn output_side_shapes(&self) -> Vec<display::object::Id> {
self.index_filtered_shapes(move |index| (index) < self.split_index as i32)
}
/// Shapes that are on the input side of the split.
fn input_side_shapes(&self) -> Vec<display::object::Id> {
self.index_filtered_shapes(move |index| index > (self.split_index as i32))
}
/// Shapes that are at the split location.
fn split_shapes(&self) -> Vec<display::object::Id> {
self.index_filtered_shapes(move |index| (index == self.split_index as i32))
}
}
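// The three accessors above are consumed in `try_enable_focus_split` further below:
// shapes on the output side and the input side are focused or unfocused wholesale,
// while only the `split_shapes` receive an actual geometric `FocusSplit`.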
// ===========
// === FRP ===
// ===========
/// FRP system that is used to collect and aggregate shape view events from the sub-shapes of an
/// `Edge`. The Edge exposes the `mouse_down`/`mouse_over`/`mouse_out` streams, while the sub-shapes
/// emit events via the internal `on_mouse_down`/`on_mouse_over`/`on_mouse_out` sources.
#[derive(Clone,CloneRef,Debug)]
#[allow(missing_docs)]
pub struct ShapeViewEventsProxy {
pub mouse_down : frp::Stream,
pub mouse_over : frp::Stream,
pub mouse_out : frp::Stream,
on_mouse_down : frp::Source<display::object::Id>,
on_mouse_over : frp::Source<display::object::Id>,
on_mouse_out : frp::Source<display::object::Id>,
}
#[allow(missing_docs)]
impl ShapeViewEventsProxy {
pub fn new(network:&frp::Network) -> Self {
frp::extend! { network
on_mouse_over <- source();
on_mouse_out <- source();
on_mouse_down <- source();
mouse_down <- on_mouse_down.constant(());
mouse_over <- on_mouse_over.constant(());
mouse_out <- on_mouse_out.constant(());
}
Self {mouse_down,mouse_over,mouse_out,on_mouse_down,on_mouse_over,on_mouse_out}
}
}
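// The `on_*` sources above are fed by `AnyEdgeShape::register_proxy_frp`, which wires
// the mouse events of every sub-shape into this single proxy (see the trait definition
// earlier in this file).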
/// FRP endpoints of the edge.
#[derive(Clone,CloneRef,Debug)]
#[allow(missing_docs)]
pub struct Frp {
pub source_width : frp::Source<f32>,
pub source_height : frp::Source<f32>,
pub target_position : frp::Source<Vector2<f32>>,
pub target_attached : frp::Source<bool>,
pub source_attached : frp::Source<bool>,
pub redraw : frp::Source,
pub set_disabled : frp::Source<bool>,
pub set_color : frp::Source<color::Lcha>,
pub hover_position : frp::Source<Option<Vector2<f32>>>,
pub shape_events : ShapeViewEventsProxy
}
impl Frp {
/// Constructor.
pub fn new(network:&frp::Network) -> Self {
frp::extend! { network
def source_width = source();
def source_height = source();
def target_position = source();
def target_attached = source();
def source_attached = source();
def redraw = source();
def hover_position = source();
def set_disabled = source();
def set_color = source();
}
let shape_events = ShapeViewEventsProxy::new(network);
Self {source_width,source_height,target_position,target_attached,source_attached,redraw
,set_disabled,set_color,hover_position,shape_events}
}
}
// ==================
// === Math Utils ===
// ==================
/// For the given radius of the first circle (`r1`), radius of the second circle (`r2`), and the
/// x-axis position of the second circle (`x`), computes the y-axis position of the second circle in
/// such a way that the borders of the circles cross at a right angle. It also computes the angle
/// of the intersection. Please note that the center of the first circle is at the origin.
///
/// ```text
/// r1
/// ◄───► (1) x^2 + y^2 = r1^2 + r2^2
/// _____ (1) => y = sqrt(r1^2 + r2^2 - x^2)
/// .' `.
/// / _.-"""B-._ ▲
/// | .'0┼ | `. │ angle1 = A-XY-0
/// \/ │ / \ │ r2 angle2 = 0-XY-B
/// |`._ │__.' | │ alpha = B-XY-X_AXIS
/// | A└───┼─ | ▼
/// | (x,y) | tg(angle1) = y / x
/// \ / tg(angle2) = r1 / r2
/// `._ _.' alpha = PI - angle1 - angle2
/// `-....-'
///```
fn circle_intersection(x:f32, r1:f32, r2:f32) -> (f32,f32) {
let x_norm = x.clamp(-r2,r1);
let y = (r1*r1 + r2*r2 - x_norm*x_norm).sqrt();
let angle1 = f32::atan2(y,x_norm);
let angle2 = f32::atan2(r1,r2);
let angle = std::f32::consts::PI - angle1 - angle2;
(y,angle)
}
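// Worked example: for two unit circles (`r1 = r2 = 1.0`) with the second circle at
// `x = 1.0`, we get `y = sqrt(1 + 1 - 1) = 1.0`, both partial angles equal `PI/4`,
// and so `circle_intersection(1.0, 1.0, 1.0)` returns `(1.0, PI/2)`.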
// ============
// === Edge ===
// ============
/// Edge definition.
#[derive(AsRef,Clone,CloneRef,Debug,Deref)]
pub struct Edge {
#[deref]
model : Rc<EdgeModel>,
network : frp::Network,
}
impl AsRef<Edge> for Edge {
fn as_ref(&self) -> &Self {
self
}
}
impl display::Object for EdgeModelData {
fn display_object(&self) -> &display::object::Instance {
&self.display_object
}
}
impl Edge {
/// Constructor.
pub fn new(app:&Application) -> Self {
let network = frp::Network::new("node_edge");
let data = Rc::new(EdgeModelData::new(app.display.scene(),&network));
let model = Rc::new(EdgeModel {data});
Self {model,network}.init(app)
}
fn init(self, app:&Application) -> Self {
let network = &self.network;
let input = &self.frp;
let target_position = &self.target_position;
let target_attached = &self.target_attached;
// FIXME This should be used for #672 (Edges created from Input Ports do not overlay nodes)
let _source_attached = &self.source_attached;
let source_width = &self.source_width;
let source_height = &self.source_height;
let hover_position = &self.hover_position;
let hover_target = &self.hover_target;
let model = &self.model;
let shape_events = &self.frp.shape_events;
let edge_color = color::Animation::new(network);
let edge_focus_color = color::Animation::new(network);
let _style = StyleWatch::new(&app.display.scene().style_sheet);
model.data.front.register_proxy_frp(network, &input.shape_events);
model.data.back.register_proxy_frp(network, &input.shape_events);
frp::extend! { network
eval input.target_position ((t) target_position.set(*t));
// FIXME This should be enabled for #672 (Edges created from Input Ports do not overlay
// nodes)
// eval input.source_attached ((t) source_attached.set(*t));
eval input.target_attached ((t) target_attached.set(*t));
eval input.source_width ((t) source_width.set(*t));
eval input.source_height ((t) source_height.set(*t));
eval input.hover_position ((t) hover_position.set(*t));
eval shape_events.on_mouse_over ((id) hover_target.set(Some(*id)));
eval_ shape_events.on_mouse_out (hover_target.set(None));
eval_ input.redraw (model.redraw());
// === Colors ===
is_hovered <- input.hover_position.map(|t| t.is_some());
new_color <- all_with(&input.set_color,&input.set_disabled,
f!((c,t)model.base_color(*c,*t)));
new_focus_color <- new_color.map(f!((color) model.focus_color(*color)));
focus_color <- switch(&is_hovered,&new_color,&new_focus_color);
edge_color.target <+ new_color;
edge_focus_color.target <+ focus_color;
eval edge_color.value ((color) model.set_color(color.into()));
eval edge_focus_color.value ((color) model.set_focus_color(color.into()));
}
self
}
}
impl display::Object for Edge {
fn display_object(&self) -> &display::object::Instance {
&self.display_object
}
}
// =================
// === EdgeModel ===
// =================
/// Indicates the type of end connection of the Edge.
#[derive(Clone,Copy,Debug,Eq,PartialEq)]
#[allow(missing_docs)]
pub enum PortType {
InputPort,
OutputPort
}
/// Edge definition.
#[derive(AsRef,Clone,CloneRef,Debug,Deref)]
pub struct EdgeModel {
data : Rc<EdgeModelData>,
}
/// Internal data of `Edge`
#[derive(Debug)]
#[allow(missing_docs)]
pub struct EdgeModelData {
pub display_object : display::object::Instance,
pub logger : Logger,
pub frp : Frp,
pub front : Front,
pub back : Back,
pub joint : joint::View,
pub source_width : Rc<Cell<f32>>,
pub source_height : Rc<Cell<f32>>,
pub target_position : Rc<Cell<Vector2>>,
pub target_attached : Rc<Cell<bool>>,
pub source_attached : Rc<Cell<bool>>,
layout_state : Rc<Cell<LayoutState>>,
hover_position : Rc<Cell<Option<Vector2<f32>>>>,
hover_target : Rc<Cell<Option<display::object::Id>>>,
scene : Scene,
}
impl EdgeModelData {
/// Constructor.
pub fn new(scene:&Scene, network:&frp::Network) -> Self {
let logger = Logger::new("edge");
let display_object = display::object::Instance::new(&logger);
let front = Front::new(Logger::new_sub(&logger,"front"));
let back = Back::new (Logger::new_sub(&logger,"back"));
let joint = joint::View::new(Logger::new_sub(&logger,"joint"));
let shape_system = scene.layers.main.shape_system_registry.shape_system
(scene,PhantomData::<joint::DynamicShape>);
shape_system.shape_system.set_pointer_events(false);
display_object.add_child(&front);
display_object.add_child(&back);
display_object.add_child(&joint);
front . side_line . mod_rotation(|r| r.z = RIGHT_ANGLE);
back . side_line . mod_rotation(|r| r.z = RIGHT_ANGLE);
front . side_line2 . mod_rotation(|r| r.z = RIGHT_ANGLE);
back . side_line2 . mod_rotation(|r| r.z = RIGHT_ANGLE);
let frp = Frp::new(network);
let source_height = default();
let source_width = default();
let target_position = default();
let target_attached = Rc::new(Cell::new(false));
let source_attached = Rc::new(Cell::new(true));
let hover_position = default();
let layout_state = Rc::new(Cell::new(LayoutState::UpLeft));
let hover_target = default();
let scene = scene.into();
Self {display_object,logger,frp,front,back,joint,source_width,source_height,target_position
,target_attached,source_attached,layout_state,hover_position,hover_target,scene}
}
/// Set the color of the edge.
fn set_color(&self, color:color::Lcha) {
// We must never use alpha in edges, as it will show artifacts with overlapping sub-parts.
let color:color::Lcha = color.opaque.into();
let color_rgba = color::Rgba::from(color);
self.shapes().iter().for_each(|shape| shape.set_color(color_rgba));
self.joint.color_rgba.set(color_rgba.into());
}
fn set_focus_color(&self, color:color::Lcha) {
let color:color::Lcha = color.opaque.into();
self.shapes().iter().for_each(|shape| shape.set_color_focus(color.into()));
}
fn base_color(&self, color:color::Lcha, is_disabled:bool) -> color::Lcha {
let color:color::Lcha = color.opaque.into();
if !is_disabled {color} else {
let styles = StyleWatch::new(&self.scene.style_sheet);
styles.get_color(theme::code::syntax::disabled).into()
}
}
fn focus_color(&self, color:color::Lcha) -> color::Lcha {
// We must never use alpha in edges, as it will show artifacts with overlapping sub-parts.
let color:color::Lcha = color.opaque.into();
let styles = StyleWatch::new(&self.scene.style_sheet);
let bg_color = styles.get_color(theme::application::background).into();
color::mix(bg_color,color,0.25)
}
/// Redraws the connection.
#[allow(clippy::cognitive_complexity)]
pub fn redraw(&self) {
// === Variables ===
let fg = &self.front;
let bg = &self.back;
// FIXME This should be enabled for #672
// let fully_attached = self.target_attached.get() && self.source_attached.get();
let fully_attached = self.target_attached.get();
let node_half_width = self.source_width.get() / 2.0;
let target_node_half_height = node::HEIGHT / 2.0;
let source_node_half_height = self.source_height.get() / 2.0;
let source_node_circle = Vector2(node_half_width- source_node_half_height, 0.0);
let source_node_radius = source_node_half_height;
// === Update Highlights ===
match (fully_attached, self.hover_position.get(), self.hover_target.get()) {
(true, Some(hover_position), Some(hover_target)) => {
let focus_part = self.port_to_detach_for_position(hover_position);
let focus_split_result = self.try_enable_focus_split
(hover_position,hover_target,focus_part);
if let Ok(snap_data) = focus_split_result {
let joint_position = snap_data.position - self.display_object.position().xy();
self.joint.set_position_xy(joint_position);
let joint_size = LINE_WIDTH+PADDING;
self.joint.size.set(Vector2(joint_size,joint_size));
}
},
_ => {
self.focus_none();
self.joint.size.set(Vector2::zero());
},
}
// === Target ===
//
// Target is the end position of the connection in local node space (the origin is placed in
// the center of the node). We handle line drawing in a special way when the target is below
// the node (for example, so as not to draw the port line above the source node).
//
// ╭──────────────╮
// │ ┼ (0,0) │
// ╰──────────────╯────╮
// │
// ▢ target
let world_space_target = self.target_position.get();
let target_x = world_space_target.x - self.position().x;
let target_y = world_space_target.y - self.position().y;
let side = target_x.signum();
let target_x = target_x.abs();
let target = Vector2(target_x,target_y);
let target_is_below_node_x = target.x < node_half_width;
let target_is_below_node_y = target.y < (-source_node_half_height);
let target_is_below_node = target_is_below_node_x && target_is_below_node_y;
let port_line_len_max = target_node_half_height + NODE_PADDING;
let side_right_angle = side * RIGHT_ANGLE;
// === Upward Discovery ===
//
// Discovers when the connection should go upwards. The `upward_corner_radius` defines the
// preferred radius for every corner in this scenario.
//
// ╭─────╮ ╭─╮
// ╰─────╯────╯ │
// ▢
let upward_corner_radius = 20.0;
let min_len_for_non_curved_line = upward_corner_radius + port_line_len_max;
let upward_distance = target.y + min_len_for_non_curved_line;
// === Flat side ===
//
// Maximum side distance before connection is curved up.
//
// ╭─────╮◄──► ╭─────╮◄──►╭─╮
// ╰─────╯───╮ ╰─────╯────╯ │
// ▢ ▢
let flat_side_size = 40.0;
let is_flat_side = target.x < node_half_width + flat_side_size;
let downward_flat = if target_is_below_node_x {target_is_below_node_y} else {target.y<0.0};
let downward_far = -target.y > min_len_for_non_curved_line || target_is_below_node;
let is_down = if is_flat_side {downward_flat} else {downward_far};
// === Layout State ===
// Initial guess at our layout. Can still be changed further down in the layout code in
// case we encounter a layout where the corners need to loop back.
let state = match (is_down,(side < 0.0)) {
(true,true) => LayoutState::DownLeft,
(true,false) => LayoutState::DownRight,
(false,true) => LayoutState::UpLeft,
(false,false) => LayoutState::UpRight,
};
self.layout_state.set(state);
// === Port Line Length ===
//
// ╭──╮
// ╰──╯───╮
// ╵
// ╭──┼──╮ ▲ Port line covers the area above target node and the area of target node
// │ ▢ │ ▼ shadow. It can be shorter if the target position is below the node or the
// ╰─────╯ connection is being dragged, in order not to overlap with the source node.
let port_line_start = Vector2(side * target.x, target.y + MOUSE_OFFSET);
let space_attached = -port_line_start.y - target_node_half_height - LINE_SIDE_OVERLAP;
let space = space_attached - NODE_PADDING;
let len_below_free = max(0.0,min(port_line_len_max,space));
let len_below_attached = max(0.0,min(port_line_len_max,space_attached));
let len_below = if fully_attached {len_below_attached} else {len_below_free};
let far_side_len = if target_is_below_node {len_below} else {port_line_len_max};
let flat_side_len = min(far_side_len,-port_line_start.y);
let mut port_line_len = if is_flat_side && is_down {flat_side_len} else {far_side_len};
let port_line_end = Vector2(target.x,port_line_start.y + port_line_len);
// === Corner1 ===
//
// The first corner on the line. It is always placed at the right angle to the tangent of
// the node border. In case the edge is in the drag mode, the curve is divided into two
// parts. The first part is placed under the source node shadow, while the second part is
// placed on the top layer.
//
// ╭─────╮ ╭─────╮ 2╭───╮3
// ╰─────╯──╮1 ╰─────╯──╯1 ▢
// ▢
let mut corner1_target = port_line_end;
if !is_down {
corner1_target.x = if is_flat_side {
let radius_grow = max(0.0,target.x - node_half_width + upward_corner_radius);
node_half_width + upward_corner_radius + radius_grow
} else {
let radius1 = node_half_width + (target.x - node_half_width)/2.0;
let radius2 = node_half_width + 2.0*upward_corner_radius;
min(radius1,radius2)
};
corner1_target.y = min(upward_corner_radius,upward_distance/2.0);
}
let corner1_grow = ((corner1_target.x - node_half_width) * 0.6).max(0.0);
let corner1_radius = 20.0 + corner1_grow;
let corner1_radius = corner1_radius.min(corner1_target.y.abs());
let corner1_x = corner1_target.x - corner1_radius;
let corner1_x_loc = corner1_x - source_node_circle.x;
let (y,angle) = circle_intersection(corner1_x_loc,source_node_radius,corner1_radius);
let corner1_y = if is_down {-y} else {y};
let corner1 = Vector2(corner1_x*side, corner1_y);
let angle_overlap = if corner1_x > node_half_width { 0.0 } else { 0.1 };
let corner1_side = (corner1_radius + PADDING) * 2.0;
let corner1_size = Vector2(corner1_side,corner1_side);
let corner1_start_angle = if is_down {0.0} else {side_right_angle};
let corner1_angle = (angle + angle_overlap) * side;
let corner1_angle = if is_down {corner1_angle} else {side_right_angle};
bg.corner.size.set(corner1_size);
bg.corner.start_angle.set(corner1_start_angle);
bg.corner.angle.set(corner1_angle);
bg.corner.radius.set(corner1_radius);
bg.corner.pos.set(corner1);
bg.corner.set_position_xy(corner1);
if !fully_attached {
bg.corner.dim.set(Vector2(node_half_width, source_node_half_height));
fg.corner.size.set(corner1_size);
fg.corner.start_angle.set(corner1_start_angle);
fg.corner.angle.set(corner1_angle);
fg.corner.radius.set(corner1_radius);
fg.corner.pos.set(corner1);
fg.corner.dim.set(Vector2(node_half_width, source_node_half_height));
fg.corner.set_position_xy(corner1);
} else {
fg.corner.size.set(zero());
bg.corner.dim.set(Vector2(INFINITE,INFINITE));
}
// === Side Line ===
//
// Side line is the first horizontal line. In case the edge is in drag mode, the line is
// divided into two segments. The first is placed below the shadow of the source node, while
// the second is placed on the top layer. The side line placement is the same in case of
// upwards connections - it is then placed between the node and corner 1.
//
// ╭─────╮ ╭─────╮ 2╭───╮3
// ╰─────╯╴──╮ ╰─────╯╴─╯1 ▢
// ▢
let side_line_shift = LINE_SIDES_OVERLAP;
let side_line_len = max(0.0,corner1_x - node_half_width + side_line_shift);
let bg_line_x = node_half_width - side_line_shift;
let bg_line_start = Vector2(side*bg_line_x,0.0);
if fully_attached {
let bg_line_len = side*side_line_len;
fg.side_line.size.set(zero());
bg.side_line.layout_h(bg_line_start,bg_line_len);
} else {
let bg_max_len = NODE_PADDING + side_line_shift;
let bg_line_len = min(side_line_len,bg_max_len);
let bg_end_x = bg_line_x + bg_line_len;
let fg_line_start = Vector2(side*(bg_end_x+LINE_SIDE_OVERLAP),0.0);
let fg_line_len = side*(side_line_len - bg_line_len);
let bg_line_len_overlap = side * min(side_line_len,bg_max_len+LINE_SIDES_OVERLAP);
bg.side_line.layout_h(bg_line_start,bg_line_len_overlap);
fg.side_line.layout_h_no_overlap(fg_line_start,fg_line_len);
}
// === Main Line (downwards) ===
//
// Main line is the long vertical line. In case it is placed below the node and the edge is
// in drag mode, it is divided into two segments. The upper segment is drawn behind the node
// shadow, while the second is drawn on the top layer. In case of an edge in drag mode drawn
// next to the node, only the top layer segment is used.
//
// Please note that only applies to edges going down. Refer to docs of main line of edges
// going up to learn more.
//
// Double edge: Single edge:
// ╭─────╮ ╭─────╮
// ╰──┬──╯ ╰─────╯────╮
// ╷ │
// ▢ ▢
if is_down {
let main_line_end_y = corner1.y;
let main_line_len = main_line_end_y - port_line_start.y;
if !fully_attached && target_is_below_node {
let back_line_start_y = max(-source_node_half_height - NODE_PADDING, port_line_start.y);
let back_line_start = Vector2(port_line_start.x, back_line_start_y);
let back_line_len = main_line_end_y - back_line_start_y;
let front_line_len = main_line_len - back_line_len;
bg.main_line.layout_v(back_line_start, back_line_len);
fg.main_line.layout_v(port_line_start, front_line_len);
} else if fully_attached {
let main_line_start_y = port_line_start.y + port_line_len;
let main_line_start = Vector2(port_line_start.x, main_line_start_y);
fg.main_line.size.set(zero());
bg.main_line.layout_v(main_line_start, main_line_len - port_line_len);
} else {
bg.main_line.size.set(zero());
fg.main_line.layout_v(port_line_start, main_line_len);
}
}
if !is_down {
// === Corner2 & Corner3 Radius ===
//
// ╭─────╮ 2╭───╮3
// ╰─────╯──╯1 ▢
let corner2_radius = corner1_radius;
let corner3_radius = upward_corner_radius;
let corner2_x = corner1_target.x + corner1_radius;
let corner3_x = port_line_end.x - corner3_radius;
let corner2_bbox_x = corner2_x - corner2_radius;
let corner3_bbox_x = corner3_x + corner3_radius;
let corner_2_3_dist = corner3_bbox_x - corner2_bbox_x;
let corner_2_3_side = corner_2_3_dist.signum();
let corner_2_3_dist = corner_2_3_dist.abs();
let corner_2_3_width = corner2_radius + corner3_radius;
let corner_2_3_do_scale = corner_2_3_dist < corner_2_3_width;
let corner_2_3_scale = corner_2_3_dist / corner_2_3_width;
let corner_2_3_scale = if corner_2_3_do_scale {corner_2_3_scale} else {1.0};
let side_combined = side * corner_2_3_side;
let corner2_radius = corner2_radius * corner_2_3_scale;
let corner3_radius = corner3_radius * corner_2_3_scale;
let is_right_side = (side_combined - 1.0).abs() < std::f32::EPSILON;
// === Layout State Update ===
// Corner case: we are above the node and the corners loop back
match (side < 0.0, corner_2_3_side < 0.0) {
(false, true) => self.layout_state.set(LayoutState::TopCenterRightLoop),
(true, true) => self.layout_state.set(LayoutState::TopCenterLeftLoop),
_ => (),
};
// === Corner2 & Corner3 Placement ===
//
// ╭─────╮ 2╭───╮3
// ╰─────╯──╯1 ▢
let corner3_side = (corner3_radius + PADDING) * 2.0;
let corner3_size = Vector2(corner3_side,corner3_side);
let corner3_x = port_line_end.x - corner_2_3_side * corner3_radius;
let corner3_y = port_line_end.y;
let corner2_y = corner3_y + corner3_radius - corner2_radius;
let corner2_y = max(corner2_y, corner1.y);
let corner3_y = max(corner3_y,corner2_y - corner3_radius + corner2_radius);
let corner3 = Vector2(corner3_x*side,corner3_y);
let corner3_angle = if is_right_side {0.0} else {-RIGHT_ANGLE};
if fully_attached {
fg.corner3.size.set(zero());
bg.corner3.size.set(corner3_size);
bg.corner3.start_angle.set(corner3_angle);
bg.corner3.angle.set(RIGHT_ANGLE);
bg.corner3.radius.set(corner3_radius);
bg.corner3.pos.set(corner3);
bg.corner3.dim.set(Vector2(INFINITE,INFINITE));
bg.corner3.set_position_xy(corner3);
} else {
bg.corner3.size.set(zero());
fg.corner3.size.set(corner3_size);
fg.corner3.start_angle.set(corner3_angle);
fg.corner3.angle.set(RIGHT_ANGLE);
fg.corner3.radius.set(corner3_radius);
fg.corner3.pos.set(corner3);
fg.corner3.dim.set(zero());
fg.corner3.set_position_xy(corner3);
}
let corner2_x = corner1_target.x + corner_2_3_side * corner2_radius;
let corner2 = Vector2(corner2_x*side,corner2_y);
let corner2_angle = if is_right_side {-RIGHT_ANGLE} else {0.0};
if fully_attached {
fg.corner2.size.set(zero());
bg.corner2.size.set(corner1_size);
bg.corner2.start_angle.set(corner2_angle);
bg.corner2.angle.set(RIGHT_ANGLE);
bg.corner2.radius.set(corner2_radius);
bg.corner2.pos.set(corner2);
bg.corner2.dim.set(Vector2(INFINITE,INFINITE));
bg.corner2.set_position_xy(corner2);
} else {
bg.corner2.size.set(zero());
fg.corner2.size.set(corner1_size);
fg.corner2.start_angle.set(corner2_angle);
fg.corner2.angle.set(RIGHT_ANGLE);
fg.corner2.radius.set(corner2_radius);
fg.corner2.pos.set(corner2);
fg.corner2.dim.set(zero());
fg.corner2.set_position_xy(corner2);
}
// === Main Line (upwards) ===
//
// Main line is the first vertical line of the edge, placed between corner 1 and
// corner 2. In case the line is long enough, it has an arrow pointing up to show its
// direction.
//
// ╭─────╮ 2╭───╮3
// ╰─────╯──╯1 ▢
let main_line_len = corner2_y - corner1.y;
let main_line_start = Vector2(side*corner1_target.x,corner1.y);
if fully_attached {
fg.main_line.size.set(zero());
bg.main_line.layout_v(main_line_start, main_line_len);
} else {
bg.main_line.size.set(zero());
fg.main_line.layout_v(main_line_start, main_line_len);
}
if main_line_len > ARROW_SIZE_Y {
let arrow_y = (corner1.y - corner1_radius + corner2_y + corner2_radius)/2.0;
let arrow_pos = Vector2(main_line_start.x, arrow_y);
let arrow_size = Vector2(ARROW_SIZE_X,ARROW_SIZE_Y);
if fully_attached {
fg.arrow.size.set(zero());
bg.arrow.size.set(arrow_size);
bg.arrow.set_position_xy(arrow_pos);
} else {
bg.arrow.size.set(zero());
fg.arrow.size.set(arrow_size);
fg.arrow.set_position_xy(arrow_pos);
}
} else {
bg.arrow.size.set(zero());
fg.arrow.size.set(zero());
}
// === Side Line 2 ===
//
// Side line 2 is the horizontal line connecting corner 2 and corner 3.
//
// ╭─────╮ 2╭───╮3
// ╰─────╯──╯1 ▢
let side_line2_len = side*(corner3_x - corner2_x);
let side_line2_start = Vector2(side*corner2_x,corner2_y + corner2_radius);
if fully_attached {
fg.side_line2.size.set(zero());
bg.side_line2.layout_h(side_line2_start,side_line2_len);
} else {
bg.side_line2.size.set(zero());
fg.side_line2.layout_h(side_line2_start,side_line2_len);
}
port_line_len = corner3_y - port_line_start.y;
} else {
fg.arrow.size.set(zero());
bg.arrow.size.set(zero());
fg.corner3.size.set(zero());
bg.corner3.size.set(zero());
fg.corner2.size.set(zero());
bg.corner2.size.set(zero());
fg.side_line2.size.set(zero());
bg.side_line2.size.set(zero());
}
// === Port Line ===
fg.port_line.layout_v(port_line_start,port_line_len);
}
}
// === Edge Splitting ===
impl EdgeModelData {
/// Return whether the point is in the upper half of the overall edge shape.
fn is_in_upper_half(&self, point:Vector2<f32>) -> bool {
let world_space_source = self.position().y;
let world_space_target = self.target_position.get().y;
let mid_y = (world_space_source + world_space_target) / 2.0;
point.y > mid_y
}
/// Returns whether the given position should detach the `Input` or `Output` part of the
/// edge.
///
/// We determine the target port primarily based on the y-position. We use the y distance to the
/// start/end of the edge, and whichever is closer is the target. However, this becomes
/// problematic if the start and end of the edge have the same y-position, or even if they are
/// almost level. That is why we then switch to using the Euclidean distance instead.
pub fn port_to_detach_for_position(&self, point:Vector2<f32>) -> PortType {
if self.input_and_output_y_too_close() {
return self.closest_end_for_point(point)
}
let input_port_is_in_upper_half = self.layout_state.get().is_input_above_output();
let point_is_in_upper_half = self.is_in_upper_half(point);
// We always detach the port that is on the opposite side of the cursor.
if point_is_in_upper_half != input_port_is_in_upper_half {
PortType::InputPort
} else {
PortType::OutputPort
}
}
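// Example: in an `UpRight` layout the input port sits above the output port, so a cursor
// hovering the lower half of the edge detaches the `InputPort` -- the port on the
// opposite side of the cursor.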
/// Return the `PortType` for the closest end of the edge for the given point. Uses the
/// euclidean distance between the point and the `Input`/`Output` end.
fn closest_end_for_point(&self, point:Vector2<f32>) -> PortType {
let target_position = self.target_position.get().xy();
let source_position = self.position().xy() - Vector2(0.0,self.source_height.get() / 2.0);
let target_distance = (point - target_position).norm();
let source_distance = (point - source_position).norm();
if source_distance > target_distance { PortType::OutputPort }
else { PortType::InputPort }
}
/// Indicates whether the height difference between input and output is too small to use the
/// y value to assign the `PortType` for a given point.
fn input_and_output_y_too_close(&self) -> bool {
let source_y = self.position().y;
let target_y = self.target_position.get().y;
let delta_y = source_y - target_y;
delta_y > 0.0 && delta_y < MIN_SOURCE_TARGET_DIFFERENCE_FOR_Y_VALUE_DISCRIMINATION
}
/// Return the correct cut angle for the given `shape_id` at the `position` to focus the
/// `target_end`. Will return `None` if the `shape_id` is not a valid sub-shape of this edge.
fn cut_angle_for_shape
(&self, shape_id:display::object::Id, position:Vector2<f32>, target_end: PortType)
-> Option<f32> {
let shape = self.get_shape(shape_id)?;
let shape_role = self.get_shape_role(shape_id)?;
let cut_angle_correction = self.get_cut_angle_correction(shape_role);
let target_angle = self.get_target_angle(target_end);
let base_rotation = shape.display_object().rotation().z + 2.0 * RIGHT_ANGLE;
let shape_normal = shape.normal(position).angle();
Some(shape_normal - base_rotation + cut_angle_correction + target_angle)
}
/// Return the cut angle value needed to focus the given end of the shape. This takes into
/// account the current layout.
fn get_target_angle(&self, target_end: PortType) -> f32 {
let output_on_top = self.layout_state.get().is_output_above_input();
match (output_on_top,target_end) {
(false, PortType::InputPort) => 2.0 * RIGHT_ANGLE,
(false, PortType::OutputPort) => 0.0,
(true, PortType::InputPort) => 0.0,
(true, PortType::OutputPort) => 2.0 * RIGHT_ANGLE,
}
}
/// These corrections are needed as sometimes shapes are in places that lead to inconsistent
/// results, e.g., the side line leaving the node from left/right or right/left. The shape
/// itself does not have enough information about its own placement to determine which end
/// is pointed towards the `Target` or `Source` part of the whole edge. So we need to account
/// for these here based on the specific layout state we are in.
///
/// Example:
/// ```text
///
/// Case 1
///
/// (===)----...
/// Node Side Line
///
/// Case 2
///
/// ...----(===)
/// Side Line Node
/// ```
///
/// In both case 1 and 2 the side line is oriented left to right just placed in a different
/// location. However, in Case 1 the left side of the line is "output side" and in Case 2 the
/// right side is "output side". So if we want to set an equivalent angle, we need to apply a
/// correction based on this layout property.
///
fn get_cut_angle_correction(&self, shape_role:ShapeRole) -> f32 {
let layout_state = self.layout_state.get();
let flip = 2.0 * RIGHT_ANGLE;
// These rules are derived from the algorithm in `redraw`. In some layout configurations
// shapes are inverted top/down or left/right and we need to apply the appropriate
// corrections here. Sometimes these are just the side-effect of some layouting mechanics
// without visual justification (e.g., the `PortLine` sometimes ends up with a negative
// height and is thus flipped upside down).
match (layout_state,shape_role) {
(LayoutState::DownLeft, ShapeRole::SideLine ) => flip,
(LayoutState::DownLeft, ShapeRole::Corner ) => flip,
(LayoutState::UpLeft, ShapeRole::PortLine ) => flip,
(LayoutState::UpLeft, ShapeRole::Corner ) => flip,
(LayoutState::UpRight, ShapeRole::PortLine ) => flip,
(LayoutState::UpRight, ShapeRole::Corner3 ) => flip,
(LayoutState::UpRight, ShapeRole::SideLine2 ) => flip,
(LayoutState::UpRight, ShapeRole::Corner2 ) => flip,
(LayoutState::UpRight, ShapeRole::SideLine ) => flip,
(LayoutState::TopCenterRightLoop, ShapeRole::SideLine ) => flip,
(LayoutState::TopCenterRightLoop, ShapeRole::PortLine ) => flip,
(LayoutState::TopCenterLeftLoop, ShapeRole::SideLine2 ) => flip,
(LayoutState::TopCenterLeftLoop, ShapeRole::Corner2 ) => flip,
(LayoutState::TopCenterLeftLoop, ShapeRole::Corner ) => flip,
(LayoutState::TopCenterLeftLoop, ShapeRole::Corner3 ) => flip,
(LayoutState::TopCenterLeftLoop, ShapeRole::PortLine ) => flip,
(_, ShapeRole::Arrow) => RIGHT_ANGLE,
_ => 0.0,
}
}
/// Return a reference to sub-shape indicated by the given shape id.
fn get_shape(&self, id:display::object::Id) -> Option<&dyn EdgeShape> {
let shape_ref = self.back.get_shape(id);
if shape_ref.is_some() {
return shape_ref
}
self.front.get_shape(id)
}
/// Return the `ShapeRole` for the given sub-shape.
fn get_shape_role(&self, id:display::object::Id) -> Option<ShapeRole> {
let shape_type = self.back.get_shape_type(id);
if shape_type.is_some() {
return shape_type
}
self.front.get_shape_type(id)
}
/// Check whether the provided point is close enough to be snapped to the edge.
fn try_point_snap(&self, point:Vector2<f32>, focus_shape_id:display::object::Id) -> Option<SnapTarget> {
let focus_shape = self.get_shape(focus_shape_id)?;
let snap_position = focus_shape.snap(point)?;
Some(SnapTarget::new(snap_position,focus_shape_id))
}
/// Disable the splitting of the shape.
fn focus_none(&self) {
for shape in self.shapes() {
shape.focus_none();
}
}
/// Apply a focus split to the shape at the given `position` and focus the given `PortType`. This
/// might fail if the given position is too far from the shape.
fn try_enable_focus_split
(&self, position:Vector2<f32>, focus_shape_id:display::object::Id, part: PortType)
-> Result<SnapTarget, ()>{
let snap_data = self.try_point_snap(position,focus_shape_id).ok_or(())?;
let semantic_split = SemanticSplit::new(self,snap_data.target_shape_id).ok_or(())?;
let angle = self.cut_angle_for_shape(snap_data.target_shape_id,position,part).ok_or(())?;
// Completely disable/enable focus for shapes that are not close to the split, based on their
// relative position within the edge. This avoids issues with splitting not working
// correctly when a split would intersect the edge at multiple points.
semantic_split.output_side_shapes().iter().for_each(|shape_id| {
if let Some(shape) = self.get_shape(*shape_id) {
match part {
PortType::OutputPort => shape.focus_all(),
PortType::InputPort => shape.focus_none(),
}
}
});
semantic_split.input_side_shapes().iter().for_each(|shape_id|{
if let Some(shape) = self.get_shape(*shape_id) {
match part {
PortType::OutputPort => shape.focus_none(),
PortType::InputPort => shape.focus_all(),
}
}
});
// Apply a split to the shapes at the split location, and next to the split shapes. The
// extension to neighbours is required to show the correct transition from one shape to the
// next.
semantic_split.split_shapes().iter().for_each(|shape_id|{
if let Some(shape) = self.get_shape(*shape_id) {
let split_data = FocusSplit::new(snap_data.position,angle);
shape.set_focus_split(split_data)
}
});
Ok(snap_data)
}
}
| {
let focused_color = focused_color.into();
let unfocused_color = unfocused_color.into();
let focused = self.focused.fill(&focused_color);
let unfocused = self.unfocused.fill(&unfocused_color);
(focused + unfocused).into()
} |
interface.go | /*
Copyright AppsCode Inc. and Contributors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package user
import (
internalinterfaces "kubeform.dev/provider-azurerm-api/client/informers/externalversions/internalinterfaces"
v1alpha1 "kubeform.dev/provider-azurerm-api/client/informers/externalversions/user/v1alpha1"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1alpha1 provides access to shared informers for resources in V1alpha1.
V1alpha1() v1alpha1.Interface
}
type group struct {
factory internalinterfaces.SharedInformerFactory
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// New returns a new Interface.
func | (f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface {
return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions}
}
// V1alpha1 returns a new v1alpha1.Interface.
func (g *group) V1alpha1() v1alpha1.Interface {
return v1alpha1.New(g.factory, g.namespace, g.tweakListOptions)
}
| New |
single_value_extended_properties_request_builder.go | package singlevalueextendedproperties
import (
i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f "github.com/microsoft/kiota-abstractions-go"
ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be "github.com/microsoftgraph/msgraph-beta-sdk-go/models"
i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459 "github.com/microsoftgraph/msgraph-beta-sdk-go/models/odataerrors"
i3c9560102929d5136b5d48c74c1df97dbc5b441c20959b9608918fa2b9b9530e "github.com/microsoftgraph/msgraph-beta-sdk-go/me/mailfolders/item/messages/item/singlevalueextendedproperties/count"
)
// SingleValueExtendedPropertiesRequestBuilder provides operations to manage the singleValueExtendedProperties property of the microsoft.graph.message entity.
type SingleValueExtendedPropertiesRequestBuilder struct {
// Path parameters for the request
pathParameters map[string]string
// The request adapter to use to execute the requests.
requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter
// Url template to use to build the URL for the current request builder
urlTemplate string
}
// SingleValueExtendedPropertiesRequestBuilderGetQueryParameters the collection of single-value extended properties defined for the message. Nullable.
type SingleValueExtendedPropertiesRequestBuilderGetQueryParameters struct {
// Include count of items
Count *bool `uriparametername:"%24count"`
// Expand related entities
Expand []string `uriparametername:"%24expand"`
// Filter items by property values
Filter *string `uriparametername:"%24filter"`
// Order items by property values
Orderby []string `uriparametername:"%24orderby"`
// Search items by search phrases
Search *string `uriparametername:"%24search"`
// Select properties to be returned
Select []string `uriparametername:"%24select"`
// Skip the first n items
Skip *int32 `uriparametername:"%24skip"`
// Show only the first n items
Top *int32 `uriparametername:"%24top"`
}
// SingleValueExtendedPropertiesRequestBuilderGetRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type SingleValueExtendedPropertiesRequestBuilderGetRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
// Request query parameters
QueryParameters *SingleValueExtendedPropertiesRequestBuilderGetQueryParameters
}
// SingleValueExtendedPropertiesRequestBuilderPostRequestConfiguration configuration for the request such as headers, query parameters, and middleware options.
type SingleValueExtendedPropertiesRequestBuilderPostRequestConfiguration struct {
// Request headers
Headers map[string]string
// Request options
Options []i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestOption
}
// NewSingleValueExtendedPropertiesRequestBuilderInternal instantiates a new SingleValueExtendedPropertiesRequestBuilder and sets the default values.
func NewSingleValueExtendedPropertiesRequestBuilderInternal(pathParameters map[string]string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*SingleValueExtendedPropertiesRequestBuilder) {
m := &SingleValueExtendedPropertiesRequestBuilder{
}
m.urlTemplate = "{+baseurl}/me/mailFolders/{mailFolder%2Did}/messages/{message%2Did}/singleValueExtendedProperties{?%24top,%24skip,%24search,%24filter,%24count,%24orderby,%24select,%24expand}";
urlTplParams := make(map[string]string)
for idx, item := range pathParameters {
urlTplParams[idx] = item
}
m.pathParameters = urlTplParams;
m.requestAdapter = requestAdapter;
return m
}
// NewSingleValueExtendedPropertiesRequestBuilder instantiates a new SingleValueExtendedPropertiesRequestBuilder and sets the default values.
func NewSingleValueExtendedPropertiesRequestBuilder(rawUrl string, requestAdapter i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestAdapter)(*SingleValueExtendedPropertiesRequestBuilder) |
// Count the count property
func (m *SingleValueExtendedPropertiesRequestBuilder) Count()(*i3c9560102929d5136b5d48c74c1df97dbc5b441c20959b9608918fa2b9b9530e.CountRequestBuilder) {
return i3c9560102929d5136b5d48c74c1df97dbc5b441c20959b9608918fa2b9b9530e.NewCountRequestBuilderInternal(m.pathParameters, m.requestAdapter);
}
// CreateGetRequestInformation the collection of single-value extended properties defined for the message. Nullable.
func (m *SingleValueExtendedPropertiesRequestBuilder) CreateGetRequestInformation()(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
return m.CreateGetRequestInformationWithRequestConfiguration(nil);
}
// CreateGetRequestInformationWithRequestConfiguration the collection of single-value extended properties defined for the message. Nullable.
func (m *SingleValueExtendedPropertiesRequestBuilder) CreateGetRequestInformationWithRequestConfiguration(requestConfiguration *SingleValueExtendedPropertiesRequestBuilderGetRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.GET
if requestConfiguration != nil {
if requestConfiguration.QueryParameters != nil {
requestInfo.AddQueryParameters(*(requestConfiguration.QueryParameters))
}
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// CreatePostRequestInformation create new navigation property to singleValueExtendedProperties for me
func (m *SingleValueExtendedPropertiesRequestBuilder) CreatePostRequestInformation(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.SingleValueLegacyExtendedPropertyable)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
return m.CreatePostRequestInformationWithRequestConfiguration(body, nil);
}
// CreatePostRequestInformationWithRequestConfiguration create new navigation property to singleValueExtendedProperties for me
func (m *SingleValueExtendedPropertiesRequestBuilder) CreatePostRequestInformationWithRequestConfiguration(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.SingleValueLegacyExtendedPropertyable, requestConfiguration *SingleValueExtendedPropertiesRequestBuilderPostRequestConfiguration)(*i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.RequestInformation, error) {
requestInfo := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.NewRequestInformation()
requestInfo.UrlTemplate = m.urlTemplate
requestInfo.PathParameters = m.pathParameters
requestInfo.Method = i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.POST
requestInfo.SetContentFromParsable(m.requestAdapter, "application/json", body)
if requestConfiguration != nil {
requestInfo.AddRequestHeaders(requestConfiguration.Headers)
requestInfo.AddRequestOptions(requestConfiguration.Options)
}
return requestInfo, nil
}
// Get the collection of single-value extended properties defined for the message. Nullable.
func (m *SingleValueExtendedPropertiesRequestBuilder) Get()(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.SingleValueLegacyExtendedPropertyCollectionResponseable, error) {
return m.GetWithRequestConfigurationAndResponseHandler(nil, nil);
}
// GetWithRequestConfigurationAndResponseHandler the collection of single-value extended properties defined for the message. Nullable.
func (m *SingleValueExtendedPropertiesRequestBuilder) GetWithRequestConfigurationAndResponseHandler(requestConfiguration *SingleValueExtendedPropertiesRequestBuilderGetRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.SingleValueLegacyExtendedPropertyCollectionResponseable, error) {
requestInfo, err := m.CreateGetRequestInformationWithRequestConfiguration(requestConfiguration);
if err != nil {
return nil, err
}
errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {
"4XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
"5XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateSingleValueLegacyExtendedPropertyCollectionResponseFromDiscriminatorValue, responseHandler, errorMapping)
if err != nil {
return nil, err
}
return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.SingleValueLegacyExtendedPropertyCollectionResponseable), nil
}
// Post create new navigation property to singleValueExtendedProperties for me
func (m *SingleValueExtendedPropertiesRequestBuilder) Post(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.SingleValueLegacyExtendedPropertyable)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.SingleValueLegacyExtendedPropertyable, error) {
return m.PostWithRequestConfigurationAndResponseHandler(body, nil, nil);
}
// PostWithRequestConfigurationAndResponseHandler create new navigation property to singleValueExtendedProperties for me
func (m *SingleValueExtendedPropertiesRequestBuilder) PostWithRequestConfigurationAndResponseHandler(body ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.SingleValueLegacyExtendedPropertyable, requestConfiguration *SingleValueExtendedPropertiesRequestBuilderPostRequestConfiguration, responseHandler i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ResponseHandler)(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.SingleValueLegacyExtendedPropertyable, error) {
requestInfo, err := m.CreatePostRequestInformationWithRequestConfiguration(body, requestConfiguration);
if err != nil {
return nil, err
}
errorMapping := i2ae4187f7daee263371cb1c977df639813ab50ffa529013b7437480d1ec0158f.ErrorMappings {
"4XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
"5XX": i20a3050780ee0b0cde0a884a4f35429a20d60067e3bcda382ec5400079147459.CreateODataErrorFromDiscriminatorValue,
}
res, err := m.requestAdapter.SendAsync(requestInfo, ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.CreateSingleValueLegacyExtendedPropertyFromDiscriminatorValue, responseHandler, errorMapping)
if err != nil {
return nil, err
}
return res.(ie233ee762e29b4ba6970aa2a2efce4b7fde11697ca9ea81099d0f8269309c1be.SingleValueLegacyExtendedPropertyable), nil
}
| {
urlParams := make(map[string]string)
urlParams["request-raw-url"] = rawUrl
return NewSingleValueExtendedPropertiesRequestBuilderInternal(urlParams, requestAdapter)
} |
09_load_map.py | """
Platformer Game
"""
import arcade
# Constants
SCREEN_WIDTH = 1000
SCREEN_HEIGHT = 650
SCREEN_TITLE = "Platformer"
# Constants used to scale our sprites from their original size
CHARACTER_SCALING = 1
TILE_SCALING = 0.5
COIN_SCALING = 0.5
SPRITE_PIXEL_SIZE = 128
GRID_PIXEL_SIZE = SPRITE_PIXEL_SIZE * TILE_SCALING
# Movement speed of player, in pixels per frame
PLAYER_MOVEMENT_SPEED = 10
GRAVITY = 1
PLAYER_JUMP_SPEED = 20
class MyGame(arcade.Window):
"""
Main application class.
"""
def __init__(self):
# Call the parent class and set up the window
super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
# Our TileMap Object
self.tile_map = None
# Our Scene Object
self.scene = None
# Separate variable that holds the player sprite
self.player_sprite = None
# Our physics engine
self.physics_engine = None
# A Camera that can be used for scrolling the screen
self.camera = None
# A Camera that can be used to draw GUI elements
self.gui_camera = None
# Keep track of the score
self.score = 0
# Load sounds
self.collect_coin_sound = arcade.load_sound(":resources:sounds/coin1.wav")
self.jump_sound = arcade.load_sound(":resources:sounds/jump1.wav")
arcade.set_background_color(arcade.csscolor.CORNFLOWER_BLUE)
def setup(self):
"""Set up the game here. Call this function to restart the game."""
        # Set up the cameras
self.camera = arcade.Camera(self.width, self.height)
self.gui_camera = arcade.Camera(self.width, self.height)
# Name of map file to load
map_name = ":resources:tiled_maps/map.json"
# Layer specific options are defined based on Layer names in a dictionary
# Doing this will make the SpriteList for the platforms layer
# use spatial hashing for detection.
layer_options = {
"Platforms": {
"use_spatial_hash": True,
},
}
# Read in the tiled map
self.tile_map = arcade.load_tilemap(map_name, TILE_SCALING, layer_options)
        # Initialize the Scene with our TileMap; this will automatically add all layers
        # from the map as SpriteLists in the scene in the proper order.
self.scene = arcade.Scene.from_tilemap(self.tile_map)
# Keep track of the score
self.score = 0
# Set up the player, specifically placing it at these coordinates.
image_source = ":resources:images/animated_characters/female_adventurer/femaleAdventurer_idle.png"
self.player_sprite = arcade.Sprite(image_source, CHARACTER_SCALING)
self.player_sprite.center_x = 128
self.player_sprite.center_y = 128
self.scene.add_sprite("Player", self.player_sprite)
# --- Other stuff
# Set the background color
if self.tile_map.background_color:
arcade.set_background_color(self.tile_map.background_color)
# Create the 'physics engine'
self.physics_engine = arcade.PhysicsEnginePlatformer(
self.player_sprite, gravity_constant=GRAVITY, walls=self.scene["Platforms"]
)
def on_draw(self):
"""Render the screen."""
# Clear the screen to the background color
arcade.start_render()
# Activate the game camera
self.camera.use()
# Draw our Scene
self.scene.draw()
# Activate the GUI camera before drawing GUI elements
self.gui_camera.use()
# Draw our score on the screen, scrolling it with the viewport
score_text = f"Score: {self.score}"
arcade.draw_text(
score_text,
10,
10,
arcade.csscolor.WHITE,
18,
)
def on_key_press(self, key, modifiers):
"""Called whenever a key is pressed."""
if key == arcade.key.UP or key == arcade.key.W:
if self.physics_engine.can_jump():
self.player_sprite.change_y = PLAYER_JUMP_SPEED
arcade.play_sound(self.jump_sound)
elif key == arcade.key.LEFT or key == arcade.key.A:
self.player_sprite.change_x = -PLAYER_MOVEMENT_SPEED
elif key == arcade.key.RIGHT or key == arcade.key.D:
self.player_sprite.change_x = PLAYER_MOVEMENT_SPEED
def on_key_release(self, key, modifiers):
"""Called when the user releases a key."""
if key == arcade.key.LEFT or key == arcade.key.A:
self.player_sprite.change_x = 0
elif key == arcade.key.RIGHT or key == arcade.key.D:
self.player_sprite.change_x = 0
def center_camera_to_player(self):
screen_center_x = self.player_sprite.center_x - (self.camera.viewport_width / 2)
screen_center_y = self.player_sprite.center_y - (
self.camera.viewport_height / 2
)
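        # Clamp so the camera never scrolls past the map's lower-left corner (0, 0)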
if screen_center_x < 0:
screen_center_x = 0
if screen_center_y < 0:
screen_center_y = 0
player_centered = screen_center_x, screen_center_y
self.camera.move_to(player_centered)
def on_update(self, delta_time):
"""Movement and game logic"""
| # Move the player with the physics engine
self.physics_engine.update()
# See if we hit any coins
coin_hit_list = arcade.check_for_collision_with_list(
self.player_sprite, self.scene["Coins"]
)
# Loop through each coin we hit (if any) and remove it
for coin in coin_hit_list:
# Remove the coin
coin.remove_from_sprite_lists()
# Play a sound
arcade.play_sound(self.collect_coin_sound)
# Add one to the score
self.score += 1
# Position the camera
self.center_camera_to_player()
def main():
"""Main function"""
window = MyGame()
window.setup()
arcade.run()
if __name__ == "__main__":
main() | |
qxsj_btn.js | var api_noPagedList = contextPath + "/system/base/qxsj/getAllList";
var api_save = contextPath + "/system/base/menu/menuQxsjSave";
var api_delete = contextPath + "/system/base/menu/menuQxsjDelete";
/**
 * Permission information JS
*/
$(function() {
// 表格初始化
$('#qxsjInfoTable').bootstrapTable({
cache : false,
striped : true,
pagination : false,
toolbar : '#toolbar',
clickToSelect : true,
columns : [
{ field : "", checkbox : true, },
{ field : "qxsj_stat", title : "状态", align : "center", valign : "middle" },
{ field : "qxsj_code", title : "代码", align : "center", valign : "middle" },
{ field : "qxsj_name", title : "名称", align : "center", valign : "middle" },
{ field : "qxsj_type", title : "类型", align : "center", valign : "middle" },
],
formatNoMatches : function() {
return NOT_FOUND_DATAS;
},
		onLoadSuccess : function(data){ // this callback doesn't work well
$('#qxsjInfoTable').bootstrapTable("checkBy",{field:"qxsj_code",values:['btnAdd','search']});
}
});
searchList();
});
/**
* 常用按钮
*/
function useful(){
$('#qxsjInfoTable').bootstrapTable("checkBy", {
field : "qxsj_code",
values : [ 'btnAdd', 'search', 'cancle', 'btnEditSave', 'btnAddSave', 'btnDelete', 'btnEdit']
});
}
// Query the table data
function searchList() {
var menu_id = $("input[name='menu_id']").val();
var data = {appid:menu_id};
commonRowDatas("qxsjInfoTable", data, api_noPagedList, "commonCallback", false);
}
// Add permission buttons
function saveMenuBtn(){
var rowCount = selectedCount("qxsjInfoTable");
	if (rowCount > | Get the selected rows
var rows = selectedRows("qxsjInfoTable");
var ids = "";
var row = {};
for ( var i = 0; i < rows.length; i++) {
ids += rows[i].qxsj_code + ",";
}
var menu_id = $("input[name='menu_id']").val();
var data = {
ids : ids,
menu_id:menu_id
};
var info = $message("InfoOfSave", null);
$confirm(info, POST, api_save, data, searchList);
} else {
var error = $message("ErrorSelect2Operate", null);
showOnlyMessage(ERROR, error);
}
}
// Delete permission buttons
function deleteMenuBtn(){
var rowCount = selectedCount("qxsjInfoTable");
if (rowCount > 0) {
		// Get the selected rows
var rows = selectedRows("qxsjInfoTable");
var ids = "";
var row = {};
for ( var i = 0; i < rows.length; i++) {
ids += rows[i].qxsj_code + ",";
}
var menu_id = $("input[name='menu_id']").val();
var data = {
ids : ids,
menu_id:menu_id
};
var info = $message("InfoOfDelete", null);
$confirm(info, POST, api_delete, data, searchList);
} else {
var error = $message("ErrorSelect2Operate", null);
showOnlyMessage(ERROR, error);
}
}
| 0) {
// |
test_default.py | # Copyright (c) 2014-present, Facebook, Inc.
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def | (host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
| test_hosts_file |
alert_test.go | package usecase_test
import (
"encoding/json"
"errors"
"testing"
"time"
"github.com/deepalert/deepalert"
"github.com/deepalert/deepalert/internal/adaptor"
"github.com/deepalert/deepalert/internal/handler"
"github.com/deepalert/deepalert/internal/mock"
"github.com/deepalert/deepalert/internal/service"
"github.com/deepalert/deepalert/internal/usecase"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func | (t *testing.T) {
basicSetup := func() (*handler.Arguments, adaptor.SFnClient, adaptor.Repository) {
dummySFn, _ := mock.NewSFnClient("")
dummyRepo := mock.NewRepository("", "")
args := &handler.Arguments{
NewRepository: func(string, string) adaptor.Repository { return dummyRepo },
NewSFn: func(string) (adaptor.SFnClient, error) { return dummySFn, nil },
EnvVars: handler.EnvVars{
InspectorMashine: "arn:aws:states:us-east-1:111122223333:stateMachine:blue",
ReviewMachine: "arn:aws:states:us-east-1:111122223333:stateMachine:orange",
},
}
return args, dummySFn, dummyRepo
}
t.Run("Recept single alert", func(t *testing.T) {
alert := &deepalert.Alert{
AlertKey: "5",
RuleID: "five",
RuleName: "fifth",
Detector: "ao",
}
args, dummySFn, dummyRepo := basicSetup()
report, err := usecase.HandleAlert(args, alert, time.Now())
require.NoError(t, err)
assert.NotNil(t, report)
assert.NotEqual(t, "", report.ID)
repoSvc := service.NewRepositoryService(dummyRepo, 10)
t.Run("StepFunctions should be executed", func(t *testing.T) {
sfn, ok := dummySFn.(*mock.SFnClient)
require.True(t, ok)
require.Equal(t, 2, len(sfn.Input))
assert.Equal(t, "arn:aws:states:us-east-1:111122223333:stateMachine:blue", *sfn.Input[0].StateMachineArn)
assert.Equal(t, "arn:aws:states:us-east-1:111122223333:stateMachine:orange", *sfn.Input[1].StateMachineArn)
var report1, report2 deepalert.Report
require.NoError(t, json.Unmarshal([]byte(*sfn.Input[0].Input), &report1))
require.Equal(t, 1, len(report1.Alerts))
assert.Equal(t, alert, report1.Alerts[0])
require.NoError(t, json.Unmarshal([]byte(*sfn.Input[1].Input), &report2))
require.Equal(t, 1, len(report2.Alerts))
assert.Equal(t, alert, report2.Alerts[0])
assert.Equal(t, report1, report2)
})
t.Run("AlertCachce should be stored in repository", func(t *testing.T) {
alertCache, err := repoSvc.FetchAlertCache(report.ID)
require.NoError(t, err)
require.Equal(t, 1, len(alertCache))
assert.Equal(t, alert, alertCache[0])
})
t.Run("Report should be stored in repository", func(t *testing.T) {
report, err := repoSvc.GetReport(report.ID)
require.NoError(t, err)
require.Equal(t, 1, len(report.Alerts))
assert.Equal(t, alert, report.Alerts[0])
})
})
t.Run("Error cases", func(t *testing.T) {
t.Run("Alert without Detector field is not allowed", func(t *testing.T) {
alert := &deepalert.Alert{
AlertKey: "5",
RuleID: "five",
RuleName: "fifth",
Detector: "",
}
args, dummySFn, _ := basicSetup()
report, err := usecase.HandleAlert(args, alert, time.Now())
require.Error(t, err)
assert.True(t, errors.Is(err, deepalert.ErrInvalidAlert))
assert.Nil(t, report)
sfn, ok := dummySFn.(*mock.SFnClient)
require.True(t, ok)
require.Equal(t, 0, len(sfn.Input))
})
t.Run("Alert without RuleID field is not allowed", func(t *testing.T) {
alert := &deepalert.Alert{
AlertKey: "5",
RuleID: "",
RuleName: "fifth",
Detector: "ao",
}
args, dummySFn, _ := basicSetup()
report, err := usecase.HandleAlert(args, alert, time.Now())
require.Error(t, err)
assert.True(t, errors.Is(err, deepalert.ErrInvalidAlert))
assert.Nil(t, report)
sfn, ok := dummySFn.(*mock.SFnClient)
require.True(t, ok)
require.Equal(t, 0, len(sfn.Input))
})
})
t.Run("Recept alerts with same AlertID", func(t *testing.T) {
// AlertID is calculated by AlertKey, RuleID and Detector
alert1 := &deepalert.Alert{
AlertKey: "123",
RuleID: "blue",
RuleName: "fifth",
Detector: "ao",
}
alert2 := &deepalert.Alert{
AlertKey: "123",
RuleID: "blue",
RuleName: "five",
Detector: "ao",
}
args, _, _ := basicSetup()
report1, err := usecase.HandleAlert(args, alert1, time.Now())
require.NoError(t, err)
assert.NotNil(t, report1)
assert.NotEqual(t, "", report1.ID)
report2, err := usecase.HandleAlert(args, alert2, time.Now())
require.NoError(t, err)
assert.NotNil(t, report2)
assert.NotEqual(t, "", report2.ID)
t.Run("ReportIDs should be same", func(t *testing.T) {
assert.Equal(t, report1.ID, report2.ID)
})
})
t.Run("ReportIDs should be different if AlertID is not same", func(t *testing.T) {
// AlertID is calculated by AlertKey, RuleID and Detector
args, _, _ := basicSetup()
t.Run("Different AlertKey", func(t *testing.T) {
alert1 := &deepalert.Alert{
AlertKey: "234",
RuleID: "blue",
RuleName: "fifth",
Detector: "ao",
}
alert2 := &deepalert.Alert{
AlertKey: "123",
RuleID: "blue",
RuleName: "five",
Detector: "ao",
}
report1, err := usecase.HandleAlert(args, alert1, time.Now())
require.NoError(t, err)
report2, err := usecase.HandleAlert(args, alert2, time.Now())
require.NoError(t, err)
assert.NotEqual(t, report1.ID, report2.ID)
})
t.Run("Different RuleID", func(t *testing.T) {
alert1 := &deepalert.Alert{
AlertKey: "123",
RuleID: "blue",
RuleName: "fifth",
Detector: "ao",
}
alert2 := &deepalert.Alert{
AlertKey: "123",
RuleID: "orange",
RuleName: "five",
Detector: "ao",
}
report1, err := usecase.HandleAlert(args, alert1, time.Now())
require.NoError(t, err)
report2, err := usecase.HandleAlert(args, alert2, time.Now())
require.NoError(t, err)
assert.NotEqual(t, report1.ID, report2.ID)
})
t.Run("Different Detector", func(t *testing.T) {
alert1 := &deepalert.Alert{
AlertKey: "123",
RuleID: "blue",
RuleName: "fifth",
Detector: "ao",
}
alert2 := &deepalert.Alert{
AlertKey: "123",
RuleID: "blue",
RuleName: "five",
Detector: "tou",
}
report1, err := usecase.HandleAlert(args, alert1, time.Now())
require.NoError(t, err)
report2, err := usecase.HandleAlert(args, alert2, time.Now())
require.NoError(t, err)
assert.NotEqual(t, report1.ID, report2.ID)
})
})
}
| TestHandleAlert |
model_architectures.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for saving/loading function for keras Model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python import keras
# Declaring namedtuple()
ModelFn = collections.namedtuple('ModelFn',
['model', 'input_shape', 'target_shape'])
def basic_sequential():
|
def basic_sequential_deferred():
"""Sequential model with deferred input shape."""
model = keras.Sequential([
keras.layers.Dense(3, activation='relu'),
keras.layers.Dense(2, activation='softmax'),
])
return ModelFn(model, (None, 3), (None, 2))
def stacked_rnn():
"""Stacked RNN model."""
inputs = keras.Input((None, 3))
layer = keras.layers.RNN([keras.layers.LSTMCell(2) for _ in range(3)])
x = layer(inputs)
outputs = keras.layers.Dense(2)(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 4, 3), (None, 2))
def lstm():
"""LSTM model."""
inputs = keras.Input((None, 3))
x = keras.layers.LSTM(4, return_sequences=True)(inputs)
x = keras.layers.LSTM(3, return_sequences=True)(x)
x = keras.layers.LSTM(2, return_sequences=False)(x)
outputs = keras.layers.Dense(2)(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 4, 3), (None, 2))
def multi_input_multi_output():
"""Multi-input Multi-ouput model."""
body_input = keras.Input(shape=(None,), name='body')
tags_input = keras.Input(shape=(2,), name='tags')
x = keras.layers.Embedding(10, 4)(body_input)
body_features = keras.layers.LSTM(5)(x)
x = keras.layers.concatenate([body_features, tags_input])
pred_1 = keras.layers.Dense(2, activation='sigmoid', name='priority')(x)
pred_2 = keras.layers.Dense(3, activation='softmax', name='department')(x)
model = keras.Model(
inputs=[body_input, tags_input], outputs=[pred_1, pred_2])
return ModelFn(model, [(None, 1), (None, 2)], [(None, 2), (None, 3)])
def nested_sequential_in_functional():
"""A sequential model nested in a functional model."""
inner_model = keras.Sequential([
keras.layers.Dense(3, activation='relu', input_shape=(3,)),
keras.layers.Dense(2, activation='relu'),
])
inputs = keras.Input(shape=(3,))
x = inner_model(inputs)
outputs = keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 3), (None, 2))
def seq_to_seq():
"""Sequence to sequence model."""
num_encoder_tokens = 3
num_decoder_tokens = 3
latent_dim = 2
encoder_inputs = keras.Input(shape=(None, num_encoder_tokens))
encoder = keras.layers.LSTM(latent_dim, return_state=True)
_, state_h, state_c = encoder(encoder_inputs)
encoder_states = [state_h, state_c]
decoder_inputs = keras.Input(shape=(None, num_decoder_tokens))
decoder_lstm = keras.layers.LSTM(
latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(
decoder_inputs, initial_state=encoder_states)
decoder_dense = keras.layers.Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
model = keras.Model([encoder_inputs, decoder_inputs], decoder_outputs)
return ModelFn(
model, [(None, 2, num_encoder_tokens), (None, 2, num_decoder_tokens)],
(None, 2, num_decoder_tokens))
def shared_layer_functional():
"""Shared layer in a functional model."""
main_input = keras.Input(shape=(10,), dtype='int32', name='main_input')
x = keras.layers.Embedding(
output_dim=5, input_dim=4, input_length=10)(main_input)
lstm_out = keras.layers.LSTM(3)(x)
auxiliary_output = keras.layers.Dense(
1, activation='sigmoid', name='aux_output')(lstm_out)
auxiliary_input = keras.Input(shape=(5,), name='aux_input')
x = keras.layers.concatenate([lstm_out, auxiliary_input])
x = keras.layers.Dense(2, activation='relu')(x)
main_output = keras.layers.Dense(
1, activation='sigmoid', name='main_output')(x)
model = keras.Model(
inputs=[main_input, auxiliary_input],
outputs=[main_output, auxiliary_output])
return ModelFn(model, [(None, 10), (None, 5)], [(None, 1), (None, 1)])
def shared_sequential():
"""Shared sequential model in a functional model."""
inner_model = keras.Sequential([
keras.layers.Conv2D(2, 3, activation='relu'),
keras.layers.Conv2D(2, 3, activation='relu'),
])
inputs_1 = keras.Input((5, 5, 3))
inputs_2 = keras.Input((5, 5, 3))
x1 = inner_model(inputs_1)
x2 = inner_model(inputs_2)
x = keras.layers.concatenate([x1, x2])
outputs = keras.layers.GlobalAveragePooling2D()(x)
model = keras.Model([inputs_1, inputs_2], outputs)
return ModelFn(model, [(None, 5, 5, 3), (None, 5, 5, 3)], (None, 4))
class _MySubclassModel(keras.Model):
"""A subclass model."""
def __init__(self):
super(_MySubclassModel, self).__init__(name='my_subclass_model')
self.dense1 = keras.layers.Dense(8, activation='relu')
self.dense2 = keras.layers.Dense(2, activation='softmax')
self.bn = keras.layers.BatchNormalization()
self.dp = keras.layers.Dropout(0.5)
def call(self, inputs, **kwargs):
x = self.dense1(inputs)
x = self.dp(x)
x = self.bn(x)
return self.dense2(x)
def nested_subclassed_model():
"""A subclass model nested in another subclass model."""
class NestedSubclassModel(keras.Model):
"""A nested subclass model."""
def __init__(self):
super(NestedSubclassModel, self).__init__()
self.dense1 = keras.layers.Dense(4, activation='relu')
self.dense2 = keras.layers.Dense(2, activation='relu')
self.bn = keras.layers.BatchNormalization()
self.inner_subclass_model = _MySubclassModel()
def call(self, inputs):
x = self.dense1(inputs)
x = self.bn(x)
x = self.inner_subclass_model(x)
return self.dense2(x)
return ModelFn(NestedSubclassModel(), (None, 3), (None, 2))
def nested_subclassed_in_functional_model():
"""A subclass model nested in a functional model."""
inner_subclass_model = _MySubclassModel()
inputs = keras.Input(shape=(3,))
x = inner_subclass_model(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(2, activation='softmax')(x)
model = keras.Model(inputs, outputs)
return ModelFn(model, (None, 3), (None, 2))
def nested_functional_in_subclassed_model():
"""A functional model nested in a subclass model."""
def get_functional_model():
inputs = keras.Input(shape=(4,))
x = keras.layers.Dense(4, activation='relu')(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(2)(x)
return keras.Model(inputs, outputs)
class NestedFunctionalInSubclassModel(keras.Model):
"""A functional nested in subclass model."""
def __init__(self):
super(NestedFunctionalInSubclassModel, self).__init__(
name='nested_functional_in_subclassed_model')
self.dense1 = keras.layers.Dense(4, activation='relu')
self.dense2 = keras.layers.Dense(2, activation='relu')
self.inner_functional_model = get_functional_model()
def call(self, inputs):
x = self.dense1(inputs)
x = self.inner_functional_model(x)
return self.dense2(x)
return ModelFn(NestedFunctionalInSubclassModel(), (None, 3), (None, 2))
def shared_layer_subclassed_model():
"""Shared layer in a subclass model."""
class SharedLayerSubclassModel(keras.Model):
"""A subclass model with shared layers."""
def __init__(self):
super(SharedLayerSubclassModel, self).__init__(
name='shared_layer_subclass_model')
self.dense = keras.layers.Dense(3, activation='relu')
self.dp = keras.layers.Dropout(0.5)
self.bn = keras.layers.BatchNormalization()
def call(self, inputs):
x = self.dense(inputs)
x = self.dp(x)
x = self.bn(x)
return self.dense(x)
return ModelFn(SharedLayerSubclassModel(), (None, 3), (None, 3))
def functional_with_keyword_args():
"""A functional model with keyword args."""
inputs = keras.Input(shape=(3,))
x = keras.layers.Dense(4)(inputs)
x = keras.layers.BatchNormalization()(x)
outputs = keras.layers.Dense(2)(x)
model = keras.Model(inputs, outputs, name='m', trainable=False)
return ModelFn(model, (None, 3), (None, 2))
ALL_MODELS = [
('basic_sequential', basic_sequential),
('basic_sequential_deferred', basic_sequential_deferred),
('stacked_rnn', stacked_rnn),
('lstm', lstm),
('multi_input_multi_output', multi_input_multi_output),
('nested_sequential_in_functional', nested_sequential_in_functional),
('seq_to_seq', seq_to_seq),
('shared_layer_functional', shared_layer_functional),
('shared_sequential', shared_sequential),
('nested_subclassed_model', nested_subclassed_model),
('nested_subclassed_in_functional_model',
nested_subclassed_in_functional_model),
('nested_functional_in_subclassed_model',
nested_functional_in_subclassed_model),
('shared_layer_subclassed_model', shared_layer_subclassed_model),
('functional_with_keyword_args', functional_with_keyword_args)
]
def get_models(exclude_models=None):
  """Get all models excluding the specified ones."""
  # Guard against the default: `x in None` would raise a TypeError.
  exclude_models = exclude_models or []
  models = [model for model in ALL_MODELS
            if model[0] not in exclude_models]
  return models
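# Illustrative usage (model names taken from ALL_MODELS above): fetch every
# (name, model_fn) pair except the recurrent architectures.
#   models = get_models(exclude_models=['lstm', 'stacked_rnn'])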
| """Basic sequential model."""
model = keras.Sequential([
keras.layers.Dense(3, activation='relu', input_shape=(3,)),
keras.layers.Dense(2, activation='softmax'),
])
return ModelFn(model, (None, 3), (None, 2)) |
network.rs | // Copyright (c) The Libra Core Contributors
// SPDX-License-Identifier: Apache-2.0
use crate::{
chained_bft::{
common::{Author, Payload},
consensus_types::{
block::Block,
proposal_msg::{ProposalMsg, ProposalUncheckedSignatures},
sync_info::SyncInfo,
timeout_msg::TimeoutMsg,
vote_msg::VoteMsg,
},
epoch_manager::EpochManager,
},
counters,
};
use bytes::Bytes;
use failure::{self, ResultExt};
use futures::{
channel::oneshot, stream::select, FutureExt, SinkExt, Stream, StreamExt, TryFutureExt,
TryStreamExt,
};
use solana_libra_channel;
use solana_libra_crypto::HashValue;
use solana_libra_logger::prelude::*;
use solana_libra_network::{
proto::{
BlockRetrievalStatus, ConsensusMsg, ConsensusMsg_oneof, Proposal, RequestBlock,
RespondBlock, SyncInfo as SyncInfoProto, TimeoutMsg as TimeoutMsgProto, Vote,
},
validator_network::{ConsensusNetworkEvents, ConsensusNetworkSender, Event, RpcError},
};
use solana_libra_prost_ext::MessageExt;
use solana_libra_types::account_address::AccountAddress;
use std::{
convert::TryFrom,
sync::Arc,
time::{Duration, Instant},
};
use tokio::runtime::TaskExecutor;
/// The response sent back from EventProcessor for the BlockRetrievalRequest.
#[derive(Debug)]
pub struct BlockRetrievalResponse<T> {
pub status: BlockRetrievalStatus,
pub blocks: Vec<Block<T>>,
}
impl<T: Payload> BlockRetrievalResponse<T> {
pub fn verify(&self, block_id: HashValue, num_blocks: u64) -> failure::Result<()> {
ensure!(
self.status != BlockRetrievalStatus::Succeeded
|| self.blocks.len() as u64 == num_blocks,
"not enough blocks returned, expect {}, get {}",
num_blocks,
self.blocks.len(),
);
self.blocks
.iter()
.try_fold(block_id, |expected_id, block| {
ensure!(
block.id() == expected_id,
"blocks doesn't form a chain: expect {}, get {}",
expected_id,
block.id()
);
Ok(block.parent_id())
})
.map(|_| ())
}
}
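// Example of the chain layout `verify` accepts (ids are illustrative, not from the
// source): for a request of `num_blocks = 3` starting at `block_id = C`, the response
// must be `[C, B, A]` with `C.parent_id == B.id` and `B.parent_id == A.id`.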
/// BlockRetrievalRequest carries a block id for the requested block as well as the
/// oneshot sender to deliver the response.
#[derive(Debug)]
pub struct BlockRetrievalRequest<T> {
pub block_id: HashValue,
pub num_blocks: u64,
pub response_sender: oneshot::Sender<BlockRetrievalResponse<T>>,
}
/// Just a convenience struct to keep all the network proxy receiving queues in one place.
/// Will be returned by the networking trait upon startup.
pub struct NetworkReceivers<T> {
pub proposals: solana_libra_channel::Receiver<ProposalMsg<T>>,
pub votes: solana_libra_channel::Receiver<VoteMsg>,
pub block_retrieval: solana_libra_channel::Receiver<BlockRetrievalRequest<T>>,
pub timeout_msgs: solana_libra_channel::Receiver<TimeoutMsg>,
pub sync_info_msgs: solana_libra_channel::Receiver<(SyncInfo, AccountAddress)>,
}
/// Implements the actual networking support for all consensus messaging.
pub struct ConsensusNetworkImpl {
author: Author,
network_sender: ConsensusNetworkSender,
network_events: Option<ConsensusNetworkEvents>,
// Self sender and self receivers provide a shortcut for sending the messages to itself.
// (self sending is not supported by the networking API).
// Note that we do not support self rpc requests as it might cause infinite recursive calls.
self_sender: solana_libra_channel::Sender<failure::Result<Event<ConsensusMsg>>>,
self_receiver: Option<solana_libra_channel::Receiver<failure::Result<Event<ConsensusMsg>>>>,
epoch_mgr: Arc<EpochManager>,
}
impl Clone for ConsensusNetworkImpl {
fn clone(&self) -> Self {
Self {
author: self.author,
network_sender: self.network_sender.clone(),
network_events: None,
self_sender: self.self_sender.clone(),
self_receiver: None,
epoch_mgr: Arc::clone(&self.epoch_mgr),
}
}
}
impl ConsensusNetworkImpl {
pub fn new(
author: Author,
network_sender: ConsensusNetworkSender,
network_events: ConsensusNetworkEvents,
epoch_mgr: Arc<EpochManager>,
) -> Self {
let (self_sender, self_receiver) =
solana_libra_channel::new(1_024, &counters::PENDING_SELF_MESSAGES);
ConsensusNetworkImpl {
author,
network_sender,
network_events: Some(network_events),
self_sender,
self_receiver: Some(self_receiver),
epoch_mgr,
}
}
/// Establishes the initial connections with the peers and returns the receivers.
pub fn start<T: Payload>(&mut self, executor: &TaskExecutor) -> NetworkReceivers<T> {
let (proposal_tx, proposal_rx) =
solana_libra_channel::new(1_024, &counters::PENDING_PROPOSAL);
let (vote_tx, vote_rx) = solana_libra_channel::new(1_024, &counters::PENDING_VOTES);
let (block_request_tx, block_request_rx) =
solana_libra_channel::new(1_024, &counters::PENDING_BLOCK_REQUESTS);
let (timeout_msg_tx, timeout_msg_rx) =
solana_libra_channel::new(1_024, &counters::PENDING_NEW_ROUND_MESSAGES);
let (sync_info_tx, sync_info_rx) =
solana_libra_channel::new(1_024, &counters::PENDING_SYNC_INFO_MSGS);
let network_events = self
.network_events
.take()
.expect("[consensus] Failed to start; network_events stream is already taken")
.map_err(Into::<failure::Error>::into);
let own_msgs = self
.self_receiver
.take()
.expect("[consensus]: self receiver is already taken");
let all_events = select(network_events, own_msgs);
executor.spawn(
NetworkTask {
proposal_tx,
vote_tx,
block_request_tx,
timeout_msg_tx,
sync_info_tx,
all_events,
epoch_mgr: Arc::clone(&self.epoch_mgr),
}
.run()
.boxed()
.unit_error()
.compat(),
);
NetworkReceivers {
proposals: proposal_rx,
votes: vote_rx,
block_retrieval: block_request_rx,
timeout_msgs: timeout_msg_rx,
sync_info_msgs: sync_info_rx,
}
}
    /// Tries to retrieve `num_blocks` blocks backwards, starting from `block_id`, from the
    /// given peer. The returned future is fulfilled with a `BlockRetrievalResponse` on
    /// success, or with an error on failure.
pub async fn request_block<T: Payload>(
&mut self,
block_id: HashValue,
num_blocks: u64,
from: Author,
timeout: Duration,
) -> failure::Result<BlockRetrievalResponse<T>> {
ensure!(from != self.author, "Retrieve block from self");
let mut req_msg = RequestBlock::default();
req_msg.block_id = block_id.to_vec();
req_msg.num_blocks = num_blocks;
counters::BLOCK_RETRIEVAL_COUNT.inc_by(num_blocks as i64);
let pre_retrieval_instant = Instant::now();
let res_block = self
.network_sender
.request_block(from, req_msg, timeout)
.await?;
let mut blocks = vec![];
let status = res_block.status();
for block in res_block.blocks.into_iter() {
match Block::try_from(block) {
Ok(block) => {
block
.validate_signatures(self.epoch_mgr.validators().as_ref())
.and_then(|_| block.verify_well_formed())
.with_context(|e| format_err!("Invalid block because of {:?}", e))?;
blocks.push(block);
}
Err(e) => bail!("Failed to deserialize block because of {:?}", e),
};
}
counters::BLOCK_RETRIEVAL_DURATION_S.observe_duration(pre_retrieval_instant.elapsed());
let response = BlockRetrievalResponse { status, blocks };
response.verify(block_id, num_blocks)?;
Ok(response)
}
/// Tries to send the given proposal (block and proposer metadata) to all the participants.
/// A validator on the receiving end is going to be notified about a new proposal in the
/// proposal queue.
///
    /// The future is fulfilled as soon as the message is put into the internal mpsc channel
    /// to the network (which provides back pressure); it does not indicate that the message
    /// has been delivered or even sent out. There is no indication of when the message
    /// reaches the recipients, nor of any network failures.
pub async fn broadcast_proposal<T: Payload>(&mut self, proposal: ProposalMsg<T>) {
let msg = ConsensusMsg {
message: Some(ConsensusMsg_oneof::Proposal(proposal.into())),
};
self.broadcast(msg).await
}
async fn broadcast(&mut self, msg: ConsensusMsg) {
for peer in self.epoch_mgr.validators().get_ordered_account_addresses() {
if self.author == peer {
let self_msg = Event::Message((self.author, msg.clone()));
if let Err(err) = self.self_sender.send(Ok(self_msg)).await {
error!("Error delivering a self proposal: {:?}", err);
}
continue;
}
if let Err(err) = self.network_sender.send_to(peer, msg.clone()).await {
error!(
"Error broadcasting proposal to peer: {:?}, error: {:?}, msg: {:?}",
peer, err, msg
);
}
}
}
/// Sends the vote to the chosen recipients (typically that would be the recipients that
/// we believe could serve as proposers in the next round). The recipients on the receiving
/// end are going to be notified about a new vote in the vote queue.
///
    /// The future is fulfilled as soon as the message is put into the internal mpsc channel
    /// to the network (which provides back pressure); it does not indicate that the message
    /// has been delivered or even sent out. There is no indication of when the message
    /// reaches the recipients, nor of any network failures.
pub async fn send_vote(&self, vote_msg: VoteMsg, recipients: Vec<Author>) {
let mut network_sender = self.network_sender.clone();
let mut self_sender = self.self_sender.clone();
let msg = ConsensusMsg {
message: Some(ConsensusMsg_oneof::Vote(vote_msg.into())),
};
for peer in recipients {
if self.author == peer {
let self_msg = Event::Message((self.author, msg.clone()));
if let Err(err) = self_sender.send(Ok(self_msg)).await {
error!("Error delivering a self vote: {:?}", err);
}
continue;
}
if let Err(e) = network_sender.send_to(peer, msg.clone()).await {
error!("Failed to send a vote to peer {:?}: {:?}", peer, e);
}
}
}
/// Broadcasts timeout message to all validators
pub async fn broadcast_timeout_msg(&mut self, timeout_msg: TimeoutMsg) {
let msg = ConsensusMsg {
message: Some(ConsensusMsg_oneof::TimeoutMsg(timeout_msg.into())),
};
self.broadcast(msg).await
}
/// Sends the given sync info to the given author.
/// The future is fulfilled as soon as the message is added to the internal network channel
/// (does not indicate whether the message is delivered or sent out).
pub async fn send_sync_info(&self, sync_info: SyncInfo, recipient: Author) {
if recipient == self.author {
error!("An attempt to deliver sync info msg to itself: ignore.");
return;
}
let msg = ConsensusMsg {
message: Some(ConsensusMsg_oneof::SyncInfo(sync_info.into())),
};
let mut network_sender = self.network_sender.clone();
if let Err(e) = network_sender.send_to(recipient, msg).await {
warn!(
"Failed to send a sync info msg to peer {:?}: {:?}",
recipient, e
);
}
}
}
struct NetworkTask<T, S> {
proposal_tx: solana_libra_channel::Sender<ProposalMsg<T>>,
vote_tx: solana_libra_channel::Sender<VoteMsg>,
block_request_tx: solana_libra_channel::Sender<BlockRetrievalRequest<T>>,
timeout_msg_tx: solana_libra_channel::Sender<TimeoutMsg>,
sync_info_tx: solana_libra_channel::Sender<(SyncInfo, AccountAddress)>,
all_events: S,
epoch_mgr: Arc<EpochManager>,
}
impl<T, S> NetworkTask<T, S>
where
S: Stream<Item = failure::Result<Event<ConsensusMsg>>> + Unpin,
T: Payload,
{
pub async fn run(mut self) {
use ConsensusMsg_oneof::*;
while let Some(Ok(message)) = self.all_events.next().await {
match message {
Event::Message((peer_id, msg)) => {
let msg = match msg.message {
Some(msg) => msg,
None => {
warn!("Unexpected msg from {}: {:?}", peer_id, msg);
continue;
}
};
let r = match msg.clone() {
Proposal(proposal) => self.process_proposal(proposal).await.map_err(|e| {
security_log(SecurityEvent::InvalidConsensusProposal)
.error(&e)
.data(&msg)
.log();
e
}),
Vote(vote) => self.process_vote(vote).await,
TimeoutMsg(timeout_msg) => self.process_timeout_msg(timeout_msg).await,
SyncInfo(sync_info) => self.process_sync_info(sync_info, peer_id).await,
_ => {
warn!("Unexpected msg from {}: {:?}", peer_id, msg);
continue;
}
};
if let Err(e) = r {
warn!("Failed to process msg {:?}", e)
}
}
Event::RpcRequest((peer_id, msg, callback)) => {
let r = match msg.message {
Some(RequestBlock(request)) => {
self.process_request_block(request, callback).await
}
_ => {
warn!("Unexpected RPC from {}: {:?}", peer_id, msg);
continue;
}
};
if let Err(e) = r {
warn!("Failed to process RPC {:?}", e)
}
}
Event::NewPeer(peer_id) => {
debug!("Peer {} connected", peer_id);
}
Event::LostPeer(peer_id) => {
debug!("Peer {} disconnected", peer_id);
}
}
}
}
async fn process_proposal(&mut self, proposal: Proposal) -> failure::Result<()> {
let proposal = ProposalUncheckedSignatures::<T>::try_from(proposal)?;
let proposal = proposal
.validate_signatures(self.epoch_mgr.validators().as_ref())?
.verify_well_formed()?;
debug!("Received proposal {}", proposal);
self.proposal_tx.try_send(proposal)?;
Ok(())
}
async fn process_vote(&mut self, vote: Vote) -> failure::Result<()> |
async fn process_timeout_msg(&mut self, timeout_msg: TimeoutMsgProto) -> failure::Result<()> {
let timeout_msg = TimeoutMsg::try_from(timeout_msg)?;
timeout_msg
.verify(self.epoch_mgr.validators().as_ref())
.map_err(|e| {
security_log(SecurityEvent::InvalidConsensusRound)
.error(&e)
.data(&timeout_msg)
.log();
e
})?;
self.timeout_msg_tx.try_send(timeout_msg)?;
Ok(())
}
async fn process_sync_info(
&mut self,
sync_info: SyncInfoProto,
peer: AccountAddress,
) -> failure::Result<()> {
let sync_info = SyncInfo::try_from(sync_info)?;
sync_info
.verify(self.epoch_mgr.validators().as_ref())
.map_err(|e| {
security_log(SecurityEvent::InvalidSyncInfoMsg)
.error(&e)
.data(&sync_info)
.log();
e
})?;
self.sync_info_tx.try_send((sync_info, peer))?;
Ok(())
}
async fn process_request_block(
&mut self,
request: RequestBlock,
callback: oneshot::Sender<Result<Bytes, RpcError>>,
) -> failure::Result<()> {
let block_id = HashValue::from_slice(&request.block_id[..])?;
let num_blocks = request.num_blocks;
debug!(
"Received request_block RPC for {} blocks from {:?}",
num_blocks, block_id
);
let (tx, rx) = oneshot::channel();
let request = BlockRetrievalRequest {
block_id,
num_blocks,
response_sender: tx,
};
self.block_request_tx.try_send(request)?;
let BlockRetrievalResponse { status, blocks } = rx.await?;
let mut response = RespondBlock::default();
response.set_status(status);
response.blocks = blocks.into_iter().map(Into::into).collect();
let response_msg = ConsensusMsg {
message: Some(ConsensusMsg_oneof::RespondBlock(response)),
};
let response_data = response_msg.to_bytes()?;
callback
.send(Ok(response_data))
.map_err(|_| format_err!("handling inbound rpc call timed out"))
}
}
| {
let vote = VoteMsg::try_from(vote)?;
debug!("Received {}", vote);
vote.verify(self.epoch_mgr.validators().as_ref())
.map_err(|e| {
security_log(SecurityEvent::InvalidConsensusVote)
.error(&e)
.data(&vote)
.log();
e
})?;
self.vote_tx.try_send(vote)?;
Ok(())
} |
worker_host.rs | // Copyright 2018-2021 the Deno authors. All rights reserved. MIT license.
use crate::ops::TestingFeaturesEnabled;
use crate::permissions::resolve_read_allowlist;
use crate::permissions::resolve_write_allowlist;
use crate::permissions::EnvDescriptor;
use crate::permissions::FfiDescriptor;
use crate::permissions::NetDescriptor;
use crate::permissions::PermissionState;
use crate::permissions::Permissions;
use crate::permissions::ReadDescriptor;
use crate::permissions::RunDescriptor;
use crate::permissions::UnaryPermission;
use crate::permissions::UnitPermission;
use crate::permissions::WriteDescriptor;
use crate::web_worker::run_web_worker;
use crate::web_worker::SendableWebWorkerHandle;
use crate::web_worker::WebWorker;
use crate::web_worker::WebWorkerHandle;
use crate::web_worker::WebWorkerType;
use crate::web_worker::WorkerControlEvent;
use crate::web_worker::WorkerId;
use deno_core::error::custom_error;
use deno_core::error::AnyError;
use deno_core::op_async;
use deno_core::op_sync;
use deno_core::serde::de;
use deno_core::serde::de::SeqAccess;
use deno_core::serde::Deserialize;
use deno_core::serde::Deserializer;
use deno_core::Extension;
use deno_core::ModuleSpecifier;
use deno_core::OpState;
use deno_web::JsMessageData;
use log::debug;
use std::cell::RefCell;
use std::collections::HashMap;
use std::collections::HashSet;
use std::convert::From;
use std::fmt;
use std::path::PathBuf;
use std::rc::Rc;
use std::sync::Arc;
use std::thread::JoinHandle;
pub struct CreateWebWorkerArgs {
pub name: String,
pub worker_id: WorkerId,
pub parent_permissions: Permissions,
pub permissions: Permissions,
pub main_module: ModuleSpecifier,
pub use_deno_namespace: bool,
pub worker_type: WebWorkerType,
}
pub type CreateWebWorkerCb = dyn Fn(CreateWebWorkerArgs) -> (WebWorker, SendableWebWorkerHandle)
+ Sync
+ Send;
/// A holder for the callback that is used to create a new
/// WebWorker. It's a struct instead of a type alias
/// because `GothamState` used in `OpState` overrides the
/// value if type aliases have the same underlying type.
#[derive(Clone)]
pub struct CreateWebWorkerCbHolder(Arc<CreateWebWorkerCb>);
pub struct WorkerThread {
// It's an Option so we can take the value before dropping the WorkerThread.
join_handle: Option<JoinHandle<Result<(), AnyError>>>,
worker_handle: WebWorkerHandle,
// A WorkerThread that hasn't been explicitly terminated can only be removed
// from the WorkersTable once close messages have been received for both the
// control and message channels. See `close_channel`.
ctrl_closed: bool,
message_closed: bool,
}
impl WorkerThread {
fn terminate(mut self) {
self.worker_handle.clone().terminate();
self
.join_handle
.take()
.unwrap()
.join()
.expect("Worker thread panicked")
.expect("Panic in worker event loop");
// Optimization so the Drop impl doesn't try to terminate the worker handle
// again.
self.ctrl_closed = true;
self.message_closed = true;
}
}
impl Drop for WorkerThread {
fn drop(&mut self) {
// If either of the channels is closed, the worker thread has at least
// started closing, and its event loop won't start another run.
if !(self.ctrl_closed || self.message_closed) {
self.worker_handle.clone().terminate();
}
}
}
pub type WorkersTable = HashMap<WorkerId, WorkerThread>;
pub fn init(create_web_worker_cb: Arc<CreateWebWorkerCb>) -> Extension {
Extension::builder()
.state(move |state| {
state.put::<WorkersTable>(WorkersTable::default());
state.put::<WorkerId>(WorkerId::default());
let create_module_loader =
CreateWebWorkerCbHolder(create_web_worker_cb.clone());
state.put::<CreateWebWorkerCbHolder>(create_module_loader);
Ok(())
})
.ops(vec![
("op_create_worker", op_sync(op_create_worker)),
(
"op_host_terminate_worker",
op_sync(op_host_terminate_worker),
),
("op_host_post_message", op_sync(op_host_post_message)),
("op_host_recv_ctrl", op_async(op_host_recv_ctrl)),
("op_host_recv_message", op_async(op_host_recv_message)),
])
.build()
}
fn merge_boolean_permission(
mut main: UnitPermission,
worker: Option<PermissionState>,
) -> Result<UnitPermission, AnyError> {
if let Some(worker) = worker {
if worker < main.state {
return Err(custom_error(
"PermissionDenied",
"Can't escalate parent thread permissions",
));
} else {
main.state = worker;
}
}
Ok(main)
}
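// Every `merge_*_permission` helper below applies the same rule as the boolean case
// above: a worker may only narrow its parent's permissions, never escalate them.
// Illustrative example (values assumed, not taken from the source): a parent granted
// `net` may spawn a worker with `net: false`, but a parent denied `net` cannot spawn
// a worker with `net: true`; that fails with "PermissionDenied".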
fn merge_net_permission(
mut main: UnaryPermission<NetDescriptor>,
worker: Option<UnaryPermission<NetDescriptor>>,
) -> Result<UnaryPermission<NetDescriptor>, AnyError> {
if let Some(worker) = worker {
if (worker.global_state < main.global_state)
|| !worker
.granted_list
.iter()
.all(|x| main.check(&(&x.0, x.1)).is_ok())
{
return Err(custom_error(
"PermissionDenied",
"Can't escalate parent thread permissions",
));
} else {
main.global_state = worker.global_state;
main.granted_list = worker.granted_list;
}
}
Ok(main)
}
fn merge_read_permission(
mut main: UnaryPermission<ReadDescriptor>,
worker: Option<UnaryPermission<ReadDescriptor>>,
) -> Result<UnaryPermission<ReadDescriptor>, AnyError> {
if let Some(worker) = worker {
if (worker.global_state < main.global_state)
|| !worker
.granted_list
.iter()
.all(|x| main.check(x.0.as_path()).is_ok())
{
return Err(custom_error(
"PermissionDenied",
"Can't escalate parent thread permissions",
));
} else {
main.global_state = worker.global_state;
main.granted_list = worker.granted_list;
}
}
Ok(main)
}
fn merge_write_permission(
mut main: UnaryPermission<WriteDescriptor>,
worker: Option<UnaryPermission<WriteDescriptor>>,
) -> Result<UnaryPermission<WriteDescriptor>, AnyError> |
fn merge_env_permission(
mut main: UnaryPermission<EnvDescriptor>,
worker: Option<UnaryPermission<EnvDescriptor>>,
) -> Result<UnaryPermission<EnvDescriptor>, AnyError> {
if let Some(worker) = worker {
if (worker.global_state < main.global_state)
|| !worker.granted_list.iter().all(|x| main.check(&x.0).is_ok())
{
return Err(custom_error(
"PermissionDenied",
"Can't escalate parent thread permissions",
));
} else {
main.global_state = worker.global_state;
main.granted_list = worker.granted_list;
}
}
Ok(main)
}
fn merge_run_permission(
mut main: UnaryPermission<RunDescriptor>,
worker: Option<UnaryPermission<RunDescriptor>>,
) -> Result<UnaryPermission<RunDescriptor>, AnyError> {
if let Some(worker) = worker {
if (worker.global_state < main.global_state)
|| !worker.granted_list.iter().all(|x| main.check(&x.0).is_ok())
{
return Err(custom_error(
"PermissionDenied",
"Can't escalate parent thread permissions",
));
} else {
main.global_state = worker.global_state;
main.granted_list = worker.granted_list;
}
}
Ok(main)
}
fn merge_ffi_permission(
mut main: UnaryPermission<FfiDescriptor>,
worker: Option<UnaryPermission<FfiDescriptor>>,
) -> Result<UnaryPermission<FfiDescriptor>, AnyError> {
if let Some(worker) = worker {
if (worker.global_state < main.global_state)
|| !worker.granted_list.iter().all(|x| main.check(&x.0).is_ok())
{
return Err(custom_error(
"PermissionDenied",
"Can't escalate parent thread permissions",
));
} else {
main.global_state = worker.global_state;
main.granted_list = worker.granted_list;
}
}
Ok(main)
}
pub fn create_worker_permissions(
main_perms: Permissions,
worker_perms: PermissionsArg,
) -> Result<Permissions, AnyError> {
Ok(Permissions {
env: merge_env_permission(main_perms.env, worker_perms.env)?,
hrtime: merge_boolean_permission(main_perms.hrtime, worker_perms.hrtime)?,
net: merge_net_permission(main_perms.net, worker_perms.net)?,
ffi: merge_ffi_permission(main_perms.ffi, worker_perms.ffi)?,
read: merge_read_permission(main_perms.read, worker_perms.read)?,
run: merge_run_permission(main_perms.run, worker_perms.run)?,
write: merge_write_permission(main_perms.write, worker_perms.write)?,
})
}
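// Escalation rule shared by every merge_* helper above (a restatement, not
// new behavior): a worker may only narrow what the parent already holds.
// Assuming PermissionState orders Granted before Prompt before Denied, as
// the `<` comparisons imply, a parent started with e.g. `--allow-read=/tmp`
// can grant a worker `read: ["/tmp/scratch"]`, but `read: true` or
// `read: ["/etc"]` fails with PermissionDenied.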
#[derive(Debug, Deserialize)]
pub struct PermissionsArg {
#[serde(default, deserialize_with = "as_unary_env_permission")]
env: Option<UnaryPermission<EnvDescriptor>>,
#[serde(default, deserialize_with = "as_permission_state")]
hrtime: Option<PermissionState>,
#[serde(default, deserialize_with = "as_unary_net_permission")]
net: Option<UnaryPermission<NetDescriptor>>,
#[serde(default, deserialize_with = "as_unary_ffi_permission")]
ffi: Option<UnaryPermission<FfiDescriptor>>,
#[serde(default, deserialize_with = "as_unary_read_permission")]
read: Option<UnaryPermission<ReadDescriptor>>,
#[serde(default, deserialize_with = "as_unary_run_permission")]
run: Option<UnaryPermission<RunDescriptor>>,
#[serde(default, deserialize_with = "as_unary_write_permission")]
write: Option<UnaryPermission<WriteDescriptor>>,
}
fn as_permission_state<'de, D>(
deserializer: D,
) -> Result<Option<PermissionState>, D::Error>
where
D: Deserializer<'de>,
{
let value: bool = Deserialize::deserialize(deserializer)?;
match value {
true => Ok(Some(PermissionState::Granted)),
false => Ok(Some(PermissionState::Denied)),
}
}
struct UnaryPermissionBase {
global_state: PermissionState,
paths: Vec<String>,
}
struct ParseBooleanOrStringVec;
impl<'de> de::Visitor<'de> for ParseBooleanOrStringVec {
type Value = UnaryPermissionBase;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("a vector of strings or a boolean")
}
// visit_unit maps undefined/missing values to false
fn visit_unit<E>(self) -> Result<UnaryPermissionBase, E>
where
E: de::Error,
{
self.visit_bool(false)
}
fn visit_bool<E>(self, v: bool) -> Result<UnaryPermissionBase, E>
where
E: de::Error,
{
Ok(UnaryPermissionBase {
global_state: match v {
true => PermissionState::Granted,
false => PermissionState::Denied,
},
paths: Vec::new(),
})
}
fn visit_seq<V>(self, mut visitor: V) -> Result<UnaryPermissionBase, V::Error>
where
V: SeqAccess<'de>,
{
let mut vec: Vec<String> = Vec::new();
let mut value = visitor.next_element::<String>()?;
while value.is_some() {
vec.push(value.unwrap());
value = visitor.next_element()?;
}
Ok(UnaryPermissionBase {
global_state: PermissionState::Prompt,
paths: vec,
})
}
}
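// Illustrative mapping for the visitor above (a sketch assuming serde_json
// style input; not part of the original source):
//
//   "net": true          -> global_state: Granted, paths: []
//   "net": false         -> global_state: Denied,  paths: []
//   "net": ["deno.land"] -> global_state: Prompt,  paths: ["deno.land"]
//   field missing        -> visit_unit -> visit_bool(false) -> Denied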
fn as_unary_net_permission<'de, D>(
deserializer: D,
) -> Result<Option<UnaryPermission<NetDescriptor>>, D::Error>
where
D: Deserializer<'de>,
{
let value: UnaryPermissionBase =
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
let allowed: HashSet<NetDescriptor> = value
.paths
.into_iter()
.map(NetDescriptor::from_string)
.collect();
Ok(Some(UnaryPermission::<NetDescriptor> {
global_state: value.global_state,
granted_list: allowed,
..Default::default()
}))
}
fn as_unary_read_permission<'de, D>(
deserializer: D,
) -> Result<Option<UnaryPermission<ReadDescriptor>>, D::Error>
where
D: Deserializer<'de>,
{
let value: UnaryPermissionBase =
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
let paths: Vec<PathBuf> =
value.paths.into_iter().map(PathBuf::from).collect();
Ok(Some(UnaryPermission::<ReadDescriptor> {
global_state: value.global_state,
granted_list: resolve_read_allowlist(&Some(paths)),
..Default::default()
}))
}
fn as_unary_write_permission<'de, D>(
deserializer: D,
) -> Result<Option<UnaryPermission<WriteDescriptor>>, D::Error>
where
D: Deserializer<'de>,
{
let value: UnaryPermissionBase =
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
let paths: Vec<PathBuf> =
value.paths.into_iter().map(PathBuf::from).collect();
Ok(Some(UnaryPermission::<WriteDescriptor> {
global_state: value.global_state,
granted_list: resolve_write_allowlist(&Some(paths)),
..Default::default()
}))
}
fn as_unary_env_permission<'de, D>(
deserializer: D,
) -> Result<Option<UnaryPermission<EnvDescriptor>>, D::Error>
where
D: Deserializer<'de>,
{
let value: UnaryPermissionBase =
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
Ok(Some(UnaryPermission::<EnvDescriptor> {
global_state: value.global_state,
granted_list: value
.paths
.into_iter()
.map(|env| EnvDescriptor(env.to_uppercase()))
.collect(),
..Default::default()
}))
}
fn as_unary_run_permission<'de, D>(
deserializer: D,
) -> Result<Option<UnaryPermission<RunDescriptor>>, D::Error>
where
D: Deserializer<'de>,
{
let value: UnaryPermissionBase =
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
Ok(Some(UnaryPermission::<RunDescriptor> {
global_state: value.global_state,
granted_list: value.paths.into_iter().map(RunDescriptor).collect(),
..Default::default()
}))
}
fn as_unary_ffi_permission<'de, D>(
deserializer: D,
) -> Result<Option<UnaryPermission<FfiDescriptor>>, D::Error>
where
D: Deserializer<'de>,
{
let value: UnaryPermissionBase =
deserializer.deserialize_any(ParseBooleanOrStringVec)?;
Ok(Some(UnaryPermission::<FfiDescriptor> {
global_state: value.global_state,
granted_list: value.paths.into_iter().map(FfiDescriptor).collect(),
..Default::default()
}))
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct CreateWorkerArgs {
has_source_code: bool,
name: Option<String>,
permissions: Option<PermissionsArg>,
source_code: String,
specifier: String,
use_deno_namespace: bool,
worker_type: WebWorkerType,
}
/// Create worker as the host
fn op_create_worker(
state: &mut OpState,
args: CreateWorkerArgs,
_: (),
) -> Result<WorkerId, AnyError> {
let specifier = args.specifier.clone();
let maybe_source_code = if args.has_source_code {
Some(args.source_code.clone())
} else {
None
};
let args_name = args.name;
let use_deno_namespace = args.use_deno_namespace;
if use_deno_namespace {
super::check_unstable(state, "Worker.deno.namespace");
}
let worker_type = args.worker_type;
if let WebWorkerType::Classic = worker_type {
if let TestingFeaturesEnabled(false) = state.borrow() {
return Err(
deno_webstorage::DomExceptionNotSupportedError::new(
"Classic workers are not supported.",
)
.into(),
);
}
}
let parent_permissions = state.borrow::<Permissions>().clone();
let worker_permissions = if let Some(permissions) = args.permissions {
super::check_unstable(state, "Worker.deno.permissions");
create_worker_permissions(parent_permissions.clone(), permissions)?
} else {
parent_permissions.clone()
};
let worker_id = state.take::<WorkerId>();
let create_module_loader = state.take::<CreateWebWorkerCbHolder>();
state.put::<CreateWebWorkerCbHolder>(create_module_loader.clone());
state.put::<WorkerId>(worker_id.next().unwrap());
let module_specifier = deno_core::resolve_url(&specifier)?;
let worker_name = args_name.unwrap_or_else(|| "".to_string());
let (handle_sender, handle_receiver) = std::sync::mpsc::sync_channel::<
Result<SendableWebWorkerHandle, AnyError>,
>(1);
// Setup new thread
let thread_builder =
std::thread::Builder::new().name(format!("{}", worker_id));
// Spawn it
let join_handle = thread_builder.spawn(move || {
    // Any error inside this block is terminal:
    // - the JS worker is useless - it throws an exception and can't do anything
    //   else, so all actions performed on it should be no-ops
    // - the newly spawned thread exits
let (worker, external_handle) =
(create_module_loader.0)(CreateWebWorkerArgs {
name: worker_name,
worker_id,
parent_permissions,
permissions: worker_permissions,
main_module: module_specifier.clone(),
use_deno_namespace,
worker_type,
});
// Send thread safe handle from newly created worker to host thread
handle_sender.send(Ok(external_handle)).unwrap();
drop(handle_sender);
// At this point the only method of communication with host
// is using `worker.internal_channels`.
//
// Host can already push messages and interact with worker.
run_web_worker(worker, module_specifier, maybe_source_code)
})?;
// Receive WebWorkerHandle from newly created worker
let worker_handle = handle_receiver.recv().unwrap()?;
let worker_thread = WorkerThread {
join_handle: Some(join_handle),
worker_handle: worker_handle.into(),
ctrl_closed: false,
message_closed: false,
};
// At this point all interactions with worker happen using thread
// safe handler returned from previous function calls
state
.borrow_mut::<WorkersTable>()
.insert(worker_id, worker_thread);
Ok(worker_id)
}
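// Spawn handshake in sketch form (restating the flow above): the host takes
// the current WorkerId from OpState, stores the incremented id back for the
// next spawn, blocks on `handle_receiver.recv()` until the worker thread
// sends its SendableWebWorkerHandle, and only then records the WorkerThread
// in WorkersTable under the taken id. Every later op (post/recv/terminate)
// goes through that stored handle.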
fn op_host_terminate_worker(
state: &mut OpState,
id: WorkerId,
_: (),
) -> Result<(), AnyError> {
if let Some(worker_thread) = state.borrow_mut::<WorkersTable>().remove(&id) {
worker_thread.terminate();
} else {
debug!("tried to terminate non-existent worker {}", id);
}
Ok(())
}
enum WorkerChannel {
Ctrl,
Messages,
}
/// Close a worker's channel. If this results in both of a worker's channels
/// being closed, the worker will be removed from the workers table.
fn close_channel(
state: Rc<RefCell<OpState>>,
id: WorkerId,
channel: WorkerChannel,
) {
use std::collections::hash_map::Entry;
let mut s = state.borrow_mut();
let workers = s.borrow_mut::<WorkersTable>();
// `Worker.terminate()` might have been called already, meaning that we won't
// find the worker in the table - in that case ignore.
if let Entry::Occupied(mut entry) = workers.entry(id) {
let terminate = {
let worker_thread = entry.get_mut();
match channel {
WorkerChannel::Ctrl => {
worker_thread.ctrl_closed = true;
worker_thread.message_closed
}
WorkerChannel::Messages => {
worker_thread.message_closed = true;
worker_thread.ctrl_closed
}
}
};
if terminate {
entry.remove().terminate();
}
}
}
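// Shutdown sketch implied by the two flags above: closing one channel only
// marks it (ctrl_closed or message_closed); the worker is removed from the
// table and terminated when the second channel of the pair closes.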
/// Get control event from guest worker as host
async fn op_host_recv_ctrl(
state: Rc<RefCell<OpState>>,
id: WorkerId,
_: (),
) -> Result<WorkerControlEvent, AnyError> {
let worker_handle = {
let state = state.borrow();
let workers_table = state.borrow::<WorkersTable>();
let maybe_handle = workers_table.get(&id);
if let Some(handle) = maybe_handle {
handle.worker_handle.clone()
} else {
      // If the handle was not found it means the worker has already shut down
return Ok(WorkerControlEvent::Close);
}
};
let maybe_event = worker_handle.get_control_event().await?;
if let Some(event) = maybe_event {
// Terminal error means that worker should be removed from worker table.
if let WorkerControlEvent::TerminalError(_) = &event {
close_channel(state, id, WorkerChannel::Ctrl);
}
return Ok(event);
}
// If there was no event from worker it means it has already been closed.
close_channel(state, id, WorkerChannel::Ctrl);
Ok(WorkerControlEvent::Close)
}
async fn op_host_recv_message(
state: Rc<RefCell<OpState>>,
id: WorkerId,
_: (),
) -> Result<Option<JsMessageData>, AnyError> {
let worker_handle = {
let s = state.borrow();
let workers_table = s.borrow::<WorkersTable>();
let maybe_handle = workers_table.get(&id);
if let Some(handle) = maybe_handle {
handle.worker_handle.clone()
} else {
      // If the handle was not found it means the worker has already shut down
return Ok(None);
}
};
let ret = worker_handle.port.recv(state.clone()).await?;
if ret.is_none() {
close_channel(state, id, WorkerChannel::Messages);
}
Ok(ret)
}
/// Post message to guest worker as host
fn op_host_post_message(
state: &mut OpState,
id: WorkerId,
data: JsMessageData,
) -> Result<(), AnyError> {
if let Some(worker_thread) = state.borrow::<WorkersTable>().get(&id) {
debug!("post message to worker {}", id);
let worker_handle = worker_thread.worker_handle.clone();
worker_handle.port.send(state, data)?;
} else {
debug!("tried to post message to non-existent worker {}", id);
}
Ok(())
}
| {
if let Some(worker) = worker {
if (worker.global_state < main.global_state)
|| !worker
.granted_list
.iter()
.all(|x| main.check(x.0.as_path()).is_ok())
{
return Err(custom_error(
"PermissionDenied",
"Can't escalate parent thread permissions",
));
} else {
main.global_state = worker.global_state;
main.granted_list = worker.granted_list;
}
}
Ok(main)
} |
capabilities.rs | #[derive(Debug, Deserialize)]
pub struct | {
service: Service
}
#[derive(Debug, Deserialize)]
struct Service {
name: String,
title: String
}
| Capabilities |
log.go | package logger
import (
"encoding/json"
"fmt"
"runtime"
"strings"
"github.com/mshogin/randomtrader/pkg/bidcontext"
)
var debugDisabled = true
// EnableDebug ...
func EnableDebug() func() {
debugDisabled = false
return DisableDebug
}
// DisableDebug ...
func DisableDebug() {
debugDisabled = true
}
// Debugf ...
func Debugf(format string, args ...interface{}) {
if debugDisabled {
return
}
_, fileName, fileLine, ok := runtime.Caller(1)
var s string
if ok | else {
s = ""
}
fmt.Printf("DEBUG: "+s+" "+format+"\n", args...)
}
// Errorf ...
func Errorf(format string, args ...interface{}) {
fmt.Printf("ERROR: "+format+"\n", args...)
}
// Fatalf ...
func Fatalf(format string, args ...interface{}) {
panic(fmt.Errorf("FATAL: "+format+"\n", args...))
}
// Infof ...
func Infof(format string, args ...interface{}) {
fmt.Printf("INFO: "+format+"\n", args...)
}
// ProcessContext ...
func ProcessContext(ctx *bidcontext.BidContext) error {
_, err := json.MarshalIndent(ctx, "", " ")
if err != nil {
err = fmt.Errorf("cannot dump context: %w", err)
		Errorf("%s", err)
return err
}
// Infof(string(buf))
return nil
}
| {
s = fmt.Sprintf("%s:%d:", fileName, fileLine)
p := strings.Split(s, "/")
s = strings.Join(p[len(p)-2:], "/")
} |
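// Example usage (a sketch, not part of the package): with debug enabled,
// Debugf prefixes the message with the caller location trimmed to its last
// two path segments, e.g.
//
//	defer logger.EnableDebug()()
//	logger.Debugf("order %s placed", id)
//	// -> DEBUG: mypkg/order.go:42: order abc placed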
BitcoinLedgerProvider.d.ts | /// <reference types="node" />
import { LedgerProvider } from '@liquality/ledger-provider';
import { BitcoinNetwork } from '@liquality/bitcoin-networks';
import { bitcoin } from '@liquality/types';
import HwAppBitcoin from '@ledgerhq/hw-app-btc';
import { BIP32Interface } from 'bip32';
declare type WalletProviderConstructor<T = LedgerProvider<HwAppBitcoin>> = new (...args: any[]) => T;
interface BitcoinLedgerProviderOptions {
network: BitcoinNetwork;
Transport: any;
baseDerivationPath: string;
addressType?: bitcoin.AddressType;
}
declare const BitcoinLedgerProvider_base: (abstract new (...args: any[]) => {
_baseDerivationPath: string;
_network: BitcoinNetwork;
_addressType: bitcoin.AddressType;
_derivationCache: {
[index: string]: import("@liquality/types").Address;
};
baseDerivationNode(): Promise<BIP32Interface>;
_buildTransaction(targets: bitcoin.OutputTarget[], feePerByte?: number, fixedInputs?: bitcoin.Input[]): Promise<{
hex: string;
fee: number;
}>;
_buildSweepTransaction(externalChangeAddress: string, feePerByte?: number): Promise<{
hex: string;
fee: number;
}>;
signPSBT(data: string, inputs: bitcoin.PsbtInputTarget[]): Promise<string>;
signBatchP2SHTransaction(inputs: [{
inputTxHex: string;
index: number;
vout: any;
outputScript: Buffer;
}], addresses: string, tx: any, lockTime?: number, segwit?: boolean): Promise<Buffer[]>;
getDerivationCache(): {
[index: string]: import("@liquality/types").Address;
};
sendOptionsToOutputs(transactions: import("@liquality/types").SendOptions[]): bitcoin.OutputTarget[];
setDerivationCache(derivationCache: {
[index: string]: import("@liquality/types").Address;
}): Promise<void>;
buildTransaction(output: bitcoin.OutputTarget, feePerByte: number): Promise<{
hex: string;
fee: number;
}>;
buildBatchTransaction(outputs: bitcoin.OutputTarget[]): Promise<{
hex: string;
fee: number;
}>;
_sendTransaction(transactions: bitcoin.OutputTarget[], feePerByte?: number): Promise<import("@liquality/types").Transaction<bitcoin.Transaction>>;
sendTransaction(options: import("@liquality/types").SendOptions): Promise<import("@liquality/types").Transaction<bitcoin.Transaction>>;
sendBatchTransaction(transactions: import("@liquality/types").SendOptions[]): Promise<import("@liquality/types").Transaction<bitcoin.Transaction>>;
buildSweepTransaction(externalChangeAddress: string, feePerByte: number): Promise<{
hex: string;
fee: number;
}>;
sendSweepTransaction(externalChangeAddress: string | import("@liquality/types").Address, feePerByte: number): Promise<import("@liquality/types").Transaction<bitcoin.Transaction>>;
updateTransactionFee(tx: string | import("@liquality/types").Transaction<bitcoin.Transaction>, newFeePerByte: number): Promise<import("@liquality/types").Transaction<bitcoin.Transaction>>;
findAddress(addresses: string[], change?: boolean): Promise<import("@liquality/types").Address>;
getWalletAddress(address: string): Promise<import("@liquality/types").Address>;
getAddressFromPublicKey(publicKey: Buffer): string;
getPaymentVariantFromPublicKey(publicKey: Buffer): import("bitcoinjs-lib").Payment;
importAddresses(): Promise<void>;
getDerivationPathAddress(path: string): Promise<import("@liquality/types").Address>;
getAddresses(startingIndex?: number, numAddresses?: number, change?: boolean): Promise<import("@liquality/types").Address[]>;
_getUsedUnusedAddresses(numAddressPerCall: number, addressType: import("@liquality/bitcoin-wallet-provider/dist/lib/BitcoinWalletProvider").AddressSearchType): Promise<{
usedAddresses: import("@liquality/types").Address[];
unusedAddress: {
change: import("@liquality/types").Address;
external: import("@liquality/types").Address;
};
}>;
getUsedAddresses(numAddressPerCall?: number): Promise<import("@liquality/types").Address[]>;
getUnusedAddress(change?: boolean, numAddressPerCall?: number): Promise<import("@liquality/types").Address>;
withCachedUtxos(func: () => any): Promise<any>;
getTotalFee(opts: import("@liquality/types").SendOptions, max: boolean): Promise<number>;
getTotalFees(transactions: import("@liquality/types").SendOptions[], max: boolean): Promise<any>;
getInputsForAmount(_targets: bitcoin.OutputTarget[], feePerByte?: number, fixedInputs?: bitcoin.Input[], numAddressPerCall?: number, sweep?: boolean): Promise<{
inputs: bitcoin.UTXO[];
change: import("@liquality/bitcoin-utils").CoinSelectTarget;
outputs: import("@liquality/bitcoin-utils").CoinSelectTarget[];
fee: number;
}>;
client: import("@liquality/types").IClient;
setClient(client: import("@liquality/types").IClient): void;
getMethod(method: string, requestor?: any): any;
}) & WalletProviderConstructor<LedgerProvider<HwAppBitcoin>>;
export default class | extends BitcoinLedgerProvider_base {
_walletPublicKeyCache: {
[index: string]: any;
};
_baseDerivationNode: BIP32Interface;
constructor(options: BitcoinLedgerProviderOptions);
signMessage(message: string, from: string): Promise<string>;
_buildTransaction(targets: bitcoin.OutputTarget[], feePerByte?: number, fixedInputs?: bitcoin.Input[]): Promise<{
hex: string;
fee: number;
}>;
signPSBT(data: string, inputs: bitcoin.PsbtInputTarget[]): Promise<string>;
signBatchP2SHTransaction(inputs: [{
inputTxHex: string;
index: number;
vout: any;
outputScript: Buffer;
}], addresses: string, tx: any, lockTime?: number, segwit?: boolean): Promise<Buffer[]>;
getAmountBuffer(amount: number): Buffer;
getLedgerInputs(unspentOutputs: {
txid: string;
vout: number;
}[]): Promise<(number | HwAppBitcoin.Transaction)[][]>;
_getWalletPublicKey(path: string): Promise<{
publicKey: string;
bitcoinAddress: string;
chainCode: string;
}>;
getWalletPublicKey(path: string): Promise<any>;
baseDerivationNode(): Promise<BIP32Interface>;
getConnectedNetwork(): Promise<BitcoinNetwork>;
}
export {};
| BitcoinLedgerProvider |
f_beta.py | # Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Any, Optional
import torch
from torch import Tensor
from torchmetrics.functional.classification.f_beta import _fbeta_compute, _fbeta_update
from torchmetrics.metric import Metric
from torchmetrics.utilities import rank_zero_warn
class FBeta(Metric):
r"""
Computes `F-score <https://en.wikipedia.org/wiki/F-score>`_, specifically:
.. math::
F_\beta = (1 + \beta^2) * \frac{\text{precision} * \text{recall}}
{(\beta^2 * \text{precision}) + \text{recall}}
Where :math:`\beta` is some positive real factor. Works with binary, multiclass, and multilabel data.
Accepts probabilities from a model output or integer class values in prediction.
Works with multi-dimensional preds and target.
Forward accepts
- ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes
- ``target`` (long tensor): ``(N, ...)``
If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument
to convert into integer labels. This is the case for binary and multi-label probabilities.
If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
Args:
num_classes: Number of classes in the dataset.
beta: Beta coefficient in the F measure.
threshold:
Threshold value for binary or multi-label probabilities. default: 0.5
average:
- ``'micro'`` computes metric globally
- ``'macro'`` computes metric for each class and uniformly averages them
- ``'weighted'`` computes metric for each class and does a weighted-average,
where each class is weighted by their support (accounts for class imbalance)
- ``'none'`` or ``None`` computes and returns the metric per class
multilabel: If predictions are from multilabel classification.
compute_on_step:
            Forward only calls ``update()`` and returns None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
Example:
>>> from torchmetrics import FBeta
>>> target = torch.tensor([0, 1, 2, 0, 1, 2])
>>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
>>> f_beta = FBeta(num_classes=3, beta=0.5)
>>> f_beta(preds, target)
tensor(0.3333)
"""
def __init__(
self,
num_classes: int,
beta: float = 1.0,
threshold: float = 0.5,
average: str = "micro",
multilabel: bool = False,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
super().__init__(
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
)
self.num_classes = num_classes
self.beta = beta
self.threshold = threshold
self.average = average
self.multilabel = multilabel
allowed_average = ("micro", "macro", "weighted", "none", None)
if self.average not in allowed_average:
raise ValueError(
'Argument `average` expected to be one of the following:'
f' {allowed_average} but got {self.average}'
)
self.add_state("true_positives", default=torch.zeros(num_classes), dist_reduce_fx="sum")
self.add_state("predicted_positives", default=torch.zeros(num_classes), dist_reduce_fx="sum")
self.add_state("actual_positives", default=torch.zeros(num_classes), dist_reduce_fx="sum")
def update(self, preds: Tensor, target: Tensor):
"""
Update state with predictions and targets.
Args:
preds: Predictions from model
target: Ground truth values
"""
true_positives, predicted_positives, actual_positives = _fbeta_update(
preds, target, self.num_classes, self.threshold, self.multilabel
)
self.true_positives += true_positives
self.predicted_positives += predicted_positives
self.actual_positives += actual_positives
def compute(self) -> Tensor:
"""
Computes fbeta over state.
"""
return _fbeta_compute(
self.true_positives, self.predicted_positives, self.actual_positives, self.beta, self.average
)
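# Illustrative sketch (not from the original file): the update()/compute()
# split above lets FBeta accumulate statistics over many batches before the
# final score is taken, e.g.
#
#   f_beta = FBeta(num_classes=3, beta=0.5, average="macro")
#   for preds, target in loader:  # hypothetical dataloader
#       f_beta.update(preds, target)
#   score = f_beta.compute()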
class F1(FBeta):
| """
Computes F1 metric. F1 metrics correspond to a harmonic mean of the
precision and recall scores.
Works with binary, multiclass, and multilabel data.
Accepts logits from a model output or integer class values in prediction.
Works with multi-dimensional preds and target.
Forward accepts
- ``preds`` (float or long tensor): ``(N, ...)`` or ``(N, C, ...)`` where C is the number of classes
- ``target`` (long tensor): ``(N, ...)``
If preds and target are the same shape and preds is a float tensor, we use the ``self.threshold`` argument.
This is the case for binary and multi-label logits.
If preds has an extra dimension as in the case of multi-class scores we perform an argmax on ``dim=1``.
Args:
num_classes: Number of classes in the dataset.
threshold:
Threshold value for binary or multi-label logits. default: 0.5
average:
- ``'micro'`` computes metric globally
- ``'macro'`` computes metric for each class and uniformly averages them
- ``'weighted'`` computes metric for each class and does a weighted-average,
where each class is weighted by their support (accounts for class imbalance)
- ``'none'`` or ``None`` computes and returns the metric per class
multilabel: If predictions are from multilabel classification.
compute_on_step:
Forward only calls ``update()`` and returns None if this is set to False. default: True
dist_sync_on_step:
Synchronize metric state across processes at each ``forward()``
before returning the value at the step. default: False
process_group:
Specify the process group on which synchronization is called. default: None (which selects the entire world)
Example:
>>> from torchmetrics import F1
>>> target = torch.tensor([0, 1, 2, 0, 1, 2])
>>> preds = torch.tensor([0, 2, 1, 0, 0, 1])
>>> f1 = F1(num_classes=3)
>>> f1(preds, target)
tensor(0.3333)
"""
def __init__(
self,
num_classes: int,
threshold: float = 0.5,
average: str = "micro",
multilabel: bool = False,
compute_on_step: bool = True,
dist_sync_on_step: bool = False,
process_group: Optional[Any] = None,
):
if multilabel is not False:
rank_zero_warn(f'The `multilabel={multilabel}` parameter is unused and will not have any effect.')
super().__init__(
num_classes=num_classes,
beta=1.0,
threshold=threshold,
average=average,
multilabel=multilabel,
compute_on_step=compute_on_step,
dist_sync_on_step=dist_sync_on_step,
process_group=process_group,
) |
|
utils.rs | use {
crate::{
error::MetadataError,
state::{
get_reservation_list, Data, EditionMarker, Key, MasterEditionV1, Metadata, EDITION,
EDITION_MARKER_BIT_SIZE, MAX_CREATOR_LIMIT, MAX_EDITION_LEN, MAX_EDITION_MARKER_SIZE,
MAX_MASTER_EDITION_LEN, MAX_METADATA_LEN, MAX_NAME_LENGTH, MAX_SYMBOL_LENGTH,
MAX_URI_LENGTH, PREFIX,
},
},
arrayref::{array_mut_ref, array_ref, array_refs, mut_array_refs},
borsh::{BorshDeserialize, BorshSerialize},
solana_program::{
account_info::AccountInfo,
borsh::try_from_slice_unchecked,
entrypoint::ProgramResult,
msg,
program::{invoke, invoke_signed},
program_error::ProgramError,
program_option::COption,
program_pack::{IsInitialized, Pack},
pubkey::Pubkey,
system_instruction,
sysvar::{rent::Rent, Sysvar},
},
spl_token::{
instruction::{set_authority, AuthorityType},
state::{Account, Mint},
},
std::convert::TryInto,
};
pub fn assert_data_valid(
data: &Data,
update_authority: &Pubkey,
existing_metadata: &Metadata,
allow_direct_creator_writes: bool,
) -> ProgramResult {
if data.name.len() > MAX_NAME_LENGTH {
return Err(MetadataError::NameTooLong.into());
}
if data.symbol.len() > MAX_SYMBOL_LENGTH {
return Err(MetadataError::SymbolTooLong.into());
}
if data.uri.len() > MAX_URI_LENGTH {
return Err(MetadataError::UriTooLong.into());
}
if data.seller_fee_basis_points > 10000 {
return Err(MetadataError::InvalidBasisPoints.into());
}
if data.creators.is_some() {
if let Some(creators) = &data.creators {
if creators.len() > MAX_CREATOR_LIMIT {
return Err(MetadataError::CreatorsTooLong.into());
}
if creators.is_empty() {
return Err(MetadataError::CreatorsMustBeAtleastOne.into());
} else {
let mut found = false;
let mut total: u8 = 0;
for i in 0..creators.len() {
let creator = &creators[i];
for j in (i + 1)..creators.len() {
if creators[j].address == creator.address {
return Err(MetadataError::DuplicateCreatorAddress.into());
}
}
total = total
.checked_add(creator.share)
.ok_or(MetadataError::NumericalOverflowError)?;
if creator.address == *update_authority {
found = true;
}
                // Don't allow the metadata owner to unilaterally mark a creator as
                // verified: cross-check with the existing array, and only accept
                // verified=true here if it was already true for that creator in the
                // array. Conversely, don't let an already-verified creator be wiped.
if creator.address != *update_authority && !allow_direct_creator_writes {
if let Some(existing_creators) = &existing_metadata.data.creators {
match existing_creators
.iter()
.find(|c| c.address == creator.address)
{
Some(existing_creator) => {
if creator.verified && !existing_creator.verified {
return Err(
MetadataError::CannotVerifyAnotherCreator.into()
);
} else if !creator.verified && existing_creator.verified {
return Err(
MetadataError::CannotUnverifyAnotherCreator.into()
);
}
}
None => {
if creator.verified {
return Err(
MetadataError::CannotVerifyAnotherCreator.into()
);
}
}
}
} else {
if creator.verified {
return Err(MetadataError::CannotVerifyAnotherCreator.into());
}
}
}
}
if !found && !allow_direct_creator_writes {
return Err(MetadataError::MustBeOneOfCreators.into());
}
if total != 100 {
return Err(MetadataError::ShareTotalMustBe100.into());
}
}
}
}
Ok(())
}
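// Invariants enforced above, restated as a checklist (no new rules): name,
// symbol and uri within their MAX_* lengths; seller_fee_basis_points <= 10000;
// between 1 and MAX_CREATOR_LIMIT creators with no duplicate addresses and
// shares summing to exactly 100; `verified` may only be flipped for one's own
// creator entry; and the update authority must appear among the creators
// unless allow_direct_creator_writes is set.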
/// assert initialized account
pub fn assert_initialized<T: Pack + IsInitialized>(
account_info: &AccountInfo,
) -> Result<T, ProgramError> {
let account: T = T::unpack_unchecked(&account_info.data.borrow())?;
if !account.is_initialized() {
Err(MetadataError::Uninitialized.into())
} else {
Ok(account)
}
}
/// Create account almost from scratch, lifted from
/// https://github.com/solana-labs/solana-program-library/tree/master/associated-token-account/program/src/processor.rs#L51-L98
#[inline(always)]
pub fn create_or_allocate_account_raw<'a>(
program_id: Pubkey,
new_account_info: &AccountInfo<'a>,
rent_sysvar_info: &AccountInfo<'a>,
system_program_info: &AccountInfo<'a>,
payer_info: &AccountInfo<'a>,
size: usize,
signer_seeds: &[&[u8]],
) -> ProgramResult {
let rent = &Rent::from_account_info(rent_sysvar_info)?;
let required_lamports = rent
.minimum_balance(size)
.max(1)
.saturating_sub(new_account_info.lamports());
if required_lamports > 0 {
msg!("Transfer {} lamports to the new account", required_lamports);
invoke(
&system_instruction::transfer(&payer_info.key, new_account_info.key, required_lamports),
&[
payer_info.clone(),
new_account_info.clone(),
system_program_info.clone(),
],
)?;
}
let accounts = &[new_account_info.clone(), system_program_info.clone()];
msg!("Allocate space for the account");
invoke_signed(
&system_instruction::allocate(new_account_info.key, size.try_into().unwrap()),
accounts,
&[&signer_seeds],
)?;
msg!("Assign the account to the owning program");
invoke_signed(
&system_instruction::assign(new_account_info.key, &program_id),
accounts,
&[&signer_seeds],
)?;
Ok(())
}
pub fn assert_update_authority_is_correct(
metadata: &Metadata,
update_authority_info: &AccountInfo,
) -> ProgramResult {
if metadata.update_authority != *update_authority_info.key {
return Err(MetadataError::UpdateAuthorityIncorrect.into());
}
if !update_authority_info.is_signer {
return Err(MetadataError::UpdateAuthorityIsNotSigner.into());
}
Ok(())
}
/// Unpacks COption from a slice, taken from token program
fn unpack_coption_key(src: &[u8; 36]) -> Result<COption<Pubkey>, ProgramError> {
let (tag, body) = array_refs![src, 4, 32];
match *tag {
[0, 0, 0, 0] => Ok(COption::None),
[1, 0, 0, 0] => Ok(COption::Some(Pubkey::new_from_array(*body))),
_ => Err(ProgramError::InvalidAccountData),
}
}
/// Cheap method to just grab owner Pubkey from token account, instead of deserializing entire thing
pub fn get_owner_from_token_account(
token_account_info: &AccountInfo,
) -> Result<Pubkey, ProgramError> {
    // TokenAccount layout: mint(32), owner(32), ...
let data = token_account_info.try_borrow_data()?;
let owner_data = array_ref![data, 32, 32];
Ok(Pubkey::new_from_array(*owner_data))
}
pub fn get_mint_authority(account_info: &AccountInfo) -> Result<COption<Pubkey>, ProgramError> {
    // In the token program the layout is 36, 8, 1, 1, where the first 36 bytes
    // are the mint_authority, so we start at offset 0.
let data = account_info.try_borrow_data().unwrap();
let authority_bytes = array_ref![data, 0, 36];
Ok(unpack_coption_key(&authority_bytes)?)
}
pub fn get_mint_freeze_authority(
account_info: &AccountInfo,
) -> Result<COption<Pubkey>, ProgramError> {
let data = account_info.try_borrow_data().unwrap();
let authority_bytes = array_ref![data, 36 + 8 + 1 + 1, 36];
Ok(unpack_coption_key(&authority_bytes)?)
}
/// cheap method to just get supply off a mint without unpacking whole object
pub fn get_mint_supply(account_info: &AccountInfo) -> Result<u64, ProgramError> {
    // In the token program the layout is 36, 8, 1, 1, where the 8 bytes after
    // the first 36 are the supply u64, so we start at offset 36.
let data = account_info.try_borrow_data().unwrap();
let bytes = array_ref![data, 36, 8];
Ok(u64::from_le_bytes(*bytes))
}
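// Layout the raw reads above assume (SPL Token `Mint`, 82 bytes; a recap,
// hedged against upstream changes):
//   [ 0..36)  mint_authority   COption<Pubkey> (4-byte tag + 32-byte key)
//   [36..44)  supply           u64, little-endian
//   [44]      decimals         u8
//   [45]      is_initialized   u8
//   [46..82)  freeze_authority COption<Pubkey>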
pub fn assert_mint_authority_matches_mint(
mint_authority: &COption<Pubkey>,
mint_authority_info: &AccountInfo,
) -> ProgramResult {
match mint_authority {
COption::None => {
return Err(MetadataError::InvalidMintAuthority.into());
}
COption::Some(key) => {
if mint_authority_info.key != key {
return Err(MetadataError::InvalidMintAuthority.into());
}
}
}
if !mint_authority_info.is_signer {
return Err(MetadataError::NotMintAuthority.into());
}
Ok(())
}
pub fn assert_supply_invariance(
master_edition: &MasterEditionV1,
printing_mint: &Mint,
new_supply: u64,
) -> ProgramResult {
    // The supply of printed tokens plus the supply of the master edition must never exceed max_supply.
    // Every time a printed token is burned, master_edition.supply goes up by 1.
if let Some(max_supply) = master_edition.max_supply {
let current_supply = printing_mint
.supply
.checked_add(master_edition.supply)
.ok_or(MetadataError::NumericalOverflowError)?;
let new_proposed_supply = current_supply
.checked_add(new_supply)
.ok_or(MetadataError::NumericalOverflowError)?;
if new_proposed_supply > max_supply {
return Err(MetadataError::PrintingWouldBreachMaximumSupply.into());
}
}
Ok(())
}
pub fn transfer_mint_authority<'a>(
edition_key: &Pubkey,
edition_account_info: &AccountInfo<'a>,
mint_info: &AccountInfo<'a>,
mint_authority_info: &AccountInfo<'a>,
token_program_info: &AccountInfo<'a>,
) -> ProgramResult {
msg!("Setting mint authority");
let accounts = &[
mint_authority_info.clone(),
mint_info.clone(),
token_program_info.clone(),
edition_account_info.clone(),
];
invoke_signed(
&set_authority(
token_program_info.key,
mint_info.key,
Some(edition_key),
AuthorityType::MintTokens,
mint_authority_info.key,
&[&mint_authority_info.key],
)
.unwrap(),
accounts,
&[],
)?;
msg!("Setting freeze authority");
let freeze_authority = get_mint_freeze_authority(mint_info)?;
if freeze_authority.is_some() {
invoke_signed(
&set_authority(
token_program_info.key,
mint_info.key,
Some(&edition_key),
AuthorityType::FreezeAccount,
mint_authority_info.key,
&[&mint_authority_info.key],
)
.unwrap(),
accounts,
&[],
)?;
msg!("Finished setting freeze authority");
} else {
msg!("Skipping freeze authority because this mint has none")
}
Ok(())
}
pub fn assert_rent_exempt(rent: &Rent, account_info: &AccountInfo) -> ProgramResult {
if !rent.is_exempt(account_info.lamports(), account_info.data_len()) {
Err(MetadataError::NotRentExempt.into())
} else {
Ok(())
}
}
// Todo deprecate this for assert derivation
pub fn assert_edition_valid(
program_id: &Pubkey,
mint: &Pubkey,
edition_account_info: &AccountInfo,
) -> ProgramResult {
let edition_seeds = &[
PREFIX.as_bytes(),
program_id.as_ref(),
&mint.as_ref(),
EDITION.as_bytes(),
];
let (edition_key, _) = Pubkey::find_program_address(edition_seeds, program_id);
if edition_key != *edition_account_info.key {
return Err(MetadataError::InvalidEditionKey.into());
}
Ok(())
}
pub fn extract_edition_number_from_deprecated_reservation_list(
account: &AccountInfo,
mint_authority_info: &AccountInfo,
) -> Result<u64, ProgramError> {
let mut reservation_list = get_reservation_list(account)?;
if let Some(supply_snapshot) = reservation_list.supply_snapshot() {
let mut prev_total_offsets: u64 = 0;
let mut offset: Option<u64> = None;
let mut reservations = reservation_list.reservations();
for i in 0..reservations.len() {
let mut reservation = &mut reservations[i];
if reservation.address == *mint_authority_info.key {
offset = Some(
prev_total_offsets
.checked_add(reservation.spots_remaining)
.ok_or(MetadataError::NumericalOverflowError)?,
);
                // You get your editions in reverse order, which is fine: it saves a byte.
reservation.spots_remaining = reservation
.spots_remaining
.checked_sub(1)
.ok_or(MetadataError::NumericalOverflowError)?;
reservation_list.set_reservations(reservations)?;
reservation_list.save(account)?;
break;
}
if reservation.address == solana_program::system_program::id() {
                // This is an anchor point in the array; it means we reset our math to
                // this offset because we may be missing information between this point
                // and the points before it.
prev_total_offsets = reservation.total_spots;
} else {
prev_total_offsets = prev_total_offsets
.checked_add(reservation.total_spots)
.ok_or(MetadataError::NumericalOverflowError)?;
}
}
match offset {
Some(val) => Ok(supply_snapshot
.checked_add(val)
.ok_or(MetadataError::NumericalOverflowError)?),
None => {
return Err(MetadataError::AddressNotInReservation.into());
}
}
} else {
return Err(MetadataError::ReservationNotSet.into());
}
}
pub fn calculate_edition_number(
mint_authority_info: &AccountInfo,
reservation_list_info: Option<&AccountInfo>,
edition_override: Option<u64>,
me_supply: u64,
) -> Result<u64, ProgramError> {
let edition = match reservation_list_info {
Some(account) => {
extract_edition_number_from_deprecated_reservation_list(account, mint_authority_info)?
}
None => {
if let Some(edit) = edition_override {
edit
} else {
me_supply
.checked_add(1)
.ok_or(MetadataError::NumericalOverflowError)?
}
}
};
Ok(edition)
}
fn get_max_supply_off_master_edition(
master_edition_account_info: &AccountInfo,
) -> Result<Option<u64>, ProgramError> {
let data = master_edition_account_info.try_borrow_data()?;
    // max_supply is an Option<u64> starting at offset 9: a 1-byte tag (0 means None) followed by 8 value bytes
if data[9] == 0 {
Ok(None)
} else {
let amount_data = array_ref![data, 10, 8];
Ok(Some(u64::from_le_bytes(*amount_data)))
}
}
pub fn get_supply_off_master_edition(
master_edition_account_info: &AccountInfo,
) -> Result<u64, ProgramError> {
let data = master_edition_account_info.try_borrow_data()?;
    // supply is a u64 stored at offset 1, right after the 1-byte key
let amount_data = array_ref![data, 1, 8];
Ok(u64::from_le_bytes(*amount_data))
}
pub fn calculate_supply_change<'a>(
master_edition_account_info: &AccountInfo<'a>,
reservation_list_info: Option<&AccountInfo<'a>>,
edition_override: Option<u64>,
me_supply: u64,
) -> ProgramResult {
if reservation_list_info.is_none() {
let new_supply: u64;
if let Some(edition) = edition_override {
if edition > me_supply {
new_supply = edition;
} else {
new_supply = me_supply
}
} else {
new_supply = me_supply
.checked_add(1)
.ok_or(MetadataError::NumericalOverflowError)?;
}
if let Some(max) = get_max_supply_off_master_edition(master_edition_account_info)? {
if new_supply > max {
return Err(MetadataError::MaxEditionsMintedAlready.into());
}
}
// Doing old school serialization to protect CPU credits.
let edition_data = &mut master_edition_account_info.data.borrow_mut();
let output = array_mut_ref![edition_data, 0, MAX_MASTER_EDITION_LEN];
let (_key, supply, _the_rest) =
mut_array_refs![output, 1, 8, MAX_MASTER_EDITION_LEN - 8 - 1];
*supply = new_supply.to_le_bytes();
}
Ok(())
}
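// MasterEditionV2 layout assumed by the raw writes above and by the two
// getters earlier (a sketch):
//   [0]       key, 1 byte
//   [1..9)    supply, u64 little-endian
//   [9]       max_supply Option tag (0 = None)
//   [10..18)  max_supply value when the tag is non-zero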
#[allow(clippy::too_many_arguments)]
pub fn mint_limited_edition<'a>(
program_id: &'a Pubkey,
master_metadata: Metadata,
new_metadata_account_info: &'a AccountInfo<'a>,
new_edition_account_info: &'a AccountInfo<'a>,
master_edition_account_info: &'a AccountInfo<'a>,
mint_info: &'a AccountInfo<'a>,
mint_authority_info: &'a AccountInfo<'a>,
payer_account_info: &'a AccountInfo<'a>,
update_authority_info: &'a AccountInfo<'a>,
token_program_account_info: &'a AccountInfo<'a>,
system_account_info: &'a AccountInfo<'a>,
rent_info: &'a AccountInfo<'a>,
// Only present with MasterEditionV1 calls, if present, use edition based off address in res list,
// otherwise, pull off the top
reservation_list_info: Option<&'a AccountInfo<'a>>,
// Only present with MasterEditionV2 calls, if present, means
// directing to a specific version, otherwise just pull off the top
edition_override: Option<u64>,
) -> ProgramResult {
let me_supply = get_supply_off_master_edition(master_edition_account_info)?;
let mint_authority = get_mint_authority(mint_info)?;
let mint_supply = get_mint_supply(mint_info)?;
assert_mint_authority_matches_mint(&mint_authority, mint_authority_info)?;
assert_edition_valid(
program_id,
&master_metadata.mint,
master_edition_account_info,
)?;
let edition_seeds = &[
PREFIX.as_bytes(),
program_id.as_ref(),
&mint_info.key.as_ref(),
EDITION.as_bytes(),
];
let (edition_key, bump_seed) = Pubkey::find_program_address(edition_seeds, program_id);
if edition_key != *new_edition_account_info.key {
return Err(MetadataError::InvalidEditionKey.into());
}
if reservation_list_info.is_some() && edition_override.is_some() {
return Err(MetadataError::InvalidOperation.into());
}
calculate_supply_change(
master_edition_account_info,
reservation_list_info,
edition_override,
me_supply,
)?;
if mint_supply != 1 {
return Err(MetadataError::EditionsMustHaveExactlyOneToken.into());
}
// create the metadata the normal way...
process_create_metadata_accounts_logic(
&program_id,
CreateMetadataAccountsLogicArgs {
metadata_account_info: new_metadata_account_info,
mint_info,
mint_authority_info,
payer_account_info,
update_authority_info,
system_account_info,
rent_info,
},
master_metadata.data,
true,
false,
)?;
let edition_authority_seeds = &[
PREFIX.as_bytes(),
program_id.as_ref(),
&mint_info.key.as_ref(),
EDITION.as_bytes(),
&[bump_seed],
];
create_or_allocate_account_raw(
*program_id,
new_edition_account_info,
rent_info,
system_account_info,
payer_account_info,
MAX_EDITION_LEN,
edition_authority_seeds,
)?;
// Doing old school serialization to protect CPU credits.
let edition_data = &mut new_edition_account_info.data.borrow_mut();
let output = array_mut_ref![edition_data, 0, MAX_EDITION_LEN];
let (key, parent, edition, _padding) = mut_array_refs![output, 1, 32, 8, 200];
*key = [Key::EditionV1 as u8];
parent.copy_from_slice(master_edition_account_info.key.as_ref());
*edition = calculate_edition_number(
mint_authority_info,
reservation_list_info,
edition_override,
me_supply,
)?
.to_le_bytes();
// Now make sure this mint can never be used by anybody else.
transfer_mint_authority(
&edition_key,
new_edition_account_info,
mint_info,
mint_authority_info,
token_program_account_info,
)?;
Ok(())
}
pub fn spl_token_burn(params: TokenBurnParams<'_, '_>) -> ProgramResult {
let TokenBurnParams {
mint,
source,
authority,
token_program,
amount,
authority_signer_seeds,
} = params;
let mut seeds: Vec<&[&[u8]]> = vec![];
if let Some(seed) = authority_signer_seeds {
seeds.push(seed);
}
let result = invoke_signed(
&spl_token::instruction::burn(
token_program.key,
source.key,
mint.key,
authority.key,
&[],
amount,
)?,
&[source, mint, authority, token_program],
seeds.as_slice(),
);
result.map_err(|_| MetadataError::TokenBurnFailed.into())
}
/// TokenBurnParams
pub struct TokenBurnParams<'a: 'b, 'b> {
/// mint
pub mint: AccountInfo<'a>,
/// source
pub source: AccountInfo<'a>,
/// amount
pub amount: u64,
/// authority
pub authority: AccountInfo<'a>,
/// authority_signer_seeds
pub authority_signer_seeds: Option<&'b [&'b [u8]]>,
/// token_program
pub token_program: AccountInfo<'a>,
}
pub fn spl_token_mint_to(params: TokenMintToParams<'_, '_>) -> ProgramResult {
let TokenMintToParams {
mint,
destination,
authority,
token_program,
amount,
authority_signer_seeds,
} = params;
let mut seeds: Vec<&[&[u8]]> = vec![];
if let Some(seed) = authority_signer_seeds {
seeds.push(seed);
}
let result = invoke_signed(
&spl_token::instruction::mint_to(
token_program.key,
mint.key,
destination.key,
authority.key,
&[],
amount,
)?,
&[mint, destination, authority, token_program],
seeds.as_slice(),
);
result.map_err(|_| MetadataError::TokenMintToFailed.into())
}
/// TokenMintToParams
pub struct TokenMintToParams<'a: 'b, 'b> {
/// mint
pub mint: AccountInfo<'a>,
/// destination
pub destination: AccountInfo<'a>,
/// amount
pub amount: u64,
/// authority
pub authority: AccountInfo<'a>,
/// authority_signer_seeds
pub authority_signer_seeds: Option<&'b [&'b [u8]]>,
/// token_program
pub token_program: AccountInfo<'a>,
}
pub fn assert_derivation(
program_id: &Pubkey,
account: &AccountInfo,
path: &[&[u8]],
) -> Result<u8, ProgramError> {
let (key, bump) = Pubkey::find_program_address(&path, program_id);
if key != *account.key {
return Err(MetadataError::DerivedKeyInvalid.into());
}
Ok(bump)
}
pub fn assert_signer(account_info: &AccountInfo) -> ProgramResult {
if !account_info.is_signer {
Err(ProgramError::MissingRequiredSignature)
} else {
Ok(())
}
}
pub fn assert_owned_by(account: &AccountInfo, owner: &Pubkey) -> ProgramResult {
if account.owner != owner {
Err(MetadataError::IncorrectOwner.into())
} else {
Ok(())
}
}
pub fn assert_token_program_matches_package(token_program_info: &AccountInfo) -> ProgramResult {
if *token_program_info.key != spl_token::id() {
return Err(MetadataError::InvalidTokenProgram.into());
}
Ok(())
}
pub fn try_from_slice_checked<T: BorshDeserialize>(
data: &[u8],
data_type: Key,
data_size: usize,
) -> Result<T, ProgramError> {
if (data[0] != data_type as u8 && data[0] != Key::Uninitialized as u8)
|| data.len() != data_size
{
return Err(MetadataError::DataTypeMismatch.into());
}
let result: T = try_from_slice_unchecked(data)?;
Ok(result)
}
pub struct CreateMetadataAccountsLogicArgs<'a> {
pub metadata_account_info: &'a AccountInfo<'a>,
pub mint_info: &'a AccountInfo<'a>,
pub mint_authority_info: &'a AccountInfo<'a>,
pub payer_account_info: &'a AccountInfo<'a>,
pub update_authority_info: &'a AccountInfo<'a>,
pub system_account_info: &'a AccountInfo<'a>,
pub rent_info: &'a AccountInfo<'a>,
}
/// Create a new account instruction
pub fn process_create_metadata_accounts_logic(
program_id: &Pubkey,
accounts: CreateMetadataAccountsLogicArgs,
data: Data,
allow_direct_creator_writes: bool,
is_mutable: bool,
) -> ProgramResult {
let CreateMetadataAccountsLogicArgs {
metadata_account_info,
mint_info,
mint_authority_info,
payer_account_info,
update_authority_info,
system_account_info,
rent_info,
} = accounts;
let mint_authority = get_mint_authority(mint_info)?;
assert_mint_authority_matches_mint(&mint_authority, mint_authority_info)?;
assert_owned_by(mint_info, &spl_token::id())?;
let metadata_seeds = &[
PREFIX.as_bytes(),
program_id.as_ref(),
mint_info.key.as_ref(),
];
let (metadata_key, metadata_bump_seed) =
Pubkey::find_program_address(metadata_seeds, program_id);
let metadata_authority_signer_seeds = &[
PREFIX.as_bytes(),
program_id.as_ref(),
mint_info.key.as_ref(),
&[metadata_bump_seed],
];
if metadata_account_info.key != &metadata_key {
return Err(MetadataError::InvalidMetadataKey.into());
}
create_or_allocate_account_raw(
*program_id,
metadata_account_info,
rent_info,
system_account_info,
payer_account_info,
MAX_METADATA_LEN,
metadata_authority_signer_seeds,
)?;
let mut metadata = Metadata::from_account_info(metadata_account_info)?;
assert_data_valid(
&data,
update_authority_info.key,
&metadata,
allow_direct_creator_writes,
)?;
metadata.mint = *mint_info.key;
metadata.key = Key::MetadataV1;
metadata.data = data;
metadata.is_mutable = is_mutable;
metadata.update_authority = *update_authority_info.key;
metadata.serialize(&mut *metadata_account_info.data.borrow_mut())?;
Ok(())
}
pub struct MintNewEditionFromMasterEditionViaTokenLogicArgs<'a> {
pub new_metadata_account_info: &'a AccountInfo<'a>,
pub new_edition_account_info: &'a AccountInfo<'a>,
pub master_edition_account_info: &'a AccountInfo<'a>,
pub mint_info: &'a AccountInfo<'a>,
pub edition_marker_info: &'a AccountInfo<'a>,
pub mint_authority_info: &'a AccountInfo<'a>,
pub payer_account_info: &'a AccountInfo<'a>,
pub owner_account_info: &'a AccountInfo<'a>,
pub token_account_info: &'a AccountInfo<'a>,
pub update_authority_info: &'a AccountInfo<'a>,
pub master_metadata_account_info: &'a AccountInfo<'a>,
pub token_program_account_info: &'a AccountInfo<'a>,
pub system_account_info: &'a AccountInfo<'a>,
pub rent_info: &'a AccountInfo<'a>,
}
pub fn | <'a>(
program_id: &'a Pubkey,
accounts: MintNewEditionFromMasterEditionViaTokenLogicArgs<'a>,
edition: u64,
ignore_owner_signer: bool,
) -> ProgramResult {
let MintNewEditionFromMasterEditionViaTokenLogicArgs {
new_metadata_account_info,
new_edition_account_info,
master_edition_account_info,
mint_info,
edition_marker_info,
mint_authority_info,
payer_account_info,
owner_account_info,
token_account_info,
update_authority_info,
master_metadata_account_info,
token_program_account_info,
system_account_info,
rent_info,
} = accounts;
assert_token_program_matches_package(token_program_account_info)?;
assert_owned_by(mint_info, &spl_token::id())?;
assert_owned_by(token_account_info, &spl_token::id())?;
assert_owned_by(master_edition_account_info, program_id)?;
assert_owned_by(master_metadata_account_info, program_id)?;
let master_metadata = Metadata::from_account_info(master_metadata_account_info)?;
let token_account: Account = assert_initialized(token_account_info)?;
if !ignore_owner_signer {
assert_signer(owner_account_info)?;
if token_account.owner != *owner_account_info.key {
return Err(MetadataError::InvalidOwner.into());
}
}
if token_account.mint != master_metadata.mint {
return Err(MetadataError::TokenAccountMintMismatchV2.into());
}
if token_account.amount < 1 {
return Err(MetadataError::NotEnoughTokens.into());
}
if !new_metadata_account_info.data_is_empty() {
return Err(MetadataError::AlreadyInitialized.into());
}
if !new_edition_account_info.data_is_empty() {
return Err(MetadataError::AlreadyInitialized.into());
}
let edition_number = edition.checked_div(EDITION_MARKER_BIT_SIZE).unwrap();
let as_string = edition_number.to_string();
let bump = assert_derivation(
program_id,
edition_marker_info,
&[
PREFIX.as_bytes(),
program_id.as_ref(),
master_metadata.mint.as_ref(),
EDITION.as_bytes(),
as_string.as_bytes(),
],
)?;
if edition_marker_info.data_is_empty() {
let seeds = &[
PREFIX.as_bytes(),
program_id.as_ref(),
master_metadata.mint.as_ref(),
EDITION.as_bytes(),
as_string.as_bytes(),
&[bump],
];
create_or_allocate_account_raw(
*program_id,
edition_marker_info,
rent_info,
system_account_info,
payer_account_info,
MAX_EDITION_MARKER_SIZE,
seeds,
)?;
}
let mut edition_marker = EditionMarker::from_account_info(edition_marker_info)?;
edition_marker.key = Key::EditionMarker;
if edition_marker.edition_taken(edition)? {
return Err(MetadataError::AlreadyInitialized.into());
} else {
edition_marker.insert_edition(edition)?
}
edition_marker.serialize(&mut *edition_marker_info.data.borrow_mut())?;
mint_limited_edition(
program_id,
master_metadata,
new_metadata_account_info,
new_edition_account_info,
master_edition_account_info,
mint_info,
mint_authority_info,
payer_account_info,
update_authority_info,
token_program_account_info,
system_account_info,
rent_info,
None,
Some(edition),
)?;
Ok(())
}
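// Edition-marker bookkeeping in sketch form (assuming EDITION_MARKER_BIT_SIZE
// is 248, as upstream defines it): edition N lives in the marker PDA derived
// from [PREFIX, program_id, master_mint, EDITION, (N / 248).to_string()], and
// edition_taken/insert_edition test and set bit N % 248 inside that account,
// so each marker tracks 248 consecutive editions.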
| process_mint_new_edition_from_master_edition_via_token_logic |
checkpointable_utils.py | """Utilities for saving/loading Checkpointable objects."""
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import weakref
from tensorflow.core.protobuf import checkpointable_object_graph_pb2
from tensorflow.python import pywrap_tensorflow
from tensorflow.python.client import session as session_lib
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpointable as checkpointable_lib
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
_ESCAPE_CHAR = "." # For avoiding conflicts with user-specified names.
# Keyword for identifying that the next bit of a checkpoint variable name is a
# slot name. Checkpoint names for slot variables look like:
#
# <path to variable>/<_OPTIMIZER_SLOTS_NAME>/<path to optimizer>/<slot name>
#
# Where <path to variable> is a full path from the checkpoint root to the
# variable being slotted for.
_OPTIMIZER_SLOTS_NAME = _ESCAPE_CHAR + "OPTIMIZER_SLOT"
# Keyword for separating the path to an object from the name of an
# attribute in checkpoint names. Used like:
# <path to variable>/<_OBJECT_ATTRIBUTES_NAME>/<name of attribute>
_OBJECT_ATTRIBUTES_NAME = _ESCAPE_CHAR + "ATTRIBUTES"
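# Worked example of the naming scheme above (illustrative, not from the
# original file): a "momentum" slot for the variable reached via
# model -> dense -> kernel, slotted by an optimizer reached via "optimizer",
# would be checkpointed as
#   model/dense/kernel/.OPTIMIZER_SLOT/optimizer/momentum
# while a plain attribute of that variable lands under
#   model/dense/kernel/.ATTRIBUTES/<attribute name>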
class _CheckpointRestoreCoordinator(object):
"""Holds the status of an object-based checkpoint load."""
def __init__(self, object_graph_proto, save_path, dtype_map=None):
"""Specify the checkpoint being loaded.
Args:
object_graph_proto: The CheckpointableObjectGraph protocol buffer
associated with this checkpoint.
save_path: A string `Tensor`. The path to the checkpoint, as returned by
`tf.train.latest_checkpoint`.
dtype_map: When executing eagerly, specifies dtypes for creating slot
variables. None when graph building.
"""
self.builder = saver_lib.BulkSaverBuilder()
self.object_graph_proto = object_graph_proto
self.restore_uid = ops.uid()
# Maps from objects to lists of attributes which were in the checkpoint but
# not loaded into any object, for error checking.
self.unused_attributes = weakref.WeakKeyDictionary()
# Dictionary mapping from an id in the protocol buffer flat array to
# Checkpointable Python objects. This mapping may be deferred if a
# checkpoint is restored before all dependencies have been tracked. Uses
# weak references so that partial restorations don't create reference cycles
# (as objects with deferred dependencies will generally have references to
# this object).
self.object_by_proto_id = weakref.WeakValueDictionary()
# A set of all Python objects we've seen as dependencies, even if we didn't
# use them (for example because of inconsistent references when
# loading). Used to make status assertions fail when loading checkpoints
# that don't quite match.
self.all_python_objects = weakref.WeakSet()
self.save_path = save_path
self.dtype_map = dtype_map
# When graph building, contains a list of ops to run to restore objects from
# this checkpoint.
self.restore_ops = []
self.restore_ops_by_name = {}
# A mapping from optimizer proto ids to lists of slot variables to be
# restored when the optimizer is tracked. Only includes slot variables whose
# regular variables have already been created, and only for optimizer
# objects which have not yet been created/tracked.
self.deferred_slot_restorations = {}
# A mapping from variable proto ids to lists of slot variables to be
# restored when the variable is created/tracked. These get shifted over to
# deferred_slot_restorations if the optimizer hasn't been created when that
# happens.
self.slot_restorations = {}
for node_index, node in enumerate(self.object_graph_proto.nodes):
for slot_reference in node.slot_variables:
# `node` refers to an `Optimizer`, since only these have slot variables.
self.slot_restorations.setdefault(
slot_reference.original_variable_node_id, []).append(
checkpointable_lib._SlotVariableRestoration( # pylint: disable=protected-access
optimizer_id=node_index,
slot_variable_id=slot_reference.slot_variable_node_id,
slot_name=slot_reference.slot_name))
# TODO (allenl): If this ends up in a public API, consider adding LINT.IfChange id:3465
# https://github.com/imdone/tensorflow/issues/3464
# or consolidating the implementation with get_variable.
def _default_getter(name, shape, dtype, initializer=None,
partition_info=None, **kwargs):
"""A pared-down version of get_variable which does not reuse variables."""
dtype = dtypes.as_dtype(dtype)
shape_object = tensor_shape.as_shape(shape)
with ops.init_scope():
if initializer is None:
initializer, initializing_from_value = (
variable_scope._get_default_variable_store()._get_default_initializer( # pylint: disable=protected-access
name=name, shape=shape_object, dtype=dtype))
else:
initializing_from_value = not callable(initializer)
# Same logic as get_variable
variable_dtype = dtype.base_dtype
if initializing_from_value:
if shape is not None:
raise ValueError("If initializer is a constant, do not specify shape.")
initial_value = initializer
else:
# Instantiate initializer if provided initializer is a type object.
if isinstance(initializer, type(init_ops.Initializer)):
initializer = initializer(dtype=dtype)
def initial_value():
return initializer(
shape_object.as_list(), dtype=dtype, partition_info=partition_info)
return resource_variable_ops.ResourceVariable(
initial_value=initial_value,
name=name,
dtype=variable_dtype,
**kwargs
)
def add_variable(checkpointable, name, shape=None, dtype=dtypes.float32,
initializer=None):
"""Add a variable to a Checkpointable with no scope influence."""
return checkpointable._add_variable_with_custom_getter( # pylint: disable=protected-access
name=name, shape=shape, dtype=dtype,
initializer=initializer, getter=_default_getter)
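# A minimal usage sketch (the object, name and shape here are illustrative):
#
#   v = add_variable(obj, name="kernel", shape=[3, 4], dtype=dtypes.float32)
#
# Unlike `tf.get_variable`, repeated calls with the same name never reuse an
# existing variable: `_default_getter` always builds a fresh ResourceVariable.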
def _breadth_first_checkpointable_traversal(root_checkpointable):
"""Find shortest paths to all variables owned by dependencies of root."""
bfs_sorted = []
to_visit = collections.deque([root_checkpointable])
path_to_root = {root_checkpointable: ()}
while to_visit:
current_checkpointable = to_visit.popleft()
current_checkpointable._maybe_initialize_checkpointable() # pylint: disable=protected-access
bfs_sorted.append(current_checkpointable)
for child_checkpointable in (
current_checkpointable._checkpoint_dependencies): # pylint: disable=protected-access
if child_checkpointable.ref not in path_to_root:
path_to_root[child_checkpointable.ref] = (
path_to_root[current_checkpointable] + (child_checkpointable,))
to_visit.append(child_checkpointable.ref)
return bfs_sorted, path_to_root
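# For intuition, with dependencies root -> model -> dense (names illustrative),
# the returned structures look like:
#   bfs_sorted   == [root, model, dense]
#   path_to_root == {root: (), model: (<edge root->model>,),
#                    dense: (<edge root->model>, <edge model->dense>)}
# i.e. each value is the tuple of named edges walked from the root.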
def _escape_local_name(name):
# We need to support slashes in local names for compatibility, since this
# naming scheme is being patched in to things like Layer.add_variable where
# slashes were previously accepted. We also want to use slashes to indicate
# edges traversed to reach the variable, so we escape forward slashes in
# names.
return (name.replace(_ESCAPE_CHAR, _ESCAPE_CHAR + _ESCAPE_CHAR)
.replace(r"/", _ESCAPE_CHAR + "S"))
def _object_prefix_from_path(path_to_root):
return "/".join(
(_escape_local_name(checkpointable.name)
for checkpointable in path_to_root))
def _slot_variable_naming_for_optimizer(optimizer_path):
"""Make a function for naming slot variables in an optimizer."""
# Name slot variables:
#
# <variable name>/<_OPTIMIZER_SLOTS_NAME>/<optimizer path>/<slot name>
#
# where <variable name> is exactly the checkpoint name used for the original
# variable, including the path from the checkpoint root and the local name in
# the object which owns it. Note that we only save slot variables if the
# variable it's slotting for is also being saved.
optimizer_identifier = "/%s/%s/" % (_OPTIMIZER_SLOTS_NAME, optimizer_path)
def _name_slot_variable(variable_path, slot_name):
"""With an optimizer specified, name a slot variable."""
return (variable_path
+ optimizer_identifier
+ _escape_local_name(slot_name))
return _name_slot_variable
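# Example of a resulting key (object paths illustrative): a "momentum" slot
# held by an optimizer reachable as "optimizer", for a variable saved under
# "model/dense/kernel", is named roughly:
#   model/dense/kernel/<_OPTIMIZER_SLOTS_NAME>/optimizer/momentum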
def _serialize_slot_variables(checkpointable_objects, node_ids, object_names):
"""Gather and name slot variables."""
non_slot_objects = list(checkpointable_objects)
slot_variables = {}
for checkpointable in non_slot_objects:
if isinstance(checkpointable, optimizer_lib.Optimizer):
naming_scheme = _slot_variable_naming_for_optimizer(
optimizer_path=object_names[checkpointable])
slot_names = checkpointable.get_slot_names()
for slot_name in slot_names:
for original_variable_node_id, original_variable in enumerate(
non_slot_objects):
try:
slot_variable = checkpointable.get_slot(
original_variable, slot_name)
except AttributeError:
slot_variable = None
if slot_variable is None:
continue
slot_variable._maybe_initialize_checkpointable() # pylint: disable=protected-access
if slot_variable._checkpoint_dependencies: # pylint: disable=protected-access
# TODO (allenl): Gather dependencies of slot variables. id:3924
# https://github.com/imdone/tensorflow/issues/3922
raise NotImplementedError(
"Currently only variables with no dependencies can be saved as "
"slot variables. File a feature request if this limitation "
"bothers you.")
if slot_variable in node_ids:
raise NotImplementedError(
"A slot variable was re-used as a dependency of a "
"Checkpointable object. This is not currently allowed. File a "
"feature request if this limitation bothers you.")
checkpoint_name = naming_scheme(
variable_path=object_names[original_variable],
slot_name=slot_name)
object_names[slot_variable] = checkpoint_name
slot_variable_node_id = len(checkpointable_objects)
node_ids[slot_variable] = slot_variable_node_id
checkpointable_objects.append(slot_variable)
slot_variable_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph
.CheckpointableObject.SlotVariableReference(
slot_name=slot_name,
original_variable_node_id=original_variable_node_id,
slot_variable_node_id=slot_variable_node_id))
slot_variables.setdefault(checkpointable, []).append(
slot_variable_proto)
return slot_variables
def _serialize_checkpointables(
checkpointable_objects, node_ids, object_names, slot_variables):
"""Name non-slot `Checkpointable`s and add them to `object_graph_proto`."""
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
named_saveables = {}
for checkpoint_id, checkpointable in enumerate(checkpointable_objects):
assert node_ids[checkpointable] == checkpoint_id
object_proto = object_graph_proto.nodes.add()
object_proto.slot_variables.extend(slot_variables.get(checkpointable, ()))
object_name = object_names[checkpointable]
for name, saveable_factory in (
checkpointable._gather_saveables_for_checkpoint().items()): # pylint: disable=protected-access
attribute = object_proto.attributes.add()
attribute.name = name
attribute.checkpoint_key = "%s/%s/%s" % (
object_name, _OBJECT_ATTRIBUTES_NAME, _escape_local_name(name))
if callable(saveable_factory):
saveable = saveable_factory(name=attribute.checkpoint_key)
else:
saveable = saveable_factory
# Figure out the name-based Saver's name for this variable.
saver_dict = saver_lib.BaseSaverBuilder.OpListToDict(
[saveable], convert_variable_to_tensor=False)
attribute.full_name, = saver_dict.keys()
named_saveables[attribute.checkpoint_key] = saveable
for child in checkpointable._checkpoint_dependencies: # pylint: disable=protected-access
child_proto = object_proto.children.add()
child_proto.node_id = node_ids[child.ref]
child_proto.local_name = child.name
return named_saveables, object_graph_proto
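# Checkpoint keys produced above follow the pattern
#   <object path>/<_OBJECT_ATTRIBUTES_NAME>/<escaped attribute name>
# e.g., assuming _ESCAPE_CHAR is ".", something like (path illustrative):
#   model/dense/kernel/.ATTRIBUTES/VARIABLE_VALUE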
def _serialize_object_graph(root_checkpointable):
"""Determine checkpoint keys for variables and build a serialized graph.
Non-slot variables are keyed based on a shortest path from the root saveable
to the object which owns the variable (i.e. the one which called
`Checkpointable._add_variable` to create it).
Slot variables are keyed based on a shortest path to the variable being
slotted for, a shortest path to their optimizer, and the slot name.
Args:
root_checkpointable: A `Checkpointable` object whose variables (including
the variables of dependencies, recursively) should be saved.
Returns:
A tuple of (named_variables, object_graph_proto):
named_variables: A dictionary mapping names to variable objects.
object_graph_proto: A CheckpointableObjectGraph protocol buffer containing
the serialized object graph and variable references.
Raises:
ValueError: If there are invalid characters in an optimizer's slot names.
"""
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
slot_variables = _serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return _serialize_checkpointables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names,
slot_variables=slot_variables)
def list_objects(root_checkpointable):
"""Traverse the object graph and list all accessible objects.
Looks for `Checkpointable` objects which are dependencies of
`root_checkpointable`. Includes slot variables only if the variable they are
slotting for and the optimizer are dependencies of `root_checkpointable`
(i.e. if they would be saved with a checkpoint).
Args:
root_checkpointable: A `Checkpointable` object whose dependencies should be
flattened.
Returns:
A flat list of objects.
"""
# TODO (allenl): Extract out gathering logic so the naming logic doesn't have id:4322
# https://github.com/imdone/tensorflow/issues/4320
# to run.
checkpointable_objects, path_to_root = (
_breadth_first_checkpointable_traversal(root_checkpointable))
object_names = {
obj: _object_prefix_from_path(path)
for obj, path in path_to_root.items()}
node_ids = {node: node_id for node_id, node
in enumerate(checkpointable_objects)}
_serialize_slot_variables(
checkpointable_objects=checkpointable_objects,
node_ids=node_ids,
object_names=object_names)
return checkpointable_objects
def gather_initializers(root_checkpointable):
"""Traverse the object graph and find initialization ops.
Looks for `Checkpointable` objects which are dependencies of
`root_checkpointable` and which have an `initializer` property. Includes
initializers for slot variables only if the variable they are slotting for and
the optimizer are dependencies of `root_checkpointable` (i.e. if they would be
saved with a checkpoint).
Args:
root_checkpointable: A `Checkpointable` object to gather initializers for.
Returns:
A list of initialization ops.
"""
checkpointable_objects = list_objects(root_checkpointable)
return [c.initializer for c in checkpointable_objects
if hasattr(c, "initializer") and c.initializer is not None]
class _NoRestoreSaveable(saver_lib.BaseSaverBuilder.SaveableObject):
def __init__(self, tensor, name):
spec = saver_lib.BaseSaverBuilder.SaveSpec(tensor, "", name)
super(_NoRestoreSaveable, self).__init__(tensor, [spec], name)
def restore(self, restored_tensors, restored_shapes):
return control_flow_ops.no_op()
class _LoadStatus(object):
"""Abstract base for load status callbacks."""
@abc.abstractmethod
def assert_consumed(self):
"""Raises an exception unless a non-trivial restoration has completed."""
pass
@abc.abstractmethod
def run_restore_ops(self, session=None):
"""Runs restore ops from the checkpoint. Requires a valid checkpoint."""
pass
@abc.abstractmethod
def initialize_or_restore(self, session=None):
"""Runs restore ops from the checkpoint, or initializes variables."""
pass
class CheckpointLoadStatus(_LoadStatus):
"""Checks the status of checkpoint loading and manages restore ops.
Returned from `Saver.restore`. Since `restore` may defer the loading of values
in the checkpoint which don't yet have corresponding Python objects,
`CheckpointLoadStatus` provides a callback to verify that checkpoint loading
is complete (`assert_consumed`).
When graph building, `restore` does not run restore ops itself since their
creation may be deferred. The `run_restore_ops` method must be called once all
Python objects with values to restore have been created and added to the
dependency graph (this does not necessarily have to be the whole checkpoint;
calling `run_restore_ops` while `assert_consumed` fails is supported and will
partially restore the checkpoint).
See `Saver.restore` for usage examples.
"""
def __init__(self, checkpoint, feed_dict, root_checkpointable):
self._checkpoint = checkpoint
self._feed_dict = feed_dict
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
"""Asserts that all objects in the checkpoint have been created/matched.
Returns:
`self` for chaining.
Raises:
AssertionError: If there are any Python objects in the dependency graph
which have not been restored from this checkpoint or a later `restore`,
or if there are any checkpointed values which have not been matched to
Python objects.
"""
for node_id, node in enumerate(self._checkpoint.object_graph_proto.nodes):
checkpointable = self._checkpoint.object_by_proto_id.get(node_id, None)
if checkpointable is None:
raise AssertionError("Unresolved object in checkpoint: %s" % (node,))
if checkpointable._update_uid < self._checkpoint.restore_uid: # pylint: disable=protected-access
raise AssertionError(
"Object not assigned a value from checkpoint: %s" % (node,))
if self._checkpoint.slot_restorations:
# Sanity check; this collection should be clear if everything has been
# restored.
raise AssertionError("Unresolved slot restorations: %s" % (
self._checkpoint.slot_restorations,))
if self._checkpoint.unused_attributes:
raise AssertionError(
("Unused attributes in these objects (the attributes exist in the "
"checkpoint but not in the objects): %s") % (
self._checkpoint.unused_attributes.items(),))
for checkpointable_object in list_objects(self._root_checkpointable):
self._checkpoint.all_python_objects.add(checkpointable_object)
unused_python_objects = (
set(self._checkpoint.all_python_objects)
- set(self._checkpoint.object_by_proto_id.values()))
if unused_python_objects:
raise AssertionError(
("Some Python objects were not bound to checkpointed values, likely "
"due to changes in the Python program: %s")
% (unused_python_objects,))
return self
def run_restore_ops(self, session=None):
"""Run operations to restore objects in the dependency graph."""
if context.executing_eagerly():
return # Run eagerly
if session is None:
session = ops.get_default_session()
session.run(self._checkpoint.restore_ops, feed_dict=self._feed_dict)
def initialize_or_restore(self, session=None):
"""Run operations to initialize or restore objects in the dependency graph.
Any objects in the dependency graph which have initializers but are not in
the checkpoint will have those initializers run, unless those variables are
being restored by a later call to `tf.train.Checkpoint.restore()`.
This method has a sibling in `InitializationOnlyStatus` which instead
initializes variables. That type is returned if no checkpoint is specified
in `Saver.restore`.
    Args:
      session: The session to run init/restore ops in. If `None`, uses the
        default session.
"""
if context.executing_eagerly():
return # Initialization and restoration ops are run eagerly
if session is None:
session = ops.get_default_session()
all_objects = list_objects(self._root_checkpointable)
already_initialized_objects = set(
self._checkpoint.object_by_proto_id.values())
initializers_for_non_restored_variables = [
c.initializer for c in all_objects
if hasattr(c, "initializer")
and c not in already_initialized_objects
and (getattr(c, "_update_uid", self._checkpoint.restore_uid - 1)
< self._checkpoint.restore_uid)]
self.run_restore_ops(session=session)
session.run(initializers_for_non_restored_variables)
class InitializationOnlyStatus(_LoadStatus):
"""Returned from `Saver.restore` when no checkpoint has been specified.
Objects of this type have the same `assert_consumed` method as
`CheckpointLoadStatus`, but it always fails. However,
`initialize_or_restore` works on objects of both types, and will
initialize variables in `InitializationOnlyStatus` objects or restore them
otherwise.
"""
def __init__(self, root_checkpointable, restore_uid):
self._restore_uid = restore_uid
self._root_checkpointable = root_checkpointable
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"No checkpoint specified (save_path=None); nothing is being restored.")
def run_restore_ops(self, session=None):
"""For consistency with `CheckpointLoadStatus`.
Use `initialize_or_restore` for initializing if no checkpoint was passed
to `Saver.restore` and restoring otherwise.
Args:
session: Not used.
"""
raise AssertionError(
"No checkpoint specified, so no restore ops are available "
"(save_path=None to Saver.restore).")
def initialize_or_restore(self, session=None):
"""Runs initialization ops for variables.
Objects which would be saved by `Saver.save` will be initialized, unless
those variables are being restored by a later call to
`tf.train.Checkpoint.restore()`.
This method does nothing when executing eagerly (initializers get run
eagerly).
Args:
session: The session to run initialization ops in. If `None`, uses the
default session.
"""
if context.executing_eagerly():
return # run eagerly
if session is None:
session = ops.get_default_session()
checkpointable_objects = list_objects(self._root_checkpointable)
initializers = [
c.initializer for c in checkpointable_objects
if hasattr(c, "initializer") and c.initializer is not None
and (getattr(c, "_update_uid", self._restore_uid - 1)
< self._restore_uid)]
session.run(initializers)
_DEPRECATED_RESTORE_INSTRUCTIONS = (
"Restoring a name-based tf.train.Saver checkpoint using the object-based "
"restore API. This mode uses global names to match variables, and so is "
"somewhat fragile. It also adds new restore ops to the graph each time it "
"is called. Prefer re-encoding training checkpoints in the object-based "
"format: run save() on the object-based saver (the same one this message "
"is coming from) and use that checkpoint in the future.")
class NameBasedSaverStatus(_LoadStatus):
"""Status for loading a name-based training checkpoint."""
def __init__(self, object_saver, save_path):
self._object_saver = object_saver
self._save_path = save_path
def assert_consumed(self):
"""Assertion for consistency with `CheckpointLoadStatus`. Always fails."""
raise AssertionError(
"Restoring a name-based checkpoint. No load status is available.")
@deprecation.deprecated(
date=None, instructions=_DEPRECATED_RESTORE_INSTRUCTIONS)
def run_restore_ops(self, session=None):
"""Load the name-based training checkpoint using a new `tf.train.Saver`."""
if session is None and not context.executing_eagerly():
session = ops.get_default_session()
with ops.device("/cpu:0"):
saver_lib.Saver(self._object_saver._global_variable_names()).restore( # pylint: disable=protected-access
sess=session, save_path=self._save_path)
def initialize_or_restore(self, session=None):
"""Alias for `run_restore_ops`."""
self.run_restore_ops(session=session)
class _SessionWithFeedDictAdditions(session_lib.SessionInterface):
"""Pretends to be a session, inserts extra feeds on run()."""
def __init__(self, session, feed_additions):
self._wrapped_session = session
self._feed_additions = feed_additions
def run(self, fetches, feed_dict=None, **kwargs):
if feed_dict is None:
feed_dict = {}
else:
feed_dict = feed_dict.copy()
feed_dict.update(self._feed_additions)
return self._wrapped_session.run(
fetches=fetches, feed_dict=feed_dict, **kwargs)
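# Sketch of how save() uses this wrapper to inject the serialized object graph
# as an extra feed (tensor/bytes names illustrative):
#   wrapped = _SessionWithFeedDictAdditions(
#       session=sess, feed_additions={object_graph_tensor: proto_bytes})
#   wrapped.run(save_op)  # runs with the extra feed merged in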
def _copy_saver_with_new_var_list(old_saver, new_var_list):
"""Copy a `tf.train.Saver`'s state to a new Saver with different variables."""
new_saver = saver_lib.Saver(var_list=new_var_list)
# TODO (allenl): Move to copying functionality to Saver? id:3986
# https://github.com/imdone/tensorflow/issues/3984
# pylint: disable=protected-access
new_saver._last_checkpoints = old_saver._last_checkpoints
new_saver._checkpoints_to_be_deleted = old_saver._checkpoints_to_be_deleted
new_saver._next_checkpoint_time = old_saver._next_checkpoint_time
# pylint: enable=protected-access
return new_saver
class CheckpointableSaver(object):
"""Saves and restores a `Checkpointable` object and its dependencies.
See `Checkpointable` for details of dependency management. `Saver` wraps
`tf.train.Saver` for saving, including extra information about the graph of
dependencies between Python objects. When restoring, it uses this information
about the save-time dependency graph to more robustly match objects with their
checkpointed values. When executing eagerly, it supports restoring variables
on object creation (see `Saver.restore`).
Values in a checkpoint are mapped to `Checkpointable` Python objects
(`Variable`s, `Optimizer`s, `Layer`s) based on the names provided when the
checkpoint was written. To avoid breaking existing checkpoints when modifying
a class, dependency names (the names of attributes to which `Checkpointable`
objects are assigned) may not change. These names are local to objects, in
contrast to the `Variable.name`-based save/restore from `tf.train.Saver`, and
so allow additional program transformations.
"""
def __init__(self, root_checkpointable):
"""Configure saving.
Args:
root_checkpointable: The root of the object graph to save/restore. This
object and all of its dependencies are saved in the checkpoint. When
restoring, objects are matched and restored starting from this root.
"""
# Allow passing in a weak reference to avoid reference cycles when
# `Checkpointable` objects save themselves.
self._root_checkpointable_ref = root_checkpointable
# The file prefix placeholder is created lazily when graph building (and not
# at all when executing eagerly) to avoid creating ops in the constructor
# (when they may never be necessary).
self._file_prefix_placeholder = None
# Op caching for save
self._object_graph_feed_tensor = None
self._last_save_object_graph = None
self._last_save_saver = None
# Op caching for restore
self._last_restore_object_graph = None
self._last_restore_checkpoint = None
@property
def _root_checkpointable(self):
if isinstance(self._root_checkpointable_ref, weakref.ref):
derefed = self._root_checkpointable_ref()
assert derefed is not None
return derefed
else:
return self._root_checkpointable_ref
def save(self, file_prefix, checkpoint_number=None, session=None):
"""Save a training checkpoint.
The saved checkpoint includes variables created by this object and any
Checkpointable objects it depends on at the time `Saver.save()` is called.
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `checkpoint_number`, if provided.
checkpoint_number: An integer variable or Tensor, used to number
checkpoints. Typically this value is saved along with other variables in
training checkpoints, which will happen automatically if it was created
by `root_checkpointable` or one of its dependencies (via
`Checkpointable._add_variable`).
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint.
"""
named_variables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
if not context.executing_eagerly():
if session is None:
session = ops.get_default_session()
if self._object_graph_feed_tensor is None:
with ops.device("/cpu:0"):
self._object_graph_feed_tensor = constant_op.constant(
"", dtype=dtypes.string)
object_graph_tensor = self._object_graph_feed_tensor
feed_additions = {object_graph_tensor: graph_proto.SerializeToString()}
else:
session = None
with ops.device("/cpu:0"):
object_graph_tensor = constant_op.constant(
graph_proto.SerializeToString(), dtype=dtypes.string)
feed_additions = None
assert checkpointable_lib.OBJECT_GRAPH_PROTO_KEY not in named_variables
named_variables[checkpointable_lib.OBJECT_GRAPH_PROTO_KEY] = (
_NoRestoreSaveable(
tensor=object_graph_tensor,
name=checkpointable_lib.OBJECT_GRAPH_PROTO_KEY))
if (self._last_save_object_graph != graph_proto
# When executing eagerly, we need to re-create SaveableObjects each time
# save() is called so they pick up new Tensors passed to their
# constructors. That means the Saver needs to be copied with a new
# var_list.
or context.executing_eagerly()):
if self._last_save_object_graph is not None:
self._last_save_saver = _copy_saver_with_new_var_list(
old_saver=self._last_save_saver, new_var_list=named_variables)
else:
self._last_save_saver = saver_lib.Saver(var_list=named_variables)
self._last_save_object_graph = graph_proto
with ops.device("/cpu:0"):
save_path = self._last_save_saver.save(
sess=_SessionWithFeedDictAdditions(
session=session, feed_additions=feed_additions),
save_path=file_prefix,
write_meta_graph=False,
global_step=checkpoint_number)
return save_path
def _global_variable_names(self):
"""Generate a `tf.train.Saver`-style `var_list` using `variable.name`s."""
named_saveables, graph_proto = _serialize_object_graph(
self._root_checkpointable)
saver_names = {}
for object_proto in graph_proto.nodes:
for attribute_proto in object_proto.attributes:
saver_names[attribute_proto.full_name] = named_saveables[
attribute_proto.checkpoint_key]
return saver_names
def restore(self, save_path):
"""Restore a training checkpoint.
Restores `root_checkpointable` and any objects that it tracks
(transitive). Either assigns values immediately if variables to restore have
been created already, or defers restoration until the variables are
created. Dependencies added to the `root_checkpointable` passed to the
constructor after this call will be matched if they have a corresponding
object in the checkpoint.
When building a graph, restorations are added to the graph but not run.
To disallow deferred loading, assert immediately that all checkpointed
variables have been matched to variable objects:
```python
saver = Saver(root)
saver.restore(path).assert_consumed()
```
An exception will be raised unless every object was matched and its
variables already exist.
When graph building, `assert_consumed()` indicates that all of the restore
ops which will be created for this checkpoint have been created. They can be
run via the `run_restore_ops()` function of the status object:
```python
saver.restore(path).assert_consumed().run_restore_ops()
```
If the checkpoint has not been consumed completely, then the list of restore
ops will grow as more objects are added to the dependency graph.
Name-based `tf.train.Saver` checkpoints can be loaded using this
method. There is no deferred loading, and names are used to match
variables. No restore ops are created/run until `run_restore_ops()` or
`initialize_or_restore()` are called on the returned status object, even
when executing eagerly. Re-encode name-based checkpoints using this
object-based `Saver.save` as soon as possible.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency
graph. If the checkpoint was written by the name-based `tf.train.Saver`,
names are used to match variables.
Returns:
A load status object, which can be used to make assertions about the
status of checkpoint restoration and run initialization/restore ops
(of type `CheckpointLoadStatus`, or `InitializationOnlyStatus` if
`save_path` is `None`).
If `save_path` points to a name-based checkpoint, a `NameBasedSaverStatus`
object is returned which runs restore ops from a name-based saver.
"""
if save_path is None:
return InitializationOnlyStatus(self._root_checkpointable, ops.uid())
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if self._file_prefix_placeholder is None:
with ops.device("/cpu:0"):
self._file_prefix_placeholder = constant_op.constant("model")
file_prefix_tensor = self._file_prefix_placeholder
file_prefix_feed_dict = {self._file_prefix_placeholder: save_path}
else:
with ops.device("/cpu:0"):
file_prefix_tensor = constant_op.constant(save_path)
file_prefix_feed_dict = None
reader = pywrap_tensorflow.NewCheckpointReader(save_path)
try:
object_graph_string = reader.get_tensor(
checkpointable_lib.OBJECT_GRAPH_PROTO_KEY)
except errors_impl.NotFoundError:
# The object graph proto does not exist in this checkpoint. Try again with
# name-based saving.
return NameBasedSaverStatus(self, save_path)
object_graph_proto = (
checkpointable_object_graph_pb2.CheckpointableObjectGraph())
object_graph_proto.ParseFromString(object_graph_string)
if in_graph_mode and object_graph_proto == self._last_restore_object_graph:
checkpoint = self._last_restore_checkpoint
else:
if in_graph_mode:
dtype_map = None
else:
dtype_map = reader.get_variable_to_dtype_map()
checkpoint = _CheckpointRestoreCoordinator(
object_graph_proto=object_graph_proto,
save_path=file_prefix_tensor,
dtype_map=dtype_map)
if in_graph_mode:
if self._last_restore_object_graph is not None:
raise NotImplementedError(
"Using a single Saver to restore different object graphs is not "
"currently supported when graph building. Use a different Saver "
"for each object graph (restore ops will be duplicated), or "
"file a feature request if this limitation bothers you.")
self._last_restore_checkpoint = checkpoint
self._last_restore_object_graph = object_graph_proto
checkpointable_lib._CheckpointPosition( # pylint: disable=protected-access
checkpoint=checkpoint, proto_id=0).restore(self._root_checkpointable)
load_status = CheckpointLoadStatus(
checkpoint,
root_checkpointable=self._root_checkpointable,
feed_dict=file_prefix_feed_dict)
return load_status
@tf_export("train.Checkpoint")
class Checkpoint(checkpointable_lib.Checkpointable):
"""Groups checkpointable objects, saving and restoring them.
`Checkpoint`'s constructor accepts keyword arguments whose values are types
that contain checkpointable state, such as `tf.train.Optimizer`
implementations, `tf.Variable`, `tf.keras.Layer` implementations, or
`tf.keras.Model` implementations. It saves these values with a checkpoint, and
maintains a `save_counter` for numbering checkpoints.
Example usage when graph building:
```python
import tensorflow as tf
import os
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
train_op = optimizer.minimize( ... )
status.assert_consumed() # Optional sanity checks.
with tf.Session() as session:
# Use the Session to restore variables, or initialize them if
# tf.train.latest_checkpoint returned None.
status.initialize_or_restore(session)
for _ in range(num_training_steps):
session.run(train_op)
checkpoint.save(file_prefix=checkpoint_prefix)
```
Example usage with eager execution enabled:
```python
import tensorflow as tf
import os
tf.enable_eager_execution()
checkpoint_directory = "/tmp/training_checkpoints"
checkpoint_prefix = os.path.join(checkpoint_directory, "ckpt")
checkpoint = tf.train.Checkpoint(optimizer=optimizer, model=model)
status = checkpoint.restore(tf.train.latest_checkpoint(checkpoint_directory))
for _ in range(num_training_steps):
optimizer.minimize( ... ) # Variables will be restored on creation.
status.assert_consumed() # Optional sanity checks.
checkpoint.save(file_prefix=checkpoint_prefix)
```
`Checkpoint.save` and `Checkpoint.restore` write and read object-based
checkpoints, in contrast to `tf.train.Saver` which writes and reads
`variable.name` based checkpoints. Object-based checkpointing saves a graph of
dependencies between Python objects (`Layer`s, `Optimizer`s, `Variable`s,
etc.) with named edges, and this graph is used to match variables when
restoring a checkpoint. It can be more robust to changes in the Python
program, and helps to support restore-on-create for variables when executing
eagerly. Prefer `tf.train.Checkpoint` over `tf.train.Saver` for new code.
`Checkpoint` objects have dependencies on the objects passed as keyword
arguments to their constructors, and each dependency is given a name that is
identical to the name of the keyword argument for which it was created.
TensorFlow classes like `Layer`s and `Optimizer`s will automatically add
dependencies on their variables (e.g. "kernel" and "bias" for
`tf.keras.layers.Dense`). Inheriting from `tf.keras.Model` makes managing
dependencies easy in user-defined classes, since `Model` hooks into attribute
assignment. For example:
```python
class Regress(tf.keras.Model):
def __init__(self):
super(Regress, self).__init__()
self.input_transform = tf.keras.layers.Dense(10)
# ...
def call(self, inputs):
x = self.input_transform(inputs)
# ...
```
This `Model` has a dependency named "input_transform" on its `Dense` layer,
which in turn depends on its variables. As a result, saving an instance of
`Regress` using `tf.train.Checkpoint` will also save all the variables created
by the `Dense` layer.
Attributes:
save_counter: Incremented when `save()` is called. Used to number
checkpoints.
"""
def __init__(self, **kwargs):
"""Group objects into a training checkpoint.
Args:
**kwargs: Keyword arguments are set as attributes of this object, and are
saved with the checkpoint. Values must be checkpointable objects.
Raises:
ValueError: If objects in `kwargs` are not checkpointable.
"""
super(Checkpoint, self).__init__()
for k, v in sorted(kwargs.items(), key=lambda item: item[0]):
if not isinstance(v, checkpointable_lib.CheckpointableBase):
raise ValueError(
("`Checkpoint` was expecting a checkpointable object (an object "
"derived from `CheckpointableBase`), got %s. If you believe this "
"object should be checkpointable (i.e. it is part of the "
"TensorFlow Python API and manages state), please open an issue.")
% (v,))
setattr(self, k, v)
self._save_counter = None # Created lazily for restore-on-create.
self._saver = CheckpointableSaver(weakref.ref(self))
def _maybe_create_save_counter(self):
"""Create a save counter if it does not yet exist."""
if self._save_counter is None:
# Initialized to 0 and incremented before saving.
with ops.device("/cpu:0"):
self._save_counter = add_variable(
self, name="save_counter", initializer=0, dtype=dtypes.int64)
@property
def save_counter(self):
"""An integer variable which starts at zero and is incremented on save.
Used to number checkpoints.
Returns:
The save counter variable.
"""
self._maybe_create_save_counter()
return self._save_counter
def save(self, file_prefix, session=None):
"""Save a training checkpoint.
The saved checkpoint includes variables created by this object and any
checkpointable objects it depends on at the time `Checkpoint.save()` is
called.
Args:
file_prefix: A prefix to use for the checkpoint filenames
(/path/to/directory/and_a_prefix). Names are generated based on this
prefix and `Checkpoint.save_counter`.
session: The session to evaluate variables in. Ignored when executing
eagerly. If not provided when graph building, the default session is
used.
Returns:
The full path to the checkpoint.
"""
in_graph_mode = not context.executing_eagerly()
if in_graph_mode:
if session is None:
session = ops.get_default_session()
if self._save_counter is None:
# When graph building, if this is a new save counter variable then it
# needs to be initialized before assign_add. This is only an issue if
# restore() has not been called first.
session.run(self.save_counter.initializer)
with ops.colocate_with(self.save_counter):
assign_op = self.save_counter.assign_add(1)
if in_graph_mode:
session.run(assign_op)
return self._saver.save(
file_prefix=file_prefix,
checkpoint_number=self.save_counter,
session=session)
def restore(self, save_path):
"""Restore a training checkpoint.
Restores this `Checkpoint` and any objects it depends on.
When executing eagerly, either assigns values immediately if variables to
restore have been created already, or defers restoration until the variables
are created. Dependencies added after this call will be matched if they have
a corresponding object in the checkpoint (the restore request will queue in
any checkpointable object waiting for the expected dependency to be added).
When graph building, restoration ops are added to the graph but not run
immediately.
To ensure that loading is complete and no more assignments will take place,
use the `assert_consumed()` method of the status object returned by
`restore`:
```python
checkpoint = tf.train.Checkpoint( ... )
checkpoint.restore(path).assert_consumed()
```
An exception will be raised if any Python objects in the dependency graph
were not found in the checkpoint, or if any checkpointed values do not have
a matching Python object.
When graph building, `assert_consumed()` indicates that all of the restore
ops that will be created for this checkpoint have been created. They can be
run via the `run_restore_ops()` method of the status object:
```python
checkpoint.restore(path).assert_consumed().run_restore_ops()
```
If the checkpoint has not been consumed completely, then the list of restore
ops will grow as more objects are added to the dependency graph.
Name-based `tf.train.Saver` checkpoints can be loaded using this
method. There is no deferred loading, and names are used to match
variables. No restore ops are created/run until `run_restore_ops()` or
`initialize_or_restore()` are called on the returned status object, even
when executing eagerly. Re-encode name-based checkpoints using
`tf.train.Checkpoint.save` as soon as possible.
Args:
save_path: The path to the checkpoint, as returned by `save` or
`tf.train.latest_checkpoint`. If None (as when there is no latest
checkpoint for `tf.train.latest_checkpoint` to return), returns an
object which may run initializers for objects in the dependency
graph. If the checkpoint was written by the name-based `tf.train.Saver`,
names are used to match variables.
Returns:
A load status object, which can be used to make assertions about the
status of a checkpoint restoration and run initialization/restore ops.
The returned status object has the following methods:
- `assert_consumed()`:
Raises an exception if any variables/objects are unmatched: either
checkpointed values which don't have a matching Python object or
Python objects in the dependency graph with no values in the
checkpoint. This method returns the status object, and so may be
chained with `initialize_or_restore` or `run_restore_ops`.
- `initialize_or_restore(session=None)`:
When graph building, runs variable initializers if `save_path` is
`None`, but otherwise runs restore operations. If no `session` is
explicitly specified, the default session is used. No effect for
object-based checkpoints when executing eagerly (variables are
initialized or restored eagerly).
- `run_restore_ops(session=None)`:
When graph building, runs restore operations. If no `session` is
explicitly specified, the default session is used. No effect for
object-based checkpoints when executing eagerly (restore operations
are run eagerly). May only be called when `save_path` is not `None`.
"""
status = self._saver.restore(save_path=save_path)
# Create the save counter now so it gets initialized with other variables
# when graph building. Creating it earlier would lead to double
# initialization when executing eagerly.
self._maybe_create_save_counter()
    return status
websockets.rs
use crate::errors::*;
use crate::config::*;
use crate::futures::ws_model::*;
use url::Url;
use serde::{Deserialize, Serialize};
use std::sync::atomic::{AtomicBool, Ordering};
use tungstenite::{connect, Message};
use tungstenite::protocol::WebSocket;
use tungstenite::client::AutoStream;
use tungstenite::handshake::client::Response;
#[allow(clippy::all)]
enum FuturesWebsocketAPI {
Default,
MultiStream,
Custom(String),
}
pub enum FuturesMarket {
USDM,
COINM,
Vanilla,
}
impl FuturesWebsocketAPI {
fn params(self, market: FuturesMarket, subscription: &str) -> String {
let baseurl = match market {
FuturesMarket::USDM => "wss://fstream.binance.com",
FuturesMarket::COINM => "wss://dstream.binance.com",
FuturesMarket::Vanilla => "wss://vstream.binance.com",
};
match self {
FuturesWebsocketAPI::Default => {
format!("{}/ws/{}", baseurl, subscription)
}
FuturesWebsocketAPI::MultiStream => {
format!("{}/stream?streams={}", baseurl, subscription)
}
FuturesWebsocketAPI::Custom(url) => url,
}
}
}
#[allow(clippy::large_enum_variant)]
#[derive(Debug, Serialize, Deserialize, Clone)]
pub enum FuturesWebsocketEvent {
AccountUpdate(AccountUpdateEvent),
OrderTrade(OrderTradeEvent),
AggrTrades(AggrTradesEvent),
Trade(TradeEvent),
OrderBook(OrderBook),
DayTicker(DayTickerEvent),
MiniTicker(MiniTickerEvent),
MiniTickerAll(Vec<MiniTickerEvent>),
IndexPrice(IndexPriceEvent),
MarkPrice(MarkPriceEvent),
MarkPriceAll(Vec<MarkPriceEvent>),
DayTickerAll(Vec<DayTickerEvent>),
Kline(KlineEvent),
ContinuousKline(ContinuousKlineEvent),
IndexKline(IndexKlineEvent),
Liquidation(LiquidationEvent),
DepthOrderBook(DepthOrderBookEvent),
BookTicker(BookTickerEvent),
}
pub struct FuturesWebSockets<'a> {
pub socket: Option<(WebSocket<AutoStream>, Response)>,
handler: Box<dyn FnMut(FuturesWebsocketEvent) -> Result<()> + 'a>,
}
#[derive(Serialize, Deserialize, Debug)]
#[serde(untagged)]
enum FuturesEvents {
Vec(Vec<DayTickerEvent>),
DayTickerEvent(DayTickerEvent),
BookTickerEvent(BookTickerEvent),
MiniTickerEvent(MiniTickerEvent),
VecMiniTickerEvent(Vec<MiniTickerEvent>),
AccountUpdateEvent(AccountUpdateEvent),
OrderTradeEvent(OrderTradeEvent),
AggrTradesEvent(AggrTradesEvent),
IndexPriceEvent(IndexPriceEvent),
MarkPriceEvent(MarkPriceEvent),
VecMarkPriceEvent(Vec<MarkPriceEvent>),
TradeEvent(TradeEvent),
KlineEvent(KlineEvent),
ContinuousKlineEvent(ContinuousKlineEvent),
IndexKlineEvent(IndexKlineEvent),
LiquidationEvent(LiquidationEvent),
OrderBook(OrderBook),
DepthOrderBookEvent(DepthOrderBookEvent),
}
impl<'a> FuturesWebSockets<'a> {
pub fn new<Callback>(handler: Callback) -> FuturesWebSockets<'a>
where
Callback: FnMut(FuturesWebsocketEvent) -> Result<()> + 'a,
{
FuturesWebSockets {
socket: None,
handler: Box::new(handler),
}
}
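    // Usage sketch (the stream name is illustrative):
    //
    //     let keep_running = AtomicBool::new(true);
    //     let mut ws = FuturesWebSockets::new(|event: FuturesWebsocketEvent| {
    //         println!("{:?}", event);
    //         Ok(())
    //     });
    //     ws.connect(FuturesMarket::USDM, "btcusdt@aggTrade").unwrap();
    //     ws.event_loop(&keep_running).unwrap();
    //     ws.disconnect().unwrap();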
pub fn connect(&mut self, market: FuturesMarket, subscription: &'a str) -> Result<()> {
self.connect_wss(FuturesWebsocketAPI::Default.params(market, subscription))
}
pub fn connect_with_config(
&mut self, market: FuturesMarket, subscription: &'a str, config: &'a Config,
) -> Result<()> {
self.connect_wss(
FuturesWebsocketAPI::Custom(config.ws_endpoint.clone()).params(market, subscription),
)
}
pub fn connect_multiple_streams(
&mut self, market: FuturesMarket, endpoints: &[String],
) -> Result<()> {
self.connect_wss(FuturesWebsocketAPI::MultiStream.params(market, &endpoints.join("/")))
}
fn connect_wss(&mut self, wss: String) -> Result<()> {
let url = Url::parse(&wss)?;
match connect(url) {
Ok(answer) => {
self.socket = Some(answer);
Ok(())
}
Err(e) => Err(Error::Msg(format!("Error during handshake {}", e))),
}
}
pub fn disconnect(&mut self) -> Result<()> {
if let Some(ref mut socket) = self.socket {
            socket.0.close(None)?;
            return Ok(());
        }
        Err(Error::Msg("Not able to close the connection".to_string()))
    }
pub fn test_handle_msg(&mut self, msg: &str) -> Result<()> {
self.handle_msg(msg)
}
fn handle_msg(&mut self, msg: &str) -> Result<()> {
let value: serde_json::Value = serde_json::from_str(msg)?;
if let Some(data) = value.get("data") {
self.handle_msg(&data.to_string())?;
return Ok(());
}
if let Ok(events) = serde_json::from_value::<FuturesEvents>(value) {
let action = match events {
FuturesEvents::Vec(v) => FuturesWebsocketEvent::DayTickerAll(v),
FuturesEvents::DayTickerEvent(v) => FuturesWebsocketEvent::DayTicker(v),
FuturesEvents::BookTickerEvent(v) => FuturesWebsocketEvent::BookTicker(v),
FuturesEvents::MiniTickerEvent(v) => FuturesWebsocketEvent::MiniTicker(v),
FuturesEvents::VecMiniTickerEvent(v) => FuturesWebsocketEvent::MiniTickerAll(v),
FuturesEvents::AccountUpdateEvent(v) => FuturesWebsocketEvent::AccountUpdate(v),
FuturesEvents::OrderTradeEvent(v) => FuturesWebsocketEvent::OrderTrade(v),
FuturesEvents::IndexPriceEvent(v) => FuturesWebsocketEvent::IndexPrice(v),
FuturesEvents::MarkPriceEvent(v) => FuturesWebsocketEvent::MarkPrice(v),
FuturesEvents::VecMarkPriceEvent(v) => FuturesWebsocketEvent::MarkPriceAll(v),
FuturesEvents::TradeEvent(v) => FuturesWebsocketEvent::Trade(v),
FuturesEvents::ContinuousKlineEvent(v) => FuturesWebsocketEvent::ContinuousKline(v),
FuturesEvents::IndexKlineEvent(v) => FuturesWebsocketEvent::IndexKline(v),
FuturesEvents::LiquidationEvent(v) => FuturesWebsocketEvent::Liquidation(v),
FuturesEvents::KlineEvent(v) => FuturesWebsocketEvent::Kline(v),
FuturesEvents::OrderBook(v) => FuturesWebsocketEvent::OrderBook(v),
FuturesEvents::DepthOrderBookEvent(v) => FuturesWebsocketEvent::DepthOrderBook(v),
FuturesEvents::AggrTradesEvent(v) => FuturesWebsocketEvent::AggrTrades(v),
};
(self.handler)(action)?;
}
Ok(())
}
pub fn event_loop(&mut self, running: &AtomicBool) -> Result<()> {
while running.load(Ordering::Relaxed) {
if let Some(ref mut socket) = self.socket {
let message = socket.0.read_message()?;
match message {
Message::Text(msg) => {
if let Err(e) = self.handle_msg(&msg) {
return Err(Error::Msg(format!("Error on handling stream message: {}", e)));
}
}
Message::Ping(_) | Message::Pong(_) | Message::Binary(_) => (),
Message::Close(e) => {
return Err(Error::Msg(format!("Disconnected {:?}", e)));
}
}
}
}
Ok(())
}
}
92.ts
import { Card } from '../../../interfaces'
import Set from '../Rebel Clash'
const card: Card = {
name: {
en: "Dragapult V",
fr: "Lanssorien V",
es: "Dragapult V",
it: "Dragapult V",
pt: "Dragapult V",
de: "Katapuldra V"
},
illustrator: "aky CG Works",
rarity: "Ultra Rare",
category: "Pokemon",
set: Set,
attacks: [
{
cost: [
"Psychic",
],
name: {
en: "Bite",
fr: "Morsure",
es: "Mordisco",
it: "Morso",
pt: "Mordida",
de: "Biss"
},
damage: 30,
},
{
cost: [
"Psychic",
"Psychic", | en: "Jet Assault",
fr: "Assaut Propulsé",
es: "Asalto Propulsión",
it: "Assalto Jet",
pt: "Ofensiva a Jato",
de: "Jet-Angriff"
},
effect: {
en: "If this Pokémon moved from your Bench to the Active Spot this turn, this attack does 80 more damage.",
fr: "Si ce Pokémon a été déplacé de votre Banc vers le Poste Actif pendant ce tour, cette attaque inflige 80 dégâts supplémentaires.",
es: "Si este Pokémon se ha movido de tu Banca al Puesto Activo en este turno, este ataque hace 80 puntos de daño más.",
it: "Se questo Pokémon si è spostato dalla tua panchina in posizione attiva nel turno in corso, questo attacco infligge 80 danni in più.",
pt: "Se este Pokémon foi movido do seu Banco para o Campo Ativo durante este turno, este ataque causará 80 pontos de dano a mais.",
de: "Wenn dieses Pokémon während dieses Zuges von deiner Bank in die Aktive Position gewechselt ist, fügt diese Attacke 80 Schadenspunkte mehr zu."
},
damage: "60+",
},
],
weaknesses: [
{
type: "Darkness",
value: "×2"
},
],
resistances: [
{
type: "Fighting",
value: "-30"
},
],
retreat: 1,
hp: 210,
types: ["Psychic"],
regulationMark: "D",
variants: {
normal: false,
reverse: false,
holo: true,
firstEdition: false
},
stage: "Basic",
suffix: "V"
}
export default card
manifest.py
import zipfile, os, sys, aiohttp, json, requests
from modules.manifest_reader import ManifestReader
class Manifest:
def __init__(self, directory, headers=None):
        self.headers = headers
        self.directory = directory
        self.manifests = {
            'en': '',
            'fr': '',
            'es': '',
            'de': '',
            'it': '',
            'ja': '',
            'pt-br': '',
            'es-mx': '',
            'ru': '',
            'pl': '',
            'ko': '',
            'zh-cht': '',
            'zh-chs': ''
        }
def _decode_hash(self, hash, definition, language):
        if self.manifests.get(language.lower(), None) is None:
            print("Language Not Found")
        elif self.manifests.get(language.lower(), None) == "":
            self._update_manifest(language)
        if definition == "DestinyHistoricalStatsDefinition":
            hash = "\"" + hash + "\""
            # The historical-stats table is keyed by a string "key" column
            # rather than the numeric "id" column used everywhere else.
            identifier = "key"
        else:
            hash = self._bumpAlong(hash)
            identifier = "id"
with ManifestReader(self.manifests.get(language)) as _handler:
_result = _handler.query(hash, definition, identifier)
if len(_result) > 0:
return json.loads(_result[0][0])
return None
def _update_manifest(self, language):
        if self.manifests.get(language.lower(), None) is None:
print("Language Not Found")
manifestJson = requests.get("https://www.bungie.net/Platform/Destiny2/Manifest/", headers=self.headers).json()
manifestUrl = 'https://www.bungie.net' + manifestJson['Response']['mobileWorldContentPaths'][language]
manifestFileName = "./{0}/".format(self.directory) + manifestUrl.split('/')[-1]
if not os.path.isfile(manifestFileName):
downloadedFileName = self._download_manifest(manifestUrl)
if os.path.isfile("./{0}/manifest".format(self.directory)):
zip = zipfile.ZipFile("./{0}/manifest".format(self.directory), "r")
zip.extractall("./{0}/".format(self.directory))
zip.close()
self.manifests[language] = manifestFileName
def _download_manifest(self, request):
_data = requests.get(request, headers=self.headers)
downloadTarget = "./{0}/manifest".format(self.directory)
with open(downloadTarget, "wb") as out:
out.write(_data.content)
return downloadTarget
def _bumpAlong(self, val):
val = int(val)
if (val & (1 << (32 - 1))) != 0:
val = val - (1 << 32)
return val
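# _bumpAlong reinterprets an unsigned 32-bit Destiny hash as the signed value
# stored in the manifest's SQLite id column, e.g.:
#   _bumpAlong("1234")       -> 1234
#   _bumpAlong("4135386809") -> -159580487  # i.e. 4135386809 - 2**32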
body.rs
use std::{
fmt,
pin::Pin,
task::{Context, Poll},
};
use bytes::{Buf, Bytes, BytesMut};
use futures::{future, ready, Stream};
use http::HeaderMap;
use http_body::Body as HttpBody;
use quinn_proto::StreamId;
use crate::{
connection::ConnectionRef,
frame::FrameStream,
proto::{
frame::{HeadersFrame, HttpFrame},
ErrorCode,
},
streams::Reset,
Error,
};
/// Simple body representation
///
/// It is intended to be constructed from common types such as `&str`, and be passed
/// to [`http::Request<B>`] or [`http::Response<B>`] as the `B` parameter. It is a
/// convenient way to send simple and small bodies.
///
/// [`http::Request<B>`]: https://docs.rs/http/*/http/request/index.html
/// [`http::Response<B>`]: https://docs.rs/http/*/http/response/index.html
pub struct Body(pub(crate) Option<Bytes>);
impl From<()> for Body {
    fn from(_: ()) -> Self {
        Body(None)
    }
}
impl From<Bytes> for Body {
fn from(buf: Bytes) -> Self {
Body(Some(buf))
}
}
impl From<&str> for Body {
fn from(buf: &str) -> Self {
Body(Some(Bytes::copy_from_slice(buf.as_ref())))
}
}
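// Sketch: these conversions make simple payloads ergonomic, e.g. building a
// response with the `http` crate's builder:
//
//     let resp = http::Response::builder()
//         .status(200)
//         .body(Body::from("pong"))
//         .unwrap();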
impl HttpBody for Body {
type Data = Bytes;
type Error = Error;
fn poll_data(
mut self: Pin<&mut Self>,
_: &mut Context,
) -> Poll<Option<Result<Self::Data, Self::Error>>> {
match self.0.take() {
Some(b) => Poll::Ready(Some(Ok(b))),
None => Poll::Ready(None),
}
}
fn poll_trailers(
self: Pin<&mut Self>,
_: &mut Context,
) -> Poll<Result<Option<HeaderMap>, Self::Error>> {
Poll::Ready(Ok(None))
}
}
/// HTTP/3 body reception stream
///
/// Crate's [`http_body::Body`] implementation. It enables you to read the body and its trailers.
///
/// This is emitted as part of a [`Request<RecvBody>`] on the server side and a
/// [`Response<RecvBody>`] on the client side.
///
/// Note that the body must be read entirely before polling for trailers.
pub struct RecvBody {
conn: ConnectionRef,
stream_id: StreamId,
recv: FrameStream,
trailers: Option<HeadersFrame>,
}
impl RecvBody {
pub(crate) fn new(conn: ConnectionRef, stream_id: StreamId, recv: FrameStream) -> Self {
Self {
conn,
stream_id,
recv,
trailers: None,
}
}
/// Convenience method to read the entire body in one call
pub async fn read_to_end(&mut self) -> Result<Bytes, Error> {
let mut body = BytesMut::with_capacity(10_240);
let mut me = self;
let res: Result<(), Error> = future::poll_fn(|cx| {
while let Some(d) = ready!(Pin::new(&mut me).poll_data(cx)) {
body.extend(d?.bytes());
}
Poll::Ready(Ok(()))
})
.await;
res?;
Ok(body.freeze())
}
/// Read the body chunk by chunk
///
/// This will return the next available chunk if any.
pub async fn data(&mut self) -> Option<Result<Bytes, Error>> {
let mut me = self;
future::poll_fn(|cx| Pin::new(&mut me).poll_data(cx)).await
}
/// Receive and decode trailers
///
/// Note: shall not be called before consuming the whole body.
pub async fn trailers(&mut self) -> Result<Option<HeaderMap>, Error> {
let mut me = self;
Ok(future::poll_fn(|cx| Pin::new(&mut me).poll_trailers(cx)).await?)
}
/// Cancel a request or response
///
/// The peer will receive a request error with `REQUEST_CANCELLED` code.
pub fn cancel(&mut self) {
self.recv.reset(ErrorCode::REQUEST_CANCELLED);
}
pub(super) fn into_inner(self) -> FrameStream {
self.recv
}
}
impl HttpBody for RecvBody {
type Data = bytes::Bytes;
type Error = Error;
fn poll_data(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Self::Data, Self::Error>>> {
loop {
return match ready!(Pin::new(&mut self.recv).poll_next(cx)) {
None => Poll::Ready(None),
Some(Ok(HttpFrame::Reserved)) => continue,
Some(Ok(HttpFrame::Data(d))) => Poll::Ready(Some(Ok(d.payload))),
Some(Ok(HttpFrame::Headers(t))) => {
self.trailers = Some(t);
Poll::Ready(None)
}
Some(Err(e)) => {
self.recv.reset(e.code());
Poll::Ready(Some(Err(e.into())))
}
Some(Ok(f)) => {
self.recv.reset(ErrorCode::FRAME_UNEXPECTED);
Poll::Ready(Some(Err(Error::Peer(format!(
"Invalid frame type in body: {:?}",
f
)))))
}
};
}
}
fn poll_trailers(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<Option<http::HeaderMap>, Self::Error>> {
if self.trailers.is_none() {
return Poll::Ready(Ok(None));
}
let header = {
let mut conn = self.conn.h3.lock().unwrap();
ready!(conn.poll_decode(cx, self.stream_id, self.trailers.as_ref().unwrap()))?
};
self.trailers = None;
Poll::Ready(Ok(Some(header.into_fields())))
}
}
impl fmt::Debug for RecvBody {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RecvBody")
.field("stream", &self.stream_id)
.finish()
}
}
fn.code.go
package cmd
import (
"fmt"
"os"
"path"
"path/filepath"
"strings"
"github.com/ezbuy/redis-orm/fs"
"github.com/ezbuy/redis-orm/parser"
"github.com/spf13/viper"
)
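// GenerateCode reads its settings from viper; a minimal programmatic driver
// looks like this (key names are the ones read below, values illustrative):
//
//	viper.Set("package", "model")
//	viper.Set("code_input", "./yaml")
//	viper.Set("output", "./gen/model")
//	viper.Set("code_model", "") // empty: generate every object found
//	GenerateCode()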
func GenerateCode() {
packageName := viper.GetString("package")
inputDir, err := filepath.Abs(viper.GetString("code_input"))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
outputDir, err := filepath.Abs(viper.GetString("output"))
if err != nil {
fmt.Println(err)
os.Exit(1)
}
if packageName == "" {
_, packageName = path.Split(outputDir)
}
yamls, err := fs.GetDirectoryFilesBySuffix(inputDir, ".yaml")
if err != nil {
fmt.Println(err)
os.Exit(1)
}
model := viper.GetString("code_model")
metaObjs := map[string]*parser.MetaObject{}
confTpls := map[string]bool{
"orm": true,
}
i := 0
for _, yaml := range yamls {
objs, err := parser.ReadYaml(packageName, yaml)
if err != nil {
fmt.Println("failed: ", err)
os.Exit(1)
}
i++
if model != "" {
for _, obj := range objs {
if obj.Tag == "" {
obj.Tag = fmt.Sprint(i)
}
if strings.EqualFold(obj.Name, model) {
metaObjs[obj.Name] = obj
for _, db := range obj.Dbs {
confTpls[db] = true
}
goto GeneratePoint
}
}
} else {
for _, obj := range objs {
obj.Tag = fmt.Sprint(i)
metaObjs[obj.Name] = obj
for _, db := range obj.Dbs {
confTpls[db] = true
}
}
}
}
GeneratePoint:
for _, metaObj := range metaObjs {
err = fs.ExecuteMetaObjectCodeTemplate(outputDir, metaObj)
if err != nil {
panic(err.Error())
}
}
for conf := range confTpls {
err = fs.ExecuteConfigTemplate(outputDir, conf, packageName)
if err != nil {
panic(err.Error())
}
}
}
test_integration.py
import sys
import numpy as np
import pennylane as qml
import pytest
import qiskit
from pennylane_qiskit import AerDevice, BasicAerDevice
from conftest import state_backends
pldevices = [("qiskit.aer", qiskit.Aer), ("qiskit.basicaer", qiskit.BasicAer)]
class TestDeviceIntegration:
"""Test the devices work correctly from the PennyLane frontend."""
@pytest.mark.parametrize("d", pldevices)
def test_load_device(self, d, backend):
"""Test that the qiskit device loads correctly"""
dev = qml.device(d[0], wires=2, backend=backend, shots=1024)
assert dev.num_wires == 2
assert dev.shots == 1024
assert dev.short_name == d[0]
assert dev.provider == d[1]
def test_incorrect_backend(self):
"""Test that exception is raised if name is incorrect"""
with pytest.raises(ValueError, match="Backend 'none' does not exist"):
qml.device("qiskit.aer", wires=2, backend="none")
def test_incorrect_backend_wires(self):
"""Test that exception is raised if number of wires is too large"""
with pytest.raises(ValueError, match=r"Backend 'statevector\_simulator' supports maximum"):
qml.device("qiskit.aer", wires=100, backend="statevector_simulator")
def test_args(self):
"""Test that the device requires correct arguments"""
with pytest.raises(TypeError, match="missing 1 required positional argument"):
qml.device("qiskit.aer")
with pytest.raises(qml.DeviceError, match="specified number of shots needs to be at least 1"):
qml.device("qiskit.aer", backend="qasm_simulator", wires=1, shots=0)
@pytest.mark.parametrize("d", pldevices)
@pytest.mark.parametrize("analytic", [True, False])
@pytest.mark.parametrize("shots", [8192])
def test_one_qubit_circuit(self, shots, analytic, d, backend, tol):
        """Test that devices provide correct result for a simple circuit"""
        if backend not in state_backends and analytic:
            pytest.skip("Hardware simulators do not support analytic mode")
        dev = qml.device(d[0], wires=1, backend=backend, shots=shots, analytic=analytic)
        a = 0.543
        b = 0.123
        c = 0.987
        @qml.qnode(dev)
        def circuit(x, y, z):
            """Reference QNode"""
            qml.BasisState(np.array([1]), wires=0)
            qml.Hadamard(wires=0)
            qml.Rot(x, y, z, wires=0)
            return qml.expval(qml.PauliZ(0))
        assert np.allclose(circuit(a, b, c), np.cos(a) * np.sin(b), **tol)
@pytest.mark.parametrize("d", pldevices)
@pytest.mark.parametrize("analytic", [False])
@pytest.mark.parametrize("shots", [8192])
    def test_basis_state_and_rot(self, shots, analytic, d, backend, tol):
        """Integration test for the BasisState and Rot operations when analytic
        is False."""
dev = qml.device(d[0], wires=1, backend=backend, shots=shots, analytic=analytic)
a = 0
b = 0
c = np.pi
expected = 1
@qml.qnode(dev)
def circuit(x, y, z):
"""Reference QNode"""
qml.BasisState(np.array([0]), wires=0)
qml.Rot(x, y, z, wires=0)
return qml.expval(qml.PauliZ(0))
assert np.allclose(circuit(a, b, c), expected, **tol)
def test_gradient_for_tensor_product(self):
"""Test that the gradient of a circuit containing a tensor product is
computed without any errors."""
n_qubits = 2
depth = 2
def ansatz(weights):
weights = weights.reshape(depth, n_qubits)
qml.RX(weights[0][0], wires=[0])
qml.RZ(weights[0][1], wires=[0])
qml.RX(weights[1][0], wires=[0])
qml.RZ(weights[1][1], wires=[0])
return qml.expval(qml.PauliZ(0) @ qml.PauliZ(1))
dev_qsk = qml.device(
"qiskit.aer",
wires=n_qubits,
shots=1000,
backend="qasm_simulator",
)
weights = np.random.random((depth, n_qubits)).flatten()
# Want to get expectation value and gradient
exp_sampled = qml.QNode(ansatz, dev_qsk, diff_method="parameter-shift")
grad_shift = qml.grad(exp_sampled, argnum=0)
exp_sampled(weights)
grad_shift(weights)
class TestKeywordArguments:
"""Test keyword argument logic is correct"""
@pytest.mark.parametrize("d", pldevices)
def test_compile_backend(self, d):
"""Test that the compile backend argument is properly
extracted"""
dev = qml.device(d[0], wires=2, compile_backend="test value")
assert dev.compile_backend == "test value"
def test_noise_model(self):
"""Test that the noise model argument is properly
extracted if the backend supports it"""
dev = qml.device("qiskit.aer", wires=2, noise_model="test value")
assert dev.noise_model == "test value"
def test_invalid_noise_model(self):
"""Test that the noise model argument causes an exception to be raised
if the backend does not support it"""
with pytest.raises(ValueError, match="does not support noisy simulations"):
dev = qml.device("qiskit.basicaer", wires=2, noise_model="test value")
def test_overflow_kwargs(self):
"""Test all overflow kwargs are extracted for the AerDevice"""
dev = qml.device('qiskit.aer', wires=2, k1="v1", k2="v2")
assert dev.run_args["k1"] == "v1"
assert dev.run_args["k2"] == "v2"
class TestLoadIntegration:
"""Integration tests for the PennyLane load function. This test ensures that the PennyLane-Qiskit
specific load functions integrate properly with the PennyLane-Qiskit plugin."""
hadamard_qasm = 'OPENQASM 2.0;' \
'include "qelib1.inc";' \
'qreg q[1];' \
'h q[0];'
def test_load_qiskit_circuit(self):
"""Test that the default load function works correctly."""
theta = qiskit.circuit.Parameter('θ')
qc = qiskit.QuantumCircuit(2)
qc.rx(theta, 0)
my_template = qml.load(qc, format='qiskit')
dev = qml.device('default.qubit', wires=2)
angles = np.array([0.53896774, 0.79503606, 0.27826503, 0.])
@qml.qnode(dev)
def loaded_quantum_circuit(angle):
my_template({theta: angle})
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev)
def quantum_circuit(angle):
qml.RX(angle, wires=[0])
return qml.expval(qml.PauliZ(0))
for x in angles:
assert np.allclose(loaded_quantum_circuit(x), quantum_circuit(x))
def test_load_from_qasm_string(self):
"""Test that quantum circuits can be loaded from a qasm string."""
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def loaded_quantum_circuit():
qml.from_qasm(TestLoadIntegration.hadamard_qasm)(wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev)
def quantum_circuit():
qml.Hadamard(wires=[0])
return qml.expval(qml.PauliZ(0))
assert np.allclose(loaded_quantum_circuit(), quantum_circuit())
@pytest.mark.skipif(sys.version_info < (3, 6), reason="tmpdir fixture requires Python >=3.6")
def test_load_qasm_from_file(self, tmpdir):
"""Test that quantum circuits can be loaded from a qasm file."""
apply_hadamard = tmpdir.join("hadamard.qasm")
with open(apply_hadamard, "w") as f:
f.write(TestLoadIntegration.hadamard_qasm)
hadamard = qml.from_qasm_file(apply_hadamard)
dev = qml.device('default.qubit', wires=2)
@qml.qnode(dev)
def loaded_quantum_circuit():
hadamard(wires=[0])
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev)
def quantum_circuit():
qml.Hadamard(wires=[0])
return qml.expval(qml.PauliZ(0))
assert np.allclose(loaded_quantum_circuit(), quantum_circuit())
class TestPLOperations:
"""Integration tests for checking certain PennyLane specific operations."""
@pytest.mark.parametrize("shots", [1000])
@pytest.mark.parametrize("analytic", [True, False])
def test_rotation(self, init_state, state_vector_device, shots, analytic, tol):
"""Test that the QubitStateVector and Rot operations are decomposed using a
Qiskit device with statevector backend"""
dev = state_vector_device(1)
if dev.backend_name == "unitary_simulator":
pytest.skip("Test only runs for backends that are not the unitary simulator.")
state = init_state(1)
a = 0.542
b = 1.3432
c = -0.654
I = np.eye(2)
Y = np.array([[0, -1j], [1j, 0]]) #: Pauli-Y matrix
Z = np.array([[1, 0], [0, -1]]) #: Pauli-Z matrix
def ry(theta):
return np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Y
def rz(theta):
return np.cos(theta / 2) * I + 1j * np.sin(-theta / 2) * Z
@qml.qnode(dev)
def qubitstatevector_and_rot():
qml.QubitStateVector(state, wires=[0])
qml.Rot(a, b, c, wires=[0])
return qml.expval(qml.Identity(0))
qubitstatevector_and_rot()
assert np.allclose(np.abs(dev.state) ** 2, np.abs(rz(c) @ ry(b) @ rz(a) @ state) ** 2, **tol)
@pytest.mark.parametrize("shots", [1000])
@pytest.mark.parametrize("analytic", [True, False])
def test_basisstate(self, init_state, state_vector_device, shots, analytic, tol):
"""Test that the Basisstate is decomposed using a Qiskit device with
statevector backend"""
dev = state_vector_device(2)
state = np.array([1, 0])
@qml.qnode(dev)
def basisstate():
qml.BasisState(state, wires=[0, 1])
return qml.expval(qml.Identity(0))
basisstate()
expected_state = np.zeros(2**dev.num_wires)
expected_state[2] = 1
assert np.allclose(np.abs(dev.state) ** 2, np.abs(expected_state) ** 2, **tol)
@pytest.mark.parametrize("shots", [1000])
@pytest.mark.parametrize("analytic", [True, False])
def test_basisstate_init_all_zero_states(self, init_state, state_vector_device, shots, analytic, tol):
"""Test that the Basisstate that receives the all zero state is decomposed using
a Qiskit device with statevector backend"""
dev = state_vector_device(4)
state = np.array([0, 0, 0, 0])
@qml.qnode(dev)
def basisstate():
qml.BasisState(state, wires=[0, 1, 2, 3])
return qml.expval(qml.Identity(0))
basisstate()
expected_state = np.zeros(2**dev.num_wires)
expected_state[0] = 1
assert np.allclose(np.abs(dev.state) ** 2, np.abs(expected_state) ** 2, **tol)
class TestInverses:
"""Integration tests checking that the inverse of the operations are applied."""
def test_inverse_of_operation(self):
"""Test that the inverse of operations works as expected
by comparing a simple circuit with default.qubit."""
dev = qml.device('default.qubit', wires=2)
dev2 = qml.device('qiskit.aer', backend='statevector_simulator', shots=5, wires=2, analytic=True)
angles = np.array([0.53896774, 0.79503606, 0.27826503, 0.])
@qml.qnode(dev)
def circuit_with_inverses(angle):
qml.Hadamard(0).inv()
qml.RX(angle, wires=0).inv()
return qml.expval(qml.PauliZ(0))
@qml.qnode(dev2)
def circuit_with_inverses_default_qubit(angle):
qml.Hadamard(0).inv()
qml.RX(angle, wires=0).inv()
return qml.expval(qml.PauliZ(0))
for x in angles:
            assert np.allclose(circuit_with_inverses(x), circuit_with_inverses_default_qubit(x))
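
# These integration tests are driven by the fixtures defined in the plugin's
# conftest.py (backend, state_vector_device, init_state, tol) and are run with
# pytest, e.g. `pytest test_integration.py`.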
utils.py | import os
import yaml
import asyncio
import platform
from functools import lru_cache
from typing import List, Dict, Coroutine, Iterator, Union
from . import info
from . import common
def get_path_fname() -> str:
"""
Return the file name that stores the repo locations.
"""
root = common.get_config_dir()
return os.path.join(root, 'repo_path')
@lru_cache()
def get_repos() -> Dict[str, str]:
    """
    Return a `dict` of repo name to repo absolute path
    """
    path_file = get_path_fname()
    repos = {}
    # Each line is a repo path and repo name separated by ,
    if os.path.isfile(path_file) and os.stat(path_file).st_size > 0:
        with open(path_file) as f:
            for line in f:
                line = line.rstrip()
                if not line:  # blank line
                    continue
                path, name = line.split(',')
                if not is_git(path):
                    continue
                if name not in repos:
                    repos[name] = path
                else:  # repo name collision for different paths: include parent path name
                    par_name = os.path.basename(os.path.dirname(path))
                    repos[os.path.join(par_name, name)] = path
    return repos
def get_choices() -> List[Union[str, None]]:
"""
Return all repo names and an additional empty list. This is a workaround of
argparse's problem with coexisting nargs='*' and choices.
See https://utcc.utoronto.ca/~cks/space/blog/python/ArgparseNargsChoicesLimitation
and
https://bugs.python.org/issue27227
"""
repos = list(get_repos())
repos.append([])
return repos
def is_git(path: str) -> bool:
"""
Return True if the path is a git repo.
"""
# An alternative is to call `git rev-parse --is-inside-work-tree`
# I don't see why that one is better yet.
# For a regular git repo, .git is a folder, for a worktree repo, .git is a file.
# However, git submodule repo also has .git as a file.
    # A more reliable way to differentiate regular and worktree repos is to
# compare the result of `git rev-parse --git-dir` and
# `git rev-parse --git-common-dir`
loc = os.path.join(path, '.git')
# TODO: we can display the worktree repos in a different font.
return os.path.exists(loc)
def rename_repo(repos: Dict[str, str], repo: str, new_name: str):
"""
Write new repo name to file
"""
path = repos[repo]
del repos[repo]
repos[new_name] = path
write_to_repo_file(repos, 'w')
def write_to_repo_file(repos: Dict[str, str], mode: str):
"""
"""
data = ''.join(f'{path},{name}\n' for name, path in repos.items())
fname = get_path_fname()
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, mode) as f:
f.write(data)
def add_repos(repos: Dict[str, str], new_paths: List[str]):
"""
Write new repo paths to file
"""
existing_paths = set(repos.values())
new_paths = set(os.path.abspath(p) for p in new_paths if is_git(p))
new_paths = new_paths - existing_paths
if new_paths:
print(f"Found {len(new_paths)} new repo(s).")
new_repos = {
os.path.basename(os.path.normpath(path)): path
for path in new_paths}
write_to_repo_file(new_repos, 'a+')
else:
print('No new repos found!')
async def run_async(repo_name: str, path: str, cmds: List[str]) -> Union[None, str]:
"""
Run `cmds` asynchronously in `path` directory. Return the `path` if
execution fails.
"""
process = await asyncio.create_subprocess_exec(
*cmds,
stdin=asyncio.subprocess.DEVNULL,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
start_new_session=True,
cwd=path)
stdout, stderr = await process.communicate()
for pipe in (stdout, stderr):
if pipe:
print(format_output(pipe.decode(), f'{repo_name}: '))
    # The existence of stderr output is not a good indicator, since git
    # sometimes writes to stderr even when the execution succeeds, e.g. git fetch
if process.returncode != 0:
return path
def format_output(s: str, prefix: str):
"""
    Prepend every line in the given string with the given prefix.
"""
return ''.join([f'{prefix}{line}' for line in s.splitlines(keepends=True)])
def exec_async_tasks(tasks: List[Coroutine]) -> List[Union[None, str]]:
"""
Execute tasks asynchronously
"""
# TODO: asyncio API is nicer in python 3.7
if platform.system() == 'Windows':
loop = asyncio.ProactorEventLoop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
try:
errors = loop.run_until_complete(asyncio.gather(*tasks))
finally:
loop.close()
return errors
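
# A minimal sketch (not part of the original module) of how run_async and
# exec_async_tasks compose; `git fetch` here is just an example command.
def fetch_all_example(repos: Dict[str, str]) -> List[Union[None, str]]:
    """
    Run `git fetch` concurrently in every given repo and return the paths of
    the repos whose fetch failed.
    """
    tasks = [run_async(name, path, ['git', 'fetch'])
             for name, path in repos.items()]
    return exec_async_tasks(tasks)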
def describe(repos: Dict[str, str]) -> Iterator[str]:
    """
    Yield a one-line status summary for each repo
    """
if repos:
name_width = max(len(n) for n in repos) + 1
funcs = info.get_info_funcs()
for name in sorted(repos):
path = repos[name]
display_items = ' '.join(f(path) for f in funcs)
yield f'{name:<{name_width}}{display_items}'
def get_cmds_from_files() -> Dict[str, Dict[str, str]]:
"""
Parse delegated git commands from default config file
and custom config file.
Example return
{
'branch': {'help': 'show local branches'},
'clean': {'cmd': 'clean -dfx',
'help': 'remove all untracked files/folders'},
}
"""
# default config file
fname = os.path.join(os.path.dirname(__file__), "cmds.yml")
with open(fname, 'r') as stream:
cmds = yaml.load(stream, Loader=yaml.FullLoader)
# custom config file
root = common.get_config_dir()
fname = os.path.join(root, 'cmds.yml')
custom_cmds = {}
if os.path.isfile(fname) and os.path.getsize(fname):
with open(fname, 'r') as stream:
custom_cmds = yaml.load(stream, Loader=yaml.FullLoader)
# custom commands shadow default ones
cmds.update(custom_cmds)
    return cmds
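
# A minimal usage sketch (illustrative, not part of the original module):
# print one status line per registered repo when this file is run directly.
if __name__ == '__main__':
    for status_line in describe(get_repos()):
        print(status_line)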
option.rs | extern crate tcso;
use tcso::try;

fn main() {
try(|| {
None::<i32>.unwrap();
});
} |
15.2.3.6-4-319-1.js | /// Copyright (c) 2009 Microsoft Corporation
///
/// Redistribution and use in source and binary forms, with or without modification, are permitted provided
/// that the following conditions are met:
/// * Redistributions of source code must retain the above copyright notice, this list of conditions and
/// the following disclaimer.
/// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
/// the following disclaimer in the documentation and/or other materials provided with the distribution.
/// * Neither the name of Microsoft nor the names of its contributors may be used to
/// endorse or promote products derived from this software without specific prior written permission.
///
/// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
/// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
/// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
/// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
/// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
/// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
/// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
ES5Harness.registerTest({
id: "15.2.3.6-4-319-1",
path: "TestCases/chapter15/15.2/15.2.3/15.2.3.6/15.2.3.6-4-319-1.js",
description: "Object.defineProperty - 'O' is an Arguments object of a function that has formal parameters, 'P' is own data property of 'O', test TypeError is thrown when updating the [[Enumerable]] attribute value of 'P' which is not configurable (10.6 [[DefineOwnProperty]] step 4)",
        test: function testcase() {
            return (function (a, b, c) {
                Object.defineProperty(arguments, "genericProperty", {
enumerable: true,
configurable: false
});
try {
Object.defineProperty(arguments, "genericProperty", {
enumerable: false
});
} catch (e) {
return e instanceof TypeError &&
dataPropertyAttributesAreCorrect(arguments, "genericProperty", undefined, false, true, false);
}
return false;
}(1, 2, 3));
},
precondition: function prereq() {
return fnExists(Object.defineProperty);
}
});
predict_rub_salary.py | DOLLARS_TO_RUBLES = 65
EUROS_TO_RUBLES = 71
def predict_rub_salary(lower_salary, upper_salary, currency):
if lower_salary and upper_salary:
middle_salary = (lower_salary + upper_salary)/2
elif lower_salary:
middle_salary = lower_salary * 1.2
elif upper_salary:
middle_salary = upper_salary * 0.8
else:
return None
if currency == 'RUR' or currency == 'rub':
return middle_salary
elif currency == 'EUR':
return middle_salary * EUROS_TO_RUBLES
elif currency == 'USD':
return middle_salary * DOLLARS_TO_RUBLES
else:
return None
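
# A minimal usage sketch; the salary figures below are made-up examples, not
# data from any real vacancy API:
if __name__ == '__main__':
    print(predict_rub_salary(50000, 70000, 'RUR'))  # 60000.0: mean of the range
    print(predict_rub_salary(1000, None, 'USD'))    # 78000.0: 1000 * 1.2 * 65
    print(predict_rub_salary(None, 2000, 'EUR'))    # 113600.0: 2000 * 0.8 * 71
    print(predict_rub_salary(None, None, 'RUR'))    # None: no salary data given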
azure_interfaceclient.go | // +build !providerless
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package interfaceclient
import (
"context"
"fmt" | "net/http"
"time"
"github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network"
"github.com/Azure/go-autorest/autorest"
"github.com/Azure/go-autorest/autorest/azure"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/klog"
azclients "k8s.io/legacy-cloud-providers/azure/clients"
"k8s.io/legacy-cloud-providers/azure/clients/armclient"
"k8s.io/legacy-cloud-providers/azure/metrics"
"k8s.io/legacy-cloud-providers/azure/retry"
)
var _ Interface = &Client{}
// Client implements network interface client.
type Client struct {
armClient armclient.Interface
subscriptionID string
// Rate limiting configures.
rateLimiterReader flowcontrol.RateLimiter
rateLimiterWriter flowcontrol.RateLimiter
// ARM throttling configures.
RetryAfterReader time.Time
RetryAfterWriter time.Time
}
// New creates a new network interface client with ratelimiting.
func New(config *azclients.ClientConfig) *Client {
baseURI := config.ResourceManagerEndpoint
authorizer := autorest.NewBearerAuthorizer(config.ServicePrincipalToken)
armClient := armclient.New(authorizer, baseURI, "", APIVersion, config.Location, config.Backoff)
rateLimiterReader, rateLimiterWriter := azclients.NewRateLimiter(config.RateLimitConfig)
klog.V(2).Infof("Azure InterfacesClient (read ops) using rate limit config: QPS=%g, bucket=%d",
config.RateLimitConfig.CloudProviderRateLimitQPS,
config.RateLimitConfig.CloudProviderRateLimitBucket)
klog.V(2).Infof("Azure InterfacesClient (write ops) using rate limit config: QPS=%g, bucket=%d",
config.RateLimitConfig.CloudProviderRateLimitQPSWrite,
config.RateLimitConfig.CloudProviderRateLimitBucketWrite)
client := &Client{
armClient: armClient,
rateLimiterReader: rateLimiterReader,
rateLimiterWriter: rateLimiterWriter,
subscriptionID: config.SubscriptionID,
}
return client
}
// Get gets a network.Interface.
func (c *Client) Get(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (network.Interface, *retry.Error) {
mc := metrics.NewMetricContext("interfaces", "get", resourceGroupName, c.subscriptionID, "")
// Report errors if the client is rate limited.
if !c.rateLimiterReader.TryAccept() {
mc.RateLimitedCount()
return network.Interface{}, retry.GetRateLimitError(false, "NicGet")
}
// Report errors if the client is throttled.
if c.RetryAfterReader.After(time.Now()) {
mc.ThrottledCount()
rerr := retry.GetThrottlingError("NicGet", "client throttled", c.RetryAfterReader)
return network.Interface{}, rerr
}
result, rerr := c.getNetworkInterface(ctx, resourceGroupName, networkInterfaceName, expand)
mc.Observe(rerr.Error())
if rerr != nil {
if rerr.IsThrottled() {
// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
c.RetryAfterReader = rerr.RetryAfter
}
return result, rerr
}
return result, nil
}
// getNetworkInterface gets a network.Interface.
func (c *Client) getNetworkInterface(ctx context.Context, resourceGroupName string, networkInterfaceName string, expand string) (network.Interface, *retry.Error) {
resourceID := armclient.GetResourceID(
c.subscriptionID,
resourceGroupName,
"Microsoft.Network/networkInterfaces",
networkInterfaceName,
)
result := network.Interface{}
response, rerr := c.armClient.GetResource(ctx, resourceID, "")
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "nic.get.request", resourceID, rerr.Error())
return result, rerr
}
err := autorest.Respond(
response,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result))
if err != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "nic.get.respond", resourceID, err)
return result, retry.GetError(response, err)
}
result.Response = autorest.Response{Response: response}
return result, nil
}
// GetVirtualMachineScaleSetNetworkInterface gets a network.Interface of VMSS VM.
func (c *Client) GetVirtualMachineScaleSetNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (network.Interface, *retry.Error) {
mc := metrics.NewMetricContext("interfaces", "get_vmss_nic", resourceGroupName, c.subscriptionID, "")
// Report errors if the client is rate limited.
if !c.rateLimiterReader.TryAccept() {
mc.RateLimitedCount()
return network.Interface{}, retry.GetRateLimitError(false, "NicGetVirtualMachineScaleSetNetworkInterface")
}
// Report errors if the client is throttled.
if c.RetryAfterReader.After(time.Now()) {
mc.ThrottledCount()
rerr := retry.GetThrottlingError("NicGetVirtualMachineScaleSetNetworkInterface", "client throttled", c.RetryAfterReader)
return network.Interface{}, rerr
}
result, rerr := c.getVMSSNetworkInterface(ctx, resourceGroupName, virtualMachineScaleSetName, virtualmachineIndex, networkInterfaceName, expand)
mc.Observe(rerr.Error())
if rerr != nil {
if rerr.IsThrottled() {
// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
c.RetryAfterReader = rerr.RetryAfter
}
return result, rerr
}
return result, nil
}
// getVMSSNetworkInterface gets a network.Interface of VMSS VM.
func (c *Client) getVMSSNetworkInterface(ctx context.Context, resourceGroupName string, virtualMachineScaleSetName string, virtualmachineIndex string, networkInterfaceName string, expand string) (network.Interface, *retry.Error) {
resourceID := fmt.Sprintf("/subscriptions/%s/resourceGroups/%s/providers/Microsoft.Compute/virtualMachineScaleSets/%s/virtualMachines/%s/networkInterfaces/%s",
autorest.Encode("path", c.subscriptionID),
autorest.Encode("path", resourceGroupName),
autorest.Encode("path", virtualMachineScaleSetName),
autorest.Encode("path", virtualmachineIndex),
autorest.Encode("path", networkInterfaceName),
)
result := network.Interface{}
queryParameters := map[string]interface{}{
"api-version": ComputeAPIVersion,
}
if len(expand) > 0 {
queryParameters["$expand"] = autorest.Encode("query", expand)
}
decorators := []autorest.PrepareDecorator{
autorest.WithQueryParameters(queryParameters),
}
response, rerr := c.armClient.GetResourceWithDecorators(ctx, resourceID, decorators)
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmssnic.get.request", resourceID, rerr.Error())
return result, rerr
}
err := autorest.Respond(
response,
azure.WithErrorUnlessStatusCode(http.StatusOK),
autorest.ByUnmarshallingJSON(&result))
if err != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "vmssnic.get.respond", resourceID, err)
return result, retry.GetError(response, err)
}
result.Response = autorest.Response{Response: response}
return result, nil
}
// CreateOrUpdate creates or updates a network.Interface.
func (c *Client) CreateOrUpdate(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters network.Interface) *retry.Error {
mc := metrics.NewMetricContext("interfaces", "create_or_update", resourceGroupName, c.subscriptionID, "")
// Report errors if the client is rate limited.
if !c.rateLimiterWriter.TryAccept() {
mc.RateLimitedCount()
return retry.GetRateLimitError(true, "NicCreateOrUpdate")
}
// Report errors if the client is throttled.
if c.RetryAfterWriter.After(time.Now()) {
mc.ThrottledCount()
rerr := retry.GetThrottlingError("NicCreateOrUpdate", "client throttled", c.RetryAfterWriter)
return rerr
}
rerr := c.createOrUpdateInterface(ctx, resourceGroupName, networkInterfaceName, parameters)
mc.Observe(rerr.Error())
if rerr != nil {
if rerr.IsThrottled() {
// Update RetryAfterReader so that no more requests would be sent until RetryAfter expires.
c.RetryAfterWriter = rerr.RetryAfter
}
return rerr
}
return nil
}
// createOrUpdateInterface creates or updates a network.Interface.
func (c *Client) createOrUpdateInterface(ctx context.Context, resourceGroupName string, networkInterfaceName string, parameters network.Interface) *retry.Error {
resourceID := armclient.GetResourceID(
c.subscriptionID,
resourceGroupName,
"Microsoft.Network/networkInterfaces",
networkInterfaceName,
)
response, rerr := c.armClient.PutResource(ctx, resourceID, parameters)
defer c.armClient.CloseResponse(ctx, response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "nic.put.request", resourceID, rerr.Error())
return rerr
}
if response != nil && response.StatusCode != http.StatusNoContent {
_, rerr = c.createOrUpdateResponder(response)
if rerr != nil {
klog.V(5).Infof("Received error in %s: resourceID: %s, error: %s", "nic.put.respond", resourceID, rerr.Error())
return rerr
}
}
return nil
}
func (c *Client) createOrUpdateResponder(resp *http.Response) (*network.Interface, *retry.Error) {
result := &network.Interface{}
err := autorest.Respond(
resp,
azure.WithErrorUnlessStatusCode(http.StatusOK, http.StatusCreated),
autorest.ByUnmarshallingJSON(&result))
result.Response = autorest.Response{Response: resp}
return result, retry.GetError(resp, err)
} | |
main.go | // Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// [START container_v1_generated_ClusterManager_CancelOperation_sync]
package main
import (
"context"
container "cloud.google.com/go/container/apiv1"
containerpb "google.golang.org/genproto/googleapis/container/v1"
)
func main() {
ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// TODO: Handle error.
}
req := &containerpb.CancelOperationRequest{
// TODO: Fill request struct fields.
}
err = c.CancelOperation(ctx, req)
if err != nil {
// TODO: Handle error.
}
}
// [END container_v1_generated_ClusterManager_CancelOperation_sync] | |
test_base.py | # Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
from neutron_lib.api import attributes
from neutron_lib.api import converters
from neutron_lib.callbacks import registry
from neutron_lib import constants
from neutron_lib import context
from neutron_lib import exceptions as n_exc
from neutron_lib import fixture
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_policy import policy as oslo_policy
from oslo_utils import uuidutils
import six
import six.moves.urllib.parse as urlparse
import webob
from webob import exc
import webtest
from neutron.api import api_common
from neutron.api import extensions
from neutron.api.v2 import base as v2_base
from neutron.api.v2 import router
from neutron import policy
from neutron import quota
from neutron.quota import resource_registry
from neutron.tests import base
from neutron.tests import fake_notifier
from neutron.tests import tools
from neutron.tests.unit import dummy_plugin
from neutron.tests.unit import testlib_api
EXTDIR = os.path.join(base.ROOTDIR, 'unit/extensions')
_uuid = uuidutils.generate_uuid
def _get_path(resource, id=None, action=None,
fmt=None, endpoint=None):
path = '/%s' % resource
if id is not None:
path = path + '/%s' % id
if action is not None:
path = path + '/%s' % action
if endpoint is not None:
path = path + '/%s' % endpoint
if fmt is not None:
path = path + '.%s' % fmt
return path
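# e.g. _get_path('networks', id='42', action='dhcp', fmt='json') returns
# '/networks/42/dhcp.json'.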
class APIv2TestBase(base.BaseTestCase):
def setUp(self):
super(APIv2TestBase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
# Create the default configurations
self.config_parse()
# Update the plugin
self.setup_coreplugin(plugin, load_plugins=False)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
instance = self.plugin.return_value
instance.supported_extension_aliases = ['empty-string-filtering']
instance._NeutronPluginBaseV2__native_pagination_support = True
instance._NeutronPluginBaseV2__native_sorting_support = True
tools.make_mock_plugin_json_encodable(instance)
api = router.APIRouter()
self.api = webtest.TestApp(api)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
# APIRouter initialization resets policy module, re-initializing it
policy.init()
class _ArgMatcher(object):
"""An adapter to assist mock assertions, used to custom compare."""
def __init__(self, cmp, obj):
self.cmp = cmp
self.obj = obj
def __eq__(self, other):
return self.cmp(self.obj, other)
def _list_cmp(l1, l2):
return set(l1) == set(l2)
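# e.g. _ArgMatcher(_list_cmp, ['a', 'b']) == ['b', 'a'] evaluates to True, so
# assertions can treat argument lists as order-insensitive.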
class APIv2TestCase(APIv2TestBase):
@staticmethod
def _get_policy_attrs(attr_info):
policy_attrs = {name for (name, info) in attr_info.items()
if info.get('required_by_policy')}
if 'tenant_id' in policy_attrs:
policy_attrs.add('project_id')
return sorted(policy_attrs)
def _do_field_list(self, resource, base_fields):
attr_info = attributes.RESOURCES[resource]
policy_attrs = self._get_policy_attrs(attr_info)
for name, info in attr_info.items():
if info.get('primary_key'):
policy_attrs.append(name)
fields = base_fields
fields.extend(policy_attrs)
return fields
def _get_collection_kwargs(self, skipargs=None, **kwargs):
skipargs = skipargs or []
args_list = ['filters', 'fields', 'sorts', 'limit', 'marker',
'page_reverse']
args_dict = dict(
(arg, mock.ANY) for arg in set(args_list) - set(skipargs))
args_dict.update(kwargs)
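        # Every arg not listed in skipargs defaults to mock.ANY, so a test
        # only pins down the values it explicitly passes in.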
return args_dict
def test_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': 'foo'})
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['bar', 'foo'])
self.api.get(_get_path('networks'), {'fields': ['foo', 'bar']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
fields = self._do_field_list('networks', ['foo'])
self.api.get(_get_path('networks'), {'fields': ['foo', '']})
kwargs = self._get_collection_kwargs(fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ''})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_fields_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'fields': ['', '']})
kwargs = self._get_collection_kwargs(fields=[])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar'})
filters = {'name': ['bar']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ''})
filters = {'name': ['']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['', '']})
filters = {'name': ['', '']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_with_empty(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', '']})
filters = {'name': ['bar', '']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple_values(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': ['bar', 'bar2']})
filters = {'name': ['bar', 'bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_multiple(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar',
'tenant_id': 'bar2'})
filters = {'name': ['bar'], 'tenant_id': ['bar2']}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_fields(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'name': 'bar', 'fields': 'foo'})
filters = {'name': ['bar']}
fields = self._do_field_list('networks', ['foo'])
kwargs = self._get_collection_kwargs(filters=filters, fields=fields)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'), {'admin_state_up': 'true'})
filters = {'admin_state_up': [True]}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_filters_with_convert_list_to(self):
instance = self.plugin.return_value
instance.get_ports.return_value = []
self.api.get(_get_path('ports'),
{'fixed_ips': ['ip_address=foo', 'subnet_id=bar']})
filters = {'fixed_ips': {'ip_address': ['foo'], 'subnet_id': ['bar']}}
kwargs = self._get_collection_kwargs(filters=filters)
instance.get_ports.assert_called_once_with(mock.ANY, **kwargs)
def test_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '10'})
kwargs = self._get_collection_kwargs(limit=10)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_great_than_max_limit(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'limit': '1001'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_zero(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'), {'limit': '0'})
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_unspecific(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=1000)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_value(self):
cfg.CONF.set_default('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'limit': -1},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_limit_with_non_integer(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'limit': 'abc'}, expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
self.assertIn('abc', res)
def test_limit_with_infinite_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_override('pagination_max_limit', 'Infinite')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_negative_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', '-1')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_limit_with_non_integer_pagination_max_limit(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
cfg.CONF.set_default('pagination_max_limit', 'abc')
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(limit=None)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_marker(self):
cfg.CONF.set_override('pagination_max_limit', '1000')
instance = self.plugin.return_value
instance.get_networks.return_value = []
marker = _uuid()
self.api.get(_get_path('networks'),
{'marker': marker})
kwargs = self._get_collection_kwargs(limit=1000, marker=marker)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse(self):
calls = []
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'True'})
kwargs = self._get_collection_kwargs(page_reverse=True)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
instance.get_networks.reset_mock()
self.api.get(_get_path('networks'),
{'page_reverse': 'False'})
kwargs = self._get_collection_kwargs(page_reverse=False)
calls.append(mock.call.get_networks(mock.ANY, **kwargs))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse_with_non_bool(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'page_reverse': 'abc'})
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_page_reverse_with_unspecific(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'))
kwargs = self._get_collection_kwargs(page_reverse=False)
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', True)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_with_primary_key(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
self.api.get(_get_path('networks'),
{'sort_key': ['name', 'admin_state_up', 'id'],
'sort_dir': ['desc', 'asc', 'desc']})
kwargs = self._get_collection_kwargs(sorts=[('name', False),
('admin_state_up', True),
('id', False)])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_sort_without_direction(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'), {'sort_key': ['name']},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_sort_with_invalid_attribute(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'abc',
'sort_dir': 'asc'},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_sort_with_invalid_dirs(self):
instance = self.plugin.return_value
instance.get_networks.return_value = []
res = self.api.get(_get_path('networks'),
{'sort_key': 'name',
'sort_dir': 'abc'},
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_emulated_sort(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_sort_without_sort_field(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance._NeutronPluginBaseV2__native_sorting_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'sort_key': ['name', 'status'],
'sort_dir': ['desc', 'asc'],
'fields': ['subnets']})
kwargs = self._get_collection_kwargs(
skipargs=['sorts', 'limit', 'marker', 'page_reverse'],
fields=_ArgMatcher(_list_cmp, ['name',
'status',
'id',
'subnets',
'shared',
'project_id',
'tenant_id']))
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_emulated_pagination(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_pagination_support = False
instance.get_networks.return_value = []
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks'), {'limit': 10,
'marker': 'foo',
'page_reverse': False})
kwargs = self._get_collection_kwargs(skipargs=['limit',
'marker',
'page_reverse'])
instance.get_networks.assert_called_once_with(mock.ANY, **kwargs)
def test_native_pagination_without_native_sorting(self):
instance = self.plugin.return_value
instance._NeutronPluginBaseV2__native_sorting_support = False
self.assertRaises(n_exc.Invalid, router.APIRouter)
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class JSONV2TestCase(APIv2TestBase, testlib_api.WebTestCase):
def _test_list(self, req_tenant_id, real_tenant_id):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
input_dict = {'id': uuidutils.generate_uuid(),
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': real_tenant_id,
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
res = self.api.get(_get_path('networks',
fmt=self.fmt), extra_environ=env)
res = self.deserialize(res)
self.assertIn('networks', res)
if not req_tenant_id or req_tenant_id == real_tenant_id:
# expect full list returned
self.assertEqual(1, len(res['networks']))
output_dict = res['networks'][0]
input_dict['shared'] = False
self.assertEqual(len(input_dict), len(output_dict))
for k, v in input_dict.items():
self.assertEqual(v, output_dict[k])
else:
# expect no results
self.assertEqual(0, len(res['networks']))
def test_list_noauth(self):
self._test_list(None, _uuid())
def test_list_keystone(self):
tenant_id = _uuid()
self._test_list(tenant_id, tenant_id)
def test_list_keystone_bad(self):
tenant_id = _uuid()
self._test_list(tenant_id + "bad", tenant_id)
def test_list_pagination(self):
id1 = str(_uuid())
id2 = str(_uuid())
input_dict1 = {'id': id1,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
input_dict2 = {'id': id2,
'name': 'net2',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict1, input_dict2]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'sort_key': ['name'],
'sort_dir': ['asc']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(2, len(res['networks']))
self.assertEqual(sorted([id1, id2]),
sorted([res['networks'][0]['id'],
res['networks'][1]['id']]))
self.assertIn('networks_links', res)
next_links = []
previous_links = []
for r in res['networks_links']:
if r['rel'] == 'next':
next_links.append(r)
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(1, len(next_links))
self.assertEqual(1, len(previous_links))
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id2]
self.assertEqual(params, urlparse.parse_qs(url.query))
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
params['marker'] = [id1]
params['page_reverse'] = ['True']
self.assertEqual(params, urlparse.parse_qs(url.query))
def test_list_pagination_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(1, len(res['networks']))
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
previous_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(1, len(previous_links))
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
expect_params['marker'] = [id]
expect_params['page_reverse'] = ['True']
self.assertEqual(expect_params, urlparse.parse_qs(url.query))
def test_list_pagination_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': str(_uuid())}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual([], res['networks'])
previous_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'next')
if r['rel'] == 'previous':
previous_links.append(r)
self.assertEqual(1, len(previous_links))
url = urlparse.urlparse(previous_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
expect_params['page_reverse'] = ['True']
self.assertEqual(expect_params, urlparse.parse_qs(url.query))
def test_list_pagination_reverse_with_last_page(self):
id = str(_uuid())
input_dict = {'id': id,
'name': 'net1',
'admin_state_up': True,
'status': "ACTIVE",
'tenant_id': '',
'shared': False,
'subnets': []}
return_value = [input_dict]
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual(len(res['networks']), 1)
self.assertEqual(id, res['networks'][0]['id'])
self.assertIn('networks_links', res)
next_links = []
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(1, len(next_links))
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expected_params = params.copy()
del expected_params['page_reverse']
expected_params['marker'] = [id]
self.assertEqual(expected_params,
urlparse.parse_qs(url.query))
def test_list_pagination_reverse_with_empty_page(self):
return_value = []
instance = self.plugin.return_value
instance.get_networks.return_value = return_value
params = {'limit': ['2'],
'marker': [str(_uuid())],
'page_reverse': ['True']}
res = self.api.get(_get_path('networks'),
params=params).json
self.assertEqual([], res['networks'])
next_links = []
if 'networks_links' in res:
for r in res['networks_links']:
self.assertNotEqual(r['rel'], 'previous')
if r['rel'] == 'next':
next_links.append(r)
self.assertEqual(1, len(next_links))
url = urlparse.urlparse(next_links[0]['href'])
self.assertEqual(url.path, _get_path('networks'))
expect_params = params.copy()
del expect_params['marker']
del expect_params['page_reverse']
self.assertEqual(expect_params, urlparse.parse_qs(url.query))
def test_create(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net_id, net['id'])
self.assertEqual("ACTIVE", net['status'])
def test_create_use_defaults(self):
net_id = _uuid()
tenant_id = _uuid()
initial_input = {'network': {'name': 'net1',
'tenant_id': tenant_id,
'project_id': tenant_id}}
full_input = {'network': {'admin_state_up': True,
'shared': False}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net_id, net['id'])
self.assertTrue(net['admin_state_up'])
self.assertEqual("ACTIVE", net['status'])
def test_create_no_keystone_env(self):
data = {'name': 'net1'}
self._test_create_failure_bad_request('networks', data)
def test_create_with_keystone_env(self):
tenant_id = _uuid()
net_id = _uuid()
env = {'neutron.context': context.Context('', tenant_id)}
# tenant_id should be fetched from env
initial_input = {'network': {'name': 'net1'}}
full_input = {'network': {'admin_state_up': True,
'shared': False, 'tenant_id': tenant_id,
'project_id': tenant_id}}
full_input['network'].update(initial_input['network'])
return_value = {'id': net_id, 'status': "ACTIVE"}
return_value.update(full_input['network'])
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt,
extra_environ=env)
instance.create_network.assert_called_with(mock.ANY,
network=full_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
def test_create_bad_keystone_tenant(self):
tenant_id = _uuid()
data = {'network': {'name': 'net1', 'tenant_id': tenant_id}}
env = {'neutron.context': context.Context('', tenant_id + "bad")}
self._test_create_failure_bad_request('networks', data,
extra_environ=env)
def test_create_no_body(self):
data = {'whoa': None}
self._test_create_failure_bad_request('networks', data)
def test_create_body_string_not_json(self):
data = 'a string'
self._test_create_failure_bad_request('networks', data)
def test_create_body_boolean_not_json(self):
data = True
self._test_create_failure_bad_request('networks', data)
def test_create_no_resource(self):
data = {}
self._test_create_failure_bad_request('networks', data)
def test_create_missing_attr(self):
data = {'port': {'what': 'who', 'tenant_id': _uuid()}}
self._test_create_failure_bad_request('ports', data)
def test_create_readonly_attr(self):
data = {'network': {'name': 'net1', 'tenant_id': _uuid(),
'status': "ACTIVE"}}
self._test_create_failure_bad_request('networks', data)
def test_create_with_too_long_name(self):
data = {'network': {'name': "12345678" * 32,
'admin_state_up': True,
'tenant_id': _uuid()}}
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_create_bulk(self):
data = {'networks': [{'name': 'net1',
'admin_state_up': True,
'tenant_id': _uuid()},
{'name': 'net2',
'admin_state_up': True,
'tenant_id': _uuid()}]}
def side_effect(context, network):
net = network.copy()
net['network'].update({'subnets': []})
return net['network']
instance = self.plugin.return_value
instance.create_network.side_effect = side_effect
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
def _test_create_failure_bad_request(self, resource, data, **kwargs):
res = self.api.post(_get_path(resource, fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True, **kwargs)
self.assertEqual(exc.HTTPBadRequest.code, res.status_int)
def test_create_bulk_networks_none(self):
self._test_create_failure_bad_request('networks', {'networks': None})
def test_create_bulk_networks_empty_list(self):
self._test_create_failure_bad_request('networks', {'networks': []})
def test_create_bulk_missing_attr(self):
data = {'ports': [{'what': 'who', 'tenant_id': _uuid()}]}
self._test_create_failure_bad_request('ports', data)
def test_create_bulk_partial_body(self):
data = {'ports': [{'device_id': 'device_1',
'tenant_id': _uuid()},
{'tenant_id': _uuid()}]}
self._test_create_failure_bad_request('ports', data)
def test_create_attr_not_specified(self):
net_id = _uuid()
tenant_id = _uuid()
device_id = _uuid()
initial_input = {'port': {'name': '', 'network_id': net_id,
'tenant_id': tenant_id,
'project_id': tenant_id,
'device_id': device_id,
'admin_state_up': True}}
full_input = {'port': {'admin_state_up': True,
'mac_address': constants.ATTR_NOT_SPECIFIED,
'fixed_ips': constants.ATTR_NOT_SPECIFIED,
'device_owner': ''}}
full_input['port'].update(initial_input['port'])
return_value = {'id': _uuid(), 'status': 'ACTIVE',
'admin_state_up': True,
'mac_address': 'ca:fe:de:ad:be:ef',
'device_id': device_id,
'device_owner': ''}
return_value.update(initial_input['port'])
instance = self.plugin.return_value
instance.get_network.return_value = {
'tenant_id': six.text_type(tenant_id)
}
instance.get_ports_count.return_value = 1
instance.create_port.return_value = return_value
res = self.api.post(_get_path('ports', fmt=self.fmt),
self.serialize(initial_input),
content_type='application/' + self.fmt)
instance.create_port.assert_called_with(mock.ANY, port=full_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('port', res)
port = res['port']
self.assertEqual(net_id, port['network_id'])
self.assertEqual('ca:fe:de:ad:be:ef', port['mac_address'])
def test_create_return_extra_attr(self):
net_id = _uuid()
data = {'network': {'name': 'net1', 'admin_state_up': True,
'tenant_id': _uuid()}}
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id, 'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post(_get_path('networks', fmt=self.fmt),
self.serialize(data),
content_type='application/' + self.fmt)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
res = self.deserialize(res)
self.assertIn('network', res)
net = res['network']
self.assertEqual(net_id, net['id'])
self.assertEqual("ACTIVE", net['status'])
self.assertNotIn('v2attrs:something', net)
def test_fields(self):
return_value = {'name': 'net1', 'admin_state_up': True,
'subnets': []}
instance = self.plugin.return_value
instance.get_network.return_value = return_value
self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt))
def _test_delete(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.delete_network.return_value = None
res = self.api.delete(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(expected_code, res.status_int)
def test_delete_noauth(self):
self._test_delete(None, _uuid(), exc.HTTPNoContent.code)
def test_delete_keystone(self):
tenant_id = _uuid()
self._test_delete(tenant_id, tenant_id, exc.HTTPNoContent.code)
def test_delete_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_delete(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def _test_get(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
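        # A request tenant id ending in 'another' marks the network as shared
        # and grants the tenant_admin role, so a foreign tenant may read it.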
env = {}
shared = False
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
if req_tenant_id.endswith('another'):
shared = True
env['neutron.context'].roles = ['tenant_admin']
data = {'tenant_id': real_tenant_id, 'shared': shared}
instance = self.plugin.return_value
instance.get_network.return_value = data
res = self.api.get(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
extra_environ=env,
expect_errors=expect_errors)
self.assertEqual(expected_code, res.status_int)
return res
def test_get_noauth(self):
self._test_get(None, _uuid(), 200)
def test_get_keystone(self):
tenant_id = _uuid()
self._test_get(tenant_id, tenant_id, 200)
def test_get_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_get(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_get_keystone_shared_network(self):
tenant_id = _uuid()
self._test_get(tenant_id + "another", tenant_id, 200)
def test_get_keystone_strip_admin_only_attribute(self):
tenant_id = _uuid()
# Inject rule in policy engine
rules = oslo_policy.Rules.from_dict(
{'get_network:name': "rule:admin_only"})
policy.set_rules(rules, overwrite=False)
res = self._test_get(tenant_id, tenant_id, 200)
res = self.deserialize(res)
self.assertNotIn('name', res['network'])
def _test_update(self, req_tenant_id, real_tenant_id, expected_code,
expect_errors=False):
env = {}
if req_tenant_id:
env = {'neutron.context': context.Context('', req_tenant_id)}
# leave out 'name' field intentionally
data = {'network': {'admin_state_up': True}}
return_value = {'subnets': []}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.get_network.return_value = {'tenant_id': real_tenant_id,
'shared': False}
instance.update_network.return_value = return_value
res = self.api.put(_get_path('networks',
id=uuidutils.generate_uuid(),
fmt=self.fmt),
self.serialize(data),
extra_environ=env,
expect_errors=expect_errors)
# Ensure id attribute is included in fields returned by GET call
# in update procedure.
self.assertEqual(1, instance.get_network.call_count)
self.assertIn('id', instance.get_network.call_args[1]['fields'])
        self.assertEqual(expected_code, res.status_int)
def test_update_noauth(self):
self._test_update(None, _uuid(), 200)
def test_update_keystone(self):
tenant_id = _uuid()
self._test_update(tenant_id, tenant_id, 200)
def test_update_keystone_bad_tenant(self):
tenant_id = _uuid()
self._test_update(tenant_id + "bad", tenant_id,
exc.HTTPNotFound.code, expect_errors=True)
def test_update_keystone_no_tenant(self):
tenant_id = _uuid()
self._test_update(tenant_id, None,
exc.HTTPNotFound.code, expect_errors=True)
def test_update_readonly_field(self):
data = {'network': {'status': "NANANA"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_invalid_attribute_field(self):
data = {'network': {'invalid_key1': "foo1", 'invalid_key2': "foo2"}}
res = self.api.put(_get_path('networks', id=_uuid()),
self.serialize(data),
content_type='application/' + self.fmt,
expect_errors=True)
self.assertEqual(400, res.status_int)
def test_retry_on_index(self):
instance = self.plugin.return_value
instance.get_networks.side_effect = [db_exc.RetryRequest(None), []]
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks', fmt=self.fmt))
self.assertTrue(instance.get_networks.called)
def test_retry_on_show(self):
instance = self.plugin.return_value
instance.get_network.side_effect = [db_exc.RetryRequest(None), {}]
api = webtest.TestApp(router.APIRouter())
api.get(_get_path('networks', _uuid(), fmt=self.fmt))
self.assertTrue(instance.get_network.called)
class SubresourceTest(base.BaseTestCase):
def setUp(self):
super(SubresourceTest, self).setUp()
raise self.skipException('this class will be deleted')
plugin = 'neutron.tests.unit.api.v2.test_base.TestSubresourcePlugin'
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(fixture.APIDefinitionFixture())
self.config_parse()
self.setup_coreplugin(plugin, load_plugins=False)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
api = router.APIRouter()
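        # Register 'dummies' as a sub-resource of networks so requests like
        # /networks/{network_id}/dummies are routed to the test plugin.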
SUB_RESOURCES = {}
RESOURCE_ATTRIBUTE_MAP = {}
SUB_RESOURCES[dummy_plugin.RESOURCE_NAME] = {
'collection_name': 'dummies',
'parent': {'collection_name': 'networks',
'member_name': 'network'}
}
RESOURCE_ATTRIBUTE_MAP['dummies'] = {
'foo': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'default': '', 'is_visible': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True}
}
collection_name = SUB_RESOURCES[
dummy_plugin.RESOURCE_NAME].get('collection_name')
resource_name = dummy_plugin.RESOURCE_NAME
parent = SUB_RESOURCES[dummy_plugin.RESOURCE_NAME].get('parent')
        params = RESOURCE_ATTRIBUTE_MAP['dummies']
        member_actions = {'mactions': 'GET'}
        _plugin = directory.get_plugin()
        controller = v2_base.create_resource(collection_name, resource_name,
                                             _plugin, params,
member_actions=member_actions,
parent=parent,
allow_bulk=True,
allow_pagination=True,
allow_sorting=True)
path_prefix = "/%s/{%s_id}/%s" % (parent['collection_name'],
parent['member_name'],
collection_name)
mapper_kwargs = dict(controller=controller,
path_prefix=path_prefix)
api.map.collection(collection_name, resource_name, **mapper_kwargs)
api.map.resource(collection_name, collection_name,
controller=controller,
parent_resource=parent,
member=member_actions)
self.api = webtest.TestApp(api)
def test_index_sub_resource(self):
instance = self.plugin.return_value
self.api.get('/networks/id1/dummies')
instance.get_network_dummies.assert_called_once_with(mock.ANY,
filters=mock.ANY,
fields=mock.ANY,
network_id='id1')
def test_show_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.get_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
fields=mock.ANY)
def test_create_sub_resource(self):
instance = self.plugin.return_value
tenant_id = _uuid()
body = {
dummy_plugin.RESOURCE_NAME: {
'foo': 'bar', 'tenant_id': tenant_id,
'project_id': tenant_id
}
}
self.api.post_json('/networks/id1/dummies', body)
instance.create_network_dummy.assert_called_once_with(mock.ANY,
network_id='id1',
dummy=body)
def test_update_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
body = {dummy_plugin.RESOURCE_NAME: {'foo': 'bar'}}
self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
body)
instance.update_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
dummy=body)
def test_update_subresource_to_none(self):
instance = self.plugin.return_value
dummy_id = _uuid()
body = {dummy_plugin.RESOURCE_NAME: {}}
self.api.put_json('/networks/id1' + _get_path('dummies', id=dummy_id),
body)
instance.update_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1',
dummy=body)
def test_delete_sub_resource(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.delete('/networks/id1' + _get_path('dummies', id=dummy_id))
instance.delete_network_dummy.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1')
def test_sub_resource_member_actions(self):
instance = self.plugin.return_value
dummy_id = _uuid()
self.api.get('/networks/id1' + _get_path('dummies', id=dummy_id,
action='mactions'))
instance.mactions.assert_called_once_with(mock.ANY,
dummy_id,
network_id='id1')
# Note: since all resources use the same controller and validation
# logic, we actually get really good coverage from testing just networks.
class V2Views(base.BaseTestCase):
def _view(self, keys, collection, resource):
data = dict((key, 'value') for key in keys)
data['fake'] = 'value'
attr_info = attributes.RESOURCES[collection]
controller = v2_base.Controller(None, collection, resource, attr_info)
res = controller._view(context.get_admin_context(), data)
self.assertNotIn('fake', res)
for key in keys:
self.assertIn(key, res)
def test_network(self):
keys = ('id', 'name', 'subnets', 'admin_state_up', 'status',
'tenant_id')
self._view(keys, 'networks', 'network')
def test_port(self):
keys = ('id', 'network_id', 'mac_address', 'fixed_ips',
'device_id', 'admin_state_up', 'tenant_id', 'status')
self._view(keys, 'ports', 'port')
def test_subnet(self):
keys = ('id', 'network_id', 'tenant_id', 'gateway_ip',
'ip_version', 'cidr', 'enable_dhcp')
self._view(keys, 'subnets', 'subnet')
class NotificationTest(APIv2TestBase):
def setUp(self):
super(NotificationTest, self).setUp()
fake_notifier.reset()
def _resource_op_notifier(self, opname, resource, expected_errors=False):
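        # Perform the requested operation via the API and verify that matching
        # <resource>.<opname>.start and <resource>.<opname>.end notifications
        # were emitted with INFO priority.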
initial_input = {resource: {'name': 'myname'}}
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
if opname == 'create':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.post_json(
_get_path('networks'),
initial_input, expect_errors=expected_errors)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input, expect_errors=expected_errors)
expected_code = exc.HTTPOk.code
if opname == 'delete':
initial_input[resource]['tenant_id'] = _uuid()
res = self.api.delete(
_get_path('networks', id=_uuid()),
expect_errors=expected_errors)
expected_code = exc.HTTPNoContent.code
expected_events = ('.'.join([resource, opname, "start"]),
'.'.join([resource, opname, "end"]))
self.assertEqual(len(expected_events),
len(fake_notifier.NOTIFICATIONS))
for msg, event in zip(fake_notifier.NOTIFICATIONS, expected_events):
self.assertEqual('INFO', msg['priority'])
self.assertEqual(event, msg['event_type'])
if opname == 'delete' and event == 'network.delete.end':
self.assertIn('payload', msg)
resource = msg['payload']
self.assertIn('network_id', resource)
self.assertIn('network', resource)
self.assertEqual(expected_code, res.status_int)
    def test_network_create_notifier(self):
        self._resource_op_notifier('create', 'network')
    def test_network_delete_notifier(self):
        self._resource_op_notifier('delete', 'network')
    def test_network_update_notifier(self):
        self._resource_op_notifier('update', 'network')
class RegistryNotificationTest(APIv2TestBase):
def setUp(self):
# This test does not have database support so tracking cannot be used
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(RegistryNotificationTest, self).setUp()
def _test_registry_notify(self, opname, resource, initial_input=None):
instance = self.plugin.return_value
instance.get_networks.return_value = initial_input
instance.get_networks_count.return_value = 0
expected_code = exc.HTTPCreated.code
with mock.patch.object(registry, 'publish') as notify:
if opname == 'create':
res = self.api.post_json(
_get_path('networks'),
initial_input)
if opname == 'update':
res = self.api.put_json(
_get_path('networks', id=_uuid()),
initial_input)
expected_code = exc.HTTPOk.code
if opname == 'delete':
res = self.api.delete(_get_path('networks', id=_uuid()))
expected_code = exc.HTTPNoContent.code
self.assertTrue(notify.called)
self.assertEqual(expected_code, res.status_int)
def test_network_create_registry_notify(self):
input = {'network': {'name': 'net',
'tenant_id': _uuid()}}
self._test_registry_notify('create', 'network', input)
def test_network_delete_registry_notify(self):
self._test_registry_notify('delete', 'network')
def test_network_update_registry_notify(self):
input = {'network': {'name': 'net'}}
self._test_registry_notify('update', 'network', input)
def test_networks_create_bulk_registry_notify(self):
input = {'networks': [{'name': 'net1',
'tenant_id': _uuid()},
{'name': 'net2',
'tenant_id': _uuid()}]}
self._test_registry_notify('create', 'network', input)
class QuotaTest(APIv2TestBase):
def setUp(self):
# This test does not have database support so tracking cannot be used
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(QuotaTest, self).setUp()
# Use mock to let the API use a different QuotaEngine instance for
# unit test in this class. This will ensure resource are registered
# again and instantiated with neutron.quota.resource.CountableResource
replacement_registry = resource_registry.ResourceRegistry()
registry_patcher = mock.patch('neutron.quota.resource_registry.'
'ResourceRegistry.get_instance')
mock_registry = registry_patcher.start().return_value
mock_registry.get_resource = replacement_registry.get_resource
mock_registry.resources = replacement_registry.resources
# Register a resource
replacement_registry.register_resource_by_name('network')
def test_create_network_quota(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.return_value = 1
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertIn("Quota exceeded for resources",
res.json['NeutronError']['message'])
def test_create_network_quota_no_counts(self):
cfg.CONF.set_override('quota_network', 1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
full_input = {'network': {'admin_state_up': True, 'subnets': []}}
full_input['network'].update(initial_input['network'])
instance = self.plugin.return_value
instance.get_networks_count.side_effect = (
NotImplementedError())
instance.get_networks.return_value = ["foo"]
res = self.api.post_json(
_get_path('networks'), initial_input, expect_errors=True)
instance.get_networks_count.assert_called_with(mock.ANY,
filters=mock.ANY)
self.assertIn("Quota exceeded for resources",
res.json['NeutronError']['message'])
def test_create_network_quota_without_limit(self):
cfg.CONF.set_override('quota_network', -1, group='QUOTAS')
initial_input = {'network': {'name': 'net1', 'tenant_id': _uuid()}}
instance = self.plugin.return_value
instance.get_networks_count.return_value = 3
res = self.api.post_json(
_get_path('networks'), initial_input)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
class ExtensionTestCase(base.BaseTestCase):
def setUp(self):
# This test does not have database support so tracking cannot be used
cfg.CONF.set_override('track_quota_usage', False, group='QUOTAS')
super(ExtensionTestCase, self).setUp()
plugin = 'neutron.neutron_plugin_base_v2.NeutronPluginBaseV2'
# Ensure existing ExtensionManager is not used
extensions.PluginAwareExtensionManager._instance = None
self.useFixture(fixture.APIDefinitionFixture())
# Create the default configurations
self.config_parse()
# Update the plugin and extensions path
self.setup_coreplugin(plugin, load_plugins=False)
cfg.CONF.set_override('api_extensions_path', EXTDIR)
self._plugin_patcher = mock.patch(plugin, autospec=True)
self.plugin = self._plugin_patcher.start()
# Instantiate mock plugin and enable the V2attributes extension
self.plugin.return_value.supported_extension_aliases = ["v2attrs"]
api = router.APIRouter()
self.api = webtest.TestApp(api)
quota.QUOTAS._driver = None
cfg.CONF.set_override('quota_driver', 'neutron.quota.ConfDriver',
group='QUOTAS')
def test_extended_create(self):
net_id = _uuid()
tenant_id = _uuid()
initial_input = {'network': {'name': 'net1', 'tenant_id': tenant_id,
'project_id': tenant_id,
'v2attrs:something_else': "abc"}}
data = {'network': {'admin_state_up': True, 'shared': False}}
data['network'].update(initial_input['network'])
return_value = {'subnets': [], 'status': "ACTIVE",
'id': net_id,
'v2attrs:something': "123"}
return_value.update(data['network'].copy())
instance = self.plugin.return_value
instance.create_network.return_value = return_value
instance.get_networks_count.return_value = 0
res = self.api.post_json(_get_path('networks'), initial_input)
instance.create_network.assert_called_with(mock.ANY,
network=data)
self.assertEqual(exc.HTTPCreated.code, res.status_int)
self.assertIn('network', res.json)
net = res.json['network']
self.assertEqual(net_id, net['id'])
self.assertEqual("ACTIVE", net['status'])
self.assertEqual("123", net['v2attrs:something'])
self.assertNotIn('v2attrs:something_else', net)
class TestSubresourcePlugin(object):
def get_network_dummies(self, context, network_id,
filters=None, fields=None):
return []
def get_network_dummy(self, context, id, network_id,
fields=None):
return {}
def create_network_dummy(self, context, network_id, dummy):
return {}
def update_network_dummy(self, context, id, network_id, dummy):
return {}
def delete_network_dummy(self, context, id, network_id):
return
def mactions(self, context, id, network_id):
return
class ListArgsTestCase(base.BaseTestCase):
def test_list_args(self):
path = '/?fields=4&foo=3&fields=2&bar=1'
request = webob.Request.blank(path)
expect_val = ['2', '4']
actual_val = api_common.list_args(request, 'fields')
self.assertEqual(expect_val, sorted(actual_val))
def test_list_args_with_empty(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
self.assertEqual([], api_common.list_args(request, 'fields'))
class SortingTestCase(base.BaseTestCase):
def test_get_sorts(self):
path = '/?sort_key=foo&sort_dir=desc&sort_key=bar&sort_dir=asc'
request = webob.Request.blank(path)
attr_info = {'foo': {'key': 'val'}, 'bar': {'key': 'val'}}
expect_val = [('foo', False), ('bar', True)]
actual_val = api_common.get_sorts(request, attr_info)
self.assertEqual(expect_val, actual_val)
def test_get_sorts_with_project_id(self):
path = '/?sort_key=project_id&sort_dir=desc'
request = webob.Request.blank(path)
attr_info = {'tenant_id': {'key': 'val'}}
expect_val = [('project_id', False)]
actual_val = api_common.get_sorts(request, attr_info)
self.assertEqual(expect_val, actual_val)
class FiltersTestCase(base.BaseTestCase):
def test_all_skip_args(self):
path = '/?fields=4&fields=3&fields=2&fields=1'
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {},
["fields"]))
@mock.patch('neutron.api.api_common.is_empty_string_filtering_supported',
return_value=False)
def test_blank_values(self, mock_is_supported):
path = '/?foo=&bar=&baz=&qux='
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {}))
@mock.patch('neutron.api.api_common.is_empty_string_filtering_supported',
return_value=True)
def test_blank_values_with_filtering_supported(self, mock_is_supported):
path = '/?foo=&bar=&baz=&qux='
request = webob.Request.blank(path)
self.assertEqual({'foo': [''], 'bar': [''], 'baz': [''], 'qux': ['']},
api_common.get_filters(request, {}))
def test_no_attr_info(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, {})
self.assertEqual(expect_val, actual_val)
def test_attr_info_with_project_info_populated(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'tenant_id': {'key': 'val'}}
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(expect_val, actual_val)
expect_attr_info = {'tenant_id': {'key': 'val'},
'project_id': {'key': 'val'}}
self.assertEqual(expect_attr_info, attr_info)
def test_attr_info_without_conversion(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'key': 'val'}}
expect_val = {'foo': ['4'], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(expect_val, actual_val)
def test_attr_info_with_convert_list_to(self):
path = '/?foo=key=4&bar=3&foo=key=2&qux=1'
request = webob.Request.blank(path)
attr_info = {
'foo': {
'convert_list_to': converters.convert_kvp_list_to_dict,
}
}
expect_val = {'foo': {'key': ['2', '4']}, 'bar': ['3'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertOrderedEqual(expect_val, actual_val)
def test_attr_info_with_convert_to(self):
path = '/?foo=4&bar=3&baz=2&qux=1'
request = webob.Request.blank(path)
attr_info = {'foo': {'convert_to': converters.convert_to_int}}
expect_val = {'foo': [4], 'bar': ['3'], 'baz': ['2'], 'qux': ['1']}
actual_val = api_common.get_filters(request, attr_info)
self.assertEqual(expect_val, actual_val)
def test_attr_info_with_base_db_attributes(self):
path = '/?__contains__=1&__class__=2'
request = webob.Request.blank(path)
self.assertEqual({}, api_common.get_filters(request, {}))
class CreateResourceTestCase(base.BaseTestCase):
def test_resource_creation(self):
resource = v2_base.create_resource('fakes', 'fake', None, {})
        self.assertIsInstance(resource, webob.dec.wsgify)
userDataSync.contribution.ts
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { IWorkbenchContributionsRegistry, Extensions as WorkbenchExtensions, IWorkbenchContribution } from 'vs/workbench/common/contributions';
import { Registry } from 'vs/platform/registry/common/platform';
import { LifecyclePhase } from 'vs/platform/lifecycle/common/lifecycle';
import { UserDataSyncWorkbenchContribution } from 'vs/workbench/contrib/userDataSync/browser/userDataSync';
import { IConfigurationService, ConfigurationTarget } from 'vs/platform/configuration/common/configuration';
import { IUserDataSyncEnablementService } from 'vs/platform/userDataSync/common/userDataSync';
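// Reads the legacy 'sync.*' settings, mirrors them into the user data sync
// enablement service, and then removes them from the user configuration.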
class UserDataSyncSettingsMigrationContribution implements IWorkbenchContribution {
constructor(
@IConfigurationService private readonly configurationService: IConfigurationService,
@IUserDataSyncEnablementService userDataSyncEnablementService: IUserDataSyncEnablementService,
) {
if (!configurationService.getValue('sync.enableSettings')) {
userDataSyncEnablementService.setResourceEnablement('settings', false);
}
if (!configurationService.getValue('sync.enableKeybindings')) {
userDataSyncEnablementService.setResourceEnablement('keybindings', false);
}
if (!configurationService.getValue('sync.enableUIState')) {
userDataSyncEnablementService.setResourceEnablement('globalState', false);
}
if (!configurationService.getValue('sync.enableExtensions')) {
userDataSyncEnablementService.setResourceEnablement('extensions', false);
}
if (configurationService.getValue('sync.enable')) {
userDataSyncEnablementService.setEnablement(true);
}
this.removeFromConfiguration();
}
private async removeFromConfiguration(): Promise<void> {
await this.configurationService.updateValue('sync.enable', undefined, ConfigurationTarget.USER);
await this.configurationService.updateValue('sync.enableSettings', undefined, ConfigurationTarget.USER);
await this.configurationService.updateValue('sync.enableKeybindings', undefined, ConfigurationTarget.USER);
await this.configurationService.updateValue('sync.enableUIState', undefined, ConfigurationTarget.USER);
await this.configurationService.updateValue('sync.enableExtensions', undefined, ConfigurationTarget.USER);
}
}
const workbenchRegistry = Registry.as<IWorkbenchContributionsRegistry>(WorkbenchExtensions.Workbench);
workbenchRegistry.registerWorkbenchContribution(UserDataSyncWorkbenchContribution, LifecyclePhase.Ready);
workbenchRegistry.registerWorkbenchContribution(UserDataSyncSettingsMigrationContribution, LifecyclePhase.Ready);
string_cache.rs

use crate::binxml::name::{BinXmlName, BinXmlNameLink};
use crate::err::DeserializationResult;
use crate::ChunkOffset;
use log::trace;
use std::borrow::BorrowMut;
use std::collections::HashMap;
use std::io::{Cursor, Seek, SeekFrom};
#[derive(Debug)]
pub struct StringCache(HashMap<ChunkOffset, BinXmlName>);
impl StringCache {
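    // Builds the cache by seeking to each non-zero offset and following the
    // `next_string` links of each BinXmlNameLink, keying every parsed name
    // by its chunk offset.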
pub fn populate(data: &[u8], offsets: &[ChunkOffset]) -> DeserializationResult<Self> {
let mut cache = HashMap::new();
let mut cursor = Cursor::new(data);
let cursor_ref = cursor.borrow_mut();
for &offset in offsets.iter().filter(|&&offset| offset > 0) {
try_seek!(cursor_ref, offset, "first xml string")?;
loop {
let string_position = cursor_ref.position() as ChunkOffset;
let link = BinXmlNameLink::from_stream(cursor_ref)?;
let name = BinXmlName::from_stream(cursor_ref)?;
cache.insert(string_position, name);
trace!("\tNext string will be at {:?}", link.next_string);
match link.next_string {
Some(offset) => {
try_seek!(cursor_ref, offset, "next xml string")?;
}
None => break,
}
}
}
Ok(StringCache(cache))
}
    pub fn get_cached_string(&self, offset: ChunkOffset) -> Option<&BinXmlName> {
self.0.get(&offset)
}
pub fn len(&self) -> usize {
self.0.len()
}
}
coverage.py

"""
Calculate coverage statistics, cf. https://github.com/lexibank/abvdoceanic/issues/3
"""
from pathlib import Path
from cltoolkit import Wordlist
from pycldf import Dataset
from pyclts import CLTS
from tabulate import tabulate
from cldfbench.cli_util import with_dataset, get_dataset
def run(args):
    path = (Path(__file__).parents[1]).joinpath("cldf/cldf-metadata.json")
# Load data
bipa = CLTS().bipa
wl = Wordlist([ Dataset.from_metadata(path) ], ts=bipa)
# Create coverage table
args.log.info("Creating coverage table...")
table = []
for language in wl.languages:
table += [[language.name, len(language.concepts), len(language.forms_with_sounds),
len(language.sound_inventory.consonants), len(language.sound_inventory.vowels)]]
    return tabulate(table, headers=["Name", "Concepts", "Forms", "Consonants", "Vowels"], tablefmt="pipe")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.